Compare commits
150 Commits
v0.4.0
...
fix/allowl
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c3138990a8 | ||
|
|
e5c6caba1f | ||
|
|
ec44829c30 | ||
|
|
c452f99a45 | ||
|
|
d66a88f467 | ||
|
|
8da03b1b8c | ||
|
|
652fca5b80 | ||
|
|
de15b32325 | ||
|
|
6f961c5ec2 | ||
|
|
20bf14e91c | ||
|
|
e860731c01 | ||
|
|
f556b60ce4 | ||
|
|
422726f1c8 | ||
|
|
dd021d8642 | ||
|
|
f20c72a829 | ||
|
|
44cd17cf84 | ||
|
|
fb0a21e5e6 | ||
|
|
66b937f710 | ||
|
|
524aed7fa1 | ||
|
|
11e3fdeae6 | ||
|
|
636c45b3d7 | ||
|
|
f602687d93 | ||
|
|
b8b0fda1e0 | ||
|
|
9a3fae9a0c | ||
|
|
a31ac36957 | ||
|
|
9001b14fed | ||
|
|
63ac69a222 | ||
|
|
1f6bdff8f8 | ||
|
|
643d6b01e1 | ||
|
|
17c8e70aa3 | ||
|
|
389ac09907 | ||
|
|
5308e9648c | ||
|
|
819614fa7d | ||
|
|
fab8b698d8 | ||
|
|
a6f23a5ddb | ||
|
|
27dfaab360 | ||
|
|
b2ed2e6aec | ||
|
|
79ecb73d87 | ||
|
|
bf5565ac26 | ||
|
|
679b346246 | ||
|
|
039254280b | ||
|
|
1b2f682026 | ||
|
|
82cc588c67 | ||
|
|
bc54ea930f | ||
|
|
7001ba2e51 | ||
|
|
6887c8e02e | ||
|
|
7f52bd8a32 | ||
|
|
c98e6c3ea9 | ||
|
|
186e709373 | ||
|
|
bacc49667a | ||
|
|
7d0fe19462 | ||
|
|
1632fc36f2 | ||
|
|
0a73cdf4db | ||
|
|
2b0c4e3d5e | ||
|
|
357c710ec4 | ||
|
|
7742858b7b | ||
|
|
1239ed0e72 | ||
|
|
cb54ab3dfc | ||
|
|
aa8923b2c6 | ||
|
|
14efc51340 | ||
|
|
e4350ae81c | ||
|
|
766935ec97 | ||
|
|
efe3669540 | ||
|
|
ad34fe2d9e | ||
|
|
80fcfd10ae | ||
|
|
e4a8893214 | ||
|
|
d979cd9505 | ||
|
|
8c421b9fa3 | ||
|
|
ad7884f2f6 | ||
|
|
6a70ab0f1b | ||
|
|
0b883d1c0d | ||
|
|
7f46f6271e | ||
|
|
f3ca83246c | ||
|
|
da93a3cde3 | ||
|
|
98da440c84 | ||
|
|
4e5b88496c | ||
|
|
d5f7ce9e2d | ||
|
|
cc704be590 | ||
|
|
ff1200eb10 | ||
|
|
49535568d9 | ||
|
|
cd1beedf38 | ||
|
|
be52e5c305 | ||
|
|
669498e85f | ||
|
|
d325b92e44 | ||
|
|
261fd2e148 | ||
|
|
30e46e549c | ||
|
|
ac49658c2b | ||
|
|
5265f571d0 | ||
|
|
0ebd924825 | ||
|
|
06d4e91cd2 | ||
|
|
71dbb138bc | ||
|
|
fbf3ca6d11 | ||
|
|
a84f2e7f1d | ||
|
|
7aee90c99b | ||
|
|
1304b1c02c | ||
|
|
59397ecce4 | ||
|
|
f849a4d65f | ||
|
|
962b400f4c | ||
|
|
1f4063d5db | ||
|
|
c6bc307f0a | ||
|
|
c5208e934d | ||
|
|
d69b79451e | ||
|
|
0b194256a9 | ||
|
|
e0c1997056 | ||
|
|
9e07064c94 | ||
|
|
43cedf11f7 | ||
|
|
cd6a54c652 | ||
|
|
9f89627c5a | ||
|
|
e7e5c173f2 | ||
|
|
c6b35045d8 | ||
|
|
10f1602803 | ||
|
|
41a97bb930 | ||
|
|
c4e733c8ef | ||
|
|
4020776b8e | ||
|
|
763ba1de91 | ||
|
|
51dc06690e | ||
|
|
fb89b78226 | ||
|
|
64c4d146ec | ||
|
|
9c290b6ef4 | ||
|
|
c836903db5 | ||
|
|
5e5a6544bc | ||
|
|
227af04564 | ||
|
|
4c58ff49b0 | ||
|
|
d261e8bc86 | ||
|
|
2de337ac36 | ||
|
|
5810ee5aac | ||
|
|
06850de728 | ||
|
|
995916d01b | ||
|
|
7aca3b1991 | ||
|
|
b7d64a9707 | ||
|
|
c333705a0e | ||
|
|
50d17ae118 | ||
|
|
5495107c9e | ||
|
|
02e83ccd72 | ||
|
|
ccbf893b92 | ||
|
|
cd90b50d68 | ||
|
|
5866ff1ba1 | ||
|
|
9a3de2f231 | ||
|
|
6fdadd637c | ||
|
|
9041ccc2e1 | ||
|
|
c9f1d98f45 | ||
|
|
6a8e47bbb5 | ||
|
|
de50720834 | ||
|
|
216ec76640 | ||
|
|
08aaebec7e | ||
|
|
3e40f795da | ||
|
|
8dcebaaca6 | ||
|
|
a48809fc25 | ||
|
|
e94e75101f | ||
|
|
32f50cd254 |
19
.SRCINFO
Normal file
19
.SRCINFO
Normal file
@@ -0,0 +1,19 @@
|
||||
pkgbase = numa-git
|
||||
pkgdesc = Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS
|
||||
pkgver = 0.10.1.r0.g0000000
|
||||
pkgrel = 1
|
||||
url = https://github.com/razvandimescu/numa
|
||||
arch = x86_64
|
||||
license = MIT
|
||||
options = !lto
|
||||
makedepends = cargo
|
||||
makedepends = git
|
||||
depends = gcc-libs
|
||||
depends = glibc
|
||||
provides = numa
|
||||
conflicts = numa
|
||||
backup = etc/numa.toml
|
||||
source = numa::git+https://github.com/razvandimescu/numa.git
|
||||
sha256sums = SKIP
|
||||
|
||||
pkgname = numa-git
|
||||
34
.github/dependabot.yml
vendored
Normal file
34
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "cargo"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
commit-message:
|
||||
prefix: "chore(deps)"
|
||||
groups:
|
||||
minor-and-patch:
|
||||
patterns: ["*"]
|
||||
update-types: ["minor", "patch"]
|
||||
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
commit-message:
|
||||
prefix: "chore(deps)"
|
||||
groups:
|
||||
minor-and-patch:
|
||||
patterns: ["*"]
|
||||
update-types: ["minor", "patch"]
|
||||
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
commit-message:
|
||||
prefix: "chore(deps)"
|
||||
groups:
|
||||
minor-and-patch:
|
||||
patterns: ["*"]
|
||||
update-types: ["minor", "patch"]
|
||||
33
.github/workflows/ci.yml
vendored
33
.github/workflows/ci.yml
vendored
@@ -13,7 +13,7 @@ jobs:
|
||||
check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rustfmt, clippy
|
||||
@@ -24,3 +24,34 @@ jobs:
|
||||
run: cargo clippy -- -D warnings
|
||||
- name: test
|
||||
run: cargo test
|
||||
- name: audit
|
||||
run: cargo install cargo-audit && cargo audit
|
||||
|
||||
check-macos:
|
||||
runs-on: macos-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: clippy
|
||||
run: cargo clippy -- -D warnings
|
||||
- name: test
|
||||
run: cargo test
|
||||
|
||||
check-windows:
|
||||
runs-on: windows-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: build
|
||||
run: cargo build
|
||||
- name: clippy
|
||||
run: cargo clippy -- -D warnings
|
||||
- name: test
|
||||
run: cargo test
|
||||
- name: Upload binary
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: numa-windows-x86_64
|
||||
path: target/debug/numa.exe
|
||||
|
||||
77
.github/workflows/homebrew-bump.yml
vendored
Normal file
77
.github/workflows/homebrew-bump.yml
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
name: Bump Homebrew Tap
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
version:
|
||||
description: 'Version to bump (e.g. 0.10.0 or v0.10.0)'
|
||||
type: string
|
||||
required: true
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: 'Version to bump (e.g. 0.10.0 or v0.10.0)'
|
||||
required: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
bump:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Determine version
|
||||
id: ver
|
||||
env:
|
||||
INPUT_VERSION: ${{ inputs.version }}
|
||||
run: |
|
||||
V="${INPUT_VERSION#v}"
|
||||
echo "version=$V" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Fetch sha256 checksums from release assets
|
||||
id: shas
|
||||
env:
|
||||
V: ${{ steps.ver.outputs.version }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
base="https://github.com/razvandimescu/numa/releases/download/v${V}"
|
||||
for t in macos-aarch64 macos-x86_64 linux-aarch64 linux-x86_64; do
|
||||
sha=$(curl -fsSL "${base}/numa-${t}.tar.gz.sha256" | awk '{print $1}')
|
||||
if [ -z "$sha" ]; then
|
||||
echo "ERROR: failed to fetch sha256 for $t" >&2
|
||||
exit 1
|
||||
fi
|
||||
key=$(echo "$t" | tr '[:lower:]-' '[:upper:]_')
|
||||
echo "SHA_${key}=${sha}" >> "$GITHUB_ENV"
|
||||
done
|
||||
|
||||
- name: Clone homebrew-tap
|
||||
env:
|
||||
HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
|
||||
run: |
|
||||
git clone "https://x-access-token:${HOMEBREW_TAP_GITHUB_TOKEN}@github.com/razvandimescu/homebrew-tap.git" tap
|
||||
|
||||
- name: Update formula
|
||||
env:
|
||||
VERSION: ${{ steps.ver.outputs.version }}
|
||||
run: |
|
||||
python3 scripts/update-homebrew-formula.py tap/numa.rb
|
||||
echo "--- updated numa.rb ---"
|
||||
cat tap/numa.rb
|
||||
|
||||
- name: Commit and push
|
||||
working-directory: tap
|
||||
env:
|
||||
V: ${{ steps.ver.outputs.version }}
|
||||
run: |
|
||||
if git diff --quiet; then
|
||||
echo "numa.rb already at v${V}, nothing to commit"
|
||||
exit 0
|
||||
fi
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||
git add numa.rb
|
||||
git commit -m "chore: bump numa to v${V}"
|
||||
git push origin main
|
||||
159
.github/workflows/publish-aur.yml
vendored
Normal file
159
.github/workflows/publish-aur.yml
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
# `publish-aur.yml` - Arch Linux AUR Package Workflow
|
||||
# --------------------
|
||||
# This workflow automates the validation and publishing of the 'numa-git' package to the
|
||||
# Arch User Repository (AUR). The AUR is a community-driven repository for Arch Linux users.
|
||||
#
|
||||
# Workflow Overview:
|
||||
# 1. Validate: Builds and tests the package for Arch Linux x86_64 using a clean
|
||||
# Arch Linux container.
|
||||
# 2. Audit: Checks Rust dependencies for known security vulnerabilities using
|
||||
# 'cargo-audit'.
|
||||
# 3. Publish: If on the 'main' branch, it pushes the updated PKGBUILD and
|
||||
# .SRCINFO to the AUR.
|
||||
#
|
||||
# Security Best Practices:
|
||||
# - SHA Pinning: All GitHub Actions are pinned to a full-length commit SHA (e.g., v6.0.2 @ SHA)
|
||||
# to ensure the code is immutable and protects against supply-chain attacks where a tag
|
||||
# might be maliciously moved to a compromised commit.
|
||||
# - SSH Hygiene: Uses ssh-agent to keep the private key in memory rather than on disk.
|
||||
# - Audit: Runs 'cargo audit' to prevent publishing known vulnerable dependencies.
|
||||
|
||||
name: Publish - Arch Linux AUR Package
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
# The 'validate' job ensures that the PKGBUILD is correct and the software builds/tests
|
||||
# successfully on Arch Linux before we attempt to publish it.
|
||||
validate:
|
||||
name: Validate PKGBUILD (${{ matrix.arch }})
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
arch: [x86_64]
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Build and Test Package
|
||||
timeout-minutes: 60
|
||||
env:
|
||||
AUR_PKGNAME: ${{ secrets.AUR_PACKAGE_NAME }}
|
||||
run: |
|
||||
# We use a temporary directory to avoid Docker permission issues with the workspace.
|
||||
mkdir -p build-dir
|
||||
cp PKGBUILD build-dir/
|
||||
|
||||
docker run --rm -v $PWD/build-dir:/pkg -w /pkg archlinux:latest /bin/bash -c "
|
||||
# ARCH LINUX SECURITY REQUIREMENT:
|
||||
# 'makepkg' (the tool that builds Arch packages) refuses to run as root for safety.
|
||||
# We must create a standard user and give them sudo access.
|
||||
|
||||
# Install build-time dependencies.
|
||||
# 'base-devel' includes essential tools like gcc, make, and binutils.
|
||||
# Install 'rust' directly to avoid the interactive virtual-package
|
||||
# prompt for 'cargo' on current Arch images.
|
||||
pacman -Syu --noconfirm --needed base-devel rust git sudo cargo-audit
|
||||
|
||||
useradd -m builduser
|
||||
chown -R builduser:builduser /pkg
|
||||
|
||||
# Allow the build user to install dependencies during the build process.
|
||||
echo 'builduser ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/builduser
|
||||
|
||||
# Fetch the source tree first so pkgver() and cargo-audit have a
|
||||
# real Cargo.lock to inspect.
|
||||
sudo -u builduser makepkg -o --nobuild --nocheck --nodeps --noprepare
|
||||
|
||||
# SECURITY AUDIT:
|
||||
# Fail early if any dependencies have known security vulnerabilities.
|
||||
sudo -u builduser sh -lc 'cd /pkg/src/numa && cargo audit'
|
||||
|
||||
# BUILD & TEST:
|
||||
# 'makepkg -s' will:
|
||||
# 1. Download source files (cloning this repo)
|
||||
# 2. Run prepare(), build(), and check() (running cargo test)
|
||||
# 3. Create the final .pkg.tar.zst package
|
||||
sudo -u builduser makepkg -s --noconfirm
|
||||
"
|
||||
|
||||
# The 'publish' job updates the AUR repository with our latest PKGBUILD and .SRCINFO.
|
||||
publish:
|
||||
name: Publish to AUR
|
||||
needs: validate
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
# Securely configure SSH for AUR access.
|
||||
- name: Configure SSH
|
||||
run: |
|
||||
mkdir -p ~/.ssh
|
||||
# Official AUR Ed25519 fingerprint (prevents Man-in-the-Middle attacks).
|
||||
echo "aur.archlinux.org ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEuBKrPzbawxA/k2g6NcyV5jmqwJ2s+zpgZGZ7tpLIcN" >> ~/.ssh/known_hosts
|
||||
|
||||
# Use ssh-agent to keep the private key in memory rather than writing it to disk.
|
||||
eval $(ssh-agent -s)
|
||||
echo "${{ secrets.AUR_SSH_PRIVATE_KEY }}" | tr -d '\r' | ssh-add -
|
||||
|
||||
# Export the agent socket so subsequent 'git' commands can use it.
|
||||
echo "SSH_AUTH_SOCK=$SSH_AUTH_SOCK" >> $GITHUB_ENV
|
||||
echo "SSH_AGENT_PID=$SSH_AGENT_PID" >> $GITHUB_ENV
|
||||
|
||||
- name: Push to AUR
|
||||
env:
|
||||
AUR_PKGNAME: ${{ secrets.AUR_PACKAGE_NAME }}
|
||||
AUR_EMAIL: ${{ secrets.AUR_EMAIL }}
|
||||
AUR_USER: ${{ secrets.AUR_USERNAME }}
|
||||
run: |
|
||||
# AUR repos are managed via Git. Each package has its own repo at:
|
||||
# ssh://aur@aur.archlinux.org/<package-name>.git
|
||||
git clone ssh://aur@aur.archlinux.org/$AUR_PKGNAME.git aur-repo
|
||||
|
||||
cp PKGBUILD aur-repo/
|
||||
cd aur-repo
|
||||
|
||||
# METADATA GENERATION:
|
||||
# '.SRCINFO' is a machine-readable version of the PKGBUILD.
|
||||
# We must run this as a non-root user ('builduser') inside the container.
|
||||
docker run --rm -v $(pwd):/pkg archlinux:latest /bin/bash -c "
|
||||
pacman -Syu --noconfirm --needed binutils git sudo
|
||||
useradd -m builduser
|
||||
chown -R builduser:builduser /pkg
|
||||
cd /pkg
|
||||
sudo -u builduser git config --global --add safe.directory '*'
|
||||
# makepkg -od fetches the source first so pkgver() can calculate the version.
|
||||
# --noprepare skips the prepare() function, which invokes cargo and would
|
||||
# otherwise require a full rust toolchain in this metadata-only container.
|
||||
# pkgver() runs before prepare(), so .SRCINFO still gets the correct version.
|
||||
sudo -u builduser makepkg -od --noprepare && sudo -u builduser makepkg --printsrcinfo > .SRCINFO
|
||||
"
|
||||
|
||||
# Reclaim ownership: the in-container 'chown -R builduser:builduser /pkg'
|
||||
# propagates through the bind mount, leaving .git/ owned by the container's
|
||||
# builduser UID. Without this, subsequent 'git config' on the host fails with
|
||||
# "could not lock config file .git/config: Permission denied".
|
||||
sudo chown -R "$(id -u):$(id -g)" .
|
||||
|
||||
# Set the commit identity using secrets for security and auditability.
|
||||
git config user.name "$AUR_USER"
|
||||
git config user.email "$AUR_EMAIL"
|
||||
|
||||
# Stage and commit both the human-readable PKGBUILD and machine-readable .SRCINFO.
|
||||
git add PKGBUILD .SRCINFO
|
||||
|
||||
if ! git diff --cached --quiet; then
|
||||
git commit -m "chore: update PKGBUILD to ${{ github.sha }}"
|
||||
git push origin master
|
||||
else
|
||||
echo "No changes to commit (metadata and PKGBUILD are already up-to-date)."
|
||||
fi
|
||||
73
.github/workflows/release.yml
vendored
73
.github/workflows/release.yml
vendored
@@ -19,53 +19,84 @@ jobs:
|
||||
- target: aarch64-apple-darwin
|
||||
os: macos-latest
|
||||
name: numa-macos-aarch64
|
||||
- target: x86_64-unknown-linux-gnu
|
||||
- target: x86_64-unknown-linux-musl
|
||||
os: ubuntu-latest
|
||||
name: numa-linux-x86_64
|
||||
- target: aarch64-unknown-linux-gnu
|
||||
- target: aarch64-unknown-linux-musl
|
||||
os: ubuntu-latest
|
||||
name: numa-linux-aarch64
|
||||
- target: x86_64-pc-windows-msvc
|
||||
os: windows-latest
|
||||
name: numa-windows-x86_64
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
targets: ${{ matrix.target }}
|
||||
|
||||
- name: Install cross-compilation tools
|
||||
if: matrix.target == 'aarch64-unknown-linux-gnu'
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc-aarch64-linux-gnu
|
||||
- name: Install musl tools (x86_64)
|
||||
if: matrix.target == 'x86_64-unknown-linux-musl'
|
||||
run: sudo apt-get update && sudo apt-get install -y musl-tools
|
||||
|
||||
- name: Build
|
||||
- name: Install cross (aarch64)
|
||||
if: matrix.target == 'aarch64-unknown-linux-musl'
|
||||
run: cargo install cross
|
||||
|
||||
- name: Build (native)
|
||||
if: matrix.target != 'aarch64-unknown-linux-musl'
|
||||
run: cargo build --release --target ${{ matrix.target }}
|
||||
env:
|
||||
CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc
|
||||
|
||||
- name: Package
|
||||
- name: Build (cross)
|
||||
if: matrix.target == 'aarch64-unknown-linux-musl'
|
||||
run: cross build --release --target ${{ matrix.target }}
|
||||
|
||||
- name: Package (Unix)
|
||||
if: runner.os != 'Windows'
|
||||
run: |
|
||||
cd target/${{ matrix.target }}/release
|
||||
tar czf ../../../${{ matrix.name }}.tar.gz numa
|
||||
cd ../../..
|
||||
sha256sum ${{ matrix.name }}.tar.gz > ${{ matrix.name }}.tar.gz.sha256
|
||||
sha256sum ${{ matrix.name }}.tar.gz > ${{ matrix.name }}.tar.gz.sha256 || shasum -a 256 ${{ matrix.name }}.tar.gz > ${{ matrix.name }}.tar.gz.sha256
|
||||
|
||||
- name: Package (Windows)
|
||||
if: runner.os == 'Windows'
|
||||
shell: pwsh
|
||||
run: |
|
||||
Compress-Archive -Path "target/${{ matrix.target }}/release/numa.exe" -DestinationPath "${{ matrix.name }}.zip"
|
||||
(Get-FileHash "${{ matrix.name }}.zip" -Algorithm SHA256).Hash.ToLower() + " ${{ matrix.name }}.zip" | Out-File "${{ matrix.name }}.zip.sha256" -Encoding ascii
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v7
|
||||
with:
|
||||
name: ${{ matrix.name }}
|
||||
path: |
|
||||
${{ matrix.name }}.tar.gz
|
||||
${{ matrix.name }}.tar.gz.sha256
|
||||
${{ matrix.name }}.zip
|
||||
${{ matrix.name }}.zip.sha256
|
||||
|
||||
release:
|
||||
needs: build
|
||||
publish:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/download-artifact@v4
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Install Rust
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Publish to crates.io
|
||||
run: cargo publish
|
||||
env:
|
||||
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
|
||||
|
||||
release:
|
||||
needs: [build, publish]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/download-artifact@v8
|
||||
with:
|
||||
merge-multiple: true
|
||||
|
||||
@@ -75,4 +106,12 @@ jobs:
|
||||
generate_release_notes: true
|
||||
files: |
|
||||
*.tar.gz
|
||||
*.zip
|
||||
*.sha256
|
||||
|
||||
bump-homebrew:
|
||||
needs: release
|
||||
uses: ./.github/workflows/homebrew-bump.yml
|
||||
with:
|
||||
version: ${{ github.ref_name }}
|
||||
secrets: inherit
|
||||
|
||||
47
.github/workflows/static.yml
vendored
Normal file
47
.github/workflows/static.yml
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
# Simple workflow for deploying static content to GitHub Pages
|
||||
name: Deploy static content to Pages
|
||||
|
||||
on:
|
||||
# Runs on pushes targeting the default branch
|
||||
push:
|
||||
branches: ["main"]
|
||||
|
||||
# Allows you to run this workflow manually from the Actions tab
|
||||
workflow_dispatch:
|
||||
|
||||
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
|
||||
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
# Single deploy job since we're just deploying
|
||||
deploy:
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v6
|
||||
- name: Install pandoc
|
||||
run: sudo apt-get install -y pandoc
|
||||
- name: Generate blog HTML
|
||||
run: make blog
|
||||
- name: Setup Pages
|
||||
uses: actions/configure-pages@v6
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v4
|
||||
with:
|
||||
# Upload entire repository
|
||||
path: './site'
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v5
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,3 +1,5 @@
|
||||
/target
|
||||
/build-dir
|
||||
CLAUDE.md
|
||||
docs/
|
||||
site/blog/posts/
|
||||
|
||||
596
Cargo.lock
generated
596
Cargo.lock
generated
@@ -18,10 +18,25 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anstream"
|
||||
version = "0.6.21"
|
||||
name = "alloca"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a"
|
||||
checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "anes"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
|
||||
|
||||
[[package]]
|
||||
name = "anstream"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d"
|
||||
dependencies = [
|
||||
"anstyle",
|
||||
"anstyle-parse",
|
||||
@@ -34,15 +49,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "anstyle"
|
||||
version = "1.0.13"
|
||||
version = "1.0.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78"
|
||||
checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000"
|
||||
|
||||
[[package]]
|
||||
name = "anstyle-parse"
|
||||
version = "0.2.7"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
|
||||
checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e"
|
||||
dependencies = [
|
||||
"utf8parse",
|
||||
]
|
||||
@@ -68,10 +83,19 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "asn1-rs"
|
||||
version = "0.6.2"
|
||||
name = "arc-swap"
|
||||
version = "1.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048"
|
||||
checksum = "a07d1f37ff60921c83bdfc7407723bdefe89b44b98a9b772f225c8f9d67141a6"
|
||||
dependencies = [
|
||||
"rustversion",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "asn1-rs"
|
||||
version = "0.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60"
|
||||
dependencies = [
|
||||
"asn1-rs-derive",
|
||||
"asn1-rs-impl",
|
||||
@@ -79,15 +103,15 @@ dependencies = [
|
||||
"nom",
|
||||
"num-traits",
|
||||
"rusticata-macros",
|
||||
"thiserror 1.0.69",
|
||||
"thiserror",
|
||||
"time",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "asn1-rs-derive"
|
||||
version = "0.5.1"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490"
|
||||
checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -142,9 +166,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-lc-sys"
|
||||
version = "0.39.0"
|
||||
version = "0.39.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a"
|
||||
checksum = "83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"cmake",
|
||||
@@ -229,10 +253,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.57"
|
||||
name = "cast"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423"
|
||||
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e1e928d4b69e3077709075a938a05ffbedfa53a84c8f766efbf8220bb1ff60e1"
|
||||
dependencies = [
|
||||
"find-msvc-tools",
|
||||
"jobserver",
|
||||
@@ -253,19 +283,71 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
|
||||
|
||||
[[package]]
|
||||
name = "cmake"
|
||||
version = "0.1.57"
|
||||
name = "ciborium"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d"
|
||||
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
|
||||
dependencies = [
|
||||
"ciborium-io",
|
||||
"ciborium-ll",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ciborium-io"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
|
||||
|
||||
[[package]]
|
||||
name = "ciborium-ll"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
|
||||
dependencies = [
|
||||
"ciborium-io",
|
||||
"half",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "4.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b193af5b67834b676abd72466a96c1024e6a6ad978a1f484bd90b85c94041351"
|
||||
dependencies = [
|
||||
"clap_builder",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_builder"
|
||||
version = "4.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f"
|
||||
dependencies = [
|
||||
"anstyle",
|
||||
"clap_lex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "clap_lex"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9"
|
||||
|
||||
[[package]]
|
||||
name = "cmake"
|
||||
version = "0.1.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "colorchoice"
|
||||
version = "1.0.4"
|
||||
version = "1.0.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
|
||||
checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570"
|
||||
|
||||
[[package]]
|
||||
name = "compression-codecs"
|
||||
@@ -293,6 +375,72 @@ dependencies = [
|
||||
"cfg-if",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "criterion"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3"
|
||||
dependencies = [
|
||||
"alloca",
|
||||
"anes",
|
||||
"cast",
|
||||
"ciborium",
|
||||
"clap",
|
||||
"criterion-plot",
|
||||
"itertools",
|
||||
"num-traits",
|
||||
"oorandom",
|
||||
"page_size",
|
||||
"plotters",
|
||||
"rayon",
|
||||
"regex",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tinytemplate",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "criterion-plot"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea"
|
||||
dependencies = [
|
||||
"cast",
|
||||
"itertools",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.8.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51"
|
||||
dependencies = [
|
||||
"crossbeam-epoch",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-epoch"
|
||||
version = "0.9.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
|
||||
dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crossbeam-utils"
|
||||
version = "0.8.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
|
||||
|
||||
[[package]]
|
||||
name = "crunchy"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"
|
||||
|
||||
[[package]]
|
||||
name = "data-encoding"
|
||||
version = "2.10.0"
|
||||
@@ -301,9 +449,9 @@ checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea"
|
||||
|
||||
[[package]]
|
||||
name = "der-parser"
|
||||
version = "9.0.0"
|
||||
version = "10.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553"
|
||||
checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6"
|
||||
dependencies = [
|
||||
"asn1-rs",
|
||||
"displaydoc",
|
||||
@@ -340,10 +488,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
|
||||
|
||||
[[package]]
|
||||
name = "env_filter"
|
||||
version = "1.0.0"
|
||||
name = "either"
|
||||
version = "1.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f"
|
||||
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
|
||||
|
||||
[[package]]
|
||||
name = "env_filter"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "32e90c2accc4b07a8456ea0debdc2e7587bdd890680d71173a15d4ae604f6eef"
|
||||
dependencies = [
|
||||
"log",
|
||||
"regex",
|
||||
@@ -351,9 +505,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "env_logger"
|
||||
version = "0.11.9"
|
||||
version = "0.11.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d"
|
||||
checksum = "0621c04f2196ac3f488dd583365b9c09be011a4ab8b9f37248ffcc8f6198b56a"
|
||||
dependencies = [
|
||||
"anstream",
|
||||
"anstyle",
|
||||
@@ -368,6 +522,16 @@ version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
|
||||
|
||||
[[package]]
|
||||
name = "errno"
|
||||
version = "0.3.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.61.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "find-msvc-tools"
|
||||
version = "0.1.9"
|
||||
@@ -384,6 +548,12 @@ dependencies = [
|
||||
"miniz_oxide",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fnv"
|
||||
version = "1.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
|
||||
|
||||
[[package]]
|
||||
name = "form_urlencoded"
|
||||
version = "1.2.2"
|
||||
@@ -514,6 +684,36 @@ dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "h2"
|
||||
version = "0.4.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54"
|
||||
dependencies = [
|
||||
"atomic-waker",
|
||||
"bytes",
|
||||
"fnv",
|
||||
"futures-core",
|
||||
"futures-sink",
|
||||
"http",
|
||||
"indexmap",
|
||||
"slab",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "half"
|
||||
version = "2.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"crunchy",
|
||||
"zerocopy",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.16.1"
|
||||
@@ -575,6 +775,7 @@ dependencies = [
|
||||
"bytes",
|
||||
"futures-channel",
|
||||
"futures-core",
|
||||
"h2",
|
||||
"http",
|
||||
"http-body",
|
||||
"httparse",
|
||||
@@ -747,9 +948,9 @@ checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2"
|
||||
|
||||
[[package]]
|
||||
name = "iri-string"
|
||||
version = "0.7.10"
|
||||
version = "0.7.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a"
|
||||
checksum = "d8e7418f59cc01c88316161279a7f665217ae316b388e58a0d10e29f54f1e5eb"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
"serde",
|
||||
@@ -762,10 +963,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695"
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "1.0.17"
|
||||
name = "itertools"
|
||||
version = "0.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
|
||||
checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
|
||||
dependencies = [
|
||||
"either",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itoa"
|
||||
version = "1.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682"
|
||||
|
||||
[[package]]
|
||||
name = "jiff"
|
||||
@@ -803,10 +1013,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.91"
|
||||
version = "0.3.92"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c"
|
||||
checksum = "cc4c90f45aa2e6eacbe8645f77fdea542ac97a494bcd117a67df9ff4d611f995"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"futures-util",
|
||||
"once_cell",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
@@ -877,9 +1089,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "1.1.1"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
|
||||
checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"wasi",
|
||||
@@ -908,9 +1120,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "num-conv"
|
||||
version = "0.2.0"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
|
||||
checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967"
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
@@ -932,32 +1144,39 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "numa"
|
||||
version = "0.1.0"
|
||||
version = "0.11.0"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"axum",
|
||||
"criterion",
|
||||
"env_logger",
|
||||
"futures",
|
||||
"http",
|
||||
"http-body-util",
|
||||
"hyper",
|
||||
"hyper-util",
|
||||
"log",
|
||||
"qrcode",
|
||||
"rcgen",
|
||||
"reqwest",
|
||||
"ring",
|
||||
"rustls",
|
||||
"rustls-pemfile",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"socket2",
|
||||
"time",
|
||||
"tokio",
|
||||
"tokio-rustls",
|
||||
"toml",
|
||||
"tower",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "oid-registry"
|
||||
version = "0.7.1"
|
||||
version = "0.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9"
|
||||
checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7"
|
||||
dependencies = [
|
||||
"asn1-rs",
|
||||
]
|
||||
@@ -974,6 +1193,22 @@ version = "1.70.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe"
|
||||
|
||||
[[package]]
|
||||
name = "oorandom"
|
||||
version = "11.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
|
||||
|
||||
[[package]]
|
||||
name = "page_size"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pem"
|
||||
version = "3.0.6"
|
||||
@@ -1002,6 +1237,34 @@ version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "plotters"
|
||||
version = "0.3.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
|
||||
dependencies = [
|
||||
"num-traits",
|
||||
"plotters-backend",
|
||||
"plotters-svg",
|
||||
"wasm-bindgen",
|
||||
"web-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "plotters-backend"
|
||||
version = "0.3.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
|
||||
|
||||
[[package]]
|
||||
name = "plotters-svg"
|
||||
version = "0.3.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
|
||||
dependencies = [
|
||||
"plotters-backend",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "portable-atomic"
|
||||
version = "1.13.1"
|
||||
@@ -1010,9 +1273,9 @@ checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49"
|
||||
|
||||
[[package]]
|
||||
name = "portable-atomic-util"
|
||||
version = "0.2.5"
|
||||
version = "0.2.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5"
|
||||
checksum = "091397be61a01d4be58e7841595bd4bfedb15f1cd54977d79b8271e94ed799a3"
|
||||
dependencies = [
|
||||
"portable-atomic",
|
||||
]
|
||||
@@ -1050,6 +1313,12 @@ dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "qrcode"
|
||||
version = "0.14.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d68782463e408eb1e668cf6152704bd856c78c5b6417adaee3203d8f4c1fc9ec"
|
||||
|
||||
[[package]]
|
||||
name = "quinn"
|
||||
version = "0.11.9"
|
||||
@@ -1064,7 +1333,7 @@ dependencies = [
|
||||
"rustc-hash",
|
||||
"rustls",
|
||||
"socket2",
|
||||
"thiserror 2.0.18",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tracing",
|
||||
"web-time",
|
||||
@@ -1085,7 +1354,7 @@ dependencies = [
|
||||
"rustls",
|
||||
"rustls-pki-types",
|
||||
"slab",
|
||||
"thiserror 2.0.18",
|
||||
"thiserror",
|
||||
"tinyvec",
|
||||
"tracing",
|
||||
"web-time",
|
||||
@@ -1150,10 +1419,30 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rcgen"
|
||||
version = "0.13.2"
|
||||
name = "rayon"
|
||||
version = "1.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2"
|
||||
checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f"
|
||||
dependencies = [
|
||||
"either",
|
||||
"rayon-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rayon-core"
|
||||
version = "1.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91"
|
||||
dependencies = [
|
||||
"crossbeam-deque",
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rcgen"
|
||||
version = "0.14.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "10b99e0098aa4082912d4c649628623db6aba77335e4f4569ff5083a6448b32e"
|
||||
dependencies = [
|
||||
"pem",
|
||||
"ring",
|
||||
@@ -1201,6 +1490,7 @@ dependencies = [
|
||||
"base64",
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"h2",
|
||||
"http",
|
||||
"http-body",
|
||||
"http-body-util",
|
||||
@@ -1296,9 +1586,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rustls-webpki"
|
||||
version = "0.103.9"
|
||||
version = "0.103.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53"
|
||||
checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef"
|
||||
dependencies = [
|
||||
"aws-lc-rs",
|
||||
"ring",
|
||||
@@ -1318,6 +1608,15 @@ version = "1.0.23"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f"
|
||||
|
||||
[[package]]
|
||||
name = "same-file"
|
||||
version = "1.0.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
|
||||
dependencies = [
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.228"
|
||||
@@ -1374,11 +1673,11 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde_spanned"
|
||||
version = "0.6.9"
|
||||
version = "1.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
|
||||
checksum = "6662b5879511e06e8999a8a235d848113e942c9124f211511b16466ee2995f26"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1400,10 +1699,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
|
||||
|
||||
[[package]]
|
||||
name = "simd-adler32"
|
||||
version = "0.3.8"
|
||||
name = "signal-hook-registry"
|
||||
version = "1.4.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2"
|
||||
checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b"
|
||||
dependencies = [
|
||||
"errno",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "simd-adler32"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214"
|
||||
|
||||
[[package]]
|
||||
name = "slab"
|
||||
@@ -1470,33 +1779,13 @@ dependencies = [
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "1.0.69"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
|
||||
dependencies = [
|
||||
"thiserror-impl 1.0.69",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror"
|
||||
version = "2.0.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4"
|
||||
dependencies = [
|
||||
"thiserror-impl 2.0.18",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "thiserror-impl"
|
||||
version = "1.0.69"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"thiserror-impl",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1551,6 +1840,16 @@ dependencies = [
|
||||
"zerovec",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tinytemplate"
|
||||
version = "1.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tinyvec"
|
||||
version = "1.11.0"
|
||||
@@ -1576,6 +1875,7 @@ dependencies = [
|
||||
"libc",
|
||||
"mio",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"socket2",
|
||||
"tokio-macros",
|
||||
"windows-sys 0.61.2",
|
||||
@@ -1617,44 +1917,42 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.8.23"
|
||||
version = "1.1.2+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
"toml_edit",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_datetime"
|
||||
version = "0.6.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_edit"
|
||||
version = "0.22.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
|
||||
checksum = "81f3d15e84cbcd896376e6730314d59fb5a87f31e4b038454184435cd57defee"
|
||||
dependencies = [
|
||||
"indexmap",
|
||||
"serde",
|
||||
"serde_core",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
"toml_write",
|
||||
"toml_parser",
|
||||
"toml_writer",
|
||||
"winnow",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_write"
|
||||
version = "0.1.2"
|
||||
name = "toml_datetime"
|
||||
version = "1.1.1+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
|
||||
checksum = "3165f65f62e28e0115a00b2ebdd37eb6f3b641855f9d636d3cd4103767159ad7"
|
||||
dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_parser"
|
||||
version = "1.1.2+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526"
|
||||
dependencies = [
|
||||
"winnow",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_writer"
|
||||
version = "1.1.1+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db"
|
||||
|
||||
[[package]]
|
||||
name = "tower"
|
||||
@@ -1769,6 +2067,16 @@ version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
|
||||
|
||||
[[package]]
|
||||
name = "walkdir"
|
||||
version = "2.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
|
||||
dependencies = [
|
||||
"same-file",
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "want"
|
||||
version = "0.3.1"
|
||||
@@ -1795,9 +2103,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen"
|
||||
version = "0.2.114"
|
||||
version = "0.2.115"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e"
|
||||
checksum = "6523d69017b7633e396a89c5efab138161ed5aafcbc8d3e5c5a42ae38f50495a"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"once_cell",
|
||||
@@ -1808,23 +2116,19 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-futures"
|
||||
version = "0.4.64"
|
||||
version = "0.4.65"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8"
|
||||
checksum = "2d1faf851e778dfa54db7cd438b70758eba9755cb47403f3496edd7c8fc212f0"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"futures-util",
|
||||
"js-sys",
|
||||
"once_cell",
|
||||
"wasm-bindgen",
|
||||
"web-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.114"
|
||||
version = "0.2.115"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6"
|
||||
checksum = "4e3a6c758eb2f701ed3d052ff5737f5bfe6614326ea7f3bbac7156192dc32e67"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"wasm-bindgen-macro-support",
|
||||
@@ -1832,9 +2136,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro-support"
|
||||
version = "0.2.114"
|
||||
version = "0.2.115"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3"
|
||||
checksum = "921de2737904886b52bcbb237301552d05969a6f9c40d261eb0533c8b055fedf"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"proc-macro2",
|
||||
@@ -1845,18 +2149,18 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-shared"
|
||||
version = "0.2.114"
|
||||
version = "0.2.115"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16"
|
||||
checksum = "a93e946af942b58934c604527337bad9ae33ba1d5c6900bbb41c2c07c2364a93"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "web-sys"
|
||||
version = "0.3.91"
|
||||
version = "0.3.92"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9"
|
||||
checksum = "84cde8507f4d7cfcb1185b8cb5890c494ffea65edbe1ba82cfd63661c805ed94"
|
||||
dependencies = [
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
@@ -1881,6 +2185,37 @@ dependencies = [
|
||||
"rustls-pki-types",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
|
||||
dependencies = [
|
||||
"winapi-i686-pc-windows-gnu",
|
||||
"winapi-x86_64-pc-windows-gnu",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-i686-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
|
||||
dependencies = [
|
||||
"windows-sys 0.61.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows-link"
|
||||
version = "0.2.1"
|
||||
@@ -2045,12 +2380,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
|
||||
|
||||
[[package]]
|
||||
name = "winnow"
|
||||
version = "0.7.15"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5"
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen"
|
||||
@@ -2066,9 +2398,9 @@ checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
|
||||
|
||||
[[package]]
|
||||
name = "x509-parser"
|
||||
version = "0.16.0"
|
||||
version = "0.18.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69"
|
||||
checksum = "d43b0f71ce057da06bc0851b23ee24f3f86190b07203dd8f567d0b706a185202"
|
||||
dependencies = [
|
||||
"asn1-rs",
|
||||
"data-encoding",
|
||||
@@ -2078,7 +2410,7 @@ dependencies = [
|
||||
"oid-registry",
|
||||
"ring",
|
||||
"rusticata-macros",
|
||||
"thiserror 1.0.69",
|
||||
"thiserror",
|
||||
"time",
|
||||
]
|
||||
|
||||
|
||||
37
Cargo.toml
37
Cargo.toml
@@ -1,29 +1,50 @@
|
||||
[package]
|
||||
name = "numa"
|
||||
version = "0.1.0"
|
||||
version = "0.11.0"
|
||||
authors = ["razvandimescu <razvan@dimescu.com>"]
|
||||
edition = "2021"
|
||||
description = "Ephemeral DNS overrides for development and testing. Point any hostname to any endpoint. Auto-revert when you're done."
|
||||
description = "Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/razvandimescu/numa"
|
||||
keywords = ["dns", "proxy", "override", "development", "networking"]
|
||||
keywords = ["dns", "dns-server", "ad-blocking", "reverse-proxy", "developer-tools"]
|
||||
categories = ["network-programming", "development-tools"]
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "macros", "net", "time"] }
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "macros", "net", "time", "sync", "signal"] }
|
||||
axum = "0.8"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
toml = "0.8"
|
||||
toml = "1.1"
|
||||
log = "0.4"
|
||||
env_logger = "0.11"
|
||||
reqwest = { version = "0.12", features = ["rustls-tls", "gzip"], default-features = false }
|
||||
reqwest = { version = "0.12", features = ["rustls-tls", "gzip", "http2"], default-features = false }
|
||||
hyper = { version = "1", features = ["client", "http1", "server"] }
|
||||
hyper-util = { version = "0.1", features = ["client-legacy", "http1", "tokio"] }
|
||||
http-body-util = "0.1"
|
||||
futures = "0.3"
|
||||
rcgen = { version = "0.13", features = ["pem", "x509-parser"] }
|
||||
socket2 = { version = "0.6", features = ["all"] }
|
||||
rcgen = { version = "0.14", features = ["pem", "x509-parser"] }
|
||||
time = "0.3"
|
||||
rustls = "0.23"
|
||||
tokio-rustls = "0.26"
|
||||
rustls-pemfile = "2"
|
||||
arc-swap = "1"
|
||||
ring = "0.17"
|
||||
rustls-pemfile = "2.2.0"
|
||||
qrcode = { version = "0.14", default-features = false }
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = { version = "0.8", features = ["html_reports"] }
|
||||
tower = { version = "0.5", features = ["util"] }
|
||||
http = "1"
|
||||
|
||||
[[bench]]
|
||||
name = "hot_path"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "throughput"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "dnssec"
|
||||
harness = false
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM rust:1.88-alpine AS builder
|
||||
FROM rust:1.94-alpine AS builder
|
||||
RUN apk add --no-cache musl-dev cmake make perl
|
||||
WORKDIR /app
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -11,7 +11,7 @@ COPY numa.toml com.numa.dns.plist numa.service ./
|
||||
RUN touch src/main.rs src/lib.rs
|
||||
RUN cargo build --release
|
||||
|
||||
FROM alpine:3.20
|
||||
FROM alpine:3.23
|
||||
COPY --from=builder /app/target/release/numa /usr/local/bin/numa
|
||||
EXPOSE 53/udp 80/tcp 443/tcp 5380/tcp
|
||||
EXPOSE 53/udp 80/tcp 443/tcp 853/tcp 5380/tcp
|
||||
ENTRYPOINT ["numa"]
|
||||
|
||||
29
Makefile
29
Makefile
@@ -1,11 +1,11 @@
|
||||
.PHONY: all build lint fmt check test clean deploy
|
||||
.PHONY: all build lint fmt check audit test coverage bench clean deploy blog release
|
||||
|
||||
all: lint build
|
||||
all: lint build test
|
||||
|
||||
build:
|
||||
cargo build
|
||||
|
||||
lint: fmt check
|
||||
lint: fmt check audit
|
||||
|
||||
fmt:
|
||||
cargo fmt --check
|
||||
@@ -13,9 +13,32 @@ fmt:
|
||||
check:
|
||||
cargo clippy -- -D warnings
|
||||
|
||||
audit:
|
||||
cargo audit
|
||||
|
||||
test:
|
||||
cargo test
|
||||
|
||||
coverage:
|
||||
cargo tarpaulin --skip-clean --out stdout
|
||||
|
||||
bench:
|
||||
cargo bench
|
||||
|
||||
blog:
|
||||
@mkdir -p site/blog/posts
|
||||
@for f in blog/*.md; do \
|
||||
name=$$(basename "$$f" .md); \
|
||||
pandoc "$$f" --template=site/blog-template.html -o "site/blog/posts/$$name.html"; \
|
||||
echo " $$f → site/blog/posts/$$name.html"; \
|
||||
done
|
||||
|
||||
release:
|
||||
ifndef VERSION
|
||||
$(error Usage: make release VERSION=0.8.0)
|
||||
endif
|
||||
./scripts/release.sh $(VERSION)
|
||||
|
||||
clean:
|
||||
cargo clean
|
||||
|
||||
|
||||
62
PKGBUILD
Normal file
62
PKGBUILD
Normal file
@@ -0,0 +1,62 @@
|
||||
# Maintainer: razvandimescu <razvan@dimescu.com>
|
||||
pkgname=numa-git
|
||||
_pkgname=numa
|
||||
pkgver=0.10.1.r0.g0000000 # Placeholder — pkgver() rewrites this on each makepkg run
|
||||
pkgrel=1
|
||||
pkgdesc="Portable DNS resolver in Rust — .numa local domains, ad blocking, developer overrides, DNS-over-HTTPS"
|
||||
arch=('x86_64')
|
||||
url="https://github.com/razvandimescu/numa"
|
||||
license=('MIT')
|
||||
options=('!lto')
|
||||
depends=('gcc-libs' 'glibc')
|
||||
makedepends=('cargo' 'git')
|
||||
provides=("$_pkgname")
|
||||
conflicts=("$_pkgname")
|
||||
backup=('etc/numa.toml')
|
||||
source=("$_pkgname::git+$url.git")
|
||||
sha256sums=('SKIP')
|
||||
|
||||
pkgver() {
|
||||
cd "$srcdir/$_pkgname"
|
||||
( set -o pipefail
|
||||
git describe --long --tags 2>/dev/null | sed 's/\([^-]*-g\)/r\1/;s/-/./g' ||
|
||||
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
|
||||
) | sed 's/^v//'
|
||||
}
|
||||
|
||||
prepare() {
|
||||
cd "$srcdir/$_pkgname"
|
||||
# numa v0.10.1+ uses FHS-compliant paths on Linux by default
|
||||
# (/var/lib/numa for data, journalctl for logs), so no source
|
||||
# patching is needed. The earlier sed targeted /usr/local/bin/numa,
|
||||
# which only appears in a comment in current main.
|
||||
export RUSTUP_TOOLCHAIN=stable
|
||||
cargo fetch --locked
|
||||
}
|
||||
|
||||
build() {
|
||||
cd "$srcdir/$_pkgname"
|
||||
export RUSTUP_TOOLCHAIN=stable
|
||||
cargo build --frozen --release
|
||||
}
|
||||
|
||||
check() {
|
||||
cd "$srcdir/$_pkgname"
|
||||
export RUSTUP_TOOLCHAIN=stable
|
||||
cargo test --frozen
|
||||
}
|
||||
|
||||
package() {
|
||||
cd "$srcdir/$_pkgname"
|
||||
install -Dm755 "target/release/$_pkgname" "$pkgdir/usr/bin/$_pkgname"
|
||||
|
||||
# numa.service uses {{exe_path}} as a placeholder substituted by
|
||||
# `numa install` at runtime via replace_exe_path(). For an AUR
|
||||
# package install (no `numa install` step), we substitute it
|
||||
# statically here so systemd gets a real ExecStart path.
|
||||
sed 's|{{exe_path}}|/usr/bin/numa /etc/numa.toml|g' numa.service > numa.service.patched
|
||||
install -Dm644 "numa.service.patched" "$pkgdir/usr/lib/systemd/system/numa.service"
|
||||
|
||||
install -Dm644 "numa.toml" "$pkgdir/etc/numa.toml"
|
||||
install -Dm644 "LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
|
||||
}
|
||||
161
README.md
161
README.md
@@ -1,100 +1,147 @@
|
||||
# Numa
|
||||
|
||||
**DNS you own. Everywhere you go.**
|
||||
[](https://github.com/razvandimescu/numa/actions)
|
||||
[](https://crates.io/crates/numa)
|
||||
[](LICENSE)
|
||||
|
||||
**DNS you own. Everywhere you go.** — [numa.rs](https://numa.rs)
|
||||
|
||||
A portable DNS resolver in a single binary. Block ads on any network, name your local services (`frontend.numa`), and override any hostname with auto-revert — all from your laptop, no cloud account or Raspberry Pi required.
|
||||
|
||||
Built from scratch in Rust. Zero DNS libraries. RFC 1035 wire protocol parsed by hand.
|
||||
Built from scratch in Rust. Zero DNS libraries. RFC 1035 wire protocol parsed by hand. Caching, ad blocking, and local service domains out of the box. Optional recursive resolution from root nameservers with full DNSSEC chain-of-trust validation, plus a DNS-over-TLS listener for encrypted client connections (iOS Private DNS, systemd-resolved, etc.). One ~8MB binary, everything embedded.
|
||||
|
||||

|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Install
|
||||
# macOS
|
||||
brew install razvandimescu/tap/numa
|
||||
|
||||
# Linux
|
||||
curl -fsSL https://raw.githubusercontent.com/razvandimescu/numa/main/install.sh | sh
|
||||
|
||||
# Run (port 53 requires root)
|
||||
sudo numa
|
||||
# Arch Linux (AUR)
|
||||
yay -S numa-git
|
||||
|
||||
# Try it
|
||||
dig @127.0.0.1 google.com # ✓ resolves normally
|
||||
dig @127.0.0.1 ads.google.com # ✗ blocked → 0.0.0.0
|
||||
# Windows — download from GitHub Releases
|
||||
# All platforms
|
||||
cargo install numa
|
||||
```
|
||||
|
||||
```bash
|
||||
sudo numa # run in foreground (port 53 requires root/admin)
|
||||
```
|
||||
|
||||
Open the dashboard: **http://numa.numa** (or `http://localhost:5380`)
|
||||
|
||||
Or build from source:
|
||||
```bash
|
||||
git clone https://github.com/razvandimescu/numa.git && cd numa
|
||||
cargo build --release
|
||||
sudo ./target/release/numa
|
||||
```
|
||||
Set as system DNS:
|
||||
|
||||
## Why Numa
|
||||
| Platform | Install | Uninstall |
|
||||
|----------|---------|-----------|
|
||||
| macOS | `sudo numa install` | `sudo numa uninstall` |
|
||||
| Linux | `sudo numa install` | `sudo numa uninstall` |
|
||||
| Windows | `numa install` (admin) + reboot | `numa uninstall` (admin) + reboot |
|
||||
|
||||
- **Ad blocking that travels with you** — 385K+ domains blocked via [Hagezi Pro](https://github.com/hagezi/dns-blocklists). Works on any network: coffee shops, hotels, airports.
|
||||
- **Local service proxy** — `https://frontend.numa` instead of `localhost:5173`. Auto-generated TLS certs, WebSocket support for HMR. Like `/etc/hosts` but with a dashboard and auto-revert.
|
||||
- **Developer overrides** — point any hostname to any IP, auto-reverts after N minutes. REST API with 22 endpoints.
|
||||
- **Sub-millisecond caching** — cached lookups in 0ms. Faster than any public resolver.
|
||||
- **Live dashboard** — real-time stats, query log, blocking controls, service management.
|
||||
- **macOS + Linux** — `numa install` configures system DNS, `numa service start` runs as launchd/systemd service.
|
||||
On macOS and Linux, numa runs as a system service (launchd/systemd). On Windows, numa auto-starts on login via registry.
|
||||
|
||||
## Local Service Proxy
|
||||
## Local Services
|
||||
|
||||
Name your local dev services with `.numa` domains:
|
||||
Name your dev services instead of remembering port numbers:
|
||||
|
||||
```bash
|
||||
curl -X POST localhost:5380/services \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"name":"frontend","target_port":5173}'
|
||||
|
||||
open http://frontend.numa # → proxied to localhost:5173
|
||||
```
|
||||
|
||||
- **HTTPS with green lock** — auto-generated local CA + per-service TLS certs
|
||||
- **WebSocket** — Vite/webpack HMR works through the proxy
|
||||
- **Health checks** — dashboard shows green/red status per service
|
||||
- **Persistent** — services survive restarts
|
||||
- Or configure in `numa.toml`:
|
||||
Now `https://frontend.numa` works in your browser — green lock, valid cert, WebSocket passthrough for HMR. No mkcert, no nginx, no `/etc/hosts`.
|
||||
|
||||
```toml
|
||||
[[services]]
|
||||
name = "frontend"
|
||||
target_port = 5173
|
||||
Add path-based routing (`app.numa/api → :5001`), share services across machines via LAN discovery, or configure everything in [`numa.toml`](numa.toml).
|
||||
|
||||
## Ad Blocking & Privacy
|
||||
|
||||
385K+ domains blocked via [Hagezi Pro](https://github.com/hagezi/dns-blocklists). Works on any network — coffee shops, hotels, airports. Travels with your laptop.
|
||||
|
||||
Three resolution modes:
|
||||
|
||||
- **`forward`** (default) — transparent proxy to your existing system DNS. Everything works as before, just with caching and ad blocking on top. Captive portals, VPNs, corporate DNS — all respected.
|
||||
- **`recursive`** — resolve directly from root nameservers. No upstream dependency, no single entity sees your full query pattern. Add `[dnssec] enabled = true` for full chain-of-trust validation.
|
||||
- **`auto`** — probe root servers on startup, recursive if reachable, encrypted DoH fallback if blocked.
|
||||
|
||||
DNSSEC validates the full chain of trust: RRSIG signatures, DNSKEY verification, DS delegation, NSEC/NSEC3 denial proofs. [Read how it works →](https://numa.rs/blog/posts/dnssec-from-scratch.html)
|
||||
|
||||
**DNS-over-TLS listener** (RFC 7858) — accept encrypted queries on port 853 from strict clients like iOS Private DNS, systemd-resolved, or stubby. Two modes:
|
||||
|
||||
- **Self-signed** (default) — numa generates a local CA automatically. `numa install` adds it to the system trust store on macOS, Linux (Debian/Ubuntu, Fedora/RHEL/SUSE, Arch), and Windows. On iOS, install the `.mobileconfig` from `numa setup-phone`. Firefox keeps its own NSS store and ignores the system one — trust the CA there manually if you need HTTPS for `.numa` services in Firefox.
|
||||
- **Bring-your-own cert** — point `[dot] cert_path` / `key_path` at a publicly-trusted cert (e.g., Let's Encrypt via DNS-01 challenge on a domain pointing at your numa instance). Clients connect without any trust-store setup — same UX as AdGuard Home or Cloudflare `1.1.1.1`.
|
||||
|
||||
ALPN `"dot"` is advertised and enforced in both modes; a handshake with mismatched ALPN is rejected as a cross-protocol confusion defense.
|
||||
|
||||
**Phone setup** — point your iPhone or Android at Numa in one step:
|
||||
|
||||
```bash
|
||||
numa setup-phone
|
||||
```
|
||||
|
||||
Prints a QR code. Scan it, install the profile, toggle certificate trust — your phone's DNS now routes through Numa over TLS. Requires `[mobile] enabled = true` in `numa.toml`.
|
||||
|
||||
## LAN Discovery
|
||||
|
||||
Run Numa on multiple machines. They find each other automatically via mDNS:
|
||||
|
||||
```
|
||||
Machine A (192.168.1.5) Machine B (192.168.1.20)
|
||||
┌──────────────────────┐ ┌──────────────────────┐
|
||||
│ Numa │ mDNS │ Numa │
|
||||
│ - api (port 8000) │◄───────────►│ - grafana (3000) │
|
||||
│ - frontend (5173) │ discovery │ │
|
||||
└──────────────────────┘ └──────────────────────┘
|
||||
```
|
||||
|
||||
From Machine B: `curl http://api.numa` → proxied to Machine A's port 8000. Enable with `numa lan on`.
|
||||
|
||||
**Hub mode**: run one instance with `bind_addr = "0.0.0.0:53"` and point other devices' DNS to it — they get ad blocking + `.numa` resolution without installing anything.
|
||||
|
||||
## How It Compares
|
||||
|
||||
| | Pi-hole | AdGuard Home | NextDNS | Cloudflare | Numa |
|
||||
|---|---|---|---|---|---|
|
||||
| Ad blocking | Yes | Yes | Yes | Limited | 385K+ domains |
|
||||
| Portable (travels with laptop) | No (appliance) | No (appliance) | Cloud only | Cloud only | Single binary |
|
||||
| Developer overrides | No | No | No | No | REST API + auto-expiry |
|
||||
| Local service proxy | No | No | No | No | `.numa` + HTTPS + WS |
|
||||
| Data stays local | Yes | Yes | Cloud | Cloud | 100% local |
|
||||
| Zero config | Complex | Docker/setup | Yes | Yes | Works out of the box |
|
||||
| Self-sovereign DNS | No | No | No | No | pkarr/DHT roadmap |
|
||||
| | Pi-hole | AdGuard Home | Unbound | Numa |
|
||||
|---|---|---|---|---|
|
||||
| Local service proxy + auto TLS | — | — | — | `.numa` domains, HTTPS, WebSocket |
|
||||
| LAN service discovery | — | — | — | mDNS, zero config |
|
||||
| Developer overrides (REST API) | — | — | — | Auto-revert, scriptable |
|
||||
| Recursive resolver | — | — | Yes | Yes, with SRTT selection |
|
||||
| DNSSEC validation | — | — | Yes | Yes (RSA, ECDSA, Ed25519) |
|
||||
| Ad blocking | Yes | Yes | — | 385K+ domains |
|
||||
| Web admin UI | Full | Full | — | Dashboard |
|
||||
| Encrypted upstream (DoH) | Needs cloudflared | Yes | — | Native |
|
||||
| Encrypted clients (DoT listener) | Needs stunnel sidecar | Yes | Yes | Native (RFC 7858) |
|
||||
| Portable (laptop) | No (appliance) | No (appliance) | Server | Single binary, macOS/Linux/Windows |
|
||||
| Community maturity | 56K stars, 10 years | 33K stars | 20 years | New |
|
||||
|
||||
## How It Works
|
||||
## Performance
|
||||
|
||||
```
|
||||
Query → Overrides → .numa TLD → Blocklist → Local Zones → Cache → Upstream
|
||||
```
|
||||
691ns cached round-trip. ~2.0M qps throughput. Zero heap allocations in the hot path. Recursive queries average 237ms after SRTT warmup (12x improvement over round-robin). ECDSA P-256 DNSSEC verification: 174ns. [Benchmarks →](bench/)
|
||||
|
||||
No DNS libraries. The wire protocol — headers, labels, compression pointers, record types — is parsed and serialized by hand. Runs on `tokio` + `axum`, async per-query task spawning.
|
||||
## Learn More
|
||||
|
||||
[Configuration reference](numa.toml)
|
||||
- [Blog: DNS-over-TLS from Scratch in Rust](https://numa.rs/blog/posts/dot-from-scratch.html)
|
||||
- [Blog: Implementing DNSSEC from Scratch in Rust](https://numa.rs/blog/posts/dnssec-from-scratch.html)
|
||||
- [Blog: I Built a DNS Resolver from Scratch](https://numa.rs/blog/posts/dns-from-scratch.html)
|
||||
- [Configuration reference](numa.toml) — all options documented inline
|
||||
- [REST API](src/api.rs) — 27 endpoints across overrides, cache, blocking, services, diagnostics
|
||||
|
||||
## Roadmap
|
||||
|
||||
- [x] DNS proxy core — forwarding, caching, local zones
|
||||
- [x] Developer overrides — REST API with auto-expiry
|
||||
- [x] Ad blocking — 385K+ domains, live dashboard, allowlist
|
||||
- [x] System integration — macOS + Linux, launchd/systemd, Tailscale/VPN auto-discovery
|
||||
- [x] Local service proxy — `.numa` domains, HTTP/HTTPS proxy, auto TLS, WebSocket
|
||||
- [ ] pkarr integration — self-sovereign DNS via Mainline DHT (15M nodes)
|
||||
- [ ] Global `.numa` names — self-publish, DHT-backed, first-come-first-served
|
||||
- [x] DNS forwarding, caching, ad blocking, developer overrides
|
||||
- [x] `.numa` local domains — auto TLS, path routing, WebSocket proxy
|
||||
- [x] LAN service discovery — mDNS, cross-machine DNS + proxy
|
||||
- [x] DNS-over-HTTPS — encrypted upstream
|
||||
- [x] DNS-over-TLS listener — encrypted client connections (RFC 7858, ALPN strict)
|
||||
- [x] Recursive resolution + DNSSEC — chain-of-trust, NSEC/NSEC3
|
||||
- [x] SRTT-based nameserver selection
|
||||
- [x] Mobile onboarding — `setup-phone` QR flow, mobile API, mobileconfig profiles
|
||||
- [ ] pkarr integration — self-sovereign DNS via Mainline DHT
|
||||
- [ ] Global `.numa` names — DHT-backed, no registrar
|
||||
|
||||
## License
|
||||
|
||||
|
||||
BIN
assets/devto-cover.png
Normal file
BIN
assets/devto-cover.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 65 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 808 KiB After Width: | Height: | Size: 927 KiB |
87
bench/README.md
Normal file
87
bench/README.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# Benchmarks
|
||||
|
||||
Numa has two benchmark suites measuring different layers of performance.
|
||||
|
||||
## Micro-benchmarks (`benches/`, criterion)
|
||||
|
||||
Nanosecond-precision measurement of individual operations on the hot path.
|
||||
No running server required — these are pure Rust unit-level benchmarks.
|
||||
|
||||
```sh
|
||||
cargo bench # run all
|
||||
cargo bench --bench hot_path # parse, serialize, cache, clone
|
||||
cargo bench --bench throughput # pipeline QPS, buffer alloc
|
||||
```
|
||||
|
||||
### What's measured
|
||||
|
||||
**hot_path** — individual operations:
|
||||
|
||||
| Benchmark | What it measures |
|
||||
|-----------|-----------------|
|
||||
| `buffer_parse` | Wire bytes → DnsPacket (typical response with 4 records) |
|
||||
| `buffer_serialize` | DnsPacket → wire bytes |
|
||||
| `packet_clone` | Full DnsPacket clone (what cache hit costs) |
|
||||
| `cache_lookup_hit` | Cache lookup on a single-entry cache |
|
||||
| `cache_lookup_hit_populated` | Cache lookup with 1000 entries |
|
||||
| `cache_lookup_miss` | HashMap miss (baseline) |
|
||||
| `cache_insert` | Insert into cache with packet clone |
|
||||
| `round_trip_cached` | Full cached path: parse query → cache hit → serialize response |
|
||||
|
||||
**throughput** — pipeline capacity:
|
||||
|
||||
| Benchmark | What it measures |
|
||||
|-----------|-----------------|
|
||||
| `pipeline_throughput/N` | N cached queries end-to-end (parse → lookup → serialize) |
|
||||
| `buffer_alloc` | BytePacketBuffer 4KB zero-init cost |
|
||||
|
||||
### Reading results
|
||||
|
||||
Criterion auto-compares against the previous run:
|
||||
|
||||
```
|
||||
round_trip_cached time: [710.5 ns 715.2 ns 720.1 ns]
|
||||
change: [-2.48% -1.85% -1.21%] (p = 0.00 < 0.05)
|
||||
Performance has improved.
|
||||
```
|
||||
|
||||
- The three values are [lower bound, estimate, upper bound] of the mean
|
||||
- `change` shows the delta vs the last saved baseline
|
||||
- HTML reports with charts: `target/criterion/report/index.html`
|
||||
|
||||
To save a named baseline for comparison:
|
||||
|
||||
```sh
|
||||
cargo bench -- --save-baseline before
|
||||
# ... make changes ...
|
||||
cargo bench -- --baseline before
|
||||
```
|
||||
|
||||
## End-to-end benchmark (`bench/dns-bench.sh`)
|
||||
|
||||
Real-world latency comparison using `dig` against a running Numa instance
|
||||
and public resolvers. Measures millisecond-level latency including network I/O.
|
||||
|
||||
```sh
|
||||
# Start Numa first (default port 15353 for testing)
|
||||
bash bench/dns-bench.sh [port] [rounds]
|
||||
bash bench/dns-bench.sh 15353 20 # default
|
||||
```
|
||||
|
||||
### What's measured
|
||||
|
||||
- **Numa (cold)**: cache flushed before each query — measures upstream forwarding
|
||||
- **Numa (cached)**: queries hit cache — measures local processing
|
||||
- **System / Google / Cloudflare / Quad9**: public resolver comparison
|
||||
|
||||
Results saved to `bench/results.json`.
|
||||
|
||||
### When to use which
|
||||
|
||||
| Question | Use |
|
||||
|----------|-----|
|
||||
| Did my code change make parsing faster? | `cargo bench --bench hot_path` |
|
||||
| Is the cached path still sub-microsecond? | `cargo bench --bench hot_path` (round_trip_cached) |
|
||||
| How many queries/sec can we handle? | `cargo bench --bench throughput` |
|
||||
| Is Numa still competitive with system resolver? | `bench/dns-bench.sh` |
|
||||
| Did upstream forwarding regress? | `bench/dns-bench.sh` |
|
||||
50
bench/results.json
Normal file
50
bench/results.json
Normal file
@@ -0,0 +1,50 @@
|
||||
{
|
||||
"Numa(cold)": {
|
||||
"avg": 9,
|
||||
"p50": 9,
|
||||
"p99": 18,
|
||||
"min": 8,
|
||||
"max": 18,
|
||||
"count": 50
|
||||
},
|
||||
"Numa(cached)": {
|
||||
"avg": 0,
|
||||
"p50": 0,
|
||||
"p99": 0,
|
||||
"min": 0,
|
||||
"max": 0,
|
||||
"count": 50
|
||||
},
|
||||
"System": {
|
||||
"avg": 9.1,
|
||||
"p50": 8,
|
||||
"p99": 44,
|
||||
"min": 7,
|
||||
"max": 44,
|
||||
"count": 50
|
||||
},
|
||||
"Google": {
|
||||
"avg": 22.4,
|
||||
"p50": 17,
|
||||
"p99": 37,
|
||||
"min": 13,
|
||||
"max": 37,
|
||||
"count": 50
|
||||
},
|
||||
"Cloudflare": {
|
||||
"avg": 18.7,
|
||||
"p50": 14,
|
||||
"p99": 132,
|
||||
"min": 12,
|
||||
"max": 132,
|
||||
"count": 50
|
||||
},
|
||||
"Quad9": {
|
||||
"avg": 14.5,
|
||||
"p50": 13,
|
||||
"p99": 43,
|
||||
"min": 12,
|
||||
"max": 43,
|
||||
"count": 50
|
||||
}
|
||||
}
|
||||
183
benches/dnssec.rs
Normal file
183
benches/dnssec.rs
Normal file
@@ -0,0 +1,183 @@
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
|
||||
use numa::dnssec;
|
||||
use numa::question::QueryType;
|
||||
use numa::record::DnsRecord;
|
||||
|
||||
// Realistic ECDSA P-256 key (64 bytes) and signature (64 bytes)
|
||||
// 64-byte stand-in for an ECDSA P-256 public key (not a valid key — the
// benches only measure crypto overhead, not successful verification).
fn make_ecdsa_key() -> Vec<u8> {
    std::iter::repeat(0xABu8).take(64).collect()
}
|
||||
// 64-byte stand-in for an ECDSA P-256 signature (r || s).
fn make_ecdsa_sig() -> Vec<u8> {
    std::iter::repeat(0xCDu8).take(64).collect()
}
|
||||
|
||||
// Realistic RSA-2048 key (RFC 3110 format: exp_len=3, exp=65537, mod=256 bytes)
|
||||
// RSA-2048 public key in RFC 3110 wire form: a one-byte exponent length,
// the 3-byte exponent 65537, then a 256-byte (2048-bit) modulus.
fn make_rsa_key() -> Vec<u8> {
    let mut key = Vec::with_capacity(1 + 3 + 256);
    key.push(3u8); // exponent length
    key.extend_from_slice(&[0x01, 0x00, 0x01]); // exponent = 65537
    key.extend_from_slice(&[0xFF; 256]); // modulus placeholder
    key
}
|
||||
|
||||
// 32-byte stand-in for an Ed25519 public key.
fn make_ed25519_key() -> Vec<u8> {
    std::iter::repeat(0xEFu8).take(32).collect()
}
|
||||
|
||||
fn make_dnskey(algorithm: u8, public_key: Vec<u8>) -> DnsRecord {
|
||||
DnsRecord::DNSKEY {
|
||||
domain: "example.com".into(),
|
||||
flags: 257,
|
||||
protocol: 3,
|
||||
algorithm,
|
||||
public_key,
|
||||
ttl: 3600,
|
||||
}
|
||||
}
|
||||
|
||||
fn make_rrsig(algorithm: u8, signature: Vec<u8>) -> DnsRecord {
|
||||
DnsRecord::RRSIG {
|
||||
domain: "example.com".into(),
|
||||
type_covered: QueryType::A.to_num(),
|
||||
algorithm,
|
||||
labels: 2,
|
||||
original_ttl: 300,
|
||||
expiration: 2000000000,
|
||||
inception: 1600000000,
|
||||
key_tag: 12345,
|
||||
signer_name: "example.com".into(),
|
||||
signature,
|
||||
ttl: 300,
|
||||
}
|
||||
}
|
||||
|
||||
fn make_rrset() -> Vec<DnsRecord> {
|
||||
vec![
|
||||
DnsRecord::A {
|
||||
domain: "example.com".into(),
|
||||
addr: "93.184.216.34".parse().unwrap(),
|
||||
ttl: 300,
|
||||
},
|
||||
DnsRecord::A {
|
||||
domain: "example.com".into(),
|
||||
addr: "93.184.216.35".parse().unwrap(),
|
||||
ttl: 300,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
fn bench_key_tag(c: &mut Criterion) {
|
||||
let key = make_rsa_key();
|
||||
c.bench_function("key_tag_rsa2048", |b| {
|
||||
b.iter(|| {
|
||||
dnssec::compute_key_tag(black_box(257), black_box(3), black_box(8), black_box(&key))
|
||||
})
|
||||
});
|
||||
|
||||
let key = make_ecdsa_key();
|
||||
c.bench_function("key_tag_ecdsa_p256", |b| {
|
||||
b.iter(|| {
|
||||
dnssec::compute_key_tag(black_box(257), black_box(3), black_box(13), black_box(&key))
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_name_to_wire(c: &mut Criterion) {
|
||||
c.bench_function("name_to_wire_short", |b| {
|
||||
b.iter(|| dnssec::name_to_wire(black_box("example.com")))
|
||||
});
|
||||
c.bench_function("name_to_wire_long", |b| {
|
||||
b.iter(|| dnssec::name_to_wire(black_box("sub.deep.nested.example.co.uk")))
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_build_signed_data(c: &mut Criterion) {
|
||||
let rrsig = make_rrsig(13, make_ecdsa_sig());
|
||||
let rrset = make_rrset();
|
||||
let rrset_refs: Vec<&DnsRecord> = rrset.iter().collect();
|
||||
|
||||
c.bench_function("build_signed_data_2_A_records", |b| {
|
||||
b.iter(|| dnssec::build_signed_data(black_box(&rrsig), black_box(&rrset_refs)))
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_verify_signature(c: &mut Criterion) {
|
||||
// These will fail verification (keys/sigs are random), but we measure the
|
||||
// crypto overhead — ring still does the full algorithm before returning error.
|
||||
let data = vec![0u8; 128]; // typical signed data size
|
||||
|
||||
let rsa_key = make_rsa_key();
|
||||
let rsa_sig = vec![0xAA; 256]; // RSA-2048 signature
|
||||
c.bench_function("verify_rsa_sha256_2048", |b| {
|
||||
b.iter(|| {
|
||||
dnssec::verify_signature(
|
||||
black_box(8),
|
||||
black_box(&rsa_key),
|
||||
black_box(&data),
|
||||
black_box(&rsa_sig),
|
||||
)
|
||||
})
|
||||
});
|
||||
|
||||
let ecdsa_key = make_ecdsa_key();
|
||||
let ecdsa_sig = make_ecdsa_sig();
|
||||
c.bench_function("verify_ecdsa_p256", |b| {
|
||||
b.iter(|| {
|
||||
dnssec::verify_signature(
|
||||
black_box(13),
|
||||
black_box(&ecdsa_key),
|
||||
black_box(&data),
|
||||
black_box(&ecdsa_sig),
|
||||
)
|
||||
})
|
||||
});
|
||||
|
||||
let ed_key = make_ed25519_key();
|
||||
let ed_sig = vec![0xBB; 64];
|
||||
c.bench_function("verify_ed25519", |b| {
|
||||
b.iter(|| {
|
||||
dnssec::verify_signature(
|
||||
black_box(15),
|
||||
black_box(&ed_key),
|
||||
black_box(&data),
|
||||
black_box(&ed_sig),
|
||||
)
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_ds_verification(c: &mut Criterion) {
|
||||
let dk = make_dnskey(8, make_rsa_key());
|
||||
|
||||
// Compute correct DS digest
|
||||
let owner_wire = dnssec::name_to_wire("example.com");
|
||||
let mut dnskey_rdata = vec![1u8, 1, 3, 8]; // flags=257, proto=3, algo=8
|
||||
dnskey_rdata.extend(&make_rsa_key());
|
||||
let mut input = Vec::new();
|
||||
input.extend(&owner_wire);
|
||||
input.extend(&dnskey_rdata);
|
||||
let digest = ring::digest::digest(&ring::digest::SHA256, &input);
|
||||
|
||||
let ds = DnsRecord::DS {
|
||||
domain: "example.com".into(),
|
||||
key_tag: dnssec::compute_key_tag(257, 3, 8, &make_rsa_key()),
|
||||
algorithm: 8,
|
||||
digest_type: 2,
|
||||
digest: digest.as_ref().to_vec(),
|
||||
ttl: 86400,
|
||||
};
|
||||
|
||||
c.bench_function("verify_ds_sha256", |b| {
|
||||
b.iter(|| dnssec::verify_ds(black_box(&ds), black_box(&dk), black_box("example.com")))
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(
|
||||
dnssec_benches,
|
||||
bench_key_tag,
|
||||
bench_name_to_wire,
|
||||
bench_build_signed_data,
|
||||
bench_verify_signature,
|
||||
bench_ds_verification,
|
||||
);
|
||||
criterion_main!(dnssec_benches);
|
||||
185
benches/hot_path.rs
Normal file
185
benches/hot_path.rs
Normal file
@@ -0,0 +1,185 @@
|
||||
use criterion::{black_box, criterion_group, criterion_main, Criterion};
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
use numa::buffer::BytePacketBuffer;
|
||||
use numa::cache::DnsCache;
|
||||
use numa::header::ResultCode;
|
||||
use numa::packet::DnsPacket;
|
||||
use numa::question::{DnsQuestion, QueryType};
|
||||
use numa::record::DnsRecord;
|
||||
|
||||
fn make_response(domain: &str) -> DnsPacket {
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = 0x1234;
|
||||
pkt.header.response = true;
|
||||
pkt.header.recursion_desired = true;
|
||||
pkt.header.recursion_available = true;
|
||||
pkt.header.rescode = ResultCode::NOERROR;
|
||||
pkt.questions
|
||||
.push(DnsQuestion::new(domain.to_string(), QueryType::A));
|
||||
pkt.answers.push(DnsRecord::A {
|
||||
domain: domain.to_string(),
|
||||
addr: Ipv4Addr::new(93, 184, 216, 34),
|
||||
ttl: 300,
|
||||
});
|
||||
// Typical response includes authority + additional records
|
||||
pkt.authorities.push(DnsRecord::NS {
|
||||
domain: domain.to_string(),
|
||||
host: format!("ns1.{domain}"),
|
||||
ttl: 172800,
|
||||
});
|
||||
pkt.authorities.push(DnsRecord::NS {
|
||||
domain: domain.to_string(),
|
||||
host: format!("ns2.{domain}"),
|
||||
ttl: 172800,
|
||||
});
|
||||
pkt.resources.push(DnsRecord::A {
|
||||
domain: format!("ns1.{domain}"),
|
||||
addr: Ipv4Addr::new(198, 51, 100, 1),
|
||||
ttl: 172800,
|
||||
});
|
||||
pkt
|
||||
}
|
||||
|
||||
fn to_wire(pkt: &DnsPacket) -> Vec<u8> {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
pkt.write(&mut buf).unwrap();
|
||||
buf.filled().to_vec()
|
||||
}
|
||||
|
||||
fn bench_buffer_parse(c: &mut Criterion) {
|
||||
let pkt = make_response("example.com");
|
||||
let wire = to_wire(&pkt);
|
||||
|
||||
c.bench_function("buffer_parse", |b| {
|
||||
b.iter(|| {
|
||||
let mut buf = BytePacketBuffer::from_bytes(black_box(&wire));
|
||||
DnsPacket::from_buffer(&mut buf).unwrap()
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_buffer_serialize(c: &mut Criterion) {
|
||||
let pkt = make_response("example.com");
|
||||
|
||||
c.bench_function("buffer_serialize", |b| {
|
||||
b.iter(|| {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
black_box(&pkt).write(&mut buf).unwrap();
|
||||
black_box(buf.pos());
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_packet_clone(c: &mut Criterion) {
|
||||
let pkt = make_response("example.com");
|
||||
|
||||
c.bench_function("packet_clone", |b| b.iter(|| black_box(&pkt).clone()));
|
||||
}
|
||||
|
||||
fn bench_cache_lookup_hit(c: &mut Criterion) {
|
||||
let mut cache = DnsCache::new(10_000, 60, 86400);
|
||||
let pkt = make_response("example.com");
|
||||
cache.insert("example.com", QueryType::A, &pkt);
|
||||
|
||||
c.bench_function("cache_lookup_hit", |b| {
|
||||
b.iter(|| {
|
||||
cache
|
||||
.lookup(black_box("example.com"), QueryType::A)
|
||||
.unwrap()
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_cache_lookup_miss(c: &mut Criterion) {
|
||||
let cache = DnsCache::new(10_000, 60, 86400);
|
||||
|
||||
c.bench_function("cache_lookup_miss", |b| {
|
||||
b.iter(|| cache.lookup(black_box("nonexistent.com"), QueryType::A))
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_cache_insert(c: &mut Criterion) {
|
||||
let pkt = make_response("example.com");
|
||||
|
||||
c.bench_function("cache_insert", |b| {
|
||||
let mut cache = DnsCache::new(10_000, 60, 86400);
|
||||
let mut i = 0u64;
|
||||
b.iter(|| {
|
||||
let domain = format!("bench-{i}.example.com");
|
||||
cache.insert(&domain, QueryType::A, black_box(&pkt));
|
||||
i += 1;
|
||||
// Reset cache periodically to avoid filling up
|
||||
if i % 5000 == 0 {
|
||||
cache.clear();
|
||||
}
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_round_trip(c: &mut Criterion) {
|
||||
// Simulates the cached hot path: parse query → cache hit → serialize response
|
||||
let query_pkt = {
|
||||
let mut q = DnsPacket::new();
|
||||
q.header.id = 0xABCD;
|
||||
q.header.recursion_desired = true;
|
||||
q.questions
|
||||
.push(DnsQuestion::new("example.com".to_string(), QueryType::A));
|
||||
q
|
||||
};
|
||||
let query_wire = to_wire(&query_pkt);
|
||||
|
||||
let response = make_response("example.com");
|
||||
let mut cache = DnsCache::new(10_000, 60, 86400);
|
||||
cache.insert("example.com", QueryType::A, &response);
|
||||
|
||||
c.bench_function("round_trip_cached", |b| {
|
||||
b.iter(|| {
|
||||
// 1. Parse incoming query
|
||||
let mut buf = BytePacketBuffer::from_bytes(black_box(&query_wire));
|
||||
let query = DnsPacket::from_buffer(&mut buf).unwrap();
|
||||
let qname = &query.questions[0].name;
|
||||
let qtype = query.questions[0].qtype;
|
||||
|
||||
// 2. Cache lookup
|
||||
let mut resp = cache.lookup(qname, qtype).unwrap();
|
||||
resp.header.id = query.header.id;
|
||||
|
||||
// 3. Serialize response
|
||||
let mut resp_buf = BytePacketBuffer::new();
|
||||
resp.write(&mut resp_buf).unwrap();
|
||||
black_box(resp_buf.pos());
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_cache_populated_lookup(c: &mut Criterion) {
|
||||
// Benchmark with a realistically populated cache (1000 entries)
|
||||
let mut cache = DnsCache::new(10_000, 60, 86400);
|
||||
for i in 0..1000 {
|
||||
let domain = format!("domain-{i}.example.com");
|
||||
let pkt = make_response(&domain);
|
||||
cache.insert(&domain, QueryType::A, &pkt);
|
||||
}
|
||||
|
||||
c.bench_function("cache_lookup_hit_populated", |b| {
|
||||
b.iter(|| {
|
||||
cache
|
||||
.lookup(black_box("domain-500.example.com"), QueryType::A)
|
||||
.unwrap()
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(
|
||||
benches,
|
||||
bench_buffer_parse,
|
||||
bench_buffer_serialize,
|
||||
bench_packet_clone,
|
||||
bench_cache_lookup_hit,
|
||||
bench_cache_lookup_miss,
|
||||
bench_cache_insert,
|
||||
bench_round_trip,
|
||||
bench_cache_populated_lookup,
|
||||
);
|
||||
criterion_main!(benches);
|
||||
94
benches/throughput.rs
Normal file
94
benches/throughput.rs
Normal file
@@ -0,0 +1,94 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
use numa::buffer::BytePacketBuffer;
|
||||
use numa::header::ResultCode;
|
||||
use numa::packet::DnsPacket;
|
||||
use numa::question::{DnsQuestion, QueryType};
|
||||
use numa::record::DnsRecord;
|
||||
|
||||
fn make_query_wire(domain: &str) -> Vec<u8> {
|
||||
let mut q = DnsPacket::new();
|
||||
q.header.id = 0xABCD;
|
||||
q.header.recursion_desired = true;
|
||||
q.questions
|
||||
.push(DnsQuestion::new(domain.to_string(), QueryType::A));
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
q.write(&mut buf).unwrap();
|
||||
buf.filled().to_vec()
|
||||
}
|
||||
|
||||
fn make_response(domain: &str) -> DnsPacket {
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = 0xABCD;
|
||||
pkt.header.response = true;
|
||||
pkt.header.recursion_desired = true;
|
||||
pkt.header.recursion_available = true;
|
||||
pkt.header.rescode = ResultCode::NOERROR;
|
||||
pkt.questions
|
||||
.push(DnsQuestion::new(domain.to_string(), QueryType::A));
|
||||
pkt.answers.push(DnsRecord::A {
|
||||
domain: domain.to_string(),
|
||||
addr: Ipv4Addr::new(93, 184, 216, 34),
|
||||
ttl: 300,
|
||||
});
|
||||
pkt
|
||||
}
|
||||
|
||||
/// Simulates the complete cached query pipeline (sans network I/O):
|
||||
/// parse → cache lookup → TTL adjust → serialize response
|
||||
fn simulate_cached_pipeline(query_wire: &[u8], cache: &numa::cache::DnsCache) -> usize {
|
||||
let mut buf = BytePacketBuffer::from_bytes(query_wire);
|
||||
let query = DnsPacket::from_buffer(&mut buf).unwrap();
|
||||
let q = &query.questions[0];
|
||||
|
||||
let mut resp = cache.lookup(&q.name, q.qtype).unwrap();
|
||||
resp.header.id = query.header.id;
|
||||
|
||||
let mut resp_buf = BytePacketBuffer::new();
|
||||
resp.write(&mut resp_buf).unwrap();
|
||||
resp_buf.pos()
|
||||
}
|
||||
|
||||
fn bench_pipeline_throughput(c: &mut Criterion) {
|
||||
let domains: Vec<String> = (0..100)
|
||||
.map(|i| format!("domain-{i}.example.com"))
|
||||
.collect();
|
||||
|
||||
let mut cache = numa::cache::DnsCache::new(10_000, 60, 86400);
|
||||
for d in &domains {
|
||||
cache.insert(d, QueryType::A, &make_response(d));
|
||||
}
|
||||
|
||||
let query_wires: Vec<Vec<u8>> = domains.iter().map(|d| make_query_wire(d)).collect();
|
||||
|
||||
let mut group = c.benchmark_group("pipeline_throughput");
|
||||
|
||||
for count in [1, 10, 100] {
|
||||
group.throughput(Throughput::Elements(count));
|
||||
group.bench_with_input(BenchmarkId::from_parameter(count), &count, |b, &count| {
|
||||
let mut idx = 0usize;
|
||||
b.iter(|| {
|
||||
for _ in 0..count {
|
||||
let wire = &query_wires[idx % query_wires.len()];
|
||||
simulate_cached_pipeline(wire, &cache);
|
||||
idx += 1;
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
|
||||
/// Measures the overhead of BytePacketBuffer allocation + zero-init
|
||||
fn bench_buffer_alloc(c: &mut Criterion) {
|
||||
c.bench_function("buffer_alloc", |b| {
|
||||
b.iter(|| {
|
||||
let buf = BytePacketBuffer::new();
|
||||
criterion::black_box(buf.pos());
|
||||
})
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_pipeline_throughput, bench_buffer_alloc,);
|
||||
criterion_main!(benches);
|
||||
327
blog/dns-from-scratch.md
Normal file
327
blog/dns-from-scratch.md
Normal file
@@ -0,0 +1,327 @@
|
||||
---
|
||||
title: I Built a DNS Resolver from Scratch in Rust
|
||||
description: How DNS actually works at the wire level — label compression, TTL tricks, DoH, and what surprised me building a resolver with zero DNS libraries.
|
||||
date: March 2026
|
||||
---
|
||||
|
||||
I wanted to understand how DNS actually works. Not the "it translates domain names to IP addresses" explanation — the actual bytes on the wire. What does a DNS packet look like? How does label compression work? Why is everything crammed into 512 bytes?
|
||||
|
||||
So I built one from scratch in Rust. No `hickory-dns`, no `trust-dns`, no `simple-dns`. The entire RFC 1035 wire protocol — headers, labels, compression pointers, record types — parsed and serialized by hand. It started as a weekend learning project, became a side project I kept coming back to over 6 years, and eventually turned into [Numa](https://github.com/razvandimescu/numa) — which I now use as my actual system DNS.
|
||||
|
||||
A note on terminology: Numa supports two resolution modes. *Forward* mode relays queries to an upstream (Quad9, Cloudflare, or any DoH provider). *Recursive* mode walks the delegation chain from root servers itself — iterative queries to root, TLD, and authoritative nameservers, with full DNSSEC validation. In both modes, Numa does useful things with your DNS traffic locally (caching, ad blocking, overrides, local service domains) before resolving what it can't answer. This post covers the wire protocol and forwarding path; [the next post](/blog/posts/dnssec-from-scratch.html) covers recursive resolution and DNSSEC.
|
||||
|
||||
Here's what surprised me along the way.
|
||||
|
||||
## What does a DNS packet actually look like?
|
||||
|
||||
You can see a real one yourself. Run this:
|
||||
|
||||
```bash
|
||||
dig @127.0.0.1 example.com A +noedns
|
||||
```
|
||||
|
||||
```
|
||||
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 15242
|
||||
;; flags: qr rd ra; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 0
|
||||
|
||||
;; QUESTION SECTION:
|
||||
;example.com. IN A
|
||||
|
||||
;; ANSWER SECTION:
|
||||
example.com. 53 IN A 104.18.27.120
|
||||
example.com. 53 IN A 104.18.26.120
|
||||
```
|
||||
|
||||
That's the human-readable version. But what's actually on the wire? A DNS query for `example.com A` is just 29 bytes:
|
||||
|
||||
```
|
||||
ID Flags QCount ACount NSCount ARCount
|
||||
┌────┐ ┌────┐ ┌────┐ ┌────┐ ┌────┐ ┌────┐
|
||||
Header: AB CD 01 00 00 01 00 00 00 00 00 00
|
||||
└────┘ └────┘ └────┘ └────┘ └────┘ └────┘
|
||||
↑ ↑ ↑
|
||||
│ │ └─ 1 question, 0 answers, 0 authority, 0 additional
|
||||
│ └─ Standard query, recursion desired
|
||||
└─ Random ID (we'll match this in the response)
|
||||
|
||||
Question: 07 65 78 61 6D 70 6C 65 03 63 6F 6D 00 00 01 00 01
|
||||
── ───────────────────── ── ───────── ── ───── ─────
|
||||
7 e x a m p l e 3 c o m end A IN
|
||||
↑ ↑ ↑
|
||||
└─ length prefix └─ length └─ root label (end of name)
|
||||
```
|
||||
|
||||
12 bytes of header + 17 bytes of question = 29 bytes to ask "what's the IP for example.com?" Compare that to an HTTP request for the same information — you'd need hundreds of bytes just for headers.
|
||||
|
||||
We can send exactly those bytes and capture what comes back:
|
||||
|
||||
```python
|
||||
python3 -c "
|
||||
import socket
|
||||
# Hand-craft a DNS query: header (12 bytes) + question (17 bytes)
|
||||
q = b'\xab\xcd\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00' # header
|
||||
q += b'\x07example\x03com\x00\x00\x01\x00\x01' # question
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
s.sendto(q, ('127.0.0.1', 53))
|
||||
resp = s.recv(512)
|
||||
for i in range(0, len(resp), 16):
|
||||
h = ' '.join(f'{b:02x}' for b in resp[i:i+16])
|
||||
a = ''.join(chr(b) if 32<=b<127 else '.' for b in resp[i:i+16])
|
||||
print(f'{i:08x} {h:<48s} {a}')
|
||||
"
|
||||
```
|
||||
|
||||
```
|
||||
00000000 ab cd 81 80 00 01 00 02 00 00 00 00 07 65 78 61 .............exa
|
||||
00000010 6d 70 6c 65 03 63 6f 6d 00 00 01 00 01 07 65 78 mple.com......ex
|
||||
00000020 61 6d 70 6c 65 03 63 6f 6d 00 00 01 00 01 00 00 ample.com.......
|
||||
00000030 00 19 00 04 68 12 1b 78 07 65 78 61 6d 70 6c 65 ....h..x.example
|
||||
00000040 03 63 6f 6d 00 00 01 00 01 00 00 00 19 00 04 68 .com...........h
|
||||
00000050 12 1a 78 ..x
|
||||
```
|
||||
|
||||
83 bytes back. Let's annotate the response:
|
||||
|
||||
```
|
||||
ID Flags QCount ACount NSCount ARCount
|
||||
┌────┐ ┌────┐ ┌────┐ ┌────┐ ┌────┐ ┌────┐
|
||||
Header: AB CD 81 80 00 01 00 02 00 00 00 00
|
||||
└────┘ └────┘ └────┘ └────┘ └────┘ └────┘
|
||||
↑ ↑ ↑ ↑
|
||||
│ │ │ └─ 2 answers
|
||||
│ │ └─ 1 question (echoed back)
|
||||
│ └─ Response flag set, recursion available
|
||||
└─ Same ID as our query
|
||||
|
||||
Question: 07 65 78 61 6D 70 6C 65 03 63 6F 6D 00 00 01 00 01
|
||||
(same as our query — echoed back)
|
||||
|
||||
Answer 1: 07 65 78 61 6D 70 6C 65 03 63 6F 6D 00 00 01 00 01
|
||||
───────────────────────────────────── ── ───── ─────
|
||||
e x a m p l e . c o m end A IN
|
||||
|
||||
00 00 00 19 00 04 68 12 1B 78
|
||||
─────────── ───── ───────────
|
||||
TTL: 25s len:4 104.18.27.120
|
||||
|
||||
Answer 2: (same domain repeated) 00 01 00 01 00 00 00 19 00 04 68 12 1A 78
|
||||
───────────
|
||||
104.18.26.120
|
||||
```
|
||||
|
||||
Notice something wasteful? The domain `example.com` appears *three times* — once in the question, twice in the answers. That's 39 bytes of repeated names in an 83-byte packet. DNS has a solution for this — but first, the overall structure.
|
||||
|
||||
The whole thing fits in a single UDP datagram. The structure is:
|
||||
|
||||
```
|
||||
+--+--+--+--+--+--+--+--+
|
||||
| Header | 12 bytes: ID, flags, counts
|
||||
+--+--+--+--+--+--+--+--+
|
||||
| Questions | What you're asking
|
||||
+--+--+--+--+--+--+--+--+
|
||||
| Answers | The response records
|
||||
+--+--+--+--+--+--+--+--+
|
||||
| Authorities | NS records for the zone
|
||||
+--+--+--+--+--+--+--+--+
|
||||
| Additional | Extra helpful records
|
||||
+--+--+--+--+--+--+--+--+
|
||||
```
|
||||
|
||||
In Rust, parsing the header is just reading 12 bytes and unpacking the flags:
|
||||
|
||||
```rust
|
||||
pub fn read(buffer: &mut BytePacketBuffer) -> Result<DnsHeader> {
|
||||
let id = buffer.read_u16()?;
|
||||
let flags = buffer.read_u16()?;
|
||||
// Flags pack 9 fields into 16 bits
|
||||
let recursion_desired = (flags & (1 << 8)) > 0;
|
||||
let truncated_message = (flags & (1 << 9)) > 0;
|
||||
let authoritative_answer = (flags & (1 << 10)) > 0;
|
||||
let opcode = (flags >> 11) & 0x0F;
|
||||
let response = (flags & (1 << 15)) > 0;
|
||||
// ... and so on
|
||||
}
|
||||
```
|
||||
|
||||
No padding, no alignment, no JSON overhead. DNS was designed in 1987 when every byte counted, and honestly? The wire format is kind of beautiful in its efficiency.
|
||||
|
||||
## Label compression is the clever part
|
||||
|
||||
Remember how `example.com` appeared three times in that 83-byte response? Domain names in DNS are stored as a sequence of **labels** — length-prefixed segments:
|
||||
|
||||
```
|
||||
example.com → [7]example[3]com[0]
|
||||
```
|
||||
|
||||
The `[7]` means "the next 7 bytes are a label." The `[0]` is the root label (end of name). That's 13 bytes per occurrence, 39 bytes for three repetitions. In a response with authority and additional records, domain names can account for half the packet.
|
||||
|
||||
DNS solves this with **compression pointers** — if the top two bits of a length byte are `11`, the remaining 14 bits are an offset back into the packet where the rest of the name can be found. A well-compressed version of our response would replace the answer names with `C0 0C` — a 2-byte pointer to offset 12 where `example.com` first appears in the question section. That turns 39 bytes of names into 17 (13 + 2 + 2). Our upstream didn't bother compressing, but many do — especially when related domains appear:
|
||||
|
||||
```
|
||||
Offset 0x20: [6]google[3]com[0] ← full name
|
||||
Offset 0x40: [4]mail[0xC0][0x20] ← "mail" + pointer to offset 0x20
|
||||
Offset 0x50: [3]www[0xC0][0x20] ← "www" + pointer to offset 0x20
|
||||
```
|
||||
|
||||
Pointers can chain — a pointer can point to another pointer. Parsing this correctly requires tracking your position in the buffer and handling jumps:
|
||||
|
||||
```rust
|
||||
pub fn read_qname(&mut self, outstr: &mut String) -> Result<()> {
|
||||
let mut pos = self.pos();
|
||||
let mut jumped = false;
|
||||
let mut delim = "";
|
||||
|
||||
loop {
|
||||
let len = self.get(pos)?;
|
||||
|
||||
// Top two bits set = compression pointer
|
||||
if (len & 0xC0) == 0xC0 {
|
||||
if !jumped {
|
||||
self.seek(pos + 2)?; // advance past the pointer
|
||||
}
|
||||
let offset = (((len as u16) ^ 0xC0) << 8) | self.get(pos + 1)? as u16;
|
||||
pos = offset as usize;
|
||||
jumped = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
pos += 1;
|
||||
if len == 0 { break; } // root label
|
||||
|
||||
outstr.push_str(delim);
|
||||
outstr.push_str(&self.get_range(pos, len as usize)?
|
||||
.iter().map(|&b| b as char).collect::<String>());
|
||||
delim = ".";
|
||||
pos += len as usize;
|
||||
}
|
||||
|
||||
if !jumped {
|
||||
self.seek(pos)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
This one bit me: when you follow a pointer, you must *not* advance the buffer's read position past where you jumped from. The pointer is 2 bytes, so you advance by 2, but the actual label data lives elsewhere in the packet. If you follow the pointer and also advance past it, you'll skip over the next record entirely. I spent a fun evening debugging that one.
|
||||
|
||||
## TTL adjustment on read, not write
|
||||
|
||||
This is my favorite trick in the whole codebase. I initially stored the remaining TTL and decremented it, which meant I needed a background thread to sweep expired entries. It worked, but it felt wrong — too much machinery for something simple.
|
||||
|
||||
The cleaner approach: store the original TTL and the timestamp when the record was cached. On read, compute `remaining = original_ttl - elapsed`. If it's zero or negative, the entry is stale — evict it lazily.
|
||||
|
||||
```rust
|
||||
pub fn lookup(&mut self, domain: &str, qtype: QueryType) -> Option<DnsPacket> {
|
||||
let key = (domain.to_lowercase(), qtype);
|
||||
let entry = self.entries.get(&key)?;
|
||||
let elapsed = entry.cached_at.elapsed().as_secs() as u32;
|
||||
|
||||
if elapsed >= entry.original_ttl {
|
||||
self.entries.remove(&key);
|
||||
return None;
|
||||
}
|
||||
|
||||
// Adjust TTLs in the response to reflect remaining time
|
||||
let mut packet = entry.packet.clone();
|
||||
for answer in &mut packet.answers {
|
||||
answer.set_ttl(entry.original_ttl.saturating_sub(elapsed));
|
||||
}
|
||||
Some(packet)
|
||||
}
|
||||
```
|
||||
|
||||
No background thread. No timer. Entries expire lazily. The cache stays consistent because every consumer sees the adjusted TTL.
|
||||
|
||||
## The resolution pipeline
|
||||
|
||||
Each incoming UDP packet spawns a tokio task. Each task walks a deterministic pipeline — every step either answers or passes to the next:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ Numa Resolution Pipeline │
|
||||
└─────────────────────────────────────────────────────┘
|
||||
|
||||
Query ──→ Overrides ──→ .numa TLD ──→ Blocklist ──→ Zones ──→ Cache ──→ DoH
|
||||
│ │ │ │ │ │ │
|
||||
│ │ match? │ match? │ blocked? │ match? │ hit? │
|
||||
│ ↓ ↓ ↓ ↓ ↓ ↓
|
||||
│ respond respond 0.0.0.0 respond respond forward
|
||||
│ (auto-reverts (reverse (ad gone) (static (TTL to upstream
|
||||
│ after N min) proxy+TLS) records) adjusted) (encrypted)
|
||||
│
|
||||
└──→ Each step either answers or passes to the next.
|
||||
```
|
||||
|
||||
This is where "from scratch" pays off. Want conditional forwarding for Tailscale? Insert a step before the upstream. Want to override `api.example.com` for 5 minutes while debugging? Add an entry in the overrides step — it auto-expires. A DNS library would have hidden this pipeline behind an opaque `resolve()` call.
|
||||
|
||||
## DNS-over-HTTPS: the "wait, that's it?" moment
|
||||
|
||||
The most recent addition, and honestly the one that surprised me with how little code it needed. DoH (RFC 8484) is conceptually simple: take the exact same DNS wire-format packet you'd send over UDP, POST it to an HTTPS endpoint with `Content-Type: application/dns-message`, and parse the response the same way. Same bytes, different transport.
|
||||
|
||||
```rust
|
||||
async fn forward_doh(
|
||||
query: &DnsPacket,
|
||||
url: &str,
|
||||
client: &reqwest::Client,
|
||||
timeout_duration: Duration,
|
||||
) -> Result<DnsPacket> {
|
||||
let mut send_buffer = BytePacketBuffer::new();
|
||||
query.write(&mut send_buffer)?;
|
||||
|
||||
let resp = timeout(timeout_duration, client
|
||||
.post(url)
|
||||
.header("content-type", "application/dns-message")
|
||||
.header("accept", "application/dns-message")
|
||||
.body(send_buffer.filled().to_vec())
|
||||
.send())
|
||||
.await??.error_for_status()?;
|
||||
|
||||
let bytes = resp.bytes().await?;
|
||||
let mut recv_buffer = BytePacketBuffer::from_bytes(&bytes);
|
||||
DnsPacket::from_buffer(&mut recv_buffer)
|
||||
}
|
||||
```
|
||||
|
||||
The one gotcha that cost me an hour: Quad9 and other DoH providers require HTTP/2. My first attempt used HTTP/1.1 and got a cryptic 400 Bad Request. Adding the `http2` feature to reqwest fixed it. The upside of HTTP/2? Connection multiplexing means subsequent queries reuse the TLS session — ~16ms vs ~50ms for the first query. Free performance.
|
||||
|
||||
The `Upstream` enum dispatches between UDP and DoH based on the URL scheme:
|
||||
|
||||
```rust
|
||||
pub enum Upstream {
|
||||
Udp(SocketAddr),
|
||||
Doh { url: String, client: reqwest::Client },
|
||||
}
|
||||
```
|
||||
|
||||
If the configured address starts with `https://`, it's DoH. Otherwise, plain UDP. Simple, no toggles.
|
||||
|
||||
## "Why not just use dnsmasq + nginx + mkcert?"
|
||||
|
||||
You absolutely can — those are mature, battle-tested tools. The difference is integration: with dnsmasq + nginx + mkcert, you're configuring three tools with three config formats. Numa puts the DNS record, reverse proxy, and TLS cert behind one API call:
|
||||
|
||||
```bash
|
||||
curl -X POST localhost:5380/services -d '{"name":"frontend","target_port":5173}'
|
||||
```
|
||||
|
||||
That creates the DNS entry, generates a TLS certificate, and starts proxying — including WebSocket upgrade for Vite HMR. One command, no config files. Having full control over the resolution pipeline is what makes auto-revert overrides and LAN discovery possible.
|
||||
|
||||
## What I learned
|
||||
|
||||
**DNS is a 40-year-old protocol that works remarkably well.** The wire format is tight, the caching model is elegant, and the hierarchical delegation system has scaled to billions of queries per day. The things people complain about (DNSSEC complexity, lack of encryption) are extensions bolted on decades later, not flaws in the original design.
|
||||
|
||||
**The hard parts aren't where you'd expect.** Parsing the wire protocol was straightforward (RFC 1035 is well-written). The hard parts were: browsers rejecting wildcard certs under single-label TLDs, macOS resolver quirks (`scutil` vs `/etc/resolv.conf`), and getting multiple processes to bind the same multicast port (`SO_REUSEPORT` on macOS, `SO_REUSEADDR` on Linux).
|
||||
|
||||
**Learn the vocabulary before you show up.** I initially called Numa a "DNS resolver" and got corrected — it's a forwarding resolver. The distinction matters to people who work with DNS professionally, and being sloppy about it cost me credibility in my first community posts.
|
||||
|
||||
## What's next
|
||||
|
||||
**Update (March 2026):** Recursive resolution and DNSSEC validation are now shipped. Numa resolves from root nameservers with full chain-of-trust verification (RSA/SHA-256, ECDSA P-256, Ed25519) and NSEC/NSEC3 authenticated denial of existence.
|
||||
|
||||
**[Read the follow-up: Implementing DNSSEC from Scratch in Rust →](/blog/posts/dnssec-from-scratch.html)**
|
||||
|
||||
Still on the roadmap:
|
||||
|
||||
- **DoT (DNS-over-TLS)** — DoH was first because it passes through captive portals and corporate firewalls (port 443 vs 853). DoT has less framing overhead, so it's faster. Both will be available.
|
||||
- **[pkarr](https://github.com/pubky/pkarr) integration** — self-sovereign DNS via the Mainline BitTorrent DHT. Publish DNS records signed with your Ed25519 key, no registrar needed.
|
||||
|
||||
[github.com/razvandimescu/numa](https://github.com/razvandimescu/numa)
|
||||
189
blog/dnssec-from-scratch.md
Normal file
189
blog/dnssec-from-scratch.md
Normal file
@@ -0,0 +1,189 @@
|
||||
---
|
||||
title: Implementing DNSSEC from Scratch in Rust
|
||||
description: Recursive resolution from root hints, chain-of-trust validation, NSEC/NSEC3 denial proofs, and what I learned implementing DNSSEC with zero DNS libraries.
|
||||
date: March 2026
|
||||
---
|
||||
|
||||
In the [previous post](/blog/posts/dns-from-scratch.html) I covered how DNS works at the wire level — packet format, label compression, TTL caching, DoH. Numa was a forwarding resolver: it parsed packets, did useful things locally, and relayed the rest to Cloudflare or Quad9.
|
||||
|
||||
That post ended with "recursive resolution and DNSSEC are on the roadmap." This post is about building both.
|
||||
|
||||
The short version: Numa now resolves from root nameservers with iterative queries, validates the full DNSSEC chain of trust, and cryptographically proves that non-existent domains don't exist. No upstream dependency. No DNS libraries. Just `ring` for the crypto primitives and a lot of RFC reading.
|
||||
|
||||
## Why recursive?
|
||||
|
||||
A forwarding resolver trusts its upstream. When you ask Quad9 for `cloudflare.com`, you trust that Quad9 returns the real answer. If Quad9 lies, gets compromised, or is legally compelled to redirect you — you have no way to know.
|
||||
|
||||
A recursive resolver doesn't trust anyone. It starts at the root nameservers (operated by 12 independent organizations) and follows the delegation chain: root → `.com` TLD → `cloudflare.com` authoritative servers. Each server only answers for its own zone. No single entity sees your full query pattern.
|
||||
|
||||
DNSSEC adds cryptographic proof to each step. The root signs `.com`'s key. `.com` signs `cloudflare.com`'s key. `cloudflare.com` signs its own records. If any step is tampered with, the chain breaks and Numa rejects the response.
|
||||
|
||||
## The iterative resolution loop
|
||||
|
||||
Recursive resolution is a misnomer — the resolver actually uses *iterative* queries. It asks root "where is `cloudflare.com`?", root says "I don't know, but here are the `.com` nameservers." It asks `.com`, which says "here are cloudflare's nameservers." It asks those, and gets the answer.
|
||||
|
||||
```
|
||||
resolve("cloudflare.com", A)
|
||||
→ ask 198.41.0.4 (a.root-servers.net)
|
||||
← "try .com: ns1.gtld-servers.net (192.5.6.30)" [referral + glue]
|
||||
→ ask 192.5.6.30 (ns1.gtld-servers.net)
|
||||
← "try cloudflare: ns1.cloudflare.com (173.245.58.51)" [referral + glue]
|
||||
→ ask 173.245.58.51 (ns1.cloudflare.com)
|
||||
← "104.16.132.229" [answer]
|
||||
```
|
||||
|
||||
The implementation (`src/recursive.rs`) is a loop with three possible outcomes per query:
|
||||
|
||||
1. **Answer** — the server knows the record. Cache it, return it.
|
||||
2. **Referral** — the server delegates to another zone. Extract NS records and glue (A/AAAA records for the nameservers, included in the additional section to avoid a chicken-and-egg problem), then query the next server.
|
||||
3. **NXDOMAIN/REFUSED** — the name doesn't exist or the server refuses. Cache the negative result.
|
||||
|
||||
CNAME chasing adds complexity: if you ask for `www.cloudflare.com` and get a CNAME to `cloudflare.com`, you need to restart resolution for the new name. I cap this at 8 levels.
|
||||
|
||||
### TLD priming
|
||||
|
||||
Cold-cache resolution is slow. Every query needs root → TLD → authoritative, each with its own network round-trip. For the first query to `example.com`, that's three serial UDP round-trips before you get an answer.
|
||||
|
||||
TLD priming solves this. On startup, Numa queries root for NS records of 34 common TLDs (`.com`, `.org`, `.net`, `.io`, `.dev`, plus EU ccTLDs), caching NS records, glue addresses, DS records, and DNSKEY records. After priming, the first query to any `.com` domain skips root entirely — it already knows where `.com`'s nameservers are, and already has the DNSSEC keys needed to validate the response.
|
||||
|
||||
## DNSSEC chain of trust
|
||||
|
||||
DNSSEC doesn't encrypt DNS traffic. It *signs* it. Every DNS record can have an accompanying RRSIG (signature) record. The resolver verifies the signature against the zone's DNSKEY, then verifies that DNSKEY against the parent zone's DS (delegation signer) record, walking up until it reaches the root trust anchor — a hardcoded public key that IANA publishes and the entire internet agrees on.
|
||||
|
||||
<img src="../dnssec-chain.svg" alt="DNSSEC chain of trust diagram — verifying cloudflare.com from answer through .com TLD to root trust anchor">
|
||||
|
||||
### How keys get there
|
||||
|
||||
The domain owner generates the DNSKEY keypair — typically their DNS provider (Cloudflare, etc.) does this. The owner then submits the DS record (a hash of their DNSKEY) to their registrar (Namecheap, GoDaddy), who passes it to the registry (Verisign for `.com`). The registry signs it into the TLD zone, and IANA signs the TLD's DS into the root. Trust flows up; keys flow down.
|
||||
|
||||
The irony: you "own" your DNSSEC keys, but your registrar controls whether the DS record gets published. If they remove it — by mistake, by policy, or by court order — your DNSSEC chain breaks silently.
|
||||
|
||||
### The trust anchor
|
||||
|
||||
IANA's root KSK (Key Signing Key) has key tag 20326, algorithm 8 (RSA/SHA-256), and a 256-byte public key. It was last rolled in 2018. I hardcode it as a `const` array — this is the one thing in the entire system that requires out-of-band trust.
|
||||
|
||||
```rust
|
||||
const ROOT_KSK_PUBLIC_KEY: &[u8] = &[
|
||||
0x03, 0x01, 0x00, 0x01, 0xac, 0xff, 0xb4, 0x09,
|
||||
// ... 256 bytes total
|
||||
];
|
||||
```
|
||||
|
||||
When IANA rolls this key (rare — the previous key lasted from 2010 to 2018), every DNSSEC validator on the internet needs updating. For Numa, that means a binary update. Something to watch. Every DNSKEY also has a key tag — a 16-bit checksum over its RDATA. The first test I wrote: compute the root KSK's key tag and assert it equals 20326. Instant confidence that the encoding is correct.
|
||||
|
||||
## The crypto
|
||||
|
||||
Numa uses `ring` for all cryptographic operations. Three algorithms cover the vast majority of signed zones:
|
||||
|
||||
| Algorithm | ID | Usage | Verify time |
|
||||
|---|---|---|---|
|
||||
| RSA/SHA-256 | 8 | Root, most TLDs | 10.9 µs |
|
||||
| ECDSA P-256 | 13 | Cloudflare, many modern zones | 174 ns |
|
||||
| Ed25519 | 15 | Newer zones | ~200 ns |
|
||||
|
||||
### RSA key format conversion
|
||||
|
||||
DNS stores RSA public keys in RFC 3110 format (exponent length, exponent, modulus). `ring` expects PKCS#1 DER (ASN.1 encoded). Converting between them means writing a minimal ASN.1 encoder with leading-zero stripping and sign-bit padding. Getting this wrong produces keys that `ring` silently rejects — one of the harder bugs to track down.
|
||||
|
||||
### ECDSA is simpler
|
||||
|
||||
ECDSA P-256 keys in DNS are 64 bytes (x + y coordinates). `ring` expects uncompressed point format: `0x04` prefix + 64 bytes. Three lines:
|
||||
|
||||
```rust
|
||||
let mut uncompressed = Vec::with_capacity(65);
|
||||
uncompressed.push(0x04);
|
||||
uncompressed.extend_from_slice(public_key); // 64 bytes from DNS
|
||||
```
|
||||
|
||||
Signatures are also 64 bytes (r + s), used directly. No format conversion needed.
|
||||
|
||||
### Building the signed data
|
||||
|
||||
RRSIG verification doesn't sign the DNS packet — it signs a canonical form of the records. Building this correctly is the most detail-sensitive part of DNSSEC. The signed data is:
|
||||
|
||||
1. RRSIG RDATA fields (type covered, algorithm, labels, original TTL, expiration, inception, key tag, signer name) — *without* the signature itself
|
||||
2. For each record in the RRset: owner name (lowercased, uncompressed) + type + class + original TTL (from the RRSIG, not the record's current TTL) + RDATA length + canonical RDATA
|
||||
|
||||
The records must be sorted by their canonical wire-format representation. Owner names must be lowercased. The TTL must be the *original* TTL from the RRSIG, not the decremented TTL from caching.
|
||||
|
||||
Getting any of these details wrong — wrong TTL, wrong case, wrong sort order, wrong RDATA encoding — produces a valid-looking but incorrect signed data blob, and `ring` returns a signature mismatch with no diagnostic information. I spent more time debugging signed data construction than any other part of DNSSEC.
|
||||
|
||||
## Proving a name doesn't exist
|
||||
|
||||
Verifying that `cloudflare.com` has a valid A record is one thing. Proving that `doesnotexist.cloudflare.com` *doesn't* exist — cryptographically, in a way that can't be forged — is harder.
|
||||
|
||||
### NSEC
|
||||
|
||||
NSEC records form a chain. Each NSEC says "the next name in this zone after me is X, and at my name these record types exist." If you query `beta.example.com` and the zone has `alpha.example.com → NSEC → gamma.example.com`, the gap proves `beta` doesn't exist — there's nothing between `alpha` and `gamma`.
|
||||
|
||||
For NXDOMAIN proofs, RFC 4035 §5.4 requires two things:
|
||||
1. An NSEC record whose gap covers the queried name
|
||||
2. An NSEC record proving no wildcard exists at the closest encloser
|
||||
|
||||
The canonical DNS name ordering (RFC 4034 §6.1) compares labels right-to-left, case-insensitive. `a.example.com` < `b.example.com` because at the `example.com` level they're equal, then `a` < `b`. But `z.example.com` < `a.example.org` because `.com` < `.org` at the TLD level.
|
||||
|
||||
### NSEC3
|
||||
|
||||
NSEC3 solves NSEC's zone enumeration problem — with NSEC, you can walk the chain and discover every name in the zone. NSEC3 hashes the names first (iterated SHA-1 with a salt), so the NSEC3 chain reveals hashes, not names.
|
||||
|
||||
The proof is a 3-part closest encloser proof (RFC 5155 §8.4): find an ancestor whose hash matches an NSEC3 owner, prove the next-closer name falls within a hash range gap, and prove the wildcard at the closest encloser also falls within a gap. All three must hold, or the denial is rejected.
|
||||
|
||||
I cap NSEC3 iterations at 500 (RFC 9276 recommends 0). Higher iteration counts are a DoS vector — each verification requires `iterations + 1` SHA-1 hashes.
|
||||
|
||||
## Making it fast
|
||||
|
||||
Cold-cache DNSSEC validation initially required ~5 network fetches per query (DNSKEY for each zone in the chain, plus DS records). Three optimizations brought this down to ~1:
|
||||
|
||||
**TLD priming** (startup) — fetch root DNSKEY + each TLD's NS/DS/DNSKEY. After priming, the trust chain from root to any `.com` zone is fully cached.
|
||||
|
||||
**Referral DS piggybacking** — when a TLD server refers you to `cloudflare.com`'s nameservers, the authority section often includes DS records for the child zone. Cache them during resolution instead of fetching separately during validation.
|
||||
|
||||
**DNSKEY prefetch** — before the validation loop, scan all RRSIGs for signer zones and batch-fetch any missing DNSKEYs. This avoids serial DNSKEY fetches inside the per-RRset verification loop.
|
||||
|
||||
Result: a cold-cache query for `cloudflare.com` with full DNSSEC validation takes ~90ms. The TLD chain is already warm; only one DNSKEY fetch is needed (for `cloudflare.com` itself).
|
||||
|
||||
| Operation | Time |
|
||||
|---|---|
|
||||
| ECDSA P-256 verify | 174 ns |
|
||||
| Ed25519 verify | ~200 ns |
|
||||
| RSA/SHA-256 verify | 10.9 µs |
|
||||
| DS digest (SHA-256) | 257 ns |
|
||||
| Key tag computation | 20–63 ns |
|
||||
| Cold-cache validation (1 fetch) | ~90 ms |
|
||||
|
||||
The network fetch dominates. The crypto is noise.
|
||||
|
||||
## Surviving hostile networks
|
||||
|
||||
I deployed Numa as my system DNS and switched networks. Everything broke — every query SERVFAIL, 3-second timeout. The ISP blocks outbound UDP port 53 to everything except whitelisted public resolvers. Root servers, TLD servers, authoritative servers — all unreachable over UDP.
|
||||
|
||||
But TCP port 53 worked. Every DNS server is required to support TCP (RFC 1035 section 4.2.2). The ISP only filters UDP.
|
||||
|
||||
The fix has three parts:
|
||||
|
||||
**TCP fallback.** Every outbound query tries UDP first (800ms timeout). If UDP fails or the response is truncated, retry immediately over TCP. TCP uses a 2-byte length prefix before the DNS message — trivial to implement, and it handles DNSSEC responses that exceed the UDP payload limit.
|
||||
|
||||
**UDP auto-disable.** After 3 consecutive UDP failures, flip a global `AtomicBool` and skip UDP entirely — go TCP-first for all queries. The flag resets when the network changes (detected via LAN IP monitoring).
|
||||
|
||||
<img src="../hostile-network.svg" alt="Latency profile on a hostile network: queries 1-3 each spend 800ms waiting for a UDP timeout before retrying over TCP, taking 1,100ms total per query. After 3 consecutive failures the UDP auto-disable flag flips, and queries 4+ go TCP-first and complete in 300ms each — 3.7× faster.">
|
||||
|
||||
**Query minimization (RFC 7816).** When querying root servers, send only the TLD — `com` instead of `secret-project.example.com`. Root servers handle trillions of queries and are operated by 12 organizations. Minimization reduces what they learn from yours.
|
||||
|
||||
I wouldn't have found this without dogfooding. The code worked perfectly on my home network. It took a real hostile network to expose the assumption that UDP always works.
|
||||
|
||||
## What I learned
|
||||
|
||||
**DNSSEC is a verification system, not an encryption system.** It proves authenticity — this record was signed by the zone owner. It doesn't hide what you're querying. For privacy, you still need encrypted transport (DoH/DoT) or recursive resolution (no single upstream).
|
||||
|
||||
**The hardest bugs are in data serialization, not crypto.** `ring` either verifies or it doesn't — a binary answer. But getting the signed data blob exactly right (correct TTL, correct case, correct sort, correct RDATA encoding for each record type) requires extreme precision. A single wrong byte means verification fails with no hint about what's wrong.
|
||||
|
||||
**Negative proofs are harder than positive proofs.** Verifying a record exists: verify one RRSIG. Proving a record doesn't exist: find the right NSEC/NSEC3 records, verify their RRSIGs, check gap coverage, check wildcard denial, compute hashes. The NSEC3 closest encloser proof alone has three sub-proofs, each requiring hash computation and range checking.
|
||||
|
||||
**Performance optimization is about avoiding network, not avoiding CPU.** The crypto takes nanoseconds to microseconds. The network fetch takes tens of milliseconds. Every optimization that matters — TLD priming, DS piggybacking, DNSKEY prefetch — is about eliminating a round trip, not speeding up a hash.
|
||||
|
||||
## What's next
|
||||
|
||||
- **[pkarr](https://github.com/pubky/pkarr) integration** — self-sovereign DNS via the Mainline BitTorrent DHT. Your Ed25519 key is your domain. No registrar, no ICANN.
|
||||
- **DoT (DNS-over-TLS)** — the last encrypted transport we don't support
|
||||
|
||||
The code is at [github.com/razvandimescu/numa](https://github.com/razvandimescu/numa) — the DNSSEC validation is in [`src/dnssec.rs`](https://github.com/razvandimescu/numa/blob/main/src/dnssec.rs) and the recursive resolver in [`src/recursive.rs`](https://github.com/razvandimescu/numa/blob/main/src/recursive.rs). MIT license.
|
||||
167
blog/dot-from-scratch.md
Normal file
167
blog/dot-from-scratch.md
Normal file
@@ -0,0 +1,167 @@
|
||||
---
|
||||
title: DNS-over-TLS from Scratch in Rust
|
||||
description: Building RFC 7858 on top of rustls — length-prefix framing, ALPN cross-protocol defense, and two bugs that only the strict clients caught.
|
||||
date: April 2026
|
||||
---
|
||||
|
||||
The [previous post](/blog/posts/dnssec-from-scratch.html) ended with "DoT — the last encrypted transport we don't support." This post is about building it.
|
||||
|
||||
Numa now runs a DoT listener on port 853. My iPhone uses it as its system resolver, so ad blocking, DNSSEC validation, and recursive resolution follow my phone through the day. No cloud, no account, no companion app — a self-signed cert, a `.mobileconfig` profile, and a QR code in the terminal.
|
||||
|
||||
RFC 7858 is ten pages. The hard parts weren't in the RFC. They were in cross-protocol confusion defenses, a crypto-provider init gotcha that only triggered in one specific config combination, and a certificate SAN bug iOS was happy to accept and `kdig` immediately rejected. This post is about those parts.
|
||||
|
||||
## Why DoT when you already have DoH?
|
||||
|
||||
Numa has shipped DoH since v0.1. Both protocols tunnel DNS over TLS; DoH wraps queries in HTTP/2, DoT is DNS-over-TCP with TLS in front. Same privacy guarantees, different wrapper.
|
||||
|
||||
The answer to "why both" is that **phones ask for DoT by name.** iOS system DNS configures it with two fields (IP + server name) instead of a URL template. Android 9+ "Private DNS" speaks DoT natively. Linux stubs default to DoT. I wanted my phone on Numa without installing anything on the phone itself, and DoT is the protocol iOS and Android already speak for that.
|
||||
|
||||
## The wire format is refreshingly small
|
||||
|
||||
RFC 7858 is one sentence of wire protocol: *DNS-over-TCP (RFC 1035 §4.2.2) with TLS in front, on port 853.* DNS-over-TCP has existed since 1987 — a 2-byte length prefix followed by the DNS message. DoT is that, wrapped in a TLS session. The entire framing code is seven lines:
|
||||
|
||||
```rust
|
||||
async fn write_framed<S>(stream: &mut S, msg: &[u8]) -> io::Result<()>
|
||||
where S: AsyncWriteExt + Unpin {
|
||||
let mut out = Vec::with_capacity(2 + msg.len());
|
||||
out.extend_from_slice(&(msg.len() as u16).to_be_bytes());
|
||||
out.extend_from_slice(msg);
|
||||
stream.write_all(&out).await?;
|
||||
stream.flush().await
|
||||
}
|
||||
```
|
||||
|
||||
Reads are symmetric: `read_exact` two bytes, convert to `u16`, `read_exact` that many bytes. No HTTP headers, no chunked encoding, no framing layer.
|
||||
|
||||
## Persistent connections
|
||||
|
||||
A fresh TCP+TLS handshake is at least 3 RTTs — about 300ms on a 100ms connection, 3× the cost of a single UDP query on the same link. RFC 7858 §3.4 says clients SHOULD reuse the TCP connection for multiple queries, and every real DoT client does: iOS, Android, systemd, stubby. A single connection often carries hundreds of queries.
|
||||
|
||||
<img src="../dot-handshake.svg" alt="Timing diagram comparing a DNS lookup over plain UDP (1 RTT), over DoT on a fresh connection (3 RTTs — TCP handshake, TLS 1.3 handshake, then the query), and over a reused DoT session (1 RTT, same as UDP).">
|
||||
|
||||
The amortization point is the whole game. If you only ever do one query per connection, DoT is roughly 3× slower than UDP and you should not use it. If you reuse the same TLS session for a browsing session's worth of queries, the handshake is paid once and every subsequent query is effectively free.
|
||||
|
||||
The server is a loop that reads a length-prefixed message, resolves it, writes the response framed the same way, waits for the next one. Three timeouts keep it honest:
|
||||
|
||||
- **Handshake timeout (10s)** — a slowloris that opens TCP but never sends a ClientHello can't pin a worker.
|
||||
- **Idle timeout (30s)** — a connected client with nothing to say gets dropped.
|
||||
- **Write timeout (10s)** — a stalled reader can't hold a response buffer indefinitely.
|
||||
|
||||
A semaphore caps concurrent connections at 512 so a burst of handshakes can't exhaust the tokio runtime.
|
||||
|
||||
## ALPN, the cross-protocol defense that matters
|
||||
|
||||
If DoT lives on port 853 and HTTPS on 443, what stops an HTTP/2 client from hitting 853 and getting confused replies? [Cross-protocol attacks](https://alpaca-attack.com/) exist and have had real CVEs. The defense is ALPN: during the TLS handshake the client advertises protocols, the server picks one it supports or fails. A DoT server advertises `"dot"`; a client offering only `"h2"` gets a `no_application_protocol` fatal alert before any frames are exchanged.
|
||||
|
||||
rustls enforces this by default when you set `alpn_protocols`:
|
||||
|
||||
```rust
|
||||
let mut config = ServerConfig::builder()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(certs, key)?;
|
||||
config.alpn_protocols = vec![b"dot".to_vec()];
|
||||
```
|
||||
|
||||
"The library enforces it by default" has a latent risk: a future rustls upgrade could change the default, and the defense would quietly evaporate. I wrote a test that pins the behavior so any regression in a dependency update fails loudly:
|
||||
|
||||
```rust
|
||||
#[tokio::test]
|
||||
async fn dot_rejects_non_dot_alpn() {
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, vec![b"h2".to_vec()]);
|
||||
let connector = tokio_rustls::TlsConnector::from(client_config);
|
||||
let tcp = tokio::net::TcpStream::connect(addr).await.unwrap();
|
||||
let result = connector
|
||||
.connect(ServerName::try_from("numa.numa").unwrap(), tcp)
|
||||
.await;
|
||||
assert!(result.is_err(),
|
||||
"DoT server must reject ALPN that doesn't include \"dot\"");
|
||||
}
|
||||
```
|
||||
|
||||
When you're leaning on a library's default for a security-critical invariant, the test is the contract.
|
||||
|
||||
## Two bugs that hid for days
|
||||
|
||||
Both were fixed before v0.10 shipped. Both stayed hidden because my initial tests used *permissive* clients.
|
||||
|
||||
### The rustls crypto provider panic
|
||||
|
||||
rustls 0.23 requires a `CryptoProvider` installed before you can build a `ServerConfig`. Numa's HTTPS proxy calls `install_default` as a side effect when it builds its own config, so DoT "just worked" for users who enabled both — the proxy had already initialized the provider before DoT's first handshake.
|
||||
|
||||
Then I added support for user-provided DoT certificates. Someone running DoT with their own Let's Encrypt cert, with the HTTPS proxy disabled, would hit:
|
||||
|
||||
```
|
||||
thread 'dot' panicked at rustls-0.23.25/src/crypto/mod.rs:185:14:
|
||||
no process-level CryptoProvider available -- call
|
||||
CryptoProvider::install_default() before this point
|
||||
```
|
||||
|
||||
The panic happened on the first client connection, not at startup. While writing the integration suite for "DoT with BYO cert, proxy disabled" — the one combination nobody had ever actually exercised — the first run panicked. Fix is two lines: call `install_default` inside `load_tls_config` so DoT can stand alone. If a side effect initializes something and you have a path that skips that side effect, you have a bug waiting for a specific deployment.
|
||||
|
||||
### The SAN bug iOS was happy to accept
|
||||
|
||||
Numa's self-signed DoT cert is generated on first run from a local CA alongside the data directory. It needs to match whatever `ServerName` the client sends as SNI. For the HTTPS proxy, that's the wildcard domain pattern `*.numa` (matching `frontend.numa`, `api.numa`, etc.). I initially reused the same SAN list for DoT: a wildcard `*.numa` and nothing else.
|
||||
|
||||
On an iPhone this worked perfectly. Full browsing session, persistent connections in the log, ad blocking active. I was about to merge when I ran one last smoke test with `kdig` (GnuTLS-backed, from [Knot DNS](https://www.knot-dns.cz/)):
|
||||
|
||||
```
|
||||
$ kdig @192.168.1.16 -p 853 +tls \
|
||||
+tls-ca=/usr/local/var/numa/ca.pem \
|
||||
+tls-hostname=numa.numa example.com A
|
||||
|
||||
;; TLS, handshake failed (Error in the certificate.)
|
||||
```
|
||||
|
||||
Huh.
|
||||
|
||||
[RFC 6125 §6.4.3](https://datatracker.ietf.org/doc/html/rfc6125#section-6.4.3): a wildcard in a certificate's DNS-ID matches exactly one label — and by that rule alone, `*.numa` would match both `frontend.numa` and `numa.numa`. But strict validators layer policy on top of the matching rule: a wildcard whose only fixed part is a single label (an effective TLD) is considered too broad, so GnuTLS refuses to honor `*.numa` for `numa.numa` where lenient stacks accept it.
|
||||
|
||||
iOS's TLS stack is lenient and accepts it. GnuTLS, NSS (Firefox), and most non-Apple validators don't. The fix is five lines — add an explicit `numa.numa` SAN alongside the wildcard. But the lesson is the one that stuck: I wrote a commit message saying "fix an iOS bug" and had to rewrite it, because iOS was fine. The real bug was that every GnuTLS/NSS-based client on the planet would have rejected the cert, and I only found it by running one more test with a stricter tool.
|
||||
|
||||
> Test with the strict client. The permissive client hides your bugs.
|
||||
|
||||
## Getting your phone onto it
|
||||
|
||||
A DoT server is useless without a way to point a phone at it. iOS won't let you type an IP and a server name into Settings directly — you install a `.mobileconfig` profile that bundles the CA as a trust anchor and the DNS settings in a single payload.
|
||||
|
||||
Numa ships a subcommand that builds one on the fly and serves it over a QR code in the terminal:
|
||||
|
||||
```
|
||||
$ numa setup-phone
|
||||
|
||||
Numa Phone Setup
|
||||
|
||||
Profile URL: http://192.168.1.16:8765/mobileconfig
|
||||
|
||||
█▀▀▀▀▀▀▀█▀▀██ ██ ▀█▀▀▀▀▀▀▀█
|
||||
█ █▀▀▀█ █▀▄▀▀▀▀▄▄█ █▀▀▀█ █
|
||||
...
|
||||
|
||||
On your iPhone:
|
||||
1. Open Camera, point at the QR code, tap the yellow banner
|
||||
2. Allow the download when Safari asks
|
||||
3. Settings → "Profile Downloaded" → Install
|
||||
4. Settings → General → About → Certificate Trust Settings
|
||||
Toggle ON "Numa Local CA" — required for DoT to work
|
||||
```
|
||||
|
||||
Step 4 is non-negotiable. Even though the CA is bundled in the same profile that installs the DNS settings, iOS still requires the user to explicitly toggle trust in Certificate Trust Settings. It's a deliberate iOS policy to prevent profile-based trust injection — annoying, and correct.
|
||||
|
||||
I've been dogfooding this since v0.10 shipped in early April. The phone resolves through Numa over DoT whenever I'm home; persistent connections are visible in the log as a single source port living through dozens of queries. The one real caveat: if the laptop's LAN IP changes, the profile breaks. [RFC 9462 DDR](https://datatracker.ietf.org/doc/html/rfc9462) fixes that — Numa can respond to `_dns.resolver.arpa IN SVCB` with its current IP and iOS picks it up on each network join. Next piece of work.
|
||||
|
||||
## What I learned
|
||||
|
||||
**RFC-level small, API-level hard.** RFC 7858 is ten pages. The framing is trivial. But the subtle stuff — ALPN, timeouts, connection caps, handshake vs idle vs write deadlines, backoff on accept errors — isn't in the RFC. Miss any of it and you leak a DoS vector or a protocol confusion hole.
|
||||
|
||||
**Your test matrix is your security matrix.** Both bugs in this post were hidden by lenient clients. In both cases the strict client — kdig, or a specific config combination — surfaced the bug instantly. Pick test tools for strictness, not convenience. The moment you find yourself thinking "but iOS accepts it," stop and run kdig.
|
||||
|
||||
**Don't initialize global state via side effects.** "Module A installs a global, module B silently depends on it, disabling A breaks B" is a bug pattern that keeps coming back. Fix: have module B initialize its dependency explicitly, even if it means calling an idempotent `install_default` twice. The dependency graph should be local and obvious.
|
||||
|
||||
## What's next
|
||||
|
||||
- **DoH server** — Numa already has a DoH client; the other half unlocks Firefox's built-in DoH setting pointing at Numa.
|
||||
- **DoQ server (RFC 9250)** — DNS over QUIC. Android 14+ supports it natively.
|
||||
- **DDR (RFC 9462)** — auto-discovery via `_dns.resolver.arpa IN SVCB`, so phones pick up a moved Numa instance without the installed profile going stale.
|
||||
|
||||
The code is at [github.com/razvandimescu/numa](https://github.com/razvandimescu/numa) — the DoT listener is in [`src/dot.rs`](https://github.com/razvandimescu/numa/blob/main/src/dot.rs) and the phone onboarding flow is in [`src/setup_phone.rs`](https://github.com/razvandimescu/numa/blob/main/src/setup_phone.rs) and [`src/mobileconfig.rs`](https://github.com/razvandimescu/numa/blob/main/src/mobileconfig.rs). MIT license.
|
||||
@@ -6,7 +6,7 @@
|
||||
<string>com.numa.dns</string>
|
||||
<key>ProgramArguments</key>
|
||||
<array>
|
||||
<string>/usr/local/bin/numa</string>
|
||||
<string>{{exe_path}}</string>
|
||||
</array>
|
||||
<key>RunAtLoad</key>
|
||||
<true/>
|
||||
|
||||
60
deploy.sh
Executable file
60
deploy.sh
Executable file
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/env bash
# deploy.sh — bump the crate version, commit, tag, and push.
# Pushing the tag triggers GitHub Actions (release binaries + crates.io publish).
# Usage: ./deploy.sh v0.5.1   (or ./deploy.sh 0.5.1)
set -euo pipefail

VERSION="${1:-}"

if [ -z "$VERSION" ]; then
  echo "Usage: ./deploy.sh v0.5.1"
  exit 1
fi

# Strip leading 'v' for Cargo.toml (accepts both "v0.5.1" and "0.5.1")
SEMVER="${VERSION#v}"
TAG="v${SEMVER}"

# Validate semver format
if ! [[ "$SEMVER" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
  echo "Error: '$SEMVER' is not a valid semver (expected: X.Y.Z)"
  exit 1
fi

# Check we're on main
BRANCH=$(git branch --show-current)
if [ "$BRANCH" != "main" ]; then
  echo "Error: must be on main branch (currently on '$BRANCH')"
  exit 1
fi

# Check working tree is clean (ignoring the files this script itself modifies)
if [ -n "$(git status --porcelain -- ':!deploy.sh' ':!Cargo.toml' ':!Cargo.lock')" ]; then
  echo "Error: working tree has uncommitted changes"
  git status --short
  exit 1
fi

# Check tag doesn't already exist.
# Use -q --verify against refs/tags/ explicitly: a bare `git rev-parse "$TAG"`
# resolves ANY ref (a branch named v0.5.1 would give a false positive).
if git rev-parse -q --verify "refs/tags/$TAG" >/dev/null; then
  echo "Error: tag '$TAG' already exists"
  exit 1
fi

CURRENT=$(grep '^version = ' Cargo.toml | head -1 | sed 's/version = "\(.*\)"/\1/')
if [ -z "$CURRENT" ]; then
  echo "Error: could not read current version from Cargo.toml"
  exit 1
fi
echo "Bumping $CURRENT → $SEMVER"

# Update Cargo.toml version. Write to a temp file and move it into place:
# `sed -i ''` is BSD/macOS-only and `sed -i` is GNU-only, so true in-place
# editing is not portable across the two. The temp-file dance works on both.
sed "s/^version = \"$CURRENT\"/version = \"$SEMVER\"/" Cargo.toml > Cargo.toml.tmp
mv Cargo.toml.tmp Cargo.toml

# Update Cargo.lock. Don't discard stderr: under `set -e` a cargo failure
# aborts the script, and we want the error message explaining why.
cargo check --quiet

# Commit, tag, push
git add Cargo.toml Cargo.lock
git commit -m "bump version to $SEMVER"
git tag "$TAG"
git push
git push origin "$TAG"

echo ""
echo "✓ Tagged $TAG and pushed"
echo " → GitHub Actions: release binaries + crates.io publish"
echo " → Watch: gh run list --limit 1"
|
||||
10
install.sh
10
install.sh
@@ -70,8 +70,10 @@ echo ""
|
||||
echo " \033[38;2;107;124;78mInstalled:\033[0m $INSTALL_DIR/numa ($TAG)"
|
||||
echo ""
|
||||
echo " Get started:"
|
||||
echo " sudo numa # start the DNS server"
|
||||
echo " sudo numa install # set as system DNS"
|
||||
echo " sudo numa service start # run as persistent service"
|
||||
echo " open http://localhost:5380 # dashboard"
|
||||
echo " sudo numa install # install service + set as system DNS"
|
||||
echo " open http://localhost:5380 # dashboard"
|
||||
echo ""
|
||||
echo " Other commands:"
|
||||
echo " sudo numa # run in foreground (no service)"
|
||||
echo " sudo numa uninstall # restore original DNS"
|
||||
echo ""
|
||||
|
||||
@@ -5,7 +5,7 @@ Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/usr/local/bin/numa
|
||||
ExecStart={{exe_path}}
|
||||
Restart=always
|
||||
RestartSec=2
|
||||
StandardOutput=journal
|
||||
|
||||
87
numa.toml
87
numa.toml
@@ -1,12 +1,54 @@
|
||||
[server]
|
||||
bind_addr = "0.0.0.0:53"
|
||||
api_port = 5380
|
||||
# api_bind_addr = "127.0.0.1" # default; set to "0.0.0.0" for LAN dashboard access
|
||||
# data_dir = "/var/lib/numa" # where numa stores TLS CA and cert material
|
||||
# Defaults: /var/lib/numa on linux (FHS),
|
||||
# /usr/local/var/numa on macos (homebrew prefix),
|
||||
# %PROGRAMDATA%\numa on windows. Override for
|
||||
# containerized deploys or tests that can't
|
||||
# write to the system path.
|
||||
|
||||
# [upstream]
|
||||
# address = "" # auto-detect from system resolver (default)
|
||||
# address = "9.9.9.9" # or set explicitly
|
||||
# port = 53
|
||||
# mode = "forward" # "forward" (default) — relay to upstream
|
||||
# # "recursive" — resolve from root hints (no address needed)
|
||||
# address = "https://dns.quad9.net/dns-query" # DNS-over-HTTPS (encrypted)
|
||||
# address = "https://cloudflare-dns.com/dns-query" # Cloudflare DoH
|
||||
# address = "9.9.9.9" # plain UDP
|
||||
# port = 53 # only for forward mode, plain UDP
|
||||
# timeout_ms = 3000
|
||||
# root_hints = [ # only used in recursive mode
|
||||
# "198.41.0.4", # a.root-servers.net (Verisign)
|
||||
# "199.9.14.201", # b.root-servers.net (USC-ISI)
|
||||
# "192.33.4.12", # c.root-servers.net (Cogent)
|
||||
# "199.7.91.13", # d.root-servers.net (UMD)
|
||||
# "192.203.230.10", # e.root-servers.net (NASA)
|
||||
# "192.5.5.241", # f.root-servers.net (ISC)
|
||||
# "192.112.36.4", # g.root-servers.net (US DoD)
|
||||
# "198.97.190.53", # h.root-servers.net (US Army)
|
||||
# "192.36.148.17", # i.root-servers.net (Netnod)
|
||||
# "192.58.128.30", # j.root-servers.net (Verisign)
|
||||
# "193.0.14.129", # k.root-servers.net (RIPE NCC)
|
||||
# "199.7.83.42", # l.root-servers.net (ICANN)
|
||||
# "202.12.27.33", # m.root-servers.net (WIDE)
|
||||
# ]
|
||||
# prime_tlds = [ # TLDs to pre-warm on startup (recursive mode)
|
||||
# "com", "net", "org", "info", # gTLDs
|
||||
# "io", "dev", "app", "xyz", "me",
|
||||
# "eu", "uk", "de", "fr", "nl", # EU + European ccTLDs
|
||||
# "it", "es", "pl", "se", "no",
|
||||
# "dk", "fi", "at", "be", "ie",
|
||||
# "pt", "cz", "ro", "gr", "hu",
|
||||
# "bg", "hr", "sk", "si", "lt",
|
||||
# "lv", "ee", "ch", "is",
|
||||
# "co", "br", "au", "ca", "jp", # other major ccTLDs
|
||||
# ]
|
||||
|
||||
# [blocking]
|
||||
# enabled = true # set to false to disable ad blocking
|
||||
# refresh_hours = 24
|
||||
# lists = ["https://cdn.jsdelivr.net/gh/hagezi/dns-blocklists@latest/hosts/pro.txt"]
|
||||
# allowlist = ["example.com"] # domains to never block
|
||||
|
||||
[cache]
|
||||
max_entries = 10000
|
||||
@@ -18,6 +60,7 @@ enabled = true
|
||||
port = 80
|
||||
tls_port = 443
|
||||
tld = "numa"
|
||||
# bind_addr = "127.0.0.1" # default; set to "0.0.0.0" for LAN access to .numa services
|
||||
|
||||
# Pre-configured services (numa.numa is always added automatically)
|
||||
# [[services]]
|
||||
@@ -40,3 +83,41 @@ tld = "numa"
|
||||
# record_type = "A"
|
||||
# value = "127.0.0.1"
|
||||
# ttl = 60
|
||||
|
||||
# DNSSEC signature validation (requires mode = "recursive")
|
||||
# [dnssec]
|
||||
# enabled = false # opt-in: verify chain of trust from root KSK
|
||||
# strict = false # true = SERVFAIL on bogus signatures
|
||||
|
||||
# DNS-over-TLS listener (RFC 7858) — encrypted DNS on port 853
|
||||
# [dot]
|
||||
# enabled = false # opt-in: accept DoT queries
|
||||
# port = 853 # standard DoT port
|
||||
# bind_addr = "0.0.0.0" # IPv4 or IPv6; unspecified binds all interfaces
|
||||
# cert_path = "/etc/numa/dot.crt" # PEM cert; omit to use self-signed (proxy CA if available)
|
||||
# key_path = "/etc/numa/dot.key" # PEM private key; must be set together with cert_path
|
||||
|
||||
# LAN service discovery via mDNS (disabled by default — no network traffic unless enabled)
|
||||
# [lan]
|
||||
# enabled = true # discover other Numa instances via mDNS (_numa._tcp.local)
|
||||
# broadcast_interval_secs = 30
|
||||
# peer_timeout_secs = 90
|
||||
|
||||
# Mobile API — persistent HTTP listener serving read-only routes
|
||||
# (/health, /ca.pem, /mobileconfig, /ca.mobileconfig) on a LAN-reachable
|
||||
# port. Consumed by the iOS/Android companion apps for discovery and
|
||||
# profile fetching, and by `numa setup-phone` for QR-based onboarding.
|
||||
#
|
||||
# Opt-in because the listener binds to the LAN by default. None of the
|
||||
# exposed routes are cryptographically sensitive (no private keys, no
|
||||
# state mutations, all idempotent GETs), but enabling it does add a new
|
||||
# listener to any device on the LAN that scans port 8765.
|
||||
#
|
||||
# Safe for home LANs. Think twice before enabling on untrusted LANs
|
||||
# (office Wi-Fi, coffee shops, etc.) — an attacker on the same network
|
||||
# could run a competing Numa instance that shadows yours via mDNS and
|
||||
# trick companion apps into installing their profile instead of yours.
|
||||
[mobile]
|
||||
enabled = true # opt-in to the mobile API listener
|
||||
# port = 8765 # default; matches Discovery.swift defaultAPIPort
|
||||
# bind_addr = "0.0.0.0" # default; set to "127.0.0.1" for localhost-only
|
||||
|
||||
306
scripts/benchmark.sh
Executable file
306
scripts/benchmark.sh
Executable file
@@ -0,0 +1,306 @@
|
||||
#!/usr/bin/env bash
# benchmark.sh — latency benchmark harness for the numa DNS server.
# Drives lookups through the local resolver with dig and reads latency
# stats back from numa's HTTP API. All knobs are overridable via env vars.
set -euo pipefail

API="${NUMA_API:-http://127.0.0.1:5380}"       # numa HTTP API base URL
DNS="${NUMA_DNS:-127.0.0.1}"                   # resolver address passed to dig
NUMA_BIN="${NUMA_BIN:-/usr/local/bin/numa}"    # binary used when restarting with a custom config
LAUNCHD_PLIST="/Library/LaunchDaemons/com.numa.dns.plist"  # macOS service definition to restore afterwards

# Fixed query workload. The order is part of the benchmark (SRTT warms up
# as the list progresses), so keep it stable between runs.
DOMAINS=(
  paypal.com ebay.com zoom.us slack.com discord.com
  microsoft.com apple.com meta.com oracle.com ibm.com
  docker.com kubernetes.io prometheus.io grafana.com terraform.io
  python.org nodejs.org golang.org wikipedia.org reddit.com
  stackoverflow.com stripe.com linear.app nytimes.com bbc.co.uk
  rust-lang.org fastly.com hetzner.com uber.com airbnb.com
  notion.so figma.com netflix.com spotify.com dropbox.com
  gitlab.com twitch.tv shopify.com vercel.app mozilla.org
)
|
||||
|
||||
# Fetch the query log from the API and summarize latency for queries that
# were answered by full recursion (path == RECURSIVE). All of the math is
# done by an inline python3 script reading the JSON log from stdin.
stats() {
  curl -s "$API/query-log" | python3 -c "
import sys, json

data = json.load(sys.stdin)
# Only recursive queries are interesting here; cache hits and forwards
# would skew the latency distribution.
rec = [q for q in data if q['path'] == 'RECURSIVE']
if not rec:
    print('No recursive queries in log.')
    sys.exit()

vals = sorted([q['latency_ms'] for q in rec])
n = len(vals)

print(f'Recursive queries: {n}')
print(f' Avg: {sum(vals)/n:.1f}ms')
print(f' Median: {vals[n//2]:.1f}ms')
print(f' P95: {vals[int(n*0.95)]:.1f}ms')
print(f' P99: {vals[int(n*0.99)]:.1f}ms')
print(f' Min: {min(vals):.1f}ms')
print(f' Max: {max(vals):.1f}ms')
print(f' <100ms: {sum(1 for v in vals if v < 100)}')
print(f' <200ms: {sum(1 for v in vals if v < 200)}')
print(f' <500ms: {sum(1 for v in vals if v < 500)}')
print(f' >1s: {sum(1 for v in vals if v >= 1000)}')
print()
print('Slowest 5:')
for q in sorted(rec, key=lambda q: q['latency_ms'], reverse=True)[:5]:
    print(f' {q[\"latency_ms\"]:>8.1f}ms {q[\"query_type\"]:5s} {q[\"domain\"]:35s} {q[\"rescode\"]}')
print()
print('Fastest 5:')
for q in sorted(rec, key=lambda q: q['latency_ms'])[:5]:
    print(f' {q[\"latency_ms\"]:>8.1f}ms {q[\"query_type\"]:5s} {q[\"domain\"]:35s} {q[\"rescode\"]}')
"
}
|
||||
|
||||
# Resolve every benchmark domain once through $DNS and print dig's reported
# query time for each.
#
#   $1: label printed as the section header.
query_all() {
  local label="$1"
  echo "=== $label ==="
  for d in "${DOMAINS[@]}"; do
    printf " %-25s " "$d"
    # `|| echo …` guard: under `set -e -o pipefail`, a single timed-out
    # lookup (dig exits non-zero) or a response with no stats line (grep
    # exits non-zero) would otherwise abort the entire benchmark run.
    dig "@$DNS" "$d" A +noall +stats 2>/dev/null | grep "Query time" || echo "(no answer)"
  done
  echo
}
|
||||
|
||||
# Drop every cache entry via the API, then report how many entries remain
# (should be ~0 right after a flush; '?' if the stats endpoint can't be read).
flush_cache() {
  curl -s -X DELETE "$API/cache" > /dev/null
  local remaining
  remaining=$(curl -s "$API/stats" | python3 -c "import sys,json; print(json.load(sys.stdin)['cache']['entries'])" 2>/dev/null || echo '?')
  echo "Cache flushed (${remaining} entries)."
}
|
||||
|
||||
# Poll the /health endpoint until the numa API answers, or die after 20
# failed attempts (~10s at 0.5s per try).
wait_for_api() {
  local try
  for try in $(seq 1 20); do
    if curl -sf "$API/health" > /dev/null 2>&1; then
      return
    fi
    if [ "$try" -lt 20 ]; then
      sleep 0.5
    fi
  done
  echo "ERROR: API not reachable at $API after 10s" >&2
  exit 1
}
|
||||
|
||||
# Wait until the cache entry count is non-zero and unchanged for 3
# consecutive 1-second samples — the heuristic for "TLD priming finished".
# Gives up (with a message, not an error) after 60 samples.
wait_for_priming() {
  echo -n "Waiting for TLD priming..."
  local last=0 steady=0 count tick
  for tick in $(seq 1 60); do
    count=$(curl -s "$API/stats" | python3 -c "import sys,json; print(json.load(sys.stdin)['cache']['entries'])" 2>/dev/null || echo 0)
    if [ "$count" -gt 0 ] && [ "$count" = "$last" ]; then
      steady=$((steady + 1))
      if [ $steady -ge 3 ]; then
        echo " done ($count cache entries)."
        return
      fi
    else
      # Count moved (or is still zero) — reset the stability streak.
      steady=0
    fi
    last="$count"
    sleep 1
  done
  echo " timeout (cache: $last entries)."
}
|
||||
|
||||
# restart_numa <config_toml_body>
# Writes config to a temp file, stops numa (launchd or manual), starts with that config.
# Blocks until the API is reachable and TLD priming has settled, so callers
# can start querying immediately after it returns.
restart_numa() {
  local config_body="$1"
  local tmpconf
  # mktemp can't portably place the XXXXXX template before a suffix, so
  # create the file first and rename it to get a .toml extension.
  tmpconf=$(mktemp /tmp/numa-bench-XXXXXX)
  mv "$tmpconf" "${tmpconf}.toml"
  tmpconf="${tmpconf}.toml"
  echo "$config_body" > "$tmpconf"

  # Stop launchd-managed numa if active
  if sudo launchctl list com.numa.dns &>/dev/null; then
    sudo launchctl unload "$LAUNCHD_PLIST" 2>/dev/null || true
    sleep 1
  fi

  # Kill any remaining numa processes; give the ports a moment to free up.
  sudo killall numa 2>/dev/null || true
  sleep 2

  # Launch in the background with the benchmark config, then wait for both
  # the API and the TLD cache priming before declaring it ready.
  sudo "$NUMA_BIN" "$tmpconf" &
  wait_for_api
  wait_for_priming
  echo "numa ready (pid $(pgrep numa | head -1), config: $tmpconf)."
}
|
||||
|
||||
# Put the normal launchd-managed numa service back after a benchmark run.
restore_launchd() {
  sudo killall numa 2>/dev/null || true
  sleep 1
  # Nothing to restore if the service was never installed.
  [ -f "$LAUNCHD_PLIST" ] || return 0
  sudo launchctl load "$LAUNCHD_PLIST" 2>/dev/null || true
  echo "Restored launchd service."
}
|
||||
|
||||
# Run one benchmark pass: flush the cache, query the full domain list,
# then print the latency summary. $1 names the pass in the output.
run_pass() {
  local pass_name="$1"
  flush_cache
  sleep 0.5
  query_all "$pass_name"
  echo "=== $pass_name — stats ==="
  stats
}
|
||||
|
||||
# Mode dispatch (defaults to "full" when no argument is given).
# The compare-* modes restart numa with a generated config per variant
# (quoted heredocs — no expansion), run a pass, and restore the launchd
# service at the end.
case "${1:-full}" in
  cold)
    echo "--- Cold cache benchmark ---"
    run_pass "Cold SRTT + Cold cache"
    ;;
  warm)
    echo "--- Warm SRTT benchmark ---"
    echo "Priming SRTT..."
    # One throwaway lookup per domain so the server has SRTT data for every
    # nameserver before the measured pass.
    for d in "${DOMAINS[@]}"; do dig "@$DNS" "$d" A +short > /dev/null 2>&1; done
    run_pass "Warm SRTT + Cold cache"
    ;;
  stats)
    # Just summarize whatever is already in the query log — no traffic.
    stats
    ;;
  compare-srtt)
    echo "============================================"
    echo " A/B: SRTT OFF vs ON (dnssec off)"
    echo "============================================"
    echo

    restart_numa "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = false
TOML
)"
    echo
    run_pass "SRTT OFF"

    echo
    echo "--------------------------------------------"
    echo

    restart_numa "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true
TOML
)"
    echo
    run_pass "SRTT ON"

    echo
    restore_launchd
    ;;
  compare-dnssec)
    echo "============================================"
    echo " A/B: DNSSEC OFF vs ON (srtt on)"
    echo "============================================"
    echo

    restart_numa "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true

[dnssec]
enabled = false
TOML
)"
    echo
    run_pass "DNSSEC OFF"

    echo
    echo "--------------------------------------------"
    echo

    restart_numa "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true

[dnssec]
enabled = true
TOML
)"
    echo
    run_pass "DNSSEC ON"

    echo
    restore_launchd
    ;;
  compare-all)
    echo "============================================"
    echo " Full A/B matrix"
    echo " 1. SRTT OFF + DNSSEC OFF (baseline)"
    echo " 2. SRTT ON + DNSSEC OFF"
    echo " 3. SRTT ON + DNSSEC ON"
    echo "============================================"
    echo

    # --- 1. Baseline ---
    restart_numa "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = false

[dnssec]
enabled = false
TOML
)"
    echo
    run_pass "SRTT OFF + DNSSEC OFF"

    echo
    echo "--------------------------------------------"
    echo

    # --- 2. SRTT only ---
    restart_numa "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true

[dnssec]
enabled = false
TOML
)"
    echo
    run_pass "SRTT ON + DNSSEC OFF"

    echo
    echo "--------------------------------------------"
    echo

    # --- 3. Both ---
    restart_numa "$(cat <<'TOML'
[upstream]
mode = "recursive"
srtt = true

[dnssec]
enabled = true
TOML
)"
    echo
    run_pass "SRTT ON + DNSSEC ON"

    echo
    restore_launchd
    ;;
  full|*)
    # Default mode: two passes against the already-running instance
    # (no restarts, no sudo). Pass 1 measures with cold SRTT, pass 2
    # repeats with SRTT warmed by pass 1; both start from a cold cache.
    echo "--- Full benchmark (cold → warm → SRTT-only) ---"
    echo

    wait_for_priming
    flush_cache
    sleep 0.5
    query_all "Pass 1: Cold SRTT + Cold cache"

    flush_cache
    sleep 0.5
    query_all "Pass 2: Warm SRTT + Cold cache"

    echo "=== Pass 2 stats (SRTT-warm) ==="
    stats
    ;;
esac
|
||||
@@ -8,8 +8,10 @@
|
||||
# 1. Opens the dashboard in Chrome --app mode (clean, no address bar)
|
||||
# 2. Generates DNS traffic (forward, cache hit, blocked)
|
||||
# 3. Types "peekm" / "6419" into the Local Services form on camera
|
||||
# 4. Opens peekm.numa to show the proxy working
|
||||
# 5. Records via ffmpeg and converts to optimized GIF
|
||||
# 4. Shows LAN accessibility badge ("local only" / "LAN")
|
||||
# 5. Checks a blocked domain
|
||||
# 6. Opens peekm.numa to show the proxy working
|
||||
# 7. Records via ffmpeg and converts to optimized GIF
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
@@ -228,18 +230,10 @@ dig @127.0.0.1 github.com +short > /dev/null 2>&1
|
||||
dig @127.0.0.1 ad.doubleclick.net +short > /dev/null 2>&1
|
||||
sleep 3
|
||||
|
||||
# --------------- Scene 2: Check Domain blocker (3-6s) ---------------
|
||||
log "Scene 2: Check Domain — blocked tracker..."
|
||||
type_into "#checkDomainInput" "ads.doubleclick.net" 0.04
|
||||
sleep 0.3
|
||||
# Click Check button
|
||||
run_js "document.querySelector('#checkDomainInput').closest('form').querySelector('.btn').click();"
|
||||
sleep 2
|
||||
# --------------- Scene 2: Add peekm service via UI (3-7s) ---------------
|
||||
log "Scene 2: Adding peekm.numa service..."
|
||||
|
||||
# --------------- Scene 3: Add peekm service via UI (6-10s) ---------------
|
||||
log "Scene 3: Adding peekm.numa service..."
|
||||
|
||||
# Scroll to Local Services form
|
||||
# Services panel is now first — scroll to it
|
||||
run_js "
|
||||
var svcPanel = document.getElementById('serviceForm');
|
||||
if (svcPanel) svcPanel.scrollIntoView({behavior: 'smooth', block: 'center'});
|
||||
@@ -251,20 +245,34 @@ sleep 0.2
|
||||
type_into "#svcPort" "6419" 0.1
|
||||
sleep 0.3
|
||||
|
||||
# Click "Add Service"
|
||||
# Click "Add Service" — LAN badge ("local only" or "LAN") will appear
|
||||
run_js "document.querySelector('#serviceForm .btn-add').click();"
|
||||
sleep 1.5
|
||||
sleep 2
|
||||
|
||||
# --------------- Scene 4: Open peekm.numa (10-14s) ---------------
|
||||
log "Scene 4: Opening peekm.numa in browser..."
|
||||
# --------------- Scene 3: Open peekm.numa (7-11s) ---------------
|
||||
log "Scene 3: Opening peekm.numa in browser..."
|
||||
open "http://peekm.numa/view/peekm/README.md" 2>/dev/null || true
|
||||
sleep 4
|
||||
|
||||
# --------------- Scene 5: Back to dashboard (14-17s) ---------------
|
||||
log "Scene 5: Back to dashboard — LOCAL queries visible..."
|
||||
# --------------- Scene 4: Back to dashboard (11-14s) ---------------
|
||||
log "Scene 4: Back to dashboard — LAN badges + LOCAL queries visible..."
|
||||
osascript -e "tell application \"System Events\" to set frontmost of (first process whose unix id is $CHROME_PID) to true" 2>/dev/null || true
|
||||
sleep 3
|
||||
|
||||
# --------------- Scene 5: Check Domain blocker (14-17s) ---------------
|
||||
log "Scene 5: Check Domain — blocked tracker..."
|
||||
# Scroll down to blocking panel
|
||||
run_js "
|
||||
var blockPanel = document.getElementById('blockingPanel');
|
||||
if (blockPanel) blockPanel.scrollIntoView({behavior: 'smooth', block: 'center'});
|
||||
"
|
||||
sleep 0.5
|
||||
type_into "#checkDomainInput" "ads.doubleclick.net" 0.04
|
||||
sleep 0.3
|
||||
# Click Check button
|
||||
run_js "document.querySelector('#checkDomainInput').closest('form').querySelector('.btn').click();"
|
||||
sleep 2
|
||||
|
||||
# --------------- Scene 6: Terminal-style dig overlay (17-20s) ---------------
|
||||
log "Scene 6: dig proof overlay..."
|
||||
DIG_RESULT=$(dig @127.0.0.1 peekm.numa +short 2>/dev/null | head -1)
|
||||
|
||||
43
scripts/release.sh
Executable file
43
scripts/release.sh
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
echo "Usage: $0 <version> (e.g. 0.7.0)" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VERSION="$1"
|
||||
TAG="v$VERSION"
|
||||
|
||||
# Sanity checks
|
||||
if ! git diff --quiet || ! git diff --cached --quiet; then
|
||||
echo "ERROR: working tree is dirty — commit or stash first" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$(git branch --show-current)" != "main" ]; then
|
||||
echo "ERROR: must be on main branch" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if git tag -l "$TAG" | grep -q .; then
|
||||
echo "ERROR: tag $TAG already exists" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
CURRENT=$(grep '^version = ' Cargo.toml | head -1 | sed 's/version = "\(.*\)"/\1/')
|
||||
echo "Bumping $CURRENT -> $VERSION"
|
||||
|
||||
# Bump version
|
||||
sed -i.bak "s/^version = \"$CURRENT\"/version = \"$VERSION\"/" Cargo.toml
|
||||
rm -f Cargo.toml.bak
|
||||
cargo update --workspace
|
||||
|
||||
# Commit, tag, push
|
||||
git add Cargo.toml Cargo.lock
|
||||
git commit -m "chore: bump version to $VERSION"
|
||||
git tag "$TAG"
|
||||
git push origin main "$TAG"
|
||||
|
||||
echo
|
||||
echo "Released $TAG — GitHub Actions will build, publish to crates.io, and create the release."
|
||||
57
scripts/update-homebrew-formula.py
Executable file
57
scripts/update-homebrew-formula.py
Executable file
@@ -0,0 +1,57 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Rewrite a Homebrew formula in place: bump version, URL paths, and sha256 lines.
|
||||
|
||||
Reads the formula path from argv[1], and the following env vars:
|
||||
VERSION e.g. "0.10.0" (no leading v)
|
||||
SHA_MACOS_AARCH64
|
||||
SHA_MACOS_X86_64
|
||||
SHA_LINUX_AARCH64
|
||||
SHA_LINUX_X86_64
|
||||
|
||||
Assumptions about the formula:
|
||||
- Has `version "X.Y.Z"` somewhere
|
||||
- Has `url "...releases/download/vX.Y.Z/numa-<target>.tar.gz"` lines
|
||||
- May or may not already have `sha256 "..."` lines immediately after each url
|
||||
"""
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
formula_path = sys.argv[1]
|
||||
version = os.environ["VERSION"].lstrip("v")
|
||||
shas = {
|
||||
"macos-aarch64": os.environ["SHA_MACOS_AARCH64"],
|
||||
"macos-x86_64": os.environ["SHA_MACOS_X86_64"],
|
||||
"linux-aarch64": os.environ["SHA_LINUX_AARCH64"],
|
||||
"linux-x86_64": os.environ["SHA_LINUX_X86_64"],
|
||||
}
|
||||
|
||||
with open(formula_path) as f:
|
||||
content = f.read()
|
||||
|
||||
content = re.sub(r'version "[^"]*"', f'version "{version}"', content)
|
||||
content = re.sub(
|
||||
r"releases/download/v[\d.]+/numa-",
|
||||
f"releases/download/v{version}/numa-",
|
||||
content,
|
||||
)
|
||||
content = re.sub(r'\n[ \t]*sha256 "[^"]*"', "", content)
|
||||
|
||||
|
||||
def add_sha(match: re.Match) -> str:
|
||||
indent = match.group(1)
|
||||
target = match.group(2)
|
||||
if target not in shas:
|
||||
return match.group(0)
|
||||
return f'{match.group(0)}\n{indent}sha256 "{shas[target]}"'
|
||||
|
||||
|
||||
content = re.sub(
|
||||
r'^([ \t]+)url "[^"]*numa-([\w-]+)\.tar\.gz"',
|
||||
add_sha,
|
||||
content,
|
||||
flags=re.MULTILINE,
|
||||
)
|
||||
|
||||
with open(formula_path, "w") as f:
|
||||
f.write(content)
|
||||
1
site/CNAME
Normal file
1
site/CNAME
Normal file
@@ -0,0 +1 @@
|
||||
numa.rs
|
||||
304
site/blog-template.html
Normal file
304
site/blog-template.html
Normal file
@@ -0,0 +1,304 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>$title$ — Numa</title>
|
||||
<meta name="description" content="$description$">
|
||||
<link rel="stylesheet" href="/fonts/fonts.css">
|
||||
<style>
|
||||
*, *::before, *::after { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
|
||||
:root {
|
||||
--bg-deep: #f5f0e8;
|
||||
--bg-surface: #ece5da;
|
||||
--bg-elevated: #e3dbce;
|
||||
--bg-card: #faf7f2;
|
||||
--amber: #c0623a;
|
||||
--amber-dim: #9e4e2d;
|
||||
--teal: #6b7c4e;
|
||||
--teal-dim: #566540;
|
||||
--violet: #64748b;
|
||||
--text-primary: #2c2418;
|
||||
--text-secondary: #6b5e4f;
|
||||
--text-dim: #a39888;
|
||||
--border: rgba(0, 0, 0, 0.08);
|
||||
--border-amber: rgba(192, 98, 58, 0.22);
|
||||
--font-display: 'Instrument Serif', Georgia, serif;
|
||||
--font-body: 'DM Sans', system-ui, sans-serif;
|
||||
--font-mono: 'JetBrains Mono', monospace;
|
||||
}
|
||||
|
||||
html { scroll-behavior: smooth; }
|
||||
|
||||
body {
|
||||
background: var(--bg-deep);
|
||||
color: var(--text-primary);
|
||||
font-family: var(--font-body);
|
||||
font-weight: 400;
|
||||
line-height: 1.7;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
}
|
||||
|
||||
body::before {
|
||||
content: '';
|
||||
position: fixed;
|
||||
inset: 0;
|
||||
background-image: url("data:image/svg+xml,%3Csvg viewBox='0 0 256 256' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='n'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='0.9' numOctaves='4' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23n)' opacity='0.025'/%3E%3C/svg%3E");
|
||||
pointer-events: none;
|
||||
z-index: 9999;
|
||||
}
|
||||
|
||||
/* --- Blog nav --- */
|
||||
.blog-nav {
|
||||
padding: 1.5rem 2rem;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1.5rem;
|
||||
}
|
||||
|
||||
.blog-nav a {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
text-decoration: none;
|
||||
transition: color 0.2s;
|
||||
}
|
||||
.blog-nav a:hover { color: var(--amber); }
|
||||
|
||||
.blog-nav .wordmark {
|
||||
font-family: var(--font-display);
|
||||
font-size: 1.4rem;
|
||||
font-weight: 400;
|
||||
color: var(--text-primary);
|
||||
text-decoration: none;
|
||||
text-transform: none;
|
||||
letter-spacing: -0.02em;
|
||||
}
|
||||
.blog-nav .wordmark:hover { color: var(--amber); }
|
||||
|
||||
.blog-nav .sep {
|
||||
color: var(--text-dim);
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
|
||||
/* --- Article --- */
|
||||
.article {
|
||||
max-width: 720px;
|
||||
margin: 0 auto;
|
||||
padding: 3rem 2rem 6rem;
|
||||
}
|
||||
|
||||
.article-header {
|
||||
margin-bottom: 3rem;
|
||||
padding-bottom: 2rem;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.article-header h1 {
|
||||
font-family: var(--font-display);
|
||||
font-weight: 400;
|
||||
font-size: clamp(2rem, 5vw, 3rem);
|
||||
line-height: 1.15;
|
||||
margin-bottom: 1rem;
|
||||
color: var(--text-primary);
|
||||
}
|
||||
|
||||
.article-meta {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
color: var(--text-dim);
|
||||
letter-spacing: 0.04em;
|
||||
}
|
||||
|
||||
.article-meta a {
|
||||
color: var(--amber);
|
||||
text-decoration: none;
|
||||
}
|
||||
.article-meta a:hover { text-decoration: underline; }
|
||||
|
||||
/* --- Prose --- */
|
||||
.article h2 {
|
||||
font-family: var(--font-display);
|
||||
font-weight: 600;
|
||||
font-size: 1.8rem;
|
||||
line-height: 1.2;
|
||||
margin: 3rem 0 1rem;
|
||||
color: var(--text-primary);
|
||||
}
|
||||
|
||||
.article h3 {
|
||||
font-family: var(--font-body);
|
||||
font-weight: 600;
|
||||
font-size: 1.2rem;
|
||||
margin: 2rem 0 0.75rem;
|
||||
color: var(--text-primary);
|
||||
}
|
||||
|
||||
.article p {
|
||||
margin-bottom: 1.25rem;
|
||||
color: var(--text-secondary);
|
||||
font-size: 1.05rem;
|
||||
}
|
||||
|
||||
.article a {
|
||||
color: var(--amber);
|
||||
text-decoration: underline;
|
||||
text-decoration-color: rgba(192, 98, 58, 0.3);
|
||||
text-underline-offset: 2px;
|
||||
transition: text-decoration-color 0.2s;
|
||||
}
|
||||
.article a:hover {
|
||||
text-decoration-color: var(--amber);
|
||||
}
|
||||
|
||||
.article strong {
|
||||
color: var(--text-primary);
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.article ul, .article ol {
|
||||
margin-bottom: 1.25rem;
|
||||
padding-left: 1.5rem;
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
|
||||
.article li {
|
||||
margin-bottom: 0.4rem;
|
||||
font-size: 1.05rem;
|
||||
}
|
||||
|
||||
.article blockquote {
|
||||
border-left: 3px solid var(--amber);
|
||||
padding: 0.75rem 1.25rem;
|
||||
margin: 1.5rem 0;
|
||||
background: rgba(192, 98, 58, 0.04);
|
||||
border-radius: 0 4px 4px 0;
|
||||
}
|
||||
|
||||
.article blockquote p {
|
||||
color: var(--text-secondary);
|
||||
font-style: italic;
|
||||
margin-bottom: 0;
|
||||
}
|
||||
|
||||
/* --- Code --- */
|
||||
.article code {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.88em;
|
||||
background: var(--bg-elevated);
|
||||
padding: 0.15em 0.4em;
|
||||
border-radius: 3px;
|
||||
color: var(--amber-dim);
|
||||
}
|
||||
|
||||
.article pre {
|
||||
background: var(--bg-card);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 6px;
|
||||
padding: 1.25rem 1.5rem;
|
||||
margin: 1.5rem 0;
|
||||
overflow-x: auto;
|
||||
line-height: 1.55;
|
||||
}
|
||||
|
||||
.article pre code {
|
||||
background: none;
|
||||
padding: 0;
|
||||
border-radius: 0;
|
||||
color: var(--text-primary);
|
||||
font-size: 0.85rem;
|
||||
}
|
||||
|
||||
/* --- Images --- */
|
||||
.article img {
|
||||
max-width: 100%;
|
||||
border-radius: 6px;
|
||||
border: 1px solid var(--border);
|
||||
margin: 1.5rem 0;
|
||||
}
|
||||
|
||||
/* --- Tables --- */
|
||||
.article table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
margin: 1.5rem 0;
|
||||
font-size: 0.95rem;
|
||||
}
|
||||
|
||||
.article th {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
letter-spacing: 0.06em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
text-align: left;
|
||||
padding: 0.6rem 1rem;
|
||||
border-bottom: 2px solid var(--border);
|
||||
}
|
||||
|
||||
.article td {
|
||||
padding: 0.6rem 1rem;
|
||||
border-bottom: 1px solid var(--border);
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
|
||||
/* --- Footer --- */
|
||||
.blog-footer {
|
||||
text-align: center;
|
||||
padding: 3rem 2rem;
|
||||
border-top: 1px solid var(--border);
|
||||
max-width: 720px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
.blog-footer a {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
text-decoration: none;
|
||||
margin: 0 1rem;
|
||||
}
|
||||
.blog-footer a:hover { color: var(--amber); }
|
||||
|
||||
/* --- Responsive --- */
|
||||
@media (max-width: 640px) {
|
||||
.article { padding: 2rem 1.25rem 4rem; }
|
||||
.article pre { padding: 1rem; margin-left: -0.5rem; margin-right: -0.5rem; border-radius: 0; border-left: none; border-right: none; }
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<nav class="blog-nav">
|
||||
<a href="/" class="wordmark">Numa</a>
|
||||
<span class="sep">/</span>
|
||||
<a href="/blog/">Blog</a>
|
||||
</nav>
|
||||
|
||||
<article class="article">
|
||||
<header class="article-header">
|
||||
<h1>$title$</h1>
|
||||
<div class="article-meta">
|
||||
$date$ · <a href="https://dimescu.ro">Razvan Dimescu</a>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
$body$
|
||||
</article>
|
||||
|
||||
<footer class="blog-footer">
|
||||
<a href="https://github.com/razvandimescu/numa">GitHub</a>
|
||||
<a href="/">Home</a>
|
||||
<a href="/blog/">Blog</a>
|
||||
</footer>
|
||||
|
||||
<script data-goatcounter="https://razvandimescu.goatcounter.com/count"
|
||||
async src="//gc.zgo.at/count.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
136
site/blog/dnssec-chain.svg
Normal file
136
site/blog/dnssec-chain.svg
Normal file
@@ -0,0 +1,136 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 720 680" font-family="'DM Sans', system-ui, sans-serif" font-size="13">
|
||||
<defs>
|
||||
<marker id="arr" viewBox="0 0 10 10" refX="10" refY="5" markerWidth="7" markerHeight="7" orient="auto-start-reverse">
|
||||
<path d="M 0 0 L 10 5 L 0 10 z" fill="#64748b"/>
|
||||
</marker>
|
||||
<marker id="arr-amber" viewBox="0 0 10 10" refX="10" refY="5" markerWidth="7" markerHeight="7" orient="auto-start-reverse">
|
||||
<path d="M 0 0 L 10 5 L 0 10 z" fill="#c0623a"/>
|
||||
</marker>
|
||||
<marker id="arr-teal" viewBox="0 0 10 10" refX="10" refY="5" markerWidth="7" markerHeight="7" orient="auto-start-reverse">
|
||||
<path d="M 0 0 L 10 5 L 0 10 z" fill="#6b7c4e"/>
|
||||
</marker>
|
||||
<filter id="s" x="-3%" y="-3%" width="106%" height="106%">
|
||||
<feDropShadow dx="0" dy="1" stdDeviation="2" flood-opacity="0.06"/>
|
||||
</filter>
|
||||
</defs>
|
||||
|
||||
<!-- Background -->
|
||||
<rect width="720" height="680" rx="8" fill="#faf7f2"/>
|
||||
|
||||
<!-- Title -->
|
||||
<text x="360" y="36" text-anchor="middle" font-size="15" font-weight="600" fill="#2c2418" font-family="'Instrument Serif', Georgia, serif" letter-spacing="-0.02em">DNSSEC Chain of Trust</text>
|
||||
<text x="360" y="54" text-anchor="middle" font-size="11" fill="#a39888">Verifying cloudflare.com — from answer to root trust anchor</text>
|
||||
|
||||
<!-- Legend -->
|
||||
<g transform="translate(28, 72)">
|
||||
<rect width="14" height="14" rx="3" fill="#c0623a" opacity="0.15" stroke="#c0623a" stroke-width="1"/>
|
||||
<text x="20" y="12" font-size="11" fill="#6b5e4f">Verify signature (RRSIG → DNSKEY)</text>
|
||||
<rect x="230" width="14" height="14" rx="3" fill="#6b7c4e" opacity="0.15" stroke="#6b7c4e" stroke-width="1"/>
|
||||
<text x="250" y="12" font-size="11" fill="#6b5e4f">Vouch for key (DS → parent DNSKEY)</text>
|
||||
<rect x="478" width="14" height="14" rx="3" fill="#2c2418" opacity="0.08" stroke="#2c2418" stroke-opacity="0.15" stroke-width="1"/>
|
||||
<text x="498" y="12" font-size="11" fill="#6b5e4f">DNS record / key</text>
|
||||
</g>
|
||||
|
||||
<!-- ═══ ZONE: cloudflare.com ═══ -->
|
||||
<rect x="40" y="104" width="640" height="152" rx="8" fill="none" stroke="rgba(0,0,0,0.06)" stroke-dasharray="4,3"/>
|
||||
<text x="56" y="122" font-size="10" font-weight="600" fill="#a39888" letter-spacing="0.08em" font-family="'JetBrains Mono', monospace">CLOUDFLARE.COM ZONE</text>
|
||||
|
||||
<!-- A record -->
|
||||
<rect x="80" y="138" width="320" height="38" rx="6" fill="white" stroke="rgba(0,0,0,0.08)" filter="url(#s)"/>
|
||||
<text x="96" y="157" font-size="12" font-weight="600" fill="#2c2418" font-family="'JetBrains Mono', monospace">cloudflare.com A 104.16.132.229</text>
|
||||
<text x="96" y="170" font-size="10" fill="#a39888">The answer we want to verify</text>
|
||||
|
||||
<!-- RRSIG -->
|
||||
<line x1="400" y1="157" x2="440" y2="157" stroke="#c0623a" stroke-width="1.5" marker-end="url(#arr-amber)"/>
|
||||
<text x="412" y="149" font-size="9" fill="#c0623a" font-weight="600">signed by</text>
|
||||
|
||||
<rect x="445" y="138" width="220" height="38" rx="6" fill="rgba(192,98,58,0.06)" stroke="rgba(192,98,58,0.2)" filter="url(#s)"/>
|
||||
<text x="461" y="155" font-size="11" font-weight="600" fill="#9e4e2d" font-family="'JetBrains Mono', monospace">RRSIG</text>
|
||||
<text x="505" y="155" font-size="11" fill="#6b5e4f">tag=34505, algo=13</text>
|
||||
<text x="461" y="170" font-size="10" fill="#a39888">signer: cloudflare.com</text>
|
||||
|
||||
<!-- DNSKEY -->
|
||||
<rect x="80" y="192" width="320" height="50" rx="6" fill="white" stroke="rgba(0,0,0,0.08)" filter="url(#s)"/>
|
||||
<text x="96" y="211" font-size="11" font-weight="600" fill="#2c2418" font-family="'JetBrains Mono', monospace">DNSKEY</text>
|
||||
<text x="156" y="211" font-size="11" fill="#6b5e4f">cloudflare.com, tag=34505</text>
|
||||
<text x="96" y="228" font-size="11" fill="#6b7c4e" font-weight="500">ECDSA P-256</text>
|
||||
<text x="194" y="228" font-size="10" fill="#a39888">— 174ns to verify</text>
|
||||
|
||||
<!-- RRSIG → DNSKEY arrow -->
|
||||
<path d="M 555 176 L 555 192 L 400 192 L 400 200" stroke="#c0623a" stroke-width="1.5" fill="none" marker-end="url(#arr-amber)"/>
|
||||
<text x="460" y="189" font-size="9" fill="#c0623a" font-weight="600">verified with</text>
|
||||
|
||||
<!-- ═══ ZONE: .com ═══ -->
|
||||
<rect x="40" y="270" width="640" height="132" rx="8" fill="none" stroke="rgba(0,0,0,0.06)" stroke-dasharray="4,3"/>
|
||||
<text x="56" y="288" font-size="10" font-weight="600" fill="#a39888" letter-spacing="0.08em" font-family="'JetBrains Mono', monospace">.COM TLD ZONE</text>
|
||||
|
||||
<!-- DS connecting zones -->
|
||||
<line x1="240" y1="242" x2="240" y2="302" stroke="#6b7c4e" stroke-width="1.5" marker-end="url(#arr-teal)"/>
|
||||
<text x="252" y="276" font-size="9" fill="#6b7c4e" font-weight="600">vouched for by</text>
|
||||
|
||||
<!-- DS record at .com -->
|
||||
<rect x="80" y="304" width="320" height="38" rx="6" fill="rgba(107,124,78,0.06)" stroke="rgba(107,124,78,0.2)" filter="url(#s)"/>
|
||||
<text x="96" y="321" font-size="11" font-weight="600" fill="#566540" font-family="'JetBrains Mono', monospace">DS</text>
|
||||
<text x="118" y="321" font-size="11" fill="#6b5e4f">tag=2371, digest=SHA-256</text>
|
||||
<text x="96" y="336" font-size="10" fill="#a39888">hash of cloudflare.com DNSKEY</text>
|
||||
|
||||
<!-- DS signed by RRSIG -->
|
||||
<line x1="400" y1="323" x2="440" y2="323" stroke="#c0623a" stroke-width="1.5" marker-end="url(#arr-amber)"/>
|
||||
<text x="412" y="315" font-size="9" fill="#c0623a" font-weight="600">signed by</text>
|
||||
|
||||
<rect x="445" y="304" width="220" height="38" rx="6" fill="rgba(192,98,58,0.06)" stroke="rgba(192,98,58,0.2)" filter="url(#s)"/>
|
||||
<text x="461" y="321" font-size="11" font-weight="600" fill="#9e4e2d" font-family="'JetBrains Mono', monospace">RRSIG</text>
|
||||
<text x="505" y="321" font-size="11" fill="#6b5e4f">tag=19718, signer=com</text>
|
||||
|
||||
<!-- .com DNSKEY -->
|
||||
<rect x="80" y="356" width="320" height="32" rx="6" fill="white" stroke="rgba(0,0,0,0.08)" filter="url(#s)"/>
|
||||
<text x="96" y="377" font-size="11" font-weight="600" fill="#2c2418" font-family="'JetBrains Mono', monospace">DNSKEY</text>
|
||||
<text x="156" y="377" font-size="11" fill="#6b5e4f">com, tag=19718</text>
|
||||
|
||||
<!-- RRSIG → .com DNSKEY -->
|
||||
<path d="M 555 342 L 555 356 L 400 356 L 400 366" stroke="#c0623a" stroke-width="1.5" fill="none" marker-end="url(#arr-amber)"/>
|
||||
<text x="460" y="353" font-size="9" fill="#c0623a" font-weight="600">verified with</text>
|
||||
|
||||
<!-- ═══ ZONE: root ═══ -->
|
||||
<rect x="40" y="404" width="640" height="132" rx="8" fill="none" stroke="rgba(0,0,0,0.06)" stroke-dasharray="4,3"/>
|
||||
<text x="56" y="422" font-size="10" font-weight="600" fill="#a39888" letter-spacing="0.08em" font-family="'JetBrains Mono', monospace">ROOT ZONE (.)</text>
|
||||
|
||||
<!-- DS connecting .com → root -->
|
||||
<line x1="240" y1="388" x2="240" y2="436" stroke="#6b7c4e" stroke-width="1.5" marker-end="url(#arr-teal)"/>
|
||||
<text x="252" y="416" font-size="9" fill="#6b7c4e" font-weight="600">vouched for by</text>
|
||||
|
||||
<!-- DS at root -->
|
||||
<rect x="80" y="438" width="320" height="38" rx="6" fill="rgba(107,124,78,0.06)" stroke="rgba(107,124,78,0.2)" filter="url(#s)"/>
|
||||
<text x="96" y="455" font-size="11" font-weight="600" fill="#566540" font-family="'JetBrains Mono', monospace">DS</text>
|
||||
<text x="118" y="455" font-size="11" fill="#6b5e4f">tag=30909, digest=SHA-256</text>
|
||||
<text x="96" y="470" font-size="10" fill="#a39888">hash of com DNSKEY</text>
|
||||
|
||||
<!-- DS signed by root RRSIG -->
|
||||
<line x1="400" y1="457" x2="440" y2="457" stroke="#c0623a" stroke-width="1.5" marker-end="url(#arr-amber)"/>
|
||||
<text x="412" y="449" font-size="9" fill="#c0623a" font-weight="600">signed by</text>
|
||||
|
||||
<rect x="445" y="438" width="220" height="38" rx="6" fill="rgba(192,98,58,0.06)" stroke="rgba(192,98,58,0.2)" filter="url(#s)"/>
|
||||
<text x="461" y="455" font-size="11" font-weight="600" fill="#9e4e2d" font-family="'JetBrains Mono', monospace">RRSIG</text>
|
||||
<text x="505" y="455" font-size="11" fill="#6b5e4f">signer=.</text>
|
||||
|
||||
<!-- Root DNSKEY -->
|
||||
<rect x="80" y="490" width="320" height="32" rx="6" fill="white" stroke="rgba(0,0,0,0.08)" filter="url(#s)"/>
|
||||
<text x="96" y="511" font-size="11" font-weight="600" fill="#2c2418" font-family="'JetBrains Mono', monospace">DNSKEY</text>
|
||||
<text x="156" y="511" font-size="11" fill="#6b5e4f">root (.), tag=20326, RSA/SHA-256</text>
|
||||
|
||||
<!-- RRSIG → root DNSKEY -->
|
||||
<path d="M 555 476 L 555 490 L 400 490 L 400 500" stroke="#c0623a" stroke-width="1.5" fill="none" marker-end="url(#arr-amber)"/>
|
||||
<text x="460" y="487" font-size="9" fill="#c0623a" font-weight="600">verified with</text>
|
||||
|
||||
<!-- ═══ TRUST ANCHOR ═══ -->
|
||||
<line x1="240" y1="522" x2="240" y2="558" stroke="#2c2418" stroke-width="2" stroke-dasharray="4,3"/>
|
||||
|
||||
<rect x="120" y="560" width="480" height="52" rx="8" fill="#2c2418" filter="url(#s)"/>
|
||||
<text x="360" y="582" text-anchor="middle" font-size="12" font-weight="600" fill="#faf7f2" font-family="'JetBrains Mono', monospace">ROOT TRUST ANCHOR</text>
|
||||
<text x="360" y="600" text-anchor="middle" font-size="11" fill="#a39888">IANA KSK, key_tag=20326 — hardcoded in Numa as const [u8; 256]</text>
|
||||
|
||||
<!-- Flow summary -->
|
||||
<text x="360" y="646" text-anchor="middle" font-size="12" fill="#6b5e4f" font-style="italic">Trust flows up (DS records). Keys flow down (DNSKEY → RRSIG).</text>
|
||||
<text x="360" y="664" text-anchor="middle" font-size="11" fill="#a39888">If any link breaks — wrong signature, missing DS, expired RRSIG — Numa rejects the response.</text>
|
||||
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 9.2 KiB |
129
site/blog/dot-handshake.svg
Normal file
129
site/blog/dot-handshake.svg
Normal file
@@ -0,0 +1,129 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 720 360" font-family="'DM Sans', system-ui, sans-serif" font-size="12">
|
||||
<defs>
|
||||
<marker id="arr-amber" viewBox="0 0 10 10" refX="9" refY="5" markerWidth="6" markerHeight="6" orient="auto">
|
||||
<path d="M 0 0 L 10 5 L 0 10 z" fill="#c0623a"/>
|
||||
</marker>
|
||||
<marker id="arr-dim" viewBox="0 0 10 10" refX="9" refY="5" markerWidth="6" markerHeight="6" orient="auto">
|
||||
<path d="M 0 0 L 10 5 L 0 10 z" fill="#a39888"/>
|
||||
</marker>
|
||||
<filter id="shadow" x="-3%" y="-3%" width="106%" height="106%">
|
||||
<feDropShadow dx="0" dy="1" stdDeviation="2" flood-opacity="0.06"/>
|
||||
</filter>
|
||||
</defs>
|
||||
|
||||
<!-- Background -->
|
||||
<rect width="720" height="360" rx="8" fill="#faf7f2"/>
|
||||
|
||||
<!-- Title -->
|
||||
<text x="360" y="32" text-anchor="middle" font-size="15" font-weight="600" fill="#2c2418" font-family="'Instrument Serif', Georgia, serif" letter-spacing="-0.02em">UDP vs DoT — one lookup, three scenarios</text>
|
||||
<text x="360" y="50" text-anchor="middle" font-size="11" fill="#a39888">Time flows downward. Amber = DNS work. Gray = TCP/TLS handshake overhead.</text>
|
||||
|
||||
<!-- ==================== Column 1: Plain UDP ==================== -->
|
||||
<g transform="translate(20, 0)">
|
||||
<!-- Column header -->
|
||||
<text x="90" y="84" text-anchor="middle" font-size="13" font-weight="600" fill="#2c2418">Plain UDP DNS</text>
|
||||
<text x="90" y="101" text-anchor="middle" font-size="10" fill="#a39888" letter-spacing="0.06em">PORT 53 · CLEARTEXT</text>
|
||||
|
||||
<!-- Lane labels -->
|
||||
<text x="25" y="128" font-size="10" fill="#6b5e4f">client</text>
|
||||
<text x="133" y="128" font-size="10" fill="#6b5e4f">server</text>
|
||||
|
||||
<!-- Lanes -->
|
||||
<line x1="35" y1="138" x2="35" y2="198" stroke="#d4cbba" stroke-width="1" stroke-dasharray="2 3"/>
|
||||
<line x1="145" y1="138" x2="145" y2="198" stroke="#d4cbba" stroke-width="1" stroke-dasharray="2 3"/>
|
||||
|
||||
<!-- query -->
|
||||
<line x1="37" y1="148" x2="143" y2="160" stroke="#c0623a" stroke-width="2" marker-end="url(#arr-amber)"/>
|
||||
<text x="90" y="143" text-anchor="middle" font-size="10" fill="#9e4e2d" font-weight="500">query</text>
|
||||
|
||||
<!-- response -->
|
||||
<line x1="143" y1="178" x2="37" y2="190" stroke="#c0623a" stroke-width="2" marker-end="url(#arr-amber)"/>
|
||||
<text x="90" y="205" text-anchor="middle" font-size="10" fill="#9e4e2d" font-weight="500">response</text>
|
||||
|
||||
<!-- Total cost badge -->
|
||||
<rect x="20" y="225" width="140" height="32" rx="4" fill="#faf7f2" stroke="#d4cbba" stroke-width="1" filter="url(#shadow)"/>
|
||||
<text x="90" y="241" text-anchor="middle" font-size="9" fill="#a39888" letter-spacing="0.04em">TOTAL LATENCY</text>
|
||||
<text x="90" y="253" text-anchor="middle" font-size="11" font-weight="600" fill="#c0623a" font-family="'JetBrains Mono', monospace">1 × RTT</text>
|
||||
</g>
|
||||
|
||||
<!-- ==================== Column 2: DoT cold ==================== -->
|
||||
<g transform="translate(270, 0)">
|
||||
<!-- Column header -->
|
||||
<text x="90" y="84" text-anchor="middle" font-size="13" font-weight="600" fill="#2c2418">DoT — first query</text>
|
||||
<text x="90" y="101" text-anchor="middle" font-size="10" fill="#a39888" letter-spacing="0.06em">PORT 853 · NEW CONNECTION</text>
|
||||
|
||||
<!-- Lane labels -->
|
||||
<text x="25" y="128" font-size="10" fill="#6b5e4f">client</text>
|
||||
<text x="133" y="128" font-size="10" fill="#6b5e4f">server</text>
|
||||
|
||||
<!-- Lanes -->
|
||||
<line x1="35" y1="138" x2="35" y2="308" stroke="#d4cbba" stroke-width="1" stroke-dasharray="2 3"/>
|
||||
<line x1="145" y1="138" x2="145" y2="308" stroke="#d4cbba" stroke-width="1" stroke-dasharray="2 3"/>
|
||||
|
||||
<!-- === RTT 1: TCP handshake === -->
|
||||
<!-- SYN -->
|
||||
<line x1="37" y1="145" x2="143" y2="153" stroke="#a39888" stroke-width="1.5" marker-end="url(#arr-dim)"/>
|
||||
<!-- SYN-ACK -->
|
||||
<line x1="143" y1="163" x2="37" y2="171" stroke="#a39888" stroke-width="1.5" marker-end="url(#arr-dim)"/>
|
||||
<!-- ACK -->
|
||||
<line x1="37" y1="181" x2="143" y2="189" stroke="#a39888" stroke-width="1.5" marker-end="url(#arr-dim)"/>
|
||||
<!-- Label + RTT marker -->
|
||||
<text x="168" y="170" font-size="9" fill="#a39888" font-family="'JetBrains Mono', monospace">1 rtt</text>
|
||||
<text x="90" y="143" text-anchor="middle" font-size="9" fill="#6b5e4f" font-style="italic">TCP handshake</text>
|
||||
|
||||
<!-- === RTT 2: TLS 1.3 handshake === -->
|
||||
<!-- ClientHello -->
|
||||
<line x1="37" y1="208" x2="143" y2="216" stroke="#a39888" stroke-width="1.5" marker-end="url(#arr-dim)"/>
|
||||
<!-- ServerHello + Cert + Finished -->
|
||||
<line x1="143" y1="226" x2="37" y2="234" stroke="#a39888" stroke-width="1.5" marker-end="url(#arr-dim)"/>
|
||||
<!-- Label + RTT marker -->
|
||||
<text x="168" y="222" font-size="9" fill="#a39888" font-family="'JetBrains Mono', monospace">2 rtt</text>
|
||||
<text x="90" y="205" text-anchor="middle" font-size="9" fill="#6b5e4f" font-style="italic">TLS 1.3 handshake</text>
|
||||
|
||||
<!-- === RTT 3: DNS exchange === -->
|
||||
<!-- query (piggybacked on ClientFinished) -->
|
||||
<line x1="37" y1="253" x2="143" y2="261" stroke="#c0623a" stroke-width="2" marker-end="url(#arr-amber)"/>
|
||||
<!-- response -->
|
||||
<line x1="143" y1="271" x2="37" y2="279" stroke="#c0623a" stroke-width="2" marker-end="url(#arr-amber)"/>
|
||||
<!-- Label + RTT marker -->
|
||||
<text x="168" y="267" font-size="9" fill="#a39888" font-family="'JetBrains Mono', monospace">3 rtt</text>
|
||||
<text x="90" y="250" text-anchor="middle" font-size="10" fill="#9e4e2d" font-weight="500">query + response</text>
|
||||
|
||||
<!-- Total cost badge -->
|
||||
<rect x="20" y="295" width="140" height="32" rx="4" fill="#faf7f2" stroke="#d4cbba" stroke-width="1" filter="url(#shadow)"/>
|
||||
<text x="90" y="311" text-anchor="middle" font-size="9" fill="#a39888" letter-spacing="0.04em">TOTAL LATENCY</text>
|
||||
<text x="90" y="323" text-anchor="middle" font-size="11" font-weight="600" fill="#c0623a" font-family="'JetBrains Mono', monospace">3 × RTT</text>
|
||||
</g>
|
||||
|
||||
<!-- ==================== Column 3: DoT reused ==================== -->
|
||||
<g transform="translate(520, 0)">
|
||||
<!-- Column header -->
|
||||
<text x="90" y="84" text-anchor="middle" font-size="13" font-weight="600" fill="#2c2418">DoT — reused session</text>
|
||||
<text x="90" y="101" text-anchor="middle" font-size="10" fill="#a39888" letter-spacing="0.06em">PORT 853 · PERSISTENT TCP/TLS</text>
|
||||
|
||||
<!-- Lane labels -->
|
||||
<text x="25" y="128" font-size="10" fill="#6b5e4f">client</text>
|
||||
<text x="133" y="128" font-size="10" fill="#6b5e4f">server</text>
|
||||
|
||||
<!-- Lanes -->
|
||||
<line x1="35" y1="138" x2="35" y2="198" stroke="#d4cbba" stroke-width="1" stroke-dasharray="2 3"/>
|
||||
<line x1="145" y1="138" x2="145" y2="198" stroke="#d4cbba" stroke-width="1" stroke-dasharray="2 3"/>
|
||||
|
||||
<!-- query -->
|
||||
<line x1="37" y1="148" x2="143" y2="160" stroke="#c0623a" stroke-width="2" marker-end="url(#arr-amber)"/>
|
||||
<text x="90" y="143" text-anchor="middle" font-size="10" fill="#9e4e2d" font-weight="500">query</text>
|
||||
|
||||
<!-- response -->
|
||||
<line x1="143" y1="178" x2="37" y2="190" stroke="#c0623a" stroke-width="2" marker-end="url(#arr-amber)"/>
|
||||
<text x="90" y="205" text-anchor="middle" font-size="10" fill="#9e4e2d" font-weight="500">response</text>
|
||||
|
||||
<!-- Total cost badge -->
|
||||
<rect x="20" y="225" width="140" height="32" rx="4" fill="#faf7f2" stroke="#d4cbba" stroke-width="1" filter="url(#shadow)"/>
|
||||
<text x="90" y="241" text-anchor="middle" font-size="9" fill="#a39888" letter-spacing="0.04em">TOTAL LATENCY</text>
|
||||
<text x="90" y="253" text-anchor="middle" font-size="11" font-weight="600" fill="#c0623a" font-family="'JetBrains Mono', monospace">1 × RTT</text>
|
||||
|
||||
<!-- Tiny caption -->
|
||||
<text x="90" y="280" text-anchor="middle" font-size="9" fill="#a39888" font-style="italic">(handshake amortized</text>
|
||||
<text x="90" y="292" text-anchor="middle" font-size="9" fill="#a39888" font-style="italic">across the session)</text>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 7.7 KiB |
92
site/blog/hostile-network.svg
Normal file
92
site/blog/hostile-network.svg
Normal file
@@ -0,0 +1,92 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 720 330" font-family="'DM Sans', system-ui, sans-serif" font-size="12">
|
||||
<defs>
|
||||
<filter id="shadow" x="-3%" y="-3%" width="106%" height="106%">
|
||||
<feDropShadow dx="0" dy="1" stdDeviation="2" flood-opacity="0.06"/>
|
||||
</filter>
|
||||
<!-- Diagonal hatch for "wasted" UDP timeout regions. Darker warm gray
|
||||
base + slightly darker diagonal stripes at 45°. The stripe pattern
|
||||
is the Gantt convention for "dead/blocked time" — it reads as
|
||||
"this time was thrown away" without needing the legend. -->
|
||||
<pattern id="wasted-hatch" patternUnits="userSpaceOnUse" width="7" height="7" patternTransform="rotate(-45)">
|
||||
<rect width="7" height="7" fill="#8b7f6f"/>
|
||||
<line x1="0" y1="0" x2="0" y2="7" stroke="#3d3427" stroke-width="1.6" opacity="0.38"/>
|
||||
</pattern>
|
||||
</defs>
|
||||
|
||||
<!-- Background -->
|
||||
<rect width="720" height="330" rx="8" fill="#faf7f2"/>
|
||||
|
||||
<!-- Title -->
|
||||
<text x="360" y="32" text-anchor="middle" font-size="15" font-weight="600" fill="#2c2418" font-family="'Instrument Serif', Georgia, serif" letter-spacing="-0.02em">TCP fallback with UDP auto-disable</text>
|
||||
<text x="360" y="50" text-anchor="middle" font-size="11" fill="#a39888">Latency profile on an ISP that blocks outbound UDP:53</text>
|
||||
|
||||
<!-- Legend -->
|
||||
<g transform="translate(160, 70)">
|
||||
<rect width="14" height="12" rx="2" fill="url(#wasted-hatch)"/>
|
||||
<text x="22" y="10" font-size="11" fill="#6b5e4f">UDP timeout — 800 ms wasted</text>
|
||||
<rect x="220" width="14" height="12" rx="2" fill="#c0623a"/>
|
||||
<text x="242" y="10" font-size="11" fill="#6b5e4f">TCP — successful exchange</text>
|
||||
</g>
|
||||
|
||||
<!-- Time axis -->
|
||||
<!-- bar area: x=90 to x=570 (480px), representing 0-1200ms, scale 0.4 px/ms -->
|
||||
<line x1="90" y1="108" x2="570" y2="108" stroke="#d4cbba" stroke-width="1"/>
|
||||
<!-- tick marks -->
|
||||
<line x1="90" y1="106" x2="90" y2="112" stroke="#a39888" stroke-width="1"/>
|
||||
<line x1="210" y1="106" x2="210" y2="112" stroke="#a39888" stroke-width="1"/>
|
||||
<line x1="330" y1="106" x2="330" y2="112" stroke="#a39888" stroke-width="1"/>
|
||||
<line x1="410" y1="106" x2="410" y2="112" stroke="#a39888" stroke-width="1"/>
|
||||
<line x1="530" y1="106" x2="530" y2="112" stroke="#a39888" stroke-width="1"/>
|
||||
<!-- tick labels -->
|
||||
<text x="90" y="102" text-anchor="middle" font-size="9" fill="#a39888" font-family="'JetBrains Mono', monospace">0</text>
|
||||
<text x="210" y="102" text-anchor="middle" font-size="9" fill="#a39888" font-family="'JetBrains Mono', monospace">300</text>
|
||||
<text x="330" y="102" text-anchor="middle" font-size="9" fill="#a39888" font-family="'JetBrains Mono', monospace">600</text>
|
||||
<text x="410" y="102" text-anchor="middle" font-size="9" fill="#a39888" font-family="'JetBrains Mono', monospace">800</text>
|
||||
<text x="530" y="102" text-anchor="middle" font-size="9" fill="#a39888" font-family="'JetBrains Mono', monospace">1100 ms</text>
|
||||
|
||||
<!-- ============ Phase 1: UDP-first (wasted 800ms per query) ============ -->
|
||||
|
||||
<!-- Query 1 -->
|
||||
<text x="82" y="135" text-anchor="end" font-size="11" fill="#6b5e4f">query 1</text>
|
||||
<rect x="90" y="125" width="320" height="16" rx="2" fill="url(#wasted-hatch)"/>
|
||||
<rect x="410" y="125" width="120" height="16" rx="2" fill="#c0623a"/>
|
||||
<text x="540" y="137" font-size="10" fill="#6b5e4f" font-family="'JetBrains Mono', monospace">1,100 ms</text>
|
||||
|
||||
<!-- Query 2 -->
|
||||
<text x="82" y="159" text-anchor="end" font-size="11" fill="#6b5e4f">query 2</text>
|
||||
<rect x="90" y="149" width="320" height="16" rx="2" fill="url(#wasted-hatch)"/>
|
||||
<rect x="410" y="149" width="120" height="16" rx="2" fill="#c0623a"/>
|
||||
<text x="540" y="161" font-size="10" fill="#6b5e4f" font-family="'JetBrains Mono', monospace">1,100 ms</text>
|
||||
|
||||
<!-- Query 3 -->
|
||||
<text x="82" y="183" text-anchor="end" font-size="11" fill="#6b5e4f">query 3</text>
|
||||
<rect x="90" y="173" width="320" height="16" rx="2" fill="url(#wasted-hatch)"/>
|
||||
<rect x="410" y="173" width="120" height="16" rx="2" fill="#c0623a"/>
|
||||
<text x="540" y="185" font-size="10" fill="#6b5e4f" font-family="'JetBrains Mono', monospace">1,100 ms</text>
|
||||
|
||||
<!-- State-change divider -->
|
||||
<line x1="90" y1="206" x2="570" y2="206" stroke="#6b7c4e" stroke-width="1" stroke-dasharray="4 3"/>
|
||||
<rect x="200" y="198" width="260" height="18" rx="9" fill="#faf7f2" stroke="#6b7c4e" stroke-width="1" filter="url(#shadow)"/>
|
||||
<text x="330" y="210" text-anchor="middle" font-size="10" fill="#566540" font-weight="500">3 consecutive failures → UDP auto-disabled</text>
|
||||
|
||||
<!-- ============ Phase 2: TCP-first (UDP skipped) ============ -->
|
||||
|
||||
<!-- Query 4 -->
|
||||
<text x="82" y="235" text-anchor="end" font-size="11" fill="#6b5e4f">query 4</text>
|
||||
<rect x="90" y="225" width="120" height="16" rx="2" fill="#c0623a"/>
|
||||
<text x="220" y="237" font-size="10" fill="#6b5e4f" font-family="'JetBrains Mono', monospace">300 ms</text>
|
||||
|
||||
<!-- Query 5 -->
|
||||
<text x="82" y="259" text-anchor="end" font-size="11" fill="#6b5e4f">query 5</text>
|
||||
<rect x="90" y="249" width="120" height="16" rx="2" fill="#c0623a"/>
|
||||
<text x="220" y="261" font-size="10" fill="#6b5e4f" font-family="'JetBrains Mono', monospace">300 ms</text>
|
||||
|
||||
<!-- Speedup callout -->
|
||||
<g transform="translate(300, 246)">
|
||||
<line x1="0" y1="-10" x2="0" y2="22" stroke="#6b7c4e" stroke-width="1" stroke-dasharray="2 2"/>
|
||||
<text x="10" y="6" font-size="10" fill="#566540" font-style="italic">3.7× faster — no more UDP wait</text>
|
||||
</g>
|
||||
|
||||
<!-- Footer caption -->
|
||||
<text x="360" y="298" text-anchor="middle" font-size="10" fill="#a39888" font-style="italic">The flag resets on network change (LAN IP delta). Switch back to a clean network and UDP is tried again.</text>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 5.6 KiB |
203
site/blog/index.html
Normal file
203
site/blog/index.html
Normal file
@@ -0,0 +1,203 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Blog — Numa</title>
|
||||
<meta name="description" content="Technical writing about DNS, Rust, and building infrastructure from scratch.">
|
||||
<link rel="stylesheet" href="/fonts/fonts.css">
|
||||
<style>
|
||||
*, *::before, *::after { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
|
||||
:root {
|
||||
--bg-deep: #f5f0e8;
|
||||
--bg-surface: #ece5da;
|
||||
--bg-card: #faf7f2;
|
||||
--amber: #c0623a;
|
||||
--amber-dim: #9e4e2d;
|
||||
--teal: #6b7c4e;
|
||||
--text-primary: #2c2418;
|
||||
--text-secondary: #6b5e4f;
|
||||
--text-dim: #a39888;
|
||||
--border: rgba(0, 0, 0, 0.08);
|
||||
--font-display: 'Instrument Serif', Georgia, serif;
|
||||
--font-body: 'DM Sans', system-ui, sans-serif;
|
||||
--font-mono: 'JetBrains Mono', monospace;
|
||||
}
|
||||
|
||||
body {
|
||||
background: var(--bg-deep);
|
||||
color: var(--text-primary);
|
||||
font-family: var(--font-body);
|
||||
font-weight: 400;
|
||||
line-height: 1.7;
|
||||
-webkit-font-smoothing: antialiased;
|
||||
}
|
||||
|
||||
body::before {
|
||||
content: '';
|
||||
position: fixed;
|
||||
inset: 0;
|
||||
background-image: url("data:image/svg+xml,%3Csvg viewBox='0 0 256 256' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='n'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='0.9' numOctaves='4' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23n)' opacity='0.025'/%3E%3C/svg%3E");
|
||||
pointer-events: none;
|
||||
z-index: 9999;
|
||||
}
|
||||
|
||||
.blog-nav {
|
||||
padding: 1.5rem 2rem;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1.5rem;
|
||||
}
|
||||
|
||||
.blog-nav a {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
text-decoration: none;
|
||||
transition: color 0.2s;
|
||||
}
|
||||
.blog-nav a:hover { color: var(--amber); }
|
||||
|
||||
.blog-nav .wordmark {
|
||||
font-family: var(--font-display);
|
||||
font-size: 1.4rem;
|
||||
font-weight: 400;
|
||||
color: var(--text-primary);
|
||||
text-decoration: none;
|
||||
text-transform: none;
|
||||
letter-spacing: -0.02em;
|
||||
}
|
||||
.blog-nav .wordmark:hover { color: var(--amber); }
|
||||
|
||||
.blog-nav .sep {
|
||||
color: var(--text-dim);
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
|
||||
.blog-index {
|
||||
max-width: 720px;
|
||||
margin: 0 auto;
|
||||
padding: 3rem 2rem 6rem;
|
||||
}
|
||||
|
||||
.blog-index h1 {
|
||||
font-family: var(--font-display);
|
||||
font-weight: 400;
|
||||
font-size: 2.5rem;
|
||||
margin-bottom: 3rem;
|
||||
}
|
||||
|
||||
.post-list {
|
||||
list-style: none;
|
||||
}
|
||||
|
||||
.post-list li {
|
||||
padding: 1.5rem 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.post-list li:first-child {
|
||||
border-top: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.post-list a {
|
||||
text-decoration: none;
|
||||
display: block;
|
||||
}
|
||||
|
||||
.post-list .post-title {
|
||||
font-family: var(--font-display);
|
||||
font-size: 1.4rem;
|
||||
font-weight: 600;
|
||||
color: var(--text-primary);
|
||||
line-height: 1.3;
|
||||
margin-bottom: 0.4rem;
|
||||
transition: color 0.2s;
|
||||
}
|
||||
|
||||
.post-list a:hover .post-title {
|
||||
color: var(--amber);
|
||||
}
|
||||
|
||||
.post-list .post-desc {
|
||||
font-size: 0.95rem;
|
||||
color: var(--text-secondary);
|
||||
line-height: 1.5;
|
||||
margin-bottom: 0.4rem;
|
||||
}
|
||||
|
||||
.post-list .post-date {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.72rem;
|
||||
color: var(--text-dim);
|
||||
letter-spacing: 0.04em;
|
||||
}
|
||||
|
||||
.blog-footer {
|
||||
text-align: center;
|
||||
padding: 3rem 2rem;
|
||||
border-top: 1px solid var(--border);
|
||||
max-width: 720px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
.blog-footer a {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
text-decoration: none;
|
||||
margin: 0 1rem;
|
||||
}
|
||||
.blog-footer a:hover { color: var(--amber); }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<nav class="blog-nav">
|
||||
<a href="/" class="wordmark">Numa</a>
|
||||
<span class="sep">/</span>
|
||||
<a href="/blog/">Blog</a>
|
||||
</nav>
|
||||
|
||||
<main class="blog-index">
|
||||
<h1>Blog</h1>
|
||||
<ul class="post-list">
|
||||
<li>
|
||||
<a href="/blog/posts/dot-from-scratch.html">
|
||||
<div class="post-title">DNS-over-TLS from Scratch in Rust</div>
|
||||
<div class="post-desc">Building RFC 7858 on top of rustls — length-prefix framing, ALPN cross-protocol defense, iPhone dogfooding, and two bugs that only the strict clients caught.</div>
|
||||
<div class="post-date">April 2026</div>
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="/blog/posts/dnssec-from-scratch.html">
|
||||
<div class="post-title">Implementing DNSSEC from Scratch in Rust</div>
|
||||
<div class="post-desc">Recursive resolution from root hints, chain-of-trust validation, NSEC/NSEC3 denial proofs, and what I learned implementing DNSSEC with zero DNS libraries.</div>
|
||||
<div class="post-date">March 2026</div>
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="/blog/posts/dns-from-scratch.html">
|
||||
<div class="post-title">I Built a DNS Resolver from Scratch in Rust</div>
|
||||
<div class="post-desc">How DNS actually works at the wire level — label compression, TTL tricks, DoH implementation, and what I learned building a resolver with zero DNS libraries.</div>
|
||||
<div class="post-date">March 2026</div>
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</main>
|
||||
|
||||
<footer class="blog-footer">
|
||||
<a href="https://github.com/razvandimescu/numa">GitHub</a>
|
||||
<a href="/">Home</a>
|
||||
</footer>
|
||||
|
||||
<script data-goatcounter="https://razvandimescu.goatcounter.com/count"
|
||||
async src="//gc.zgo.at/count.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
@@ -4,9 +4,7 @@
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Numa — Dashboard</title>
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com">
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
||||
<link href="https://fonts.googleapis.com/css2?family=Instrument+Serif:ital@0;1&family=DM+Sans:opsz,wght@9..40,400;9..40,500;9..40,600&family=JetBrains+Mono:wght@400;500&display=swap" rel="stylesheet">
|
||||
<link rel="stylesheet" href="/fonts/fonts.css">
|
||||
<style>
|
||||
*, *::before, *::after { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
|
||||
@@ -103,7 +101,7 @@ body {
|
||||
/* Stat cards row */
|
||||
.stats-row {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(5, 1fr);
|
||||
grid-template-columns: repeat(6, 1fr);
|
||||
gap: 1rem;
|
||||
}
|
||||
.stat-card {
|
||||
@@ -127,6 +125,8 @@ body {
|
||||
.stat-card.blocked::before { background: var(--rose); }
|
||||
.stat-card.overrides::before { background: var(--violet); }
|
||||
.stat-card.uptime::before { background: var(--cyan); }
|
||||
.stat-card.memory::before { background: var(--text-dim); }
|
||||
.stat-card.memory .stat-value { color: var(--text-secondary); }
|
||||
|
||||
.stat-label {
|
||||
font-size: 0.7rem;
|
||||
@@ -217,6 +217,7 @@ body {
|
||||
min-width: 2px;
|
||||
}
|
||||
.path-bar-fill.forward { background: var(--amber); }
|
||||
.path-bar-fill.recursive { background: var(--cyan); }
|
||||
.path-bar-fill.cached { background: var(--teal); }
|
||||
.path-bar-fill.local { background: var(--violet); }
|
||||
.path-bar-fill.override { background: var(--emerald); }
|
||||
@@ -280,11 +281,14 @@ body {
|
||||
font-weight: 500;
|
||||
}
|
||||
.path-tag.FORWARD { background: rgba(192, 98, 58, 0.12); color: var(--amber-dim); }
|
||||
.path-tag.RECURSIVE { background: rgba(74, 124, 138, 0.12); color: var(--cyan); }
|
||||
.path-tag.CACHED { background: rgba(107, 124, 78, 0.12); color: var(--teal-dim); }
|
||||
.path-tag.LOCAL { background: rgba(100, 116, 139, 0.12); color: var(--violet-dim); }
|
||||
.path-tag.OVERRIDE { background: rgba(82, 122, 82, 0.12); color: var(--emerald); }
|
||||
.path-tag.SERVFAIL { background: rgba(181, 68, 58, 0.12); color: var(--rose); }
|
||||
.path-tag.BLOCKED { background: rgba(163, 152, 136, 0.15); color: var(--text-dim); }
|
||||
.path-tag.COALESCED { background: rgba(138, 104, 158, 0.12); color: var(--violet-dim); }
|
||||
.src-tag { font-size: 0.6rem; color: var(--text-dim); letter-spacing: 0.02em; }
|
||||
|
||||
/* Sidebar panels */
|
||||
.sidebar {
|
||||
@@ -382,6 +386,15 @@ body {
|
||||
}
|
||||
.health-dot.up { background: var(--emerald); }
|
||||
.health-dot.down { background: var(--rose); }
|
||||
.lan-badge {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.58rem;
|
||||
padding: 1px 5px;
|
||||
border-radius: 3px;
|
||||
margin-left: 0.3rem;
|
||||
}
|
||||
.lan-badge.shared { background: rgba(82, 122, 82, 0.12); color: var(--emerald); }
|
||||
.lan-badge.local-only { background: rgba(192, 98, 58, 0.12); color: var(--amber-dim); }
|
||||
|
||||
/* Override form */
|
||||
.override-form {
|
||||
@@ -458,10 +471,74 @@ body {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* Memory sidebar panel */
|
||||
.memory-bar {
|
||||
display: flex;
|
||||
height: 18px;
|
||||
border-radius: 4px;
|
||||
overflow: hidden;
|
||||
background: var(--bg-surface);
|
||||
margin-bottom: 0.8rem;
|
||||
}
|
||||
.memory-bar-seg {
|
||||
height: 100%;
|
||||
min-width: 2px;
|
||||
transition: width 0.6s ease;
|
||||
}
|
||||
.memory-bar-seg.cache { background: var(--teal); }
|
||||
.memory-bar-seg.blocklist { background: var(--rose); }
|
||||
.memory-bar-seg.querylog { background: var(--amber); }
|
||||
.memory-bar-seg.srtt { background: var(--cyan); }
|
||||
.memory-bar-seg.overrides { background: var(--violet); }
|
||||
.memory-row {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
padding: 0.3rem 0;
|
||||
border-bottom: 1px solid var(--border);
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.72rem;
|
||||
}
|
||||
.memory-row:last-child { border-bottom: none; }
|
||||
.memory-row-dot {
|
||||
width: 8px;
|
||||
height: 8px;
|
||||
border-radius: 2px;
|
||||
flex-shrink: 0;
|
||||
margin-right: 0.5rem;
|
||||
}
|
||||
.memory-row-label {
|
||||
flex: 1;
|
||||
color: var(--text-secondary);
|
||||
}
|
||||
.memory-row-size {
|
||||
width: 65px;
|
||||
text-align: right;
|
||||
color: var(--text-primary);
|
||||
font-weight: 500;
|
||||
}
|
||||
.memory-row-entries {
|
||||
width: 90px;
|
||||
text-align: right;
|
||||
color: var(--text-dim);
|
||||
}
|
||||
.memory-rss {
|
||||
margin-top: 0.5rem;
|
||||
padding-top: 0.5rem;
|
||||
border-top: 1px solid var(--border);
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.72rem;
|
||||
color: var(--text-dim);
|
||||
}
|
||||
|
||||
/* Responsive */
|
||||
@media (max-width: 1100px) {
|
||||
.main-grid { grid-template-columns: 1fr; }
|
||||
}
|
||||
@media (max-width: 900px) {
|
||||
.stats-row { grid-template-columns: repeat(3, 1fr); }
|
||||
}
|
||||
@media (max-width: 700px) {
|
||||
.stats-row { grid-template-columns: repeat(2, 1fr); }
|
||||
.dashboard { padding: 1rem; }
|
||||
@@ -514,6 +591,11 @@ body {
|
||||
<div class="stat-value" id="uptime">—</div>
|
||||
<div class="stat-sub" id="uptimeSub"> </div>
|
||||
</div>
|
||||
<div class="stat-card memory">
|
||||
<div class="stat-label">Memory</div>
|
||||
<div class="stat-value" id="memoryRss">—</div>
|
||||
<div class="stat-sub" id="memorySub"> </div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Resolution paths -->
|
||||
@@ -538,6 +620,8 @@ body {
|
||||
<select id="logFilterPath" onchange="applyLogFilter()"
|
||||
style="font-family:var(--font-mono);font-size:0.7rem;padding:0.25rem 0.4rem;border:1px solid var(--border);border-radius:4px;background:var(--bg-surface);color:var(--text-secondary);outline:none;">
|
||||
<option value="">all paths</option>
|
||||
<option value="RECURSIVE">recursive</option>
|
||||
<option value="COALESCED">coalesced</option>
|
||||
<option value="FORWARD">forward</option>
|
||||
<option value="CACHED">cached</option>
|
||||
<option value="BLOCKED">blocked</option>
|
||||
@@ -568,22 +652,27 @@ body {
|
||||
|
||||
<!-- Sidebar -->
|
||||
<div class="sidebar">
|
||||
<!-- Blocking -->
|
||||
<div class="panel" id="blockingPanel">
|
||||
<!-- Local services -->
|
||||
<div class="panel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">Blocking</span>
|
||||
<span class="panel-title" id="blockingRefresh" style="color:var(--text-dim);font-weight:400;"></span>
|
||||
<div style="flex:1;">
|
||||
<span class="panel-title">Local Services</span>
|
||||
<div style="font-size:0.68rem;color:var(--text-dim);margin-top:0.15rem;">Give localhost apps clean .numa URLs. Persistent, with HTTP proxy.</div>
|
||||
</div>
|
||||
<span id="lanToggle" style="font-family:var(--font-mono);font-size:0.68rem;cursor:default;user-select:none;" title=""></span>
|
||||
</div>
|
||||
<div class="panel-body">
|
||||
<form class="override-form" onsubmit="return checkDomain(event)" style="margin-bottom:0;border-bottom:none;padding-bottom:0;">
|
||||
<form class="override-form" id="serviceForm" onsubmit="return addService(event)">
|
||||
<div class="override-form-row">
|
||||
<input type="text" id="checkDomainInput" placeholder="Is this domain blocked?" required style="flex:3">
|
||||
<button type="submit" class="btn" style="background:var(--violet);color:white;flex-shrink:0;">Check</button>
|
||||
<input type="text" id="svcName" placeholder="name (becomes name.numa)" required style="flex:2">
|
||||
<input type="number" id="svcPort" placeholder="port (e.g. 3000)" required min="1" max="65535" style="flex:1">
|
||||
</div>
|
||||
<button type="submit" class="btn btn-add">Add Service</button>
|
||||
<div class="override-error" id="serviceError"></div>
|
||||
</form>
|
||||
<div id="checkResult" style="display:none;margin-top:0.6rem;padding:0.5rem 0.6rem;border-radius:5px;font-family:var(--font-mono);font-size:0.72rem;"></div>
|
||||
<div id="blockingSources" style="margin-top:0.8rem;padding-top:0.6rem;border-top:1px solid var(--border);"></div>
|
||||
<div id="blockingAllowlist" style="margin-top:0.8rem;padding-top:0.6rem;border-top:1px solid var(--border);"></div>
|
||||
<div id="servicesList">
|
||||
<div class="empty-state">No services configured</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -612,26 +701,33 @@ body {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Local services -->
|
||||
<div class="panel">
|
||||
<!-- Blocking -->
|
||||
<div class="panel" id="blockingPanel">
|
||||
<div class="panel-header">
|
||||
<div>
|
||||
<span class="panel-title">Local Services</span>
|
||||
<div style="font-size:0.68rem;color:var(--text-dim);margin-top:0.15rem;">Give localhost apps clean .numa URLs. Persistent, with HTTP proxy.</div>
|
||||
</div>
|
||||
<span class="panel-title">Blocking</span>
|
||||
<span class="panel-title" id="blockingRefresh" style="color:var(--text-dim);font-weight:400;"></span>
|
||||
</div>
|
||||
<div class="panel-body">
|
||||
<form class="override-form" id="serviceForm" onsubmit="return addService(event)">
|
||||
<form class="override-form" onsubmit="return checkDomain(event)" style="margin-bottom:0;border-bottom:none;padding-bottom:0;">
|
||||
<div class="override-form-row">
|
||||
<input type="text" id="svcName" placeholder="name (becomes name.numa)" required style="flex:2">
|
||||
<input type="number" id="svcPort" placeholder="port (e.g. 3000)" required min="1" max="65535" style="flex:1">
|
||||
<input type="text" id="checkDomainInput" placeholder="Is this domain blocked?" required style="flex:3">
|
||||
<button type="submit" class="btn" style="background:var(--violet);color:white;flex-shrink:0;">Check</button>
|
||||
</div>
|
||||
<button type="submit" class="btn btn-add">Add Service</button>
|
||||
<div class="override-error" id="serviceError"></div>
|
||||
</form>
|
||||
<div id="servicesList">
|
||||
<div class="empty-state">No services configured</div>
|
||||
</div>
|
||||
<div id="checkResult" style="display:none;margin-top:0.6rem;padding:0.5rem 0.6rem;border-radius:5px;font-family:var(--font-mono);font-size:0.72rem;"></div>
|
||||
<div id="blockingSources" style="margin-top:0.8rem;padding-top:0.6rem;border-top:1px solid var(--border);"></div>
|
||||
<div id="blockingAllowlist" style="margin-top:0.8rem;padding-top:0.6rem;border-top:1px solid var(--border);"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Memory breakdown -->
|
||||
<div class="panel" id="memoryPanel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">Memory</span>
|
||||
<span class="panel-title" id="memoryTotal" style="color: var(--text-dim)"></span>
|
||||
</div>
|
||||
<div class="panel-body" id="memoryBody">
|
||||
<div class="empty-state">No memory data</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -651,6 +747,7 @@ body {
|
||||
|
||||
<script>
|
||||
const API = '';
|
||||
const h = s => String(s).replace(/&/g,'&').replace(/</g,'<').replace(/>/g,'>').replace(/"/g,'"').replace(/'/g,''');
|
||||
let prevTotal = null;
|
||||
let lastLogEntries = [];
|
||||
let prevTime = null;
|
||||
@@ -691,6 +788,13 @@ function formatTime(epoch) {
|
||||
return d.toLocaleTimeString([], { hour12: false });
|
||||
}
|
||||
|
||||
function shortSrc(addr) {
|
||||
if (!addr) return '';
|
||||
const ip = addr.replace(/:\d+$/, '');
|
||||
if (ip === '127.0.0.1' || ip === '::1') return 'localhost';
|
||||
return ip;
|
||||
}
|
||||
|
||||
function formatRemaining(secs) {
|
||||
if (secs == null) return 'permanent';
|
||||
if (secs < 60) return `${secs}s left`;
|
||||
@@ -698,8 +802,72 @@ function formatRemaining(secs) {
|
||||
return `${Math.floor(secs / 3600)}h ${Math.floor((secs % 3600) / 60)}m left`;
|
||||
}
|
||||
|
||||
function formatBytes(bytes) {
|
||||
if (bytes === 0) return '0 B';
|
||||
if (bytes < 1024) return bytes + ' B';
|
||||
if (bytes < 1048576) return (bytes / 1024).toFixed(1) + ' KB';
|
||||
if (bytes < 1073741824) return (bytes / 1048576).toFixed(1) + ' MB';
|
||||
return (bytes / 1073741824).toFixed(1) + ' GB';
|
||||
}
|
||||
|
||||
const MEMORY_COMPONENTS = [
|
||||
{ key: 'cache', label: 'Cache', cls: 'cache', color: 'var(--teal)' },
|
||||
{ key: 'blocklist', label: 'Blocklist', cls: 'blocklist', color: 'var(--rose)' },
|
||||
{ key: 'query_log', label: 'Query Log', cls: 'querylog', color: 'var(--amber)' },
|
||||
{ key: 'srtt', label: 'SRTT', cls: 'srtt', color: 'var(--cyan)' },
|
||||
{ key: 'overrides', label: 'Overrides', cls: 'overrides', color: 'var(--violet)' },
|
||||
];
|
||||
|
||||
function renderMemory(mem, stats) {
|
||||
if (!mem) return;
|
||||
|
||||
// Stat card
|
||||
document.getElementById('memoryRss').textContent = formatBytes(mem.process_memory_bytes);
|
||||
document.getElementById('memorySub').textContent = 'est. ' + formatBytes(mem.total_estimated_bytes);
|
||||
|
||||
const entryCounts = {
|
||||
cache: stats.cache.entries,
|
||||
blocklist: stats.blocking.domains_loaded,
|
||||
query_log: mem.query_log_entries,
|
||||
srtt: mem.srtt_entries,
|
||||
overrides: stats.overrides.active,
|
||||
};
|
||||
|
||||
// Sidebar panel
|
||||
const total = mem.total_estimated_bytes || 1;
|
||||
document.getElementById('memoryTotal').textContent = formatBytes(total);
|
||||
|
||||
const barSegments = MEMORY_COMPONENTS.map(c => {
|
||||
const bytes = mem[c.key + '_bytes'] || 0;
|
||||
const pct = ((bytes / total) * 100).toFixed(1);
|
||||
return `<div class="memory-bar-seg ${c.cls}" style="width:${pct}%" title="${c.label}: ${formatBytes(bytes)} (${pct}%)"></div>`;
|
||||
}).join('');
|
||||
|
||||
const rows = MEMORY_COMPONENTS.map(c => {
|
||||
const bytes = mem[c.key + '_bytes'] || 0;
|
||||
const entries = entryCounts[c.key] || 0;
|
||||
return `
|
||||
<div class="memory-row">
|
||||
<div class="memory-row-dot" style="background:${c.color}"></div>
|
||||
<span class="memory-row-label">${c.label}</span>
|
||||
<span class="memory-row-size">${formatBytes(bytes)}</span>
|
||||
<span class="memory-row-entries">${formatNumber(entries)} entries</span>
|
||||
</div>`;
|
||||
}).join('');
|
||||
|
||||
document.getElementById('memoryBody').innerHTML = `
|
||||
<div class="memory-bar">${barSegments}</div>
|
||||
${rows}
|
||||
<div class="memory-rss">
|
||||
<span>Process Footprint</span>
|
||||
<span>${formatBytes(mem.process_memory_bytes)}</span>
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
|
||||
const PATH_DEFS = [
|
||||
{ key: 'forwarded', label: 'Forward', cls: 'forward' },
|
||||
{ key: 'recursive', label: 'Recursive', cls: 'recursive' },
|
||||
{ key: 'cached', label: 'Cached', cls: 'cached' },
|
||||
{ key: 'local', label: 'Local', cls: 'local' },
|
||||
{ key: 'overridden', label: 'Override', cls: 'override' },
|
||||
@@ -752,12 +920,12 @@ function applyLogFilter() {
|
||||
? ` <button class="btn-delete" onclick="allowDomain('${e.domain}')" title="Allow this domain" style="color:var(--emerald);font-size:0.65rem;">allow</button>`
|
||||
: '';
|
||||
return `
|
||||
<tr>
|
||||
<td>${formatTime(e.timestamp_epoch)}</td>
|
||||
<tr title="Source: ${e.src || 'unknown'}">
|
||||
<td>${formatTime(e.timestamp_epoch)}<br><span class="src-tag">${shortSrc(e.src)}</span></td>
|
||||
<td>${e.query_type}</td>
|
||||
<td class="domain-cell" title="${e.domain}">${e.domain}${allowBtn}</td>
|
||||
<td><span class="path-tag ${e.path}">${e.path}</span></td>
|
||||
<td>${e.rescode}</td>
|
||||
<td style="white-space:nowrap;"><span style="display:inline-block;width:15px;text-align:center;">${e.dnssec === 'secure' ? '<svg title="DNSSEC verified" width="12" height="12" viewBox="0 0 24 24" fill="none" stroke="var(--emerald)" stroke-width="2.5" stroke-linecap="round" stroke-linejoin="round" style="vertical-align:-1px;"><path d="M12 22s8-4 8-10V5l-8-3-8 3v7c0 6 8 10 8 10z"/><path d="m9 12 2 2 4-4"/></svg>' : ''}</span>${e.rescode}</td>
|
||||
<td>${e.latency_ms.toFixed(1)}ms</td>
|
||||
</tr>`;
|
||||
}).join('');
|
||||
@@ -864,6 +1032,32 @@ async function refresh() {
|
||||
document.getElementById('totalQueries').textContent = formatNumber(q.total);
|
||||
document.getElementById('uptime').textContent = formatUptime(stats.uptime_secs);
|
||||
document.getElementById('uptimeSub').textContent = formatUptimeSub(stats.uptime_secs);
|
||||
document.getElementById('footerUpstream').textContent = stats.upstream || '';
|
||||
document.getElementById('footerConfig').textContent = stats.config_path || '';
|
||||
document.getElementById('footerData').textContent = stats.data_dir || '';
|
||||
const modeEl = document.getElementById('footerMode');
|
||||
modeEl.textContent = stats.mode || '—';
|
||||
modeEl.style.color = stats.mode === 'recursive' ? 'var(--emerald)' : 'var(--amber)';
|
||||
document.getElementById('footerDnssec').textContent = stats.dnssec ? 'on' : 'off';
|
||||
document.getElementById('footerDnssec').style.color = stats.dnssec ? 'var(--emerald)' : 'var(--text-dim)';
|
||||
document.getElementById('footerSrtt').textContent = stats.srtt ? 'on' : 'off';
|
||||
document.getElementById('footerSrtt').style.color = stats.srtt ? 'var(--emerald)' : 'var(--text-dim)';
|
||||
|
||||
// LAN status indicator
|
||||
const lanEl = document.getElementById('lanToggle');
|
||||
if (stats.lan) {
|
||||
if (!stats.lan.enabled) {
|
||||
lanEl.style.color = 'var(--text-dim)';
|
||||
lanEl.textContent = 'LAN off';
|
||||
lanEl.title = 'Enable with: numa lan on';
|
||||
} else {
|
||||
const pc = stats.lan.peers || 0;
|
||||
lanEl.style.color = pc > 0 ? 'var(--emerald)' : 'var(--teal)';
|
||||
lanEl.textContent = `LAN on · ${pc} peer${pc !== 1 ? 's' : ''}`;
|
||||
lanEl.title = 'mDNS discovery active (_numa._tcp.local)';
|
||||
}
|
||||
}
|
||||
|
||||
document.getElementById('overrideCount').textContent = stats.overrides.active;
|
||||
document.getElementById('blockedCount').textContent = formatNumber(q.blocked);
|
||||
const bl = stats.blocking;
|
||||
@@ -907,7 +1101,7 @@ async function refresh() {
|
||||
prevTime = now;
|
||||
|
||||
// Cache hit rate
|
||||
const answered = q.cached + q.forwarded + q.local + q.overridden;
|
||||
const answered = q.cached + q.forwarded + q.recursive + q.coalesced + q.local + q.overridden;
|
||||
const hitRate = answered > 0 ? ((q.cached / answered) * 100).toFixed(1) : '0.0';
|
||||
document.getElementById('cacheRate').textContent = hitRate + '%';
|
||||
|
||||
@@ -919,6 +1113,7 @@ async function refresh() {
|
||||
renderServices(services);
|
||||
renderBlockingInfo(blockingInfo);
|
||||
renderAllowlist(allowlist);
|
||||
renderMemory(stats.memory, stats);
|
||||
|
||||
} catch (err) {
|
||||
document.getElementById('statusDot').className = 'status-dot error';
|
||||
@@ -979,14 +1174,14 @@ async function checkDomain(event) {
|
||||
if (result.blocked) {
|
||||
el.style.background = 'rgba(181, 68, 58, 0.1)';
|
||||
el.style.color = 'var(--rose)';
|
||||
el.innerHTML = `<strong>Blocked</strong> — ${result.reason}` +
|
||||
(result.matched_rule ? `<br>Rule: <code>${result.matched_rule}</code>` : '') +
|
||||
` <button class="btn-delete" onclick="allowDomain('${domain}')" style="color:var(--emerald);font-size:0.7rem;margin-left:0.4rem;">allow</button>`;
|
||||
el.innerHTML = `<strong>Blocked</strong> — ${h(result.reason)}` +
|
||||
(result.matched_rule ? `<br>Rule: <code>${h(result.matched_rule)}</code>` : '') +
|
||||
` <button class="btn-delete" onclick="allowDomain('${h(domain)}')" style="color:var(--emerald);font-size:0.7rem;margin-left:0.4rem;">allow</button>`;
|
||||
} else {
|
||||
el.style.background = 'rgba(82, 122, 82, 0.1)';
|
||||
el.style.color = 'var(--emerald)';
|
||||
el.innerHTML = `<strong>Allowed</strong> — ${result.reason}` +
|
||||
(result.matched_rule ? `<br>Rule: <code>${result.matched_rule}</code>` : '');
|
||||
el.innerHTML = `<strong>Allowed</strong> — ${h(result.reason)}` +
|
||||
(result.matched_rule ? `<br>Rule: <code>${h(result.matched_rule)}</code>` : '');
|
||||
}
|
||||
} catch (err) {
|
||||
el.style.display = 'block';
|
||||
@@ -1076,22 +1271,82 @@ async function removeAllowlistDomain(domain) {
|
||||
} catch (err) {}
|
||||
}
|
||||
|
||||
let editingRoute = false;
|
||||
|
||||
function renderServices(entries) {
|
||||
if (editingRoute) return;
|
||||
const el = document.getElementById('servicesList');
|
||||
if (!entries.length) {
|
||||
el.innerHTML = '<div class="empty-state">No services configured</div>';
|
||||
return;
|
||||
}
|
||||
el.innerHTML = entries.map(e => `
|
||||
el.innerHTML = entries.map(e => {
|
||||
const lanBadge = e.healthy
|
||||
? (e.lan_accessible
|
||||
? '<span class="lan-badge shared" title="Reachable from other devices on the network">LAN</span>'
|
||||
: '<span class="lan-badge local-only" title="Bound to localhost — not reachable from other devices. Start with 0.0.0.0 to share on LAN.">local only</span>')
|
||||
: '';
|
||||
const routeLines = (e.routes || []).map(r =>
|
||||
`<div class="service-port" style="color:var(--text-dim);display:flex;align-items:center;gap:0.3rem;">` +
|
||||
`<span style="display:inline-block;min-width:60px;">${h(r.path)}</span> ` +
|
||||
`→ :${parseInt(r.port)||0}` +
|
||||
(r.strip ? ` <span style="opacity:0.6;">(strip)</span>` : '') +
|
||||
(e.name === 'numa' ? '' : ` <button class="btn-delete" onclick="deleteRoute('${h(e.name)}','${h(r.path)}')" title="Remove route" style="font-size:0.65rem;padding:0 0.25rem;min-width:auto;opacity:0.5;">×</button>`) +
|
||||
`</div>`
|
||||
).join('');
|
||||
const deletable = e.source !== 'config' && e.name !== 'numa';
|
||||
const name = h(e.name);
|
||||
return `
|
||||
<div class="service-item">
|
||||
<span class="health-dot ${e.healthy ? 'up' : 'down'}" title="${e.healthy ? 'running' : 'not reachable'}"></span>
|
||||
<div class="service-info">
|
||||
<div class="service-name"><a href="${e.url}" target="_blank">${e.name}.numa</a></div>
|
||||
<div class="service-port">localhost:${e.target_port} → proxied</div>
|
||||
<div class="service-name"><a href="${h(e.url)}" target="_blank">${name}.numa</a>${lanBadge}</div>
|
||||
<div class="service-port">localhost:${parseInt(e.target_port)||0} → proxied</div>
|
||||
${routeLines}
|
||||
${e.name === 'numa' ? '' : `<div style="margin-top:0.3rem;"><button onclick="toggleRouteForm('${name}')" style="font-size:0.7rem;padding:0.1rem 0.4rem;background:var(--emerald);color:var(--bg);border:none;border-radius:4px;cursor:pointer;">+ route</button><div id="routeForm-${name}" style="display:none;margin-top:0.3rem;"><div style="display:flex;gap:0.3rem;align-items:center;"><input type="text" id="routePath-${name}" placeholder="/path" style="flex:2;padding:0.25rem 0.4rem;font-size:0.75rem;"><input type="number" id="routePort-${name}" value="${parseInt(e.target_port)||0}" min="1" max="65535" style="flex:1;padding:0.25rem 0.4rem;font-size:0.75rem;"><label style="font-size:0.7rem;color:var(--text-dim);display:flex;align-items:center;gap:0.2rem;"><input type="checkbox" id="routeStrip-${name}">strip</label><button onclick="addRoute('${name}')" style="font-size:0.7rem;padding:0.2rem 0.5rem;background:var(--emerald);color:var(--bg);border:none;border-radius:4px;cursor:pointer;">add</button></div><div class="override-error" id="routeError-${name}" style="display:none;font-size:0.7rem;"></div></div></div>`}
|
||||
</div>
|
||||
${e.name === 'numa' ? '' : `<button class="btn-delete" onclick="deleteService('${e.name}')" title="Remove service">×</button>`}
|
||||
${deletable ? `<button class="btn-delete" onclick="deleteService('${name}')" title="Remove service">×</button>` : ''}
|
||||
</div>
|
||||
`).join('');
|
||||
`}).join('');
|
||||
}
|
||||
|
||||
function toggleRouteForm(name) {
|
||||
const el = document.getElementById('routeForm-' + name);
|
||||
const opening = el.style.display === 'none';
|
||||
el.style.display = opening ? 'block' : 'none';
|
||||
editingRoute = opening;
|
||||
}
|
||||
|
||||
async function addRoute(name) {
|
||||
const errEl = document.getElementById('routeError-' + name);
|
||||
errEl.style.display = 'none';
|
||||
try {
|
||||
const path = document.getElementById('routePath-' + name).value.trim();
|
||||
const port = parseInt(document.getElementById('routePort-' + name).value) || 0;
|
||||
const strip = document.getElementById('routeStrip-' + name).checked;
|
||||
const res = await fetch(API + '/services/' + encodeURIComponent(name) + '/routes', {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ path, port, strip }),
|
||||
});
|
||||
if (!res.ok) throw new Error(await res.text());
|
||||
editingRoute = false;
|
||||
refresh();
|
||||
} catch (err) {
|
||||
errEl.textContent = err.message;
|
||||
errEl.style.display = 'block';
|
||||
}
|
||||
}
|
||||
|
||||
async function deleteRoute(name, path) {
|
||||
try {
|
||||
await fetch(API + '/services/' + encodeURIComponent(name) + '/routes', {
|
||||
method: 'DELETE',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ path }),
|
||||
});
|
||||
refresh();
|
||||
} catch (err) { /* next refresh will update */ }
|
||||
}
|
||||
|
||||
async function addService(event) {
|
||||
@@ -1135,7 +1390,13 @@ setInterval(refresh, 2000);
|
||||
</script>
|
||||
|
||||
<div style="text-align:center;padding:0.8rem;font-family:var(--font-mono);font-size:0.68rem;color:var(--text-dim);">
|
||||
Logs: <span id="logPath" style="user-select:all;">macOS: /usr/local/var/log/numa.log · Linux: journalctl -u numa -f</span>
|
||||
Config: <span id="footerConfig" style="user-select:all;color:var(--emerald);"></span>
|
||||
· Data: <span id="footerData" style="user-select:all;color:var(--emerald);"></span>
|
||||
· Upstream: <span id="footerUpstream" style="user-select:all;color:var(--emerald);"></span>
|
||||
· Mode: <span id="footerMode" style="color:var(--text-dim);">—</span>
|
||||
· DNSSEC: <span id="footerDnssec" style="color:var(--text-dim);">—</span>
|
||||
· SRTT: <span id="footerSrtt" style="color:var(--text-dim);">—</span>
|
||||
· Logs: <span style="user-select:all;color:var(--emerald);">macOS: /usr/local/var/log/numa.log · Linux: journalctl -u numa -f</span>
|
||||
· <a href="https://github.com/razvandimescu/numa" target="_blank" rel="noopener" style="color:var(--amber);text-decoration:none;">GitHub</a>
|
||||
</div>
|
||||
|
||||
|
||||
BIN
site/fonts/dm-sans-italic-latin.woff2
Normal file
BIN
site/fonts/dm-sans-italic-latin.woff2
Normal file
Binary file not shown.
BIN
site/fonts/dm-sans-latin.woff2
Normal file
BIN
site/fonts/dm-sans-latin.woff2
Normal file
Binary file not shown.
36
site/fonts/fonts.css
Normal file
36
site/fonts/fonts.css
Normal file
@@ -0,0 +1,36 @@
|
||||
/* Self-hosted fonts — no external requests to Google */
|
||||
@font-face {
|
||||
font-family: 'Instrument Serif';
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
font-display: swap;
|
||||
src: url(/fonts/instrument-serif-latin.woff2) format('woff2');
|
||||
}
|
||||
@font-face {
|
||||
font-family: 'Instrument Serif';
|
||||
font-style: italic;
|
||||
font-weight: 400;
|
||||
font-display: swap;
|
||||
src: url(/fonts/instrument-serif-italic-latin.woff2) format('woff2');
|
||||
}
|
||||
@font-face {
|
||||
font-family: 'DM Sans';
|
||||
font-style: normal;
|
||||
font-weight: 400 600;
|
||||
font-display: swap;
|
||||
src: url(/fonts/dm-sans-latin.woff2) format('woff2');
|
||||
}
|
||||
@font-face {
|
||||
font-family: 'DM Sans';
|
||||
font-style: italic;
|
||||
font-weight: 400;
|
||||
font-display: swap;
|
||||
src: url(/fonts/dm-sans-italic-latin.woff2) format('woff2');
|
||||
}
|
||||
@font-face {
|
||||
font-family: 'JetBrains Mono';
|
||||
font-style: normal;
|
||||
font-weight: 400 500;
|
||||
font-display: swap;
|
||||
src: url(/fonts/jetbrains-mono-latin.woff2) format('woff2');
|
||||
}
|
||||
BIN
site/fonts/instrument-serif-italic-latin.woff2
Normal file
BIN
site/fonts/instrument-serif-italic-latin.woff2
Normal file
Binary file not shown.
BIN
site/fonts/instrument-serif-latin.woff2
Normal file
BIN
site/fonts/instrument-serif-latin.woff2
Normal file
Binary file not shown.
BIN
site/fonts/jetbrains-mono-latin.woff2
Normal file
BIN
site/fonts/jetbrains-mono-latin.woff2
Normal file
Binary file not shown.
604
site/index.html
604
site/index.html
@@ -3,11 +3,14 @@
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Numa — DNS that governs itself</title>
|
||||
<meta name="description" content="DNS you own. Block ads, override DNS for development, name your local services with .numa domains, cache for speed. A single portable binary built from scratch in Rust.">
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com">
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
||||
<link href="https://fonts.googleapis.com/css2?family=Instrument+Serif:ital@0;1&family=DM+Sans:ital,opsz,wght@0,9..40,400;0,9..40,500;0,9..40,600;1,9..40,400&family=JetBrains+Mono:wght@400;500&display=swap" rel="stylesheet">
|
||||
<title>Numa — DNS you own. Everywhere you go.</title>
|
||||
<meta name="description" content="DNS you own. Portable DNS resolver with caching, ad blocking, .numa local domains, developer overrides. Optional recursive resolution with full DNSSEC validation. Built from scratch in Rust.">
|
||||
<link rel="canonical" href="https://numa.rs">
|
||||
<meta property="og:title" content="Numa — DNS you own. Everywhere you go.">
|
||||
<meta property="og:description" content="Portable DNS resolver with caching, ad blocking, .numa local domains, and developer overrides. Optional recursive resolution with full DNSSEC validation. Built from scratch in Rust.">
|
||||
<meta property="og:type" content="website">
|
||||
<meta property="og:url" content="https://numa.rs">
|
||||
<link rel="stylesheet" href="/fonts/fonts.css">
|
||||
<style>
|
||||
*, *::before, *::after { margin: 0; padding: 0; box-sizing: border-box; }
|
||||
|
||||
@@ -163,7 +166,7 @@ section {
|
||||
|
||||
h2 {
|
||||
font-family: var(--font-display);
|
||||
font-weight: 600;
|
||||
font-weight: 400;
|
||||
font-size: clamp(2rem, 4vw, 3rem);
|
||||
line-height: 1.2;
|
||||
margin-bottom: 1.5rem;
|
||||
@@ -185,11 +188,50 @@ p.lead {
|
||||
line-height: 1.8;
|
||||
}
|
||||
|
||||
/* ===========================
|
||||
TOP NAV
|
||||
=========================== */
|
||||
.site-nav {
|
||||
padding: 1.5rem 2rem;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1.5rem;
|
||||
position: relative;
|
||||
z-index: 10;
|
||||
}
|
||||
|
||||
.site-nav a {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
text-decoration: none;
|
||||
transition: color 0.2s ease;
|
||||
}
|
||||
.site-nav a:hover { color: var(--amber); }
|
||||
|
||||
.site-nav .wordmark {
|
||||
font-family: var(--font-display);
|
||||
font-size: 1.4rem;
|
||||
font-weight: 400;
|
||||
color: var(--text-primary);
|
||||
text-transform: none;
|
||||
letter-spacing: -0.02em;
|
||||
}
|
||||
.site-nav .wordmark:hover { color: var(--amber); }
|
||||
|
||||
.site-nav .sep {
|
||||
color: var(--text-dim);
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
|
||||
/* ===========================
|
||||
HERO
|
||||
=========================== */
|
||||
.hero {
|
||||
min-height: 100vh;
|
||||
min-height: calc(100vh - 5rem);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
position: relative;
|
||||
@@ -226,7 +268,7 @@ p.lead {
|
||||
|
||||
.hero .wordmark {
|
||||
font-family: var(--font-display);
|
||||
font-weight: 700;
|
||||
font-weight: 400;
|
||||
font-size: clamp(4.5rem, 12vw, 9rem);
|
||||
line-height: 0.9;
|
||||
letter-spacing: -0.03em;
|
||||
@@ -508,7 +550,7 @@ p.lead {
|
||||
.layer-card h3 {
|
||||
font-family: var(--font-display);
|
||||
font-size: 1.4rem;
|
||||
font-weight: 600;
|
||||
font-weight: 400;
|
||||
margin-bottom: 1.25rem;
|
||||
}
|
||||
|
||||
@@ -552,7 +594,7 @@ p.lead {
|
||||
.arch-subsection h3 {
|
||||
font-family: var(--font-display);
|
||||
font-size: 1.5rem;
|
||||
font-weight: 600;
|
||||
font-weight: 400;
|
||||
margin-bottom: 2rem;
|
||||
}
|
||||
|
||||
@@ -785,6 +827,169 @@ p.lead {
|
||||
background: rgba(82, 122, 82, 0.04);
|
||||
}
|
||||
|
||||
/* ===========================
|
||||
PERFORMANCE
|
||||
=========================== */
|
||||
.perf-section {
|
||||
background: var(--bg-surface);
|
||||
}
|
||||
|
||||
.perf-grid {
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1fr;
|
||||
gap: 3rem;
|
||||
margin-top: 3rem;
|
||||
align-items: start;
|
||||
}
|
||||
|
||||
.perf-table-wrapper {
|
||||
overflow-x: auto;
|
||||
border: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.perf-table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
font-size: 0.85rem;
|
||||
min-width: 380px;
|
||||
}
|
||||
|
||||
.perf-table thead th {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.7rem;
|
||||
letter-spacing: 0.08em;
|
||||
text-transform: uppercase;
|
||||
color: var(--text-dim);
|
||||
padding: 0.8rem 1rem;
|
||||
text-align: right;
|
||||
border-bottom: 1px solid var(--border);
|
||||
background: var(--bg-elevated);
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.perf-table thead th:first-child {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.perf-table tbody td {
|
||||
padding: 0.65rem 1rem;
|
||||
border-bottom: 1px solid var(--border);
|
||||
color: var(--text-secondary);
|
||||
text-align: right;
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.82rem;
|
||||
}
|
||||
|
||||
.perf-table tbody td:first-child {
|
||||
font-family: var(--font-body);
|
||||
font-size: 0.85rem;
|
||||
color: var(--text-primary);
|
||||
text-align: left;
|
||||
font-weight: 400;
|
||||
}
|
||||
|
||||
.perf-table tbody tr:hover {
|
||||
background: var(--bg-elevated);
|
||||
}
|
||||
|
||||
.perf-table tbody tr.perf-highlight td {
|
||||
color: var(--emerald);
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.perf-table tbody tr.perf-highlight td:first-child {
|
||||
color: var(--emerald);
|
||||
}
|
||||
|
||||
.perf-sidebar {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 1.5rem;
|
||||
}
|
||||
|
||||
.perf-stat {
|
||||
background: var(--bg-card);
|
||||
border: 1px solid var(--border);
|
||||
padding: 1.5rem;
|
||||
box-shadow: 0 1px 4px rgba(0,0,0,0.04);
|
||||
}
|
||||
|
||||
.perf-stat-value {
|
||||
font-family: var(--font-display);
|
||||
font-size: 2.2rem;
|
||||
font-weight: 400;
|
||||
line-height: 1.1;
|
||||
}
|
||||
|
||||
.perf-stat-value.emerald { color: var(--emerald); }
|
||||
.perf-stat-value.teal { color: var(--teal); }
|
||||
.perf-stat-value.amber { color: var(--amber); }
|
||||
|
||||
.perf-stat-label {
|
||||
font-size: 0.82rem;
|
||||
color: var(--text-secondary);
|
||||
margin-top: 0.4rem;
|
||||
}
|
||||
|
||||
.perf-bar-group {
|
||||
margin-top: 1.5rem;
|
||||
}
|
||||
|
||||
.perf-bar-row {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.75rem;
|
||||
margin-bottom: 0.6rem;
|
||||
}
|
||||
|
||||
.perf-bar-label {
|
||||
font-size: 0.75rem;
|
||||
color: var(--text-secondary);
|
||||
width: 80px;
|
||||
flex-shrink: 0;
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
.perf-bar-track {
|
||||
flex: 1;
|
||||
height: 18px;
|
||||
background: var(--bg-elevated);
|
||||
border-radius: 2px;
|
||||
overflow: hidden;
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.perf-bar-fill {
|
||||
height: 100%;
|
||||
border-radius: 2px;
|
||||
transition: width 0.6s ease;
|
||||
}
|
||||
|
||||
.perf-bar-fill.emerald { background: var(--emerald); }
|
||||
.perf-bar-fill.teal { background: var(--teal); }
|
||||
.perf-bar-fill.dim { background: var(--text-dim); }
|
||||
|
||||
.perf-bar-ms {
|
||||
font-family: var(--font-mono);
|
||||
font-size: 0.7rem;
|
||||
color: var(--text-dim);
|
||||
width: 42px;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.perf-note {
|
||||
font-size: 0.78rem;
|
||||
color: var(--text-dim);
|
||||
margin-top: 2rem;
|
||||
line-height: 1.6;
|
||||
}
|
||||
|
||||
.perf-note a {
|
||||
color: var(--teal-dim);
|
||||
text-decoration: none;
|
||||
border-bottom: 1px solid var(--border-teal);
|
||||
}
|
||||
|
||||
/* ===========================
|
||||
TECHNICAL
|
||||
=========================== */
|
||||
@@ -824,6 +1029,8 @@ p.lead {
|
||||
color: var(--text-secondary);
|
||||
overflow-x: auto;
|
||||
position: relative;
|
||||
white-space: pre-wrap;
|
||||
word-break: break-all;
|
||||
}
|
||||
|
||||
.code-block::before {
|
||||
@@ -980,6 +1187,7 @@ footer .closing {
|
||||
.problem-grid { grid-template-columns: 1fr; gap: 2rem; }
|
||||
.layers-grid { grid-template-columns: 1fr; }
|
||||
.tech-grid { grid-template-columns: 1fr; }
|
||||
.perf-grid { grid-template-columns: 1fr; }
|
||||
.network-grid { grid-template-columns: repeat(2, 1fr); }
|
||||
.network-connections { display: none; }
|
||||
.hero-line { display: none; }
|
||||
@@ -989,6 +1197,9 @@ footer .closing {
|
||||
@media (max-width: 600px) {
|
||||
section { padding: 4rem 0; }
|
||||
.container { padding: 0 1.25rem; }
|
||||
.site-nav { padding: 1rem 1.25rem; gap: 1rem; }
|
||||
.site-nav .wordmark { font-size: 1.2rem; }
|
||||
.hero { min-height: calc(100vh - 4rem); }
|
||||
.network-grid { grid-template-columns: 1fr; }
|
||||
.pipeline { flex-direction: column; align-items: stretch; gap: 0; }
|
||||
.pipeline-arrow { transform: rotate(90deg); padding: 0.15rem 0; align-self: center; }
|
||||
@@ -1002,6 +1213,14 @@ footer .closing {
|
||||
</head>
|
||||
<body>
|
||||
|
||||
<nav class="site-nav">
|
||||
<a href="/" class="wordmark">Numa</a>
|
||||
<span class="sep">/</span>
|
||||
<a href="/blog/">Blog</a>
|
||||
<span class="sep">/</span>
|
||||
<a href="https://github.com/razvandimescu/numa" target="_blank" rel="noopener">GitHub</a>
|
||||
</nav>
|
||||
|
||||
<!-- ==================== HERO ==================== -->
|
||||
<section class="hero">
|
||||
<div class="roman-bricks" aria-hidden="true"></div>
|
||||
@@ -1036,9 +1255,9 @@ footer .closing {
|
||||
</div>
|
||||
<div class="problem-grid">
|
||||
<div class="problem-text reveal reveal-delay-1">
|
||||
<p>Every time you visit a website, you ask a DNS resolver where to go. That resolver sees every domain you visit, when, and how often.</p>
|
||||
<p>Today, a handful of operators control this infrastructure. ICANN governs the root. Registrars can seize domains. Governments compel censorship. Your ISP logs your queries by default.</p>
|
||||
<p>The protocol that underpins the entire internet has no built-in privacy, no cryptographic ownership, and no way for users to choose who they trust.</p>
|
||||
<p>Every time you visit a website, you ask a DNS resolver where to go. That resolver sees every domain you visit, when, and how often. Your ISP logs these queries by default.</p>
|
||||
<p>Ad blockers work in one browser. Pi-hole needs a Raspberry Pi. Your local dev services live at <code>localhost:5173</code> and you can never remember which port is which.</p>
|
||||
<p>DNS is the foundation of everything you do on the internet, but the tools for controlling it locally are either too complex (dnsmasq + nginx + mkcert) or too limited (cloud-only, appliance-only).</p>
|
||||
</div>
|
||||
<div class="dns-diagram reveal reveal-delay-2">
|
||||
<div class="dns-node"><span class="node-dot dim"></span>Your browser</div>
|
||||
@@ -1062,44 +1281,46 @@ footer .closing {
|
||||
<div class="container">
|
||||
<div class="reveal">
|
||||
<div class="section-label">How It Works</div>
|
||||
<h2>Three layers, built incrementally</h2>
|
||||
<p class="lead">Numa starts as a practical developer tool and evolves toward a decentralized network. Each layer stands on its own.</p>
|
||||
<h2>What it does today</h2>
|
||||
<p class="lead">A DNS resolver with caching, ad blocking, local service domains, and a REST API. Optional recursive resolution with DNSSEC. Everything runs in a single binary.</p>
|
||||
</div>
|
||||
<div class="layers-grid">
|
||||
<div class="layer-card reveal reveal-delay-1">
|
||||
<div class="layer-badge">Today</div>
|
||||
<h3>DNS You Control</h3>
|
||||
<div class="layer-badge">Layer 1</div>
|
||||
<h3>Resolve & Protect</h3>
|
||||
<ul>
|
||||
<li>Forward mode by default — transparent proxy to your existing DNS, with caching</li>
|
||||
<li>Ad & tracker blocking — 385K+ domains, zero config</li>
|
||||
<li>Ephemeral DNS overrides with auto-revert</li>
|
||||
<li>Local service proxy — <code>frontend.numa</code> instead of <code>localhost:5173</code></li>
|
||||
<li>Live dashboard with real-time stats and controls</li>
|
||||
<li>REST API — 22 endpoints for programmatic control</li>
|
||||
<li>Recursive resolution — opt-in, resolve from root nameservers, no upstream needed</li>
|
||||
<li>DNSSEC validation — chain-of-trust + NSEC/NSEC3 denial proofs (RSA, ECDSA, Ed25519)</li>
|
||||
<li>DNS-over-TLS listener — encrypted DNS for phones and strict clients (RFC 7858 with ALPN defense)</li>
|
||||
<li>Hostile-network resilience — TCP fallback with UDP auto-disable when ISPs block port 53</li>
|
||||
<li>TTL-aware caching (sub-ms lookups)</li>
|
||||
<li>Single binary, portable — your ad blocker travels with you</li>
|
||||
<li>Single binary, portable — macOS, Linux, and Windows</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="layer-card reveal reveal-delay-2">
|
||||
<div class="layer-badge">Next</div>
|
||||
<h3>Self-Sovereign DNS</h3>
|
||||
<div class="layer-badge">Layer 2</div>
|
||||
<h3>Developer Tools</h3>
|
||||
<ul>
|
||||
<li>pkarr integration: Ed25519 keys as domains</li>
|
||||
<li>Resolve via Mainline BitTorrent DHT (10M+ nodes)</li>
|
||||
<li>No registrar, no blockchain, no ICANN</li>
|
||||
<li>Cryptographic verification built-in</li>
|
||||
<li>Human-readable aliases for pkarr domains</li>
|
||||
<li>Local service proxy — <code>frontend.numa</code> instead of <code>localhost:5173</code></li>
|
||||
<li>Path-based routing — <code>app.numa/api</code> → <code>:5001</code></li>
|
||||
<li>Ephemeral DNS overrides with auto-revert</li>
|
||||
<li>LAN service discovery via mDNS</li>
|
||||
<li>Conditional forwarding — plays nice with Tailscale/VPN split-DNS</li>
|
||||
<li>REST API — script everything, automate anything</li>
|
||||
<li>Live dashboard with real-time stats and controls</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="layer-card reveal reveal-delay-3">
|
||||
<div class="layer-badge">Vision</div>
|
||||
<h3>Decentralized Resolver Network</h3>
|
||||
<div class="layer-badge">The Vision</div>
|
||||
<h3>Self-Sovereign DNS</h3>
|
||||
<ul>
|
||||
<li>Operators run Numa nodes and stake tokens</li>
|
||||
<li>Earn rewards for uptime, correctness, latency</li>
|
||||
<li>Independent auditors send challenge queries</li>
|
||||
<li>Slashing for NXDOMAIN hijacking or poisoned records</li>
|
||||
<li>Geographic diversity bonuses</li>
|
||||
<li>Privacy-preserving resolution (DoH/DoT)</li>
|
||||
<li>pkarr integration — DNS via Mainline DHT, no registrar needed</li>
|
||||
<li>Global <code>.numa</code> names — self-publish, DHT-backed</li>
|
||||
<li>.onion bridge — human-readable names for Tor hidden services</li>
|
||||
<li>Ed25519 same-key binding — zero new trust assumptions</li>
|
||||
<li>No blockchain required for core naming</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1131,66 +1352,14 @@ footer .closing {
|
||||
<span class="pipeline-arrow">→</span>
|
||||
<div class="pipeline-node"><div class="pipeline-box">Cache</div></div>
|
||||
<span class="pipeline-arrow">→</span>
|
||||
<div class="pipeline-node"><div class="pipeline-box hl-violet">pkarr / DHT</div></div>
|
||||
<div class="pipeline-node"><div class="pipeline-box hl-violet">Recursive / Forward (DoH)</div></div>
|
||||
<span class="pipeline-arrow">→</span>
|
||||
<div class="pipeline-node"><div class="pipeline-box">Upstream</div></div>
|
||||
<div class="pipeline-node"><div class="pipeline-box highlight">DNSSEC Validate</div></div>
|
||||
<span class="pipeline-arrow">→</span>
|
||||
<div class="pipeline-node"><div class="pipeline-box hl-emerald">Respond</div></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="arch-subsection reveal">
|
||||
<h3>Layered resilience</h3>
|
||||
<div class="layer-stack">
|
||||
<div class="stack-row">
|
||||
<div class="stack-label" style="color: var(--violet)">L4 Permanence</div>
|
||||
<div class="stack-value">Arweave immutable zone snapshots (future)</div>
|
||||
</div>
|
||||
<div class="stack-row">
|
||||
<div class="stack-label" style="color: var(--violet-dim)">L3 Distribution</div>
|
||||
<div class="stack-value">Mainline DHT via pkarr — 10M+ nodes</div>
|
||||
</div>
|
||||
<div class="stack-row">
|
||||
<div class="stack-label" style="color: var(--amber)">L2 Serving</div>
|
||||
<div class="stack-value">Numa instances worldwide</div>
|
||||
</div>
|
||||
<div class="stack-row">
|
||||
<div class="stack-label" style="color: var(--teal)">L1 Compatibility</div>
|
||||
<div class="stack-value">Standard DNS wire protocol — RFC 1035</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="arch-subsection reveal">
|
||||
<h3>Network actors</h3>
|
||||
<div class="network-grid">
|
||||
<div class="network-actor">
|
||||
<span class="actor-icon" style="color: var(--teal)" aria-hidden="true">∘</span>
|
||||
<h4 style="color: var(--teal)">Users</h4>
|
||||
<p>Choose resolvers from a decentralized marketplace based on latency, privacy, and reputation</p>
|
||||
</div>
|
||||
<div class="network-actor">
|
||||
<span class="actor-icon" style="color: var(--amber)" aria-hidden="true">⋄</span>
|
||||
<h4 style="color: var(--amber)">Operators</h4>
|
||||
<p>Stake tokens, run Numa nodes, earn rewards proportional to verified service quality</p>
|
||||
</div>
|
||||
<div class="network-actor">
|
||||
<span class="actor-icon" style="color: var(--rose)" aria-hidden="true">⌖</span>
|
||||
<h4 style="color: var(--rose)">Auditors</h4>
|
||||
<p>Send challenge queries from diverse locations, verify correctness and latency</p>
|
||||
</div>
|
||||
<div class="network-actor">
|
||||
<span class="actor-icon" style="color: var(--violet)" aria-hidden="true">≡</span>
|
||||
<h4 style="color: var(--violet)">Chain</h4>
|
||||
<p>Accounting, reputation scores, reward distribution, slashing proofs</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="network-connections" aria-hidden="true">
|
||||
<div class="network-conn-line"></div>
|
||||
<div class="network-conn-line"></div>
|
||||
<div class="network-conn-line"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
@@ -1217,6 +1386,22 @@ footer .closing {
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>Recursive resolver</td>
|
||||
<td class="cross">No (needs Unbound)</td>
|
||||
<td class="cross">Cloud only</td>
|
||||
<td class="cross">Cloud only</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="check">Root hints + full DNSSEC</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>DNSSEC validation</td>
|
||||
<td class="muted">Passthrough</td>
|
||||
<td class="muted">Cloud only</td>
|
||||
<td class="muted">Cloud only</td>
|
||||
<td class="muted">Passthrough</td>
|
||||
<td class="check">Full chain-of-trust</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Ad & tracker blocking</td>
|
||||
<td class="check">Yes</td>
|
||||
@@ -1265,6 +1450,30 @@ footer .closing {
|
||||
<td class="check">Yes</td>
|
||||
<td class="check">Real-time + controls</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>DNS-over-HTTPS upstream</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="check">Yes</td>
|
||||
<td class="check">Yes</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="check">Built in (HTTP/2 + rustls)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>DNS-over-TLS listener</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="muted">Cloud only</td>
|
||||
<td class="muted">Cloud only</td>
|
||||
<td class="check">Yes (cert required)</td>
|
||||
<td class="check">Self-signed or BYO</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Conditional forwarding</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="muted">Manual</td>
|
||||
<td class="check">Auto-detects Tailscale/VPN</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Zero config needed</td>
|
||||
<td class="cross">Complex setup</td>
|
||||
@@ -1273,14 +1482,6 @@ footer .closing {
|
||||
<td class="cross">Docker/setup</td>
|
||||
<td class="check">Works out of the box</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Self-sovereign DNS roadmap</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="cross">No</td>
|
||||
<td class="check">pkarr / DHT</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
@@ -1289,6 +1490,133 @@ footer .closing {
|
||||
|
||||
<div class="section-road" aria-hidden="true"><div class="roman-bricks"></div></div>
|
||||
|
||||
<!-- ==================== PERFORMANCE ==================== -->
|
||||
<section class="perf-section" id="performance">
|
||||
<div class="container">
|
||||
<div class="reveal">
|
||||
<div class="section-label" style="color: var(--emerald)">Performance</div>
|
||||
<h2>Measured, not claimed</h2>
|
||||
<p class="lead">Benchmarked with <code style="font-size:0.85em">dig</code> against public resolvers on the same machine. Cached queries resolve in under a microsecond.</p>
|
||||
</div>
|
||||
|
||||
<div class="perf-grid">
|
||||
<div class="reveal reveal-delay-1">
|
||||
<div class="perf-table-wrapper">
|
||||
<table class="perf-table">
|
||||
<caption class="sr-only">DNS resolver latency comparison</caption>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Resolver</th>
|
||||
<th>Avg</th>
|
||||
<th>P50</th>
|
||||
<th>P99</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr class="perf-highlight">
|
||||
<td>Numa (cached)</td>
|
||||
<td><1ms</td>
|
||||
<td><1ms</td>
|
||||
<td><1ms</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Numa (cold)</td>
|
||||
<td>9ms</td>
|
||||
<td>9ms</td>
|
||||
<td>18ms</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>System resolver</td>
|
||||
<td>9ms</td>
|
||||
<td>8ms</td>
|
||||
<td>44ms</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Quad9</td>
|
||||
<td>15ms</td>
|
||||
<td>13ms</td>
|
||||
<td>43ms</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Cloudflare</td>
|
||||
<td>19ms</td>
|
||||
<td>14ms</td>
|
||||
<td>132ms</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Google</td>
|
||||
<td>22ms</td>
|
||||
<td>17ms</td>
|
||||
<td>37ms</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<div class="perf-bar-group">
|
||||
<div class="perf-bar-row">
|
||||
<span class="perf-bar-label">Numa</span>
|
||||
<div class="perf-bar-track"><div class="perf-bar-fill emerald" style="width: 2%"></div></div>
|
||||
<span class="perf-bar-ms"><1ms</span>
|
||||
</div>
|
||||
<div class="perf-bar-row">
|
||||
<span class="perf-bar-label">System</span>
|
||||
<div class="perf-bar-track"><div class="perf-bar-fill dim" style="width: 20%"></div></div>
|
||||
<span class="perf-bar-ms">9ms</span>
|
||||
</div>
|
||||
<div class="perf-bar-row">
|
||||
<span class="perf-bar-label">Quad9</span>
|
||||
<div class="perf-bar-track"><div class="perf-bar-fill dim" style="width: 33%"></div></div>
|
||||
<span class="perf-bar-ms">15ms</span>
|
||||
</div>
|
||||
<div class="perf-bar-row">
|
||||
<span class="perf-bar-label">Cloudflare</span>
|
||||
<div class="perf-bar-track"><div class="perf-bar-fill dim" style="width: 42%"></div></div>
|
||||
<span class="perf-bar-ms">19ms</span>
|
||||
</div>
|
||||
<div class="perf-bar-row">
|
||||
<span class="perf-bar-label">Google</span>
|
||||
<div class="perf-bar-track"><div class="perf-bar-fill dim" style="width: 49%"></div></div>
|
||||
<span class="perf-bar-ms">22ms</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="perf-sidebar reveal reveal-delay-2">
|
||||
<div class="perf-stat">
|
||||
<div class="perf-stat-value emerald">689 ns</div>
|
||||
<div class="perf-stat-label">Cached round-trip — parse query, cache lookup, serialize response</div>
|
||||
</div>
|
||||
<div class="perf-stat">
|
||||
<div class="perf-stat-value teal">2.0M</div>
|
||||
<div class="perf-stat-label">Queries per second (single-threaded pipeline throughput, batched)</div>
|
||||
</div>
|
||||
<div class="perf-stat">
|
||||
<div class="perf-stat-value amber">0 allocations</div>
|
||||
<div class="perf-stat-label">Heap allocations in the I/O path — 4KB stack buffers, inline serialization</div>
|
||||
</div>
|
||||
<div class="perf-stat">
|
||||
<div class="perf-stat-value teal">174 ns</div>
|
||||
<div class="perf-stat-label">ECDSA P-256 signature verification (DNSSEC). RSA/SHA-256: 10.9µs. DS digest: 257ns.</div>
|
||||
</div>
|
||||
<div class="perf-stat">
|
||||
<div class="perf-stat-value emerald">~90 ms</div>
|
||||
<div class="perf-stat-label">Cold-cache DNSSEC validation — only 1 network fetch needed (TLD chain pre-warmed on startup)</div>
|
||||
</div>
|
||||
|
||||
<p class="perf-note">
|
||||
Cold queries match system resolver speed — the bottleneck is upstream RTT, not Numa. We don't claim to be faster when the network is the limit.
|
||||
<br><br>
|
||||
Benchmarks are reproducible: <code style="font-size:0.85em">cargo bench</code> for micro-benchmarks, <code style="font-size:0.85em">python3 bench/dns-bench.sh</code> for end-to-end.
|
||||
<a href="https://github.com/razvandimescu/numa/tree/main/bench">Methodology →</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<div class="section-road on-surface" aria-hidden="true"><div class="roman-bricks"></div></div>
|
||||
|
||||
<!-- ==================== TECHNICAL ==================== -->
|
||||
<section id="technical">
|
||||
<div class="container">
|
||||
@@ -1304,26 +1632,37 @@ footer .closing {
|
||||
<dt>DNS Libraries</dt>
|
||||
<dd>Zero — wire protocol parsed from scratch</dd>
|
||||
|
||||
<dt>Resolution Modes</dt>
|
||||
<dd>Recursive (iterative from root hints, CNAME chasing, glue extraction) or Forward (DoH / plain UDP)</dd>
|
||||
|
||||
<dt>Listeners</dt>
|
||||
<dd>UDP:53 + TCP:53 (plain DNS), DoT:853 (RFC 7858 + ALPN), HTTP proxy :80 / HTTPS proxy :443, dashboard :5380</dd>
|
||||
|
||||
<dt>DNSSEC</dt>
|
||||
<dd>Chain-of-trust via ring — RSA/SHA-256, ECDSA P-256, Ed25519. NSEC/NSEC3 denial proofs. EDNS0 DO bit, 1232-byte payload (DNS Flag Day 2020).</dd>
|
||||
|
||||
<dt>Dependencies</dt>
|
||||
<dd>8 runtime crates (tokio, axum, hyper, serde, serde_json, toml, log, futures)</dd>
|
||||
<dd>A focused set — tokio, axum, hyper, ring (DNSSEC), reqwest (DoH), rcgen + rustls + tokio-rustls (TLS/DoT), socket2 (multicast), serde. No transitive DNS library.</dd>
|
||||
|
||||
<dt>Packet Format</dt>
|
||||
<dd>RFC 1035 compliant, 4096-byte UDP (EDNS)</dd>
|
||||
<dd>RFC 1035 compliant. EDNS0 OPT pseudo-record. Parses A, AAAA, NS, CNAME, MX, SOA, SRV, HTTPS, DNSKEY, DS, RRSIG, NSEC, NSEC3.</dd>
|
||||
|
||||
<dt>Concurrency</dt>
|
||||
<dd>Arc<ServerCtx> + std::sync::Mutex (sub-µs holds, never across .await)</dd>
|
||||
|
||||
<dt>Signatures</dt>
|
||||
<dd>Ed25519 via pkarr for self-sovereign domains</dd>
|
||||
<dd>Arc<ServerCtx> + RwLock for reads, Mutex for writes (never across .await)</dd>
|
||||
</dl>
|
||||
<div class="code-block reveal reveal-delay-2">
|
||||
<span class="comment"># Install (pick one)</span>
|
||||
<span class="prompt">$</span> <span class="cmd">brew install</span> razvandimescu/tap/numa
|
||||
<span class="prompt">$</span> <span class="cmd">cargo install</span> numa
|
||||
<span class="prompt">$</span> <span class="cmd">sudo numa</span> <span class="comment"># bind to :53, :80, :5380</span>
|
||||
<span class="prompt">$</span> <span class="cmd">curl</span> <span class="flag">-fsSL</span> https://raw.githubusercontent.com/razvandimescu/numa/main/install.sh <span class="flag">|</span> <span class="cmd">sh</span>
|
||||
|
||||
<span class="comment"># Run</span>
|
||||
<span class="prompt">$</span> <span class="cmd">sudo numa</span> <span class="comment"># bind :53, :80, :443, :853, :5380</span>
|
||||
<span class="prompt">$</span> <span class="cmd">dig</span> <span class="flag">@127.0.0.1</span> google.com <span class="comment"># test resolution</span>
|
||||
<span class="prompt">$</span> <span class="cmd">open</span> http://numa.numa <span class="comment"># dashboard</span>
|
||||
<span class="prompt">$</span> <span class="cmd">open</span> http://localhost:5380 <span class="comment"># dashboard</span>
|
||||
<span class="prompt">$</span> <span class="cmd">curl</span> <span class="flag">-X POST</span> localhost:5380/services \
|
||||
<span class="flag">-d</span> <span class="str">'{"name":"frontend",
|
||||
"target_port":5173}'</span> <span class="comment"># http://frontend.numa</span>
|
||||
"target_port":5173}'</span> <span class="comment"># https://frontend.numa</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1345,7 +1684,7 @@ footer .closing {
|
||||
</div>
|
||||
<div class="roadmap-item done">
|
||||
<span class="phase">Phase 1</span>
|
||||
<span class="phase-desc">Override layer + REST API with 18 endpoints</span>
|
||||
<span class="phase-desc">Override layer + REST API for programmatic DNS control</span>
|
||||
</div>
|
||||
<div class="roadmap-item done">
|
||||
<span class="phase">Phase 2</span>
|
||||
@@ -1359,25 +1698,41 @@ footer .closing {
|
||||
<span class="phase">Phase 4</span>
|
||||
<span class="phase-desc">Local service proxy — .numa domains, HTTP/HTTPS reverse proxy, auto TLS, WebSocket</span>
|
||||
</div>
|
||||
<div class="roadmap-item phase-teal">
|
||||
<div class="roadmap-item done">
|
||||
<span class="phase">Phase 5</span>
|
||||
<span class="phase-desc">pkarr integration — resolve Ed25519 keys via Mainline DHT (15M nodes)</span>
|
||||
<span class="phase-desc">DNS-over-HTTPS — encrypted upstream, HTTP/2 connection pooling</span>
|
||||
</div>
|
||||
<div class="roadmap-item done">
|
||||
<span class="phase">Phase 6</span>
|
||||
<span class="phase-desc">Recursive resolution — resolve from root nameservers, no upstream dependency</span>
|
||||
</div>
|
||||
<div class="roadmap-item done">
|
||||
<span class="phase">Phase 7</span>
|
||||
<span class="phase-desc">DNSSEC validation — chain-of-trust, NSEC/NSEC3 denial proofs, RSA + ECDSA + Ed25519</span>
|
||||
</div>
|
||||
<div class="roadmap-item done">
|
||||
<span class="phase">Phase 8</span>
|
||||
<span class="phase-desc">Hostile-network resilience — TCP fallback with UDP auto-disable when ISPs block :53, RFC 7816 query minimization</span>
|
||||
</div>
|
||||
<div class="roadmap-item done">
|
||||
<span class="phase">Phase 9</span>
|
||||
<span class="phase-desc">Windows support — cross-platform install/uninstall, <code>netsh</code> DNS config, service integration</span>
|
||||
</div>
|
||||
<div class="roadmap-item done">
|
||||
<span class="phase">Phase 10</span>
|
||||
<span class="phase-desc">DNS-over-TLS listener (RFC 7858) — ALPN enforcement, persistent connections, self-signed or BYO cert</span>
|
||||
</div>
|
||||
<div class="roadmap-item phase-teal">
|
||||
<span class="phase">Phase 6</span>
|
||||
<span class="phase">Phase 11</span>
|
||||
<span class="phase-desc">pkarr integration — self-sovereign DNS via Mainline DHT, no registrar needed</span>
|
||||
</div>
|
||||
<div class="roadmap-item phase-teal">
|
||||
<span class="phase">Phase 12</span>
|
||||
<span class="phase-desc">Global .numa names — self-publish, DHT-backed, first-come-first-served</span>
|
||||
</div>
|
||||
<div class="roadmap-item phase-amber">
|
||||
<span class="phase">Phase 7</span>
|
||||
<span class="phase-desc">Audit protocol — challenge-based verification of resolver honesty</span>
|
||||
</div>
|
||||
<div class="roadmap-item phase-violet">
|
||||
<span class="phase">Phase 8</span>
|
||||
<span class="phase-desc">Numa Network — proof-of-service consensus, NUMA token, paid .numa domains</span>
|
||||
</div>
|
||||
<div class="roadmap-item phase-violet">
|
||||
<span class="phase">Phase 9</span>
|
||||
<span class="phase-desc">.onion bridge — human-readable .numa names for Tor hidden services</span>
|
||||
<div class="roadmap-item phase-teal">
|
||||
<span class="phase">Phase 13</span>
|
||||
<span class="phase-desc">.onion bridge — human-readable Tor naming via Ed25519 same-key binding</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1391,6 +1746,7 @@ footer .closing {
|
||||
</p>
|
||||
<div class="footer-links reveal reveal-delay-1">
|
||||
<a href="https://github.com/razvandimescu/numa" target="_blank" rel="noopener">GitHub</a>
|
||||
<a href="/blog/">Blog</a>
|
||||
<a href="https://github.com/razvandimescu/numa/blob/main/LICENSE" target="_blank" rel="noopener">MIT License</a>
|
||||
</div>
|
||||
<p class="closing reveal reveal-delay-2">Built from scratch in Rust. No dependencies on trust.</p>
|
||||
@@ -1413,5 +1769,7 @@ const observer = new IntersectionObserver((entries) => {
|
||||
document.querySelectorAll('.reveal').forEach(el => observer.observe(el));
|
||||
</script>
|
||||
|
||||
<script data-goatcounter="https://razvandimescu.goatcounter.com/count"
|
||||
async src="//gc.zgo.at/count.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
633
src/api.rs
633
src/api.rs
@@ -9,12 +9,19 @@ use axum::{Json, Router};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::ctx::ServerCtx;
|
||||
use crate::forward::forward_query;
|
||||
use crate::forward::{forward_query, Upstream};
|
||||
use crate::query_log::QueryLogFilter;
|
||||
use crate::question::QueryType;
|
||||
use crate::stats::QueryPath;
|
||||
|
||||
const DASHBOARD_HTML: &str = include_str!("../site/dashboard.html");
|
||||
const FONTS_CSS: &str = include_str!("../site/fonts/fonts.css");
|
||||
const FONT_DM_SANS: &[u8] = include_bytes!("../site/fonts/dm-sans-latin.woff2");
|
||||
const FONT_DM_SANS_ITALIC: &[u8] = include_bytes!("../site/fonts/dm-sans-italic-latin.woff2");
|
||||
const FONT_INSTRUMENT: &[u8] = include_bytes!("../site/fonts/instrument-serif-latin.woff2");
|
||||
const FONT_INSTRUMENT_ITALIC: &[u8] =
|
||||
include_bytes!("../site/fonts/instrument-serif-italic-latin.woff2");
|
||||
const FONT_JETBRAINS: &[u8] = include_bytes!("../site/fonts/jetbrains-mono-latin.woff2");
|
||||
|
||||
pub fn router(ctx: Arc<ServerCtx>) -> Router {
|
||||
Router::new()
|
||||
@@ -46,6 +53,31 @@ pub fn router(ctx: Arc<ServerCtx>) -> Router {
|
||||
.route("/services", get(list_services))
|
||||
.route("/services", post(create_service))
|
||||
.route("/services/{name}", delete(remove_service))
|
||||
.route("/services/{name}/routes", get(list_routes))
|
||||
.route("/services/{name}/routes", post(add_route))
|
||||
.route("/services/{name}/routes", delete(remove_route))
|
||||
.route("/ca.pem", get(serve_ca))
|
||||
.route("/fonts/fonts.css", get(serve_fonts_css))
|
||||
.route(
|
||||
"/fonts/dm-sans-latin.woff2",
|
||||
get(|| async { serve_font(FONT_DM_SANS) }),
|
||||
)
|
||||
.route(
|
||||
"/fonts/dm-sans-italic-latin.woff2",
|
||||
get(|| async { serve_font(FONT_DM_SANS_ITALIC) }),
|
||||
)
|
||||
.route(
|
||||
"/fonts/instrument-serif-latin.woff2",
|
||||
get(|| async { serve_font(FONT_INSTRUMENT) }),
|
||||
)
|
||||
.route(
|
||||
"/fonts/instrument-serif-italic-latin.woff2",
|
||||
get(|| async { serve_font(FONT_INSTRUMENT_ITALIC) }),
|
||||
)
|
||||
.route(
|
||||
"/fonts/jetbrains-mono-latin.woff2",
|
||||
get(|| async { serve_font(FONT_JETBRAINS) }),
|
||||
)
|
||||
.with_state(ctx)
|
||||
}
|
||||
|
||||
@@ -121,21 +153,38 @@ struct QueryLogResponse {
|
||||
path: String,
|
||||
rescode: String,
|
||||
latency_ms: f64,
|
||||
dnssec: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct StatsResponse {
|
||||
uptime_secs: u64,
|
||||
upstream: String,
|
||||
mode: &'static str, // "recursive" or "forward" — never "auto" at runtime
|
||||
config_path: String,
|
||||
data_dir: String,
|
||||
dnssec: bool,
|
||||
srtt: bool,
|
||||
queries: QueriesStats,
|
||||
cache: CacheStats,
|
||||
overrides: OverrideStats,
|
||||
blocking: BlockingStatsResponse,
|
||||
lan: LanStatsResponse,
|
||||
memory: MemoryStats,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct LanStatsResponse {
|
||||
enabled: bool,
|
||||
peers: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct QueriesStats {
|
||||
total: u64,
|
||||
forwarded: u64,
|
||||
recursive: u64,
|
||||
coalesced: u64,
|
||||
cached: u64,
|
||||
local: u64,
|
||||
overridden: u64,
|
||||
@@ -162,6 +211,19 @@ struct BlockingStatsResponse {
|
||||
allowlist_size: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct MemoryStats {
|
||||
cache_bytes: usize,
|
||||
blocklist_bytes: usize,
|
||||
query_log_bytes: usize,
|
||||
query_log_entries: usize,
|
||||
srtt_bytes: usize,
|
||||
srtt_entries: usize,
|
||||
overrides_bytes: usize,
|
||||
total_estimated_bytes: usize,
|
||||
process_memory_bytes: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct DiagnoseResponse {
|
||||
domain: String,
|
||||
@@ -206,7 +268,7 @@ async fn create_overrides(
|
||||
})
|
||||
.collect::<Result<Vec<_>, (StatusCode, String)>>()?;
|
||||
|
||||
let mut store = ctx.overrides.lock().unwrap();
|
||||
let mut store = ctx.overrides.write().unwrap();
|
||||
let mut responses = Vec::with_capacity(parsed.len());
|
||||
|
||||
for (domain, target, ttl, duration_secs) in parsed {
|
||||
@@ -227,7 +289,7 @@ async fn create_overrides(
|
||||
}
|
||||
|
||||
async fn list_overrides(State(ctx): State<Arc<ServerCtx>>) -> Json<Vec<OverrideResponse>> {
|
||||
let store = ctx.overrides.lock().unwrap();
|
||||
let store = ctx.overrides.read().unwrap();
|
||||
let entries: Vec<OverrideResponse> = store
|
||||
.list()
|
||||
.into_iter()
|
||||
@@ -240,7 +302,7 @@ async fn get_override(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Path(domain): Path<String>,
|
||||
) -> Result<Json<OverrideResponse>, StatusCode> {
|
||||
let store = ctx.overrides.lock().unwrap();
|
||||
let store = ctx.overrides.read().unwrap();
|
||||
let entry = store.get(&domain).ok_or(StatusCode::NOT_FOUND)?;
|
||||
Ok(Json(OverrideResponse::from(entry)))
|
||||
}
|
||||
@@ -249,7 +311,7 @@ async fn remove_override(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Path(domain): Path<String>,
|
||||
) -> StatusCode {
|
||||
let mut store = ctx.overrides.lock().unwrap();
|
||||
let mut store = ctx.overrides.write().unwrap();
|
||||
if store.remove(&domain) {
|
||||
StatusCode::NO_CONTENT
|
||||
} else {
|
||||
@@ -258,7 +320,7 @@ async fn remove_override(
|
||||
}
|
||||
|
||||
async fn clear_overrides(State(ctx): State<Arc<ServerCtx>>) -> StatusCode {
|
||||
ctx.overrides.lock().unwrap().clear();
|
||||
ctx.overrides.write().unwrap().clear();
|
||||
StatusCode::NO_CONTENT
|
||||
}
|
||||
|
||||
@@ -266,7 +328,7 @@ async fn load_environment(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Json(req): Json<EnvironmentRequest>,
|
||||
) -> Result<(StatusCode, Json<EnvironmentResponse>), (StatusCode, String)> {
|
||||
let mut store = ctx.overrides.lock().unwrap();
|
||||
let mut store = ctx.overrides.write().unwrap();
|
||||
|
||||
for entry in &req.overrides {
|
||||
let duration = entry.duration_secs.or(req.duration_secs);
|
||||
@@ -293,7 +355,7 @@ async fn diagnose(
|
||||
|
||||
// Check overrides
|
||||
{
|
||||
let store = ctx.overrides.lock().unwrap();
|
||||
let store = ctx.overrides.read().unwrap();
|
||||
let entry = store.get(&domain_lower);
|
||||
steps.push(DiagnoseStep {
|
||||
source: "override".to_string(),
|
||||
@@ -305,7 +367,7 @@ async fn diagnose(
|
||||
|
||||
// Check blocklist
|
||||
{
|
||||
let bl = ctx.blocklist.lock().unwrap();
|
||||
let bl = ctx.blocklist.read().unwrap();
|
||||
let blocked = bl.is_blocked(&domain_lower);
|
||||
steps.push(DiagnoseStep {
|
||||
source: "blocklist".to_string(),
|
||||
@@ -331,7 +393,7 @@ async fn diagnose(
|
||||
|
||||
// Check cache
|
||||
{
|
||||
let mut cache = ctx.cache.lock().unwrap();
|
||||
let cache = ctx.cache.read().unwrap();
|
||||
let cached = cache.lookup(&domain_lower, qtype);
|
||||
steps.push(DiagnoseStep {
|
||||
source: "cache".to_string(),
|
||||
@@ -341,8 +403,9 @@ async fn diagnose(
|
||||
}
|
||||
|
||||
// Check upstream (async, no locks held)
|
||||
let upstream = ctx.upstream.lock().unwrap().clone();
|
||||
let (upstream_matched, upstream_detail) =
|
||||
forward_query_for_diagnose(&domain_lower, ctx.upstream, ctx.timeout).await;
|
||||
forward_query_for_diagnose(&domain_lower, &upstream, ctx.timeout).await;
|
||||
steps.push(DiagnoseStep {
|
||||
source: "upstream".to_string(),
|
||||
matched: upstream_matched,
|
||||
@@ -358,18 +421,12 @@ async fn diagnose(
|
||||
|
||||
async fn forward_query_for_diagnose(
|
||||
domain: &str,
|
||||
upstream: std::net::SocketAddr,
|
||||
upstream: &Upstream,
|
||||
timeout: std::time::Duration,
|
||||
) -> (bool, String) {
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::question::DnsQuestion;
|
||||
|
||||
let mut query = DnsPacket::new();
|
||||
query.header.id = 0xBEEF;
|
||||
query.header.recursion_desired = true;
|
||||
query
|
||||
.questions
|
||||
.push(DnsQuestion::new(domain.to_string(), QueryType::A));
|
||||
let query = DnsPacket::query(0xBEEF, domain, QueryType::A);
|
||||
|
||||
match forward_query(&query, upstream, timeout).await {
|
||||
Ok(resp) => (
|
||||
@@ -417,6 +474,7 @@ async fn query_log(
|
||||
path: e.path.as_str().to_string(),
|
||||
rescode: e.rescode.as_str().to_string(),
|
||||
latency_ms: e.latency_us as f64 / 1000.0,
|
||||
dnssec: e.dnssec.as_str().to_string(),
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
@@ -427,18 +485,49 @@ async fn query_log(
|
||||
|
||||
async fn stats(State(ctx): State<Arc<ServerCtx>>) -> Json<StatsResponse> {
|
||||
let snap = ctx.stats.lock().unwrap().snapshot();
|
||||
let (cache_len, cache_max) = {
|
||||
let cache = ctx.cache.lock().unwrap();
|
||||
(cache.len(), cache.max_entries())
|
||||
let (cache_len, cache_max, cache_bytes) = {
|
||||
let cache = ctx.cache.read().unwrap();
|
||||
(cache.len(), cache.max_entries(), cache.heap_bytes())
|
||||
};
|
||||
let (override_count, overrides_bytes) = {
|
||||
let ov = ctx.overrides.read().unwrap();
|
||||
(ov.active_count(), ov.heap_bytes())
|
||||
};
|
||||
let (bl_stats, blocklist_bytes) = {
|
||||
let bl = ctx.blocklist.read().unwrap();
|
||||
(bl.stats(), bl.heap_bytes())
|
||||
};
|
||||
let (query_log_bytes, query_log_entries) = {
|
||||
let log = ctx.query_log.lock().unwrap();
|
||||
(log.heap_bytes(), log.len())
|
||||
};
|
||||
let (srtt_bytes, srtt_entries, srtt_enabled) = {
|
||||
let s = ctx.srtt.read().unwrap();
|
||||
(s.heap_bytes(), s.len(), s.is_enabled())
|
||||
};
|
||||
|
||||
let total_estimated =
|
||||
cache_bytes + blocklist_bytes + query_log_bytes + srtt_bytes + overrides_bytes;
|
||||
|
||||
let upstream = if ctx.upstream_mode == crate::config::UpstreamMode::Recursive {
|
||||
"recursive (root hints)".to_string()
|
||||
} else {
|
||||
ctx.upstream.lock().unwrap().to_string()
|
||||
};
|
||||
let override_count = ctx.overrides.lock().unwrap().active_count();
|
||||
let bl_stats = ctx.blocklist.lock().unwrap().stats();
|
||||
|
||||
Json(StatsResponse {
|
||||
uptime_secs: snap.uptime_secs,
|
||||
upstream,
|
||||
mode: ctx.upstream_mode.as_str(),
|
||||
config_path: ctx.config_path.clone(),
|
||||
data_dir: ctx.data_dir.to_string_lossy().to_string(),
|
||||
dnssec: ctx.dnssec_enabled,
|
||||
srtt: srtt_enabled,
|
||||
queries: QueriesStats {
|
||||
total: snap.total,
|
||||
forwarded: snap.forwarded,
|
||||
recursive: snap.recursive,
|
||||
coalesced: snap.coalesced,
|
||||
cached: snap.cached,
|
||||
local: snap.local,
|
||||
overridden: snap.overridden,
|
||||
@@ -458,11 +547,26 @@ async fn stats(State(ctx): State<Arc<ServerCtx>>) -> Json<StatsResponse> {
|
||||
domains_loaded: bl_stats.domains_loaded,
|
||||
allowlist_size: bl_stats.allowlist_size,
|
||||
},
|
||||
lan: LanStatsResponse {
|
||||
enabled: ctx.lan_enabled,
|
||||
peers: ctx.lan_peers.lock().unwrap().list().len(),
|
||||
},
|
||||
memory: MemoryStats {
|
||||
cache_bytes,
|
||||
blocklist_bytes,
|
||||
query_log_bytes,
|
||||
query_log_entries,
|
||||
srtt_bytes,
|
||||
srtt_entries,
|
||||
overrides_bytes,
|
||||
total_estimated_bytes: total_estimated,
|
||||
process_memory_bytes: crate::stats::process_memory_bytes(),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
async fn list_cache(State(ctx): State<Arc<ServerCtx>>) -> Json<Vec<CacheEntryResponse>> {
|
||||
let cache = ctx.cache.lock().unwrap();
|
||||
let cache = ctx.cache.read().unwrap();
|
||||
let entries: Vec<CacheEntryResponse> = cache
|
||||
.list()
|
||||
.into_iter()
|
||||
@@ -476,7 +580,7 @@ async fn list_cache(State(ctx): State<Arc<ServerCtx>>) -> Json<Vec<CacheEntryRes
|
||||
}
|
||||
|
||||
async fn flush_cache(State(ctx): State<Arc<ServerCtx>>) -> StatusCode {
|
||||
ctx.cache.lock().unwrap().clear();
|
||||
ctx.cache.write().unwrap().clear();
|
||||
StatusCode::NO_CONTENT
|
||||
}
|
||||
|
||||
@@ -484,18 +588,29 @@ async fn flush_cache_domain(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Path(domain): Path<String>,
|
||||
) -> StatusCode {
|
||||
ctx.cache.lock().unwrap().remove(&domain);
|
||||
ctx.cache.write().unwrap().remove(&domain);
|
||||
StatusCode::NO_CONTENT
|
||||
}
|
||||
|
||||
async fn health() -> Json<serde_json::Value> {
|
||||
Json(serde_json::json!({ "status": "ok" }))
|
||||
/// Enriched `/health` handler shared between the main API and the mobile API.
|
||||
///
|
||||
/// Returns the cached `HealthMeta` assembled with live fields (LAN IP,
|
||||
/// uptime). Backward compatible with the previous minimal response in
|
||||
/// that `status` is still the first field and `"ok"` is still the value.
|
||||
/// The iOS companion app's `HealthInfo` Swift struct decodes the full
|
||||
/// response; any HTTP client asserting only on `"status"` keeps working.
|
||||
pub async fn health(State(ctx): State<Arc<ServerCtx>>) -> Json<crate::health::HealthResponse> {
|
||||
let lan_ip = Some(*ctx.lan_ip.lock().unwrap());
|
||||
Json(crate::health::HealthResponse::build(
|
||||
&ctx.health_meta,
|
||||
lan_ip,
|
||||
))
|
||||
}
|
||||
|
||||
// --- Blocking handlers ---
|
||||
|
||||
async fn blocking_stats(State(ctx): State<Arc<ServerCtx>>) -> Json<serde_json::Value> {
|
||||
let stats = ctx.blocklist.lock().unwrap().stats();
|
||||
let stats = ctx.blocklist.read().unwrap().stats();
|
||||
Json(serde_json::json!({
|
||||
"enabled": stats.enabled,
|
||||
"paused": stats.paused,
|
||||
@@ -515,7 +630,7 @@ async fn blocking_toggle(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Json(req): Json<BlockingToggleRequest>,
|
||||
) -> Json<serde_json::Value> {
|
||||
ctx.blocklist.lock().unwrap().set_enabled(req.enabled);
|
||||
ctx.blocklist.write().unwrap().set_enabled(req.enabled);
|
||||
Json(serde_json::json!({ "enabled": req.enabled }))
|
||||
}
|
||||
|
||||
@@ -533,12 +648,12 @@ async fn blocking_pause(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Json(req): Json<BlockingPauseRequest>,
|
||||
) -> Json<serde_json::Value> {
|
||||
ctx.blocklist.lock().unwrap().pause(req.minutes * 60);
|
||||
ctx.blocklist.write().unwrap().pause(req.minutes * 60);
|
||||
Json(serde_json::json!({ "paused_minutes": req.minutes }))
|
||||
}
|
||||
|
||||
async fn blocking_unpause(State(ctx): State<Arc<ServerCtx>>) -> Json<serde_json::Value> {
|
||||
ctx.blocklist.lock().unwrap().unpause();
|
||||
ctx.blocklist.write().unwrap().unpause();
|
||||
Json(serde_json::json!({ "paused": false }))
|
||||
}
|
||||
|
||||
@@ -546,12 +661,12 @@ async fn blocking_check(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Path(domain): Path<String>,
|
||||
) -> Json<crate::blocklist::BlockCheckResult> {
|
||||
let result = ctx.blocklist.lock().unwrap().check(&domain);
|
||||
let result = ctx.blocklist.read().unwrap().check(&domain);
|
||||
Json(result)
|
||||
}
|
||||
|
||||
async fn blocking_allowlist(State(ctx): State<Arc<ServerCtx>>) -> Json<Vec<String>> {
|
||||
let list = ctx.blocklist.lock().unwrap().allowlist();
|
||||
let list = ctx.blocklist.read().unwrap().allowlist();
|
||||
Json(list)
|
||||
}
|
||||
|
||||
@@ -564,7 +679,7 @@ async fn blocking_allowlist_add(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Json(req): Json<AllowlistRequest>,
|
||||
) -> (StatusCode, Json<serde_json::Value>) {
|
||||
ctx.blocklist.lock().unwrap().add_to_allowlist(&req.domain);
|
||||
ctx.blocklist.write().unwrap().add_to_allowlist(&req.domain);
|
||||
(
|
||||
StatusCode::CREATED,
|
||||
Json(serde_json::json!({ "allowed": req.domain })),
|
||||
@@ -575,7 +690,12 @@ async fn blocking_allowlist_remove(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Path(domain): Path<String>,
|
||||
) -> StatusCode {
|
||||
if ctx.blocklist.lock().unwrap().remove_from_allowlist(&domain) {
|
||||
if ctx
|
||||
.blocklist
|
||||
.write()
|
||||
.unwrap()
|
||||
.remove_from_allowlist(&domain)
|
||||
{
|
||||
StatusCode::NO_CONTENT
|
||||
} else {
|
||||
StatusCode::NOT_FOUND
|
||||
@@ -590,6 +710,10 @@ struct ServiceResponse {
|
||||
target_port: u16,
|
||||
url: String,
|
||||
healthy: bool,
|
||||
lan_accessible: bool,
|
||||
#[serde(skip_serializing_if = "Vec::is_empty")]
|
||||
routes: Vec<crate::service_store::RouteEntry>,
|
||||
source: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -604,27 +728,57 @@ async fn list_services(State(ctx): State<Arc<ServerCtx>>) -> Json<Vec<ServiceRes
|
||||
store
|
||||
.list()
|
||||
.into_iter()
|
||||
.map(|e| (e.name.clone(), e.target_port))
|
||||
.map(|e| {
|
||||
let source = if store.is_config_service(&e.name) {
|
||||
"config"
|
||||
} else {
|
||||
"api"
|
||||
};
|
||||
(
|
||||
e.name.clone(),
|
||||
e.target_port,
|
||||
e.routes.clone(),
|
||||
source.to_string(),
|
||||
)
|
||||
})
|
||||
.collect()
|
||||
};
|
||||
let tld = &ctx.proxy_tld;
|
||||
|
||||
// Run all health checks concurrently
|
||||
let health_futures: Vec<_> = entries
|
||||
let lan_ip = crate::lan::detect_lan_ip();
|
||||
|
||||
let check_futures: Vec<_> = entries
|
||||
.iter()
|
||||
.map(|(_, port)| check_health(*port))
|
||||
.map(|(_, port, _, _)| {
|
||||
let port = *port;
|
||||
let localhost = std::net::SocketAddr::from(([127, 0, 0, 1], port));
|
||||
let lan_addr = lan_ip.map(|ip| std::net::SocketAddr::new(ip.into(), port));
|
||||
async move {
|
||||
let healthy = check_tcp(localhost).await;
|
||||
let lan_accessible = match lan_addr {
|
||||
Some(addr) => check_tcp(addr).await,
|
||||
None => false,
|
||||
};
|
||||
(healthy, lan_accessible)
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let health_results = futures::future::join_all(health_futures).await;
|
||||
let check_results = futures::future::join_all(check_futures).await;
|
||||
|
||||
let results: Vec<_> = entries
|
||||
.into_iter()
|
||||
.zip(health_results)
|
||||
.map(|((name, port), healthy)| ServiceResponse {
|
||||
url: format!("http://{}.{}", name, tld),
|
||||
name,
|
||||
target_port: port,
|
||||
healthy,
|
||||
})
|
||||
.zip(check_results)
|
||||
.map(
|
||||
|((name, port, routes, source), (healthy, lan_accessible))| ServiceResponse {
|
||||
url: format!("http://{}.{}", name, tld),
|
||||
name,
|
||||
target_port: port,
|
||||
healthy,
|
||||
lan_accessible,
|
||||
routes,
|
||||
source,
|
||||
},
|
||||
)
|
||||
.collect();
|
||||
Json(results)
|
||||
}
|
||||
@@ -653,9 +807,21 @@ async fn create_service(
|
||||
}
|
||||
|
||||
let tld = &ctx.proxy_tld;
|
||||
let is_new = !ctx.services.lock().unwrap().has_name(&name);
|
||||
ctx.services.lock().unwrap().insert(&name, req.target_port);
|
||||
if is_new {
|
||||
crate::tls::regenerate_tls(&ctx);
|
||||
}
|
||||
|
||||
let healthy = check_health(req.target_port).await;
|
||||
let localhost = std::net::SocketAddr::from(([127, 0, 0, 1], req.target_port));
|
||||
let lan_addr =
|
||||
crate::lan::detect_lan_ip().map(|ip| std::net::SocketAddr::new(ip.into(), req.target_port));
|
||||
let (healthy, lan_accessible) = tokio::join!(check_tcp(localhost), async {
|
||||
match lan_addr {
|
||||
Some(a) => check_tcp(a).await,
|
||||
None => false,
|
||||
}
|
||||
});
|
||||
Ok((
|
||||
StatusCode::CREATED,
|
||||
Json(ServiceResponse {
|
||||
@@ -663,6 +829,9 @@ async fn create_service(
|
||||
name,
|
||||
target_port: req.target_port,
|
||||
healthy,
|
||||
lan_accessible,
|
||||
routes: Vec::new(),
|
||||
source: "api".to_string(),
|
||||
}),
|
||||
))
|
||||
}
|
||||
@@ -671,20 +840,376 @@ async fn remove_service(State(ctx): State<Arc<ServerCtx>>, Path(name): Path<Stri
|
||||
if name.eq_ignore_ascii_case("numa") {
|
||||
return StatusCode::FORBIDDEN;
|
||||
}
|
||||
let mut store = ctx.services.lock().unwrap();
|
||||
if store.remove(&name) {
|
||||
let removed = ctx.services.lock().unwrap().remove(&name);
|
||||
if removed {
|
||||
crate::tls::regenerate_tls(&ctx);
|
||||
StatusCode::NO_CONTENT
|
||||
} else {
|
||||
StatusCode::NOT_FOUND
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_health(port: u16) -> bool {
|
||||
// --- Route handlers ---
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct AddRouteRequest {
|
||||
path: String,
|
||||
port: u16,
|
||||
#[serde(default)]
|
||||
strip: bool,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct RemoveRouteRequest {
|
||||
path: String,
|
||||
}
|
||||
|
||||
async fn list_routes(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Path(name): Path<String>,
|
||||
) -> Result<Json<Vec<crate::service_store::RouteEntry>>, StatusCode> {
|
||||
let store = ctx.services.lock().unwrap();
|
||||
match store.lookup(&name) {
|
||||
Some(entry) => Ok(Json(entry.routes.clone())),
|
||||
None => Err(StatusCode::NOT_FOUND),
|
||||
}
|
||||
}
|
||||
|
||||
async fn add_route(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Path(name): Path<String>,
|
||||
Json(req): Json<AddRouteRequest>,
|
||||
) -> Result<StatusCode, (StatusCode, String)> {
|
||||
if req.path.is_empty() || !req.path.starts_with('/') {
|
||||
return Err((StatusCode::BAD_REQUEST, "path must start with /".into()));
|
||||
}
|
||||
if req.path.contains("/../") || req.path.ends_with("/..") || req.path.contains("%") {
|
||||
return Err((
|
||||
StatusCode::BAD_REQUEST,
|
||||
"path must not contain '..' or percent-encoding".into(),
|
||||
));
|
||||
}
|
||||
if req.port == 0 {
|
||||
return Err((StatusCode::BAD_REQUEST, "port must be > 0".into()));
|
||||
}
|
||||
let mut store = ctx.services.lock().unwrap();
|
||||
if store.add_route(&name, req.path, req.port, req.strip) {
|
||||
Ok(StatusCode::CREATED)
|
||||
} else {
|
||||
Err((
|
||||
StatusCode::NOT_FOUND,
|
||||
format!("service '{}' not found", name),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
async fn remove_route(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
Path(name): Path<String>,
|
||||
Json(req): Json<RemoveRouteRequest>,
|
||||
) -> StatusCode {
|
||||
let mut store = ctx.services.lock().unwrap();
|
||||
if store.remove_route(&name, &req.path) {
|
||||
StatusCode::NO_CONTENT
|
||||
} else {
|
||||
StatusCode::NOT_FOUND
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn serve_ca(State(ctx): State<Arc<ServerCtx>>) -> Result<impl IntoResponse, StatusCode> {
|
||||
let pem = ctx.ca_pem.as_deref().ok_or(StatusCode::NOT_FOUND)?;
|
||||
Ok((
|
||||
[
|
||||
(header::CONTENT_TYPE, "application/x-pem-file"),
|
||||
(
|
||||
header::CONTENT_DISPOSITION,
|
||||
"attachment; filename=\"numa-ca.pem\"",
|
||||
),
|
||||
(header::CACHE_CONTROL, "public, max-age=86400"),
|
||||
],
|
||||
pem.to_string(),
|
||||
))
|
||||
}
|
||||
|
||||
async fn serve_fonts_css() -> impl IntoResponse {
|
||||
(
|
||||
[
|
||||
(header::CONTENT_TYPE, "text/css"),
|
||||
(header::CACHE_CONTROL, "public, max-age=31536000"),
|
||||
],
|
||||
FONTS_CSS,
|
||||
)
|
||||
}
|
||||
|
||||
fn serve_font(data: &'static [u8]) -> impl IntoResponse {
|
||||
(
|
||||
[
|
||||
(header::CONTENT_TYPE, "font/woff2"),
|
||||
(header::CACHE_CONTROL, "public, max-age=31536000"),
|
||||
],
|
||||
data,
|
||||
)
|
||||
}
|
||||
|
||||
async fn check_tcp(addr: std::net::SocketAddr) -> bool {
|
||||
tokio::time::timeout(
|
||||
std::time::Duration::from_millis(100),
|
||||
tokio::net::TcpStream::connect(format!("127.0.0.1:{}", port)),
|
||||
tokio::net::TcpStream::connect(addr),
|
||||
)
|
||||
.await
|
||||
.map(|r| r.is_ok())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use axum::body::Body;
|
||||
use http::Request;
|
||||
use std::sync::{Mutex, RwLock};
|
||||
use tower::ServiceExt;
|
||||
|
||||
async fn test_ctx() -> Arc<ServerCtx> {
|
||||
let socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await.unwrap();
|
||||
Arc::new(ServerCtx {
|
||||
socket,
|
||||
zone_map: std::collections::HashMap::new(),
|
||||
cache: RwLock::new(crate::cache::DnsCache::new(100, 60, 86400)),
|
||||
stats: Mutex::new(crate::stats::ServerStats::new()),
|
||||
overrides: RwLock::new(crate::override_store::OverrideStore::new()),
|
||||
blocklist: RwLock::new(crate::blocklist::BlocklistStore::new()),
|
||||
query_log: Mutex::new(crate::query_log::QueryLog::new(100)),
|
||||
services: Mutex::new(crate::service_store::ServiceStore::new()),
|
||||
lan_peers: Mutex::new(crate::lan::PeerStore::new(90)),
|
||||
forwarding_rules: Vec::new(),
|
||||
upstream: Mutex::new(crate::forward::Upstream::Udp(
|
||||
"127.0.0.1:53".parse().unwrap(),
|
||||
)),
|
||||
upstream_auto: false,
|
||||
upstream_port: 53,
|
||||
lan_ip: Mutex::new(std::net::Ipv4Addr::LOCALHOST),
|
||||
timeout: std::time::Duration::from_secs(3),
|
||||
proxy_tld: "numa".to_string(),
|
||||
proxy_tld_suffix: ".numa".to_string(),
|
||||
lan_enabled: false,
|
||||
config_path: "/tmp/test-numa.toml".to_string(),
|
||||
config_found: false,
|
||||
config_dir: std::path::PathBuf::from("/tmp"),
|
||||
data_dir: std::path::PathBuf::from("/tmp"),
|
||||
tls_config: None,
|
||||
upstream_mode: crate::config::UpstreamMode::Forward,
|
||||
root_hints: Vec::new(),
|
||||
srtt: RwLock::new(crate::srtt::SrttCache::new(true)),
|
||||
inflight: Mutex::new(std::collections::HashMap::new()),
|
||||
dnssec_enabled: false,
|
||||
dnssec_strict: false,
|
||||
health_meta: crate::health::HealthMeta::test_fixture(),
|
||||
ca_pem: None,
|
||||
})
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn health_returns_ok() {
|
||||
let ctx = test_ctx().await;
|
||||
let resp = router(ctx)
|
||||
.oneshot(Request::get("/health").body(Body::empty()).unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(resp.status(), 200);
|
||||
let body = axum::body::to_bytes(resp.into_body(), 1000).await.unwrap();
|
||||
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
|
||||
assert_eq!(json["status"], "ok");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn stats_returns_json() {
|
||||
let ctx = test_ctx().await;
|
||||
let resp = router(ctx)
|
||||
.oneshot(Request::get("/stats").body(Body::empty()).unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(resp.status(), 200);
|
||||
let body = axum::body::to_bytes(resp.into_body(), 10000).await.unwrap();
|
||||
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
|
||||
assert!(json["uptime_secs"].is_number());
|
||||
assert!(json["queries"]["total"].is_number());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn query_log_empty() {
|
||||
let ctx = test_ctx().await;
|
||||
let resp = router(ctx)
|
||||
.oneshot(
|
||||
Request::get("/query-log?limit=10")
|
||||
.body(Body::empty())
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(resp.status(), 200);
|
||||
let body = axum::body::to_bytes(resp.into_body(), 10000).await.unwrap();
|
||||
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
|
||||
assert!(json.as_array().unwrap().is_empty());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn overrides_crud() {
|
||||
let ctx = test_ctx().await;
|
||||
let a = router(ctx.clone());
|
||||
|
||||
// Create
|
||||
let resp = a
|
||||
.clone()
|
||||
.oneshot(
|
||||
Request::post("/overrides")
|
||||
.header("content-type", "application/json")
|
||||
.body(Body::from(
|
||||
r#"{"domain":"test.dev","target":"1.2.3.4","duration_secs":60}"#,
|
||||
))
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(resp.status().is_success());
|
||||
|
||||
// List
|
||||
let resp = a
|
||||
.clone()
|
||||
.oneshot(Request::get("/overrides").body(Body::empty()).unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
let body = axum::body::to_bytes(resp.into_body(), 10000).await.unwrap();
|
||||
assert!(String::from_utf8_lossy(&body).contains("test.dev"));
|
||||
|
||||
// Get
|
||||
let resp = a
|
||||
.clone()
|
||||
.oneshot(
|
||||
Request::get("/overrides/test.dev")
|
||||
.body(Body::empty())
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(resp.status(), 200);
|
||||
|
||||
// Delete
|
||||
let resp = a
|
||||
.clone()
|
||||
.oneshot(
|
||||
Request::delete("/overrides/test.dev")
|
||||
.body(Body::empty())
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(resp.status().is_success());
|
||||
|
||||
// Verify deleted
|
||||
let resp = a
|
||||
.oneshot(
|
||||
Request::get("/overrides/test.dev")
|
||||
.body(Body::empty())
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(resp.status(), 404);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn cache_list_and_flush() {
|
||||
let ctx = test_ctx().await;
|
||||
let a = router(ctx.clone());
|
||||
|
||||
// List (empty)
|
||||
let resp = a
|
||||
.clone()
|
||||
.oneshot(Request::get("/cache").body(Body::empty()).unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(resp.status(), 200);
|
||||
|
||||
// Flush
|
||||
let resp = a
|
||||
.oneshot(Request::delete("/cache").body(Body::empty()).unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(resp.status().is_success());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn blocking_stats_returns_json() {
|
||||
let ctx = test_ctx().await;
|
||||
let resp = router(ctx)
|
||||
.oneshot(Request::get("/blocking/stats").body(Body::empty()).unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(resp.status(), 200);
|
||||
let body = axum::body::to_bytes(resp.into_body(), 10000).await.unwrap();
|
||||
let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
|
||||
assert!(json["enabled"].is_boolean());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn services_crud() {
|
||||
let ctx = test_ctx().await;
|
||||
let a = router(ctx);
|
||||
|
||||
// Add service
|
||||
let resp = a
|
||||
.clone()
|
||||
.oneshot(
|
||||
Request::post("/services")
|
||||
.header("content-type", "application/json")
|
||||
.body(Body::from(r#"{"name":"testapp","target_port":3000}"#))
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(resp.status().is_success());
|
||||
|
||||
// List
|
||||
let resp = a
|
||||
.clone()
|
||||
.oneshot(Request::get("/services").body(Body::empty()).unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
let body = axum::body::to_bytes(resp.into_body(), 10000).await.unwrap();
|
||||
assert!(String::from_utf8_lossy(&body).contains("testapp"));
|
||||
|
||||
// Delete
|
||||
let resp = a
|
||||
.clone()
|
||||
.oneshot(
|
||||
Request::delete("/services/testapp")
|
||||
.body(Body::empty())
|
||||
.unwrap(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(resp.status().is_success());
|
||||
|
||||
// Verify deleted
|
||||
let resp = a
|
||||
.oneshot(Request::get("/services").body(Body::empty()).unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
let body = axum::body::to_bytes(resp.into_body(), 10000).await.unwrap();
|
||||
assert!(!String::from_utf8_lossy(&body).contains("testapp"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dashboard_returns_html() {
|
||||
let ctx = test_ctx().await;
|
||||
let resp = router(ctx)
|
||||
.oneshot(Request::get("/").body(Body::empty()).unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(resp.status(), 200);
|
||||
let body = axum::body::to_bytes(resp.into_body(), 100000)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(String::from_utf8_lossy(&body).contains("Numa"));
|
||||
}
|
||||
}
|
||||
|
||||
199
src/blocklist.rs
199
src/blocklist.rs
@@ -81,66 +81,70 @@ impl BlocklistStore {
|
||||
if !self.enabled {
|
||||
return false;
|
||||
}
|
||||
|
||||
if let Some(until) = self.paused_until {
|
||||
if Instant::now() < until {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if self.allowlist.contains(domain) {
|
||||
let domain = Self::normalize(domain);
|
||||
if Self::find_in_set(&domain, &self.allowlist).is_some() {
|
||||
return false;
|
||||
}
|
||||
|
||||
if self.domains.contains(domain) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Walk up: ads.tracker.example.com → tracker.example.com → example.com
|
||||
let mut d = domain;
|
||||
while let Some(dot) = d.find('.') {
|
||||
d = &d[dot + 1..];
|
||||
if self.allowlist.contains(d) {
|
||||
return false;
|
||||
}
|
||||
if self.domains.contains(d) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
Self::find_in_set(&domain, &self.domains).is_some()
|
||||
}
|
||||
|
||||
/// Check if a domain is blocked and return the reason.
|
||||
pub fn check(&self, domain: &str) -> BlockCheckResult {
|
||||
let domain = domain.to_lowercase();
|
||||
|
||||
if !self.enabled {
|
||||
return BlockCheckResult::disabled();
|
||||
}
|
||||
|
||||
if self.allowlist.contains(&domain) {
|
||||
return BlockCheckResult::allowed(&domain, "exact match in allowlist");
|
||||
if let Some(until) = self.paused_until {
|
||||
if Instant::now() < until {
|
||||
return BlockCheckResult::disabled();
|
||||
}
|
||||
}
|
||||
|
||||
if self.domains.contains(&domain) {
|
||||
return BlockCheckResult::blocked(&domain, "exact match in blocklist");
|
||||
let domain = Self::normalize(domain);
|
||||
|
||||
if let Some(matched) = Self::find_in_set(&domain, &self.allowlist) {
|
||||
let reason = if matched == domain {
|
||||
"exact match in allowlist"
|
||||
} else {
|
||||
"parent domain in allowlist"
|
||||
};
|
||||
return BlockCheckResult::allowed(matched, reason);
|
||||
}
|
||||
|
||||
let mut d = domain.as_str();
|
||||
while let Some(dot) = d.find('.') {
|
||||
d = &d[dot + 1..];
|
||||
if self.allowlist.contains(d) {
|
||||
return BlockCheckResult::allowed(d, "parent domain in allowlist");
|
||||
}
|
||||
if self.domains.contains(d) {
|
||||
return BlockCheckResult::blocked(d, "parent domain in blocklist");
|
||||
}
|
||||
if let Some(matched) = Self::find_in_set(&domain, &self.domains) {
|
||||
let reason = if matched == domain {
|
||||
"exact match in blocklist"
|
||||
} else {
|
||||
"parent domain in blocklist"
|
||||
};
|
||||
return BlockCheckResult::blocked(matched, reason);
|
||||
}
|
||||
|
||||
BlockCheckResult::not_blocked()
|
||||
}
|
||||
|
||||
fn normalize(domain: &str) -> String {
|
||||
domain.to_lowercase().trim_end_matches('.').to_string()
|
||||
}
|
||||
|
||||
fn find_in_set<'a>(domain: &'a str, set: &HashSet<String>) -> Option<&'a str> {
|
||||
if set.contains(domain) {
|
||||
return Some(domain);
|
||||
}
|
||||
let mut d = domain;
|
||||
while let Some(dot) = d.find('.') {
|
||||
d = &d[dot + 1..];
|
||||
if set.contains(d) {
|
||||
return Some(d);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Atomically swap in a new domain set. Build the set outside the lock,
|
||||
/// then call this to swap — keeps lock hold time sub-microsecond.
|
||||
pub fn swap_domains(&mut self, domains: HashSet<String>, sources: Vec<String>) {
|
||||
@@ -172,17 +176,26 @@ impl BlocklistStore {
|
||||
}
|
||||
|
||||
pub fn add_to_allowlist(&mut self, domain: &str) {
|
||||
self.allowlist.insert(domain.to_lowercase());
|
||||
self.allowlist.insert(Self::normalize(domain));
|
||||
}
|
||||
|
||||
pub fn remove_from_allowlist(&mut self, domain: &str) -> bool {
|
||||
self.allowlist.remove(&domain.to_lowercase())
|
||||
self.allowlist.remove(&Self::normalize(domain))
|
||||
}
|
||||
|
||||
pub fn allowlist(&self) -> Vec<String> {
|
||||
self.allowlist.iter().cloned().collect()
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
let per_slot_overhead = std::mem::size_of::<u64>() + std::mem::size_of::<String>() + 1;
|
||||
let domains_table = self.domains.capacity() * per_slot_overhead;
|
||||
let domains_heap: usize = self.domains.iter().map(|d| d.capacity()).sum();
|
||||
let allow_table = self.allowlist.capacity() * per_slot_overhead;
|
||||
let allow_heap: usize = self.allowlist.iter().map(|d| d.capacity()).sum();
|
||||
domains_table + domains_heap + allow_table + allow_heap
|
||||
}
|
||||
|
||||
pub fn stats(&self) -> BlocklistStats {
|
||||
BlocklistStats {
|
||||
enabled: self.is_enabled(),
|
||||
@@ -234,6 +247,114 @@ pub fn parse_blocklist(text: &str) -> HashSet<String> {
|
||||
domains
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn store_with(domains: &[&str], allowlist: &[&str]) -> BlocklistStore {
|
||||
let mut store = BlocklistStore::new();
|
||||
store.swap_domains(domains.iter().map(|s| s.to_string()).collect(), vec![]);
|
||||
for d in allowlist {
|
||||
store.add_to_allowlist(d);
|
||||
}
|
||||
store
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exact_block() {
|
||||
let store = store_with(&["ads.example.com"], &[]);
|
||||
assert!(store.is_blocked("ads.example.com"));
|
||||
assert!(!store.is_blocked("example.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parent_block_covers_subdomain() {
|
||||
let store = store_with(&["tracker.com"], &[]);
|
||||
assert!(store.is_blocked("tracker.com"));
|
||||
assert!(store.is_blocked("www.tracker.com"));
|
||||
assert!(store.is_blocked("deep.sub.tracker.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exact_allowlist_unblocks() {
|
||||
let store = store_with(&["ads.example.com"], &["ads.example.com"]);
|
||||
assert!(!store.is_blocked("ads.example.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parent_allowlist_unblocks_subdomain() {
|
||||
let store = store_with(&["example.com", "www.example.com"], &["example.com"]);
|
||||
assert!(!store.is_blocked("example.com"));
|
||||
assert!(!store.is_blocked("www.example.com"));
|
||||
assert!(!store.is_blocked("sub.deep.example.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn allowlist_does_not_unblock_sibling() {
|
||||
let store = store_with(
|
||||
&["www.example.com", "ads.example.com"],
|
||||
&["www.example.com"],
|
||||
);
|
||||
assert!(!store.is_blocked("www.example.com"));
|
||||
assert!(store.is_blocked("ads.example.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn check_reports_parent_allowlist() {
|
||||
let store = store_with(
|
||||
&["goatcounter.com", "www.goatcounter.com"],
|
||||
&["goatcounter.com"],
|
||||
);
|
||||
let result = store.check("www.goatcounter.com");
|
||||
assert!(!result.blocked);
|
||||
assert_eq!(result.matched_rule.as_deref(), Some("goatcounter.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn disabled_never_blocks() {
|
||||
let mut store = store_with(&["ads.example.com"], &[]);
|
||||
store.set_enabled(false);
|
||||
assert!(!store.is_blocked("ads.example.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trailing_dot_normalized() {
|
||||
let store = store_with(&["ads.example.com"], &["safe.example.com"]);
|
||||
assert!(store.is_blocked("ads.example.com."));
|
||||
assert!(!store.is_blocked("safe.example.com."));
|
||||
let result = store.check("ads.example.com.");
|
||||
assert!(result.blocked);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn case_insensitive() {
|
||||
let store = store_with(&["ads.example.com"], &["safe.example.com"]);
|
||||
assert!(store.is_blocked("ADS.Example.COM"));
|
||||
assert!(!store.is_blocked("Safe.Example.COM"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn domain_in_neither_list() {
|
||||
let store = store_with(&["ads.example.com"], &[]);
|
||||
let result = store.check("clean.example.org");
|
||||
assert!(!result.blocked);
|
||||
assert_eq!(result.reason, "not in blocklist");
|
||||
assert!(result.matched_rule.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_grows_with_domains() {
|
||||
let mut store = BlocklistStore::new();
|
||||
let empty = store.heap_bytes();
|
||||
let domains: HashSet<String> = ["example.com", "example.org", "test.net"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
store.swap_domains(domains, vec![]);
|
||||
assert!(store.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn download_blocklists(lists: &[String]) -> Vec<(String, String)> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(30))
|
||||
|
||||
258
src/buffer.rs
258
src/buffer.rs
@@ -21,6 +21,13 @@ impl BytePacketBuffer {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_bytes(data: &[u8]) -> Self {
|
||||
let mut buf = Self::new();
|
||||
let len = data.len().min(BUF_SIZE);
|
||||
buf.buf[..len].copy_from_slice(&data[..len]);
|
||||
buf
|
||||
}
|
||||
|
||||
pub fn pos(&self) -> usize {
|
||||
self.pos
|
||||
}
|
||||
@@ -77,6 +84,11 @@ impl BytePacketBuffer {
|
||||
|
||||
/// Read a qname, handling label compression (pointer jumps).
|
||||
/// Converts wire format like [3]www[6]google[3]com[0] into "www.google.com".
|
||||
///
|
||||
/// Label bytes are escaped per RFC 1035 §5.1:
|
||||
/// - literal `.` within a label → `\.`
|
||||
/// - literal `\` → `\\`
|
||||
/// - bytes outside `0x21..=0x7E` (excluding `.` and `\`) → `\DDD` (3-digit decimal)
|
||||
pub fn read_qname(&mut self, outstr: &mut String) -> Result<()> {
|
||||
let mut pos = self.pos();
|
||||
let mut jumped = false;
|
||||
@@ -114,7 +126,18 @@ impl BytePacketBuffer {
|
||||
|
||||
let str_buffer = self.get_range(pos, len as usize)?;
|
||||
for &b in str_buffer {
|
||||
outstr.push(b.to_ascii_lowercase() as char);
|
||||
let c = b.to_ascii_lowercase();
|
||||
match c {
|
||||
b'.' => outstr.push_str("\\."),
|
||||
b'\\' => outstr.push_str("\\\\"),
|
||||
0x21..=0x7E => outstr.push(c as char),
|
||||
_ => {
|
||||
outstr.push('\\');
|
||||
outstr.push((b'0' + c / 100) as char);
|
||||
outstr.push((b'0' + (c / 10) % 10) as char);
|
||||
outstr.push((b'0' + c % 10) as char);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
delim = ".";
|
||||
@@ -156,16 +179,68 @@ impl BytePacketBuffer {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write a qname in wire format, parsing RFC 1035 §5.1 text escapes.
|
||||
/// See `read_qname` for the escape grammar.
|
||||
pub fn write_qname(&mut self, qname: &str) -> Result<()> {
|
||||
for label in qname.split('.') {
|
||||
let len = label.len();
|
||||
if len > 0x3f {
|
||||
return Err("Single label exceeds 63 characters of length".into());
|
||||
if qname.is_empty() || qname == "." {
|
||||
self.write_u8(0)?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let bytes = qname.as_bytes();
|
||||
let mut i = 0;
|
||||
while i < bytes.len() {
|
||||
let len_pos = self.pos;
|
||||
self.write_u8(0)?; // placeholder length byte, backpatched below
|
||||
let body_start = self.pos;
|
||||
|
||||
while i < bytes.len() && bytes[i] != b'.' {
|
||||
let b = bytes[i];
|
||||
if b == b'\\' {
|
||||
i += 1;
|
||||
let c1 = *bytes.get(i).ok_or("trailing backslash in qname")?;
|
||||
if c1.is_ascii_digit() {
|
||||
let c2 = *bytes
|
||||
.get(i + 1)
|
||||
.ok_or("invalid \\DDD escape: expected 3 digits")?;
|
||||
let c3 = *bytes
|
||||
.get(i + 2)
|
||||
.ok_or("invalid \\DDD escape: expected 3 digits")?;
|
||||
if !c2.is_ascii_digit() || !c3.is_ascii_digit() {
|
||||
return Err("invalid \\DDD escape: expected 3 digits".into());
|
||||
}
|
||||
let val =
|
||||
(c1 - b'0') as u16 * 100 + (c2 - b'0') as u16 * 10 + (c3 - b'0') as u16;
|
||||
if val > 255 {
|
||||
return Err(format!("\\DDD escape out of range: {}", val).into());
|
||||
}
|
||||
self.write_u8(val as u8)?;
|
||||
i += 3;
|
||||
} else {
|
||||
// \. \\ and any other \X → literal next byte
|
||||
self.write_u8(c1)?;
|
||||
i += 1;
|
||||
}
|
||||
} else {
|
||||
self.write_u8(b)?;
|
||||
i += 1;
|
||||
}
|
||||
|
||||
if self.pos - body_start > 0x3f {
|
||||
return Err("Single label exceeds 63 characters of length".into());
|
||||
}
|
||||
}
|
||||
|
||||
self.write_u8(len as u8)?;
|
||||
for b in label.as_bytes() {
|
||||
self.write_u8(*b)?;
|
||||
let label_len = self.pos - body_start;
|
||||
if label_len == 0 && i < bytes.len() {
|
||||
// Empty label from leading/consecutive dots — roll back the placeholder.
|
||||
self.pos = len_pos;
|
||||
} else {
|
||||
self.set(len_pos, label_len as u8)?;
|
||||
}
|
||||
|
||||
if i < bytes.len() && bytes[i] == b'.' {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -173,6 +248,16 @@ impl BytePacketBuffer {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_bytes(&mut self, data: &[u8]) -> Result<()> {
|
||||
let end = self.pos + data.len();
|
||||
if end > BUF_SIZE {
|
||||
return Err("End of buffer".into());
|
||||
}
|
||||
self.buf[self.pos..end].copy_from_slice(data);
|
||||
self.pos = end;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set(&mut self, pos: usize, val: u8) -> Result<()> {
|
||||
if pos >= BUF_SIZE {
|
||||
return Err("End of buffer".into());
|
||||
@@ -187,3 +272,160 @@ impl BytePacketBuffer {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn roundtrip(wire: &[u8]) -> String {
|
||||
let mut buf = BytePacketBuffer::from_bytes(wire);
|
||||
let mut out = String::new();
|
||||
buf.read_qname(&mut out).unwrap();
|
||||
out
|
||||
}
|
||||
|
||||
fn write_then_read(text: &str) -> String {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
buf.write_qname(text).unwrap();
|
||||
let wire_end = buf.pos();
|
||||
buf.seek(0).unwrap();
|
||||
let mut out = String::new();
|
||||
buf.read_qname(&mut out).unwrap();
|
||||
assert_eq!(
|
||||
buf.pos(),
|
||||
wire_end,
|
||||
"reader should consume exactly what writer wrote"
|
||||
);
|
||||
out
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_plain_domain() {
|
||||
// [3]www[6]google[3]com[0]
|
||||
let wire = b"\x03www\x06google\x03com\x00";
|
||||
assert_eq!(roundtrip(wire), "www.google.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_label_with_literal_dot_is_escaped() {
|
||||
// fanf2's example: [8]exa.mple[3]com[0] — two labels, first contains 0x2E
|
||||
let wire = b"\x08exa.mple\x03com\x00";
|
||||
assert_eq!(roundtrip(wire), "exa\\.mple.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_label_with_backslash_is_escaped() {
|
||||
// [4]a\bc[3]com[0]
|
||||
let wire = b"\x04a\\bc\x03com\x00";
|
||||
assert_eq!(roundtrip(wire), "a\\\\bc.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_label_with_nonprintable_byte_uses_decimal_escape() {
|
||||
// [4]\x00foo[3]com[0] — null byte at label start
|
||||
let wire = b"\x04\x00foo\x03com\x00";
|
||||
assert_eq!(roundtrip(wire), "\\000foo.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn read_label_with_space_uses_decimal_escape() {
|
||||
// Space (0x20) is outside 0x21..=0x7E, so it must be decimal-escaped.
|
||||
let wire = b"\x05a b c\x00";
|
||||
assert_eq!(roundtrip(wire), "a\\032b\\032c");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_plain_domain() {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
buf.write_qname("www.google.com").unwrap();
|
||||
assert_eq!(&buf.buf[..buf.pos], b"\x03www\x06google\x03com\x00");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_escaped_dot_does_not_split_label() {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
buf.write_qname("exa\\.mple.com").unwrap();
|
||||
assert_eq!(&buf.buf[..buf.pos], b"\x08exa.mple\x03com\x00");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_escaped_backslash() {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
buf.write_qname("a\\\\bc.com").unwrap();
|
||||
assert_eq!(&buf.buf[..buf.pos], b"\x04a\\bc\x03com\x00");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_decimal_escape_yields_raw_byte() {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
buf.write_qname("\\000foo.com").unwrap();
|
||||
assert_eq!(&buf.buf[..buf.pos], b"\x04\x00foo\x03com\x00");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_skips_empty_labels() {
|
||||
// Leading dot — first (empty) label is rolled back.
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
buf.write_qname(".foo.com").unwrap();
|
||||
assert_eq!(&buf.buf[..buf.pos], b"\x03foo\x03com\x00");
|
||||
|
||||
// Consecutive dots — middle empty label is rolled back.
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
buf.write_qname("foo..com").unwrap();
|
||||
assert_eq!(&buf.buf[..buf.pos], b"\x03foo\x03com\x00");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_rejects_out_of_range_decimal_escape() {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
assert!(buf.write_qname("\\999foo.com").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_rejects_trailing_backslash() {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
assert!(buf.write_qname("foo\\").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_rejects_short_decimal_escape() {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
assert!(buf.write_qname("\\1").is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn write_rejects_label_over_63_bytes() {
|
||||
// 64 bytes exceeds the wire-format label cap.
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
assert!(buf.write_qname(&"a".repeat(64)).is_err());
|
||||
|
||||
// 63 bytes is the maximum permitted label length.
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
assert!(buf.write_qname(&"a".repeat(63)).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn roundtrip_preserves_dot_in_label() {
|
||||
assert_eq!(write_then_read("exa\\.mple.com"), "exa\\.mple.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn roundtrip_preserves_backslash_in_label() {
|
||||
assert_eq!(write_then_read("a\\\\b.com"), "a\\\\b.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn roundtrip_preserves_nonprintable_byte() {
|
||||
assert_eq!(write_then_read("\\000foo.com"), "\\000foo.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn root_name_empty_and_dot_both_produce_single_zero() {
|
||||
let mut a = BytePacketBuffer::new();
|
||||
a.write_qname("").unwrap();
|
||||
let mut b = BytePacketBuffer::new();
|
||||
b.write_qname(".").unwrap();
|
||||
assert_eq!(&a.buf[..a.pos], b"\x00");
|
||||
assert_eq!(&b.buf[..b.pos], b"\x00");
|
||||
}
|
||||
}
|
||||
|
||||
98
src/cache.rs
98
src/cache.rs
@@ -5,10 +5,31 @@ use crate::packet::DnsPacket;
|
||||
use crate::question::QueryType;
|
||||
use crate::record::DnsRecord;
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
|
||||
pub enum DnssecStatus {
|
||||
Secure,
|
||||
Insecure,
|
||||
Bogus,
|
||||
#[default]
|
||||
Indeterminate,
|
||||
}
|
||||
|
||||
impl DnssecStatus {
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
DnssecStatus::Secure => "secure",
|
||||
DnssecStatus::Insecure => "insecure",
|
||||
DnssecStatus::Bogus => "bogus",
|
||||
DnssecStatus::Indeterminate => "indeterminate",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct CacheEntry {
|
||||
packet: DnsPacket,
|
||||
inserted_at: Instant,
|
||||
ttl: Duration,
|
||||
dnssec_status: DnssecStatus,
|
||||
}
|
||||
|
||||
/// DNS cache using a two-level map (domain -> query_type -> entry) so that
|
||||
@@ -19,7 +40,6 @@ pub struct DnsCache {
|
||||
max_entries: usize,
|
||||
min_ttl: u32,
|
||||
max_ttl: u32,
|
||||
query_count: u64,
|
||||
}
|
||||
|
||||
impl DnsCache {
|
||||
@@ -30,29 +50,24 @@ impl DnsCache {
|
||||
max_entries,
|
||||
min_ttl,
|
||||
max_ttl,
|
||||
query_count: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn lookup(&mut self, domain: &str, qtype: QueryType) -> Option<DnsPacket> {
|
||||
self.query_count += 1;
|
||||
|
||||
if self.query_count.is_multiple_of(1000) {
|
||||
self.evict_expired();
|
||||
}
|
||||
/// Read-only lookup — expired entries are left in place (cleaned up on insert).
|
||||
pub fn lookup(&self, domain: &str, qtype: QueryType) -> Option<DnsPacket> {
|
||||
self.lookup_with_status(domain, qtype).map(|(pkt, _)| pkt)
|
||||
}
|
||||
|
||||
pub fn lookup_with_status(
|
||||
&self,
|
||||
domain: &str,
|
||||
qtype: QueryType,
|
||||
) -> Option<(DnsPacket, DnssecStatus)> {
|
||||
let type_map = self.entries.get(domain)?;
|
||||
let entry = type_map.get(&qtype)?;
|
||||
|
||||
let elapsed = entry.inserted_at.elapsed();
|
||||
if elapsed >= entry.ttl {
|
||||
// Expired: remove this entry
|
||||
let type_map = self.entries.get_mut(domain).unwrap();
|
||||
type_map.remove(&qtype);
|
||||
self.entry_count -= 1;
|
||||
if type_map.is_empty() {
|
||||
self.entries.remove(domain);
|
||||
}
|
||||
return None;
|
||||
}
|
||||
|
||||
@@ -64,10 +79,20 @@ impl DnsCache {
|
||||
adjust_ttls(&mut packet.authorities, remaining);
|
||||
adjust_ttls(&mut packet.resources, remaining);
|
||||
|
||||
Some(packet)
|
||||
Some((packet, entry.dnssec_status))
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, domain: &str, qtype: QueryType, packet: &DnsPacket) {
|
||||
self.insert_with_status(domain, qtype, packet, DnssecStatus::Indeterminate);
|
||||
}
|
||||
|
||||
pub fn insert_with_status(
|
||||
&mut self,
|
||||
domain: &str,
|
||||
qtype: QueryType,
|
||||
packet: &DnsPacket,
|
||||
dnssec_status: DnssecStatus,
|
||||
) {
|
||||
if self.entry_count >= self.max_entries {
|
||||
self.evict_expired();
|
||||
if self.entry_count >= self.max_entries {
|
||||
@@ -95,6 +120,7 @@ impl DnsCache {
|
||||
packet: packet.clone(),
|
||||
inserted_at: Instant::now(),
|
||||
ttl: Duration::from_secs(min_ttl as u64),
|
||||
dnssec_status,
|
||||
},
|
||||
);
|
||||
}
|
||||
@@ -116,6 +142,26 @@ impl DnsCache {
|
||||
self.entry_count = 0;
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
let outer_slot = std::mem::size_of::<u64>()
|
||||
+ std::mem::size_of::<String>()
|
||||
+ std::mem::size_of::<HashMap<QueryType, CacheEntry>>()
|
||||
+ 1;
|
||||
let mut total = self.entries.capacity() * outer_slot;
|
||||
for (domain, type_map) in &self.entries {
|
||||
total += domain.capacity();
|
||||
let inner_slot = std::mem::size_of::<u64>()
|
||||
+ std::mem::size_of::<QueryType>()
|
||||
+ std::mem::size_of::<CacheEntry>()
|
||||
+ 1;
|
||||
total += type_map.capacity() * inner_slot;
|
||||
for entry in type_map.values() {
|
||||
total += entry.packet.heap_bytes();
|
||||
}
|
||||
}
|
||||
total
|
||||
}
|
||||
|
||||
pub fn remove(&mut self, domain: &str) {
|
||||
let domain_lower = domain.to_lowercase();
|
||||
if let Some(type_map) = self.entries.remove(&domain_lower) {
|
||||
@@ -168,3 +214,23 @@ fn adjust_ttls(records: &mut [DnsRecord], new_ttl: u32) {
|
||||
record.set_ttl(new_ttl);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::packet::DnsPacket;
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_grows_with_entries() {
|
||||
let mut cache = DnsCache::new(100, 1, 3600);
|
||||
let empty = cache.heap_bytes();
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.answers.push(DnsRecord::A {
|
||||
domain: "example.com".into(),
|
||||
addr: "1.2.3.4".parse().unwrap(),
|
||||
ttl: 300,
|
||||
});
|
||||
cache.insert("example.com", QueryType::A, &pkt);
|
||||
assert!(cache.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
392
src/config.rs
392
src/config.rs
@@ -1,7 +1,7 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::path::Path;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use serde::Deserialize;
|
||||
|
||||
@@ -25,6 +25,14 @@ pub struct Config {
|
||||
pub proxy: ProxyConfig,
|
||||
#[serde(default)]
|
||||
pub services: Vec<ServiceConfig>,
|
||||
#[serde(default)]
|
||||
pub lan: LanConfig,
|
||||
#[serde(default)]
|
||||
pub dnssec: DnssecConfig,
|
||||
#[serde(default)]
|
||||
pub dot: DotConfig,
|
||||
#[serde(default)]
|
||||
pub mobile: MobileConfig,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -33,6 +41,12 @@ pub struct ServerConfig {
|
||||
pub bind_addr: String,
|
||||
#[serde(default = "default_api_port")]
|
||||
pub api_port: u16,
|
||||
#[serde(default = "default_api_bind_addr")]
|
||||
pub api_bind_addr: String,
|
||||
/// Where numa writes TLS material (CA, leaf certs, regenerated state).
|
||||
/// Defaults to `crate::data_dir()` (platform-specific system path) if unset.
|
||||
#[serde(default)]
|
||||
pub data_dir: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl Default for ServerConfig {
|
||||
@@ -40,38 +54,154 @@ impl Default for ServerConfig {
|
||||
ServerConfig {
|
||||
bind_addr: default_bind_addr(),
|
||||
api_port: default_api_port(),
|
||||
api_bind_addr: default_api_bind_addr(),
|
||||
data_dir: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_api_bind_addr() -> String {
|
||||
"127.0.0.1".to_string()
|
||||
}
|
||||
|
||||
fn default_bind_addr() -> String {
|
||||
"0.0.0.0:53".to_string()
|
||||
}
|
||||
|
||||
pub const DEFAULT_API_PORT: u16 = 5380;
|
||||
|
||||
fn default_api_port() -> u16 {
|
||||
5380
|
||||
DEFAULT_API_PORT
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Default, PartialEq, Eq, Clone, Copy)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum UpstreamMode {
|
||||
Auto,
|
||||
#[default]
|
||||
Forward,
|
||||
Recursive,
|
||||
}
|
||||
|
||||
impl UpstreamMode {
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
UpstreamMode::Auto => "auto",
|
||||
UpstreamMode::Forward => "forward",
|
||||
UpstreamMode::Recursive => "recursive",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct UpstreamConfig {
|
||||
#[serde(default)]
|
||||
pub mode: UpstreamMode,
|
||||
#[serde(default = "default_upstream_addr")]
|
||||
pub address: String,
|
||||
#[serde(default = "default_upstream_port")]
|
||||
pub port: u16,
|
||||
#[serde(default = "default_timeout_ms")]
|
||||
pub timeout_ms: u64,
|
||||
#[serde(default = "default_root_hints")]
|
||||
pub root_hints: Vec<String>,
|
||||
#[serde(default = "default_prime_tlds")]
|
||||
pub prime_tlds: Vec<String>,
|
||||
#[serde(default = "default_srtt")]
|
||||
pub srtt: bool,
|
||||
}
|
||||
|
||||
impl Default for UpstreamConfig {
|
||||
fn default() -> Self {
|
||||
UpstreamConfig {
|
||||
mode: UpstreamMode::default(),
|
||||
address: default_upstream_addr(),
|
||||
port: default_upstream_port(),
|
||||
timeout_ms: default_timeout_ms(),
|
||||
root_hints: default_root_hints(),
|
||||
prime_tlds: default_prime_tlds(),
|
||||
srtt: default_srtt(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_true() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn default_srtt() -> bool {
|
||||
default_true()
|
||||
}
|
||||
|
||||
fn default_prime_tlds() -> Vec<String> {
|
||||
vec![
|
||||
// gTLDs
|
||||
"com".into(),
|
||||
"net".into(),
|
||||
"org".into(),
|
||||
"info".into(),
|
||||
"io".into(),
|
||||
"dev".into(),
|
||||
"app".into(),
|
||||
"xyz".into(),
|
||||
"me".into(),
|
||||
// EU + European ccTLDs
|
||||
"eu".into(),
|
||||
"uk".into(),
|
||||
"de".into(),
|
||||
"fr".into(),
|
||||
"nl".into(),
|
||||
"it".into(),
|
||||
"es".into(),
|
||||
"pl".into(),
|
||||
"se".into(),
|
||||
"no".into(),
|
||||
"dk".into(),
|
||||
"fi".into(),
|
||||
"at".into(),
|
||||
"be".into(),
|
||||
"ie".into(),
|
||||
"pt".into(),
|
||||
"cz".into(),
|
||||
"ro".into(),
|
||||
"gr".into(),
|
||||
"hu".into(),
|
||||
"bg".into(),
|
||||
"hr".into(),
|
||||
"sk".into(),
|
||||
"si".into(),
|
||||
"lt".into(),
|
||||
"lv".into(),
|
||||
"ee".into(),
|
||||
"ch".into(),
|
||||
"is".into(),
|
||||
// Other major ccTLDs
|
||||
"co".into(),
|
||||
"br".into(),
|
||||
"au".into(),
|
||||
"ca".into(),
|
||||
"jp".into(),
|
||||
]
|
||||
}
|
||||
|
||||
fn default_root_hints() -> Vec<String> {
|
||||
vec![
|
||||
"198.41.0.4".into(), // a.root-servers.net
|
||||
"199.9.14.201".into(), // b.root-servers.net
|
||||
"192.33.4.12".into(), // c.root-servers.net
|
||||
"199.7.91.13".into(), // d.root-servers.net
|
||||
"192.203.230.10".into(), // e.root-servers.net
|
||||
"192.5.5.241".into(), // f.root-servers.net
|
||||
"192.112.36.4".into(), // g.root-servers.net
|
||||
"198.97.190.53".into(), // h.root-servers.net
|
||||
"192.36.148.17".into(), // i.root-servers.net
|
||||
"192.58.128.30".into(), // j.root-servers.net
|
||||
"193.0.14.129".into(), // k.root-servers.net
|
||||
"199.7.83.42".into(), // l.root-servers.net
|
||||
"202.12.27.33".into(), // m.root-servers.net
|
||||
]
|
||||
}
|
||||
|
||||
fn default_upstream_addr() -> String {
|
||||
String::new() // empty = auto-detect from system resolver
|
||||
}
|
||||
@@ -79,7 +209,7 @@ fn default_upstream_port() -> u16 {
|
||||
53
|
||||
}
|
||||
fn default_timeout_ms() -> u64 {
|
||||
3000
|
||||
5000
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
@@ -170,6 +300,8 @@ pub struct ProxyConfig {
|
||||
pub tls_port: u16,
|
||||
#[serde(default = "default_proxy_tld")]
|
||||
pub tld: String,
|
||||
#[serde(default = "default_proxy_bind_addr")]
|
||||
pub bind_addr: String,
|
||||
}
|
||||
|
||||
impl Default for ProxyConfig {
|
||||
@@ -179,10 +311,15 @@ impl Default for ProxyConfig {
|
||||
port: default_proxy_port(),
|
||||
tls_port: default_proxy_tls_port(),
|
||||
tld: default_proxy_tld(),
|
||||
bind_addr: default_proxy_bind_addr(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_proxy_bind_addr() -> String {
|
||||
"127.0.0.1".to_string()
|
||||
}
|
||||
|
||||
fn default_proxy_enabled() -> bool {
|
||||
true
|
||||
}
|
||||
@@ -200,15 +337,252 @@ fn default_proxy_tld() -> String {
|
||||
pub struct ServiceConfig {
|
||||
pub name: String,
|
||||
pub target_port: u16,
|
||||
#[serde(default)]
|
||||
pub routes: Vec<crate::service_store::RouteEntry>,
|
||||
}
|
||||
|
||||
pub fn load_config(path: &str) -> Result<Config> {
|
||||
if !Path::new(path).exists() {
|
||||
return Ok(Config::default());
|
||||
#[derive(Deserialize, Clone)]
|
||||
pub struct LanConfig {
|
||||
#[serde(default = "default_lan_enabled")]
|
||||
pub enabled: bool,
|
||||
#[serde(default = "default_lan_broadcast_interval")]
|
||||
pub broadcast_interval_secs: u64,
|
||||
#[serde(default = "default_lan_peer_timeout")]
|
||||
pub peer_timeout_secs: u64,
|
||||
}
|
||||
|
||||
impl Default for LanConfig {
|
||||
fn default() -> Self {
|
||||
LanConfig {
|
||||
enabled: default_lan_enabled(),
|
||||
broadcast_interval_secs: default_lan_broadcast_interval(),
|
||||
peer_timeout_secs: default_lan_peer_timeout(),
|
||||
}
|
||||
}
|
||||
let contents = std::fs::read_to_string(path)?;
|
||||
let config: Config = toml::from_str(&contents)?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
fn default_lan_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
fn default_lan_broadcast_interval() -> u64 {
|
||||
30
|
||||
}
|
||||
fn default_lan_peer_timeout() -> u64 {
|
||||
90
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Clone, Default)]
|
||||
pub struct DnssecConfig {
|
||||
#[serde(default)]
|
||||
pub enabled: bool,
|
||||
#[serde(default)]
|
||||
pub strict: bool,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Clone)]
|
||||
pub struct DotConfig {
|
||||
#[serde(default)]
|
||||
pub enabled: bool,
|
||||
#[serde(default = "default_dot_port")]
|
||||
pub port: u16,
|
||||
#[serde(default = "default_dot_bind_addr")]
|
||||
pub bind_addr: String,
|
||||
/// Path to TLS certificate (PEM). If None, uses self-signed CA.
|
||||
#[serde(default)]
|
||||
pub cert_path: Option<PathBuf>,
|
||||
/// Path to TLS private key (PEM). If None, uses self-signed CA.
|
||||
#[serde(default)]
|
||||
pub key_path: Option<PathBuf>,
|
||||
}
|
||||
|
||||
impl Default for DotConfig {
|
||||
fn default() -> Self {
|
||||
DotConfig {
|
||||
enabled: false,
|
||||
port: default_dot_port(),
|
||||
bind_addr: default_dot_bind_addr(),
|
||||
cert_path: None,
|
||||
key_path: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_dot_port() -> u16 {
|
||||
853
|
||||
}
|
||||
fn default_dot_bind_addr() -> String {
|
||||
"0.0.0.0".to_string()
|
||||
}
|
||||
|
||||
/// Configuration for the mobile API — a persistent HTTP listener that
|
||||
/// serves a read-only subset of routes (`/health`, `/ca.pem`,
|
||||
/// `/mobileconfig`, `/ca.mobileconfig`) on a LAN-reachable port, for
|
||||
/// consumption by the iOS/Android companion apps.
|
||||
///
|
||||
/// Unlike the main API (port 5380, localhost-only by default, supports
|
||||
/// state-mutating routes), the mobile API is safe to expose on the LAN
|
||||
/// because every route is idempotent and read-only.
|
||||
#[derive(Deserialize, Clone)]
|
||||
pub struct MobileConfig {
|
||||
/// If true, spawn the mobile API listener at startup. **Default false.**
|
||||
/// Opt-in because the listener binds to the LAN by default and exposes
|
||||
/// a few read-only endpoints to any device on the same network (`/health`,
|
||||
/// `/ca.pem`, `/mobileconfig`, `/ca.mobileconfig`). None of those are
|
||||
/// cryptographically sensitive (the CA private key is never served),
|
||||
/// but users should enable this explicitly rather than have a new
|
||||
/// LAN-reachable port appear after an upgrade.
|
||||
#[serde(default)]
|
||||
pub enabled: bool,
|
||||
/// Port for the mobile API. Default 8765.
|
||||
#[serde(default = "default_mobile_port")]
|
||||
pub port: u16,
|
||||
/// Bind address for the mobile API. Default "0.0.0.0" (all interfaces)
|
||||
/// so phones on the LAN can reach it. Set to "127.0.0.1" to restrict
|
||||
/// to localhost — useful if you're running behind another front-end.
|
||||
#[serde(default = "default_mobile_bind_addr")]
|
||||
pub bind_addr: String,
|
||||
}
|
||||
|
||||
impl Default for MobileConfig {
|
||||
fn default() -> Self {
|
||||
MobileConfig {
|
||||
enabled: false,
|
||||
port: default_mobile_port(),
|
||||
bind_addr: default_mobile_bind_addr(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_mobile_port() -> u16 {
|
||||
8765
|
||||
}
|
||||
|
||||
fn default_mobile_bind_addr() -> String {
|
||||
"0.0.0.0".to_string()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn lan_disabled_by_default() {
|
||||
assert!(!LanConfig::default().enabled);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn api_binds_localhost_by_default() {
|
||||
assert_eq!(ServerConfig::default().api_bind_addr, "127.0.0.1");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn proxy_binds_localhost_by_default() {
|
||||
assert_eq!(ProxyConfig::default().bind_addr, "127.0.0.1");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_toml_gives_defaults() {
|
||||
let config: Config = toml::from_str("").unwrap();
|
||||
assert!(!config.lan.enabled);
|
||||
assert_eq!(config.server.api_bind_addr, "127.0.0.1");
|
||||
assert_eq!(config.proxy.bind_addr, "127.0.0.1");
|
||||
assert_eq!(config.server.api_port, ServerConfig::default().api_port);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lan_enabled_parses() {
|
||||
let config: Config = toml::from_str("[lan]\nenabled = true").unwrap();
|
||||
assert!(config.lan.enabled);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn custom_bind_addrs_parse() {
|
||||
let toml = r#"
|
||||
[server]
|
||||
api_bind_addr = "0.0.0.0"
|
||||
[proxy]
|
||||
bind_addr = "0.0.0.0"
|
||||
"#;
|
||||
let config: Config = toml::from_str(toml).unwrap();
|
||||
assert_eq!(config.server.api_bind_addr, "0.0.0.0");
|
||||
assert_eq!(config.proxy.bind_addr, "0.0.0.0");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn service_routes_parse_from_toml() {
|
||||
let toml = r#"
|
||||
[[services]]
|
||||
name = "app"
|
||||
target_port = 3000
|
||||
routes = [
|
||||
{ path = "/api", port = 4000, strip = true },
|
||||
{ path = "/static", port = 5000 },
|
||||
]
|
||||
"#;
|
||||
let config: Config = toml::from_str(toml).unwrap();
|
||||
assert_eq!(config.services.len(), 1);
|
||||
assert_eq!(config.services[0].routes.len(), 2);
|
||||
assert!(config.services[0].routes[0].strip);
|
||||
assert!(!config.services[0].routes[1].strip); // default false
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ConfigLoad {
|
||||
pub config: Config,
|
||||
pub path: String,
|
||||
pub found: bool,
|
||||
}
|
||||
|
||||
fn resolve_path(path: &str) -> String {
|
||||
// canonicalize gives the real absolute path for existing files;
|
||||
// for non-existent files, build an absolute path manually
|
||||
std::fs::canonicalize(path)
|
||||
.or_else(|_| std::env::current_dir().map(|cwd| cwd.join(path)))
|
||||
.unwrap_or_else(|_| Path::new(path).to_path_buf())
|
||||
.to_string_lossy()
|
||||
.to_string()
|
||||
}
|
||||
|
||||
pub fn load_config(path: &str) -> Result<ConfigLoad> {
|
||||
// Try the given path first, then well-known locations (for service mode where cwd is /)
|
||||
let candidates: Vec<std::path::PathBuf> = {
|
||||
let p = Path::new(path);
|
||||
let mut v = vec![p.to_path_buf()];
|
||||
if p.is_relative() {
|
||||
let filename = p.file_name().unwrap_or(p.as_os_str());
|
||||
v.push(crate::config_dir().join(filename));
|
||||
v.push(crate::data_dir().join(filename));
|
||||
}
|
||||
v
|
||||
};
|
||||
|
||||
for candidate in &candidates {
|
||||
match std::fs::read_to_string(candidate) {
|
||||
Ok(contents) => {
|
||||
let resolved = resolve_path(&candidate.to_string_lossy());
|
||||
let config: Config = toml::from_str(&contents)?;
|
||||
return Ok(ConfigLoad {
|
||||
config,
|
||||
path: resolved,
|
||||
found: true,
|
||||
});
|
||||
}
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => continue,
|
||||
Err(e) => return Err(e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
// Show config_dir candidate as the "expected" path — it's actionable
|
||||
let display_path = candidates
|
||||
.get(1)
|
||||
.map(|p| p.to_string_lossy().to_string())
|
||||
.unwrap_or_else(|| resolve_path(path));
|
||||
log::info!("config not found, using defaults (create {})", display_path);
|
||||
Ok(ConfigLoad {
|
||||
config: Config::default(),
|
||||
path: display_path,
|
||||
found: false,
|
||||
})
|
||||
}
|
||||
|
||||
pub type ZoneMap = HashMap<String, HashMap<QueryType, Vec<DnsRecord>>>;
|
||||
|
||||
890
src/ctx.rs
890
src/ctx.rs
@@ -1,119 +1,232 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Mutex;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::{Mutex, RwLock};
|
||||
use std::time::{Duration, Instant, SystemTime};
|
||||
|
||||
use arc_swap::ArcSwap;
|
||||
use log::{debug, error, info, warn};
|
||||
use rustls::ServerConfig;
|
||||
use tokio::net::UdpSocket;
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
type InflightMap = HashMap<(String, QueryType), broadcast::Sender<Option<DnsPacket>>>;
|
||||
|
||||
use crate::blocklist::BlocklistStore;
|
||||
use crate::buffer::BytePacketBuffer;
|
||||
use crate::cache::DnsCache;
|
||||
use crate::config::ZoneMap;
|
||||
use crate::forward::forward_query;
|
||||
use crate::cache::{DnsCache, DnssecStatus};
|
||||
use crate::config::{UpstreamMode, ZoneMap};
|
||||
use crate::forward::{forward_query, Upstream};
|
||||
use crate::header::ResultCode;
|
||||
use crate::health::HealthMeta;
|
||||
use crate::lan::PeerStore;
|
||||
use crate::override_store::OverrideStore;
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::query_log::{QueryLog, QueryLogEntry};
|
||||
use crate::question::QueryType;
|
||||
use crate::record::DnsRecord;
|
||||
use crate::service_store::ServiceStore;
|
||||
use crate::srtt::SrttCache;
|
||||
use crate::stats::{QueryPath, ServerStats};
|
||||
use crate::system_dns::ForwardingRule;
|
||||
|
||||
pub struct ServerCtx {
|
||||
pub socket: UdpSocket,
|
||||
pub zone_map: ZoneMap,
|
||||
pub cache: Mutex<DnsCache>,
|
||||
/// std::sync::RwLock (not tokio) — locks must never be held across .await points.
|
||||
pub cache: RwLock<DnsCache>,
|
||||
pub stats: Mutex<ServerStats>,
|
||||
pub overrides: Mutex<OverrideStore>,
|
||||
pub blocklist: Mutex<BlocklistStore>,
|
||||
pub overrides: RwLock<OverrideStore>,
|
||||
pub blocklist: RwLock<BlocklistStore>,
|
||||
pub query_log: Mutex<QueryLog>,
|
||||
pub services: Mutex<ServiceStore>,
|
||||
pub lan_peers: Mutex<PeerStore>,
|
||||
pub forwarding_rules: Vec<ForwardingRule>,
|
||||
pub upstream: SocketAddr,
|
||||
pub upstream: Mutex<Upstream>,
|
||||
pub upstream_auto: bool,
|
||||
pub upstream_port: u16,
|
||||
pub lan_ip: Mutex<std::net::Ipv4Addr>,
|
||||
pub timeout: Duration,
|
||||
pub proxy_tld: String,
|
||||
pub proxy_tld_suffix: String, // pre-computed ".{tld}" to avoid per-query allocation
|
||||
pub lan_enabled: bool,
|
||||
pub config_path: String,
|
||||
pub config_found: bool,
|
||||
pub config_dir: PathBuf,
|
||||
pub data_dir: PathBuf,
|
||||
pub tls_config: Option<ArcSwap<ServerConfig>>,
|
||||
pub upstream_mode: UpstreamMode,
|
||||
pub root_hints: Vec<SocketAddr>,
|
||||
pub srtt: RwLock<SrttCache>,
|
||||
pub inflight: Mutex<InflightMap>,
|
||||
pub dnssec_enabled: bool,
|
||||
pub dnssec_strict: bool,
|
||||
/// Cached health metadata (version, hostname, DoT config, CA
|
||||
/// fingerprint, features). Shared between the main and mobile
|
||||
/// API `/health` handlers. Built once at startup in `main.rs`.
|
||||
pub health_meta: HealthMeta,
|
||||
/// CA certificate in PEM form, cached at startup. `None` if no
|
||||
/// TLS-using feature is enabled and the CA hasn't been generated.
|
||||
/// Used by `/ca.pem`, `/mobileconfig`, and `/ca.mobileconfig`
|
||||
/// handlers to avoid per-request disk I/O on the hot path.
|
||||
pub ca_pem: Option<String>,
|
||||
}
|
||||
|
||||
pub async fn handle_query(
|
||||
mut buffer: BytePacketBuffer,
|
||||
/// Transport-agnostic DNS resolution. Runs the full pipeline (overrides, blocklist,
|
||||
/// cache, upstream, DNSSEC) and returns the serialized response in a buffer.
|
||||
/// Callers use `.filled()` to get the response bytes without heap allocation.
|
||||
/// Callers are responsible for parsing the incoming buffer into a `DnsPacket`
|
||||
/// (and logging parse errors) before calling this function.
|
||||
pub async fn resolve_query(
|
||||
query: DnsPacket,
|
||||
src_addr: SocketAddr,
|
||||
ctx: &ServerCtx,
|
||||
) -> crate::Result<()> {
|
||||
) -> crate::Result<BytePacketBuffer> {
|
||||
let start = Instant::now();
|
||||
|
||||
let query = match DnsPacket::from_buffer(&mut buffer) {
|
||||
Ok(packet) => packet,
|
||||
Err(e) => {
|
||||
warn!("{} | PARSE ERROR | {}", src_addr, e);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let (qname, qtype) = match query.questions.first() {
|
||||
Some(q) => (q.name.clone(), q.qtype),
|
||||
None => return Ok(()),
|
||||
None => return Err("empty question section".into()),
|
||||
};
|
||||
|
||||
// Pipeline: overrides -> .tld interception -> blocklist -> local zones -> cache -> upstream
|
||||
// Each lock is scoped to avoid holding MutexGuard across await points.
|
||||
let (response, path) = {
|
||||
let override_record = ctx.overrides.lock().unwrap().lookup(&qname);
|
||||
let (response, path, dnssec) = {
|
||||
let override_record = ctx.overrides.read().unwrap().lookup(&qname);
|
||||
if let Some(record) = override_record {
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
resp.answers.push(record);
|
||||
(resp, QueryPath::Overridden)
|
||||
(resp, QueryPath::Overridden, DnssecStatus::Indeterminate)
|
||||
} else if qname == "localhost" || qname.ends_with(".localhost") {
|
||||
// RFC 6761: .localhost always resolves to loopback
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
resp.answers.push(sinkhole_record(
|
||||
&qname,
|
||||
qtype,
|
||||
std::net::Ipv4Addr::LOCALHOST,
|
||||
std::net::Ipv6Addr::LOCALHOST,
|
||||
300,
|
||||
));
|
||||
(resp, QueryPath::Local, DnssecStatus::Indeterminate)
|
||||
} else if is_special_use_domain(&qname) {
|
||||
// RFC 6761/8880: private PTR, DDR, NAT64 — answer locally
|
||||
let resp = special_use_response(&query, &qname, qtype);
|
||||
(resp, QueryPath::Local, DnssecStatus::Indeterminate)
|
||||
} else if !ctx.proxy_tld_suffix.is_empty()
|
||||
&& (qname.ends_with(&ctx.proxy_tld_suffix) || qname == ctx.proxy_tld)
|
||||
{
|
||||
// Resolve .numa: remote clients get LAN IP (can't reach 127.0.0.1), local get loopback
|
||||
let service_name = qname.strip_suffix(&ctx.proxy_tld_suffix).unwrap_or(&qname);
|
||||
let is_remote = !src_addr.ip().is_loopback();
|
||||
let resolve_ip = {
|
||||
let local = ctx.services.lock().unwrap();
|
||||
if local.lookup(service_name).is_some() {
|
||||
if is_remote {
|
||||
*ctx.lan_ip.lock().unwrap()
|
||||
} else {
|
||||
std::net::Ipv4Addr::LOCALHOST
|
||||
}
|
||||
} else {
|
||||
let mut peers = ctx.lan_peers.lock().unwrap();
|
||||
peers
|
||||
.lookup(service_name)
|
||||
.and_then(|(ip, _)| match ip {
|
||||
std::net::IpAddr::V4(v4) => Some(v4),
|
||||
_ => None,
|
||||
})
|
||||
.unwrap_or(std::net::Ipv4Addr::LOCALHOST)
|
||||
}
|
||||
};
|
||||
let v6 = if resolve_ip == std::net::Ipv4Addr::LOCALHOST {
|
||||
std::net::Ipv6Addr::LOCALHOST
|
||||
} else {
|
||||
resolve_ip.to_ipv6_mapped()
|
||||
};
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
match qtype {
|
||||
QueryType::AAAA => resp.answers.push(DnsRecord::AAAA {
|
||||
domain: qname.clone(),
|
||||
addr: std::net::Ipv6Addr::LOCALHOST,
|
||||
ttl: 300,
|
||||
}),
|
||||
_ => resp.answers.push(DnsRecord::A {
|
||||
domain: qname.clone(),
|
||||
addr: std::net::Ipv4Addr::LOCALHOST,
|
||||
ttl: 300,
|
||||
}),
|
||||
}
|
||||
(resp, QueryPath::Local)
|
||||
} else if ctx.blocklist.lock().unwrap().is_blocked(&qname) {
|
||||
resp.answers
|
||||
.push(sinkhole_record(&qname, qtype, resolve_ip, v6, 300));
|
||||
(resp, QueryPath::Local, DnssecStatus::Indeterminate)
|
||||
} else if ctx.blocklist.read().unwrap().is_blocked(&qname) {
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
match qtype {
|
||||
QueryType::AAAA => resp.answers.push(DnsRecord::AAAA {
|
||||
domain: qname.clone(),
|
||||
addr: std::net::Ipv6Addr::UNSPECIFIED,
|
||||
ttl: 60,
|
||||
}),
|
||||
_ => resp.answers.push(DnsRecord::A {
|
||||
domain: qname.clone(),
|
||||
addr: std::net::Ipv4Addr::UNSPECIFIED,
|
||||
ttl: 60,
|
||||
}),
|
||||
}
|
||||
(resp, QueryPath::Blocked)
|
||||
resp.answers.push(sinkhole_record(
|
||||
&qname,
|
||||
qtype,
|
||||
std::net::Ipv4Addr::UNSPECIFIED,
|
||||
std::net::Ipv6Addr::UNSPECIFIED,
|
||||
60,
|
||||
));
|
||||
(resp, QueryPath::Blocked, DnssecStatus::Indeterminate)
|
||||
} else if let Some(records) = ctx.zone_map.get(qname.as_str()).and_then(|m| m.get(&qtype)) {
|
||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||
resp.answers = records.clone();
|
||||
(resp, QueryPath::Local)
|
||||
(resp, QueryPath::Local, DnssecStatus::Indeterminate)
|
||||
} else {
|
||||
let cached = ctx.cache.lock().unwrap().lookup(&qname, qtype);
|
||||
if let Some(cached) = cached {
|
||||
let cached = ctx.cache.read().unwrap().lookup_with_status(&qname, qtype);
|
||||
if let Some((cached, cached_dnssec)) = cached {
|
||||
let mut resp = cached;
|
||||
resp.header.id = query.header.id;
|
||||
(resp, QueryPath::Cached)
|
||||
if cached_dnssec == DnssecStatus::Secure {
|
||||
resp.header.authed_data = true;
|
||||
}
|
||||
(resp, QueryPath::Cached, cached_dnssec)
|
||||
} else if let Some(fwd_addr) =
|
||||
crate::system_dns::match_forwarding_rule(&qname, &ctx.forwarding_rules)
|
||||
{
|
||||
// Conditional forwarding takes priority over recursive mode
|
||||
// (e.g. Tailscale .ts.net, VPC private zones)
|
||||
let upstream = Upstream::Udp(fwd_addr);
|
||||
match forward_query(&query, &upstream, ctx.timeout).await {
|
||||
Ok(resp) => {
|
||||
ctx.cache.write().unwrap().insert(&qname, qtype, &resp);
|
||||
(resp, QueryPath::Forwarded, DnssecStatus::Indeterminate)
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
"{} | {:?} {} | FORWARD ERROR | {}",
|
||||
src_addr, qtype, qname, e
|
||||
);
|
||||
(
|
||||
DnsPacket::response_from(&query, ResultCode::SERVFAIL),
|
||||
QueryPath::UpstreamError,
|
||||
DnssecStatus::Indeterminate,
|
||||
)
|
||||
}
|
||||
}
|
||||
} else if ctx.upstream_mode == UpstreamMode::Recursive {
|
||||
let key = (qname.clone(), qtype);
|
||||
let (resp, path, err) = resolve_coalesced(&ctx.inflight, key, &query, || {
|
||||
crate::recursive::resolve_recursive(
|
||||
&qname,
|
||||
qtype,
|
||||
&ctx.cache,
|
||||
&query,
|
||||
&ctx.root_hints,
|
||||
&ctx.srtt,
|
||||
)
|
||||
})
|
||||
.await;
|
||||
if path == QueryPath::Coalesced {
|
||||
debug!("{} | {:?} {} | COALESCED", src_addr, qtype, qname);
|
||||
} else if path == QueryPath::UpstreamError {
|
||||
error!(
|
||||
"{} | {:?} {} | RECURSIVE ERROR | {}",
|
||||
src_addr,
|
||||
qtype,
|
||||
qname,
|
||||
err.as_deref().unwrap_or("leader failed")
|
||||
);
|
||||
}
|
||||
(resp, path, DnssecStatus::Indeterminate)
|
||||
} else {
|
||||
let upstream =
|
||||
crate::system_dns::match_forwarding_rule(&qname, &ctx.forwarding_rules)
|
||||
.unwrap_or(ctx.upstream);
|
||||
match forward_query(&query, upstream, ctx.timeout).await {
|
||||
match crate::system_dns::match_forwarding_rule(&qname, &ctx.forwarding_rules) {
|
||||
Some(addr) => Upstream::Udp(addr),
|
||||
None => ctx.upstream.lock().unwrap().clone(),
|
||||
};
|
||||
match forward_query(&query, &upstream, ctx.timeout).await {
|
||||
Ok(resp) => {
|
||||
ctx.cache.lock().unwrap().insert(&qname, qtype, &resp);
|
||||
(resp, QueryPath::Forwarded)
|
||||
ctx.cache.write().unwrap().insert(&qname, qtype, &resp);
|
||||
(resp, QueryPath::Forwarded, DnssecStatus::Indeterminate)
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
@@ -123,6 +236,7 @@ pub async fn handle_query(
|
||||
(
|
||||
DnsPacket::response_from(&query, ResultCode::SERVFAIL),
|
||||
QueryPath::UpstreamError,
|
||||
DnssecStatus::Indeterminate,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -130,6 +244,56 @@ pub async fn handle_query(
|
||||
}
|
||||
};
|
||||
|
||||
let client_do = query.edns.as_ref().is_some_and(|e| e.do_bit);
|
||||
let mut response = response;
|
||||
|
||||
// DNSSEC validation (recursive/forwarded responses only)
|
||||
let mut dnssec = dnssec;
|
||||
if ctx.dnssec_enabled && path == QueryPath::Recursive {
|
||||
let (status, vstats) =
|
||||
crate::dnssec::validate_response(&response, &ctx.cache, &ctx.root_hints, &ctx.srtt)
|
||||
.await;
|
||||
|
||||
debug!(
|
||||
"DNSSEC | {} | {:?} | {}ms | dnskey_hit={} dnskey_fetch={} ds_hit={} ds_fetch={}",
|
||||
qname,
|
||||
status,
|
||||
vstats.elapsed_ms,
|
||||
vstats.dnskey_cache_hits,
|
||||
vstats.dnskey_fetches,
|
||||
vstats.ds_cache_hits,
|
||||
vstats.ds_fetches,
|
||||
);
|
||||
|
||||
dnssec = status;
|
||||
|
||||
if status == DnssecStatus::Secure {
|
||||
response.header.authed_data = true;
|
||||
}
|
||||
|
||||
if status == DnssecStatus::Bogus && ctx.dnssec_strict {
|
||||
response = DnsPacket::response_from(&query, ResultCode::SERVFAIL);
|
||||
}
|
||||
|
||||
ctx.cache
|
||||
.write()
|
||||
.unwrap()
|
||||
.insert_with_status(&qname, qtype, &response, status);
|
||||
}
|
||||
|
||||
// Strip DNSSEC records if client didn't set DO bit
|
||||
if !client_do {
|
||||
strip_dnssec_records(&mut response);
|
||||
}
|
||||
|
||||
// Echo EDNS back if client sent it
|
||||
if query.edns.is_some() {
|
||||
response.edns = Some(crate::packet::EdnsOpt {
|
||||
do_bit: client_do,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
|
||||
let elapsed = start.elapsed();
|
||||
|
||||
info!(
|
||||
@@ -149,17 +313,17 @@ pub async fn handle_query(
|
||||
response.resources.len(),
|
||||
);
|
||||
|
||||
// Serialize response
|
||||
// TODO: TC bit is UDP-specific; DoT connections could carry up to 65535 bytes.
|
||||
// Once BytePacketBuffer supports larger buffers, skip truncation for TCP/TLS.
|
||||
let mut resp_buffer = BytePacketBuffer::new();
|
||||
if response.write(&mut resp_buffer).is_err() {
|
||||
// Response too large for UDP — set TC bit and send header + question only
|
||||
// Response too large — set TC bit and send header + question only
|
||||
debug!("response too large, setting TC bit for {}", qname);
|
||||
let mut tc_response = DnsPacket::response_from(&query, response.header.rescode);
|
||||
tc_response.header.truncated_message = true;
|
||||
let mut tc_buffer = BytePacketBuffer::new();
|
||||
tc_response.write(&mut tc_buffer)?;
|
||||
ctx.socket.send_to(tc_buffer.filled(), src_addr).await?;
|
||||
} else {
|
||||
ctx.socket.send_to(resp_buffer.filled(), src_addr).await?;
|
||||
resp_buffer = BytePacketBuffer::new();
|
||||
tc_response.write(&mut resp_buffer)?;
|
||||
}
|
||||
|
||||
// Record stats and query log
|
||||
@@ -179,7 +343,603 @@ pub async fn handle_query(
|
||||
path,
|
||||
rescode: response.header.rescode,
|
||||
latency_us: elapsed.as_micros() as u64,
|
||||
dnssec,
|
||||
});
|
||||
|
||||
Ok(resp_buffer)
|
||||
}
|
||||
|
||||
/// Handle a DNS query received over UDP. Thin wrapper around resolve_query.
|
||||
pub async fn handle_query(
|
||||
mut buffer: BytePacketBuffer,
|
||||
src_addr: SocketAddr,
|
||||
ctx: &ServerCtx,
|
||||
) -> crate::Result<()> {
|
||||
let query = match DnsPacket::from_buffer(&mut buffer) {
|
||||
Ok(packet) => packet,
|
||||
Err(e) => {
|
||||
warn!("{} | PARSE ERROR | {}", src_addr, e);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
match resolve_query(query, src_addr, ctx).await {
|
||||
Ok(resp_buffer) => {
|
||||
ctx.socket.send_to(resp_buffer.filled(), src_addr).await?;
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("{} | RESOLVE ERROR | {}", src_addr, e);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn is_dnssec_record(r: &DnsRecord) -> bool {
|
||||
matches!(
|
||||
r.query_type(),
|
||||
QueryType::RRSIG | QueryType::DNSKEY | QueryType::DS | QueryType::NSEC | QueryType::NSEC3
|
||||
)
|
||||
}
|
||||
|
||||
fn strip_dnssec_records(pkt: &mut DnsPacket) {
|
||||
pkt.answers.retain(|r| !is_dnssec_record(r));
|
||||
pkt.authorities.retain(|r| !is_dnssec_record(r));
|
||||
pkt.resources.retain(|r| !is_dnssec_record(r));
|
||||
}
|
||||
|
||||
/// True when `qname` falls inside a special-use DNS zone that must be
/// answered locally and never forwarded to a public upstream resolver:
/// RFC 6303 locally-served reverse zones, RFC 9462 DDR discovery,
/// RFC 8880 NAT64 discovery, and RFC 6762 mDNS `.local`.
fn is_special_use_domain(qname: &str) -> bool {
    /// True when `name` equals `zone` or is a subdomain of it.
    ///
    /// The previous `ends_with(".zone")` checks missed queries for the zone
    /// apex itself (e.g. an SOA/NS query for "10.in-addr.arpa"), which would
    /// then leak to the upstream resolver.
    fn in_zone(name: &str, zone: &str) -> bool {
        match name.strip_suffix(zone) {
            Some("") => true,                    // exact apex match
            Some(prefix) => prefix.ends_with('.'), // proper subdomain
            None => false,
        }
    }

    if in_zone(qname, "in-addr.arpa") {
        // RFC 6303: private + loopback + link-local reverse DNS
        if in_zone(qname, "10.in-addr.arpa")
            || in_zone(qname, "168.192.in-addr.arpa")
            || in_zone(qname, "127.in-addr.arpa")
            || in_zone(qname, "254.169.in-addr.arpa")
            || in_zone(qname, "0.in-addr.arpa")
            || qname.contains("_dns-sd._udp")
        {
            return true;
        }
        // 172.16-31.x.x (RFC 1918) — extract second octet from reverse name
        if in_zone(qname, "172.in-addr.arpa") {
            if let Some(octet_str) = qname
                .strip_suffix(".172.in-addr.arpa")
                .and_then(|s| s.rsplit('.').next())
            {
                if let Ok(octet) = octet_str.parse::<u8>() {
                    return (16..=31).contains(&octet);
                }
            }
        }
        return false;
    }
    // DDR (RFC 9462)
    if in_zone(qname, "_dns.resolver.arpa") {
        return true;
    }
    // NAT64 (RFC 8880)
    if qname == "ipv4only.arpa" {
        return true;
    }
    // RFC 6762: .local is reserved for mDNS — never forward to upstream
    in_zone(qname, "local")
}
|
||||
|
||||
fn sinkhole_record(
|
||||
domain: &str,
|
||||
qtype: QueryType,
|
||||
v4: std::net::Ipv4Addr,
|
||||
v6: std::net::Ipv6Addr,
|
||||
ttl: u32,
|
||||
) -> DnsRecord {
|
||||
match qtype {
|
||||
QueryType::AAAA => DnsRecord::AAAA {
|
||||
domain: domain.to_string(),
|
||||
addr: v6,
|
||||
ttl,
|
||||
},
|
||||
_ => DnsRecord::A {
|
||||
domain: domain.to_string(),
|
||||
addr: v4,
|
||||
ttl,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Role a caller plays in in-flight query coalescing for one (qname, qtype).
enum Disposition {
    /// First caller for the key: performs the actual resolution and
    /// broadcasts the result (`None` signals failure) to any followers.
    Leader(broadcast::Sender<Option<DnsPacket>>),
    /// Later caller for a key already in flight: waits on the leader's
    /// broadcast instead of resolving again.
    Follower(broadcast::Receiver<Option<DnsPacket>>),
}
|
||||
|
||||
fn acquire_inflight(inflight: &Mutex<InflightMap>, key: (String, QueryType)) -> Disposition {
|
||||
let mut map = inflight.lock().unwrap();
|
||||
if let Some(tx) = map.get(&key) {
|
||||
Disposition::Follower(tx.subscribe())
|
||||
} else {
|
||||
let (tx, _) = broadcast::channel::<Option<DnsPacket>>(1);
|
||||
map.insert(key, tx.clone());
|
||||
Disposition::Leader(tx)
|
||||
}
|
||||
}
|
||||
|
||||
/// Run a resolve function with in-flight coalescing. Multiple concurrent calls
/// for the same key share a single resolution — the first caller (leader)
/// executes `resolve_fn`, and followers wait for the broadcast result.
///
/// Returns `(response, path, error_message)`:
/// - leader success  → (resp, Recursive, None)
/// - follower success → (resp with this caller's id, Coalesced, None)
/// - any failure      → (SERVFAIL echoing the question, UpstreamError,
///   Some(msg) for the leader only — followers get None)
async fn resolve_coalesced<F, Fut>(
    inflight: &Mutex<InflightMap>,
    key: (String, QueryType),
    query: &DnsPacket,
    resolve_fn: F,
) -> (DnsPacket, QueryPath, Option<String>)
where
    F: FnOnce() -> Fut,
    Fut: std::future::Future<Output = crate::Result<DnsPacket>>,
{
    let disposition = acquire_inflight(inflight, key.clone());

    match disposition {
        Disposition::Follower(mut rx) => match rx.recv().await {
            Ok(Some(mut resp)) => {
                // The leader's response carries the leader's query id;
                // rewrite it to this caller's id before returning.
                resp.header.id = query.header.id;
                (resp, QueryPath::Coalesced, None)
            }
            // Leader broadcast None (resolution failed) or the channel
            // closed/lagged — answer SERVFAIL either way.
            _ => (
                DnsPacket::response_from(query, ResultCode::SERVFAIL),
                QueryPath::UpstreamError,
                None,
            ),
        },
        Disposition::Leader(tx) => {
            // Guard removes the in-flight entry when dropped, so the map is
            // cleaned up on every exit path from the resolution below.
            let guard = InflightGuard { inflight, key };
            let result = resolve_fn().await;
            // Remove the key before broadcasting: new arrivals start a fresh
            // resolution while already-subscribed followers still receive.
            drop(guard);

            match result {
                Ok(resp) => {
                    // send() can only fail if no followers remain — ignore.
                    let _ = tx.send(Some(resp.clone()));
                    (resp, QueryPath::Recursive, None)
                }
                Err(e) => {
                    // None tells followers the resolution failed.
                    let _ = tx.send(None);
                    let err_msg = e.to_string();
                    (
                        DnsPacket::response_from(query, ResultCode::SERVFAIL),
                        QueryPath::UpstreamError,
                        Some(err_msg),
                    )
                }
            }
        }
    }
}
|
||||
|
||||
/// RAII guard that removes `key` from the in-flight map when dropped,
/// ensuring the entry is cleaned up on every exit path of the leader's
/// resolution (including early returns and unwinding).
struct InflightGuard<'a> {
    // The shared in-flight map the key lives in.
    inflight: &'a Mutex<InflightMap>,
    // The (qname, qtype) entry to remove on drop.
    key: (String, QueryType),
}
|
||||
|
||||
impl Drop for InflightGuard<'_> {
    /// Remove this guard's key so later queries for the same (qname, qtype)
    /// start a fresh resolution instead of waiting on a dead channel.
    fn drop(&mut self) {
        self.inflight.lock().unwrap().remove(&self.key);
    }
}
|
||||
|
||||
fn special_use_response(query: &DnsPacket, qname: &str, qtype: QueryType) -> DnsPacket {
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
if qname == "ipv4only.arpa" {
|
||||
// RFC 8880: well-known NAT64 addresses
|
||||
let mut resp = DnsPacket::response_from(query, ResultCode::NOERROR);
|
||||
let domain = qname.to_string();
|
||||
match qtype {
|
||||
QueryType::A => {
|
||||
resp.answers.push(DnsRecord::A {
|
||||
domain: domain.clone(),
|
||||
addr: Ipv4Addr::new(192, 0, 0, 170),
|
||||
ttl: 300,
|
||||
});
|
||||
resp.answers.push(DnsRecord::A {
|
||||
domain,
|
||||
addr: Ipv4Addr::new(192, 0, 0, 171),
|
||||
ttl: 300,
|
||||
});
|
||||
}
|
||||
QueryType::AAAA => {
|
||||
resp.answers.push(DnsRecord::AAAA {
|
||||
domain,
|
||||
addr: Ipv6Addr::new(0x0064, 0xff9b, 0, 0, 0, 0, 0xc000, 0x00aa),
|
||||
ttl: 300,
|
||||
});
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
resp
|
||||
} else {
|
||||
DnsPacket::response_from(query, ResultCode::NXDOMAIN)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    //! Unit and integration tests for in-flight query coalescing:
    //! InflightGuard cleanup, leader/follower disposition, broadcast
    //! delivery, and end-to-end resolve_coalesced behavior (success,
    //! failure, and SERVFAIL shape).
    use super::*;
    use std::collections::HashMap;
    use std::net::Ipv4Addr;
    use std::sync::{Arc, Mutex};
    use tokio::sync::broadcast;

    // ---- InflightGuard unit tests ----

    #[test]
    fn inflight_guard_removes_key_on_drop() {
        let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let key = ("example.com".to_string(), QueryType::A);
        let (tx, _) = broadcast::channel::<Option<DnsPacket>>(1);
        map.lock().unwrap().insert(key.clone(), tx);

        assert_eq!(map.lock().unwrap().len(), 1);
        {
            let _guard = InflightGuard {
                inflight: &map,
                key: key.clone(),
            };
        } // guard dropped here
        assert!(map.lock().unwrap().is_empty());
    }

    #[test]
    fn inflight_guard_only_removes_own_key() {
        let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let key_a = ("a.com".to_string(), QueryType::A);
        let key_b = ("b.com".to_string(), QueryType::A);
        let (tx_a, _) = broadcast::channel::<Option<DnsPacket>>(1);
        let (tx_b, _) = broadcast::channel::<Option<DnsPacket>>(1);
        map.lock().unwrap().insert(key_a.clone(), tx_a);
        map.lock().unwrap().insert(key_b.clone(), tx_b);

        {
            let _guard = InflightGuard {
                inflight: &map,
                key: key_a,
            };
        }
        let m = map.lock().unwrap();
        assert_eq!(m.len(), 1);
        assert!(m.contains_key(&key_b));
    }

    #[test]
    fn inflight_guard_same_domain_different_qtype_independent() {
        let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let key_a = ("example.com".to_string(), QueryType::A);
        let key_aaaa = ("example.com".to_string(), QueryType::AAAA);
        let (tx_a, _) = broadcast::channel::<Option<DnsPacket>>(1);
        let (tx_aaaa, _) = broadcast::channel::<Option<DnsPacket>>(1);
        map.lock().unwrap().insert(key_a.clone(), tx_a);
        map.lock().unwrap().insert(key_aaaa.clone(), tx_aaaa);

        {
            let _guard = InflightGuard {
                inflight: &map,
                key: key_a,
            };
        }
        let m = map.lock().unwrap();
        assert_eq!(m.len(), 1);
        assert!(m.contains_key(&key_aaaa));
    }

    // ---- Coalescing disposition tests (via acquire_inflight) ----

    #[test]
    fn first_caller_becomes_leader() {
        let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let key = ("test.com".to_string(), QueryType::A);

        let d = acquire_inflight(&map, key.clone());
        assert!(matches!(d, Disposition::Leader(_)));
        assert_eq!(map.lock().unwrap().len(), 1);
    }

    #[test]
    fn second_caller_becomes_follower() {
        let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let key = ("test.com".to_string(), QueryType::A);

        let _leader = acquire_inflight(&map, key.clone());
        let follower = acquire_inflight(&map, key);
        assert!(matches!(follower, Disposition::Follower(_)));
        // Map still has exactly 1 entry — follower subscribes, doesn't insert
        assert_eq!(map.lock().unwrap().len(), 1);
    }

    #[tokio::test]
    async fn leader_broadcast_reaches_follower() {
        let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let key = ("test.com".to_string(), QueryType::A);

        let leader = acquire_inflight(&map, key.clone());
        let follower = acquire_inflight(&map, key);

        let tx = match leader {
            Disposition::Leader(tx) => tx,
            _ => panic!("expected leader"),
        };
        let mut rx = match follower {
            Disposition::Follower(rx) => rx,
            _ => panic!("expected follower"),
        };

        let mut resp = DnsPacket::new();
        resp.header.id = 42;
        resp.answers.push(DnsRecord::A {
            domain: "test.com".into(),
            addr: Ipv4Addr::new(1, 2, 3, 4),
            ttl: 300,
        });
        let _ = tx.send(Some(resp));

        let received = rx.recv().await.unwrap().unwrap();
        assert_eq!(received.header.id, 42);
        assert_eq!(received.answers.len(), 1);
    }

    #[tokio::test]
    async fn leader_none_signals_failure_to_follower() {
        let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let key = ("test.com".to_string(), QueryType::A);

        let leader = acquire_inflight(&map, key.clone());
        let follower = acquire_inflight(&map, key);

        let tx = match leader {
            Disposition::Leader(tx) => tx,
            _ => panic!("expected leader"),
        };
        let mut rx = match follower {
            Disposition::Follower(rx) => rx,
            _ => panic!("expected follower"),
        };

        // None over the broadcast channel means "resolution failed".
        let _ = tx.send(None);
        assert!(rx.recv().await.unwrap().is_none());
    }

    #[tokio::test]
    async fn multiple_followers_all_receive_via_acquire() {
        let map: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let key = ("multi.com".to_string(), QueryType::A);

        let leader = acquire_inflight(&map, key.clone());
        let f1 = acquire_inflight(&map, key.clone());
        let f2 = acquire_inflight(&map, key.clone());
        let f3 = acquire_inflight(&map, key);

        let tx = match leader {
            Disposition::Leader(tx) => tx,
            _ => panic!("expected leader"),
        };

        let mut resp = DnsPacket::new();
        resp.answers.push(DnsRecord::A {
            domain: "multi.com".into(),
            addr: Ipv4Addr::new(10, 0, 0, 1),
            ttl: 60,
        });
        let _ = tx.send(Some(resp));

        for f in [f1, f2, f3] {
            let mut rx = match f {
                Disposition::Follower(rx) => rx,
                _ => panic!("expected follower"),
            };
            let r = rx.recv().await.unwrap().unwrap();
            assert_eq!(r.answers.len(), 1);
        }
    }

    // ---- Integration: resolve_coalesced with mock futures ----

    // Minimal NOERROR response with a single A record for `domain`.
    fn mock_response(domain: &str) -> DnsPacket {
        let mut resp = DnsPacket::new();
        resp.header.response = true;
        resp.header.rescode = ResultCode::NOERROR;
        resp.answers.push(DnsRecord::A {
            domain: domain.to_string(),
            addr: Ipv4Addr::new(10, 0, 0, 1),
            ttl: 300,
        });
        resp
    }

    #[tokio::test]
    async fn concurrent_queries_coalesce_to_single_resolution() {
        let inflight = Arc::new(Mutex::new(HashMap::new()));
        let resolve_count = Arc::new(std::sync::atomic::AtomicU32::new(0));

        let mut handles = Vec::new();
        for i in 0..5u16 {
            let count = resolve_count.clone();
            let inf = inflight.clone();
            let key = ("coalesce.test".to_string(), QueryType::A);
            let query = DnsPacket::query(100 + i, "coalesce.test", QueryType::A);
            handles.push(tokio::spawn(async move {
                resolve_coalesced(&inf, key, &query, || async {
                    count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                    // Sleep keeps the leader in flight while followers arrive.
                    tokio::time::sleep(Duration::from_millis(200)).await;
                    Ok(mock_response("coalesce.test"))
                })
                .await
            }));
        }

        let mut paths = Vec::new();
        for h in handles {
            let (_, path, _) = h.await.unwrap();
            paths.push(path);
        }

        let actual = resolve_count.load(std::sync::atomic::Ordering::Relaxed);
        assert_eq!(actual, 1, "expected 1 resolution, got {}", actual);

        let recursive = paths.iter().filter(|p| **p == QueryPath::Recursive).count();
        let coalesced = paths.iter().filter(|p| **p == QueryPath::Coalesced).count();
        assert_eq!(recursive, 1, "expected 1 RECURSIVE, got {}", recursive);
        assert_eq!(coalesced, 4, "expected 4 COALESCED, got {}", coalesced);

        assert!(inflight.lock().unwrap().is_empty());
    }

    #[tokio::test]
    async fn different_qtypes_not_coalesced() {
        let inflight = Arc::new(Mutex::new(HashMap::new()));
        let resolve_count = Arc::new(std::sync::atomic::AtomicU32::new(0));

        let inf1 = inflight.clone();
        let inf2 = inflight.clone();
        let count1 = resolve_count.clone();
        let count2 = resolve_count.clone();

        let query_a = DnsPacket::query(200, "same.domain", QueryType::A);
        let query_aaaa = DnsPacket::query(201, "same.domain", QueryType::AAAA);

        let h1 = tokio::spawn(async move {
            resolve_coalesced(
                &inf1,
                ("same.domain".to_string(), QueryType::A),
                &query_a,
                || async {
                    count1.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                    tokio::time::sleep(Duration::from_millis(100)).await;
                    Ok(mock_response("same.domain"))
                },
            )
            .await
        });
        let h2 = tokio::spawn(async move {
            resolve_coalesced(
                &inf2,
                ("same.domain".to_string(), QueryType::AAAA),
                &query_aaaa,
                || async {
                    count2.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                    tokio::time::sleep(Duration::from_millis(100)).await;
                    Ok(mock_response("same.domain"))
                },
            )
            .await
        });

        let (_, path1, _) = h1.await.unwrap();
        let (_, path2, _) = h2.await.unwrap();

        let actual = resolve_count.load(std::sync::atomic::Ordering::Relaxed);
        assert_eq!(actual, 2, "A and AAAA should each resolve, got {}", actual);
        assert_eq!(path1, QueryPath::Recursive);
        assert_eq!(path2, QueryPath::Recursive);

        assert!(inflight.lock().unwrap().is_empty());
    }

    #[tokio::test]
    async fn inflight_map_cleaned_after_error() {
        let inflight: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let query = DnsPacket::query(300, "will-fail.test", QueryType::A);

        let (_, path, _) = resolve_coalesced(
            &inflight,
            ("will-fail.test".to_string(), QueryType::A),
            &query,
            || async { Err::<DnsPacket, _>("upstream timeout".into()) },
        )
        .await;

        assert_eq!(path, QueryPath::UpstreamError);
        assert!(inflight.lock().unwrap().is_empty());
    }

    #[tokio::test]
    async fn follower_gets_servfail_when_leader_fails() {
        let inflight = Arc::new(Mutex::new(HashMap::new()));

        let mut handles = Vec::new();
        for i in 0..3u16 {
            let inf = inflight.clone();
            let query = DnsPacket::query(400 + i, "fail.test", QueryType::A);
            handles.push(tokio::spawn(async move {
                resolve_coalesced(
                    &inf,
                    ("fail.test".to_string(), QueryType::A),
                    &query,
                    || async {
                        tokio::time::sleep(Duration::from_millis(200)).await;
                        Err::<DnsPacket, _>("upstream error".into())
                    },
                )
                .await
            }));
        }

        let mut paths = Vec::new();
        for h in handles {
            let (resp, path, _) = h.await.unwrap();
            assert_eq!(resp.header.rescode, ResultCode::SERVFAIL);
            assert_eq!(
                resp.questions.len(),
                1,
                "SERVFAIL must echo question section"
            );
            assert_eq!(resp.questions[0].name, "fail.test");
            paths.push(path);
        }

        let errors = paths
            .iter()
            .filter(|p| **p == QueryPath::UpstreamError)
            .count();
        assert_eq!(errors, 3, "all 3 should be UpstreamError, got {}", errors);

        assert!(inflight.lock().unwrap().is_empty());
    }

    #[tokio::test]
    async fn servfail_leader_includes_question_section() {
        let inflight: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let query = DnsPacket::query(500, "question.test", QueryType::A);

        let (resp, _, _) = resolve_coalesced(
            &inflight,
            ("question.test".to_string(), QueryType::A),
            &query,
            || async { Err::<DnsPacket, _>("fail".into()) },
        )
        .await;

        assert_eq!(resp.header.rescode, ResultCode::SERVFAIL);
        assert_eq!(
            resp.questions.len(),
            1,
            "SERVFAIL must echo question section"
        );
        assert_eq!(resp.questions[0].name, "question.test");
        assert_eq!(resp.questions[0].qtype, QueryType::A);
        assert_eq!(resp.header.id, 500);
    }

    #[tokio::test]
    async fn leader_error_preserves_message() {
        let inflight: Mutex<InflightMap> = Mutex::new(HashMap::new());
        let query = DnsPacket::query(700, "err-msg.test", QueryType::A);

        let (_, path, err) = resolve_coalesced(
            &inflight,
            ("err-msg.test".to_string(), QueryType::A),
            &query,
            || async { Err::<DnsPacket, _>("connection refused by upstream".into()) },
        )
        .await;

        assert_eq!(path, QueryPath::UpstreamError);
        assert_eq!(
            err.as_deref(),
            Some("connection refused by upstream"),
            "error message must be preserved for logging"
        );
    }
}
|
||||
|
||||
1715
src/dnssec.rs
Normal file
1715
src/dnssec.rs
Normal file
File diff suppressed because it is too large
Load Diff
544
src/dot.rs
Normal file
544
src/dot.rs
Normal file
@@ -0,0 +1,544 @@
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use log::{debug, error, info, warn};
|
||||
use rustls::ServerConfig;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::sync::Semaphore;
|
||||
use tokio_rustls::TlsAcceptor;
|
||||
|
||||
use crate::buffer::BytePacketBuffer;
|
||||
use crate::config::DotConfig;
|
||||
use crate::ctx::{resolve_query, ServerCtx};
|
||||
use crate::header::ResultCode;
|
||||
use crate::packet::DnsPacket;
|
||||
|
||||
// Hard cap on concurrent DoT connections (semaphore in accept_loop).
const MAX_CONNECTIONS: usize = 512;
// How long a connection may sit idle between queries before being closed.
const IDLE_TIMEOUT: Duration = Duration::from_secs(30);
// Upper bound on the TLS handshake itself.
const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(10);
// Upper bound on writing a single framed response.
const WRITE_TIMEOUT: Duration = Duration::from_secs(10);
// Matches BytePacketBuffer::BUF_SIZE — RFC 7858 allows up to 65535 but our
// buffer would silently truncate anything larger.
const MAX_MSG_LEN: usize = 4096;
|
||||
|
||||
fn dot_alpn() -> Vec<Vec<u8>> {
|
||||
vec![b"dot".to_vec()]
|
||||
}
|
||||
|
||||
/// Build a TLS ServerConfig for DoT from user-provided cert/key PEM files.
///
/// Returns Err when either file can't be read, the key file contains no
/// private key, or rustls rejects the cert/key pair.
fn load_tls_config(cert_path: &Path, key_path: &Path) -> crate::Result<Arc<ServerConfig>> {
    // rustls needs a CryptoProvider installed before ServerConfig::builder().
    // The proxy's build_tls_config also does this; we repeat it here because
    // running DoT with user-provided certs while the proxy is disabled would
    // otherwise panic on first handshake (no default provider).
    let _ = rustls::crypto::ring::default_provider().install_default();

    let cert_pem = std::fs::read(cert_path)?;
    let key_pem = std::fs::read(key_path)?;

    // Parse every certificate in the chain; fail on the first bad entry.
    let certs: Vec<_> = rustls_pemfile::certs(&mut &cert_pem[..]).collect::<Result<_, _>>()?;
    let key = rustls_pemfile::private_key(&mut &key_pem[..])?
        .ok_or("no private key found in key file")?;

    let mut config = ServerConfig::builder()
        .with_no_client_auth()
        .with_single_cert(certs, key)?;
    // Advertise the RFC 7858 "dot" ALPN protocol.
    config.alpn_protocols = dot_alpn();

    Ok(Arc::new(config))
}
|
||||
|
||||
/// Build a self-signed DoT TLS config. Can't reuse `ctx.tls_config` (the
|
||||
/// proxy's shared config) because DoT needs its own ALPN advertisement.
|
||||
///
|
||||
/// Pass `proxy_tld` itself as a service name so the cert gets an explicit
|
||||
/// `{tld}.{tld}` SAN (e.g. "numa.numa") matching the ServerName that
|
||||
/// setup-phone's mobileconfig sends as SNI. The `*.{tld}` wildcard alone
|
||||
/// is rejected by strict TLS clients under single-label TLDs (per the
|
||||
/// note in tls.rs::generate_service_cert).
|
||||
fn self_signed_tls(ctx: &ServerCtx) -> Option<Arc<ServerConfig>> {
|
||||
let service_names = [ctx.proxy_tld.clone()];
|
||||
match crate::tls::build_tls_config(&ctx.proxy_tld, &service_names, dot_alpn(), &ctx.data_dir) {
|
||||
Ok(cfg) => Some(cfg),
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"DoT: failed to generate self-signed TLS: {} — DoT disabled",
|
||||
e
|
||||
);
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Start the DNS-over-TLS listener (RFC 7858).
///
/// Prefers user-provided cert/key from `config`; otherwise falls back to a
/// self-signed config. Any TLS or bind failure disables DoT (with a warning)
/// rather than aborting the rest of the server.
pub async fn start_dot(ctx: Arc<ServerCtx>, config: &DotConfig) {
    let tls_config = match (&config.cert_path, &config.key_path) {
        (Some(cert), Some(key)) => match load_tls_config(cert, key) {
            Ok(cfg) => cfg,
            Err(e) => {
                warn!("DoT: failed to load TLS cert/key: {} — DoT disabled", e);
                return;
            }
        },
        // Missing either path → self-signed fallback (already logs on failure).
        _ => match self_signed_tls(&ctx) {
            Some(cfg) => cfg,
            None => return,
        },
    };

    // An unparseable bind address falls back to 0.0.0.0 (all interfaces).
    let bind_addr: IpAddr = config
        .bind_addr
        .parse()
        .unwrap_or(IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED));
    let addr = SocketAddr::new(bind_addr, config.port);
    let listener = match TcpListener::bind(addr).await {
        Ok(l) => l,
        Err(e) => {
            warn!("DoT: could not bind {} ({}) — DoT disabled", addr, e);
            return;
        }
    };
    info!("DoT listening on {}", addr);

    accept_loop(listener, TlsAcceptor::from(tls_config), ctx).await;
}
|
||||
|
||||
/// Accept TCP connections forever, cap concurrency with a semaphore, run the
/// TLS handshake under a timeout, and serve each established stream in its
/// own task via handle_dot_connection.
async fn accept_loop(listener: TcpListener, acceptor: TlsAcceptor, ctx: Arc<ServerCtx>) {
    let semaphore = Arc::new(Semaphore::new(MAX_CONNECTIONS));

    loop {
        let (tcp_stream, remote_addr) = match listener.accept().await {
            Ok(conn) => conn,
            Err(e) => {
                error!("DoT: TCP accept error: {}", e);
                // Back off to avoid tight-looping on persistent failures (e.g. fd exhaustion).
                tokio::time::sleep(Duration::from_millis(100)).await;
                continue;
            }
        };

        // At the connection cap, drop the new connection rather than queue it.
        let permit = match semaphore.clone().try_acquire_owned() {
            Ok(p) => p,
            Err(_) => {
                debug!("DoT: connection limit reached, rejecting {}", remote_addr);
                continue;
            }
        };
        let acceptor = acceptor.clone();
        let ctx = Arc::clone(&ctx);

        tokio::spawn(async move {
            let _permit = permit; // held until task exits

            // Bound the handshake so a stalled client can't pin a permit.
            let tls_stream =
                match tokio::time::timeout(HANDSHAKE_TIMEOUT, acceptor.accept(tcp_stream)).await {
                    Ok(Ok(s)) => s,
                    Ok(Err(e)) => {
                        debug!("DoT: TLS handshake failed from {}: {}", remote_addr, e);
                        return;
                    }
                    Err(_) => {
                        debug!("DoT: TLS handshake timeout from {}", remote_addr);
                        return;
                    }
                };

            handle_dot_connection(tls_stream, remote_addr, &ctx).await;
        });
    }
}
|
||||
|
||||
/// Handle a single persistent DoT connection (RFC 7858).
/// Reads length-prefixed DNS queries until EOF, idle timeout, or error.
///
/// Per query: oversized frames close the connection; unparseable queries get
/// a FORMERR; resolution failures get a SERVFAIL that echoes the question.
async fn handle_dot_connection<S>(mut stream: S, remote_addr: SocketAddr, ctx: &ServerCtx)
where
    S: AsyncReadExt + AsyncWriteExt + Unpin,
{
    loop {
        // Read 2-byte length prefix (RFC 1035 §4.2.2) with idle timeout
        let mut len_buf = [0u8; 2];
        let Ok(Ok(_)) = tokio::time::timeout(IDLE_TIMEOUT, stream.read_exact(&mut len_buf)).await
        else {
            break;
        };
        let msg_len = u16::from_be_bytes(len_buf) as usize;
        if msg_len > MAX_MSG_LEN {
            // Larger than our packet buffer — close rather than truncate.
            debug!("DoT: oversized message {} from {}", msg_len, remote_addr);
            break;
        }

        // Read exactly the advertised number of message bytes.
        let mut buffer = BytePacketBuffer::new();
        let Ok(Ok(_)) =
            tokio::time::timeout(IDLE_TIMEOUT, stream.read_exact(&mut buffer.buf[..msg_len])).await
        else {
            break;
        };

        // Parse query up-front so we can echo its question section in SERVFAIL
        // responses when resolve_query fails.
        let query = match DnsPacket::from_buffer(&mut buffer) {
            Ok(q) => q,
            Err(e) => {
                warn!("{} | PARSE ERROR | {}", remote_addr, e);
                // BytePacketBuffer is zero-initialized, so buf[0..2] reads as 0x0000
                // for sub-2-byte messages — harmless FORMERR with id=0.
                let query_id = u16::from_be_bytes([buffer.buf[0], buffer.buf[1]]);
                let mut resp = DnsPacket::new();
                resp.header.id = query_id;
                resp.header.response = true;
                resp.header.rescode = ResultCode::FORMERR;
                if send_response(&mut stream, &resp, remote_addr)
                    .await
                    .is_err()
                {
                    break;
                }
                continue;
            }
        };

        match resolve_query(query.clone(), remote_addr, ctx).await {
            Ok(resp_buffer) => {
                // Already serialized — just frame it onto the stream.
                if write_framed(&mut stream, resp_buffer.filled())
                    .await
                    .is_err()
                {
                    break;
                }
            }
            Err(e) => {
                warn!("{} | RESOLVE ERROR | {}", remote_addr, e);
                // SERVFAIL that echoes the original question section.
                let resp = DnsPacket::response_from(&query, ResultCode::SERVFAIL);
                if send_response(&mut stream, &resp, remote_addr)
                    .await
                    .is_err()
                {
                    break;
                }
            }
        }
    }
}
|
||||
|
||||
/// Serialize a DNS response and send it framed. Logs serialization failures
|
||||
/// and returns Err so the caller can tear down the connection.
|
||||
async fn send_response<S>(
|
||||
stream: &mut S,
|
||||
resp: &DnsPacket,
|
||||
remote_addr: SocketAddr,
|
||||
) -> std::io::Result<()>
|
||||
where
|
||||
S: AsyncWriteExt + Unpin,
|
||||
{
|
||||
let mut out_buf = BytePacketBuffer::new();
|
||||
if resp.write(&mut out_buf).is_err() {
|
||||
debug!(
|
||||
"DoT: failed to serialize {:?} response for {}",
|
||||
resp.header.rescode, remote_addr
|
||||
);
|
||||
return Err(std::io::Error::other("serialize failed"));
|
||||
}
|
||||
write_framed(stream, out_buf.filled()).await
|
||||
}
|
||||
|
||||
/// Write a DNS message with its 2-byte length prefix, coalesced into one syscall.
|
||||
/// Bounded by WRITE_TIMEOUT so a stalled reader can't indefinitely hold a worker.
|
||||
async fn write_framed<S>(stream: &mut S, msg: &[u8]) -> std::io::Result<()>
|
||||
where
|
||||
S: AsyncWriteExt + Unpin,
|
||||
{
|
||||
let mut out = Vec::with_capacity(2 + msg.len());
|
||||
out.extend_from_slice(&(msg.len() as u16).to_be_bytes());
|
||||
out.extend_from_slice(msg);
|
||||
match tokio::time::timeout(WRITE_TIMEOUT, async {
|
||||
stream.write_all(&out).await?;
|
||||
stream.flush().await
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(result) => result,
|
||||
Err(_) => Err(std::io::Error::other("write timeout")),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Mutex, RwLock};
|
||||
|
||||
use rcgen::{CertificateParams, DnType, KeyPair};
|
||||
use rustls::pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer, ServerName};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
|
||||
use crate::buffer::BytePacketBuffer;
|
||||
use crate::header::ResultCode;
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::question::QueryType;
|
||||
use crate::record::DnsRecord;
|
||||
|
||||
/// Generate a self-signed DoT server config and return its leaf cert DER
|
||||
/// so callers can build matching client configs with arbitrary ALPN.
|
||||
fn test_tls_configs() -> (Arc<ServerConfig>, CertificateDer<'static>) {
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
// Mirror production self_signed_tls SAN shape: *.numa wildcard plus
|
||||
// explicit numa.numa apex (the ServerName setup-phone uses as SNI).
|
||||
let key_pair = KeyPair::generate().unwrap();
|
||||
let mut params = CertificateParams::default();
|
||||
params
|
||||
.distinguished_name
|
||||
.push(DnType::CommonName, "Numa .numa services");
|
||||
params.subject_alt_names = vec![
|
||||
rcgen::SanType::DnsName("*.numa".try_into().unwrap()),
|
||||
rcgen::SanType::DnsName("numa.numa".try_into().unwrap()),
|
||||
];
|
||||
let cert = params.self_signed(&key_pair).unwrap();
|
||||
|
||||
let cert_der = CertificateDer::from(cert.der().to_vec());
|
||||
let key_der = PrivateKeyDer::Pkcs8(PrivatePkcs8KeyDer::from(key_pair.serialize_der()));
|
||||
|
||||
let mut server_config = ServerConfig::builder()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(vec![cert_der.clone()], key_der)
|
||||
.unwrap();
|
||||
server_config.alpn_protocols = dot_alpn();
|
||||
|
||||
(Arc::new(server_config), cert_der)
|
||||
}
|
||||
|
||||
/// Build a TLS client config that trusts `cert_der` and advertises the
|
||||
/// given ALPN protocols. Used by tests to vary ALPN per test case.
|
||||
fn dot_client(
|
||||
cert_der: &CertificateDer<'static>,
|
||||
alpn: Vec<Vec<u8>>,
|
||||
) -> Arc<rustls::ClientConfig> {
|
||||
let mut root_store = rustls::RootCertStore::empty();
|
||||
root_store.add(cert_der.clone()).unwrap();
|
||||
let mut config = rustls::ClientConfig::builder()
|
||||
.with_root_certificates(root_store)
|
||||
.with_no_client_auth();
|
||||
config.alpn_protocols = alpn;
|
||||
Arc::new(config)
|
||||
}
|
||||
|
||||
/// Spin up a DoT listener with a test TLS config. Returns the bind addr
/// and the leaf cert DER so callers can build clients with arbitrary ALPN.
/// The upstream is pointed at a bound-but-unresponsive UDP socket we own, so
/// any query that escapes to the upstream path times out deterministically
/// (SERVFAIL) regardless of what the host has running on port 53.
async fn spawn_dot_server() -> (SocketAddr, CertificateDer<'static>) {
    let (server_tls, cert_der) = test_tls_configs();

    let socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await.unwrap();
    // Bind an unresponsive upstream and leak it so it lives for the test duration.
    let blackhole = Box::leak(Box::new(std::net::UdpSocket::bind("127.0.0.1:0").unwrap()));
    let upstream_addr = blackhole.local_addr().unwrap();
    let ctx = Arc::new(ServerCtx {
        socket,
        // Single static A record so tests have one locally-resolvable name
        // ("dot-test.example" -> 10.0.0.1, TTL 300).
        zone_map: {
            let mut m = HashMap::new();
            let mut inner = HashMap::new();
            inner.insert(
                QueryType::A,
                vec![DnsRecord::A {
                    domain: "dot-test.example".to_string(),
                    addr: std::net::Ipv4Addr::new(10, 0, 0, 1),
                    ttl: 300,
                }],
            );
            m.insert("dot-test.example".to_string(), inner);
            m
        },
        cache: RwLock::new(crate::cache::DnsCache::new(100, 60, 86400)),
        stats: Mutex::new(crate::stats::ServerStats::new()),
        overrides: RwLock::new(crate::override_store::OverrideStore::new()),
        blocklist: RwLock::new(crate::blocklist::BlocklistStore::new()),
        query_log: Mutex::new(crate::query_log::QueryLog::new(100)),
        services: Mutex::new(crate::service_store::ServiceStore::new()),
        lan_peers: Mutex::new(crate::lan::PeerStore::new(90)),
        forwarding_rules: Vec::new(),
        // Points at the blackhole socket above, never a real resolver.
        upstream: Mutex::new(crate::forward::Upstream::Udp(upstream_addr)),
        upstream_auto: false,
        upstream_port: 53,
        lan_ip: Mutex::new(std::net::Ipv4Addr::LOCALHOST),
        // Short timeout so queries that escape to the blackhole upstream
        // fail fast instead of stalling the test.
        timeout: Duration::from_millis(200),
        proxy_tld: "numa".to_string(),
        proxy_tld_suffix: ".numa".to_string(),
        lan_enabled: false,
        config_path: String::new(),
        config_found: false,
        config_dir: std::path::PathBuf::from("/tmp"),
        data_dir: std::path::PathBuf::from("/tmp"),
        tls_config: Some(arc_swap::ArcSwap::from(server_tls)),
        upstream_mode: crate::config::UpstreamMode::Forward,
        root_hints: Vec::new(),
        srtt: RwLock::new(crate::srtt::SrttCache::new(true)),
        inflight: Mutex::new(HashMap::new()),
        dnssec_enabled: false,
        dnssec_strict: false,
        health_meta: crate::health::HealthMeta::test_fixture(),
        ca_pem: None,
    });

    // Port 0 → OS-assigned port; report it back to the caller.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();

    // Snapshot the current TLS config out of the ArcSwap for the acceptor.
    let tls_config = Arc::clone(&*ctx.tls_config.as_ref().unwrap().load());
    let acceptor = TlsAcceptor::from(tls_config);

    tokio::spawn(accept_loop(listener, acceptor, ctx));

    (addr, cert_der)
}
|
||||
|
||||
/// Open a TLS connection to the DoT server and return the stream.
|
||||
/// Uses SNI "numa.numa" to mirror what setup-phone's mobileconfig sends.
|
||||
async fn dot_connect(
|
||||
addr: SocketAddr,
|
||||
client_config: &Arc<rustls::ClientConfig>,
|
||||
) -> tokio_rustls::client::TlsStream<tokio::net::TcpStream> {
|
||||
let connector = tokio_rustls::TlsConnector::from(Arc::clone(client_config));
|
||||
let tcp = tokio::net::TcpStream::connect(addr).await.unwrap();
|
||||
connector
|
||||
.connect(ServerName::try_from("numa.numa").unwrap(), tcp)
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Send a DNS query over a DoT stream and read the response.
|
||||
async fn dot_exchange(
|
||||
stream: &mut tokio_rustls::client::TlsStream<tokio::net::TcpStream>,
|
||||
query: &DnsPacket,
|
||||
) -> DnsPacket {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
query.write(&mut buf).unwrap();
|
||||
let msg = buf.filled();
|
||||
|
||||
let mut out = Vec::with_capacity(2 + msg.len());
|
||||
out.extend_from_slice(&(msg.len() as u16).to_be_bytes());
|
||||
out.extend_from_slice(msg);
|
||||
stream.write_all(&out).await.unwrap();
|
||||
|
||||
let mut len_buf = [0u8; 2];
|
||||
stream.read_exact(&mut len_buf).await.unwrap();
|
||||
let resp_len = u16::from_be_bytes(len_buf) as usize;
|
||||
|
||||
let mut data = vec![0u8; resp_len];
|
||||
stream.read_exact(&mut data).await.unwrap();
|
||||
|
||||
let mut resp_buf = BytePacketBuffer::from_bytes(&data);
|
||||
DnsPacket::from_buffer(&mut resp_buf).unwrap()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_resolves_local_zone() {
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, dot_alpn());
|
||||
let mut stream = dot_connect(addr, &client_config).await;
|
||||
|
||||
let query = DnsPacket::query(0x1234, "dot-test.example", QueryType::A);
|
||||
let resp = dot_exchange(&mut stream, &query).await;
|
||||
|
||||
assert_eq!(resp.header.id, 0x1234);
|
||||
assert!(resp.header.response);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
assert_eq!(resp.answers.len(), 1);
|
||||
match &resp.answers[0] {
|
||||
DnsRecord::A { domain, addr, ttl } => {
|
||||
assert_eq!(domain, "dot-test.example");
|
||||
assert_eq!(*addr, std::net::Ipv4Addr::new(10, 0, 0, 1));
|
||||
assert_eq!(*ttl, 300);
|
||||
}
|
||||
other => panic!("expected A record, got {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_multiple_queries_on_persistent_connection() {
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, dot_alpn());
|
||||
let mut stream = dot_connect(addr, &client_config).await;
|
||||
|
||||
for i in 0..3u16 {
|
||||
let query = DnsPacket::query(0xA000 + i, "dot-test.example", QueryType::A);
|
||||
let resp = dot_exchange(&mut stream, &query).await;
|
||||
assert_eq!(resp.header.id, 0xA000 + i);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
assert_eq!(resp.answers.len(), 1);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_nxdomain_for_unknown() {
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, dot_alpn());
|
||||
let mut stream = dot_connect(addr, &client_config).await;
|
||||
|
||||
let query = DnsPacket::query(0xBEEF, "nonexistent.test", QueryType::A);
|
||||
let resp = dot_exchange(&mut stream, &query).await;
|
||||
|
||||
assert_eq!(resp.header.id, 0xBEEF);
|
||||
assert!(resp.header.response);
|
||||
// Query goes to the blackhole upstream which never replies → SERVFAIL.
|
||||
// The SERVFAIL response echoes the question section.
|
||||
assert_eq!(resp.header.rescode, ResultCode::SERVFAIL);
|
||||
assert_eq!(resp.questions.len(), 1);
|
||||
assert_eq!(resp.questions[0].name, "nonexistent.test");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_negotiates_alpn() {
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, dot_alpn());
|
||||
let stream = dot_connect(addr, &client_config).await;
|
||||
let (_io, conn) = stream.get_ref();
|
||||
assert_eq!(conn.alpn_protocol(), Some(&b"dot"[..]));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_rejects_non_dot_alpn() {
|
||||
// Cross-protocol confusion defense: a client that only offers "h2"
|
||||
// (e.g. an HTTP/2 client mistakenly hitting :853) must not complete
|
||||
// a TLS handshake with the DoT server. Verifies the rustls server
|
||||
// sends `no_application_protocol` rather than silently negotiating.
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, vec![b"h2".to_vec()]);
|
||||
let connector = tokio_rustls::TlsConnector::from(client_config);
|
||||
let tcp = tokio::net::TcpStream::connect(addr).await.unwrap();
|
||||
let result = connector
|
||||
.connect(ServerName::try_from("numa.numa").unwrap(), tcp)
|
||||
.await;
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"DoT server must reject ALPN that doesn't include \"dot\""
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn dot_concurrent_connections() {
|
||||
let (addr, cert_der) = spawn_dot_server().await;
|
||||
let client_config = dot_client(&cert_der, dot_alpn());
|
||||
|
||||
let mut handles = Vec::new();
|
||||
for i in 0..5u16 {
|
||||
let cfg = Arc::clone(&client_config);
|
||||
handles.push(tokio::spawn(async move {
|
||||
let mut stream = dot_connect(addr, &cfg).await;
|
||||
let query = DnsPacket::query(0xC000 + i, "dot-test.example", QueryType::A);
|
||||
let resp = dot_exchange(&mut stream, &query).await;
|
||||
assert_eq!(resp.header.id, 0xC000 + i);
|
||||
assert_eq!(resp.header.rescode, ResultCode::NOERROR);
|
||||
assert_eq!(resp.answers.len(), 1);
|
||||
}));
|
||||
}
|
||||
|
||||
for h in handles {
|
||||
h.await.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
239
src/forward.rs
239
src/forward.rs
@@ -1,3 +1,4 @@
|
||||
use std::fmt;
|
||||
use std::net::SocketAddr;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -8,7 +9,46 @@ use crate::buffer::BytePacketBuffer;
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::Result;
|
||||
|
||||
/// Transport used to reach the configured upstream resolver.
///
/// `Clone` is derived so the active upstream can be snapshotted out of the
/// `Mutex<Upstream>` held on `ServerCtx`; per reqwest's documentation a
/// `Client` is internally reference-counted, so cloning is cheap and the
/// connection pool is shared.
#[derive(Clone)]
pub enum Upstream {
    /// Classic DNS over UDP to this socket address.
    Udp(SocketAddr),
    /// DNS over HTTPS: POST wireformat queries to `url` via `client`.
    Doh {
        url: String,
        client: reqwest::Client,
    },
}
|
||||
|
||||
impl PartialEq for Upstream {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
match (self, other) {
|
||||
(Self::Udp(a), Self::Udp(b)) => a == b,
|
||||
(Self::Doh { url: a, .. }, Self::Doh { url: b, .. }) => a == b,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Upstream {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Upstream::Udp(addr) => write!(f, "{}", addr),
|
||||
Upstream::Doh { url, .. } => f.write_str(url),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn forward_query(
|
||||
query: &DnsPacket,
|
||||
upstream: &Upstream,
|
||||
timeout_duration: Duration,
|
||||
) -> Result<DnsPacket> {
|
||||
match upstream {
|
||||
Upstream::Udp(addr) => forward_udp(query, *addr, timeout_duration).await,
|
||||
Upstream::Doh { url, client } => forward_doh(query, url, client, timeout_duration).await,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn forward_udp(
|
||||
query: &DnsPacket,
|
||||
upstream: SocketAddr,
|
||||
timeout_duration: Duration,
|
||||
@@ -33,3 +73,202 @@ pub async fn forward_query(
|
||||
|
||||
DnsPacket::from_buffer(&mut recv_buffer)
|
||||
}
|
||||
|
||||
/// DNS over TCP (RFC 1035 §4.2.2): 2-byte length prefix, then the DNS message.
|
||||
pub(crate) async fn forward_tcp(
|
||||
query: &DnsPacket,
|
||||
upstream: SocketAddr,
|
||||
timeout_duration: Duration,
|
||||
) -> Result<DnsPacket> {
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
|
||||
let mut send_buffer = BytePacketBuffer::new();
|
||||
query.write(&mut send_buffer)?;
|
||||
let msg = send_buffer.filled();
|
||||
|
||||
let mut stream = timeout(timeout_duration, TcpStream::connect(upstream)).await??;
|
||||
|
||||
// Single write: Microsoft/Azure DNS servers close TCP connections on split segments
|
||||
let mut outbuf = Vec::with_capacity(2 + msg.len());
|
||||
outbuf.extend_from_slice(&(msg.len() as u16).to_be_bytes());
|
||||
outbuf.extend_from_slice(msg);
|
||||
stream.write_all(&outbuf).await?;
|
||||
|
||||
// Read length-prefixed response
|
||||
let mut len_buf = [0u8; 2];
|
||||
timeout(timeout_duration, stream.read_exact(&mut len_buf)).await??;
|
||||
let resp_len = u16::from_be_bytes(len_buf) as usize;
|
||||
|
||||
let mut data = vec![0u8; resp_len];
|
||||
timeout(timeout_duration, stream.read_exact(&mut data)).await??;
|
||||
|
||||
let mut recv_buffer = BytePacketBuffer::from_bytes(&data);
|
||||
DnsPacket::from_buffer(&mut recv_buffer)
|
||||
}
|
||||
|
||||
async fn forward_doh(
|
||||
query: &DnsPacket,
|
||||
url: &str,
|
||||
client: &reqwest::Client,
|
||||
timeout_duration: Duration,
|
||||
) -> Result<DnsPacket> {
|
||||
let mut send_buffer = BytePacketBuffer::new();
|
||||
query.write(&mut send_buffer)?;
|
||||
|
||||
let resp = timeout(
|
||||
timeout_duration,
|
||||
client
|
||||
.post(url)
|
||||
.header("content-type", "application/dns-message")
|
||||
.header("accept", "application/dns-message")
|
||||
.body(send_buffer.filled().to_vec())
|
||||
.send(),
|
||||
)
|
||||
.await??
|
||||
.error_for_status()?;
|
||||
|
||||
let bytes = resp.bytes().await?;
|
||||
log::debug!("DoH response: {} bytes", bytes.len());
|
||||
|
||||
let mut recv_buffer = BytePacketBuffer::from_bytes(&bytes);
|
||||
DnsPacket::from_buffer(&mut recv_buffer)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::future::IntoFuture;

    use crate::header::ResultCode;
    use crate::question::QueryType;
    use crate::record::DnsRecord;

    // Display for a UDP upstream is the bare socket address.
    #[test]
    fn upstream_display_udp() {
        let u = Upstream::Udp("9.9.9.9:53".parse().unwrap());
        assert_eq!(u.to_string(), "9.9.9.9:53");
    }

    // Display for a DoH upstream is the endpoint URL only (client omitted).
    #[test]
    fn upstream_display_doh() {
        let u = Upstream::Doh {
            url: "https://dns.quad9.net/dns-query".to_string(),
            client: reqwest::Client::new(),
        };
        assert_eq!(u.to_string(), "https://dns.quad9.net/dns-query");
    }

    // Fixed A-query fixture (id 0xABCD) shared by all DoH tests below.
    fn make_query() -> DnsPacket {
        DnsPacket::query(0xABCD, "example.com", QueryType::A)
    }

    // Canned NOERROR response with a single A answer for the fixture query.
    fn make_response(query: &DnsPacket) -> DnsPacket {
        let mut resp = DnsPacket::response_from(query, ResultCode::NOERROR);
        resp.answers.push(DnsRecord::A {
            domain: "example.com".to_string(),
            addr: "93.184.216.34".parse().unwrap(),
            ttl: 300,
        });
        resp
    }

    // Serialize a packet to its raw DNS wire bytes.
    fn to_wire(pkt: &DnsPacket) -> Vec<u8> {
        let mut buf = BytePacketBuffer::new();
        pkt.write(&mut buf).unwrap();
        buf.filled().to_vec()
    }

    // Happy path: a mock axum server returns canned wireformat bytes and
    // forward_query parses them into the expected packet.
    #[tokio::test]
    async fn doh_mock_server_resolves() {
        let query = make_query();
        let response_bytes = to_wire(&make_response(&query));

        let app = axum::Router::new().route(
            "/dns-query",
            axum::routing::post(move || {
                let body = response_bytes.clone();
                async move {
                    (
                        [(axum::http::header::CONTENT_TYPE, "application/dns-message")],
                        body,
                    )
                }
            }),
        );

        // Port 0 → ephemeral port; serve on a background task.
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        tokio::spawn(axum::serve(listener, app).into_future());

        let upstream = Upstream::Doh {
            url: format!("http://{}/dns-query", addr),
            client: reqwest::Client::new(),
        };

        let result = forward_query(&query, &upstream, Duration::from_secs(2))
            .await
            .expect("DoH forward should succeed");

        assert_eq!(result.header.id, 0xABCD);
        assert!(result.header.response);
        assert_eq!(result.header.rescode, ResultCode::NOERROR);
        assert_eq!(result.answers.len(), 1);
        match &result.answers[0] {
            DnsRecord::A { domain, addr, ttl } => {
                assert_eq!(domain, "example.com");
                assert_eq!(
                    *addr,
                    "93.184.216.34".parse::<std::net::Ipv4Addr>().unwrap()
                );
                assert_eq!(*ttl, 300);
            }
            other => panic!("expected A record, got {:?}", other),
        }
    }

    // A non-2xx HTTP status must surface as Err (via error_for_status).
    #[tokio::test]
    async fn doh_http_error_propagates() {
        let app = axum::Router::new().route(
            "/dns-query",
            axum::routing::post(|| async {
                (axum::http::StatusCode::INTERNAL_SERVER_ERROR, "bad")
            }),
        );

        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        tokio::spawn(axum::serve(listener, app).into_future());

        let upstream = Upstream::Doh {
            url: format!("http://{}/dns-query", addr),
            client: reqwest::Client::new(),
        };

        let result = forward_query(&make_query(), &upstream, Duration::from_secs(2)).await;
        assert!(result.is_err());
    }

    // A handler that sleeps far past the 100ms budget must yield Err.
    #[tokio::test]
    async fn doh_timeout() {
        let app = axum::Router::new().route(
            "/dns-query",
            axum::routing::post(|| async {
                tokio::time::sleep(Duration::from_secs(10)).await;
                "never"
            }),
        );

        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        tokio::spawn(axum::serve(listener, app).into_future());

        let upstream = Upstream::Doh {
            url: format!("http://{}/dns-query", addr),
            client: reqwest::Client::new(),
        };

        let result = forward_query(&make_query(), &upstream, Duration::from_millis(100)).await;
        assert!(result.is_err());
    }
}
|
||||
|
||||
254
src/health.rs
Normal file
254
src/health.rs
Normal file
@@ -0,0 +1,254 @@
|
||||
//! Health metadata and `/health` response shape, shared between the main
|
||||
//! HTTP API and the mobile API.
|
||||
//!
|
||||
//! The static fields (version, hostname, DoT config, CA fingerprint,
|
||||
//! feature list) are computed once at startup and stored in [`HealthMeta`]
|
||||
//! on `ServerCtx`. Per-request fields (uptime, LAN IP) are computed live.
|
||||
//! Both handlers call [`HealthResponse::build`] to assemble the JSON
|
||||
//! response from `HealthMeta` + live inputs.
|
||||
//!
|
||||
//! JSON schema is documented in `docs/implementation/ios-companion-app.md`
|
||||
//! §4.2. The iOS companion app's `HealthInfo` struct is the canonical
|
||||
//! consumer; any change to this response must keep that struct decoding
|
||||
//! cleanly (all consumed fields are optional on the Swift side, but
|
||||
//! `lan_ip` is load-bearing for the pipeline).
|
||||
|
||||
use std::net::Ipv4Addr;
|
||||
use std::path::Path;
|
||||
use std::time::Instant;
|
||||
|
||||
use ring::digest::{digest, SHA256};
|
||||
use serde::Serialize;
|
||||
|
||||
/// Immutable health metadata cached on `ServerCtx`. Built once at startup
/// from config + file-system state (CA cert).
#[derive(Clone)]
pub struct HealthMeta {
    /// Crate version baked in at compile time (`CARGO_PKG_VERSION`).
    pub version: &'static str,
    /// Host machine name, detected once at startup.
    pub hostname: String,
    /// TLS server name clients should use ("numa.numa").
    pub sni: String,
    /// Whether the DNS-over-TLS listener is enabled.
    pub dot_enabled: bool,
    /// Configured DoT port (surfaced only when `dot_enabled`).
    pub dot_port: u16,
    /// Port the HTTP API listens on.
    pub api_port: u16,
    /// SHA-256 hex fingerprint of the CA PEM file, if one exists on disk.
    pub ca_fingerprint_sha256: Option<String>,
    /// Enabled feature names, in a stable order.
    pub features: Vec<String>,
    /// Process start time; uptime is computed from this per request.
    pub started_at: Instant,
}
|
||||
|
||||
impl HealthMeta {
|
||||
/// Minimal `HealthMeta` for unit tests that construct a `ServerCtx`
|
||||
/// without needing the real startup flow (CA file reads, hostname
|
||||
/// detection, etc.). Deterministic values so test JSON assertions
|
||||
/// stay stable.
|
||||
#[cfg(test)]
|
||||
pub fn test_fixture() -> Self {
|
||||
HealthMeta {
|
||||
version: env!("CARGO_PKG_VERSION"),
|
||||
hostname: "test-host".to_string(),
|
||||
sni: "numa.numa".to_string(),
|
||||
dot_enabled: false,
|
||||
dot_port: 853,
|
||||
api_port: 8765,
|
||||
ca_fingerprint_sha256: None,
|
||||
features: vec![],
|
||||
started_at: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Build a new HealthMeta from config + startup-time environment.
|
||||
/// Call once at server boot; the returned value is cheap to clone
|
||||
/// (small number of short strings) and lives on `ServerCtx`.
|
||||
///
|
||||
/// The argument count is deliberate — each flag corresponds to a
|
||||
/// specific config value and is clearly named at the call site.
|
||||
/// Collapsing into a struct hides nothing meaningful for a one-call
|
||||
/// initializer.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn build(
|
||||
data_dir: &Path,
|
||||
dot_enabled: bool,
|
||||
dot_port: u16,
|
||||
api_port: u16,
|
||||
dnssec_enabled: bool,
|
||||
recursive_enabled: bool,
|
||||
mdns_enabled: bool,
|
||||
blocking_enabled: bool,
|
||||
) -> Self {
|
||||
let ca_path = data_dir.join("ca.pem");
|
||||
let ca_fingerprint_sha256 = compute_ca_fingerprint(&ca_path);
|
||||
|
||||
let mut features = Vec::new();
|
||||
if dot_enabled {
|
||||
features.push("dot".to_string());
|
||||
}
|
||||
if recursive_enabled {
|
||||
features.push("recursive".to_string());
|
||||
}
|
||||
if blocking_enabled {
|
||||
features.push("blocking".to_string());
|
||||
}
|
||||
if mdns_enabled {
|
||||
features.push("mdns".to_string());
|
||||
}
|
||||
if dnssec_enabled {
|
||||
features.push("dnssec".to_string());
|
||||
}
|
||||
|
||||
HealthMeta {
|
||||
version: env!("CARGO_PKG_VERSION"),
|
||||
hostname: crate::hostname(),
|
||||
sni: "numa.numa".to_string(),
|
||||
dot_enabled,
|
||||
dot_port,
|
||||
api_port,
|
||||
ca_fingerprint_sha256,
|
||||
features,
|
||||
started_at: Instant::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// JSON response shape returned by `GET /health` on both main and mobile APIs.
///
/// Fields are organized to match the iOS companion app's
/// `HealthInfo` Swift struct — see `ios-companion-app.md` §4.2.
#[derive(Serialize)]
pub struct HealthResponse {
    /// Always "ok" when this response is produced at all.
    pub status: &'static str,
    /// Crate version, copied from `HealthMeta`.
    pub version: &'static str,
    /// Seconds since `HealthMeta::started_at`.
    pub uptime_secs: u64,
    pub hostname: String,
    /// Current LAN IPv4 as a string; `null` when detection failed.
    /// Load-bearing for the iOS pipeline — see module docs.
    pub lan_ip: Option<String>,
    pub sni: String,
    /// DoT listener status (port omitted when disabled).
    pub dot: DotBlock,
    pub api: ApiBlock,
    /// CA presence + fingerprint.
    pub ca: CaBlock,
    pub features: Vec<String>,
}
|
||||
|
||||
/// DoT listener status; `port` is `Some` only when `enabled` is true
/// (see `HealthResponse::build`).
#[derive(Serialize)]
pub struct DotBlock {
    pub enabled: bool,
    pub port: Option<u16>,
}

/// HTTP API port.
#[derive(Serialize)]
pub struct ApiBlock {
    pub port: u16,
}

/// CA certificate status: `present` mirrors whether `fingerprint_sha256`
/// could be computed from the on-disk PEM.
#[derive(Serialize)]
pub struct CaBlock {
    pub present: bool,
    pub fingerprint_sha256: Option<String>,
}
|
||||
|
||||
impl HealthResponse {
|
||||
/// Assemble a fresh `HealthResponse` from the cached metadata and
|
||||
/// the current LAN IP (which may change across network transitions).
|
||||
/// Pass `None` for `lan_ip` if detection fails — the response still
|
||||
/// returns 200 OK, just without the LAN address.
|
||||
pub fn build(meta: &HealthMeta, lan_ip: Option<Ipv4Addr>) -> Self {
|
||||
HealthResponse {
|
||||
status: "ok",
|
||||
version: meta.version,
|
||||
uptime_secs: meta.started_at.elapsed().as_secs(),
|
||||
hostname: meta.hostname.clone(),
|
||||
lan_ip: lan_ip.map(|ip| ip.to_string()),
|
||||
sni: meta.sni.clone(),
|
||||
dot: DotBlock {
|
||||
enabled: meta.dot_enabled,
|
||||
port: if meta.dot_enabled {
|
||||
Some(meta.dot_port)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
},
|
||||
api: ApiBlock {
|
||||
port: meta.api_port,
|
||||
},
|
||||
ca: CaBlock {
|
||||
present: meta.ca_fingerprint_sha256.is_some(),
|
||||
fingerprint_sha256: meta.ca_fingerprint_sha256.clone(),
|
||||
},
|
||||
features: meta.features.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Read the CA cert at `ca_path` and return its SHA-256 fingerprint as a
|
||||
/// lowercase hex string, or None if the file doesn't exist or can't be read.
|
||||
///
|
||||
/// Hashes the raw PEM bytes for simplicity. A more canonical SPKI-based
|
||||
/// fingerprint would require parsing the PEM → DER → extracting
|
||||
/// SubjectPublicKeyInfo, which adds complexity without meaningful benefit
|
||||
/// for our use case (the iOS app uses the fingerprint only for display
|
||||
/// and to detect rotation).
|
||||
fn compute_ca_fingerprint(ca_path: &Path) -> Option<String> {
|
||||
let pem = std::fs::read(ca_path).ok()?;
|
||||
let hash = digest(&SHA256, &pem);
|
||||
let hex: String = hash.as_ref().iter().map(|b| format!("{:02x}", b)).collect();
|
||||
Some(hex)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Fully-populated meta (DoT on, CA present, two features) must
    // serialize every field the iOS app consumes.
    #[test]
    fn health_response_contains_required_fields() {
        let meta = HealthMeta {
            version: "0.10.0",
            hostname: "test-host".to_string(),
            sni: "numa.numa".to_string(),
            dot_enabled: true,
            dot_port: 853,
            api_port: 8765,
            ca_fingerprint_sha256: Some("abcd1234".to_string()),
            features: vec!["dot".to_string(), "dnssec".to_string()],
            started_at: Instant::now(),
        };

        let response = HealthResponse::build(&meta, Some(Ipv4Addr::new(192, 168, 1, 50)));
        let json = serde_json::to_string(&response).unwrap();

        // Spot-check the serialized JSON field by field.
        assert!(json.contains("\"status\":\"ok\""));
        assert!(json.contains("\"version\":\"0.10.0\""));
        assert!(json.contains("\"hostname\":\"test-host\""));
        assert!(json.contains("\"lan_ip\":\"192.168.1.50\""));
        assert!(json.contains("\"sni\":\"numa.numa\""));
        assert!(json.contains("\"port\":853"));
        assert!(json.contains("\"port\":8765"));
        assert!(json.contains("\"fingerprint_sha256\":\"abcd1234\""));
        assert!(json.contains("\"features\":[\"dot\",\"dnssec\"]"));
    }

    // With DoT disabled the port must be null, and missing CA / LAN IP
    // serialize as false / null rather than being dropped.
    #[test]
    fn health_response_omits_dot_port_when_disabled() {
        let meta = HealthMeta {
            version: "0.10.0",
            hostname: "t".to_string(),
            sni: "numa.numa".to_string(),
            dot_enabled: false,
            dot_port: 853,
            api_port: 8765,
            ca_fingerprint_sha256: None,
            features: vec![],
            started_at: Instant::now(),
        };

        let response = HealthResponse::build(&meta, None);
        let json = serde_json::to_string(&response).unwrap();

        assert!(json.contains("\"enabled\":false"));
        assert!(json.contains("\"dot\":{\"enabled\":false,\"port\":null}"));
        assert!(json.contains("\"present\":false"));
        assert!(json.contains("\"lan_ip\":null"));
    }

    // Missing CA file → None, not an error.
    #[test]
    fn ca_fingerprint_returns_none_for_missing_file() {
        let fp = compute_ca_fingerprint(Path::new("/nonexistent/ca.pem"));
        assert!(fp.is_none());
    }
}
|
||||
514
src/lan.rs
Normal file
514
src/lan.rs
Normal file
@@ -0,0 +1,514 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use log::{debug, info, warn};
|
||||
|
||||
use crate::buffer::BytePacketBuffer;
|
||||
use crate::config::LanConfig;
|
||||
use crate::ctx::ServerCtx;
|
||||
use crate::header::DnsHeader;
|
||||
use crate::health::HealthMeta;
|
||||
use crate::question::{DnsQuestion, QueryType};
|
||||
|
||||
// --- Constants ---
|
||||
|
||||
// Standard mDNS multicast group and port (used for both send and receive).
const MDNS_ADDR: Ipv4Addr = Ipv4Addr::new(224, 0, 0, 251);
const MDNS_PORT: u16 = 5353;
// DNS-SD service type Numa instances announce and browse for.
const SERVICE_TYPE: &str = "_numa._tcp.local";
// TTL (seconds) carried on the mDNS records we publish.
const MDNS_TTL: u32 = 120;

// TXT record key prefixes (including the trailing `=`). Shared between
// the sender (`build_announcement`) and the receiver (`parse_mdns_response`)
// to prevent drift — both sides match on the same literal, not on two
// independent string constants that could diverge.
const TXT_SERVICES: &str = "services=";
const TXT_ID: &str = "id=";
const TXT_VERSION: &str = "version=";
const TXT_API_PORT: &str = "api_port=";
const TXT_PROTO: &str = "proto=";
const TXT_DOT_PORT: &str = "dot_port=";
const TXT_CA_FP: &str = "ca_fp=";
|
||||
|
||||
// --- Peer Store ---
|
||||
|
||||
pub struct PeerStore {
    /// lowercase service name -> (host address, port, last-seen instant)
    peers: HashMap<String, (IpAddr, u16, Instant)>,
    /// Entries older than this are treated as gone.
    timeout: Duration,
}

impl PeerStore {
    pub fn new(timeout_secs: u64) -> Self {
        PeerStore {
            peers: HashMap::new(),
            timeout: Duration::from_secs(timeout_secs),
        }
    }

    /// Record services announced by `host`, refreshing their last-seen time.
    /// Returns true if a previously-unseen name was inserted.
    pub fn update(&mut self, host: IpAddr, services: &[(String, u16)]) -> bool {
        let now = Instant::now();
        let mut inserted_new = false;
        for (name, port) in services {
            // Names are keyed case-insensitively; insert() tells us
            // whether the key existed before.
            let previous = self.peers.insert(name.to_lowercase(), (host, *port, now));
            if previous.is_none() {
                inserted_new = true;
            }
        }
        inserted_new
    }

    /// Resolve a peer service by (case-insensitive) name.
    /// Expired entries are evicted lazily on lookup.
    pub fn lookup(&mut self, name: &str) -> Option<(IpAddr, u16)> {
        let key = name.to_lowercase();
        let (ip, port, seen) = *self.peers.get(&key)?;
        if seen.elapsed() > self.timeout {
            self.peers.remove(&key);
            None
        } else {
            Some((ip, port))
        }
    }

    /// All live peers as (name, ip, port, seconds-since-last-seen).
    pub fn list(&mut self) -> Vec<(String, IpAddr, u16, u64)> {
        self.prune();
        let now = Instant::now();
        self.peers
            .iter()
            .map(|(name, &(ip, port, seen))| {
                (name.clone(), ip, port, now.duration_since(seen).as_secs())
            })
            .collect()
    }

    /// Names of all live peers.
    pub fn names(&mut self) -> Vec<String> {
        self.prune();
        self.peers.keys().cloned().collect()
    }

    pub fn clear(&mut self) {
        self.peers.clear();
    }

    /// Drop every entry whose last-seen age has reached the timeout.
    fn prune(&mut self) {
        let now = Instant::now();
        self.peers
            .retain(|_, &mut (_, _, seen)| now.duration_since(seen) < self.timeout);
    }
}
|
||||
|
||||
// --- mDNS Discovery ---
|
||||
|
||||
/// Best-effort detection of this host's LAN IPv4 address.
///
/// A UDP "connect" sends no packets; it only asks the OS which local
/// interface/address would be used to route toward a public address,
/// which we then read back via `local_addr`.
pub fn detect_lan_ip() -> Option<Ipv4Addr> {
    let probe = std::net::UdpSocket::bind("0.0.0.0:0").ok()?;
    probe.connect("8.8.8.8:80").ok()?;
    if let SocketAddr::V4(v4) = probe.local_addr().ok()? {
        Some(*v4.ip())
    } else {
        None
    }
}
|
||||
|
||||
/// Short hostname for mDNS instance names (`<short>._numa._tcp.local`).
|
||||
/// Truncates at the first `.` so `macbook-pro.local` becomes `macbook-pro`.
|
||||
/// Uses the shared `crate::hostname()` helper as the source.
|
||||
fn get_hostname() -> String {
|
||||
crate::hostname()
|
||||
.split('.')
|
||||
.next()
|
||||
.filter(|s| !s.is_empty())
|
||||
.unwrap_or("numa")
|
||||
.to_string()
|
||||
}
|
||||
|
||||
/// Generate a per-process instance ID for self-filtering on multi-instance
/// hosts: `<pid>:<startup-nanos mod 1e6>`. The nanosecond suffix
/// disambiguates PID reuse across restarts.
fn instance_id() -> String {
    let nano_suffix = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_nanos()
        % 1_000_000;
    format!("{}:{}", std::process::id(), nano_suffix)
}
|
||||
|
||||
/// Run LAN peer discovery over mDNS: spawns a periodic announcer task,
/// sends one initial browse query, then loops forever ingesting
/// `_numa._tcp` announcements from peers. Returns early (discovery
/// disabled) only if the mDNS socket cannot be created or converted.
pub async fn start_lan_discovery(ctx: Arc<ServerCtx>, config: &LanConfig) {
    let interval = Duration::from_secs(config.broadcast_interval_secs);
    let local_ip = *ctx.lan_ip.lock().unwrap();
    let hostname = get_hostname();
    let our_instance_id = instance_id();

    info!(
        "LAN discovery via mDNS on {}:{}, local IP {}, instance {}._numa._tcp.local",
        MDNS_ADDR, MDNS_PORT, local_ip, hostname
    );

    let std_socket = match create_mdns_socket() {
        Ok(s) => s,
        Err(e) => {
            // Non-fatal: the rest of the server keeps running without LAN discovery.
            warn!(
                "LAN: could not bind mDNS socket: {} — LAN discovery disabled",
                e
            );
            return;
        }
    };
    let socket = match tokio::net::UdpSocket::from_std(std_socket) {
        Ok(s) => s,
        Err(e) => {
            warn!("LAN: tokio socket conversion failed: {}", e);
            return;
        }
    };
    // Shared between the announcer task and the receive loop below.
    let socket = Arc::new(socket);
    let dest = SocketAddr::new(IpAddr::V4(MDNS_ADDR), MDNS_PORT);

    // Spawn sender: announce our services periodically
    let sender_ctx = Arc::clone(&ctx);
    let sender_socket = Arc::clone(&socket);
    let sender_hostname = hostname.clone();
    let sender_instance_id = our_instance_id.clone();
    tokio::spawn(async move {
        let mut ticker = tokio::time::interval(interval);
        loop {
            ticker.tick().await;
            // Snapshot (name, port) pairs while holding the lock briefly.
            let services: Vec<(String, u16)> = {
                let store = sender_ctx.services.lock().unwrap();
                store
                    .list()
                    .iter()
                    .map(|e| (e.name.clone(), e.target_port))
                    .collect()
            };
            // Note: we always announce ourselves, even when the
            // services list is empty. The announcement still carries
            // the mobile API port + version + CA fingerprint in TXT,
            // which is what the iOS companion app browses for via
            // NWBrowser on `_numa._tcp.local`. Other Numa peers
            // receive these empty-services announcements too and
            // correctly ignore them in parse_mdns_response (the
            // receiver only processes when services is non-empty).
            let current_ip = *sender_ctx.lan_ip.lock().unwrap();
            if let Ok(pkt) = build_announcement(
                &sender_hostname,
                current_ip,
                &services,
                &sender_instance_id,
                &sender_ctx.health_meta,
            ) {
                // Send failures are ignored — the next tick retries.
                let _ = sender_socket.send_to(pkt.filled(), dest).await;
            }
        }
    });

    // Send initial browse query
    if let Ok(pkt) = build_browse_query() {
        let _ = socket.send_to(pkt.filled(), dest).await;
    }

    // Receiver loop: parse mDNS responses for _numa._tcp
    let mut buf = vec![0u8; 4096];
    loop {
        let (len, _src) = match socket.recv_from(&mut buf).await {
            Ok(r) => r,
            Err(e) => {
                debug!("mDNS recv error: {}", e);
                continue;
            }
        };

        let data = &buf[..len];
        if let Some(ann) = parse_mdns_response(data) {
            // Skip our own announcements via instance ID (works on multi-instance same-host)
            if ann.instance_id.as_deref() == Some(our_instance_id.as_str()) {
                continue;
            }
            if !ann.services.is_empty() {
                let changed = ctx
                    .lan_peers
                    .lock()
                    .unwrap()
                    .update(ann.peer_ip, &ann.services);
                if changed {
                    // NOTE(review): a newly-seen peer name triggers a TLS
                    // regeneration — presumably so the cert covers the new
                    // name; confirm in crate::tls::regenerate_tls.
                    crate::tls::regenerate_tls(&ctx);
                }
                debug!(
                    "LAN: {} services from {} (mDNS)",
                    ann.services.len(),
                    ann.peer_ip
                );
            }
        }
    }
}
|
||||
|
||||
// --- mDNS Packet Building ---
|
||||
|
||||
fn build_browse_query() -> crate::Result<BytePacketBuffer> {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
|
||||
let mut header = DnsHeader::new();
|
||||
header.questions = 1;
|
||||
header.write(&mut buf)?;
|
||||
|
||||
DnsQuestion::new(SERVICE_TYPE.to_string(), QueryType::PTR).write(&mut buf)?;
|
||||
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
fn build_announcement(
|
||||
hostname: &str,
|
||||
ip: Ipv4Addr,
|
||||
services: &[(String, u16)],
|
||||
inst_id: &str,
|
||||
meta: &HealthMeta,
|
||||
) -> crate::Result<BytePacketBuffer> {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
let instance_name = format!("{}._numa._tcp.local", hostname);
|
||||
let host_local = format!("{}.local", hostname);
|
||||
|
||||
let mut header = DnsHeader::new();
|
||||
header.response = true;
|
||||
header.authoritative_answer = true;
|
||||
header.answers = 4; // PTR + SRV + TXT + A
|
||||
header.write(&mut buf)?;
|
||||
|
||||
// PTR: _numa._tcp.local → <hostname>._numa._tcp.local
|
||||
write_record_header(&mut buf, SERVICE_TYPE, QueryType::PTR.to_num(), 1, MDNS_TTL)?;
|
||||
let rdlen_pos = buf.pos();
|
||||
buf.write_u16(0)?;
|
||||
let rdata_start = buf.pos();
|
||||
buf.write_qname(&instance_name)?;
|
||||
patch_rdlen(&mut buf, rdlen_pos, rdata_start)?;
|
||||
|
||||
// SRV: <instance>._numa._tcp.local → <hostname>.local
|
||||
// Port = mobile API port, which is what the iOS companion app resolves
|
||||
// the SRV record for. Legacy Numa peers don't read the SRV port (see
|
||||
// parse_mdns_response — it only uses TXT services= for peer discovery),
|
||||
// so changing the SRV port from "first service's port" to the mobile
|
||||
// API port is backwards compatible.
|
||||
write_record_header(
|
||||
&mut buf,
|
||||
&instance_name,
|
||||
QueryType::SRV.to_num(),
|
||||
0x8001,
|
||||
MDNS_TTL,
|
||||
)?;
|
||||
let rdlen_pos = buf.pos();
|
||||
buf.write_u16(0)?;
|
||||
let rdata_start = buf.pos();
|
||||
buf.write_u16(0)?; // priority
|
||||
buf.write_u16(0)?; // weight
|
||||
buf.write_u16(meta.api_port)?; // mobile API port, for iOS companion app
|
||||
buf.write_qname(&host_local)?;
|
||||
patch_rdlen(&mut buf, rdlen_pos, rdata_start)?;
|
||||
|
||||
// TXT: legacy peer-discovery entries (services, id) + enriched entries
|
||||
// for the iOS companion app (version, api_port, proto, dot_port, ca_fp).
|
||||
// All in one TXT RRset per mDNS convention.
|
||||
write_record_header(
|
||||
&mut buf,
|
||||
&instance_name,
|
||||
QueryType::TXT.to_num(),
|
||||
0x8001,
|
||||
MDNS_TTL,
|
||||
)?;
|
||||
let rdlen_pos = buf.pos();
|
||||
buf.write_u16(0)?;
|
||||
let rdata_start = buf.pos();
|
||||
let svc_str = services
|
||||
.iter()
|
||||
.map(|(name, port)| format!("{}:{}", name, port))
|
||||
.collect::<Vec<_>>()
|
||||
.join(",");
|
||||
// Legacy peer-discovery entries (consumed by parse_mdns_response)
|
||||
write_txt_string(&mut buf, &format!("{}{}", TXT_SERVICES, svc_str))?;
|
||||
write_txt_string(&mut buf, &format!("{}{}", TXT_ID, inst_id))?;
|
||||
// Enriched entries (consumed by the iOS/Android companion apps)
|
||||
write_txt_string(&mut buf, &format!("{}{}", TXT_VERSION, meta.version))?;
|
||||
write_txt_string(&mut buf, &format!("{}{}", TXT_API_PORT, meta.api_port))?;
|
||||
if meta.dot_enabled {
|
||||
write_txt_string(&mut buf, &format!("{}dot", TXT_PROTO))?;
|
||||
write_txt_string(&mut buf, &format!("{}{}", TXT_DOT_PORT, meta.dot_port))?;
|
||||
} else {
|
||||
write_txt_string(&mut buf, &format!("{}plain", TXT_PROTO))?;
|
||||
}
|
||||
if let Some(fp) = &meta.ca_fingerprint_sha256 {
|
||||
write_txt_string(&mut buf, &format!("{}{}", TXT_CA_FP, fp))?;
|
||||
}
|
||||
patch_rdlen(&mut buf, rdlen_pos, rdata_start)?;
|
||||
|
||||
// A: <hostname>.local → IP
|
||||
write_record_header(
|
||||
&mut buf,
|
||||
&host_local,
|
||||
QueryType::A.to_num(),
|
||||
0x8001,
|
||||
MDNS_TTL,
|
||||
)?;
|
||||
buf.write_u16(4)?;
|
||||
for &b in &ip.octets() {
|
||||
buf.write_u8(b)?;
|
||||
}
|
||||
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
fn write_record_header(
|
||||
buf: &mut BytePacketBuffer,
|
||||
name: &str,
|
||||
rtype: u16,
|
||||
class: u16,
|
||||
ttl: u32,
|
||||
) -> crate::Result<()> {
|
||||
buf.write_qname(name)?;
|
||||
buf.write_u16(rtype)?;
|
||||
buf.write_u16(class)?;
|
||||
buf.write_u32(ttl)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn patch_rdlen(
|
||||
buf: &mut BytePacketBuffer,
|
||||
rdlen_pos: usize,
|
||||
rdata_start: usize,
|
||||
) -> crate::Result<()> {
|
||||
let rdlen = (buf.pos() - rdata_start) as u16;
|
||||
buf.set_u16(rdlen_pos, rdlen)
|
||||
}
|
||||
|
||||
fn write_txt_string(buf: &mut BytePacketBuffer, s: &str) -> crate::Result<()> {
|
||||
let bytes = s.as_bytes();
|
||||
for chunk in bytes.chunks(255) {
|
||||
buf.write_u8(chunk.len() as u8)?;
|
||||
for &b in chunk {
|
||||
buf.write_u8(b)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// --- mDNS Packet Parsing ---
|
||||
|
||||
struct MdnsAnnouncement {
|
||||
services: Vec<(String, u16)>,
|
||||
peer_ip: IpAddr,
|
||||
instance_id: Option<String>,
|
||||
}
|
||||
|
||||
fn parse_mdns_response(data: &[u8]) -> Option<MdnsAnnouncement> {
|
||||
if data.len() < 12 {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
buf.buf[..data.len()].copy_from_slice(data);
|
||||
|
||||
let mut header = DnsHeader::new();
|
||||
header.read(&mut buf).ok()?;
|
||||
|
||||
if !header.response || header.answers == 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Skip questions
|
||||
for _ in 0..header.questions {
|
||||
let mut q = DnsQuestion::new(String::new(), QueryType::UNKNOWN(0));
|
||||
q.read(&mut buf).ok()?;
|
||||
}
|
||||
|
||||
let total = header.answers + header.authoritative_entries + header.resource_entries;
|
||||
let mut txt_services: Option<Vec<(String, u16)>> = None;
|
||||
let mut peer_instance_id: Option<String> = None;
|
||||
let mut a_ip: Option<IpAddr> = None;
|
||||
let mut name = String::with_capacity(64);
|
||||
|
||||
for _ in 0..total {
|
||||
if buf.pos() >= data.len() {
|
||||
break;
|
||||
}
|
||||
|
||||
name.clear();
|
||||
if buf.read_qname(&mut name).is_err() {
|
||||
break;
|
||||
}
|
||||
|
||||
let rtype = buf.read_u16().unwrap_or(0);
|
||||
let _rclass = buf.read_u16().unwrap_or(0);
|
||||
let _ttl = buf.read_u32().unwrap_or(0);
|
||||
let rdlength = buf.read_u16().unwrap_or(0) as usize;
|
||||
let rdata_start = buf.pos();
|
||||
|
||||
match rtype {
|
||||
t if t == QueryType::TXT.to_num() && name.contains("_numa._tcp") => {
|
||||
let mut pos = rdata_start;
|
||||
while pos < rdata_start + rdlength && pos < data.len() {
|
||||
let txt_len = data[pos] as usize;
|
||||
pos += 1;
|
||||
if pos + txt_len > data.len() {
|
||||
break;
|
||||
}
|
||||
if let Ok(txt) = std::str::from_utf8(&data[pos..pos + txt_len]) {
|
||||
if let Some(val) = txt.strip_prefix(TXT_SERVICES) {
|
||||
let svcs: Vec<(String, u16)> = val
|
||||
.split(',')
|
||||
.filter_map(|s| {
|
||||
let mut parts = s.splitn(2, ':');
|
||||
let svc_name = parts.next()?.to_string();
|
||||
let port = parts.next()?.parse().ok()?;
|
||||
Some((svc_name, port))
|
||||
})
|
||||
.collect();
|
||||
if !svcs.is_empty() {
|
||||
txt_services = Some(svcs);
|
||||
}
|
||||
} else if let Some(id) = txt.strip_prefix(TXT_ID) {
|
||||
peer_instance_id = Some(id.to_string());
|
||||
}
|
||||
}
|
||||
pos += txt_len;
|
||||
}
|
||||
}
|
||||
t if t == QueryType::A.to_num() && rdlength == 4 && rdata_start + 4 <= data.len() => {
|
||||
a_ip = Some(IpAddr::V4(Ipv4Addr::new(
|
||||
data[rdata_start],
|
||||
data[rdata_start + 1],
|
||||
data[rdata_start + 2],
|
||||
data[rdata_start + 3],
|
||||
)));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
buf.seek(rdata_start + rdlength).ok();
|
||||
}
|
||||
|
||||
let services = txt_services?;
|
||||
// Trust the A record IP if present, otherwise this isn't a complete announcement
|
||||
let peer_ip = a_ip?;
|
||||
|
||||
Some(MdnsAnnouncement {
|
||||
services,
|
||||
peer_ip,
|
||||
instance_id: peer_instance_id,
|
||||
})
|
||||
}
|
||||
|
||||
fn create_mdns_socket() -> std::io::Result<std::net::UdpSocket> {
|
||||
let addr = SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, MDNS_PORT);
|
||||
let socket = socket2::Socket::new(
|
||||
socket2::Domain::IPV4,
|
||||
socket2::Type::DGRAM,
|
||||
Some(socket2::Protocol::UDP),
|
||||
)?;
|
||||
socket.set_reuse_address(true)?;
|
||||
#[cfg(unix)]
|
||||
socket.set_reuse_port(true)?;
|
||||
socket.set_nonblocking(true)?;
|
||||
socket.bind(&socket2::SockAddr::from(addr))?;
|
||||
socket.join_multicast_v4(&MDNS_ADDR, &Ipv4Addr::UNSPECIFIED)?;
|
||||
Ok(socket.into())
|
||||
}
|
||||
126
src/lib.rs
126
src/lib.rs
@@ -4,15 +4,24 @@ pub mod buffer;
|
||||
pub mod cache;
|
||||
pub mod config;
|
||||
pub mod ctx;
|
||||
pub mod dnssec;
|
||||
pub mod dot;
|
||||
pub mod forward;
|
||||
pub mod header;
|
||||
pub mod health;
|
||||
pub mod lan;
|
||||
pub mod mobile_api;
|
||||
pub mod mobileconfig;
|
||||
pub mod override_store;
|
||||
pub mod packet;
|
||||
pub mod proxy;
|
||||
pub mod query_log;
|
||||
pub mod question;
|
||||
pub mod record;
|
||||
pub mod recursive;
|
||||
pub mod service_store;
|
||||
pub mod setup_phone;
|
||||
pub mod srtt;
|
||||
pub mod stats;
|
||||
pub mod system_dns;
|
||||
pub mod tls;
|
||||
@@ -20,9 +29,42 @@ pub mod tls;
|
||||
pub type Error = Box<dyn std::error::Error + Send + Sync>;
|
||||
pub type Result<T> = std::result::Result<T, Error>;
|
||||
|
||||
/// Shared config directory: ~/.config/numa/
|
||||
/// Handles sudo (uses SUDO_USER) and launchd (falls back to /usr/local/var/numa/).
|
||||
/// Detect the machine hostname via the `hostname` command. Returns the
|
||||
/// full hostname (e.g., `macbook-pro.local`), or `"numa"` if the command
|
||||
/// fails. Call sites that need the short form (e.g., mDNS instance
|
||||
/// names) should truncate at the first `.`.
|
||||
pub fn hostname() -> String {
|
||||
std::process::Command::new("hostname")
|
||||
.output()
|
||||
.ok()
|
||||
.and_then(|o| String::from_utf8(o.stdout).ok())
|
||||
.map(|h| h.trim().to_string())
|
||||
.filter(|h| !h.is_empty())
|
||||
.unwrap_or_else(|| "numa".to_string())
|
||||
}
|
||||
|
||||
/// Shared config directory for persistent data (services.json, etc).
|
||||
/// Unix users: ~/.config/numa/
|
||||
/// Linux root daemon: /var/lib/numa (FHS) — falls back to /usr/local/var/numa
|
||||
/// if a pre-v0.10.1 install already lives there.
|
||||
/// macOS root daemon: /usr/local/var/numa (Homebrew prefix)
|
||||
/// Windows: %APPDATA%\numa
|
||||
pub fn config_dir() -> std::path::PathBuf {
|
||||
#[cfg(windows)]
|
||||
{
|
||||
std::path::PathBuf::from(
|
||||
std::env::var("APPDATA").unwrap_or_else(|_| "C:\\ProgramData".into()),
|
||||
)
|
||||
.join("numa")
|
||||
}
|
||||
#[cfg(not(windows))]
|
||||
{
|
||||
config_dir_unix()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(windows))]
|
||||
fn config_dir_unix() -> std::path::PathBuf {
|
||||
// When run via sudo, SUDO_USER has the real user
|
||||
if let Ok(user) = std::env::var("SUDO_USER") {
|
||||
let home = if cfg!(target_os = "macos") {
|
||||
@@ -36,12 +78,88 @@ pub fn config_dir() -> std::path::PathBuf {
|
||||
// Normal user (not root)
|
||||
if let Ok(home) = std::env::var("HOME") {
|
||||
let path = std::path::PathBuf::from(&home);
|
||||
// /var/root on macOS is read-only (SIP), use /usr/local/var/numa instead
|
||||
if !home.starts_with("/var/root") && !home.starts_with("/root") {
|
||||
return path.join(".config").join("numa");
|
||||
}
|
||||
}
|
||||
|
||||
// Running as root daemon (launchd/systemd) — use system-wide path
|
||||
std::path::PathBuf::from("/usr/local/var/numa")
|
||||
daemon_data_dir()
|
||||
}
|
||||
|
||||
/// Default system-wide data directory for TLS certs. Overridable via
|
||||
/// `[server] data_dir = "..."` in numa.toml — this function only provides
|
||||
/// the fallback when the config doesn't set it.
|
||||
/// Linux: /var/lib/numa (FHS) — falls back to /usr/local/var/numa if a
|
||||
/// pre-v0.10.1 install already has data there.
|
||||
/// macOS: /usr/local/var/numa (Homebrew prefix)
|
||||
/// Windows: %PROGRAMDATA%\numa
|
||||
pub fn data_dir() -> std::path::PathBuf {
|
||||
#[cfg(windows)]
|
||||
{
|
||||
std::path::PathBuf::from(
|
||||
std::env::var("PROGRAMDATA").unwrap_or_else(|_| "C:\\ProgramData".into()),
|
||||
)
|
||||
.join("numa")
|
||||
}
|
||||
#[cfg(not(windows))]
|
||||
{
|
||||
daemon_data_dir()
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolve the system-wide data directory for the running platform.
|
||||
/// Honors backwards compatibility with pre-v0.10.1 installs that still
|
||||
/// have their CA cert + services.json under `/usr/local/var/numa`.
|
||||
#[cfg(not(windows))]
|
||||
fn daemon_data_dir() -> std::path::PathBuf {
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
std::path::PathBuf::from(resolve_linux_data_dir(
|
||||
std::path::Path::new("/usr/local/var/numa").exists(),
|
||||
std::path::Path::new("/var/lib/numa").exists(),
|
||||
))
|
||||
}
|
||||
#[cfg(target_os = "macos")]
|
||||
{
|
||||
// macOS uses the Homebrew prefix convention; no FHS migration needed.
|
||||
std::path::PathBuf::from("/usr/local/var/numa")
|
||||
}
|
||||
}
|
||||
|
||||
/// Extracted as a pure function so the migration logic is unit-testable
|
||||
/// without touching the real filesystem.
|
||||
#[cfg(any(target_os = "linux", test))]
|
||||
fn resolve_linux_data_dir(legacy_exists: bool, fhs_exists: bool) -> &'static str {
|
||||
if legacy_exists && !fhs_exists {
|
||||
"/usr/local/var/numa"
|
||||
} else {
|
||||
"/var/lib/numa"
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn linux_data_dir_fresh_install_uses_fhs() {
|
||||
assert_eq!(resolve_linux_data_dir(false, false), "/var/lib/numa");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linux_data_dir_upgrading_install_keeps_legacy() {
|
||||
// Migration must keep legacy so the user doesn't lose their CA on upgrade.
|
||||
assert_eq!(resolve_linux_data_dir(true, false), "/usr/local/var/numa");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linux_data_dir_after_migration_uses_fhs() {
|
||||
assert_eq!(resolve_linux_data_dir(true, true), "/var/lib/numa");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn linux_data_dir_only_fhs_uses_fhs() {
|
||||
assert_eq!(resolve_linux_data_dir(false, true), "/var/lib/numa");
|
||||
}
|
||||
}
|
||||
|
||||
607
src/main.rs
607
src/main.rs
@@ -1,24 +1,28 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::{Arc, Mutex, RwLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use arc_swap::ArcSwap;
|
||||
use log::{error, info};
|
||||
use tokio::net::UdpSocket;
|
||||
|
||||
use numa::blocklist::{download_blocklists, parse_blocklist, BlocklistStore};
|
||||
use numa::buffer::BytePacketBuffer;
|
||||
use numa::cache::DnsCache;
|
||||
use numa::config::{build_zone_map, load_config};
|
||||
use numa::config::{build_zone_map, load_config, ConfigLoad};
|
||||
use numa::ctx::{handle_query, ServerCtx};
|
||||
use numa::forward::Upstream;
|
||||
use numa::override_store::OverrideStore;
|
||||
use numa::query_log::QueryLog;
|
||||
use numa::service_store::ServiceStore;
|
||||
use numa::stats::ServerStats;
|
||||
use numa::system_dns::{
|
||||
discover_system_dns, install_service, install_system_dns, restart_service, service_status,
|
||||
uninstall_service, uninstall_system_dns,
|
||||
discover_system_dns, install_service, restart_service, service_status, uninstall_service,
|
||||
};
|
||||
|
||||
const QUAD9_IP: &str = "9.9.9.9";
|
||||
const DOH_FALLBACK: &str = "https://9.9.9.9/dns-query";
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> numa::Result<()> {
|
||||
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info"))
|
||||
@@ -29,12 +33,12 @@ async fn main() -> numa::Result<()> {
|
||||
let arg1 = std::env::args().nth(1).unwrap_or_default();
|
||||
match arg1.as_str() {
|
||||
"install" => {
|
||||
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — configuring system DNS\n");
|
||||
return install_system_dns().map_err(|e| e.into());
|
||||
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — installing\n");
|
||||
return install_service().map_err(|e| e.into());
|
||||
}
|
||||
"uninstall" => {
|
||||
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — restoring system DNS\n");
|
||||
return uninstall_system_dns().map_err(|e| e.into());
|
||||
eprintln!("\x1b[1;38;2;192;98;58mNuma\x1b[0m — uninstalling\n");
|
||||
return uninstall_service().map_err(|e| e.into());
|
||||
}
|
||||
"service" => {
|
||||
let sub = std::env::args().nth(2).unwrap_or_default();
|
||||
@@ -50,6 +54,23 @@ async fn main() -> numa::Result<()> {
|
||||
}
|
||||
};
|
||||
}
|
||||
"setup-phone" => {
|
||||
return numa::setup_phone::run().await.map_err(|e| e.into());
|
||||
}
|
||||
"lan" => {
|
||||
let sub = std::env::args().nth(2).unwrap_or_default();
|
||||
let config_path = std::env::args()
|
||||
.nth(3)
|
||||
.unwrap_or_else(|| "numa.toml".to_string());
|
||||
return match sub.as_str() {
|
||||
"on" => set_lan_enabled(true, &config_path),
|
||||
"off" => set_lan_enabled(false, &config_path),
|
||||
_ => {
|
||||
eprintln!("Usage: numa lan <on|off> [config-path]");
|
||||
Ok(())
|
||||
}
|
||||
};
|
||||
}
|
||||
"version" | "--version" | "-V" => {
|
||||
eprintln!("numa {}", env!("CARGO_PKG_VERSION"));
|
||||
return Ok(());
|
||||
@@ -65,12 +86,29 @@ async fn main() -> numa::Result<()> {
|
||||
eprintln!(" service stop Uninstall the system service");
|
||||
eprintln!(" service restart Restart the service with updated binary");
|
||||
eprintln!(" service status Check if the service is running");
|
||||
eprintln!(" lan on Enable LAN service discovery (mDNS)");
|
||||
eprintln!(" lan off Disable LAN service discovery");
|
||||
eprintln!(" setup-phone Generate a QR code to install Numa DoT on a phone");
|
||||
eprintln!(" help Show this help");
|
||||
eprintln!();
|
||||
eprintln!("Config path defaults to numa.toml");
|
||||
return Ok(());
|
||||
}
|
||||
_ => {}
|
||||
_ => {
|
||||
if !arg1.is_empty()
|
||||
&& arg1 != "run"
|
||||
&& !arg1.contains('/')
|
||||
&& !arg1.contains('\\')
|
||||
&& !arg1.ends_with(".toml")
|
||||
{
|
||||
eprintln!(
|
||||
"\x1b[1;38;2;192;98;58mNuma\x1b[0m — unknown command: \x1b[1m{}\x1b[0m\n",
|
||||
arg1
|
||||
);
|
||||
eprintln!("Run \x1b[1mnuma help\x1b[0m for a list of commands.");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let config_path = if arg1.is_empty() || arg1 == "run" {
|
||||
@@ -80,20 +118,90 @@ async fn main() -> numa::Result<()> {
|
||||
} else {
|
||||
arg1 // treat as config path for backwards compatibility
|
||||
};
|
||||
let config = load_config(&config_path)?;
|
||||
let ConfigLoad {
|
||||
config,
|
||||
path: resolved_config_path,
|
||||
found: config_found,
|
||||
} = load_config(&config_path)?;
|
||||
|
||||
// Discover system DNS in a single pass (upstream + forwarding rules)
|
||||
let system_dns = discover_system_dns();
|
||||
|
||||
let upstream_addr = if config.upstream.address.is_empty() {
|
||||
system_dns.default_upstream.unwrap_or_else(|| {
|
||||
info!("could not detect system DNS, falling back to 9.9.9.9 (Quad9)");
|
||||
"9.9.9.9".to_string()
|
||||
})
|
||||
} else {
|
||||
config.upstream.address.clone()
|
||||
let root_hints = numa::recursive::parse_root_hints(&config.upstream.root_hints);
|
||||
|
||||
let (resolved_mode, upstream_auto, upstream, upstream_label) = match config.upstream.mode {
|
||||
numa::config::UpstreamMode::Auto => {
|
||||
info!("auto mode: probing recursive resolution...");
|
||||
if numa::recursive::probe_recursive(&root_hints).await {
|
||||
info!("recursive probe succeeded — self-sovereign mode");
|
||||
let dummy = Upstream::Udp("0.0.0.0:0".parse().unwrap());
|
||||
(
|
||||
numa::config::UpstreamMode::Recursive,
|
||||
false,
|
||||
dummy,
|
||||
"recursive (root hints)".to_string(),
|
||||
)
|
||||
} else {
|
||||
log::warn!("recursive probe failed — falling back to Quad9 DoH");
|
||||
let client = reqwest::Client::builder()
|
||||
.use_rustls_tls()
|
||||
.build()
|
||||
.unwrap_or_default();
|
||||
let url = DOH_FALLBACK.to_string();
|
||||
let label = url.clone();
|
||||
(
|
||||
numa::config::UpstreamMode::Forward,
|
||||
false,
|
||||
Upstream::Doh { url, client },
|
||||
label,
|
||||
)
|
||||
}
|
||||
}
|
||||
numa::config::UpstreamMode::Recursive => {
|
||||
let dummy = Upstream::Udp("0.0.0.0:0".parse().unwrap());
|
||||
(
|
||||
numa::config::UpstreamMode::Recursive,
|
||||
false,
|
||||
dummy,
|
||||
"recursive (root hints)".to_string(),
|
||||
)
|
||||
}
|
||||
numa::config::UpstreamMode::Forward => {
|
||||
let upstream_addr = if config.upstream.address.is_empty() {
|
||||
system_dns
|
||||
.default_upstream
|
||||
.or_else(numa::system_dns::detect_dhcp_dns)
|
||||
.unwrap_or_else(|| {
|
||||
info!("could not detect system DNS, falling back to Quad9 DoH");
|
||||
DOH_FALLBACK.to_string()
|
||||
})
|
||||
} else {
|
||||
config.upstream.address.clone()
|
||||
};
|
||||
|
||||
let upstream: Upstream = if upstream_addr.starts_with("https://") {
|
||||
let client = reqwest::Client::builder()
|
||||
.use_rustls_tls()
|
||||
.build()
|
||||
.unwrap_or_default();
|
||||
Upstream::Doh {
|
||||
url: upstream_addr,
|
||||
client,
|
||||
}
|
||||
} else {
|
||||
let addr: SocketAddr =
|
||||
format!("{}:{}", upstream_addr, config.upstream.port).parse()?;
|
||||
Upstream::Udp(addr)
|
||||
};
|
||||
let label = upstream.to_string();
|
||||
(
|
||||
numa::config::UpstreamMode::Forward,
|
||||
config.upstream.address.is_empty(),
|
||||
upstream,
|
||||
label,
|
||||
)
|
||||
}
|
||||
};
|
||||
let upstream: SocketAddr = format!("{}:{}", upstream_addr, config.upstream.port).parse()?;
|
||||
let api_port = config.server.api_port;
|
||||
|
||||
let mut blocklist = BlocklistStore::new();
|
||||
@@ -106,29 +214,90 @@ async fn main() -> numa::Result<()> {
|
||||
|
||||
// Build service store: config services + persisted user services
|
||||
let mut service_store = ServiceStore::new();
|
||||
service_store.insert_from_config("numa", config.server.api_port);
|
||||
service_store.insert_from_config("numa", config.server.api_port, Vec::new());
|
||||
for svc in &config.services {
|
||||
service_store.insert_from_config(&svc.name, svc.target_port);
|
||||
service_store.insert_from_config(&svc.name, svc.target_port, svc.routes.clone());
|
||||
}
|
||||
service_store.load_persisted();
|
||||
|
||||
let forwarding_rules = system_dns.forwarding_rules;
|
||||
|
||||
// Resolve data_dir from config, falling back to the platform default.
|
||||
// Used for TLS CA storage below and stored on ServerCtx for runtime use.
|
||||
let resolved_data_dir = config
|
||||
.server
|
||||
.data_dir
|
||||
.clone()
|
||||
.unwrap_or_else(numa::data_dir);
|
||||
|
||||
// Build initial TLS config before ServerCtx (so ArcSwap is ready at construction)
|
||||
let initial_tls = if config.proxy.enabled && config.proxy.tls_port > 0 {
|
||||
let service_names = service_store.names();
|
||||
match numa::tls::build_tls_config(
|
||||
&config.proxy.tld,
|
||||
&service_names,
|
||||
Vec::new(),
|
||||
&resolved_data_dir,
|
||||
) {
|
||||
Ok(tls_config) => Some(ArcSwap::from(tls_config)),
|
||||
Err(e) => {
|
||||
if let Some(advisory) = numa::tls::try_data_dir_advisory(&e, &resolved_data_dir) {
|
||||
eprint!("{}", advisory);
|
||||
} else {
|
||||
log::warn!("TLS setup failed, HTTPS proxy disabled: {}", e);
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let health_meta = numa::health::HealthMeta::build(
|
||||
&resolved_data_dir,
|
||||
config.dot.enabled,
|
||||
config.dot.port,
|
||||
config.mobile.port,
|
||||
config.dnssec.enabled,
|
||||
resolved_mode == numa::config::UpstreamMode::Recursive,
|
||||
config.lan.enabled,
|
||||
config.blocking.enabled,
|
||||
);
|
||||
|
||||
let ca_pem = std::fs::read_to_string(resolved_data_dir.join("ca.pem")).ok();
|
||||
|
||||
let socket = match UdpSocket::bind(&config.server.bind_addr).await {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
if let Some(advisory) =
|
||||
numa::system_dns::try_port53_advisory(&config.server.bind_addr, &e)
|
||||
{
|
||||
eprint!("{}", advisory);
|
||||
std::process::exit(1);
|
||||
}
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
let ctx = Arc::new(ServerCtx {
|
||||
socket: UdpSocket::bind(&config.server.bind_addr).await?,
|
||||
socket,
|
||||
zone_map: build_zone_map(&config.zones)?,
|
||||
cache: Mutex::new(DnsCache::new(
|
||||
cache: RwLock::new(DnsCache::new(
|
||||
config.cache.max_entries,
|
||||
config.cache.min_ttl,
|
||||
config.cache.max_ttl,
|
||||
)),
|
||||
stats: Mutex::new(ServerStats::new()),
|
||||
overrides: Mutex::new(OverrideStore::new()),
|
||||
blocklist: Mutex::new(blocklist),
|
||||
overrides: RwLock::new(OverrideStore::new()),
|
||||
blocklist: RwLock::new(blocklist),
|
||||
query_log: Mutex::new(QueryLog::new(1000)),
|
||||
services: Mutex::new(service_store),
|
||||
lan_peers: Mutex::new(numa::lan::PeerStore::new(config.lan.peer_timeout_secs)),
|
||||
forwarding_rules,
|
||||
upstream,
|
||||
upstream: Mutex::new(upstream),
|
||||
upstream_auto,
|
||||
upstream_port: config.upstream.port,
|
||||
lan_ip: Mutex::new(numa::lan::detect_lan_ip().unwrap_or(std::net::Ipv4Addr::LOCALHOST)),
|
||||
timeout: Duration::from_millis(config.upstream.timeout_ms),
|
||||
proxy_tld_suffix: if config.proxy.tld.is_empty() {
|
||||
String::new()
|
||||
@@ -136,40 +305,156 @@ async fn main() -> numa::Result<()> {
|
||||
format!(".{}", config.proxy.tld)
|
||||
},
|
||||
proxy_tld: config.proxy.tld.clone(),
|
||||
lan_enabled: config.lan.enabled,
|
||||
config_path: resolved_config_path,
|
||||
config_found,
|
||||
config_dir: numa::config_dir(),
|
||||
data_dir: resolved_data_dir,
|
||||
tls_config: initial_tls,
|
||||
upstream_mode: resolved_mode,
|
||||
root_hints,
|
||||
srtt: std::sync::RwLock::new(numa::srtt::SrttCache::new(config.upstream.srtt)),
|
||||
inflight: std::sync::Mutex::new(std::collections::HashMap::new()),
|
||||
dnssec_enabled: config.dnssec.enabled,
|
||||
dnssec_strict: config.dnssec.strict,
|
||||
health_meta,
|
||||
ca_pem,
|
||||
});
|
||||
|
||||
let zone_count: usize = ctx.zone_map.values().map(|m| m.len()).sum();
|
||||
eprintln!("\n\x1b[38;2;192;98;58m ╔══════════════════════════════════════════╗\x1b[0m");
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[1;38;2;192;98;58mNUMA\x1b[0m \x1b[3;38;2;163;152;136mDNS that governs itself\x1b[0m \x1b[38;2;163;152;136mv{}\x1b[0m \x1b[38;2;192;98;58m║\x1b[0m", env!("CARGO_PKG_VERSION"));
|
||||
eprintln!("\x1b[38;2;192;98;58m ╠══════════════════════════════════════════╣\x1b[0m");
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mDNS\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m", config.server.bind_addr);
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mAPI\x1b[0m http://localhost:{:<16}\x1b[38;2;192;98;58m║\x1b[0m", api_port);
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mDashboard\x1b[0m http://localhost:{:<16}\x1b[38;2;192;98;58m║\x1b[0m", api_port);
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mUpstream\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m", upstream);
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mZones\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m", format!("{} records", zone_count));
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mCache\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m", format!("max {} entries", config.cache.max_entries));
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mBlocking\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m",
|
||||
if config.blocking.enabled { format!("{} lists", config.blocking.lists.len()) } else { "disabled".to_string() });
|
||||
if config.proxy.enabled {
|
||||
let schemes = if config.proxy.tls_port > 0 {
|
||||
format!(
|
||||
// Build banner rows, then size the box to fit the longest value
|
||||
let api_url = format!("http://localhost:{}", api_port);
|
||||
let proxy_label = if config.proxy.enabled {
|
||||
if config.proxy.tls_port > 0 {
|
||||
Some(format!(
|
||||
"http://:{} https://:{}",
|
||||
config.proxy.port, config.proxy.tls_port
|
||||
)
|
||||
))
|
||||
} else {
|
||||
format!("http://*.{} on :{}", config.proxy.tld, config.proxy.port)
|
||||
};
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mProxy\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m", schemes);
|
||||
Some(format!(
|
||||
"http://*.{} on :{}",
|
||||
config.proxy.tld, config.proxy.port
|
||||
))
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let config_label = if ctx.config_found {
|
||||
ctx.config_path.clone()
|
||||
} else {
|
||||
format!("{} (defaults)", ctx.config_path)
|
||||
};
|
||||
let data_label = ctx.data_dir.display().to_string();
|
||||
let services_label = ctx.config_dir.join("services.json").display().to_string();
|
||||
|
||||
// label (10) + value + padding (2) = inner width; minimum 40 for the title row
|
||||
let val_w = [
|
||||
config.server.bind_addr.len(),
|
||||
api_url.len(),
|
||||
upstream_label.len(),
|
||||
config_label.len(),
|
||||
data_label.len(),
|
||||
services_label.len(),
|
||||
]
|
||||
.into_iter()
|
||||
.chain(proxy_label.as_ref().map(|s| s.len()))
|
||||
.max()
|
||||
.unwrap_or(30);
|
||||
let w = (val_w + 12).max(42); // 10 label + 2 padding, min 42 for title
|
||||
|
||||
let o = "\x1b[38;2;192;98;58m"; // orange
|
||||
let g = "\x1b[38;2;107;124;78m"; // green
|
||||
let d = "\x1b[38;2;163;152;136m"; // dim
|
||||
let r = "\x1b[0m"; // reset
|
||||
let b = "\x1b[1;38;2;192;98;58m"; // bold orange
|
||||
let it = "\x1b[3;38;2;163;152;136m"; // italic dim
|
||||
|
||||
let bar_top = "═".repeat(w);
|
||||
let bar_mid = "─".repeat(w);
|
||||
let row = |label: &str, color: &str, value: &str| {
|
||||
eprintln!(
|
||||
"{o} ║{r} {color}{:<9}{r} {:<vw$}{o}║{r}",
|
||||
label,
|
||||
value,
|
||||
vw = w - 12
|
||||
);
|
||||
};
|
||||
|
||||
// Title row: center within the box
|
||||
let title = format!(
|
||||
"{b}NUMA{r} {it}DNS that governs itself{r} {d}v{}{r}",
|
||||
env!("CARGO_PKG_VERSION")
|
||||
);
|
||||
// The title contains ANSI codes; visible length is ~38 chars. Pad to fill the box.
|
||||
let title_visible_len = 4 + 2 + 24 + 2 + 1 + env!("CARGO_PKG_VERSION").len() + 1;
|
||||
let title_pad = w.saturating_sub(title_visible_len);
|
||||
eprintln!("\n{o} ╔{bar_top}╗{r}");
|
||||
eprint!("{o} ║{r} {title}");
|
||||
eprintln!("{}{o}║{r}", " ".repeat(title_pad));
|
||||
eprintln!("{o} ╠{bar_top}╣{r}");
|
||||
row("DNS", g, &config.server.bind_addr);
|
||||
row("API", g, &api_url);
|
||||
row("Dashboard", g, &api_url);
|
||||
row(
|
||||
"Upstream",
|
||||
g,
|
||||
if ctx.upstream_mode == numa::config::UpstreamMode::Recursive {
|
||||
"recursive (root hints)"
|
||||
} else {
|
||||
&upstream_label
|
||||
},
|
||||
);
|
||||
row("Zones", g, &format!("{} records", zone_count));
|
||||
row(
|
||||
"Cache",
|
||||
g,
|
||||
&format!("max {} entries", config.cache.max_entries),
|
||||
);
|
||||
row(
|
||||
"Blocking",
|
||||
g,
|
||||
&if config.blocking.enabled {
|
||||
format!("{} lists", config.blocking.lists.len())
|
||||
} else {
|
||||
"disabled".to_string()
|
||||
},
|
||||
);
|
||||
if let Some(ref label) = proxy_label {
|
||||
row("Proxy", g, label);
|
||||
if config.proxy.bind_addr == "127.0.0.1" {
|
||||
let y = "\x1b[38;2;204;176;59m"; // yellow
|
||||
row(
|
||||
"",
|
||||
y,
|
||||
&format!(
|
||||
"⚠ proxy on 127.0.0.1 — .{} not LAN reachable",
|
||||
config.proxy.tld
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
if config.dot.enabled {
|
||||
row("DoT", g, &format!("tls://:{}", config.dot.port));
|
||||
}
|
||||
if config.lan.enabled {
|
||||
row("LAN", g, "mDNS (_numa._tcp.local)");
|
||||
}
|
||||
if !ctx.forwarding_rules.is_empty() {
|
||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mRouting\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m",
|
||||
format!("{} conditional rules", ctx.forwarding_rules.len()));
|
||||
row(
|
||||
"Routing",
|
||||
g,
|
||||
&format!("{} conditional rules", ctx.forwarding_rules.len()),
|
||||
);
|
||||
}
|
||||
eprintln!("\x1b[38;2;192;98;58m ╚══════════════════════════════════════════╝\x1b[0m\n");
|
||||
eprintln!("{o} ╠{bar_mid}╣{r}");
|
||||
row("Config", d, &config_label);
|
||||
row("Data", d, &data_label);
|
||||
row("Services", d, &services_label);
|
||||
eprintln!("{o} ╚{bar_top}╝{r}\n");
|
||||
|
||||
info!(
|
||||
"numa listening on {}, upstream {}, {} zone records, cache max {}, API on port {}",
|
||||
config.server.bind_addr, upstream, zone_count, config.cache.max_entries, api_port,
|
||||
config.server.bind_addr, upstream_label, zone_count, config.cache.max_entries, api_port,
|
||||
);
|
||||
|
||||
// Download blocklists on startup
|
||||
@@ -192,9 +477,24 @@ async fn main() -> numa::Result<()> {
|
||||
});
|
||||
}
|
||||
|
||||
// Prime TLD cache (recursive mode only)
|
||||
if ctx.upstream_mode == numa::config::UpstreamMode::Recursive {
|
||||
let prime_ctx = Arc::clone(&ctx);
|
||||
let prime_tlds = config.upstream.prime_tlds;
|
||||
tokio::spawn(async move {
|
||||
numa::recursive::prime_tld_cache(
|
||||
&prime_ctx.cache,
|
||||
&prime_ctx.root_hints,
|
||||
&prime_tlds,
|
||||
&prime_ctx.srtt,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
}
|
||||
|
||||
// Spawn HTTP API server
|
||||
let api_ctx = Arc::clone(&ctx);
|
||||
let api_addr: SocketAddr = format!("0.0.0.0:{}", api_port).parse()?;
|
||||
let api_addr: SocketAddr = format!("{}:{}", config.server.api_bind_addr, api_port).parse()?;
|
||||
tokio::spawn(async move {
|
||||
let app = numa::api::router(api_ctx);
|
||||
let listener = tokio::net::TcpListener::bind(api_addr).await.unwrap();
|
||||
@@ -202,44 +502,83 @@ async fn main() -> numa::Result<()> {
|
||||
axum::serve(listener, app).await.unwrap();
|
||||
});
|
||||
|
||||
// Spawn Mobile API listener (read-only subset for iOS/Android companion
|
||||
// apps, LAN-bound by default so phones can reach it). Only idempotent
|
||||
// GETs; no state-mutating routes are exposed here regardless of
|
||||
// the main API's bind address.
|
||||
if config.mobile.enabled {
|
||||
let mobile_ctx = Arc::clone(&ctx);
|
||||
let mobile_bind = config.mobile.bind_addr.clone();
|
||||
let mobile_port = config.mobile.port;
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = numa::mobile_api::start(mobile_ctx, mobile_bind, mobile_port).await {
|
||||
log::warn!("Mobile API listener failed: {}", e);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let proxy_bind: std::net::Ipv4Addr = config
|
||||
.proxy
|
||||
.bind_addr
|
||||
.parse()
|
||||
.unwrap_or(std::net::Ipv4Addr::LOCALHOST);
|
||||
|
||||
// Spawn HTTP reverse proxy for .numa domains
|
||||
if config.proxy.enabled {
|
||||
let proxy_ctx = Arc::clone(&ctx);
|
||||
let proxy_port = config.proxy.port;
|
||||
tokio::spawn(async move {
|
||||
numa::proxy::start_proxy(proxy_ctx, proxy_port).await;
|
||||
numa::proxy::start_proxy(proxy_ctx, proxy_port, proxy_bind).await;
|
||||
});
|
||||
}
|
||||
|
||||
// Spawn HTTPS reverse proxy with TLS termination
|
||||
if config.proxy.enabled && config.proxy.tls_port > 0 {
|
||||
let service_names: Vec<String> = ctx
|
||||
.services
|
||||
.lock()
|
||||
.unwrap()
|
||||
.list()
|
||||
.iter()
|
||||
.map(|e| e.name.clone())
|
||||
.collect();
|
||||
match numa::tls::build_tls_config(&config.proxy.tld, &service_names) {
|
||||
Ok(tls_config) => {
|
||||
let proxy_ctx = Arc::clone(&ctx);
|
||||
let tls_port = config.proxy.tls_port;
|
||||
tokio::spawn(async move {
|
||||
numa::proxy::start_proxy_tls(proxy_ctx, tls_port, tls_config).await;
|
||||
});
|
||||
}
|
||||
Err(e) => {
|
||||
log::warn!("TLS setup failed, HTTPS proxy disabled: {}", e);
|
||||
}
|
||||
}
|
||||
if config.proxy.enabled && config.proxy.tls_port > 0 && ctx.tls_config.is_some() {
|
||||
let proxy_ctx = Arc::clone(&ctx);
|
||||
let tls_port = config.proxy.tls_port;
|
||||
tokio::spawn(async move {
|
||||
numa::proxy::start_proxy_tls(proxy_ctx, tls_port, proxy_bind).await;
|
||||
});
|
||||
}
|
||||
|
||||
// Spawn network change watcher (upstream re-detection, LAN IP update, peer flush)
|
||||
{
|
||||
let watch_ctx = Arc::clone(&ctx);
|
||||
tokio::spawn(async move {
|
||||
network_watch_loop(watch_ctx).await;
|
||||
});
|
||||
}
|
||||
|
||||
// Spawn LAN service discovery
|
||||
if config.lan.enabled {
|
||||
let lan_ctx = Arc::clone(&ctx);
|
||||
let lan_config = config.lan.clone();
|
||||
tokio::spawn(async move {
|
||||
numa::lan::start_lan_discovery(lan_ctx, &lan_config).await;
|
||||
});
|
||||
}
|
||||
|
||||
// Spawn DNS-over-TLS listener (RFC 7858)
|
||||
if config.dot.enabled {
|
||||
let dot_ctx = Arc::clone(&ctx);
|
||||
let dot_config = config.dot.clone();
|
||||
tokio::spawn(async move {
|
||||
numa::dot::start_dot(dot_ctx, &dot_config).await;
|
||||
});
|
||||
}
|
||||
|
||||
// UDP DNS listener
|
||||
#[allow(clippy::infinite_loop)]
|
||||
loop {
|
||||
let mut buffer = BytePacketBuffer::new();
|
||||
let (_, src_addr) = ctx.socket.recv_from(&mut buffer.buf).await?;
|
||||
let (_, src_addr) = match ctx.socket.recv_from(&mut buffer.buf).await {
|
||||
Ok(r) => r,
|
||||
Err(e) if e.kind() == std::io::ErrorKind::ConnectionReset => {
|
||||
// Windows delivers ICMP port-unreachable as ConnectionReset on UDP sockets
|
||||
continue;
|
||||
}
|
||||
Err(e) => return Err(e.into()),
|
||||
};
|
||||
|
||||
let ctx = Arc::clone(&ctx);
|
||||
tokio::spawn(async move {
|
||||
@@ -250,6 +589,130 @@ async fn main() -> numa::Result<()> {
|
||||
}
|
||||
}
|
||||
|
||||
async fn network_watch_loop(ctx: Arc<numa::ctx::ServerCtx>) {
|
||||
let mut tick: u64 = 0;
|
||||
|
||||
let mut interval = tokio::time::interval(Duration::from_secs(5));
|
||||
interval.tick().await; // skip immediate tick
|
||||
|
||||
loop {
|
||||
interval.tick().await;
|
||||
tick += 1;
|
||||
let mut changed = false;
|
||||
|
||||
// Check LAN IP change (every 5s — cheap, one UDP socket call)
|
||||
if let Some(new_ip) = numa::lan::detect_lan_ip() {
|
||||
let mut current_ip = ctx.lan_ip.lock().unwrap();
|
||||
if new_ip != *current_ip {
|
||||
info!("LAN IP changed: {} → {}", current_ip, new_ip);
|
||||
*current_ip = new_ip;
|
||||
changed = true;
|
||||
numa::recursive::reset_udp_state();
|
||||
}
|
||||
}
|
||||
|
||||
// Re-detect upstream every 30s or on LAN IP change (UDP only —
|
||||
// DoH upstreams are explicitly configured via URL, not auto-detected)
|
||||
if ctx.upstream_auto
|
||||
&& matches!(*ctx.upstream.lock().unwrap(), Upstream::Udp(_))
|
||||
&& (changed || tick.is_multiple_of(6))
|
||||
{
|
||||
let dns_info = numa::system_dns::discover_system_dns();
|
||||
let new_addr = dns_info
|
||||
.default_upstream
|
||||
.or_else(numa::system_dns::detect_dhcp_dns)
|
||||
.unwrap_or_else(|| QUAD9_IP.to_string());
|
||||
if let Ok(new_sock) =
|
||||
format!("{}:{}", new_addr, ctx.upstream_port).parse::<SocketAddr>()
|
||||
{
|
||||
let new_upstream = Upstream::Udp(new_sock);
|
||||
let mut upstream = ctx.upstream.lock().unwrap();
|
||||
if *upstream != new_upstream {
|
||||
info!("upstream changed: {} → {}", upstream, new_upstream);
|
||||
*upstream = new_upstream;
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Flush stale LAN peers on any network change
|
||||
if changed {
|
||||
ctx.lan_peers.lock().unwrap().clear();
|
||||
info!("flushed LAN peers after network change");
|
||||
}
|
||||
|
||||
// Re-probe UDP every 5 minutes when disabled
|
||||
if tick.is_multiple_of(60) {
|
||||
numa::recursive::probe_udp(&ctx.root_hints).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn set_lan_enabled(enabled: bool, path: &str) -> numa::Result<()> {
|
||||
let contents = match std::fs::read_to_string(path) {
|
||||
Ok(c) => c,
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
|
||||
std::fs::write(path, format!("[lan]\nenabled = {}\n", enabled))?;
|
||||
print_lan_status(enabled);
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) => return Err(e.into()),
|
||||
};
|
||||
|
||||
// Track current TOML section while scanning lines
|
||||
let mut in_lan = false;
|
||||
let mut found = false;
|
||||
let mut lines: Vec<String> = contents
|
||||
.lines()
|
||||
.map(|line| {
|
||||
let trimmed = line.trim();
|
||||
if trimmed.starts_with('[') {
|
||||
in_lan = trimmed == "[lan]";
|
||||
}
|
||||
if in_lan && !found {
|
||||
if let Some((key, _)) = trimmed.split_once('=') {
|
||||
if key.trim() == "enabled" {
|
||||
found = true;
|
||||
let indent = &line[..line.len() - trimmed.len()];
|
||||
return format!("{}enabled = {}", indent, enabled);
|
||||
}
|
||||
}
|
||||
}
|
||||
line.to_string()
|
||||
})
|
||||
.collect();
|
||||
|
||||
if !found {
|
||||
if let Some(i) = lines.iter().position(|l| l.trim() == "[lan]") {
|
||||
lines.insert(i + 1, format!("enabled = {}", enabled));
|
||||
} else {
|
||||
lines.push(String::new());
|
||||
lines.push("[lan]".to_string());
|
||||
lines.push(format!("enabled = {}", enabled));
|
||||
}
|
||||
}
|
||||
|
||||
let mut result = lines.join("\n");
|
||||
if !result.ends_with('\n') {
|
||||
result.push('\n');
|
||||
}
|
||||
std::fs::write(path, result)?;
|
||||
print_lan_status(enabled);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn print_lan_status(enabled: bool) {
|
||||
let label = if enabled { "enabled" } else { "disabled" };
|
||||
let color = if enabled { "32" } else { "33" };
|
||||
eprintln!(
|
||||
"\x1b[1;38;2;192;98;58mNuma\x1b[0m — LAN discovery \x1b[{}m{}\x1b[0m",
|
||||
color, label
|
||||
);
|
||||
if enabled {
|
||||
eprintln!(" Restart Numa to start mDNS discovery");
|
||||
}
|
||||
}
|
||||
|
||||
async fn load_blocklists(ctx: &ServerCtx, lists: &[String]) {
|
||||
let downloaded = download_blocklists(lists).await;
|
||||
|
||||
@@ -266,7 +729,7 @@ async fn load_blocklists(ctx: &ServerCtx, lists: &[String]) {
|
||||
|
||||
// Swap under lock — sub-microsecond
|
||||
ctx.blocklist
|
||||
.lock()
|
||||
.write()
|
||||
.unwrap()
|
||||
.swap_domains(all_domains, sources);
|
||||
info!(
|
||||
|
||||
107
src/mobile_api.rs
Normal file
107
src/mobile_api.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
//! Mobile API — persistent HTTP listener for iOS/Android companion apps.
|
||||
//!
|
||||
//! Read-only subset of Numa's HTTP surface served on a separate port
|
||||
//! (default 8765) bound to the LAN. Unlike the main API on port 5380
|
||||
//! (which defaults to `127.0.0.1` and serves mutating routes like
|
||||
//! `DELETE /services/{name}` or `PUT /blocking/toggle`), this listener
|
||||
//! is safe to expose on the LAN because every route is idempotent and
|
||||
//! read-only.
|
||||
//!
|
||||
//! Routes (all GET):
|
||||
//!
|
||||
//! - `/health` — enriched status + metadata, shares the handler with the
|
||||
//! main API via `crate::api::health`
|
||||
//! - `/ca.pem` — Numa local CA in PEM form, shares the handler with the
|
||||
//! main API via `crate::api::serve_ca`
|
||||
//! - `/mobileconfig` — combined CA + DNS settings profile (Full mode)
|
||||
//! - `/ca.mobileconfig` — CA-only trust profile (no DNS override)
|
||||
//!
|
||||
//! The mobile API does NOT include the mutating routes (overrides, cache
|
||||
//! flush, blocking toggle, service CRUD, etc.). Even if a user sets
|
||||
//! `api_bind_addr` to `0.0.0.0` for the main API, those routes stay on
|
||||
//! port 5380; the mobile API on port 8765 never serves them. This is the
|
||||
//! primary security boundary: anything exposed to the LAN is read-only.
|
||||
|
||||
use std::net::Ipv4Addr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::extract::State;
|
||||
use axum::http::{header, StatusCode};
|
||||
use axum::response::IntoResponse;
|
||||
use axum::routing::get;
|
||||
use axum::Router;
|
||||
use log::info;
|
||||
|
||||
use crate::ctx::ServerCtx;
|
||||
use crate::mobileconfig::{build_mobileconfig, ProfileMode};
|
||||
|
||||
/// Content-Disposition for the full CA + DNS profile download.
|
||||
const FULL_PROFILE_DISPOSITION: &str = "attachment; filename=\"numa.mobileconfig\"";
|
||||
|
||||
/// Content-Disposition for the CA-only profile download.
|
||||
const CA_ONLY_PROFILE_DISPOSITION: &str = "attachment; filename=\"numa-ca.mobileconfig\"";
|
||||
|
||||
/// Build the axum router for the mobile API.
|
||||
///
|
||||
/// Shares handler functions with the main API where possible (`health`,
|
||||
/// `serve_ca`) so the response shapes are identical across both ports.
|
||||
pub fn router(ctx: Arc<ServerCtx>) -> Router {
|
||||
Router::new()
|
||||
.route("/health", get(crate::api::health))
|
||||
.route("/ca.pem", get(crate::api::serve_ca))
|
||||
.route("/mobileconfig", get(serve_full_mobileconfig))
|
||||
.route("/ca.mobileconfig", get(serve_ca_only_mobileconfig))
|
||||
.with_state(ctx)
|
||||
}
|
||||
|
||||
/// Start the mobile API listener on `bind_addr:port`. Runs until the
|
||||
/// caller cancels the spawned task. Logs the URL on successful bind.
|
||||
pub async fn start(ctx: Arc<ServerCtx>, bind_addr: String, port: u16) -> crate::Result<()> {
|
||||
let addr: std::net::SocketAddr = format!("{}:{}", bind_addr, port).parse()?;
|
||||
let listener = tokio::net::TcpListener::bind(addr).await?;
|
||||
|
||||
info!("Mobile API listening on http://{}", addr);
|
||||
|
||||
let app = router(ctx);
|
||||
axum::serve(listener, app).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Serve the full mobileconfig profile (CA + DNS settings), with the
|
||||
/// DNS payload pointing at the current LAN IP. Each request reads the
|
||||
/// fresh LAN IP from `ctx.lan_ip` so the profile always reflects the
|
||||
/// laptop's current network state.
|
||||
async fn serve_full_mobileconfig(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
) -> Result<impl IntoResponse, StatusCode> {
|
||||
let ca_pem = ctx.ca_pem.as_deref().ok_or(StatusCode::NOT_FOUND)?;
|
||||
let lan_ip: Ipv4Addr = *ctx.lan_ip.lock().unwrap();
|
||||
let profile = build_mobileconfig(ProfileMode::Full { lan_ip }, ca_pem);
|
||||
Ok(profile_response(profile, FULL_PROFILE_DISPOSITION))
|
||||
}
|
||||
|
||||
/// Serve the CA-only mobileconfig profile. Trusts the Numa local CA but
|
||||
/// does NOT change the device's DNS settings. Used by the iOS companion
|
||||
/// app's DoT mode, where the app configures DNS via `NEDNSSettingsManager`
|
||||
/// and only needs the system trust store to accept Numa's self-signed cert.
|
||||
async fn serve_ca_only_mobileconfig(
|
||||
State(ctx): State<Arc<ServerCtx>>,
|
||||
) -> Result<impl IntoResponse, StatusCode> {
|
||||
let ca_pem = ctx.ca_pem.as_deref().ok_or(StatusCode::NOT_FOUND)?;
|
||||
let profile = build_mobileconfig(ProfileMode::CaOnly, ca_pem);
|
||||
Ok(profile_response(profile, CA_ONLY_PROFILE_DISPOSITION))
|
||||
}
|
||||
|
||||
/// Shared response constructor for both mobileconfig variants.
|
||||
/// Identical headers; only the Content-Disposition filename differs.
|
||||
fn profile_response(profile: String, disposition: &'static str) -> impl IntoResponse {
|
||||
(
|
||||
[
|
||||
(header::CONTENT_TYPE, "application/x-apple-aspen-config"),
|
||||
(header::CONTENT_DISPOSITION, disposition),
|
||||
(header::CACHE_CONTROL, "no-store"),
|
||||
],
|
||||
profile,
|
||||
)
|
||||
}
|
||||
294
src/mobileconfig.rs
Normal file
294
src/mobileconfig.rs
Normal file
@@ -0,0 +1,294 @@
|
||||
//! Apple `.mobileconfig` profile generator.
|
||||
//!
|
||||
//! Builds iOS Configuration Profiles that Numa serves to phones for one-tap
|
||||
//! CA trust and DNS-over-TLS setup. The plist structure is hand-rendered
|
||||
//! via `format!` — no plist crate dependency, deterministic output, small
|
||||
//! binary footprint.
|
||||
//!
|
||||
//! Two modes:
|
||||
//!
|
||||
//! - [`ProfileMode::Full`]: CA trust payload + DNS settings payload pointing
|
||||
//! at a specific LAN IP over DoT. This is what `numa setup-phone` has
|
||||
//! always produced — the user scans a QR, installs this profile, and the
|
||||
//! phone is configured for DoT through Numa in a single step (after the
|
||||
//! iOS Certificate Trust Settings toggle, which is a separate system
|
||||
//! gate we can't bypass).
|
||||
//!
|
||||
//! - [`ProfileMode::CaOnly`]: CA trust payload only, no DNS settings. Used
|
||||
//! by the future iOS companion app flow where `NEDNSSettingsManager`
|
||||
//! configures DNS programmatically and we only need the system trust
|
||||
//! store to accept Numa's DoT cert. Installing this profile does NOT
|
||||
//! change the user's DNS at all.
|
||||
//!
|
||||
//! Payload identifiers and UUIDs are fixed (not randomized) so iOS replaces
|
||||
//! the existing profile on re-install rather than accumulating duplicates.
|
||||
//! The `Full` and `CaOnly` profiles have distinct top-level UUIDs so they
|
||||
//! can coexist as separate installed profiles, but they share the same CA
|
||||
//! payload UUID since the CA itself is the same trust anchor in both.
|
||||
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
/// Top-level UUID and PayloadIdentifier for the full profile (CA + DNS).
|
||||
/// Changing this breaks in-place replacement on existing iOS installs.
|
||||
const FULL_PROFILE_UUID: &str = "F1E2D3C4-B5A6-7890-1234-567890ABCDEF";
|
||||
const FULL_PROFILE_ID: &str = "com.numa.dns.profile";
|
||||
|
||||
/// Top-level UUID and PayloadIdentifier for the CA-only profile.
|
||||
/// Distinct from `FULL_PROFILE_UUID` so a user can install one, the other,
|
||||
/// or both without the latest install silently replacing a different mode.
|
||||
const CA_ONLY_PROFILE_UUID: &str = "F2E3D4C5-B6A7-8901-2345-67890ABCDEF0";
|
||||
const CA_ONLY_PROFILE_ID: &str = "com.numa.dns.ca.profile";
|
||||
|
||||
/// CA trust payload UUID. Same in both modes — iOS will see "the same CA
|
||||
/// trust anchor" regardless of which wrapping profile contains it.
|
||||
const CA_PAYLOAD_UUID: &str = "B2C3D4E5-F6A7-8901-BCDE-F12345678901";
|
||||
const CA_PAYLOAD_ID: &str = "com.numa.dns.ca";
|
||||
|
||||
/// DNS settings payload UUID (Full mode only).
|
||||
const DNS_PAYLOAD_UUID: &str = "A1B2C3D4-E5F6-7890-ABCD-EF1234567890";
|
||||
const DNS_PAYLOAD_ID: &str = "com.numa.dns.dot";
|
||||
|
||||
/// Profile mode determines which payloads are included in the generated
|
||||
/// `.mobileconfig`.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum ProfileMode {
|
||||
/// Full profile: CA trust anchor + managed DNS settings payload
|
||||
/// pointing at the given LAN IP over DoT. This is what the classic
|
||||
/// `numa setup-phone` QR flow serves.
|
||||
Full { lan_ip: Ipv4Addr },
|
||||
|
||||
/// CA-only profile: just the trust anchor, no DNS settings. For use
|
||||
/// with the iOS companion app which manages DNS programmatically via
|
||||
/// `NEDNSSettingsManager` and only needs the system trust store to
|
||||
/// accept Numa's self-signed DoT cert.
|
||||
CaOnly,
|
||||
}
|
||||
|
||||
/// Build a full `.mobileconfig` profile as an XML plist string.
|
||||
pub fn build_mobileconfig(mode: ProfileMode, ca_pem: &str) -> String {
|
||||
let ca_payload = build_ca_payload(ca_pem);
|
||||
|
||||
match mode {
|
||||
ProfileMode::Full { lan_ip } => {
|
||||
let dns_payload = build_dns_payload(lan_ip);
|
||||
let payloads = format!("{}\n{}", ca_payload, dns_payload);
|
||||
let description = format!(
|
||||
"Trusts the Numa local CA and routes DNS queries to Numa over DoT on your local network ({lan_ip})"
|
||||
);
|
||||
wrap_plist(
|
||||
&payloads,
|
||||
FULL_PROFILE_UUID,
|
||||
FULL_PROFILE_ID,
|
||||
&description,
|
||||
"Numa DNS",
|
||||
)
|
||||
}
|
||||
ProfileMode::CaOnly => wrap_plist(
|
||||
&ca_payload,
|
||||
CA_ONLY_PROFILE_UUID,
|
||||
CA_ONLY_PROFILE_ID,
|
||||
"Trusts the Numa local Certificate Authority. Does not change your DNS settings.",
|
||||
"Numa CA",
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Strip the PEM header/footer and newlines from a CA cert, leaving raw
|
||||
/// base64 for embedding in a plist `<data>` block.
|
||||
fn pem_to_base64(pem: &str) -> String {
|
||||
pem.lines()
|
||||
.filter(|line| !line.starts_with("-----"))
|
||||
.collect::<String>()
|
||||
}
|
||||
|
||||
/// Wrap the base64 CA cert at 52 chars per line for plist readability
|
||||
/// (matches Apple convention in hand-written profiles).
|
||||
fn chunk_base64(base64: &str) -> String {
|
||||
base64
|
||||
.chars()
|
||||
.collect::<Vec<_>>()
|
||||
.chunks(52)
|
||||
.map(|chunk| format!("\t\t\t{}", chunk.iter().collect::<String>()))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n")
|
||||
}
|
||||
|
||||
/// Render the `com.apple.security.root` payload dict containing the CA cert.
|
||||
fn build_ca_payload(ca_pem: &str) -> String {
|
||||
let ca_wrapped = chunk_base64(&pem_to_base64(ca_pem));
|
||||
format!(
|
||||
r#" <dict>
|
||||
<key>PayloadCertificateFileName</key>
|
||||
<string>numa-ca.pem</string>
|
||||
<key>PayloadContent</key>
|
||||
<data>
|
||||
{ca}
|
||||
</data>
|
||||
<key>PayloadDescription</key>
|
||||
<string>Numa local Certificate Authority — required for DoT trust</string>
|
||||
<key>PayloadDisplayName</key>
|
||||
<string>Numa Local CA</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>{ca_id}</string>
|
||||
<key>PayloadType</key>
|
||||
<string>com.apple.security.root</string>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{ca_uuid}</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
</dict>"#,
|
||||
ca = ca_wrapped,
|
||||
ca_id = CA_PAYLOAD_ID,
|
||||
ca_uuid = CA_PAYLOAD_UUID,
|
||||
)
|
||||
}
|
||||
|
||||
/// Render the `com.apple.dnsSettings.managed` payload dict for Full mode.
|
||||
/// Pins the device to Numa as its system resolver over DoT with
|
||||
/// `ServerName = "numa.numa"` (must match the DoT cert SAN).
|
||||
fn build_dns_payload(lan_ip: Ipv4Addr) -> String {
|
||||
format!(
|
||||
r#" <dict>
|
||||
<key>DNSSettings</key>
|
||||
<dict>
|
||||
<key>DNSProtocol</key>
|
||||
<string>TLS</string>
|
||||
<key>ServerAddresses</key>
|
||||
<array>
|
||||
<string>{ip}</string>
|
||||
</array>
|
||||
<key>ServerName</key>
|
||||
<string>numa.numa</string>
|
||||
</dict>
|
||||
<key>PayloadDescription</key>
|
||||
<string>Routes all DNS queries through Numa over DNS-over-TLS</string>
|
||||
<key>PayloadDisplayName</key>
|
||||
<string>Numa DNS-over-TLS</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>{dns_id}</string>
|
||||
<key>PayloadType</key>
|
||||
<string>com.apple.dnsSettings.managed</string>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{dns_uuid}</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
</dict>"#,
|
||||
ip = lan_ip,
|
||||
dns_id = DNS_PAYLOAD_ID,
|
||||
dns_uuid = DNS_PAYLOAD_UUID,
|
||||
)
|
||||
}
|
||||
|
||||
/// Wrap one or more payload dicts in the top-level plist structure
|
||||
/// with Configuration type, PayloadContent array, and profile metadata.
|
||||
fn wrap_plist(
|
||||
payloads: &str,
|
||||
top_uuid: &str,
|
||||
top_id: &str,
|
||||
description: &str,
|
||||
display_name: &str,
|
||||
) -> String {
|
||||
format!(
|
||||
r#"<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>PayloadContent</key>
|
||||
<array>
|
||||
{payloads}
|
||||
</array>
|
||||
<key>PayloadDescription</key>
|
||||
<string>{description}</string>
|
||||
<key>PayloadDisplayName</key>
|
||||
<string>{display_name}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>{top_id}</string>
|
||||
<key>PayloadRemovalDisallowed</key>
|
||||
<false/>
|
||||
<key>PayloadType</key>
|
||||
<string>Configuration</string>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{top_uuid}</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
</dict>
|
||||
</plist>
|
||||
"#,
|
||||
payloads = payloads,
|
||||
description = description,
|
||||
display_name = display_name,
|
||||
top_id = top_id,
|
||||
top_uuid = top_uuid,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
const SAMPLE_PEM: &str =
|
||||
"-----BEGIN CERTIFICATE-----\nMIIBkDCCATagAwIBAgIUTEST\n-----END CERTIFICATE-----\n";
|
||||
|
||||
#[test]
|
||||
fn pem_to_base64_strips_headers() {
|
||||
let pem = "-----BEGIN CERTIFICATE-----\nABCDEF\nGHIJKL\n-----END CERTIFICATE-----\n";
|
||||
assert_eq!(pem_to_base64(pem), "ABCDEFGHIJKL");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn full_profile_contains_ip_and_ca() {
|
||||
let config = build_mobileconfig(
|
||||
ProfileMode::Full {
|
||||
lan_ip: Ipv4Addr::new(192, 168, 1, 100),
|
||||
},
|
||||
SAMPLE_PEM,
|
||||
);
|
||||
assert!(config.contains("192.168.1.100"));
|
||||
assert!(config.contains("MIIBkDCCATagAwIBAgIUTEST"));
|
||||
assert!(config.contains("com.apple.security.root"));
|
||||
assert!(config.contains("com.apple.dnsSettings.managed"));
|
||||
assert!(config.contains("DNSProtocol"));
|
||||
assert!(config.contains(FULL_PROFILE_UUID));
|
||||
assert!(config.contains(FULL_PROFILE_ID));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ca_only_profile_contains_ca_but_not_dns() {
|
||||
let config = build_mobileconfig(ProfileMode::CaOnly, SAMPLE_PEM);
|
||||
assert!(config.contains("MIIBkDCCATagAwIBAgIUTEST"));
|
||||
assert!(config.contains("com.apple.security.root"));
|
||||
assert!(!config.contains("com.apple.dnsSettings.managed"));
|
||||
assert!(!config.contains("DNSProtocol"));
|
||||
assert!(!config.contains("ServerAddresses"));
|
||||
assert!(config.contains(CA_ONLY_PROFILE_UUID));
|
||||
assert!(config.contains(CA_ONLY_PROFILE_ID));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn full_and_ca_only_have_distinct_top_uuids() {
|
||||
let full = build_mobileconfig(
|
||||
ProfileMode::Full {
|
||||
lan_ip: Ipv4Addr::new(10, 0, 0, 1),
|
||||
},
|
||||
SAMPLE_PEM,
|
||||
);
|
||||
let ca_only = build_mobileconfig(ProfileMode::CaOnly, SAMPLE_PEM);
|
||||
assert!(full.contains(FULL_PROFILE_UUID));
|
||||
assert!(!full.contains(CA_ONLY_PROFILE_UUID));
|
||||
assert!(ca_only.contains(CA_ONLY_PROFILE_UUID));
|
||||
assert!(!ca_only.contains(FULL_PROFILE_UUID));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn both_modes_share_ca_payload_uuid() {
|
||||
let full = build_mobileconfig(
|
||||
ProfileMode::Full {
|
||||
lan_ip: Ipv4Addr::new(10, 0, 0, 1),
|
||||
},
|
||||
SAMPLE_PEM,
|
||||
);
|
||||
let ca_only = build_mobileconfig(ProfileMode::CaOnly, SAMPLE_PEM);
|
||||
assert!(full.contains(CA_PAYLOAD_UUID));
|
||||
assert!(ca_only.contains(CA_PAYLOAD_UUID));
|
||||
}
|
||||
}
|
||||
@@ -64,6 +64,9 @@ impl OverrideStore {
|
||||
ttl: u32,
|
||||
duration_secs: Option<u64>,
|
||||
) -> Result<QueryType> {
|
||||
// Clean up expired entries on write
|
||||
self.entries.retain(|_, e| !e.is_expired());
|
||||
|
||||
let domain_lower = domain.to_lowercase();
|
||||
let (qtype, record) = parse_target(&domain_lower, target, ttl)?;
|
||||
|
||||
@@ -84,10 +87,10 @@ impl OverrideStore {
|
||||
}
|
||||
|
||||
/// Hot path: assumes `domain` is already lowercased (the parser does this).
|
||||
pub fn lookup(&mut self, domain: &str) -> Option<DnsRecord> {
|
||||
/// Read-only — expired entries are left in place (cleaned up on write operations).
|
||||
pub fn lookup(&self, domain: &str) -> Option<DnsRecord> {
|
||||
let entry = self.entries.get(domain)?;
|
||||
if entry.is_expired() {
|
||||
self.entries.remove(domain);
|
||||
return None;
|
||||
}
|
||||
Some(entry.record.clone())
|
||||
@@ -114,6 +117,22 @@ impl OverrideStore {
|
||||
self.entries.clear();
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
let per_slot = std::mem::size_of::<u64>()
|
||||
+ std::mem::size_of::<String>()
|
||||
+ std::mem::size_of::<OverrideEntry>()
|
||||
+ 1;
|
||||
let table = self.entries.capacity() * per_slot;
|
||||
let heap: usize = self
|
||||
.entries
|
||||
.iter()
|
||||
.map(|(k, v)| {
|
||||
k.capacity() + v.domain.capacity() + v.target.capacity() + v.record.heap_bytes()
|
||||
})
|
||||
.sum();
|
||||
table + heap
|
||||
}
|
||||
|
||||
pub fn active_count(&self) -> usize {
|
||||
self.entries.values().filter(|e| !e.is_expired()).count()
|
||||
}
|
||||
@@ -151,3 +170,16 @@ fn parse_target(domain: &str, target: &str, ttl: u32) -> Result<(QueryType, DnsR
|
||||
},
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_grows_with_entries() {
|
||||
let mut store = OverrideStore::new();
|
||||
let empty = store.heap_bytes();
|
||||
store.insert("example.com", "1.2.3.4", 300, None).unwrap();
|
||||
assert!(store.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
539
src/packet.rs
539
src/packet.rs
@@ -4,6 +4,31 @@ use crate::question::{DnsQuestion, QueryType};
|
||||
use crate::record::DnsRecord;
|
||||
use crate::Result;
|
||||
|
||||
/// Recommended EDNS0 UDP payload size (DNS Flag Day 2020) — avoids IP fragmentation.
|
||||
pub const DEFAULT_EDNS_PAYLOAD: u16 = 1232;
|
||||
|
||||
/// EDNS0 OPT pseudo-record (RFC 6891)
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct EdnsOpt {
|
||||
pub udp_payload_size: u16,
|
||||
pub extended_rcode: u8,
|
||||
pub version: u8,
|
||||
pub do_bit: bool,
|
||||
pub options: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Default for EdnsOpt {
|
||||
fn default() -> Self {
|
||||
EdnsOpt {
|
||||
udp_payload_size: DEFAULT_EDNS_PAYLOAD,
|
||||
extended_rcode: 0,
|
||||
version: 0,
|
||||
do_bit: false,
|
||||
options: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DnsPacket {
|
||||
pub header: DnsHeader,
|
||||
@@ -11,6 +36,7 @@ pub struct DnsPacket {
|
||||
pub answers: Vec<DnsRecord>,
|
||||
pub authorities: Vec<DnsRecord>,
|
||||
pub resources: Vec<DnsRecord>,
|
||||
pub edns: Option<EdnsOpt>,
|
||||
}
|
||||
|
||||
impl Default for DnsPacket {
|
||||
@@ -27,9 +53,38 @@ impl DnsPacket {
|
||||
answers: Vec::new(),
|
||||
authorities: Vec::new(),
|
||||
resources: Vec::new(),
|
||||
edns: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn query(id: u16, domain: &str, qtype: crate::question::QueryType) -> DnsPacket {
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = id;
|
||||
pkt.header.recursion_desired = true;
|
||||
pkt.questions
|
||||
.push(crate::question::DnsQuestion::new(domain.to_string(), qtype));
|
||||
pkt
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
fn records_heap(records: &[DnsRecord]) -> usize {
|
||||
records
|
||||
.iter()
|
||||
.map(|r| std::mem::size_of::<DnsRecord>() + r.heap_bytes())
|
||||
.sum::<usize>()
|
||||
}
|
||||
let questions: usize = self
|
||||
.questions
|
||||
.iter()
|
||||
.map(|q| std::mem::size_of::<DnsQuestion>() + q.name.capacity())
|
||||
.sum();
|
||||
questions
|
||||
+ records_heap(&self.answers)
|
||||
+ records_heap(&self.authorities)
|
||||
+ records_heap(&self.resources)
|
||||
+ self.edns.as_ref().map_or(0, |e| e.options.capacity())
|
||||
}
|
||||
|
||||
pub fn response_from(query: &DnsPacket, rescode: crate::header::ResultCode) -> DnsPacket {
|
||||
let mut resp = DnsPacket::new();
|
||||
resp.header.id = query.header.id;
|
||||
@@ -46,7 +101,7 @@ impl DnsPacket {
|
||||
result.header.read(buffer)?;
|
||||
|
||||
for _ in 0..result.header.questions {
|
||||
let mut question = DnsQuestion::new("".to_string(), QueryType::UNKNOWN(0));
|
||||
let mut question = DnsQuestion::new(String::with_capacity(64), QueryType::UNKNOWN(0));
|
||||
question.read(buffer)?;
|
||||
result.questions.push(question);
|
||||
}
|
||||
@@ -60,44 +115,83 @@ impl DnsPacket {
|
||||
result.authorities.push(rec);
|
||||
}
|
||||
for _ in 0..result.header.resource_entries {
|
||||
let rec = DnsRecord::read(buffer)?;
|
||||
result.resources.push(rec);
|
||||
// Peek at type field to detect OPT pseudo-records.
|
||||
// OPT name is always root (0x00), so name byte + type field starts at pos+1.
|
||||
let peek_pos = buffer.pos();
|
||||
let name_byte = buffer.get(peek_pos)?;
|
||||
let is_opt = if name_byte == 0 {
|
||||
// Root name (single zero byte) — peek at type
|
||||
let type_hi = buffer.get(peek_pos + 1)?;
|
||||
let type_lo = buffer.get(peek_pos + 2)?;
|
||||
u16::from_be_bytes([type_hi, type_lo]) == 41
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
if is_opt {
|
||||
// Parse OPT manually to capture the class field (= UDP payload size)
|
||||
buffer.step(1)?; // skip root name (0x00)
|
||||
let _ = buffer.read_u16()?; // type (41)
|
||||
let udp_payload_size = buffer.read_u16()?; // class = UDP payload size
|
||||
let ttl_field = buffer.read_u32()?; // packed flags
|
||||
let rdlength = buffer.read_u16()?;
|
||||
let options = buffer.get_range(buffer.pos(), rdlength as usize)?.to_vec();
|
||||
buffer.step(rdlength as usize)?;
|
||||
|
||||
result.edns = Some(EdnsOpt {
|
||||
udp_payload_size,
|
||||
extended_rcode: ((ttl_field >> 24) & 0xFF) as u8,
|
||||
version: ((ttl_field >> 16) & 0xFF) as u8,
|
||||
do_bit: (ttl_field >> 15) & 1 == 1,
|
||||
options,
|
||||
});
|
||||
} else {
|
||||
let rec = DnsRecord::read(buffer)?;
|
||||
result.resources.push(rec);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn write(&self, buffer: &mut BytePacketBuffer) -> Result<()> {
|
||||
// Filter out UNKNOWN records (e.g. EDNS OPT) that we can't re-serialize
|
||||
let answers: Vec<_> = self.answers.iter().filter(|r| !r.is_unknown()).collect();
|
||||
let authorities: Vec<_> = self
|
||||
.authorities
|
||||
.iter()
|
||||
.filter(|r| !r.is_unknown())
|
||||
.collect();
|
||||
let resources: Vec<_> = self.resources.iter().filter(|r| !r.is_unknown()).collect();
|
||||
let edns_count = if self.edns.is_some() { 1u16 } else { 0 };
|
||||
|
||||
let mut header = self.header.clone();
|
||||
header.questions = self.questions.len() as u16;
|
||||
header.answers = answers.len() as u16;
|
||||
header.authoritative_entries = authorities.len() as u16;
|
||||
header.resource_entries = resources.len() as u16;
|
||||
header.answers = self.answers.len() as u16;
|
||||
header.authoritative_entries = self.authorities.len() as u16;
|
||||
header.resource_entries = self.resources.len() as u16 + edns_count;
|
||||
|
||||
header.write(buffer)?;
|
||||
|
||||
for question in &self.questions {
|
||||
question.write(buffer)?;
|
||||
}
|
||||
for rec in answers {
|
||||
for rec in &self.answers {
|
||||
rec.write(buffer)?;
|
||||
}
|
||||
for rec in authorities {
|
||||
for rec in &self.authorities {
|
||||
rec.write(buffer)?;
|
||||
}
|
||||
for rec in resources {
|
||||
for rec in &self.resources {
|
||||
rec.write(buffer)?;
|
||||
}
|
||||
|
||||
// Write EDNS0 OPT pseudo-record
|
||||
if let Some(ref edns) = self.edns {
|
||||
buffer.write_u8(0)?; // root name
|
||||
buffer.write_u16(QueryType::OPT.to_num())?; // type 41
|
||||
buffer.write_u16(edns.udp_payload_size)?; // class = UDP payload size
|
||||
// TTL = extended_rcode(8) | version(8) | DO(1) | Z(15)
|
||||
let ttl_field = ((edns.extended_rcode as u32) << 24)
|
||||
| ((edns.version as u32) << 16)
|
||||
| (if edns.do_bit { 1u32 << 15 } else { 0 });
|
||||
buffer.write_u32(ttl_field)?;
|
||||
buffer.write_u16(edns.options.len() as u16)?; // RDLENGTH
|
||||
buffer.write_bytes(&edns.options)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -116,5 +210,416 @@ impl DnsPacket {
|
||||
for rec in &self.resources {
|
||||
println!("{:#?}", rec);
|
||||
}
|
||||
if let Some(ref edns) = self.edns {
|
||||
println!("EDNS: {:?}", edns);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::header::ResultCode;
|
||||
|
||||
#[test]
|
||||
fn edns_round_trip() {
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = 0x1234;
|
||||
pkt.header.response = true;
|
||||
pkt.header.rescode = ResultCode::NOERROR;
|
||||
pkt.edns = Some(EdnsOpt {
|
||||
do_bit: true,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
pkt.write(&mut buf).unwrap();
|
||||
buf.seek(0).unwrap();
|
||||
let parsed = DnsPacket::from_buffer(&mut buf).unwrap();
|
||||
|
||||
let edns = parsed.edns.expect("EDNS should be present");
|
||||
assert_eq!(edns.udp_payload_size, DEFAULT_EDNS_PAYLOAD);
|
||||
assert!(edns.do_bit);
|
||||
assert_eq!(edns.version, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn edns_do_bit_false() {
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = 0x5678;
|
||||
pkt.header.response = true;
|
||||
pkt.edns = Some(EdnsOpt {
|
||||
udp_payload_size: 1232,
|
||||
do_bit: false,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
pkt.write(&mut buf).unwrap();
|
||||
buf.seek(0).unwrap();
|
||||
let parsed = DnsPacket::from_buffer(&mut buf).unwrap();
|
||||
|
||||
let edns = parsed.edns.expect("EDNS should be present");
|
||||
assert_eq!(edns.udp_payload_size, DEFAULT_EDNS_PAYLOAD);
|
||||
assert!(!edns.do_bit);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn no_edns_by_default() {
|
||||
let pkt = DnsPacket::new();
|
||||
assert!(pkt.edns.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn packet_without_edns_round_trips() {
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = 0xABCD;
|
||||
pkt.header.response = true;
|
||||
pkt.header.rescode = ResultCode::NOERROR;
|
||||
pkt.answers.push(crate::record::DnsRecord::A {
|
||||
domain: "example.com".into(),
|
||||
addr: "1.2.3.4".parse().unwrap(),
|
||||
ttl: 300,
|
||||
});
|
||||
|
||||
let parsed = packet_round_trip(&pkt);
|
||||
assert!(parsed.edns.is_none());
|
||||
assert_eq!(parsed.answers.len(), 1);
|
||||
}
|
||||
|
||||
fn packet_round_trip(pkt: &DnsPacket) -> DnsPacket {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
pkt.write(&mut buf).unwrap();
|
||||
let wire_len = buf.pos();
|
||||
buf.seek(0).unwrap();
|
||||
let parsed = DnsPacket::from_buffer(&mut buf).unwrap();
|
||||
// Verify we consumed exactly what was written
|
||||
assert_eq!(
|
||||
buf.pos(),
|
||||
wire_len,
|
||||
"parse did not consume all written bytes"
|
||||
);
|
||||
parsed
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nxdomain_with_nsec_authority_round_trips() {
|
||||
use crate::question::DnsQuestion;
|
||||
use crate::record::DnsRecord;
|
||||
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = 0x1111;
|
||||
pkt.header.response = true;
|
||||
pkt.header.rescode = ResultCode::NXDOMAIN;
|
||||
pkt.questions.push(DnsQuestion::new(
|
||||
"nonexistent.example.com".into(),
|
||||
QueryType::A,
|
||||
));
|
||||
|
||||
pkt.authorities.push(DnsRecord::NSEC {
|
||||
domain: "alpha.example.com".into(),
|
||||
next_domain: "gamma.example.com".into(),
|
||||
type_bitmap: vec![0, 2, 0x40, 0x01], // A + MX
|
||||
ttl: 3600,
|
||||
});
|
||||
pkt.authorities.push(DnsRecord::RRSIG {
|
||||
domain: "alpha.example.com".into(),
|
||||
type_covered: QueryType::NSEC.to_num(),
|
||||
algorithm: 13,
|
||||
labels: 3,
|
||||
original_ttl: 3600,
|
||||
expiration: 1700000000,
|
||||
inception: 1690000000,
|
||||
key_tag: 12345,
|
||||
signer_name: "example.com".into(),
|
||||
signature: vec![0xAA; 64],
|
||||
ttl: 3600,
|
||||
});
|
||||
|
||||
// Wildcard denial NSEC
|
||||
pkt.authorities.push(DnsRecord::NSEC {
|
||||
domain: "example.com".into(),
|
||||
next_domain: "alpha.example.com".into(),
|
||||
type_bitmap: vec![0, 3, 0x62, 0x01, 0x80], // A, NS, SOA, MX, RRSIG
|
||||
ttl: 3600,
|
||||
});
|
||||
|
||||
pkt.edns = Some(EdnsOpt {
|
||||
do_bit: true,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let parsed = packet_round_trip(&pkt);
|
||||
|
||||
assert_eq!(parsed.header.id, 0x1111);
|
||||
assert_eq!(parsed.header.rescode, ResultCode::NXDOMAIN);
|
||||
assert_eq!(parsed.questions.len(), 1);
|
||||
assert_eq!(parsed.questions[0].name, "nonexistent.example.com");
|
||||
assert_eq!(parsed.authorities.len(), 3);
|
||||
|
||||
// Verify NSEC records survived
|
||||
if let DnsRecord::NSEC {
|
||||
domain,
|
||||
next_domain,
|
||||
type_bitmap,
|
||||
..
|
||||
} = &parsed.authorities[0]
|
||||
{
|
||||
assert_eq!(domain, "alpha.example.com");
|
||||
assert_eq!(next_domain, "gamma.example.com");
|
||||
assert_eq!(type_bitmap, &[0, 2, 0x40, 0x01]);
|
||||
} else {
|
||||
panic!("expected NSEC, got {:?}", parsed.authorities[0]);
|
||||
}
|
||||
|
||||
// Verify RRSIG survived
|
||||
if let DnsRecord::RRSIG {
|
||||
type_covered,
|
||||
signer_name,
|
||||
signature,
|
||||
..
|
||||
} = &parsed.authorities[1]
|
||||
{
|
||||
assert_eq!(*type_covered, QueryType::NSEC.to_num());
|
||||
assert_eq!(signer_name, "example.com");
|
||||
assert_eq!(signature.len(), 64);
|
||||
} else {
|
||||
panic!("expected RRSIG, got {:?}", parsed.authorities[1]);
|
||||
}
|
||||
|
||||
// Verify EDNS survived
|
||||
assert!(parsed.edns.as_ref().unwrap().do_bit);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nxdomain_with_nsec3_authority_round_trips() {
|
||||
use crate::question::DnsQuestion;
|
||||
use crate::record::DnsRecord;
|
||||
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = 0x2222;
|
||||
pkt.header.response = true;
|
||||
pkt.header.rescode = ResultCode::NXDOMAIN;
|
||||
pkt.questions
|
||||
.push(DnsQuestion::new("no.example.com".into(), QueryType::AAAA));
|
||||
|
||||
// Three NSEC3 records (closest encloser, next closer, wildcard)
|
||||
let salt = vec![0xAB, 0xCD];
|
||||
pkt.authorities.push(DnsRecord::NSEC3 {
|
||||
domain: "ABC123.example.com".into(),
|
||||
hash_algorithm: 1,
|
||||
flags: 0,
|
||||
iterations: 5,
|
||||
salt: salt.clone(),
|
||||
next_hashed_owner: vec![
|
||||
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E,
|
||||
0x0F, 0x10, 0x11, 0x12, 0x13, 0x14,
|
||||
],
|
||||
type_bitmap: vec![0, 2, 0x60, 0x01], // NS, SOA, MX
|
||||
ttl: 300,
|
||||
});
|
||||
pkt.authorities.push(DnsRecord::NSEC3 {
|
||||
domain: "DEF456.example.com".into(),
|
||||
hash_algorithm: 1,
|
||||
flags: 0,
|
||||
iterations: 5,
|
||||
salt: salt.clone(),
|
||||
next_hashed_owner: vec![0x20; 20],
|
||||
type_bitmap: vec![0, 1, 0x40], // A
|
||||
ttl: 300,
|
||||
});
|
||||
pkt.authorities.push(DnsRecord::RRSIG {
|
||||
domain: "ABC123.example.com".into(),
|
||||
type_covered: QueryType::NSEC3.to_num(),
|
||||
algorithm: 8,
|
||||
labels: 3,
|
||||
original_ttl: 300,
|
||||
expiration: 2000000000,
|
||||
inception: 1600000000,
|
||||
key_tag: 54321,
|
||||
signer_name: "example.com".into(),
|
||||
signature: vec![0xBB; 128],
|
||||
ttl: 300,
|
||||
});
|
||||
|
||||
pkt.edns = Some(EdnsOpt {
|
||||
do_bit: true,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let parsed = packet_round_trip(&pkt);
|
||||
|
||||
assert_eq!(parsed.header.rescode, ResultCode::NXDOMAIN);
|
||||
assert_eq!(parsed.authorities.len(), 3);
|
||||
|
||||
// Verify first NSEC3 survived with all fields intact
|
||||
if let DnsRecord::NSEC3 {
|
||||
domain,
|
||||
hash_algorithm,
|
||||
flags,
|
||||
iterations,
|
||||
salt: parsed_salt,
|
||||
next_hashed_owner,
|
||||
type_bitmap,
|
||||
..
|
||||
} = &parsed.authorities[0]
|
||||
{
|
||||
assert_eq!(domain, "abc123.example.com");
|
||||
assert_eq!(*hash_algorithm, 1);
|
||||
assert_eq!(*flags, 0);
|
||||
assert_eq!(*iterations, 5);
|
||||
assert_eq!(parsed_salt, &salt);
|
||||
assert_eq!(next_hashed_owner.len(), 20);
|
||||
assert_eq!(type_bitmap, &[0, 2, 0x60, 0x01]);
|
||||
} else {
|
||||
panic!("expected NSEC3, got {:?}", parsed.authorities[0]);
|
||||
}
|
||||
|
||||
// Verify RRSIG covering NSEC3
|
||||
if let DnsRecord::RRSIG {
|
||||
type_covered,
|
||||
algorithm,
|
||||
signature,
|
||||
..
|
||||
} = &parsed.authorities[2]
|
||||
{
|
||||
assert_eq!(*type_covered, QueryType::NSEC3.to_num());
|
||||
assert_eq!(*algorithm, 8);
|
||||
assert_eq!(signature.len(), 128);
|
||||
} else {
|
||||
panic!("expected RRSIG, got {:?}", parsed.authorities[2]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn dnssec_answer_with_rrsig_round_trips() {
|
||||
use crate::question::DnsQuestion;
|
||||
use crate::record::DnsRecord;
|
||||
|
||||
let mut pkt = DnsPacket::new();
|
||||
pkt.header.id = 0x3333;
|
||||
pkt.header.response = true;
|
||||
pkt.header.rescode = ResultCode::NOERROR;
|
||||
pkt.header.authed_data = true;
|
||||
pkt.questions
|
||||
.push(DnsQuestion::new("example.com".into(), QueryType::A));
|
||||
|
||||
pkt.answers.push(DnsRecord::A {
|
||||
domain: "example.com".into(),
|
||||
addr: "93.184.216.34".parse().unwrap(),
|
||||
ttl: 300,
|
||||
});
|
||||
pkt.answers.push(DnsRecord::RRSIG {
|
||||
domain: "example.com".into(),
|
||||
type_covered: QueryType::A.to_num(),
|
||||
algorithm: 13,
|
||||
labels: 2,
|
||||
original_ttl: 300,
|
||||
expiration: 1700000000,
|
||||
inception: 1690000000,
|
||||
key_tag: 11111,
|
||||
signer_name: "example.com".into(),
|
||||
signature: vec![0xCC; 64],
|
||||
ttl: 300,
|
||||
});
|
||||
|
||||
// Authority: NS + DS
|
||||
pkt.authorities.push(DnsRecord::NS {
|
||||
domain: "example.com".into(),
|
||||
host: "ns1.example.com".into(),
|
||||
ttl: 3600,
|
||||
});
|
||||
pkt.authorities.push(DnsRecord::DS {
|
||||
domain: "example.com".into(),
|
||||
key_tag: 22222,
|
||||
algorithm: 8,
|
||||
digest_type: 2,
|
||||
digest: vec![0xDD; 32],
|
||||
ttl: 86400,
|
||||
});
|
||||
|
||||
// Additional: glue A + DNSKEY
|
||||
pkt.resources.push(DnsRecord::A {
|
||||
domain: "ns1.example.com".into(),
|
||||
addr: "198.51.100.1".parse().unwrap(),
|
||||
ttl: 3600,
|
||||
});
|
||||
pkt.resources.push(DnsRecord::DNSKEY {
|
||||
domain: "example.com".into(),
|
||||
flags: 257,
|
||||
protocol: 3,
|
||||
algorithm: 13,
|
||||
public_key: vec![0xEE; 64],
|
||||
ttl: 3600,
|
||||
});
|
||||
|
||||
pkt.edns = Some(EdnsOpt {
|
||||
do_bit: true,
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
let parsed = packet_round_trip(&pkt);
|
||||
|
||||
assert_eq!(parsed.header.id, 0x3333);
|
||||
assert!(parsed.header.authed_data);
|
||||
assert_eq!(parsed.answers.len(), 2);
|
||||
assert_eq!(parsed.authorities.len(), 2);
|
||||
assert_eq!(parsed.resources.len(), 2);
|
||||
|
||||
// Verify A record
|
||||
if let DnsRecord::A { addr, .. } = &parsed.answers[0] {
|
||||
assert_eq!(addr.to_string(), "93.184.216.34");
|
||||
} else {
|
||||
panic!("expected A");
|
||||
}
|
||||
|
||||
// Verify RRSIG in answers
|
||||
if let DnsRecord::RRSIG {
|
||||
type_covered,
|
||||
key_tag,
|
||||
signer_name,
|
||||
..
|
||||
} = &parsed.answers[1]
|
||||
{
|
||||
assert_eq!(*type_covered, 1); // A
|
||||
assert_eq!(*key_tag, 11111);
|
||||
assert_eq!(signer_name, "example.com");
|
||||
} else {
|
||||
panic!("expected RRSIG");
|
||||
}
|
||||
|
||||
// Verify DS in authority
|
||||
if let DnsRecord::DS {
|
||||
key_tag, digest, ..
|
||||
} = &parsed.authorities[1]
|
||||
{
|
||||
assert_eq!(*key_tag, 22222);
|
||||
assert_eq!(digest.len(), 32);
|
||||
} else {
|
||||
panic!("expected DS");
|
||||
}
|
||||
|
||||
// Verify DNSKEY in additional
|
||||
if let DnsRecord::DNSKEY {
|
||||
flags, public_key, ..
|
||||
} = &parsed.resources[1]
|
||||
{
|
||||
assert_eq!(*flags, 257);
|
||||
assert_eq!(public_key.len(), 64);
|
||||
} else {
|
||||
panic!("expected DNSKEY");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_accounts_for_records() {
|
||||
let mut pkt = DnsPacket::new();
|
||||
let empty = pkt.heap_bytes();
|
||||
pkt.answers.push(DnsRecord::A {
|
||||
domain: "example.com".into(),
|
||||
addr: "1.2.3.4".parse().unwrap(),
|
||||
ttl: 300,
|
||||
});
|
||||
assert!(pkt.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
190
src/proxy.rs
190
src/proxy.rs
@@ -1,4 +1,4 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::net::{Ipv4Addr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
|
||||
use axum::body::Body;
|
||||
@@ -11,7 +11,6 @@ use hyper::StatusCode;
|
||||
use hyper_util::client::legacy::Client;
|
||||
use hyper_util::rt::TokioExecutor;
|
||||
use log::{debug, error, info, warn};
|
||||
use rustls::ServerConfig;
|
||||
use tokio::io::copy_bidirectional;
|
||||
use tokio_rustls::TlsAcceptor;
|
||||
|
||||
@@ -25,8 +24,8 @@ struct ProxyState {
|
||||
client: HttpClient,
|
||||
}
|
||||
|
||||
pub async fn start_proxy(ctx: Arc<ServerCtx>, port: u16) {
|
||||
let addr: SocketAddr = ([0, 0, 0, 0], port).into();
|
||||
pub async fn start_proxy(ctx: Arc<ServerCtx>, port: u16, bind_addr: Ipv4Addr) {
|
||||
let addr: SocketAddr = (bind_addr, port).into();
|
||||
let listener = match tokio::net::TcpListener::bind(addr).await {
|
||||
Ok(l) => l,
|
||||
Err(e) => {
|
||||
@@ -50,8 +49,8 @@ pub async fn start_proxy(ctx: Arc<ServerCtx>, port: u16) {
|
||||
axum::serve(listener, app).await.unwrap();
|
||||
}
|
||||
|
||||
pub async fn start_proxy_tls(ctx: Arc<ServerCtx>, port: u16, tls_config: Arc<ServerConfig>) {
|
||||
let addr: SocketAddr = ([0, 0, 0, 0], port).into();
|
||||
pub async fn start_proxy_tls(ctx: Arc<ServerCtx>, port: u16, bind_addr: Ipv4Addr) {
|
||||
let addr: SocketAddr = (bind_addr, port).into();
|
||||
let listener = match tokio::net::TcpListener::bind(addr).await {
|
||||
Ok(l) => l,
|
||||
Err(e) => {
|
||||
@@ -64,11 +63,17 @@ pub async fn start_proxy_tls(ctx: Arc<ServerCtx>, port: u16, tls_config: Arc<Ser
|
||||
};
|
||||
info!("HTTPS proxy listening on {}", addr);
|
||||
|
||||
let acceptor = TlsAcceptor::from(tls_config);
|
||||
if ctx.tls_config.is_none() {
|
||||
warn!("proxy: no TLS config — HTTPS proxy disabled");
|
||||
return;
|
||||
}
|
||||
|
||||
let client: HttpClient = Client::builder(TokioExecutor::new())
|
||||
.http1_preserve_header_case(true)
|
||||
.build_http();
|
||||
|
||||
// Hold a separate Arc so we can access tls_config after ctx moves into ProxyState
|
||||
let tls_holder = Arc::clone(&ctx);
|
||||
let state = ProxyState { ctx, client };
|
||||
|
||||
let app = Router::new().fallback(any(proxy_handler)).with_state(state);
|
||||
@@ -82,7 +87,10 @@ pub async fn start_proxy_tls(ctx: Arc<ServerCtx>, port: u16, tls_config: Arc<Ser
|
||||
}
|
||||
};
|
||||
|
||||
let acceptor = acceptor.clone();
|
||||
// Load the latest TLS config on each connection (picks up new service certs)
|
||||
// unwrap safe: guarded by is_none() check above
|
||||
let acceptor =
|
||||
TlsAcceptor::from(Arc::clone(&*tls_holder.tls_config.as_ref().unwrap().load()));
|
||||
let app = app.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
@@ -109,51 +117,15 @@ pub async fn start_proxy_tls(ctx: Arc<ServerCtx>, port: u16, tls_config: Arc<Ser
|
||||
}
|
||||
}
|
||||
|
||||
fn extract_host(req: &Request) -> Option<String> {
|
||||
req.headers()
|
||||
.get(hyper::header::HOST)
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|h| h.split(':').next().unwrap_or(h).to_lowercase())
|
||||
}
|
||||
|
||||
async fn proxy_handler(State(state): State<ProxyState>, req: Request) -> axum::response::Response {
|
||||
let hostname = match extract_host(&req) {
|
||||
Some(h) => h,
|
||||
None => {
|
||||
return (StatusCode::BAD_REQUEST, "missing Host header").into_response();
|
||||
}
|
||||
};
|
||||
|
||||
let service_name = match hostname.strip_suffix(state.ctx.proxy_tld_suffix.as_str()) {
|
||||
Some(name) => name.to_string(),
|
||||
None => {
|
||||
return (
|
||||
StatusCode::BAD_GATEWAY,
|
||||
format!("not a {} domain: {}", state.ctx.proxy_tld_suffix, hostname),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
};
|
||||
|
||||
let target_port = {
|
||||
let store = state.ctx.services.lock().unwrap();
|
||||
match store.lookup(&service_name) {
|
||||
Some(entry) => entry.target_port,
|
||||
None => {
|
||||
return (
|
||||
StatusCode::NOT_FOUND,
|
||||
[(hyper::header::CONTENT_TYPE, "text/html; charset=utf-8")],
|
||||
format!(
|
||||
r##"<!DOCTYPE html>
|
||||
fn error_page(title: &str, body: &str) -> String {
|
||||
format!(
|
||||
r##"<!DOCTYPE html>
|
||||
<html lang="en"><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width,initial-scale=1">
|
||||
<title>404 — {0}{1}</title>
|
||||
<link rel="preconnect" href="https://fonts.googleapis.com">
|
||||
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
|
||||
<link href="https://fonts.googleapis.com/css2?family=Instrument+Serif:ital@0;1&family=DM+Sans:opsz,wght@9..40,400;9..40,500&family=JetBrains+Mono:wght@400&display=swap" rel="stylesheet">
|
||||
<title>{title} — Numa</title>
|
||||
<style>
|
||||
*,*::before,*::after {{ margin:0;padding:0;box-sizing:border-box }}
|
||||
body {{
|
||||
font-family: 'DM Sans', system-ui, sans-serif;
|
||||
font-family: system-ui, -apple-system, sans-serif;
|
||||
background: #f5f0e8;
|
||||
color: #2c2418;
|
||||
min-height: 100vh;
|
||||
@@ -187,16 +159,24 @@ body::before {{
|
||||
from {{ opacity:0; transform:translateY(20px) }}
|
||||
to {{ opacity:1; transform:translateY(0) }}
|
||||
}}
|
||||
.code {{
|
||||
font-family: 'Instrument Serif', Georgia, serif;
|
||||
.hero-text {{
|
||||
font-family: Georgia, 'Times New Roman', serif;
|
||||
font-size: 6rem;
|
||||
line-height: 1;
|
||||
color: #c0623a;
|
||||
letter-spacing: 0.04em;
|
||||
opacity: 0.85;
|
||||
}}
|
||||
.label {{
|
||||
font-family: ui-monospace, 'SF Mono', monospace;
|
||||
font-size: 0.7rem;
|
||||
letter-spacing: 0.12em;
|
||||
text-transform: uppercase;
|
||||
color: #b5443a;
|
||||
margin-bottom: 1rem;
|
||||
}}
|
||||
.domain {{
|
||||
font-family: 'JetBrains Mono', monospace;
|
||||
font-family: ui-monospace, 'SF Mono', monospace;
|
||||
font-size: 1.1rem;
|
||||
color: #2c2418;
|
||||
margin-top: 1rem;
|
||||
@@ -224,7 +204,7 @@ pre {{
|
||||
color: #e8e0d4;
|
||||
padding: 1rem 1.2rem;
|
||||
border-radius: 8px;
|
||||
font-family: 'JetBrains Mono', monospace;
|
||||
font-family: ui-monospace, 'SF Mono', monospace;
|
||||
font-size: 0.78rem;
|
||||
line-height: 1.7;
|
||||
margin-top: 1.2rem;
|
||||
@@ -233,9 +213,9 @@ pre {{
|
||||
pre .prompt {{ color: #8baa6e }}
|
||||
pre .flag {{ color: #8b9fbb }}
|
||||
pre .str {{ color: #d48a5a }}
|
||||
.lyrics {{
|
||||
.aside {{
|
||||
margin-top: 2.5rem;
|
||||
font-family: 'Instrument Serif', Georgia, serif;
|
||||
font-family: Georgia, 'Times New Roman', serif;
|
||||
font-style: italic;
|
||||
font-size: 0.85rem;
|
||||
color: #a39888;
|
||||
@@ -246,31 +226,103 @@ pre .str {{ color: #d48a5a }}
|
||||
@keyframes fade {{ to {{ opacity: 1 }} }}
|
||||
</style></head><body>
|
||||
<div class="container">
|
||||
<div class="code">404</div>
|
||||
{body}
|
||||
</div>
|
||||
</body></html>"##
|
||||
)
|
||||
}
|
||||
|
||||
fn extract_host(req: &Request) -> Option<String> {
|
||||
req.headers()
|
||||
.get(hyper::header::HOST)
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|h| h.split(':').next().unwrap_or(h).to_lowercase())
|
||||
}
|
||||
|
||||
async fn proxy_handler(State(state): State<ProxyState>, req: Request) -> axum::response::Response {
|
||||
let hostname = match extract_host(&req) {
|
||||
Some(h) => h,
|
||||
None => {
|
||||
return (StatusCode::BAD_REQUEST, "missing Host header").into_response();
|
||||
}
|
||||
};
|
||||
|
||||
let service_name = match hostname.strip_suffix(state.ctx.proxy_tld_suffix.as_str()) {
|
||||
Some(name) => name.to_string(),
|
||||
None => {
|
||||
// Check if this domain was blocked — show a helpful styled page
|
||||
if state.ctx.blocklist.read().unwrap().is_blocked(&hostname) {
|
||||
let body = format!(
|
||||
r#" <div class="hero-text">🛡</div>
|
||||
<div class="label">Blocked by Numa</div>
|
||||
<div class="domain">{0}</div>
|
||||
<p class="message">This domain is on the ad & tracker blocklist.<br>To allow it, use the <a href="http://numa.numa">dashboard</a> or:</p>
|
||||
<pre><span class="prompt">$</span> <span class="str">curl</span> <span class="flag">-X POST</span> localhost:5380/blocking/allowlist \
|
||||
<span class="flag">-d</span> '<span class="str">{{"domain":"{0}"}}</span>'</pre>"#,
|
||||
hostname
|
||||
);
|
||||
return (
|
||||
StatusCode::FORBIDDEN,
|
||||
[(hyper::header::CONTENT_TYPE, "text/html; charset=utf-8")],
|
||||
error_page(&format!("Blocked — {}", hostname), &body),
|
||||
)
|
||||
.into_response();
|
||||
}
|
||||
return (
|
||||
StatusCode::BAD_GATEWAY,
|
||||
format!("not a {} domain: {}", state.ctx.proxy_tld_suffix, hostname),
|
||||
)
|
||||
.into_response();
|
||||
}
|
||||
};
|
||||
|
||||
let request_path = req.uri().path().to_string();
|
||||
|
||||
let (target_host, target_port, rewritten_path) = {
|
||||
let store = state.ctx.services.lock().unwrap();
|
||||
if let Some(entry) = store.lookup(&service_name) {
|
||||
let (port, path) = entry.resolve_route(&request_path);
|
||||
("localhost".to_string(), port, path)
|
||||
} else {
|
||||
let mut peers = state.ctx.lan_peers.lock().unwrap();
|
||||
match peers.lookup(&service_name) {
|
||||
Some((ip, port)) => (ip.to_string(), port, request_path.clone()),
|
||||
None => {
|
||||
let body = format!(
|
||||
r#" <div class="hero-text">404</div>
|
||||
<div class="domain">{0}{1}</div>
|
||||
<p class="message">This service isn't registered yet.<br>Add it from the <a href="http://numa.numa">dashboard</a> or:</p>
|
||||
<pre><span class="prompt">$</span> <span class="str">curl</span> <span class="flag">-X POST</span> numa.numa:5380/services \
|
||||
<span class="flag">-H</span> 'Content-Type: application/json' \
|
||||
<span class="flag">-d</span> '<span class="str">{{"name":"{0}","target_port":3000}}</span>'</pre>
|
||||
<div class="lyrics">ma-ia hii, ma-ia huu, ma-ia haa, ma-ia ha-ha</div>
|
||||
</div>
|
||||
</body></html>"##,
|
||||
<div class="aside">ma-ia hii, ma-ia huu, ma-ia haa, ma-ia ha-ha</div>"#,
|
||||
service_name, state.ctx.proxy_tld_suffix
|
||||
),
|
||||
)
|
||||
.into_response()
|
||||
);
|
||||
return (
|
||||
StatusCode::NOT_FOUND,
|
||||
[(hyper::header::CONTENT_TYPE, "text/html; charset=utf-8")],
|
||||
error_page(
|
||||
&format!("404 — {}{}", service_name, state.ctx.proxy_tld_suffix),
|
||||
&body,
|
||||
),
|
||||
)
|
||||
.into_response();
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let path_and_query = req
|
||||
let query_string = req
|
||||
.uri()
|
||||
.path_and_query()
|
||||
.map(|pq| pq.as_str())
|
||||
.unwrap_or("/");
|
||||
let target_uri: hyper::Uri = format!("http://localhost:{}{}", target_port, path_and_query)
|
||||
.parse()
|
||||
.unwrap();
|
||||
.query()
|
||||
.map(|q| format!("?{}", q))
|
||||
.unwrap_or_default();
|
||||
let target_uri: hyper::Uri = format!(
|
||||
"http://{}:{}{}{}",
|
||||
target_host, target_port, rewritten_path, query_string
|
||||
)
|
||||
.parse()
|
||||
.unwrap();
|
||||
|
||||
// Check for upgrade request (WebSocket, etc.)
|
||||
let is_upgrade = req.headers().get(hyper::header::UPGRADE).is_some();
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::collections::VecDeque;
|
||||
use std::net::SocketAddr;
|
||||
use std::time::SystemTime;
|
||||
|
||||
use crate::cache::DnssecStatus;
|
||||
use crate::header::ResultCode;
|
||||
use crate::question::QueryType;
|
||||
use crate::stats::QueryPath;
|
||||
@@ -14,6 +15,7 @@ pub struct QueryLogEntry {
|
||||
pub path: QueryPath,
|
||||
pub rescode: ResultCode,
|
||||
pub latency_us: u64,
|
||||
pub dnssec: DnssecStatus,
|
||||
}
|
||||
|
||||
pub struct QueryLog {
|
||||
@@ -36,6 +38,21 @@ impl QueryLog {
|
||||
self.entries.push_back(entry);
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.entries.len()
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.entries.is_empty()
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
self.entries
|
||||
.iter()
|
||||
.map(|e| std::mem::size_of::<QueryLogEntry>() + e.domain.capacity())
|
||||
.sum()
|
||||
}
|
||||
|
||||
pub fn query(&self, filter: &QueryLogFilter) -> Vec<&QueryLogEntry> {
|
||||
self.entries
|
||||
.iter()
|
||||
@@ -75,3 +92,25 @@ pub struct QueryLogFilter {
|
||||
pub since: Option<SystemTime>,
|
||||
pub limit: Option<usize>,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_grows_with_entries() {
|
||||
let mut log = QueryLog::new(100);
|
||||
let empty = log.heap_bytes();
|
||||
log.push(QueryLogEntry {
|
||||
timestamp: SystemTime::now(),
|
||||
src_addr: "127.0.0.1:1234".parse().unwrap(),
|
||||
domain: "example.com".into(),
|
||||
query_type: QueryType::A,
|
||||
path: QueryPath::Forwarded,
|
||||
rescode: ResultCode::NOERROR,
|
||||
latency_us: 500,
|
||||
dnssec: DnssecStatus::Indeterminate,
|
||||
});
|
||||
assert!(log.heap_bytes() > empty);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,16 +4,22 @@ use crate::Result;
|
||||
#[derive(PartialEq, Eq, Debug, Clone, Hash, Copy)]
|
||||
pub enum QueryType {
|
||||
UNKNOWN(u16),
|
||||
A, // 1
|
||||
NS, // 2
|
||||
CNAME, // 5
|
||||
SOA, // 6
|
||||
PTR, // 12
|
||||
MX, // 15
|
||||
TXT, // 16
|
||||
AAAA, // 28
|
||||
SRV, // 33
|
||||
HTTPS, // 65
|
||||
A, // 1
|
||||
NS, // 2
|
||||
CNAME, // 5
|
||||
SOA, // 6
|
||||
PTR, // 12
|
||||
MX, // 15
|
||||
TXT, // 16
|
||||
AAAA, // 28
|
||||
SRV, // 33
|
||||
DS, // 43
|
||||
RRSIG, // 46
|
||||
NSEC, // 47
|
||||
DNSKEY, // 48
|
||||
NSEC3, // 50
|
||||
OPT, // 41 (EDNS0 pseudo-type)
|
||||
HTTPS, // 65
|
||||
}
|
||||
|
||||
impl QueryType {
|
||||
@@ -29,6 +35,12 @@ impl QueryType {
|
||||
QueryType::TXT => 16,
|
||||
QueryType::AAAA => 28,
|
||||
QueryType::SRV => 33,
|
||||
QueryType::OPT => 41,
|
||||
QueryType::DS => 43,
|
||||
QueryType::RRSIG => 46,
|
||||
QueryType::NSEC => 47,
|
||||
QueryType::DNSKEY => 48,
|
||||
QueryType::NSEC3 => 50,
|
||||
QueryType::HTTPS => 65,
|
||||
}
|
||||
}
|
||||
@@ -44,6 +56,12 @@ impl QueryType {
|
||||
16 => QueryType::TXT,
|
||||
28 => QueryType::AAAA,
|
||||
33 => QueryType::SRV,
|
||||
41 => QueryType::OPT,
|
||||
43 => QueryType::DS,
|
||||
46 => QueryType::RRSIG,
|
||||
47 => QueryType::NSEC,
|
||||
48 => QueryType::DNSKEY,
|
||||
50 => QueryType::NSEC3,
|
||||
65 => QueryType::HTTPS,
|
||||
_ => QueryType::UNKNOWN(num),
|
||||
}
|
||||
@@ -60,6 +78,12 @@ impl QueryType {
|
||||
QueryType::TXT => "TXT",
|
||||
QueryType::AAAA => "AAAA",
|
||||
QueryType::SRV => "SRV",
|
||||
QueryType::OPT => "OPT",
|
||||
QueryType::DS => "DS",
|
||||
QueryType::RRSIG => "RRSIG",
|
||||
QueryType::NSEC => "NSEC",
|
||||
QueryType::DNSKEY => "DNSKEY",
|
||||
QueryType::NSEC3 => "NSEC3",
|
||||
QueryType::HTTPS => "HTTPS",
|
||||
QueryType::UNKNOWN(_) => "UNKNOWN",
|
||||
}
|
||||
@@ -76,6 +100,11 @@ impl QueryType {
|
||||
"TXT" => Some(QueryType::TXT),
|
||||
"AAAA" => Some(QueryType::AAAA),
|
||||
"SRV" => Some(QueryType::SRV),
|
||||
"DS" => Some(QueryType::DS),
|
||||
"RRSIG" => Some(QueryType::RRSIG),
|
||||
"DNSKEY" => Some(QueryType::DNSKEY),
|
||||
"NSEC" => Some(QueryType::NSEC),
|
||||
"NSEC3" => Some(QueryType::NSEC3),
|
||||
"HTTPS" => Some(QueryType::HTTPS),
|
||||
_ => None,
|
||||
}
|
||||
|
||||
550
src/record.rs
550
src/record.rs
@@ -11,7 +11,7 @@ pub enum DnsRecord {
|
||||
UNKNOWN {
|
||||
domain: String,
|
||||
qtype: u16,
|
||||
data_len: u16,
|
||||
data: Vec<u8>,
|
||||
ttl: u32,
|
||||
},
|
||||
A {
|
||||
@@ -40,11 +40,84 @@ pub enum DnsRecord {
|
||||
addr: Ipv6Addr,
|
||||
ttl: u32,
|
||||
},
|
||||
DNSKEY {
|
||||
domain: String,
|
||||
flags: u16,
|
||||
protocol: u8,
|
||||
algorithm: u8,
|
||||
public_key: Vec<u8>,
|
||||
ttl: u32,
|
||||
},
|
||||
DS {
|
||||
domain: String,
|
||||
key_tag: u16,
|
||||
algorithm: u8,
|
||||
digest_type: u8,
|
||||
digest: Vec<u8>,
|
||||
ttl: u32,
|
||||
},
|
||||
RRSIG {
|
||||
domain: String,
|
||||
type_covered: u16,
|
||||
algorithm: u8,
|
||||
labels: u8,
|
||||
original_ttl: u32,
|
||||
expiration: u32,
|
||||
inception: u32,
|
||||
key_tag: u16,
|
||||
signer_name: String,
|
||||
signature: Vec<u8>,
|
||||
ttl: u32,
|
||||
},
|
||||
NSEC {
|
||||
domain: String,
|
||||
next_domain: String,
|
||||
type_bitmap: Vec<u8>,
|
||||
ttl: u32,
|
||||
},
|
||||
NSEC3 {
|
||||
domain: String,
|
||||
hash_algorithm: u8,
|
||||
flags: u8,
|
||||
iterations: u16,
|
||||
salt: Vec<u8>,
|
||||
next_hashed_owner: Vec<u8>,
|
||||
type_bitmap: Vec<u8>,
|
||||
ttl: u32,
|
||||
},
|
||||
}
|
||||
|
||||
impl DnsRecord {
|
||||
pub fn is_unknown(&self) -> bool {
|
||||
matches!(self, DnsRecord::UNKNOWN { .. })
|
||||
pub fn domain(&self) -> &str {
|
||||
match self {
|
||||
DnsRecord::A { domain, .. }
|
||||
| DnsRecord::NS { domain, .. }
|
||||
| DnsRecord::CNAME { domain, .. }
|
||||
| DnsRecord::MX { domain, .. }
|
||||
| DnsRecord::AAAA { domain, .. }
|
||||
| DnsRecord::DNSKEY { domain, .. }
|
||||
| DnsRecord::DS { domain, .. }
|
||||
| DnsRecord::RRSIG { domain, .. }
|
||||
| DnsRecord::NSEC { domain, .. }
|
||||
| DnsRecord::NSEC3 { domain, .. }
|
||||
| DnsRecord::UNKNOWN { domain, .. } => domain,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn query_type(&self) -> QueryType {
|
||||
match self {
|
||||
DnsRecord::A { .. } => QueryType::A,
|
||||
DnsRecord::AAAA { .. } => QueryType::AAAA,
|
||||
DnsRecord::NS { .. } => QueryType::NS,
|
||||
DnsRecord::CNAME { .. } => QueryType::CNAME,
|
||||
DnsRecord::MX { .. } => QueryType::MX,
|
||||
DnsRecord::DNSKEY { .. } => QueryType::DNSKEY,
|
||||
DnsRecord::DS { .. } => QueryType::DS,
|
||||
DnsRecord::RRSIG { .. } => QueryType::RRSIG,
|
||||
DnsRecord::NSEC { .. } => QueryType::NSEC,
|
||||
DnsRecord::NSEC3 { .. } => QueryType::NSEC3,
|
||||
DnsRecord::UNKNOWN { qtype, .. } => QueryType::UNKNOWN(*qtype),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ttl(&self) -> u32 {
|
||||
@@ -54,10 +127,55 @@ impl DnsRecord {
|
||||
| DnsRecord::CNAME { ttl, .. }
|
||||
| DnsRecord::MX { ttl, .. }
|
||||
| DnsRecord::AAAA { ttl, .. }
|
||||
| DnsRecord::DNSKEY { ttl, .. }
|
||||
| DnsRecord::DS { ttl, .. }
|
||||
| DnsRecord::RRSIG { ttl, .. }
|
||||
| DnsRecord::NSEC { ttl, .. }
|
||||
| DnsRecord::NSEC3 { ttl, .. }
|
||||
| DnsRecord::UNKNOWN { ttl, .. } => *ttl,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn heap_bytes(&self) -> usize {
|
||||
match self {
|
||||
DnsRecord::A { domain, .. } => domain.capacity(),
|
||||
DnsRecord::NS { domain, host, .. } | DnsRecord::CNAME { domain, host, .. } => {
|
||||
domain.capacity() + host.capacity()
|
||||
}
|
||||
DnsRecord::MX { domain, host, .. } => domain.capacity() + host.capacity(),
|
||||
DnsRecord::AAAA { domain, .. } => domain.capacity(),
|
||||
DnsRecord::DNSKEY {
|
||||
domain, public_key, ..
|
||||
} => domain.capacity() + public_key.capacity(),
|
||||
DnsRecord::DS { domain, digest, .. } => domain.capacity() + digest.capacity(),
|
||||
DnsRecord::RRSIG {
|
||||
domain,
|
||||
signer_name,
|
||||
signature,
|
||||
..
|
||||
} => domain.capacity() + signer_name.capacity() + signature.capacity(),
|
||||
DnsRecord::NSEC {
|
||||
domain,
|
||||
next_domain,
|
||||
type_bitmap,
|
||||
..
|
||||
} => domain.capacity() + next_domain.capacity() + type_bitmap.capacity(),
|
||||
DnsRecord::NSEC3 {
|
||||
domain,
|
||||
salt,
|
||||
next_hashed_owner,
|
||||
type_bitmap,
|
||||
..
|
||||
} => {
|
||||
domain.capacity()
|
||||
+ salt.capacity()
|
||||
+ next_hashed_owner.capacity()
|
||||
+ type_bitmap.capacity()
|
||||
}
|
||||
DnsRecord::UNKNOWN { domain, data, .. } => domain.capacity() + data.capacity(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_ttl(&mut self, new_ttl: u32) {
|
||||
match self {
|
||||
DnsRecord::A { ttl, .. }
|
||||
@@ -65,19 +183,25 @@ impl DnsRecord {
|
||||
| DnsRecord::CNAME { ttl, .. }
|
||||
| DnsRecord::MX { ttl, .. }
|
||||
| DnsRecord::AAAA { ttl, .. }
|
||||
| DnsRecord::DNSKEY { ttl, .. }
|
||||
| DnsRecord::DS { ttl, .. }
|
||||
| DnsRecord::RRSIG { ttl, .. }
|
||||
| DnsRecord::NSEC { ttl, .. }
|
||||
| DnsRecord::NSEC3 { ttl, .. }
|
||||
| DnsRecord::UNKNOWN { ttl, .. } => *ttl = new_ttl,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read(buffer: &mut BytePacketBuffer) -> Result<DnsRecord> {
|
||||
let mut domain = String::new();
|
||||
let mut domain = String::with_capacity(64);
|
||||
buffer.read_qname(&mut domain)?;
|
||||
|
||||
let qtype_num = buffer.read_u16()?;
|
||||
let qtype = QueryType::from_num(qtype_num);
|
||||
let _ = buffer.read_u16()?;
|
||||
let _ = buffer.read_u16()?; // class
|
||||
let ttl = buffer.read_u32()?;
|
||||
let data_len = buffer.read_u16()?;
|
||||
let rdata_start = buffer.pos();
|
||||
|
||||
match qtype {
|
||||
QueryType::A => {
|
||||
@@ -88,7 +212,6 @@ impl DnsRecord {
|
||||
((raw_addr >> 8) & 0xFF) as u8,
|
||||
(raw_addr & 0xFF) as u8,
|
||||
);
|
||||
|
||||
Ok(DnsRecord::A { domain, addr, ttl })
|
||||
}
|
||||
QueryType::AAAA => {
|
||||
@@ -106,13 +229,11 @@ impl DnsRecord {
|
||||
((raw_addr4 >> 16) & 0xFFFF) as u16,
|
||||
(raw_addr4 & 0xFFFF) as u16,
|
||||
);
|
||||
|
||||
Ok(DnsRecord::AAAA { domain, addr, ttl })
|
||||
}
|
||||
QueryType::NS => {
|
||||
let mut ns = String::new();
|
||||
let mut ns = String::with_capacity(64);
|
||||
buffer.read_qname(&mut ns)?;
|
||||
|
||||
Ok(DnsRecord::NS {
|
||||
domain,
|
||||
host: ns,
|
||||
@@ -120,9 +241,8 @@ impl DnsRecord {
|
||||
})
|
||||
}
|
||||
QueryType::CNAME => {
|
||||
let mut cname = String::new();
|
||||
let mut cname = String::with_capacity(64);
|
||||
buffer.read_qname(&mut cname)?;
|
||||
|
||||
Ok(DnsRecord::CNAME {
|
||||
domain,
|
||||
host: cname,
|
||||
@@ -131,9 +251,8 @@ impl DnsRecord {
|
||||
}
|
||||
QueryType::MX => {
|
||||
let priority = buffer.read_u16()?;
|
||||
let mut mx = String::new();
|
||||
let mut mx = String::with_capacity(64);
|
||||
buffer.read_qname(&mut mx)?;
|
||||
|
||||
Ok(DnsRecord::MX {
|
||||
domain,
|
||||
priority,
|
||||
@@ -141,13 +260,119 @@ impl DnsRecord {
|
||||
ttl,
|
||||
})
|
||||
}
|
||||
QueryType::DNSKEY => {
|
||||
let flags = buffer.read_u16()?;
|
||||
let protocol = buffer.read()?;
|
||||
let algorithm = buffer.read()?;
|
||||
let key_len = data_len as usize - 4; // flags(2) + protocol(1) + algorithm(1)
|
||||
let public_key = buffer.get_range(buffer.pos(), key_len)?.to_vec();
|
||||
buffer.step(key_len)?;
|
||||
Ok(DnsRecord::DNSKEY {
|
||||
domain,
|
||||
flags,
|
||||
protocol,
|
||||
algorithm,
|
||||
public_key,
|
||||
ttl,
|
||||
})
|
||||
}
|
||||
QueryType::DS => {
|
||||
let key_tag = buffer.read_u16()?;
|
||||
let algorithm = buffer.read()?;
|
||||
let digest_type = buffer.read()?;
|
||||
let digest_len = data_len as usize - 4; // key_tag(2) + algorithm(1) + digest_type(1)
|
||||
let digest = buffer.get_range(buffer.pos(), digest_len)?.to_vec();
|
||||
buffer.step(digest_len)?;
|
||||
Ok(DnsRecord::DS {
|
||||
domain,
|
||||
key_tag,
|
||||
algorithm,
|
||||
digest_type,
|
||||
digest,
|
||||
ttl,
|
||||
})
|
||||
}
|
||||
QueryType::RRSIG => {
|
||||
let type_covered = buffer.read_u16()?;
|
||||
let algorithm = buffer.read()?;
|
||||
let labels = buffer.read()?;
|
||||
let original_ttl = buffer.read_u32()?;
|
||||
let expiration = buffer.read_u32()?;
|
||||
let inception = buffer.read_u32()?;
|
||||
let key_tag = buffer.read_u16()?;
|
||||
let mut signer_name = String::with_capacity(64);
|
||||
buffer.read_qname(&mut signer_name)?;
|
||||
let rdata_end = rdata_start + data_len as usize;
|
||||
let sig_len = rdata_end
|
||||
.checked_sub(buffer.pos())
|
||||
.ok_or("RRSIG data_len too short for fixed fields + signer_name")?;
|
||||
let signature = buffer.get_range(buffer.pos(), sig_len)?.to_vec();
|
||||
buffer.step(sig_len)?;
|
||||
Ok(DnsRecord::RRSIG {
|
||||
domain,
|
||||
type_covered,
|
||||
algorithm,
|
||||
labels,
|
||||
original_ttl,
|
||||
expiration,
|
||||
inception,
|
||||
key_tag,
|
||||
signer_name,
|
||||
signature,
|
||||
ttl,
|
||||
})
|
||||
}
|
||||
QueryType::NSEC => {
|
||||
let rdata_end = rdata_start + data_len as usize;
|
||||
let mut next_domain = String::with_capacity(64);
|
||||
buffer.read_qname(&mut next_domain)?;
|
||||
let bitmap_len = rdata_end
|
||||
.checked_sub(buffer.pos())
|
||||
.ok_or("NSEC data_len too short for type bitmap")?;
|
||||
let type_bitmap = buffer.get_range(buffer.pos(), bitmap_len)?.to_vec();
|
||||
buffer.step(bitmap_len)?;
|
||||
Ok(DnsRecord::NSEC {
|
||||
domain,
|
||||
next_domain,
|
||||
type_bitmap,
|
||||
ttl,
|
||||
})
|
||||
}
|
||||
QueryType::NSEC3 => {
|
||||
let rdata_end = rdata_start + data_len as usize;
|
||||
let hash_algorithm = buffer.read()?;
|
||||
let flags = buffer.read()?;
|
||||
let iterations = buffer.read_u16()?;
|
||||
let salt_length = buffer.read()? as usize;
|
||||
let salt = buffer.get_range(buffer.pos(), salt_length)?.to_vec();
|
||||
buffer.step(salt_length)?;
|
||||
let hash_length = buffer.read()? as usize;
|
||||
let next_hashed_owner = buffer.get_range(buffer.pos(), hash_length)?.to_vec();
|
||||
buffer.step(hash_length)?;
|
||||
let bitmap_len = rdata_end
|
||||
.checked_sub(buffer.pos())
|
||||
.ok_or("NSEC3 data_len too short for type bitmap")?;
|
||||
let type_bitmap = buffer.get_range(buffer.pos(), bitmap_len)?.to_vec();
|
||||
buffer.step(bitmap_len)?;
|
||||
Ok(DnsRecord::NSEC3 {
|
||||
domain,
|
||||
hash_algorithm,
|
||||
flags,
|
||||
iterations,
|
||||
salt,
|
||||
next_hashed_owner,
|
||||
type_bitmap,
|
||||
ttl,
|
||||
})
|
||||
}
|
||||
_ => {
|
||||
// SOA, TXT, SRV, etc. — stored as opaque bytes until parsed natively
|
||||
let data = buffer.get_range(buffer.pos(), data_len as usize)?.to_vec();
|
||||
buffer.step(data_len as usize)?;
|
||||
|
||||
Ok(DnsRecord::UNKNOWN {
|
||||
domain,
|
||||
qtype: qtype_num,
|
||||
data_len,
|
||||
data,
|
||||
ttl,
|
||||
})
|
||||
}
|
||||
@@ -163,32 +388,19 @@ impl DnsRecord {
|
||||
ref addr,
|
||||
ttl,
|
||||
} => {
|
||||
buffer.write_qname(domain)?;
|
||||
buffer.write_u16(QueryType::A.to_num())?;
|
||||
buffer.write_u16(1)?;
|
||||
buffer.write_u32(ttl)?;
|
||||
write_header(buffer, domain, QueryType::A.to_num(), ttl)?;
|
||||
buffer.write_u16(4)?;
|
||||
|
||||
let octets = addr.octets();
|
||||
buffer.write_u8(octets[0])?;
|
||||
buffer.write_u8(octets[1])?;
|
||||
buffer.write_u8(octets[2])?;
|
||||
buffer.write_u8(octets[3])?;
|
||||
buffer.write_bytes(&addr.octets())?;
|
||||
}
|
||||
DnsRecord::NS {
|
||||
ref domain,
|
||||
ref host,
|
||||
ttl,
|
||||
} => {
|
||||
buffer.write_qname(domain)?;
|
||||
buffer.write_u16(QueryType::NS.to_num())?;
|
||||
buffer.write_u16(1)?;
|
||||
buffer.write_u32(ttl)?;
|
||||
|
||||
write_header(buffer, domain, QueryType::NS.to_num(), ttl)?;
|
||||
let pos = buffer.pos();
|
||||
buffer.write_u16(0)?;
|
||||
buffer.write_qname(host)?;
|
||||
|
||||
let size = buffer.pos() - (pos + 2);
|
||||
buffer.set_u16(pos, size as u16)?;
|
||||
}
|
||||
@@ -197,15 +409,10 @@ impl DnsRecord {
|
||||
ref host,
|
||||
ttl,
|
||||
} => {
|
||||
buffer.write_qname(domain)?;
|
||||
buffer.write_u16(QueryType::CNAME.to_num())?;
|
||||
buffer.write_u16(1)?;
|
||||
buffer.write_u32(ttl)?;
|
||||
|
||||
write_header(buffer, domain, QueryType::CNAME.to_num(), ttl)?;
|
||||
let pos = buffer.pos();
|
||||
buffer.write_u16(0)?;
|
||||
buffer.write_qname(host)?;
|
||||
|
||||
let size = buffer.pos() - (pos + 2);
|
||||
buffer.set_u16(pos, size as u16)?;
|
||||
}
|
||||
@@ -215,16 +422,11 @@ impl DnsRecord {
|
||||
ref host,
|
||||
ttl,
|
||||
} => {
|
||||
buffer.write_qname(domain)?;
|
||||
buffer.write_u16(QueryType::MX.to_num())?;
|
||||
buffer.write_u16(1)?;
|
||||
buffer.write_u32(ttl)?;
|
||||
|
||||
write_header(buffer, domain, QueryType::MX.to_num(), ttl)?;
|
||||
let pos = buffer.pos();
|
||||
buffer.write_u16(0)?;
|
||||
buffer.write_u16(priority)?;
|
||||
buffer.write_qname(host)?;
|
||||
|
||||
let size = buffer.pos() - (pos + 2);
|
||||
buffer.set_u16(pos, size as u16)?;
|
||||
}
|
||||
@@ -233,21 +435,269 @@ impl DnsRecord {
|
||||
ref addr,
|
||||
ttl,
|
||||
} => {
|
||||
buffer.write_qname(domain)?;
|
||||
buffer.write_u16(QueryType::AAAA.to_num())?;
|
||||
buffer.write_u16(1)?;
|
||||
buffer.write_u32(ttl)?;
|
||||
write_header(buffer, domain, QueryType::AAAA.to_num(), ttl)?;
|
||||
buffer.write_u16(16)?;
|
||||
|
||||
for octet in &addr.segments() {
|
||||
buffer.write_u16(*octet)?;
|
||||
}
|
||||
}
|
||||
DnsRecord::UNKNOWN { .. } => {
|
||||
log::debug!("Skipping record: {:?}", self);
|
||||
DnsRecord::DNSKEY {
|
||||
ref domain,
|
||||
flags,
|
||||
protocol,
|
||||
algorithm,
|
||||
ref public_key,
|
||||
ttl,
|
||||
} => {
|
||||
write_header(buffer, domain, QueryType::DNSKEY.to_num(), ttl)?;
|
||||
buffer.write_u16((4 + public_key.len()) as u16)?;
|
||||
buffer.write_u16(flags)?;
|
||||
buffer.write_u8(protocol)?;
|
||||
buffer.write_u8(algorithm)?;
|
||||
buffer.write_bytes(public_key)?;
|
||||
}
|
||||
DnsRecord::DS {
|
||||
ref domain,
|
||||
key_tag,
|
||||
algorithm,
|
||||
digest_type,
|
||||
ref digest,
|
||||
ttl,
|
||||
} => {
|
||||
write_header(buffer, domain, QueryType::DS.to_num(), ttl)?;
|
||||
buffer.write_u16((4 + digest.len()) as u16)?;
|
||||
buffer.write_u16(key_tag)?;
|
||||
buffer.write_u8(algorithm)?;
|
||||
buffer.write_u8(digest_type)?;
|
||||
buffer.write_bytes(digest)?;
|
||||
}
|
||||
DnsRecord::RRSIG {
|
||||
ref domain,
|
||||
type_covered,
|
||||
algorithm,
|
||||
labels,
|
||||
original_ttl,
|
||||
expiration,
|
||||
inception,
|
||||
key_tag,
|
||||
ref signer_name,
|
||||
ref signature,
|
||||
ttl,
|
||||
} => {
|
||||
write_header(buffer, domain, QueryType::RRSIG.to_num(), ttl)?;
|
||||
let rdlen_pos = buffer.pos();
|
||||
buffer.write_u16(0)?; // RDLENGTH placeholder
|
||||
buffer.write_u16(type_covered)?;
|
||||
buffer.write_u8(algorithm)?;
|
||||
buffer.write_u8(labels)?;
|
||||
buffer.write_u32(original_ttl)?;
|
||||
buffer.write_u32(expiration)?;
|
||||
buffer.write_u32(inception)?;
|
||||
buffer.write_u16(key_tag)?;
|
||||
buffer.write_qname(signer_name)?;
|
||||
buffer.write_bytes(signature)?;
|
||||
let rdlen = buffer.pos() - (rdlen_pos + 2);
|
||||
buffer.set_u16(rdlen_pos, rdlen as u16)?;
|
||||
}
|
||||
DnsRecord::NSEC {
|
||||
ref domain,
|
||||
ref next_domain,
|
||||
ref type_bitmap,
|
||||
ttl,
|
||||
} => {
|
||||
write_header(buffer, domain, QueryType::NSEC.to_num(), ttl)?;
|
||||
let rdlen_pos = buffer.pos();
|
||||
buffer.write_u16(0)?;
|
||||
buffer.write_qname(next_domain)?;
|
||||
buffer.write_bytes(type_bitmap)?;
|
||||
let rdlen = buffer.pos() - (rdlen_pos + 2);
|
||||
buffer.set_u16(rdlen_pos, rdlen as u16)?;
|
||||
}
|
||||
DnsRecord::NSEC3 {
|
||||
ref domain,
|
||||
hash_algorithm,
|
||||
flags,
|
||||
iterations,
|
||||
ref salt,
|
||||
ref next_hashed_owner,
|
||||
ref type_bitmap,
|
||||
ttl,
|
||||
} => {
|
||||
write_header(buffer, domain, QueryType::NSEC3.to_num(), ttl)?;
|
||||
let rdlen =
|
||||
1 + 1 + 2 + 1 + salt.len() + 1 + next_hashed_owner.len() + type_bitmap.len();
|
||||
buffer.write_u16(rdlen as u16)?;
|
||||
buffer.write_u8(hash_algorithm)?;
|
||||
buffer.write_u8(flags)?;
|
||||
buffer.write_u16(iterations)?;
|
||||
buffer.write_u8(salt.len() as u8)?;
|
||||
buffer.write_bytes(salt)?;
|
||||
buffer.write_u8(next_hashed_owner.len() as u8)?;
|
||||
buffer.write_bytes(next_hashed_owner)?;
|
||||
buffer.write_bytes(type_bitmap)?;
|
||||
}
|
||||
DnsRecord::UNKNOWN {
|
||||
ref domain,
|
||||
qtype,
|
||||
ref data,
|
||||
ttl,
|
||||
} => {
|
||||
write_header(buffer, domain, qtype, ttl)?;
|
||||
buffer.write_u16(data.len() as u16)?;
|
||||
buffer.write_bytes(data)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(buffer.pos() - start_pos)
|
||||
}
|
||||
}
|
||||
|
||||
fn write_header(buffer: &mut BytePacketBuffer, domain: &str, qtype: u16, ttl: u32) -> Result<()> {
|
||||
buffer.write_qname(domain)?;
|
||||
buffer.write_u16(qtype)?;
|
||||
buffer.write_u16(1)?; // class IN
|
||||
buffer.write_u32(ttl)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn round_trip(record: &DnsRecord) -> DnsRecord {
|
||||
let mut buf = BytePacketBuffer::new();
|
||||
record.write(&mut buf).unwrap();
|
||||
buf.seek(0).unwrap();
|
||||
DnsRecord::read(&mut buf).unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unknown_preserves_raw_bytes() {
|
||||
let rec = DnsRecord::UNKNOWN {
|
||||
domain: "example.com".into(),
|
||||
qtype: 99,
|
||||
data: vec![0xDE, 0xAD, 0xBE, 0xEF],
|
||||
ttl: 300,
|
||||
};
|
||||
let parsed = round_trip(&rec);
|
||||
if let DnsRecord::UNKNOWN { data, .. } = &parsed {
|
||||
assert_eq!(data.len(), 4);
|
||||
assert_eq!(data, &[0xDE, 0xAD, 0xBE, 0xEF]);
|
||||
} else {
|
||||
panic!("expected UNKNOWN");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn dnskey_round_trip() {
|
||||
let rec = DnsRecord::DNSKEY {
|
||||
domain: "example.com".into(),
|
||||
flags: 257, // KSK
|
||||
protocol: 3,
|
||||
algorithm: 13, // ECDSAP256SHA256
|
||||
public_key: vec![1, 2, 3, 4, 5, 6, 7, 8],
|
||||
ttl: 3600,
|
||||
};
|
||||
let parsed = round_trip(&rec);
|
||||
assert_eq!(rec, parsed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ds_round_trip() {
|
||||
let rec = DnsRecord::DS {
|
||||
domain: "example.com".into(),
|
||||
key_tag: 12345,
|
||||
algorithm: 8,
|
||||
digest_type: 2,
|
||||
digest: vec![0xAA, 0xBB, 0xCC, 0xDD],
|
||||
ttl: 86400,
|
||||
};
|
||||
let parsed = round_trip(&rec);
|
||||
assert_eq!(rec, parsed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rrsig_round_trip() {
|
||||
let rec = DnsRecord::RRSIG {
|
||||
domain: "example.com".into(),
|
||||
type_covered: 1, // A
|
||||
algorithm: 13,
|
||||
labels: 2,
|
||||
original_ttl: 300,
|
||||
expiration: 1700000000,
|
||||
inception: 1690000000,
|
||||
key_tag: 54321,
|
||||
signer_name: "example.com".into(),
|
||||
signature: vec![0x01, 0x02, 0x03, 0x04, 0x05],
|
||||
ttl: 300,
|
||||
};
|
||||
let parsed = round_trip(&rec);
|
||||
assert_eq!(rec, parsed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn query_type_method() {
|
||||
assert_eq!(
|
||||
DnsRecord::DNSKEY {
|
||||
domain: String::new(),
|
||||
flags: 0,
|
||||
protocol: 3,
|
||||
algorithm: 8,
|
||||
public_key: vec![],
|
||||
ttl: 0,
|
||||
}
|
||||
.query_type(),
|
||||
QueryType::DNSKEY
|
||||
);
|
||||
assert_eq!(
|
||||
DnsRecord::DS {
|
||||
domain: String::new(),
|
||||
key_tag: 0,
|
||||
algorithm: 0,
|
||||
digest_type: 0,
|
||||
digest: vec![],
|
||||
ttl: 0,
|
||||
}
|
||||
.query_type(),
|
||||
QueryType::DS
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nsec_round_trip() {
|
||||
let rec = DnsRecord::NSEC {
|
||||
domain: "alpha.example.com".into(),
|
||||
next_domain: "gamma.example.com".into(),
|
||||
type_bitmap: vec![0, 2, 0x40, 0x01], // A(1), MX(15)
|
||||
ttl: 3600,
|
||||
};
|
||||
let parsed = round_trip(&rec);
|
||||
assert_eq!(rec, parsed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn nsec3_round_trip() {
|
||||
let rec = DnsRecord::NSEC3 {
|
||||
domain: "abc123.example.com".into(),
|
||||
hash_algorithm: 1,
|
||||
flags: 0,
|
||||
iterations: 10,
|
||||
salt: vec![0xAB, 0xCD],
|
||||
next_hashed_owner: vec![0x01, 0x02, 0x03, 0x04, 0x05],
|
||||
type_bitmap: vec![0, 1, 0x40], // A(1)
|
||||
ttl: 3600,
|
||||
};
|
||||
let parsed = round_trip(&rec);
|
||||
assert_eq!(rec, parsed);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn heap_bytes_reflects_string_capacity() {
|
||||
let rec = DnsRecord::CNAME {
|
||||
domain: "a]".repeat(100),
|
||||
host: "b".repeat(200),
|
||||
ttl: 60,
|
||||
};
|
||||
assert!(rec.heap_bytes() >= 300);
|
||||
}
|
||||
}
|
||||
|
||||
1135
src/recursive.rs
Normal file
1135
src/recursive.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,4 +1,4 @@
|
||||
use std::collections::HashMap;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::path::PathBuf;
|
||||
|
||||
use log::{info, warn};
|
||||
@@ -8,12 +8,56 @@ use serde::{Deserialize, Serialize};
|
||||
pub struct ServiceEntry {
|
||||
pub name: String,
|
||||
pub target_port: u16,
|
||||
#[serde(default)]
|
||||
pub routes: Vec<RouteEntry>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Serialize, Deserialize)]
|
||||
pub struct RouteEntry {
|
||||
pub path: String,
|
||||
pub port: u16,
|
||||
#[serde(default)]
|
||||
pub strip: bool,
|
||||
}
|
||||
|
||||
impl ServiceEntry {
|
||||
/// Resolve backend port and (possibly rewritten) path for a request
|
||||
pub fn resolve_route(&self, request_path: &str) -> (u16, String) {
|
||||
// Longest prefix match
|
||||
let matched = self
|
||||
.routes
|
||||
.iter()
|
||||
.filter(|r| {
|
||||
request_path == r.path
|
||||
|| (request_path.starts_with(&r.path)
|
||||
&& (r.path.ends_with('/')
|
||||
|| request_path.as_bytes().get(r.path.len()) == Some(&b'/')))
|
||||
})
|
||||
.max_by_key(|r| r.path.len());
|
||||
|
||||
match matched {
|
||||
Some(route) => {
|
||||
let path = if route.strip {
|
||||
let stripped = &request_path[route.path.len()..];
|
||||
if stripped.is_empty() || !stripped.starts_with('/') {
|
||||
format!("/{}", stripped.trim_start_matches('/'))
|
||||
} else {
|
||||
stripped.to_string()
|
||||
}
|
||||
} else {
|
||||
request_path.to_string()
|
||||
};
|
||||
(route.port, path)
|
||||
}
|
||||
None => (self.target_port, request_path.to_string()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ServiceStore {
|
||||
entries: HashMap<String, ServiceEntry>,
|
||||
/// Services defined in numa.toml (not persisted to user file)
|
||||
config_services: std::collections::HashSet<String>,
|
||||
config_services: HashSet<String>,
|
||||
persist_path: PathBuf,
|
||||
}
|
||||
|
||||
@@ -28,13 +72,13 @@ impl ServiceStore {
|
||||
let persist_path = dirs_path();
|
||||
ServiceStore {
|
||||
entries: HashMap::new(),
|
||||
config_services: std::collections::HashSet::new(),
|
||||
config_services: HashSet::new(),
|
||||
persist_path,
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a service from numa.toml config (not persisted)
|
||||
pub fn insert_from_config(&mut self, name: &str, target_port: u16) {
|
||||
pub fn insert_from_config(&mut self, name: &str, target_port: u16, routes: Vec<RouteEntry>) {
|
||||
let key = name.to_lowercase();
|
||||
self.config_services.insert(key.clone());
|
||||
self.entries.insert(
|
||||
@@ -42,6 +86,7 @@ impl ServiceStore {
|
||||
ServiceEntry {
|
||||
name: key,
|
||||
target_port,
|
||||
routes,
|
||||
},
|
||||
);
|
||||
}
|
||||
@@ -54,11 +99,37 @@ impl ServiceStore {
|
||||
ServiceEntry {
|
||||
name: key,
|
||||
target_port,
|
||||
routes: Vec::new(),
|
||||
},
|
||||
);
|
||||
self.save();
|
||||
}
|
||||
|
||||
pub fn add_route(&mut self, service: &str, path: String, port: u16, strip: bool) -> bool {
|
||||
let key = service.to_lowercase();
|
||||
if let Some(entry) = self.entries.get_mut(&key) {
|
||||
entry.routes.retain(|r| r.path != path);
|
||||
entry.routes.push(RouteEntry { path, port, strip });
|
||||
self.save();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn remove_route(&mut self, service: &str, path: &str) -> bool {
|
||||
let key = service.to_lowercase();
|
||||
if let Some(entry) = self.entries.get_mut(&key) {
|
||||
let before = entry.routes.len();
|
||||
entry.routes.retain(|r| r.path != path);
|
||||
if entry.routes.len() < before {
|
||||
self.save();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
pub fn lookup(&self, name: &str) -> Option<&ServiceEntry> {
|
||||
self.entries.get(&name.to_lowercase())
|
||||
}
|
||||
@@ -72,12 +143,26 @@ impl ServiceStore {
|
||||
removed
|
||||
}
|
||||
|
||||
/// Names are always stored lowercased, so callers must pass lowercase keys.
|
||||
pub fn is_config_service(&self, name: &str) -> bool {
|
||||
self.config_services.contains(name)
|
||||
}
|
||||
|
||||
pub fn list(&self) -> Vec<&ServiceEntry> {
|
||||
let mut entries: Vec<_> = self.entries.values().collect();
|
||||
entries.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
entries
|
||||
}
|
||||
|
||||
pub fn names(&self) -> Vec<String> {
|
||||
self.entries.keys().cloned().collect()
|
||||
}
|
||||
|
||||
/// Returns true if the name is new (not already registered).
|
||||
pub fn has_name(&self, name: &str) -> bool {
|
||||
self.entries.contains_key(&name.to_lowercase())
|
||||
}
|
||||
|
||||
/// Load user-defined services from ~/.config/numa/services.json
|
||||
pub fn load_persisted(&mut self) {
|
||||
if !self.persist_path.exists() {
|
||||
@@ -133,3 +218,157 @@ impl ServiceStore {
|
||||
fn dirs_path() -> PathBuf {
|
||||
crate::config_dir().join("services.json")
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
fn entry(port: u16, routes: Vec<RouteEntry>) -> ServiceEntry {
|
||||
ServiceEntry {
|
||||
name: "app".into(),
|
||||
target_port: port,
|
||||
routes,
|
||||
}
|
||||
}
|
||||
|
||||
fn route(path: &str, port: u16, strip: bool) -> RouteEntry {
|
||||
RouteEntry {
|
||||
path: path.into(),
|
||||
port,
|
||||
strip,
|
||||
}
|
||||
}
|
||||
|
||||
fn test_store() -> ServiceStore {
|
||||
ServiceStore {
|
||||
entries: HashMap::new(),
|
||||
config_services: HashSet::new(),
|
||||
persist_path: PathBuf::from("/dev/null"),
|
||||
}
|
||||
}
|
||||
|
||||
// --- resolve_route ---
|
||||
|
||||
#[test]
|
||||
fn no_routes_returns_default_port() {
|
||||
let e = entry(3000, vec![]);
|
||||
assert_eq!(e.resolve_route("/anything"), (3000, "/anything".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn exact_match() {
|
||||
let e = entry(3000, vec![route("/api", 4000, false)]);
|
||||
assert_eq!(e.resolve_route("/api"), (4000, "/api".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prefix_match() {
|
||||
let e = entry(3000, vec![route("/api", 4000, false)]);
|
||||
assert_eq!(e.resolve_route("/api/users"), (4000, "/api/users".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn segment_boundary_rejects_partial() {
|
||||
let e = entry(3000, vec![route("/api", 4000, false)]);
|
||||
// /apiary must NOT match /api — different segment
|
||||
assert_eq!(e.resolve_route("/apiary"), (3000, "/apiary".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn segment_boundary_rejects_apikey() {
|
||||
let e = entry(3000, vec![route("/api", 4000, false)]);
|
||||
assert_eq!(e.resolve_route("/apikey"), (3000, "/apikey".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn longest_prefix_wins() {
|
||||
let e = entry(
|
||||
3000,
|
||||
vec![route("/api", 4000, false), route("/api/v2", 5000, false)],
|
||||
);
|
||||
assert_eq!(
|
||||
e.resolve_route("/api/v2/users"),
|
||||
(5000, "/api/v2/users".into())
|
||||
);
|
||||
// shorter prefix still works for non-v2 paths
|
||||
assert_eq!(
|
||||
e.resolve_route("/api/v1/users"),
|
||||
(4000, "/api/v1/users".into())
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn strip_removes_prefix() {
|
||||
let e = entry(3000, vec![route("/api", 4000, true)]);
|
||||
assert_eq!(e.resolve_route("/api/users"), (4000, "/users".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn strip_exact_path_gives_root() {
|
||||
let e = entry(3000, vec![route("/api", 4000, true)]);
|
||||
assert_eq!(e.resolve_route("/api"), (4000, "/".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trailing_slash_route_matches() {
|
||||
let e = entry(3000, vec![route("/app/", 4000, false)]);
|
||||
assert_eq!(
|
||||
e.resolve_route("/app/dashboard"),
|
||||
(4000, "/app/dashboard".into())
|
||||
);
|
||||
}
|
||||
|
||||
// --- ServiceStore: add_route / remove_route ---
|
||||
|
||||
#[test]
|
||||
fn add_route_to_existing_service() {
|
||||
let mut store = test_store();
|
||||
store.insert_from_config("app", 3000, vec![]);
|
||||
assert!(store.add_route("app", "/api".into(), 4000, false));
|
||||
let entry = store.lookup("app").unwrap();
|
||||
assert_eq!(entry.routes.len(), 1);
|
||||
assert_eq!(entry.routes[0].path, "/api");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_route_to_missing_service_returns_false() {
|
||||
let mut store = test_store();
|
||||
assert!(!store.add_route("ghost", "/api".into(), 4000, false));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_route_deduplicates_by_path() {
|
||||
let mut store = test_store();
|
||||
store.insert_from_config("app", 3000, vec![]);
|
||||
store.add_route("app", "/api".into(), 4000, false);
|
||||
store.add_route("app", "/api".into(), 5000, true);
|
||||
let entry = store.lookup("app").unwrap();
|
||||
assert_eq!(entry.routes.len(), 1);
|
||||
assert_eq!(entry.routes[0].port, 5000);
|
||||
assert!(entry.routes[0].strip);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn remove_route_returns_true_when_found() {
|
||||
let mut store = test_store();
|
||||
store.insert_from_config("app", 3000, vec![route("/api", 4000, false)]);
|
||||
assert!(store.remove_route("app", "/api"));
|
||||
assert!(store.lookup("app").unwrap().routes.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn remove_route_returns_false_when_missing() {
|
||||
let mut store = test_store();
|
||||
store.insert_from_config("app", 3000, vec![]);
|
||||
assert!(!store.remove_route("app", "/nope"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn lookup_is_case_insensitive() {
|
||||
let mut store = test_store();
|
||||
store.insert_from_config("MyApp", 3000, vec![]);
|
||||
assert!(store.lookup("myapp").is_some());
|
||||
assert!(store.lookup("MYAPP").is_some());
|
||||
}
|
||||
}
|
||||
|
||||
126
src/setup_phone.rs
Normal file
126
src/setup_phone.rs
Normal file
@@ -0,0 +1,126 @@
|
||||
//! `numa setup-phone` CLI — thin QR wrapper over the persistent mobile API.
|
||||
//!
|
||||
//! Before the mobile API existed, this command spawned its own one-shot
|
||||
//! HTTP server on port 8765 to serve a freshly-generated mobileconfig
|
||||
//! for a single download. That role now belongs to
|
||||
//! [`crate::mobile_api`], which runs persistently alongside the main
|
||||
//! API and serves `/mobileconfig` at the same port whenever Numa is
|
||||
//! running.
|
||||
//!
|
||||
//! This command is now a thin terminal-side wrapper:
|
||||
//!
|
||||
//! 1. Detect the current LAN IP
|
||||
//! 2. Render a terminal QR code pointing at
|
||||
//! `http://<lan_ip>:8765/mobileconfig`
|
||||
//! 3. Print install instructions and exit
|
||||
//!
|
||||
//! The user scans the QR, iOS fetches the profile from the mobile API
|
||||
//! (which is always up as long as `numa` is running), installs, and the
|
||||
//! user walks through Settings → Certificate Trust Settings to enable
|
||||
//! trust.
|
||||
//!
|
||||
//! Numa must be running for the profile download to succeed; if the
|
||||
//! mobile API is not listening on port 8765, the download will fail
|
||||
//! and the user will see Safari's "Cannot Connect to Server" error.
|
||||
//! The CLI prints a reminder about this at the bottom of the output.
|
||||
|
||||
use qrcode::render::unicode;
|
||||
use qrcode::QrCode;
|
||||
|
||||
/// Default port where the persistent mobile API serves `/mobileconfig`.
|
||||
/// Matches `MobileConfig::default().port` in `config.rs`. If the user
|
||||
/// has overridden `[mobile] port = N` in `numa.toml`, they'll need to
|
||||
/// adjust the URL manually — this CLI uses the default without parsing
|
||||
/// `numa.toml`.
|
||||
const SETUP_PORT: u16 = 8765;
|
||||
|
||||
fn render_qr(url: &str) -> Result<String, String> {
|
||||
let code = QrCode::new(url).map_err(|e| format!("failed to encode QR: {}", e))?;
|
||||
Ok(code
|
||||
.render::<unicode::Dense1x2>()
|
||||
.dark_color(unicode::Dense1x2::Light)
|
||||
.light_color(unicode::Dense1x2::Dark)
|
||||
.build())
|
||||
}
|
||||
|
||||
/// Run the `numa setup-phone` flow.
///
/// Steps (see the module docs): detect the LAN IP, verify the persistent
/// mobile API is reachable, then print a QR code pointing the phone at
/// `http://<lan_ip>:8765/mobileconfig` plus install instructions.
///
/// Errors: returns `Err` when no LAN IP can be detected or when nothing
/// is listening on `SETUP_PORT` (the profile download would fail anyway).
pub async fn run() -> Result<(), String> {
    let lan_ip = crate::lan::detect_lan_ip()
        .ok_or("could not detect LAN IP — are you connected to a network?")?;

    // Reachability probe: try a TCP connect to loopback:SETUP_PORT with a
    // 500ms cap so the CLI never hangs. NOTE(review): the probe targets
    // 127.0.0.1 while the QR URL uses the LAN IP — this assumes the mobile
    // API binds all interfaces (or at least both); confirm against mobile_api.
    let addr = std::net::SocketAddr::from(([127, 0, 0, 1], SETUP_PORT));
    let api_reachable = tokio::time::timeout(
        std::time::Duration::from_millis(500),
        tokio::net::TcpStream::connect(addr),
    )
    .await
    // Outer Err = timeout elapsed, inner Err = connection refused/failed;
    // either way the API is treated as unreachable.
    .map(|r| r.is_ok())
    .unwrap_or(false);

    if !api_reachable {
        // Fail early with actionable guidance rather than printing a QR
        // code that would lead to Safari's "Cannot Connect to Server".
        eprintln!();
        eprintln!(
            " \x1b[1;38;2;192;98;58mNuma\x1b[0m — mobile API is not reachable on port {}.",
            SETUP_PORT
        );
        eprintln!();
        eprintln!(" The phone won't be able to download the profile until the mobile");
        eprintln!(" API is running. Add this to your numa.toml and restart Numa:");
        eprintln!();
        eprintln!(" [mobile]");
        eprintln!(" enabled = true");
        eprintln!();
        return Err("mobile API not running".into());
    }

    // The URL embedded in the QR uses the LAN IP so the phone (on the same
    // network) can reach the laptop; SETUP_PORT is the mobile API's default.
    let url = format!("http://{}:{}/mobileconfig", lan_ip, SETUP_PORT);
    let qr = render_qr(&url)?;

    // Everything below is human-facing output on stderr: header, URL,
    // QR art, step-by-step install instructions, and caveats.
    eprintln!();
    eprintln!(" \x1b[1;38;2;192;98;58mNuma Phone Setup\x1b[0m");
    eprintln!();
    eprintln!(" Profile URL: \x1b[36m{}\x1b[0m", url);
    eprintln!();
    for line in qr.lines() {
        eprintln!(" {}", line);
    }
    eprintln!();
    eprintln!(" \x1b[1mOn your iPhone:\x1b[0m");
    eprintln!(" 1. Open Camera, point at the QR code, tap the yellow banner");
    eprintln!(" 2. Allow the download when Safari asks");
    eprintln!(" 3. Open Settings — tap \"Profile Downloaded\" near the top");
    eprintln!(" (or: Settings → General → VPN & Device Management → Numa DNS)");
    eprintln!(" 4. Tap Install (top right), enter passcode, Install again");
    eprintln!(" 5. \x1b[1mSettings → General → About → Certificate Trust Settings\x1b[0m");
    eprintln!(" Toggle ON \"Numa Local CA\" — required for DoT to work");
    eprintln!();
    eprintln!(
        " \x1b[33mNote:\x1b[0m profile uses your laptop's current IP ({}). If your",
        lan_ip
    );
    eprintln!(" laptop changes networks, re-scan this QR — iOS will replace the");
    eprintln!(" existing profile automatically (fixed UUID).");
    eprintln!();
    eprintln!(
        " \x1b[90mThe profile is served by Numa's persistent mobile API on port {}.\x1b[0m",
        SETUP_PORT
    );
    eprintln!(" \x1b[90mMake sure `numa` is running before scanning. If it's not,\x1b[0m");
    eprintln!(" \x1b[90mstart it with `sudo numa install` or run it interactively.\x1b[0m");
    eprintln!();

    Ok(())
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test for the QR rendering path; does not validate scannability,
    // only that the renderer produces non-empty half-block art.
    #[test]
    fn render_qr_produces_unicode() {
        let qr = render_qr("http://192.168.1.9:8765/mobileconfig").unwrap();
        assert!(!qr.is_empty());
        // Dense1x2 uses these block characters
        assert!(qr.chars().any(|c| matches!(c, '█' | '▀' | '▄' | ' ')));
    }
}
|
||||
318
src/srtt.rs
Normal file
318
src/srtt.rs
Normal file
@@ -0,0 +1,318 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::time::Instant;
|
||||
|
||||
// Tuning knobs for the BIND-style smoothed-RTT server ranking.
const INITIAL_SRTT_MS: u64 = 200;
const FAILURE_PENALTY_MS: u64 = 5000;
const TCP_PENALTY_MS: u64 = 100;
const DECAY_AFTER_SECS: u64 = 300;
const MAX_ENTRIES: usize = 4096;
const EVICT_BATCH: usize = 64;

/// Per-server smoothed RTT plus the instant it was last touched
/// (used both for staleness decay and for LRU-style eviction).
struct SrttEntry {
    srtt_ms: u64,
    updated_at: Instant,
}

/// Smoothed-RTT cache: ranks upstream DNS servers by responsiveness.
/// When `enabled` is false, every mutator and the sorter are no-ops.
pub struct SrttCache {
    entries: HashMap<IpAddr, SrttEntry>,
    enabled: bool,
}

impl Default for SrttCache {
    fn default() -> Self {
        Self::new(true)
    }
}

impl SrttCache {
    /// Create an empty cache; pass `false` to disable all tracking.
    pub fn new(enabled: bool) -> Self {
        SrttCache {
            entries: HashMap::new(),
            enabled,
        }
    }

    pub fn is_enabled(&self) -> bool {
        self.enabled
    }

    /// Get current SRTT for an IP, applying decay if stale. Returns INITIAL for unknown.
    pub fn get(&self, ip: IpAddr) -> u64 {
        self.entries
            .get(&ip)
            .map(Self::decayed_srtt)
            .unwrap_or(INITIAL_SRTT_MS)
    }

    /// Apply time-based decay: each DECAY_AFTER_SECS period halves distance to INITIAL.
    fn decayed_srtt(entry: &SrttEntry) -> u64 {
        Self::decay_for_age(entry.srtt_ms, entry.updated_at.elapsed().as_secs())
    }

    /// Pure decay function: halve the distance to INITIAL once per elapsed
    /// period, capped at 8 halvings (beyond that the value is within
    /// integer-rounding distance of INITIAL anyway). At or below the
    /// threshold no decay is applied.
    fn decay_for_age(srtt_ms: u64, age_secs: u64) -> u64 {
        if age_secs <= DECAY_AFTER_SECS {
            return srtt_ms;
        }
        let periods = (age_secs / DECAY_AFTER_SECS).min(8);
        (0..periods).fold(srtt_ms, |srtt, _| (srtt + INITIAL_SRTT_MS) / 2)
    }

    /// Record a successful query RTT. No-op when disabled.
    pub fn record_rtt(&mut self, ip: IpAddr, rtt_ms: u64, tcp: bool) {
        if !self.enabled {
            return;
        }
        // TCP fallback carries a flat penalty so UDP-capable servers win ties.
        let sample = if tcp { rtt_ms + TCP_PENALTY_MS } else { rtt_ms };
        self.maybe_evict();
        let now = Instant::now();
        if let Some(entry) = self.entries.get_mut(&ip) {
            // Decay the stored value first so a server recovering from an old
            // failure penalty isn't stuck at a stale number, then fold the
            // sample in with the BIND EWMA: new = (old * 7 + sample) / 8.
            let base = Self::decayed_srtt(entry);
            entry.srtt_ms = (base * 7 + sample) / 8;
            entry.updated_at = now;
        } else {
            // Fresh entry: (sample * 7 + sample) / 8 == sample exactly,
            // so the sample is stored directly.
            self.entries.insert(
                ip,
                SrttEntry {
                    srtt_ms: sample,
                    updated_at: now,
                },
            );
        }
    }

    /// Record a failure (timeout or error). No-op when disabled.
    pub fn record_failure(&mut self, ip: IpAddr) {
        if !self.enabled {
            return;
        }
        self.maybe_evict();
        // A failure unconditionally resets the entry to the flat penalty —
        // no EWMA blending, so one timeout immediately demotes the server.
        self.entries.insert(
            ip,
            SrttEntry {
                srtt_ms: FAILURE_PENALTY_MS,
                updated_at: Instant::now(),
            },
        );
    }

    /// Sort addresses by SRTT ascending (lowest/fastest first). No-op when disabled.
    pub fn sort_by_rtt(&self, addrs: &mut [SocketAddr]) {
        if self.enabled {
            // Stable sort: unknown servers (all INITIAL) keep caller order.
            addrs.sort_by_key(|addr| self.get(addr.ip()));
        }
    }

    /// Rough estimate of the map's heap usage, for stats reporting only.
    pub fn heap_bytes(&self) -> usize {
        let per_slot = std::mem::size_of::<u64>()
            + std::mem::size_of::<IpAddr>()
            + std::mem::size_of::<SrttEntry>()
            + 1;
        per_slot * self.entries.capacity()
    }

    pub fn len(&self) -> usize {
        self.entries.len()
    }

    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }

    /// When the map is at capacity, drop the EVICT_BATCH least-recently
    /// touched entries in one pass so eviction cost is amortized across
    /// many inserts instead of paid on every one.
    fn maybe_evict(&mut self) {
        if self.entries.len() < MAX_ENTRIES {
            return;
        }
        let mut aged: Vec<(Instant, IpAddr)> = self
            .entries
            .iter()
            .map(|(ip, entry)| (entry.updated_at, *ip))
            .collect();
        aged.sort_by_key(|&(touched, _)| touched);
        for &(_, stale_ip) in aged.iter().take(EVICT_BATCH) {
            self.entries.remove(&stale_ip);
        }
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::net::Ipv4Addr;

    // Helper: IP in the TEST-NET-1 documentation range (192.0.2.0/24).
    fn ip(last: u8) -> IpAddr {
        IpAddr::V4(Ipv4Addr::new(192, 0, 2, last))
    }

    // Helper: same IP paired with the DNS port.
    fn sock(last: u8) -> SocketAddr {
        SocketAddr::new(ip(last), 53)
    }

    #[test]
    fn unknown_returns_initial() {
        let cache = SrttCache::new(true);
        assert_eq!(cache.get(ip(1)), INITIAL_SRTT_MS);
    }

    // Repeated identical samples should pull the EWMA onto the sample value
    // (±2ms tolerance for integer-division rounding).
    #[test]
    fn ewma_converges() {
        let mut cache = SrttCache::new(true);
        for _ in 0..20 {
            cache.record_rtt(ip(1), 100, false);
        }
        let srtt = cache.get(ip(1));
        assert!(srtt >= 98 && srtt <= 102, "srtt={}", srtt);
    }

    // A failure overwrites any prior EWMA state with the flat penalty.
    #[test]
    fn failure_sets_penalty() {
        let mut cache = SrttCache::new(true);
        cache.record_rtt(ip(1), 50, false);
        cache.record_failure(ip(1));
        assert_eq!(cache.get(ip(1)), FAILURE_PENALTY_MS);
    }

    // TCP samples converge to rtt + TCP_PENALTY_MS (50 + 100 = ~150).
    #[test]
    fn tcp_penalty_added() {
        let mut cache = SrttCache::new(true);
        for _ in 0..20 {
            cache.record_rtt(ip(1), 50, true);
        }
        let srtt = cache.get(ip(1));
        assert!(srtt >= 148 && srtt <= 152, "srtt={}", srtt);
    }

    #[test]
    fn sort_by_rtt_orders_correctly() {
        let mut cache = SrttCache::new(true);
        for _ in 0..20 {
            cache.record_rtt(ip(1), 500, false);
            cache.record_rtt(ip(2), 100, false);
            cache.record_rtt(ip(3), 10, false);
        }
        let mut addrs = vec![sock(1), sock(2), sock(3)];
        cache.sort_by_rtt(&mut addrs);
        assert_eq!(addrs, vec![sock(3), sock(2), sock(1)]);
    }

    // Unknown servers all report INITIAL, so a stable sort keeps input order.
    #[test]
    fn unknown_servers_sort_equal() {
        let cache = SrttCache::new(true);
        let mut addrs = vec![sock(1), sock(2), sock(3)];
        let original = addrs.clone();
        cache.sort_by_rtt(&mut addrs);
        assert_eq!(addrs, original);
    }

    // Disabled cache: mutators record nothing, sorter leaves order untouched.
    #[test]
    fn disabled_is_noop() {
        let mut cache = SrttCache::new(false);
        cache.record_rtt(ip(1), 50, false);
        cache.record_failure(ip(2));
        assert_eq!(cache.len(), 0);

        let mut addrs = vec![sock(2), sock(1)];
        let original = addrs.clone();
        cache.sort_by_rtt(&mut addrs);
        assert_eq!(addrs, original);
    }

    #[test]
    fn no_decay_within_threshold() {
        // At exactly DECAY_AFTER_SECS, no decay applied
        let result = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS);
        assert_eq!(result, FAILURE_PENALTY_MS);
    }

    // One period past the threshold halves the distance to INITIAL once.
    #[test]
    fn one_decay_period() {
        let result = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS + 1);
        let expected = (FAILURE_PENALTY_MS + INITIAL_SRTT_MS) / 2;
        assert_eq!(result, expected);
    }

    #[test]
    fn multiple_decay_periods() {
        let result = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 4 + 1);
        let mut expected = FAILURE_PENALTY_MS;
        for _ in 0..4 {
            expected = (expected + INITIAL_SRTT_MS) / 2;
        }
        assert_eq!(result, expected);
    }

    #[test]
    fn decay_caps_at_8_periods() {
        // 9 periods and 100 periods should produce the same result (capped at 8)
        let a = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 9 + 1);
        let b = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 100);
        assert_eq!(a, b);
    }

    // After the 8-halving cap, 5000ms should sit within ~25ms of INITIAL.
    #[test]
    fn decay_converges_toward_initial() {
        let decayed = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 100);
        let diff = decayed.abs_diff(INITIAL_SRTT_MS);
        assert!(
            diff < 25,
            "expected near INITIAL_SRTT_MS, got {} (diff={})",
            decayed,
            diff
        );
    }

    #[test]
    fn record_rtt_applies_decay_before_ewma() {
        // Verify decay is applied before EWMA in record_rtt by checking
        // that a saturated penalty + long age + new sample produces a low SRTT
        let decayed = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 8);
        // EWMA: (decayed * 7 + 50) / 8
        let after_ewma = (decayed * 7 + 50) / 8;
        assert!(
            after_ewma < 500,
            "expected decay before EWMA, got srtt={}",
            after_ewma
        );
    }

    #[test]
    fn decay_reranks_stale_failures() {
        // After enough decay, a failed server (5000ms) converges toward
        // INITIAL (200ms), which is below a stable server at 300ms
        let decayed = SrttCache::decay_for_age(FAILURE_PENALTY_MS, DECAY_AFTER_SECS * 100);
        assert!(
            decayed < 300,
            "expected decayed penalty ({}) < 300ms",
            decayed
        );
    }

    // heap_bytes is capacity-based, so inserting entries must grow it
    // from the zero-capacity empty map.
    #[test]
    fn heap_bytes_grows_with_entries() {
        let mut cache = SrttCache::new(true);
        let empty = cache.heap_bytes();
        for i in 1..=10u8 {
            cache.record_rtt(ip(i), 100, false);
        }
        assert!(cache.heap_bytes() > empty);
    }

    // Fill to MAX_ENTRIES with distinct 10.x.x.x addresses derived from the
    // loop index, then one more insert must trigger a batch eviction.
    #[test]
    fn eviction_removes_oldest() {
        let mut cache = SrttCache::new(true);
        for i in 0..MAX_ENTRIES {
            let octets = [
                10,
                ((i >> 16) & 0xFF) as u8,
                ((i >> 8) & 0xFF) as u8,
                (i & 0xFF) as u8,
            ];
            cache.record_rtt(
                IpAddr::V4(Ipv4Addr::new(octets[0], octets[1], octets[2], octets[3])),
                100,
                false,
            );
        }
        assert_eq!(cache.len(), MAX_ENTRIES);
        cache.record_rtt(ip(1), 100, false);
        // Batch eviction removes EVICT_BATCH entries
        assert!(cache.len() <= MAX_ENTRIES - EVICT_BATCH + 1);
    }
}
|
||||
111
src/stats.rs
111
src/stats.rs
@@ -1,8 +1,97 @@
|
||||
use std::time::Instant;
|
||||
|
||||
/// Returns the process memory footprint in bytes, or 0 if unavailable.
|
||||
/// macOS: phys_footprint (matches Activity Monitor). Linux: RSS from /proc/self/statm.
|
||||
pub fn process_memory_bytes() -> usize {
|
||||
#[cfg(target_os = "macos")]
|
||||
{
|
||||
macos_rss()
|
||||
}
|
||||
#[cfg(target_os = "linux")]
|
||||
{
|
||||
linux_rss()
|
||||
}
|
||||
#[cfg(not(any(target_os = "macos", target_os = "linux")))]
|
||||
{
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_os = "macos")]
|
||||
fn macos_rss() -> usize {
|
||||
use std::mem;
|
||||
extern "C" {
|
||||
fn mach_task_self() -> u32;
|
||||
fn task_info(
|
||||
target_task: u32,
|
||||
flavor: u32,
|
||||
task_info_out: *mut TaskVmInfo,
|
||||
task_info_count: *mut u32,
|
||||
) -> i32;
|
||||
}
|
||||
// Partial task_vm_info_data_t — only fields up to phys_footprint.
|
||||
#[repr(C)]
|
||||
struct TaskVmInfo {
|
||||
virtual_size: u64,
|
||||
region_count: i32,
|
||||
page_size: i32,
|
||||
resident_size: u64,
|
||||
resident_size_peak: u64,
|
||||
device: u64,
|
||||
device_peak: u64,
|
||||
internal: u64,
|
||||
internal_peak: u64,
|
||||
external: u64,
|
||||
external_peak: u64,
|
||||
reusable: u64,
|
||||
reusable_peak: u64,
|
||||
purgeable_volatile_pmap: u64,
|
||||
purgeable_volatile_resident: u64,
|
||||
purgeable_volatile_virtual: u64,
|
||||
compressed: u64,
|
||||
compressed_peak: u64,
|
||||
compressed_lifetime: u64,
|
||||
phys_footprint: u64,
|
||||
}
|
||||
const TASK_VM_INFO: u32 = 22;
|
||||
let mut info: TaskVmInfo = unsafe { mem::zeroed() };
|
||||
let mut count = (mem::size_of::<TaskVmInfo>() / mem::size_of::<u32>()) as u32;
|
||||
let kr = unsafe { task_info(mach_task_self(), TASK_VM_INFO, &mut info, &mut count) };
|
||||
if kr == 0 {
|
||||
info.phys_footprint as usize
|
||||
} else {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
fn linux_rss() -> usize {
|
||||
extern "C" {
|
||||
fn sysconf(name: i32) -> i64;
|
||||
}
|
||||
const SC_PAGESIZE: i32 = 30; // x86_64 + aarch64; differs on mips (28), sparc (29)
|
||||
let page_size = unsafe { sysconf(SC_PAGESIZE) };
|
||||
let page_size = if page_size > 0 {
|
||||
page_size as usize
|
||||
} else {
|
||||
4096
|
||||
};
|
||||
|
||||
if let Ok(statm) = std::fs::read_to_string("/proc/self/statm") {
|
||||
if let Some(rss_pages) = statm.split_whitespace().nth(1) {
|
||||
if let Ok(pages) = rss_pages.parse::<usize>() {
|
||||
return pages * page_size;
|
||||
}
|
||||
}
|
||||
}
|
||||
0
|
||||
}
|
||||
|
||||
pub struct ServerStats {
|
||||
queries_total: u64,
|
||||
queries_forwarded: u64,
|
||||
queries_recursive: u64,
|
||||
queries_coalesced: u64,
|
||||
queries_cached: u64,
|
||||
queries_blocked: u64,
|
||||
queries_local: u64,
|
||||
@@ -11,11 +100,13 @@ pub struct ServerStats {
|
||||
started_at: Instant,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq)]
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub enum QueryPath {
|
||||
Local,
|
||||
Cached,
|
||||
Forwarded,
|
||||
Recursive,
|
||||
Coalesced,
|
||||
Blocked,
|
||||
Overridden,
|
||||
UpstreamError,
|
||||
@@ -27,6 +118,8 @@ impl QueryPath {
|
||||
QueryPath::Local => "LOCAL",
|
||||
QueryPath::Cached => "CACHED",
|
||||
QueryPath::Forwarded => "FORWARD",
|
||||
QueryPath::Recursive => "RECURSIVE",
|
||||
QueryPath::Coalesced => "COALESCED",
|
||||
QueryPath::Blocked => "BLOCKED",
|
||||
QueryPath::Overridden => "OVERRIDE",
|
||||
QueryPath::UpstreamError => "SERVFAIL",
|
||||
@@ -40,6 +133,10 @@ impl QueryPath {
|
||||
Some(QueryPath::Cached)
|
||||
} else if s.eq_ignore_ascii_case("FORWARD") {
|
||||
Some(QueryPath::Forwarded)
|
||||
} else if s.eq_ignore_ascii_case("RECURSIVE") {
|
||||
Some(QueryPath::Recursive)
|
||||
} else if s.eq_ignore_ascii_case("COALESCED") {
|
||||
Some(QueryPath::Coalesced)
|
||||
} else if s.eq_ignore_ascii_case("BLOCKED") {
|
||||
Some(QueryPath::Blocked)
|
||||
} else if s.eq_ignore_ascii_case("OVERRIDE") {
|
||||
@@ -63,6 +160,8 @@ impl ServerStats {
|
||||
ServerStats {
|
||||
queries_total: 0,
|
||||
queries_forwarded: 0,
|
||||
queries_recursive: 0,
|
||||
queries_coalesced: 0,
|
||||
queries_cached: 0,
|
||||
queries_blocked: 0,
|
||||
queries_local: 0,
|
||||
@@ -78,6 +177,8 @@ impl ServerStats {
|
||||
QueryPath::Local => self.queries_local += 1,
|
||||
QueryPath::Cached => self.queries_cached += 1,
|
||||
QueryPath::Forwarded => self.queries_forwarded += 1,
|
||||
QueryPath::Recursive => self.queries_recursive += 1,
|
||||
QueryPath::Coalesced => self.queries_coalesced += 1,
|
||||
QueryPath::Blocked => self.queries_blocked += 1,
|
||||
QueryPath::Overridden => self.queries_overridden += 1,
|
||||
QueryPath::UpstreamError => self.upstream_errors += 1,
|
||||
@@ -98,6 +199,8 @@ impl ServerStats {
|
||||
uptime_secs: self.uptime_secs(),
|
||||
total: self.queries_total,
|
||||
forwarded: self.queries_forwarded,
|
||||
recursive: self.queries_recursive,
|
||||
coalesced: self.queries_coalesced,
|
||||
cached: self.queries_cached,
|
||||
local: self.queries_local,
|
||||
overridden: self.queries_overridden,
|
||||
@@ -113,10 +216,12 @@ impl ServerStats {
|
||||
let secs = uptime.as_secs() % 60;
|
||||
|
||||
log::info!(
|
||||
"STATS | uptime {}h{}m{}s | total {} | fwd {} | cached {} | local {} | override {} | blocked {} | errors {}",
|
||||
"STATS | uptime {}h{}m{}s | total {} | fwd {} | recursive {} | coalesced {} | cached {} | local {} | override {} | blocked {} | errors {}",
|
||||
hours, mins, secs,
|
||||
self.queries_total,
|
||||
self.queries_forwarded,
|
||||
self.queries_recursive,
|
||||
self.queries_coalesced,
|
||||
self.queries_cached,
|
||||
self.queries_local,
|
||||
self.queries_overridden,
|
||||
@@ -130,6 +235,8 @@ pub struct StatsSnapshot {
|
||||
pub uptime_secs: u64,
|
||||
pub total: u64,
|
||||
pub forwarded: u64,
|
||||
pub recursive: u64,
|
||||
pub coalesced: u64,
|
||||
pub cached: u64,
|
||||
pub local: u64,
|
||||
pub overridden: u64,
|
||||
|
||||
1452
src/system_dns.rs
1452
src/system_dns.rs
File diff suppressed because it is too large
Load Diff
157
src/tls.rs
157
src/tls.rs
@@ -1,8 +1,13 @@
|
||||
use std::collections::HashSet;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use log::{info, warn};
|
||||
use rcgen::{BasicConstraints, CertificateParams, DnType, IsCa, KeyPair, KeyUsagePurpose, SanType};
|
||||
|
||||
use crate::ctx::ServerCtx;
|
||||
use rcgen::{
|
||||
BasicConstraints, CertificateParams, DnType, IsCa, Issuer, KeyPair, KeyUsagePurpose, SanType,
|
||||
};
|
||||
use rustls::pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer};
|
||||
use rustls::ServerConfig;
|
||||
use time::{Duration, OffsetDateTime};
|
||||
@@ -10,23 +15,90 @@ use time::{Duration, OffsetDateTime};
|
||||
const CA_VALIDITY_DAYS: i64 = 3650; // 10 years
|
||||
const CERT_VALIDITY_DAYS: i64 = 365; // 1 year
|
||||
|
||||
/// TLS certs use a fixed system path — both the daemon and `sudo numa install` must agree.
|
||||
pub const TLS_DIR: &str = "/usr/local/var/numa";
|
||||
/// Common Name on Numa's local CA. Referenced by trust-store helpers
|
||||
/// (`security`, `certutil`) when locating the cert for removal.
|
||||
pub const CA_COMMON_NAME: &str = "Numa Local CA";
|
||||
|
||||
/// Filename of the CA certificate inside the data dir.
|
||||
pub const CA_FILE_NAME: &str = "ca.pem";
|
||||
|
||||
/// Collect all service + LAN peer names and regenerate the TLS cert.
|
||||
pub fn regenerate_tls(ctx: &ServerCtx) {
|
||||
let tls = match &ctx.tls_config {
|
||||
Some(t) => t,
|
||||
None => return,
|
||||
};
|
||||
|
||||
let mut names: HashSet<String> = ctx.services.lock().unwrap().names().into_iter().collect();
|
||||
names.extend(ctx.lan_peers.lock().unwrap().names());
|
||||
let names: Vec<String> = names.into_iter().collect();
|
||||
|
||||
match build_tls_config(&ctx.proxy_tld, &names, Vec::new(), &ctx.data_dir) {
|
||||
Ok(new_config) => {
|
||||
tls.store(new_config);
|
||||
info!("TLS cert regenerated for {} services", names.len());
|
||||
}
|
||||
Err(e) => warn!("TLS regeneration failed: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
/// Advisory for TLS-setup failures caused by a non-writable data dir;
|
||||
/// `None` if not applicable so the caller can fall back to the raw error.
|
||||
pub fn try_data_dir_advisory(err: &crate::Error, data_dir: &Path) -> Option<String> {
|
||||
let io_err = err.downcast_ref::<std::io::Error>()?;
|
||||
if io_err.kind() != std::io::ErrorKind::PermissionDenied {
|
||||
return None;
|
||||
}
|
||||
let o = "\x1b[1;38;2;192;98;58m";
|
||||
let r = "\x1b[0m";
|
||||
Some(format!(
|
||||
"
|
||||
{o}Numa{r} — HTTPS proxy disabled: cannot write TLS CA to {}.
|
||||
|
||||
The data directory is not writable by the current user. Numa needs
|
||||
to persist a local Certificate Authority there to serve .numa over
|
||||
HTTPS. DNS resolution and plain-HTTP proxy continue to work.
|
||||
|
||||
Fix — pick one:
|
||||
|
||||
1. Install Numa as the system resolver (sets up a writable data dir):
|
||||
|
||||
sudo numa install (on Windows, run as Administrator)
|
||||
|
||||
2. Point data_dir at a path you can write.
|
||||
Create ~/.config/numa/numa.toml with:
|
||||
|
||||
[server]
|
||||
data_dir = \"/path/you/can/write\"
|
||||
|
||||
",
|
||||
data_dir.display()
|
||||
))
|
||||
}
|
||||
|
||||
/// Build a TLS config with a cert covering all provided service names.
|
||||
/// Wildcards under single-label TLDs (*.numa) are rejected by browsers,
|
||||
/// so we list each service explicitly as a SAN.
|
||||
pub fn build_tls_config(tld: &str, service_names: &[String]) -> crate::Result<Arc<ServerConfig>> {
|
||||
let dir = std::path::PathBuf::from(TLS_DIR);
|
||||
let (ca_cert, ca_key) = ensure_ca(&dir)?;
|
||||
let (cert_chain, key) = generate_service_cert(&ca_cert, &ca_key, tld, service_names)?;
|
||||
/// `alpn` is advertised in the TLS ServerHello — pass empty for the proxy
|
||||
/// (which accepts any ALPN), or `[b"dot"]` for DoT (RFC 7858 §3.2).
|
||||
/// `data_dir` is where the CA material is stored — taken from
|
||||
/// `[server] data_dir` in numa.toml (defaults to `crate::data_dir()`).
|
||||
pub fn build_tls_config(
|
||||
tld: &str,
|
||||
service_names: &[String],
|
||||
alpn: Vec<Vec<u8>>,
|
||||
data_dir: &Path,
|
||||
) -> crate::Result<Arc<ServerConfig>> {
|
||||
let (ca_der, issuer) = ensure_ca(data_dir)?;
|
||||
let (cert_chain, key) = generate_service_cert(&ca_der, &issuer, tld, service_names)?;
|
||||
|
||||
// Ensure a crypto provider is installed (rustls needs one)
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
let config = ServerConfig::builder()
|
||||
let mut config = ServerConfig::builder()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(cert_chain, key)?;
|
||||
config.alpn_protocols = alpn;
|
||||
|
||||
info!(
|
||||
"TLS configured for {} .{} domains",
|
||||
@@ -36,18 +108,20 @@ pub fn build_tls_config(tld: &str, service_names: &[String]) -> crate::Result<Ar
|
||||
Ok(Arc::new(config))
|
||||
}
|
||||
|
||||
fn ensure_ca(dir: &Path) -> crate::Result<(rcgen::Certificate, KeyPair)> {
|
||||
fn ensure_ca(dir: &Path) -> crate::Result<(CertificateDer<'static>, Issuer<'static, KeyPair>)> {
|
||||
let ca_key_path = dir.join("ca.key");
|
||||
let ca_cert_path = dir.join("ca.pem");
|
||||
let ca_cert_path = dir.join(CA_FILE_NAME);
|
||||
|
||||
if ca_key_path.exists() && ca_cert_path.exists() {
|
||||
let key_pem = std::fs::read_to_string(&ca_key_path)?;
|
||||
let cert_pem = std::fs::read_to_string(&ca_cert_path)?;
|
||||
let key_pair = KeyPair::from_pem(&key_pem)?;
|
||||
let params = CertificateParams::from_ca_cert_pem(&cert_pem)?;
|
||||
let cert = params.self_signed(&key_pair)?;
|
||||
let ca_der = rustls_pemfile::certs(&mut cert_pem.as_bytes())
|
||||
.next()
|
||||
.ok_or("empty CA PEM file")??;
|
||||
let issuer = Issuer::from_ca_cert_der(&ca_der, key_pair)?;
|
||||
info!("loaded CA from {:?}", ca_cert_path);
|
||||
return Ok((cert, key_pair));
|
||||
return Ok((ca_der, issuer));
|
||||
}
|
||||
|
||||
// Generate new CA
|
||||
@@ -57,7 +131,7 @@ fn ensure_ca(dir: &Path) -> crate::Result<(rcgen::Certificate, KeyPair)> {
|
||||
let mut params = CertificateParams::default();
|
||||
params
|
||||
.distinguished_name
|
||||
.push(DnType::CommonName, "Numa Local CA");
|
||||
.push(DnType::CommonName, CA_COMMON_NAME);
|
||||
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
|
||||
params.key_usages = vec![KeyUsagePurpose::KeyCertSign, KeyUsagePurpose::CrlSign];
|
||||
params.not_before = OffsetDateTime::now_utc();
|
||||
@@ -75,14 +149,16 @@ fn ensure_ca(dir: &Path) -> crate::Result<(rcgen::Certificate, KeyPair)> {
|
||||
}
|
||||
|
||||
info!("generated CA at {:?}", ca_cert_path);
|
||||
Ok((cert, key_pair))
|
||||
let ca_der = cert.der().clone();
|
||||
let issuer = Issuer::new(params, key_pair);
|
||||
Ok((ca_der, issuer))
|
||||
}
|
||||
|
||||
/// Generate a cert with explicit SANs for each service name.
|
||||
/// Always regenerated at startup (~5ms) — no disk caching needed.
|
||||
fn generate_service_cert(
|
||||
ca_cert: &rcgen::Certificate,
|
||||
ca_key: &KeyPair,
|
||||
ca_der: &CertificateDer<'static>,
|
||||
issuer: &Issuer<'_, KeyPair>,
|
||||
tld: &str,
|
||||
service_names: &[String],
|
||||
) -> crate::Result<(Vec<CertificateDer<'static>>, PrivateKeyDer<'static>)> {
|
||||
@@ -92,8 +168,15 @@ fn generate_service_cert(
|
||||
.distinguished_name
|
||||
.push(DnType::CommonName, format!("Numa .{} services", tld));
|
||||
|
||||
// Add each service as an explicit SAN: numa.numa, peekm.numa, api.numa, etc.
|
||||
// Add a wildcard SAN so any .numa domain gets a valid cert (including
|
||||
// unregistered services — lets the proxy show a styled 404 over HTTPS).
|
||||
// Also add each service explicitly for clients that don't match wildcards.
|
||||
let mut sans = Vec::new();
|
||||
let wildcard = format!("*.{}", tld);
|
||||
match wildcard.clone().try_into() {
|
||||
Ok(ia5) => sans.push(SanType::DnsName(ia5)),
|
||||
Err(e) => warn!("invalid wildcard SAN {}: {}", wildcard, e),
|
||||
}
|
||||
for name in service_names {
|
||||
let fqdn = format!("{}.{}", name, tld);
|
||||
match fqdn.clone().try_into() {
|
||||
@@ -110,7 +193,7 @@ fn generate_service_cert(
|
||||
params.not_before = OffsetDateTime::now_utc();
|
||||
params.not_after = OffsetDateTime::now_utc() + Duration::days(CERT_VALIDITY_DAYS);
|
||||
|
||||
let cert = params.signed_by(&key_pair, ca_cert, ca_key)?;
|
||||
let cert = params.signed_by(&key_pair, issuer)?;
|
||||
|
||||
info!(
|
||||
"generated TLS cert for: {}",
|
||||
@@ -121,9 +204,39 @@ fn generate_service_cert(
|
||||
.join(", ")
|
||||
);
|
||||
|
||||
let cert_der = CertificateDer::from(cert.der().to_vec());
|
||||
let ca_der = CertificateDer::from(ca_cert.der().to_vec());
|
||||
let cert_der = cert.der().clone();
|
||||
let ca_cert_der = ca_der.clone();
|
||||
let key_der = PrivateKeyDer::Pkcs8(PrivatePkcs8KeyDer::from(key_pair.serialize_der()));
|
||||
|
||||
Ok((vec![cert_der, ca_der], key_der))
|
||||
Ok((vec![cert_der, ca_cert_der], key_der))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[test]
|
||||
fn try_data_dir_advisory_permission_denied() {
|
||||
let err: crate::Error =
|
||||
Box::new(std::io::Error::from(std::io::ErrorKind::PermissionDenied));
|
||||
let path = PathBuf::from("/usr/local/var/numa");
|
||||
let msg = try_data_dir_advisory(&err, &path).expect("should advise");
|
||||
assert!(msg.contains("HTTPS proxy disabled"));
|
||||
assert!(msg.contains("/usr/local/var/numa"));
|
||||
assert!(msg.contains("numa install"));
|
||||
assert!(msg.contains("data_dir"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_data_dir_advisory_skips_other_io_kinds() {
|
||||
let err: crate::Error = Box::new(std::io::Error::from(std::io::ErrorKind::NotFound));
|
||||
assert!(try_data_dir_advisory(&err, &PathBuf::from("/x")).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_data_dir_advisory_skips_non_io_errors() {
|
||||
let err: crate::Error = "rcgen failure".into();
|
||||
assert!(try_data_dir_advisory(&err, &PathBuf::from("/x")).is_none());
|
||||
}
|
||||
}
|
||||
|
||||
123
tests/docker/install-trust.sh
Executable file
123
tests/docker/install-trust.sh
Executable file
@@ -0,0 +1,123 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Cross-distro CA trust contract test for issue #35.
|
||||
#
|
||||
# Runs the exact shell commands `src/system_dns.rs::trust_ca_linux` would run
|
||||
# on each Linux trust-store family (Debian, Fedora pki, Arch p11-kit), and
|
||||
# asserts the certificate ends up in (and is removed from) the system bundle.
|
||||
#
|
||||
# This is a contract test, not an integration test: it doesn't drive the Rust
|
||||
# code (that would need systemd-in-container). It verifies the assumptions in
|
||||
# `LINUX_TRUST_STORES` against the real distro behavior. If you change that
|
||||
# table in src/system_dns.rs, update the per-distro cases below to match.
|
||||
#
|
||||
# Requirements: docker, openssl (host).
|
||||
# Usage: ./tests/docker/install-trust.sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cd "$(dirname "$0")/../.."
|
||||
|
||||
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||
|
||||
# Self-signed CA fixture, mounted into each container as ca.pem.
|
||||
# basicConstraints=CA:TRUE is required — without it, Debian's
|
||||
# update-ca-certificates silently skips the cert during bundle build.
|
||||
FIXTURE_DIR=$(mktemp -d)
|
||||
trap 'rm -rf "$FIXTURE_DIR"' EXIT
|
||||
openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
|
||||
-keyout "$FIXTURE_DIR/ca.key" \
|
||||
-out "$FIXTURE_DIR/ca.pem" \
|
||||
-subj "/CN=Numa Local CA Test $(date +%s)" \
|
||||
-addext "basicConstraints=critical,CA:TRUE" \
|
||||
-addext "keyUsage=critical,keyCertSign,cRLSign" >/dev/null 2>&1
|
||||
|
||||
# Distro bundles store certs differently — Debian writes raw PEM only,
|
||||
# Fedora prepends "# CN" comment headers, Arch via extract-compat is
|
||||
# raw PEM. To detect cert presence uniformly we grep for a deterministic
|
||||
# substring of the base64 body (first base64 line is unique per cert).
|
||||
CERT_TAG=$(sed -n '2p' "$FIXTURE_DIR/ca.pem")
|
||||
|
||||
PASSED=0; FAILED=0
|
||||
|
||||
run_case() {
|
||||
local distro="$1"; shift
|
||||
local image="$1"; shift
|
||||
local platform="$1"; shift
|
||||
local script="$1"
|
||||
|
||||
printf "── %s (%s) ──\n" "$distro" "$image"
|
||||
if docker run --rm \
|
||||
--platform "$platform" \
|
||||
--security-opt seccomp=unconfined \
|
||||
-e CERT_TAG="$CERT_TAG" \
|
||||
-e DEBIAN_FRONTEND=noninteractive \
|
||||
-v "$FIXTURE_DIR/ca.pem:/fixture/ca.pem:ro" \
|
||||
"$image" bash -c "$script"; then
|
||||
printf "${GREEN}✓${RESET} %s\n\n" "$distro"
|
||||
PASSED=$((PASSED + 1))
|
||||
else
|
||||
printf "${RED}✗${RESET} %s\n\n" "$distro"
|
||||
FAILED=$((FAILED + 1))
|
||||
fi
|
||||
}
|
||||
|
||||
# Debian / Ubuntu / Mint — anchor: /usr/local/share/ca-certificates/*.crt
|
||||
run_case "debian" "debian:stable" "linux/amd64" '
|
||||
set -e
|
||||
apt-get update -qq
|
||||
apt-get install -qq -y ca-certificates >/dev/null
|
||||
install -m 0644 /fixture/ca.pem /usr/local/share/ca-certificates/numa-local-ca.crt
|
||||
update-ca-certificates >/dev/null 2>&1
|
||||
grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt
|
||||
echo " install: cert present in bundle"
|
||||
rm /usr/local/share/ca-certificates/numa-local-ca.crt
|
||||
update-ca-certificates --fresh >/dev/null 2>&1
|
||||
if grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt; then
|
||||
echo " uninstall: cert STILL present (regression)" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo " uninstall: cert removed from bundle"
|
||||
'
|
||||
|
||||
# Fedora / RHEL / CentOS / SUSE — anchor: /etc/pki/ca-trust/source/anchors/*.pem
|
||||
run_case "fedora" "fedora:latest" "linux/amd64" '
|
||||
set -e
|
||||
dnf install -q -y ca-certificates >/dev/null
|
||||
install -m 0644 /fixture/ca.pem /etc/pki/ca-trust/source/anchors/numa-local-ca.pem
|
||||
update-ca-trust extract
|
||||
grep -q "$CERT_TAG" /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
|
||||
echo " install: cert present in bundle"
|
||||
rm /etc/pki/ca-trust/source/anchors/numa-local-ca.pem
|
||||
update-ca-trust extract
|
||||
if grep -q "$CERT_TAG" /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem; then
|
||||
echo " uninstall: cert STILL present (regression)" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo " uninstall: cert removed from bundle"
|
||||
'
|
||||
|
||||
# Arch / Manjaro — anchor: /etc/ca-certificates/trust-source/anchors/*.pem
|
||||
# archlinux:latest is x86_64-only; --platform forces emulation on Apple Silicon.
|
||||
run_case "arch" "archlinux:latest" "linux/amd64" '
|
||||
set -e
|
||||
# pacman 7+ filters syscalls in its own sandbox; disable for Rosetta/qemu emulation.
|
||||
sed -i "s/^#DisableSandboxSyscalls/DisableSandboxSyscalls/" /etc/pacman.conf
|
||||
pacman -Sy --noconfirm --needed ca-certificates p11-kit >/dev/null 2>&1
|
||||
install -m 0644 /fixture/ca.pem /etc/ca-certificates/trust-source/anchors/numa-local-ca.pem
|
||||
trust extract-compat
|
||||
grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt
|
||||
echo " install: cert present in bundle"
|
||||
rm /etc/ca-certificates/trust-source/anchors/numa-local-ca.pem
|
||||
trust extract-compat
|
||||
if grep -q "$CERT_TAG" /etc/ssl/certs/ca-certificates.crt; then
|
||||
echo " uninstall: cert STILL present (regression)" >&2
|
||||
exit 1
|
||||
fi
|
||||
echo " uninstall: cert removed from bundle"
|
||||
'
|
||||
|
||||
printf "── summary ──\n"
|
||||
printf " ${GREEN}passed${RESET}: %d\n" "$PASSED"
|
||||
printf " ${RED}failed${RESET}: %d\n" "$FAILED"
|
||||
[ "$FAILED" -eq 0 ]
|
||||
147
tests/docker/smoke-arch.sh
Executable file
147
tests/docker/smoke-arch.sh
Executable file
@@ -0,0 +1,147 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Arch Linux compatibility smoke test.
|
||||
#
|
||||
# Builds numa from source inside an archlinux:latest container, runs it
|
||||
# in forward mode on port 5354, and verifies a single DNS query returns
|
||||
# an A record. Validates the "Arch compatible" claim end-to-end before
|
||||
# release announcements.
|
||||
#
|
||||
# Dogfooding: the test numa forwards to the host's running numa via
|
||||
# host.docker.internal (Docker Desktop's host gateway). This avoids the
|
||||
# Docker NAT/UDP issues with public resolvers and exercises the realistic
|
||||
# numa-on-numa shape. Requires the host to be running numa on port 53.
|
||||
#
|
||||
# First run is slow (~8-12 min): image pull + pacman + cold cargo build.
|
||||
# No caching across runs.
|
||||
#
|
||||
# Requirements: docker, host running numa on 0.0.0.0:53
|
||||
# Usage: ./tests/docker/smoke-arch.sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cd "$(dirname "$0")/../.."
|
||||
|
||||
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||
|
||||
# Precondition: the test numa-on-arch forwards to the host numa as its
|
||||
# upstream (dogfood pattern). Fail fast with a clear error if there is
|
||||
# no working DNS on the host, rather than letting the dig inside the
|
||||
# container time out with "deadline has elapsed".
|
||||
if ! dig @127.0.0.1 google.com A +short +time=1 +tries=1 >/dev/null 2>&1; then
|
||||
printf "${RED}error:${RESET} host numa is not answering on 127.0.0.1:53\n" >&2
|
||||
echo " This test forwards to the host numa via host.docker.internal." >&2
|
||||
echo " Start numa on the host first (sudo numa install), then rerun." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "── building + running numa on archlinux:latest ──"
|
||||
echo " (first run is slow: image pull + pacman + cold cargo build, ~8-12 min)"
|
||||
echo
|
||||
|
||||
docker run --rm \
|
||||
--platform linux/amd64 \
|
||||
--security-opt seccomp=unconfined \
|
||||
-v "$PWD:/src:ro" \
|
||||
-v numa-arch-cargo:/root/.cargo \
|
||||
-v numa-arch-target:/work/target \
|
||||
archlinux:latest bash -c '
|
||||
set -e
|
||||
|
||||
# pacman 7+ filters syscalls in its own sandbox; disable for Rosetta/qemu
|
||||
sed -i "s/^#DisableSandboxSyscalls/DisableSandboxSyscalls/" /etc/pacman.conf
|
||||
|
||||
echo "── pacman: installing build + runtime deps ──"
|
||||
pacman -Sy --noconfirm --needed rust gcc pkgconf cmake make perl bind 2>&1 | tail -3
|
||||
echo
|
||||
|
||||
# Copy source to a writable workdir, skipping target/ + .git so we
|
||||
# do not pull in the host (macOS) build artifacts.
|
||||
mkdir -p /work
|
||||
tar -C /src --exclude=./target --exclude=./.git -cf - . | tar -C /work -xf -
|
||||
cd /work
|
||||
|
||||
echo "── cargo build --release --locked ──"
|
||||
cargo build --release --locked 2>&1 | tail -5
|
||||
echo
|
||||
|
||||
# Dogfood: forward to the host numa via host.docker.internal.
|
||||
# numa parses upstream.address as a literal SocketAddr, so we resolve
|
||||
# the hostname to an IPv4 address first (force v4 — getent hosts may
|
||||
# return IPv6 first, and IPv6 addresses need bracketed addr:port form).
|
||||
HOST_IP=$(getent ahostsv4 host.docker.internal | awk "/STREAM/ {print \$1; exit}")
|
||||
if [ -z "$HOST_IP" ]; then
|
||||
echo " ✗ could not resolve host.docker.internal to IPv4 (not on Docker Desktop?)"
|
||||
exit 1
|
||||
fi
|
||||
echo "── starting numa on :5354 (forward to host numa at $HOST_IP:53) ──"
|
||||
# Intentionally NOT setting [server] data_dir — we want to exercise the
|
||||
# default code path (data_dir() → daemon_data_dir() → /var/lib/numa) so
|
||||
# the FHS-path assertion below verifies the live wiring, not just the
|
||||
# unit-tested helper.
|
||||
cat > /tmp/numa.toml <<EOF
|
||||
[server]
|
||||
bind_addr = "127.0.0.1:5354"
|
||||
api_port = 5381
|
||||
|
||||
[upstream]
|
||||
mode = "forward"
|
||||
address = "$HOST_IP"
|
||||
port = 53
|
||||
EOF
|
||||
|
||||
./target/release/numa /tmp/numa.toml > /tmp/numa.log 2>&1 &
|
||||
NUMA_PID=$!
|
||||
|
||||
# Poll for readiness — numa is ready when it answers a query
|
||||
READY=0
|
||||
for i in 1 2 3 4 5 6 7 8; do
|
||||
sleep 1
|
||||
if dig @127.0.0.1 -p 5354 google.com A +short +time=1 +tries=1 2>/dev/null \
|
||||
| grep -qE "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$"; then
|
||||
READY=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$READY" -ne 1 ]; then
|
||||
echo " ✗ numa did not return an A record after 8s"
|
||||
echo " numa log:"
|
||||
cat /tmp/numa.log
|
||||
kill $NUMA_PID 2>/dev/null || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "── dig @127.0.0.1 -p 5354 google.com A ──"
|
||||
ANSWER=$(dig @127.0.0.1 -p 5354 google.com A +short +time=2 +tries=1)
|
||||
echo "$ANSWER" | sed "s/^/ /"
|
||||
|
||||
kill $NUMA_PID 2>/dev/null || true
|
||||
|
||||
# FHS path assertion: the default data dir on Linux must be /var/lib/numa
|
||||
# (not the legacy /usr/local/var/numa). The CA cert generated at startup
|
||||
# is the canonical proof that numa wrote to the right place.
|
||||
echo
|
||||
echo "── FHS path check ──"
|
||||
if [ -f /var/lib/numa/ca.pem ]; then
|
||||
echo " ✓ CA cert at /var/lib/numa/ca.pem (FHS path)"
|
||||
else
|
||||
echo " ✗ CA cert NOT at /var/lib/numa/ca.pem"
|
||||
echo " ls /var/lib/numa/:"
|
||||
ls -la /var/lib/numa/ 2>&1 | sed "s/^/ /"
|
||||
echo " ls /usr/local/var/numa/:"
|
||||
ls -la /usr/local/var/numa/ 2>&1 | sed "s/^/ /"
|
||||
exit 1
|
||||
fi
|
||||
if [ -e /usr/local/var/numa ]; then
|
||||
echo " ✗ legacy path /usr/local/var/numa unexpectedly exists on a fresh container"
|
||||
exit 1
|
||||
fi
|
||||
echo " ✓ legacy path /usr/local/var/numa absent (fresh install used FHS)"
|
||||
|
||||
echo
|
||||
echo " ✓ numa built, ran, answered a forward query, and used the FHS data dir on Arch"
|
||||
'
|
||||
|
||||
echo
|
||||
printf "${GREEN}── smoke-arch passed ──${RESET}\n"
|
||||
138
tests/docker/smoke-port53.sh
Executable file
138
tests/docker/smoke-port53.sh
Executable file
@@ -0,0 +1,138 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Port-53 conflict advisory integration test.
|
||||
#
|
||||
# Builds numa from source inside a debian:bookworm container, pre-binds
|
||||
# port 53 with a UDP socket, then runs numa bare (default bind_addr
|
||||
# 0.0.0.0:53). Verifies:
|
||||
# - process exits with code 1
|
||||
# - stderr contains the advisory ("cannot bind to")
|
||||
# - stderr contains both fix suggestions ("numa install", "bind_addr")
|
||||
#
|
||||
# This is the end-to-end test for the fix in:
|
||||
# src/main.rs — AddrInUse match arm → eprint advisory + process::exit(1)
|
||||
#
|
||||
# No systemd-resolved needed — the conflict is simulated by a Python
|
||||
# UDP socket held open before numa starts.
|
||||
#
|
||||
# Requirements: docker
|
||||
# Usage: ./tests/docker/smoke-port53.sh
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cd "$(dirname "$0")/../.."
|
||||
|
||||
GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"
|
||||
|
||||
pass() { printf " ${GREEN}✓${RESET} %s\n" "$1"; }
|
||||
fail() { printf " ${RED}✗${RESET} %s\n" "$1"; printf " %s\n" "$2"; FAILED=$((FAILED+1)); }
|
||||
FAILED=0
|
||||
|
||||
echo "── smoke-port53: building + testing numa on debian:bookworm ──"
|
||||
echo " (first run is slow: image pull + cold cargo build, ~5-8 min)"
|
||||
echo
|
||||
|
||||
OUTPUT=$(docker run --rm \
|
||||
--platform linux/amd64 \
|
||||
-v "$PWD:/src:ro" \
|
||||
-v numa-port53-cargo:/root/.cargo \
|
||||
-v numa-port53-target:/work/target \
|
||||
debian:bookworm bash -c '
|
||||
set -e
|
||||
|
||||
apt-get update -qq && apt-get install -y -qq curl build-essential python3 2>&1 | tail -3
|
||||
|
||||
# Install rustup if not already in the cargo cache volume
|
||||
if ! command -v cargo &>/dev/null; then
|
||||
curl -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal --quiet
|
||||
fi
|
||||
. "$HOME/.cargo/env"
|
||||
|
||||
# Copy source to a writable workdir
|
||||
mkdir -p /work
|
||||
tar -C /src --exclude=./target --exclude=./.git -cf - . | tar -C /work -xf -
|
||||
cd /work
|
||||
|
||||
echo "── cargo build --release --locked ──"
|
||||
cargo build --release --locked 2>&1 | tail -5
|
||||
echo
|
||||
|
||||
# Write the holder script to a file to avoid quoting hell.
|
||||
# Holds port 53 until killed — no sleep race.
|
||||
cat > /tmp/hold53.py << '"'"'PYEOF'"'"'
|
||||
import socket, signal
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 0)
|
||||
s.bind(("", 53))
|
||||
signal.pause()
|
||||
PYEOF
|
||||
|
||||
python3 /tmp/hold53.py &
|
||||
HOLDER_PID=$!
|
||||
|
||||
# Verify the holder is actually up before proceeding
|
||||
sleep 0.3
|
||||
if ! kill -0 $HOLDER_PID 2>/dev/null; then
|
||||
echo "holder_failed=1"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "── running numa with port 53 already bound ──"
|
||||
# timeout 5: guards against numa not exiting (advisory not fired, bug present)
|
||||
# Capture stderr to a file so the exit code is not clobbered by || or $()
|
||||
set +e
|
||||
timeout 5 ./target/release/numa > /tmp/numa-stderr.txt 2>&1
|
||||
EXIT_CODE=$?
|
||||
set -e
|
||||
STDERR=$(cat /tmp/numa-stderr.txt)
|
||||
|
||||
kill $HOLDER_PID 2>/dev/null || true
|
||||
|
||||
echo "exit_code=$EXIT_CODE"
|
||||
printf "%s" "$STDERR" | sed "s/^/ numa: /"
|
||||
' 2>&1)
|
||||
|
||||
echo "$OUTPUT"
|
||||
|
||||
echo
|
||||
echo "── assertions ──"
|
||||
|
||||
if echo "$OUTPUT" | grep -q "holder_failed=1"; then
|
||||
echo " SETUP FAILED: could not pre-bind port 53 inside container"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
EXIT_CODE=$(echo "$OUTPUT" | grep '^exit_code=' | cut -d= -f2)
|
||||
|
||||
if [ "${EXIT_CODE:-}" = "1" ]; then
|
||||
pass "exits with code 1"
|
||||
else
|
||||
fail "exits with code 1" "got: exit_code=${EXIT_CODE:-<missing>}"
|
||||
fi
|
||||
|
||||
if echo "$OUTPUT" | grep -q "cannot bind to"; then
|
||||
pass "advisory printed to stderr"
|
||||
else
|
||||
fail "advisory printed to stderr" "stderr did not contain 'cannot bind to'"
|
||||
fi
|
||||
|
||||
if echo "$OUTPUT" | grep -q "numa install"; then
|
||||
pass "advisory offers 'sudo numa install'"
|
||||
else
|
||||
fail "advisory offers 'sudo numa install'" "not found in output"
|
||||
fi
|
||||
|
||||
if echo "$OUTPUT" | grep -q "bind_addr"; then
|
||||
pass "advisory offers non-privileged port alternative"
|
||||
else
|
||||
fail "advisory offers non-privileged port alternative" "'bind_addr' not found in output"
|
||||
fi
|
||||
|
||||
echo
|
||||
if [ "$FAILED" -eq 0 ]; then
|
||||
printf "${GREEN}── smoke-port53 passed ──${RESET}\n"
|
||||
exit 0
|
||||
else
|
||||
printf "${RED}── smoke-port53 failed ($FAILED assertion(s)) ──${RESET}\n"
|
||||
exit 1
|
||||
fi
|
||||
654
tests/integration.sh
Executable file
654
tests/integration.sh
Executable file
@@ -0,0 +1,654 @@
|
||||
#!/usr/bin/env bash
|
||||
# Integration test suite for Numa
|
||||
# Runs a test instance on port 5354, validates all features, exits with status.
|
||||
# Usage: ./tests/integration.sh [release|debug]
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
MODE="${1:-release}"
|
||||
BINARY="./target/$MODE/numa"
|
||||
PORT=5354
|
||||
API_PORT=5381
|
||||
CONFIG="/tmp/numa-integration-test.toml"
|
||||
LOG="/tmp/numa-integration-test.log"
|
||||
PASSED=0
|
||||
FAILED=0
|
||||
|
||||
# Colors
|
||||
GREEN="\033[32m"
|
||||
RED="\033[31m"
|
||||
DIM="\033[90m"
|
||||
RESET="\033[0m"
|
||||
|
||||
check() {
|
||||
local name="$1"
|
||||
local expected="$2"
|
||||
local actual="$3"
|
||||
|
||||
if echo "$actual" | grep -q "$expected"; then
|
||||
PASSED=$((PASSED + 1))
|
||||
printf " ${GREEN}✓${RESET} %s\n" "$name"
|
||||
else
|
||||
FAILED=$((FAILED + 1))
|
||||
printf " ${RED}✗${RESET} %s\n" "$name"
|
||||
printf " ${DIM}expected: %s${RESET}\n" "$expected"
|
||||
printf " ${DIM} got: %s${RESET}\n" "$actual"
|
||||
fi
|
||||
}
|
||||
|
||||
# Build if needed
|
||||
if [ ! -f "$BINARY" ]; then
|
||||
echo "Building $MODE..."
|
||||
cargo build --$MODE
|
||||
fi
|
||||
|
||||
run_test_suite() {
|
||||
local SUITE_NAME="$1"
|
||||
local SUITE_CONFIG="$2"
|
||||
|
||||
cat > "$CONFIG" << CONF
|
||||
$SUITE_CONFIG
|
||||
CONF
|
||||
|
||||
echo "Starting Numa on :$PORT ($SUITE_NAME)..."
|
||||
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
|
||||
NUMA_PID=$!
|
||||
sleep 4
|
||||
|
||||
if ! kill -0 "$NUMA_PID" 2>/dev/null; then
|
||||
echo "Failed to start Numa:"
|
||||
tail -5 "$LOG"
|
||||
return 1
|
||||
fi
|
||||
|
||||
DIG="dig @127.0.0.1 -p $PORT +time=5 +tries=1"
|
||||
|
||||
echo ""
|
||||
echo "=== Resolution ==="
|
||||
|
||||
check "A record (google.com)" \
|
||||
"." \
|
||||
"$($DIG google.com A +short)"
|
||||
|
||||
check "AAAA record (google.com)" \
|
||||
":" \
|
||||
"$($DIG google.com AAAA +short)"
|
||||
|
||||
check "CNAME chasing (www.github.com)" \
|
||||
"github.com" \
|
||||
"$($DIG www.github.com A +short)"
|
||||
|
||||
check "MX records (gmail.com)" \
|
||||
"gmail-smtp-in" \
|
||||
"$($DIG gmail.com MX +short)"
|
||||
|
||||
check "NS records (cloudflare.com)" \
|
||||
"cloudflare.com" \
|
||||
"$($DIG cloudflare.com NS +short)"
|
||||
|
||||
check "NXDOMAIN" \
|
||||
"NXDOMAIN" \
|
||||
"$($DIG nope12345678.com A 2>&1 | grep status:)"
|
||||
|
||||
echo ""
|
||||
echo "=== Ad Blocking ==="
|
||||
|
||||
if echo "$SUITE_CONFIG" | grep -q 'enabled = true'; then
|
||||
check "Blocked domain → 0.0.0.0" \
|
||||
"0.0.0.0" \
|
||||
"$($DIG ads.google.com A +short)"
|
||||
else
|
||||
local ADS=$($DIG ads.google.com A +short 2>/dev/null)
|
||||
if echo "$ADS" | grep -q "0.0.0.0"; then
|
||||
check "Blocking disabled but domain blocked" "should-resolve" "0.0.0.0"
|
||||
else
|
||||
check "Blocking disabled — domain resolves normally" "." "$ADS"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "=== Cache ==="
|
||||
|
||||
$DIG example.com A +short > /dev/null 2>&1
|
||||
sleep 1
|
||||
check "Cache hit returns result" \
|
||||
"." \
|
||||
"$($DIG example.com A +short)"
|
||||
|
||||
echo ""
|
||||
echo "=== Connectivity ==="
|
||||
|
||||
# Apple captive portal can be slow/flaky on some networks
|
||||
local CAPTIVE
|
||||
CAPTIVE=$($DIG captive.apple.com A +short 2>/dev/null || echo "timeout")
|
||||
if echo "$CAPTIVE" | grep -q "apple\|17\.\|timeout"; then
|
||||
check "Apple captive portal" "." "$CAPTIVE"
|
||||
else
|
||||
check "Apple captive portal" "apple" "$CAPTIVE"
|
||||
fi
|
||||
|
||||
check "CDN (jsdelivr)" \
|
||||
"." \
|
||||
"$($DIG cdn.jsdelivr.net A +short)"
|
||||
|
||||
echo ""
|
||||
echo "=== API ==="
|
||||
|
||||
check "Health endpoint" \
|
||||
"ok" \
|
||||
"$(curl -s http://127.0.0.1:$API_PORT/health)"
|
||||
|
||||
check "Stats endpoint" \
|
||||
"uptime_secs" \
|
||||
"$(curl -s http://127.0.0.1:$API_PORT/stats)"
|
||||
|
||||
echo ""
|
||||
echo "=== Log Health ==="
|
||||
|
||||
ERRORS=$(grep -c 'RECURSIVE ERROR\|PARSE ERROR\|HANDLER ERROR\|panic' "$LOG" 2>/dev/null || echo 0)
|
||||
check "No critical errors in log" \
|
||||
"0" \
|
||||
"$ERRORS"
|
||||
|
||||
kill "$NUMA_PID" 2>/dev/null || true
|
||||
wait "$NUMA_PID" 2>/dev/null || true
|
||||
sleep 1
|
||||
}
|
||||
|
||||
# ---- Suite 1: Recursive mode + DNSSEC ----
|
||||
echo ""
|
||||
echo "╔══════════════════════════════════════════╗"
|
||||
echo "║ Suite 1: Recursive + DNSSEC + Blocking ║"
|
||||
echo "╚══════════════════════════════════════════╝"
|
||||
|
||||
run_test_suite "recursive + DNSSEC + blocking" "
|
||||
[server]
|
||||
bind_addr = \"127.0.0.1:5354\"
|
||||
api_port = 5381
|
||||
|
||||
[upstream]
|
||||
mode = \"recursive\"
|
||||
|
||||
[cache]
|
||||
max_entries = 10000
|
||||
min_ttl = 60
|
||||
max_ttl = 86400
|
||||
|
||||
[blocking]
|
||||
enabled = true
|
||||
|
||||
[proxy]
|
||||
enabled = false
|
||||
|
||||
[dnssec]
|
||||
enabled = true
|
||||
"
|
||||
|
||||
DIG="dig @127.0.0.1 -p $PORT +time=5 +tries=1"
|
||||
|
||||
echo ""
|
||||
echo "=== DNSSEC (recursive only) ==="
|
||||
|
||||
# Re-start for DNSSEC checks (suite 1 instance was killed)
|
||||
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
|
||||
NUMA_PID=$!
|
||||
sleep 4
|
||||
|
||||
check "AD bit set (cloudflare.com)" \
|
||||
" ad" \
|
||||
"$($DIG cloudflare.com A +dnssec 2>&1 | grep flags:)"
|
||||
|
||||
check "EDNS DO bit echoed" \
|
||||
"flags: do" \
|
||||
"$($DIG cloudflare.com A +dnssec 2>&1 | grep 'EDNS:')"
|
||||
|
||||
echo ""
|
||||
echo "=== TCP wire format (real servers) ==="
|
||||
|
||||
# Microsoft's Azure DNS servers require length+message in a single TCP segment.
|
||||
# This test catches the split-write bug that caused early-eof SERVFAILs.
|
||||
check "Microsoft domain (update.code.visualstudio.com)" \
|
||||
"NOERROR" \
|
||||
"$($DIG update.code.visualstudio.com A 2>&1 | grep status:)"
|
||||
|
||||
check "Office domain (ecs.office.com)" \
|
||||
"NOERROR" \
|
||||
"$($DIG ecs.office.com A 2>&1 | grep status:)"
|
||||
|
||||
# Azure Application Insights — another strict TCP server
|
||||
check "Azure telemetry (eastus2-3.in.applicationinsights.azure.com)" \
|
||||
"." \
|
||||
"$($DIG eastus2-3.in.applicationinsights.azure.com A +short 2>/dev/null || echo 'timeout')"
|
||||
|
||||
kill "$NUMA_PID" 2>/dev/null || true
|
||||
wait "$NUMA_PID" 2>/dev/null || true
|
||||
sleep 1
|
||||
|
||||
# ---- Suite 2: Forward mode (backward compat) ----
|
||||
echo ""
|
||||
echo "╔══════════════════════════════════════════╗"
|
||||
echo "║ Suite 2: Forward (DoH) + Blocking ║"
|
||||
echo "╚══════════════════════════════════════════╝"
|
||||
|
||||
run_test_suite "forward DoH + blocking" "
|
||||
[server]
|
||||
bind_addr = \"127.0.0.1:5354\"
|
||||
api_port = 5381
|
||||
|
||||
[upstream]
|
||||
mode = \"forward\"
|
||||
address = \"https://9.9.9.9/dns-query\"
|
||||
|
||||
[cache]
|
||||
max_entries = 10000
|
||||
min_ttl = 60
|
||||
max_ttl = 86400
|
||||
|
||||
[blocking]
|
||||
enabled = true
|
||||
|
||||
[proxy]
|
||||
enabled = false
|
||||
"
|
||||
|
||||
# ---- Suite 3: Forward UDP (plain, no DoH) ----
|
||||
echo ""
|
||||
echo "╔══════════════════════════════════════════╗"
|
||||
echo "║ Suite 3: Forward (UDP) + No Blocking ║"
|
||||
echo "╚══════════════════════════════════════════╝"
|
||||
|
||||
run_test_suite "forward UDP, no blocking" "
|
||||
[server]
|
||||
bind_addr = \"127.0.0.1:5354\"
|
||||
api_port = 5381
|
||||
|
||||
[upstream]
|
||||
mode = \"forward\"
|
||||
address = \"9.9.9.9\"
|
||||
port = 53
|
||||
|
||||
[cache]
|
||||
max_entries = 10000
|
||||
min_ttl = 60
|
||||
max_ttl = 86400
|
||||
|
||||
[blocking]
|
||||
enabled = false
|
||||
|
||||
[proxy]
|
||||
enabled = false
|
||||
"
|
||||
|
||||
# Verify blocking is actually off
|
||||
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
|
||||
NUMA_PID=$!
|
||||
sleep 3
|
||||
|
||||
echo ""
|
||||
echo "=== Blocking disabled ==="
|
||||
ADS_RESULT=$($DIG ads.google.com A +short 2>/dev/null)
|
||||
if echo "$ADS_RESULT" | grep -q "0.0.0.0"; then
|
||||
check "ads.google.com NOT blocked (blocking disabled)" "not-0.0.0.0" "0.0.0.0"
|
||||
else
|
||||
check "ads.google.com NOT blocked (blocking disabled)" "." "$ADS_RESULT"
|
||||
fi
|
||||
|
||||
kill "$NUMA_PID" 2>/dev/null || true
|
||||
wait "$NUMA_PID" 2>/dev/null || true
|
||||
sleep 1
|
||||
|
||||
# ---- Suite 4: Local zones + Overrides API ----
|
||||
echo ""
|
||||
echo "╔══════════════════════════════════════════╗"
|
||||
echo "║ Suite 4: Local Zones + Overrides API ║"
|
||||
echo "╚══════════════════════════════════════════╝"
|
||||
|
||||
cat > "$CONFIG" << 'CONF'
|
||||
[server]
|
||||
bind_addr = "127.0.0.1:5354"
|
||||
api_port = 5381
|
||||
|
||||
[upstream]
|
||||
mode = "forward"
|
||||
address = "9.9.9.9"
|
||||
port = 53
|
||||
|
||||
[cache]
|
||||
max_entries = 10000
|
||||
|
||||
[blocking]
|
||||
enabled = false
|
||||
|
||||
[proxy]
|
||||
enabled = false
|
||||
|
||||
[[zones]]
|
||||
domain = "test.local"
|
||||
record_type = "A"
|
||||
value = "10.0.0.1"
|
||||
ttl = 60
|
||||
|
||||
[[zones]]
|
||||
domain = "mail.local"
|
||||
record_type = "MX"
|
||||
value = "10 smtp.local"
|
||||
ttl = 60
|
||||
CONF
|
||||
|
||||
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
|
||||
NUMA_PID=$!
|
||||
sleep 3
|
||||
|
||||
echo ""
|
||||
echo "=== Local Zones ==="
|
||||
|
||||
check "Local A record (test.local)" \
|
||||
"10.0.0.1" \
|
||||
"$($DIG test.local A +short)"
|
||||
|
||||
check "Local MX record (mail.local)" \
|
||||
"smtp.local" \
|
||||
"$($DIG mail.local MX +short)"
|
||||
|
||||
check "Non-local domain still resolves" \
|
||||
"." \
|
||||
"$($DIG example.com A +short)"
|
||||
|
||||
echo ""
|
||||
echo "=== Overrides API ==="
|
||||
|
||||
# Create override
|
||||
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST http://127.0.0.1:$API_PORT/overrides \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"domain":"override.test","target":"192.168.1.100","duration_secs":60}')
|
||||
check "Create override (HTTP 200/201)" \
|
||||
"20" \
|
||||
"$HTTP_CODE"
|
||||
|
||||
sleep 1
|
||||
|
||||
check "Override resolves" \
|
||||
"192.168.1.100" \
|
||||
"$($DIG override.test A +short)"
|
||||
|
||||
# List overrides
|
||||
check "List overrides" \
|
||||
"override.test" \
|
||||
"$(curl -s http://127.0.0.1:$API_PORT/overrides)"
|
||||
|
||||
# Delete override
|
||||
curl -s -X DELETE http://127.0.0.1:$API_PORT/overrides/override.test > /dev/null
|
||||
|
||||
sleep 1
|
||||
|
||||
# After delete, should not resolve to override
|
||||
AFTER_DELETE=$($DIG override.test A +short 2>/dev/null)
|
||||
if echo "$AFTER_DELETE" | grep -q "192.168.1.100"; then
|
||||
check "Override deleted" "not-192.168.1.100" "$AFTER_DELETE"
|
||||
else
|
||||
check "Override deleted" "." "deleted"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "=== Cache API ==="
|
||||
|
||||
check "Cache list" \
|
||||
"domain" \
|
||||
"$(curl -s http://127.0.0.1:$API_PORT/cache)"
|
||||
|
||||
# Flush cache
|
||||
curl -s -X DELETE http://127.0.0.1:$API_PORT/cache > /dev/null
|
||||
check "Cache flushed" \
|
||||
"0" \
|
||||
"$(curl -s http://127.0.0.1:$API_PORT/stats | grep -o '"entries":[0-9]*' | grep -o '[0-9]*')"
|
||||
|
||||
kill "$NUMA_PID" 2>/dev/null || true
|
||||
wait "$NUMA_PID" 2>/dev/null || true
|
||||
sleep 1
|
||||
|
||||
# ---- Suite 5: DNS-over-TLS (RFC 7858) ----
|
||||
echo ""
|
||||
echo "╔══════════════════════════════════════════╗"
|
||||
echo "║ Suite 5: DNS-over-TLS (RFC 7858) ║"
|
||||
echo "╚══════════════════════════════════════════╝"
|
||||
|
||||
if ! command -v kdig >/dev/null 2>&1; then
|
||||
printf " ${DIM}skipped — install 'knot' for kdig${RESET}\n"
|
||||
elif ! command -v openssl >/dev/null 2>&1; then
|
||||
printf " ${DIM}skipped — openssl not found${RESET}\n"
|
||||
else
|
||||
DOT_PORT=8853
|
||||
DOT_CERT=/tmp/numa-integration-dot.crt
|
||||
DOT_KEY=/tmp/numa-integration-dot.key
|
||||
|
||||
# Generate a test cert mirroring production self_signed_tls SAN shape
|
||||
# (*.numa wildcard + explicit numa.numa apex).
|
||||
openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
|
||||
-keyout "$DOT_KEY" -out "$DOT_CERT" \
|
||||
-subj "/CN=Numa .numa services" \
|
||||
-addext "subjectAltName=DNS:*.numa,DNS:numa.numa" \
|
||||
>/dev/null 2>&1
|
||||
|
||||
# Suite 5 uses a local zone so it's upstream-independent — the point is
|
||||
# to exercise the DoT transport layer (handshake, ALPN, framing,
|
||||
# persistent connections), not re-test recursive resolution.
|
||||
cat > "$CONFIG" << CONF
|
||||
[server]
|
||||
bind_addr = "127.0.0.1:$PORT"
|
||||
api_port = $API_PORT
|
||||
|
||||
[upstream]
|
||||
mode = "forward"
|
||||
address = "127.0.0.1"
|
||||
port = 65535
|
||||
|
||||
[cache]
|
||||
max_entries = 10000
|
||||
|
||||
[blocking]
|
||||
enabled = false
|
||||
|
||||
[proxy]
|
||||
enabled = false
|
||||
|
||||
[dot]
|
||||
enabled = true
|
||||
port = $DOT_PORT
|
||||
bind_addr = "127.0.0.1"
|
||||
cert_path = "$DOT_CERT"
|
||||
key_path = "$DOT_KEY"
|
||||
|
||||
[[zones]]
|
||||
domain = "dot-test.example"
|
||||
record_type = "A"
|
||||
value = "10.0.0.1"
|
||||
ttl = 60
|
||||
CONF
|
||||
|
||||
RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
|
||||
NUMA_PID=$!
|
||||
sleep 4
|
||||
|
||||
if ! kill -0 "$NUMA_PID" 2>/dev/null; then
|
||||
FAILED=$((FAILED + 1))
|
||||
printf " ${RED}✗${RESET} DoT startup\n"
|
||||
printf " ${DIM}%s${RESET}\n" "$(tail -5 "$LOG")"
|
||||
else
|
||||
echo ""
|
||||
echo "=== Listener ==="
|
||||
|
||||
check "DoT bound on 127.0.0.1:$DOT_PORT" \
|
||||
"DoT listening on 127.0.0.1:$DOT_PORT" \
|
||||
"$(grep 'DoT listening' "$LOG")"
|
||||
|
||||
KDIG="kdig @127.0.0.1 -p $DOT_PORT +tls +tls-ca=$DOT_CERT +tls-hostname=numa.numa +time=5 +retry=0"
|
||||
|
||||
echo ""
|
||||
echo "=== Queries over DoT ==="
|
||||
|
||||
check "DoT local zone A record" \
|
||||
"10.0.0.1" \
|
||||
"$($KDIG +short dot-test.example A 2>/dev/null)"
|
||||
|
||||
# +keepopen reuses one TLS connection for multiple queries — tests
|
||||
# persistent connection handling. kdig applies options left-to-right,
|
||||
# so +short and +keepopen must come before the query specs.
|
||||
check "DoT persistent connection (3 queries, 1 handshake)" \
|
||||
"10.0.0.1" \
|
||||
"$($KDIG +keepopen +short dot-test.example A dot-test.example A dot-test.example A 2>/dev/null | head -1)"
|
||||
|
||||
echo ""
|
||||
echo "=== ALPN ==="
|
||||
|
||||
# Positive case: client offers "dot", server picks it.
|
||||
ALPN_OK=$(echo "" | openssl s_client -connect "127.0.0.1:$DOT_PORT" \
|
||||
-servername numa.numa -alpn dot -CAfile "$DOT_CERT" 2>&1 </dev/null || true)
|
||||
check "DoT negotiates ALPN \"dot\"" \
|
||||
"ALPN protocol: dot" \
|
||||
"$ALPN_OK"
|
||||
|
||||
# Negative case: client offers only "h2", server must reject the
|
||||
# handshake with no_application_protocol alert (cross-protocol
|
||||
# confusion defense, RFC 7858bis §3.2).
|
||||
if echo "" | openssl s_client -connect "127.0.0.1:$DOT_PORT" \
|
||||
-servername numa.numa -alpn h2 -CAfile "$DOT_CERT" \
|
||||
</dev/null >/dev/null 2>&1; then
|
||||
ALPN_MISMATCH="handshake unexpectedly succeeded"
|
||||
else
|
||||
ALPN_MISMATCH="rejected"
|
||||
fi
|
||||
check "DoT rejects non-dot ALPN" \
|
||||
"rejected" \
|
||||
"$ALPN_MISMATCH"
|
||||
fi
|
||||
|
||||
kill "$NUMA_PID" 2>/dev/null || true
|
||||
wait "$NUMA_PID" 2>/dev/null || true
|
||||
rm -f "$DOT_CERT" "$DOT_KEY"
|
||||
fi
|
||||
sleep 1

# ---- Suite 6: Proxy + DoT coexistence ----
# Boots numa with BOTH the HTTPS proxy and the DoT listener enabled, then
# verifies the two TLS listeners coexist: both bind, no panics, one shared
# CA validates both handshakes.
echo ""
echo "╔══════════════════════════════════════════╗"
echo "║ Suite 6: Proxy + DoT Coexistence ║"
echo "╚══════════════════════════════════════════╝"

if ! command -v kdig >/dev/null 2>&1 || ! command -v openssl >/dev/null 2>&1; then
    printf " ${DIM}skipped — needs kdig + openssl${RESET}\n"
else
    DOT_PORT=8853
    PROXY_HTTP_PORT=8080
    PROXY_HTTPS_PORT=8443
    NUMA_DATA=/tmp/numa-integration-data

    # Fresh data dir so we generate a fresh CA for this suite. Path is set
    # via [server] data_dir in the TOML below, not an env var — numa treats
    # its config file as the single source of truth for all knobs.
    rm -rf "$NUMA_DATA"
    mkdir -p "$NUMA_DATA"

    # Upstream points at 127.0.0.1:65535 (nothing listens there) so only
    # the local [[zones]] entry can answer — keeps the suite offline.
    cat > "$CONFIG" << CONF
[server]
bind_addr = "127.0.0.1:$PORT"
api_port = $API_PORT
data_dir = "$NUMA_DATA"

[upstream]
mode = "forward"
address = "127.0.0.1"
port = 65535

[cache]
max_entries = 10000

[blocking]
enabled = false

[proxy]
enabled = true
port = $PROXY_HTTP_PORT
tls_port = $PROXY_HTTPS_PORT
tld = "numa"
bind_addr = "127.0.0.1"

[dot]
enabled = true
port = $DOT_PORT
bind_addr = "127.0.0.1"

[[zones]]
domain = "dot-test.example"
record_type = "A"
value = "10.0.0.1"
ttl = 60
CONF

    RUST_LOG=info "$BINARY" "$CONFIG" > "$LOG" 2>&1 &
    NUMA_PID=$!
    sleep 4

    # If the process died during startup, report the tail of the log and
    # skip the per-feature checks; the shared kill/cleanup below still runs.
    if ! kill -0 "$NUMA_PID" 2>/dev/null; then
        FAILED=$((FAILED + 1))
        printf " ${RED}✗${RESET} Startup with proxy + DoT\n"
        printf " ${DIM}%s${RESET}\n" "$(tail -5 "$LOG")"
    else
        echo ""
        echo "=== Both listeners ==="

        check "DoT listener bound" \
            "DoT listening on 127.0.0.1:$DOT_PORT" \
            "$(grep 'DoT listening' "$LOG")"

        check "HTTPS proxy listener bound" \
            "HTTPS proxy listening on 127.0.0.1:$PROXY_HTTPS_PORT" \
            "$(grep 'HTTPS proxy listening' "$LOG")"

        # grep -c prints a count ("0") even when it exits non-zero on no
        # match, so an `|| echo 0` fallback here would produce "0\n0".
        # Substitute 0 only when the capture itself failed (no match, or
        # the log file is missing/unreadable).
        PANIC_COUNT=$(grep -c 'panicked' "$LOG" 2>/dev/null) || PANIC_COUNT=0
        check "No startup panics in log" \
            "^0$" \
            "$PANIC_COUNT"

        echo ""
        echo "=== DoT works with proxy enabled ==="

        # Proxy's build_tls_config runs first and creates the CA in the
        # configured data_dir. DoT self_signed_tls then loads the same CA
        # and issues its own leaf cert. One CA trusts both listeners.
        CA="$NUMA_DATA/ca.pem"
        KDIG="kdig @127.0.0.1 -p $DOT_PORT +tls +tls-ca=$CA +tls-hostname=numa.numa +time=5 +retry=0"

        check "DoT local zone A (with proxy on)" \
            "10.0.0.1" \
            "$($KDIG +short dot-test.example A 2>/dev/null)"

        echo ""
        echo "=== Proxy TLS works with DoT enabled ==="

        # Proxy cert has SAN numa.numa (auto-added "numa" service). A
        # successful handshake validates that the proxy's separate
        # ServerConfig wasn't disturbed by DoT's own cert generation.
        PROXY_TLS=$(echo "" | openssl s_client -connect "127.0.0.1:$PROXY_HTTPS_PORT" \
            -servername numa.numa -CAfile "$CA" 2>&1 </dev/null || true)
        check "Proxy HTTPS TLS handshake succeeds" \
            "Verify return code: 0 (ok)" \
            "$PROXY_TLS"
    fi

    kill "$NUMA_PID" 2>/dev/null || true
    wait "$NUMA_PID" 2>/dev/null || true
    rm -rf "$NUMA_DATA"
fi
|
||||
|
||||
# Summary — report the tally and exit non-zero iff anything failed.
echo ""
TOTAL=$((PASSED + FAILED))
if [ "$FAILED" -ne 0 ]; then
    printf "${RED}%d/%d tests failed.${RESET}\n" "$FAILED" "$TOTAL"
    echo ""
    echo "Log: $LOG"
    exit 1
fi
printf "${GREEN}All %d tests passed.${RESET}\n" "$TOTAL"
exit 0
|
||||
94
tests/manual/install-trust-macos.sh
Executable file
94
tests/manual/install-trust-macos.sh
Executable file
@@ -0,0 +1,94 @@
|
||||
#!/usr/bin/env bash
#
# Manual macOS CA trust contract test.
#
# Mirrors src/system_dns.rs::trust_ca_macos / untrust_ca_macos by running
# the same `security` shell commands against a fixture cert with a unique
# CN. Safe to run alongside a production numa install:
#
# - Test cert CN = "Numa Local CA Test <pid-ts>", always strictly longer
#   than the production CN "Numa Local CA". `security find-certificate -c`
#   does substring matching, so the test's search for $TEST_CN can never
#   match the production cert (the search term is longer than the prod CN).
# - All deletes use `delete-certificate -Z <hash>`, which only touches the
#   cert with that exact hash. Production and test certs have different
#   hashes by construction (different key material), so the delete cannot
#   reach the production cert even if a CN search somehow returned both.
#
# Mutates the System keychain (briefly). Cleans up on success or interrupt.
# Requires sudo for `security add-trusted-cert` and `delete-certificate`.
#
# Usage: ./tests/manual/install-trust-macos.sh

set -euo pipefail

if [[ "$OSTYPE" != darwin* ]]; then
    echo "This test is macOS-only." >&2
    exit 1
fi

GREEN="\033[32m"; RED="\033[31m"; RESET="\033[0m"

# Production constant from src/tls.rs::CA_COMMON_NAME — keep in sync.
PROD_CN="Numa Local CA"
KEYCHAIN="/Library/Keychains/System.keychain"

# Notice if production numa is already installed. We proceed regardless —
# see header for why coexistence is safe (unique CN + by-hash deletion).
if security find-certificate -c "$PROD_CN" "$KEYCHAIN" >/dev/null 2>&1; then
    echo " note: production '$PROD_CN' detected — proceeding alongside (test cert can't touch it)"
    echo
fi

# Unique CN ensures the test cert can never collide with production.
# ($$ = this shell's PID; the timestamp guards against PID reuse.)
TEST_CN="Numa Local CA Test $$-$(date +%s)"
FIXTURE_DIR=$(mktemp -d)

cleanup() {
    # Best-effort: remove any test certs by hash if still present.
    # Every step is `|| true`-guarded — cleanup must never abort the trap.
    if security find-certificate -c "$TEST_CN" "$KEYCHAIN" >/dev/null 2>&1; then
        echo " cleanup: removing leftover test cert"
        security find-certificate -c "$TEST_CN" -a -Z "$KEYCHAIN" 2>/dev/null \
            | awk '/^SHA-1 hash:/ {print $NF}' \
            | while read -r hash; do
                sudo security delete-certificate -Z "$hash" "$KEYCHAIN" >/dev/null 2>&1 || true
            done
    fi
    rm -rf "$FIXTURE_DIR"
}
# NOTE(review): EXIT fires on normal exit and on set -e failures; whether it
# fires on an untrapped Ctrl-C varies by shell/signal — consider
# `trap cleanup EXIT INT TERM` to make the "interrupt" promise explicit.
trap cleanup EXIT

echo "── generating fixture CA ──"
# Throwaway 1-day CA with CA:TRUE + keyCertSign so the keychain treats it
# like a real root.
# NOTE(review): `-addext` requires OpenSSL >= 1.1.1 (or a recent LibreSSL);
# older stock macOS LibreSSL may not support it — confirm on target machines.
openssl req -x509 -newkey rsa:2048 -nodes -days 1 \
    -keyout "$FIXTURE_DIR/ca.key" \
    -out "$FIXTURE_DIR/ca.pem" \
    -subj "/CN=$TEST_CN" \
    -addext "basicConstraints=critical,CA:TRUE" \
    -addext "keyUsage=critical,keyCertSign,cRLSign" >/dev/null 2>&1
echo " CN: $TEST_CN"
echo

echo "── trust step (mirrors trust_ca_macos) ──"
sudo security add-trusted-cert -d -r trustRoot -k "$KEYCHAIN" "$FIXTURE_DIR/ca.pem"
if security find-certificate -c "$TEST_CN" "$KEYCHAIN" >/dev/null 2>&1; then
    printf " ${GREEN}✓${RESET} test cert found in keychain\n"
else
    printf " ${RED}✗${RESET} test cert NOT found after add-trusted-cert\n"
    exit 1
fi
echo

echo "── untrust step (mirrors untrust_ca_macos) ──"
# Unlike cleanup(), no `|| true` on the delete: under set -e / pipefail a
# failed delete aborts the test loudly — that's the contract being checked.
security find-certificate -c "$TEST_CN" -a -Z "$KEYCHAIN" 2>/dev/null \
    | awk '/^SHA-1 hash:/ {print $NF}' \
    | while read -r hash; do
        sudo security delete-certificate -Z "$hash" "$KEYCHAIN" >/dev/null
    done
if security find-certificate -c "$TEST_CN" "$KEYCHAIN" >/dev/null 2>&1; then
    printf " ${RED}✗${RESET} test cert STILL present after delete (regression)\n"
    exit 1
fi
printf " ${GREEN}✓${RESET} test cert removed from keychain\n"
echo

printf "${GREEN}all checks passed${RESET}\n"
|
||||
128
tests/network-probe.sh
Executable file
128
tests/network-probe.sh
Executable file
@@ -0,0 +1,128 @@
|
||||
#!/usr/bin/env bash
# Network probe: tests which DNS transports are available on the current network.
# Run on a problematic network to diagnose what's blocked.
# Usage: ./tests/network-probe.sh

set -euo pipefail

# ANSI escape codes for the pass/fail report.
GREEN="\033[32m"
RED="\033[31m"
DIM="\033[90m"
RESET="\033[0m"

# Global tallies, incremented by probe() and reported in the summary.
PASSED=0
FAILED=0
|
||||
|
||||
# probe NAME CMD EXPECT — run CMD and pass iff its combined output
# contains the EXPECT pattern (grep regex).
#
# stdout+stderr are captured together; a non-zero exit from CMD is not a
# failure by itself — only a missing pattern is. Increments the global
# PASSED/FAILED tallies and prints a one-line result.
probe() {
    local label="$1" command="$2" pattern="$3"
    local output

    # eval so the call sites can pass full pipelines in one string; the
    # trailing guard keeps set -e from aborting on unreachable servers.
    output=$(eval "$command" 2>&1) || true

    if ! echo "$output" | grep -q "$pattern"; then
        FAILED=$((FAILED + 1))
        printf " ${RED}✗${RESET} %-45s ${DIM}blocked/timeout${RESET}\n" "$label"
        return 0
    fi

    PASSED=$((PASSED + 1))
    printf " ${GREEN}✓${RESET} %-45s ${DIM}%s${RESET}\n" "$label" "$(echo "$output" | head -1 | cut -c1-60)"
}
|
||||
|
||||
echo ""
echo "Network DNS Transport Probe"
echo "==========================="
# Banner: best-effort identification of the network under test.
# networksetup / ipconfig / route are macOS tools; each substitution falls
# back to 'unknown' instead of failing under set -e.
echo "Network: $(networksetup -getairportnetwork en0 2>/dev/null | sed 's/Current Wi-Fi Network: //' || echo 'unknown')"
echo "Local IP: $(ipconfig getifaddr en0 2>/dev/null || echo 'unknown')"
echo "Gateway: $(route -n get default 2>/dev/null | grep gateway | awk '{print $2}' || echo 'unknown')"
echo ""

# Plain UDP queries direct to root servers, public resolvers, and a .com
# TLD server — the transports recursive resolution needs.
echo "=== UDP port 53 (recursive resolution) ==="
probe "Root server a (198.41.0.4)" \
    "dig @198.41.0.4 . NS +short +time=5 +tries=1" \
    "root-servers"

probe "Root server k (193.0.14.129)" \
    "dig @193.0.14.129 . NS +short +time=5 +tries=1" \
    "root-servers"

# "\." matches any dotted answer, i.e. an IP address came back.
probe "Google DNS (8.8.8.8)" \
    "dig @8.8.8.8 google.com A +short +time=5 +tries=1" \
    "\."

probe "Cloudflare (1.1.1.1)" \
    "dig @1.1.1.1 cloudflare.com A +short +time=5 +tries=1" \
    "\."

probe ".com TLD (192.5.6.30)" \
    "dig @192.5.6.30 google.com NS +short +time=5 +tries=1" \
    "google"
|
||||
|
||||
echo ""
# Same queries over TCP — some networks drop UDP:53 but let TCP:53 through.
echo "=== TCP port 53 ==="
probe "Google DNS TCP (8.8.8.8)" \
    "dig @8.8.8.8 google.com A +short +tcp +time=5 +tries=1" \
    "\."

probe "Root server TCP (198.41.0.4)" \
    "dig @198.41.0.4 . NS +short +tcp +time=5 +tries=1" \
    "root-servers"

echo ""
# TLS-handshake-only check: "verify return" lines prove port 853 connects
# and a cert chain is presented; no DNS query is sent over the tunnel.
echo "=== DoT port 853 (DNS-over-TLS) ==="
probe "Quad9 DoT (9.9.9.9:853)" \
    "echo Q | openssl s_client -connect 9.9.9.9:853 -servername dns.quad9.net 2>&1 | grep 'verify return'" \
    "verify return"

probe "Cloudflare DoT (1.1.1.1:853)" \
    "echo Q | openssl s_client -connect 1.1.1.1:853 -servername cloudflare-dns.com 2>&1 | grep 'verify return'" \
    "verify return"

echo ""
# JSON DoH endpoints: a successful lookup returns a body containing
# "Answer". Quad9/Cloudflare need the dns-json accept header; Google's
# /resolve endpoint returns JSON without it.
echo "=== DoH port 443 (DNS-over-HTTPS) ==="
probe "Quad9 DoH (dns.quad9.net)" \
    "curl -s -m 5 -H 'accept: application/dns-json' 'https://dns.quad9.net:443/dns-query?name=google.com&type=A'" \
    "Answer"

probe "Cloudflare DoH (1.1.1.1)" \
    "curl -s -m 5 -H 'accept: application/dns-json' 'https://1.1.1.1/dns-query?name=google.com&type=A'" \
    "Answer"

probe "Google DoH (dns.google)" \
    "curl -s -m 5 'https://dns.google/resolve?name=google.com&type=A'" \
    "Answer"
|
||||
|
||||
echo ""
echo "=== ISP DNS ==="
# Detect the system resolver via scutil; falls back to "unknown".
SYS_DNS=$(scutil --dns 2>/dev/null | grep "nameserver\[0\]" | head -1 | awk '{print $3}' || echo "unknown")
# Skip when undetected, or when the resolver is loopback (pointing the
# probe at a local forwarder would not tell us anything about the ISP).
if [ "$SYS_DNS" != "unknown" ] && [ "$SYS_DNS" != "127.0.0.1" ]; then
    probe "ISP DNS ($SYS_DNS)" \
        "dig @$SYS_DNS google.com A +short +time=5 +tries=1" \
        "\."
else
    # Pass the detected value as a %s argument instead of interpolating it
    # into the printf format string: a stray '%' in the value would
    # otherwise be parsed as a format directive.
    printf " ${DIM}– System DNS is %s (skipped)${RESET}\n" "$SYS_DNS"
fi
|
||||
|
||||
echo ""
echo "==========================="
TOTAL=$((PASSED + FAILED))
printf "Results: ${GREEN}%d passed${RESET}, ${RED}%d blocked${RESET} / %d total\n" "$PASSED" "$FAILED" "$TOTAL"

echo ""
# Recommendation: translate the probe results into a numa config choice,
# re-checking the decisive transports directly.
echo "Recommendation:"
if [ "$FAILED" -eq 0 ]; then
    echo " All transports available. Recursive mode will work."
elif dig @198.41.0.4 . NS +short +time=5 +tries=1 2>&1 | grep -q "root-servers"; then
    # Other probes may have failed, but raw UDP:53 to a root server is all
    # recursive mode strictly needs.
    echo " UDP:53 works. Recursive mode will work."
else
    echo " UDP:53 blocked — recursive mode will NOT work on this network."
    # Mirror the probes above exactly: the dns-json accept header is what
    # makes Quad9 return a body containing "Answer", and -servername is the
    # SNI the DoT probe negotiated with — a bare re-check could disagree
    # with the probe results it is summarizing.
    if curl -s -m 5 -H 'accept: application/dns-json' 'https://dns.quad9.net:443/dns-query?name=test.com&type=A' 2>&1 | grep -q "Answer"; then
        echo " DoH (port 443) works — use mode = \"forward\" with DoH upstream."
    elif echo Q | openssl s_client -connect 9.9.9.9:853 -servername dns.quad9.net 2>&1 | grep -q "verify return"; then
        echo " DoT (port 853) works — DoT upstream would work (not yet implemented)."
    else
        echo " Only ISP DNS available. Use mode = \"forward\" with ISP auto-detect."
    fi
fi
|
||||
Reference in New Issue
Block a user