feat(odoh): ship ODoH client + self-hosted relay (RFC 9230)
Client (mode = "odoh"): URL-query target routing per RFC 9230 §5,
/.well-known/odohconfigs TTL cache with 60s backoff on failure, HPKE
seal/open via odoh-rs, strict-mode default that SERVFAILs on relay
failure instead of silently downgrading. Host-equality config
validation rejects same-operator relay/target pairs.
Relay (`numa relay [PORT]`): axum server with /relay + /health.
SSRF-hardened hostname validator (RFC 1035 ASCII + dot + dash),
4 KiB body cap at the axum layer, 5s full-transaction timeout, and
static 502 on target failure (reqwest internals logged, not leaked).
Aggregate counters only — no per-request logs.
Observability: new `UpstreamTransport { Udp, Doh, Dot, Odoh }`
orthogonal to `QueryPath`, so /stats can tally wire protocols
symmetrically. Recursive mode records `Some(Udp)` for honest
"bytes egressing in cleartext" accounting.
Tests: Suite 8 exercises the client end-to-end via Frank Denis's
public relay + Cloudflare target; Suite 9 exercises `numa relay`
forwarding + guards against Cloudflare as the real far end. Full
probe script at tests/probe-odoh-ecosystem.sh verifies the entire
public ODoH ecosystem (4 targets + 1 relay per DNSCrypt's curated
list — confirms deploying Numa's relay doubles global supply).
This commit is contained in:
15
src/api.rs
15
src/api.rs
@@ -170,6 +170,7 @@ struct StatsResponse {
|
||||
srtt: bool,
|
||||
queries: QueriesStats,
|
||||
transport: TransportStats,
|
||||
upstream_transport: UpstreamTransportStats,
|
||||
cache: CacheStats,
|
||||
overrides: OverrideStats,
|
||||
blocking: BlockingStatsResponse,
|
||||
@@ -186,6 +187,14 @@ struct TransportStats {
|
||||
doh: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct UpstreamTransportStats {
|
||||
udp: u64,
|
||||
doh: u64,
|
||||
dot: u64,
|
||||
odoh: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct MobileStatsResponse {
|
||||
enabled: bool,
|
||||
@@ -566,6 +575,12 @@ async fn stats(State(ctx): State<Arc<ServerCtx>>) -> Json<StatsResponse> {
|
||||
dot: snap.transport_dot,
|
||||
doh: snap.transport_doh,
|
||||
},
|
||||
upstream_transport: UpstreamTransportStats {
|
||||
udp: snap.upstream_transport_udp,
|
||||
doh: snap.upstream_transport_doh,
|
||||
dot: snap.upstream_transport_dot,
|
||||
odoh: snap.upstream_transport_odoh,
|
||||
},
|
||||
cache: CacheStats {
|
||||
entries: cache_len,
|
||||
max_entries: cache_max,
|
||||
|
||||
177
src/config.rs
177
src/config.rs
@@ -134,6 +134,7 @@ pub enum UpstreamMode {
|
||||
#[default]
|
||||
Forward,
|
||||
Recursive,
|
||||
Odoh,
|
||||
}
|
||||
|
||||
impl UpstreamMode {
|
||||
@@ -142,6 +143,7 @@ impl UpstreamMode {
|
||||
UpstreamMode::Auto => "auto",
|
||||
UpstreamMode::Forward => "forward",
|
||||
UpstreamMode::Recursive => "recursive",
|
||||
UpstreamMode::Odoh => "odoh",
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -154,7 +156,7 @@ pub struct UpstreamConfig {
|
||||
pub address: Vec<String>,
|
||||
#[serde(default = "default_upstream_port")]
|
||||
pub port: u16,
|
||||
#[serde(default)]
|
||||
#[serde(default, deserialize_with = "string_or_vec")]
|
||||
pub fallback: Vec<String>,
|
||||
#[serde(default = "default_timeout_ms")]
|
||||
pub timeout_ms: u64,
|
||||
@@ -166,6 +168,20 @@ pub struct UpstreamConfig {
|
||||
pub prime_tlds: Vec<String>,
|
||||
#[serde(default = "default_srtt")]
|
||||
pub srtt: bool,
|
||||
|
||||
/// Only used when `mode = "odoh"`. Full https:// URL of the relay
|
||||
/// endpoint (including path, e.g. `https://odoh-relay.numa.rs/relay`).
|
||||
#[serde(default)]
|
||||
pub relay: Option<String>,
|
||||
/// Only used when `mode = "odoh"`. Full https:// URL of the target
|
||||
/// resolver (`https://odoh.cloudflare-dns.com/dns-query`).
|
||||
#[serde(default)]
|
||||
pub target: Option<String>,
|
||||
/// Only used when `mode = "odoh"`. When true (the default), relay failure
|
||||
/// returns SERVFAIL instead of downgrading to the `fallback` upstream —
|
||||
/// a user who configured ODoH rarely wants a silent non-oblivious path.
|
||||
#[serde(default)]
|
||||
pub strict: Option<bool>,
|
||||
}
|
||||
|
||||
impl Default for UpstreamConfig {
|
||||
@@ -180,10 +196,75 @@ impl Default for UpstreamConfig {
|
||||
root_hints: default_root_hints(),
|
||||
prime_tlds: default_prime_tlds(),
|
||||
srtt: default_srtt(),
|
||||
relay: None,
|
||||
target: None,
|
||||
strict: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parsed ODoH config fields. `mode = "odoh"` requires both URLs to be
|
||||
/// present, to parse as `https://`, and to resolve to distinct hosts.
|
||||
#[derive(Debug)]
|
||||
pub struct OdohUpstream {
|
||||
pub relay_url: String,
|
||||
pub target_host: String,
|
||||
pub target_path: String,
|
||||
pub strict: bool,
|
||||
}
|
||||
|
||||
impl UpstreamConfig {
|
||||
/// Validate and extract ODoH-specific fields. Called during `load_config`
|
||||
/// so misconfigured ODoH fails fast at startup, the same care we take
|
||||
/// with the DNSSEC strict boot check.
|
||||
pub fn odoh_upstream(&self) -> Result<OdohUpstream> {
|
||||
let relay = self
|
||||
.relay
|
||||
.as_deref()
|
||||
.ok_or("mode = \"odoh\" requires upstream.relay")?;
|
||||
let target = self
|
||||
.target
|
||||
.as_deref()
|
||||
.ok_or("mode = \"odoh\" requires upstream.target")?;
|
||||
|
||||
let relay_url = reqwest::Url::parse(relay)
|
||||
.map_err(|e| format!("upstream.relay invalid URL '{}': {}", relay, e))?;
|
||||
let target_url = reqwest::Url::parse(target)
|
||||
.map_err(|e| format!("upstream.target invalid URL '{}': {}", target, e))?;
|
||||
|
||||
if relay_url.scheme() != "https" || target_url.scheme() != "https" {
|
||||
return Err("upstream.relay and upstream.target must both use https://".into());
|
||||
}
|
||||
if relay_url.host_str().is_none() || target_url.host_str().is_none() {
|
||||
return Err("upstream.relay and upstream.target must include a host".into());
|
||||
}
|
||||
if relay_url.host_str() == target_url.host_str() {
|
||||
return Err(format!(
|
||||
"upstream.relay and upstream.target resolve to the same host ({}); the privacy property requires distinct operators",
|
||||
relay_url.host_str().unwrap_or("?")
|
||||
)
|
||||
.into());
|
||||
}
|
||||
|
||||
let target_host = target_url
|
||||
.host_str()
|
||||
.ok_or("upstream.target has no host")?
|
||||
.to_string();
|
||||
let target_path = if target_url.path().is_empty() {
|
||||
"/".to_string()
|
||||
} else {
|
||||
target_url.path().to_string()
|
||||
};
|
||||
|
||||
Ok(OdohUpstream {
|
||||
relay_url: relay.to_string(),
|
||||
target_host,
|
||||
target_path,
|
||||
strict: self.strict.unwrap_or(true),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn string_or_vec<'de, D>(deserializer: D) -> std::result::Result<Vec<String>, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
@@ -643,12 +724,22 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fallback_parses() {
|
||||
fn fallback_array_parses() {
|
||||
let config: Config =
|
||||
toml::from_str("[upstream]\nfallback = [\"8.8.8.8\", \"1.1.1.1\"]").unwrap();
|
||||
assert_eq!(config.upstream.fallback, vec!["8.8.8.8", "1.1.1.1"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fallback_string_parses_as_singleton_vec() {
|
||||
let config: Config =
|
||||
toml::from_str("[upstream]\nfallback = \"tls://1.1.1.1#cloudflare-dns.com\"").unwrap();
|
||||
assert_eq!(
|
||||
config.upstream.fallback,
|
||||
vec!["tls://1.1.1.1#cloudflare-dns.com"]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_address_gives_empty_vec() {
|
||||
let config: Config = toml::from_str("").unwrap();
|
||||
@@ -656,6 +747,88 @@ mod tests {
|
||||
assert!(config.upstream.fallback.is_empty());
|
||||
}
|
||||
|
||||
// ── [upstream] mode = "odoh" ────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn odoh_config_parses_and_validates() {
|
||||
let toml = r#"
|
||||
[upstream]
|
||||
mode = "odoh"
|
||||
relay = "https://odoh-relay.numa.rs/relay"
|
||||
target = "https://odoh.cloudflare-dns.com/dns-query"
|
||||
"#;
|
||||
let config: Config = toml::from_str(toml).unwrap();
|
||||
assert!(matches!(config.upstream.mode, UpstreamMode::Odoh));
|
||||
let odoh = config.upstream.odoh_upstream().unwrap();
|
||||
assert_eq!(odoh.relay_url, "https://odoh-relay.numa.rs/relay");
|
||||
assert_eq!(odoh.target_host, "odoh.cloudflare-dns.com");
|
||||
assert_eq!(odoh.target_path, "/dns-query");
|
||||
assert!(odoh.strict, "strict defaults to true under mode=odoh");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn odoh_strict_false_is_honoured() {
|
||||
let toml = r#"
|
||||
[upstream]
|
||||
mode = "odoh"
|
||||
relay = "https://odoh-relay.numa.rs/relay"
|
||||
target = "https://odoh.cloudflare-dns.com/dns-query"
|
||||
strict = false
|
||||
"#;
|
||||
let config: Config = toml::from_str(toml).unwrap();
|
||||
assert!(!config.upstream.odoh_upstream().unwrap().strict);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn odoh_rejects_same_host_relay_and_target() {
|
||||
let toml = r#"
|
||||
[upstream]
|
||||
mode = "odoh"
|
||||
relay = "https://odoh.example.com/relay"
|
||||
target = "https://odoh.example.com/dns-query"
|
||||
"#;
|
||||
let config: Config = toml::from_str(toml).unwrap();
|
||||
let err = config.upstream.odoh_upstream().unwrap_err().to_string();
|
||||
assert!(err.contains("same host"), "got: {err}");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn odoh_rejects_non_https() {
|
||||
let toml = r#"
|
||||
[upstream]
|
||||
mode = "odoh"
|
||||
relay = "http://odoh-relay.numa.rs/relay"
|
||||
target = "https://odoh.cloudflare-dns.com/dns-query"
|
||||
"#;
|
||||
let config: Config = toml::from_str(toml).unwrap();
|
||||
let err = config.upstream.odoh_upstream().unwrap_err().to_string();
|
||||
assert!(err.contains("https"), "got: {err}");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn odoh_missing_relay_rejected() {
|
||||
let toml = r#"
|
||||
[upstream]
|
||||
mode = "odoh"
|
||||
target = "https://odoh.cloudflare-dns.com/dns-query"
|
||||
"#;
|
||||
let config: Config = toml::from_str(toml).unwrap();
|
||||
let err = config.upstream.odoh_upstream().unwrap_err().to_string();
|
||||
assert!(err.contains("upstream.relay"), "got: {err}");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn odoh_missing_target_rejected() {
|
||||
let toml = r#"
|
||||
[upstream]
|
||||
mode = "odoh"
|
||||
relay = "https://odoh-relay.numa.rs/relay"
|
||||
"#;
|
||||
let config: Config = toml::from_str(toml).unwrap();
|
||||
let err = config.upstream.odoh_upstream().unwrap_err().to_string();
|
||||
assert!(err.contains("upstream.target"), "got: {err}");
|
||||
}
|
||||
|
||||
// ── issue #82: [[forwarding]] config section ────────────────────────
|
||||
|
||||
#[test]
|
||||
|
||||
14
src/ctx.rs
14
src/ctx.rs
@@ -105,6 +105,7 @@ pub async fn resolve_query(
|
||||
// Pipeline: overrides -> .localhost -> local zones -> special-use (unless forwarded)
|
||||
// -> .tld proxy -> blocklist -> cache -> forwarding -> recursive/upstream
|
||||
// Each lock is scoped to avoid holding MutexGuard across await points.
|
||||
let mut upstream_transport: Option<crate::stats::UpstreamTransport> = None;
|
||||
let (response, path, dnssec) = {
|
||||
let override_record = ctx.overrides.read().unwrap().lookup(&qname);
|
||||
if let Some(record) = override_record {
|
||||
@@ -208,6 +209,7 @@ pub async fn resolve_query(
|
||||
{
|
||||
// Conditional forwarding takes priority over recursive mode
|
||||
// (e.g. Tailscale .ts.net, VPC private zones)
|
||||
upstream_transport = pool.preferred().map(|u| u.transport());
|
||||
match forward_with_failover_raw(
|
||||
raw_wire,
|
||||
pool,
|
||||
@@ -241,6 +243,9 @@ pub async fn resolve_query(
|
||||
}
|
||||
}
|
||||
} else if ctx.upstream_mode == UpstreamMode::Recursive {
|
||||
// Recursive resolution makes UDP hops to roots/TLDs/auths;
|
||||
// tag as Udp so the dashboard can aggregate plaintext-wire
|
||||
// egress honestly. Only mark on success — errors stay None.
|
||||
let key = (qname.clone(), qtype);
|
||||
let (resp, path, err) = resolve_coalesced(&ctx.inflight, key, &query, || {
|
||||
crate::recursive::resolve_recursive(
|
||||
@@ -263,6 +268,8 @@ pub async fn resolve_query(
|
||||
qname,
|
||||
err.as_deref().unwrap_or("leader failed")
|
||||
);
|
||||
} else {
|
||||
upstream_transport = Some(crate::stats::UpstreamTransport::Udp);
|
||||
}
|
||||
(resp, path, DnssecStatus::Indeterminate)
|
||||
} else {
|
||||
@@ -277,7 +284,10 @@ pub async fn resolve_query(
|
||||
.await
|
||||
{
|
||||
Ok(resp_wire) => match cache_and_parse(ctx, &qname, qtype, &resp_wire) {
|
||||
Ok(resp) => (resp, QueryPath::Upstream, DnssecStatus::Indeterminate),
|
||||
Ok(resp) => {
|
||||
upstream_transport = pool.preferred().map(|u| u.transport());
|
||||
(resp, QueryPath::Upstream, DnssecStatus::Indeterminate)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("{} | {:?} {} | PARSE ERROR | {}", src_addr, qtype, qname, e);
|
||||
(
|
||||
@@ -397,7 +407,7 @@ pub async fn resolve_query(
|
||||
// Record stats and query log
|
||||
{
|
||||
let mut s = ctx.stats.lock().unwrap();
|
||||
let total = s.record(path, transport);
|
||||
let total = s.record(path, transport, upstream_transport);
|
||||
if total.is_multiple_of(1000) {
|
||||
s.log_summary();
|
||||
}
|
||||
|
||||
119
src/forward.rs
119
src/forward.rs
@@ -1,14 +1,16 @@
|
||||
use std::fmt;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::RwLock;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use tokio::net::UdpSocket;
|
||||
use tokio::time::timeout;
|
||||
|
||||
use crate::buffer::BytePacketBuffer;
|
||||
use crate::odoh::{query_through_relay, OdohConfigCache};
|
||||
use crate::packet::DnsPacket;
|
||||
use crate::srtt::SrttCache;
|
||||
use crate::stats::UpstreamTransport;
|
||||
use crate::Result;
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -23,16 +25,34 @@ pub enum Upstream {
|
||||
tls_name: Option<String>,
|
||||
connector: tokio_rustls::TlsConnector,
|
||||
},
|
||||
/// Oblivious DNS-over-HTTPS (RFC 9230). Queries are HPKE-sealed to the
|
||||
/// target and forwarded through an independent relay. Target host lives
|
||||
/// on `target_config` (single source of truth — the cache keys on it).
|
||||
Odoh {
|
||||
relay_url: String,
|
||||
target_path: String,
|
||||
client: reqwest::Client,
|
||||
target_config: Arc<OdohConfigCache>,
|
||||
},
|
||||
}
|
||||
|
||||
impl Upstream {
|
||||
/// IP address to key SRTT tracking on, if the upstream has a stable one.
|
||||
/// `Doh` routes through a URL + connection pool, so there's no single IP
|
||||
/// to track; SRTT is skipped for it.
|
||||
/// `Doh` and `Odoh` route through a URL + connection pool, so there's no
|
||||
/// single IP to track; SRTT is skipped for them.
|
||||
pub fn tracked_ip(&self) -> Option<IpAddr> {
|
||||
match self {
|
||||
Upstream::Udp(addr) | Upstream::Dot { addr, .. } => Some(addr.ip()),
|
||||
Upstream::Doh { .. } => None,
|
||||
Upstream::Doh { .. } | Upstream::Odoh { .. } => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn transport(&self) -> UpstreamTransport {
|
||||
match self {
|
||||
Upstream::Udp(_) => UpstreamTransport::Udp,
|
||||
Upstream::Doh { .. } => UpstreamTransport::Doh,
|
||||
Upstream::Dot { .. } => UpstreamTransport::Dot,
|
||||
Upstream::Odoh { .. } => UpstreamTransport::Odoh,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -43,6 +63,20 @@ impl PartialEq for Upstream {
|
||||
(Self::Udp(a), Self::Udp(b)) => a == b,
|
||||
(Self::Doh { url: a, .. }, Self::Doh { url: b, .. }) => a == b,
|
||||
(Self::Dot { addr: a, .. }, Self::Dot { addr: b, .. }) => a == b,
|
||||
(
|
||||
Self::Odoh {
|
||||
relay_url: ra,
|
||||
target_path: pa,
|
||||
target_config: ca,
|
||||
..
|
||||
},
|
||||
Self::Odoh {
|
||||
relay_url: rb,
|
||||
target_path: pb,
|
||||
target_config: cb,
|
||||
..
|
||||
},
|
||||
) => ra == rb && pa == pb && ca.target_host() == cb.target_host(),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
@@ -63,6 +97,18 @@ impl fmt::Display for Upstream {
|
||||
Some(name) => write!(f, "tls://{}#{}", addr, name),
|
||||
None => write!(f, "tls://{}", addr),
|
||||
},
|
||||
Upstream::Odoh {
|
||||
relay_url,
|
||||
target_path,
|
||||
target_config,
|
||||
..
|
||||
} => write!(
|
||||
f,
|
||||
"odoh://{}{} via {}",
|
||||
target_config.target_host(),
|
||||
target_path,
|
||||
relay_url
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -82,22 +128,20 @@ pub(crate) fn parse_upstream_addr(
|
||||
Err(format!("invalid upstream address: {}", s))
|
||||
}
|
||||
|
||||
/// Parse a slice of upstream address strings into `Upstream` values, failing
|
||||
/// on the first invalid entry.
|
||||
pub fn parse_upstream_list(addrs: &[String], default_port: u16) -> Result<Vec<Upstream>> {
|
||||
addrs
|
||||
.iter()
|
||||
.map(|s| parse_upstream(s, default_port))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn parse_upstream(s: &str, default_port: u16) -> Result<Upstream> {
|
||||
if s.starts_with("https://") {
|
||||
let client = reqwest::Client::builder()
|
||||
.use_rustls_tls()
|
||||
.http2_initial_stream_window_size(65_535)
|
||||
.http2_initial_connection_window_size(65_535)
|
||||
.http2_keep_alive_interval(Duration::from_secs(15))
|
||||
.http2_keep_alive_while_idle(true)
|
||||
.http2_keep_alive_timeout(Duration::from_secs(10))
|
||||
.pool_idle_timeout(Duration::from_secs(300))
|
||||
.pool_max_idle_per_host(1)
|
||||
.build()
|
||||
.unwrap_or_default();
|
||||
return Ok(Upstream::Doh {
|
||||
url: s.to_string(),
|
||||
client,
|
||||
client: build_https_client(),
|
||||
});
|
||||
}
|
||||
// tls://IP:PORT#hostname or tls://IP#hostname (default port 853)
|
||||
@@ -118,6 +162,33 @@ pub fn parse_upstream(s: &str, default_port: u16) -> Result<Upstream> {
|
||||
Ok(Upstream::Udp(addr))
|
||||
}
|
||||
|
||||
/// HTTP/2 client tuned for DoH/ODoH: small windows for low latency, long-lived
|
||||
/// keep-alive. Shared by the DoH upstream and the ODoH config-fetcher +
|
||||
/// seal/open path. Pool defaults to one idle conn per host — good for
|
||||
/// resolvers that talk to a single upstream; relays that fan out to many
|
||||
/// targets should use [`build_https_client_with_pool`].
|
||||
pub fn build_https_client() -> reqwest::Client {
|
||||
build_https_client_with_pool(1)
|
||||
}
|
||||
|
||||
/// Same shape as [`build_https_client`], but caller picks
|
||||
/// `pool_max_idle_per_host`. Relay workloads hit many distinct target hosts
|
||||
/// and benefit from a larger pool so warm connections survive concurrent
|
||||
/// fan-out.
|
||||
pub fn build_https_client_with_pool(pool_max_idle_per_host: usize) -> reqwest::Client {
|
||||
reqwest::Client::builder()
|
||||
.use_rustls_tls()
|
||||
.http2_initial_stream_window_size(65_535)
|
||||
.http2_initial_connection_window_size(65_535)
|
||||
.http2_keep_alive_interval(Duration::from_secs(15))
|
||||
.http2_keep_alive_while_idle(true)
|
||||
.http2_keep_alive_timeout(Duration::from_secs(10))
|
||||
.pool_idle_timeout(Duration::from_secs(300))
|
||||
.pool_max_idle_per_host(pool_max_idle_per_host)
|
||||
.build()
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
fn build_dot_connector() -> Result<tokio_rustls::TlsConnector> {
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
let mut root_store = rustls::RootCertStore::empty();
|
||||
@@ -282,6 +353,22 @@ pub async fn forward_query_raw(
|
||||
tls_name,
|
||||
connector,
|
||||
} => forward_dot_raw(wire, *addr, tls_name, connector, timeout_duration).await,
|
||||
Upstream::Odoh {
|
||||
relay_url,
|
||||
target_path,
|
||||
client,
|
||||
target_config,
|
||||
} => {
|
||||
query_through_relay(
|
||||
wire,
|
||||
relay_url,
|
||||
target_path,
|
||||
client,
|
||||
target_config,
|
||||
timeout_duration,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ pub mod health;
|
||||
pub mod lan;
|
||||
pub mod mobile_api;
|
||||
pub mod mobileconfig;
|
||||
pub mod odoh;
|
||||
pub mod override_store;
|
||||
pub mod packet;
|
||||
pub mod proxy;
|
||||
@@ -20,6 +21,7 @@ pub mod query_log;
|
||||
pub mod question;
|
||||
pub mod record;
|
||||
pub mod recursive;
|
||||
pub mod relay;
|
||||
pub mod serve;
|
||||
pub mod service_store;
|
||||
pub mod setup_phone;
|
||||
|
||||
17
src/main.rs
17
src/main.rs
@@ -60,6 +60,22 @@ fn main() -> numa::Result<()> {
|
||||
.block_on(numa::setup_phone::run())
|
||||
.map_err(|e| e.into());
|
||||
}
|
||||
"relay" => {
|
||||
let port: u16 = std::env::args()
|
||||
.nth(2)
|
||||
.as_deref()
|
||||
.and_then(|s| s.parse().ok())
|
||||
.unwrap_or(8443);
|
||||
let addr: std::net::SocketAddr = ([127, 0, 0, 1], port).into();
|
||||
eprintln!(
|
||||
"\x1b[1;38;2;192;98;58mNuma\x1b[0m — ODoH relay on {}\n",
|
||||
addr
|
||||
);
|
||||
let runtime = tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()?;
|
||||
return runtime.block_on(numa::relay::run(addr));
|
||||
}
|
||||
"lan" => {
|
||||
let sub = std::env::args().nth(2).unwrap_or_default();
|
||||
let config_path = std::env::args()
|
||||
@@ -91,6 +107,7 @@ fn main() -> numa::Result<()> {
|
||||
eprintln!(" service status Check if the service is running");
|
||||
eprintln!(" lan on Enable LAN service discovery (mDNS)");
|
||||
eprintln!(" lan off Disable LAN service discovery");
|
||||
eprintln!(" relay [PORT] Run as an ODoH relay (RFC 9230, default port 8443)");
|
||||
eprintln!(" setup-phone Generate a QR code to install Numa DoT on a phone");
|
||||
eprintln!(" help Show this help");
|
||||
eprintln!();
|
||||
|
||||
489
src/odoh.rs
Normal file
489
src/odoh.rs
Normal file
@@ -0,0 +1,489 @@
|
||||
//! ODoH target-config fetcher and TTL cache (RFC 9230 §6).
|
||||
//!
|
||||
//! ## Ciphersuite policy
|
||||
//! `odoh-rs` deserialization rejects any config whose KEM/KDF/AEAD triple is
|
||||
//! not the mandatory `(X25519, HKDF-SHA256, AES-128-GCM)` (see
|
||||
//! `ObliviousDoHConfigContents::deserialize`). This is stricter than the
|
||||
//! plan's "pick the mandatory suite if mixed": a response containing *any*
|
||||
//! non-mandatory config fails parse entirely. Real-world targets publish a
|
||||
//! single mandatory config, so this is fine in practice; revisit if a target
|
||||
//! that matters starts mixing suites.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use arc_swap::ArcSwapOption;
|
||||
use odoh_rs::{
|
||||
ObliviousDoHConfigContents, ObliviousDoHConfigs, ObliviousDoHMessage,
|
||||
ObliviousDoHMessagePlaintext,
|
||||
};
|
||||
use rand_core::{OsRng, TryRngCore};
|
||||
use reqwest::header::HeaderMap;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::time::timeout;
|
||||
|
||||
use crate::Result;
|
||||
|
||||
/// MIME type used for both directions of the ODoH exchange (RFC 9230 §4).
|
||||
const ODOH_CONTENT_TYPE: &str = "application/oblivious-dns-message";
|
||||
|
||||
/// Cap on the response body we read into memory when the relay returns
|
||||
/// non-success. Protects against a hostile relay streaming a huge body on
|
||||
/// the error path; keeps enough room to carry a human-readable reason.
|
||||
const ERROR_BODY_PREVIEW_BYTES: usize = 1024;
|
||||
|
||||
/// Fallback TTL when the target's response lacks a usable `Cache-Control`
|
||||
/// directive. RFC 9230 §6.2 places no hard floor; 24 h matches what Cloudflare
|
||||
/// publishes in practice.
|
||||
const DEFAULT_CONFIG_TTL: Duration = Duration::from_secs(24 * 60 * 60);
|
||||
|
||||
/// Cap on any TTL we'll honour, regardless of what the target advertises.
|
||||
/// Keeps a misconfigured server from pinning an old key indefinitely.
|
||||
const MAX_CONFIG_TTL: Duration = Duration::from_secs(7 * 24 * 60 * 60);
|
||||
|
||||
/// After a failed `/.well-known/odohconfigs` fetch, refuse to refetch again
|
||||
/// within this window — a target that is genuinely broken would otherwise
|
||||
/// receive one request per query. Queries that arrive during the backoff
|
||||
/// return the cached error immediately.
|
||||
const REFRESH_BACKOFF: Duration = Duration::from_secs(60);
|
||||
|
||||
/// Parsed ODoH target config plus the freshness metadata needed to age it out.
|
||||
#[derive(Debug)]
|
||||
pub struct OdohTargetConfig {
|
||||
pub contents: ObliviousDoHConfigContents,
|
||||
pub key_id: Vec<u8>,
|
||||
expires_at: Instant,
|
||||
}
|
||||
|
||||
impl OdohTargetConfig {
|
||||
pub fn is_expired(&self) -> bool {
|
||||
Instant::now() >= self.expires_at
|
||||
}
|
||||
}
|
||||
|
||||
struct FailedRefresh {
|
||||
at: Instant,
|
||||
err: String,
|
||||
}
|
||||
|
||||
/// TTL-gated cache of a single target's HPKE config.
|
||||
///
|
||||
/// Reads go through `ArcSwapOption` (lock-free hot path). Refreshes serialize
|
||||
/// on an async mutex so a burst of simultaneous misses produces a single
|
||||
/// outbound fetch, and a failed refresh blocks subsequent refetches for
|
||||
/// [`REFRESH_BACKOFF`] to prevent hot-looping against a broken target.
|
||||
pub struct OdohConfigCache {
|
||||
target_host: String,
|
||||
configs_url: String,
|
||||
client: reqwest::Client,
|
||||
current: ArcSwapOption<OdohTargetConfig>,
|
||||
last_failure: ArcSwapOption<FailedRefresh>,
|
||||
refresh_lock: Mutex<()>,
|
||||
}
|
||||
|
||||
impl OdohConfigCache {
|
||||
pub fn new(target_host: String, client: reqwest::Client) -> Self {
|
||||
let configs_url = format!("https://{}/.well-known/odohconfigs", target_host);
|
||||
Self {
|
||||
target_host,
|
||||
configs_url,
|
||||
client,
|
||||
current: ArcSwapOption::from(None),
|
||||
last_failure: ArcSwapOption::from(None),
|
||||
refresh_lock: Mutex::new(()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn target_host(&self) -> &str {
|
||||
&self.target_host
|
||||
}
|
||||
|
||||
/// Return a valid config, refetching when the cache is cold or expired.
|
||||
/// Within [`REFRESH_BACKOFF`] of a failed refresh, returns the cached
|
||||
/// error without issuing another fetch.
|
||||
pub async fn get(&self) -> Result<Arc<OdohTargetConfig>> {
|
||||
if let Some(cfg) = self.current.load_full() {
|
||||
if !cfg.is_expired() {
|
||||
return Ok(cfg);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(err) = self.backoff_error() {
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
let _guard = self.refresh_lock.lock().await;
|
||||
|
||||
// Another task may have refreshed or failed while we waited.
|
||||
if let Some(cfg) = self.current.load_full() {
|
||||
if !cfg.is_expired() {
|
||||
return Ok(cfg);
|
||||
}
|
||||
}
|
||||
if let Some(err) = self.backoff_error() {
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
match fetch_odoh_config(&self.client, &self.configs_url).await {
|
||||
Ok(fresh) => {
|
||||
let fresh = Arc::new(fresh);
|
||||
self.current.store(Some(fresh.clone()));
|
||||
self.last_failure.store(None);
|
||||
Ok(fresh)
|
||||
}
|
||||
Err(e) => {
|
||||
let msg = format!("ODoH config fetch failed: {e}");
|
||||
self.last_failure.store(Some(Arc::new(FailedRefresh {
|
||||
at: Instant::now(),
|
||||
err: msg.clone(),
|
||||
})));
|
||||
Err(msg.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Drop the cached config. Called after the target rejects ciphertext
|
||||
/// (key rotation race) so the next `get()` refetches.
|
||||
pub fn invalidate(&self) {
|
||||
self.current.store(None);
|
||||
}
|
||||
|
||||
fn backoff_error(&self) -> Option<crate::Error> {
|
||||
let fail = self.last_failure.load_full()?;
|
||||
if fail.at.elapsed() < REFRESH_BACKOFF {
|
||||
Some(format!("{} (backoff active)", fail.err).into())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Fetch `/.well-known/odohconfigs` from `configs_url` and parse it into an
|
||||
/// [`OdohTargetConfig`]. The TTL is taken from the response's
|
||||
/// `Cache-Control: max-age=`, clamped to [`DEFAULT_CONFIG_TTL`,
|
||||
/// [`MAX_CONFIG_TTL`]] when absent or obviously wrong.
|
||||
pub async fn fetch_odoh_config(
|
||||
client: &reqwest::Client,
|
||||
configs_url: &str,
|
||||
) -> Result<OdohTargetConfig> {
|
||||
let resp = client.get(configs_url).send().await?.error_for_status()?;
|
||||
let ttl = cache_control_ttl(resp.headers()).unwrap_or(DEFAULT_CONFIG_TTL);
|
||||
let body = resp.bytes().await?;
|
||||
parse_odoh_config(&body, ttl)
|
||||
}
|
||||
|
||||
fn parse_odoh_config(body: &[u8], ttl: Duration) -> Result<OdohTargetConfig> {
|
||||
let mut buf = body;
|
||||
let configs: ObliviousDoHConfigs = odoh_rs::parse(&mut buf)
|
||||
.map_err(|e| format!("failed to parse ObliviousDoHConfigs: {e}"))?;
|
||||
let first = configs
|
||||
.into_iter()
|
||||
.next()
|
||||
.ok_or("target published no ODoH configs with a supported version + ciphersuite")?;
|
||||
let contents: ObliviousDoHConfigContents = first.into();
|
||||
let key_id = contents
|
||||
.identifier()
|
||||
.map_err(|e| format!("failed to derive key_id from ODoH config: {e}"))?;
|
||||
Ok(OdohTargetConfig {
|
||||
contents,
|
||||
key_id,
|
||||
expires_at: Instant::now() + ttl.min(MAX_CONFIG_TTL),
|
||||
})
|
||||
}
|
||||
|
||||
/// Send a DNS wire query through an ODoH relay to a target and return the
|
||||
/// plaintext DNS wire response.
|
||||
///
|
||||
/// Flow: fetch the target's HPKE config (cached), seal the query, POST to the
|
||||
/// relay with `Targethost`/`Targetpath` headers, then unseal the response.
|
||||
/// On seal/unseal failure we invalidate the cache and retry once — this
|
||||
/// handles the benign race where the target rotated its key between our
|
||||
/// cached config and the POST.
|
||||
pub async fn query_through_relay(
|
||||
wire: &[u8],
|
||||
relay_url: &str,
|
||||
target_path: &str,
|
||||
client: &reqwest::Client,
|
||||
cache: &OdohConfigCache,
|
||||
timeout_duration: Duration,
|
||||
) -> Result<Vec<u8>> {
|
||||
let req = OdohRequest {
|
||||
wire,
|
||||
relay_url,
|
||||
target_path,
|
||||
client,
|
||||
cache,
|
||||
timeout: timeout_duration,
|
||||
};
|
||||
match attempt_query(&req).await {
|
||||
Ok(v) => Ok(v),
|
||||
Err(AttemptError::KeyRotation(_)) => {
|
||||
cache.invalidate();
|
||||
attempt_query(&req).await.map_err(AttemptError::into_error)
|
||||
}
|
||||
Err(e) => Err(e.into_error()),
|
||||
}
|
||||
}
|
||||
|
||||
struct OdohRequest<'a> {
|
||||
wire: &'a [u8],
|
||||
relay_url: &'a str,
|
||||
target_path: &'a str,
|
||||
client: &'a reqwest::Client,
|
||||
cache: &'a OdohConfigCache,
|
||||
timeout: Duration,
|
||||
}
|
||||
|
||||
/// Classification used only by the retry path in [`query_through_relay`].
|
||||
enum AttemptError {
|
||||
/// Target signalled the config we used is stale (key rotation race).
|
||||
/// Callers should invalidate the cache and retry exactly once.
|
||||
KeyRotation(String),
|
||||
/// Any other failure — transport, timeout, malformed response.
|
||||
Other(crate::Error),
|
||||
}
|
||||
|
||||
impl AttemptError {
|
||||
fn into_error(self) -> crate::Error {
|
||||
match self {
|
||||
AttemptError::KeyRotation(m) => format!("ODoH key rotation race: {m}").into(),
|
||||
AttemptError::Other(e) => e,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// One sealed query/response round trip through the relay.
///
/// Seals `req.wire` under the cached target config, POSTs it to the relay
/// with the target carried in URL query parameters, then opens the sealed
/// answer. Errors are classified as [`AttemptError`] so the caller can
/// invalidate the config cache and retry exactly once on a key-rotation race.
async fn attempt_query(req: &OdohRequest<'_>) -> std::result::Result<Vec<u8>, AttemptError> {
    // Fetch (or reuse) the target's ODoH config. Failures here are never
    // retryable at this level — the cache applies its own backoff.
    let cfg = req.cache.get().await.map_err(AttemptError::Other)?;

    let plaintext = ObliviousDoHMessagePlaintext::new(req.wire, 0);
    // rand_core 0.9's OsRng is fallible-only; wrap for the infallible bound.
    let mut os = OsRng;
    let mut rng = os.unwrap_mut();
    let (encrypted_query, client_secret) =
        odoh_rs::encrypt_query(&plaintext, &cfg.contents, &mut rng)
            .map_err(|e| AttemptError::Other(format!("ODoH encrypt failed: {e}").into()))?;
    let body = odoh_rs::compose(&encrypted_query)
        .map_err(|e| AttemptError::Other(format!("ODoH compose failed: {e}").into()))?
        .freeze();

    // RFC 9230 §5 and the reference client use URL query parameters, not
    // HTTP headers, to carry the target routing. `Targethost`/`Targetpath`
    // headers cause relays to treat the request as an unspecified-target and
    // reject it.
    let (status, resp_body) = timeout(req.timeout, async {
        let resp = req
            .client
            .post(req.relay_url)
            .header(reqwest::header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
            .header(reqwest::header::ACCEPT, ODOH_CONTENT_TYPE)
            .header(reqwest::header::CACHE_CONTROL, "no-cache, no-store")
            .query(&[
                ("targethost", req.cache.target_host()),
                ("targetpath", req.target_path),
            ])
            .body(body)
            .send()
            .await?;
        let status = resp.status();
        let body = resp.bytes().await?;
        Ok::<_, reqwest::Error>((status, body))
    })
    .await
    .map_err(|_| AttemptError::Other("ODoH relay request timed out".into()))?
    .map_err(|e| AttemptError::Other(format!("ODoH relay request failed: {e}").into()))?;

    // RFC 9230 §4.3 expects a target that can't decrypt to reply with a DNS
    // error in a sealed 200 response; a 401 from the relay/target is the
    // practical signal that our cached HPKE key is stale. Treat 400 as a
    // client-side bug (malformed ODoH envelope) — retrying would loop-fail.
    if !status.is_success() {
        // Bounded preview: never echo an arbitrarily large (or hostile)
        // error body into our own error message.
        let preview_len = resp_body.len().min(ERROR_BODY_PREVIEW_BYTES);
        let body_preview = String::from_utf8_lossy(&resp_body[..preview_len]);
        let msg = format!("ODoH relay returned {status}: {}", body_preview.trim());
        return Err(if status.as_u16() == 401 {
            AttemptError::KeyRotation(msg)
        } else {
            AttemptError::Other(msg.into())
        });
    }

    let mut buf = resp_body;
    let encrypted_response: ObliviousDoHMessage = odoh_rs::parse(&mut buf)
        .map_err(|e| AttemptError::Other(format!("ODoH response parse failed: {e}").into()))?;
    // A decrypt failure most plausibly means the target rotated keys between
    // our config fetch and this query — classify as retryable so the caller
    // refreshes the config and tries once more.
    let plaintext_response =
        odoh_rs::decrypt_response(&plaintext, &encrypted_response, client_secret)
            .map_err(|e| AttemptError::KeyRotation(format!("ODoH decrypt failed: {e}")))?;

    Ok(plaintext_response.into_msg().to_vec())
}
|
||||
|
||||
fn cache_control_ttl(headers: &HeaderMap) -> Option<Duration> {
|
||||
let cc = headers.get(reqwest::header::CACHE_CONTROL)?.to_str().ok()?;
|
||||
for directive in cc.split(',') {
|
||||
let directive = directive.trim();
|
||||
if let Some(rest) = directive.strip_prefix("max-age=") {
|
||||
if let Ok(secs) = rest.trim().parse::<u64>() {
|
||||
if secs > 0 {
|
||||
return Some(Duration::from_secs(secs));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use odoh_rs::{ObliviousDoHConfig, ObliviousDoHKeyPair};

    // RFC 9180 HPKE IDs for the sole ODoH mandatory suite:
    // KEM = X25519, KDF = HKDF-SHA256, AEAD = AES-128-GCM.
    const KEM_X25519: u16 = 0x0020;
    const KDF_SHA256: u16 = 0x0001;
    const AEAD_AES128GCM: u16 = 0x0001;

    /// Compose a wire-format ObliviousDoHConfigs blob from a deterministic
    /// (all-zero seed) key pair — a well-formed input for the parser tests.
    fn synth_configs_bytes() -> Vec<u8> {
        let kp = ObliviousDoHKeyPair::from_parameters(
            KEM_X25519,
            KDF_SHA256,
            AEAD_AES128GCM,
            &[0u8; 32],
        );
        let pk = kp.public().clone();
        let configs: ObliviousDoHConfigs = vec![ObliviousDoHConfig::from(pk)].into();
        odoh_rs::compose(&configs).unwrap().to_vec()
    }

    #[test]
    fn parse_accepts_well_formed_config() {
        let bytes = synth_configs_bytes();
        let cfg = parse_odoh_config(&bytes, Duration::from_secs(3600)).unwrap();
        assert!(!cfg.key_id.is_empty());
        assert!(!cfg.is_expired());
    }

    #[test]
    fn parse_rejects_garbage() {
        let bytes = [0xffu8; 16];
        assert!(parse_odoh_config(&bytes, Duration::from_secs(3600)).is_err());
    }

    #[test]
    fn parse_rejects_empty() {
        assert!(parse_odoh_config(&[], Duration::from_secs(3600)).is_err());
    }

    /// An absurd server-provided TTL (100 days) must be clamped to
    /// MAX_CONFIG_TTL; the 1s slack tolerates test-runner scheduling delay.
    #[test]
    fn ttl_capped_at_max() {
        let bytes = synth_configs_bytes();
        let cfg = parse_odoh_config(&bytes, Duration::from_secs(100 * 24 * 60 * 60)).unwrap();
        let remaining = cfg.expires_at.saturating_duration_since(Instant::now());
        assert!(remaining <= MAX_CONFIG_TTL);
        assert!(remaining >= MAX_CONFIG_TTL - Duration::from_secs(1));
    }

    #[test]
    fn cache_control_parses_max_age() {
        let mut h = HeaderMap::new();
        h.insert("cache-control", "public, max-age=86400".parse().unwrap());
        assert_eq!(cache_control_ttl(&h), Some(Duration::from_secs(86400)));
    }

    #[test]
    fn cache_control_ignores_max_age_zero() {
        let mut h = HeaderMap::new();
        h.insert("cache-control", "max-age=0, no-store".parse().unwrap());
        assert_eq!(cache_control_ttl(&h), None);
    }

    #[test]
    fn cache_control_missing_falls_back() {
        let h = HeaderMap::new();
        assert_eq!(cache_control_ttl(&h), None);
    }

    #[test]
    fn is_expired_tracks_ttl() {
        let bytes = synth_configs_bytes();
        let mut cfg = parse_odoh_config(&bytes, Duration::from_secs(3600)).unwrap();
        assert!(!cfg.is_expired());
        // Rewind the expiry into the past to flip the flag without sleeping.
        cfg.expires_at = Instant::now() - Duration::from_secs(1);
        assert!(cfg.is_expired());
    }

    #[tokio::test]
    async fn cache_backoff_blocks_refetch_after_failure() {
        // Point the cache at a host that does not exist so the fetch fails
        // deterministically; this exercises the backoff wiring without a
        // network round-trip succeeding.
        let cache = OdohConfigCache::new(
            "odoh-target.invalid".to_string(),
            reqwest::Client::builder()
                .timeout(Duration::from_millis(200))
                .build()
                .unwrap(),
        );

        let first = cache.get().await;
        assert!(first.is_err(), "first fetch must fail against invalid host");

        // Within the backoff window, the cached error is returned immediately.
        let second = cache.get().await.unwrap_err().to_string();
        assert!(
            second.contains("backoff active"),
            "expected backoff hint, got: {second}"
        );

        // Reaching past the backoff window allows a fresh attempt — simulate
        // by rewinding the recorded failure timestamp.
        cache.last_failure.store(Some(Arc::new(FailedRefresh {
            at: Instant::now() - (REFRESH_BACKOFF + Duration::from_secs(1)),
            err: "prior".to_string(),
        })));
        let third = cache.get().await.unwrap_err().to_string();
        assert!(
            !third.contains("backoff active"),
            "expected fresh fetch attempt, got: {third}"
        );
    }

    /// Round-trip the HPKE seal/unseal path in isolation from HTTP, using the
    /// odoh-rs primitives that `query_through_relay` wires together. Guards
    /// against silently breaking the crypto glue if we refactor that path.
    #[test]
    fn seal_unseal_round_trip() {
        use odoh_rs::{decrypt_query, encrypt_response, ResponseNonce};

        let kp = ObliviousDoHKeyPair::from_parameters(
            KEM_X25519,
            KDF_SHA256,
            AEAD_AES128GCM,
            &[0u8; 32],
        );

        // Hand-built wire query: id 0x1234, RD set, QD=1, "example.com" A/IN.
        let query_wire = b"\x12\x34\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x01\x00\x01";
        let query_pt = ObliviousDoHMessagePlaintext::new(query_wire, 0);
        let mut os = OsRng;
        let mut rng = os.unwrap_mut();
        let (query_enc, client_secret) =
            odoh_rs::encrypt_query(&query_pt, kp.public(), &mut rng).unwrap();

        // "Target side": unseal and confirm the query survived intact.
        let (query_back, server_secret) = decrypt_query(&query_enc, &kp).unwrap();
        assert_eq!(query_back.into_msg().as_ref(), query_wire);

        // Seal a matching answer and open it back on the "client side".
        let response_wire = b"\x12\x34\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00";
        let response_pt = ObliviousDoHMessagePlaintext::new(response_wire, 0);
        let response_enc = encrypt_response(
            &query_pt,
            &response_pt,
            server_secret,
            ResponseNonce::default(),
        )
        .unwrap();

        let response_back =
            odoh_rs::decrypt_response(&query_pt, &response_enc, client_secret).unwrap();
        assert_eq!(response_back.into_msg().as_ref(), response_wire);
    }
}
|
||||
347
src/relay.rs
Normal file
347
src/relay.rs
Normal file
@@ -0,0 +1,347 @@
|
||||
//! ODoH relay (RFC 9230 §5) — the forward-without-reading half of the
|
||||
//! protocol. Runs `numa relay`; skips all resolver initialisation (no port
|
||||
//! 53, no cache, no recursion, no dashboard). The relay never reads the
|
||||
//! HPKE-sealed payload and keeps no per-request logs — only aggregate
|
||||
//! counters.
|
||||
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use axum::body::Bytes;
|
||||
use axum::extract::{DefaultBodyLimit, Query, State};
|
||||
use axum::http::{header, StatusCode};
|
||||
use axum::response::{IntoResponse, Response};
|
||||
use axum::routing::{get, post};
|
||||
use axum::Router;
|
||||
use log::{error, info};
|
||||
use serde::Deserialize;
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
use crate::forward::build_https_client_with_pool;
|
||||
use crate::Result;
|
||||
|
||||
/// Media type for ODoH messages (RFC 9230); required on both the request we
/// accept from clients and the forwarded POST to the target.
const ODOH_CONTENT_TYPE: &str = "application/oblivious-dns-message";

/// Cap on the opaque body we accept from a client. ODoH envelopes are
/// ~100–300 bytes in practice; anything larger is malformed or hostile.
const MAX_BODY_BYTES: usize = 4 * 1024;

/// Cap on the body we read back from the target before streaming to client.
/// Slightly larger: target responses carry DNS answers plus HPKE overhead.
const MAX_TARGET_RESPONSE_BYTES: usize = 8 * 1024;

/// Covers the whole client-to-target round trip — not just `.send()` — so a
/// slow-drip target can't hang a worker indefinitely after headers arrive.
const TARGET_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);

/// The relay hits many distinct target hosts on behalf of clients. A
/// per-host idle pool of 4 keeps warm TLS connections available for concurrent
/// fan-out without blowing up memory on a small VPS.
const RELAY_POOL_PER_HOST: usize = 4;
|
||||
|
||||
/// URL query parameters carrying the client's chosen target (RFC 9230 §5
/// routing). Both fields are validated in `handle_relay` before any URL is
/// built from them.
#[derive(Deserialize)]
struct RelayParams {
    /// Target hostname; must pass `is_valid_hostname`.
    targethost: String,
    /// Target path; must start with '/'.
    targetpath: String,
}
|
||||
|
||||
/// Shared relay state: one pooled HTTPS client plus aggregate counters.
/// The counters are the relay's only observability surface — there is no
/// per-request logging by design.
struct RelayState {
    /// Pooled reqwest client used for all forwards to targets.
    client: reqwest::Client,
    /// Every POST /relay that reached the handler.
    total_requests: AtomicU64,
    /// Forwards where the target answered (whatever its HTTP status).
    forwarded_ok: AtomicU64,
    /// Forwards that failed in transit (timeout, connect, TLS, size cap).
    forwarded_err: AtomicU64,
    /// Requests rejected before forwarding (content-type, size, params).
    rejected_bad_request: AtomicU64,
}
|
||||
|
||||
pub async fn run(addr: SocketAddr) -> Result<()> {
|
||||
let state = Arc::new(RelayState {
|
||||
client: build_https_client_with_pool(RELAY_POOL_PER_HOST),
|
||||
total_requests: AtomicU64::new(0),
|
||||
forwarded_ok: AtomicU64::new(0),
|
||||
forwarded_err: AtomicU64::new(0),
|
||||
rejected_bad_request: AtomicU64::new(0),
|
||||
});
|
||||
|
||||
let app = Router::new()
|
||||
.route("/relay", post(handle_relay))
|
||||
// Overrides axum's default (2 MiB) so hostile clients can't force
|
||||
// the relay to buffer multi-MB bodies before our own cap check.
|
||||
.layer(DefaultBodyLimit::max(MAX_BODY_BYTES))
|
||||
.route("/health", get(handle_health))
|
||||
.with_state(state);
|
||||
|
||||
let listener = TcpListener::bind(addr).await?;
|
||||
info!("ODoH relay listening on {}", addr);
|
||||
axum::serve(listener, app).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn handle_health(State(state): State<Arc<RelayState>>) -> impl IntoResponse {
|
||||
let body = format!(
|
||||
"ok\ntotal {}\nforwarded_ok {}\nforwarded_err {}\nrejected_bad_request {}\n",
|
||||
state.total_requests.load(Ordering::Relaxed),
|
||||
state.forwarded_ok.load(Ordering::Relaxed),
|
||||
state.forwarded_err.load(Ordering::Relaxed),
|
||||
state.rejected_bad_request.load(Ordering::Relaxed),
|
||||
);
|
||||
(
|
||||
StatusCode::OK,
|
||||
[(header::CONTENT_TYPE, "text/plain; charset=utf-8")],
|
||||
body,
|
||||
)
|
||||
}
|
||||
|
||||
/// POST /relay — validate the request, then forward the opaque HPKE envelope.
///
/// Guard order: content-type → body size → target params. Each rejection
/// bumps `rejected_bad_request`; only requests that reach the forward step
/// count toward `forwarded_ok`/`forwarded_err`.
async fn handle_relay(
    State(state): State<Arc<RelayState>>,
    Query(params): Query<RelayParams>,
    headers: axum::http::HeaderMap,
    body: Bytes,
) -> Response {
    state.total_requests.fetch_add(1, Ordering::Relaxed);

    if !content_type_matches(&headers, ODOH_CONTENT_TYPE) {
        state.rejected_bad_request.fetch_add(1, Ordering::Relaxed);
        return (
            StatusCode::UNSUPPORTED_MEDIA_TYPE,
            "expected application/oblivious-dns-message",
        )
            .into_response();
    }

    // Defense in depth: axum's DefaultBodyLimit layer normally rejects
    // oversized bodies before the handler runs; this re-checks in-handler.
    if body.len() > MAX_BODY_BYTES {
        state.rejected_bad_request.fetch_add(1, Ordering::Relaxed);
        return (StatusCode::PAYLOAD_TOO_LARGE, "body exceeds 4 KiB cap").into_response();
    }

    // SSRF guard: only a strict hostname plus an absolute path may be
    // combined into the target URL below.
    if !is_valid_hostname(&params.targethost) || !params.targetpath.starts_with('/') {
        state.rejected_bad_request.fetch_add(1, Ordering::Relaxed);
        return (StatusCode::BAD_REQUEST, "invalid targethost or targetpath").into_response();
    }

    let target_url = format!("https://{}{}", params.targethost, params.targetpath);
    match forward_to_target(&state.client, &target_url, body).await {
        Ok((status, resp_body)) => {
            state.forwarded_ok.fetch_add(1, Ordering::Relaxed);
            // Propagate the target's status verbatim; the body stays sealed.
            (
                status,
                [(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)],
                resp_body,
            )
                .into_response()
        }
        Err(e) => {
            // Log the underlying reason for operators; don't leak reqwest
            // internals (which can reveal the target's TLS config, IP, etc.)
            // back to arbitrary clients.
            error!("relay forward to {} failed: {}", target_url, e);
            state.forwarded_err.fetch_add(1, Ordering::Relaxed);
            (StatusCode::BAD_GATEWAY, "target unreachable").into_response()
        }
    }
}
|
||||
|
||||
async fn forward_to_target(
|
||||
client: &reqwest::Client,
|
||||
url: &str,
|
||||
body: Bytes,
|
||||
) -> Result<(StatusCode, Bytes)> {
|
||||
let response = tokio::time::timeout(TARGET_REQUEST_TIMEOUT, async {
|
||||
let resp = client
|
||||
.post(url)
|
||||
.header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
|
||||
.header(header::ACCEPT, ODOH_CONTENT_TYPE)
|
||||
.body(body)
|
||||
.send()
|
||||
.await?;
|
||||
let status = StatusCode::from_u16(resp.status().as_u16())?;
|
||||
let resp_body = resp.bytes().await?;
|
||||
Ok::<_, crate::Error>((status, resp_body))
|
||||
})
|
||||
.await
|
||||
.map_err(|_| "timed out talking to target")??;
|
||||
|
||||
if response.1.len() > MAX_TARGET_RESPONSE_BYTES {
|
||||
return Err("target response exceeds cap".into());
|
||||
}
|
||||
Ok(response)
|
||||
}
|
||||
|
||||
fn content_type_matches(headers: &axum::http::HeaderMap, expected: &str) -> bool {
|
||||
headers
|
||||
.get(header::CONTENT_TYPE)
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(|ct| ct.split(';').next().unwrap_or("").trim() == expected)
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Strict DNS-hostname validator, aimed at closing the SSRF surface a naive
/// `contains('.')` check leaves open (e.g. `example.com@internal.host`,
/// `evil.com/../admin`). Per RFC 1035: total length ≤ 253, at least two
/// labels, each label 1–63 ASCII letters/digits/dashes, and no label that
/// starts or ends with a dash. Validating per label also rejects inputs the
/// previous flat character scan accepted, such as `a..b` (empty label) and
/// `a.-b.c` (dash-edged interior label).
fn is_valid_hostname(h: &str) -> bool {
    // Cheap whole-string gates first.
    if h.is_empty() || h.len() > 253 || !h.contains('.') {
        return false;
    }
    // Per-label rules (covers leading/trailing dot and dash implicitly:
    // those produce an empty or dash-edged label).
    h.split('.').all(|label| {
        !label.is_empty()
            && label.len() <= 63
            && !label.starts_with('-')
            && !label.ends_with('-')
            && label.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
    })
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Boot a relay on an ephemeral port; return its address plus a handle
    /// to the shared counters so tests can assert on them directly.
    async fn spawn_relay() -> (SocketAddr, Arc<RelayState>) {
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();

        let state = Arc::new(RelayState {
            client: build_https_client_with_pool(RELAY_POOL_PER_HOST),
            total_requests: AtomicU64::new(0),
            forwarded_ok: AtomicU64::new(0),
            forwarded_err: AtomicU64::new(0),
            rejected_bad_request: AtomicU64::new(0),
        });

        // NOTE(review): router wiring duplicates `run` by hand — keep the
        // two in sync (route order and the body-limit layer placement).
        let app = Router::new()
            .route("/relay", post(handle_relay))
            .layer(DefaultBodyLimit::max(MAX_BODY_BYTES))
            .route("/health", get(handle_health))
            .with_state(state.clone());

        tokio::spawn(async move {
            let _ = axum::serve(listener, app).await;
        });
        (addr, state)
    }

    #[tokio::test]
    async fn rejects_missing_content_type() {
        let (addr, state) = spawn_relay().await;
        let client = reqwest::Client::new();
        let resp = client
            .post(format!(
                "http://{}/relay?targethost=odoh.example.com&targetpath=/dns-query",
                addr
            ))
            .body("body")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status(), reqwest::StatusCode::UNSUPPORTED_MEDIA_TYPE);
        assert_eq!(state.rejected_bad_request.load(Ordering::Relaxed), 1);
    }

    #[tokio::test]
    async fn rejects_oversized_body() {
        let (addr, _state) = spawn_relay().await;
        let big = vec![0u8; MAX_BODY_BYTES + 1];
        let client = reqwest::Client::new();
        let resp = client
            .post(format!(
                "http://{}/relay?targethost=odoh.example.com&targetpath=/dns-query",
                addr
            ))
            .header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
            .body(big)
            .send()
            .await
            .unwrap();
        // axum's DefaultBodyLimit rejects before our handler runs, so the
        // counter doesn't increment — but the status code proves the layer
        // enforced the cap. Either status is acceptable evidence.
        assert!(matches!(
            resp.status(),
            reqwest::StatusCode::PAYLOAD_TOO_LARGE | reqwest::StatusCode::BAD_REQUEST
        ));
    }

    #[tokio::test]
    async fn rejects_targethost_without_dot() {
        let (addr, state) = spawn_relay().await;
        let client = reqwest::Client::new();
        let resp = client
            .post(format!(
                "http://{}/relay?targethost=localhost&targetpath=/dns-query",
                addr
            ))
            .header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
            .body("body")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status(), reqwest::StatusCode::BAD_REQUEST);
        assert_eq!(state.rejected_bad_request.load(Ordering::Relaxed), 1);
    }

    #[tokio::test]
    async fn rejects_userinfo_ssrf_attempt() {
        let (addr, state) = spawn_relay().await;
        let client = reqwest::Client::new();
        // The naive contains('.') check would let this through and reqwest
        // would route to `internal.host` using `evil.com` as userinfo.
        let resp = client
            .post(format!(
                "http://{}/relay?targethost=evil.com@internal.host&targetpath=/dns-query",
                addr
            ))
            .header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
            .body("body")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status(), reqwest::StatusCode::BAD_REQUEST);
        assert_eq!(state.rejected_bad_request.load(Ordering::Relaxed), 1);
    }

    #[tokio::test]
    async fn rejects_targetpath_without_leading_slash() {
        let (addr, state) = spawn_relay().await;
        let client = reqwest::Client::new();
        let resp = client
            .post(format!(
                "http://{}/relay?targethost=odoh.example.com&targetpath=dns-query",
                addr
            ))
            .header(header::CONTENT_TYPE, ODOH_CONTENT_TYPE)
            .body("body")
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status(), reqwest::StatusCode::BAD_REQUEST);
        assert_eq!(state.rejected_bad_request.load(Ordering::Relaxed), 1);
    }

    #[tokio::test]
    async fn health_endpoint_reports_counters() {
        let (addr, _state) = spawn_relay().await;
        let client = reqwest::Client::new();
        let resp = client
            .get(format!("http://{}/health", addr))
            .send()
            .await
            .unwrap();
        assert_eq!(resp.status(), reqwest::StatusCode::OK);
        let body = resp.text().await.unwrap();
        assert!(body.contains("ok\n"));
        assert!(body.contains("forwarded_ok 0"));
    }

    /// Pure-function check of the SSRF validator — mirrors the cases the
    /// HTTP-level tests exercise, plus length-limit edges.
    #[test]
    fn hostname_validator_accepts_and_rejects() {
        assert!(is_valid_hostname("odoh.cloudflare-dns.com"));
        assert!(is_valid_hostname("a.b"));
        assert!(!is_valid_hostname(""));
        assert!(!is_valid_hostname("localhost"));
        assert!(!is_valid_hostname(".leading.dot"));
        assert!(!is_valid_hostname("trailing.dot."));
        assert!(!is_valid_hostname("-leading.dash"));
        assert!(!is_valid_hostname("evil.com@internal.host"));
        assert!(!is_valid_hostname("evil.com/../admin"));
        assert!(!is_valid_hostname(&"a".repeat(254)));
    }
}
|
||||
39
src/serve.rs
39
src/serve.rs
@@ -17,7 +17,8 @@ use crate::buffer::BytePacketBuffer;
|
||||
use crate::cache::DnsCache;
|
||||
use crate::config::{build_zone_map, load_config, ConfigLoad};
|
||||
use crate::ctx::{handle_query, ServerCtx};
|
||||
use crate::forward::{parse_upstream, Upstream, UpstreamPool};
|
||||
use crate::forward::{build_https_client, parse_upstream_list, Upstream, UpstreamPool};
|
||||
use crate::odoh::OdohConfigCache;
|
||||
use crate::override_store::OverrideStore;
|
||||
use crate::query_log::QueryLog;
|
||||
use crate::service_store::ServiceStore;
|
||||
@@ -54,10 +55,7 @@ pub async fn run(config_path: String) -> crate::Result<()> {
|
||||
(crate::config::UpstreamMode::Recursive, false, pool, label)
|
||||
} else {
|
||||
log::warn!("recursive probe failed — falling back to Quad9 DoH");
|
||||
let client = reqwest::Client::builder()
|
||||
.use_rustls_tls()
|
||||
.build()
|
||||
.unwrap_or_default();
|
||||
let client = build_https_client();
|
||||
let url = DOH_FALLBACK.to_string();
|
||||
let label = url.clone();
|
||||
let pool = UpstreamPool::new(vec![Upstream::Doh { url, client }], vec![]);
|
||||
@@ -82,16 +80,8 @@ pub async fn run(config_path: String) -> crate::Result<()> {
|
||||
config.upstream.address.clone()
|
||||
};
|
||||
|
||||
let primary: Vec<Upstream> = addrs
|
||||
.iter()
|
||||
.map(|s| parse_upstream(s, config.upstream.port))
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
let fallback: Vec<Upstream> = config
|
||||
.upstream
|
||||
.fallback
|
||||
.iter()
|
||||
.map(|s| parse_upstream(s, config.upstream.port))
|
||||
.collect::<crate::Result<Vec<_>>>()?;
|
||||
let primary = parse_upstream_list(&addrs, config.upstream.port)?;
|
||||
let fallback = parse_upstream_list(&config.upstream.fallback, config.upstream.port)?;
|
||||
|
||||
let pool = UpstreamPool::new(primary, fallback);
|
||||
let label = pool.label();
|
||||
@@ -102,6 +92,25 @@ pub async fn run(config_path: String) -> crate::Result<()> {
|
||||
label,
|
||||
)
|
||||
}
|
||||
crate::config::UpstreamMode::Odoh => {
|
||||
let odoh = config.upstream.odoh_upstream()?;
|
||||
let client = build_https_client();
|
||||
let target_config = Arc::new(OdohConfigCache::new(odoh.target_host, client.clone()));
|
||||
let primary = vec![Upstream::Odoh {
|
||||
relay_url: odoh.relay_url,
|
||||
target_path: odoh.target_path,
|
||||
client,
|
||||
target_config,
|
||||
}];
|
||||
let fallback = if odoh.strict {
|
||||
Vec::new()
|
||||
} else {
|
||||
parse_upstream_list(&config.upstream.fallback, config.upstream.port)?
|
||||
};
|
||||
let pool = UpstreamPool::new(primary, fallback);
|
||||
let label = pool.label();
|
||||
(crate::config::UpstreamMode::Odoh, false, pool, label)
|
||||
}
|
||||
};
|
||||
let api_port = config.server.api_port;
|
||||
|
||||
|
||||
62
src/stats.rs
62
src/stats.rs
@@ -102,6 +102,10 @@ pub struct ServerStats {
|
||||
transport_tcp: u64,
|
||||
transport_dot: u64,
|
||||
transport_doh: u64,
|
||||
upstream_transport_udp: u64,
|
||||
upstream_transport_doh: u64,
|
||||
upstream_transport_dot: u64,
|
||||
upstream_transport_odoh: u64,
|
||||
started_at: Instant,
|
||||
}
|
||||
|
||||
@@ -124,6 +128,31 @@ impl Transport {
|
||||
}
|
||||
}
|
||||
|
||||
/// Wire protocol used for a forwarded upstream call. Orthogonal to
/// `QueryPath`: the path answers "where the answer came from"; this answers
/// "over what wire we spoke to the forwarder." Callers pass
/// `Option<UpstreamTransport>` — `None` for resolutions that never touched
/// a forwarder (cache/local/blocked) or for recursive mode, which has its
/// own counter via `QueryPath::Recursive`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum UpstreamTransport {
    /// Cleartext DNS over UDP.
    Udp,
    /// DNS over HTTPS.
    Doh,
    /// DNS over TLS.
    Dot,
    /// Oblivious DNS over HTTPS via a relay (RFC 9230).
    Odoh,
}
|
||||
|
||||
impl UpstreamTransport {
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
UpstreamTransport::Udp => "UDP",
|
||||
UpstreamTransport::Doh => "DOH",
|
||||
UpstreamTransport::Dot => "DOT",
|
||||
UpstreamTransport::Odoh => "ODOH",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub enum QueryPath {
|
||||
Local,
|
||||
@@ -202,11 +231,20 @@ impl ServerStats {
|
||||
transport_tcp: 0,
|
||||
transport_dot: 0,
|
||||
transport_doh: 0,
|
||||
upstream_transport_udp: 0,
|
||||
upstream_transport_doh: 0,
|
||||
upstream_transport_dot: 0,
|
||||
upstream_transport_odoh: 0,
|
||||
started_at: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn record(&mut self, path: QueryPath, transport: Transport) -> u64 {
|
||||
pub fn record(
|
||||
&mut self,
|
||||
path: QueryPath,
|
||||
transport: Transport,
|
||||
upstream_transport: Option<UpstreamTransport>,
|
||||
) -> u64 {
|
||||
self.queries_total += 1;
|
||||
match path {
|
||||
QueryPath::Local => self.queries_local += 1,
|
||||
@@ -225,6 +263,14 @@ impl ServerStats {
|
||||
Transport::Dot => self.transport_dot += 1,
|
||||
Transport::Doh => self.transport_doh += 1,
|
||||
}
|
||||
if let Some(ut) = upstream_transport {
|
||||
match ut {
|
||||
UpstreamTransport::Udp => self.upstream_transport_udp += 1,
|
||||
UpstreamTransport::Doh => self.upstream_transport_doh += 1,
|
||||
UpstreamTransport::Dot => self.upstream_transport_dot += 1,
|
||||
UpstreamTransport::Odoh => self.upstream_transport_odoh += 1,
|
||||
}
|
||||
}
|
||||
self.queries_total
|
||||
}
|
||||
|
||||
@@ -253,6 +299,10 @@ impl ServerStats {
|
||||
transport_tcp: self.transport_tcp,
|
||||
transport_dot: self.transport_dot,
|
||||
transport_doh: self.transport_doh,
|
||||
upstream_transport_udp: self.upstream_transport_udp,
|
||||
upstream_transport_doh: self.upstream_transport_doh,
|
||||
upstream_transport_dot: self.upstream_transport_dot,
|
||||
upstream_transport_odoh: self.upstream_transport_odoh,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -263,7 +313,7 @@ impl ServerStats {
|
||||
let secs = uptime.as_secs() % 60;
|
||||
|
||||
log::info!(
|
||||
"STATS | uptime {}h{}m{}s | total {} | fwd {} | upstream {} | recursive {} | coalesced {} | cached {} | local {} | override {} | blocked {} | errors {}",
|
||||
"STATS | uptime {}h{}m{}s | total {} | fwd {} | upstream {} | recursive {} | coalesced {} | cached {} | local {} | override {} | blocked {} | errors {} | up-udp {} | up-doh {} | up-dot {} | up-odoh {}",
|
||||
hours, mins, secs,
|
||||
self.queries_total,
|
||||
self.queries_forwarded,
|
||||
@@ -275,6 +325,10 @@ impl ServerStats {
|
||||
self.queries_overridden,
|
||||
self.queries_blocked,
|
||||
self.upstream_errors,
|
||||
self.upstream_transport_udp,
|
||||
self.upstream_transport_doh,
|
||||
self.upstream_transport_dot,
|
||||
self.upstream_transport_odoh,
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -295,4 +349,8 @@ pub struct StatsSnapshot {
|
||||
pub transport_tcp: u64,
|
||||
pub transport_dot: u64,
|
||||
pub transport_doh: u64,
|
||||
pub upstream_transport_udp: u64,
|
||||
pub upstream_transport_doh: u64,
|
||||
pub upstream_transport_dot: u64,
|
||||
pub upstream_transport_odoh: u64,
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user