add local service proxy with .numa domains

HTTP reverse proxy on port 80 lets developers use clean domain names
(frontend.numa, api.numa) instead of localhost:PORT. Includes WebSocket
upgrade support for HMR, TCP health checks, dashboard UI panel, and
REST API for service management. numa.numa is preconfigured for the
dashboard itself.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Razvan Dimescu
2026-03-20 15:07:15 +02:00
parent 14a9e9e7e3
commit 8f959ce0a5
15 changed files with 762 additions and 53 deletions

View File

@@ -42,6 +42,9 @@ pub fn router(ctx: Arc<ServerCtx>) -> Router {
"/blocking/allowlist/{domain}",
delete(blocking_allowlist_remove),
)
.route("/services", get(list_services))
.route("/services", post(create_service))
.route("/services/{name}", delete(remove_service))
.with_state(ctx)
}
@@ -572,3 +575,107 @@ async fn blocking_allowlist_remove(
StatusCode::NOT_FOUND
}
}
// --- Service proxy handlers ---
/// JSON representation of one registered service, returned by the
/// list/create endpoints.
#[derive(Serialize)]
struct ServiceResponse {
    // Lowercased service name (a single DNS label, e.g. "api").
    name: String,
    // Local TCP port the proxy forwards this service to.
    target_port: u16,
    // Convenience URL built as "http://{name}.{tld}".
    url: String,
    // Result of a best-effort TCP connect probe (see check_health).
    healthy: bool,
}
/// Request body for POST /services.
#[derive(Deserialize)]
struct CreateServiceRequest {
    // Desired service name; validated and lowercased by the handler.
    name: String,
    // Local TCP port the new service listens on (must be > 0).
    target_port: u16,
}
/// GET /services — returns every registered service with a live health flag.
async fn list_services(State(ctx): State<Arc<ServerCtx>>) -> Json<Vec<ServiceResponse>> {
    // Snapshot (name, port) pairs inside a tight scope so the MutexGuard
    // is dropped before any await point below.
    let snapshot: Vec<(String, u16)> = {
        let store = ctx.services.lock().unwrap();
        store
            .list()
            .into_iter()
            .map(|entry| (entry.name.clone(), entry.target_port))
            .collect()
    };
    let tld = &ctx.proxy_tld;
    // Probe every backend port concurrently instead of one at a time.
    let checks = snapshot.iter().map(|(_, port)| check_health(*port));
    let statuses = futures::future::join_all(checks).await;
    let body: Vec<ServiceResponse> = snapshot
        .into_iter()
        .zip(statuses)
        .map(|((name, port), healthy)| ServiceResponse {
            url: format!("http://{}.{}", name, tld),
            name,
            target_port: port,
            healthy,
        })
        .collect();
    Json(body)
}
/// POST /services — registers (or updates) a service under a `.{tld}` name.
///
/// The name is lowercased and validated as a DNS label. Returns 201 with
/// the stored entry plus a best-effort health probe of the target port.
async fn create_service(
    State(ctx): State<Arc<ServerCtx>>,
    Json(req): Json<CreateServiceRequest>,
) -> Result<(StatusCode, Json<ServiceResponse>), (StatusCode, String)> {
    let name = req.name.to_lowercase();
    // Validate as a DNS label (RFC 1123): 1-63 chars, alphanumeric plus
    // hyphens, and hyphens may not appear at either end.
    if name.is_empty() || name.len() > 63 {
        return Err((StatusCode::BAD_REQUEST, "name must be 1-63 characters".into()));
    }
    if !name.chars().all(|c| c.is_ascii_alphanumeric() || c == '-') {
        return Err((
            StatusCode::BAD_REQUEST,
            "name must contain only alphanumeric characters and hyphens".into(),
        ));
    }
    if name.starts_with('-') || name.ends_with('-') {
        return Err((
            StatusCode::BAD_REQUEST,
            "name must not start or end with a hyphen".into(),
        ));
    }
    // "numa" is reserved for the dashboard: remove_service refuses to delete
    // it, so creation must likewise refuse to overwrite its port.
    if name == "numa" {
        return Err((StatusCode::FORBIDDEN, "name \"numa\" is reserved".into()));
    }
    if req.target_port == 0 {
        return Err((StatusCode::BAD_REQUEST, "target_port must be > 0".into()));
    }
    let tld = &ctx.proxy_tld;
    ctx.services.lock().unwrap().insert(&name, req.target_port);
    // Health is advisory only; the service is registered either way.
    let healthy = check_health(req.target_port).await;
    Ok((
        StatusCode::CREATED,
        Json(ServiceResponse {
            url: format!("http://{}.{}", name, tld),
            name,
            target_port: req.target_port,
            healthy,
        }),
    ))
}
/// DELETE /services/{name} — unregisters a service.
///
/// The "numa" dashboard entry is permanent and cannot be removed.
async fn remove_service(
    State(ctx): State<Arc<ServerCtx>>,
    Path(name): Path<String>,
) -> StatusCode {
    if name.eq_ignore_ascii_case("numa") {
        return StatusCode::FORBIDDEN;
    }
    let removed = ctx.services.lock().unwrap().remove(&name);
    if removed {
        StatusCode::NO_CONTENT
    } else {
        StatusCode::NOT_FOUND
    }
}
/// Best-effort liveness probe: true if a TCP connection to
/// 127.0.0.1:{port} succeeds within 100ms.
async fn check_health(port: u16) -> bool {
    let addr = format!("127.0.0.1:{}", port);
    let attempt = tokio::time::timeout(
        std::time::Duration::from_millis(100),
        tokio::net::TcpStream::connect(addr),
    )
    .await;
    // Outer Err = timeout elapsed; inner Err = connection refused/failed.
    matches!(attempt, Ok(Ok(_)))
}

View File

@@ -9,7 +9,7 @@ use crate::question::QueryType;
use crate::record::DnsRecord;
use crate::Result;
#[derive(Deserialize)]
#[derive(Deserialize, Default)]
pub struct Config {
#[serde(default)]
pub server: ServerConfig,
@@ -21,6 +21,10 @@ pub struct Config {
pub blocking: BlockingConfig,
#[serde(default)]
pub zones: Vec<ZoneRecord>,
#[serde(default)]
pub proxy: ProxyConfig,
#[serde(default)]
pub services: Vec<ServiceConfig>,
}
#[derive(Deserialize)]
@@ -156,15 +160,45 @@ fn default_zone_ttl() -> u32 {
300
}
/// `[proxy]` section: settings for the HTTP reverse proxy serving
/// `*.{tld}` hostnames.
#[derive(Deserialize, Clone)]
pub struct ProxyConfig {
    // Whether the proxy listener is started at all (default: true).
    #[serde(default = "default_proxy_enabled")]
    pub enabled: bool,
    // TCP port the proxy binds on (default: 80).
    #[serde(default = "default_proxy_port")]
    pub port: u16,
    // Top-level domain the proxy answers for (default: "numa").
    #[serde(default = "default_proxy_tld")]
    pub tld: String,
}
impl Default for ProxyConfig {
    /// Mirrors the per-field serde defaults, so a missing `[proxy]` table
    /// behaves exactly like an empty one.
    fn default() -> Self {
        Self {
            enabled: default_proxy_enabled(),
            port: default_proxy_port(),
            tld: default_proxy_tld(),
        }
    }
}
// Serde default: the proxy is on unless explicitly disabled.
fn default_proxy_enabled() -> bool {
    true
}

// Serde default: the standard HTTP port.
fn default_proxy_port() -> u16 {
    80
}

// Serde default: the ".numa" top-level domain.
fn default_proxy_tld() -> String {
    String::from("numa")
}
/// A `[[services]]` entry: statically registers a local service with the
/// proxy at startup.
#[derive(Deserialize, Clone)]
pub struct ServiceConfig {
    // Service name, used as the hostname label "{name}.{tld}".
    pub name: String,
    // Local TCP port the service listens on.
    pub target_port: u16,
}
pub fn load_config(path: &str) -> Result<Config> {
if !Path::new(path).exists() {
return Ok(Config {
server: ServerConfig::default(),
upstream: UpstreamConfig::default(),
cache: CacheConfig::default(),
blocking: BlockingConfig::default(),
zones: Vec::new(),
});
return Ok(Config::default());
}
let contents = std::fs::read_to_string(path)?;
let config: Config = toml::from_str(&contents)?;

View File

@@ -14,7 +14,9 @@ use crate::header::ResultCode;
use crate::override_store::OverrideStore;
use crate::packet::DnsPacket;
use crate::query_log::{QueryLog, QueryLogEntry};
use crate::question::QueryType;
use crate::record::DnsRecord;
use crate::service_store::ServiceStore;
use crate::stats::{QueryPath, ServerStats};
use crate::system_dns::ForwardingRule;
@@ -26,9 +28,12 @@ pub struct ServerCtx {
pub overrides: Mutex<OverrideStore>,
pub blocklist: Mutex<BlocklistStore>,
pub query_log: Mutex<QueryLog>,
pub services: Mutex<ServiceStore>,
pub forwarding_rules: Vec<ForwardingRule>,
pub upstream: SocketAddr,
pub timeout: Duration,
pub proxy_tld: String,
pub proxy_tld_suffix: String, // pre-computed ".{tld}" to avoid per-query allocation
}
pub async fn handle_query(
@@ -51,7 +56,7 @@ pub async fn handle_query(
None => return Ok(()),
};
// Pipeline: overrides -> blocklist -> local zones -> cache -> upstream
// Pipeline: overrides -> .tld interception -> blocklist -> local zones -> cache -> upstream
// Each lock is scoped to avoid holding MutexGuard across await points.
let (response, path) = {
let override_record = ctx.overrides.lock().unwrap().lookup(&qname);
@@ -59,8 +64,24 @@ pub async fn handle_query(
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
resp.answers.push(record);
(resp, QueryPath::Overridden)
} else if !ctx.proxy_tld_suffix.is_empty()
&& (qname.ends_with(&ctx.proxy_tld_suffix) || qname == ctx.proxy_tld)
{
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
match qtype {
QueryType::AAAA => resp.answers.push(DnsRecord::AAAA {
domain: qname.clone(),
addr: std::net::Ipv6Addr::LOCALHOST,
ttl: 300,
}),
_ => resp.answers.push(DnsRecord::A {
domain: qname.clone(),
addr: std::net::Ipv4Addr::LOCALHOST,
ttl: 300,
}),
}
(resp, QueryPath::Local)
} else if ctx.blocklist.lock().unwrap().is_blocked(&qname) {
use crate::question::QueryType;
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
match qtype {
QueryType::AAAA => resp.answers.push(DnsRecord::AAAA {

View File

@@ -8,9 +8,11 @@ pub mod forward;
pub mod header;
pub mod override_store;
pub mod packet;
pub mod proxy;
pub mod query_log;
pub mod question;
pub mod record;
pub mod service_store;
pub mod stats;
pub mod system_dns;

View File

@@ -12,6 +12,7 @@ use numa::config::{build_zone_map, load_config};
use numa::ctx::{handle_query, ServerCtx};
use numa::override_store::OverrideStore;
use numa::query_log::QueryLog;
use numa::service_store::ServiceStore;
use numa::stats::ServerStats;
use numa::system_dns::{
discover_system_dns, install_service, install_system_dns, restart_service, service_status,
@@ -49,6 +50,10 @@ async fn main() -> numa::Result<()> {
}
};
}
"version" | "--version" | "-V" => {
eprintln!("numa {}", env!("CARGO_PKG_VERSION"));
return Ok(());
}
"help" | "--help" | "-h" => {
eprintln!("Usage: numa [command] [config-path]");
eprintln!();
@@ -99,6 +104,13 @@ async fn main() -> numa::Result<()> {
blocklist.set_enabled(false);
}
// Build service store from config, always include numa dashboard
let mut service_store = ServiceStore::new();
service_store.insert("numa", config.server.api_port);
for svc in &config.services {
service_store.insert(&svc.name, svc.target_port);
}
let forwarding_rules = system_dns.forwarding_rules;
let ctx = Arc::new(ServerCtx {
@@ -113,14 +125,21 @@ async fn main() -> numa::Result<()> {
overrides: Mutex::new(OverrideStore::new()),
blocklist: Mutex::new(blocklist),
query_log: Mutex::new(QueryLog::new(1000)),
services: Mutex::new(service_store),
forwarding_rules,
upstream,
timeout: Duration::from_millis(config.upstream.timeout_ms),
proxy_tld_suffix: if config.proxy.tld.is_empty() {
String::new()
} else {
format!(".{}", config.proxy.tld)
},
proxy_tld: config.proxy.tld.clone(),
});
let zone_count: usize = ctx.zone_map.values().map(|m| m.len()).sum();
eprintln!("\n\x1b[38;2;192;98;58m ╔══════════════════════════════════════════╗\x1b[0m");
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[1;38;2;192;98;58mNUMA\x1b[0m \x1b[3;38;2;163;152;136mDNS that governs itself\x1b[0m \x1b[38;2;192;98;58m║\x1b[0m");
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[1;38;2;192;98;58mNUMA\x1b[0m \x1b[3;38;2;163;152;136mDNS that governs itself\x1b[0m \x1b[38;2;163;152;136mv{}\x1b[0m \x1b[38;2;192;98;58m║\x1b[0m", env!("CARGO_PKG_VERSION"));
eprintln!("\x1b[38;2;192;98;58m ╠══════════════════════════════════════════╣\x1b[0m");
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mDNS\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m", config.server.bind_addr);
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mAPI\x1b[0m http://localhost:{:<16}\x1b[38;2;192;98;58m║\x1b[0m", api_port);
@@ -130,6 +149,10 @@ async fn main() -> numa::Result<()> {
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mCache\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m", format!("max {} entries", config.cache.max_entries));
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mBlocking\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m",
if config.blocking.enabled { format!("{} lists", config.blocking.lists.len()) } else { "disabled".to_string() });
if config.proxy.enabled {
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mProxy\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m",
format!("http://*.{} on :{}", config.proxy.tld, config.proxy.port));
}
if !ctx.forwarding_rules.is_empty() {
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mRouting\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m",
format!("{} conditional rules", ctx.forwarding_rules.len()));
@@ -171,6 +194,16 @@ async fn main() -> numa::Result<()> {
axum::serve(listener, app).await.unwrap();
});
// Spawn HTTP reverse proxy for .numa domains
if config.proxy.enabled {
let proxy_ctx = Arc::clone(&ctx);
let proxy_port = config.proxy.port;
let proxy_tld = config.proxy.tld.clone();
tokio::spawn(async move {
numa::proxy::start_proxy(proxy_ctx, proxy_port, &proxy_tld).await;
});
}
// UDP DNS listener
#[allow(clippy::infinite_loop)]
loop {

188
src/proxy.rs Normal file
View File

@@ -0,0 +1,188 @@
use std::net::SocketAddr;
use std::sync::Arc;
use axum::body::Body;
use axum::extract::{Request, State};
use axum::response::IntoResponse;
use axum::routing::any;
use axum::Router;
use http_body_util::BodyExt;
use hyper::StatusCode;
use hyper_util::client::legacy::Client;
use hyper_util::rt::TokioExecutor;
use log::{debug, error, info, warn};
use tokio::io::copy_bidirectional;
use crate::ctx::ServerCtx;
// hyper legacy client used for forwarding plain HTTP requests upstream.
type HttpClient = Client<hyper_util::client::legacy::connect::HttpConnector, Body>;
/// Shared state handed to every proxied request.
#[derive(Clone)]
struct ProxyState {
    // Server-wide context; the service registry lives in `ctx.services`.
    ctx: Arc<ServerCtx>,
    // Reusable outbound client (keeps connection pooling across requests).
    client: HttpClient,
    tld_suffix: String, // pre-computed ".{tld}" — avoids per-request allocation
}
pub async fn start_proxy(ctx: Arc<ServerCtx>, port: u16, tld: &str) {
let addr: SocketAddr = ([0, 0, 0, 0], port).into();
let listener = match tokio::net::TcpListener::bind(addr).await {
Ok(l) => l,
Err(e) => {
warn!(
"proxy: could not bind port {} ({}) — proxy disabled",
port, e
);
return;
}
};
info!("HTTP proxy listening on {}", addr);
let client: HttpClient = Client::builder(TokioExecutor::new())
.http1_preserve_header_case(true)
.build_http();
let state = ProxyState {
ctx,
client,
tld_suffix: format!(".{}", tld),
};
let app = Router::new()
.fallback(any(proxy_handler))
.with_state(state);
axum::serve(listener, app).await.unwrap();
}
/// Returns the lowercased hostname from the Host header, with any `:port`
/// suffix removed. Returns None when the header is missing or not UTF-8.
fn extract_host(req: &Request) -> Option<String> {
    req.headers()
        .get(hyper::header::HOST)
        .and_then(|v| v.to_str().ok())
        .map(|h| {
            // Bracketed IPv6 literals ("[::1]:80") contain colons, so they
            // must not be split on ':' — take the part inside the brackets.
            let host = if let Some(rest) = h.strip_prefix('[') {
                rest.split(']').next().unwrap_or(rest)
            } else {
                h.split(':').next().unwrap_or(h)
            };
            host.to_lowercase()
        })
}
/// Routes an incoming request to the local service named by its Host header.
///
/// `{name}.{tld}` maps to the registered service `name`; the bare tld
/// (e.g. `http://numa/`) maps to the service named after the tld itself,
/// matching the DNS layer which resolves both forms. Upgrade requests
/// (WebSockets, HMR) are tunneled via `handle_upgrade`; everything else is
/// forwarded as plain HTTP with the URI rewritten to 127.0.0.1:{port}.
async fn proxy_handler(
    State(state): State<ProxyState>,
    req: Request,
) -> axum::response::Response {
    let hostname = match extract_host(&req) {
        Some(h) => h,
        None => {
            return (StatusCode::BAD_REQUEST, "missing Host header").into_response();
        }
    };
    // "api.numa" -> "api"; bare "numa" -> service "numa" (the dashboard),
    // consistent with the DNS handler which answers for the bare tld too.
    let bare_tld = state.tld_suffix.strip_prefix('.').unwrap_or("");
    let service_name = if let Some(name) = hostname.strip_suffix(state.tld_suffix.as_str()) {
        name.to_string()
    } else if !bare_tld.is_empty() && hostname == bare_tld {
        hostname.clone()
    } else {
        return (
            StatusCode::BAD_GATEWAY,
            format!("not a {} domain: {}", state.tld_suffix, hostname),
        )
            .into_response();
    };
    // Resolve the target port in a tight scope so the guard never lives
    // across an await point.
    let target_port = {
        let store = state.ctx.services.lock().unwrap();
        match store.lookup(&service_name) {
            Some(entry) => entry.target_port,
            None => {
                return (
                    StatusCode::BAD_GATEWAY,
                    format!("unknown service: {}{}", service_name, state.tld_suffix),
                )
                    .into_response()
            }
        }
    };
    let path_and_query = req
        .uri()
        .path_and_query()
        .map(|pq| pq.as_str())
        .unwrap_or("/");
    // Rebuild the URI against the loopback backend. Answer 502 rather than
    // panicking if the client-supplied path cannot be re-parsed.
    let target_uri: hyper::Uri =
        match format!("http://127.0.0.1:{}{}", target_port, path_and_query).parse() {
            Ok(uri) => uri,
            Err(e) => {
                return (StatusCode::BAD_GATEWAY, format!("bad request uri: {}", e))
                    .into_response()
            }
        };
    // Check for upgrade request (WebSocket, etc.)
    let is_upgrade = req.headers().get(hyper::header::UPGRADE).is_some();
    if is_upgrade {
        return handle_upgrade(req, target_uri, state.client.clone()).await;
    }
    // Regular HTTP proxy: rewrite the URI and forward everything else as-is.
    let (mut parts, body) = req.into_parts();
    parts.uri = target_uri;
    let proxied_req = Request::from_parts(parts, body);
    match state.client.request(proxied_req).await {
        Ok(resp) => {
            let (parts, body) = resp.into_parts();
            let body = Body::new(body.map_err(axum::Error::new));
            axum::response::Response::from_parts(parts, body)
        }
        Err(e) => (StatusCode::BAD_GATEWAY, format!("proxy error: {}", e)).into_response(),
    }
}
/// Tunnels a protocol-upgrade request (e.g. WebSocket) to the backend.
///
/// Flow: forward the request upstream; if the backend answers 101, await
/// BOTH upgrade futures and pipe bytes bidirectionally in a detached task.
/// If the backend declines (non-101), its response is relayed unchanged.
async fn handle_upgrade(
    mut req: Request,
    target_uri: hyper::Uri,
    client: HttpClient,
) -> axum::response::Response {
    // Save the client-side upgrade future before forwarding —
    // hyper::upgrade::on must be called while we still own the request.
    let client_upgrade = hyper::upgrade::on(&mut req);
    // Forward the request to backend
    let (mut parts, body) = req.into_parts();
    parts.uri = target_uri;
    let backend_req = Request::from_parts(parts, body);
    let mut backend_resp = match client.request(backend_req).await {
        Ok(r) => r,
        Err(e) => {
            return (StatusCode::BAD_GATEWAY, format!("upgrade error: {}", e)).into_response()
        }
    };
    // Backend refused the upgrade — relay its normal response verbatim.
    if backend_resp.status() != StatusCode::SWITCHING_PROTOCOLS {
        let (parts, body) = backend_resp.into_parts();
        let body = Body::new(body.map_err(axum::Error::new));
        return axum::response::Response::from_parts(parts, body);
    }
    // Save response headers before consuming for upgrade
    let resp_headers = backend_resp.headers().clone();
    let backend_upgrade = hyper::upgrade::on(&mut backend_resp);
    // Spawn bidirectional pipe once both sides are upgraded
    tokio::spawn(async move {
        let (client_io, backend_io) = match tokio::try_join!(client_upgrade, backend_upgrade) {
            Ok((c, b)) => (c, b),
            Err(e) => {
                error!("proxy upgrade failed: {}", e);
                return;
            }
        };
        let mut client_rw = hyper_util::rt::TokioIo::new(client_io);
        let mut backend_rw = hyper_util::rt::TokioIo::new(backend_io);
        match copy_bidirectional(&mut client_rw, &mut backend_rw).await {
            Ok((up, down)) => debug!("ws proxy closed: {} up, {} down bytes", up, down),
            Err(e) => debug!("ws proxy error: {}", e),
        }
    });
    // Return 101 to client with the backend's upgrade headers
    // (e.g. Sec-WebSocket-Accept) so the client-side handshake completes.
    let mut resp = axum::response::Response::builder()
        .status(StatusCode::SWITCHING_PROTOCOLS);
    for (key, value) in &resp_headers {
        resp = resp.header(key, value);
    }
    resp.body(Body::empty()).unwrap()
}

52
src/service_store.rs Normal file
View File

@@ -0,0 +1,52 @@
use std::collections::HashMap;
use serde::Serialize;
/// One registered service: a hostname label mapped to a local TCP port.
#[derive(Clone, Serialize)]
pub struct ServiceEntry {
    // Lowercased service name, used as the "{name}.{tld}" hostname label.
    pub name: String,
    // Local TCP port the proxy forwards this service's traffic to.
    pub target_port: u16,
}
/// In-memory registry of proxied services, keyed by lowercased name.
pub struct ServiceStore {
    // Map key is the lowercased name; the entry stores the same string.
    entries: HashMap<String, ServiceEntry>,
}
impl Default for ServiceStore {
fn default() -> Self {
Self::new()
}
}
impl ServiceStore {
    /// Creates an empty store.
    pub fn new() -> Self {
        Self {
            entries: HashMap::new(),
        }
    }

    /// Registers `name` -> `target_port`, replacing any existing entry.
    /// The name is lowercased so lookups are case-insensitive.
    pub fn insert(&mut self, name: &str, target_port: u16) {
        let normalized = name.to_lowercase();
        let entry = ServiceEntry {
            name: normalized.clone(),
            target_port,
        };
        self.entries.insert(normalized, entry);
    }

    /// Case-insensitive lookup by service name.
    pub fn lookup(&self, name: &str) -> Option<&ServiceEntry> {
        self.entries.get(&name.to_lowercase())
    }

    /// Removes the entry (case-insensitive); true if something was removed.
    pub fn remove(&mut self, name: &str) -> bool {
        self.entries.remove(&name.to_lowercase()).is_some()
    }

    /// All entries, sorted by name for stable display ordering.
    pub fn list(&self) -> Vec<&ServiceEntry> {
        let mut sorted: Vec<&ServiceEntry> = self.entries.values().collect();
        sorted.sort_by(|lhs, rhs| lhs.name.cmp(&rhs.name));
        sorted
    }
}

View File

@@ -418,6 +418,15 @@ pub fn uninstall_service() -> Result<(), String> {
/// Restart the service (kill process, launchd/systemd auto-restarts with new binary).
pub fn restart_service() -> Result<(), String> {
// Show version of the binary that will be running after restart
let version = match std::process::Command::new("/usr/local/bin/numa")
.arg("--version")
.output()
{
Ok(o) => String::from_utf8_lossy(&o.stderr).trim().to_string(),
Err(_) => "unknown".to_string(),
};
#[cfg(target_os = "macos")]
{
let output = std::process::Command::new("launchctl")
@@ -425,11 +434,10 @@ pub fn restart_service() -> Result<(), String> {
.output();
match output {
Ok(o) if o.status.success() => {
// Service is loaded — kill the process, launchd restarts it
let _ = std::process::Command::new("pkill")
.args(["-f", "/usr/local/bin/numa"])
.status();
eprintln!(" Service restarting (launchd will respawn).\n");
eprintln!(" Service restarting {}\n", version);
Ok(())
}
_ => Err("Service is not installed. Run 'sudo numa service start' first.".to_string()),
@@ -438,7 +446,7 @@ pub fn restart_service() -> Result<(), String> {
#[cfg(target_os = "linux")]
{
run_systemctl(&["restart", "numa"])?;
eprintln!(" Service restarted.\n");
eprintln!(" Service restarted{}\n", version);
Ok(())
}
#[cfg(not(any(target_os = "macos", target_os = "linux")))]