Merge pull request #7 from razvandimescu/feat/lan-discovery
Add LAN service discovery via UDP multicast
This commit is contained in:
21
Cargo.lock
generated
21
Cargo.lock
generated
@@ -621,7 +621,7 @@ dependencies = [
|
|||||||
"libc",
|
"libc",
|
||||||
"percent-encoding",
|
"percent-encoding",
|
||||||
"pin-project-lite",
|
"pin-project-lite",
|
||||||
"socket2",
|
"socket2 0.6.3",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tower-service",
|
"tower-service",
|
||||||
"tracing",
|
"tracing",
|
||||||
@@ -932,7 +932,7 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "numa"
|
name = "numa"
|
||||||
version = "0.1.0"
|
version = "0.3.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"axum",
|
"axum",
|
||||||
"env_logger",
|
"env_logger",
|
||||||
@@ -946,6 +946,7 @@ dependencies = [
|
|||||||
"rustls",
|
"rustls",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
|
"socket2 0.5.10",
|
||||||
"time",
|
"time",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tokio-rustls",
|
"tokio-rustls",
|
||||||
@@ -1062,7 +1063,7 @@ dependencies = [
|
|||||||
"quinn-udp",
|
"quinn-udp",
|
||||||
"rustc-hash",
|
"rustc-hash",
|
||||||
"rustls",
|
"rustls",
|
||||||
"socket2",
|
"socket2 0.6.3",
|
||||||
"thiserror 2.0.18",
|
"thiserror 2.0.18",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tracing",
|
"tracing",
|
||||||
@@ -1099,7 +1100,7 @@ dependencies = [
|
|||||||
"cfg_aliases",
|
"cfg_aliases",
|
||||||
"libc",
|
"libc",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"socket2",
|
"socket2 0.6.3",
|
||||||
"tracing",
|
"tracing",
|
||||||
"windows-sys 0.60.2",
|
"windows-sys 0.60.2",
|
||||||
]
|
]
|
||||||
@@ -1407,6 +1408,16 @@ version = "1.15.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
|
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "socket2"
|
||||||
|
version = "0.5.10"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
"windows-sys 0.52.0",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "socket2"
|
name = "socket2"
|
||||||
version = "0.6.3"
|
version = "0.6.3"
|
||||||
@@ -1566,7 +1577,7 @@ dependencies = [
|
|||||||
"libc",
|
"libc",
|
||||||
"mio",
|
"mio",
|
||||||
"pin-project-lite",
|
"pin-project-lite",
|
||||||
"socket2",
|
"socket2 0.6.3",
|
||||||
"tokio-macros",
|
"tokio-macros",
|
||||||
"windows-sys 0.61.2",
|
"windows-sys 0.61.2",
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "numa"
|
name = "numa"
|
||||||
version = "0.1.0"
|
version = "0.3.0"
|
||||||
authors = ["razvandimescu <razvan@dimescu.com>"]
|
authors = ["razvandimescu <razvan@dimescu.com>"]
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
description = "Ephemeral DNS overrides for development and testing. Point any hostname to any endpoint. Auto-revert when you're done."
|
description = "Ephemeral DNS overrides for development and testing. Point any hostname to any endpoint. Auto-revert when you're done."
|
||||||
@@ -22,6 +22,7 @@ hyper = { version = "1", features = ["client", "http1", "server"] }
|
|||||||
hyper-util = { version = "0.1", features = ["client-legacy", "http1", "tokio"] }
|
hyper-util = { version = "0.1", features = ["client-legacy", "http1", "tokio"] }
|
||||||
http-body-util = "0.1"
|
http-body-util = "0.1"
|
||||||
futures = "0.3"
|
futures = "0.3"
|
||||||
|
socket2 = { version = "0.5", features = ["all"] }
|
||||||
rcgen = { version = "0.13", features = ["pem", "x509-parser"] }
|
rcgen = { version = "0.13", features = ["pem", "x509-parser"] }
|
||||||
time = "0.3"
|
time = "0.3"
|
||||||
rustls = "0.23"
|
rustls = "0.23"
|
||||||
|
|||||||
39
README.md
39
README.md
@@ -39,9 +39,10 @@ sudo ./target/release/numa
|
|||||||
|
|
||||||
- **Ad blocking that travels with you** — 385K+ domains blocked via [Hagezi Pro](https://github.com/hagezi/dns-blocklists). Works on any network: coffee shops, hotels, airports.
|
- **Ad blocking that travels with you** — 385K+ domains blocked via [Hagezi Pro](https://github.com/hagezi/dns-blocklists). Works on any network: coffee shops, hotels, airports.
|
||||||
- **Local service proxy** — `https://frontend.numa` instead of `localhost:5173`. Auto-generated TLS certs, WebSocket support for HMR. Like `/etc/hosts` but with a dashboard and auto-revert.
|
- **Local service proxy** — `https://frontend.numa` instead of `localhost:5173`. Auto-generated TLS certs, WebSocket support for HMR. Like `/etc/hosts` but with a dashboard and auto-revert.
|
||||||
|
- **LAN service discovery** — Numa instances on the same network find each other automatically via multicast. Access a teammate's `api.numa` from your machine, zero config.
|
||||||
- **Developer overrides** — point any hostname to any IP, auto-reverts after N minutes. REST API with 22 endpoints.
|
- **Developer overrides** — point any hostname to any IP, auto-reverts after N minutes. REST API with 22 endpoints.
|
||||||
- **Sub-millisecond caching** — cached lookups in 0ms. Faster than any public resolver.
|
- **Sub-millisecond caching** — cached lookups in 0ms. Faster than any public resolver.
|
||||||
- **Live dashboard** — real-time stats, query log, blocking controls, service management.
|
- **Live dashboard** — real-time stats, query log, blocking controls, service management. LAN accessibility badges show which services are reachable from other devices.
|
||||||
- **macOS + Linux** — `numa install` configures system DNS, `numa service start` runs as launchd/systemd service.
|
- **macOS + Linux** — `numa install` configures system DNS, `numa service start` runs as launchd/systemd service.
|
||||||
|
|
||||||
## Local Service Proxy
|
## Local Service Proxy
|
||||||
@@ -59,6 +60,7 @@ open http://frontend.numa # → proxied to localhost:5173
|
|||||||
- **HTTPS with green lock** — auto-generated local CA + per-service TLS certs
|
- **HTTPS with green lock** — auto-generated local CA + per-service TLS certs
|
||||||
- **WebSocket** — Vite/webpack HMR works through the proxy
|
- **WebSocket** — Vite/webpack HMR works through the proxy
|
||||||
- **Health checks** — dashboard shows green/red status per service
|
- **Health checks** — dashboard shows green/red status per service
|
||||||
|
- **LAN sharing** — services bound to `0.0.0.0` are automatically discoverable by other Numa instances on the network. Dashboard shows "LAN" or "local only" per service.
|
||||||
- **Persistent** — services survive restarts
|
- **Persistent** — services survive restarts
|
||||||
- Or configure in `numa.toml`:
|
- Or configure in `numa.toml`:
|
||||||
|
|
||||||
@@ -68,6 +70,39 @@ name = "frontend"
|
|||||||
target_port = 5173
|
target_port = 5173
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## LAN Service Discovery
|
||||||
|
|
||||||
|
Run Numa on multiple machines. They find each other automatically:
|
||||||
|
|
||||||
|
```
|
||||||
|
Machine A (192.168.1.5) Machine B (192.168.1.20)
|
||||||
|
┌──────────────────────┐ ┌──────────────────────┐
|
||||||
|
│ Numa │ multicast │ Numa │
|
||||||
|
│ services: │◄───────────►│ services: │
|
||||||
|
│ - api (port 8000) │ discovery │ - grafana (3000) │
|
||||||
|
│ - frontend (5173) │ │ │
|
||||||
|
└──────────────────────┘ └──────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
From Machine B:
|
||||||
|
```bash
|
||||||
|
dig @127.0.0.1 api.numa # → 192.168.1.5
|
||||||
|
curl http://api.numa # → proxied to Machine A's port 8000
|
||||||
|
```
|
||||||
|
|
||||||
|
No configuration needed. Multicast announcements on `239.255.70.78:5390`, configurable via `[lan]` in `numa.toml`.
|
||||||
|
|
||||||
|
**Hub mode** — don't want to install Numa on every machine? Run one instance as a shared DNS server and point other devices to it:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# On the hub machine, bind to LAN interface
|
||||||
|
[server]
|
||||||
|
bind_addr = "0.0.0.0:53"
|
||||||
|
|
||||||
|
# On other devices, set DNS to the hub's IP
|
||||||
|
# They get .numa resolution, ad blocking, caching — zero install
|
||||||
|
```
|
||||||
|
|
||||||
## How It Compares
|
## How It Compares
|
||||||
|
|
||||||
| | Pi-hole | AdGuard Home | NextDNS | Cloudflare | Numa |
|
| | Pi-hole | AdGuard Home | NextDNS | Cloudflare | Numa |
|
||||||
@@ -76,6 +111,7 @@ target_port = 5173
|
|||||||
| Portable (travels with laptop) | No (appliance) | No (appliance) | Cloud only | Cloud only | Single binary |
|
| Portable (travels with laptop) | No (appliance) | No (appliance) | Cloud only | Cloud only | Single binary |
|
||||||
| Developer overrides | No | No | No | No | REST API + auto-expiry |
|
| Developer overrides | No | No | No | No | REST API + auto-expiry |
|
||||||
| Local service proxy | No | No | No | No | `.numa` + HTTPS + WS |
|
| Local service proxy | No | No | No | No | `.numa` + HTTPS + WS |
|
||||||
|
| LAN service discovery | No | No | No | No | Multicast, zero config |
|
||||||
| Data stays local | Yes | Yes | Cloud | Cloud | 100% local |
|
| Data stays local | Yes | Yes | Cloud | Cloud | 100% local |
|
||||||
| Zero config | Complex | Docker/setup | Yes | Yes | Works out of the box |
|
| Zero config | Complex | Docker/setup | Yes | Yes | Works out of the box |
|
||||||
| Self-sovereign DNS | No | No | No | No | pkarr/DHT roadmap |
|
| Self-sovereign DNS | No | No | No | No | pkarr/DHT roadmap |
|
||||||
@@ -97,6 +133,7 @@ No DNS libraries. The wire protocol — headers, labels, compression pointers, r
|
|||||||
- [x] Ad blocking — 385K+ domains, live dashboard, allowlist
|
- [x] Ad blocking — 385K+ domains, live dashboard, allowlist
|
||||||
- [x] System integration — macOS + Linux, launchd/systemd, Tailscale/VPN auto-discovery
|
- [x] System integration — macOS + Linux, launchd/systemd, Tailscale/VPN auto-discovery
|
||||||
- [x] Local service proxy — `.numa` domains, HTTP/HTTPS proxy, auto TLS, WebSocket
|
- [x] Local service proxy — `.numa` domains, HTTP/HTTPS proxy, auto TLS, WebSocket
|
||||||
|
- [x] LAN service discovery — multicast auto-discovery, cross-machine DNS + proxy
|
||||||
- [ ] pkarr integration — self-sovereign DNS via Mainline DHT (15M nodes)
|
- [ ] pkarr integration — self-sovereign DNS via Mainline DHT (15M nodes)
|
||||||
- [ ] Global `.numa` names — self-publish, DHT-backed, first-come-first-served
|
- [ ] Global `.numa` names — self-publish, DHT-backed, first-come-first-served
|
||||||
|
|
||||||
|
|||||||
@@ -8,8 +8,10 @@
|
|||||||
# 1. Opens the dashboard in Chrome --app mode (clean, no address bar)
|
# 1. Opens the dashboard in Chrome --app mode (clean, no address bar)
|
||||||
# 2. Generates DNS traffic (forward, cache hit, blocked)
|
# 2. Generates DNS traffic (forward, cache hit, blocked)
|
||||||
# 3. Types "peekm" / "6419" into the Local Services form on camera
|
# 3. Types "peekm" / "6419" into the Local Services form on camera
|
||||||
# 4. Opens peekm.numa to show the proxy working
|
# 4. Shows LAN accessibility badge ("local only" / "LAN")
|
||||||
# 5. Records via ffmpeg and converts to optimized GIF
|
# 5. Checks a blocked domain
|
||||||
|
# 6. Opens peekm.numa to show the proxy working
|
||||||
|
# 7. Records via ffmpeg and converts to optimized GIF
|
||||||
|
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
|
|
||||||
@@ -228,18 +230,10 @@ dig @127.0.0.1 github.com +short > /dev/null 2>&1
|
|||||||
dig @127.0.0.1 ad.doubleclick.net +short > /dev/null 2>&1
|
dig @127.0.0.1 ad.doubleclick.net +short > /dev/null 2>&1
|
||||||
sleep 3
|
sleep 3
|
||||||
|
|
||||||
# --------------- Scene 2: Check Domain blocker (3-6s) ---------------
|
# --------------- Scene 2: Add peekm service via UI (3-7s) ---------------
|
||||||
log "Scene 2: Check Domain — blocked tracker..."
|
log "Scene 2: Adding peekm.numa service..."
|
||||||
type_into "#checkDomainInput" "ads.doubleclick.net" 0.04
|
|
||||||
sleep 0.3
|
|
||||||
# Click Check button
|
|
||||||
run_js "document.querySelector('#checkDomainInput').closest('form').querySelector('.btn').click();"
|
|
||||||
sleep 2
|
|
||||||
|
|
||||||
# --------------- Scene 3: Add peekm service via UI (6-10s) ---------------
|
# Services panel is now first — scroll to it
|
||||||
log "Scene 3: Adding peekm.numa service..."
|
|
||||||
|
|
||||||
# Scroll to Local Services form
|
|
||||||
run_js "
|
run_js "
|
||||||
var svcPanel = document.getElementById('serviceForm');
|
var svcPanel = document.getElementById('serviceForm');
|
||||||
if (svcPanel) svcPanel.scrollIntoView({behavior: 'smooth', block: 'center'});
|
if (svcPanel) svcPanel.scrollIntoView({behavior: 'smooth', block: 'center'});
|
||||||
@@ -251,20 +245,34 @@ sleep 0.2
|
|||||||
type_into "#svcPort" "6419" 0.1
|
type_into "#svcPort" "6419" 0.1
|
||||||
sleep 0.3
|
sleep 0.3
|
||||||
|
|
||||||
# Click "Add Service"
|
# Click "Add Service" — LAN badge ("local only" or "LAN") will appear
|
||||||
run_js "document.querySelector('#serviceForm .btn-add').click();"
|
run_js "document.querySelector('#serviceForm .btn-add').click();"
|
||||||
sleep 1.5
|
sleep 2
|
||||||
|
|
||||||
# --------------- Scene 4: Open peekm.numa (10-14s) ---------------
|
# --------------- Scene 3: Open peekm.numa (7-11s) ---------------
|
||||||
log "Scene 4: Opening peekm.numa in browser..."
|
log "Scene 3: Opening peekm.numa in browser..."
|
||||||
open "http://peekm.numa/view/peekm/README.md" 2>/dev/null || true
|
open "http://peekm.numa/view/peekm/README.md" 2>/dev/null || true
|
||||||
sleep 4
|
sleep 4
|
||||||
|
|
||||||
# --------------- Scene 5: Back to dashboard (14-17s) ---------------
|
# --------------- Scene 4: Back to dashboard (11-14s) ---------------
|
||||||
log "Scene 5: Back to dashboard — LOCAL queries visible..."
|
log "Scene 4: Back to dashboard — LAN badges + LOCAL queries visible..."
|
||||||
osascript -e "tell application \"System Events\" to set frontmost of (first process whose unix id is $CHROME_PID) to true" 2>/dev/null || true
|
osascript -e "tell application \"System Events\" to set frontmost of (first process whose unix id is $CHROME_PID) to true" 2>/dev/null || true
|
||||||
sleep 3
|
sleep 3
|
||||||
|
|
||||||
|
# --------------- Scene 5: Check Domain blocker (14-17s) ---------------
|
||||||
|
log "Scene 5: Check Domain — blocked tracker..."
|
||||||
|
# Scroll down to blocking panel
|
||||||
|
run_js "
|
||||||
|
var blockPanel = document.getElementById('blockingPanel');
|
||||||
|
if (blockPanel) blockPanel.scrollIntoView({behavior: 'smooth', block: 'center'});
|
||||||
|
"
|
||||||
|
sleep 0.5
|
||||||
|
type_into "#checkDomainInput" "ads.doubleclick.net" 0.04
|
||||||
|
sleep 0.3
|
||||||
|
# Click Check button
|
||||||
|
run_js "document.querySelector('#checkDomainInput').closest('form').querySelector('.btn').click();"
|
||||||
|
sleep 2
|
||||||
|
|
||||||
# --------------- Scene 6: Terminal-style dig overlay (17-20s) ---------------
|
# --------------- Scene 6: Terminal-style dig overlay (17-20s) ---------------
|
||||||
log "Scene 6: dig proof overlay..."
|
log "Scene 6: dig proof overlay..."
|
||||||
DIG_RESULT=$(dig @127.0.0.1 peekm.numa +short 2>/dev/null | head -1)
|
DIG_RESULT=$(dig @127.0.0.1 peekm.numa +short 2>/dev/null | head -1)
|
||||||
|
|||||||
@@ -382,6 +382,15 @@ body {
|
|||||||
}
|
}
|
||||||
.health-dot.up { background: var(--emerald); }
|
.health-dot.up { background: var(--emerald); }
|
||||||
.health-dot.down { background: var(--rose); }
|
.health-dot.down { background: var(--rose); }
|
||||||
|
.lan-badge {
|
||||||
|
font-family: var(--font-mono);
|
||||||
|
font-size: 0.58rem;
|
||||||
|
padding: 1px 5px;
|
||||||
|
border-radius: 3px;
|
||||||
|
margin-left: 0.3rem;
|
||||||
|
}
|
||||||
|
.lan-badge.shared { background: rgba(82, 122, 82, 0.12); color: var(--emerald); }
|
||||||
|
.lan-badge.local-only { background: rgba(192, 98, 58, 0.12); color: var(--amber-dim); }
|
||||||
|
|
||||||
/* Override form */
|
/* Override form */
|
||||||
.override-form {
|
.override-form {
|
||||||
@@ -568,22 +577,26 @@ body {
|
|||||||
|
|
||||||
<!-- Sidebar -->
|
<!-- Sidebar -->
|
||||||
<div class="sidebar">
|
<div class="sidebar">
|
||||||
<!-- Blocking -->
|
<!-- Local services -->
|
||||||
<div class="panel" id="blockingPanel">
|
<div class="panel">
|
||||||
<div class="panel-header">
|
<div class="panel-header">
|
||||||
<span class="panel-title">Blocking</span>
|
<div>
|
||||||
<span class="panel-title" id="blockingRefresh" style="color:var(--text-dim);font-weight:400;"></span>
|
<span class="panel-title">Local Services</span>
|
||||||
|
<div style="font-size:0.68rem;color:var(--text-dim);margin-top:0.15rem;">Give localhost apps clean .numa URLs. Persistent, with HTTP proxy.</div>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div class="panel-body">
|
<div class="panel-body">
|
||||||
<form class="override-form" onsubmit="return checkDomain(event)" style="margin-bottom:0;border-bottom:none;padding-bottom:0;">
|
<form class="override-form" id="serviceForm" onsubmit="return addService(event)">
|
||||||
<div class="override-form-row">
|
<div class="override-form-row">
|
||||||
<input type="text" id="checkDomainInput" placeholder="Is this domain blocked?" required style="flex:3">
|
<input type="text" id="svcName" placeholder="name (becomes name.numa)" required style="flex:2">
|
||||||
<button type="submit" class="btn" style="background:var(--violet);color:white;flex-shrink:0;">Check</button>
|
<input type="number" id="svcPort" placeholder="port (e.g. 3000)" required min="1" max="65535" style="flex:1">
|
||||||
</div>
|
</div>
|
||||||
|
<button type="submit" class="btn btn-add">Add Service</button>
|
||||||
|
<div class="override-error" id="serviceError"></div>
|
||||||
</form>
|
</form>
|
||||||
<div id="checkResult" style="display:none;margin-top:0.6rem;padding:0.5rem 0.6rem;border-radius:5px;font-family:var(--font-mono);font-size:0.72rem;"></div>
|
<div id="servicesList">
|
||||||
<div id="blockingSources" style="margin-top:0.8rem;padding-top:0.6rem;border-top:1px solid var(--border);"></div>
|
<div class="empty-state">No services configured</div>
|
||||||
<div id="blockingAllowlist" style="margin-top:0.8rem;padding-top:0.6rem;border-top:1px solid var(--border);"></div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
@@ -612,26 +625,22 @@ body {
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<!-- Local services -->
|
<!-- Blocking -->
|
||||||
<div class="panel">
|
<div class="panel" id="blockingPanel">
|
||||||
<div class="panel-header">
|
<div class="panel-header">
|
||||||
<div>
|
<span class="panel-title">Blocking</span>
|
||||||
<span class="panel-title">Local Services</span>
|
<span class="panel-title" id="blockingRefresh" style="color:var(--text-dim);font-weight:400;"></span>
|
||||||
<div style="font-size:0.68rem;color:var(--text-dim);margin-top:0.15rem;">Give localhost apps clean .numa URLs. Persistent, with HTTP proxy.</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
</div>
|
||||||
<div class="panel-body">
|
<div class="panel-body">
|
||||||
<form class="override-form" id="serviceForm" onsubmit="return addService(event)">
|
<form class="override-form" onsubmit="return checkDomain(event)" style="margin-bottom:0;border-bottom:none;padding-bottom:0;">
|
||||||
<div class="override-form-row">
|
<div class="override-form-row">
|
||||||
<input type="text" id="svcName" placeholder="name (becomes name.numa)" required style="flex:2">
|
<input type="text" id="checkDomainInput" placeholder="Is this domain blocked?" required style="flex:3">
|
||||||
<input type="number" id="svcPort" placeholder="port (e.g. 3000)" required min="1" max="65535" style="flex:1">
|
<button type="submit" class="btn" style="background:var(--violet);color:white;flex-shrink:0;">Check</button>
|
||||||
</div>
|
</div>
|
||||||
<button type="submit" class="btn btn-add">Add Service</button>
|
|
||||||
<div class="override-error" id="serviceError"></div>
|
|
||||||
</form>
|
</form>
|
||||||
<div id="servicesList">
|
<div id="checkResult" style="display:none;margin-top:0.6rem;padding:0.5rem 0.6rem;border-radius:5px;font-family:var(--font-mono);font-size:0.72rem;"></div>
|
||||||
<div class="empty-state">No services configured</div>
|
<div id="blockingSources" style="margin-top:0.8rem;padding-top:0.6rem;border-top:1px solid var(--border);"></div>
|
||||||
</div>
|
<div id="blockingAllowlist" style="margin-top:0.8rem;padding-top:0.6rem;border-top:1px solid var(--border);"></div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
@@ -1082,16 +1091,22 @@ function renderServices(entries) {
|
|||||||
el.innerHTML = '<div class="empty-state">No services configured</div>';
|
el.innerHTML = '<div class="empty-state">No services configured</div>';
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
el.innerHTML = entries.map(e => `
|
el.innerHTML = entries.map(e => {
|
||||||
|
const lanBadge = e.healthy
|
||||||
|
? (e.lan_accessible
|
||||||
|
? '<span class="lan-badge shared" title="Reachable from other devices on the network">LAN</span>'
|
||||||
|
: '<span class="lan-badge local-only" title="Bound to localhost — not reachable from other devices. Start with 0.0.0.0 to share on LAN.">local only</span>')
|
||||||
|
: '';
|
||||||
|
return `
|
||||||
<div class="service-item">
|
<div class="service-item">
|
||||||
<span class="health-dot ${e.healthy ? 'up' : 'down'}" title="${e.healthy ? 'running' : 'not reachable'}"></span>
|
<span class="health-dot ${e.healthy ? 'up' : 'down'}" title="${e.healthy ? 'running' : 'not reachable'}"></span>
|
||||||
<div class="service-info">
|
<div class="service-info">
|
||||||
<div class="service-name"><a href="${e.url}" target="_blank">${e.name}.numa</a></div>
|
<div class="service-name"><a href="${e.url}" target="_blank">${e.name}.numa</a>${lanBadge}</div>
|
||||||
<div class="service-port">localhost:${e.target_port} → proxied</div>
|
<div class="service-port">localhost:${e.target_port} → proxied</div>
|
||||||
</div>
|
</div>
|
||||||
${e.name === 'numa' ? '' : `<button class="btn-delete" onclick="deleteService('${e.name}')" title="Remove service">×</button>`}
|
${e.name === 'numa' ? '' : `<button class="btn-delete" onclick="deleteService('${e.name}')" title="Remove service">×</button>`}
|
||||||
</div>
|
</div>
|
||||||
`).join('');
|
`}).join('');
|
||||||
}
|
}
|
||||||
|
|
||||||
async function addService(event) {
|
async function addService(event) {
|
||||||
|
|||||||
54
src/api.rs
54
src/api.rs
@@ -590,6 +590,7 @@ struct ServiceResponse {
|
|||||||
target_port: u16,
|
target_port: u16,
|
||||||
url: String,
|
url: String,
|
||||||
healthy: bool,
|
healthy: bool,
|
||||||
|
lan_accessible: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
@@ -609,22 +610,38 @@ async fn list_services(State(ctx): State<Arc<ServerCtx>>) -> Json<Vec<ServiceRes
|
|||||||
};
|
};
|
||||||
let tld = &ctx.proxy_tld;
|
let tld = &ctx.proxy_tld;
|
||||||
|
|
||||||
// Run all health checks concurrently
|
let lan_ip = crate::lan::detect_lan_ip();
|
||||||
let health_futures: Vec<_> = entries
|
|
||||||
|
let check_futures: Vec<_> = entries
|
||||||
.iter()
|
.iter()
|
||||||
.map(|(_, port)| check_health(*port))
|
.map(|(_, port)| {
|
||||||
|
let port = *port;
|
||||||
|
let localhost = std::net::SocketAddr::from(([127, 0, 0, 1], port));
|
||||||
|
let lan_addr = lan_ip.map(|ip| std::net::SocketAddr::new(ip.into(), port));
|
||||||
|
async move {
|
||||||
|
let healthy = check_tcp(localhost).await;
|
||||||
|
let lan_accessible = match lan_addr {
|
||||||
|
Some(addr) => check_tcp(addr).await,
|
||||||
|
None => false,
|
||||||
|
};
|
||||||
|
(healthy, lan_accessible)
|
||||||
|
}
|
||||||
|
})
|
||||||
.collect();
|
.collect();
|
||||||
let health_results = futures::future::join_all(health_futures).await;
|
let check_results = futures::future::join_all(check_futures).await;
|
||||||
|
|
||||||
let results: Vec<_> = entries
|
let results: Vec<_> = entries
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.zip(health_results)
|
.zip(check_results)
|
||||||
.map(|((name, port), healthy)| ServiceResponse {
|
.map(
|
||||||
url: format!("http://{}.{}", name, tld),
|
|((name, port), (healthy, lan_accessible))| ServiceResponse {
|
||||||
name,
|
url: format!("http://{}.{}", name, tld),
|
||||||
target_port: port,
|
name,
|
||||||
healthy,
|
target_port: port,
|
||||||
})
|
healthy,
|
||||||
|
lan_accessible,
|
||||||
|
},
|
||||||
|
)
|
||||||
.collect();
|
.collect();
|
||||||
Json(results)
|
Json(results)
|
||||||
}
|
}
|
||||||
@@ -655,7 +672,15 @@ async fn create_service(
|
|||||||
let tld = &ctx.proxy_tld;
|
let tld = &ctx.proxy_tld;
|
||||||
ctx.services.lock().unwrap().insert(&name, req.target_port);
|
ctx.services.lock().unwrap().insert(&name, req.target_port);
|
||||||
|
|
||||||
let healthy = check_health(req.target_port).await;
|
let localhost = std::net::SocketAddr::from(([127, 0, 0, 1], req.target_port));
|
||||||
|
let lan_addr =
|
||||||
|
crate::lan::detect_lan_ip().map(|ip| std::net::SocketAddr::new(ip.into(), req.target_port));
|
||||||
|
let (healthy, lan_accessible) = tokio::join!(check_tcp(localhost), async {
|
||||||
|
match lan_addr {
|
||||||
|
Some(a) => check_tcp(a).await,
|
||||||
|
None => false,
|
||||||
|
}
|
||||||
|
});
|
||||||
Ok((
|
Ok((
|
||||||
StatusCode::CREATED,
|
StatusCode::CREATED,
|
||||||
Json(ServiceResponse {
|
Json(ServiceResponse {
|
||||||
@@ -663,6 +688,7 @@ async fn create_service(
|
|||||||
name,
|
name,
|
||||||
target_port: req.target_port,
|
target_port: req.target_port,
|
||||||
healthy,
|
healthy,
|
||||||
|
lan_accessible,
|
||||||
}),
|
}),
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
@@ -679,10 +705,10 @@ async fn remove_service(State(ctx): State<Arc<ServerCtx>>, Path(name): Path<Stri
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn check_health(port: u16) -> bool {
|
async fn check_tcp(addr: std::net::SocketAddr) -> bool {
|
||||||
tokio::time::timeout(
|
tokio::time::timeout(
|
||||||
std::time::Duration::from_millis(100),
|
std::time::Duration::from_millis(100),
|
||||||
tokio::net::TcpStream::connect(format!("127.0.0.1:{}", port)),
|
tokio::net::TcpStream::connect(addr),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
.map(|r| r.is_ok())
|
.map(|r| r.is_ok())
|
||||||
|
|||||||
@@ -25,6 +25,8 @@ pub struct Config {
|
|||||||
pub proxy: ProxyConfig,
|
pub proxy: ProxyConfig,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub services: Vec<ServiceConfig>,
|
pub services: Vec<ServiceConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub lan: LanConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
@@ -202,6 +204,48 @@ pub struct ServiceConfig {
|
|||||||
pub target_port: u16,
|
pub target_port: u16,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Clone)]
|
||||||
|
pub struct LanConfig {
|
||||||
|
#[serde(default = "default_lan_enabled")]
|
||||||
|
pub enabled: bool,
|
||||||
|
#[serde(default = "default_lan_multicast_group")]
|
||||||
|
pub multicast_group: String,
|
||||||
|
#[serde(default = "default_lan_port")]
|
||||||
|
pub port: u16,
|
||||||
|
#[serde(default = "default_lan_broadcast_interval")]
|
||||||
|
pub broadcast_interval_secs: u64,
|
||||||
|
#[serde(default = "default_lan_peer_timeout")]
|
||||||
|
pub peer_timeout_secs: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for LanConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
LanConfig {
|
||||||
|
enabled: default_lan_enabled(),
|
||||||
|
multicast_group: default_lan_multicast_group(),
|
||||||
|
port: default_lan_port(),
|
||||||
|
broadcast_interval_secs: default_lan_broadcast_interval(),
|
||||||
|
peer_timeout_secs: default_lan_peer_timeout(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_lan_enabled() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
fn default_lan_multicast_group() -> String {
|
||||||
|
"239.255.70.78".to_string()
|
||||||
|
}
|
||||||
|
fn default_lan_port() -> u16 {
|
||||||
|
5390
|
||||||
|
}
|
||||||
|
fn default_lan_broadcast_interval() -> u64 {
|
||||||
|
30
|
||||||
|
}
|
||||||
|
fn default_lan_peer_timeout() -> u64 {
|
||||||
|
90
|
||||||
|
}
|
||||||
|
|
||||||
pub fn load_config(path: &str) -> Result<Config> {
|
pub fn load_config(path: &str) -> Result<Config> {
|
||||||
if !Path::new(path).exists() {
|
if !Path::new(path).exists() {
|
||||||
return Ok(Config::default());
|
return Ok(Config::default());
|
||||||
|
|||||||
27
src/ctx.rs
27
src/ctx.rs
@@ -11,6 +11,7 @@ use crate::cache::DnsCache;
|
|||||||
use crate::config::ZoneMap;
|
use crate::config::ZoneMap;
|
||||||
use crate::forward::forward_query;
|
use crate::forward::forward_query;
|
||||||
use crate::header::ResultCode;
|
use crate::header::ResultCode;
|
||||||
|
use crate::lan::PeerStore;
|
||||||
use crate::override_store::OverrideStore;
|
use crate::override_store::OverrideStore;
|
||||||
use crate::packet::DnsPacket;
|
use crate::packet::DnsPacket;
|
||||||
use crate::query_log::{QueryLog, QueryLogEntry};
|
use crate::query_log::{QueryLog, QueryLogEntry};
|
||||||
@@ -29,6 +30,7 @@ pub struct ServerCtx {
|
|||||||
pub blocklist: Mutex<BlocklistStore>,
|
pub blocklist: Mutex<BlocklistStore>,
|
||||||
pub query_log: Mutex<QueryLog>,
|
pub query_log: Mutex<QueryLog>,
|
||||||
pub services: Mutex<ServiceStore>,
|
pub services: Mutex<ServiceStore>,
|
||||||
|
pub lan_peers: Mutex<PeerStore>,
|
||||||
pub forwarding_rules: Vec<ForwardingRule>,
|
pub forwarding_rules: Vec<ForwardingRule>,
|
||||||
pub upstream: SocketAddr,
|
pub upstream: SocketAddr,
|
||||||
pub timeout: Duration,
|
pub timeout: Duration,
|
||||||
@@ -67,16 +69,37 @@ pub async fn handle_query(
|
|||||||
} else if !ctx.proxy_tld_suffix.is_empty()
|
} else if !ctx.proxy_tld_suffix.is_empty()
|
||||||
&& (qname.ends_with(&ctx.proxy_tld_suffix) || qname == ctx.proxy_tld)
|
&& (qname.ends_with(&ctx.proxy_tld_suffix) || qname == ctx.proxy_tld)
|
||||||
{
|
{
|
||||||
|
// Resolve .numa: local services → 127.0.0.1, LAN peers → peer IP
|
||||||
|
let service_name = qname.strip_suffix(&ctx.proxy_tld_suffix).unwrap_or(&qname);
|
||||||
|
let resolve_ip = {
|
||||||
|
let local = ctx.services.lock().unwrap();
|
||||||
|
if local.lookup(service_name).is_some() {
|
||||||
|
std::net::Ipv4Addr::LOCALHOST
|
||||||
|
} else {
|
||||||
|
let mut peers = ctx.lan_peers.lock().unwrap();
|
||||||
|
peers
|
||||||
|
.lookup(service_name)
|
||||||
|
.and_then(|(ip, _)| match ip {
|
||||||
|
std::net::IpAddr::V4(v4) => Some(v4),
|
||||||
|
_ => None,
|
||||||
|
})
|
||||||
|
.unwrap_or(std::net::Ipv4Addr::LOCALHOST)
|
||||||
|
}
|
||||||
|
};
|
||||||
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
let mut resp = DnsPacket::response_from(&query, ResultCode::NOERROR);
|
||||||
match qtype {
|
match qtype {
|
||||||
QueryType::AAAA => resp.answers.push(DnsRecord::AAAA {
|
QueryType::AAAA => resp.answers.push(DnsRecord::AAAA {
|
||||||
domain: qname.clone(),
|
domain: qname.clone(),
|
||||||
addr: std::net::Ipv6Addr::LOCALHOST,
|
addr: if resolve_ip == std::net::Ipv4Addr::LOCALHOST {
|
||||||
|
std::net::Ipv6Addr::LOCALHOST
|
||||||
|
} else {
|
||||||
|
resolve_ip.to_ipv6_mapped()
|
||||||
|
},
|
||||||
ttl: 300,
|
ttl: 300,
|
||||||
}),
|
}),
|
||||||
_ => resp.answers.push(DnsRecord::A {
|
_ => resp.answers.push(DnsRecord::A {
|
||||||
domain: qname.clone(),
|
domain: qname.clone(),
|
||||||
addr: std::net::Ipv4Addr::LOCALHOST,
|
addr: resolve_ip,
|
||||||
ttl: 300,
|
ttl: 300,
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
|
|||||||
224
src/lan.rs
Normal file
224
src/lan.rs
Normal file
@@ -0,0 +1,224 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use log::{debug, info, warn};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::config::LanConfig;
|
||||||
|
use crate::ctx::ServerCtx;
|
||||||
|
|
||||||
|
// --- Peer Store ---
|
||||||
|
|
||||||
|
/// In-memory registry of services announced by other numa instances on the
/// LAN. Keys are lowercased service names; each value carries the peer's
/// address, the service port, and the instant the announcement was last
/// heard. Stale entries (older than `timeout`) are pruned lazily on access.
pub struct PeerStore {
    peers: HashMap<String, (IpAddr, u16, Instant)>,
    timeout: Duration,
}

impl PeerStore {
    /// Creates an empty store whose entries expire `timeout_secs` seconds
    /// after they were last refreshed.
    pub fn new(timeout_secs: u64) -> Self {
        Self {
            peers: HashMap::new(),
            timeout: Duration::from_secs(timeout_secs),
        }
    }

    /// Records (or refreshes) every `(name, port)` pair announced by `host`.
    /// Names are lowercased on insert so later lookups are case-insensitive.
    pub fn update(&mut self, host: IpAddr, services: &[(String, u16)]) {
        let seen = Instant::now();
        for (name, port) in services {
            self.peers.insert(name.to_lowercase(), (host, *port, seen));
        }
    }

    /// Returns `(ip, port)` for `name` if a fresh announcement is on file.
    /// An expired entry is evicted on the spot and reported as absent.
    pub fn lookup(&mut self, name: &str) -> Option<(IpAddr, u16)> {
        let key = name.to_lowercase();
        match self.peers.get(&key) {
            Some(&(ip, port, seen)) if seen.elapsed() <= self.timeout => Some((ip, port)),
            Some(_) => {
                // Stale: drop it so it stops shadowing future announcements.
                self.peers.remove(&key);
                None
            }
            None => None,
        }
    }

    /// Drops every expired entry, then returns the remaining peers as
    /// `(name, ip, port, seconds_since_last_seen)` tuples.
    pub fn list(&mut self) -> Vec<(String, IpAddr, u16, u64)> {
        let now = Instant::now();
        let timeout = self.timeout;
        self.peers
            .retain(|_, &mut (_, _, seen)| now.duration_since(seen) < timeout);
        let mut out = Vec::with_capacity(self.peers.len());
        for (name, &(ip, port, seen)) in &self.peers {
            out.push((name.clone(), ip, port, now.duration_since(seen).as_secs()));
        }
        out
    }
}
|
||||||
|
|
||||||
|
// --- Multicast ---
|
||||||
|
|
||||||
|
/// JSON payload broadcast to the multicast group by each numa instance.
#[derive(Serialize, Deserialize)]
struct Announcement {
    // Per-process identifier (pid XOR startup nanos); receivers compare it
    // against their own id to ignore multicast loopback of their own packets.
    instance_id: u64,
    // The announcer's LAN IP rendered as a string.
    host: String,
    // Every service the announcer currently exposes.
    services: Vec<AnnouncedService>,
}

/// One service entry inside an `Announcement`.
#[derive(Serialize, Deserialize)]
struct AnnouncedService {
    // Service name as registered locally (matched case-insensitively on receipt).
    name: String,
    // Port the service is reachable on at `host`.
    port: u16,
}
|
||||||
|
|
||||||
|
/// Best-effort guess at this machine's LAN IPv4 address.
///
/// Opens an ephemeral UDP socket and "connects" it toward a public address so
/// the OS selects the outbound interface, then reads back the chosen source
/// address. No packet is sent — UDP `connect` only records the peer. Returns
/// `None` if any step fails or the selected local address is not IPv4.
pub fn detect_lan_ip() -> Option<Ipv4Addr> {
    let probe = std::net::UdpSocket::bind("0.0.0.0:0").ok()?;
    probe.connect("8.8.8.8:80").ok()?;
    if let SocketAddr::V4(local) = probe.local_addr().ok()? {
        Some(*local.ip())
    } else {
        None
    }
}
|
||||||
|
|
||||||
|
/// Runs LAN service discovery for this numa instance.
///
/// Spawns a background task that periodically multicasts the locally
/// registered services, then loops forever receiving peer announcements and
/// folding them into `ctx.lan_peers`. If the configured group is invalid or
/// the multicast socket cannot be set up, a warning is logged and the
/// function returns, disabling discovery for this run.
pub async fn start_lan_discovery(ctx: Arc<ServerCtx>, config: &LanConfig) {
    // Validate the configured group before touching any sockets.
    let multicast_group: Ipv4Addr = match config.multicast_group.parse::<Ipv4Addr>() {
        Ok(g) if g.is_multicast() => g,
        Ok(g) => {
            warn!("LAN: {} is not a multicast address (224.0.0.0/4)", g);
            return;
        }
        Err(e) => {
            warn!(
                "LAN: invalid multicast group {}: {}",
                config.multicast_group, e
            );
            return;
        }
    };
    let port = config.port;
    let interval = Duration::from_secs(config.broadcast_interval_secs);

    // Identifier for recognizing our own packets: multicast loopback means we
    // also receive what we send. pid XOR startup-nanos makes collisions
    // between live instances unlikely (not impossible — best effort only).
    let instance_id: u64 = {
        let pid = std::process::id() as u64;
        let ts = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos() as u64;
        pid ^ ts
    };
    // Address advertised to peers. Falls back to 127.0.0.1 when no route can
    // be probed — NOTE(review): peers receiving that would dial themselves;
    // confirm this fallback is intended.
    let local_ip = detect_lan_ip().unwrap_or(Ipv4Addr::LOCALHOST);
    info!(
        "LAN discovery on {}:{}, local IP {}, instance {:016x}",
        multicast_group, port, local_ip, instance_id
    );

    // Create socket with SO_REUSEADDR for multicast
    let std_socket = match create_multicast_socket(multicast_group, port) {
        Ok(s) => s,
        Err(e) => {
            warn!(
                "LAN: could not bind multicast socket: {} — LAN discovery disabled",
                e
            );
            return;
        }
    };
    // Hand the socket to tokio; create_multicast_socket already set it
    // non-blocking, which `from_std` requires.
    let socket = match tokio::net::UdpSocket::from_std(std_socket) {
        Ok(s) => s,
        Err(e) => {
            warn!("LAN: tokio socket conversion failed: {}", e);
            return;
        }
    };
    // Shared between the sender task and the receiver loop below.
    let socket = Arc::new(socket);

    // Spawn sender
    let sender_ctx = Arc::clone(&ctx);
    let sender_socket = Arc::clone(&socket);
    let local_ip_str = local_ip.to_string();
    let dest = SocketAddr::new(IpAddr::V4(multicast_group), port);
    tokio::spawn(async move {
        // tokio intervals fire immediately on the first tick, so the first
        // announcement goes out right away rather than after one period.
        let mut ticker = tokio::time::interval(interval);
        loop {
            ticker.tick().await;
            // Snapshot the local service list inside a block so the std mutex
            // guard is dropped before any `.await` below.
            let services: Vec<AnnouncedService> = {
                let store = sender_ctx.services.lock().unwrap();
                store
                    .list()
                    .iter()
                    .map(|e| AnnouncedService {
                        name: e.name.clone(),
                        port: e.target_port,
                    })
                    .collect()
            };
            // Nothing registered yet: stay quiet instead of broadcasting noise.
            if services.is_empty() {
                continue;
            }
            let announcement = Announcement {
                instance_id,
                host: local_ip_str.clone(),
                services,
            };
            // Serialization or send failure is ignored: discovery is
            // best-effort and retries on the next tick.
            if let Ok(json) = serde_json::to_vec(&announcement) {
                let _ = sender_socket.send_to(&json, dest).await;
            }
        }
    });

    // Receiver loop
    let mut buf = vec![0u8; 4096];
    loop {
        let (len, src) = match socket.recv_from(&mut buf).await {
            Ok(r) => r,
            Err(e) => {
                debug!("LAN recv error: {}", e);
                continue;
            }
        };
        // Malformed or foreign datagrams on the group are silently dropped.
        let announcement: Announcement = match serde_json::from_slice(&buf[..len]) {
            Ok(a) => a,
            Err(_) => continue,
        };
        // Skip self-announcements
        if announcement.instance_id == instance_id {
            continue;
        }
        // The announced host string (not `src`) is trusted as the peer address.
        let peer_ip: IpAddr = match announcement.host.parse() {
            Ok(ip) => ip,
            Err(_) => continue,
        };
        let services: Vec<(String, u16)> = announcement
            .services
            .iter()
            .map(|s| (s.name.clone(), s.port))
            .collect();
        let count = services.len();
        ctx.lan_peers.lock().unwrap().update(peer_ip, &services);
        debug!(
            "LAN: {} services from {} (via {})",
            count, announcement.host, src
        );
    }
}
|
||||||
|
|
||||||
|
/// Builds a `std` UDP socket bound to `0.0.0.0:port` and joined to `group`.
///
/// Uses `socket2` because the standard library cannot set the reuse options
/// before binding, and those must be in place for several processes to share
/// the discovery port. The socket is made non-blocking so it can be handed
/// to `tokio::net::UdpSocket::from_std` by the caller.
fn create_multicast_socket(group: Ipv4Addr, port: u16) -> std::io::Result<std::net::UdpSocket> {
    use std::net::SocketAddrV4;

    // Bind the wildcard address so group datagrams arriving on any interface
    // are accepted.
    let addr = SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port);
    let socket = socket2::Socket::new(
        socket2::Domain::IPV4,
        socket2::Type::DGRAM,
        Some(socket2::Protocol::UDP),
    )?;
    // Reuse options must be set before bind() to take effect.
    socket.set_reuse_address(true)?;
    // SO_REUSEPORT does not exist on Windows, hence the cfg gate.
    #[cfg(unix)]
    socket.set_reuse_port(true)?;
    // Tokio requires the std socket to already be non-blocking.
    socket.set_nonblocking(true)?;
    socket.bind(&socket2::SockAddr::from(addr))?;
    // INADDR_ANY lets the OS pick the interface for the group membership.
    socket.join_multicast_v4(&group, &Ipv4Addr::UNSPECIFIED)?;
    Ok(socket.into())
}
|
||||||
@@ -6,6 +6,7 @@ pub mod config;
|
|||||||
pub mod ctx;
|
pub mod ctx;
|
||||||
pub mod forward;
|
pub mod forward;
|
||||||
pub mod header;
|
pub mod header;
|
||||||
|
pub mod lan;
|
||||||
pub mod override_store;
|
pub mod override_store;
|
||||||
pub mod packet;
|
pub mod packet;
|
||||||
pub mod proxy;
|
pub mod proxy;
|
||||||
|
|||||||
14
src/main.rs
14
src/main.rs
@@ -127,6 +127,7 @@ async fn main() -> numa::Result<()> {
|
|||||||
blocklist: Mutex::new(blocklist),
|
blocklist: Mutex::new(blocklist),
|
||||||
query_log: Mutex::new(QueryLog::new(1000)),
|
query_log: Mutex::new(QueryLog::new(1000)),
|
||||||
services: Mutex::new(service_store),
|
services: Mutex::new(service_store),
|
||||||
|
lan_peers: Mutex::new(numa::lan::PeerStore::new(config.lan.peer_timeout_secs)),
|
||||||
forwarding_rules,
|
forwarding_rules,
|
||||||
upstream,
|
upstream,
|
||||||
timeout: Duration::from_millis(config.upstream.timeout_ms),
|
timeout: Duration::from_millis(config.upstream.timeout_ms),
|
||||||
@@ -161,6 +162,10 @@ async fn main() -> numa::Result<()> {
|
|||||||
};
|
};
|
||||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mProxy\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m", schemes);
|
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mProxy\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m", schemes);
|
||||||
}
|
}
|
||||||
|
if config.lan.enabled {
|
||||||
|
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mLAN\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m",
|
||||||
|
format!("{}:{}", config.lan.multicast_group, config.lan.port));
|
||||||
|
}
|
||||||
if !ctx.forwarding_rules.is_empty() {
|
if !ctx.forwarding_rules.is_empty() {
|
||||||
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mRouting\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m",
|
eprintln!("\x1b[38;2;192;98;58m ║\x1b[0m \x1b[38;2;107;124;78mRouting\x1b[0m {:<30}\x1b[38;2;192;98;58m║\x1b[0m",
|
||||||
format!("{} conditional rules", ctx.forwarding_rules.len()));
|
format!("{} conditional rules", ctx.forwarding_rules.len()));
|
||||||
@@ -235,6 +240,15 @@ async fn main() -> numa::Result<()> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Spawn LAN service discovery
|
||||||
|
if config.lan.enabled {
|
||||||
|
let lan_ctx = Arc::clone(&ctx);
|
||||||
|
let lan_config = config.lan.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
numa::lan::start_lan_discovery(lan_ctx, &lan_config).await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
// UDP DNS listener
|
// UDP DNS listener
|
||||||
#[allow(clippy::infinite_loop)]
|
#[allow(clippy::infinite_loop)]
|
||||||
loop {
|
loop {
|
||||||
|
|||||||
20
src/proxy.rs
20
src/proxy.rs
@@ -135,11 +135,15 @@ async fn proxy_handler(State(state): State<ProxyState>, req: Request) -> axum::r
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let target_port = {
|
let (target_host, target_port) = {
|
||||||
let store = state.ctx.services.lock().unwrap();
|
let store = state.ctx.services.lock().unwrap();
|
||||||
match store.lookup(&service_name) {
|
if let Some(entry) = store.lookup(&service_name) {
|
||||||
Some(entry) => entry.target_port,
|
("localhost".to_string(), entry.target_port)
|
||||||
None => {
|
} else {
|
||||||
|
let mut peers = state.ctx.lan_peers.lock().unwrap();
|
||||||
|
match peers.lookup(&service_name) {
|
||||||
|
Some((ip, port)) => (ip.to_string(), port),
|
||||||
|
None => {
|
||||||
return (
|
return (
|
||||||
StatusCode::NOT_FOUND,
|
StatusCode::NOT_FOUND,
|
||||||
[(hyper::header::CONTENT_TYPE, "text/html; charset=utf-8")],
|
[(hyper::header::CONTENT_TYPE, "text/html; charset=utf-8")],
|
||||||
@@ -259,6 +263,7 @@ pre .str {{ color: #d48a5a }}
|
|||||||
),
|
),
|
||||||
)
|
)
|
||||||
.into_response()
|
.into_response()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@@ -268,9 +273,10 @@ pre .str {{ color: #d48a5a }}
|
|||||||
.path_and_query()
|
.path_and_query()
|
||||||
.map(|pq| pq.as_str())
|
.map(|pq| pq.as_str())
|
||||||
.unwrap_or("/");
|
.unwrap_or("/");
|
||||||
let target_uri: hyper::Uri = format!("http://localhost:{}{}", target_port, path_and_query)
|
let target_uri: hyper::Uri =
|
||||||
.parse()
|
format!("http://{}:{}{}", target_host, target_port, path_and_query)
|
||||||
.unwrap();
|
.parse()
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
// Check for upgrade request (WebSocket, etc.)
|
// Check for upgrade request (WebSocket, etc.)
|
||||||
let is_upgrade = req.headers().get(hyper::header::UPGRADE).is_some();
|
let is_upgrade = req.headers().get(hyper::header::UPGRADE).is_some();
|
||||||
|
|||||||
Reference in New Issue
Block a user