You've already forked directdnsonly
Compare commits
33 Commits
1d1c12b661
...
master
| Author | SHA1 | Date | |
|---|---|---|---|
| db60d808de | |||
| 0f417da204 | |||
| 3f6a061ffe | |||
| 0b31b75789 | |||
| 83fbb03cad | |||
| 5e9a6f19bd | |||
| 4a4b4f2b98 | |||
| 6e96e78376 | |||
| e8939bcd82 | |||
| d98f08a408 | |||
| fbb6220728 | |||
| f9907d2859 | |||
| d81ecd6bdd | |||
| 8c1c2b4abc | |||
| 22e64498ce | |||
| 143cf9c792 | |||
| 33f4f30b5f | |||
| b939bb5fa0 | |||
| 70ae81ee0d | |||
| b523b17f30 | |||
| 0e044b7dc2 | |||
| e0a119558d | |||
| ae1e89a236 | |||
| aac7b365a5 | |||
| 0903d78458 | |||
| 74c5f4012e | |||
| 807d6271f1 | |||
| bd46227364 | |||
| b8f12d0208 | |||
| 5c8bc2653c | |||
| 02536cd448 | |||
| 24877be037 | |||
| 6445cf49c0 |
28
.gitignore
vendored
28
.gitignore
vendored
@@ -1,6 +1,34 @@
|
|||||||
*.db
|
*.db
|
||||||
|
dist/
|
||||||
venv/
|
venv/
|
||||||
.venv
|
.venv
|
||||||
.idea
|
.idea
|
||||||
build
|
build
|
||||||
!build/.gitkeep
|
!build/.gitkeep
|
||||||
|
**/__pycache__/
|
||||||
|
*.pyc
|
||||||
|
*.pyo
|
||||||
|
*.pyd
|
||||||
|
*.egg-info
|
||||||
|
*.egg
|
||||||
|
*.log
|
||||||
|
*.DS_Store
|
||||||
|
*.swp
|
||||||
|
*.swo
|
||||||
|
*.bak
|
||||||
|
*.tmp
|
||||||
|
*.orig
|
||||||
|
*.coverage
|
||||||
|
*.cover
|
||||||
|
*.tox
|
||||||
|
*.dist-info
|
||||||
|
*.egg-info
|
||||||
|
*.mypy_cache
|
||||||
|
*.pytest_cache
|
||||||
|
/data/*
|
||||||
|
|
||||||
|
# Editor / tool settings — always local, never committed
|
||||||
|
.vscode/
|
||||||
|
.claude/
|
||||||
|
.env
|
||||||
|
*.env
|
||||||
|
|||||||
1
.python-version
Normal file
1
.python-version
Normal file
@@ -0,0 +1 @@
|
|||||||
|
3.11.12
|
||||||
64
Dockerfile
64
Dockerfile
@@ -1,11 +1,61 @@
|
|||||||
FROM pypy:slim-buster
|
FROM python:3.11.12-slim
|
||||||
|
|
||||||
RUN mkdir -p /opt/apikeyhandler/config
|
# Install system dependencies.
|
||||||
VOLUME /opt/apikeyhandler/config
|
# Both NSD and BIND are installed so the image works with any DNS backend type.
|
||||||
|
# The entrypoint detects which one is configured and starts only that daemon.
|
||||||
|
# CoreDNS MySQL users: neither daemon is started — the image is still usable.
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
bind9 \
|
||||||
|
bind9utils \
|
||||||
|
nsd \
|
||||||
|
dnsutils \
|
||||||
|
gcc \
|
||||||
|
python3-dev \
|
||||||
|
default-libmysqlclient-dev \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
COPY ./src/ /opt/apikeyhandler
|
# ---------------------------------------------------------------------------
|
||||||
WORKDIR /opt/apikeyhandler
|
# BIND setup
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
RUN mkdir -p /etc/named/zones && \
|
||||||
|
chown -R bind:bind /etc/named && \
|
||||||
|
chmod 755 /etc/named/zones
|
||||||
|
|
||||||
RUN pip install -r requirements.txt
|
COPY docker/named.conf.local /etc/bind/
|
||||||
|
COPY docker/named.conf.options /etc/bind/
|
||||||
|
RUN chown root:bind /etc/bind/named.conf.*
|
||||||
|
|
||||||
CMD pypy3 main.py
|
# ---------------------------------------------------------------------------
|
||||||
|
# NSD setup
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
RUN mkdir -p /etc/nsd/zones /etc/nsd/nsd.conf.d && \
|
||||||
|
chown -R nsd:nsd /etc/nsd && \
|
||||||
|
chmod 755 /etc/nsd/zones
|
||||||
|
|
||||||
|
COPY docker/nsd.conf /etc/nsd/nsd.conf
|
||||||
|
RUN chown nsd:nsd /etc/nsd/nsd.conf
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Application
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
WORKDIR /app
|
||||||
|
COPY pyproject.toml poetry.lock README.md ./
|
||||||
|
|
||||||
|
RUN pip install "poetry==2.1.2"
|
||||||
|
|
||||||
|
COPY directdnsonly ./directdnsonly
|
||||||
|
COPY schema ./schema
|
||||||
|
|
||||||
|
RUN poetry config virtualenvs.create false && \
|
||||||
|
poetry install
|
||||||
|
|
||||||
|
# Create data directories
|
||||||
|
RUN mkdir -p /app/data/queues /app/data/zones /app/logs && \
|
||||||
|
chmod -R 755 /app/data
|
||||||
|
|
||||||
|
# Start script
|
||||||
|
COPY docker/entrypoint.sh /entrypoint.sh
|
||||||
|
RUN chmod +x /entrypoint.sh
|
||||||
|
|
||||||
|
EXPOSE 2222 53/udp
|
||||||
|
CMD ["/entrypoint.sh"]
|
||||||
|
|||||||
@@ -1,16 +0,0 @@
|
|||||||
FROM centos:latest
|
|
||||||
ENV APP_NAME=rpmbuild
|
|
||||||
ENV VERSION=latest
|
|
||||||
|
|
||||||
RUN mkdir -p /tmp/build/rpm
|
|
||||||
|
|
||||||
WORKDIR /tmp/build/rpm
|
|
||||||
RUN dnf install -y --allowerasing gcc rpm-build rpm-devel \
|
|
||||||
rpmlint make bash coreutils \
|
|
||||||
diffutils patch rpmdevtools && \
|
|
||||||
dnf clean all && \
|
|
||||||
rm -Rf /var/dnf/cache && \
|
|
||||||
rpmdev-setuptree
|
|
||||||
|
|
||||||
VOLUME /tmp/build/rpm
|
|
||||||
CMD ["rpmbuild", "--define version ${VERSION}", "-bb", "${APP_NAME}.spec"]
|
|
||||||
@@ -1,55 +0,0 @@
|
|||||||
FROM python:3.7.9 as builder
|
|
||||||
# Allow Passing Version from CI
|
|
||||||
ARG VERSION
|
|
||||||
ENV LC_ALL=en_NZ.utf8
|
|
||||||
ENV LANG=en_NZ.utf8
|
|
||||||
ENV APP_NAME="directdnsonly"
|
|
||||||
|
|
||||||
RUN mkdir -p /tmp/build && apt-get update && \
|
|
||||||
apt-get install -y libgcc1-dbg libssl-dev
|
|
||||||
|
|
||||||
COPY src/ /tmp/build/
|
|
||||||
COPY requirements.txt /tmp/build
|
|
||||||
|
|
||||||
WORKDIR /tmp/build
|
|
||||||
|
|
||||||
WORKDIR /tmp/src
|
|
||||||
RUN wget https://github.com/NixOS/patchelf/releases/download/0.12/patchelf-0.12.tar.bz2 && \
|
|
||||||
tar xvf patchelf-0.12.tar.bz2 && \
|
|
||||||
cd /tmp/src/patchelf-0.12* && \
|
|
||||||
./configure --prefix="/usr" && \
|
|
||||||
make install
|
|
||||||
|
|
||||||
WORKDIR /tmp/build
|
|
||||||
RUN pip3 install -r requirements.txt && \
|
|
||||||
pyinstaller \
|
|
||||||
--hidden-import=json \
|
|
||||||
--hidden-import=pyopenssl \
|
|
||||||
--hidden-import=jaraco \
|
|
||||||
--hidden-import=cheroot \
|
|
||||||
--hidden-import=cheroot.ssl.pyopenssl \
|
|
||||||
--hidden-import=cheroot.ssl.builtin \
|
|
||||||
--noconfirm --onefile ${APP_NAME}.py && \
|
|
||||||
cd /tmp/build/dist && \
|
|
||||||
staticx ${APP_NAME} ./${APP_NAME}_static
|
|
||||||
|
|
||||||
RUN mkdir -p /tmp/approot && \
|
|
||||||
mkdir -p /tmp/approot/app && \
|
|
||||||
mkdir -p /tmp/approot/app/config && \
|
|
||||||
mkdir -p /tmp/approot/etc && \
|
|
||||||
mkdir -p /tmp/approot/tmp && \
|
|
||||||
mkdir -p /tmp/approot/data && \
|
|
||||||
mkdir -p /tmp/approot/lib/x86_64-linux-gnu && \
|
|
||||||
cp /tmp/build/config/app.yml /tmp/approot/app/config/app.yml && \
|
|
||||||
cp /tmp/build/dist/${APP_NAME}_static /tmp/approot/app/${APP_NAME} && \
|
|
||||||
cp /usr/lib/gcc/x86_64-linux-gnu/8/libgcc_s.so.1 /tmp/approot/lib/x86_64-linux-gnu/libgcc_s.so.1
|
|
||||||
|
|
||||||
FROM scratch
|
|
||||||
COPY --from=builder /tmp/approot /
|
|
||||||
COPY --from=builder /usr/share/zoneinfo /usr/share/zoneinfo
|
|
||||||
ENV TZ=Pacific/Auckland
|
|
||||||
WORKDIR /app
|
|
||||||
|
|
||||||
VOLUME /app/config /data
|
|
||||||
|
|
||||||
CMD ["/app/directdnsonly"]
|
|
||||||
690
README.md
Normal file
690
README.md
Normal file
@@ -0,0 +1,690 @@
|
|||||||
|
# DirectDNSOnly - DNS Management System
|
||||||
|
|
||||||
|
## Deployment Topologies
|
||||||
|
|
||||||
|
Three reference topologies are documented below. Choose the one that matches your infrastructure.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Topology A — Dual NSD/BIND Instances (High-Availability / Multi-Server)
|
||||||
|
|
||||||
|
Two independent DirectDNSOnly containers, each running a bundled DNS daemon (NSD by default, or BIND9). Both are registered as Extra DNS servers in the same DirectAdmin Multi-Server environment, so DA pushes every zone change to both simultaneously.
|
||||||
|
|
||||||
|
```
|
||||||
|
DirectAdmin Multi-Server
|
||||||
|
│
|
||||||
|
├─ POST /CMD_API_DNS_ADMIN ──▶ directdnsonly-1 (container, BIND backend)
|
||||||
|
│ │
|
||||||
|
│ Persistent Queue
|
||||||
|
│ ├─ writes zone file
|
||||||
|
│ ├─ reloads named
|
||||||
|
│ └─ retry on failure (exp. backoff)
|
||||||
|
│ (serves authoritative DNS on :53)
|
||||||
|
│
|
||||||
|
└─ POST /CMD_API_DNS_ADMIN ──▶ directdnsonly-2 (container, BIND backend)
|
||||||
|
│
|
||||||
|
Persistent Queue
|
||||||
|
├─ writes zone file
|
||||||
|
├─ reloads named
|
||||||
|
└─ retry on failure (exp. backoff)
|
||||||
|
(serves authoritative DNS on :53)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Each instance is completely independent** — no shared state, no cross-talk. Redundancy comes from DA pushing to both. If one container goes down, DA continues to push to the other.
|
||||||
|
|
||||||
|
#### Failure behaviour
|
||||||
|
|
||||||
|
| Scenario | What happens |
|
||||||
|
|---|---|
|
||||||
|
| One container down during DA push | DA cannot deliver; that instance misses the update. The retry queue inside that instance cannot help — the push never arrived. When the container recovers, it will serve stale zone data until DA re-pushes (next zone change triggers a new push). |
|
||||||
|
| BIND crashes but container stays up | The zone write lands in the persistent queue. The retry worker replays it with exponential backoff (30 s → 2 m → 5 m → 15 m → 30 m, up to 5 attempts). |
|
||||||
|
| Zone deleted from DA while instance was down | The reconciliation poller detects the orphan on the next pass and queues a delete, keeping the BIND instance clean without manual intervention. |
|
||||||
|
| Two instances diverge | No automatic cross-instance sync. Drift persists until DA re-pushes the affected zone (i.e. the next time that domain is touched in DA). |
|
||||||
|
|
||||||
|
> **DNS consistency note:** DirectAdmin pushes to each Extra DNS server sequentially, not atomically. If one instance is offline when a zone is changed, that instance will serve stale data until the next DA push for that zone. For workloads where split-brain DNS is unacceptable, use Topology B (single write path → multiple MySQL backends) instead.
|
||||||
|
|
||||||
|
#### `config/app.yml` — instance 1
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
app:
|
||||||
|
auth_username: directdnsonly
|
||||||
|
auth_password: your-secret
|
||||||
|
|
||||||
|
dns:
|
||||||
|
default_backend: bind
|
||||||
|
backends:
|
||||||
|
bind:
|
||||||
|
type: bind
|
||||||
|
enabled: true
|
||||||
|
zones_dir: /etc/named/zones
|
||||||
|
named_conf: /etc/bind/named.conf.local
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `docker-compose.yml` sketch — instance 1
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
directdnsonly-1:
|
||||||
|
image: guisea/directdnsonly:2.5.0
|
||||||
|
ports:
|
||||||
|
- "2222:2222" # DA pushes here
|
||||||
|
- "53:53/udp" # authoritative DNS
|
||||||
|
volumes:
|
||||||
|
- ./config:/app/config
|
||||||
|
- ./data:/app/data
|
||||||
|
```
|
||||||
|
|
||||||
|
Register both containers as separate Extra DNS entries in DA → DNS Administration → Extra DNS Servers, with the same credentials configured in each `config/app.yml`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Topology B — Single Instance, Multiple CoreDNS MySQL Backends (Multi-DC)
|
||||||
|
|
||||||
|
One DirectDNSOnly instance receives zone pushes from DirectAdmin and fans out to two (or more) CoreDNS MySQL databases in parallel. CoreDNS servers in each data centre read from their local database. The directdnsonly instance is the sole write path — it does **not** serve DNS itself.
|
||||||
|
|
||||||
|
```
|
||||||
|
DirectAdmin
|
||||||
|
│
|
||||||
|
└─ POST /CMD_API_DNS_ADMIN ──▶ directdnsonly (single container)
|
||||||
|
│
|
||||||
|
Persistent Queue (survives restarts)
|
||||||
|
zone_data stored to SQLite after each write
|
||||||
|
│
|
||||||
|
ThreadPoolExecutor (one thread per backend)
|
||||||
|
│ │
|
||||||
|
▼ ▼
|
||||||
|
coredns_mysql_dc1 coredns_mysql_dc2
|
||||||
|
(MySQL 10.0.0.80) (MySQL 10.0.1.29)
|
||||||
|
│ │
|
||||||
|
[success] [failure → retry queue]
|
||||||
|
│ │
|
||||||
|
▼ 30s/2m/5m/15m/30m backoff
|
||||||
|
CoreDNS (DC1) retry → coredns_mysql_dc2
|
||||||
|
serves :53 from DB
|
||||||
|
│
|
||||||
|
Reconciliation poller (every N minutes)
|
||||||
|
├─ orphan detection (zones removed from DA)
|
||||||
|
└─ healing pass: zone_exists() per backend
|
||||||
|
→ re-queue any backend missing a zone
|
||||||
|
using stored zone_data (no DA re-push needed)
|
||||||
|
```
|
||||||
|
|
||||||
|
Both MySQL backends are written **concurrently** within the same zone update. A slow or unreachable secondary does not block the primary write. Failed backends enter the retry queue automatically. The reconciliation healing pass provides a further safety net for prolonged outages.
|
||||||
|
|
||||||
|
#### Failure behaviour
|
||||||
|
|
||||||
|
| Scenario | What happens |
|
||||||
|
|---|---|
|
||||||
|
| One MySQL backend unreachable | Other backend(s) succeed immediately. Failed backend queued for retry with exponential backoff (30 s → 2 m → 5 m → 15 m → 30 m, up to 5 attempts). CoreDNS continues serving from its local JSON cache throughout. |
|
||||||
|
| MySQL backend down for hours | Retry queue exhausts. CoreDNS serves from cache the entire time — zero query downtime. On recovery, the reconciliation healing pass detects the backend is missing zones and re-pushes all of them using stored `zone_data` — no DA intervention required. |
|
||||||
|
| directdnsonly container restarts | Persistent queue survives. In-flight zone updates replay on startup. |
|
||||||
|
| directdnsonly container down during DA push | DA cannot deliver. Persistent queue on disk is intact; when the container comes back, it resumes processing any previously queued items. New pushes during downtime are lost at the DA level (DA does not retry). |
|
||||||
|
| Zone deleted from DA | Reconciliation poller detects orphan and queues delete across all backends. |
|
||||||
|
|
||||||
|
#### `config/app.yml`
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
app:
|
||||||
|
auth_username: directdnsonly
|
||||||
|
auth_password: your-secret
|
||||||
|
|
||||||
|
dns:
|
||||||
|
default_backend: coredns_mysql_dc1
|
||||||
|
backends:
|
||||||
|
coredns_mysql_dc1:
|
||||||
|
type: coredns_mysql
|
||||||
|
enabled: true
|
||||||
|
host: 10.0.0.80
|
||||||
|
port: 3306
|
||||||
|
database: coredns
|
||||||
|
username: coredns
|
||||||
|
password: your-db-password
|
||||||
|
|
||||||
|
coredns_mysql_dc2:
|
||||||
|
type: coredns_mysql
|
||||||
|
enabled: true
|
||||||
|
host: 10.0.1.29
|
||||||
|
port: 3306
|
||||||
|
database: coredns
|
||||||
|
username: coredns
|
||||||
|
password: your-db-password
|
||||||
|
```
|
||||||
|
|
||||||
|
Adding a third data centre is a single stanza in the config — no code changes required.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Topology C — Multi-Instance with Peer Sync (Most Robust)
|
||||||
|
|
||||||
|
Multiple independent DirectDNSOnly containers, each with a single local DNS backend (NSD or CoreDNS MySQL), registered as separate Extra DNS servers in DirectAdmin Multi-Server. Peer sync provides eventual consistency — if one instance misses a DA push while it is offline, it recovers the missing zone data from a peer on the next sync interval.
|
||||||
|
|
||||||
|
```
|
||||||
|
DirectAdmin Multi-Server
|
||||||
|
│
|
||||||
|
├─ POST /CMD_API_DNS_ADMIN ──▶ directdnsonly-syd (NSD or CoreDNS MySQL)
|
||||||
|
│ │
|
||||||
|
│ Persistent Queue + zone_data store
|
||||||
|
│ ├─ writes zone file / MySQL
|
||||||
|
│ ├─ reloads daemon
|
||||||
|
│ └─ retry on failure
|
||||||
|
│ │
|
||||||
|
│ ◀──── peer sync ────▶
|
||||||
|
│ │
|
||||||
|
└─ POST /CMD_API_DNS_ADMIN ──▶ directdnsonly-mlb (NSD or CoreDNS MySQL)
|
||||||
|
│
|
||||||
|
Persistent Queue + zone_data store
|
||||||
|
├─ writes zone file / MySQL
|
||||||
|
├─ reloads daemon
|
||||||
|
└─ retry on failure
|
||||||
|
```
|
||||||
|
|
||||||
|
**Why this is the most robust topology:**
|
||||||
|
|
||||||
|
- DA pushes to each instance independently — no single point of failure
|
||||||
|
- No load balancer in the write path — a dead LB cannot silence both instances
|
||||||
|
- Each instance serves DNS immediately from its own daemon
|
||||||
|
- If SYD misses a push while offline, it pulls the newer zone from MLB on the next peer sync (default 15 minutes)
|
||||||
|
- Peer sync is best-effort eventual consistency — deliberately simple, no consensus protocol
|
||||||
|
|
||||||
|
#### Failure behaviour
|
||||||
|
|
||||||
|
| Scenario | What happens |
|
||||||
|
|---|---|
|
||||||
|
| One instance down during DA push | Other instance(s) receive and serve the update. When the downed instance recovers, peer sync detects the stale/missing `zone_updated_at` and pulls the newer zone data from a peer. |
|
||||||
|
| Both instances down during DA push | Both miss the push. When they recover, they sync from each other — the most recently updated peer wins per zone. No DA re-push needed. |
|
||||||
|
| Peer offline | Peer sync silently skips unreachable peers. Syncs resume automatically when the peer recovers. |
|
||||||
|
| Zone deleted from DA | Reconciliation poller detects the orphan and queues the delete on each instance independently. |
|
||||||
|
|
||||||
|
#### `config/app.yml` — instance syd
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
app:
|
||||||
|
auth_username: directdnsonly
|
||||||
|
auth_password: your-secret
|
||||||
|
|
||||||
|
dns:
|
||||||
|
default_backend: nsd
|
||||||
|
backends:
|
||||||
|
nsd:
|
||||||
|
type: nsd
|
||||||
|
enabled: true
|
||||||
|
zones_dir: /etc/nsd/zones
|
||||||
|
nsd_conf: /etc/nsd/nsd.conf.d/zones.conf
|
||||||
|
|
||||||
|
peer_sync:
|
||||||
|
enabled: true
|
||||||
|
interval_minutes: 15
|
||||||
|
peers:
|
||||||
|
- url: http://directdnsonly-mlb:2222
|
||||||
|
username: directdnsonly
|
||||||
|
password: your-secret
|
||||||
|
|
||||||
|
reconciliation:
|
||||||
|
enabled: true
|
||||||
|
interval_minutes: 60
|
||||||
|
directadmin_servers:
|
||||||
|
- hostname: da.syd.example.com
|
||||||
|
port: 2222
|
||||||
|
username: admin
|
||||||
|
password: da-secret
|
||||||
|
ssl: true
|
||||||
|
```
|
||||||
|
|
||||||
|
Register each container as a separate Extra DNS server entry in DA → DNS Administration → Extra DNS Servers with the same credentials.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Topology Comparison
|
||||||
|
|
||||||
|
| | Topology A — Dual NSD/BIND | Topology B — CoreDNS MySQL | Topology C — Multi-Instance + Peer Sync |
|
||||||
|
|---|---|---|---|
|
||||||
|
| **DNS server** | NSD or BIND9 (bundled) | CoreDNS (separate, reads MySQL) | NSD or CoreDNS MySQL (per instance) |
|
||||||
|
| **Write path** | DA → each instance independently | DA → single instance → all backends | DA → each instance independently |
|
||||||
|
| **Zone storage** | Zone files on container disk | MySQL database rows | Zone files or MySQL + SQLite zone_data store |
|
||||||
|
| **DA registration** | Two Extra DNS server entries | One Extra DNS server entry | One entry per instance |
|
||||||
|
| **Redundancy model** | Independent app+DNS units | One app, N database backends | Independent instances + peer sync |
|
||||||
|
| **Transient backend failure** | Retry queue (exp. backoff, 5 attempts) | Retry queue (exp. backoff, 5 attempts) | Retry queue (exp. backoff, 5 attempts) |
|
||||||
|
| **Prolonged backend outage** | No auto-recovery — waits for next DA push | Reconciler healing pass re-pushes all missing zones | Peer sync pulls missed zones from a healthy peer |
|
||||||
|
| **Container down during push** | Zone missed entirely | Zone missed at DA level | Zone missed at DA level; recovered via peer sync |
|
||||||
|
| **Cross-node consistency** | No sync between instances | All backends share same write path | Peer sync provides eventual consistency |
|
||||||
|
| **Orphan detection** | Yes — reconciler | Yes — reconciler | Yes — reconciler (per instance) |
|
||||||
|
| **External DB required** | No | Yes (MySQL per CoreDNS node) | No (NSD) or Yes (CoreDNS MySQL) |
|
||||||
|
| **Horizontal scaling** | Add DA Extra DNS entries + containers | Add backend stanzas in config | Add DA Extra DNS entries + containers + peer list |
|
||||||
|
| **Best for** | Simple HA, no external DB | Best overall — resilient writes (retry queue) + resilient reads (CoreDNS cache fallback), no daemon reloads, scales to thousands of zones | Most robust HA — resilient at every layer, survives extended outages without DA re-push |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## DNS Server Resource and Scale Guide
|
||||||
|
|
||||||
|
### BIND9 vs CoreDNS MySQL — resource profile
|
||||||
|
|
||||||
|
| | BIND9 (bundled) | CoreDNS + MySQL |
|
||||||
|
|---|---|---|
|
||||||
|
| **Base memory** | ~13–15 MB | ~20–30 MB (CoreDNS binary) + MySQL process |
|
||||||
|
| **Per-zone overhead** | ~300 bytes per resource record in memory | Schema rows in MySQL; CoreDNS itself holds no zone state |
|
||||||
|
| **100-zone deployment** | ~30–60 MB total | ~80–150 MB (CoreDNS + MySQL combined) |
|
||||||
|
| **500-zone deployment** | ~100–300 MB total | ~100–200 MB (zone data lives in MySQL, not CoreDNS) |
|
||||||
|
| **Zone reload** | `rndc reload <zone>` — per-zone is fast; full reload blocks queries for seconds at large counts | No reload needed — CoreDNS queries MySQL at resolution time |
|
||||||
|
| **Zone update latency** | File write + `rndc reload` — typically <100 ms for a single zone | Write to MySQL — immediately visible to CoreDNS on next query |
|
||||||
|
| **CPU on reload** | Spikes on full `rndc reload`; grows linearly with zone count | No reload CPU spike; MySQL write is the only cost |
|
||||||
|
| **Query throughput** | High — zones loaded into memory | Slightly lower — each query hits MySQL (mitigated by MySQL query cache / connection pooling) |
|
||||||
|
| **Scale ceiling** | Degrades past ~1 000 zones: memory climbs, full reloads take 120 s+ | Scales with MySQL — thousands of zones with no DNS-process impact |
|
||||||
|
|
||||||
|
**Rule of thumb:** Below ~300 zones BIND9 and CoreDNS MySQL are broadly comparable. Above ~500 zones, CoreDNS MySQL has a significant advantage because zone data lives entirely in the database — adding a new zone costs one MySQL INSERT, not a daemon reload.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Bundled DNS daemons — NSD and BIND9
|
||||||
|
|
||||||
|
The container image ships with **both NSD and BIND9** installed. The entrypoint reads your config and starts only the daemon that matches the configured backend type. CoreDNS MySQL deployments start neither.
|
||||||
|
|
||||||
|
**NSD (Name Server Daemon)** from NLnet Labs is the default recommendation:
|
||||||
|
|
||||||
|
| | BIND9 | NSD | Knot DNS |
|
||||||
|
|---|---|---|---|
|
||||||
|
| **Design focus** | Everything (authoritative + recursive + DNSSEC + ...) | Authoritative only | Authoritative only |
|
||||||
|
| **Base memory** | ~13–15 MB | ~5–10 MB | ~10–15 MB |
|
||||||
|
| **500-zone memory** | ~100–300 MB | <100 MB (estimated) | ~100–200 MB (3× zone text size) |
|
||||||
|
| **Zone update** | `rndc reload <zone>` | `nsd-control reload` | `knotc zone-reload` (atomic via RCU — zero query interruption) |
|
||||||
|
| **Config format** | `named.conf` / zone files | `nsd.conf` / zone files (nearly identical format) | `knot.conf` / zone files |
|
||||||
|
| **Docker image** | ~150–200 MB | ~30–50 MB Alpine | ~40–60 MB Alpine |
|
||||||
|
| **Recursive queries** | Yes (if configured) | No | No |
|
||||||
|
| **Throughput** | Baseline | ~2–5× BIND9 | ~5–10× BIND9 (2.2 Mqps at 32 cores) |
|
||||||
|
| **Production use** | Wide adoption | TLD servers (`.nl`, `.se`), major registries | CZ.NIC, Cloudflare internal testing |
|
||||||
|
|
||||||
|
**NSD** would slot almost directly into the existing BIND backend implementation — zone files have the same RFC 1035 format, and `nsd-control reload` is the equivalent of `rndc reload`. The main implementation difference is the daemon config file (`nsd.conf` vs `named.conf`) and the absence of `named.conf.local`-style zone includes (NSD uses pattern-based config).
|
||||||
|
|
||||||
|
**Knot DNS** is worth considering if seamless zone updates matter: its RCU (Read-Copy-Update) mechanism serves the old zone to in-flight queries while atomically swapping in the new one — there is no window where queries see a partially-loaded zone. It is meaningfully heavier than NSD at moderate zone counts but the best performer at high scale.
|
||||||
|
|
||||||
|
**Summary recommendation:**
|
||||||
|
|
||||||
|
- **Any scale, external DB available:** CoreDNS MySQL ([cybercinch fork](https://github.com/cybercinch/coredns_mysql_extend)) wins at every zone count. Connection pooling, JSON cache fallback, health monitoring, and zero-downtime operation during DB maintenance make it the most resilient choice regardless of size. No daemon reload ever needed — a zone write is a MySQL INSERT.
|
||||||
|
- **No external DB, simplicity first:** NSD (bundled) — lightweight, fast, authoritative-only, same RFC 1035 zone file format as BIND.
|
||||||
|
- **Need zero-interruption zone swaps:** Knot DNS (RCU — serves old zone to in-flight queries while atomically swapping in the new one).
|
||||||
|
- **Need an HTTP API for zone management:** PowerDNS Authoritative with its native HTTP API.
|
||||||
|
|
||||||
|
> **Note:** Knot DNS and PowerDNS backends are **not implemented** in directdnsonly — they are listed here as architectural context only. Implemented backends: `nsd`, `bind`, `coredns_mysql`. Pull requests for additional backends are welcome.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## CoreDNS MySQL Backend — Required Fork
|
||||||
|
|
||||||
|
The `coredns_mysql` backend writes zones to a MySQL database that CoreDNS reads
|
||||||
|
at query time. **Vanilla CoreDNS with a stock MySQL plugin is not sufficient** —
|
||||||
|
out of the box it does not act as a fully authoritative server, does not return
|
||||||
|
NS records in the additional section, does not set the AA flag, and does not
|
||||||
|
handle wildcard records.
|
||||||
|
|
||||||
|
This project is designed to work with a patched fork that resolves all of those
|
||||||
|
issues and adds production-grade resilience:
|
||||||
|
|
||||||
|
**[cybercinch/coredns_mysql_extend](https://github.com/cybercinch/coredns_mysql_extend)**
|
||||||
|
|
||||||
|
| Feature | Detail |
|
||||||
|
|---|---|
|
||||||
|
| **Fully authoritative** | Correct AA flag, NXDOMAIN on misses, NS records in the additional section |
|
||||||
|
| **Wildcard records** | `*` entries served correctly |
|
||||||
|
| **Connection pooling** | Configurable MySQL connection management — efficient under load |
|
||||||
|
| **Degraded operation** | Automatic fallback to a local JSON cache when MySQL is unavailable — DNS keeps serving |
|
||||||
|
| **Smart caching** | Intelligent per-record cache management reduces per-query MySQL round-trips |
|
||||||
|
| **Health monitoring** | Continuous database health checks with configurable intervals |
|
||||||
|
| **Zero downtime** | DNS continues serving during database maintenance windows |
|
||||||
|
|
||||||
|
**Why this matters for Topology B:** directdnsonly's retry queue handles the write side during a MySQL outage — the CoreDNS fork handles the read side. Between them, neither writes nor queries are dropped during transient database failures.
|
||||||
|
|
||||||
|
Use the NSD or BIND backend if you want a zero-dependency setup with no custom CoreDNS build required.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Features
|
||||||
|
- Multi-backend DNS management (NSD, BIND, CoreDNS MySQL)
|
||||||
|
- Parallel backend dispatch — all enabled backends updated simultaneously
|
||||||
|
- Persistent queue — zone updates survive restarts
|
||||||
|
- Automatic record-count verification and drift reconciliation
|
||||||
|
- Peer sync — eventual consistency between directdnsonly instances
|
||||||
|
- Thread-safe operations
|
||||||
|
- Loguru-based logging
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
```bash
|
||||||
|
poetry install
|
||||||
|
poetry run dadns
|
||||||
|
```
|
||||||
|
|
||||||
|
## Concurrent Multi-Backend Processing
|
||||||
|
|
||||||
|
DirectDNSOnly propagates every zone update to all enabled backends in parallel using a
|
||||||
|
queue-based worker architecture.
|
||||||
|
|
||||||
|
### Architecture
|
||||||
|
|
||||||
|
```
|
||||||
|
DirectAdmin zone push
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
Persistent Queue (persist-queue, survives restarts)
|
||||||
|
│
|
||||||
|
▼
|
||||||
|
save_queue_worker (single daemon thread, sequential dequeue)
|
||||||
|
│
|
||||||
|
├─ 1 backend enabled ──▶ direct call (no thread overhead)
|
||||||
|
│
|
||||||
|
└─ N backends enabled ──▶ ThreadPoolExecutor(max_workers=N)
|
||||||
|
│
|
||||||
|
┌─────┴─────┐
|
||||||
|
▼ ▼
|
||||||
|
bind coredns_dc1 ...
|
||||||
|
(concurrent, as_completed)
|
||||||
|
```
|
||||||
|
|
||||||
|
### How it works
|
||||||
|
|
||||||
|
1. **Queue consumer** — A single background thread drains the persistent save
|
||||||
|
queue. Items are processed one zone at a time, in order.
|
||||||
|
|
||||||
|
2. **Single-backend path** — When only one backend is enabled, the zone is
|
||||||
|
written directly with no extra thread spawning.
|
||||||
|
|
||||||
|
3. **Parallel-backend path** — When two or more backends are enabled, a
|
||||||
|
`ThreadPoolExecutor` with one thread per backend dispatches all writes
|
||||||
|
simultaneously. Results are collected with `as_completed`, so a slow or
|
||||||
|
failing backend does not block the others.
|
||||||
|
|
||||||
|
4. **Record verification** — After each successful write, the backend's stored
|
||||||
|
record count is compared against the authoritative count parsed from the
|
||||||
|
source zone file (the DirectAdmin zone). Mismatches trigger automatic
|
||||||
|
reconciliation: extra records are removed and the count is re-verified.
|
||||||
|
|
||||||
|
5. **Batch telemetry** — The worker tracks batch start time and emits a summary
|
||||||
|
log on queue drain, including zones processed, failures, elapsed time, and
|
||||||
|
throughput (zones/sec).
|
||||||
|
|
||||||
|
### Log output (example)
|
||||||
|
|
||||||
|
```
|
||||||
|
INFO | 📥 Batch started — 12 zone(s) queued for processing
|
||||||
|
DEBUG | Processing example.com across 2 backends concurrently: bind, coredns_dc1
|
||||||
|
DEBUG | Parallel processing of example.com across 2 backends completed in 43ms
|
||||||
|
SUCCESS | 📦 Batch complete — 12/12 zone(s) processed successfully in 1.8s (6.7 zones/sec)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Adding backends
|
||||||
|
|
||||||
|
Enable additional backends in `config/app.yml`. Each enabled backend is
|
||||||
|
automatically included in the parallel dispatch — no code changes required.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
dns:
|
||||||
|
backends:
|
||||||
|
bind:
|
||||||
|
enabled: true
|
||||||
|
coredns_dc1:
|
||||||
|
enabled: true
|
||||||
|
host: "mysql-dc1"
|
||||||
|
coredns_dc2:
|
||||||
|
enabled: true # adds a third parallel worker automatically
|
||||||
|
host: "mysql-dc2"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
DirectDNSOnly uses [Vyper](https://github.com/sn3d/vyper-py) for configuration. Settings are resolved in this priority order (highest wins):
|
||||||
|
|
||||||
|
1. **Environment variables** — `DADNS_` prefix, dots replaced with underscores (e.g. `DADNS_APP_AUTH_PASSWORD`)
|
||||||
|
2. **Config file** — `app.yml` searched in `/etc/directdnsonly`, `.`, `./config`, then the bundled default
|
||||||
|
3. **Built-in defaults** (shown in the table below)
|
||||||
|
|
||||||
|
**A config file is entirely optional.** Every scalar setting can be provided through environment variables alone.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Configuration Reference
|
||||||
|
|
||||||
|
#### Core
|
||||||
|
|
||||||
|
| Config key | Environment variable | Default | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `log_level` | `DADNS_LOG_LEVEL` | `info` | Log verbosity: `debug`, `info`, `warning`, `error` |
|
||||||
|
| `timezone` | `DADNS_TIMEZONE` | `Pacific/Auckland` | Timezone for log timestamps |
|
||||||
|
| `queue_location` | `DADNS_QUEUE_LOCATION` | `./data/queues` | Path for the persistent zone-update queue |
|
||||||
|
|
||||||
|
#### App (HTTP server)
|
||||||
|
|
||||||
|
| Config key | Environment variable | Default | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `app.auth_username` | `DADNS_APP_AUTH_USERNAME` | `directdnsonly` | Basic auth username for all API routes (including `/internal`) |
|
||||||
|
| `app.auth_password` | `DADNS_APP_AUTH_PASSWORD` | `changeme` | Basic auth password — **always override in production** |
|
||||||
|
| `app.listen_port` | `DADNS_APP_LISTEN_PORT` | `2222` | TCP port the HTTP server binds to |
|
||||||
|
| `app.ssl_enable` | `DADNS_APP_SSL_ENABLE` | `false` | Enable TLS on the HTTP server |
|
||||||
|
| `app.proxy_support` | `DADNS_APP_PROXY_SUPPORT` | `true` | Trust `X-Forwarded-For` from a reverse proxy |
|
||||||
|
| `app.proxy_support_base` | `DADNS_APP_PROXY_SUPPORT_BASE` | `http://127.0.0.1` | Trusted proxy base address |
|
||||||
|
|
||||||
|
#### Datastore (internal SQLite)
|
||||||
|
|
||||||
|
| Config key | Environment variable | Default | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `datastore.type` | `DADNS_DATASTORE_TYPE` | `sqlite` | Internal datastore type (only `sqlite` supported) |
|
||||||
|
| `datastore.db_location` | `DADNS_DATASTORE_DB_LOCATION` | `data/directdns.db` | Path to the SQLite database file |
|
||||||
|
|
||||||
|
#### DNS backends — BIND
|
||||||
|
|
||||||
|
| Config key | Environment variable | Default | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `dns.default_backend` | `DADNS_DNS_DEFAULT_BACKEND` | _(none)_ | Name of the primary backend (used for status/health reporting) |
|
||||||
|
| `dns.backends.bind.enabled` | `DADNS_DNS_BACKENDS_BIND_ENABLED` | `false` | Enable the bundled BIND9 backend |
|
||||||
|
| `dns.backends.bind.zones_dir` | `DADNS_DNS_BACKENDS_BIND_ZONES_DIR` | `/etc/named/zones` | Directory where zone files are written |
|
||||||
|
| `dns.backends.bind.named_conf` | `DADNS_DNS_BACKENDS_BIND_NAMED_CONF` | `/etc/named.conf.local` | `named.conf` include file managed by directdnsonly |
|
||||||
|
|
||||||
|
#### DNS backends — NSD
|
||||||
|
|
||||||
|
| Config key | Environment variable | Default | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `dns.backends.nsd.enabled` | `DADNS_DNS_BACKENDS_NSD_ENABLED` | `false` | Enable the NSD backend |
|
||||||
|
| `dns.backends.nsd.zones_dir` | `DADNS_DNS_BACKENDS_NSD_ZONES_DIR` | `/etc/nsd/zones` | Directory where zone files are written |
|
||||||
|
| `dns.backends.nsd.nsd_conf` | `DADNS_DNS_BACKENDS_NSD_NSD_CONF` | `/etc/nsd/nsd.conf.d/zones.conf` | NSD zone include file managed by directdnsonly |
|
||||||
|
|
||||||
|
#### DNS backends — CoreDNS MySQL
|
||||||
|
|
||||||
|
The built-in env var mapping targets the backend named `coredns_mysql`. For multiple named CoreDNS backends (e.g. `coredns_dc1`, `coredns_dc2`) you must use a config file — see [Multi-backend via config file](#multi-backend-via-config-file) below.
|
||||||
|
|
||||||
|
| Config key | Environment variable | Default | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `dns.backends.coredns_mysql.enabled` | `DADNS_DNS_BACKENDS_COREDNS_MYSQL_ENABLED` | `false` | Enable the CoreDNS MySQL backend |
|
||||||
|
| `dns.backends.coredns_mysql.host` | `DADNS_DNS_BACKENDS_COREDNS_MYSQL_HOST` | `localhost` | MySQL host |
|
||||||
|
| `dns.backends.coredns_mysql.port` | `DADNS_DNS_BACKENDS_COREDNS_MYSQL_PORT` | `3306` | MySQL port |
|
||||||
|
| `dns.backends.coredns_mysql.database` | `DADNS_DNS_BACKENDS_COREDNS_MYSQL_DATABASE` | `coredns` | MySQL database name |
|
||||||
|
| `dns.backends.coredns_mysql.username` | `DADNS_DNS_BACKENDS_COREDNS_MYSQL_USERNAME` | `coredns` | MySQL username |
|
||||||
|
| `dns.backends.coredns_mysql.password` | `DADNS_DNS_BACKENDS_COREDNS_MYSQL_PASSWORD` | _(empty)_ | MySQL password |
|
||||||
|
|
||||||
|
#### Reconciliation poller
|
||||||
|
|
||||||
|
| Config key | Environment variable | Default | Description |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `reconciliation.enabled` | `DADNS_RECONCILIATION_ENABLED` | `false` | Enable the background reconciliation poller |
|
||||||
|
| `reconciliation.dry_run` | `DADNS_RECONCILIATION_DRY_RUN` | `false` | Log orphans but do not queue deletes (safe first-run mode) |
|
||||||
|
| `reconciliation.interval_minutes` | `DADNS_RECONCILIATION_INTERVAL_MINUTES` | `60` | How often the poller runs |
|
||||||
|
| `reconciliation.verify_ssl` | `DADNS_RECONCILIATION_VERIFY_SSL` | `true` | Verify TLS certificates when querying DirectAdmin |
|
||||||
|
|
||||||
|
> The `reconciliation.directadmin_servers` list (DA hostnames, credentials) requires a config file — it cannot be expressed as simple env vars.
|
||||||
|
|
||||||
|
#### Peer sync
|
||||||
|
|
||||||
|
| Config key / Environment variable | Default | Description |
|
||||||
|
|---|---|---|
|
||||||
|
| `peer_sync.enabled` / `DADNS_PEER_SYNC_ENABLED` | `false` | Enable background peer-to-peer zone sync |
|
||||||
|
| `peer_sync.interval_minutes` / `DADNS_PEER_SYNC_INTERVAL_MINUTES` | `15` | How often each peer is polled |
|
||||||
|
|
||||||
|
For a **single peer** (the typical two-node Topology C setup) the peer can be configured entirely via env vars — no config file required:
|
||||||
|
|
||||||
|
| Environment variable | Default | Description |
|
||||||
|
|---|---|---|
|
||||||
|
| `DADNS_PEER_SYNC_PEER_URL` | _(unset)_ | URL of the single peer (e.g. `http://ddo-2:2222`). When set, this peer is automatically appended to the peers list. |
|
||||||
|
| `DADNS_PEER_SYNC_PEER_USERNAME` | `directdnsonly` | Basic auth username for the peer |
|
||||||
|
| `DADNS_PEER_SYNC_PEER_PASSWORD` | _(empty)_ | Basic auth password for the peer |
|
||||||
|
|
||||||
|
> For **multiple peers**, use a config file with the `peer_sync.peers` list. A peer defined via env var is deduped — if the same URL already appears in the config file it will not be added twice.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Environment-variable-only setup
|
||||||
|
|
||||||
|
No config file is needed for single-backend deployments. Pass all settings as container environment variables.
|
||||||
|
|
||||||
|
#### Topology A/C — NSD backend (env vars only, recommended)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
DADNS_APP_AUTH_PASSWORD=my-strong-secret
|
||||||
|
DADNS_DNS_DEFAULT_BACKEND=nsd
|
||||||
|
DADNS_DNS_BACKENDS_NSD_ENABLED=true
|
||||||
|
DADNS_DNS_BACKENDS_NSD_ZONES_DIR=/etc/nsd/zones
|
||||||
|
DADNS_DNS_BACKENDS_NSD_NSD_CONF=/etc/nsd/nsd.conf.d/zones.conf
|
||||||
|
DADNS_QUEUE_LOCATION=/app/data/queues
|
||||||
|
DADNS_DATASTORE_DB_LOCATION=/app/data/directdns.db
|
||||||
|
```
|
||||||
|
|
||||||
|
`docker-compose.yml` snippet (Topology C — two instances with peer sync via config file):
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
directdnsonly-syd:
|
||||||
|
image: guisea/directdnsonly:2.5.0
|
||||||
|
ports:
|
||||||
|
- "2222:2222"
|
||||||
|
- "53:53/udp"
|
||||||
|
environment:
|
||||||
|
DADNS_APP_AUTH_PASSWORD: my-strong-secret
|
||||||
|
DADNS_DNS_DEFAULT_BACKEND: nsd
|
||||||
|
DADNS_DNS_BACKENDS_NSD_ENABLED: "true"
|
||||||
|
DADNS_PEER_SYNC_ENABLED: "true"
|
||||||
|
DADNS_PEER_SYNC_PEER_URL: http://directdnsonly-mlb:2222
|
||||||
|
DADNS_PEER_SYNC_PEER_USERNAME: directdnsonly
|
||||||
|
DADNS_PEER_SYNC_PEER_PASSWORD: my-strong-secret
|
||||||
|
volumes:
|
||||||
|
- syd-data:/app/data
|
||||||
|
|
||||||
|
directdnsonly-mlb:
|
||||||
|
image: guisea/directdnsonly:2.5.0
|
||||||
|
ports:
|
||||||
|
- "2223:2222"
|
||||||
|
- "54:53/udp"
|
||||||
|
environment:
|
||||||
|
DADNS_APP_AUTH_PASSWORD: my-strong-secret
|
||||||
|
DADNS_DNS_DEFAULT_BACKEND: nsd
|
||||||
|
DADNS_DNS_BACKENDS_NSD_ENABLED: "true"
|
||||||
|
DADNS_PEER_SYNC_ENABLED: "true"
|
||||||
|
DADNS_PEER_SYNC_PEER_URL: http://directdnsonly-syd:2222
|
||||||
|
DADNS_PEER_SYNC_PEER_USERNAME: directdnsonly
|
||||||
|
DADNS_PEER_SYNC_PEER_PASSWORD: my-strong-secret
|
||||||
|
volumes:
|
||||||
|
- mlb-data:/app/data
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
syd-data:
|
||||||
|
mlb-data:
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Topology A — BIND backend (env vars only)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# docker run / docker-compose environment:
|
||||||
|
DADNS_APP_AUTH_USERNAME=directdnsonly
|
||||||
|
DADNS_APP_AUTH_PASSWORD=my-strong-secret
|
||||||
|
DADNS_DNS_DEFAULT_BACKEND=bind
|
||||||
|
DADNS_DNS_BACKENDS_BIND_ENABLED=true
|
||||||
|
DADNS_DNS_BACKENDS_BIND_ZONES_DIR=/etc/named/zones
|
||||||
|
DADNS_DNS_BACKENDS_BIND_NAMED_CONF=/etc/named/named.conf.local
|
||||||
|
DADNS_QUEUE_LOCATION=/app/data/queues
|
||||||
|
DADNS_DATASTORE_DB_LOCATION=/app/data/directdns.db
|
||||||
|
```
|
||||||
|
|
||||||
|
`docker-compose.yml` snippet:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
services:
|
||||||
|
directdnsonly:
|
||||||
|
image: guisea/directdnsonly:2.5.0
|
||||||
|
ports:
|
||||||
|
- "2222:2222"
|
||||||
|
- "53:53/udp"
|
||||||
|
environment:
|
||||||
|
DADNS_APP_AUTH_PASSWORD: my-strong-secret
|
||||||
|
DADNS_DNS_DEFAULT_BACKEND: bind
|
||||||
|
DADNS_DNS_BACKENDS_BIND_ENABLED: "true"
|
||||||
|
DADNS_DNS_BACKENDS_BIND_ZONES_DIR: /etc/named/zones
|
||||||
|
DADNS_DNS_BACKENDS_BIND_NAMED_CONF: /etc/named/named.conf.local
|
||||||
|
volumes:
|
||||||
|
- ddo-data:/app/data
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
ddo-data:
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Topology B — single CoreDNS MySQL backend (env vars only)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
DADNS_APP_AUTH_PASSWORD=my-strong-secret
|
||||||
|
DADNS_DNS_DEFAULT_BACKEND=coredns_mysql
|
||||||
|
DADNS_DNS_BACKENDS_COREDNS_MYSQL_ENABLED=true
|
||||||
|
DADNS_DNS_BACKENDS_COREDNS_MYSQL_HOST=mysql.dc1.internal
|
||||||
|
DADNS_DNS_BACKENDS_COREDNS_MYSQL_PORT=3306
|
||||||
|
DADNS_DNS_BACKENDS_COREDNS_MYSQL_DATABASE=coredns
|
||||||
|
DADNS_DNS_BACKENDS_COREDNS_MYSQL_USERNAME=coredns
|
||||||
|
DADNS_DNS_BACKENDS_COREDNS_MYSQL_PASSWORD=db-secret
|
||||||
|
DADNS_QUEUE_LOCATION=/app/data/queues
|
||||||
|
DADNS_DATASTORE_DB_LOCATION=/app/data/directdns.db
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Multi-backend via config file
|
||||||
|
|
||||||
|
When you need **multiple named backends** (e.g. two CoreDNS MySQL instances in different data centres), **peer sync**, or **reconciliation with DA servers**, use a config file mounted at `/app/config/app.yml` (or `/etc/directdnsonly/app.yml`):
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
app:
|
||||||
|
auth_username: directdnsonly
|
||||||
|
auth_password: my-strong-secret # or use DADNS_APP_AUTH_PASSWORD
|
||||||
|
|
||||||
|
dns:
|
||||||
|
default_backend: coredns_dc1
|
||||||
|
backends:
|
||||||
|
coredns_dc1:
|
||||||
|
type: coredns_mysql
|
||||||
|
enabled: true
|
||||||
|
host: 10.0.0.80
|
||||||
|
port: 3306
|
||||||
|
database: coredns
|
||||||
|
username: coredns
|
||||||
|
password: db-secret-dc1
|
||||||
|
|
||||||
|
coredns_dc2:
|
||||||
|
type: coredns_mysql
|
||||||
|
enabled: true
|
||||||
|
host: 10.0.1.29
|
||||||
|
port: 3306
|
||||||
|
database: coredns
|
||||||
|
username: coredns
|
||||||
|
password: db-secret-dc2
|
||||||
|
|
||||||
|
reconciliation:
|
||||||
|
enabled: true
|
||||||
|
dry_run: false
|
||||||
|
interval_minutes: 60
|
||||||
|
verify_ssl: true
|
||||||
|
directadmin_servers:
|
||||||
|
- hostname: da1.example.com
|
||||||
|
port: 2222
|
||||||
|
username: admin
|
||||||
|
password: da-secret
|
||||||
|
ssl: true
|
||||||
|
|
||||||
|
peer_sync:
|
||||||
|
enabled: true
|
||||||
|
interval_minutes: 15
|
||||||
|
peers:
|
||||||
|
- url: http://ddo-2:2222
|
||||||
|
username: directdnsonly
|
||||||
|
password: my-strong-secret
|
||||||
|
```
|
||||||
|
|
||||||
|
Credentials in the config file can still be overridden by env vars — for example, `DADNS_APP_AUTH_PASSWORD` overrides `app.auth_password` regardless of what the file says.
|
||||||
105
app.py
105
app.py
@@ -1,105 +0,0 @@
|
|||||||
from flask import Flask, request
|
|
||||||
import mmap
|
|
||||||
import re
|
|
||||||
|
|
||||||
app = Flask(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
@app.route('/')
def hello_world():
    """Root endpoint: returns a static greeting (simple liveness probe)."""
    greeting = 'Hello World!'
    return greeting
|
|
||||||
|
|
||||||
|
|
||||||
@app.route('/CMD_API_LOGIN_TEST')
def login_test():
    """DirectAdmin login test endpoint.

    Dumps every request parameter, the headers, and the auth object to
    stdout for debugging, then reports success in DirectAdmin's
    `key=value&...` response format.
    """
    values = request.values
    for name in values:
        print(values.get(name))
        print(values.getlist(name))
    print(request.headers)
    print(request.authorization)

    return 'error=0&text=Login OK&details=none'
|
|
||||||
|
|
||||||
|
|
||||||
@app.route('/CMD_API_DNS_ADMIN', methods=['GET', 'POST'])
def domain_admin():
    """DirectAdmin multi-server DNS endpoint.

    Dispatches on the `action` request parameter:
      - exists:  DirectAdmin asks whether the domain is in the cluster
      - delete:  domain is being removed from DNS (not yet implemented)
      - rawsave: add/update a domain; the zone file arrives as the raw body

    Returns a DirectAdmin-style `key=value&...` response string.
    """
    print(str(request.data, encoding="utf-8"))
    print(request.values.get('action'))
    action = request.values.get('action')
    if action == 'exists':
        # DirectAdmin is checking whether the domain is in the cluster
        return 'result: exists=1'
    if action == 'delete':
        # Domain is being removed from the DNS
        hostname = request.values.get('hostname')
        username = request.values.get('username')
        domain = request.values.get('select0')
        # BUG (original): no deletion was performed and nothing was
        # returned, which made Flask raise a 500 on every delete request.
        # Acknowledge the request explicitly until deletion is implemented.
        return 'error=0&text=Delete acknowledged (not implemented)'

    if action == 'rawsave':
        # DirectAdmin wants to add/update a domain
        hostname = request.values.get('hostname')
        username = request.values.get('username')
        domain = request.values.get('domain')

        # New zones are first registered in the index; the zone file is
        # written unconditionally (the original duplicated this call in
        # both branches of the if/else).
        if not check_zone_exists(str(domain)):
            put_zone_index(str(domain))
        write_zone_file(str(domain), request.data.decode("utf-8"))
        return 'error=0'

    # Unknown action: the original fell off the end and returned None,
    # which Flask treats as an error.
    return 'error=1&text=Unsupported action'
|
|
||||||
|
|
||||||
|
|
||||||
def create_zone_index():
    """Rebuild the zone index from the named.conf zone declarations.

    Scans each line of ``named_conf`` for a quoted zone name and writes
    one domain per line to ``zone_index_file``, truncating any previous
    index. Lines without a quoted name (blank lines, braces, comments)
    are skipped — the original called ``.group(0)`` on the result of
    ``re.search`` unconditionally and crashed with AttributeError on the
    first non-matching line.
    """
    # Capture whatever sits between double quotes followed by whitespace,
    # e.g. `zone "example.com" {` -> example.com
    regex = r"(?<=\")(?P<domain>.*)(?=\"\s)"

    with open(zone_index_file, 'w+') as f:
        with open(named_conf, 'r') as named_file:
            for line in named_file:
                match = re.search(regex, line)
                if match is None:
                    # No quoted zone name on this line — skip it.
                    continue
                f.write(match.group(0) + "\n")
|
|
||||||
|
|
||||||
|
|
||||||
def put_zone_index(zone_name):
    """Append a newly added zone to the index file.

    Writes one domain per line, matching the format produced by
    create_zone_index(). The original omitted the trailing newline, so
    successive entries ran together on a single line and corrupted the
    index.
    """
    with open(zone_index_file, 'a+') as f:
        # Append mode: existing entries are preserved.
        f.write(zone_name + "\n")
|
|
||||||
|
|
||||||
|
|
||||||
def write_zone_file(zone_name, data):
    """Write zone ``data`` to ``<zones_dir>/<zone_name>.db``.

    ``zone_name`` originates from an HTTP request and is spliced into a
    filesystem path, so reject names containing path separators or parent
    references to prevent writing outside ``zones_dir``.

    Raises:
        ValueError: if ``zone_name`` could escape ``zones_dir``.
    """
    if '/' in zone_name or '\\' in zone_name or '..' in zone_name:
        raise ValueError("invalid zone name: %r" % zone_name)
    with open(zones_dir + '/' + zone_name + '.db', 'w') as f:
        f.write(data)
|
|
||||||
|
|
||||||
|
|
||||||
def check_zone_exists(zone_name):
    """Return True if ``zone_name`` is present as a whole entry in the index.

    The original used an mmap substring search, which false-positived on
    partial matches (e.g. looking up "ample.com" matched an index entry of
    "example.com"). Compare complete lines instead; the index is written
    one domain per line by create_zone_index()/put_zone_index().
    """
    try:
        with open(zone_index_file, 'r') as f:
            for line in f:
                if line.strip() == zone_name:
                    return True
    except FileNotFoundError:
        # No index yet means no zones have been registered.
        return False
    return False
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Runtime configuration: these module-level globals are read by the
    # zone helpers above. NOTE(review): they are only assigned when the
    # script is executed directly — importing this module (e.g. under a
    # WSGI server) leaves zones_dir/zone_index_file/named_conf undefined
    # and the handlers would fail with NameError. Confirm `python app.py`
    # is the only intended entry point.
    zones_dir = "/etc/pdns/zones"
    zone_index_file = "/etc/pdns/zones/.index"
    named_conf = "/etc/pdns/named.conf"
    # Rebuild the zone index from named.conf before serving requests.
    create_zone_index()

    # Listen on all interfaces (Flask's default port, 5000).
    app.run(host="0.0.0.0")
|
|
||||||
1
directdnsonly/__init__.py
Normal file
1
directdnsonly/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# Package initialization
|
||||||
17
directdnsonly/__main__.py
Normal file
17
directdnsonly/__main__.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
def run():
    """Entry point for both `python -m directdnsonly` and the `dadns`
    console script.

    main.py uses short-form imports (from app.*, from worker) that
    resolve relative to the directdnsonly/ package directory, so that
    directory is pushed to the front of sys.path before main is imported
    — this keeps main.py unchanged for either invocation style.
    """
    package_dir = os.path.dirname(__file__)
    sys.path.insert(0, package_dir)

    from main import main

    main()


if __name__ == "__main__":
    run()
|
||||||
18
directdnsonly/app/__init__.py
Normal file
18
directdnsonly/app/__init__.py
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
from loguru import logger
|
||||||
|
import sys
|
||||||
|
from directdnsonly.config import config
|
||||||
|
|
||||||
|
|
||||||
|
def configure_logging():
    """Install the application's loguru sinks.

    Adds two sinks after removing loguru's default handler:
      - a colourised stderr sink at the configured `log_level`
      - a rotating (10 MB) file sink under logs/, kept 30 days, at DEBUG

    The config documents lowercase level names (default `info`), but
    loguru's level lookup is case-sensitive and its built-in levels are
    registered in uppercase, so `logger.add(..., level="info")` raises
    ValueError — normalise to uppercase here.
    """
    logger.remove()
    logger.add(
        sys.stderr,
        level=str(config.get("log_level")).upper(),
        format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
    )
    logger.add(
        "logs/directdnsonly_{time}.log",
        rotation="10 MB",
        retention="30 days",
        level="DEBUG",
    )
|
||||||
1
directdnsonly/app/api/__init__.py
Normal file
1
directdnsonly/app/api/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
# Package initialization
|
||||||
188
directdnsonly/app/api/admin.py
Normal file
188
directdnsonly/app/api/admin.py
Normal file
@@ -0,0 +1,188 @@
|
|||||||
|
import cherrypy
|
||||||
|
from urllib.parse import urlencode, parse_qs
|
||||||
|
from loguru import logger
|
||||||
|
from directdnsonly.app.utils import (
|
||||||
|
check_zone_exists,
|
||||||
|
check_parent_domain_owner,
|
||||||
|
get_domain_record,
|
||||||
|
get_parent_domain_record,
|
||||||
|
)
|
||||||
|
from directdnsonly.app.utils.zone_parser import validate_and_normalize_zone
|
||||||
|
|
||||||
|
|
||||||
|
class DNSAdminAPI:
    """CherryPy handler implementing the DirectAdmin multi-server DNS API.

    Incoming requests are validated and then pushed onto persistent
    queues (save_queue / delete_queue) for asynchronous processing by the
    worker; handlers never touch the DNS backends directly.
    """

    def __init__(self, save_queue, delete_queue, backend_registry):
        # Queues consumed by the background worker.
        self.save_queue = save_queue
        self.delete_queue = delete_queue
        # Registry of configured DNS backends (held for introspection).
        self.backend_registry = backend_registry

    @cherrypy.expose
    def index(self):
        # Plain-text landing page for the API root.
        return "DNS Admin API - Available endpoints: /CMD_API_DNS_ADMIN"

    @cherrypy.expose
    def CMD_API_LOGIN_TEST(self):
        """DirectAdmin login test — confirms credentials are valid"""
        # DirectAdmin expects a urlencoded key=value response body.
        return urlencode({"error": 0, "text": "Login OK"})

    @cherrypy.expose
    def CMD_API_DNS_ADMIN(self, **params):
        """Handle both DirectAdmin-style API calls and raw zone file uploads.

        GET is routed to the `exists` lookup; POST carries an `action`
        (`rawsave` or `delete`) with parameters in the query string and/or
        body. Any handler exception is converted to a 400 with the error
        text in DirectAdmin's urlencoded response format.
        """
        try:
            if cherrypy.request.method == "GET":
                return self._handle_exists(params)

            if cherrypy.request.method != "POST":
                cherrypy.response.status = 405
                return urlencode({"error": 1, "text": "Method not allowed"})

            # Parse parameters from both query string and body
            body_params = {}
            if cherrypy.request.body:
                content_type = cherrypy.request.headers.get("Content-Type", "")

                if "application/x-www-form-urlencoded" in content_type:
                    raw_body = cherrypy.request.body.read()
                    if raw_body:
                        body_params = parse_qs(raw_body.decode("utf-8"))
                        # parse_qs yields lists; unwrap single-valued keys.
                        body_params = {
                            k: v[0] if len(v) == 1 else v
                            for k, v in body_params.items()
                        }
                elif "text/plain" in content_type:
                    # DirectAdmin posts the raw zone file as text/plain.
                    body_params = {
                        "zone_file": cherrypy.request.body.read().decode("utf-8")
                    }

            # Combine parameters (body overrides query)
            all_params = {**params, **body_params}
            logger.debug(f"Request parameters: {all_params}")

            if "zone_file" not in all_params:
                logger.debug(
                    "No zone file provided. Maybe in body as DirectAdmin does?"
                )
                # Grab from body
                # NOTE(review): if the body was already read by the
                # content-type branches above, this second read() returns
                # an empty string — confirm the stream is still unread for
                # the content types that reach this point.
                all_params["zone_file"] = str(cherrypy.request.body.read(), "utf-8")
                logger.debug("Read zone file from body :)")

            # Required parameters
            action = all_params.get("action")
            domain = all_params.get("domain")

            if not action:
                # DirectAdmin sends an initial request without an action
                # parameter as a connectivity check — respond with success
                logger.debug("Received request with no action — connectivity check")
                return urlencode({"error": 0, "text": "OK"})
            if not domain:
                raise ValueError("Missing 'domain' parameter")

            # Handle different actions
            if action == "rawsave":
                return self._handle_rawsave(domain, all_params)
            elif action == "delete":
                return self._handle_delete(domain, all_params)
            else:
                raise ValueError(f"Unsupported action: {action}")

        except Exception as e:
            # Surface validation/processing errors to the caller as a 400.
            logger.error(f"API error: {str(e)}")
            cherrypy.response.status = 400
            return urlencode({"error": 1, "text": str(e)})

    def _handle_exists(self, params: dict):
        """Handle GET action=exists — domain and optional parent domain lookup.

        Response `exists` values: 0 = not found, 1 = domain found,
        2 = only a parent domain is found (when check_for_parent_domain
        was requested).
        """
        action = params.get("action")
        if action != "exists":
            cherrypy.response.status = 400
            return urlencode({"error": 1, "text": f"Unsupported GET action: {action}"})

        domain = params.get("domain")
        if not domain:
            cherrypy.response.status = 400
            return urlencode({"error": 1, "text": "Missing 'domain' parameter"})

        # Any non-empty value for check_for_parent_domain enables the
        # parent lookup.
        check_parent = bool(params.get("check_for_parent_domain"))

        domain_exists = check_zone_exists(domain)
        parent_exists = check_parent_domain_owner(domain) if check_parent else False

        if not domain_exists and not parent_exists:
            return urlencode({"error": 0, "exists": 0})

        if domain_exists:
            record = get_domain_record(domain)
            return urlencode(
                {
                    "error": 0,
                    "exists": 1,
                    "details": f"Domain exists on {record.hostname}",
                }
            )

        # parent match only
        parent_record = get_parent_domain_record(domain)
        return urlencode(
            {
                "error": 0,
                "exists": 2,
                "details": f"Parent Domain exists on {parent_record.hostname}",
            }
        )

    def _handle_rawsave(self, domain: str, params: dict):
        """Process zone file saves.

        Validates/normalizes the zone content, then enqueues the update
        for the background worker. Raises ValueError when no zone file
        content is present (caller converts this to a 400).
        """
        zone_data = params.get("zone_file")
        if not zone_data:
            raise ValueError("Missing zone file content")

        # Validation happens synchronously so the caller gets an error
        # for a malformed zone; the actual backend write is asynchronous.
        normalized_zone = validate_and_normalize_zone(zone_data, domain)
        logger.info(f"Validated zone for {domain}")

        self.save_queue.put(
            {
                "domain": domain,
                "zone_file": normalized_zone,
                "hostname": params.get("hostname", ""),
                "username": params.get("username", ""),
                "client_ip": cherrypy.request.remote.ip,
            }
        )

        logger.success(f"Queued zone update for {domain}")
        return urlencode({"error": 0})

    def _handle_delete(self, domain: str, params: dict):
        """Process zone deletions by enqueueing them for the worker."""
        self.delete_queue.put(
            {
                "domain": domain,
                "hostname": params.get("hostname", ""),
                "username": params.get("username", ""),
                "client_ip": cherrypy.request.remote.ip,
            }
        )

        logger.success(f"Queued deletion for {domain}")
        return urlencode({"error": 0})

    @cherrypy.expose
    def queue_status(self):
        """Debug endpoint for queue monitoring.

        NOTE(review): this returns a bare dict; CherryPy does not JSON-
        serialize dict responses without the json_out tool — confirm the
        tool is enabled for this path or that the endpoint is debug-only.
        """
        return {
            "save_queue_size": self.save_queue.qsize(),
            "delete_queue_size": self.delete_queue.qsize(),
            "last_save_item": self._get_last_item(self.save_queue),
            "last_delete_item": self._get_last_item(self.delete_queue),
        }

    @staticmethod
    def _get_last_item(queue):
        """Helper to safely get last queue item.

        Returns a descriptive string when the queue implementation does
        not track its last item, or when accessing it fails.
        """
        try:
            if hasattr(queue, "last_item"):
                return queue.last_item
            return "Last item tracking not available"
        except Exception:
            return "Error retrieving last item"
|
||||||
24
directdnsonly/app/api/health.py
Normal file
24
directdnsonly/app/api/health.py
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
import cherrypy
|
||||||
|
from loguru import logger
|
||||||
|
|
||||||
|
|
||||||
|
class HealthAPI:
    """Liveness endpoint for the HTTP app and the configured DNS backends."""

    def __init__(self, backend_registry):
        # Registry exposing get_available_backends() -> {name: factory}.
        self.registry = backend_registry

    @cherrypy.expose
    def health(self):
        """GET /health — probe every available backend.

        Each backend factory is instantiated and probed with a cheap
        zone_exists("test") call; a truthy result marks it "active".
        A probe that raises (e.g. its datastore is unreachable) now marks
        that backend "unavailable" instead of propagating and failing the
        whole endpoint with a 500, as the original did.

        Returns a dict: {"status": "OK", "backends": [{"name", "status"}]}.
        """
        status = {"status": "OK", "backends": []}

        for name, backend in self.registry.get_available_backends().items():
            try:
                alive = bool(backend().zone_exists("test"))
            except Exception as exc:
                logger.warning(f"Health probe failed for backend {name}: {exc}")
                alive = False
            status["backends"].append(
                {
                    "name": name,
                    "status": "active" if alive else "unavailable",
                }
            )

        logger.debug("Health check performed")
        return status
|
||||||
96
directdnsonly/app/api/internal.py
Normal file
96
directdnsonly/app/api/internal.py
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
import cherrypy
|
||||||
|
import json
|
||||||
|
from loguru import logger
|
||||||
|
from sqlalchemy import select
|
||||||
|
from directdnsonly.app.db import connect
|
||||||
|
from directdnsonly.app.db.models import Domain
|
||||||
|
|
||||||
|
|
||||||
|
class InternalAPI:
    """Peer-to-peer zone_data exchange endpoints.

    Used by PeerSyncWorker to replicate zone_data between directdnsonly
    instances so each node can independently heal its local backends.

    All routes require peer_sync basic auth credentials, which are
    configured separately from the main DirectAdmin-facing credentials
    (peer_sync.auth_username / peer_sync.auth_password).
    """

    def __init__(self, peer_syncer=None):
        # Optional PeerSyncWorker; only needed for the /internal/peers route.
        self._peer_syncer = peer_syncer

    @cherrypy.expose
    def zones(self, domain=None):
        """Return zone metadata or zone_data for a specific domain.

        GET /internal/zones
            Returns a JSON array of {domain, zone_updated_at, hostname, username}
            for all domains that have stored zone_data.

        GET /internal/zones?domain=example.com
            Returns {domain, zone_data, zone_updated_at, hostname, username}
            for the requested domain, or 404 if not found / no zone_data.
        """
        cherrypy.response.headers["Content-Type"] = "application/json"
        # One short-lived session per request; closed in the finally block.
        session = connect()
        try:
            if domain:
                # Only rows with stored zone_data are eligible for sync.
                record = session.execute(
                    select(Domain)
                    .filter_by(domain=domain)
                    .where(Domain.zone_data.isnot(None))
                ).scalar_one_or_none()
                if not record:
                    cherrypy.response.status = 404
                    return json.dumps({"error": "not found"}).encode()
                # Responses are encoded to bytes for the CherryPy body.
                return json.dumps(
                    {
                        "domain": record.domain,
                        "zone_data": record.zone_data,
                        # ISO-8601 so peers can compare freshness; None when
                        # the timestamp column is unset.
                        "zone_updated_at": (
                            record.zone_updated_at.isoformat()
                            if record.zone_updated_at
                            else None
                        ),
                        "hostname": record.hostname,
                        "username": record.username,
                    }
                ).encode()
            else:
                records = session.execute(
                    select(Domain).where(Domain.zone_data.isnot(None))
                ).scalars().all()
                # Listing omits zone_data itself — peers fetch it per-domain.
                return json.dumps(
                    [
                        {
                            "domain": r.domain,
                            "zone_updated_at": (
                                r.zone_updated_at.isoformat()
                                if r.zone_updated_at
                                else None
                            ),
                            "hostname": r.hostname,
                            "username": r.username,
                        }
                        for r in records
                    ]
                ).encode()
        except Exception as exc:
            # Never leak internals to peers; log and return a generic 500.
            logger.error(f"[internal] Error serving /internal/zones: {exc}")
            cherrypy.response.status = 500
            return json.dumps({"error": "internal server error"}).encode()
        finally:
            session.close()

    @cherrypy.expose
    def peers(self):
        """Return the list of peer URLs this node knows about.

        GET /internal/peers
        Returns a JSON array of URL strings. Used by other nodes during
        sync to discover new cluster members (gossip-lite mesh expansion).
        Returns an empty list when no peer syncer was supplied.
        """
        cherrypy.response.headers["Content-Type"] = "application/json"
        urls = self._peer_syncer.get_peer_urls() if self._peer_syncer else []
        return json.dumps(urls).encode()
|
||||||
82
directdnsonly/app/api/status.py
Normal file
82
directdnsonly/app/api/status.py
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
"""Operational status endpoint — aggregates queue, worker, reconciler, and peer health."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
|
||||||
|
import cherrypy
|
||||||
|
from sqlalchemy import func, select
|
||||||
|
|
||||||
|
from directdnsonly.app.db import connect
|
||||||
|
from directdnsonly.app.db.models import Domain
|
||||||
|
|
||||||
|
|
||||||
|
class StatusAPI:
    """Exposes GET /status as a JSON health/status document.

    Combines WorkerManager.queue_status() output with a live zone count
    from the datastore into a single document that a UI or monitoring
    system can poll.

    Overall ``status`` values:
    - ``ok``       — all workers alive, no dead-letters, all peers healthy
    - ``degraded`` — retries pending, dead-letters present, or a peer is unhealthy
    - ``error``    — a core worker thread is not alive
    """

    def __init__(self, worker_manager):
        self._wm = worker_manager

    @cherrypy.expose
    def index(self):
        cherrypy.response.headers["Content-Type"] = "application/json"
        return json.dumps(self._build(), default=str).encode()

    # -- internal helpers ----------------------------------------------

    def _build(self) -> dict:
        """Assemble the full status document from a queue_status snapshot."""
        snapshot = self._wm.queue_status()
        total_zones = self._zone_count()

        return {
            "status": self._compute_overall(snapshot),
            "queues": {
                "save": snapshot.get("save_queue_size", 0),
                "delete": snapshot.get("delete_queue_size", 0),
                "retry": snapshot.get("retry_queue_size", 0),
                "dead_letters": snapshot.get("dead_letters", 0),
            },
            "workers": {
                "save": snapshot.get("save_worker_alive"),
                "delete": snapshot.get("delete_worker_alive"),
                "retry_drain": snapshot.get("retry_worker_alive"),
            },
            "reconciler": snapshot.get("reconciler", {}),
            "peer_sync": snapshot.get("peer_sync", {}),
            "zones": {"total": total_zones},
        }

    @staticmethod
    def _zone_count() -> int:
        """Best-effort zone count; returns 0 if the datastore is unavailable."""
        session = connect()
        try:
            count = session.execute(select(func.count(Domain.id))).scalar()
            return count or 0
        except Exception:
            return 0
        finally:
            session.close()

    @staticmethod
    def _compute_overall(qs: dict) -> str:
        """Fold a queue_status snapshot down to ok / degraded / error."""
        save_alive = qs.get("save_worker_alive")
        delete_alive = qs.get("delete_worker_alive")
        if not (save_alive and delete_alive):
            return "error"

        retries_pending = qs.get("retry_queue_size", 0) > 0
        dead_letters = qs.get("dead_letters", 0) > 0
        peers_degraded = qs.get("peer_sync", {}).get("degraded", 0) > 0
        if retries_pending or dead_letters or peers_degraded:
            return "degraded"
        return "ok"
|
||||||
91
directdnsonly/app/backends/__init__.py
Normal file
91
directdnsonly/app/backends/__init__.py
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
from typing import Dict, Type, Optional
|
||||||
|
from .base import DNSBackend
|
||||||
|
from .bind import BINDBackend
|
||||||
|
from .coredns_mysql import CoreDNSMySQLBackend
|
||||||
|
from .nsd import NSDBackend
|
||||||
|
from directdnsonly.config import config
|
||||||
|
from loguru import logger
|
||||||
|
|
||||||
|
|
||||||
|
class BackendRegistry:
    """Lazily builds and caches DNS backend instances from ``dns.backends`` config.

    Each entry under ``dns.backends`` is keyed by a unique instance name and
    must carry a ``type`` (one of the registered backend types) and
    ``enabled: true`` to be constructed. Construction happens on the first
    call to :meth:`get_available_backends` and the results are cached.
    """

    def __init__(self):
        # Map of backend type name -> implementing class.
        self._backend_types = {
            "bind": BINDBackend,
            "coredns_mysql": CoreDNSMySQLBackend,
            "nsd": NSDBackend,
        }
        # Successfully constructed instances, keyed by configured instance name.
        self._backend_instances: Dict[str, DNSBackend] = {}
        self._initialized = False

    def _initialize_backends(self):
        """Initialize and cache all enabled backend instances (idempotent)."""
        if self._initialized:
            return

        try:
            logger.debug("Attempting to load backend configurations")
            dns_config = config.get("dns")
            if not dns_config:
                logger.warning("No 'dns' configuration found")
                self._initialized = True
                return

            backend_configs = dns_config.get("backends")
            if not backend_configs:
                logger.warning("No 'dns.backends' configuration found")
                self._initialized = True
                return

            logger.debug(f"Found backend configs: {backend_configs}")

            for instance_name, instance_config in backend_configs.items():
                logger.debug(f"Processing backend instance: {instance_name}")
                backend_type = instance_config.get("type")

                if not backend_type:
                    logger.warning(
                        f"No type specified for backend instance: {instance_name}"
                    )
                    continue

                if backend_type not in self._backend_types:
                    logger.warning(
                        f"Unknown backend type '{backend_type}' for instance: {instance_name}"
                    )
                    continue

                backend_class = self._backend_types[backend_type]
                if not backend_class.is_available():
                    logger.warning(
                        f"Backend {backend_type} is not available for instance: {instance_name}"
                    )
                    continue

                # Backends are opt-in: missing 'enabled' means disabled.
                if not instance_config.get("enabled", False):
                    logger.debug(f"Backend instance {instance_name} is disabled")
                    continue

                logger.debug(
                    f"Initializing backend instance {instance_name} of type {backend_type}"
                )
                try:
                    # Fix: propagate the registry key into the backend config so
                    # DNSBackend.instance_name reflects the configured instance
                    # name rather than falling back to the bare type name.
                    # A copy is used so the shared config mapping is not mutated.
                    backend_config = dict(instance_config)
                    backend_config.setdefault("instance_name", instance_name)
                    backend = backend_class(backend_config)
                    self._backend_instances[instance_name] = backend
                    logger.info(
                        f"Successfully initialized backend instance: {instance_name}"
                    )
                except Exception as e:
                    # One bad backend must not prevent the others from loading.
                    logger.error(
                        f"Failed to initialize backend instance {instance_name}: {e}"
                    )

        except Exception as e:
            logger.error(f"Error loading backend configurations: {e}")

        self._initialized = True

    def get_available_backends(self) -> Dict[str, DNSBackend]:
        """Return cached backend instances, initializing on first call"""
        self._initialize_backends()
        return self._backend_instances
|
||||||
75
directdnsonly/app/backends/base.py
Normal file
75
directdnsonly/app/backends/base.py
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import List, Optional, Dict, Any, Tuple
|
||||||
|
|
||||||
|
|
||||||
|
class DNSBackend(ABC):
    """Abstract interface implemented by every DNS backend.

    A backend receives raw BIND-format zone text and is responsible for
    materialising it in its own storage, deleting zones, reloading the
    serving process where applicable, and (optionally) verifying or
    reconciling its record set against the authoritative source.
    """

    def __init__(self, config: Dict[str, Any]):
        # Per-instance configuration mapping as loaded from dns.backends.
        self.config = config
        # Unique instance identifier; falls back to the backend type name
        # when the config does not supply an explicit 'instance_name'.
        self.instance_name = config.get("instance_name", self.get_name())

    @classmethod
    @abstractmethod
    def get_name(cls) -> str:
        """Return the backend type name"""
        pass

    @property
    def instance_id(self) -> str:
        """Return the unique instance identifier"""
        return self.instance_name

    @classmethod
    @abstractmethod
    def is_available(cls) -> bool:
        """Return True when this backend's runtime dependencies are usable."""
        pass

    @abstractmethod
    def write_zone(self, zone_name: str, zone_data: str) -> bool:
        """Persist the given BIND zone text; return True on success."""
        pass

    @abstractmethod
    def delete_zone(self, zone_name: str) -> bool:
        """Remove the zone from the backend; return True on success."""
        pass

    @abstractmethod
    def reload_zone(self, zone_name: Optional[str] = None) -> bool:
        """Reload a single zone, or all zones when zone_name is None."""
        pass

    @abstractmethod
    def zone_exists(self, zone_name: str) -> bool:
        """Return True when the zone is present in this backend."""
        pass

    def verify_zone_record_count(
        self, zone_name: str, expected_count: int
    ) -> Tuple[bool, int]:
        """Verify the record count in this backend matches the expected count
        from the source zone file.

        Optional capability: subclasses that cannot count records keep this
        default, which raises NotImplementedError.

        Args:
            zone_name: The zone to verify
            expected_count: The number of records parsed from the source zone

        Returns:
            Tuple of (matches: bool, actual_count: int)
        """
        raise NotImplementedError(
            f"Backend {self.get_name()} does not implement record count verification"
        )

    def reconcile_zone_records(
        self, zone_name: str, zone_data: str
    ) -> Tuple[bool, int]:
        """Reconcile backend records against the authoritative BIND zone from
        DirectAdmin. Any records in the backend that are not present in the
        source zone will be removed.

        Optional capability: subclasses without record-level storage keep
        this default, which raises NotImplementedError.

        Args:
            zone_name: The zone to reconcile
            zone_data: The raw BIND zone file content (authoritative source)

        Returns:
            Tuple of (success: bool, records_removed: int)
        """
        raise NotImplementedError(
            f"Backend {self.get_name()} does not implement zone reconciliation"
        )
||||||
124
directdnsonly/app/backends/bind.py
Normal file
124
directdnsonly/app/backends/bind.py
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
from loguru import logger
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional
|
||||||
|
from .base import DNSBackend
|
||||||
|
|
||||||
|
|
||||||
|
class BINDBackend(DNSBackend):
    """DNS backend that writes RFC 1035 zone files for BIND (named).

    Zone files are written to ``zones_dir`` as ``<zone>.db`` and the server
    is reloaded via ``rndc``. Zone registration can be regenerated into
    ``named_conf`` with :meth:`update_named_conf`.
    """

    @classmethod
    def get_name(cls) -> str:
        """Return the backend type name used in configuration."""
        return "bind"

    @classmethod
    def is_available(cls) -> bool:
        """Return True when the ``named`` binary is present and reports a version."""
        try:
            result = subprocess.run(["named", "-v"], capture_output=True, text=True)
            if result.returncode == 0:
                logger.info(f"BIND available: {result.stdout.splitlines()[0]}")
                return True
            return False
        except FileNotFoundError:
            logger.warning("BIND/named not found in PATH")
            return False

    def __init__(self, config: Dict):
        """Create the backend and prepare zones_dir / named_conf.

        Required config keys: ``zones_dir`` and ``named_conf``.
        Raises on unrecoverable filesystem errors.
        """
        # Fix: initialise the base class so self.config / self.instance_name
        # are set — every other backend implementation calls this, and
        # without it BIND instances lack the common attributes.
        super().__init__(config)

        self.zones_dir = Path(config["zones_dir"])
        self.named_conf = Path(config["named_conf"])

        # Safe directory creation handling
        try:
            # Check if it's a symlink first — a symlinked zones dir is
            # treated as already provisioned and left alone.
            if self.zones_dir.is_symlink():
                logger.debug(f"{self.zones_dir} is already a symlink")
            elif not self.zones_dir.exists():
                self.zones_dir.mkdir(parents=True, mode=0o755)
                logger.debug(f"Created zones directory: {self.zones_dir}")
            else:
                logger.debug(f"Directory already exists: {self.zones_dir}")

            # Ensure proper permissions regardless of how the dir came to be.
            os.chmod(self.zones_dir, 0o755)
            logger.debug(f"Using zones directory: {self.zones_dir}")

        except FileExistsError:
            # Benign race: another process created the directory first.
            logger.debug(f"Directory already exists (safe to ignore): {self.zones_dir}")
        except Exception as e:
            logger.error(f"Failed to setup zones directory: {e}")
            raise

        # Verify named.conf exists; create an empty one so later writes work.
        if not self.named_conf.exists():
            logger.warning(f"named.conf not found at {self.named_conf}")
            self.named_conf.touch()
            logger.info(f"Created empty named.conf at {self.named_conf}")

        logger.success(f"BIND backend initialized for {self.zones_dir}")

    def write_zone(self, zone_name: str, zone_data: str) -> bool:
        """Write zone_data to <zones_dir>/<zone_name>.db; True on success."""
        zone_file = self.zones_dir / f"{zone_name}.db"
        try:
            with open(zone_file, "w") as f:
                f.write(zone_data)
            logger.debug(f"Wrote zone file: {zone_file}")
            return True
        except IOError as e:
            logger.error(f"Failed to write zone file {zone_file}: {e}")
            return False

    def delete_zone(self, zone_name: str) -> bool:
        """Delete the zone file; False when missing or on I/O error."""
        zone_file = self.zones_dir / f"{zone_name}.db"
        try:
            if zone_file.exists():
                zone_file.unlink()
                logger.debug(f"Deleted zone file: {zone_file}")
                return True
            logger.warning(f"Zone file not found: {zone_file}")
            return False
        except IOError as e:
            logger.error(f"Failed to delete zone file {zone_file}: {e}")
            return False

    def reload_zone(self, zone_name: Optional[str] = None) -> bool:
        """Ask named (via rndc) to reload one zone, or all when None."""
        try:
            if zone_name:
                cmd = ["rndc", "reload", zone_name]
                logger.debug(f"Reloading single zone: {zone_name}")
            else:
                cmd = ["rndc", "reload"]
                logger.debug("Reloading all zones")

            result = subprocess.run(
                cmd,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
            logger.debug(f"BIND reload successful: {result.stdout}")
            return True
        except subprocess.CalledProcessError as e:
            logger.error(f"BIND reload failed: {e.stderr}")
            return False
        except Exception as e:
            logger.error(f"Unexpected error during BIND reload: {e}")
            return False

    def zone_exists(self, zone_name: str) -> bool:
        """Return True when <zones_dir>/<zone_name>.db exists on disk."""
        zone_file = self.zones_dir / f"{zone_name}.db"
        exists = zone_file.exists()
        logger.debug(f"Zone existence check for {zone_name}: {exists}")
        return exists

    def update_named_conf(self, zones: List[str]) -> bool:
        """Rewrite named_conf with one master-zone stanza per zone name.

        Overwrites the entire file; callers must pass the complete zone list.
        """
        try:
            with open(self.named_conf, "w") as f:
                for zone in zones:
                    zone_file = self.zones_dir / f"{zone}.db"
                    f.write(f'zone "{zone}" {{ type master; file "{zone_file}"; }};\n')
            logger.debug(f"Updated named.conf: {self.named_conf}")
            return True
        except IOError as e:
            logger.error(f"Failed to update named.conf: {e}")
            return False
||||||
443
directdnsonly/app/backends/coredns_mysql.py
Normal file
443
directdnsonly/app/backends/coredns_mysql.py
Normal file
@@ -0,0 +1,443 @@
|
|||||||
|
from typing import Optional, Dict, Set, Tuple, Any
|
||||||
|
|
||||||
|
from sqlalchemy import create_engine, Column, String, Integer, Text, ForeignKey, Boolean, select, func, delete
|
||||||
|
from sqlalchemy.orm import sessionmaker, scoped_session, relationship, declarative_base
|
||||||
|
from dns import zone as dns_zone_module
|
||||||
|
from dns.rdataclass import IN
|
||||||
|
from loguru import logger
|
||||||
|
from .base import DNSBackend
|
||||||
|
|
||||||
|
# Declarative base for the CoreDNS MySQL schema (zones + records tables).
Base = declarative_base()


class Zone(Base):
    """A DNS zone row; zone_name is stored fully-qualified (trailing dot)."""

    __tablename__ = "zones"
    id = Column(Integer, primary_key=True)
    zone_name = Column(String(255), nullable=False, index=True, unique=True)
    managed_by = Column(String(255), nullable=True)  # 'directadmin' | 'direct' | NULL (legacy)
|
||||||
|
|
||||||
|
|
||||||
|
class Record(Base):
    """A single resource record belonging to a Zone row."""

    __tablename__ = "records"
    id = Column(Integer, primary_key=True)
    zone_id = Column(Integer, ForeignKey("zones.id"), nullable=False)
    hostname = Column(String(255), nullable=False, index=True)
    # Record type, e.g. 'A', 'MX', 'SOA'.
    type = Column(String(10), nullable=False)
    # RDATA in zone-file text form.
    data = Column(Text, nullable=False)
    ttl = Column(Integer, nullable=True)
    # Set True on every write by this application; presumably consumed by
    # the CoreDNS plugin to decide whether to serve the record — confirm.
    online = Column(Boolean, nullable=False, default=False)

    zone = relationship("Zone", backref="records")
|
||||||
|
|
||||||
|
|
||||||
|
class CoreDNSMySQLBackend(DNSBackend):
    """DNS backend storing zones/records in MySQL for coredns_mysql_extend.

    Zone names are persisted fully-qualified (trailing dot). write_zone
    diffs the parsed BIND zone against the rows already in MySQL and applies
    only the delta; SOA records are tracked separately from other types.
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        # Connection parameters with local-development defaults.
        self.host = config.get("host", "localhost")
        self.port = config.get("port", 3306)
        self.database = config.get("database", "coredns")
        self.username = config.get("username")
        self.password = config.get("password")

        # pool_pre_ping revalidates pooled connections before use so stale
        # MySQL connections do not surface as hard errors mid-request.
        self.engine = create_engine(
            f"mysql+pymysql://{self.username}:{self.password}@"
            f"{self.host}:{self.port}/{self.database}",
            pool_pre_ping=True,
            pool_size=5,
            max_overflow=10,
        )
        self.Session = scoped_session(sessionmaker(self.engine))
        # Create the zones/records tables if they do not exist yet.
        Base.metadata.create_all(self.engine)
        logger.info(
            f"Initialized CoreDNS MySQL backend '{self.instance_name}' "
            f"for {self.database}@{self.host}:{self.port}"
        )

    @staticmethod
    def dot_fqdn(zone_name):
        """Return zone_name with a guaranteed trailing dot (FQDN form)."""
        return f"{zone_name}." if not zone_name.endswith(".") else zone_name

    @classmethod
    def get_name(cls) -> str:
        """Return the backend type name used in configuration."""
        return "coredns_mysql"

    @classmethod
    def is_available(cls) -> bool:
        """Return True when the PyMySQL driver can be imported."""
        try:
            import pymysql

            return True
        except ImportError:
            logger.warning("PyMySQL not available - CoreDNS MySQL backend disabled")
            return False

    def write_zone(self, zone_name: str, zone_data: str) -> bool:
        """Sync the parsed BIND zone into MySQL as an add/update/remove delta.

        Returns True on success (including the no-change case); False and
        rolls back on any error.
        """
        session = self.Session()
        try:
            # Ensure zone exists
            zone = self._ensure_zone_exists(session, zone_name)

            # Get existing records for this zone but track SOA records separately
            existing_records = {}
            existing_soa = None
            for r in session.execute(select(Record).filter_by(zone_id=zone.id)).scalars().all():
                if r.type == "SOA":
                    existing_soa = r
                else:
                    existing_records[(r.hostname, r.type, r.data)] = r

            # Parse the zone data into a normalised record set
            source_records, source_soa = self._parse_zone_to_record_set(
                zone_name, zone_data
            )

            # Pre-compute the set of (hostname, type, data) keys that should
            # remain after this update, so we can identify stale records upfront.
            incoming_keys = {
                (name, rtype, data) for name, rtype, data, _ in source_records
            }

            changes = {"added": 0, "updated": 0, "removed": 0}

            # --- 1. Remove stale records first ---
            # Deleting before inserting means a brief NXDOMAIN is preferable
            # to briefly serving both old and new records simultaneously.
            for key, record in existing_records.items():
                if key not in incoming_keys:
                    logger.debug(
                        f"Removed record: {record.hostname} {record.type} {record.data}"
                    )
                    session.delete(record)
                    changes["removed"] += 1

            # Handle SOA removal if needed
            if existing_soa and not source_soa:
                logger.debug(
                    f"Removed SOA record: {existing_soa.hostname} SOA {existing_soa.data}"
                )
                session.delete(existing_soa)
                changes["removed"] += 1

            # --- 2. Add / update incoming records ---
            # Handle SOA record
            if source_soa:
                soa_name, soa_content, soa_ttl = source_soa
                soa_parts = soa_content.split()
                # NOTE(review): an SOA whose RDATA is not exactly 7 fields is
                # silently ignored here — any existing SOA row is kept as-is
                # with no log message. Confirm this is intended.
                if len(soa_parts) == 7:
                    if existing_soa:
                        existing_soa.data = soa_content
                        existing_soa.ttl = soa_ttl
                        existing_soa.online = True
                        changes["updated"] += 1
                        logger.debug(
                            f"Updated SOA record: {soa_name} SOA {soa_content}"
                        )
                    else:
                        existing_soa = Record(
                            zone_id=zone.id,
                            hostname=soa_name,
                            type="SOA",
                            data=soa_content,
                            ttl=soa_ttl,
                            online=True,
                        )
                        session.add(existing_soa)
                        changes["added"] += 1
                        logger.debug(f"Added SOA record: {soa_name} SOA {soa_content}")

            # Process all non-SOA records
            for record_name, record_type, record_content, record_ttl in source_records:
                key = (record_name, record_type, record_content)

                if key in existing_records:
                    # Update existing record if TTL changed
                    record = existing_records[key]
                    if record.ttl != record_ttl:
                        record.ttl = record_ttl
                        record.online = True
                        changes["updated"] += 1
                        logger.debug(
                            f"Updated TTL for record: {record_name} {record_type} {record_content}"
                        )
                else:
                    # Add new record
                    new_record = Record(
                        zone_id=zone.id,
                        hostname=record_name,
                        type=record_type,
                        data=record_content,
                        ttl=record_ttl,
                        online=True,
                    )
                    session.add(new_record)
                    changes["added"] += 1
                    logger.debug(
                        f"Added new record: {record_name} {record_type} {record_content}"
                    )

            session.commit()
            total_changes = changes["added"] + changes["updated"] + changes["removed"]
            if total_changes > 0:
                logger.info(
                    f"[{self.instance_name}] Zone {zone_name} updated: "
                    f"{changes['added']} added, {changes['updated']} updated, "
                    f"{changes['removed']} removed"
                )
            else:
                logger.debug(f"[{self.instance_name}] Zone {zone_name}: no changes")
            return True

        except Exception as e:
            logger.error(f"Error writing zone {zone_name}: {e}")
            session.rollback()
            return False
        finally:
            session.close()

    def delete_zone(self, zone_name: str) -> bool:
        """Delete a zone row and all its records; False when not found."""
        session = self.Session()
        try:
            # First find the zone
            zone = session.execute(
                select(Zone).filter_by(zone_name=self.dot_fqdn(zone_name))
            ).scalar_one_or_none()
            if not zone:
                logger.warning(f"Zone {zone_name} not found for deletion")
                return False

            # Delete all records associated with the zone
            count = session.execute(
                delete(Record).where(Record.zone_id == zone.id)
            ).rowcount

            # Delete the zone itself
            session.delete(zone)
            session.commit()

            logger.info(f"Deleted zone {zone_name} with {count} records")
            return True
        except Exception as e:
            session.rollback()
            logger.error(f"Zone deletion failed for {zone_name}: {e}")
            return False
        finally:
            session.close()

    def reload_zone(self, zone_name: Optional[str] = None) -> bool:
        """No-op reload: the CoreDNS plugin picks up DB changes itself."""
        # In coredns_mysql_extend, the core plugin handles reloading automatically
        # when database changes are detected, so we just log the request
        if zone_name:
            logger.debug(f"CoreDNS reload triggered for zone {zone_name}")
        else:
            logger.debug("CoreDNS reload triggered for all zones")
        return True

    def zone_exists(self, zone_name: str) -> bool:
        """Return True when a Zone row exists for the FQDN form of zone_name."""
        session = self.Session()
        try:
            exists = session.execute(
                select(Zone).filter_by(zone_name=self.dot_fqdn(zone_name))
            ).scalar_one_or_none() is not None
            logger.debug(f"Zone existence check for {zone_name}: {exists}")
            return exists
        except Exception as e:
            logger.error(f"Zone existence check failed for {zone_name}: {e}")
            return False
        finally:
            session.close()

    def _ensure_zone_exists(self, session, zone_name: str) -> Zone:
        """Ensure a zone exists in the database, creating it if necessary."""
        zone = session.execute(
            select(Zone).filter_by(zone_name=self.dot_fqdn(zone_name))
        ).scalar_one_or_none()
        if not zone:
            logger.debug(f"Creating new zone: {self.dot_fqdn(zone_name)}")
            zone = Zone(
                zone_name=self.dot_fqdn(zone_name),
                managed_by="directadmin",
            )
            session.add(zone)
            # flush() assigns zone.id so record rows can reference it
            # before the enclosing transaction commits.
            session.flush()
        elif not zone.managed_by:
            # Migrate pre-existing rows that were created before this field was added
            zone.managed_by = "directadmin"
        return zone

    def _parse_zone_to_record_set(
        self, zone_name: str, zone_data: str
    ) -> Tuple[Set[Tuple[str, str, str, int]], Optional[Tuple[str, str, int]]]:
        """Parse a BIND zone file into a set of record keys.

        Returns:
            Tuple of:
            - set of (hostname, type, data, ttl) tuples for non-SOA records
            - (hostname, soa_data, ttl) tuple for the SOA record, or None
        """
        dns_zone = dns_zone_module.from_text(zone_data, check_origin=False)
        records: Set[Tuple[str, str, str, int]] = set()
        soa = None

        # Use the zone origin (if available) to expand relative names in RDATA
        # back to absolute FQDNs. Without this, dnspython's default relativize=True
        # behaviour turns in-zone targets like `wvvcc.co.nz.` into `@` in the
        # stored data, which CoreDNS then serves incorrectly.
        origin = dns_zone.origin

        for name, ttl, rdata in dns_zone.iterate_rdatas():
            # Only Internet-class records are synced.
            if rdata.rdclass != IN:
                continue

            record_name = str(name)
            record_type = rdata.rdtype.name
            if origin is not None:
                record_content = rdata.to_text(origin=origin, relativize=False)
            else:
                record_content = rdata.to_text()

            if record_type == "SOA":
                soa = (record_name, record_content, ttl)
                continue

            records.add((record_name, record_type, record_content, ttl))

        return records, soa

    def verify_zone_record_count(
        self, zone_name: str, expected_count: int
    ) -> tuple[bool, int]:
        """Verify the record count in this backend matches the expected count
        from the source (DirectAdmin) zone file.

        Args:
            zone_name: The zone to verify
            expected_count: The number of records parsed from the source BIND zone

        Returns:
            Tuple of (matches: bool, actual_count: int)
        """
        session = self.Session()
        try:
            zone = session.execute(
                select(Zone).filter_by(zone_name=self.dot_fqdn(zone_name))
            ).scalar_one_or_none()
            if not zone:
                logger.warning(
                    f"[{self.instance_name}] Zone {zone_name} not found "
                    f"during record count verification"
                )
                return False, 0

            actual_count = session.execute(
                select(func.count()).select_from(Record).where(Record.zone_id == zone.id)
            ).scalar()
            matches = actual_count == expected_count

            if not matches:
                logger.warning(
                    f"[{self.instance_name}] Record count mismatch for "
                    f"{zone_name}: source zone has {expected_count} records, "
                    f"backend has {actual_count} records "
                    f"(difference: {actual_count - expected_count:+d})"
                )
            else:
                logger.debug(
                    f"[{self.instance_name}] Record count verified for "
                    f"{zone_name}: {actual_count} records match source"
                )

            return matches, actual_count

        except Exception as e:
            logger.error(
                f"[{self.instance_name}] Error verifying record count "
                f"for {zone_name}: {e}"
            )
            # -1 signals "count unavailable" as distinct from a real zero.
            return False, -1
        finally:
            session.close()

    def reconcile_zone_records(
        self, zone_name: str, zone_data: str
    ) -> Tuple[bool, int]:
        """Reconcile backend records against the authoritative BIND zone from
        DirectAdmin. Any records in the backend that are **not** present in
        the source zone will be deleted.

        This is the post-write safety net: even though ``write_zone`` already
        removes stale records during normal processing, this method catches
        any extras that may have crept in via race conditions, manual edits,
        or replication drift between MySQL nodes.

        Args:
            zone_name: The zone to reconcile
            zone_data: The raw BIND zone file content (authoritative source)

        Returns:
            Tuple of (success: bool, records_removed: int)
        """
        session = self.Session()
        try:
            zone = session.execute(
                select(Zone).filter_by(zone_name=self.dot_fqdn(zone_name))
            ).scalar_one_or_none()
            if not zone:
                logger.warning(
                    f"[{self.instance_name}] Zone {zone_name} not found "
                    f"during reconciliation"
                )
                return False, 0

            # Build the expected record set from the source BIND zone
            source_records, source_soa = self._parse_zone_to_record_set(
                zone_name, zone_data
            )
            # Build lookup keys (without TTL) matching write_zone's key format
            expected_keys: Set[Tuple[str, str, str]] = {
                (hostname, rtype, data) for hostname, rtype, data, _ in source_records
            }

            # Query all records currently in the backend for this zone
            db_records = session.execute(
                select(Record).where(Record.zone_id == zone.id)
            ).scalars().all()

            removed = 0
            for record in db_records:
                # SOA records are managed separately – skip them
                if record.type == "SOA":
                    continue

                key = (record.hostname, record.type, record.data)
                if key not in expected_keys:
                    logger.debug(
                        f"[{self.instance_name}] Reconcile: removing extra "
                        f"record from {zone_name}: "
                        f"{record.hostname} {record.type} {record.data}"
                    )
                    session.delete(record)
                    removed += 1

            if removed > 0:
                session.commit()
                logger.info(
                    f"[{self.instance_name}] Reconciliation for {zone_name}: "
                    f"removed {removed} extra record(s) not in source zone"
                )
            else:
                logger.debug(
                    f"[{self.instance_name}] Reconciliation for {zone_name}: "
                    f"all records match source zone — no action needed"
                )

            return True, removed

        except Exception as e:
            logger.error(
                f"[{self.instance_name}] Error reconciling records "
                f"for {zone_name}: {e}"
            )
            session.rollback()
            return False, 0
        finally:
            session.close()
||||||
179
directdnsonly/app/backends/nsd.py
Normal file
179
directdnsonly/app/backends/nsd.py
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
import os
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
from loguru import logger
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, List, Optional
|
||||||
|
from .base import DNSBackend
|
||||||
|
|
||||||
|
|
||||||
|
class NSDBackend(DNSBackend):
|
||||||
|
"""DNS backend for NSD (Name Server Daemon) by NLnet Labs.
|
||||||
|
|
||||||
|
Zone files use the same RFC 1035 format as BIND. NSD is reloaded via
|
||||||
|
``nsd-control reload`` after each write. Zone registration is managed in a
|
||||||
|
dedicated include file so the main ``nsd.conf`` is never modified by the
|
||||||
|
application.
|
||||||
|
"""
|
||||||
|
|
||||||
|
    @classmethod
    def get_name(cls) -> str:
        """Return the backend type identifier used in configuration."""
        return "nsd"
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def is_available(cls) -> bool:
|
||||||
|
try:
|
||||||
|
result = subprocess.run(
|
||||||
|
["nsd-control", "status"],
|
||||||
|
capture_output=True,
|
||||||
|
text=True,
|
||||||
|
)
|
||||||
|
# nsd-control exits 0 when NSD is running, non-zero otherwise.
|
||||||
|
# Either way, a non-FileNotFoundError means the binary is present.
|
||||||
|
logger.info("NSD available (nsd-control found)")
|
||||||
|
return True
|
||||||
|
except FileNotFoundError:
|
||||||
|
logger.warning("NSD not found in PATH — nsd-control missing")
|
||||||
|
return False
|
||||||
|
|
||||||
|
    def __init__(self, config: Dict):
        """Create the backend, ensuring zones_dir and the conf include exist.

        Config keys (both optional):
            zones_dir: directory for zone files (default /etc/nsd/zones)
            nsd_conf:  include file that registers zones
                       (default /etc/nsd/nsd.conf.d/zones.conf)

        Raises on unrecoverable filesystem errors.
        """
        super().__init__(config)
        self.zones_dir = Path(config.get("zones_dir", "/etc/nsd/zones"))
        self.nsd_conf = Path(
            config.get("nsd_conf", "/etc/nsd/nsd.conf.d/zones.conf")
        )

        # Ensure zones directory exists
        try:
            # A symlinked zones dir is treated as already provisioned.
            if self.zones_dir.is_symlink():
                logger.debug(f"{self.zones_dir} is already a symlink")
            elif not self.zones_dir.exists():
                self.zones_dir.mkdir(parents=True, mode=0o755)
                logger.debug(f"Created zones directory: {self.zones_dir}")
                # NOTE(review): unlike BINDBackend, permissions appear to be
                # applied only on the freshly-created path — confirm intended.
                os.chmod(self.zones_dir, 0o755)
        except FileExistsError:
            # Benign race: another process created the directory first.
            pass
        except Exception as e:
            logger.error(f"Failed to setup zones directory: {e}")
            raise

        # Ensure the conf include directory and file exist
        self.nsd_conf.parent.mkdir(parents=True, exist_ok=True)
        if not self.nsd_conf.exists():
            self.nsd_conf.touch()
            logger.info(f"Created empty NSD zone conf: {self.nsd_conf}")

        logger.success(
            f"NSD backend initialized — zones: {self.zones_dir}, "
            f"conf: {self.nsd_conf}"
        )
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Core backend interface
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def write_zone(self, zone_name: str, zone_data: str) -> bool:
    """Write ``zone_data`` to ``<zones_dir>/<zone_name>.db`` and make sure
    the zone is referenced by the NSD conf include file.

    Returns True on success, False if the write (or conf update) failed.
    """
    zone_file = self.zones_dir / f"{zone_name}.db"
    try:
        zone_file.write_text(zone_data)
        logger.debug(f"Wrote zone file: {zone_file}")
        # Register the zone in the conf include so NSD will serve it.
        self._ensure_zone_in_conf(zone_name)
    except IOError as e:
        logger.error(f"Failed to write zone file {zone_file}: {e}")
        return False
    return True
|
||||||
|
|
||||||
|
def delete_zone(self, zone_name: str) -> bool:
    """Remove the zone's file from disk and its stanza from the NSD conf.

    Returns False when the zone file does not exist or deletion fails.
    """
    zone_file = self.zones_dir / f"{zone_name}.db"
    try:
        # Guard clause: nothing to delete means the caller's view is stale.
        if not zone_file.exists():
            logger.warning(f"Zone file not found: {zone_file}")
            return False
        zone_file.unlink()
        logger.debug(f"Deleted zone file: {zone_file}")
        self._remove_zone_from_conf(zone_name)
        return True
    except IOError as e:
        logger.error(f"Failed to delete zone {zone_name}: {e}")
        return False
|
||||||
|
|
||||||
|
def reload_zone(self, zone_name: Optional[str] = None) -> bool:
    """Ask NSD to reload one zone (``zone_name``) or all zones (None).

    Runs ``nsd-control reload [zone]``; returns True when the command
    exits successfully, False on any failure.
    """
    cmd = ["nsd-control", "reload"]
    if zone_name:
        cmd.append(zone_name)
        logger.debug(f"Reloading single zone: {zone_name}")
    else:
        logger.debug("Reloading all zones")

    try:
        result = subprocess.run(
            cmd,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
    except subprocess.CalledProcessError as e:
        # Non-zero exit from nsd-control — stderr carries the reason.
        logger.error(f"NSD reload failed: {e.stderr.strip()}")
        return False
    except Exception as e:
        # e.g. nsd-control binary missing from PATH
        logger.error(f"Unexpected error during NSD reload: {e}")
        return False

    logger.debug(f"NSD reload successful: {result.stdout.strip()}")
    return True
|
||||||
|
|
||||||
|
def zone_exists(self, zone_name: str) -> bool:
    """Return True if a ``<zone_name>.db`` file is present in the zones dir."""
    zone_file = self.zones_dir / f"{zone_name}.db"
    present = zone_file.exists()
    logger.debug(f"Zone existence check for {zone_name}: {present}")
    return present
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# NSD conf file management
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def update_nsd_conf(self, zones: List[str]) -> bool:
    """Rewrite the NSD zones include file with exactly the given zone list.

    Full replacement from a known-good source list — the NSD analogue of
    BINDBackend.update_named_conf. Returns False if the file cannot be
    written.
    """
    try:
        stanzas = []
        for zone in zones:
            zone_file = self.zones_dir / f"{zone}.db"
            stanzas.append(
                f'\nzone:\n name: "{zone}"\n zonefile: "{zone_file}"\n'
            )
        # One atomic write of the whole include file.
        self.nsd_conf.write_text("".join(stanzas))
        logger.debug(f"Rewrote NSD zone conf: {self.nsd_conf}")
        return True
    except IOError as e:
        logger.error(f"Failed to update NSD zone conf: {e}")
        return False
|
||||||
|
|
||||||
|
def _ensure_zone_in_conf(self, zone_name: str) -> None:
    """Append a zone stanza to the NSD conf file if it is not already present."""
    existing = self.nsd_conf.read_text() if self.nsd_conf.exists() else ""
    # Substring probe on the name line is enough — stanzas are only ever
    # written by this class with a fixed layout.
    if f'name: "{zone_name}"' in existing:
        return
    zone_file = self.zones_dir / f"{zone_name}.db"
    stanza = f'\nzone:\n name: "{zone_name}"\n zonefile: "{zone_file}"\n'
    with open(self.nsd_conf, "a") as f:
        f.write(stanza)
    logger.debug(f"Added zone {zone_name} to NSD conf")
|
||||||
|
|
||||||
|
def _remove_zone_from_conf(self, zone_name: str) -> None:
    """Remove a zone stanza from the NSD conf file (no-op if absent)."""
    if not self.nsd_conf.exists():
        return
    before = self.nsd_conf.read_text()
    # Matches exactly the stanza layout emitted by _ensure_zone_in_conf /
    # update_nsd_conf; the zone name is escaped so dots match literally.
    stanza_re = (
        r'\nzone:\n name: "'
        + re.escape(zone_name)
        + r'"\n zonefile: "[^"]+"\n'
    )
    after = re.sub(stanza_re, "", before)
    if after == before:
        return
    self.nsd_conf.write_text(after)
    logger.debug(f"Removed zone {zone_name} from NSD conf")
|
||||||
3
directdnsonly/app/da/__init__.py
Normal file
3
directdnsonly/app/da/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
from .client import DirectAdminClient
|
||||||
|
|
||||||
|
__all__ = ["DirectAdminClient"]
|
||||||
340
directdnsonly/app/da/client.py
Normal file
340
directdnsonly/app/da/client.py
Normal file
@@ -0,0 +1,340 @@
|
|||||||
|
"""DirectAdmin HTTP client.
|
||||||
|
|
||||||
|
Encapsulates all outbound communication with a single DirectAdmin server:
|
||||||
|
authenticated requests, the Basic-Auth → session-cookie fallback for DA Evo,
|
||||||
|
paginated domain listing, and the legacy URL-encoded response parser.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from urllib.parse import parse_qs
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import requests
|
||||||
|
import requests.exceptions
|
||||||
|
from loguru import logger
|
||||||
|
|
||||||
|
|
||||||
|
class DirectAdminClient:
|
||||||
|
"""HTTP client for a single DirectAdmin server.
|
||||||
|
|
||||||
|
Handles two authentication modes transparently:
|
||||||
|
- Basic Auth (classic DA / API-only access)
|
||||||
|
- Session cookie via CMD_LOGIN (DA Evolution — redirects Basic Auth)
|
||||||
|
|
||||||
|
Usage::
|
||||||
|
|
||||||
|
client = DirectAdminClient("da1.example.com", 2222, "admin", "secret")
|
||||||
|
domains = client.list_domains() # set[str] or None on failure
|
||||||
|
response = client.get("CMD_API_SHOW_ALL_USERS")
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
hostname: str,
|
||||||
|
port: int,
|
||||||
|
username: str,
|
||||||
|
password: str,
|
||||||
|
ssl: bool = True,
|
||||||
|
verify_ssl: bool = True,
|
||||||
|
) -> None:
|
||||||
|
self.hostname = hostname
|
||||||
|
self.port = port
|
||||||
|
self.username = username
|
||||||
|
self.password = password
|
||||||
|
self.scheme = "https" if ssl else "http"
|
||||||
|
self.verify_ssl = verify_ssl
|
||||||
|
self._cookies = None # populated on first successful session login
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Public API
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def list_domains(self, ipp: int = 1000) -> Optional[set]:
    """Return all domains on this DA server via CMD_DNS_ADMIN (JSON, paginated).

    Falls back to the legacy URL-encoded parser if JSON decode fails.
    Returns a set of lowercase domain strings, or ``None`` if the server
    is unreachable or returns an error.

    :param ipp: items per page requested from DA (default 1000).
    """
    page = 1
    all_domains: set = set()
    # Optimistic start; overwritten by the "total_pages" field of the
    # first successfully parsed JSON page.
    total_pages = 1

    try:
        while page <= total_pages:
            response = self.get(
                "CMD_DNS_ADMIN",
                params={"json": "yes", "page": page, "ipp": ipp},
            )
            if response is None:
                # Transport-level failure already logged by self.get().
                return None

            # DA Evolution redirects Basic Auth to its login page. Upgrade
            # once to a session cookie via _login(), then retry this page.
            if response.is_redirect or response.status_code in (
                301,
                302,
                303,
                307,
                308,
            ):
                if self._cookies:
                    # Already logged in and still redirected — credentials
                    # lack admin access; retrying won't help.
                    logger.error(
                        f"[da:{self.hostname}] Still redirecting after session login — "
                        f"check that '{self.username}' has admin-level access. Skipping."
                    )
                    return None
                logger.debug(
                    f"[da:{self.hostname}] Basic Auth redirected "
                    f"(HTTP {response.status_code}) — attempting session login (DA Evo)"
                )
                if not self._login():
                    return None
                continue  # retry this page with cookies

            response.raise_for_status()

            # An HTML body with HTTP 200 is DA's way of serving a login or
            # error page instead of API output.
            content_type = response.headers.get("Content-Type", "")
            if "text/html" in content_type:
                logger.error(
                    f"[da:{self.hostname}] Returned HTML instead of API response — "
                    f"check credentials and admin-level access. Skipping."
                )
                return None

            try:
                data = response.json()
                # Domain entries are keyed by numeric index; other keys
                # ("info", etc.) carry paging metadata.
                for k, v in data.items():
                    if k.isdigit() and isinstance(v, dict) and "domain" in v:
                        all_domains.add(v["domain"].strip().lower())
                total_pages = int(data.get("info", {}).get("total_pages", 1))
                page += 1
            except Exception as exc:
                # Older DA builds answer with the URL-encoded legacy format;
                # parse that instead and stop paging.
                logger.error(
                    f"[da:{self.hostname}] JSON decode failed on page {page}: {exc}\n"
                    f"Raw response: {response.text[:500]}"
                )
                all_domains.update(self._parse_legacy_domain_list(response.text))
                break  # no paging in legacy mode

        return all_domains

    except requests.exceptions.SSLError as exc:
        logger.error(
            f"[da:{self.hostname}] SSL error — {exc}. "
            f"Set verify_ssl: false in reconciliation config if using self-signed certs."
        )
    except requests.exceptions.ConnectionError as exc:
        logger.error(f"[da:{self.hostname}] Cannot reach server — {exc}. Skipping.")
    except requests.exceptions.Timeout:
        logger.error(f"[da:{self.hostname}] Connection timed out. Skipping.")
    except requests.exceptions.HTTPError as exc:
        logger.error(f"[da:{self.hostname}] HTTP error — {exc}. Skipping.")
    except Exception as exc:
        logger.error(f"[da:{self.hostname}] Unexpected error: {exc}")

    # Any handled exception above falls through to a None ("unavailable")
    # result rather than a partial domain set.
    return None
|
||||||
|
|
||||||
|
def get(
    self, command: str, params: Optional[dict] = None
) -> Optional[requests.Response]:
    """Authenticated GET to any DA CMD_* endpoint.

    Prefers the session cookie obtained by ``_login``; without one, HTTP
    Basic Auth is used. Redirects are never followed, so callers can
    detect DA Evo's Basic-Auth-to-cookie upgrade. Returns None on any
    transport error.
    """
    url = f"{self.scheme}://{self.hostname}:{self.port}/{command}"
    request_opts: dict = {
        "params": params or {},
        "timeout": 30,
        "verify": self.verify_ssl,
        "allow_redirects": False,
    }
    if self._cookies:
        request_opts["cookies"] = self._cookies
    else:
        request_opts["auth"] = (self.username, self.password)

    try:
        return requests.get(url, **request_opts)
    except Exception as exc:
        logger.error(f"[da:{self.hostname}] GET {command} failed: {exc}")
        return None
|
||||||
|
|
||||||
|
def post(
    self, command: str, data: Optional[dict] = None
) -> Optional[requests.Response]:
    """Authenticated POST to any DA CMD_* endpoint.

    Same auth and redirect behaviour as :meth:`get`; returns None on
    transport errors.
    """
    url = f"{self.scheme}://{self.hostname}:{self.port}/{command}"
    request_opts: dict = {
        "data": data or {},
        "timeout": 30,
        "verify": self.verify_ssl,
        "allow_redirects": False,
    }
    if self._cookies:
        request_opts["cookies"] = self._cookies
    else:
        request_opts["auth"] = (self.username, self.password)

    try:
        return requests.post(url, **request_opts)
    except Exception as exc:
        logger.error(f"[da:{self.hostname}] POST {command} failed: {exc}")
        return None
|
||||||
|
|
||||||
|
def get_extra_dns_servers(self) -> dict:
    """Return the Extra DNS server map from CMD_MULTI_SERVER (GET).

    The result is keyed by server hostname/IP; each value is that
    server's settings dict (dns, domain_check, port, user, ssl, …).
    Returns ``{}`` on any error.
    """
    resp = self.get("CMD_MULTI_SERVER", params={"json": "yes"})
    if resp is None or resp.status_code != 200:
        logger.error(f"[da:{self.hostname}] CMD_MULTI_SERVER GET failed")
        return {}
    try:
        payload = resp.json()
    except Exception as exc:
        logger.error(f"[da:{self.hostname}] CMD_MULTI_SERVER parse error: {exc}")
        return {}
    return payload.get("servers", {})
|
||||||
|
|
||||||
|
def add_extra_dns_server(
    self, ip: str, port: int, user: str, passwd: str, ssl: bool = False
) -> bool:
    """Register a new Extra DNS server via CMD_MULTI_SERVER action=add.

    Returns ``True`` if DA reports success, ``False`` otherwise.
    """
    payload = {
        "action": "add",
        "json": "yes",
        "ip": ip,
        "port": str(port),
        "user": user,
        "passwd": passwd,
        "ssl": "yes" if ssl else "no",
    }
    resp = self.post("CMD_MULTI_SERVER", data=payload)
    if resp is None or resp.status_code != 200:
        logger.error(f"[da:{self.hostname}] CMD_MULTI_SERVER add failed for {ip}")
        return False

    try:
        result = resp.json()
    except Exception as exc:
        logger.error(f"[da:{self.hostname}] CMD_MULTI_SERVER add parse error: {exc}")
        return False

    if result.get("success"):
        logger.info(f"[da:{self.hostname}] Added Extra DNS server {ip}")
        return True
    logger.error(
        f"[da:{self.hostname}] CMD_MULTI_SERVER add error: {result.get('result', result)}"
    )
    return False
|
||||||
|
|
||||||
|
def ensure_extra_dns_server(
    self, ip: str, port: int, user: str, passwd: str, ssl: bool = False
) -> bool:
    """Add (if absent) and configure a directdnsonly Extra DNS server.

    Ensures the server is registered with ``dns=yes`` and
    ``domain_check=yes`` so DirectAdmin pushes zone updates to it.
    Returns ``True`` if fully configured, ``False`` on any failure.
    """
    servers = self.get_extra_dns_servers()
    if ip not in servers:
        # Not registered yet — create the entry first, then configure it.
        if not self.add_extra_dns_server(ip, port, user, passwd, ssl):
            return False

    ssl_str = "yes" if ssl else "no"
    # NOTE(review): field names (select0, per-IP "port-<ip>" keys, etc.)
    # appear to mirror DA's multi-server settings form; passwd is sent
    # blank, presumably so DA keeps the stored password — confirm against
    # DA's CMD_MULTI_SERVER documentation before changing any of them.
    resp = self.post(
        "CMD_MULTI_SERVER",
        data={
            "action": "multiple",
            "save": "yes",
            "json": "yes",
            "passwd": "",
            "select0": ip,
            f"port-{ip}": str(port),
            f"user-{ip}": user,
            f"ssl-{ip}": ssl_str,
            f"dns-{ip}": "yes",
            f"domain_check-{ip}": "yes",
            f"user_check-{ip}": "no",
            f"email-{ip}": "no",
            f"show_all_users-{ip}": "no",
        },
    )
    if resp is None or resp.status_code != 200:
        logger.error(
            f"[da:{self.hostname}] CMD_MULTI_SERVER save failed for {ip}"
        )
        return False
    try:
        result = resp.json()
        if result.get("success"):
            logger.info(
                f"[da:{self.hostname}] Extra DNS server {ip} configured "
                f"(dns=yes domain_check=yes)"
            )
            return True
        # DA returned JSON but without success — surface its error payload.
        logger.error(
            f"[da:{self.hostname}] CMD_MULTI_SERVER save error: {result.get('result', result)}"
        )
        return False
    except Exception as exc:
        logger.error(
            f"[da:{self.hostname}] CMD_MULTI_SERVER save parse error: {exc}"
        )
        return False
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Internal
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _login(self) -> bool:
    """POST CMD_LOGIN to obtain a DA Evo session cookie.

    On success the cookie jar is stored in ``self._cookies`` and True is
    returned; any failure (transport error or missing cookie) returns
    False.
    """
    login_url = f"{self.scheme}://{self.hostname}:{self.port}/CMD_LOGIN"
    credentials = {
        "username": self.username,
        "password": self.password,
        "referer": "/CMD_DNS_ADMIN?json=yes&page=1&ipp=500",
    }
    try:
        response = requests.post(
            login_url,
            data=credentials,
            timeout=30,
            verify=self.verify_ssl,
            allow_redirects=False,
        )
    except Exception as exc:
        logger.error(f"[da:{self.hostname}] Session login failed: {exc}")
        return False

    # A successful login always sets a session cookie; its absence means
    # the credentials were rejected.
    if not response.cookies:
        logger.error(
            f"[da:{self.hostname}] CMD_LOGIN returned no session cookie — "
            f"check username/password."
        )
        return False
    self._cookies = response.cookies
    logger.debug(f"[da:{self.hostname}] Session login successful (DA Evo)")
    return True
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _parse_legacy_domain_list(body: str) -> set:
|
||||||
|
"""Parse DA's legacy CMD_API_SHOW_ALL_DOMAINS URL-encoded response.
|
||||||
|
|
||||||
|
DA returns ``list[]=example.com&list[]=example2.com``, optionally
|
||||||
|
newline-separated instead of ampersand-separated.
|
||||||
|
"""
|
||||||
|
normalised = body.replace("\n", "&").strip("&")
|
||||||
|
params = parse_qs(normalised)
|
||||||
|
domains = params.get("list[]", [])
|
||||||
|
return {d.strip().lower() for d in domains if d.strip()}
|
||||||
80
directdnsonly/app/db/__init__.py
Normal file
80
directdnsonly/app/db/__init__.py
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
from sqlalchemy import create_engine, text
|
||||||
|
from sqlalchemy.orm import sessionmaker, declarative_base
|
||||||
|
from vyper import v
|
||||||
|
from loguru import logger
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
|
||||||
|
Base = declarative_base()
|
||||||
|
|
||||||
|
|
||||||
|
def _migrate(engine):
    """Apply additive schema migrations for columns added after initial release.

    For each (table, column, ddl) triple: probe for the column with a
    cheap SELECT; if the probe fails, the column is assumed missing and
    the ALTER TABLE is executed. Failures of the ALTER itself are logged
    and skipped so startup never hard-fails on an incompatible schema.

    :param engine: SQLAlchemy Engine bound to the application database.
    """
    migrations = [
        ("domains", "zone_data", "ALTER TABLE domains ADD COLUMN zone_data TEXT"),
        (
            "domains",
            "zone_updated_at",
            "ALTER TABLE domains ADD COLUMN zone_updated_at DATETIME",
        ),
    ]
    with engine.connect() as conn:
        for table, column, ddl in migrations:
            try:
                conn.execute(text(f"SELECT {column} FROM {table} LIMIT 1"))
            except Exception:
                # Bug fix: on SQLAlchemy 2.x the failed probe leaves the
                # connection in an aborted transaction, so the ALTER below
                # would raise PendingRollbackError and the migration would
                # be logged as "skipped" without ever running. Roll back
                # before issuing the DDL.
                try:
                    conn.rollback()
                except Exception:
                    pass  # legacy (1.x) connections auto-rollback
                try:
                    conn.execute(text(ddl))
                    conn.commit()
                    logger.info(f"[db] Migration applied: added {table}.{column}")
                except Exception as exc:
                    logger.warning(f"[db] Migration skipped ({table}.{column}): {exc}")
|
||||||
|
|
||||||
|
|
||||||
|
def connect(dbtype="sqlite", **kwargs):
    """Create an engine for ``dbtype``, ensure schema + migrations, and
    return a fresh Session.

    Supported dbtypes: "sqlite" (needs ``datastore.db_location``) and
    "mysql" (needs ``datastore.user/host/name/pass`` and optionally
    ``port``). Raises on missing configuration or an unknown dbtype.
    """
    if dbtype == "sqlite":
        # Start SQLite engine
        db_location = v.get("datastore.db_location")
        if db_location == -1:
            raise Exception("DB Type is sqlite but db_location is not defined")
        # check_same_thread=False: the session is shared across worker threads.
        engine = create_engine(
            "sqlite:///" + db_location, connect_args={"check_same_thread": False}
        )
    elif dbtype == "mysql":
        # Start a MySQL engine
        db_user = v.get_string("datastore.user")
        db_host = v.get_string("datastore.host")
        db_name = v.get_string("datastore.name")
        db_pass = v.get_string("datastore.pass")
        db_port = v.get_string("datastore.port")
        missing_required = (
            not v.is_set("datastore.user")
            or not v.is_set("datastore.name")
            or not v.is_set("datastore.pass")
            or not v.is_set("datastore.host")
        )
        if missing_required:
            raise Exception(
                "DB Type is mysql but db_(host,name,and pass) are not populated"
            )
        engine = create_engine(
            f"mysql+pymysql://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}"
        )
    else:
        raise Exception(f"Unknown/unimplemented database type: {dbtype}")

    # Common tail: create tables, apply additive migrations, hand back a session.
    Base.metadata.create_all(engine)
    _migrate(engine)
    return sessionmaker(engine)()
|
||||||
37
directdnsonly/app/db/models/__init__.py
Normal file
37
directdnsonly/app/db/models/__init__.py
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
from directdnsonly.app.db import Base
|
||||||
|
from sqlalchemy import Column, Integer, String, DateTime, Text
|
||||||
|
|
||||||
|
|
||||||
|
class Key(Base):
    """API key record: the key value, a human-readable name, an expiry
    timestamp, and the service it belongs to."""

    __tablename__ = "keys"

    id = Column(Integer, primary_key=True)
    key = Column(String(255), unique=True)  # the API key value itself
    name = Column(String(255))              # human-readable label
    expires = Column(DateTime)              # expiry timestamp
    service = Column(String(255))           # associated service identifier

    def __repr__(self):
        return (
            f"<Key(key='{self.key}', name='{self.name}', "
            f"expires='{self.expires}', service='{self.service}')>"
        )
|
||||||
|
|
||||||
|
|
||||||
|
class Domain(Base):
    """A DNS domain tracked by this node, including the last zone data
    received for it."""

    __tablename__ = "domains"

    id = Column(Integer, primary_key=True)
    domain = Column(String(255), unique=True)  # fully-qualified domain name
    hostname = Column(String(255))             # originating server hostname
    username = Column(String(255))             # owning account name
    zone_data = Column(Text, nullable=True)  # last known zone file from DA
    zone_updated_at = Column(DateTime, nullable=True)  # when zone_data was last stored

    def __repr__(self):
        return (
            f"<Domain(id='{self.id}', domain='{self.domain}', "
            f"hostname='{self.hostname}', username='{self.username}')>"
        )
|
||||||
353
directdnsonly/app/peer_sync.py
Normal file
353
directdnsonly/app/peer_sync.py
Normal file
@@ -0,0 +1,353 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""Peer sync worker — exchanges zone_data between directdnsonly instances.
|
||||||
|
|
||||||
|
Each node stores zone_data in its local SQLite DB after every successful
|
||||||
|
backend write. When DirectAdmin pushes a zone to one node but another
|
||||||
|
is temporarily offline, the offline node misses that zone_data.
|
||||||
|
|
||||||
|
PeerSyncWorker corrects this by periodically comparing zone lists with
|
||||||
|
all known peers and fetching any zone_data that is newer or absent locally.
|
||||||
|
It only updates the local DB — it never writes directly to backends. The
|
||||||
|
existing reconciler healing pass then detects missing zones and re-pushes
|
||||||
|
using the freshly synced zone_data.
|
||||||
|
|
||||||
|
Mesh behaviour:
|
||||||
|
- Each node exposes /internal/peers listing the URLs it knows about
|
||||||
|
- During each sync pass, every peer is asked for its peer list; any URLs
|
||||||
|
not already known are added automatically (gossip-lite discovery)
|
||||||
|
- A three-node cluster therefore only needs a linear chain of initial
|
||||||
|
connections — nodes propagate awareness of each other on the first pass
|
||||||
|
|
||||||
|
Health tracking:
|
||||||
|
- Consecutive failures per peer are counted; after FAILURE_THRESHOLD
|
||||||
|
misses the peer is marked degraded and a warning is logged once
|
||||||
|
- On the next successful contact the peer is marked recovered
|
||||||
|
|
||||||
|
Safety properties:
|
||||||
|
- If a peer is unreachable, skip it and try next interval
|
||||||
|
- Only zone_data is synced — backend writes remain the sole responsibility
|
||||||
|
of the local save queue worker
|
||||||
|
- Newer zone_updated_at timestamp wins; local data is never overwritten
|
||||||
|
with older peer data
|
||||||
|
- Peer discovery is best-effort and never fails a sync pass
|
||||||
|
"""
|
||||||
|
import datetime
|
||||||
|
import os
|
||||||
|
import threading
|
||||||
|
from loguru import logger
|
||||||
|
import requests
|
||||||
|
from sqlalchemy import select
|
||||||
|
|
||||||
|
from directdnsonly.app.db import connect
|
||||||
|
from directdnsonly.app.db.models import Domain
|
||||||
|
|
||||||
|
# Consecutive failures before a peer is logged as degraded
|
||||||
|
FAILURE_THRESHOLD = 3
|
||||||
|
|
||||||
|
|
||||||
|
class PeerSyncWorker:
|
||||||
|
"""Periodically fetches zone_data from peer directdnsonly instances and
|
||||||
|
stores it locally so the healing pass can re-push missing zones without
|
||||||
|
waiting for a DirectAdmin re-push."""
|
||||||
|
|
||||||
|
def __init__(self, peer_sync_config: dict):
    """Build the worker from the ``peer_sync`` config section.

    Recognised keys: ``enabled`` (bool, default False),
    ``interval_minutes`` (int, default 15) and ``peers`` (list of dicts
    with url/username/password). Extra peers can be injected through
    DADNS_PEER_SYNC_PEER_* environment variables (see comments below).
    The worker thread is NOT started here — call :meth:`start`.
    """
    self.enabled = peer_sync_config.get("enabled", False)
    self.interval_seconds = peer_sync_config.get("interval_minutes", 15) * 60
    # Copy so env-var injection below never mutates the caller's config list.
    self.peers = list(peer_sync_config.get("peers") or [])

    # Per-peer health state: url -> {consecutive_failures, healthy, last_seen}
    self._peer_health: dict = {}

    # ----------------------------------------------------------------
    # Env-var peer injection
    # ----------------------------------------------------------------
    # Original single-peer vars (backward compat):
    # DADNS_PEER_SYNC_PEER_URL / _USERNAME / _PASSWORD
    # Numbered multi-peer vars (new):
    # DADNS_PEER_SYNC_PEER_1_URL / _USERNAME / _PASSWORD
    # DADNS_PEER_SYNC_PEER_2_URL / ... (up to 9)
    known_urls = {p.get("url") for p in self.peers}

    env_candidates = []

    single_url = os.environ.get("DADNS_PEER_SYNC_PEER_URL", "").strip()
    if single_url:
        env_candidates.append({
            "url": single_url,
            "username": os.environ.get("DADNS_PEER_SYNC_PEER_USERNAME", "peersync"),
            "password": os.environ.get("DADNS_PEER_SYNC_PEER_PASSWORD", ""),
        })

    # Numbered vars must be contiguous: the scan stops at the first gap.
    for i in range(1, 10):
        numbered_url = os.environ.get(f"DADNS_PEER_SYNC_PEER_{i}_URL", "").strip()
        if not numbered_url:
            break
        env_candidates.append({
            "url": numbered_url,
            "username": os.environ.get(
                f"DADNS_PEER_SYNC_PEER_{i}_USERNAME", "peersync"
            ),
            "password": os.environ.get(f"DADNS_PEER_SYNC_PEER_{i}_PASSWORD", ""),
        })

    # Config-file peers win: env candidates are only added when their URL
    # is not already known.
    for candidate in env_candidates:
        if candidate["url"] not in known_urls:
            self.peers.append(candidate)
            known_urls.add(candidate["url"])
            logger.debug(
                f"[peer_sync] Added peer from env vars: {candidate['url']}"
            )

    self._stop_event = threading.Event()
    self._thread = None  # created by start(); None while not running
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Lifecycle
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def start(self):
    """Launch the background sync thread (no-op when disabled or peerless)."""
    if not self.enabled:
        logger.info("Peer sync disabled — skipping")
        return
    if not self.peers:
        logger.warning("Peer sync enabled but no peers configured")
        return

    self._stop_event.clear()
    # Daemon thread: never blocks interpreter shutdown.
    worker = threading.Thread(
        target=self._run, daemon=True, name="peer_sync_worker"
    )
    self._thread = worker
    worker.start()

    peer_urls = [p.get("url", "?") for p in self.peers]
    logger.info(
        f"Peer sync worker started — "
        f"interval: {self.interval_seconds // 60}m, "
        f"peers: {peer_urls}"
    )
|
||||||
|
|
||||||
|
def stop(self):
    """Signal the worker to stop and wait (bounded) for it to finish."""
    self._stop_event.set()
    thread = self._thread
    if thread:
        thread.join(timeout=10)
    logger.info("Peer sync worker stopped")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_alive(self):
|
||||||
|
return self._thread is not None and self._thread.is_alive()
|
||||||
|
|
||||||
|
def get_peer_urls(self) -> list:
|
||||||
|
"""Return the current list of known peer URLs.
|
||||||
|
Exposed via /internal/peers so other nodes can discover this node's mesh."""
|
||||||
|
return [p["url"] for p in self.peers if p.get("url")]
|
||||||
|
|
||||||
|
def get_peer_status(self) -> dict:
    """Build the peer-health summary dict served by the /status endpoint."""
    entries = []
    degraded = 0
    for peer in self.peers:
        url = peer.get("url", "")
        record = self._peer_health.get(url, {})
        seen = record.get("last_seen")
        # Peers never contacted yet default to healthy.
        is_healthy = record.get("healthy", True)
        if not is_healthy:
            degraded += 1
        entries.append({
            "url": url,
            "healthy": is_healthy,
            "consecutive_failures": record.get("consecutive_failures", 0),
            "last_seen": seen.isoformat() if seen else None,
        })
    return {
        "enabled": self.enabled,
        "alive": self.is_alive,
        "interval_minutes": self.interval_seconds // 60,
        "peers": entries,
        "total": len(entries),
        "healthy": len(entries) - degraded,
        "degraded": degraded,
    }
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Health tracking
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _health(self, url: str) -> dict:
|
||||||
|
return self._peer_health.setdefault(
|
||||||
|
url, {"consecutive_failures": 0, "healthy": True, "last_seen": None}
|
||||||
|
)
|
||||||
|
|
||||||
|
def _record_success(self, url: str):
    """Reset failure tracking for ``url``; log once if it was degraded."""
    record = self._health(url)
    was_degraded = not record["healthy"]
    record["consecutive_failures"] = 0
    record["healthy"] = True
    record["last_seen"] = datetime.datetime.utcnow()
    if was_degraded:
        logger.info(f"[peer_sync] {url}: peer recovered")
|
||||||
|
|
||||||
|
def _record_failure(self, url: str, exc):
    """Count a failed contact; flip to degraded at FAILURE_THRESHOLD."""
    record = self._health(url)
    record["consecutive_failures"] += 1
    crossed_threshold = (
        record["healthy"] and record["consecutive_failures"] >= FAILURE_THRESHOLD
    )
    if crossed_threshold:
        # Warn exactly once per degradation episode.
        record["healthy"] = False
        logger.warning(
            f"[peer_sync] {url}: marked degraded after {FAILURE_THRESHOLD} "
            f"consecutive failures — {exc}"
        )
    else:
        logger.debug(
            f"[peer_sync] {url}: unreachable "
            f"(failure #{record['consecutive_failures']}) — {exc}"
        )
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Internal
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _run(self):
    """Thread body: one immediate pass, then one per interval until stopped."""
    logger.info("Peer sync worker starting — running initial sync now")
    self._sync_all()
    while True:
        # wait() returns True only when the stop event fires.
        if self._stop_event.wait(timeout=self.interval_seconds):
            break
        self._sync_all()
|
||||||
|
|
||||||
|
def _sync_all(self):
    """One sync pass: pull zones from every known peer, then gossip-discover."""
    logger.debug(f"[peer_sync] Starting sync pass across {len(self.peers)} peer(s)")
    # Iterate over a snapshot — _discover_peers_from may grow self.peers
    snapshot = list(self.peers)
    for peer in snapshot:
        url = peer.get("url")
        if not url:
            logger.warning("[peer_sync] Peer config missing url — skipping")
            continue
        try:
            self._sync_from_peer(peer)
            self._discover_peers_from(peer)
        except Exception as exc:
            self._record_failure(url, exc)
        else:
            self._record_success(url)
|
||||||
|
|
||||||
|
def _discover_peers_from(self, peer: dict):
    """Fetch peer's known peer list and add any new nodes for mesh expansion.

    This is best-effort — failures are silently swallowed so they never
    interrupt the main sync pass.

    :param peer: peer config dict with "url" and optional "username"/"password".
    """
    url = peer.get("url", "").rstrip("/")
    username = peer.get("username")
    password = peer.get("password")
    # Basic auth only when a username is configured for this peer.
    auth = (username, password) if username else None
    try:
        # Short timeout: discovery must never stall the sync pass.
        resp = requests.get(f"{url}/internal/peers", auth=auth, timeout=5)
        if resp.status_code != 200:
            return
        remote_urls = resp.json()  # list of URL strings
        # URLs we already know, used to de-duplicate additions below.
        known_urls = {p.get("url") for p in self.peers}
        for remote_url in remote_urls:
            if remote_url and remote_url not in known_urls:
                # Inherit credentials from the introducing peer — in practice
                # all cluster nodes share the same peer_sync auth credentials.
                self.peers.append({
                    "url": remote_url,
                    "username": username,
                    "password": password,
                })
                known_urls.add(remote_url)
                logger.info(
                    f"[peer_sync] Discovered new peer {remote_url} via {url}"
                )
    except Exception:
        pass  # discovery is best-effort
|
||||||
|
|
||||||
|
def _sync_from_peer(self, peer: dict):
    """Pull newer zone_data from a single peer into the local database.

    Fetches the peer's zone index, and for every zone that is missing
    locally, has no local zone_data, or carries a newer zone_updated_at
    timestamp than ours, fetches the full zone_data and upserts it.
    Commits once at the end of the pass; the session is always closed.

    :param peer: peer config dict with "url" and optional "username"/"password".
    :raises: propagates network/parse errors to the caller (_sync_all),
        which records them as a peer failure.
    """
    url = peer.get("url", "").rstrip("/")
    username = peer.get("username")
    password = peer.get("password")
    auth = (username, password) if username else None

    # Fetch the peer's zone list
    resp = requests.get(f"{url}/internal/zones", auth=auth, timeout=10)
    if resp.status_code != 200:
        logger.warning(
            f"[peer_sync] {url}: /internal/zones returned {resp.status_code}"
        )
        return

    peer_zones = resp.json()  # [{domain, zone_updated_at, hostname, username}]
    if not peer_zones:
        logger.debug(f"[peer_sync] {url}: no zone_data on peer yet")
        return

    session = connect()
    try:
        synced = 0
        for entry in peer_zones:
            domain = entry.get("domain")
            if not domain:
                continue

            # Peer timestamps arrive as ISO-8601 strings (or None).
            peer_ts_str = entry.get("zone_updated_at")
            peer_ts = (
                datetime.datetime.fromisoformat(peer_ts_str)
                if peer_ts_str
                else None
            )

            local = session.execute(
                select(Domain).filter_by(domain=domain)
            ).scalar_one_or_none()

            # Sync when we have no record, no stored zone_data, or the
            # peer's copy is strictly newer than ours.
            needs_sync = (
                local is None
                or local.zone_data is None
                or (peer_ts and not local.zone_updated_at)
                or (
                    peer_ts
                    and local.zone_updated_at
                    and peer_ts > local.zone_updated_at
                )
            )

            if not needs_sync:
                continue

            # Fetch full zone_data from peer
            zresp = requests.get(
                f"{url}/internal/zones",
                params={"domain": domain},
                auth=auth,
                timeout=10,
            )
            if zresp.status_code != 200:
                logger.warning(
                    f"[peer_sync] {url}: could not fetch zone_data "
                    f"for {domain} (HTTP {zresp.status_code})"
                )
                continue

            zdata = zresp.json()
            zone_data = zdata.get("zone_data")
            if not zone_data:
                continue

            if local is None:
                # First time we see this zone: create the full record.
                local = Domain(
                    domain=domain,
                    hostname=entry.get("hostname"),
                    username=entry.get("username"),
                    zone_data=zone_data,
                    zone_updated_at=peer_ts,
                )
                session.add(local)
                logger.debug(
                    f"[peer_sync] {url}: created local record for {domain}"
                )
            else:
                # Existing record: only the zone payload and timestamp move.
                local.zone_data = zone_data
                local.zone_updated_at = peer_ts
                logger.debug(f"[peer_sync] {url}: updated zone_data for {domain}")
            synced += 1

        if synced:
            # Single commit for the whole pass.
            session.commit()
            logger.info(f"[peer_sync] Synced {synced} zone(s) from {url}")
        else:
            logger.debug(f"[peer_sync] {url}: already up to date")
    finally:
        session.close()
|
||||||
322
directdnsonly/app/reconciler.py
Executable file
322
directdnsonly/app/reconciler.py
Executable file
@@ -0,0 +1,322 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import datetime
|
||||||
|
import threading
|
||||||
|
from loguru import logger
|
||||||
|
from sqlalchemy import select
|
||||||
|
|
||||||
|
from directdnsonly.app.da import DirectAdminClient
|
||||||
|
from directdnsonly.app.db import connect
|
||||||
|
from directdnsonly.app.db.models import Domain
|
||||||
|
|
||||||
|
|
||||||
|
class ReconciliationWorker:
    """Periodically polls configured DirectAdmin servers and queues deletes
    for any zones in our DB that no longer exist in DirectAdmin.

    Also runs an Option C backend healing pass: for each zone with stored
    zone_data, checks every backend for presence and re-queues any that are
    missing (e.g. after a prolonged backend outage).

    Safety rules:
    - If a DA server is unreachable, skip it entirely — never delete on uncertainty
    - Only touches domains registered via DaDNS (present in our `domains` table)
    - Domains in CoreDNS but NOT in our DB are not our zones; left untouched
    - Pushes to the existing delete_queue so the full delete path is exercised
    """

    def __init__(
        self,
        delete_queue,
        reconciliation_config: dict,
        save_queue=None,
        backend_registry=None,
    ):
        """Configure the worker from the `reconciliation` config section.

        :param delete_queue: queue receiving orphan-zone delete jobs.
        :param reconciliation_config: settings dict (enabled, interval_minutes,
            directadmin_servers, verify_ssl, ipp, dry_run, initial_delay_minutes).
        :param save_queue: optional queue the healing pass re-pushes zones to.
        :param backend_registry: optional registry enumerating DNS backends.
        """
        self.delete_queue = delete_queue
        self.save_queue = save_queue
        self.backend_registry = backend_registry
        self.enabled = reconciliation_config.get("enabled", False)
        # Config is expressed in minutes; the wait loop works in seconds.
        self.interval_seconds = reconciliation_config.get("interval_minutes", 60) * 60
        self.servers = reconciliation_config.get("directadmin_servers") or []
        self.verify_ssl = reconciliation_config.get("verify_ssl", True)
        # Items-per-page when listing domains from DirectAdmin.
        self.ipp = int(reconciliation_config.get("ipp", 1000))
        # Dry-run logs orphans but never queues deletes.
        self.dry_run = bool(reconciliation_config.get("dry_run", False))
        # Optional stagger before the first pass (useful behind a LB).
        self._initial_delay = reconciliation_config.get("initial_delay_minutes", 0) * 60
        self._stop_event = threading.Event()
        self._thread = None
        # Statistics from the most recent pass, surfaced via get_status().
        self._last_run: dict = {}
|
||||||
|
|
||||||
|
def get_status(self) -> dict:
|
||||||
|
"""Return reconciler configuration and last-run statistics."""
|
||||||
|
return {
|
||||||
|
"enabled": self.enabled,
|
||||||
|
"alive": self.is_alive,
|
||||||
|
"dry_run": self.dry_run,
|
||||||
|
"interval_minutes": self.interval_seconds // 60,
|
||||||
|
"last_run": dict(self._last_run),
|
||||||
|
}
|
||||||
|
|
||||||
|
def start(self):
    """Start the background reconciliation thread.

    No-ops (with a log line) when the feature is disabled or when no
    DirectAdmin servers are configured. Otherwise spawns a daemon
    thread running _run and logs the effective mode and schedule.
    """
    if not self.enabled:
        logger.info("Reconciliation poller disabled — skipping")
        return
    if not self.servers:
        logger.warning(
            "Reconciliation enabled but no directadmin_servers configured"
        )
        return

    self._stop_event.clear()
    # Daemon thread: never blocks interpreter shutdown.
    self._thread = threading.Thread(
        target=self._run, daemon=True, name="reconciliation_worker"
    )
    self._thread.start()
    server_names = [s.get("hostname", "?") for s in self.servers]
    mode = "DRY-RUN" if self.dry_run else "LIVE"
    delay_str = (
        f", initial_delay: {self._initial_delay // 60}m"
        if self._initial_delay
        else ""
    )
    logger.info(
        f"Reconciliation poller started [{mode}] — "
        f"interval: {self.interval_seconds // 60}m"
        f"{delay_str}, "
        f"servers: {server_names}"
    )
    if self.dry_run:
        logger.warning(
            "[reconciler] DRY-RUN mode active — orphans will be logged but NOT queued for deletion"
        )
|
||||||
|
|
||||||
|
def stop(self):
    """Signal the worker thread to exit and wait briefly for it to stop."""
    self._stop_event.set()
    thread = self._thread
    if thread:
        # Bounded join: don't hang shutdown on a stuck pass.
        thread.join(timeout=10)
    logger.info("Reconciliation poller stopped")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_alive(self):
|
||||||
|
return self._thread is not None and self._thread.is_alive()
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Internal
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _run(self):
    """Thread body: optional initial delay, then periodic reconcile passes."""
    delay = self._initial_delay
    if delay > 0:
        logger.info(
            f"[reconciler] Initial delay {self._initial_delay // 60}m — "
            f"first reconciliation pass deferred"
        )
        # Abort immediately if stop() fires while we are still waiting.
        if self._stop_event.wait(timeout=delay):
            return
    logger.info("Reconciliation worker starting — running initial check now")
    while True:
        self._reconcile_all()
        if self._stop_event.wait(timeout=self.interval_seconds):
            return
|
||||||
|
|
||||||
|
def _reconcile_all(self):
    """Execute one full reconciliation pass.

    Phases:
      1. Poll every configured DirectAdmin server for its active domains.
      2. Compare the local DB against that map: backfill or correct the
         recorded master hostname, and queue deletes for orphans whose
         recorded master is a known server that no longer lists them.
      3. Optionally run the backend healing pass (_heal_backends).
      4. Store per-pass statistics in self._last_run for get_status().
    """
    started_at = datetime.datetime.utcnow()
    self._last_run = {"status": "running", "started_at": started_at.isoformat()}
    logger.info(
        f"[reconciler] Starting reconciliation pass across "
        f"{len(self.servers)} server(s)"
    )
    total_queued = 0
    da_servers_polled = 0
    da_servers_unreachable = 0
    migrated = 0
    backfilled = 0
    zones_in_db = 0

    # Build a map of all domains seen on all DA servers: domain -> hostname
    all_da_domains: dict = {}
    for server in self.servers:
        hostname = server.get("hostname")
        if not hostname:
            logger.warning("[reconciler] Server config missing hostname — skipping")
            continue
        try:
            client = DirectAdminClient(
                hostname=hostname,
                port=server.get("port", 2222),
                username=server.get("username"),
                password=server.get("password"),
                ssl=server.get("ssl", True),
                verify_ssl=self.verify_ssl,
            )
            da_servers_polled += 1
            # list_domains returns None when the server is unreachable;
            # those servers are only counted, never reconciled against.
            da_domains = client.list_domains(ipp=self.ipp)
            if da_domains is not None:
                for d in da_domains:
                    all_da_domains[d] = hostname
            else:
                da_servers_unreachable += 1
            logger.debug(
                f"[reconciler] {hostname}: "
                f"{len(da_domains) if da_domains else 0} active domain(s) in DA"
            )
        except Exception as exc:
            logger.error(f"[reconciler] Unexpected error polling {hostname}: {exc}")
            da_servers_unreachable += 1

    # Compare local DB against what DA reported; update masters and queue deletes
    session = connect()
    try:
        all_local_domains = session.execute(select(Domain)).scalars().all()
        zones_in_db = len(all_local_domains)
        known_servers = {s.get("hostname") for s in self.servers}
        for record in all_local_domains:
            domain = record.domain
            recorded_master = record.hostname
            actual_master = all_da_domains.get(domain)
            if actual_master:
                # Domain still exists somewhere in DA: fix up our record
                # of which server masters it.
                if not recorded_master:
                    logger.info(
                        f"[reconciler] Domain '{domain}' hostname backfilled: '{actual_master}'"
                    )
                    record.hostname = actual_master
                    backfilled += 1
                elif actual_master != recorded_master:
                    logger.warning(
                        f"[reconciler] Domain '{domain}' migrated: "
                        f"'{recorded_master}' -> '{actual_master}'. Updating local DB."
                    )
                    record.hostname = actual_master
                    migrated += 1
            else:
                # Not seen on any DA server. Only treat as an orphan when
                # its recorded master is a server we actually polled —
                # never delete on uncertainty.
                if recorded_master in known_servers:
                    if self.dry_run:
                        logger.warning(
                            f"[reconciler] [DRY-RUN] Would delete orphan: {record.domain} "
                            f"(master: {recorded_master})"
                        )
                    else:
                        self.delete_queue.put(
                            {
                                "domain": record.domain,
                                "hostname": record.hostname,
                                "username": record.username or "",
                                "source": "reconciler",
                            }
                        )
                        logger.debug(
                            f"[reconciler] Queued delete for orphan: {record.domain} "
                            f"(master: {recorded_master})"
                        )
                    # Counted in dry-run too; get_status zeroes orphans_queued.
                    total_queued += 1

        if migrated or backfilled:
            session.commit()
            if backfilled:
                logger.info(
                    f"[reconciler] {backfilled} domain(s) had missing hostname backfilled."
                )
            if migrated:
                logger.info(
                    f"[reconciler] {migrated} domain(s) migrated to new master."
                )
    finally:
        session.close()

    if self.dry_run:
        logger.info(
            f"[reconciler] Reconciliation pass complete [DRY-RUN] — "
            f"{total_queued} orphan(s) identified (none deleted)"
        )
    else:
        logger.info(
            f"[reconciler] Reconciliation pass complete — "
            f"{total_queued} domain(s) queued for deletion"
        )

    # Option C: heal backends that are missing zones
    zones_healed = 0
    if self.save_queue is not None and self.backend_registry is not None:
        zones_healed = self._heal_backends()

    completed_at = datetime.datetime.utcnow()
    self._last_run = {
        "status": "ok",
        "started_at": started_at.isoformat(),
        "completed_at": completed_at.isoformat(),
        "duration_seconds": round(
            (completed_at - started_at).total_seconds(), 1
        ),
        "da_servers_polled": da_servers_polled,
        "da_servers_unreachable": da_servers_unreachable,
        "zones_in_da": len(all_da_domains),
        "zones_in_db": zones_in_db,
        "orphans_found": total_queued,
        "orphans_queued": total_queued if not self.dry_run else 0,
        "hostnames_backfilled": backfilled,
        "hostnames_migrated": migrated,
        "zones_healed": zones_healed,
        "dry_run": self.dry_run,
    }
|
||||||
|
|
||||||
|
def _heal_backends(self) -> int:
    """Check every backend for zone presence and re-queue any zone that is
    missing from one or more backends, using the stored zone_data as the
    authoritative source. This corrects backends that missed pushes due to
    downtime without waiting for DirectAdmin to re-send the zone.

    Returns the number of zones identified as needing healing. Note that
    in dry-run mode zones are counted but not queued.
    """
    backends = self.backend_registry.get_available_backends()
    if not backends:
        return 0

    session = connect()
    healed = 0
    try:
        # Only zones with stored zone_data can be replayed to a backend.
        domains = session.execute(
            select(Domain).where(Domain.zone_data.isnot(None))
        ).scalars().all()
        if not domains:
            logger.debug(
                "[reconciler] Healing pass: no zone_data stored yet — skipping"
            )
            return 0
        for record in domains:
            missing = []
            for backend_name, backend in backends.items():
                try:
                    if not backend.zone_exists(record.domain):
                        missing.append(backend_name)
                except Exception as exc:
                    # A failed existence check is logged, not treated as
                    # "missing" — avoids re-pushing on a flaky backend.
                    logger.warning(
                        f"[reconciler] heal: zone_exists check failed for "
                        f"{record.domain} on {backend_name}: {exc}"
                    )

            if missing:
                mode = "[DRY-RUN] Would heal" if self.dry_run else "Healing"
                logger.warning(
                    f"[reconciler] {mode} — {record.domain} missing from "
                    f"{missing}; re-queuing with stored zone_data"
                )
                if not self.dry_run:
                    # Target only the backends that are missing the zone.
                    self.save_queue.put(
                        {
                            "domain": record.domain,
                            "hostname": record.hostname or "",
                            "username": record.username or "",
                            "zone_file": record.zone_data,
                            "failed_backends": missing,
                            "retry_count": 0,
                            "source": "reconciler_heal",
                        }
                    )
                healed += 1

        if healed:
            logger.info(
                f"[reconciler] Healing pass complete — "
                f"{healed} zone(s) re-queued for backend recovery"
            )
        else:
            logger.debug(
                "[reconciler] Healing pass complete — all backends consistent"
            )
    finally:
        session.close()
    return healed
|
||||||
56
directdnsonly/app/utils/__init__.py
Normal file
56
directdnsonly/app/utils/__init__.py
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
from loguru import logger
|
||||||
|
from sqlalchemy import select
|
||||||
|
|
||||||
|
from directdnsonly.app.db.models import *
|
||||||
|
from directdnsonly.app.db import connect
|
||||||
|
|
||||||
|
|
||||||
|
def check_zone_exists(zone_name):
    """Return True if zone_name is already present in the domain index."""
    session = connect()
    logger.debug("Checking if {} is present in the DB".format(zone_name))
    row = session.execute(select(Domain.id).filter_by(domain=zone_name)).first()
    domain_exists = row is not None
    logger.debug("Returned from query: {}".format(domain_exists))
    return domain_exists
|
||||||
|
|
||||||
|
|
||||||
|
def put_zone_index(zone_name, host_name, user_name):
    """Insert a new zone record into the domain index and commit it."""
    session = connect()
    logger.debug("Placed zone into database.. {}".format(str(zone_name)))
    record = Domain(domain=zone_name, hostname=host_name, username=user_name)
    session.add(record)
    session.commit()
|
||||||
|
|
||||||
|
|
||||||
|
def get_domain_record(zone_name):
    """Return the Domain record for zone_name, or None if not found"""
    session = connect()
    query = select(Domain).filter_by(domain=zone_name)
    return session.execute(query).scalar_one_or_none()
|
||||||
|
|
||||||
|
|
||||||
|
def check_parent_domain_owner(zone_name):
    """Return True if the immediate parent domain of zone_name exists in the DB"""
    # Strip the left-most label: "sub.example.com" -> "example.com".
    parent_domain = ".".join(zone_name.split(".")[1:])
    if not parent_domain:
        # Single-label name: no parent to look up.
        return False
    session = connect()
    logger.debug("Checking if parent domain {} exists in DB".format(parent_domain))
    row = session.execute(
        select(Domain.id).filter_by(domain=parent_domain)
    ).first()
    return row is not None
|
||||||
|
|
||||||
|
|
||||||
|
def get_parent_domain_record(zone_name):
    """Return the Domain record for the parent of zone_name, or None"""
    # Strip the left-most label to obtain the immediate parent.
    parent_domain = ".".join(zone_name.split(".")[1:])
    if not parent_domain:
        return None
    session = connect()
    query = select(Domain).filter_by(domain=parent_domain)
    return session.execute(query).scalar_one_or_none()
|
||||||
67
directdnsonly/app/utils/zone_parser.py
Normal file
67
directdnsonly/app/utils/zone_parser.py
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
from dns import zone, name
|
||||||
|
from dns.rdataclass import IN
|
||||||
|
from dns.exception import DNSException
|
||||||
|
from loguru import logger
|
||||||
|
|
||||||
|
|
||||||
|
def validate_and_normalize_zone(zone_data: str, domain_name: str) -> str:
    """
    Normalize zone file content and ensure proper origin handling

    Returns normalized zone data
    Raises ValueError on validation failure (wrapping the underlying
    dnspython DNSException)
    """
    # Ensure domain ends with dot
    if not domain_name.endswith("."):
        domain_name = f"{domain_name}."

    # Add $ORIGIN if missing
    # NOTE(review): substring check — a "$ORIGIN" appearing anywhere in the
    # data (even inside a comment) suppresses this prepend; confirm acceptable.
    if "$ORIGIN" not in zone_data:
        zone_data = f"$ORIGIN {domain_name}\n{zone_data}"

    # Add $TTL if missing
    if "$TTL" not in zone_data:
        zone_data = f"$TTL 300\n{zone_data}"

    # Validate the zone
    try:
        # check_origin=False: skip dnspython's SOA/NS-at-origin completeness check.
        zone.from_text(
            zone_data, origin=name.from_text(domain_name), check_origin=False
        )
        return zone_data
    except DNSException as e:
        logger.error(f"Zone validation failed: {e}")
        raise ValueError(f"Invalid zone data: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
def count_zone_records(zone_data: str, domain_name: str) -> int:
    """Count the number of individual DNS records in a parsed BIND zone file.

    This counts every individual resource record (each A, AAAA, MX, TXT, etc.)
    the same way the CoreDNS MySQL backend stores them — one row per record.

    Args:
        zone_data: The raw or normalized BIND zone file content
        domain_name: The domain name for the zone

    Returns:
        The total number of individual records in the zone, or -1 when the
        zone data cannot be parsed
    """
    if not domain_name.endswith("."):
        domain_name = f"{domain_name}."

    try:
        # check_origin=False: skip dnspython's SOA/NS-at-origin completeness check.
        dns_zone = zone.from_text(
            zone_data, origin=name.from_text(domain_name), check_origin=False
        )

        count = 0
        # Only class-IN records are counted.
        for _, _, rdata in dns_zone.iterate_rdatas():
            if rdata.rdclass == IN:
                count += 1

        logger.debug(f"Source zone {domain_name} contains {count} records")
        return count

    except DNSException as e:
        logger.error(f"Failed to count records for {domain_name}: {e}")
        return -1
|
||||||
86
directdnsonly/config/__init__.py
Normal file
86
directdnsonly/config/__init__.py
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
from vyper import v, Vyper
|
||||||
|
from loguru import logger
|
||||||
|
|
||||||
|
# from vyper.config import Config
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Dict
|
||||||
|
|
||||||
|
|
||||||
|
def load_config() -> Vyper:
    """Build and return the process-wide Vyper configuration object.

    Search order for app.yaml/app.yml: /etc/directdnsonly, the current
    working directory, ./config, then the config bundled next to this
    module. Environment variables prefixed with DADNS override file
    values. Defaults are registered for every required key so the app
    can start with no config file at all.

    Fixes over the previous revision: removed the duplicated
    app.listen_port default, collapsed the conflicting timezone defaults
    (the first of which was misspelled "Pacific/Aucland" and immediately
    shadowed), and the read failure log now includes the actual error.
    """
    # Initialize Vyper
    v.set_config_name("app")  # Looks for app.yaml/app.yml
    # User-supplied paths checked first so they override the bundled defaults
    v.add_config_path("/etc/directdnsonly")  # system-level mount
    v.add_config_path(".")  # CWD (e.g. /app when run directly)
    v.add_config_path("./config")  # docker-compose volume mount at /app/config
    # Bundled config colocated with this module — last-resort fallback
    v.add_config_path(str(Path(__file__).parent))
    v.set_env_prefix("DADNS")
    v.set_env_key_replacer("_", ".")
    v.automatic_env()

    # Top-level defaults
    v.set_default("log_level", "info")
    v.set_default("queue_location", "./data/queues")
    v.set_default("timezone", "Pacific/Auckland")

    # App defaults
    v.set_default("app.listen_port", 2222)
    v.set_default("app.proxy_support", True)
    v.set_default("app.proxy_support_base", "http://127.0.0.1")
    v.set_default("app.log_level", "debug")
    v.set_default("app.log_to", "file")
    v.set_default("app.ssl_enable", "false")
    v.set_default("app.token_valid_for_days", 30)
    v.set_default("app.queue_location", "conf/queues")
    v.set_default("app.auth_username", "directdnsonly")
    v.set_default("app.auth_password", "changeme")

    # DNS backend defaults
    v.set_default("dns.backends.bind.enabled", False)
    v.set_default("dns.backends.bind.zones_dir", "/etc/named/zones")
    v.set_default("dns.backends.bind.named_conf", "/etc/named.conf.local")

    v.set_default("dns.backends.nsd.enabled", False)
    v.set_default("dns.backends.nsd.zones_dir", "/etc/nsd/zones")
    v.set_default("dns.backends.nsd.nsd_conf", "/etc/nsd/nsd.conf.d/zones.conf")

    v.set_default("dns.backends.coredns_mysql.enabled", False)
    v.set_default("dns.backends.coredns_mysql.host", "localhost")
    v.set_default("dns.backends.coredns_mysql.port", 3306)
    v.set_default("dns.backends.coredns_mysql.database", "coredns")
    v.set_default("dns.backends.coredns_mysql.username", "coredns")
    v.set_default("dns.backends.coredns_mysql.password", "")
    v.set_default("dns.backends.coredns_mysql.table_name", "records")

    # Set Defaults Datastore
    v.set_default("datastore.type", "sqlite")
    v.set_default("datastore.port", 3306)
    v.set_default("datastore.db_location", "data/directdns.db")

    # Reconciliation poller defaults
    v.set_default("reconciliation.enabled", False)
    v.set_default("reconciliation.dry_run", False)
    v.set_default("reconciliation.interval_minutes", 60)
    v.set_default("reconciliation.verify_ssl", True)

    # Peer sync defaults
    v.set_default("peer_sync.enabled", False)
    v.set_default("peer_sync.interval_minutes", 15)
    v.set_default("peer_sync.auth_username", "peersync")
    v.set_default("peer_sync.auth_password", "changeme")

    # Read configuration; fall back to defaults when missing or unreadable
    try:
        if not v.read_in_config():
            logger.warning("No config file found, using defaults")
    except Exception as exc:
        # Surface the real failure (parse error, permissions, ...) instead
        # of mislabeling every problem as a missing file.
        logger.warning(f"Failed to read config file ({exc}) — using defaults")

    return v
|
||||||
|
|
||||||
|
|
||||||
|
# Global config instance
# Built once at import time; other modules do `from directdnsonly.config import config`.
config = load_config()
|
||||||
84
directdnsonly/config/app.yml
Normal file
84
directdnsonly/config/app.yml
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
---
|
||||||
|
timezone: Pacific/Auckland
|
||||||
|
log_level: INFO
|
||||||
|
queue_location: ./data/queues
|
||||||
|
|
||||||
|
# Application datastore — stores domain index and zone_data for healing/peer-sync.
|
||||||
|
# SQLite (default) requires no extra dependencies and is fine for single-node setups.
|
||||||
|
# MySQL is recommended for multi-node deployments with a shared datastore.
|
||||||
|
datastore:
|
||||||
|
type: sqlite
|
||||||
|
db_location: ./data/directdnsonly.db
|
||||||
|
# --- MySQL ---
|
||||||
|
# type: mysql
|
||||||
|
# host: "127.0.0.1"
|
||||||
|
# port: "3306"
|
||||||
|
# name: "directdnsonly"
|
||||||
|
# user: "directdnsonly"
|
||||||
|
# pass: "changeme"
|
||||||
|
|
||||||
|
app:
|
||||||
|
auth_username: directdnsonly
|
||||||
|
auth_password: changeme # Override via DADNS_APP_AUTH_PASSWORD env var
|
||||||
|
|
||||||
|
# Reconciliation poller — queries each DA server and removes orphaned zones
|
||||||
|
# Disabled by default. Only touches zones registered via DaDNS (in our DB).
|
||||||
|
# If a DA server is unreachable, that server is skipped entirely.
|
||||||
|
#reconciliation:
|
||||||
|
# enabled: true
|
||||||
|
# dry_run: true # log orphans but do NOT queue deletes — safe first-run mode
|
||||||
|
# interval_minutes: 60
|
||||||
|
# initial_delay_minutes: 0 # stagger first run when running multiple receivers behind a LB
|
||||||
|
# # e.g. receiver-1: 0, receiver-2: 30 (half the interval)
|
||||||
|
# verify_ssl: true # set false for self-signed DA certs
|
||||||
|
# ipp: 1000 # items per page when polling DA (default 1000)
|
||||||
|
# directadmin_servers:
|
||||||
|
# - hostname: da1.example.com
|
||||||
|
# port: 2222
|
||||||
|
# username: admin
|
||||||
|
# password: secret
|
||||||
|
# ssl: true
|
||||||
|
# - hostname: da2.example.com
|
||||||
|
# port: 2222
|
||||||
|
# username: admin
|
||||||
|
# password: secret
|
||||||
|
# ssl: true
|
||||||
|
|
||||||
|
# Peer sync — exchange zone_data between directdnsonly instances
|
||||||
|
# Enables eventual consistency without a shared datastore.
|
||||||
|
# If a peer is offline, the sync is silently skipped and retried next interval.
|
||||||
|
# Use the same credentials as the peer's app.auth_username / auth_password.
|
||||||
|
#peer_sync:
|
||||||
|
# enabled: true
|
||||||
|
# interval_minutes: 15
|
||||||
|
# peers:
|
||||||
|
# - url: http://ddo-2:2222 # URL of the peer directdnsonly instance
|
||||||
|
# username: directdnsonly
|
||||||
|
# password: changeme
|
||||||
|
|
||||||
|
dns:
|
||||||
|
default_backend: bind
|
||||||
|
backends:
|
||||||
|
bind:
|
||||||
|
type: bind
|
||||||
|
enabled: true
|
||||||
|
zones_dir: ./data/zones
|
||||||
|
named_conf: ./data/named.conf.include
|
||||||
|
coredns_dc1:
|
||||||
|
type: coredns_mysql
|
||||||
|
enabled: true
|
||||||
|
host: "mysql-dc1"
|
||||||
|
port: 3306
|
||||||
|
database: "coredns"
|
||||||
|
username: "coredns"
|
||||||
|
password: "coredns123"
|
||||||
|
table_name: "records"
|
||||||
|
coredns_dc2:
|
||||||
|
type: coredns_mysql
|
||||||
|
enabled: true
|
||||||
|
host: "mysql-dc2"
|
||||||
|
port: 3306
|
||||||
|
database: "coredns"
|
||||||
|
username: "coredns"
|
||||||
|
password: "coredns123"
|
||||||
|
table_name: "records"
|
||||||
141
directdnsonly/main.py
Normal file
141
directdnsonly/main.py
Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
from loguru import logger
|
||||||
|
import cherrypy
|
||||||
|
from app.backends import BackendRegistry
|
||||||
|
from app.api.admin import DNSAdminAPI
|
||||||
|
from app.api.health import HealthAPI
|
||||||
|
from app.api.internal import InternalAPI
|
||||||
|
from app.api.status import StatusAPI
|
||||||
|
from app import configure_logging
|
||||||
|
from worker import WorkerManager
|
||||||
|
from directdnsonly.config import config
|
||||||
|
from directdnsonly.app.db import connect
|
||||||
|
import importlib.metadata
|
||||||
|
|
||||||
|
# Version string of the installed directdnsonly package (from its metadata).
app_version = importlib.metadata.version("directdnsonly")
|
||||||
|
|
||||||
|
|
||||||
|
class Root:
    """Empty placeholder class used as the CherryPy application root object."""
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
try:
|
||||||
|
# Initialize logging
|
||||||
|
configure_logging()
|
||||||
|
logger.info("Starting DaDNS server initialization")
|
||||||
|
|
||||||
|
# Initialize backend registry
|
||||||
|
registry = BackendRegistry()
|
||||||
|
available_backends = registry.get_available_backends()
|
||||||
|
logger.info(f"Available backend instances: {list(available_backends.keys())}")
|
||||||
|
|
||||||
|
global session
|
||||||
|
try:
|
||||||
|
session = connect(config.get("datastore.type"))
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(str(e))
|
||||||
|
print("ERROR: " + str(e))
|
||||||
|
exit(1)
|
||||||
|
logger.info("Database Connected!")
|
||||||
|
|
||||||
|
# Setup worker manager
|
||||||
|
reconciliation_config = config.get("reconciliation") or {}
|
||||||
|
peer_sync_config = config.get("peer_sync") or {}
|
||||||
|
worker_manager = WorkerManager(
|
||||||
|
queue_path=config.get("queue_location"),
|
||||||
|
backend_registry=registry,
|
||||||
|
reconciliation_config=reconciliation_config,
|
||||||
|
peer_sync_config=peer_sync_config,
|
||||||
|
)
|
||||||
|
worker_manager.start()
|
||||||
|
logger.info(
|
||||||
|
f"Worker manager started with queue path: {config.get('queue_location')}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Configure CherryPy
|
||||||
|
user_password_dict = {
|
||||||
|
config.get_string("app.auth_username"): config.get_string(
|
||||||
|
"app.auth_password"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
check_password = cherrypy.lib.auth_basic.checkpassword_dict(user_password_dict)
|
||||||
|
|
||||||
|
cherrypy.config.update(
|
||||||
|
{
|
||||||
|
"server.socket_host": "0.0.0.0",
|
||||||
|
"server.socket_port": config.get_int("app.listen_port"),
|
||||||
|
"tools.proxy.on": config.get_bool("app.proxy_support"),
|
||||||
|
"tools.proxy.base": config.get_string("app.proxy_support_base"),
|
||||||
|
"tools.auth_basic.on": True,
|
||||||
|
"tools.auth_basic.realm": "dadns",
|
||||||
|
"tools.auth_basic.checkpassword": check_password,
|
||||||
|
"tools.response_headers.on": True,
|
||||||
|
"tools.response_headers.headers": [
|
||||||
|
("Server", "DirectDNS v" + app_version)
|
||||||
|
],
|
||||||
|
"environment": config.get("environment"),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
if config.get_bool("app.ssl_enable"):
|
||||||
|
cherrypy.config.update(
|
||||||
|
{
|
||||||
|
"server.ssl_module": "builtin",
|
||||||
|
"server.ssl_certificate": config.get("app.ssl_cert"),
|
||||||
|
"server.ssl_private_key": config.get("app.ssl_key"),
|
||||||
|
"server.ssl_certificate_chain": config.get("ssl_bundle"),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
# cherrypy.log.error_log.propagate = False
|
||||||
|
if config.get_string("app.log_level").upper() != "DEBUG":
|
||||||
|
cherrypy.log.access_log.propagate = False
|
||||||
|
|
||||||
|
# Peer sync auth — separate credentials from the DA-facing API so a
|
||||||
|
# compromised peer node cannot push zones or access the admin endpoints.
|
||||||
|
peer_user_password_dict = {
|
||||||
|
config.get_string("peer_sync.auth_username"): config.get_string(
|
||||||
|
"peer_sync.auth_password"
|
||||||
|
)
|
||||||
|
}
|
||||||
|
peer_check_password = cherrypy.lib.auth_basic.checkpassword_dict(
|
||||||
|
peer_user_password_dict
|
||||||
|
)
|
||||||
|
|
||||||
|
# Mount applications
|
||||||
|
root = Root()
|
||||||
|
root = DNSAdminAPI(
|
||||||
|
save_queue=worker_manager.save_queue,
|
||||||
|
delete_queue=worker_manager.delete_queue,
|
||||||
|
backend_registry=registry,
|
||||||
|
)
|
||||||
|
root.health = HealthAPI(registry)
|
||||||
|
root.internal = InternalAPI(peer_syncer=worker_manager._peer_syncer)
|
||||||
|
root.status = StatusAPI(worker_manager)
|
||||||
|
|
||||||
|
# Add queue status endpoint (debug)
|
||||||
|
root.queue_status = lambda: worker_manager.queue_status()
|
||||||
|
|
||||||
|
# Override auth for /internal so peers use their own credentials
|
||||||
|
cherrypy.tree.mount(root, "/", config={
|
||||||
|
"/internal": {
|
||||||
|
"tools.auth_basic.checkpassword": peer_check_password,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
cherrypy.engine.start()
|
||||||
|
logger.success(f"Server started on port {config.get_int('app.listen_port')}")
|
||||||
|
|
||||||
|
# Add shutdown handler
|
||||||
|
cherrypy.engine.subscribe("stop", worker_manager.stop)
|
||||||
|
|
||||||
|
cherrypy.engine.block()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.critical(f"Server startup failed: {e}")
|
||||||
|
if "worker_manager" in locals():
|
||||||
|
worker_manager.stop()
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
521
directdnsonly/worker.py
Normal file
521
directdnsonly/worker.py
Normal file
@@ -0,0 +1,521 @@
|
|||||||
|
import datetime
|
||||||
|
import os
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||||
|
from loguru import logger
|
||||||
|
from persistqueue import Queue
|
||||||
|
from persistqueue.exceptions import Empty
|
||||||
|
from sqlalchemy import select
|
||||||
|
|
||||||
|
from app.utils import check_zone_exists, put_zone_index
|
||||||
|
from app.utils.zone_parser import count_zone_records
|
||||||
|
from directdnsonly.app.db.models import Domain
|
||||||
|
from directdnsonly.app.db import connect
|
||||||
|
from directdnsonly.app.reconciler import ReconciliationWorker
|
||||||
|
from directdnsonly.app.peer_sync import PeerSyncWorker
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Retry configuration
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
MAX_RETRIES = 5
|
||||||
|
# Seconds to wait before each retry attempt (exponential-ish backoff)
|
||||||
|
BACKOFF_SECONDS = [30, 120, 300, 900, 1800] # 30s, 2m, 5m, 15m, 30m
|
||||||
|
RETRY_DRAIN_INTERVAL = 30 # how often the retry drain thread wakes
|
||||||
|
|
||||||
|
|
||||||
|
class WorkerManager:
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
queue_path: str,
|
||||||
|
backend_registry,
|
||||||
|
reconciliation_config: dict = None,
|
||||||
|
peer_sync_config: dict = None,
|
||||||
|
):
|
||||||
|
self.queue_path = queue_path
|
||||||
|
self.backend_registry = backend_registry
|
||||||
|
self._running = False
|
||||||
|
self._save_thread = None
|
||||||
|
self._delete_thread = None
|
||||||
|
self._retry_thread = None
|
||||||
|
self._reconciler = None
|
||||||
|
self._peer_syncer = None
|
||||||
|
self._reconciliation_config = reconciliation_config or {}
|
||||||
|
self._peer_sync_config = peer_sync_config or {}
|
||||||
|
self._dead_letter_count = 0
|
||||||
|
|
||||||
|
try:
|
||||||
|
os.makedirs(queue_path, exist_ok=True)
|
||||||
|
self.save_queue = Queue(f"{queue_path}/save")
|
||||||
|
self.delete_queue = Queue(f"{queue_path}/delete")
|
||||||
|
self.retry_queue = Queue(f"{queue_path}/retry")
|
||||||
|
logger.success(f"Initialized queues at {queue_path}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.critical(f"Failed to initialize queues: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Save queue worker
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _process_save_queue(self):
|
||||||
|
logger.info("Save queue worker started")
|
||||||
|
session = connect()
|
||||||
|
|
||||||
|
while self._running:
|
||||||
|
# Block until at least one item is available
|
||||||
|
try:
|
||||||
|
item = self.save_queue.get(block=True, timeout=5)
|
||||||
|
except Empty:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Open a batch and keep processing until the queue is empty
|
||||||
|
batch_start = time.monotonic()
|
||||||
|
batch_processed = 0
|
||||||
|
batch_failed = 0
|
||||||
|
logger.info("📥 Batch started")
|
||||||
|
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
domain = item.get("domain", "unknown")
|
||||||
|
is_retry = item.get("source") in ("retry", "reconciler_heal")
|
||||||
|
target_backends = item.get("failed_backends") # None = all backends
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
f"Processing zone update for {domain}"
|
||||||
|
+ (f" [retry #{item.get('retry_count', 0)}]" if is_retry else "")
|
||||||
|
+ (f" [backends: {target_backends}]" if target_backends else "")
|
||||||
|
)
|
||||||
|
|
||||||
|
if not is_retry and not check_zone_exists(domain):
|
||||||
|
put_zone_index(domain, item.get("hostname"), item.get("username"))
|
||||||
|
|
||||||
|
if not all(k in item for k in ["domain", "zone_file"]):
|
||||||
|
logger.error(f"Invalid queue item: {item}")
|
||||||
|
self.save_queue.task_done()
|
||||||
|
batch_failed += 1
|
||||||
|
else:
|
||||||
|
backends = self.backend_registry.get_available_backends()
|
||||||
|
if target_backends:
|
||||||
|
backends = {
|
||||||
|
k: v for k, v in backends.items() if k in target_backends
|
||||||
|
}
|
||||||
|
if not backends:
|
||||||
|
logger.warning("No target backends available for this item!")
|
||||||
|
self.save_queue.task_done()
|
||||||
|
batch_failed += 1
|
||||||
|
else:
|
||||||
|
if len(backends) > 1:
|
||||||
|
failed = self._process_backends_parallel(backends, item, session)
|
||||||
|
else:
|
||||||
|
failed = set()
|
||||||
|
for backend_name, backend in backends.items():
|
||||||
|
if not self._process_single_backend(
|
||||||
|
backend_name, backend, item, session
|
||||||
|
):
|
||||||
|
failed.add(backend_name)
|
||||||
|
|
||||||
|
if failed:
|
||||||
|
self._schedule_retry(item, failed)
|
||||||
|
batch_failed += 1
|
||||||
|
else:
|
||||||
|
self._store_zone_data(session, domain, item["zone_file"])
|
||||||
|
batch_processed += 1
|
||||||
|
|
||||||
|
self.save_queue.task_done()
|
||||||
|
logger.debug(f"Completed processing for {domain}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Unexpected worker error processing {item.get('domain', '?')}: {e}")
|
||||||
|
batch_failed += 1
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
# Check immediately for the next item — keep batch open while
|
||||||
|
# more work is queued; close it only when the queue is empty.
|
||||||
|
try:
|
||||||
|
item = self.save_queue.get_nowait()
|
||||||
|
except Empty:
|
||||||
|
break
|
||||||
|
|
||||||
|
elapsed = time.monotonic() - batch_start
|
||||||
|
total = batch_processed + batch_failed
|
||||||
|
rate = batch_processed / elapsed if elapsed > 0 else 0
|
||||||
|
logger.success(
|
||||||
|
f"📦 Batch complete — {batch_processed}/{total} zone(s) "
|
||||||
|
f"processed successfully in {elapsed:.1f}s "
|
||||||
|
f"({rate:.1f} zones/sec)"
|
||||||
|
+ (f", {batch_failed} failed" if batch_failed else "")
|
||||||
|
)
|
||||||
|
|
||||||
|
def _process_single_backend(self, backend_name, backend, item, session) -> bool:
|
||||||
|
"""Write a zone to one backend. Returns True on success, False on failure."""
|
||||||
|
try:
|
||||||
|
if backend.write_zone(item["domain"], item["zone_file"]):
|
||||||
|
logger.debug(f"Successfully updated {item['domain']} in {backend_name}")
|
||||||
|
if backend.get_name() == "bind":
|
||||||
|
backend.update_named_conf(
|
||||||
|
[d.domain for d in session.execute(select(Domain)).scalars().all()]
|
||||||
|
)
|
||||||
|
backend.reload_zone()
|
||||||
|
else:
|
||||||
|
backend.reload_zone(zone_name=item["domain"])
|
||||||
|
self._verify_backend_record_count(
|
||||||
|
backend_name, backend, item["domain"], item["zone_file"]
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to update {item['domain']} in {backend_name}")
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error in {backend_name}: {str(e)}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _process_backends_parallel(self, backends, item, session) -> set:
|
||||||
|
"""Write a zone to multiple backends concurrently.
|
||||||
|
Returns a set of backend names that failed."""
|
||||||
|
start_time = time.monotonic()
|
||||||
|
failed = set()
|
||||||
|
with ThreadPoolExecutor(
|
||||||
|
max_workers=len(backends), thread_name_prefix="backend"
|
||||||
|
) as executor:
|
||||||
|
futures = {
|
||||||
|
executor.submit(
|
||||||
|
self._process_single_backend, backend_name, backend, item, session
|
||||||
|
): backend_name
|
||||||
|
for backend_name, backend in backends.items()
|
||||||
|
}
|
||||||
|
for future in as_completed(futures):
|
||||||
|
backend_name = futures[future]
|
||||||
|
try:
|
||||||
|
success = future.result()
|
||||||
|
if not success:
|
||||||
|
failed.add(backend_name)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Unhandled error in backend {backend_name}: {e}")
|
||||||
|
failed.add(backend_name)
|
||||||
|
elapsed = (time.monotonic() - start_time) * 1000
|
||||||
|
logger.debug(
|
||||||
|
f"Parallel processing of {item['domain']} across "
|
||||||
|
f"{len(backends)} backends completed in {elapsed:.0f}ms"
|
||||||
|
)
|
||||||
|
return failed
|
||||||
|
|
||||||
|
def _schedule_retry(self, item: dict, failed_backends: set):
|
||||||
|
"""Push a failed write onto the retry queue with exponential backoff.
|
||||||
|
Discards to dead-letter after MAX_RETRIES attempts."""
|
||||||
|
retry_count = item.get("retry_count", 0) + 1
|
||||||
|
if retry_count > MAX_RETRIES:
|
||||||
|
self._dead_letter_count += 1
|
||||||
|
logger.error(
|
||||||
|
f"[retry] Dead-letter: {item['domain']} failed on "
|
||||||
|
f"{failed_backends} after {MAX_RETRIES} attempts — giving up"
|
||||||
|
)
|
||||||
|
return
|
||||||
|
delay = BACKOFF_SECONDS[min(retry_count - 1, len(BACKOFF_SECONDS) - 1)]
|
||||||
|
retry_item = {
|
||||||
|
**item,
|
||||||
|
"failed_backends": list(failed_backends),
|
||||||
|
"retry_count": retry_count,
|
||||||
|
"retry_after": time.time() + delay,
|
||||||
|
"source": "retry",
|
||||||
|
}
|
||||||
|
self.retry_queue.put(retry_item)
|
||||||
|
logger.warning(
|
||||||
|
f"[retry] {item['domain']} → {list(failed_backends)} "
|
||||||
|
f"scheduled for retry #{retry_count} in {delay}s"
|
||||||
|
)
|
||||||
|
|
||||||
|
def _store_zone_data(self, session, domain: str, zone_file: str):
|
||||||
|
"""Persist the latest zone file content to the domain DB record."""
|
||||||
|
try:
|
||||||
|
record = session.execute(
|
||||||
|
select(Domain).filter_by(domain=domain)
|
||||||
|
).scalar_one_or_none()
|
||||||
|
if record:
|
||||||
|
record.zone_data = zone_file
|
||||||
|
record.zone_updated_at = datetime.datetime.utcnow()
|
||||||
|
session.commit()
|
||||||
|
except Exception as exc:
|
||||||
|
logger.warning(f"[worker] Could not store zone_data for {domain}: {exc}")
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Retry drain worker
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _process_retry_queue(self):
|
||||||
|
"""Periodically drain the retry queue and re-feed ready items to the
|
||||||
|
save queue. Items not yet due are put back onto the retry queue."""
|
||||||
|
logger.info("Retry drain worker started")
|
||||||
|
while self._running:
|
||||||
|
time.sleep(RETRY_DRAIN_INTERVAL)
|
||||||
|
now = time.time()
|
||||||
|
pending = []
|
||||||
|
# Drain all current retry items into memory
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
pending.append(self.retry_queue.get_nowait())
|
||||||
|
self.retry_queue.task_done()
|
||||||
|
except Empty:
|
||||||
|
break
|
||||||
|
|
||||||
|
if not pending:
|
||||||
|
continue
|
||||||
|
|
||||||
|
ready = [i for i in pending if i.get("retry_after", 0) <= now]
|
||||||
|
not_ready = [i for i in pending if i.get("retry_after", 0) > now]
|
||||||
|
|
||||||
|
for item in not_ready:
|
||||||
|
self.retry_queue.put(item)
|
||||||
|
|
||||||
|
for item in ready:
|
||||||
|
logger.info(
|
||||||
|
f"[retry] Re-queuing {item['domain']} → "
|
||||||
|
f"{item.get('failed_backends')} "
|
||||||
|
f"(attempt #{item.get('retry_count', '?')})"
|
||||||
|
)
|
||||||
|
self.save_queue.put(item)
|
||||||
|
|
||||||
|
if ready:
|
||||||
|
logger.debug(
|
||||||
|
f"[retry] Drain: {len(ready)} item(s) ready, "
|
||||||
|
f"{len(not_ready)} still pending"
|
||||||
|
)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Delete queue worker
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _process_delete_queue(self):
|
||||||
|
logger.info("Delete queue worker started")
|
||||||
|
session = connect()
|
||||||
|
|
||||||
|
while self._running:
|
||||||
|
try:
|
||||||
|
item = self.delete_queue.get(block=True, timeout=5)
|
||||||
|
domain = item.get("domain")
|
||||||
|
hostname = item.get("hostname", "")
|
||||||
|
|
||||||
|
logger.debug(f"Processing delete for {domain}")
|
||||||
|
|
||||||
|
record = session.execute(
|
||||||
|
select(Domain).filter_by(domain=domain)
|
||||||
|
).scalar_one_or_none()
|
||||||
|
if not record:
|
||||||
|
logger.warning(f"Domain {domain} not found in DB — skipping delete")
|
||||||
|
self.delete_queue.task_done()
|
||||||
|
continue
|
||||||
|
|
||||||
|
if record.hostname and record.hostname != hostname:
|
||||||
|
logger.warning(
|
||||||
|
f"Hostname mismatch for {domain}: registered on "
|
||||||
|
f"{record.hostname}, delete requested from {hostname} — rejected"
|
||||||
|
)
|
||||||
|
self.delete_queue.task_done()
|
||||||
|
continue
|
||||||
|
if not record.hostname:
|
||||||
|
logger.warning(
|
||||||
|
f"No origin hostname stored for {domain} — "
|
||||||
|
f"skipping ownership check, proceeding with delete"
|
||||||
|
)
|
||||||
|
|
||||||
|
backends = self.backend_registry.get_available_backends()
|
||||||
|
remaining_domains = [
|
||||||
|
d.domain for d in session.execute(select(Domain)).scalars().all()
|
||||||
|
]
|
||||||
|
delete_success = True
|
||||||
|
|
||||||
|
if not backends:
|
||||||
|
logger.warning(
|
||||||
|
f"No active backends — {domain} will be removed from DB only"
|
||||||
|
)
|
||||||
|
elif len(backends) > 1:
|
||||||
|
results = []
|
||||||
|
with ThreadPoolExecutor(max_workers=len(backends)) as executor:
|
||||||
|
futures = {
|
||||||
|
executor.submit(
|
||||||
|
self._delete_single_backend,
|
||||||
|
backend_name,
|
||||||
|
backend,
|
||||||
|
domain,
|
||||||
|
remaining_domains,
|
||||||
|
): backend_name
|
||||||
|
for backend_name, backend in backends.items()
|
||||||
|
}
|
||||||
|
for future in as_completed(futures):
|
||||||
|
backend_name = futures[future]
|
||||||
|
try:
|
||||||
|
results.append(future.result())
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(
|
||||||
|
f"Unhandled error deleting from {backend_name}: {e}"
|
||||||
|
)
|
||||||
|
results.append(False)
|
||||||
|
delete_success = all(results)
|
||||||
|
else:
|
||||||
|
for backend_name, backend in backends.items():
|
||||||
|
if not self._delete_single_backend(
|
||||||
|
backend_name, backend, domain, remaining_domains
|
||||||
|
):
|
||||||
|
delete_success = False
|
||||||
|
|
||||||
|
if delete_success:
|
||||||
|
session.delete(record)
|
||||||
|
session.commit()
|
||||||
|
logger.success(f"Delete completed for {domain}")
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"Delete failed for {domain} on one or more backends — "
|
||||||
|
f"DB record retained"
|
||||||
|
)
|
||||||
|
self.delete_queue.task_done()
|
||||||
|
|
||||||
|
except Empty:
|
||||||
|
continue
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Unexpected delete worker error: {e}")
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
def _delete_single_backend(
|
||||||
|
self, backend_name, backend, domain, remaining_domains
|
||||||
|
) -> bool:
|
||||||
|
"""Delete a zone from one backend. Returns True on success."""
|
||||||
|
try:
|
||||||
|
if backend.delete_zone(domain):
|
||||||
|
logger.debug(f"Deleted {domain} from {backend_name}")
|
||||||
|
if backend.get_name() == "bind":
|
||||||
|
backend.update_named_conf(remaining_domains)
|
||||||
|
backend.reload_zone()
|
||||||
|
else:
|
||||||
|
backend.reload_zone(zone_name=domain)
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
logger.error(f"Failed to delete {domain} from {backend_name}")
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error deleting {domain} from {backend_name}: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Record count verification
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def _verify_backend_record_count(self, backend_name, backend, zone_name, zone_data):
|
||||||
|
try:
|
||||||
|
expected = count_zone_records(zone_data, zone_name)
|
||||||
|
if expected < 0:
|
||||||
|
logger.warning(
|
||||||
|
f"[{backend_name}] Could not parse source zone for "
|
||||||
|
f"{zone_name} — skipping record count verification"
|
||||||
|
)
|
||||||
|
return
|
||||||
|
|
||||||
|
matches, actual = backend.verify_zone_record_count(zone_name, expected)
|
||||||
|
if matches:
|
||||||
|
return
|
||||||
|
|
||||||
|
if actual > expected:
|
||||||
|
logger.warning(
|
||||||
|
f"[{backend_name}] Backend has {actual - expected} extra "
|
||||||
|
f"record(s) for {zone_name} — reconciling"
|
||||||
|
)
|
||||||
|
success, removed = backend.reconcile_zone_records(zone_name, zone_data)
|
||||||
|
if success and removed > 0:
|
||||||
|
matches, new_count = backend.verify_zone_record_count(
|
||||||
|
zone_name, expected
|
||||||
|
)
|
||||||
|
if matches:
|
||||||
|
logger.success(
|
||||||
|
f"[{backend_name}] Reconciliation successful for "
|
||||||
|
f"{zone_name}: removed {removed} extra record(s)"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.error(
|
||||||
|
f"[{backend_name}] Reconciliation for {zone_name} "
|
||||||
|
f"removed {removed} record(s) but count still mismatched: "
|
||||||
|
f"expected {expected}, got {new_count}"
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.warning(
|
||||||
|
f"[{backend_name}] Backend has fewer records than source "
|
||||||
|
f"for {zone_name} (expected {expected}, got {actual}) — "
|
||||||
|
f"next zone push from DirectAdmin should correct this"
|
||||||
|
)
|
||||||
|
|
||||||
|
except NotImplementedError:
|
||||||
|
logger.debug(
|
||||||
|
f"[{backend_name}] Record count verification not supported — skipping"
|
||||||
|
)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(
|
||||||
|
f"[{backend_name}] Error during record count verification "
|
||||||
|
f"for {zone_name}: {e}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
# Lifecycle
|
||||||
|
# ------------------------------------------------------------------
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
if self._running:
|
||||||
|
return
|
||||||
|
self._running = True
|
||||||
|
|
||||||
|
self._save_thread = threading.Thread(
|
||||||
|
target=self._process_save_queue, daemon=True, name="save_queue_worker"
|
||||||
|
)
|
||||||
|
self._delete_thread = threading.Thread(
|
||||||
|
target=self._process_delete_queue, daemon=True, name="delete_queue_worker"
|
||||||
|
)
|
||||||
|
self._retry_thread = threading.Thread(
|
||||||
|
target=self._process_retry_queue, daemon=True, name="retry_drain_worker"
|
||||||
|
)
|
||||||
|
self._save_thread.start()
|
||||||
|
self._delete_thread.start()
|
||||||
|
self._retry_thread.start()
|
||||||
|
logger.info(f"Started worker threads: save, delete, retry_drain")
|
||||||
|
|
||||||
|
self._reconciler = ReconciliationWorker(
|
||||||
|
delete_queue=self.delete_queue,
|
||||||
|
save_queue=self.save_queue,
|
||||||
|
backend_registry=self.backend_registry,
|
||||||
|
reconciliation_config=self._reconciliation_config,
|
||||||
|
)
|
||||||
|
self._reconciler.start()
|
||||||
|
|
||||||
|
self._peer_syncer = PeerSyncWorker(self._peer_sync_config)
|
||||||
|
self._peer_syncer.start()
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
self._running = False
|
||||||
|
if self._reconciler:
|
||||||
|
self._reconciler.stop()
|
||||||
|
if self._peer_syncer:
|
||||||
|
self._peer_syncer.stop()
|
||||||
|
for thread in (self._save_thread, self._delete_thread, self._retry_thread):
|
||||||
|
if thread:
|
||||||
|
thread.join(timeout=5)
|
||||||
|
logger.info("Workers stopped")
|
||||||
|
|
||||||
|
def queue_status(self):
|
||||||
|
reconciler = (
|
||||||
|
self._reconciler.get_status()
|
||||||
|
if self._reconciler
|
||||||
|
else {"enabled": False, "alive": False, "last_run": {}}
|
||||||
|
)
|
||||||
|
peer_sync = (
|
||||||
|
self._peer_syncer.get_peer_status()
|
||||||
|
if self._peer_syncer
|
||||||
|
else {"enabled": False, "alive": False, "peers": [], "total": 0, "healthy": 0, "degraded": 0}
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
"save_queue_size": self.save_queue.qsize(),
|
||||||
|
"delete_queue_size": self.delete_queue.qsize(),
|
||||||
|
"retry_queue_size": self.retry_queue.qsize(),
|
||||||
|
"dead_letters": self._dead_letter_count,
|
||||||
|
"save_worker_alive": bool(self._save_thread and self._save_thread.is_alive()),
|
||||||
|
"delete_worker_alive": bool(self._delete_thread and self._delete_thread.is_alive()),
|
||||||
|
"retry_worker_alive": bool(self._retry_thread and self._retry_thread.is_alive()),
|
||||||
|
"reconciler": reconciler,
|
||||||
|
"peer_sync": peer_sync,
|
||||||
|
}
|
||||||
@@ -1,48 +1,52 @@
|
|||||||
version: '3.7'
|
version: '3.8'
|
||||||
services:
|
|
||||||
app:
|
|
||||||
image: registry.dockerprod.ultrafast.co.nz/uff/apikeyhandler:0.10
|
|
||||||
networks:
|
|
||||||
- traefik-net
|
|
||||||
volumes:
|
|
||||||
- /etc/localtime:/etc/localtime:ro # Mount Timezone config to container
|
|
||||||
- /data/swarm-vols/apikeyhandler:/opt/apikeyhandler/config # Store Config on Persistent drive shared between nodes
|
|
||||||
deploy:
|
|
||||||
mode: replicated
|
|
||||||
replicas: 1
|
|
||||||
placement:
|
|
||||||
constraints:
|
|
||||||
- node.role == worker # Place this service on Worker Nodes alternatively may specify manager if you want service on manager node.
|
|
||||||
labels:
|
|
||||||
- "traefik.http.routers.apikeyauth.rule=Host(`apiauth-internal.dockertest.ultrafast.co.nz`)" # This label creates a route Traefik will listen on
|
|
||||||
- "traefik.http.routers.apikeyauth.tls=true" # Enable TLS, in this example using default TLS cert
|
|
||||||
- "traefik.http.services.apikeyauth.loadbalancer.server.port=8080" # Set Port to proxy
|
|
||||||
- "traefik.enable=true" # This flag enables load balancing through Traefik :)
|
|
||||||
- "traefik.docker.network=traefik-net" # Set the network to connect to container on
|
|
||||||
- "traefik.http.middlewares.apikeyauth.forwardauth.address=https://apiauth-internal.dockertest.ultrafast.co.nz"
|
|
||||||
- "traefik.http.middlewares.apikeyauth.forwardauth.trustForwardHeader=true"
|
|
||||||
- "traefik.http.middlewares.apikeyauth.forwardauth.authResponseHeaders=X-Client-Id"
|
|
||||||
- "traefik.http.middlewares.apikeyauth.forwardauth.tls.insecureSkipVerify=true"
|
|
||||||
test_app:
|
|
||||||
image: containous/whoami
|
|
||||||
networks:
|
|
||||||
- traefik-net
|
|
||||||
volumes:
|
|
||||||
- /etc/localtime:/etc/localtime:ro # Mount Timezone config to container
|
|
||||||
deploy:
|
|
||||||
mode: replicated
|
|
||||||
replicas: 1
|
|
||||||
placement:
|
|
||||||
constraints:
|
|
||||||
- node.role == worker # Place this service on Worker Nodes alternatively may specify manager if you want service on manager node.
|
|
||||||
labels:
|
|
||||||
- "traefik.http.routers.testapp.rule=Host(`testapp.dockertest.ultrafast.co.nz`)" # This label creates a route Traefik will listen on
|
|
||||||
- "traefik.http.routers.testapp.tls=true" # Enable TLS, in this example using default TLS cert
|
|
||||||
- "traefik.http.routers.testapp.middlewares=apikeyauth"
|
|
||||||
- "traefik.http.services.testapp.loadbalancer.server.port=80" # Set Port to proxy
|
|
||||||
- "traefik.enable=true" # This flag enables load balancing through Traefik :)
|
|
||||||
- "traefik.docker.network=traefik-net" # Set the network to connect to container on
|
|
||||||
|
|
||||||
networks:
|
services:
|
||||||
traefik-net:
|
mysql:
|
||||||
external: true
|
image: mysql:8.0
|
||||||
|
container_name: dadns_mysql
|
||||||
|
environment:
|
||||||
|
MYSQL_ROOT_PASSWORD: rootpassword
|
||||||
|
MYSQL_DATABASE: coredns
|
||||||
|
MYSQL_USER: coredns
|
||||||
|
MYSQL_PASSWORD: coredns123
|
||||||
|
ports:
|
||||||
|
- "3306:3306"
|
||||||
|
volumes:
|
||||||
|
- ./schema/coredns_mysql.sql:/docker-entrypoint-initdb.d/init.sql
|
||||||
|
- mysql_data:/var/lib/mysql
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 5
|
||||||
|
|
||||||
|
dadns:
|
||||||
|
build:
|
||||||
|
dockerfile: Dockerfile.deepseek
|
||||||
|
context: .
|
||||||
|
no_cache: false
|
||||||
|
container_name: dadns_app
|
||||||
|
depends_on:
|
||||||
|
mysql:
|
||||||
|
condition: service_healthy
|
||||||
|
ports:
|
||||||
|
- "2222:2222"
|
||||||
|
volumes:
|
||||||
|
- ./config:/app/config
|
||||||
|
- ./data:/app/data
|
||||||
|
- ./logs:/app/logs
|
||||||
|
environment:
|
||||||
|
- TZ=Pacific/Auckland
|
||||||
|
- DNS_BACKENDS__BIND__ENABLED=true
|
||||||
|
- DNS_BACKENDS__BIND__ZONES_DIR=/etc/named/zones/dadns
|
||||||
|
- DNS_BACKENDS__BIND__NAMED_CONF=/etc/bind/named.conf.local
|
||||||
|
- DNS_BACKENDS__COREDNS_MYSQL__ENABLED=true
|
||||||
|
- DNS_BACKENDS__COREDNS_MYSQL__HOST=mysql
|
||||||
|
- DNS_BACKENDS__COREDNS_MYSQL__PORT=3306
|
||||||
|
- DNS_BACKENDS__COREDNS_MYSQL__DATABASE=coredns
|
||||||
|
- DNS_BACKENDS__COREDNS_MYSQL__USERNAME=coredns
|
||||||
|
- DNS_BACKENDS__COREDNS_MYSQL__PASSWORD=coredns123
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
mysql_data:
|
||||||
91
docker/entrypoint.sh
Executable file
91
docker/entrypoint.sh
Executable file
@@ -0,0 +1,91 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Detect which DNS backend type(s) are configured and enabled.
|
||||||
|
# Uses the same config search order as the application itself.
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
detect_backend_types() {
|
||||||
|
python3 - <<'EOF'
|
||||||
|
import yaml, sys, os
|
||||||
|
|
||||||
|
config_paths = [
|
||||||
|
"/etc/directdnsonly/app.yml",
|
||||||
|
"/etc/directdnsonly/app.yaml",
|
||||||
|
"/app/app.yml",
|
||||||
|
"/app/app.yaml",
|
||||||
|
"/app/config/app.yml",
|
||||||
|
"/app/config/app.yaml",
|
||||||
|
]
|
||||||
|
|
||||||
|
# Also honour env-var-only deployments (no config file)
|
||||||
|
bind_env = os.environ.get("DADNS_DNS_BACKENDS_BIND_ENABLED", "").lower() == "true"
|
||||||
|
nsd_env = os.environ.get("DADNS_DNS_BACKENDS_NSD_ENABLED", "").lower() == "true"
|
||||||
|
|
||||||
|
config = {}
|
||||||
|
for path in config_paths:
|
||||||
|
if os.path.exists(path):
|
||||||
|
with open(path) as f:
|
||||||
|
config = yaml.safe_load(f) or {}
|
||||||
|
break
|
||||||
|
|
||||||
|
backends = config.get("dns", {}).get("backends", {})
|
||||||
|
has_bind = bind_env
|
||||||
|
has_nsd = nsd_env
|
||||||
|
for cfg in backends.values():
|
||||||
|
if not isinstance(cfg, dict) or not cfg.get("enabled", False):
|
||||||
|
continue
|
||||||
|
btype = cfg.get("type", "")
|
||||||
|
if btype == "bind":
|
||||||
|
has_bind = True
|
||||||
|
elif btype == "nsd":
|
||||||
|
has_nsd = True
|
||||||
|
|
||||||
|
types = []
|
||||||
|
if has_bind:
|
||||||
|
types.append("bind")
|
||||||
|
if has_nsd:
|
||||||
|
types.append("nsd")
|
||||||
|
print(" ".join(types) if types else "none")
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
BACKEND_TYPES=$(detect_backend_types)
|
||||||
|
echo "[entrypoint] Detected DNS backend type(s): ${BACKEND_TYPES:-none}"
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Start BIND if a bind backend is configured
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
if echo "$BACKEND_TYPES" | grep -qw "bind"; then
|
||||||
|
if command -v named >/dev/null 2>&1; then
|
||||||
|
echo "[entrypoint] Starting BIND (named)"
|
||||||
|
/usr/sbin/named -u bind -f &
|
||||||
|
else
|
||||||
|
echo "[entrypoint] WARNING: bind backend configured but 'named' not found — skipping"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Start NSD if an nsd backend is configured
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
if echo "$BACKEND_TYPES" | grep -qw "nsd"; then
|
||||||
|
if command -v nsd >/dev/null 2>&1; then
|
||||||
|
echo "[entrypoint] Starting NSD"
|
||||||
|
# Ensure nsd-control keys exist (generated on first run)
|
||||||
|
if [ ! -f /etc/nsd/nsd_server.key ]; then
|
||||||
|
nsd-control-setup 2>/dev/null || true
|
||||||
|
fi
|
||||||
|
/usr/sbin/nsd -d -c /etc/nsd/nsd.conf &
|
||||||
|
else
|
||||||
|
echo "[entrypoint] WARNING: nsd backend configured but 'nsd' not found — skipping"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$BACKEND_TYPES" = "none" ] || [ -z "$BACKEND_TYPES" ]; then
|
||||||
|
echo "[entrypoint] No local DNS daemon required (CoreDNS MySQL or similar)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Start the directdnsonly application
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
exec python -m directdnsonly
|
||||||
4
docker/named.conf.local
Normal file
4
docker/named.conf.local
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
zone "guise.nz" {
|
||||||
|
type master;
|
||||||
|
file "/etc/named/zones/dadns/guise.nz.db";
|
||||||
|
};
|
||||||
8
docker/named.conf.options
Normal file
8
docker/named.conf.options
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
options {
|
||||||
|
directory "/var/cache/bind";
|
||||||
|
allow-query { any; };
|
||||||
|
recursion no;
|
||||||
|
dnssec-validation no;
|
||||||
|
listen-on { any; };
|
||||||
|
listen-on-v6 { any; };
|
||||||
|
};
|
||||||
20
docker/nsd.conf
Normal file
20
docker/nsd.conf
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
# NSD base configuration for directdnsonly containers.
|
||||||
|
# Zone stanzas are written to /etc/nsd/nsd.conf.d/zones.conf by the NSD
|
||||||
|
# backend and auto-included via the glob below.
|
||||||
|
|
||||||
|
server:
|
||||||
|
server-count: 1
|
||||||
|
ip-address: 0.0.0.0
|
||||||
|
port: 53
|
||||||
|
username: nsd
|
||||||
|
zonesdir: /etc/nsd/zones
|
||||||
|
verbosity: 1
|
||||||
|
# Log to stderr so Docker captures it
|
||||||
|
logfile: ""
|
||||||
|
|
||||||
|
remote-control:
|
||||||
|
control-enable: yes
|
||||||
|
control-interface: 127.0.0.1
|
||||||
|
control-port: 8952
|
||||||
|
|
||||||
|
include: /etc/nsd/nsd.conf.d/*.conf
|
||||||
101
justfile
Normal file
101
justfile
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
#!/usr/bin/env just --justfile
|
||||||
|
# directdnsonly — developer task runner
|
||||||
|
# Requires: just, pyenv, poetry
|
||||||
|
|
||||||
|
APP_NAME := "directdnsonly"
|
||||||
|
|
||||||
|
# Ensure pyenv shims and common install locations are on PATH so that `python`
|
||||||
|
# resolves via pyenv (.python-version) and `poetry` is found without a full
|
||||||
|
# shell init in every recipe.
|
||||||
|
export PATH := env_var("HOME") + "/.pyenv/shims:" + env_var("HOME") + "/.pyenv/bin:" + env_var("HOME") + "/.local/bin:" + env_var("PATH")
|
||||||
|
|
||||||
|
# List available recipes (default)
|
||||||
|
default:
|
||||||
|
@just --list
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Setup
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Install all dependencies (including dev group)
|
||||||
|
install:
|
||||||
|
poetry install
|
||||||
|
|
||||||
|
# Install only production dependencies
|
||||||
|
install-prod:
|
||||||
|
poetry install --only main
|
||||||
|
|
||||||
|
# Show the Python interpreter that will be used
|
||||||
|
which-python:
|
||||||
|
@poetry run python --version
|
||||||
|
@poetry run python -c "import sys; print(sys.executable)"
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Testing
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Run the full test suite
|
||||||
|
test:
|
||||||
|
poetry run pytest tests/ -v
|
||||||
|
|
||||||
|
# Run tests with terminal coverage report
|
||||||
|
coverage:
|
||||||
|
poetry run pytest tests/ -v --cov=directdnsonly --cov-report=term-missing
|
||||||
|
|
||||||
|
# Run tests with HTML coverage report (opens in browser)
|
||||||
|
coverage-html:
|
||||||
|
poetry run pytest tests/ --cov=directdnsonly --cov-report=html
|
||||||
|
@echo "Coverage report: htmlcov/index.html"
|
||||||
|
|
||||||
|
# Run a single test file or pattern, e.g. just test-one test_reconciler
|
||||||
|
test-one target:
|
||||||
|
poetry run pytest tests/ -v -k "{{target}}"
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Code quality
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Format all source and test files with black
|
||||||
|
fmt:
|
||||||
|
poetry run black directdnsonly/ tests/
|
||||||
|
|
||||||
|
# Check formatting without making changes (CI-safe)
|
||||||
|
fmt-check:
|
||||||
|
poetry run black --check directdnsonly/ tests/
|
||||||
|
|
||||||
|
# CI gate — run fmt-check then test, fail fast
|
||||||
|
ci: fmt-check test
|
||||||
|
|
||||||
|
# Start the application
|
||||||
|
run:
|
||||||
|
poetry run python -m directdnsonly
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Build
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Build a standalone binary with PyInstaller
|
||||||
|
build:
|
||||||
|
poetry run pyinstaller \
|
||||||
|
--hidden-import=json \
|
||||||
|
--hidden-import=pymysql \
|
||||||
|
--hidden-import=cheroot \
|
||||||
|
--hidden-import=cheroot.ssl.pyopenssl \
|
||||||
|
--hidden-import=cheroot.ssl.builtin \
|
||||||
|
--noconfirm --onefile \
|
||||||
|
--name=directdnsonly \
|
||||||
|
directdnsonly/main.py
|
||||||
|
rm -f *.spec
|
||||||
|
|
||||||
|
build-docker:
|
||||||
|
export DOCKER_CONFIG="/home/guisea/.docker/guisea" && \
|
||||||
|
docker buildx build --platform linux/amd64,linux/arm64 -t guisea/directdnsonly:dev --push --progress plain --file Dockerfile .
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Clean
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Remove build artefacts, caches, and compiled bytecode
|
||||||
|
clean:
|
||||||
|
rm -rf dist/ build/*.spec .coverage htmlcov/ .pytest_cache/
|
||||||
|
find . -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true
|
||||||
|
find . -name "*.pyc" -delete 2>/dev/null || true
|
||||||
1548
poetry.lock
generated
Normal file
1548
poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
2
poetry.toml
Normal file
2
poetry.toml
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
[virtualenvs]
|
||||||
|
in-project = true
|
||||||
38
pyproject.toml
Normal file
38
pyproject.toml
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
[project]
|
||||||
|
name = "directdnsonly"
|
||||||
|
version = "2.5.0"
|
||||||
|
description = "DNS Management System - DirectAdmin to multiple backends"
|
||||||
|
authors = [
|
||||||
|
{name = "Aaron Guise",email = "aaron@guise.net.nz"}
|
||||||
|
]
|
||||||
|
license = {text = "MIT"}
|
||||||
|
readme = "README.md"
|
||||||
|
requires-python = ">=3.11,<3.14"
|
||||||
|
dependencies = [
|
||||||
|
"vyper-config (>=1.2.1,<2.0.0)",
|
||||||
|
"loguru (>=0.7.3,<0.8.0)",
|
||||||
|
"persist-queue (>=1.1.0,<2.0.0)",
|
||||||
|
"cherrypy (>=18.10.0,<19.0.0)",
|
||||||
|
"sqlalchemy (>=2.0.0,<3.0.0)",
|
||||||
|
"pymysql (>=1.1.2,<2.0.0)",
|
||||||
|
"dnspython (>=2.8.0,<3.0.0)",
|
||||||
|
"pyyaml (>=6.0.3,<7.0.0)",
|
||||||
|
"requests (>=2.32.0,<3.0.0)",
|
||||||
|
]
|
||||||
|
|
||||||
|
[project.scripts]
|
||||||
|
dadns = "directdnsonly.__main__:run"
|
||||||
|
|
||||||
|
[tool.poetry]
|
||||||
|
package-mode = true
|
||||||
|
|
||||||
|
[tool.poetry.group.dev.dependencies]
|
||||||
|
black = "^26.1.0"
|
||||||
|
pyinstaller = "^6.13.0"
|
||||||
|
pytest = "^9.0.2"
|
||||||
|
pytest-cov = "^7.0.0"
|
||||||
|
pytest-mock = "^3.15.1"
|
||||||
|
|
||||||
|
[build-system]
|
||||||
|
requires = ["poetry-core>=2.0.0,<3.0.0"]
|
||||||
|
build-backend = "poetry.core.masonry.api"
|
||||||
33
schema/coredns_mysql.sql
Normal file
33
schema/coredns_mysql.sql
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
-- DirectDNSOnly — CoreDNS MySQL schema
|
||||||
|
-- Compatible with cybercinch/coredns_mysql_extend
|
||||||
|
--
|
||||||
|
-- managed_by values:
|
||||||
|
-- 'directadmin' zone is managed via directdnsonly / DirectAdmin push
|
||||||
|
-- 'direct' zone was created directly (not via DA)
|
||||||
|
-- NULL legacy row created before this column was added
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS `zones` (
|
||||||
|
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||||
|
`zone_name` varchar(255) NOT NULL,
|
||||||
|
`managed_by` varchar(255) DEFAULT NULL,
|
||||||
|
PRIMARY KEY (`id`),
|
||||||
|
UNIQUE KEY `uq_zone_name` (`zone_name`)
|
||||||
|
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
|
||||||
|
|
||||||
|
CREATE TABLE IF NOT EXISTS `records` (
|
||||||
|
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||||
|
`zone_id` int(11) NOT NULL,
|
||||||
|
`hostname` varchar(255) NOT NULL,
|
||||||
|
`type` varchar(10) NOT NULL,
|
||||||
|
`data` text NOT NULL,
|
||||||
|
`ttl` int(11) DEFAULT NULL,
|
||||||
|
`online` tinyint(1) NOT NULL DEFAULT 0,
|
||||||
|
PRIMARY KEY (`id`),
|
||||||
|
KEY `idx_zone_id` (`zone_id`),
|
||||||
|
KEY `idx_hostname` (`hostname`),
|
||||||
|
CONSTRAINT `fk_records_zone` FOREIGN KEY (`zone_id`) REFERENCES `zones` (`id`)
|
||||||
|
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
|
||||||
|
|
||||||
|
-- Migration: add managed_by to an existing installation
|
||||||
|
-- ALTER TABLE `zones` ADD COLUMN `managed_by` varchar(255) DEFAULT NULL;
|
||||||
|
-- UPDATE `zones` SET `managed_by` = 'directadmin' WHERE `managed_by` IS NULL;
|
||||||
0
tests/__init__.py
Normal file
0
tests/__init__.py
Normal file
42
tests/conftest.py
Normal file
42
tests/conftest.py
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
"""Shared test fixtures for directdnsonly test suite."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from sqlalchemy import create_engine
|
||||||
|
from sqlalchemy.orm import sessionmaker
|
||||||
|
|
||||||
|
from directdnsonly.app.db import Base
|
||||||
|
from directdnsonly.app.db.models import (
|
||||||
|
Domain,
|
||||||
|
Key,
|
||||||
|
) # noqa: F401 — registers models with Base
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def engine():
|
||||||
|
eng = create_engine("sqlite:///:memory:")
|
||||||
|
Base.metadata.create_all(eng)
|
||||||
|
yield eng
|
||||||
|
eng.dispose()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def db_session(engine):
|
||||||
|
session = sessionmaker(engine)()
|
||||||
|
yield session
|
||||||
|
session.close()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def patch_connect(db_session, monkeypatch):
|
||||||
|
"""Patch connect() at every call-site, returning the shared test session.
|
||||||
|
|
||||||
|
Modules that import connect() directly (e.g. utils, reconciler) are
|
||||||
|
patched at their local name so the in-memory SQLite session is used
|
||||||
|
instead of trying to read from vyper config.
|
||||||
|
"""
|
||||||
|
_factory = lambda: db_session # noqa: E731
|
||||||
|
monkeypatch.setattr("directdnsonly.app.utils.connect", _factory)
|
||||||
|
monkeypatch.setattr("directdnsonly.app.reconciler.connect", _factory)
|
||||||
|
monkeypatch.setattr("directdnsonly.app.peer_sync.connect", _factory)
|
||||||
|
monkeypatch.setattr("directdnsonly.app.api.status.connect", _factory)
|
||||||
|
return db_session
|
||||||
219
tests/test_admin_api.py
Normal file
219
tests/test_admin_api.py
Normal file
@@ -0,0 +1,219 @@
|
|||||||
|
"""Tests for directdnsonly.app.api.admin — DNSAdminAPI handler methods."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from unittest.mock import MagicMock, patch
|
||||||
|
from urllib.parse import parse_qs
|
||||||
|
|
||||||
|
import cherrypy
|
||||||
|
|
||||||
|
from directdnsonly.app.api.admin import DNSAdminAPI
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Fixtures
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def save_queue():
|
||||||
|
return MagicMock()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def delete_queue():
|
||||||
|
return MagicMock()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def api(save_queue, delete_queue):
|
||||||
|
return DNSAdminAPI(save_queue, delete_queue, backend_registry=MagicMock())
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# CMD_API_LOGIN_TEST
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_login_test_returns_success(api):
|
||||||
|
result = api.CMD_API_LOGIN_TEST()
|
||||||
|
parsed = parse_qs(result)
|
||||||
|
assert parsed["error"] == ["0"]
|
||||||
|
assert parsed["text"] == ["Login OK"]
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _handle_exists — GET action=exists
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_handle_exists_missing_domain_returns_error(api):
|
||||||
|
with patch.object(cherrypy, "response", MagicMock()):
|
||||||
|
result = api._handle_exists({"action": "exists"})
|
||||||
|
parsed = parse_qs(result)
|
||||||
|
assert parsed["error"] == ["1"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_handle_exists_unsupported_action_returns_error(api):
|
||||||
|
with patch.object(cherrypy, "response", MagicMock()):
|
||||||
|
result = api._handle_exists({"action": "rawsave"})
|
||||||
|
parsed = parse_qs(result)
|
||||||
|
assert parsed["error"] == ["1"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_handle_exists_domain_not_found(api):
|
||||||
|
with (
|
||||||
|
patch("directdnsonly.app.api.admin.check_zone_exists", return_value=False),
|
||||||
|
patch(
|
||||||
|
"directdnsonly.app.api.admin.check_parent_domain_owner", return_value=False
|
||||||
|
),
|
||||||
|
):
|
||||||
|
result = api._handle_exists({"action": "exists", "domain": "example.com"})
|
||||||
|
|
||||||
|
parsed = parse_qs(result)
|
||||||
|
assert parsed["error"] == ["0"]
|
||||||
|
assert parsed["exists"] == ["0"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_handle_exists_domain_found(api):
|
||||||
|
record = MagicMock()
|
||||||
|
record.hostname = "da1.example.com"
|
||||||
|
|
||||||
|
with (
|
||||||
|
patch("directdnsonly.app.api.admin.check_zone_exists", return_value=True),
|
||||||
|
patch("directdnsonly.app.api.admin.get_domain_record", return_value=record),
|
||||||
|
):
|
||||||
|
result = api._handle_exists({"action": "exists", "domain": "example.com"})
|
||||||
|
|
||||||
|
parsed = parse_qs(result)
|
||||||
|
assert parsed["error"] == ["0"]
|
||||||
|
assert parsed["exists"] == ["1"]
|
||||||
|
assert "da1.example.com" in parsed["details"][0]
|
||||||
|
|
||||||
|
|
||||||
|
def test_handle_exists_parent_found(api):
|
||||||
|
parent = MagicMock()
|
||||||
|
parent.hostname = "da2.example.com"
|
||||||
|
|
||||||
|
with (
|
||||||
|
patch("directdnsonly.app.api.admin.check_zone_exists", return_value=False),
|
||||||
|
patch(
|
||||||
|
"directdnsonly.app.api.admin.check_parent_domain_owner", return_value=True
|
||||||
|
),
|
||||||
|
patch(
|
||||||
|
"directdnsonly.app.api.admin.get_parent_domain_record", return_value=parent
|
||||||
|
),
|
||||||
|
):
|
||||||
|
result = api._handle_exists(
|
||||||
|
{
|
||||||
|
"action": "exists",
|
||||||
|
"domain": "sub.example.com",
|
||||||
|
"check_for_parent_domain": "1",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
parsed = parse_qs(result)
|
||||||
|
assert parsed["error"] == ["0"]
|
||||||
|
assert parsed["exists"] == ["2"]
|
||||||
|
assert "da2.example.com" in parsed["details"][0]
|
||||||
|
|
||||||
|
|
||||||
|
def test_handle_exists_no_parent_check_when_flag_absent(api):
|
||||||
|
"""check_parent_domain_owner should not be called if flag not set."""
|
||||||
|
record = MagicMock()
|
||||||
|
record.hostname = "da1.example.com"
|
||||||
|
|
||||||
|
with (
|
||||||
|
patch("directdnsonly.app.api.admin.check_zone_exists", return_value=True),
|
||||||
|
patch("directdnsonly.app.api.admin.check_parent_domain_owner") as mock_parent,
|
||||||
|
patch("directdnsonly.app.api.admin.get_domain_record", return_value=record),
|
||||||
|
):
|
||||||
|
api._handle_exists({"action": "exists", "domain": "example.com"})
|
||||||
|
|
||||||
|
mock_parent.assert_not_called()
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _handle_rawsave
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
SAMPLE_ZONE = "$ORIGIN example.com.\n$TTL 300\nexample.com. 300 IN A 1.2.3.4\n"
|
||||||
|
|
||||||
|
|
||||||
|
def test_rawsave_enqueues_item(api, save_queue):
|
||||||
|
with (
|
||||||
|
patch(
|
||||||
|
"directdnsonly.app.api.admin.validate_and_normalize_zone",
|
||||||
|
return_value=SAMPLE_ZONE,
|
||||||
|
),
|
||||||
|
patch.object(cherrypy, "request", MagicMock(remote=MagicMock(ip="127.0.0.1"))),
|
||||||
|
):
|
||||||
|
result = api._handle_rawsave(
|
||||||
|
"example.com",
|
||||||
|
{
|
||||||
|
"zone_file": SAMPLE_ZONE,
|
||||||
|
"hostname": "da1.example.com",
|
||||||
|
"username": "admin",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
save_queue.put.assert_called_once()
|
||||||
|
item = save_queue.put.call_args[0][0]
|
||||||
|
assert item["domain"] == "example.com"
|
||||||
|
assert item["hostname"] == "da1.example.com"
|
||||||
|
assert item["username"] == "admin"
|
||||||
|
assert item["client_ip"] == "127.0.0.1"
|
||||||
|
|
||||||
|
parsed = parse_qs(result)
|
||||||
|
assert parsed["error"] == ["0"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_rawsave_missing_zone_file_raises(api):
|
||||||
|
with patch.object(cherrypy, "request", MagicMock(remote=MagicMock(ip="127.0.0.1"))):
|
||||||
|
with pytest.raises(ValueError, match="Missing zone file"):
|
||||||
|
api._handle_rawsave("example.com", {})
|
||||||
|
|
||||||
|
|
||||||
|
def test_rawsave_invalid_zone_raises(api):
|
||||||
|
with (
|
||||||
|
patch(
|
||||||
|
"directdnsonly.app.api.admin.validate_and_normalize_zone",
|
||||||
|
side_effect=ValueError("Invalid zone data: bad record"),
|
||||||
|
),
|
||||||
|
patch.object(cherrypy, "request", MagicMock(remote=MagicMock(ip="127.0.0.1"))),
|
||||||
|
):
|
||||||
|
with pytest.raises(ValueError, match="Invalid zone data"):
|
||||||
|
api._handle_rawsave("example.com", {"zone_file": "garbage"})
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _handle_delete
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_delete_enqueues_item(api, delete_queue):
|
||||||
|
with patch.object(cherrypy, "request", MagicMock(remote=MagicMock(ip="10.0.0.1"))):
|
||||||
|
result = api._handle_delete(
|
||||||
|
"example.com",
|
||||||
|
{
|
||||||
|
"hostname": "da1.example.com",
|
||||||
|
"username": "admin",
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
delete_queue.put.assert_called_once()
|
||||||
|
item = delete_queue.put.call_args[0][0]
|
||||||
|
assert item["domain"] == "example.com"
|
||||||
|
assert item["hostname"] == "da1.example.com"
|
||||||
|
assert item["client_ip"] == "10.0.0.1"
|
||||||
|
|
||||||
|
parsed = parse_qs(result)
|
||||||
|
assert parsed["error"] == ["0"]
|
||||||
|
|
||||||
|
|
||||||
|
def test_delete_missing_params_uses_empty_strings(api, delete_queue):
|
||||||
|
with patch.object(cherrypy, "request", MagicMock(remote=MagicMock(ip="127.0.0.1"))):
|
||||||
|
api._handle_delete("example.com", {})
|
||||||
|
|
||||||
|
item = delete_queue.put.call_args[0][0]
|
||||||
|
assert item["hostname"] == ""
|
||||||
|
assert item["username"] == ""
|
||||||
201
tests/test_coredns_mysql.py
Normal file
201
tests/test_coredns_mysql.py
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
"""Tests for the CoreDNS MySQL backend (run against in-memory SQLite)."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from sqlalchemy import create_engine, select
|
||||||
|
from sqlalchemy.orm import scoped_session, sessionmaker
|
||||||
|
|
||||||
|
from directdnsonly.app.backends.coredns_mysql import (
|
||||||
|
Base,
|
||||||
|
CoreDNSMySQLBackend,
|
||||||
|
Record,
|
||||||
|
Zone,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Fixture — in-memory SQLite backend (bypasses real MySQL connection)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def mysql_backend():
|
||||||
|
engine = create_engine("sqlite:///:memory:")
|
||||||
|
Base.metadata.create_all(engine)
|
||||||
|
|
||||||
|
class _TestBackend(CoreDNSMySQLBackend):
|
||||||
|
def __init__(self):
|
||||||
|
# Manually initialise without triggering the MySQL create_engine call
|
||||||
|
self.config = {}
|
||||||
|
self.instance_name = "test"
|
||||||
|
self.engine = engine
|
||||||
|
self.Session = scoped_session(sessionmaker(engine))
|
||||||
|
|
||||||
|
yield _TestBackend()
|
||||||
|
engine.dispose()
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# write_zone / zone_exists / delete_zone
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
ZONE_DATA = """\
|
||||||
|
$ORIGIN example.com.
|
||||||
|
$TTL 300
|
||||||
|
example.com. 300 IN SOA ns.example.com. admin.example.com. (2023 3600 1800 604800 86400)
|
||||||
|
example.com. 300 IN A 192.0.2.1
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def test_write_zone_creates_zone(mysql_backend):
|
||||||
|
assert mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
|
||||||
|
|
||||||
|
def test_zone_exists_after_write(mysql_backend):
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
assert mysql_backend.zone_exists("example.com")
|
||||||
|
|
||||||
|
|
||||||
|
def test_zone_does_not_exist_before_write(mysql_backend):
|
||||||
|
assert not mysql_backend.zone_exists("missing.com")
|
||||||
|
|
||||||
|
|
||||||
|
def test_write_zone_idempotent(mysql_backend):
|
||||||
|
assert mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
assert mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
|
||||||
|
|
||||||
|
def test_write_zone_updates_records(mysql_backend):
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
|
||||||
|
updated = """\
|
||||||
|
$ORIGIN example.com.
|
||||||
|
$TTL 300
|
||||||
|
example.com. 3600 IN A 192.0.2.1
|
||||||
|
example.com. 300 IN AAAA 2001:db8::1
|
||||||
|
"""
|
||||||
|
assert mysql_backend.write_zone("example.com", updated)
|
||||||
|
|
||||||
|
|
||||||
|
def test_write_zone_removes_stale_records(mysql_backend):
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
|
||||||
|
reduced = "example.com. 300 IN A 192.0.2.1"
|
||||||
|
mysql_backend.write_zone("example.com", reduced)
|
||||||
|
|
||||||
|
session = mysql_backend.Session()
|
||||||
|
zone = session.execute(select(Zone).filter_by(zone_name="example.com.")).scalar_one_or_none()
|
||||||
|
records = session.execute(select(Record).filter_by(zone_id=zone.id, type="AAAA")).scalars().all()
|
||||||
|
assert records == []
|
||||||
|
session.close()
|
||||||
|
|
||||||
|
|
||||||
|
def test_delete_zone_removes_zone_and_records(mysql_backend):
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
assert mysql_backend.delete_zone("example.com")
|
||||||
|
assert not mysql_backend.zone_exists("example.com")
|
||||||
|
|
||||||
|
|
||||||
|
def test_delete_nonexistent_zone_returns_false(mysql_backend):
|
||||||
|
assert not mysql_backend.delete_zone("ghost.com")
|
||||||
|
|
||||||
|
|
||||||
|
def test_reload_zone_returns_true(mysql_backend):
|
||||||
|
assert mysql_backend.reload_zone("example.com")
|
||||||
|
assert mysql_backend.reload_zone()
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# verify_zone_record_count
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_verify_zone_record_count_match(mysql_backend):
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
# SOA + A = 2 records total
|
||||||
|
matches, count = mysql_backend.verify_zone_record_count("example.com", 2)
|
||||||
|
assert matches
|
||||||
|
assert count == 2
|
||||||
|
|
||||||
|
|
||||||
|
def test_verify_zone_record_count_mismatch(mysql_backend):
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
matches, count = mysql_backend.verify_zone_record_count("example.com", 99)
|
||||||
|
assert not matches
|
||||||
|
assert count == 2
|
||||||
|
|
||||||
|
|
||||||
|
def test_verify_zone_record_count_missing_zone(mysql_backend):
|
||||||
|
matches, count = mysql_backend.verify_zone_record_count("ghost.com", 0)
|
||||||
|
assert not matches
|
||||||
|
assert count == 0
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# reconcile_zone_records
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_reconcile_removes_extra_records(mysql_backend):
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
|
||||||
|
# Inject a phantom record directly into the DB
|
||||||
|
session = mysql_backend.Session()
|
||||||
|
zone = session.execute(select(Zone).filter_by(zone_name="example.com.")).scalar_one_or_none()
|
||||||
|
session.add(
|
||||||
|
Record(
|
||||||
|
zone_id=zone.id,
|
||||||
|
hostname="phantom",
|
||||||
|
type="A",
|
||||||
|
data="10.0.0.99",
|
||||||
|
ttl=300,
|
||||||
|
online=True,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
session.commit()
|
||||||
|
session.close()
|
||||||
|
|
||||||
|
success, removed = mysql_backend.reconcile_zone_records("example.com", ZONE_DATA)
|
||||||
|
assert success
|
||||||
|
assert removed == 1
|
||||||
|
|
||||||
|
|
||||||
|
def test_reconcile_no_changes_when_zone_matches(mysql_backend):
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
success, removed = mysql_backend.reconcile_zone_records("example.com", ZONE_DATA)
|
||||||
|
assert success
|
||||||
|
assert removed == 0
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# managed_by field
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_write_zone_sets_managed_by_directadmin(mysql_backend):
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
session = mysql_backend.Session()
|
||||||
|
zone = session.execute(
|
||||||
|
select(Zone).filter_by(zone_name="example.com.")
|
||||||
|
).scalar_one_or_none()
|
||||||
|
assert zone.managed_by == "directadmin"
|
||||||
|
session.close()
|
||||||
|
|
||||||
|
|
||||||
|
def test_write_zone_migrates_null_managed_by(mysql_backend):
|
||||||
|
"""Zones that pre-exist without managed_by get it set on next write."""
|
||||||
|
session = mysql_backend.Session()
|
||||||
|
zone = Zone(zone_name="example.com.", managed_by=None)
|
||||||
|
session.add(zone)
|
||||||
|
session.commit()
|
||||||
|
session.close()
|
||||||
|
|
||||||
|
mysql_backend.write_zone("example.com", ZONE_DATA)
|
||||||
|
|
||||||
|
session = mysql_backend.Session()
|
||||||
|
zone = session.execute(
|
||||||
|
select(Zone).filter_by(zone_name="example.com.")
|
||||||
|
).scalar_one_or_none()
|
||||||
|
assert zone.managed_by == "directadmin"
|
||||||
|
session.close()
|
||||||
|
|
||||||
370
tests/test_da_client.py
Normal file
370
tests/test_da_client.py
Normal file
@@ -0,0 +1,370 @@
|
|||||||
|
"""Tests for directdnsonly.app.da.client — DirectAdminClient."""
|
||||||
|
|
||||||
|
import requests.exceptions
|
||||||
|
from unittest.mock import MagicMock, patch
|
||||||
|
|
||||||
|
from directdnsonly.app.da import DirectAdminClient
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _make_json_response(domains_list, total_pages=1):
|
||||||
|
data = {str(i): {"domain": d} for i, d in enumerate(domains_list)}
|
||||||
|
data["info"] = {"total_pages": total_pages}
|
||||||
|
mock = MagicMock()
|
||||||
|
mock.status_code = 200
|
||||||
|
mock.is_redirect = False
|
||||||
|
mock.headers = {"Content-Type": "application/json"}
|
||||||
|
mock.json.return_value = data
|
||||||
|
mock.raise_for_status = MagicMock()
|
||||||
|
return mock
|
||||||
|
|
||||||
|
|
||||||
|
def _client():
|
||||||
|
return DirectAdminClient(
|
||||||
|
"da1.example.com", 2222, "admin", "secret", ssl=True, verify_ssl=True
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# list_domains — JSON happy path
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_list_domains_returns_set_from_json():
|
||||||
|
mock_resp = _make_json_response(["example.com", "test.com"])
|
||||||
|
|
||||||
|
with patch("requests.get", return_value=mock_resp):
|
||||||
|
result = _client().list_domains()
|
||||||
|
|
||||||
|
assert result == {"example.com", "test.com"}
|
||||||
|
|
||||||
|
|
||||||
|
def test_list_domains_paginates():
|
||||||
|
page1 = _make_json_response(["a.com"], total_pages=2)
|
||||||
|
page2 = _make_json_response(["b.com"], total_pages=2)
|
||||||
|
|
||||||
|
with patch("requests.get", side_effect=[page1, page2]):
|
||||||
|
result = _client().list_domains()
|
||||||
|
|
||||||
|
assert result == {"a.com", "b.com"}
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# list_domains — DA Evo session login fallback
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_redirect_triggers_session_login():
|
||||||
|
redirect_resp = MagicMock()
|
||||||
|
redirect_resp.status_code = 302
|
||||||
|
redirect_resp.is_redirect = True
|
||||||
|
|
||||||
|
client = _client()
|
||||||
|
with (
|
||||||
|
patch("requests.get", return_value=redirect_resp),
|
||||||
|
patch.object(client, "_login", return_value=False),
|
||||||
|
):
|
||||||
|
result = client.list_domains()
|
||||||
|
|
||||||
|
assert result is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_persistent_redirect_after_login_returns_none():
|
||||||
|
redirect_resp = MagicMock()
|
||||||
|
redirect_resp.status_code = 302
|
||||||
|
redirect_resp.is_redirect = True
|
||||||
|
|
||||||
|
client = _client()
|
||||||
|
# Simulate cookies already set (login succeeded previously)
|
||||||
|
client._cookies = {"session": "abc"}
|
||||||
|
|
||||||
|
with patch("requests.get", return_value=redirect_resp):
|
||||||
|
result = client.list_domains()
|
||||||
|
|
||||||
|
assert result is None
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# list_domains — error cases
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_html_response_returns_none():
    """A 200 whose body is HTML (DA error page) is treated as a failure -> None."""
    resp = MagicMock(
        status_code=200,
        is_redirect=False,
        headers={"Content-Type": "text/html; charset=utf-8"},
    )
    resp.raise_for_status = MagicMock()

    with patch("requests.get", return_value=resp):
        assert _client().list_domains() is None


def test_connection_error_returns_none():
    """A refused connection is swallowed and reported as None."""
    err = requests.exceptions.ConnectionError("refused")
    with patch("requests.get", side_effect=err):
        assert _client().list_domains() is None


def test_timeout_returns_none():
    """A request timeout is swallowed and reported as None."""
    with patch("requests.get", side_effect=requests.exceptions.Timeout()):
        assert _client().list_domains() is None


def test_ssl_error_returns_none():
    """A TLS verification failure is swallowed and reported as None."""
    err = requests.exceptions.SSLError("cert verify failed")
    with patch("requests.get", side_effect=err):
        assert _client().list_domains() is None
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _parse_legacy_domain_list
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_parse_standard_querystring():
    """Ampersand-separated list[]= entries are split into a set of domains."""
    body = "list[]=example.com&list[]=test.com"
    assert DirectAdminClient._parse_legacy_domain_list(body) == {
        "example.com",
        "test.com",
    }


def test_parse_newline_separated():
    """Newline-separated entries parse the same as ampersand-separated ones."""
    body = "list[]=example.com\nlist[]=test.com"
    assert DirectAdminClient._parse_legacy_domain_list(body) == {
        "example.com",
        "test.com",
    }


def test_parse_empty_body_returns_empty_set():
    """An empty response body parses to an empty set, not None."""
    assert DirectAdminClient._parse_legacy_domain_list("") == set()


def test_parse_normalises_to_lowercase():
    """Mixed-case input is folded to lowercase."""
    parsed = DirectAdminClient._parse_legacy_domain_list("list[]=EXAMPLE.COM")
    assert "example.com" in parsed
    assert "EXAMPLE.COM" not in parsed


def test_parse_strips_whitespace():
    """Whitespace surrounding a domain value is stripped."""
    parsed = DirectAdminClient._parse_legacy_domain_list("list[]= example.com ")
    assert "example.com" in parsed
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _login
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_login_stores_cookies_on_success():
    """Cookies returned by the login POST are cached on the client."""
    resp = MagicMock(cookies={"session": "tok123"})

    client = _client()
    with patch("requests.post", return_value=resp):
        ok = client._login()

    assert ok is True
    assert client._cookies == {"session": "tok123"}


def test_login_returns_false_when_no_cookies():
    """A login response carrying no cookies means authentication failed."""
    resp = MagicMock(cookies={})

    client = _client()
    with patch("requests.post", return_value=resp):
        ok = client._login()

    assert ok is False
    assert client._cookies is None


def test_login_returns_false_on_exception():
    """Network failure during login is reported as False, not raised."""
    client = _client()
    with patch("requests.post", side_effect=requests.exceptions.ConnectionError()):
        assert client._login() is False
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# get_extra_dns_servers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _multi_server_get_resp(servers=None):
|
||||||
|
mock = MagicMock()
|
||||||
|
mock.status_code = 200
|
||||||
|
mock.is_redirect = False
|
||||||
|
mock.headers = {"Content-Type": "application/json"}
|
||||||
|
mock.json.return_value = {"CLUSTER_ON": "yes", "servers": servers or {}}
|
||||||
|
mock.raise_for_status = MagicMock()
|
||||||
|
return mock
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_extra_dns_servers_returns_servers_dict():
    """A healthy cluster response yields the per-IP server settings map."""
    servers = {
        "1.2.3.4": {"dns": "yes", "domain_check": "yes", "port": "2222", "ssl": "no"}
    }
    with patch("requests.get", return_value=_multi_server_get_resp(servers)):
        servers_out = _client().get_extra_dns_servers()

    assert "1.2.3.4" in servers_out
    assert servers_out["1.2.3.4"]["dns"] == "yes"


def test_get_extra_dns_servers_returns_empty_on_http_error():
    """A 500 from DA results in an empty mapping."""
    with patch("requests.get", return_value=MagicMock(status_code=500)):
        assert _client().get_extra_dns_servers() == {}


def test_get_extra_dns_servers_returns_empty_on_connection_error():
    """A refused connection results in an empty mapping."""
    err = requests.exceptions.ConnectionError("refused")
    with patch("requests.get", side_effect=err):
        assert _client().get_extra_dns_servers() == {}
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# add_extra_dns_server
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_add_extra_dns_server_returns_true_on_success():
    """DA acknowledging the connection ("Connection Added") -> True."""
    resp = MagicMock(status_code=200)
    resp.json.return_value = {"result": "", "success": "Connection Added"}

    with patch("requests.post", return_value=resp):
        added = _client().add_extra_dns_server("1.2.3.4", 2222, "ddnsonly", "s3cr3t")

    assert added is True


def test_add_extra_dns_server_returns_false_on_da_error():
    """A populated "result" field is a DA-side error -> False."""
    resp = MagicMock(status_code=200)
    resp.json.return_value = {"result": "Server already exists", "success": ""}

    with patch("requests.post", return_value=resp):
        added = _client().add_extra_dns_server("1.2.3.4", 2222, "ddnsonly", "s3cr3t")

    assert added is False


def test_add_extra_dns_server_returns_false_on_connection_error():
    """A refused connection is reported as False, not raised."""
    err = requests.exceptions.ConnectionError("refused")
    with patch("requests.post", side_effect=err):
        added = _client().add_extra_dns_server("1.2.3.4", 2222, "ddnsonly", "s3cr3t")

    assert added is False
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# ensure_extra_dns_server
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _add_success_resp():
|
||||||
|
mock = MagicMock()
|
||||||
|
mock.status_code = 200
|
||||||
|
mock.json.return_value = {"result": "", "success": "Connection Added"}
|
||||||
|
return mock
|
||||||
|
|
||||||
|
|
||||||
|
def _save_success_resp():
|
||||||
|
mock = MagicMock()
|
||||||
|
mock.status_code = 200
|
||||||
|
mock.json.return_value = {"result": "", "success": "Connections Saved"}
|
||||||
|
return mock
|
||||||
|
|
||||||
|
|
||||||
|
def test_ensure_extra_dns_server_adds_and_configures_new_server():
    """Server not yet registered — adds it, then saves dns+domain_check settings."""
    with (
        patch("requests.get", return_value=_multi_server_get_resp(servers={})),
        patch(
            "requests.post",
            side_effect=[_add_success_resp(), _save_success_resp()],
        ),
    ):
        ok = _client().ensure_extra_dns_server("1.2.3.4", 2222, "ddnsonly", "s3cr3t")

    assert ok is True


def test_ensure_extra_dns_server_skips_add_when_already_present():
    """Server already registered — no add call, only saves settings."""
    existing = {
        "1.2.3.4": {"dns": "no", "domain_check": "no", "port": "2222", "ssl": "no"}
    }
    with (
        patch("requests.get", return_value=_multi_server_get_resp(servers=existing)),
        patch("requests.post", return_value=_save_success_resp()) as mock_post,
    ):
        ok = _client().ensure_extra_dns_server("1.2.3.4", 2222, "ddnsonly", "s3cr3t")

    assert ok is True
    assert mock_post.call_count == 1  # save only, no add


def test_ensure_extra_dns_server_returns_false_when_add_fails():
    """DA rejects the add request -> overall False."""
    fail_resp = MagicMock(status_code=200)
    fail_resp.json.return_value = {"result": "error", "success": ""}

    with (
        patch("requests.get", return_value=_multi_server_get_resp(servers={})),
        patch("requests.post", return_value=fail_resp),
    ):
        ok = _client().ensure_extra_dns_server("1.2.3.4", 2222, "ddnsonly", "s3cr3t")

    assert ok is False


def test_ensure_extra_dns_server_returns_false_when_save_fails():
    """Add succeeds but the subsequent settings save fails."""
    fail_save = MagicMock(status_code=200)
    fail_save.json.return_value = {"result": "error", "success": ""}

    with (
        patch("requests.get", return_value=_multi_server_get_resp(servers={})),
        patch("requests.post", side_effect=[_add_success_resp(), fail_save]),
    ):
        ok = _client().ensure_extra_dns_server("1.2.3.4", 2222, "ddnsonly", "s3cr3t")

    assert ok is False
|
||||||
227
tests/test_nsd.py
Normal file
227
tests/test_nsd.py
Normal file
@@ -0,0 +1,227 @@
|
|||||||
|
"""Tests for directdnsonly.app.backends.nsd — NSDBackend."""
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
from pathlib import Path
|
||||||
|
from unittest.mock import patch, MagicMock
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from directdnsonly.app.backends.nsd import NSDBackend
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
ZONE_DATA = """\
|
||||||
|
$ORIGIN example.com.
|
||||||
|
$TTL 300
|
||||||
|
@ 300 IN SOA ns1.example.com. hostmaster.example.com. (2024010101 3600 900 604800 300)
|
||||||
|
@ 300 IN NS ns1.example.com.
|
||||||
|
@ 300 IN A 192.0.2.1
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def _make_backend(tmp_path) -> NSDBackend:
    """Return an NSDBackend rooted at tmp_path directories.

    is_available() is patched so the tests do not require a real nsd install.
    """
    config = {
        "instance_name": "test_nsd",
        "zones_dir": str(tmp_path / "zones"),
        "nsd_conf": str(tmp_path / "nsd.conf.d" / "zones.conf"),
    }
    with patch.object(NSDBackend, "is_available", return_value=True):
        return NSDBackend(config)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Availability check
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_is_available_true(monkeypatch):
    """nsd-control invocation returning exit code 0 -> available."""
    monkeypatch.setattr(
        "directdnsonly.app.backends.nsd.subprocess.run",
        lambda *args, **kwargs: MagicMock(returncode=0),
    )
    assert NSDBackend.is_available()


def test_is_available_false_when_not_installed(monkeypatch):
    """A missing binary (FileNotFoundError) -> not available."""

    def missing_binary(*args, **kwargs):
        raise FileNotFoundError

    monkeypatch.setattr(
        "directdnsonly.app.backends.nsd.subprocess.run", missing_binary
    )
    assert not NSDBackend.is_available()
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Initialisation
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_init_creates_zones_dir(tmp_path):
    """Constructing the backend creates the zones directory on disk."""
    assert _make_backend(tmp_path).zones_dir.exists()


def test_init_creates_nsd_conf(tmp_path):
    """Constructing the backend creates the zones.conf include file."""
    assert _make_backend(tmp_path).nsd_conf.exists()


def test_get_name():
    """Backend identifies itself as "nsd"."""
    assert NSDBackend.get_name() == "nsd"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# write_zone
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_write_zone_creates_zone_file(tmp_path):
    """write_zone returns truthy and creates <zone>.db under zones_dir."""
    backend = _make_backend(tmp_path)
    assert backend.write_zone("example.com", ZONE_DATA)
    assert (backend.zones_dir / "example.com.db").exists()


def test_write_zone_content_matches(tmp_path):
    """The zone file contains exactly the data passed in."""
    backend = _make_backend(tmp_path)
    backend.write_zone("example.com", ZONE_DATA)
    written = (backend.zones_dir / "example.com.db").read_text()
    assert written == ZONE_DATA


def test_write_zone_adds_to_conf(tmp_path):
    """A zone: entry naming the zone and its file is appended to nsd.conf."""
    backend = _make_backend(tmp_path)
    backend.write_zone("example.com", ZONE_DATA)
    conf_text = backend.nsd_conf.read_text()
    assert 'name: "example.com"' in conf_text
    assert "example.com.db" in conf_text


def test_write_zone_idempotent_conf_entry(tmp_path):
    """Writing the same zone twice must not duplicate its conf entry."""
    backend = _make_backend(tmp_path)
    for _ in range(2):
        backend.write_zone("example.com", ZONE_DATA)
    # Should appear exactly once
    assert backend.nsd_conf.read_text().count('name: "example.com"') == 1


def test_write_zone_multiple_zones(tmp_path):
    """Independent zones coexist in the same conf file."""
    backend = _make_backend(tmp_path)
    for zone in ("example.com", "other.com"):
        backend.write_zone(zone, ZONE_DATA)
    conf_text = backend.nsd_conf.read_text()
    assert 'name: "example.com"' in conf_text
    assert 'name: "other.com"' in conf_text
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# zone_exists
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_zone_exists_after_write(tmp_path):
    """zone_exists is True once the zone has been written."""
    backend = _make_backend(tmp_path)
    backend.write_zone("example.com", ZONE_DATA)
    assert backend.zone_exists("example.com")


def test_zone_not_exists_before_write(tmp_path):
    """zone_exists is False for a zone that was never written."""
    assert not _make_backend(tmp_path).zone_exists("missing.com")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# delete_zone
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_delete_zone_removes_file(tmp_path):
    """delete_zone removes the on-disk zone file."""
    backend = _make_backend(tmp_path)
    backend.write_zone("example.com", ZONE_DATA)
    assert backend.delete_zone("example.com")
    assert not (backend.zones_dir / "example.com.db").exists()


def test_delete_zone_removes_conf_entry(tmp_path):
    """delete_zone also drops the zone's entry from nsd.conf."""
    backend = _make_backend(tmp_path)
    backend.write_zone("example.com", ZONE_DATA)
    backend.delete_zone("example.com")
    assert 'name: "example.com"' not in backend.nsd_conf.read_text()


def test_delete_zone_returns_false_when_missing(tmp_path):
    """Deleting a zone that was never written reports False."""
    assert not _make_backend(tmp_path).delete_zone("ghost.com")


def test_delete_zone_leaves_other_zones(tmp_path):
    """Deleting one zone must not disturb entries for the others."""
    backend = _make_backend(tmp_path)
    for zone in ("example.com", "other.com"):
        backend.write_zone(zone, ZONE_DATA)
    backend.delete_zone("example.com")
    assert 'name: "other.com"' in backend.nsd_conf.read_text()
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# reload_zone — subprocess interactions
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_reload_zone_calls_nsd_control_reload(tmp_path, monkeypatch):
    """A full reload runs `nsd-control reload` with no zone argument."""
    backend = _make_backend(tmp_path)
    seen = []

    def record_run(cmd, **kwargs):
        seen.append(cmd)
        return MagicMock(returncode=0, stdout="ok", stderr="")

    monkeypatch.setattr("directdnsonly.app.backends.nsd.subprocess.run", record_run)
    assert backend.reload_zone()
    assert seen[0] == ["nsd-control", "reload"]


def test_reload_single_zone_passes_zone_name(tmp_path, monkeypatch):
    """A targeted reload appends the zone name to the command line."""
    backend = _make_backend(tmp_path)
    seen = []

    def record_run(cmd, **kwargs):
        seen.append(cmd)
        return MagicMock(returncode=0, stdout="ok", stderr="")

    monkeypatch.setattr("directdnsonly.app.backends.nsd.subprocess.run", record_run)
    assert backend.reload_zone("example.com")
    assert seen[0] == ["nsd-control", "reload", "example.com"]


def test_reload_zone_returns_false_on_failure(tmp_path, monkeypatch):
    """A failing nsd-control invocation is reported as False, not raised."""
    backend = _make_backend(tmp_path)

    def failing_run(cmd, **kwargs):
        raise subprocess.CalledProcessError(1, cmd, stderr="nsd-control: error")

    monkeypatch.setattr("directdnsonly.app.backends.nsd.subprocess.run", failing_run)
    assert not backend.reload_zone()
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# update_nsd_conf — full rewrite
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_update_nsd_conf_replaces_all_zones(tmp_path):
    """update_nsd_conf rewrites the conf from scratch with only the zones given."""
    backend = _make_backend(tmp_path)
    backend.write_zone("old.com", ZONE_DATA)
    backend.update_nsd_conf(["new1.com", "new2.com"])
    conf_text = backend.nsd_conf.read_text()
    assert 'name: "old.com"' not in conf_text
    assert 'name: "new1.com"' in conf_text
    assert 'name: "new2.com"' in conf_text
|
||||||
446
tests/test_peer_sync.py
Normal file
446
tests/test_peer_sync.py
Normal file
@@ -0,0 +1,446 @@
|
|||||||
|
"""Tests for directdnsonly.app.peer_sync — PeerSyncWorker."""
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import pytest
|
||||||
|
from sqlalchemy import select, func
|
||||||
|
from unittest.mock import patch, MagicMock
|
||||||
|
|
||||||
|
from directdnsonly.app.peer_sync import PeerSyncWorker
|
||||||
|
from directdnsonly.app.db.models import Domain
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Fixtures
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Single-peer peer_sync config used by most tests in this module.
BASE_CONFIG = {
    "enabled": True,
    "interval_minutes": 15,
    "peers": [
        {
            "url": "http://ddo-2:2222",
            "username": "directdnsonly",
            "password": "changeme",
        }
    ],
}

# Fixed timestamps for freshness comparisons: NOW is one hour after OLDER.
NOW = datetime.datetime(2024, 6, 1, 12, 0, 0)
OLDER = datetime.datetime(2024, 6, 1, 11, 0, 0)

# Minimal zone payload returned by the mocked peer endpoints.
ZONE_DATA = "$ORIGIN example.com.\n@ 300 IN SOA ns1 hostmaster 1 3600 900 604800 300\n"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Config / startup tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_disabled_by_default():
    """An empty config leaves the worker disabled."""
    assert not PeerSyncWorker({}).enabled


def test_interval_stored():
    """interval_minutes is converted to seconds."""
    worker = PeerSyncWorker({"enabled": True, "interval_minutes": 30})
    assert worker.interval_seconds == 30 * 60


def test_default_interval():
    """Interval defaults to 15 minutes when unspecified."""
    assert PeerSyncWorker({"enabled": True}).interval_seconds == 15 * 60


def test_peers_stored():
    """Peers from the config file land on the worker unchanged."""
    worker = PeerSyncWorker(BASE_CONFIG)
    assert len(worker.peers) == 1
    assert worker.peers[0]["url"] == "http://ddo-2:2222"
|
||||||
|
|
||||||
|
|
||||||
|
def test_peer_from_env_var(monkeypatch):
    """DADNS_PEER_SYNC_PEER_URL adds a peer without a config file."""
    monkeypatch.setenv("DADNS_PEER_SYNC_PEER_URL", "http://ddo-env:2222")
    monkeypatch.setenv("DADNS_PEER_SYNC_PEER_USERNAME", "admin")
    monkeypatch.setenv("DADNS_PEER_SYNC_PEER_PASSWORD", "secret")

    worker = PeerSyncWorker({"enabled": True})

    assert len(worker.peers) == 1
    peer = worker.peers[0]
    assert peer["url"] == "http://ddo-env:2222"
    assert peer["username"] == "admin"
    assert peer["password"] == "secret"


def test_env_peer_not_duplicated_when_also_in_config(monkeypatch):
    """Env var peer is not added if it already appears in the config file peers list."""
    monkeypatch.setenv("DADNS_PEER_SYNC_PEER_URL", "http://ddo-2:2222")
    worker = PeerSyncWorker(BASE_CONFIG)
    # BASE_CONFIG already contains http://ddo-2:2222 — exactly one entry must remain
    urls = [peer["url"] for peer in worker.peers]
    assert urls.count("http://ddo-2:2222") == 1


def test_numbered_env_peers(monkeypatch):
    """DADNS_PEER_SYNC_PEER_1_URL and _2_URL add multiple peers."""
    env = {
        "DADNS_PEER_SYNC_PEER_1_URL": "http://node-a:2222",
        "DADNS_PEER_SYNC_PEER_1_USERNAME": "peersync",
        "DADNS_PEER_SYNC_PEER_1_PASSWORD": "s3cr3t",
        "DADNS_PEER_SYNC_PEER_2_URL": "http://node-b:2222",
    }
    for key, value in env.items():
        monkeypatch.setenv(key, value)

    urls = [peer["url"] for peer in PeerSyncWorker({"enabled": True}).peers]
    assert "http://node-a:2222" in urls
    assert "http://node-b:2222" in urls
    assert len(urls) == 2


def test_numbered_env_peers_not_duplicated(monkeypatch):
    """Numbered env var peers are deduplicated against the config file list."""
    monkeypatch.setenv("DADNS_PEER_SYNC_PEER_1_URL", "http://ddo-2:2222")
    urls = [peer["url"] for peer in PeerSyncWorker(BASE_CONFIG).peers]
    assert urls.count("http://ddo-2:2222") == 1
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_peer_urls():
    """get_peer_urls returns just the URL of each configured peer."""
    assert PeerSyncWorker(BASE_CONFIG).get_peer_urls() == ["http://ddo-2:2222"]
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Health tracking
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_peer_health_starts_healthy():
    """A freshly constructed worker tracks every peer as healthy."""
    health = PeerSyncWorker(BASE_CONFIG)._health("http://ddo-2:2222")
    assert health["healthy"] is True
    assert health["consecutive_failures"] == 0


def test_record_failure_increments_count():
    """One failure increments the counter but does not mark the peer unhealthy."""
    worker = PeerSyncWorker(BASE_CONFIG)
    worker._record_failure("http://ddo-2:2222", ConnectionError("down"))
    health = worker._health("http://ddo-2:2222")
    assert health["consecutive_failures"] == 1
    assert health["healthy"] is True


def test_record_failure_marks_degraded_at_threshold():
    """Reaching FAILURE_THRESHOLD consecutive failures flips healthy to False."""
    from directdnsonly.app.peer_sync import FAILURE_THRESHOLD

    worker = PeerSyncWorker(BASE_CONFIG)
    for _ in range(FAILURE_THRESHOLD):
        worker._record_failure("http://ddo-2:2222", ConnectionError("down"))
    assert worker._health("http://ddo-2:2222")["healthy"] is False


def test_record_success_resets_health():
    """A success after repeated failures restores full health."""
    from directdnsonly.app.peer_sync import FAILURE_THRESHOLD

    worker = PeerSyncWorker(BASE_CONFIG)
    for _ in range(FAILURE_THRESHOLD):
        worker._record_failure("http://ddo-2:2222", ConnectionError("down"))
    assert not worker._health("http://ddo-2:2222")["healthy"]

    worker._record_success("http://ddo-2:2222")
    health = worker._health("http://ddo-2:2222")
    assert health["healthy"] is True
    assert health["consecutive_failures"] == 0
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Peer discovery (_discover_peers_from)
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_discover_peers_adds_new_peer(monkeypatch):
    """New peer URL returned by /internal/peers is added to the peer list."""
    worker = PeerSyncWorker(BASE_CONFIG)

    def fake_get(url, auth=None, timeout=10, params=None):
        return MagicMock(
            status_code=200, **{"json.return_value": ["http://node-c:2222"]}
        )

    monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", fake_get)
    worker._discover_peers_from(BASE_CONFIG["peers"][0])
    assert "http://node-c:2222" in [peer["url"] for peer in worker.peers]


def test_discover_peers_skips_known(monkeypatch):
    """Already-known peer URLs are not re-added."""
    worker = PeerSyncWorker(BASE_CONFIG)

    def fake_get(url, auth=None, timeout=10, params=None):
        # The peer reports a URL we already track
        return MagicMock(
            status_code=200, **{"json.return_value": ["http://ddo-2:2222"]}
        )

    monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", fake_get)
    worker._discover_peers_from(BASE_CONFIG["peers"][0])
    assert len(worker.peers) == 1  # unchanged


def test_discover_peers_tolerates_failure(monkeypatch):
    """Network error during discovery does not propagate."""
    worker = PeerSyncWorker(BASE_CONFIG)

    def broken_get(*args, **kwargs):
        raise ConnectionError("peer down")

    monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", broken_get)
    worker._discover_peers_from(BASE_CONFIG["peers"][0])  # must not raise
|
||||||
|
|
||||||
|
|
||||||
|
def test_start_skips_when_disabled(caplog):
    """start() is a no-op when peer sync is disabled — no thread is spawned."""
    # NOTE(review): caplog is currently unused; consider asserting on the
    # skip/disabled log message to tighten this test.
    worker = PeerSyncWorker({"enabled": False})
    worker.start()
    assert worker._thread is None


def test_start_warns_when_no_peers(caplog):
    """start() with peers=[] must warn and spawn no background thread."""
    # Fix: removed an unused function-local `import logging` left over from an
    # earlier version of this test; nothing here referenced the module.
    worker = PeerSyncWorker({"enabled": True, "peers": []})
    with patch.object(worker, "_run"):
        worker.start()
    # Thread should not have started
    assert worker._thread is None
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _sync_from_peer tests
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _make_peer():
    # Convenience accessor: the single peer entry defined in BASE_CONFIG.
    return BASE_CONFIG["peers"][0]
|
||||||
|
|
||||||
|
|
||||||
|
def _peer_list(domain, ts=None):
|
||||||
|
"""Simulate the JSON response from GET /internal/zones."""
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
"domain": domain,
|
||||||
|
"zone_updated_at": ts.isoformat() if ts else None,
|
||||||
|
"hostname": "da1.example.com",
|
||||||
|
"username": "admin",
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def _peer_zone(domain, ts=None, zone_data=ZONE_DATA):
    """Simulate the JSON response from GET /internal/zones?domain=X."""
    payload = {
        "domain": domain,
        "zone_data": zone_data,
        "hostname": "da1.example.com",
        "username": "admin",
    }
    payload["zone_updated_at"] = ts.isoformat() if ts else None
    return payload
|
||||||
|
|
||||||
|
|
||||||
|
def test_sync_creates_new_local_record(patch_connect, monkeypatch):
    """When local DB has no record, peer zone_data is fetched and stored."""
    worker = PeerSyncWorker(BASE_CONFIG)
    session = patch_connect

    def fake_get(url, auth=None, timeout=10, params=None):
        resp = MagicMock(status_code=200)
        wants_zone = bool(params and params.get("domain"))
        resp.json.return_value = (
            _peer_zone("example.com", NOW)
            if wants_zone
            else _peer_list("example.com", NOW)
        )
        return resp

    monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", fake_get)

    worker._sync_from_peer(_make_peer())

    record = session.execute(
        select(Domain).filter_by(domain="example.com")
    ).scalar_one_or_none()
    assert record is not None
    assert record.zone_data == ZONE_DATA
    assert record.zone_updated_at == NOW


def test_sync_updates_older_local_record(patch_connect, monkeypatch):
    """When local zone_data is older than peer's, it is overwritten."""
    session = patch_connect
    session.add(
        Domain(domain="example.com", zone_data="old data", zone_updated_at=OLDER)
    )
    session.commit()

    worker = PeerSyncWorker(BASE_CONFIG)

    def fake_get(url, auth=None, timeout=10, params=None):
        resp = MagicMock(status_code=200)
        wants_zone = bool(params and params.get("domain"))
        resp.json.return_value = (
            _peer_zone("example.com", NOW)
            if wants_zone
            else _peer_list("example.com", NOW)
        )
        return resp

    monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", fake_get)

    worker._sync_from_peer(_make_peer())

    record = session.execute(
        select(Domain).filter_by(domain="example.com")
    ).scalar_one_or_none()
    assert record.zone_data == ZONE_DATA
    assert record.zone_updated_at == NOW
|
||||||
|
|
||||||
|
|
||||||
|
def test_sync_skips_when_local_is_newer(patch_connect, monkeypatch):
    """When local zone_data is newer than peer's, it is not overwritten."""
    session = patch_connect
    session.add(
        Domain(domain="example.com", zone_data="newer local", zone_updated_at=NOW)
    )
    session.commit()

    worker = PeerSyncWorker(BASE_CONFIG)
    zone_fetches = []

    def fake_get(url, auth=None, timeout=10, params=None):
        resp = MagicMock(status_code=200)
        if params and params.get("domain"):
            zone_fetches.append(url)
            resp.json.return_value = _peer_zone("example.com", OLDER)
        else:
            resp.json.return_value = _peer_list("example.com", OLDER)
        return resp

    monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", fake_get)

    worker._sync_from_peer(_make_peer())

    # The full zone_data must never have been requested
    assert not zone_fetches
    record = session.execute(
        select(Domain).filter_by(domain="example.com")
    ).scalar_one_or_none()
    assert record.zone_data == "newer local"
|
||||||
|
|
||||||
|
|
||||||
|
def test_sync_skips_unreachable_peer(monkeypatch):
|
||||||
|
"""If the peer raises a connection error, _sync_all catches it gracefully."""
|
||||||
|
worker = PeerSyncWorker(BASE_CONFIG)
|
||||||
|
|
||||||
|
def mock_get(*args, **kwargs):
|
||||||
|
raise ConnectionError("peer down")
|
||||||
|
|
||||||
|
monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", mock_get)
|
||||||
|
|
||||||
|
# Should not raise
|
||||||
|
worker._sync_all()
|
||||||
|
|
||||||
|
|
||||||
|
def test_sync_skips_peer_with_bad_status(patch_connect, monkeypatch):
|
||||||
|
"""Non-200 response from peer zone list is silently skipped."""
|
||||||
|
worker = PeerSyncWorker(BASE_CONFIG)
|
||||||
|
session = patch_connect
|
||||||
|
|
||||||
|
def mock_get(url, auth=None, timeout=10, params=None):
|
||||||
|
resp = MagicMock()
|
||||||
|
resp.status_code = 503
|
||||||
|
return resp
|
||||||
|
|
||||||
|
monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", mock_get)
|
||||||
|
|
||||||
|
worker._sync_from_peer(_make_peer())
|
||||||
|
|
||||||
|
# No records should have been created
|
||||||
|
assert session.execute(select(func.count()).select_from(Domain)).scalar() == 0
|
||||||
|
|
||||||
|
|
||||||
|
def test_sync_skips_missing_zone_data_in_response(patch_connect, monkeypatch):
|
||||||
|
"""If the peer returns no zone_data for a domain, it is skipped."""
|
||||||
|
session = patch_connect
|
||||||
|
|
||||||
|
worker = PeerSyncWorker(BASE_CONFIG)
|
||||||
|
|
||||||
|
def mock_get(url, auth=None, timeout=10, params=None):
|
||||||
|
resp = MagicMock()
|
||||||
|
resp.status_code = 200
|
||||||
|
if params and params.get("domain"):
|
||||||
|
resp.json.return_value = {"domain": "example.com", "zone_data": None}
|
||||||
|
else:
|
||||||
|
resp.json.return_value = _peer_list("example.com", NOW)
|
||||||
|
return resp
|
||||||
|
|
||||||
|
monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", mock_get)
|
||||||
|
|
||||||
|
worker._sync_from_peer(_make_peer())
|
||||||
|
|
||||||
|
assert session.execute(select(func.count()).select_from(Domain)).scalar() == 0
|
||||||
|
|
||||||
|
|
||||||
|
def test_sync_empty_peer_list(patch_connect, monkeypatch):
|
||||||
|
"""Empty zone list from peer results in zero syncs without error."""
|
||||||
|
worker = PeerSyncWorker(BASE_CONFIG)
|
||||||
|
|
||||||
|
def mock_get(url, auth=None, timeout=10, params=None):
|
||||||
|
resp = MagicMock()
|
||||||
|
resp.status_code = 200
|
||||||
|
resp.json.return_value = []
|
||||||
|
return resp
|
||||||
|
|
||||||
|
monkeypatch.setattr("directdnsonly.app.peer_sync.requests.get", mock_get)
|
||||||
|
|
||||||
|
worker._sync_from_peer(_make_peer())
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# get_peer_status
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_peer_status_no_contact_yet():
|
||||||
|
worker = PeerSyncWorker(BASE_CONFIG)
|
||||||
|
status = worker.get_peer_status()
|
||||||
|
|
||||||
|
assert status["enabled"] is True
|
||||||
|
assert status["total"] == 1
|
||||||
|
assert status["healthy"] == 1
|
||||||
|
assert status["degraded"] == 0
|
||||||
|
assert status["peers"][0]["url"] == "http://ddo-2:2222"
|
||||||
|
assert status["peers"][0]["healthy"] is True
|
||||||
|
assert status["peers"][0]["last_seen"] is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_peer_status_after_success():
|
||||||
|
worker = PeerSyncWorker(BASE_CONFIG)
|
||||||
|
worker._record_success("http://ddo-2:2222")
|
||||||
|
status = worker.get_peer_status()
|
||||||
|
|
||||||
|
assert status["healthy"] == 1
|
||||||
|
assert status["degraded"] == 0
|
||||||
|
assert status["peers"][0]["last_seen"] is not None
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_peer_status_after_degraded():
|
||||||
|
from directdnsonly.app.peer_sync import FAILURE_THRESHOLD
|
||||||
|
|
||||||
|
worker = PeerSyncWorker(BASE_CONFIG)
|
||||||
|
for _ in range(FAILURE_THRESHOLD):
|
||||||
|
worker._record_failure("http://ddo-2:2222", Exception("timeout"))
|
||||||
|
|
||||||
|
status = worker.get_peer_status()
|
||||||
|
assert status["healthy"] == 0
|
||||||
|
assert status["degraded"] == 1
|
||||||
|
assert status["peers"][0]["healthy"] is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_peer_status_disabled():
|
||||||
|
worker = PeerSyncWorker({})
|
||||||
|
status = worker.get_peer_status()
|
||||||
|
|
||||||
|
assert status["enabled"] is False
|
||||||
|
assert status["total"] == 0
|
||||||
|
assert status["peers"] == []
|
||||||
399
tests/test_reconciler.py
Normal file
399
tests/test_reconciler.py
Normal file
@@ -0,0 +1,399 @@
|
|||||||
|
"""Tests for directdnsonly.app.reconciler — ReconciliationWorker."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from queue import Queue
|
||||||
|
from unittest.mock import patch, MagicMock
|
||||||
|
|
||||||
|
from directdnsonly.app.reconciler import ReconciliationWorker
|
||||||
|
from directdnsonly.app.db.models import Domain
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Fixtures
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
SERVER = {
|
||||||
|
"hostname": "da1.example.com",
|
||||||
|
"port": 2222,
|
||||||
|
"username": "admin",
|
||||||
|
"password": "secret",
|
||||||
|
"ssl": True,
|
||||||
|
}
|
||||||
|
|
||||||
|
BASE_CONFIG = {
|
||||||
|
"enabled": True,
|
||||||
|
"dry_run": False,
|
||||||
|
"interval_minutes": 60,
|
||||||
|
"verify_ssl": True,
|
||||||
|
"ipp": 100,
|
||||||
|
"directadmin_servers": [SERVER],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def delete_queue():
|
||||||
|
return Queue()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def worker(delete_queue):
|
||||||
|
return ReconciliationWorker(delete_queue, BASE_CONFIG)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
|
||||||
|
def dry_run_worker(delete_queue):
|
||||||
|
cfg = {**BASE_CONFIG, "dry_run": True}
|
||||||
|
return ReconciliationWorker(delete_queue, cfg)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
DA_CLIENT_PATH = "directdnsonly.app.reconciler.DirectAdminClient"
|
||||||
|
|
||||||
|
|
||||||
|
def _patch_da(return_value):
|
||||||
|
"""Patch DirectAdminClient so list_domains returns a fixed value."""
|
||||||
|
return patch(
|
||||||
|
DA_CLIENT_PATH, **{"return_value.list_domains.return_value": return_value}
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _reconcile_all — orphan detection
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_orphan_queued_when_domain_missing_from_da(worker, delete_queue, patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="orphan.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da(set()):
|
||||||
|
worker._reconcile_all()
|
||||||
|
|
||||||
|
assert not delete_queue.empty()
|
||||||
|
item = delete_queue.get_nowait()
|
||||||
|
assert item["domain"] == "orphan.com"
|
||||||
|
assert item["source"] == "reconciler"
|
||||||
|
|
||||||
|
|
||||||
|
def test_orphan_not_queued_in_dry_run(dry_run_worker, delete_queue, patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="orphan.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da(set()):
|
||||||
|
dry_run_worker._reconcile_all()
|
||||||
|
|
||||||
|
assert delete_queue.empty()
|
||||||
|
|
||||||
|
|
||||||
|
def test_orphan_not_queued_for_unknown_server(worker, delete_queue, patch_connect):
|
||||||
|
"""Domains whose recorded master is NOT in our configured servers are skipped."""
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="other.com", hostname="da99.unknown.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da(set()):
|
||||||
|
worker._reconcile_all()
|
||||||
|
|
||||||
|
assert delete_queue.empty()
|
||||||
|
|
||||||
|
|
||||||
|
def test_active_domain_not_queued(worker, delete_queue, patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="good.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da({"good.com"}):
|
||||||
|
worker._reconcile_all()
|
||||||
|
|
||||||
|
assert delete_queue.empty()
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _reconcile_all — hostname backfill and migration
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_backfill_null_hostname(worker, patch_connect):
|
||||||
|
patch_connect.add(Domain(domain="backfill.com", hostname=None, username="admin"))
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da({"backfill.com"}):
|
||||||
|
worker._reconcile_all()
|
||||||
|
|
||||||
|
record = patch_connect.query(Domain).filter_by(domain="backfill.com").first()
|
||||||
|
assert record.hostname == "da1.example.com"
|
||||||
|
|
||||||
|
|
||||||
|
def test_migration_updates_hostname(worker, patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="moved.com", hostname="da-old.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da({"moved.com"}):
|
||||||
|
worker._reconcile_all()
|
||||||
|
|
||||||
|
record = patch_connect.query(Domain).filter_by(domain="moved.com").first()
|
||||||
|
assert record.hostname == "da1.example.com"
|
||||||
|
|
||||||
|
|
||||||
|
def test_dry_run_still_backfills(dry_run_worker, patch_connect):
|
||||||
|
"""Backfill is a data-repair operation, applied even in dry-run mode."""
|
||||||
|
patch_connect.add(Domain(domain="fill.com", hostname=None, username="admin"))
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da({"fill.com"}):
|
||||||
|
dry_run_worker._reconcile_all()
|
||||||
|
|
||||||
|
record = patch_connect.query(Domain).filter_by(domain="fill.com").first()
|
||||||
|
assert record.hostname == "da1.example.com"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Worker lifecycle
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_disabled_worker_does_not_start(delete_queue):
|
||||||
|
cfg = {**BASE_CONFIG, "enabled": False}
|
||||||
|
w = ReconciliationWorker(delete_queue, cfg)
|
||||||
|
w.start()
|
||||||
|
assert not w.is_alive
|
||||||
|
|
||||||
|
|
||||||
|
def test_no_servers_does_not_start(delete_queue):
|
||||||
|
cfg = {**BASE_CONFIG, "directadmin_servers": []}
|
||||||
|
w = ReconciliationWorker(delete_queue, cfg)
|
||||||
|
w.start()
|
||||||
|
assert not w.is_alive
|
||||||
|
|
||||||
|
|
||||||
|
def test_initial_delay_stored(delete_queue):
|
||||||
|
cfg = {**BASE_CONFIG, "initial_delay_minutes": 30}
|
||||||
|
w = ReconciliationWorker(delete_queue, cfg)
|
||||||
|
assert w._initial_delay == 30 * 60
|
||||||
|
|
||||||
|
|
||||||
|
def test_zero_initial_delay_by_default(delete_queue):
|
||||||
|
w = ReconciliationWorker(delete_queue, BASE_CONFIG)
|
||||||
|
assert w._initial_delay == 0
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _heal_backends — Option C backend healing
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def _make_backend_registry(zone_exists_return: bool):
|
||||||
|
"""Build a mock backend_registry with one backend whose zone_exists returns
|
||||||
|
the given value."""
|
||||||
|
backend = MagicMock()
|
||||||
|
backend.zone_exists.return_value = zone_exists_return
|
||||||
|
registry = MagicMock()
|
||||||
|
registry.get_available_backends.return_value = {"coredns": backend}
|
||||||
|
return registry, backend
|
||||||
|
|
||||||
|
|
||||||
|
def test_heal_queues_zone_missing_from_backend(delete_queue, patch_connect):
|
||||||
|
save_queue = Queue()
|
||||||
|
registry, backend = _make_backend_registry(zone_exists_return=False)
|
||||||
|
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(
|
||||||
|
domain="missing.com",
|
||||||
|
hostname="da1.example.com",
|
||||||
|
username="admin",
|
||||||
|
zone_data="; zone file",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
w = ReconciliationWorker(
|
||||||
|
delete_queue, BASE_CONFIG, save_queue=save_queue, backend_registry=registry
|
||||||
|
)
|
||||||
|
w._heal_backends()
|
||||||
|
|
||||||
|
assert not save_queue.empty()
|
||||||
|
item = save_queue.get_nowait()
|
||||||
|
assert item["domain"] == "missing.com"
|
||||||
|
assert item["failed_backends"] == ["coredns"]
|
||||||
|
assert item["source"] == "reconciler_heal"
|
||||||
|
assert item["zone_file"] == "; zone file"
|
||||||
|
|
||||||
|
|
||||||
|
def test_heal_skips_domains_without_zone_data(delete_queue, patch_connect):
|
||||||
|
save_queue = Queue()
|
||||||
|
registry, _ = _make_backend_registry(zone_exists_return=False)
|
||||||
|
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(
|
||||||
|
domain="nodata.com",
|
||||||
|
hostname="da1.example.com",
|
||||||
|
username="admin",
|
||||||
|
zone_data=None,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
w = ReconciliationWorker(
|
||||||
|
delete_queue, BASE_CONFIG, save_queue=save_queue, backend_registry=registry
|
||||||
|
)
|
||||||
|
w._heal_backends()
|
||||||
|
|
||||||
|
assert save_queue.empty()
|
||||||
|
|
||||||
|
|
||||||
|
def test_heal_skips_when_all_backends_have_zone(delete_queue, patch_connect):
|
||||||
|
save_queue = Queue()
|
||||||
|
registry, _ = _make_backend_registry(zone_exists_return=True)
|
||||||
|
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(
|
||||||
|
domain="present.com",
|
||||||
|
hostname="da1.example.com",
|
||||||
|
username="admin",
|
||||||
|
zone_data="; zone file",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
w = ReconciliationWorker(
|
||||||
|
delete_queue, BASE_CONFIG, save_queue=save_queue, backend_registry=registry
|
||||||
|
)
|
||||||
|
w._heal_backends()
|
||||||
|
|
||||||
|
assert save_queue.empty()
|
||||||
|
|
||||||
|
|
||||||
|
def test_heal_dry_run_does_not_queue(delete_queue, patch_connect):
|
||||||
|
save_queue = Queue()
|
||||||
|
registry, _ = _make_backend_registry(zone_exists_return=False)
|
||||||
|
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(
|
||||||
|
domain="dry.com",
|
||||||
|
hostname="da1.example.com",
|
||||||
|
username="admin",
|
||||||
|
zone_data="; zone file",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
cfg = {**BASE_CONFIG, "dry_run": True}
|
||||||
|
w = ReconciliationWorker(
|
||||||
|
delete_queue, cfg, save_queue=save_queue, backend_registry=registry
|
||||||
|
)
|
||||||
|
w._heal_backends()
|
||||||
|
|
||||||
|
assert save_queue.empty()
|
||||||
|
|
||||||
|
|
||||||
|
def test_heal_skipped_when_no_registry(delete_queue, patch_connect):
|
||||||
|
"""_heal_backends should not run when backend_registry is None."""
|
||||||
|
save_queue = Queue()
|
||||||
|
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(
|
||||||
|
domain="noregistry.com",
|
||||||
|
hostname="da1.example.com",
|
||||||
|
username="admin",
|
||||||
|
zone_data="; zone file",
|
||||||
|
)
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
w = ReconciliationWorker(delete_queue, BASE_CONFIG, save_queue=save_queue)
|
||||||
|
# Should not raise; healing is silently skipped
|
||||||
|
with _patch_da({"noregistry.com"}):
|
||||||
|
w._reconcile_all()
|
||||||
|
|
||||||
|
assert save_queue.empty()
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# get_status — last-run state
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_status_before_any_run(worker):
|
||||||
|
status = worker.get_status()
|
||||||
|
assert status["enabled"] is True
|
||||||
|
assert status["alive"] is False
|
||||||
|
assert status["last_run"] == {}
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_status_after_run(worker, patch_connect):
|
||||||
|
with _patch_da(set()):
|
||||||
|
worker._reconcile_all()
|
||||||
|
|
||||||
|
s = worker.get_status()
|
||||||
|
assert s["enabled"] is True
|
||||||
|
lr = s["last_run"]
|
||||||
|
assert lr["status"] == "ok"
|
||||||
|
assert "started_at" in lr
|
||||||
|
assert "completed_at" in lr
|
||||||
|
assert "duration_seconds" in lr
|
||||||
|
assert lr["da_servers_polled"] == 1
|
||||||
|
assert lr["da_servers_unreachable"] == 0
|
||||||
|
assert lr["dry_run"] is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_status_counts_unreachable_server(worker, patch_connect):
|
||||||
|
with _patch_da(None):
|
||||||
|
worker._reconcile_all()
|
||||||
|
|
||||||
|
lr = worker.get_status()["last_run"]
|
||||||
|
assert lr["da_servers_polled"] == 1
|
||||||
|
assert lr["da_servers_unreachable"] == 1
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_status_counts_orphans(worker, delete_queue, patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="orphan.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da(set()):
|
||||||
|
worker._reconcile_all()
|
||||||
|
|
||||||
|
lr = worker.get_status()["last_run"]
|
||||||
|
assert lr["orphans_found"] == 1
|
||||||
|
assert lr["orphans_queued"] == 1
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_status_dry_run_orphans_not_queued_in_stats(dry_run_worker, patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="orphan.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da(set()):
|
||||||
|
dry_run_worker._reconcile_all()
|
||||||
|
|
||||||
|
lr = dry_run_worker.get_status()["last_run"]
|
||||||
|
assert lr["dry_run"] is True
|
||||||
|
assert lr["orphans_found"] == 1
|
||||||
|
assert lr["orphans_queued"] == 0
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_status_zones_in_db_counted(worker, patch_connect):
|
||||||
|
for d in ["a.com", "b.com", "c.com"]:
|
||||||
|
patch_connect.add(Domain(domain=d, hostname="da1.example.com", username="admin"))
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
with _patch_da({"a.com", "b.com", "c.com"}):
|
||||||
|
worker._reconcile_all()
|
||||||
|
|
||||||
|
lr = worker.get_status()["last_run"]
|
||||||
|
assert lr["zones_in_db"] == 3
|
||||||
|
assert lr["zones_in_da"] == 3
|
||||||
|
assert lr["orphans_found"] == 0
|
||||||
162
tests/test_status_api.py
Normal file
162
tests/test_status_api.py
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
"""Tests for directdnsonly.app.api.status — StatusAPI."""
|
||||||
|
|
||||||
|
import json
|
||||||
|
from unittest.mock import MagicMock
|
||||||
|
|
||||||
|
import cherrypy
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from directdnsonly.app.api.status import StatusAPI
|
||||||
|
from directdnsonly.app.db.models import Domain
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
_RECONCILER_OK = {
|
||||||
|
"enabled": True,
|
||||||
|
"alive": True,
|
||||||
|
"dry_run": False,
|
||||||
|
"interval_minutes": 60,
|
||||||
|
"last_run": {},
|
||||||
|
}
|
||||||
|
_PEER_SYNC_OFF = {
|
||||||
|
"enabled": False,
|
||||||
|
"alive": False,
|
||||||
|
"peers": [],
|
||||||
|
"total": 0,
|
||||||
|
"healthy": 0,
|
||||||
|
"degraded": 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def _qs(**overrides):
|
||||||
|
base = {
|
||||||
|
"save_queue_size": 0,
|
||||||
|
"delete_queue_size": 0,
|
||||||
|
"retry_queue_size": 0,
|
||||||
|
"dead_letters": 0,
|
||||||
|
"save_worker_alive": True,
|
||||||
|
"delete_worker_alive": True,
|
||||||
|
"retry_worker_alive": True,
|
||||||
|
"reconciler": _RECONCILER_OK,
|
||||||
|
"peer_sync": _PEER_SYNC_OFF,
|
||||||
|
}
|
||||||
|
base.update(overrides)
|
||||||
|
return base
|
||||||
|
|
||||||
|
|
||||||
|
def _api(qs=None):
|
||||||
|
wm = MagicMock()
|
||||||
|
wm.queue_status.return_value = qs or _qs()
|
||||||
|
return StatusAPI(wm)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _compute_overall
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_overall_ok_all_healthy():
|
||||||
|
assert StatusAPI._compute_overall(_qs()) == "ok"
|
||||||
|
|
||||||
|
|
||||||
|
def test_overall_error_save_worker_dead():
|
||||||
|
assert StatusAPI._compute_overall(_qs(save_worker_alive=False)) == "error"
|
||||||
|
|
||||||
|
|
||||||
|
def test_overall_error_delete_worker_dead():
|
||||||
|
assert StatusAPI._compute_overall(_qs(delete_worker_alive=False)) == "error"
|
||||||
|
|
||||||
|
|
||||||
|
def test_overall_degraded_retries_pending():
|
||||||
|
assert StatusAPI._compute_overall(_qs(retry_queue_size=3)) == "degraded"
|
||||||
|
|
||||||
|
|
||||||
|
def test_overall_degraded_dead_letters():
|
||||||
|
assert StatusAPI._compute_overall(_qs(dead_letters=1)) == "degraded"
|
||||||
|
|
||||||
|
|
||||||
|
def test_overall_degraded_peer_unhealthy():
|
||||||
|
ps = {**_PEER_SYNC_OFF, "degraded": 1}
|
||||||
|
assert StatusAPI._compute_overall(_qs(peer_sync=ps)) == "degraded"
|
||||||
|
|
||||||
|
|
||||||
|
def test_overall_error_takes_priority_over_degraded():
|
||||||
|
"""error > degraded when both conditions are true."""
|
||||||
|
assert (
|
||||||
|
StatusAPI._compute_overall(
|
||||||
|
_qs(save_worker_alive=False, retry_queue_size=5)
|
||||||
|
)
|
||||||
|
== "error"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# _build — structure and zone count
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_build_structure(patch_connect):
|
||||||
|
api = _api()
|
||||||
|
result = api._build()
|
||||||
|
|
||||||
|
assert "status" in result
|
||||||
|
assert "queues" in result
|
||||||
|
assert "workers" in result
|
||||||
|
assert "reconciler" in result
|
||||||
|
assert "peer_sync" in result
|
||||||
|
assert "zones" in result
|
||||||
|
|
||||||
|
|
||||||
|
def test_build_zone_count_zero(patch_connect):
|
||||||
|
api = _api()
|
||||||
|
result = api._build()
|
||||||
|
assert result["zones"]["total"] == 0
|
||||||
|
|
||||||
|
|
||||||
|
def test_build_zone_count_with_domains(patch_connect):
|
||||||
|
for d in ["a.com", "b.com", "c.com"]:
|
||||||
|
patch_connect.add(Domain(domain=d, hostname="da1.example.com", username="admin"))
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
api = _api()
|
||||||
|
result = api._build()
|
||||||
|
assert result["zones"]["total"] == 3
|
||||||
|
|
||||||
|
|
||||||
|
def test_build_queues_forwarded(patch_connect):
|
||||||
|
api = _api(_qs(save_queue_size=2, delete_queue_size=1, retry_queue_size=3, dead_letters=1))
|
||||||
|
result = api._build()
|
||||||
|
|
||||||
|
assert result["queues"]["save"] == 2
|
||||||
|
assert result["queues"]["delete"] == 1
|
||||||
|
assert result["queues"]["retry"] == 3
|
||||||
|
assert result["queues"]["dead_letters"] == 1
|
||||||
|
|
||||||
|
|
||||||
|
def test_build_workers_forwarded(patch_connect):
|
||||||
|
api = _api()
|
||||||
|
result = api._build()
|
||||||
|
|
||||||
|
assert result["workers"]["save"] is True
|
||||||
|
assert result["workers"]["delete"] is True
|
||||||
|
assert result["workers"]["retry_drain"] is True
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# index — JSON encoding
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_index_returns_valid_json(patch_connect):
|
||||||
|
api = _api()
|
||||||
|
with MagicMock() as mock_resp:
|
||||||
|
cherrypy.response = mock_resp
|
||||||
|
cherrypy.response.headers = {}
|
||||||
|
body = api.index()
|
||||||
|
|
||||||
|
data = json.loads(body)
|
||||||
|
assert data["status"] == "ok"
|
||||||
|
assert isinstance(data["zones"]["total"], int)
|
||||||
138
tests/test_utils.py
Normal file
138
tests/test_utils.py
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
"""Tests for directdnsonly.app.utils — zone index helper functions."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from directdnsonly.app.db.models import Domain
|
||||||
|
from directdnsonly.app.utils import (
|
||||||
|
check_zone_exists,
|
||||||
|
check_parent_domain_owner,
|
||||||
|
get_domain_record,
|
||||||
|
get_parent_domain_record,
|
||||||
|
put_zone_index,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# check_zone_exists
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_check_zone_exists_not_found(patch_connect):
|
||||||
|
assert check_zone_exists("example.com") is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_check_zone_exists_found(patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="example.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
assert check_zone_exists("example.com") is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_check_zone_exists_does_not_match_partial(patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="example.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
assert check_zone_exists("sub.example.com") is False
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# put_zone_index
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_put_zone_index_adds_record(patch_connect):
|
||||||
|
put_zone_index("new.com", "da1.example.com", "admin")
|
||||||
|
|
||||||
|
record = patch_connect.query(Domain).filter_by(domain="new.com").first()
|
||||||
|
assert record is not None
|
||||||
|
assert record.hostname == "da1.example.com"
|
||||||
|
assert record.username == "admin"
|
||||||
|
|
||||||
|
|
||||||
|
def test_put_zone_index_stores_domain_name(patch_connect):
|
||||||
|
put_zone_index("another.nz", "da2.example.com", "user1")
|
||||||
|
|
||||||
|
assert check_zone_exists("another.nz") is True
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# get_domain_record
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_domain_record_returns_none_when_missing(patch_connect):
|
||||||
|
assert get_domain_record("missing.com") is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_domain_record_returns_record(patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="found.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
record = get_domain_record("found.com")
|
||||||
|
assert record is not None
|
||||||
|
assert record.domain == "found.com"
|
||||||
|
assert record.hostname == "da1.example.com"
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# check_parent_domain_owner
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_check_parent_domain_owner_not_found(patch_connect):
|
||||||
|
assert check_parent_domain_owner("sub.example.com") is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_check_parent_domain_owner_found(patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="example.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
assert check_parent_domain_owner("sub.example.com") is True
|
||||||
|
|
||||||
|
|
||||||
|
def test_check_parent_domain_owner_single_label_returns_false(patch_connect):
|
||||||
|
# A single-label name like "com" has no parent
|
||||||
|
assert check_parent_domain_owner("com") is False
|
||||||
|
|
||||||
|
|
||||||
|
def test_check_parent_domain_owner_ignores_grandparent(patch_connect):
|
||||||
|
# Only the immediate parent is checked, not grandparents
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="example.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
# deep.sub.example.com's immediate parent is sub.example.com (not in DB)
|
||||||
|
assert check_parent_domain_owner("deep.sub.example.com") is False
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# get_parent_domain_record
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_parent_domain_record_returns_none_when_missing(patch_connect):
|
||||||
|
assert get_parent_domain_record("sub.example.com") is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_parent_domain_record_returns_parent(patch_connect):
|
||||||
|
patch_connect.add(
|
||||||
|
Domain(domain="example.com", hostname="da1.example.com", username="admin")
|
||||||
|
)
|
||||||
|
patch_connect.commit()
|
||||||
|
|
||||||
|
parent = get_parent_domain_record("sub.example.com")
|
||||||
|
assert parent is not None
|
||||||
|
assert parent.domain == "example.com"
|
||||||
|
|
||||||
|
|
||||||
|
def test_get_parent_domain_record_single_label_returns_none(patch_connect):
|
||||||
|
assert get_parent_domain_record("com") is None
|
||||||
101
tests/test_zone_parser.py
Normal file
101
tests/test_zone_parser.py
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
"""Tests for directdnsonly.app.utils.zone_parser."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
from dns.exception import DNSException
|
||||||
|
|
||||||
|
from directdnsonly.app.utils.zone_parser import (
|
||||||
|
count_zone_records,
|
||||||
|
validate_and_normalize_zone,
|
||||||
|
)
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# Helpers
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Bare single-record zone with neither $ORIGIN nor $TTL — used to exercise
# the normalizer's directive-injection paths.
MINIMAL_ZONE = "example.com. 300 IN A 1.2.3.4"
|
||||||
|
|
||||||
|
FULL_ZONE = """\
|
||||||
|
$ORIGIN example.com.
|
||||||
|
$TTL 300
|
||||||
|
@ IN SOA ns1.example.com. admin.example.com. 2024010101 3600 900 604800 300
|
||||||
|
@ IN NS ns1.example.com.
|
||||||
|
@ IN A 1.2.3.4
|
||||||
|
www IN A 5.6.7.8
|
||||||
|
mail IN MX 10 mail.example.com.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# validate_and_normalize_zone
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_validate_adds_origin_when_missing():
    """An $ORIGIN directive is injected when the input zone lacks one."""
    normalized = validate_and_normalize_zone(MINIMAL_ZONE, "example.com")
    assert "$ORIGIN example.com." in normalized
|
||||||
|
|
||||||
|
|
||||||
|
def test_validate_adds_ttl_when_missing():
    """A $TTL directive is injected when the input zone lacks one."""
    normalized = validate_and_normalize_zone(MINIMAL_ZONE, "example.com")
    assert "$TTL" in normalized
|
||||||
|
|
||||||
|
|
||||||
|
def test_validate_does_not_duplicate_origin():
    """An existing $ORIGIN directive must not be added a second time."""
    zone_with_origin = "$ORIGIN example.com.\nexample.com. 300 IN A 1.2.3.4"
    normalized = validate_and_normalize_zone(zone_with_origin, "example.com")
    assert normalized.count("$ORIGIN") == 1
|
||||||
|
|
||||||
|
|
||||||
|
def test_validate_does_not_duplicate_ttl():
    """An existing $TTL directive must not be added a second time."""
    zone_with_ttl = "$TTL 300\nexample.com. 300 IN A 1.2.3.4"
    normalized = validate_and_normalize_zone(zone_with_ttl, "example.com")
    assert normalized.count("$TTL") == 1
|
||||||
|
|
||||||
|
|
||||||
|
def test_validate_appends_dot_to_domain():
    """The domain passed without a trailing dot gets one in the $ORIGIN line."""
    normalized = validate_and_normalize_zone(MINIMAL_ZONE, "example.com")
    assert "$ORIGIN example.com." in normalized
|
||||||
|
|
||||||
|
|
||||||
|
def test_validate_returns_string():
    """The normalizer's output type is a plain string."""
    normalized = validate_and_normalize_zone(MINIMAL_ZONE, "example.com")
    assert isinstance(normalized, str)
|
||||||
|
|
||||||
|
|
||||||
|
def test_validate_full_zone_passes():
    """A complete, well-formed zone passes validation without raising."""
    normalized = validate_and_normalize_zone(FULL_ZONE, "example.com")
    assert normalized is not None
|
||||||
|
|
||||||
|
|
||||||
|
def test_validate_raises_on_invalid_zone():
    """Unparseable input is rejected with a ValueError."""
    garbage = "this is not a zone file at all !!!"
    with pytest.raises(ValueError, match="Invalid zone data"):
        validate_and_normalize_zone(garbage, "example.com")
|
||||||
|
|
||||||
|
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
# count_zone_records
|
||||||
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
def test_count_records_simple_zone():
    """A zone holding one A and one AAAA record counts as two records."""
    two_record_zone = "$ORIGIN example.com.\n$TTL 300\n@ IN A 1.2.3.4\n@ IN AAAA ::1\n"
    assert count_zone_records(two_record_zone, "example.com") == 2
|
||||||
|
|
||||||
|
|
||||||
|
def test_count_records_soa_included():
    """The SOA record is included in the total count."""
    # SOA + NS + A (apex) + A (www) + MX = 5
    assert count_zone_records(FULL_ZONE, "example.com") == 5
|
||||||
|
|
||||||
|
|
||||||
|
def test_count_records_returns_negative_on_bad_zone():
    """Unparseable zone data yields the -1 sentinel instead of raising."""
    result = count_zone_records("not a valid zone", "example.com")
    assert result == -1
|
||||||
|
|
||||||
|
|
||||||
|
def test_count_records_empty_zone():
    """A zone containing only directives and no records counts as zero."""
    directives_only = "$ORIGIN example.com.\n$TTL 300\n"
    assert count_zone_records(directives_only, "example.com") == 0
|
||||||
Reference in New Issue
Block a user