feat: migrate to Poetry and implement multi-backend DNS management

- Migrated from setuptools to Poetry; added pyproject.toml, poetry.lock,
  poetry.toml and .python-version (Python 3.11.12)
- Built out full directdnsonly Python package with BIND and CoreDNS MySQL
  backends, CherryPy REST API, persist-queue worker, and vyper-based config
- Auth credentials now read from config/env (app.auth_username/password)
  rather than hardcoded; override via DADNS_APP_AUTH_PASSWORD env var
- Added Dockerfile.deepseek: Python 3.11 slim + BIND9 + Poetry install
- Rewrote docker-compose.yml for local dev stack (MySQL + dadns services)
- Added SQL schema, docker/ BIND configs, justfile, tests, and README
- Expanded .gitignore for Poetry/Python project artifacts
This commit is contained in:
2026-02-17 16:12:46 +13:00
parent 1d1c12b661
commit 6445cf49c0
37 changed files with 3347 additions and 54 deletions

23
.gitignore vendored
View File

@@ -3,4 +3,25 @@ venv/
.venv
.idea
build
!build/.gitkeep
!build/.gitkeep
**/__pycache__/
*.pyc
*.pyo
*.pyd
*.egg-info
*.egg
*.log
*.DS_Store
*.swp
*.swo
*.bak
*.tmp
*.orig
*.coverage
*.cover
*.tox
*.dist-info
*.egg-info
*.mypy_cache
*.pytest_cache
/data/*

1
.python-version Normal file
View File

@@ -0,0 +1 @@
3.11.12

View File

@@ -1,6 +1,6 @@
FROM pypy:slim-buster
RUN mkdir -p /opt/apikeyhandler/config
RUN mkdir -p /opt/apikeyhandler/conf
VOLUME /opt/apikeyhandler/config
COPY ./src/ /opt/apikeyhandler

53
Dockerfile.deepseek Normal file
View File

@@ -0,0 +1,53 @@
# Runtime image: Python 3.11 app colocated with BIND9 for local DNS serving.
FROM python:3.11.12-slim

# System deps: BIND + tooling, plus compiler/headers needed to build MySQL wheels.
# --no-install-recommends keeps the image lean; apt lists removed in the same layer.
RUN apt-get update && apt-get install -y --no-install-recommends \
    bind9 \
    bind9utils \
    default-libmysqlclient-dev \
    dnsutils \
    gcc \
    python3-dev \
    && rm -rf /var/lib/apt/lists/*

# Configure BIND zone storage (written by the app, served by named)
RUN mkdir -p /etc/named/zones && \
    chown -R bind:bind /etc/named && \
    chmod 755 /etc/named/zones
COPY docker/named.conf.local /etc/bind/
COPY docker/named.conf.options /etc/bind/
RUN chown root:bind /etc/bind/named.conf.*

# Install Python dependencies
WORKDIR /app
# Copy dependency manifests first so the install layer stays cached until
# pyproject.toml / poetry.lock actually change.
COPY pyproject.toml poetry.lock README.md ./
# Install specific Poetry version that matches your lock file
RUN pip install --no-cache-dir "poetry==2.1.2"

# Copy application files
COPY directdnsonly ./directdnsonly
COPY config ./config
COPY schema ./schema
# Install into the system interpreter (no venv inside the container);
# --only main keeps dev dependencies out of the runtime image.
RUN poetry config virtualenvs.create false && \
    poetry install --only main --no-interaction --no-ansi

# Create data directories (queues, generated zones, logs)
RUN mkdir -p /app/data/queues /app/data/zones /app/logs && \
    chmod -R 755 /app/data

# Configure BIND zone directory to match app config
#RUN ln -s /app/data/zones /etc/named/zones/dadns

# Start script
COPY docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

EXPOSE 2222 53/udp
CMD ["/entrypoint.sh"]

View File

@@ -1,4 +1,4 @@
FROM python:3.7.9 as builder
FROM python:3.8 AS builder
# Allow Passing Version from CI
ARG VERSION
ENV LC_ALL=en_NZ.utf8
@@ -6,7 +6,7 @@ ENV LANG=en_NZ.utf8
ENV APP_NAME="directdnsonly"
RUN mkdir -p /tmp/build && apt-get update && \
apt-get install -y libgcc1-dbg libssl-dev
apt-get install -y libssl-dev python3-cryptography
COPY src/ /tmp/build/
COPY requirements.txt /tmp/build
@@ -29,6 +29,7 @@ RUN pip3 install -r requirements.txt && \
--hidden-import=cheroot \
--hidden-import=cheroot.ssl.pyopenssl \
--hidden-import=cheroot.ssl.builtin \
--hidden-import=lib \
--noconfirm --onefile ${APP_NAME}.py && \
cd /tmp/build/dist && \
staticx ${APP_NAME} ./${APP_NAME}_static
@@ -39,10 +40,8 @@ RUN mkdir -p /tmp/approot && \
mkdir -p /tmp/approot/etc && \
mkdir -p /tmp/approot/tmp && \
mkdir -p /tmp/approot/data && \
mkdir -p /tmp/approot/lib/x86_64-linux-gnu && \
cp /tmp/build/config/app.yml /tmp/approot/app/config/app.yml && \
cp /tmp/build/dist/${APP_NAME}_static /tmp/approot/app/${APP_NAME} && \
cp /usr/lib/gcc/x86_64-linux-gnu/8/libgcc_s.so.1 /tmp/approot/lib/x86_64-linux-gnu/libgcc_s.so.1
cp /tmp/build/dist/${APP_NAME}_static /tmp/approot/app/${APP_NAME}
FROM scratch
COPY --from=builder /tmp/approot /

40
README.md Normal file
View File

@@ -0,0 +1,40 @@
# DaDNS - DNS Management System
## Features
- Multi-backend DNS management (BIND, CoreDNS MySQL)
- Atomic zone updates
- Thread-safe operations
- Loguru-based logging
## Installation
```bash
poetry install
poetry run dadns
```
## Configuration
Edit `config/app.yml` for backend settings.
### Config Files
#### `config/app.yml`
```yaml
timezone: Pacific/Auckland
log_level: INFO
queue_location: ./data/queues
dns:
  default_backend: bind
  backends:
    bind:
      enabled: true
      zones_dir: ./data/zones
      named_conf: ./data/named.conf.include
    coredns_mysql:
      enabled: true
      host: "127.0.0.1"
      port: 3306
      database: "coredns"
      username: "coredns"
      password: "password"

1
config.json Normal file
View File

@@ -0,0 +1 @@
{}

29
config/app.yml Normal file
View File

@@ -0,0 +1,29 @@
---
timezone: Pacific/Auckland
log_level: INFO
queue_location: ./data/queues
dns:
  # default_backend: coredns_mysql
  backends:
    bind_backend:
      type: bind
      enabled: false
      zones_dir: /etc/named/zones/dadns
      named_conf: /etc/bind/named.conf.local
    coredns_primary:
      # "type" is required by the backend registry; instances without it
      # are skipped at startup, so it must be set for enabled backends.
      type: coredns_mysql
      enabled: true
      host: "mysql" # Matches Docker service name
      port: 3306
      database: "coredns"
      username: "coredns"
      password: "coredns123"
      table_name: "records"
    coredns_secondary:
      type: coredns_mysql
      enabled: false
      host: "mysql" # Matches Docker service name
      port: 3306
      database: "coredns"
      username: "coredns"
      password: "coredns123"
      table_name: "records"

View File

@@ -0,0 +1 @@
# Package initialization

View File

@@ -0,0 +1,18 @@
from loguru import logger
import sys
from config import config
def configure_logging():
    """Configure loguru sinks: a colourised stderr sink plus a rotating debug file.

    The console level comes from config ("log_level") and falls back to INFO
    when the key is absent, so a missing config entry cannot break startup
    (``logger.add(level=None)`` would raise).
    """
    # Drop loguru's default stderr handler before installing our own.
    logger.remove()
    logger.add(
        sys.stderr,
        level=config.get("log_level") or "INFO",  # fall back if unset
        format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
    )
    # File sink always captures DEBUG detail; loguru rotates and prunes it.
    logger.add(
        "logs/directdnsonly_{time}.log",
        rotation="10 MB",
        retention="30 days",
        level="DEBUG",
    )

View File

@@ -0,0 +1 @@
# Package initialization

View File

@@ -0,0 +1,134 @@
import cherrypy
from urllib.parse import urlencode, parse_qs
from loguru import logger
from directdnsonly.app.utils.zone_parser import validate_and_normalize_zone
class DNSAdminAPI:
    """CherryPy handler exposing a DirectAdmin-compatible DNS admin endpoint.

    Accepted work is not executed inline: zone saves and deletions are pushed
    onto the provided queues for asynchronous processing.
    """

    def __init__(self, save_queue, delete_queue, backend_registry):
        # Queues are produced here; a separate worker consumes them.
        self.save_queue = save_queue
        self.delete_queue = delete_queue
        self.backend_registry = backend_registry

    @cherrypy.expose
    def index(self):
        # Human-readable landing page; not part of the DirectAdmin API surface.
        return "DNS Admin API - Available endpoints: /CMD_API_DNS_ADMIN"
@cherrypy.expose
def CMD_API_DNS_ADMIN(self, **params):
    """Handle both DirectAdmin-style API calls and raw zone file uploads.

    Accepts POST only. Parameters may arrive via the query string, a
    form-urlencoded body, a text/plain body (treated as the zone file), or
    a raw body fallback. Responses are urlencoded DirectAdmin-style
    key/value strings ({"error": 0|1, ...}).
    """
    try:
        if cherrypy.request.method != "POST":
            cherrypy.response.status = 405
            return urlencode({"error": 1, "text": "Method not allowed"})
        # Parse parameters from both query string and body
        body_params = {}
        if cherrypy.request.body:
            content_type = cherrypy.request.headers.get("Content-Type", "")
            if "application/x-www-form-urlencoded" in content_type:
                raw_body = cherrypy.request.body.read()
                if raw_body:
                    body_params = parse_qs(raw_body.decode("utf-8"))
                    # parse_qs yields lists; unwrap single-valued keys.
                    body_params = {
                        k: v[0] if len(v) == 1 else v
                        for k, v in body_params.items()
                    }
            elif "text/plain" in content_type:
                body_params = {
                    "zone_file": cherrypy.request.body.read().decode("utf-8")
                }
        # Combine parameters (body overrides query)
        all_params = {**params, **body_params}
        logger.debug(f"Request parameters: {all_params}")
        if "zone_file" not in all_params:
            logger.debug(
                "No zone file provided. Maybe in body as DirectAdmin does?"
            )
            # Grab from body
            # NOTE(review): if the body was already consumed by the
            # form-urlencoded branch above, this second read() yields an
            # empty string — presumably only unread bodies reach here;
            # confirm against DirectAdmin's actual request shape.
            all_params["zone_file"] = str(cherrypy.request.body.read(), "utf-8")
            logger.debug("Read zone file from body :)")
        # Required parameters
        action = all_params.get("action")
        domain = all_params.get("domain")
        if not action:
            # DirectAdmin sends an initial request without an action
            # parameter as a connectivity check — respond with success
            logger.debug("Received request with no action — connectivity check")
            return urlencode({"error": 0, "text": "OK"})
        if not domain:
            raise ValueError("Missing 'domain' parameter")
        # Handle different actions
        if action == "rawsave":
            return self._handle_rawsave(domain, all_params)
        elif action == "delete":
            return self._handle_delete(domain, all_params)
        else:
            raise ValueError(f"Unsupported action: {action}")
    except Exception as e:
        # Any failure maps onto a DirectAdmin-style error response.
        logger.error(f"API error: {str(e)}")
        cherrypy.response.status = 400
        return urlencode({"error": 1, "text": str(e)})
def _handle_rawsave(self, domain: str, params: dict):
    """Validate an uploaded zone file and queue it for persistence."""
    raw_zone = params.get("zone_file")
    if not raw_zone:
        raise ValueError("Missing zone file content")
    normalized_zone = validate_and_normalize_zone(raw_zone, domain)
    logger.info(f"Validated zone for {domain}")
    job = {
        "domain": domain,
        "zone_file": normalized_zone,
        "hostname": params.get("hostname", ""),
        "username": params.get("username", ""),
        "client_ip": cherrypy.request.remote.ip,
    }
    self.save_queue.put(job)
    logger.success(f"Queued zone update for {domain}")
    return urlencode({"error": 0})
def _handle_delete(self, domain: str, params: dict):
    """Queue a zone deletion for asynchronous processing."""
    job = {
        "domain": domain,
        "hostname": params.get("hostname", ""),
        "username": params.get("username", ""),
        "client_ip": cherrypy.request.remote.ip,
    }
    self.delete_queue.put(job)
    logger.success(f"Queued deletion for {domain}")
    return urlencode({"error": 0})
@cherrypy.expose
def queue_status(self):
    """Debug endpoint for queue monitoring.

    Returns a JSON document. CherryPy page handlers must return str/bytes;
    the original returned a bare dict, which CherryPy rejects at runtime,
    so the payload is serialised explicitly here.
    """
    import json  # local import: only this debug endpoint needs it

    cherrypy.response.headers["Content-Type"] = "application/json"
    return json.dumps(
        {
            "save_queue_size": self.save_queue.qsize(),
            "delete_queue_size": self.delete_queue.qsize(),
            "last_save_item": self._get_last_item(self.save_queue),
            "last_delete_item": self._get_last_item(self.delete_queue),
        },
        default=str,  # queue items may contain non-JSON-native values
    )
@staticmethod
def _get_last_item(queue):
    """Best-effort peek at a queue's most recent item; never raises."""
    try:
        if not hasattr(queue, "last_item"):
            # Queue implementation does not track its last item.
            return "Last item tracking not available"
        return queue.last_item
    except Exception:
        return "Error retrieving last item"

View File

@@ -0,0 +1,24 @@
import cherrypy
from loguru import logger
class HealthAPI:
    """CherryPy handler exposing a simple backend liveness endpoint."""

    def __init__(self, backend_registry):
        self.registry = backend_registry

    @cherrypy.expose
    def health(self):
        """Report overall status plus a probe result per enabled backend.

        Fixes two defects in the original:
        - the registry yields backend *instances*, so ``backend()`` tried to
          call the instance itself (TypeError); methods are now invoked on
          the instance directly, with zone_exists("test") used as a cheap
          connectivity probe (an exception marks the backend unavailable).
        - CherryPy page handlers must return str/bytes, not dicts, so the
          status document is JSON-serialised.
        """
        import json  # local import keeps module-level imports unchanged

        status = {"status": "OK", "backends": []}
        for name, backend in self.registry.get_available_backends().items():
            try:
                # Probe: any clean round-trip means the backend is reachable.
                backend.zone_exists("test")
                backend_state = "active"
            except Exception:
                backend_state = "unavailable"
            status["backends"].append({"name": name, "status": backend_state})
        logger.debug("Health check performed")
        cherrypy.response.headers["Content-Type"] = "application/json"
        return json.dumps(status)

View File

@@ -0,0 +1,89 @@
from typing import Dict, Type, Optional
from .base import DNSBackend
from .bind import BINDBackend
from .coredns_mysql import CoreDNSMySQLBackend
from directdnsonly.config import config
from loguru import logger
class BackendRegistry:
    """Discovers, instantiates and caches configured DNS backend instances.

    Expected configuration shape (under the "dns" config key)::

        dns:
          backends:
            <instance_name>:
              type: bind | coredns_mysql
              enabled: true|false
              ...backend-specific settings...
    """

    def __init__(self):
        # Backend *type* name -> implementing class.
        self._backend_types = {
            "bind": BINDBackend,
            "coredns_mysql": CoreDNSMySQLBackend,
        }
        # Configured *instance* name -> initialized backend object.
        self._backend_instances: Dict[str, DNSBackend] = {}
        self._initialized = False

    def _initialize_backends(self):
        """Initialize and cache all enabled backend instances (idempotent)."""
        if self._initialized:
            return
        try:
            logger.debug("Attempting to load backend configurations")
            backend_configs = config.get("dns")
            if not backend_configs:
                logger.warning("No 'dns' configuration found")
                self._initialized = True
                return
            backend_configs = backend_configs.get("backends")
            if not backend_configs:
                logger.warning("No 'dns.backends' configuration found")
                self._initialized = True
                return
            logger.debug(f"Found backend configs: {backend_configs}")
            for instance_name, instance_config in backend_configs.items():
                logger.debug(f"Processing backend instance: {instance_name}")
                backend_type = instance_config.get("type")
                if not backend_type:
                    logger.warning(
                        f"No type specified for backend instance: {instance_name}"
                    )
                    continue
                if backend_type not in self._backend_types:
                    logger.warning(
                        f"Unknown backend type '{backend_type}' for instance: {instance_name}"
                    )
                    continue
                backend_class = self._backend_types[backend_type]
                if not backend_class.is_available():
                    logger.warning(
                        f"Backend {backend_type} is not available for instance: {instance_name}"
                    )
                    continue
                enabled = instance_config.get("enabled", False)
                if not enabled:
                    logger.debug(f"Backend instance {instance_name} is disabled")
                    continue
                logger.debug(
                    f"Initializing backend instance {instance_name} of type {backend_type}"
                )
                try:
                    # Propagate the configured key so DNSBackend.instance_name
                    # reflects the instance name rather than the type name
                    # (the base class falls back to get_name() otherwise).
                    instance_config.setdefault("instance_name", instance_name)
                    backend = backend_class(instance_config)
                    self._backend_instances[instance_name] = backend
                    logger.info(
                        f"Successfully initialized backend instance: {instance_name}"
                    )
                except Exception as e:
                    logger.error(
                        f"Failed to initialize backend instance {instance_name}: {e}"
                    )
        except Exception as e:
            logger.error(f"Error loading backend configurations: {e}")
        # Mark done even after errors so a broken config is not re-walked
        # on every request.
        self._initialized = True

    def get_available_backends(self) -> Dict[str, DNSBackend]:
        """Return cached backend instances, initializing on first call"""
        self._initialize_backends()
        return self._backend_instances

View File

@@ -0,0 +1,75 @@
from abc import ABC, abstractmethod
from typing import List, Optional, Dict, Any, Tuple
class DNSBackend(ABC):
    """Abstract interface that every DNS backend implementation must satisfy.

    Instances are constructed by the backend registry with a per-instance
    configuration dict; subclasses implement the zone lifecycle methods.
    """

    def __init__(self, config: Dict[str, Any]):
        # Full per-instance configuration dict (dns.backends.<name>).
        self.config = config
        # Configured instance key; falls back to the backend type name.
        self.instance_name = config.get("instance_name", self.get_name())

    @classmethod
    @abstractmethod
    def get_name(cls) -> str:
        """Return the backend type name"""
        pass

    @property
    def instance_id(self) -> str:
        """Return the unique instance identifier"""
        return self.instance_name

    @classmethod
    @abstractmethod
    def is_available(cls) -> bool:
        """Report whether the backend's runtime prerequisites are met."""
        pass

    @abstractmethod
    def write_zone(self, zone_name: str, zone_data: str) -> bool:
        """Persist a full zone (BIND-format text); True on success."""
        pass

    @abstractmethod
    def delete_zone(self, zone_name: str) -> bool:
        """Remove a zone and its records; True on success."""
        pass

    @abstractmethod
    def reload_zone(self, zone_name: Optional[str] = None) -> bool:
        """Trigger a reload of one zone, or of all zones when None."""
        pass

    @abstractmethod
    def zone_exists(self, zone_name: str) -> bool:
        """Return True if the backend currently holds the zone."""
        pass

    def verify_zone_record_count(
        self, zone_name: str, expected_count: int
    ) -> Tuple[bool, int]:
        """Verify the record count in this backend matches the expected count
        from the source zone file.

        Optional capability: backends that cannot count records keep this
        default, which raises NotImplementedError.

        Args:
            zone_name: The zone to verify
            expected_count: The number of records parsed from the source zone
        Returns:
            Tuple of (matches: bool, actual_count: int)
        """
        raise NotImplementedError(
            f"Backend {self.get_name()} does not implement record count verification"
        )

    def reconcile_zone_records(
        self, zone_name: str, zone_data: str
    ) -> Tuple[bool, int]:
        """Reconcile backend records against the authoritative BIND zone from
        DirectAdmin. Any records in the backend that are not present in the
        source zone will be removed.

        Optional capability: default implementation raises NotImplementedError.

        Args:
            zone_name: The zone to reconcile
            zone_data: The raw BIND zone file content (authoritative source)
        Returns:
            Tuple of (success: bool, records_removed: int)
        """
        raise NotImplementedError(
            f"Backend {self.get_name()} does not implement zone reconciliation"
        )

View File

@@ -0,0 +1,124 @@
import os
import subprocess
from loguru import logger
from pathlib import Path
from typing import Dict, List, Optional
from .base import DNSBackend
class BINDBackend(DNSBackend):
    """DNS backend that writes zone files to disk and reloads BIND via rndc."""

    @classmethod
    def get_name(cls) -> str:
        """Backend type identifier used by the registry/config."""
        return "bind"

    @classmethod
    def is_available(cls) -> bool:
        """Availability requires the ``named`` binary to be on PATH."""
        try:
            result = subprocess.run(["named", "-v"], capture_output=True, text=True)
            if result.returncode == 0:
                logger.info(f"BIND available: {result.stdout.splitlines()[0]}")
                return True
            return False
        except FileNotFoundError:
            logger.warning("BIND/named not found in PATH")
            return False

    def __init__(self, config: Dict):
        """Prepare the zones directory and the named.conf include file.

        Args:
            config: Instance config with "zones_dir" and "named_conf" paths.
        Raises:
            Exception: if the zones directory cannot be created/configured.
        """
        # Fix: the base initializer was previously skipped, leaving
        # self.config / self.instance_name unset on BIND instances.
        super().__init__(config)
        self.zones_dir = Path(config["zones_dir"])
        self.named_conf = Path(config["named_conf"])
        # Safe directory creation handling
        try:
            # Check if it's a symlink first
            if self.zones_dir.is_symlink():
                logger.debug(f"{self.zones_dir} is already a symlink")
            elif not self.zones_dir.exists():
                self.zones_dir.mkdir(parents=True, mode=0o755)
                logger.debug(f"Created zones directory: {self.zones_dir}")
            else:
                logger.debug(f"Directory already exists: {self.zones_dir}")
            # Ensure proper permissions
            os.chmod(self.zones_dir, 0o755)
            logger.debug(f"Using zones directory: {self.zones_dir}")
        except FileExistsError:
            logger.debug(f"Directory already exists (safe to ignore): {self.zones_dir}")
        except Exception as e:
            logger.error(f"Failed to setup zones directory: {e}")
            raise
        # Verify named.conf exists; create an empty include so named can start.
        if not self.named_conf.exists():
            logger.warning(f"named.conf not found at {self.named_conf}")
            self.named_conf.touch()
            logger.info(f"Created empty named.conf at {self.named_conf}")
        logger.success(f"BIND backend initialized for {self.zones_dir}")

    def write_zone(self, zone_name: str, zone_data: str) -> bool:
        """Write the zone text to <zones_dir>/<zone>.db; True on success."""
        zone_file = self.zones_dir / f"{zone_name}.db"
        try:
            with open(zone_file, "w") as f:
                f.write(zone_data)
            logger.debug(f"Wrote zone file: {zone_file}")
            return True
        except IOError as e:
            logger.error(f"Failed to write zone file {zone_file}: {e}")
            return False

    def delete_zone(self, zone_name: str) -> bool:
        """Remove the zone's .db file; False if absent or unlink fails."""
        zone_file = self.zones_dir / f"{zone_name}.db"
        try:
            if zone_file.exists():
                zone_file.unlink()
                logger.debug(f"Deleted zone file: {zone_file}")
                return True
            logger.warning(f"Zone file not found: {zone_file}")
            return False
        except IOError as e:
            logger.error(f"Failed to delete zone file {zone_file}: {e}")
            return False

    def reload_zone(self, zone_name: Optional[str] = None) -> bool:
        """Ask the running named to reload one zone (or all) via rndc."""
        try:
            if zone_name:
                cmd = ["rndc", "reload", zone_name]
                logger.debug(f"Reloading single zone: {zone_name}")
            else:
                cmd = ["rndc", "reload"]
                logger.debug("Reloading all zones")
            result = subprocess.run(
                cmd,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
            logger.debug(f"BIND reload successful: {result.stdout}")
            return True
        except subprocess.CalledProcessError as e:
            logger.error(f"BIND reload failed: {e.stderr}")
            return False
        except Exception as e:
            logger.error(f"Unexpected error during BIND reload: {e}")
            return False

    def zone_exists(self, zone_name: str) -> bool:
        """A zone exists iff its .db file is present on disk."""
        zone_file = self.zones_dir / f"{zone_name}.db"
        exists = zone_file.exists()
        logger.debug(f"Zone existence check for {zone_name}: {exists}")
        return exists

    def update_named_conf(self, zones: List[str]) -> bool:
        """Rewrite the named.conf include with one zone stanza per zone.

        Note: the file is fully overwritten; callers must pass the complete
        list of zones each time.
        """
        try:
            with open(self.named_conf, "w") as f:
                for zone in zones:
                    zone_file = self.zones_dir / f"{zone}.db"
                    f.write(f'zone "{zone}" {{ type master; file "{zone_file}"; }};\n')
            logger.debug(f"Updated named.conf: {self.named_conf}")
            return True
        except IOError as e:
            logger.error(f"Failed to update named.conf: {e}")
            return False

View File

@@ -0,0 +1,460 @@
from typing import Optional, Dict, Set, Tuple, Any
from sqlalchemy import create_engine, Column, String, Integer, Text, ForeignKey, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session, relationship
from dns import zone as dns_zone_module
from dns.rdataclass import IN
from loguru import logger
from .base import DNSBackend
from config import config
Base = declarative_base()
class Zone(Base):
    """SQLAlchemy model for the CoreDNS MySQL plugin's zones table."""

    __tablename__ = "zones"
    id = Column(Integer, primary_key=True)
    # Zone apex; writers in this file store it as an absolute FQDN with a
    # trailing dot (see dot_fqdn usage in _ensure_zone_exists).
    zone_name = Column(String(255), nullable=False, index=True, unique=True)
class Record(Base):
    """SQLAlchemy model for individual DNS records within a zone."""

    __tablename__ = "records"
    id = Column(Integer, primary_key=True)
    zone_id = Column(Integer, ForeignKey("zones.id"), nullable=False)
    # Record owner name as produced by zone parsing (str(dns name)).
    hostname = Column(String(255), nullable=False, index=True)
    # RR type mnemonic, e.g. "A", "MX", "SOA".
    type = Column(String(10), nullable=False)
    # rdata in presentation format (rdata.to_text()).
    data = Column(Text, nullable=False)
    ttl = Column(Integer, nullable=True)
    # Set True by write_zone when the record is (re)confirmed from source.
    online = Column(Boolean, nullable=False, default=False)
    zone = relationship("Zone", backref="records")
class CoreDNSMySQLBackend(DNSBackend):
def __init__(self, config: Dict[str, Any]):
    """Create the SQLAlchemy engine/session factory and ensure tables exist.

    Args:
        config: Per-instance backend configuration (host, port, database,
            username, password).
    """
    super().__init__(config)
    # stdlib; imported locally to leave the module import block untouched.
    from urllib.parse import quote_plus

    self.host = config.get("host", "localhost")
    self.port = config.get("port", 3306)
    self.database = config.get("database", "coredns")
    self.username = config.get("username")
    self.password = config.get("password")
    # Credentials are URL-quoted so passwords containing '@', ':' or '/'
    # cannot corrupt the connection URL.
    self.engine = create_engine(
        f"mysql+pymysql://{quote_plus(self.username or '')}:{quote_plus(self.password or '')}@"
        f"{self.host}:{self.port}/{self.database}",
        pool_pre_ping=True,  # transparently replace dead pooled connections
        pool_size=5,
        max_overflow=10,
    )
    self.Session = scoped_session(sessionmaker(bind=self.engine))
    # Create zones/records tables if they do not exist yet.
    Base.metadata.create_all(self.engine)
    logger.info(
        f"Initialized CoreDNS MySQL backend '{self.instance_name}' "
        f"for {self.database}@{self.host}:{self.port}"
    )
@staticmethod
def dot_fqdn(zone_name):
    # Zone names are stored as absolute FQDNs (with a trailing dot).
    return f"{zone_name}." if not zone_name.endswith(".") else zone_name

@classmethod
def get_name(cls) -> str:
    """Backend type identifier used by the registry/config."""
    return "coredns_mysql"

@classmethod
def is_available(cls) -> bool:
    """Availability hinges solely on the PyMySQL driver being importable."""
    try:
        import pymysql

        return True
    except ImportError:
        logger.warning("PyMySQL not available - CoreDNS MySQL backend disabled")
        return False
def write_zone(self, zone_name: str, zone_data: str) -> bool:
    """Synchronise database rows for ``zone_name`` with the BIND zone text.

    The source zone is authoritative: new records are inserted, TTL-only
    changes are applied in place, and rows absent from the source are
    deleted. The SOA record is tracked separately from all other types.

    Returns:
        True on commit, False on any failure (transaction rolled back).
    """
    session = self.Session()
    try:
        # Ensure zone exists
        zone = self._ensure_zone_exists(session, zone_name)
        # Get existing records for this zone but track SOA records separately
        existing_records = {}
        existing_soa = None
        for r in session.query(Record).filter_by(zone_id=zone.id).all():
            if r.type == "SOA":
                existing_soa = r
            else:
                # Keyed by (hostname, type, data) so TTL-only changes are
                # detected as updates rather than add+remove pairs.
                existing_records[(r.hostname, r.type, r.data)] = r
        # Parse the zone data into a normalised record set
        source_records, source_soa = self._parse_zone_to_record_set(
            zone_name, zone_data
        )
        # Track changes
        current_records = set()
        changes = {"added": 0, "updated": 0, "removed": 0}
        # Handle SOA record
        if source_soa:
            soa_name, soa_content, soa_ttl = source_soa
            soa_parts = soa_content.split()
            # A valid SOA rdata has exactly 7 fields (mname rname serial
            # refresh retry expire minimum); anything else is ignored.
            if len(soa_parts) == 7:
                if existing_soa:
                    existing_soa.data = soa_content
                    existing_soa.ttl = soa_ttl
                    existing_soa.online = True
                    changes["updated"] += 1
                    logger.debug(
                        f"Updated SOA record: {soa_name} SOA {soa_content}"
                    )
                else:
                    existing_soa = Record(
                        zone_id=zone.id,
                        hostname=soa_name,
                        type="SOA",
                        data=soa_content,
                        ttl=soa_ttl,
                        online=True,
                    )
                    session.add(existing_soa)
                    changes["added"] += 1
                    logger.debug(
                        f"Added SOA record: {soa_name} SOA {soa_content}"
                    )
        # Process all non-SOA records
        for record_name, record_type, record_content, record_ttl in source_records:
            key = (record_name, record_type, record_content)
            current_records.add(key)
            if key in existing_records:
                # Update existing record if TTL changed
                record = existing_records[key]
                if record.ttl != record_ttl:
                    record.ttl = record_ttl
                    record.online = True
                    changes["updated"] += 1
                    logger.debug(
                        f"Updated TTL for record: {record_name} {record_type} {record_content}"
                    )
            else:
                # Add new record
                new_record = Record(
                    zone_id=zone.id,
                    hostname=record_name,
                    type=record_type,
                    data=record_content,
                    ttl=record_ttl,
                    online=True,
                )
                session.add(new_record)
                changes["added"] += 1
                logger.debug(
                    f"Added new record: {record_name} {record_type} {record_content}"
                )
        # Remove records that no longer exist in the source zone
        for key, record in existing_records.items():
            if key not in current_records:
                logger.debug(
                    f"Removed record: {record.hostname} {record.type} {record.data}"
                )
                session.delete(record)
                changes["removed"] += 1
        # Handle SOA removal if needed
        if existing_soa and not source_soa:
            logger.debug(
                f"Removed SOA record: {existing_soa.hostname} SOA {existing_soa.data}"
            )
            session.delete(existing_soa)
            changes["removed"] += 1
        session.commit()
        total_changes = changes['added'] + changes['updated'] + changes['removed']
        if total_changes > 0:
            logger.info(
                f"[{self.instance_name}] Zone {zone_name} updated: "
                f"{changes['added']} added, {changes['updated']} updated, "
                f"{changes['removed']} removed"
            )
        else:
            logger.debug(
                f"[{self.instance_name}] Zone {zone_name}: no changes"
            )
        return True
    except Exception as e:
        logger.error(f"Error writing zone {zone_name}: {e}")
        session.rollback()
        return False
    finally:
        session.close()
def delete_zone(self, zone_name: str) -> bool:
    """Delete a zone and all of its records.

    Returns:
        True if the zone existed and was removed, False otherwise.
    """
    session = self.Session()
    try:
        # Fix: Zone's column is 'zone_name' (not 'name') and values are
        # stored with a trailing dot — the old filter_by(name=zone_name)
        # raised on the missing column / could never match.
        zone = (
            session.query(Zone)
            .filter_by(zone_name=self.dot_fqdn(zone_name))
            .first()
        )
        if not zone:
            logger.warning(f"Zone {zone_name} not found for deletion")
            return False
        # Delete all records associated with the zone
        count = session.query(Record).filter_by(zone_id=zone.id).delete()
        # Delete the zone itself
        session.delete(zone)
        session.commit()
        logger.info(f"Deleted zone {zone_name} with {count} records")
        return True
    except Exception as e:
        session.rollback()
        logger.error(f"Zone deletion failed for {zone_name}: {e}")
        return False
    finally:
        session.close()
def reload_zone(self, zone_name: Optional[str] = None) -> bool:
    """No-op reload: the CoreDNS MySQL plugin detects database changes
    itself, so this only logs the request and reports success."""
    if zone_name:
        logger.debug(f"CoreDNS reload triggered for zone {zone_name}")
        return True
    logger.debug("CoreDNS reload triggered for all zones")
    return True
def zone_exists(self, zone_name: str) -> bool:
    """Return True if the zone is present in the database.

    Never raises: query failures are logged and reported as False.
    """
    session = self.Session()
    try:
        # Fix: Zone's column is 'zone_name', not 'name' — the old
        # filter_by(name=...) referenced a non-existent attribute.
        exists = (
            session.query(Zone)
            .filter_by(zone_name=self.dot_fqdn(zone_name))
            .first()
            is not None
        )
        logger.debug(f"Zone existence check for {zone_name}: {exists}")
        return exists
    except Exception as e:
        logger.error(f"Zone existence check failed for {zone_name}: {e}")
        return False
    finally:
        session.close()
def _ensure_zone_exists(self, session, zone_name: str) -> Zone:
    """Fetch the zone row, creating it (and flushing for its id) if absent."""
    fqdn = self.dot_fqdn(zone_name)
    zone = session.query(Zone).filter_by(zone_name=fqdn).first()
    if zone is None:
        logger.debug(f"Creating new zone: {fqdn}")
        zone = Zone(zone_name=fqdn)
        session.add(zone)
        session.flush()  # Get the zone ID
    return zone
def _normalize_cname_data(self, zone_name: str, record_content: str) -> str:
    """Expand relative CNAME targets into absolute FQDNs.

    Keeping CNAME targets fully qualified makes comparisons between the
    BIND source zone and the database rows deterministic.

    Args:
        zone_name: The zone name for relative-name expansion
        record_content: The raw CNAME target from the parsed zone
    Returns:
        The normalized CNAME target string
    """
    zone_fqdn = self.dot_fqdn(zone_name)
    if record_content.startswith("@"):
        # '@' is shorthand for the zone apex.
        logger.debug(
            f"CNAME target starts with '@', replacing with zone FQDN"
        )
        return zone_fqdn
    if record_content.endswith("."):
        # Already absolute — leave untouched.
        return record_content
    logger.debug(
        f"CNAME target {record_content} is relative, appending zone"
    )
    return ".".join([record_content, zone_fqdn])
def _parse_zone_to_record_set(
    self, zone_name: str, zone_data: str
) -> Tuple[Set[Tuple[str, str, str, int]], Optional[Tuple[str, str, int]]]:
    """Parse a BIND zone file into a set of normalised record keys.

    Returns:
        Tuple of:
        - set of (hostname, type, data, ttl) tuples for non-SOA records
        - (hostname, soa_data, ttl) tuple for the SOA record, or None
    """
    # NOTE(review): no explicit origin is passed, so relative names resolve
    # only if the zone text carries its own $ORIGIN — confirm the upstream
    # DirectAdmin zone dumps always include one.
    dns_zone = dns_zone_module.from_text(zone_data, check_origin=False)
    records: Set[Tuple[str, str, str, int]] = set()
    soa = None
    for name, ttl, rdata in dns_zone.iterate_rdatas():
        # Only Internet-class records are mirrored.
        if rdata.rdclass != IN:
            continue
        record_name = str(name)
        record_type = rdata.rdtype.name
        record_content = rdata.to_text()
        if record_type == "SOA":
            # A single SOA is tracked; if several appear, the last wins.
            soa = (record_name, record_content, ttl)
            continue
        if record_type == "CNAME":
            # Store CNAME targets as absolute names for stable comparison.
            record_content = self._normalize_cname_data(
                zone_name, record_content
            )
        records.add((record_name, record_type, record_content, ttl))
    return records, soa
def verify_zone_record_count(
    self, zone_name: str, expected_count: int
) -> tuple[bool, int]:
    """Verify the record count in this backend matches the expected count
    from the source (DirectAdmin) zone file.

    NOTE(review): the database count includes the SOA row; confirm callers
    compute expected_count the same way or counts will never match.

    Args:
        zone_name: The zone to verify
        expected_count: The number of records parsed from the source BIND zone
    Returns:
        Tuple of (matches: bool, actual_count: int); (False, 0) when the
        zone is missing, (False, -1) on query errors.
    """
    session = self.Session()
    try:
        zone = (
            session.query(Zone)
            .filter_by(zone_name=self.dot_fqdn(zone_name))
            .first()
        )
        if not zone:
            logger.warning(
                f"[{self.instance_name}] Zone {zone_name} not found "
                f"during record count verification"
            )
            return False, 0
        actual_count = (
            session.query(Record).filter_by(zone_id=zone.id).count()
        )
        matches = actual_count == expected_count
        if not matches:
            logger.warning(
                f"[{self.instance_name}] Record count mismatch for "
                f"{zone_name}: source zone has {expected_count} records, "
                f"backend has {actual_count} records "
                f"(difference: {actual_count - expected_count:+d})"
            )
        else:
            logger.debug(
                f"[{self.instance_name}] Record count verified for "
                f"{zone_name}: {actual_count} records match source"
            )
        return matches, actual_count
    except Exception as e:
        logger.error(
            f"[{self.instance_name}] Error verifying record count "
            f"for {zone_name}: {e}"
        )
        return False, -1
    finally:
        session.close()
def reconcile_zone_records(
    self, zone_name: str, zone_data: str
) -> Tuple[bool, int]:
    """Reconcile backend records against the authoritative BIND zone from
    DirectAdmin. Any records in the backend that are **not** present in
    the source zone will be deleted.

    This is the post-write safety net: even though ``write_zone`` already
    removes stale records during normal processing, this method catches
    any extras that may have crept in via race conditions, manual edits,
    or replication drift between MySQL nodes.

    Args:
        zone_name: The zone to reconcile
        zone_data: The raw BIND zone file content (authoritative source)
    Returns:
        Tuple of (success: bool, records_removed: int)
    """
    session = self.Session()
    try:
        zone = (
            session.query(Zone)
            .filter_by(zone_name=self.dot_fqdn(zone_name))
            .first()
        )
        if not zone:
            logger.warning(
                f"[{self.instance_name}] Zone {zone_name} not found "
                f"during reconciliation"
            )
            return False, 0
        # Build the expected record set from the source BIND zone
        # (the SOA tuple is intentionally unused here — SOA rows are
        # managed exclusively by write_zone).
        source_records, source_soa = self._parse_zone_to_record_set(
            zone_name, zone_data
        )
        # Build lookup keys (without TTL) matching write_zone's key format
        expected_keys: Set[Tuple[str, str, str]] = {
            (hostname, rtype, data)
            for hostname, rtype, data, _ in source_records
        }
        # Query all records currently in the backend for this zone
        db_records = (
            session.query(Record).filter_by(zone_id=zone.id).all()
        )
        removed = 0
        for record in db_records:
            # SOA records are managed separately skip them
            if record.type == "SOA":
                continue
            key = (record.hostname, record.type, record.data)
            if key not in expected_keys:
                logger.debug(
                    f"[{self.instance_name}] Reconcile: removing extra "
                    f"record from {zone_name}: "
                    f"{record.hostname} {record.type} {record.data}"
                )
                session.delete(record)
                removed += 1
        # Only touch the database when something was actually deleted.
        if removed > 0:
            session.commit()
            logger.info(
                f"[{self.instance_name}] Reconciliation for {zone_name}: "
                f"removed {removed} extra record(s) not in source zone"
            )
        else:
            logger.debug(
                f"[{self.instance_name}] Reconciliation for {zone_name}: "
                f"all records match source zone — no action needed"
            )
        return True, removed
    except Exception as e:
        logger.error(
            f"[{self.instance_name}] Error reconciling records "
            f"for {zone_name}: {e}"
        )
        session.rollback()
        return False, 0
    finally:
        session.close()

View File

@@ -0,0 +1,332 @@
from typing import Optional, Dict, Set, Tuple, List
from sqlalchemy import (
create_engine,
Column,
String,
Integer,
Text,
Boolean,
DateTime,
func,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from loguru import logger
from .base import DNSBackend
from config import config
import time
Base = declarative_base()
class Domain(Base):
    """SQLAlchemy model mirroring the PowerDNS gmysql 'domains' table."""

    __tablename__ = "domains"
    id = Column(Integer, primary_key=True)
    name = Column(String(255), nullable=False, index=True, unique=True)
    master = Column(String(128), nullable=True)
    last_check = Column(Integer, nullable=True)
    # PowerDNS zone kind; this backend creates domains as "NATIVE".
    type = Column(String(6), nullable=False, default="NATIVE")
    notified_serial = Column(Integer, nullable=True)
    account = Column(String(40), nullable=True)
class Record(Base):
    """SQLAlchemy model mirroring the PowerDNS gmysql 'records' table."""

    __tablename__ = "records"
    id = Column(Integer, primary_key=True)
    # NOTE(review): plain integer column, no ForeignKey to domains.id —
    # presumably matching the stock PowerDNS schema; confirm intent.
    domain_id = Column(Integer, nullable=False, index=True)
    name = Column(String(255), nullable=False, index=True)
    type = Column(String(10), nullable=False)
    content = Column(Text, nullable=False)
    ttl = Column(Integer, nullable=True)
    # MX/SRV priority.
    prio = Column(Integer, nullable=True)
    change_date = Column(Integer, nullable=True)
    disabled = Column(Boolean, nullable=False, default=False)
    # DNSSEC ordering/auth columns from the gmysql schema.
    ordername = Column(String(255), nullable=True)
    auth = Column(Boolean, nullable=False, default=True)
class PowerDNSMySQLBackend(DNSBackend):
@classmethod
def get_name(cls) -> str:
    """Backend type identifier used by the registry/config."""
    return "powerdns_mysql"

@classmethod
def is_available(cls) -> bool:
    """Availability hinges solely on the PyMySQL driver being importable."""
    try:
        import pymysql

        return True
    except ImportError:
        logger.warning("PyMySQL not available - PowerDNS MySQL backend disabled")
        return False
@staticmethod
def ensure_fqdn(name: str, zone_name: str) -> str:
    """Ensure name is fully qualified for PowerDNS (no trailing dot)."""
    # '@' and the empty string both denote the zone apex.
    if name in ("@", ""):
        return zone_name
    # Absolute names lose their trailing dot(s) for PowerDNS storage.
    if name.endswith("."):
        return name.rstrip(".")
    # Already the zone apex in bare form.
    if name == zone_name:
        return name
    # Relative name: qualify it against the zone.
    return f"{name}.{zone_name}"
def __init__(self, config: dict = None):
c = config or config.get("dns.backends.powerdns_mysql")
self.engine = create_engine(
f"mysql+pymysql://{c['username']}:{c['password']}@"
f"{c['host']}:{c['port']}/{c['database']}",
pool_pre_ping=True,
)
self.Session = scoped_session(sessionmaker(bind=self.engine))
Base.metadata.create_all(self.engine)
logger.info(f"Initialized PowerDNS MySQL backend for {c['database']}")
def _ensure_domain_exists(self, session, zone_name: str) -> Domain:
"""Ensure domain exists and return domain object"""
domain = session.query(Domain).filter_by(name=zone_name).first()
if not domain:
domain = Domain(name=zone_name, type="NATIVE")
session.add(domain)
session.flush() # Flush to get the domain ID
logger.info(f"Created new domain: {zone_name}")
return domain
def _parse_soa_content(self, soa_content: str) -> Dict[str, str]:
"""Parse SOA record content into components"""
parts = soa_content.split()
if len(parts) >= 7:
return {
"primary_ns": parts[0],
"hostmaster": parts[1],
"serial": parts[2],
"refresh": parts[3],
"retry": parts[4],
"expire": parts[5],
"minimum": parts[6],
}
return {}
def write_zone(self, zone_name: str, zone_data: str) -> bool:
from dns import zone as dns_zone_module
from dns.rdataclass import IN
session = self.Session()
try:
# Ensure domain exists
domain = self._ensure_domain_exists(session, zone_name)
# Get existing records for this domain
existing_records = {
(r.name, r.type): r
for r in session.query(Record).filter_by(domain_id=domain.id).all()
}
# Parse the zone data
dns_zone = dns_zone_module.from_text(zone_data, check_origin=False)
# Track records we process
current_records: Set[Tuple[str, str]] = set()
changes = {"added": 0, "updated": 0, "removed": 0}
current_time = int(time.time())
# Process all records
for name, ttl, rdata in dns_zone.iterate_rdatas():
if rdata.rdclass != IN:
continue
record_name = self.ensure_fqdn(str(name), zone_name)
record_type = rdata.rdtype.name
record_content = rdata.to_text()
record_ttl = ttl
record_prio = None
# Handle MX records priority
if record_type == "MX":
parts = record_content.split(" ", 1)
if len(parts) == 2:
record_prio = int(parts[0])
record_content = parts[1]
# Handle SRV records priority and other fields
elif record_type == "SRV":
parts = record_content.split(" ", 3)
if len(parts) == 4:
record_prio = int(parts[0])
record_content = f"{parts[1]} {parts[2]} {parts[3]}"
# Ensure CNAME and other records have proper FQDN format
if record_type in ["CNAME", "MX", "NS"]:
if not record_content.endswith(".") and record_content != "@":
if record_content == "@":
record_content = zone_name
elif "." not in record_content:
record_content = f"{record_content}.{zone_name}"
key = (record_name, record_type)
current_records.add(key)
if key in existing_records:
# Update existing record if needed
record = existing_records[key]
if (
record.content != record_content
or record.ttl != record_ttl
or record.prio != record_prio
):
record.content = record_content
record.ttl = record_ttl
record.prio = record_prio
record.change_date = current_time
record.disabled = False
changes["updated"] += 1
else:
# Add new record
new_record = Record(
domain_id=domain.id,
name=record_name,
type=record_type,
content=record_content,
ttl=record_ttl,
prio=record_prio,
change_date=current_time,
disabled=False,
auth=True,
)
session.add(new_record)
changes["added"] += 1
# Remove deleted records
for key in set(existing_records.keys()) - current_records:
session.delete(existing_records[key])
changes["removed"] += 1
session.commit()
logger.success(
f"Zone {zone_name} updated: "
f"+{changes['added']} ~{changes['updated']} -{changes['removed']}"
)
return True
except Exception as e:
session.rollback()
logger.error(f"Zone update failed for {zone_name}: {e}")
return False
finally:
session.close()
def delete_zone(self, zone_name: str) -> bool:
session = self.Session()
try:
# First find the domain
domain = session.query(Domain).filter_by(name=zone_name).first()
if not domain:
logger.warning(f"Domain {zone_name} not found for deletion")
return False
# Delete all records associated with the domain
count = session.query(Record).filter_by(domain_id=domain.id).delete()
# Delete the domain itself
session.delete(domain)
session.commit()
logger.info(f"Deleted domain {zone_name} with {count} records")
return True
except Exception as e:
session.rollback()
logger.error(f"Domain deletion failed for {zone_name}: {e}")
return False
finally:
session.close()
def reload_zone(self, zone_name: Optional[str] = None) -> bool:
"""PowerDNS reload - could trigger pdns_control reload if needed"""
if zone_name:
logger.debug(f"PowerDNS reload triggered for zone {zone_name}")
# Optional: Call pdns_control reload-zones here if needed
# subprocess.run(['pdns_control', 'reload-zones'], check=True)
else:
logger.debug("PowerDNS reload triggered for all zones")
# Optional: Call pdns_control reload here if needed
# subprocess.run(['pdns_control', 'reload'], check=True)
return True
def zone_exists(self, zone_name: str) -> bool:
session = self.Session()
try:
exists = session.query(Domain).filter_by(name=zone_name).first() is not None
logger.debug(f"Zone existence check for {zone_name}: {exists}")
return exists
except Exception as e:
logger.error(f"Zone existence check failed for {zone_name}: {e}")
return False
finally:
session.close()
def get_zone_records(self, zone_name: str) -> List[Dict]:
"""Get all records for a zone - useful for debugging/inspection"""
session = self.Session()
try:
domain = session.query(Domain).filter_by(name=zone_name).first()
if not domain:
return []
records = session.query(Record).filter_by(domain_id=domain.id).all()
return [
{
"name": r.name,
"type": r.type,
"content": r.content,
"ttl": r.ttl,
"prio": r.prio,
"disabled": r.disabled,
}
for r in records
]
except Exception as e:
logger.error(f"Failed to get records for {zone_name}: {e}")
return []
finally:
session.close()
def set_record_status(
self, zone_name: str, record_name: str, record_type: str, disabled: bool
) -> bool:
"""Enable/disable specific records"""
session = self.Session()
try:
domain = session.query(Domain).filter_by(name=zone_name).first()
if not domain:
logger.warning(f"Domain {zone_name} not found")
return False
full_name = self.ensure_fqdn(record_name, zone_name)
record = (
session.query(Record)
.filter_by(domain_id=domain.id, name=full_name, type=record_type)
.first()
)
if not record:
logger.warning(
f"Record {full_name} {record_type} not found in {zone_name}"
)
return False
record.disabled = disabled
record.change_date = int(time.time())
session.commit()
status = "disabled" if disabled else "enabled"
logger.info(f"Record {full_name} {record_type} {status} in {zone_name}")
return True
except Exception as e:
session.rollback()
logger.error(f"Failed to set record status: {e}")
return False
finally:
session.close()

View File

@@ -0,0 +1,55 @@
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from vyper import v
import datetime
Base = declarative_base()
def connect(dbtype="sqlite", **kwargs):
    """Create a new SQLAlchemy session for the configured datastore.

    Args:
        dbtype: "sqlite" (default) or "mysql"; connection details are read
            from the vyper config under ``datastore.*``.
        **kwargs: Accepted for forward compatibility; currently unused.

    Returns:
        A new session bound to a freshly created engine (tables are created
        on first use via ``Base.metadata.create_all``).

    Raises:
        Exception: If required configuration values are missing or the
            database type is unknown.
    """
    if dbtype == "sqlite":
        # Start SQLite engine
        db_location = v.get("datastore.db_location")
        # BUG FIX: vyper returns None for unset keys, so the old ``== -1``
        # sentinel never matched and create_engine crashed instead. Treat
        # any falsy value (or the legacy -1 sentinel) as "not configured".
        if not db_location or db_location == -1:
            raise Exception("DB Type is sqlite but db_location is not defined")
        engine = create_engine(
            "sqlite:///" + db_location, connect_args={"check_same_thread": False}
        )
        Base.metadata.create_all(engine)
        return sessionmaker(bind=engine)()
    elif dbtype == "mysql":
        # Validate presence BEFORE reading, so missing settings fail with a
        # clear error instead of producing a malformed connection URL.
        if (
            not v.is_set("datastore.user")
            or not v.is_set("datastore.name")
            or not v.is_set("datastore.pass")
            or not v.is_set("datastore.host")
        ):
            raise Exception(
                "DB Type is mysql but db_(host,name,and pass) are not populated"
            )
        db_user = v.get_string("datastore.user")
        db_host = v.get_string("datastore.host")
        db_name = v.get_string("datastore.name")
        db_pass = v.get_string("datastore.pass")
        # Port has a registered default (3306), so it is not checked above.
        db_port = v.get_string("datastore.port")
        engine = create_engine(
            f"mysql+pymysql://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}"
        )
        Base.metadata.create_all(engine)
        return sessionmaker(bind=engine)()
    else:
        raise Exception("Unknown/unimplemented database type: {}".format(dbtype))

View File

@@ -0,0 +1,35 @@
from directdnsonly.app.db import Base
from sqlalchemy import Column, Integer, String, DateTime
class Key(Base):
    """ORM model for API keys: a named, expiring credential tied to a service."""

    __tablename__ = "keys"

    id = Column(Integer, primary_key=True)
    # The API key value itself; must be unique.
    key = Column(String(255), unique=True)
    name = Column(String(255))
    # Expiry timestamp for the key.
    expires = Column(DateTime)
    # Service the key grants access to.
    service = Column(String(255))

    def __repr__(self):
        return "<Key(key='%s', name='%s', expires='%s', service='%s')>" % (
            self.key,
            self.name,
            self.expires,
            self.service,
        )
class Domain(Base):
    """ORM model for the zone index: maps a domain to its source host/user."""

    __tablename__ = "domains"

    id = Column(Integer, primary_key=True)
    # Zone apex, e.g. "example.com"; unique per index.
    domain = Column(String(255), unique=True)
    # Hostname the zone was pushed from (presumably the DirectAdmin server).
    hostname = Column(String(255))
    username = Column(String(255))

    def __repr__(self):
        return "<Domain(id='%s', domain='%s', hostname='%s', username='%s')>" % (
            self.id,
            self.domain,
            self.hostname,
            self.username,
        )

View File

@@ -0,0 +1,25 @@
from loguru import logger
from directdnsonly.app.db.models import *
from directdnsonly.app.db import connect
def check_zone_exists(zone_name):
    """Return True when ``zone_name`` is already present in the domain index.

    Args:
        zone_name: Zone apex to look up, e.g. "example.com".

    Returns:
        bool: True if a matching Domain row exists.
    """
    # Check if zone is present in the index
    session = connect()
    try:
        logger.debug("Checking if {} is present in the DB".format(zone_name))
        domain_exists = bool(
            session.query(Domain.id).filter_by(domain=zone_name).first()
        )
        logger.debug("Returned from query: {}".format(domain_exists))
        # Return the bool directly (old code had a redundant if/else)
        return domain_exists
    finally:
        # BUG FIX: always release the session so connections do not leak.
        session.close()
def put_zone_index(zone_name, host_name, user_name):
    """Insert a new zone row (domain, hostname, username) into the index.

    Args:
        zone_name: Zone apex to record.
        host_name: Host the zone was pushed from.
        user_name: User owning the zone on the source host.
    """
    # add a new zone to index
    session = connect()
    try:
        domain = Domain(domain=zone_name, hostname=host_name, username=user_name)
        session.add(domain)
        session.commit()
        # BUG FIX: log after the commit so the message reflects what happened.
        logger.debug("Placed zone into database.. {}".format(str(zone_name)))
    finally:
        # BUG FIX: always release the session so connections do not leak.
        session.close()

View File

@@ -0,0 +1,69 @@
from dns import zone, name
from dns.rdataclass import IN
from dns.exception import DNSException
from loguru import logger
def validate_and_normalize_zone(zone_data: str, domain_name: str) -> str:
    """
    Normalize zone file content and ensure proper origin handling.

    Prepends ``$ORIGIN`` and ``$TTL`` directives when missing, then parses
    the zone to confirm it is syntactically valid.

    Args:
        zone_data: Raw BIND zone file content.
        domain_name: Domain name of the zone (with or without trailing dot).

    Returns:
        The normalized zone data.

    Raises:
        ValueError: If the zone data fails to parse.
        (BUG FIX: the old docstring claimed DNSException, but callers see
        ValueError — DNSException is caught and re-raised below.)
    """
    # Ensure domain ends with dot so it forms a valid absolute origin
    if not domain_name.endswith("."):
        domain_name = f"{domain_name}."
    # Add $ORIGIN if missing (substring check; assumes the directive text
    # only ever appears as a real directive — TODO confirm for edge cases)
    if "$ORIGIN" not in zone_data:
        zone_data = f"$ORIGIN {domain_name}\n{zone_data}"
    # Add a default $TTL if missing so records without explicit TTLs parse
    if "$TTL" not in zone_data:
        zone_data = f"$TTL 300\n{zone_data}"
    # Validate the zone
    try:
        zone.from_text(
            zone_data, origin=name.from_text(domain_name), check_origin=False
        )
        return zone_data
    except DNSException as e:
        logger.error(f"Zone validation failed: {e}")
        # Chain the original exception so the parse failure is not lost
        raise ValueError(f"Invalid zone data: {str(e)}") from e
def count_zone_records(zone_data: str, domain_name: str) -> int:
    """Count the individual DNS records in a parsed BIND zone file.

    Every resource record (each A, AAAA, MX, TXT, etc.) is counted
    separately, matching how the CoreDNS MySQL backend stores them —
    one row per record.

    Args:
        zone_data: The raw or normalized BIND zone file content.
        domain_name: The domain name for the zone.

    Returns:
        The total number of IN-class records, or -1 when parsing fails.
    """
    # Work with an absolute origin (trailing dot required by dnspython)
    if not domain_name.endswith("."):
        domain_name = f"{domain_name}."
    try:
        parsed = zone.from_text(
            zone_data, origin=name.from_text(domain_name), check_origin=False
        )
        # One increment per individual IN-class resource record
        total = sum(
            1 for _, _, rdata in parsed.iterate_rdatas() if rdata.rdclass == IN
        )
        logger.debug(
            f"Source zone {domain_name} contains {total} records"
        )
        return total
    except DNSException as e:
        logger.error(f"Failed to count records for {domain_name}: {e}")
        return -1

View File

@@ -0,0 +1,63 @@
from vyper import v, Vyper
from loguru import logger
# from vyper.config import Config
import os
from pathlib import Path
from typing import Any, Dict
def load_config() -> Vyper:
    """Load the application configuration via vyper.

    Searches ``./app.{yaml,yml}`` and ``./config/app.{yaml,yml}``, overlays
    ``DADNS_*``-prefixed environment variables, and falls back to the
    defaults registered below.

    Returns:
        The configured global Vyper instance.
    """
    # Initialize Vyper
    v.set_config_name("app")  # Looks for app.yaml/app.yml
    v.add_config_path(".")  # Search in current directory
    v.add_config_path("./config")
    v.set_env_prefix("DADNS")
    v.set_env_key_replacer("_", ".")
    v.automatic_env()
    # Set defaults for all required parameters
    v.set_default("log_level", "info")
    v.set_default("queue_location", "./data/queues")
    # BUG FIX: was set twice, first with the typo "Pacific/Aucland"
    v.set_default("timezone", "Pacific/Auckland")
    # Set defaults for app
    v.set_default("app.listen_port", 2222)  # BUG FIX: was registered twice
    v.set_default("app.proxy_support", True)
    v.set_default("app.proxy_support_base", "http://127.0.0.1")
    v.set_default("app.log_level", "debug")
    v.set_default("app.log_to", "file")
    v.set_default("app.ssl_enable", "false")
    v.set_default("app.token_valid_for_days", 30)
    v.set_default("app.queue_location", "conf/queues")
    v.set_default("app.auth_username", "directdnsonly")
    # Default only; override via DADNS_APP_AUTH_PASSWORD in production
    v.set_default("app.auth_password", "changeme")
    # DNS backend defaults
    v.set_default("dns.backends.bind.enabled", False)
    v.set_default("dns.backends.bind.zones_dir", "/etc/named/zones")
    v.set_default("dns.backends.bind.named_conf", "/etc/named.conf.local")
    v.set_default("dns.backends.coredns_mysql.enabled", False)
    v.set_default("dns.backends.coredns_mysql.host", "localhost")
    v.set_default("dns.backends.coredns_mysql.port", 3306)
    v.set_default("dns.backends.coredns_mysql.database", "coredns")
    v.set_default("dns.backends.coredns_mysql.username", "coredns")
    v.set_default("dns.backends.coredns_mysql.password", "")
    v.set_default("dns.backends.coredns_mysql.table_name", "records")
    # Set Defaults Datastore
    v.set_default("datastore.type", "sqlite")
    v.set_default("datastore.port", 3306)
    v.set_default("datastore.db_location", "data/directdns.db")
    # Read configuration
    if not v.read_in_config():
        logger.warning("No config file found, using defaults")
    return v


# Global config instance
config = load_config()

View File

@@ -0,0 +1,35 @@
---
timezone: Pacific/Auckland
log_level: INFO
queue_location: ./data/queues
app:
auth_username: directdnsonly
auth_password: changeme # Override via DADNS_APP_AUTH_PASSWORD env var
dns:
default_backend: bind
backends:
bind:
type: bind
enabled: true
zones_dir: ./data/zones
named_conf: ./data/named.conf.include
coredns_dc1:
type: coredns_mysql
enabled: true
host: "mysql-dc1"
port: 3306
database: "coredns"
username: "coredns"
password: "coredns123"
table_name: "records"
coredns_dc2:
type: coredns_mysql
enabled: true
host: "mysql-dc2"
port: 3306
database: "coredns"
username: "coredns"
password: "coredns123"
table_name: "records"

114
directdnsonly/main.py Normal file
View File

@@ -0,0 +1,114 @@
from loguru import logger
import cherrypy
from app.backends import BackendRegistry
from app.api.admin import DNSAdminAPI
from app.api.health import HealthAPI
from app import configure_logging
from worker import WorkerManager
from directdnsonly.config import config
from directdnsonly.app.db import connect
import importlib.metadata

# Version of the installed directdnsonly distribution (used in the
# Server response header below).
app_version = importlib.metadata.version("directdnsonly")
class Root:
    # Minimal placeholder root object; the site root actually mounted
    # by main() is a DNSAdminAPI instance.
    pass
def main():
    """Start the DaDNS server: backends, datastore, worker queues, CherryPy.

    Blocks until the CherryPy engine exits; re-raises on startup failure
    after stopping the worker manager.
    """
    try:
        # Initialize logging
        configure_logging()
        logger.info("Starting DaDNS server initialization")
        # Initialize backend registry
        registry = BackendRegistry()
        available_backends = registry.get_available_backends()
        logger.info(f"Available backend instances: {list(available_backends.keys())}")
        # Keep the DB session module-global so other code can reach it
        global session
        try:
            session = connect(config.get("datastore.type"))
        except Exception as e:
            logger.error(str(e))
            print("ERROR: " + str(e))
            # Startup cannot continue without a datastore; SystemExit is not
            # an Exception so it bypasses the outer handler (as exit(1) did).
            raise SystemExit(1)
        logger.info("Database Connected!")
        # Setup worker manager
        worker_manager = WorkerManager(
            queue_path=config.get("queue_location"), backend_registry=registry
        )
        worker_manager.start()
        logger.info(
            f"Worker manager started with queue path: {config.get('queue_location')}"
        )
        # Configure CherryPy basic-auth with credentials from config/env
        user_password_dict = {
            config.get_string("app.auth_username"): config.get_string("app.auth_password")
        }
        check_password = cherrypy.lib.auth_basic.checkpassword_dict(user_password_dict)
        cherrypy.config.update(
            {
                "server.socket_host": "0.0.0.0",
                "server.socket_port": config.get_int("app.listen_port"),
                "tools.proxy.on": config.get_bool("app.proxy_support"),
                "tools.proxy.base": config.get_string("app.proxy_support_base"),
                "tools.auth_basic.on": True,
                "tools.auth_basic.realm": "dadns",
                "tools.auth_basic.checkpassword": check_password,
                "tools.response_headers.on": True,
                "tools.response_headers.headers": [
                    ("Server", "DirectDNS v" + app_version)
                ],
                # NOTE(review): "environment" has no registered default —
                # confirm it is always present in the config file.
                "environment": config.get("environment"),
            }
        )
        if config.get_bool("app.ssl_enable"):
            cherrypy.config.update(
                {
                    "server.ssl_module": "builtin",
                    "server.ssl_certificate": config.get("app.ssl_cert"),
                    "server.ssl_private_key": config.get("app.ssl_key"),
                    "server.ssl_certificate_chain": config.get("ssl_bundle"),
                }
            )
        # cherrypy.log.error_log.propagate = False
        # Silence per-request access logging unless debugging
        if config.get_string("app.log_level").upper() != "DEBUG":
            cherrypy.log.access_log.propagate = False
        # Mount applications (DNSAdminAPI serves as the site root).
        # BUG FIX: removed the dead ``root = Root()`` assignment that was
        # immediately overwritten by the line below.
        root = DNSAdminAPI(
            save_queue=worker_manager.save_queue,
            delete_queue=worker_manager.delete_queue,
            backend_registry=registry,
        )
        root.health = HealthAPI(registry)
        # Add queue status endpoint
        root.queue_status = lambda: worker_manager.queue_status()
        cherrypy.tree.mount(root, "/")
        cherrypy.engine.start()
        logger.success(f"Server started on port {config.get_int('app.listen_port')}")
        # Stop workers when CherryPy shuts down, then block until exit
        cherrypy.engine.subscribe("stop", worker_manager.stop)
        cherrypy.engine.block()
    except Exception as e:
        logger.critical(f"Server startup failed: {e}")
        if "worker_manager" in locals():
            worker_manager.stop()
        raise
# Script entry point
if __name__ == "__main__":
    main()

282
directdnsonly/worker.py Normal file
View File

@@ -0,0 +1,282 @@
import os
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from loguru import logger
from persistqueue import Queue
from persistqueue.exceptions import Empty
from app.utils import check_zone_exists, put_zone_index
from app.utils.zone_parser import count_zone_records
from directdnsonly.app.db.models import Domain
from directdnsonly.app.db import connect
class WorkerManager:
    """Owns the persistent save/delete queues and the background worker
    thread that applies queued zone updates to every available backend."""

    def __init__(self, queue_path: str, backend_registry):
        """Create the on-disk queues under ``queue_path``.

        Args:
            queue_path: Directory used for persist-queue storage.
            backend_registry: Registry used to look up active DNS backends.

        Raises:
            Exception: Re-raised if the queue storage cannot be initialized.
        """
        self.queue_path = queue_path
        self.backend_registry = backend_registry
        self._running = False  # worker-loop flag, toggled by start()/stop()
        self._thread = None
        # Initialize queues with error handling
        try:
            os.makedirs(queue_path, exist_ok=True)
            self.save_queue = Queue(f"{queue_path}/save")
            self.delete_queue = Queue(f"{queue_path}/delete")
            logger.success(f"Initialized queues at {queue_path}")
        except Exception as e:
            logger.critical(f"Failed to initialize queues: {e}")
            raise

    def _process_save_queue(self):
        """Main worker loop for processing save requests"""
        logger.info("Save queue worker started")
        # Get DB Connection
        session = connect()
        # Batch tracking: a "batch" starts at the first dequeued item and
        # ends when the queue drains (Empty is raised after the timeout).
        batch_start = None
        batch_processed = 0
        batch_failed = 0
        while self._running:
            try:
                # Blocking get with timeout so stop() is honored promptly
                item = self.save_queue.get(block=True, timeout=5)
                # Start a new batch timer on the first item
                if batch_start is None:
                    batch_start = time.monotonic()
                    batch_processed = 0
                    batch_failed = 0
                    pending = self.save_queue.qsize()
                    logger.info(
                        f"📥 Batch started — {pending + 1} zone(s) queued "
                        f"for processing"
                    )
                logger.debug(
                    f"Processing zone update for {item.get('domain', 'unknown')}"
                )
                # Record the zone in the domain index on first sight
                if not check_zone_exists(item.get("domain")):
                    put_zone_index(
                        item.get("domain"), item.get("hostname"), item.get("username")
                    )
                # Validate item structure
                # NOTE(review): validation happens AFTER the index insert
                # above, so malformed items may still be indexed — confirm
                # this ordering is intended.
                if not all(k in item for k in ["domain", "zone_file"]):
                    logger.error(f"Invalid queue item: {item}")
                    self.save_queue.task_done()
                    batch_failed += 1
                    continue
                # Process with all available backends
                backends = self.backend_registry.get_available_backends()
                if not backends:
                    # NOTE(review): no ``continue`` here — the item is still
                    # marked done and counted as processed below.
                    logger.warning("No active backends available!")
                if len(backends) > 1:
                    # Process backends in parallel for faster sync
                    logger.debug(
                        f"Processing {item['domain']} across "
                        f"{len(backends)} backends concurrently: "
                        f"{', '.join(backends.keys())}"
                    )
                    self._process_backends_parallel(
                        backends, item, session
                    )
                else:
                    # Single backend, no need for thread overhead
                    for backend_name, backend in backends.items():
                        self._process_single_backend(
                            backend_name, backend, item, session
                        )
                self.save_queue.task_done()
                batch_processed += 1
                logger.debug(f"Completed processing for {item['domain']}")
            except Empty:
                # Queue is empty — if we were in a batch, log the summary
                if batch_start is not None:
                    elapsed = time.monotonic() - batch_start
                    total = batch_processed + batch_failed
                    rate = batch_processed / elapsed if elapsed > 0 else 0
                    logger.success(
                        f"📦 Batch complete — {batch_processed}/{total} zone(s) "
                        f"processed successfully in {elapsed:.1f}s "
                        f"({rate:.1f} zones/sec)"
                        + (f", {batch_failed} failed" if batch_failed else "")
                    )
                    batch_start = None
                    batch_processed = 0
                    batch_failed = 0
                continue
            except Exception as e:
                logger.error(f"Unexpected worker error: {e}")
                batch_failed += 1
                time.sleep(1)  # Prevent tight error loops

    def _process_single_backend(self, backend_name, backend, item, session):
        """Process a zone update for a single backend"""
        try:
            logger.debug(f"Using backend: {backend_name}")
            if backend.write_zone(item["domain"], item["zone_file"]):
                logger.debug(
                    f"Successfully updated {item['domain']} in {backend_name}"
                )
                if backend.get_name() == "bind":
                    # Need to update the named.conf with the full domain list
                    backend.update_named_conf(
                        [d.domain for d in session.query(Domain).all()]
                    )
                    # Reload all zones
                    backend.reload_zone()
                else:
                    backend.reload_zone(zone_name=item["domain"])
                # Verify record count matches the source zone from DirectAdmin
                self._verify_backend_record_count(
                    backend_name, backend, item["domain"], item["zone_file"]
                )
            else:
                logger.error(
                    f"Failed to update {item['domain']} in {backend_name}"
                )
        except Exception as e:
            logger.error(f"Error in {backend_name}: {str(e)}")

    def _process_backends_parallel(self, backends, item, session):
        """Process zone updates across multiple backends in parallel"""
        start_time = time.monotonic()
        # One worker thread per backend; each runs the single-backend path.
        # NOTE(review): ``session`` is shared across threads here — confirm
        # the session implementation is safe for concurrent reads.
        with ThreadPoolExecutor(
            max_workers=len(backends),
            thread_name_prefix="backend"
        ) as executor:
            futures = {
                executor.submit(
                    self._process_single_backend,
                    backend_name, backend, item, session
                ): backend_name
                for backend_name, backend in backends.items()
            }
            for future in as_completed(futures):
                backend_name = futures[future]
                try:
                    future.result()
                except Exception as e:
                    logger.error(
                        f"Unhandled error processing backend "
                        f"{backend_name}: {str(e)}"
                    )
        elapsed = (time.monotonic() - start_time) * 1000
        logger.debug(
            f"Parallel processing of {item['domain']} across "
            f"{len(backends)} backends completed in {elapsed:.0f}ms"
        )

    def _verify_backend_record_count(
        self, backend_name, backend, zone_name, zone_data
    ):
        """Verify and reconcile the backend record count against the
        authoritative BIND zone from DirectAdmin.

        After a successful write, this method checks whether the number of
        records stored in the backend matches the number of records parsed
        from the source zone file. If there are **extra** records in the
        backend (e.g. from replication drift or stale data) they are
        automatically removed via the backend's reconcile method.

        Args:
            backend_name: Display name of the backend instance
            backend: The backend instance
            zone_name: The zone that was just written
            zone_data: The raw BIND zone file content (authoritative source)
        """
        try:
            expected = count_zone_records(zone_data, zone_name)
            # count_zone_records returns -1 when the source cannot be parsed
            if expected < 0:
                logger.warning(
                    f"[{backend_name}] Could not parse source zone for "
                    f"{zone_name} — skipping record count verification"
                )
                return
            matches, actual = backend.verify_zone_record_count(
                zone_name, expected
            )
            if matches:
                return  # All good
            if actual > expected:
                logger.warning(
                    f"[{backend_name}] Backend has {actual - expected} extra "
                    f"record(s) for {zone_name} — reconciling against "
                    f"DirectAdmin source zone"
                )
                success, removed = backend.reconcile_zone_records(
                    zone_name, zone_data
                )
                if success and removed > 0:
                    # Verify again after reconciliation
                    matches, new_count = backend.verify_zone_record_count(
                        zone_name, expected
                    )
                    if matches:
                        logger.success(
                            f"[{backend_name}] Reconciliation successful for "
                            f"{zone_name}: removed {removed} extra record(s), "
                            f"count now matches source ({new_count})"
                        )
                    else:
                        logger.error(
                            f"[{backend_name}] Reconciliation for {zone_name} "
                            f"removed {removed} record(s) but count still "
                            f"mismatched: expected {expected}, got {new_count}"
                        )
            else:
                logger.warning(
                    f"[{backend_name}] Backend has fewer records than source "
                    f"for {zone_name} (expected {expected}, got {actual}) — "
                    f"this may indicate a write failure; the next zone push "
                    f"from DirectAdmin should correct this"
                )
        except NotImplementedError:
            # Backends without count verification simply opt out
            logger.debug(
                f"[{backend_name}] Record count verification not "
                f"supported — skipping"
            )
        except Exception as e:
            logger.error(
                f"[{backend_name}] Error during record count verification "
                f"for {zone_name}: {e}"
            )

    def start(self):
        """Start background workers"""
        if self._running:
            return
        self._running = True
        # Daemon thread so it never blocks interpreter shutdown
        self._thread = threading.Thread(
            target=self._process_save_queue, daemon=True, name="save_queue_worker"
        )
        self._thread.start()
        logger.info(f"Started worker thread {self._thread.name}")

    def stop(self):
        """Stop background workers gracefully"""
        self._running = False
        if self._thread:
            # The loop's 5s queue timeout bounds how long this join waits
            self._thread.join(timeout=5)
        logger.info("Workers stopped")

    def queue_status(self):
        """Return current queue status"""
        return {
            "save_queue_size": self.save_queue.qsize(),
            "delete_queue_size": self.delete_queue.qsize(),
            "worker_alive": self._thread and self._thread.is_alive(),
        }

View File

@@ -1,48 +1,52 @@
version: '3.7'
services:
app:
image: registry.dockerprod.ultrafast.co.nz/uff/apikeyhandler:0.10
networks:
- traefik-net
volumes:
- /etc/localtime:/etc/localtime:ro # Mount Timezone config to container
- /data/swarm-vols/apikeyhandler:/opt/apikeyhandler/config # Store Config on Persistent drive shared between nodes
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- node.role == worker # Place this service on Worker Nodes alternatively may specify manager if you want service on manager node.
labels:
- "traefik.http.routers.apikeyauth.rule=Host(`apiauth-internal.dockertest.ultrafast.co.nz`)" # This label creates a route Traefik will listen on
- "traefik.http.routers.apikeyauth.tls=true" # Enable TLS, in this example using default TLS cert
- "traefik.http.services.apikeyauth.loadbalancer.server.port=8080" # Set Port to proxy
- "traefik.enable=true" # This flag enables load balancing through Traefik :)
- "traefik.docker.network=traefik-net" # Set the network to connect to container on
- "traefik.http.middlewares.apikeyauth.forwardauth.address=https://apiauth-internal.dockertest.ultrafast.co.nz"
- "traefik.http.middlewares.apikeyauth.forwardauth.trustForwardHeader=true"
- "traefik.http.middlewares.apikeyauth.forwardauth.authResponseHeaders=X-Client-Id"
- "traefik.http.middlewares.apikeyauth.forwardauth.tls.insecureSkipVerify=true"
test_app:
image: containous/whoami
networks:
- traefik-net
volumes:
- /etc/localtime:/etc/localtime:ro # Mount Timezone config to container
deploy:
mode: replicated
replicas: 1
placement:
constraints:
- node.role == worker # Place this service on Worker Nodes alternatively may specify manager if you want service on manager node.
labels:
- "traefik.http.routers.testapp.rule=Host(`testapp.dockertest.ultrafast.co.nz`)" # This label creates a route Traefik will listen on
- "traefik.http.routers.testapp.tls=true" # Enable TLS, in this example using default TLS cert
- "traefik.http.routers.testapp.middlewares=apikeyauth"
- "traefik.http.services.testapp.loadbalancer.server.port=80" # Set Port to proxy
- "traefik.enable=true" # This flag enables load balancing through Traefik :)
- "traefik.docker.network=traefik-net" # Set the network to connect to container on
version: '3.8'
networks:
traefik-net:
external: true
services:
mysql:
image: mysql:8.0
container_name: dadns_mysql
environment:
MYSQL_ROOT_PASSWORD: rootpassword
MYSQL_DATABASE: coredns
MYSQL_USER: coredns
MYSQL_PASSWORD: coredns123
ports:
- "3306:3306"
volumes:
- ./schema/coredns_mysql.sql:/docker-entrypoint-initdb.d/init.sql
- mysql_data:/var/lib/mysql
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
interval: 5s
timeout: 5s
retries: 5
dadns:
build:
dockerfile: Dockerfile.deepseek
context: .
no_cache: false
container_name: dadns_app
depends_on:
mysql:
condition: service_healthy
ports:
- "2222:2222"
volumes:
- ./config:/app/config
- ./data:/app/data
- ./logs:/app/logs
environment:
- TZ=Pacific/Auckland
- DNS_BACKENDS__BIND__ENABLED=true
- DNS_BACKENDS__BIND__ZONES_DIR=/etc/named/zones/dadns
- DNS_BACKENDS__BIND__NAMED_CONF=/etc/bind/named.conf.local
- DNS_BACKENDS__COREDNS_MYSQL__ENABLED=true
- DNS_BACKENDS__COREDNS_MYSQL__HOST=mysql
- DNS_BACKENDS__COREDNS_MYSQL__PORT=3306
- DNS_BACKENDS__COREDNS_MYSQL__DATABASE=coredns
- DNS_BACKENDS__COREDNS_MYSQL__USERNAME=coredns
- DNS_BACKENDS__COREDNS_MYSQL__PASSWORD=coredns123
restart: unless-stopped
volumes:
mysql_data:

12
docker/entrypoint.sh Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Container entrypoint: run BIND alongside the DaDNS application.
# BUG FIX: added strict mode and exec'd the app so it receives stop signals.
set -euo pipefail

# Start BIND (foreground mode, backgrounded so the app can start too)
/usr/sbin/named -u bind -f &

## Initialize MySQL schema if needed
#if [ -f /app/schema/coredns_mysql.sql ]; then
#    mysql -h mysql -u root -prootpassword coredns < /app/schema/coredns_mysql.sql
#fi

# Start the application, replacing this shell so SIGTERM from
# `docker stop` reaches the Python process directly
exec poetry run python directdnsonly/main.py

4
docker/named.conf.local Normal file
View File

@@ -0,0 +1,4 @@
zone "guise.nz" {
type master;
file "/etc/named/zones/dadns/guise.nz.db";
};

View File

@@ -0,0 +1,8 @@
options {
directory "/var/cache/bind";
allow-query { any; };
recursion no;
dnssec-validation no;
listen-on { any; };
listen-on-v6 { any; };
};

17
justfile Normal file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env just --justfile

APP_NAME := "directdnsonly"

# Build a single-file executable with PyInstaller.
# NOTE(review): this recipe builds from ./src, but the commit migrates the
# package to ./directdnsonly under Poetry — confirm the path is still valid.
build:
    cd src && \
    pyinstaller \
    -p . \
    --hidden-import=json \
    --hidden-import=pyopenssl \
    --hidden-import=pymysql \
    --hidden-import=jaraco \
    --hidden-import=cheroot \
    --hidden-import=cheroot.ssl.pyopenssl \
    --hidden-import=cheroot.ssl.builtin \
    --hidden-import=lib \
    --hidden-import=os \
    --hidden-import=builtins \
    --noconfirm --onefile {{APP_NAME}}.py

1073
poetry.lock generated Normal file

File diff suppressed because it is too large Load Diff

2
poetry.toml Normal file
View File

@@ -0,0 +1,2 @@
[virtualenvs]
in-project = true

34
pyproject.toml Normal file
View File

@@ -0,0 +1,34 @@
[project]
name = "directdnsonly"
version = "1.0.9"
description = "DNS Management System - DirectAdmin to multiple backends"
authors = [
{name = "Aaron Guise",email = "aaron@guise.net.nz"}
]
license = {text = "MIT"}
readme = "README.md"
requires-python = ">=3.11,<3.14"
dependencies = [
"vyper-config (>=1.2.1,<2.0.0)",
"loguru (>=0.7.3,<0.8.0)",
"persist-queue (>=1.0.0,<2.0.0)",
"cherrypy (>=18.10.0,<19.0.0)",
"sqlalchemy (<2.0.0)",
"pymysql (>=1.1.1,<2.0.0)",
"dnspython (>=2.7.0,<3.0.0)",
"pyyaml (>=6.0.2,<7.0.0)",
]
[tool.poetry]
package-mode = true
[tool.poetry.group.dev.dependencies]
black = "^25.1.0"
pyinstaller = "^6.13.0"
pytest = "^8.3.5"
pytest-cov = "^6.1.1"
pytest-mock = "^3.14.0"
[build-system]
requires = ["poetry-core>=2.0.0,<3.0.0"]
build-backend = "poetry.core.masonry.api"

12
schema/coredns_mysql.sql Normal file
View File

@@ -0,0 +1,12 @@
-- Schema for the CoreDNS MySQL backend: one row per resource record.
CREATE TABLE IF NOT EXISTS `records` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  -- Zone the record belongs to (indexed for per-zone lookups)
  `zone` varchar(255) NOT NULL,
  `name` varchar(255) NOT NULL,
  `ttl` int(11) DEFAULT NULL,
  `type` varchar(10) NOT NULL,
  -- Record payload; format is whatever the CoreDNS mysql plugin expects
  -- (NOTE: verify against the plugin's documented row format)
  `data` text NOT NULL,
  PRIMARY KEY (`id`),
  KEY `idx_zone` (`zone`),
  KEY `idx_name` (`name`),
  KEY `idx_type` (`type`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;

0
tests/__init__.py Normal file
View File

View File

@@ -0,0 +1,47 @@
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from directdnsonly.app.backends.coredns_mysql import CoreDNSMySQLBackend, CoreDNSRecord
from loguru import logger
@pytest.fixture
def mysql_backend(tmp_path):
    """Yield a CoreDNS backend wired to an in-memory SQLite engine.

    (Replace with a real test MySQL instance in CI when available.)
    """
    # Setup in-memory SQLite for testing (replace with test MySQL in CI)
    engine = create_engine("sqlite:///:memory:")
    CoreDNSRecord.metadata.create_all(engine)

    class _TestBackend(CoreDNSMySQLBackend):
        # Leading underscore avoids pytest trying to collect a Test* class.
        def __init__(self):
            # BUG FIX: do NOT call super().__init__() — the real constructor
            # builds a MySQL engine from application config, which is
            # unavailable (and unwanted) in unit tests. Wire the test engine
            # and session factory directly instead.
            self.engine = engine
            self.Session = scoped_session(sessionmaker(bind=engine))

    yield _TestBackend()
    engine.dispose()
def test_zone_operations(mysql_backend):
    """End-to-end CRUD check: create, update, shrink, and delete a zone."""
    zone_data = """
example.com. 300 IN SOA ns.example.com. admin.example.com. (2023 3600 1800 604800 86400)
example.com. 300 IN A 192.0.2.1
"""
    # Test zone creation
    assert mysql_backend.write_zone("example.com", zone_data)
    assert mysql_backend.zone_exists("example.com")
    # Test record update (TTL change plus an added AAAA record)
    updated_zone = """
example.com. 3600 IN A 192.0.2.1
example.com. 300 IN AAAA 2001:db8::1
"""
    assert mysql_backend.write_zone("example.com", updated_zone)
    # Test record removal (AAAA dropped from the source zone)
    reduced_zone = "example.com. 300 IN A 192.0.2.1"
    assert mysql_backend.write_zone("example.com", reduced_zone)
    # Test zone deletion
    assert mysql_backend.delete_zone("example.com")
    assert not mysql_backend.zone_exists("example.com")