10 Commits

Author SHA1 Message Date
41d0f13ba8 feat: Add JSON structured logging and fix mariadb-devel package name
All checks were successful
CI / build (push) Successful in 13s
CI / release (release) Successful in 11m54s
2026-02-21 21:04:08 +13:00
6d5cfc3110 feat: Add Docker support with environment configuration and cron maintenance
Some checks failed
CI / build (push) Successful in 37s
CI / release (release) Failing after 3m9s
2026-02-21 16:48:05 +13:00
300ef37e06 fix: Revert to dnf from microdnf for package management consistency
All checks were successful
CI / build (push) Successful in 10s
CI / release (release) Successful in 11m9s
2025-12-04 15:10:14 +13:00
3cc0cd2955 fix: Escape percentage signs in CMD for proper log formatting
Some checks failed
CI / build (push) Successful in 11s
CI / release (release) Failing after 52s
2025-12-04 15:07:35 +13:00
9dec26252a fix: Replace dnf with microdnf for package management consistency 🐛
Some checks failed
CI / build (push) Successful in 10s
CI / release (release) Failing after 46s
2025-12-04 15:02:37 +13:00
acfae94e00 fix: Update base image to almalinux:9-minimal for improved stability
Some checks failed
CI / build (push) Successful in 10s
CI / release (release) Failing after 31s
2025-12-04 14:41:59 +13:00
7172bbf4e0 fix: Escape quotes in CMD for proper execution 🐛
All checks were successful
CI / build (push) Successful in 14s
CI / release (release) Successful in 11m31s
2025-12-04 14:41:12 +13:00
9511f003d1 fix: Update base image and adjust ara installation dependencies 🐛
All checks were successful
CI / build (push) Successful in 33s
CI / release (release) Successful in 8m36s
2025-12-04 14:09:13 +13:00
66a7f6d86b fix: Listen on ipv6 address 🐛
All checks were successful
CI / build (push) Successful in 1m49s
CI / release (release) Successful in 12m44s
2025-02-18 21:58:53 +13:00
7a1184375d fix: CRB was not enabled
All checks were successful
CI / build (push) Successful in 13s
CI / release (release) Successful in 11m33s
2025-02-15 07:31:05 +13:00
9 changed files with 471 additions and 30 deletions

56
.env.example Normal file
View File

@@ -0,0 +1,56 @@
# ============================================================
# Copy this file to .env and edit before running:
#   cp .env.example .env
# ============================================================
# ------------------------------------------------------------
# MariaDB
# ------------------------------------------------------------
MARIADB_ROOT_PASSWORD=rootsecret
ARA_DATABASE_NAME=ara
ARA_DATABASE_USER=ara
ARA_DATABASE_PASSWORD=arasecret
# ------------------------------------------------------------
# ARA core
# ------------------------------------------------------------
# IMPORTANT: replace with a long random string in production.
# Generate one with e.g.: openssl rand -hex 32
ARA_SECRET_KEY=changeme_use_a_long_random_string
# Timezone — controls BOTH crond scheduling AND ARA display times.
# Use a tz database name: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# Examples: UTC, Pacific/Auckland, Australia/Sydney, Europe/London
TZ=Pacific/Auckland
# JSON-style list of hostnames/IPs the server responds to.
# Example: ARA_ALLOWED_HOSTS=["ara.example.com"]
# The default "*" accepts any Host header — restrict it in production.
ARA_ALLOWED_HOSTS=["*"]
ARA_LOG_LEVEL=INFO
# ------------------------------------------------------------
# Server tuning
# ------------------------------------------------------------
ARA_PORT=8000
ARA_GUNICORN_WORKERS=4
ARA_PAGE_SIZE=100
ARA_DATABASE_CONN_MAX_AGE=60
# ------------------------------------------------------------
# Security / auth (set to true to require login)
# ------------------------------------------------------------
ARA_READ_LOGIN_REQUIRED=false
ARA_WRITE_LOGIN_REQUIRED=false
# ------------------------------------------------------------
# Maintenance — automatic pruning via built-in cron
# ------------------------------------------------------------
# Delete playbooks older than this many days
ARA_PRUNE_DAYS=30
# Standard cron schedule — default: daily at 02:00
# Examples:
#   every 6 hours -> 0 */6 * * *
#   every day     -> 0 2 * * *
ARA_PRUNE_CRON=0 2 * * *

View File

@@ -29,10 +29,12 @@ jobs:
- name: Build and push
uses: docker/build-push-action@v6
with:
context: .
file: docker/Dockerfile
platforms: >
linux/amd64,
linux/arm64
push: true
tags: >
${{ secrets.DOCKERHUB_USERNAME }}/ara-api:latest,
${{ secrets.DOCKERHUB_USERNAME }}/ara-api:${{ env.GITHUB_REF_NAME }}
${{ secrets.DOCKERHUB_USERNAME }}/ara-api:${{ github.ref_name }}

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.env

View File

@@ -1,28 +0,0 @@
FROM almalinux:9.5
ARG DEV_DEPENDENCIES="gcc python3-devel postgresql-devel mariadb-devel"
RUN dnf install -y epel-release \
&& dnf update -y \
&& dnf install -y which \
python3-pip \
postgresql \
libpq \
mariadb-connector-c
# Install development dependencies required for installing packages from PyPI
RUN dnf install -y ${DEV_DEPENDENCIES}
# Install ara from PyPI with API server extras for dependencies (django & django-rest-framework)
# including database backend libraries and gunicorn
RUN python3 -m pip install "ara[server,postgresql,mysql]" gunicorn
# Remove development dependencies and clean up
RUN dnf remove -y ${DEV_DEPENDENCIES} \
&& dnf autoremove -y \
&& dnf clean all \
&& python3 -m pip cache purge
# Set up the container to execute SQL migrations and run the API server with gunicorn
ENV ARA_BASE_DIR=/opt/ara
CMD ["bash", "-c", "/usr/local/bin/ara-manage migrate && python3 -m gunicorn --workers=4 --access-logfile - --bind 0.0.0.0:8000 ara.server.wsgi" ]
EXPOSE 8000

129
README.md
View File

@@ -1,3 +1,130 @@
# docker-ara
Dockerized ARA on Almalinux
Slim, production-ready container image for [ARA Records Ansible](https://ara.readthedocs.io/en/latest/) built on **AlmaLinux 9 minimal**.
Features:
- Minimal image footprint (`almalinux:9.5-minimal` base, build tools removed after install)
- **tini** as PID 1 — correct signal handling and zombie reaping for crond child processes
- Supports **SQLite** (default), **MariaDB/MySQL**, and **PostgreSQL** via environment variables
- Built-in **crond** — automatically prunes playbooks older than a configurable number of days
- All settings configurable at runtime through environment variables
---
## Quick start (SQLite)
```bash
docker build -f docker/Dockerfile -t ara .
docker run -d \
--name ara \
-p 8000:8000 \
-v ara_data:/opt/ara \
ara
```
Then open <http://localhost:8000>.
---
## Environment variables
All ARA server settings map directly to environment variables.
Full reference: <https://ara.readthedocs.io/en/latest/api-configuration.html>
### Core
| Variable | Default | Description |
|---|---|---|
| `ARA_BASE_DIR` | `/opt/ara` | Data & config directory |
| `ARA_SECRET_KEY` | *(random)* | Django secret key — **set a stable value in production** |
| `ARA_ALLOWED_HOSTS` | `["127.0.0.1","localhost","::1"]` | Hosts the server will respond to |
| `TZ` | `UTC` | System timezone — controls **when crond fires** |
| `ARA_TIME_ZONE` | same as `TZ` | Timezone for ARA to store/display results — keep in sync with `TZ` |
| `ARA_LOG_LEVEL` | `INFO` | Log verbosity (`DEBUG`, `INFO`, `WARNING`, `ERROR`) |
### Database
| Variable | Default | Description |
|---|---|---|
| `ARA_DATABASE_ENGINE` | `django.db.backends.sqlite3` | `sqlite3`, `mysql`, or `postgresql` |
| `ARA_DATABASE_NAME` | `~/.ara/server/ansible.sqlite` | DB name (or path for SQLite) |
| `ARA_DATABASE_HOST` | *(none)* | Database host |
| `ARA_DATABASE_PORT` | *(none)* | Database port |
| `ARA_DATABASE_USER` | *(none)* | Database user |
| `ARA_DATABASE_PASSWORD` | *(none)* | Database password |
| `ARA_DATABASE_CONN_MAX_AGE` | `0` | Persistent connection lifetime (seconds) |
### Security / authentication
| Variable | Default | Description |
|---|---|---|
| `ARA_READ_LOGIN_REQUIRED` | `false` | Require auth for read requests |
| `ARA_WRITE_LOGIN_REQUIRED` | `false` | Require auth for write requests |
### Server tuning
| Variable | Default | Description |
|---|---|---|
| `ARA_PORT` | `8000` | Port gunicorn listens on |
| `ARA_GUNICORN_WORKERS` | `4` | Number of gunicorn worker processes |
| `ARA_PAGE_SIZE` | `100` | Results per page from the API |
### Maintenance / pruning
| Variable | Default | Description |
|---|---|---|
| `ARA_PRUNE_DAYS` | `30` | Delete playbooks older than this many days |
| `ARA_PRUNE_CRON` | `0 2 * * *` | Cron schedule for pruning (daily at 02:00) |
The prune job uses `ara playbook prune --client offline` so it accesses the database directly without going through the HTTP server. Output is forwarded to `docker logs`.
---
## Docker Compose — MariaDB
See [`docker-compose.yml`](docker-compose.yml) for a ready-to-use stack with MariaDB.
```bash
# Copy and edit the environment file
cp .env.example .env
# Start the stack
docker compose up -d
# View logs
docker compose logs -f ara
```
---
## Manual pruning
```bash
# Preview what would be deleted (no --confirm = dry run)
docker exec ara ara playbook prune --client offline --days 30
# Actually delete
docker exec ara ara playbook prune --client offline --days 30 --confirm
```
---
## Configuring Ansible to report to ARA
Install the ARA callback plugin on your Ansible controller:
```bash
pip install ara
```
Then add to `ansible.cfg`. Note that INI files do not expand `$(...)` shell
substitutions — run `python3 -m ara.setup.callback_plugins` yourself and paste
the path it prints:
```ini
[defaults]
callback_plugins = <path printed by: python3 -m ara.setup.callback_plugins>
[ara]
api_client = http
api_server = http://<ara-host>:8000
```

77
docker-compose.yml Normal file
View File

@@ -0,0 +1,77 @@
services:
  # MariaDB backend for ARA. Credentials default to the values shown in
  # .env.example; override them through a .env file next to this compose file.
  db:
    image: mariadb:11
    restart: unless-stopped
    environment:
      MARIADB_ROOT_PASSWORD: ${MARIADB_ROOT_PASSWORD:-rootsecret}
      MARIADB_DATABASE: ${ARA_DATABASE_NAME:-ara}
      MARIADB_USER: ${ARA_DATABASE_USER:-ara}
      MARIADB_PASSWORD: ${ARA_DATABASE_PASSWORD:-arasecret}
    volumes:
      - db_data:/var/lib/mysql
    # healthcheck.sh ships inside the official mariadb image; this gates the
    # ara service below (service_healthy) until the DB accepts connections.
    healthcheck:
      test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
  ara:
    build:
      context: .
      dockerfile: docker/Dockerfile
    image: ara:latest
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
    ports:
      # Same variable on both sides so gunicorn's bind port and the published
      # port stay in sync.
      - "${ARA_PORT:-8000}:${ARA_PORT:-8000}"
    volumes:
      - ara_data:/opt/ara
    environment:
      # -----------------------------------------------------------------------
      # Core
      # -----------------------------------------------------------------------
      ARA_BASE_DIR: /opt/ara
      ARA_SECRET_KEY: ${ARA_SECRET_KEY:-changeme_use_a_long_random_string}
      ARA_ALLOWED_HOSTS: ${ARA_ALLOWED_HOSTS:-["*"]}
      ARA_TIME_ZONE: ${TZ:-UTC}
      ARA_LOG_LEVEL: ${ARA_LOG_LEVEL:-INFO}
      # System timezone used by crond — keep in sync with ARA_TIME_ZONE
      TZ: ${TZ:-UTC}
      # -----------------------------------------------------------------------
      # Database — MariaDB
      # -----------------------------------------------------------------------
      ARA_DATABASE_ENGINE: django.db.backends.mysql
      ARA_DATABASE_NAME: ${ARA_DATABASE_NAME:-ara}
      ARA_DATABASE_HOST: db
      ARA_DATABASE_PORT: ${ARA_DATABASE_PORT:-3306}
      ARA_DATABASE_USER: ${ARA_DATABASE_USER:-ara}
      ARA_DATABASE_PASSWORD: ${ARA_DATABASE_PASSWORD:-arasecret}
      ARA_DATABASE_CONN_MAX_AGE: ${ARA_DATABASE_CONN_MAX_AGE:-60}
      # -----------------------------------------------------------------------
      # Security / auth
      # -----------------------------------------------------------------------
      ARA_READ_LOGIN_REQUIRED: ${ARA_READ_LOGIN_REQUIRED:-false}
      ARA_WRITE_LOGIN_REQUIRED: ${ARA_WRITE_LOGIN_REQUIRED:-false}
      # -----------------------------------------------------------------------
      # Server tuning
      # -----------------------------------------------------------------------
      ARA_PORT: ${ARA_PORT:-8000}
      ARA_GUNICORN_WORKERS: ${ARA_GUNICORN_WORKERS:-4}
      ARA_PAGE_SIZE: ${ARA_PAGE_SIZE:-100}
      # -----------------------------------------------------------------------
      # Maintenance — prune playbooks older than ARA_PRUNE_DAYS
      # -----------------------------------------------------------------------
      ARA_PRUNE_DAYS: ${ARA_PRUNE_DAYS:-30}
      ARA_PRUNE_CRON: ${ARA_PRUNE_CRON:-0 2 * * *}
volumes:
  db_data:
  ara_data:

67
docker/Dockerfile Normal file
View File

@@ -0,0 +1,67 @@
FROM almalinux:9.5-minimal
# Build-time only toolchain: compiler + headers needed by pip to build the
# PostgreSQL/MariaDB driver extensions; removed again after the install layer.
ARG DEV_DEPENDENCIES="gcc python3-devel postgresql-devel mariadb-connector-c-devel"
# Install only the runtime packages we need, including cronie for cron support.
# tini is used as PID 1 to reap zombie processes spawned by crond and forward
# signals correctly to gunicorn on `docker stop`.
# tzdata is required so named timezones (e.g. Pacific/Auckland) are available
# to crond when TZ is set at runtime.
RUN microdnf install -y epel-release \
    && microdnf install -y \
        python3-pip \
        libpq \
        mariadb-connector-c \
        cronie \
        tini \
        tzdata \
    && microdnf clean all
# Install build-time dependencies, build Python packages, then remove them —
# all in one RUN so the toolchain never persists in an image layer.
RUN microdnf install -y ${DEV_DEPENDENCIES} \
    && python3 -m pip install --no-cache-dir "ara[server,postgresql,mysql]" gunicorn \
    && microdnf remove -y ${DEV_DEPENDENCIES} \
    && microdnf clean all
COPY docker/entrypoint.sh /entrypoint.sh
COPY docker/json_logger.py /json_logger.py
RUN chmod +x /entrypoint.sh
# ---------------------------------------------------------------------------
# ARA server configuration — all values can be overridden at runtime via
# environment variables (see https://ara.readthedocs.io/en/latest/api-configuration.html)
# ---------------------------------------------------------------------------
# Core
ENV ARA_BASE_DIR=/opt/ara
# json_logger.py is copied to / above; PYTHONPATH=/ lets gunicorn import it
# via --logger-class json_logger.JsonLogger (see entrypoint.sh).
ENV PYTHONPATH=/
# ENV ARA_SECRET_KEY=changeme           # set a stable secret in production
# ENV ARA_ALLOWED_HOSTS="['*']"         # restrict to your hostname(s)
# ENV ARA_TIME_ZONE=UTC                 # ARA display/storage timezone
# ENV TZ=UTC                            # system/crond timezone — set to match ARA_TIME_ZONE
# Database (defaults to sqlite inside ARA_BASE_DIR)
# ENV ARA_DATABASE_ENGINE=django.db.backends.postgresql
# ENV ARA_DATABASE_NAME=ara
# ENV ARA_DATABASE_USER=ara
# ENV ARA_DATABASE_PASSWORD=secret
# ENV ARA_DATABASE_HOST=db
# ENV ARA_DATABASE_PORT=5432
# Security / auth
# ENV ARA_READ_LOGIN_REQUIRED=false
# ENV ARA_WRITE_LOGIN_REQUIRED=false
# Server tuning
# ENV ARA_PORT=8000
# ENV ARA_GUNICORN_WORKERS=4
# ENV ARA_PAGE_SIZE=100
# ENV ARA_LOG_LEVEL=INFO
# Maintenance / pruning
# ENV ARA_PRUNE_DAYS=30                 # delete playbooks older than N days
# ENV ARA_PRUNE_CRON="0 2 * * *"        # cron schedule for pruning (default: daily 02:00)
EXPOSE ${ARA_PORT:-8000}
ENTRYPOINT ["/usr/bin/tini", "--", "/entrypoint.sh"]

52
docker/entrypoint.sh Normal file
View File

@@ -0,0 +1,52 @@
#!/bin/bash
# Container entrypoint: configure timezone, register the prune cron job,
# run DB migrations, then exec gunicorn as the main process (tini is PID 1).
set -e
# ---------------------------------------------------------------------------
# Timezone — controls when crond fires, not just ARA display times.
# Set TZ at runtime, e.g.: -e TZ=Pacific/Auckland
# Defaults to UTC if not set; falls back to UTC (with a warning) when the
# requested zone is not present in tzdata, instead of silently leaving a
# dangling /etc/localtime symlink.
# ---------------------------------------------------------------------------
TZ="${TZ:-UTC}"
if [ ! -e "/usr/share/zoneinfo/${TZ}" ]; then
    echo "WARNING: unknown timezone '${TZ}' — falling back to UTC" >&2
    TZ=UTC
fi
ln -snf "/usr/share/zoneinfo/${TZ}" /etc/localtime
echo "${TZ}" > /etc/timezone
export TZ
# ---------------------------------------------------------------------------
# Configurable retention period (days) — override via ARA_PRUNE_DAYS.
# Default: 30 days
# ---------------------------------------------------------------------------
PRUNE_DAYS="${ARA_PRUNE_DAYS:-30}"
# ---------------------------------------------------------------------------
# Cron schedule for pruning — override via ARA_PRUNE_CRON.
# Default: daily at 02:00
# ---------------------------------------------------------------------------
PRUNE_CRON="${ARA_PRUNE_CRON:-0 2 * * *}"
# ---------------------------------------------------------------------------
# Maximum number of playbooks deleted per prune run — override via
# ARA_PRUNE_LIMIT. Default: 9000 (previously hard-coded).
# ---------------------------------------------------------------------------
PRUNE_LIMIT="${ARA_PRUNE_LIMIT:-9000}"
# ---------------------------------------------------------------------------
# Write the cron job.
# Uses `ara playbook prune` with the offline client so it talks directly
# to the same database without needing a running HTTP server.
# ARA_BASE_DIR is inherited from the container environment.
# Output is redirected to /proc/1/fd/1 so it shows up in `docker logs`.
# ---------------------------------------------------------------------------
echo "${PRUNE_CRON} /usr/local/bin/ara playbook prune \
--client offline \
--days ${PRUNE_DAYS} \
--limit ${PRUNE_LIMIT} \
--confirm >> /proc/1/fd/1 2>&1" \
    | crontab -
# Start the cron daemon in the background; tini (PID 1) reaps its children.
crond -n &
# ---------------------------------------------------------------------------
# Run DB migrations then start gunicorn.
# `exec` replaces this shell so gunicorn receives signals from `docker stop`
# directly (forwarded by tini).
# ---------------------------------------------------------------------------
/usr/local/bin/ara-manage migrate
exec python3 -m gunicorn \
    --workers="${ARA_GUNICORN_WORKERS:-4}" \
    --access-logfile - \
    --logger-class json_logger.JsonLogger \
    --bind "[::]:${ARA_PORT:-8000}" \
    ara.server.wsgi

87
docker/json_logger.py Normal file
View File

@@ -0,0 +1,87 @@
"""
Minimal JSON logger for gunicorn.
Replaces gunicorn's default text logger with structured JSON output,
one JSON object per line. Both access and error logs are covered.
Usage:
gunicorn --logger-class json_logger.JsonLogger ...
"""
import json
import logging
import time
from gunicorn.glogging import Logger
class JsonLogger(Logger):
    """Gunicorn logger that emits one JSON object per log line.

    Install with: ``gunicorn --logger-class json_logger.JsonLogger ...``.
    Error/application records are reformatted by ``_JsonFormatter``;
    access records are serialized directly in :meth:`access`.
    """

    # ------------------------------------------------------------------ #
    # Error / application log records                                    #
    # ------------------------------------------------------------------ #
    def setup(self, cfg):
        """Run gunicorn's normal setup, then swap in the JSON formatter."""
        super().setup(cfg)
        # Replace every handler's formatter on both error and access loggers
        for logger_name in ("error_log", "access_log"):
            lgr = getattr(self, logger_name)
            for handler in lgr.handlers:
                handler.setFormatter(_JsonFormatter())

    # ------------------------------------------------------------------ #
    # Access log records                                                 #
    # ------------------------------------------------------------------ #
    def access(self, resp, req, environ, request_time):
        """Emit a structured JSON access log record.

        ``resp``/``req`` are gunicorn response/request objects, ``environ``
        is the WSGI environ dict and ``request_time`` is a
        ``datetime.timedelta``.
        """
        if not self.access_log.handlers or not self.cfg.accesslog:
            return
        status = resp.status
        # resp.status may be a string like "200 OK" or already an int
        if isinstance(status, str):
            status_code = int(status.split(None, 1)[0])
        else:
            status_code = status
        record = {
            "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S%z"),
            "level": "INFO",
            "logger": "gunicorn.access",
            "method": environ.get("REQUEST_METHOD", "-"),
            "path": environ.get("PATH_INFO", "-"),
            "query": environ.get("QUERY_STRING", "") or None,
            "status": status_code,
            "response_bytes": getattr(resp, "sent", None),
            # total_seconds() includes the timedelta's days component; the
            # previous .seconds/.microseconds arithmetic silently dropped it.
            "duration_ms": round(request_time.total_seconds() * 1000, 2),
            "remote_addr": environ.get("REMOTE_ADDR", "-"),
            "x_forwarded_for": environ.get("HTTP_X_FORWARDED_FOR") or None,
            "user_agent": environ.get("HTTP_USER_AGENT", "-"),
            "referer": environ.get("HTTP_REFERER") or None,
            "http_version": environ.get("SERVER_PROTOCOL", "-"),
        }
        # Drop None values for cleaner output
        record = {k: v for k, v in record.items() if v is not None}
        # Write directly to each handler's stream to avoid double-formatting
        # by the handler's own (JSON) formatter.
        line = json.dumps(record)
        for handler in self.access_log.handlers:
            stream = getattr(handler, "stream", None)
            if stream:
                stream.write(line + "\n")
                stream.flush()
class _JsonFormatter(logging.Formatter):
    """Formatter that renders each LogRecord as one JSON object per line."""

    # ISO-8601-style timestamp with numeric UTC offset
    _TIME_FMT = "%Y-%m-%dT%H:%M:%S%z"

    def format(self, record: logging.LogRecord) -> str:
        """Serialize *record* to a single-line JSON string."""
        payload = {
            "timestamp": self.formatTime(record, self._TIME_FMT),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
        }
        # Attach the rendered traceback only when exception info is present.
        if record.exc_info:
            payload["exception"] = self.formatException(record.exc_info)
        return json.dumps(payload)