From d33208420679806e67eea945377a879cc2c3cb0d Mon Sep 17 00:00:00 2001 From: Sami Date: Mon, 23 Mar 2026 14:25:22 -0700 Subject: [PATCH] feat: add host-side auto-updater for zero-touch API container rebuilds When the in-container self-updater downloads a new version, it writes trigger.json. The new systemd path unit watches for this file and runs dashcaddy-update.sh, which backs up current API files, copies the new ones, rebuilds the container, verifies health, and writes result.json. Automatic rollback on build or health check failure. Also fixes undefined `isWindows` variable in self-updater.js and adds DASHCADDY_HOST_UPDATES_DIR env var to the installer's docker-compose template for correct container-to-host path translation. Co-Authored-By: Claude Opus 4.6 --- dashcaddy-api/scripts/dashcaddy-update.sh | 208 ++++++++++++++++++ dashcaddy-api/scripts/dashcaddy-updater.path | 9 + .../scripts/dashcaddy-updater.service | 12 + dashcaddy-api/self-updater.js | 1 + dashcaddy-installer/install.sh | 1 + 5 files changed, 231 insertions(+) create mode 100644 dashcaddy-api/scripts/dashcaddy-update.sh create mode 100644 dashcaddy-api/scripts/dashcaddy-updater.path create mode 100644 dashcaddy-api/scripts/dashcaddy-updater.service diff --git a/dashcaddy-api/scripts/dashcaddy-update.sh b/dashcaddy-api/scripts/dashcaddy-update.sh new file mode 100644 index 0000000..38a9a1c --- /dev/null +++ b/dashcaddy-api/scripts/dashcaddy-update.sh @@ -0,0 +1,208 @@ +#!/usr/bin/env bash +# DashCaddy Host-Side Updater +# Triggered by systemd path unit when the container writes trigger.json. +# Reads the trigger, backs up current API, copies new files, rebuilds container. +# Writes result.json so the new container knows the outcome. +# +# This runs on the HOST, outside the container. 
+ +set -euo pipefail + +readonly UPDATES_DIR="/opt/dashcaddy/updates" +readonly TRIGGER_FILE="${UPDATES_DIR}/trigger.json" +readonly RESULT_FILE="${UPDATES_DIR}/result.json" +readonly BACKUPS_DIR="${UPDATES_DIR}/backups" +readonly CONTAINER_NAME="dashcaddy-api" +readonly MAX_BACKUPS=3 +readonly HEALTH_TIMEOUT=60 + +log() { echo "[dashcaddy-update] $(date '+%Y-%m-%d %H:%M:%S') $*"; } + +write_result() { + local success="$1" version="$2" duration="$3" + shift 3 + local error="${1:-}" + + if [[ "$success" == "true" ]]; then + cat > "$RESULT_FILE" < "$RESULT_FILE" </dev/null | wc -l) + if (( count > MAX_BACKUPS )); then + log "Cleaning old backups (${count} > ${MAX_BACKUPS})" + find "$BACKUPS_DIR" -maxdepth 1 -mindepth 1 -type d -printf '%T+ %p\n' \ + | sort | head -n $(( count - MAX_BACKUPS )) | cut -d' ' -f2- \ + | xargs rm -rf + fi +} + +wait_for_health() { + local port="${1:-3001}" + local timeout="$HEALTH_TIMEOUT" + local elapsed=0 + + log "Waiting for health check (timeout: ${timeout}s)..." + while (( elapsed < timeout )); do + if curl -fsSL --max-time 3 "http://localhost:${port}/health" &>/dev/null; then + log "Health check passed after ${elapsed}s" + return 0 + fi + sleep 2 + elapsed=$(( elapsed + 2 )) + done + + log "Health check FAILED after ${timeout}s" + return 1 +} + +main() { + local start_time + start_time=$(date +%s) + + # 1. Read trigger + if [[ ! 
-f "$TRIGGER_FILE" ]]; then + log "No trigger file found — nothing to do" + exit 0 + fi + + # Parse trigger.json (uses python3 which is available on all supported distros) + local action version from_version staging_dir api_source_dir + action=$(python3 -c "import json; print(json.load(open('${TRIGGER_FILE}'))['action'])") + version=$(python3 -c "import json; print(json.load(open('${TRIGGER_FILE}'))['version'])") + from_version=$(python3 -c "import json; print(json.load(open('${TRIGGER_FILE}'))['fromVersion'])") + staging_dir=$(python3 -c "import json; print(json.load(open('${TRIGGER_FILE}'))['stagingDir'])") + api_source_dir=$(python3 -c "import json; print(json.load(open('${TRIGGER_FILE}'))['apiSourceDir'])") + + log "=== ${action^^}: v${from_version} -> v${version} ===" + log "Staging: ${staging_dir}" + log "API source: ${api_source_dir}" + + # Consume the trigger immediately so we don't re-process on failure + mv "$TRIGGER_FILE" "${TRIGGER_FILE}.processing" + + # 2. Validate staging directory + if [[ ! -d "$staging_dir" ]]; then + log "ERROR: Staging directory not found: ${staging_dir}" + write_result "false" "$version" "$(( $(date +%s) - start_time ))" "Staging directory not found" + rm -f "${TRIGGER_FILE}.processing" + exit 1 + fi + + # 3. Backup current API files + local backup_dir="${BACKUPS_DIR}/${from_version}" + mkdir -p "$backup_dir" + log "Backing up current API files to ${backup_dir}" + + # Copy all JS files, package.json, Dockerfile, and routes + for item in "$api_source_dir"/*.js "$api_source_dir"/package.json "$api_source_dir"/package-lock.json "$api_source_dir"/Dockerfile "$api_source_dir"/openapi.yaml; do + [[ -f "$item" ]] && cp -f "$item" "$backup_dir/" 2>/dev/null || true + done + [[ -d "$api_source_dir/routes" ]] && cp -rf "$api_source_dir/routes" "$backup_dir/" + # Save version marker + echo "$from_version" > "$backup_dir/VERSION" + + cleanup_old_backups + + # 4. Copy new files from staging to API source + log "Deploying new API files..." 
+    for item in "$staging_dir"/*.js "$staging_dir"/package.json "$staging_dir"/package-lock.json "$staging_dir"/Dockerfile "$staging_dir"/openapi.yaml; do
+        [[ -f "$item" ]] && cp -f "$item" "$api_source_dir/" 2>/dev/null || true
+    done
+    # Replace (not merge into) the existing routes dir: cp -R into an existing
+    # directory would create routes/routes and leave stale files behind.
+    [[ -d "$staging_dir/routes" ]] && { rm -rf "$api_source_dir/routes"; cp -rf "$staging_dir/routes" "$api_source_dir/"; }
+
+    # 5. Rebuild container
+    log "Rebuilding container..."
+    cd "$api_source_dir"
+
+    local build_ok=false
+    if docker compose build --quiet 2>&1; then
+        build_ok=true
+    elif docker-compose build --quiet 2>&1; then
+        build_ok=true
+    fi
+
+    if [[ "$build_ok" != "true" ]]; then
+        log "ERROR: Docker build failed — rolling back"
+
+        # Restore backup
+        for item in "$backup_dir"/*.js "$backup_dir"/package.json "$backup_dir"/package-lock.json "$backup_dir"/Dockerfile "$backup_dir"/openapi.yaml; do
+            [[ -f "$item" ]] && cp -f "$item" "$api_source_dir/" 2>/dev/null || true
+        done
+        [[ -d "$backup_dir/routes" ]] && { rm -rf "$api_source_dir/routes"; cp -rf "$backup_dir/routes" "$api_source_dir/"; }
+
+        write_result "false" "$version" "$(( $(date +%s) - start_time ))" "Docker build failed"
+        rm -f "${TRIGGER_FILE}.processing"
+        exit 1
+    fi
+
+    # 6. Restart container
+    log "Restarting container..."
+    if docker compose up -d 2>&1 || docker-compose up -d 2>&1; then
+        log "Container restarted"
+    else
+        log "ERROR: Container restart failed — rolling back"
+
+        # Restore backup
+        for item in "$backup_dir"/*.js "$backup_dir"/package.json "$backup_dir"/package-lock.json "$backup_dir"/Dockerfile "$backup_dir"/openapi.yaml; do
+            [[ -f "$item" ]] && cp -f "$item" "$api_source_dir/" 2>/dev/null || true
+        done
+        [[ -d "$backup_dir/routes" ]] && { rm -rf "$api_source_dir/routes"; cp -rf "$backup_dir/routes" "$api_source_dir/"; }
+
+        docker compose build --quiet 2>&1 || docker-compose build --quiet 2>&1 || true
+        docker compose up -d 2>&1 || docker-compose up -d 2>&1 || true
+
+        write_result "false" "$version" "$(( $(date +%s) - start_time ))" "Container restart failed"
+        rm -f "${TRIGGER_FILE}.processing"
+        exit 1
+    fi
+
+    # 7. 
Health check
+    if wait_for_health; then
+        local duration=$(( $(date +%s) - start_time ))
+        log "=== Update successful: v${version} in ${duration}s ==="
+        write_result "true" "$version" "$duration"
+    else
+        local duration=$(( $(date +%s) - start_time ))
+        log "ERROR: Health check failed after update — rolling back"
+
+        # Restore backup
+        for item in "$backup_dir"/*.js "$backup_dir"/package.json "$backup_dir"/package-lock.json "$backup_dir"/Dockerfile "$backup_dir"/openapi.yaml; do
+            [[ -f "$item" ]] && cp -f "$item" "$api_source_dir/" 2>/dev/null || true
+        done
+        [[ -d "$backup_dir/routes" ]] && { rm -rf "$api_source_dir/routes"; cp -rf "$backup_dir/routes" "$api_source_dir/"; }
+
+        docker compose build --quiet 2>&1 || docker-compose build --quiet 2>&1 || true
+        docker compose up -d 2>&1 || docker-compose up -d 2>&1 || true
+        wait_for_health || log "WARNING: Rollback health check also failed"
+
+        write_result "false" "$version" "$duration" "Health check failed after update"
+    fi
+
+    # 8. Cleanup
+    rm -f "${TRIGGER_FILE}.processing"
+    rm -rf "${UPDATES_DIR}/staging" 2>/dev/null || true
+
+    log "=== Update process complete ==="
+}
+
+main "$@"
diff --git a/dashcaddy-api/scripts/dashcaddy-updater.path b/dashcaddy-api/scripts/dashcaddy-updater.path
new file mode 100644
index 0000000..0506681
--- /dev/null
+++ b/dashcaddy-api/scripts/dashcaddy-updater.path
@@ -0,0 +1,9 @@
+[Unit]
+Description=Watch for DashCaddy update trigger
+
+[Path]
+PathChanged=/opt/dashcaddy/updates/trigger.json
+MakeDirectory=yes
+
+[Install]
+WantedBy=multi-user.target
diff --git a/dashcaddy-api/scripts/dashcaddy-updater.service b/dashcaddy-api/scripts/dashcaddy-updater.service
new file mode 100644
index 0000000..f1e3278
--- /dev/null
+++ b/dashcaddy-api/scripts/dashcaddy-updater.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=DashCaddy auto-update handler
+After=docker.service
+Requires=docker.service
+
+[Service]
+Type=oneshot
+ExecStart=/opt/dashcaddy/scripts/dashcaddy-update.sh
+TimeoutStartSec=300
+StandardOutput=journal
+StandardError=journal +SyslogIdentifier=dashcaddy-update diff --git a/dashcaddy-api/self-updater.js b/dashcaddy-api/self-updater.js index c5ba1de..0735bca 100644 --- a/dashcaddy-api/self-updater.js +++ b/dashcaddy-api/self-updater.js @@ -17,6 +17,7 @@ const crypto = require('crypto'); const { execSync } = require('child_process'); const zlib = require('zlib'); const platformPaths = require('./platform-paths'); +const isWindows = platformPaths.isWindows; const DEFAULTS = { CHECK_INTERVAL: 30 * 60 * 1000, // 30 minutes diff --git a/dashcaddy-installer/install.sh b/dashcaddy-installer/install.sh index a248f46..9df436a 100644 --- a/dashcaddy-installer/install.sh +++ b/dashcaddy-installer/install.sh @@ -661,6 +661,7 @@ services: - DASHCADDY_UPDATE_URL=https://get.dashcaddy.net/release - DASHCADDY_MIRROR_URL=https://get2.dashcaddy.net/release - DASHCADDY_UPDATES_DIR=/app/updates + - DASHCADDY_HOST_UPDATES_DIR=/opt/dashcaddy/updates - DASHCADDY_API_SOURCE_DIR=${API_DIR} - DASHCADDY_FRONTEND_DIR=/app/dashboard extra_hosts: