Files
dashcaddy/dashcaddy-api/scripts/dashcaddy-update.sh
Sami 2d1944fd55 Fix auto-update pipeline bugs discovered in e2e testing
- Fix container-to-host path mapping in trigger.json (stagingDir
  was using container path /app/updates/ instead of host path
  /opt/dashcaddy/updates/)
- Fix download race condition: primary download's async unlink
  could delete mirror download's file — use unlinkSync before retry
- Fix DASHCADDY_COMMIT build arg not passed to docker compose build
  (was set as env var, now uses --build-arg)
- Remove MakeDirectory=yes from systemd path unit (was creating
  trigger.json as directory instead of file)
- Remove unused 'tar' npm module import
- Add mirror fallback for tarball downloads (not just version checks)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-07 03:32:08 -08:00

220 lines
7.7 KiB
Bash

#!/usr/bin/env bash
# DashCaddy Host-Side Updater
# Triggered by systemd path unit when the API container writes trigger.json.
# Handles API container rebuild + restart with automatic rollback on failure.
set -euo pipefail
# Shared paths: the API container drops trigger.json under UPDATES_DIR on the
# host; this script reports its outcome back via result.json in the same dir.
readonly UPDATES_DIR="/opt/dashcaddy/updates"
readonly TRIGGER_FILE="${UPDATES_DIR}/trigger.json"
readonly RESULT_FILE="${UPDATES_DIR}/result.json"
readonly BACKUP_BASE="${UPDATES_DIR}/backups"
# Health endpoint of the rebuilt API container, polled after restart.
readonly HEALTH_URL="http://localhost:3001/health"
# Seconds to wait for a healthy response (polled every 2s in wait_for_health).
readonly HEALTH_TIMEOUT=60
# Number of version snapshots retained under BACKUP_BASE.
readonly MAX_BACKUPS=3
# Timestamped log line to stdout.
log() { echo "[dashcaddy-update] $(date '+%Y-%m-%d %H:%M:%S') $*"; }
write_result() {
# Write the JSON status file the API container reads to learn the outcome.
#   $1 success ("true"/"false"), $2 human-readable message,
#   $3 version string, $4 duration in milliseconds.
local success="$1" message="$2" version="$3" duration="$4"
# Escape backslashes and double quotes so a message (e.g. one embedding a
# quoted error string) cannot produce invalid JSON. Backslashes first.
local esc_message="${message//\\/\\\\}"
esc_message="${esc_message//\"/\\\"}"
local esc_version="${version//\\/\\\\}"
esc_version="${esc_version//\"/\\\"}"
cat > "$RESULT_FILE" <<EOF
{
"success": ${success},
"version": "${esc_version}",
"message": "${esc_message}",
"duration": ${duration},
"timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)"
}
EOF
log "Result written: success=${success} version=${version}"
}
backup_current() {
# Snapshot the currently deployed API source tree so a failed update can be
# rolled back. Missing files are skipped silently (best-effort copy).
#   $1 live API source dir, $2 version label for the snapshot.
local src_dir="$1" ver="$2"
local dest="${BACKUP_BASE}/${ver}"
local item
mkdir -p "${dest}/routes"
cp -f "${src_dir}"/*.js "$dest/" 2>/dev/null || true
cp -rf "${src_dir}/routes/"* "${dest}/routes/" 2>/dev/null || true
for item in package.json package-lock.json Dockerfile openapi.yaml; do
cp -f "${src_dir}/${item}" "$dest/" 2>/dev/null || true
done
log "Backed up version ${ver} to ${dest}"
}
restore_backup() {
# Copy a previously saved snapshot back over the live API source tree.
# Returns 1 (without exiting) when no snapshot exists for the version.
#   $1 live API source dir, $2 version label of the snapshot to restore.
local dest_dir="$1" ver="$2"
local src="${BACKUP_BASE}/${ver}"
local item
if [[ ! -d "$src" ]]; then
log "ERROR: No backup found for version ${ver}"
return 1
fi
cp -f "${src}"/*.js "$dest_dir/" 2>/dev/null || true
cp -rf "${src}/routes/"* "${dest_dir}/routes/" 2>/dev/null || true
for item in package.json package-lock.json Dockerfile openapi.yaml; do
cp -f "${src}/${item}" "$dest_dir/" 2>/dev/null || true
done
log "Restored version ${ver} from ${src}"
}
copy_new_files() {
# Deploy freshly staged update files into the live API source tree.
#   $1 staging directory (extracted update), $2 live API source dir.
local staging_dir="$1" api_dir="$2"
local item
cp -f "${staging_dir}"/*.js "$api_dir/" 2>/dev/null || true
if [ -d "${staging_dir}/routes" ]; then
# Ensure the destination exists first: copying into a missing routes/ dir
# would fail silently (the error is swallowed by || true) and drop files.
mkdir -p "${api_dir}/routes"
cp -rf "${staging_dir}/routes/"* "${api_dir}/routes/" 2>/dev/null || true
fi
for item in package.json package-lock.json Dockerfile openapi.yaml; do
cp -f "${staging_dir}/${item}" "$api_dir/" 2>/dev/null || true
done
log "Copied new files from ${staging_dir} to ${api_dir}"
}
wait_for_health() {
# Poll HEALTH_URL every 2 seconds until it responds or HEALTH_TIMEOUT
# seconds have elapsed. Returns 0 on a successful probe, 1 on timeout.
local try
local total=$((HEALTH_TIMEOUT / 2))
for (( try = 0; try < total; try++ )); do
if curl -fsS --max-time 3 "$HEALTH_URL" >/dev/null 2>&1; then
log "Health check passed (attempt $((try+1)))"
return 0
fi
sleep 2
done
log "Health check FAILED after ${HEALTH_TIMEOUT}s"
return 1
}
find_compose_dir() {
# Locate the directory holding the dashcaddy-api docker-compose file.
# Checks the two known site locations; falls back to the caller-supplied
# directory ($1, the API source dir) when neither contains a compose file.
local candidate
for candidate in /etc/dashcaddy/sites/dashcaddy-api /etc/dashcaddy/sites/caddy-api; do
if [[ -f "${candidate}/docker-compose.yml" || -f "${candidate}/docker-compose.yaml" ]]; then
echo "$candidate"
return 0
fi
done
echo "$1"
}
cleanup_old_backups() {
# Prune snapshots under BACKUP_BASE down to MAX_BACKUPS, removing the
# lexicographically-first (oldest-named) directories, matching the order
# the original `ls` produced.
# Uses a glob instead of parsing `ls` output; this also fixes a crash in the
# original: with an existing-but-empty backup dir, `ls -1d */` failed and the
# `count=$( … | wc -l)` assignment aborted the script under set -e/pipefail.
[ -d "$BACKUP_BASE" ] || return 0
local dirs=()
local d
for d in "${BACKUP_BASE}"/*/; do
# Unmatched globs stay literal (nullglob not set); -d filters that out.
if [ -d "$d" ]; then
dirs+=("$d")
fi
done
local count=${#dirs[@]}
if (( count > MAX_BACKUPS )); then
local excess=$((count - MAX_BACKUPS))
for d in "${dirs[@]:0:excess}"; do
rm -rf "$d"
log "Cleaned old backup: $d"
done
fi
}
main() {
if [ ! -f "$TRIGGER_FILE" ]; then
log "No trigger file found, exiting"
exit 0
fi
local start_time
start_time=$(date +%s)
# Parse trigger file
local action version from_version staging_dir api_dir commit
action=$(python3 -c "import json,sys; d=json.load(open('$TRIGGER_FILE')); print(d.get('action','update'))" 2>/dev/null || echo "update")
version=$(python3 -c "import json,sys; d=json.load(open('$TRIGGER_FILE')); print(d.get('version','unknown'))" 2>/dev/null || echo "unknown")
from_version=$(python3 -c "import json,sys; d=json.load(open('$TRIGGER_FILE')); print(d.get('fromVersion','unknown'))" 2>/dev/null || echo "unknown")
staging_dir=$(python3 -c "import json,sys; d=json.load(open('$TRIGGER_FILE')); print(d.get('stagingDir',''))" 2>/dev/null || echo "")
api_dir=$(python3 -c "import json,sys; d=json.load(open('$TRIGGER_FILE')); print(d.get('apiSourceDir','/opt/dashcaddy'))" 2>/dev/null || echo "/opt/dashcaddy")
commit=$(python3 -c "import json,sys; d=json.load(open('$TRIGGER_FILE')); print(d.get('commit','unknown'))" 2>/dev/null || echo "unknown")
log "=== DashCaddy ${action} started: ${from_version}${version} (${commit}) ==="
if [ -z "$staging_dir" ] || [ ! -d "$staging_dir" ]; then
log "ERROR: Staging directory not found: ${staging_dir}"
write_result "false" "Staging directory not found" "$version" "0"
rm -f "$TRIGGER_FILE"
exit 1
fi
local compose_dir
compose_dir=$(find_compose_dir "$api_dir")
# Step 1: Backup current version
log "Step 1: Backing up current version (${from_version})"
backup_current "$api_dir" "$from_version"
# Step 2: Copy new files
log "Step 2: Copying new files"
copy_new_files "$staging_dir" "$api_dir"
# Write commit hash to VERSION file
echo "$commit" > "${api_dir}/VERSION"
# Step 3: Rebuild container
log "Step 3: Building new container image"
cd "$compose_dir"
if ! docker compose build --build-arg DASHCADDY_COMMIT="$commit" --quiet 2>&1; then
log "ERROR: docker compose build failed, rolling back"
restore_backup "$api_dir" "$from_version"
local elapsed=$(( $(date +%s) - start_time ))
write_result "false" "Build failed, rolled back to ${from_version}" "$version" "$((elapsed * 1000))"
rm -f "$TRIGGER_FILE"
exit 1
fi
# Step 4: Restart container
log "Step 4: Restarting container"
if ! docker compose up -d 2>&1; then
log "ERROR: docker compose up failed, rolling back"
restore_backup "$api_dir" "$from_version"
docker compose build --quiet 2>&1 || true
docker compose up -d 2>&1 || true
local elapsed=$(( $(date +%s) - start_time ))
write_result "false" "Container start failed, rolled back to ${from_version}" "$version" "$((elapsed * 1000))"
rm -f "$TRIGGER_FILE"
exit 1
fi
# Step 5: Health check
log "Step 5: Waiting for health check (${HEALTH_TIMEOUT}s timeout)"
if wait_for_health; then
local elapsed=$(( $(date +%s) - start_time ))
log "=== Update to ${version} SUCCESSFUL (${elapsed}s) ==="
write_result "true" "Update successful" "$version" "$((elapsed * 1000))"
else
log "Health check failed — ROLLING BACK to ${from_version}"
restore_backup "$api_dir" "$from_version"
cd "$compose_dir"
docker compose build --quiet 2>&1 || true
docker compose up -d 2>&1 || true
if wait_for_health; then
local elapsed=$(( $(date +%s) - start_time ))
log "Rollback to ${from_version} succeeded"
write_result "false" "Health check failed after update. Rolled back to ${from_version}." "$version" "$((elapsed * 1000))"
else
local elapsed=$(( $(date +%s) - start_time ))
log "CRITICAL: Rollback also failed. Manual intervention required."
write_result "false" "CRITICAL: Both update and rollback failed. Manual intervention required." "$version" "$((elapsed * 1000))"
fi
fi
# Cleanup
rm -f "$TRIGGER_FILE"
rm -rf "${UPDATES_DIR}/staging"
cleanup_old_backups
log "Update process complete"
}
main "$@"