Add Docker hygiene, deployment manifests, and daily log digest

Prevents Docker disk bloat by adding log rotation (10MB max, 3 files) to all container creation and update paths, auto-pruning dangling images after deploy/remove/update, and a daily maintenance module that cleans build cache and warns on disk thresholds.

Saves a deployment manifest in services.json at deploy time so users can restore all their apps after a Docker purge. Adds restore-all and restore-single endpoints that recreate containers, Caddy config, and DNS records from the saved manifests.

Adds an hourly log collector and daily digest generator that summarizes errors, warnings, and events across all services into a single human-readable report with guidance on where to investigate.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
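For a quick sense of the new surface area, here is a minimal sketch of exercising the restore and digest endpoints once this commit is deployed. The base URL, port, and route prefix are placeholders (auth, if any, is omitted); the routes themselves come from the diff below, and global fetch assumes Node 18+.

// Sketch only: restore every service with a saved manifest, then read the
// latest digest. Adjust the base URL for your install.
const base = 'http://localhost:3000';

async function demo() {
  const restored = await fetch(`${base}/apps/restore-all`, { method: 'POST' });
  console.log((await restored.json()).message); // e.g. "Restore complete: 3 restored, 1 skipped, 0 failed"

  const digest = await fetch(`${base}/logs/digest/latest`);
  console.log((await digest.json()).digest?.summary);
}

demo().catch(console.error);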
@@ -103,6 +103,21 @@ const DNS_RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'TXT', 'NS', 'SRV', 'PTR',
 const DOCKER = {
   CONTAINER_PREFIX: 'sami-',
   TIMEOUT: 30000, // 30s — timeout for docker pull/create operations
+  LOG_CONFIG: {
+    Type: 'json-file',
+    Config: { 'max-size': '10m', 'max-file': '3' } // 30MB max per container
+  },
+  MAINTENANCE: {
+    INTERVAL: 24 * 60 * 60 * 1000, // 24 hours
+    DISK_WARN_GB: 20, // Warn when Docker uses more than 20GB
+  },
+  DIGEST: {
+    COLLECT_INTERVAL: 60 * 60 * 1000, // Hourly log collection
+    DIGEST_HOUR: 0, // Generate daily digest at midnight
+    MAX_HOURLY_ENTRIES: 24, // Keep 24 hours of hourly summaries
+    MAX_DIGEST_FILES: 30, // Keep 30 days of daily digests
+    LOG_TAIL: 500, // Lines to fetch per container per hour
+  },
 };
 
 // ── Emby/Jellyfin Auth Header Builder ─────────────────────────
dashcaddy-api/docker-maintenance.js (new file, +212 lines)
@@ -0,0 +1,212 @@
+/**
+ * Docker Maintenance Module
+ * Scheduled cleanup to prevent Docker disk bloat:
+ * - Prunes dangling images
+ * - Prunes stopped non-managed containers
+ * - Clears build cache
+ * - Monitors disk usage and warns when thresholds exceeded
+ */
+
+const Docker = require('dockerode');
+const EventEmitter = require('events');
+const { DOCKER } = require('./constants');
+
+const docker = new Docker();
+
+class DockerMaintenance extends EventEmitter {
+  constructor() {
+    super();
+    this.interval = null;
+    this.running = false;
+    this.lastRun = null;
+    this.lastResult = null;
+  }
+
+  start() {
+    if (this.running) return;
+    this.running = true;
+
+    // Run first maintenance 5 minutes after startup (let everything settle)
+    setTimeout(() => {
+      if (!this.running) return;
+      this.runMaintenance().catch(() => {});
+    }, 5 * 60 * 1000);
+
+    // Then run on the configured interval (default 24h)
+    this.interval = setInterval(() => {
+      this.runMaintenance().catch(() => {});
+    }, DOCKER.MAINTENANCE.INTERVAL);
+  }
+
+  stop() {
+    if (!this.running) return;
+    this.running = false;
+    if (this.interval) {
+      clearInterval(this.interval);
+      this.interval = null;
+    }
+  }
+
+  async runMaintenance() {
+    const startTime = Date.now();
+    const result = {
+      timestamp: new Date().toISOString(),
+      pruned: { images: 0, containers: 0, buildCache: 0 },
+      spaceReclaimed: { images: 0, containers: 0, buildCache: 0, total: 0 },
+      diskUsage: null,
+      warnings: [],
+      containersWithoutLogLimits: []
+    };
+
+    try {
+      // 1. Prune dangling images
+      try {
+        const imgResult = await docker.pruneImages({ filters: { dangling: { true: true } } });
+        result.pruned.images = (imgResult.ImagesDeleted || []).length;
+        result.spaceReclaimed.images = imgResult.SpaceReclaimed || 0;
+      } catch (e) {
+        result.warnings.push(`Image prune failed: ${e.message}`);
+      }
+
+      // 2. Prune stopped containers (only non-managed ones)
+      try {
+        const stopped = await docker.listContainers({
+          all: true,
+          filters: { status: ['exited', 'dead'] }
+        });
+        for (const c of stopped) {
+          // Skip DashCaddy-managed containers — user may want to restart them
+          if (c.Labels?.['sami.managed'] === 'true') continue;
+          // Skip containers stopped less than 24h ago
+          const stoppedAge = Date.now() / 1000 - c.Created;
+          if (stoppedAge < 86400) continue;
+          try {
+            const container = docker.getContainer(c.Id);
+            await container.remove({ force: true });
+            result.pruned.containers++;
+          } catch (e) {
+            // Container may have been removed between list and remove
+          }
+        }
+      } catch (e) {
+        result.warnings.push(`Container prune failed: ${e.message}`);
+      }
+
+      // 3. Prune build cache
+      try {
+        const cacheResult = await docker.pruneBuilder();
+        result.spaceReclaimed.buildCache = cacheResult.SpaceReclaimed || 0;
+        result.pruned.buildCache = (cacheResult.CachesDeleted || []).length;
+      } catch (e) {
+        // Build cache prune may not be available on all Docker versions
+        result.warnings.push(`Build cache prune failed: ${e.message}`);
+      }
+
+      // 4. Get disk usage
+      try {
+        const df = await docker.df();
+        result.diskUsage = {
+          images: {
+            count: (df.Images || []).length,
+            sizeBytes: (df.Images || []).reduce((sum, i) => sum + (i.Size || 0), 0)
+          },
+          containers: {
+            count: (df.Containers || []).length,
+            sizeBytes: (df.Containers || []).reduce((sum, c) => sum + (c.SizeRw || 0), 0)
+          },
+          volumes: {
+            count: (df.Volumes?.Volumes || []).length,
+            sizeBytes: (df.Volumes?.Volumes || []).reduce((sum, v) => sum + (v.UsageData?.Size || 0), 0)
+          },
+          buildCache: {
+            count: (df.BuildCache || []).length,
+            sizeBytes: (df.BuildCache || []).reduce((sum, b) => sum + (b.Size || 0), 0)
+          }
+        };
+        result.diskUsage.totalBytes =
+          result.diskUsage.images.sizeBytes +
+          result.diskUsage.containers.sizeBytes +
+          result.diskUsage.volumes.sizeBytes +
+          result.diskUsage.buildCache.sizeBytes;
+        result.diskUsage.totalGB = +(result.diskUsage.totalBytes / (1024 ** 3)).toFixed(2);
+
+        if (result.diskUsage.totalGB > DOCKER.MAINTENANCE.DISK_WARN_GB) {
+          result.warnings.push(`Docker disk usage is ${result.diskUsage.totalGB}GB (threshold: ${DOCKER.MAINTENANCE.DISK_WARN_GB}GB)`);
+        }
+      } catch (e) {
+        result.warnings.push(`Disk usage check failed: ${e.message}`);
+      }
+
+      // 5. Check for containers without log rotation
+      try {
+        const running = await docker.listContainers({ all: false });
+        for (const c of running) {
+          if (c.Labels?.['sami.managed'] !== 'true') continue;
+          try {
+            const container = docker.getContainer(c.Id);
+            const info = await container.inspect();
+            const logConfig = info.HostConfig?.LogConfig;
+            if (!logConfig?.Config?.['max-size']) {
+              result.containersWithoutLogLimits.push({
+                name: c.Names[0]?.replace(/^\//, '') || c.Id.slice(0, 12),
+                id: c.Id.slice(0, 12)
+              });
+            }
+          } catch (e) {
+            // Container may have stopped between list and inspect
+          }
+        }
+        if (result.containersWithoutLogLimits.length > 0) {
+          result.warnings.push(
+            `${result.containersWithoutLogLimits.length} container(s) have no log rotation — restart or update them to apply log limits: ${result.containersWithoutLogLimits.map(c => c.name).join(', ')}`
+          );
+        }
+      } catch (e) {
+        result.warnings.push(`Log config check failed: ${e.message}`);
+      }
+
+      result.spaceReclaimed.total =
+        result.spaceReclaimed.images +
+        result.spaceReclaimed.containers +
+        result.spaceReclaimed.buildCache;
+
+      result.duration = Date.now() - startTime;
+      this.lastRun = new Date().toISOString();
+      this.lastResult = result;
+
+      this.emit('maintenance-complete', result);
+      return result;
+    } catch (error) {
+      result.error = error.message;
+      result.duration = Date.now() - startTime;
+      this.lastResult = result;
+      this.emit('maintenance-failed', result);
+      throw error;
+    }
+  }
+
+  /** Get Docker disk usage snapshot (callable on demand) */
+  async getDiskUsage() {
+    try {
+      const df = await docker.df();
+      const images = { count: (df.Images || []).length, sizeBytes: (df.Images || []).reduce((sum, i) => sum + (i.Size || 0), 0) };
+      const containers = { count: (df.Containers || []).length, sizeBytes: (df.Containers || []).reduce((sum, c) => sum + (c.SizeRw || 0), 0) };
+      const volumes = { count: (df.Volumes?.Volumes || []).length, sizeBytes: (df.Volumes?.Volumes || []).reduce((sum, v) => sum + (v.UsageData?.Size || 0), 0) };
+      const buildCache = { count: (df.BuildCache || []).length, sizeBytes: (df.BuildCache || []).reduce((sum, b) => sum + (b.Size || 0), 0) };
+      const totalBytes = images.sizeBytes + containers.sizeBytes + volumes.sizeBytes + buildCache.sizeBytes;
+      return { images, containers, volumes, buildCache, totalBytes, totalGB: +(totalBytes / (1024 ** 3)).toFixed(2) };
+    } catch (e) {
+      return null;
+    }
+  }
+
+  getStatus() {
+    return {
+      running: this.running,
+      lastRun: this.lastRun,
+      lastResult: this.lastResult
+    };
+  }
+}
+
+module.exports = new DockerMaintenance();
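One detail in the module above worth unpacking: the prune filter is passed in the Engine API's map form. dockerode JSON-encodes the filters object, and the engine accepts both the map form used in this commit and the list form; either selects only dangling (untagged) images, the same set `docker image prune` removes by default. A minimal sketch, assuming a local Docker socket:

const Docker = require('dockerode');
const docker = new Docker(); // default: local Docker socket

async function pruneDangling() {
  // Map form, as used throughout this commit:
  await docker.pruneImages({ filters: { dangling: { true: true } } });
  // List form, equivalent on current Engine API versions:
  await docker.pruneImages({ filters: { dangling: ['true'] } });
}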
dashcaddy-api/log-digest.js (new file, +575 lines)
@@ -0,0 +1,575 @@
+/**
+ * Log Digest Module
+ * Collects container logs hourly, generates daily summaries.
+ * Gives users a single place to see what happened across all services
+ * and guidance on where to look for more detail.
+ */
+
+const Docker = require('dockerode');
+const EventEmitter = require('events');
+const fs = require('fs');
+const fsp = require('fs').promises;
+const path = require('path');
+const { DOCKER } = require('./constants');
+
+const docker = new Docker();
+
+const ERROR_PATTERNS = [
+  /\berror\b/i, /\bfailed\b/i, /\bfatal\b/i, /\bpanic\b/i,
+  /\bcrash(ed)?\b/i, /\bexception\b/i, /\btimeout\b/i,
+  /\bOOM\b/, /\bout of memory\b/i, /\bkilled\b/i,
+  /\bdenied\b/i, /\bunauthorized\b/i, /\brefused\b/i
+];
+
+const WARNING_PATTERNS = [
+  /\bwarn(ing)?\b/i, /\bdeprecated\b/i, /\bretry(ing)?\b/i,
+  /\bslow\b/i, /\blatency\b/i
+];
+
+const EVENT_PATTERNS = [
+  { pattern: /\b(start(ed|ing)?|boot(ed|ing)?|init(ializ(ed|ing))?)\b/i, type: 'startup' },
+  { pattern: /\b(stop(ped|ping)?|shutdown|exit(ed|ing)?|terminat(ed|ing)?)\b/i, type: 'shutdown' },
+  { pattern: /\b(restart(ed|ing)?|reload(ed|ing)?)\b/i, type: 'restart' },
+  { pattern: /\bhealth.?check.*(fail|unhealthy)\b/i, type: 'health_failure' },
+  { pattern: /\b(update|upgrade|migration)\b/i, type: 'update' }
+];
+
+class LogDigest extends EventEmitter {
+  constructor() {
+    super();
+    this.collectInterval = null;
+    this.digestTimeout = null;
+    this.running = false;
+    this.hourlySummaries = []; // Ring buffer of hourly snapshots
+    this.digestDir = null; // Set during start()
+    this.lastCollect = null;
+    this._lastCollectTimestamp = {}; // Per-container: last log timestamp fetched
+  }
+
+  /**
+   * Start the log digest system.
+   * @param {string} digestDir - Directory to write daily digest files
+   */
+  start(digestDir) {
+    if (this.running) return;
+    this.running = true;
+    this.digestDir = digestDir;
+
+    // Ensure digest directory exists
+    if (!fs.existsSync(digestDir)) {
+      fs.mkdirSync(digestDir, { recursive: true });
+    }
+
+    // Collect logs every hour
+    this.collectInterval = setInterval(() => {
+      this._collectHourlyLogs().catch(e =>
+        console.error('[LogDigest] Hourly collection failed:', e.message)
+      );
+    }, DOCKER.DIGEST.COLLECT_INTERVAL);
+
+    // Schedule daily digest generation
+    this._scheduleDailyDigest();
+
+    // Run initial collection after 2 minutes
+    setTimeout(() => {
+      if (this.running) {
+        this._collectHourlyLogs().catch(() => {});
+      }
+    }, 2 * 60 * 1000);
+  }
+
+  stop() {
+    if (!this.running) return;
+    this.running = false;
+    if (this.collectInterval) {
+      clearInterval(this.collectInterval);
+      this.collectInterval = null;
+    }
+    if (this.digestTimeout) {
+      clearTimeout(this.digestTimeout);
+      this.digestTimeout = null;
+    }
+  }
+
+  /**
+   * Collect logs from all managed containers for the last hour.
+   */
+  async _collectHourlyLogs() {
+    const now = new Date();
+    const sinceTimestamp = Math.floor((now.getTime() - DOCKER.DIGEST.COLLECT_INTERVAL) / 1000);
+    const hourKey = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}T${String(now.getHours()).padStart(2, '0')}:00`;
+
+    const hourSummary = {
+      hour: hourKey,
+      timestamp: now.toISOString(),
+      services: {}
+    };
+
+    try {
+      const containers = await docker.listContainers({ all: true });
+      const managed = containers.filter(c => c.Labels?.['sami.managed'] === 'true');
+
+      for (const containerInfo of managed) {
+        const name = containerInfo.Names[0]?.replace(/^\//, '') || containerInfo.Id.slice(0, 12);
+        const appId = containerInfo.Labels['sami.app'] || name;
+        const isRunning = containerInfo.State === 'running';
+
+        const serviceSummary = {
+          name,
+          appId,
+          state: containerInfo.State,
+          errors: [],
+          warnings: [],
+          events: [],
+          errorCount: 0,
+          warningCount: 0,
+          totalLines: 0
+        };
+
+        if (isRunning) {
+          try {
+            const container = docker.getContainer(containerInfo.Id);
+            const logBuffer = await container.logs({
+              stdout: true,
+              stderr: true,
+              since: sinceTimestamp,
+              tail: DOCKER.DIGEST.LOG_TAIL,
+              timestamps: true
+            });
+
+            const lines = this._parseDockerLogs(logBuffer);
+            serviceSummary.totalLines = lines.length;
+
+            for (const line of lines) {
+              // Check for errors
+              if (line.stream === 'stderr' || ERROR_PATTERNS.some(p => p.test(line.text))) {
+                serviceSummary.errorCount++;
+                if (serviceSummary.errors.length < 10) {
+                  serviceSummary.errors.push({
+                    time: line.timestamp || hourKey,
+                    text: line.text.slice(0, 500)
+                  });
+                }
+                continue;
+              }
+
+              // Check for warnings
+              if (WARNING_PATTERNS.some(p => p.test(line.text))) {
+                serviceSummary.warningCount++;
+                if (serviceSummary.warnings.length < 5) {
+                  serviceSummary.warnings.push({
+                    time: line.timestamp || hourKey,
+                    text: line.text.slice(0, 300)
+                  });
+                }
+                continue;
+              }
+
+              // Check for notable events
+              for (const { pattern, type } of EVENT_PATTERNS) {
+                if (pattern.test(line.text)) {
+                  serviceSummary.events.push({
+                    type,
+                    time: line.timestamp || hourKey,
+                    text: line.text.slice(0, 300)
+                  });
+                  break;
+                }
+              }
+            }
+          } catch (logErr) {
+            serviceSummary.errors.push({
+              time: now.toISOString(),
+              text: `Failed to fetch logs: ${logErr.message}`
+            });
+            serviceSummary.errorCount++;
+          }
+        } else {
+          serviceSummary.events.push({
+            type: 'not_running',
+            time: now.toISOString(),
+            text: `Container is ${containerInfo.State}`
+          });
+        }
+
+        hourSummary.services[appId] = serviceSummary;
+      }
+    } catch (e) {
+      console.error('[LogDigest] Container enumeration failed:', e.message);
+    }
+
+    // Add to ring buffer
+    this.hourlySummaries.push(hourSummary);
+    if (this.hourlySummaries.length > DOCKER.DIGEST.MAX_HOURLY_ENTRIES) {
+      this.hourlySummaries.shift();
+    }
+
+    this.lastCollect = now.toISOString();
+    this.emit('hourly-collected', hourSummary);
+    return hourSummary;
+  }
+
+  /**
+   * Parse Docker multiplexed log stream into lines.
+   */
+  _parseDockerLogs(logData) {
+    const lines = [];
+    const buffer = Buffer.isBuffer(logData) ? logData : Buffer.from(logData);
+    let offset = 0;
+
+    while (offset < buffer.length) {
+      if (offset + 8 > buffer.length) break;
+      const streamType = buffer[0 + offset];
+      const size = buffer.readUInt32BE(4 + offset);
+      if (offset + 8 + size > buffer.length) break;
+
+      const text = buffer.slice(offset + 8, offset + 8 + size).toString('utf8').trim();
+      if (text) {
+        // Try to extract timestamp from Docker's format: "2026-03-13T12:00:00.000000000Z message"
+        let timestamp = null;
+        let message = text;
+        const tsMatch = text.match(/^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})\.\d+Z\s(.*)$/s);
+        if (tsMatch) {
+          timestamp = tsMatch[1];
+          message = tsMatch[2];
+        }
+
+        lines.push({
+          stream: streamType === 2 ? 'stderr' : 'stdout',
+          text: message,
+          timestamp
+        });
+      }
+      offset += 8 + size;
+    }
+    return lines;
+  }
+
+  /**
+   * Schedule the daily digest at the configured hour.
+   */
+  _scheduleDailyDigest() {
+    const now = new Date();
+    const targetHour = DOCKER.DIGEST.DIGEST_HOUR;
+    const next = new Date(now);
+    next.setHours(targetHour, 5, 0, 0); // 5 minutes past the hour
+    if (next <= now) next.setDate(next.getDate() + 1);
+
+    const delay = next.getTime() - now.getTime();
+    this.digestTimeout = setTimeout(() => {
+      this.generateDailyDigest().catch(e =>
+        console.error('[LogDigest] Daily digest generation failed:', e.message)
+      );
+      // Reschedule for tomorrow
+      if (this.running) this._scheduleDailyDigest();
+    }, delay);
+  }
+
+  /**
+   * Generate the daily digest from accumulated hourly summaries.
+   * Can also be called on-demand.
+   */
+  async generateDailyDigest(dateStr) {
+    const date = dateStr || new Date(Date.now() - 86400000).toISOString().slice(0, 10);
+    const relevantHours = this.hourlySummaries.filter(h => h.hour.startsWith(date));
+
+    // Aggregate per-service stats across all hours
+    const serviceAgg = {};
+    const notableEvents = [];
+
+    for (const hour of relevantHours) {
+      for (const [appId, svc] of Object.entries(hour.services)) {
+        if (!serviceAgg[appId]) {
+          serviceAgg[appId] = {
+            name: svc.name,
+            appId,
+            totalErrors: 0,
+            totalWarnings: 0,
+            totalLines: 0,
+            lastState: svc.state,
+            topErrors: [],
+            events: []
+          };
+        }
+        const agg = serviceAgg[appId];
+        agg.totalErrors += svc.errorCount;
+        agg.totalWarnings += svc.warningCount;
+        agg.totalLines += svc.totalLines;
+        agg.lastState = svc.state;
+
+        // Keep top errors (deduplicated-ish)
+        for (const err of svc.errors) {
+          if (agg.topErrors.length < 5) {
+            agg.topErrors.push(err);
+          }
+        }
+
+        // Collect notable events
+        for (const evt of svc.events) {
+          notableEvents.push({ ...evt, service: svc.name, appId });
+        }
+      }
+    }
+
+    // Get Docker disk usage
+    let diskUsage = null;
+    try {
+      const dockerMaintenance = require('./docker-maintenance');
+      diskUsage = await dockerMaintenance.getDiskUsage();
+    } catch (e) {
+      // Module may not be loaded yet
+    }
+
+    // Build digest object
+    const digest = {
+      date,
+      generatedAt: new Date().toISOString(),
+      hoursCollected: relevantHours.length,
+      services: serviceAgg,
+      notableEvents: notableEvents.sort((a, b) => (a.time || '').localeCompare(b.time || '')),
+      diskUsage,
+      summary: {
+        totalServices: Object.keys(serviceAgg).length,
+        servicesWithErrors: Object.values(serviceAgg).filter(s => s.totalErrors > 0).length,
+        totalErrors: Object.values(serviceAgg).reduce((sum, s) => sum + s.totalErrors, 0),
+        totalWarnings: Object.values(serviceAgg).reduce((sum, s) => sum + s.totalWarnings, 0)
+      }
+    };
+
+    // Write formatted digest file
+    const formatted = this._formatDigest(digest);
+    const filename = `digest-${date}.log`;
+    const filepath = path.join(this.digestDir, filename);
+    await fsp.writeFile(filepath, formatted, 'utf8');
+
+    // Also write JSON for API consumption
+    const jsonPath = path.join(this.digestDir, `digest-${date}.json`);
+    await fsp.writeFile(jsonPath, JSON.stringify(digest, null, 2), 'utf8');
+
+    // Cleanup old digests
+    await this._cleanupOldDigests();
+
+    this.emit('digest-generated', { date, filepath, digest });
+    return digest;
+  }
+
+  /**
+   * Format digest into human-readable text.
+   */
+  _formatDigest(digest) {
+    const lines = [];
+    const hr = '='.repeat(55);
+    const sr = '-'.repeat(55);
+
+    lines.push(hr);
+    lines.push(' DashCaddy Daily Log Digest');
+    lines.push(` ${digest.date}`);
+    lines.push(` Generated: ${digest.generatedAt}`);
+    lines.push(hr);
+    lines.push('');
+
+    // Service summary table
+    lines.push('-- Service Summary ' + '-'.repeat(36));
+    const services = Object.values(digest.services);
+    if (services.length === 0) {
+      lines.push(' No managed services found.');
+    } else {
+      for (const svc of services) {
+        const stateIcon = svc.lastState === 'running' ? 'OK' : '!!';
+        const errStr = `${svc.totalErrors} error${svc.totalErrors !== 1 ? 's' : ''}`;
+        const warnStr = `${svc.totalWarnings} warning${svc.totalWarnings !== 1 ? 's' : ''}`;
+        const flag = svc.totalErrors > 0 ? ' <-- investigate' : '';
+        lines.push(` ${svc.name.padEnd(18)} ${stateIcon.padEnd(10)} ${errStr.padEnd(14)} ${warnStr}${flag}`);
+      }
+    }
+    lines.push('');
+
+    // Notable events
+    const events = digest.notableEvents;
+    if (events.length > 0) {
+      lines.push('-- Notable Events ' + '-'.repeat(37));
+      for (const evt of events) {
+        const time = (evt.time || '').slice(11, 16) || '??:??';
+        lines.push(` [${time}] ${evt.service}: ${evt.text.slice(0, 80)}`);
+        // Add guidance for where to look further
+        const containerName = `${DOCKER.CONTAINER_PREFIX}${evt.appId}`;
+        if (evt.type === 'health_failure' || evt.type === 'restart') {
+          const sinceDate = digest.date + 'T' + (evt.time || '').slice(11, 13) + ':00:00';
+          lines.push(` See: docker logs ${containerName} --since ${sinceDate}`);
+        }
+      }
+      lines.push('');
+    }
+
+    // Top errors per service
+    const errServices = services.filter(s => s.totalErrors > 0);
+    if (errServices.length > 0) {
+      lines.push('-- Error Details ' + '-'.repeat(38));
+      for (const svc of errServices) {
+        lines.push(` ${svc.name} (${svc.totalErrors} errors):`);
+        for (const err of svc.topErrors) {
+          const time = (err.time || '').slice(11, 16) || '??:??';
+          lines.push(` [${time}] ${err.text.slice(0, 100)}`);
+        }
+        const containerName = `${DOCKER.CONTAINER_PREFIX}${svc.appId}`;
+        lines.push(` Full logs: docker logs ${containerName} --since ${digest.date}T00:00:00`);
+        lines.push('');
+      }
+    }
+
+    // Docker disk usage
+    if (digest.diskUsage) {
+      lines.push('-- Docker Disk Usage ' + '-'.repeat(34));
+      const du = digest.diskUsage;
+      lines.push(` Images: ${formatBytes(du.images.sizeBytes)} (${du.images.count} images)`);
+      lines.push(` Containers: ${formatBytes(du.containers.sizeBytes)}`);
+      lines.push(` Volumes: ${formatBytes(du.volumes.sizeBytes)} (${du.volumes.count} volumes)`);
+      lines.push(` Build Cache: ${formatBytes(du.buildCache.sizeBytes)}`);
+      lines.push(` Total: ${du.totalGB} GB`);
+      if (du.totalGB > DOCKER.MAINTENANCE.DISK_WARN_GB) {
+        lines.push(` WARNING: Exceeds ${DOCKER.MAINTENANCE.DISK_WARN_GB}GB threshold!`);
+        lines.push(' Run: docker system prune -a (removes unused images/cache)');
+      }
+      lines.push('');
+    }
+
+    // Summary
+    lines.push(sr);
+    lines.push(` ${digest.summary.totalServices} service(s) monitored | ${digest.summary.totalErrors} error(s) | ${digest.summary.totalWarnings} warning(s)`);
+    lines.push(` Hours collected: ${digest.hoursCollected}/24`);
+    lines.push(hr);
+
+    return lines.join('\n') + '\n';
+  }
+
+  /**
+   * Remove digest files older than MAX_DIGEST_FILES days.
+   */
+  async _cleanupOldDigests() {
+    if (!this.digestDir) return;
+    try {
+      const files = await fsp.readdir(this.digestDir);
+      const digestFiles = files.filter(f => f.startsWith('digest-')).sort();
+      // Each date has .log + .json = 2 files per day
+      const maxFiles = DOCKER.DIGEST.MAX_DIGEST_FILES * 2;
+      if (digestFiles.length > maxFiles) {
+        const toDelete = digestFiles.slice(0, digestFiles.length - maxFiles);
+        for (const f of toDelete) {
+          await fsp.unlink(path.join(this.digestDir, f)).catch(() => {});
+        }
+      }
+    } catch (e) {
+      // Directory may not exist yet
+    }
+  }
+
+  /**
+   * Get the latest daily digest (JSON).
+   */
+  async getLatestDigest() {
+    if (!this.digestDir) return null;
+    try {
+      const files = await fsp.readdir(this.digestDir);
+      const jsonFiles = files.filter(f => f.endsWith('.json')).sort();
+      if (jsonFiles.length === 0) return null;
+      const latest = path.join(this.digestDir, jsonFiles[jsonFiles.length - 1]);
+      return JSON.parse(await fsp.readFile(latest, 'utf8'));
+    } catch (e) {
+      return null;
+    }
+  }
+
+  /**
+   * Get digest for a specific date.
+   */
+  async getDigestByDate(dateStr) {
+    if (!this.digestDir) return null;
+    const jsonPath = path.join(this.digestDir, `digest-${dateStr}.json`);
+    try {
+      return JSON.parse(await fsp.readFile(jsonPath, 'utf8'));
+    } catch (e) {
+      return null;
+    }
+  }
+
+  /**
+   * Get the formatted text version of a digest.
+   */
+  async getDigestText(dateStr) {
+    if (!this.digestDir) return null;
+    const logPath = path.join(this.digestDir, `digest-${dateStr}.log`);
+    try {
+      return await fsp.readFile(logPath, 'utf8');
+    } catch (e) {
+      return null;
+    }
+  }
+
+  /**
+   * List available digest dates.
+   */
+  async listDigests() {
+    if (!this.digestDir) return [];
+    try {
+      const files = await fsp.readdir(this.digestDir);
+      return files
+        .filter(f => f.endsWith('.json'))
+        .map(f => f.replace('digest-', '').replace('.json', ''))
+        .sort()
+        .reverse();
+    } catch (e) {
+      return [];
+    }
+  }
+
+  /**
+   * Get live data: current day's accumulated hourly summaries.
+   */
+  getLiveData() {
+    const today = new Date().toISOString().slice(0, 10);
+    const todayHours = this.hourlySummaries.filter(h => h.hour.startsWith(today));
+
+    // Aggregate
+    const serviceAgg = {};
+    for (const hour of todayHours) {
+      for (const [appId, svc] of Object.entries(hour.services)) {
+        if (!serviceAgg[appId]) {
+          serviceAgg[appId] = { name: svc.name, appId, totalErrors: 0, totalWarnings: 0, lastState: svc.state, recentErrors: [] };
+        }
+        serviceAgg[appId].totalErrors += svc.errorCount;
+        serviceAgg[appId].totalWarnings += svc.warningCount;
+        serviceAgg[appId].lastState = svc.state;
+        for (const err of svc.errors) {
+          if (serviceAgg[appId].recentErrors.length < 10) {
+            serviceAgg[appId].recentErrors.push(err);
+          }
+        }
+      }
+    }
+
+    return {
+      date: today,
+      hoursCollected: todayHours.length,
+      lastCollect: this.lastCollect,
+      services: serviceAgg
+    };
+  }
+
+  getStatus() {
+    return {
+      running: this.running,
+      lastCollect: this.lastCollect,
+      hourlySummaries: this.hourlySummaries.length,
+      digestDir: this.digestDir
+    };
+  }
+}
+
+function formatBytes(bytes) {
+  if (bytes === 0) return '0 B';
+  const units = ['B', 'KB', 'MB', 'GB', 'TB'];
+  const i = Math.floor(Math.log(bytes) / Math.log(1024));
+  return (bytes / Math.pow(1024, i)).toFixed(1) + ' ' + units[i];
+}
+
+module.exports = new LogDigest();
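The _parseDockerLogs loop above decodes Docker's multiplexed log stream: each frame is an 8-byte header (byte 0 is the stream type, 1 for stdout and 2 for stderr; bytes 4-7 are the big-endian payload length) followed by the payload. A tiny self-contained check of that layout, not part of the commit:

// Build one synthetic stderr frame and verify the header fields the
// parser relies on. Runs under plain Node; Buffer is global.
const payload = Buffer.from('2026-03-13T12:00:00.000000000Z connection refused\n', 'utf8');
const header = Buffer.alloc(8);              // bytes 1-3 stay zero
header[0] = 2;                               // stream type: 2 = stderr
header.writeUInt32BE(payload.length, 4);     // bytes 4-7: payload size, big-endian
const frame = Buffer.concat([header, payload]);

console.log(frame[0] === 2);                           // true
console.log(frame.readUInt32BE(4) === payload.length); // true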
@@ -41,6 +41,9 @@ const paths = {
   // Docker data path for app volumes
   appData: (appName) => path.join(DOCKER_DATA, appName),
 
+  // Log digest directory
+  digestDir: process.env.DIGEST_DIR || path.join(CADDY_BASE, 'digests'),
+
   // Log paths (for allowed log file access)
   allowedLogPaths: isWindows
     ? [
@@ -113,7 +113,8 @@ module.exports = function(ctx, helpers) {
       HostConfig: {
         PortBindings: {},
         Binds: processedTemplate.docker.volumes || [],
-        RestartPolicy: { Name: 'unless-stopped' }
+        RestartPolicy: { Name: 'unless-stopped' },
+        LogConfig: DOCKER.LOG_CONFIG
       },
       Env: Object.entries(processedTemplate.docker.environment || {}).map(([k, v]) => `${k}=${v}`),
       Labels: {
@@ -152,6 +153,16 @@ module.exports = function(ctx, helpers) {
     const container = await ctx.docker.client.createContainer(containerConfig);
     await container.start();
 
+    // Prune dangling images to prevent disk bloat
+    try {
+      const pruneResult = await ctx.docker.client.pruneImages({ filters: { dangling: { true: true } } });
+      if (pruneResult.SpaceReclaimed > 0) {
+        ctx.log.info('docker', 'Pruned dangling images after deploy', { spaceReclaimed: Math.round(pruneResult.SpaceReclaimed / 1024 / 1024) + 'MB' });
+      }
+    } catch (pruneErr) {
+      ctx.log.debug('docker', 'Image prune after deploy failed', { error: pruneErr.message });
+    }
+
     await ctx.portLockManager.releasePorts(lockId);
     ctx.log.info('deploy', 'Port locks released', { lockId });
     return container.id;
@@ -294,6 +305,44 @@ module.exports = function(ctx, helpers) {
     // Build service URL based on routing mode
     const serviceUrl = ctx.buildServiceUrl(config.subdomain);
 
+    // Build deployment manifest — the full recipe to recreate this container
+    const deploymentManifest = {
+      templateId: appId,
+      config: {
+        subdomain: config.subdomain,
+        port: config.port || template.defaultPort,
+        ip: config.ip,
+        mediaPath: config.mediaPath || undefined,
+        createDns: config.createDns || false,
+        tailscaleOnly: config.tailscaleOnly || false,
+        allowedIPs: config.allowedIPs || [],
+        customVolumes: config.customVolumes || undefined,
+        useExisting: false
+      },
+      container: template.isStaticSite ? null : {
+        image: processedTemplate.docker.image,
+        ports: processedTemplate.docker.ports,
+        volumes: processedTemplate.docker.volumes || [],
+        environment: (() => {
+          // Strip sensitive values from stored env (claim tokens, secrets)
+          const env = { ...processedTemplate.docker.environment };
+          for (const key of Object.keys(env)) {
+            if (/claim|secret|password|token|key/i.test(key) && env[key]) {
+              env[key] = ''; // Clear sensitive values — user re-enters on restore
+            }
+          }
+          return env;
+        })(),
+        capabilities: processedTemplate.docker.capabilities || undefined
+      },
+      caddy: {
+        tailscaleOnly: config.tailscaleOnly || false,
+        allowedIPs: config.allowedIPs || [],
+        subpathSupport: template.subpathSupport || 'strip',
+        routingMode: ctx.siteConfig.routingMode
+      }
+    };
+
     await ctx.addServiceToConfig({
       id: config.subdomain, name: template.name,
       logo: template.logo || `/assets/${appId}.png`,
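To make the shape concrete, here is a hypothetical manifest as the hunk above would store it in services.json for a container app. Every value is invented for illustration; note that API_KEY ends up blanked by the secret-stripping logic:

// Hypothetical example only — not from the commit.
const exampleManifest = {
  templateId: 'jellyfin',                     // hypothetical template id
  config: {
    subdomain: 'media', port: 8096, ip: '192.168.1.50',
    createDns: true, tailscaleOnly: false, allowedIPs: [], useExisting: false
  },
  container: {
    image: 'jellyfin/jellyfin:latest',
    ports: ['8096:8096/tcp'],
    volumes: ['/srv/docker/media:/config'],
    environment: { TZ: 'UTC', API_KEY: '' }   // API_KEY cleared; re-entered on restore
  },
  caddy: { tailscaleOnly: false, allowedIPs: [], subpathSupport: 'strip', routingMode: 'subdomain' }
};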
@@ -301,7 +350,8 @@ module.exports = function(ctx, helpers) {
       containerId, appTemplate: appId,
       tailscaleOnly: config.tailscaleOnly || false,
       routingMode: ctx.siteConfig.routingMode,
-      deployedAt: new Date().toISOString()
+      deployedAt: new Date().toISOString(),
+      deploymentManifest
     });
     ctx.log.info('deploy', 'Service added to dashboard', { subdomain: config.subdomain });
 
@@ -3,6 +3,7 @@ const initHelpers = require('./helpers');
 const initDeploy = require('./deploy');
 const initRemoval = require('./removal');
 const initTemplates = require('./templates');
+const initRestore = require('./restore');
 
 module.exports = function(ctx) {
   const router = express.Router();
@@ -11,6 +12,7 @@ module.exports = function(ctx) {
   router.use(initDeploy(ctx, helpers));
   router.use(initRemoval(ctx, helpers));
   router.use(initTemplates(ctx, helpers));
+  router.use(initRestore(ctx, helpers));
 
   return router;
 };
@@ -22,6 +22,15 @@ module.exports = function(ctx, helpers) {
       await container.remove({ force: true });
       results.container = 'removed';
       ctx.log.info('docker', 'Container removed', { containerId });
+      // Prune dangling images after removal
+      try {
+        const pruneResult = await ctx.docker.client.pruneImages({ filters: { dangling: { true: true } } });
+        if (pruneResult.SpaceReclaimed > 0) {
+          ctx.log.info('docker', 'Pruned dangling images after removal', { spaceReclaimed: Math.round(pruneResult.SpaceReclaimed / 1024 / 1024) + 'MB' });
+        }
+      } catch (pruneErr) {
+        ctx.log.debug('docker', 'Image prune after removal failed', { error: pruneErr.message });
+      }
     } catch (error) {
       results.container = error.message.includes('no such container') ? 'already removed' : error.message;
     }
dashcaddy-api/routes/apps/restore.js (new file, +295 lines)
@@ -0,0 +1,295 @@
+const express = require('express');
+const { DOCKER } = require('../../constants');
+
+module.exports = function(ctx, helpers) {
+  const router = express.Router();
+
+  /**
+   * Restore a single service from its deployment manifest.
+   * Pulls image, creates container, starts it, recreates Caddy config.
+   * Skips if container is already running.
+   */
+  router.post('/apps/:appId/restore', ctx.asyncHandler(async (req, res) => {
+    const { appId } = req.params;
+    const services = await ctx.servicesStateManager.read();
+    const service = services.find(s => s.id === appId);
+
+    if (!service) {
+      return ctx.errorResponse(res, 404, `Service "${appId}" not found in services.json`);
+    }
+    if (!service.deploymentManifest) {
+      return ctx.errorResponse(res, 400, `Service "${appId}" has no deployment manifest — it was deployed before the manifest feature was added. Redeploy it manually to create a manifest.`);
+    }
+
+    const result = await restoreService(service);
+    res.json({ success: true, result });
+  }, 'apps-restore'));
+
+  /**
+   * Restore all services that have deployment manifests.
+   * Returns per-service results.
+   */
+  router.post('/apps/restore-all', ctx.asyncHandler(async (req, res) => {
+    const services = await ctx.servicesStateManager.read();
+    const restoreable = services.filter(s => s.deploymentManifest);
+
+    if (restoreable.length === 0) {
+      return res.json({
+        success: true,
+        message: 'No services have deployment manifests to restore',
+        results: []
+      });
+    }
+
+    const results = [];
+    for (const service of restoreable) {
+      try {
+        const result = await restoreService(service);
+        results.push(result);
+      } catch (error) {
+        results.push({
+          id: service.id,
+          name: service.name,
+          status: 'failed',
+          error: error.message
+        });
+      }
+    }
+
+    const succeeded = results.filter(r => r.status === 'restored').length;
+    const skipped = results.filter(r => r.status === 'skipped').length;
+    const failed = results.filter(r => r.status === 'failed').length;
+
+    res.json({
+      success: true,
+      message: `Restore complete: ${succeeded} restored, ${skipped} skipped, ${failed} failed`,
+      results
+    });
+  }, 'apps-restore-all'));
+
+  /**
+   * List all services and their restore status.
+   */
+  router.get('/apps/restore-status', ctx.asyncHandler(async (req, res) => {
+    const services = await ctx.servicesStateManager.read();
+    const status = [];
+
+    for (const service of services) {
+      const entry = {
+        id: service.id,
+        name: service.name,
+        hasManifest: !!service.deploymentManifest,
+        templateId: service.deploymentManifest?.templateId || service.appTemplate || null,
+        deployedAt: service.deployedAt || null,
+        containerRunning: false
+      };
+
+      // Check if container is currently running
+      if (service.containerId) {
+        try {
+          const container = ctx.docker.client.getContainer(service.containerId);
+          const info = await container.inspect();
+          entry.containerRunning = info.State.Running;
+        } catch (e) {
+          entry.containerRunning = false;
+        }
+      }
+
+      status.push(entry);
+    }
+
+    res.json({ success: true, services: status });
+  }, 'apps-restore-status'));
+
+  /**
+   * Core restore logic for a single service.
+   */
+  async function restoreService(service) {
+    const manifest = service.deploymentManifest;
+    const template = ctx.APP_TEMPLATES[manifest.templateId];
+
+    ctx.log.info('restore', `Restoring service: ${service.name}`, { id: service.id, templateId: manifest.templateId });
+
+    // Static sites: just recreate Caddy config
+    if (template?.isStaticSite) {
+      ctx.log.info('restore', `Restoring static site Caddy config: ${service.name}`);
+      const caddyOptions = {
+        tailscaleOnly: manifest.caddy.tailscaleOnly,
+        allowedIPs: manifest.caddy.allowedIPs,
+        subpathSupport: manifest.caddy.subpathSupport,
+      };
+      // Static site Caddy config would need to be regenerated
+      // For now, just confirm the service entry exists
+      return {
+        id: service.id,
+        name: service.name,
+        status: 'restored',
+        type: 'static',
+        message: `Static site "${service.name}" config preserved`
+      };
+    }
+
+    // Docker container: check if already running
+    if (service.containerId) {
+      try {
+        const existing = ctx.docker.client.getContainer(service.containerId);
+        const info = await existing.inspect();
+        if (info.State.Running) {
+          ctx.log.info('restore', `Container already running, skipping: ${service.name}`);
+          return {
+            id: service.id,
+            name: service.name,
+            status: 'skipped',
+            message: 'Container already running'
+          };
+        }
+      } catch (e) {
+        // Container doesn't exist — proceed with restore
+      }
+    }
+
+    // Also check by name (container ID may have changed)
+    const containerName = `${DOCKER.CONTAINER_PREFIX}${manifest.config.subdomain}`;
+    try {
+      const byName = ctx.docker.client.getContainer(containerName);
+      const info = await byName.inspect();
+      if (info.State.Running) {
+        // Update the service entry with the current container ID
+        await ctx.servicesStateManager.update(services => {
+          const svc = services.find(s => s.id === service.id);
+          if (svc) svc.containerId = info.Id;
+          return services;
+        });
+        return {
+          id: service.id,
+          name: service.name,
+          status: 'skipped',
+          message: 'Container already running (found by name)'
+        };
+      }
+      // Exists but not running — remove stale container
+      await byName.remove({ force: true });
+    } catch (e) {
+      // Container doesn't exist — proceed
+    }
+
+    if (!manifest.container) {
+      return {
+        id: service.id,
+        name: service.name,
+        status: 'failed',
+        error: 'No container configuration in manifest'
+      };
+    }
+
+    // Pull image
+    ctx.log.info('restore', `Pulling image: ${manifest.container.image}`);
+    try {
+      await ctx.docker.pull(manifest.container.image);
+    } catch (e) {
+      // Check if image exists locally
+      const images = await ctx.docker.client.listImages({
+        filters: { reference: [manifest.container.image] }
+      });
+      if (images.length === 0) {
+        throw new Error(`Failed to pull image ${manifest.container.image}: ${e.message}`);
+      }
+      ctx.log.warn('restore', `Pull failed, using local image: ${manifest.container.image}`);
+    }
+
+    // Build container config from manifest
+    const containerConfig = {
+      Image: manifest.container.image,
+      name: containerName,
+      ExposedPorts: {},
+      HostConfig: {
+        PortBindings: {},
+        Binds: manifest.container.volumes || [],
+        RestartPolicy: { Name: 'unless-stopped' },
+        LogConfig: DOCKER.LOG_CONFIG
+      },
+      Env: Object.entries(manifest.container.environment || {}).map(([k, v]) => `${k}=${v}`),
+      Labels: {
+        'sami.managed': 'true',
+        'sami.app': manifest.templateId,
+        'sami.subdomain': manifest.config.subdomain,
+        'sami.deployed': new Date().toISOString(),
+        'sami.restored': 'true'
+      }
+    };
+
+    // Set up port bindings
+    (manifest.container.ports || []).forEach(portMapping => {
+      const [hostPort, containerPort, protocol = 'tcp'] = portMapping.split(/[:/]/);
+      const containerPortKey = `${containerPort}/${protocol}`;
+      containerConfig.ExposedPorts[containerPortKey] = {};
+      containerConfig.HostConfig.PortBindings[containerPortKey] = [{ HostPort: hostPort }];
+    });
+
+    if (manifest.container.capabilities) {
+      containerConfig.HostConfig.CapAdd = manifest.container.capabilities;
+    }
+
+    // Create and start container
+    ctx.log.info('restore', `Creating container: ${containerName}`);
+    const container = await ctx.docker.client.createContainer(containerConfig);
+    await container.start();
+    ctx.log.info('restore', `Container started: ${containerName}`);
+
+    // Recreate Caddy config
+    const port = manifest.config.port;
+    const caddyOptions = {
+      tailscaleOnly: manifest.caddy.tailscaleOnly,
+      allowedIPs: manifest.caddy.allowedIPs,
+      subpathSupport: manifest.caddy.subpathSupport,
+    };
+
+    if (manifest.caddy.routingMode === 'subdirectory') {
+      const caddyConfig = ctx.caddy.generateConfig(manifest.config.subdomain, manifest.config.ip, port, caddyOptions);
+      try {
+        await helpers.ensureMainDomainBlock();
+        await helpers.addSubpathConfig(manifest.config.subdomain, caddyConfig);
+      } catch (e) {
+        ctx.log.warn('restore', `Caddy config may already exist: ${e.message}`);
+      }
+    } else {
+      const caddyConfig = ctx.caddy.generateConfig(manifest.config.subdomain, manifest.config.ip, port, caddyOptions);
+      try {
+        await helpers.addCaddyConfig(manifest.config.subdomain, caddyConfig);
+      } catch (e) {
+        ctx.log.warn('restore', `Caddy config may already exist: ${e.message}`);
+      }
+    }
+
+    // DNS record
+    if (manifest.config.createDns && manifest.caddy.routingMode !== 'subdirectory') {
+      try {
+        await ctx.dns.createRecord(manifest.config.subdomain, manifest.config.ip);
+        ctx.log.info('restore', 'DNS record recreated', { subdomain: manifest.config.subdomain });
+      } catch (e) {
+        ctx.log.warn('restore', `DNS recreation failed: ${e.message}`);
+      }
+    }
+
+    // Update the service entry with the new container ID
+    await ctx.servicesStateManager.update(services => {
+      const svc = services.find(s => s.id === service.id);
+      if (svc) {
+        svc.containerId = container.id;
+        svc.url = ctx.buildServiceUrl(manifest.config.subdomain);
+      }
+      return services;
+    });
+
+    return {
+      id: service.id,
+      name: service.name,
+      status: 'restored',
+      type: 'container',
+      containerId: container.id,
+      message: `${service.name} restored successfully`
+    };
+  }
+
+  return router;
+};
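A small detail in the restore logic above: stored port mappings are single strings of the form host:container/protocol, and one split on both ':' and '/' recovers all three parts, with a default parameter covering mappings that omit the protocol. Worked out in plain Node:

// '8080:80/tcp' -> ['8080', '80', 'tcp']
const [hostPort, containerPort, protocol = 'tcp'] = '8080:80/tcp'.split(/[:/]/);
console.log(hostPort, containerPort, protocol); // 8080 80 tcp

// Without a protocol suffix, the default parameter keeps 'tcp':
const [h2, c2, p2 = 'tcp'] = '8080:80'.split(/[:/]/);
console.log(h2, c2, p2); // 8080 80 tcp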
@@ -74,7 +74,8 @@ module.exports = function(ctx) {
       Privileged: hostConfig.Privileged,
       CapAdd: hostConfig.CapAdd,
       CapDrop: hostConfig.CapDrop,
-      Devices: hostConfig.Devices
+      Devices: hostConfig.Devices,
+      LogConfig: DOCKER.LOG_CONFIG // Ensure log rotation on updated containers
     },
     NetworkingConfig: {}
   };
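Worth noting for context: Docker fixes a container's log driver options at creation time and offers no way to change them on a running container. Rotation therefore takes effect here precisely because the update path recreates the container, and it is also why the maintenance module can only warn about running containers that still lack log limits rather than fix them in place.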
@@ -114,6 +115,16 @@ module.exports = function(ctx) {
 
     const newContainerInfo = await newContainer.inspect();
 
+    // Prune dangling images after update
+    try {
+      const pruneResult = await ctx.docker.client.pruneImages({ filters: { dangling: { true: true } } });
+      if (pruneResult.SpaceReclaimed > 0) {
+        ctx.log.info('docker', 'Pruned dangling images after update', { spaceReclaimed: Math.round(pruneResult.SpaceReclaimed / 1024 / 1024) + 'MB' });
+      }
+    } catch (pruneErr) {
+      ctx.log.debug('docker', 'Image prune after update failed', { error: pruneErr.message });
+    }
+
     res.json({
       success: true,
       message: `Container ${containerName} updated successfully`,
@@ -138,6 +138,65 @@ module.exports = function(ctx) {
     });
   }, 'logs-stream'));
 
+  // Get latest daily digest
+  router.get('/logs/digest/latest', ctx.asyncHandler(async (req, res) => {
+    const digest = await ctx.logDigest.getLatestDigest();
+    if (!digest) {
+      return res.json({ success: true, digest: null, message: 'No digest available yet. First digest is generated at midnight.' });
+    }
+    res.json({ success: true, digest });
+  }, 'logs-digest-latest'));
+
+  // Get live digest data (today's accumulated stats)
+  router.get('/logs/digest/live', ctx.asyncHandler(async (req, res) => {
+    const live = ctx.logDigest.getLiveData();
+    res.json({ success: true, ...live });
+  }, 'logs-digest-live'));
+
+  // List available digest dates
+  router.get('/logs/digest/history', ctx.asyncHandler(async (req, res) => {
+    const dates = await ctx.logDigest.listDigests();
+    res.json({ success: true, dates });
+  }, 'logs-digest-history'));
+
+  // Generate digest on demand (for today or a specific date)
+  router.post('/logs/digest/generate', ctx.asyncHandler(async (req, res) => {
+    const date = req.body.date || new Date().toISOString().slice(0, 10);
+    const digest = await ctx.logDigest.generateDailyDigest(date);
+    res.json({ success: true, digest });
+  }, 'logs-digest-generate'));
+
+  // Get digest for a specific date (JSON)
+  router.get('/logs/digest/:date', ctx.asyncHandler(async (req, res) => {
+    const { date } = req.params;
+    if (!/^\d{4}-\d{2}-\d{2}$/.test(date)) {
+      return ctx.errorResponse(res, 400, 'Invalid date format. Use YYYY-MM-DD.');
+    }
+    const format = req.query.format || 'json';
+    if (format === 'text') {
+      const text = await ctx.logDigest.getDigestText(date);
+      if (!text) return ctx.errorResponse(res, 404, `No digest found for ${date}`);
+      res.setHeader('Content-Type', 'text/plain');
+      return res.send(text);
+    }
+    const digest = await ctx.logDigest.getDigestByDate(date);
+    if (!digest) return ctx.errorResponse(res, 404, `No digest found for ${date}`);
+    res.json({ success: true, digest });
+  }, 'logs-digest-date'));
+
+  // Get Docker disk usage snapshot
+  router.get('/logs/docker-disk', ctx.asyncHandler(async (req, res) => {
+    const diskUsage = await ctx.dockerMaintenance.getDiskUsage();
+    const status = ctx.dockerMaintenance.getStatus();
+    res.json({ success: true, diskUsage, maintenance: status });
+  }, 'logs-docker-disk'));
+
+  // Trigger Docker maintenance manually
+  router.post('/logs/docker-maintenance', ctx.asyncHandler(async (req, res) => {
+    const result = await ctx.dockerMaintenance.runMaintenance();
+    res.json({ success: true, result });
+  }, 'logs-docker-maintenance'));
+
   // Get logs from a file path (for native applications)
   router.get('/logs/file', ctx.asyncHandler(async (req, res) => {
     const { path: logPath, tail = 100 } = req.query;
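Note that the literal digest routes (latest, live, history, generate) are registered before the parameterized /logs/digest/:date, so Express matches them by name first; anything else falls through to the :date handler and its YYYY-MM-DD check. Fetching the plain-text rendering looks like this (host and date are placeholders; assumes Node 18+ for global fetch):

fetch('http://localhost:3000/logs/digest/2026-03-13?format=text')
  .then(r => r.text())
  .then(console.log);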
@@ -50,6 +50,8 @@ const backupManager = require('./backup-manager');
 const healthChecker = require('./health-checker');
 const updateManager = require('./update-manager');
 const selfUpdater = require('./self-updater');
+const dockerMaintenance = require('./docker-maintenance');
+const logDigest = require('./log-digest');
 const StateManager = require('./state-manager');
 const auditLogger = require('./audit-logger');
 const portLockManager = require('./port-lock-manager');
@@ -1161,7 +1163,7 @@ Object.assign(ctx, {
   app, siteConfig, servicesStateManager, configStateManager,
   credentialManager, authManager, licenseManager,
   healthChecker, updateManager, backupManager, resourceMonitor,
-  auditLogger, portLockManager, selfUpdater,
+  auditLogger, portLockManager, selfUpdater, dockerMaintenance, logDigest,
   APP_TEMPLATES, TEMPLATE_CATEGORIES, DIFFICULTY_LEVELS, RECIPE_TEMPLATES, RECIPE_CATEGORIES,
   asyncHandler, errorResponse, ok, fetchT, log, logError, safeErrorMessage,
   buildDomain, buildServiceUrl, getServiceById, readConfig, saveConfig, addServiceToConfig,
@@ -1885,6 +1887,39 @@ const server = app.listen(PORT, '0.0.0.0', () => {
     log.error('server', 'Self-updater failed to start', { error: error.message });
   }
 
+  try {
+    dockerMaintenance.start();
+    log.info('server', 'Docker maintenance started');
+    dockerMaintenance.on('maintenance-complete', (result) => {
+      const saved = Math.round(result.spaceReclaimed.total / 1024 / 1024);
+      if (saved > 0 || result.warnings.length > 0) {
+        log.info('maintenance', 'Docker maintenance completed', {
+          spaceReclaimedMB: saved,
+          pruned: result.pruned,
+          warnings: result.warnings.length
+        });
+      }
+      if (result.warnings.length > 0) {
+        for (const w of result.warnings) log.warn('maintenance', w);
+      }
+    });
+  } catch (error) {
+    log.error('server', 'Docker maintenance failed to start', { error: error.message });
+  }
+
+  try {
+    logDigest.start(platformPaths.digestDir);
+    log.info('server', 'Log digest started', { digestDir: platformPaths.digestDir });
+    logDigest.on('digest-generated', ({ date }) => {
+      log.info('digest', `Daily digest generated for ${date}`);
+      if (typeof ctx.notification?.send === 'function') {
+        ctx.notification.send('system.digest', 'Daily Log Digest', `Log digest for ${date} is ready. View it in the DashCaddy dashboard.`, 'info');
+      }
+    });
+  } catch (error) {
+    log.error('server', 'Log digest failed to start', { error: error.message });
+  }
+
   // Tailscale API sync (if OAuth configured)
   if (tailscaleConfig.oauthConfigured) {
     startTailscaleSyncTimer();
@@ -1900,6 +1935,8 @@ function shutdown(signal) {
   log.info('shutdown', `${signal} received, draining connections...`);
   resourceMonitor.stop();
   backupManager.stop();
+  dockerMaintenance.stop();
+  logDigest.stop();
   healthChecker.stop();
   updateManager.stop();
   selfUpdater.stop();