Prevents Docker disk bloat by adding log rotation (10MB max, 3 files) to all container creation and update paths, auto-pruning dangling images after deploy/remove/update, and a daily maintenance module that cleans build cache and warns on disk thresholds. Saves a deployment manifest in services.json at deploy time so users can restore all their apps after a Docker purge. Adds restore-all and restore-single endpoints that recreate containers, Caddy config, and DNS records from the saved manifests. Adds an hourly log collector and daily digest generator that summarizes errors, warnings, and events across all services into a single human-readable report with guidance on where to investigate. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
296 lines · 9.9 KiB · JavaScript
const express = require('express');
|
|
const { DOCKER } = require('../../constants');
|
|
|
|
module.exports = function(ctx, helpers) {
|
|
const router = express.Router();
|
|
|
|
/**
 * POST /apps/:appId/restore — restore one service from its saved
 * deployment manifest (image pull, container create/start, Caddy config).
 * Responds 404 for an unknown service and 400 for a service deployed
 * before the manifest feature existed; otherwise delegates to
 * restoreService() and returns its per-service result.
 */
router.post('/apps/:appId/restore', ctx.asyncHandler(async (req, res) => {
  const { appId } = req.params;
  const allServices = await ctx.servicesStateManager.read();
  const target = allServices.find((svc) => svc.id === appId);

  if (!target) {
    return ctx.errorResponse(res, 404, `Service "${appId}" not found in services.json`);
  }

  if (!target.deploymentManifest) {
    return ctx.errorResponse(res, 400, `Service "${appId}" has no deployment manifest — it was deployed before the manifest feature was added. Redeploy it manually to create a manifest.`);
  }

  const result = await restoreService(target);
  res.json({ success: true, result });
}, 'apps-restore'));
|
|
|
|
/**
 * POST /apps/restore-all — restore every service that has a deployment
 * manifest, one at a time, and report per-service results plus a summary
 * line of how many were restored, skipped, or failed.
 */
router.post('/apps/restore-all', ctx.asyncHandler(async (req, res) => {
  const allServices = await ctx.servicesStateManager.read();
  const candidates = allServices.filter((svc) => svc.deploymentManifest);

  if (candidates.length === 0) {
    return res.json({
      success: true,
      message: 'No services have deployment manifests to restore',
      results: []
    });
  }

  const results = [];
  // Sequential on purpose: each restore pulls images and mutates shared
  // state (Caddy config, services.json); a failure in one service must
  // not abort the rest.
  for (const svc of candidates) {
    try {
      results.push(await restoreService(svc));
    } catch (error) {
      results.push({
        id: svc.id,
        name: svc.name,
        status: 'failed',
        error: error.message
      });
    }
  }

  // Tally outcomes in a single pass instead of three filter() sweeps.
  const tally = { restored: 0, skipped: 0, failed: 0 };
  for (const r of results) {
    if (r.status in tally) {
      tally[r.status] += 1;
    }
  }

  res.json({
    success: true,
    message: `Restore complete: ${tally.restored} restored, ${tally.skipped} skipped, ${tally.failed} failed`,
    results
  });
}, 'apps-restore-all'));
|
|
|
|
/**
 * GET /apps/restore-status — list every service with its restore
 * metadata: whether a deployment manifest exists, the template it came
 * from, when it was deployed, and whether its container is running.
 *
 * Fix over the original: each container inspection is independent, so
 * they now run in parallel via Promise.all instead of being awaited
 * one-by-one in a loop (serial round trips to the Docker daemon).
 */
router.get('/apps/restore-status', ctx.asyncHandler(async (req, res) => {
  const services = await ctx.servicesStateManager.read();

  const status = await Promise.all(services.map(async (service) => {
    const entry = {
      id: service.id,
      name: service.name,
      hasManifest: !!service.deploymentManifest,
      templateId: service.deploymentManifest?.templateId || service.appTemplate || null,
      deployedAt: service.deployedAt || null,
      containerRunning: false
    };

    // Check if container is currently running
    if (service.containerId) {
      try {
        const container = ctx.docker.client.getContainer(service.containerId);
        const info = await container.inspect();
        entry.containerRunning = info.State.Running;
      } catch (e) {
        // Container is gone (or inspect failed) — report as not running.
        entry.containerRunning = false;
      }
    }

    return entry;
  }));

  res.json({ success: true, services: status });
}, 'apps-restore-status'));
|
|
|
|
/**
 * Core restore logic for a single service.
 *
 * Flow:
 *  1. Static sites: nothing container-side to recreate — confirm entry.
 *  2. Containers: skip if already running (checked by saved container
 *     ID, then by container name); remove a stale stopped container
 *     found by name before recreating.
 *  3. Pull the image, falling back to a local copy if the pull fails.
 *  4. Create and start the container from the manifest.
 *  5. Recreate the Caddy route and, for subdomain-routed apps, the DNS
 *     record (best-effort — failures are logged, not fatal).
 *  6. Persist the new container ID and URL back to services.json.
 *
 * Fixes over the original: the static-site branch built a caddyOptions
 * object it never used (dead code, removed), and the identical
 * generateConfig() call was duplicated in both routing-mode branches
 * (now hoisted above the branch).
 *
 * @param {object} service - Entry from services.json. Must carry a
 *   `deploymentManifest`; callers validate this before invoking.
 * @returns {Promise<object>} Per-service result { id, name, status, ... }
 *   where status is 'restored' | 'skipped' | 'failed'.
 * @throws {Error} When the image can neither be pulled nor found locally,
 *   or when container creation/start fails.
 */
async function restoreService(service) {
  const manifest = service.deploymentManifest;
  const template = ctx.APP_TEMPLATES[manifest.templateId];

  ctx.log.info('restore', `Restoring service: ${service.name}`, { id: service.id, templateId: manifest.templateId });

  // Static sites: no container to recreate.
  if (template?.isStaticSite) {
    ctx.log.info('restore', `Restoring static site Caddy config: ${service.name}`);
    // Static site Caddy config would need to be regenerated.
    // For now, just confirm the service entry exists.
    return {
      id: service.id,
      name: service.name,
      status: 'restored',
      type: 'static',
      message: `Static site "${service.name}" config preserved`
    };
  }

  // Docker container: skip if the recorded container is still running.
  if (service.containerId) {
    try {
      const existing = ctx.docker.client.getContainer(service.containerId);
      const info = await existing.inspect();
      if (info.State.Running) {
        ctx.log.info('restore', `Container already running, skipping: ${service.name}`);
        return {
          id: service.id,
          name: service.name,
          status: 'skipped',
          message: 'Container already running'
        };
      }
    } catch (e) {
      // Container doesn't exist — proceed with restore
    }
  }

  // Also check by name (container ID may have changed)
  const containerName = `${DOCKER.CONTAINER_PREFIX}${manifest.config.subdomain}`;
  try {
    const byName = ctx.docker.client.getContainer(containerName);
    const info = await byName.inspect();
    if (info.State.Running) {
      // Update the service entry with the current container ID
      await ctx.servicesStateManager.update(services => {
        const svc = services.find(s => s.id === service.id);
        if (svc) svc.containerId = info.Id;
        return services;
      });
      return {
        id: service.id,
        name: service.name,
        status: 'skipped',
        message: 'Container already running (found by name)'
      };
    }
    // Exists but not running — remove stale container
    await byName.remove({ force: true });
  } catch (e) {
    // Container doesn't exist — proceed
  }

  if (!manifest.container) {
    return {
      id: service.id,
      name: service.name,
      status: 'failed',
      error: 'No container configuration in manifest'
    };
  }

  // Pull image; tolerate a pull failure when a local copy exists.
  ctx.log.info('restore', `Pulling image: ${manifest.container.image}`);
  try {
    await ctx.docker.pull(manifest.container.image);
  } catch (e) {
    // Check if image exists locally
    const images = await ctx.docker.client.listImages({
      filters: { reference: [manifest.container.image] }
    });
    if (images.length === 0) {
      throw new Error(`Failed to pull image ${manifest.container.image}: ${e.message}`);
    }
    ctx.log.warn('restore', `Pull failed, using local image: ${manifest.container.image}`);
  }

  // Build container config from manifest
  const containerConfig = {
    Image: manifest.container.image,
    name: containerName,
    ExposedPorts: {},
    HostConfig: {
      PortBindings: {},
      Binds: manifest.container.volumes || [],
      RestartPolicy: { Name: 'unless-stopped' },
      // Shared log-rotation settings from constants — keeps restored
      // containers on the same disk-bloat guard as fresh deploys.
      LogConfig: DOCKER.LOG_CONFIG
    },
    Env: Object.entries(manifest.container.environment || {}).map(([k, v]) => `${k}=${v}`),
    Labels: {
      'sami.managed': 'true',
      'sami.app': manifest.templateId,
      'sami.subdomain': manifest.config.subdomain,
      'sami.deployed': new Date().toISOString(),
      'sami.restored': 'true'
    }
  };

  // Port bindings — manifest entries look like "host:container[/protocol]";
  // protocol defaults to tcp when omitted.
  (manifest.container.ports || []).forEach(portMapping => {
    const [hostPort, containerPort, protocol = 'tcp'] = portMapping.split(/[:/]/);
    const containerPortKey = `${containerPort}/${protocol}`;
    containerConfig.ExposedPorts[containerPortKey] = {};
    containerConfig.HostConfig.PortBindings[containerPortKey] = [{ HostPort: hostPort }];
  });

  if (manifest.container.capabilities) {
    containerConfig.HostConfig.CapAdd = manifest.container.capabilities;
  }

  // Create and start container
  ctx.log.info('restore', `Creating container: ${containerName}`);
  const container = await ctx.docker.client.createContainer(containerConfig);
  await container.start();
  ctx.log.info('restore', `Container started: ${containerName}`);

  // Recreate Caddy config. The generated config is identical for both
  // routing modes; only the install helpers differ.
  const caddyOptions = {
    tailscaleOnly: manifest.caddy.tailscaleOnly,
    allowedIPs: manifest.caddy.allowedIPs,
    subpathSupport: manifest.caddy.subpathSupport,
  };
  const caddyConfig = ctx.caddy.generateConfig(manifest.config.subdomain, manifest.config.ip, manifest.config.port, caddyOptions);
  try {
    if (manifest.caddy.routingMode === 'subdirectory') {
      await helpers.ensureMainDomainBlock();
      await helpers.addSubpathConfig(manifest.config.subdomain, caddyConfig);
    } else {
      await helpers.addCaddyConfig(manifest.config.subdomain, caddyConfig);
    }
  } catch (e) {
    ctx.log.warn('restore', `Caddy config may already exist: ${e.message}`);
  }

  // DNS record (subdirectory-routed apps ride the main domain's record)
  if (manifest.config.createDns && manifest.caddy.routingMode !== 'subdirectory') {
    try {
      await ctx.dns.createRecord(manifest.config.subdomain, manifest.config.ip);
      ctx.log.info('restore', 'DNS record recreated', { subdomain: manifest.config.subdomain });
    } catch (e) {
      ctx.log.warn('restore', `DNS recreation failed: ${e.message}`);
    }
  }

  // Update the service entry with the new container ID
  await ctx.servicesStateManager.update(services => {
    const svc = services.find(s => s.id === service.id);
    if (svc) {
      svc.containerId = container.id;
      svc.url = ctx.buildServiceUrl(manifest.config.subdomain);
    }
    return services;
  });

  return {
    id: service.id,
    name: service.name,
    status: 'restored',
    type: 'container',
    containerId: container.id,
    message: `${service.name} restored successfully`
  };
}
|
|
|
|
return router;
|
|
};
|