- Container exec/shell via WebSocket + xterm.js (subtle >_ button on cards)
- Live dashboard updates via SSE (resource alerts, health changes, update notices)
- Docker Compose import with YAML parsing, preview, and dependency-ordered deploy
- Volume & network management modal with disk usage overview
- CPU/memory resource limits on deploy and live update
- Email SMTP notifications (nodemailer) alongside Discord/Telegram/ntfy
- Scheduled auto-update scheduler with maintenance windows (daily/weekly/monthly)

New deps: ws, js-yaml, nodemailer

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
335 lines
12 KiB
JavaScript
335 lines
12 KiB
JavaScript
const express = require('express');
|
|
const yaml = require('js-yaml');
|
|
const { DOCKER, REGEX } = require('../../constants');
|
|
const { ValidationError } = require('../../errors');
|
|
const platformPaths = require('../../platform-paths');
|
|
|
|
/**
|
|
* Docker Compose import routes
|
|
* Parse and deploy services from docker-compose.yml
|
|
* @param {Object} deps
|
|
*/
|
|
module.exports = function({ docker, caddy, servicesStateManager, portLockManager, asyncHandler, log, siteConfig, buildDomain, buildServiceUrl, addServiceToConfig, dns, notification }) {
|
|
const router = express.Router();
|
|
|
|
/**
|
|
* Parse a compose YAML string into DashCaddy-compatible service configs
|
|
*/
|
|
/**
 * Parse a compose YAML string into DashCaddy-compatible service configs.
 *
 * Supports both the short (array/string) and long (object) compose forms of
 * `depends_on`, `networks`, and `ports`, normalizing each to the array shape
 * the rest of this module expects. Build-only services (no `image`) are
 * returned with `skip: true` plus a human-readable reason.
 *
 * @param {string} yamlStr - raw docker-compose YAML text
 * @param {string} stackName - sanitized stack identifier (used in labels)
 * @returns {{services: Array<Object>, networks: string[], volumes: string[], stackName: string}}
 * @throws {ValidationError} on invalid YAML or a document with no services
 */
function parseCompose(yamlStr, stackName) {
  let doc;
  try {
    doc = yaml.load(yamlStr);
  } catch (e) {
    throw new ValidationError(`Invalid YAML: ${e.message}`);
  }

  if (!doc || !doc.services || typeof doc.services !== 'object') {
    throw new ValidationError('No services found in compose file');
  }

  const services = [];
  const networks = Object.keys(doc.networks || {});
  const volumes = Object.keys(doc.volumes || {});

  for (const [name, svc] of Object.entries(doc.services)) {
    if (!svc.image) {
      // Build-based services can't be imported without the build context
      services.push({ name, skip: true, reason: 'No image specified (build-only service)' });
      continue;
    }

    // depends_on / networks may be arrays or objects (compose long syntax);
    // normalize both to arrays of names so downstream iteration (topoSort,
    // NetworkMode selection) never sees a plain object.
    const dependsOn = Array.isArray(svc.depends_on) ? svc.depends_on : Object.keys(svc.depends_on || {});
    const svcNetworks = Array.isArray(svc.networks) ? svc.networks : Object.keys(svc.networks || {});

    const parsed = {
      name,
      image: svc.image,
      ports: [],
      volumes: [],
      environment: {},
      restart: svc.restart || 'unless-stopped',
      networks: svcNetworks,
      dependsOn,
      labels: { 'sami.managed': 'true', 'sami.compose-stack': stackName, 'sami.compose-service': name },
      resources: {},
    };

    // Parse ports
    if (svc.ports) {
      for (const p of svc.ports) {
        // Long syntax: { target, published, protocol }
        if (p && typeof p === 'object') {
          if (p.target != null) {
            parsed.ports.push({
              host: String(p.published ?? p.target),
              container: String(p.target),
              protocol: p.protocol || 'tcp',
            });
          }
          continue;
        }
        // Short syntax: "8080:80", "8080:80/tcp", "127.0.0.1:8080:80"
        const str = String(p);
        const parts = str.split(':');
        if (parts.length === 2) {
          parsed.ports.push({ host: parts[0], container: parts[1].split('/')[0], protocol: parts[1].includes('/') ? parts[1].split('/')[1] : 'tcp' });
        } else if (parts.length === 3) {
          // First element is a bind address; use the host/container pair after it.
          parsed.ports.push({ host: parts[1], container: parts[2].split('/')[0], protocol: parts[2].includes('/') ? parts[2].split('/')[1] : 'tcp' });
        }
      }
    }

    // Parse volumes (short "src:dst[:mode]" strings or long { source, target, read_only })
    if (svc.volumes) {
      for (const v of svc.volumes) {
        if (typeof v === 'string') {
          parsed.volumes.push(v);
        } else if (v.source && v.target) {
          const mode = v.read_only ? 'ro' : 'rw';
          parsed.volumes.push(`${v.source}:${v.target}:${mode}`);
        }
      }
    }

    // Parse environment (array "KEY=VAL" form or map form)
    if (svc.environment) {
      if (Array.isArray(svc.environment)) {
        for (const e of svc.environment) {
          // Split on the first '=' only; values may themselves contain '='.
          const [key, ...val] = String(e).split('=');
          parsed.environment[key] = val.join('=');
        }
      } else {
        parsed.environment = { ...svc.environment };
      }
    }

    // Parse env_file entries (note: we record them but can't resolve file contents)
    if (svc.env_file) {
      parsed.envFileWarning = 'env_file references found — variables not imported (paste them as environment vars)';
    }

    // Resource limits (swarm-style deploy.resources.limits); memory normalized to MiB.
    if (svc.deploy?.resources?.limits) {
      const lim = svc.deploy.resources.limits;
      if (lim.cpus) parsed.resources.cpus = parseFloat(lim.cpus);
      if (lim.memory) {
        const mem = String(lim.memory).toLowerCase();
        if (mem.endsWith('g')) parsed.resources.memory = parseFloat(mem) * 1024;
        else if (mem.endsWith('m')) parsed.resources.memory = parseFloat(mem);
        else parsed.resources.memory = parseFloat(mem) / (1024 * 1024); // assume bytes
      }
    }
    // Legacy mem_limit / cpus (compose v2 top-level keys)
    if (svc.mem_limit) {
      const mem = String(svc.mem_limit).toLowerCase();
      if (mem.endsWith('g')) parsed.resources.memory = parseFloat(mem) * 1024;
      else if (mem.endsWith('m')) parsed.resources.memory = parseFloat(mem);
    }
    if (svc.cpus) parsed.resources.cpus = parseFloat(svc.cpus);

    // Extra Linux capabilities
    if (svc.cap_add) parsed.capAdd = svc.cap_add;

    services.push(parsed);
  }

  return { services, networks, volumes, stackName };
}
|
|
|
|
/**
|
|
* Topological sort based on depends_on
|
|
*/
|
|
/**
 * Topological sort of services based on their `dependsOn` lists, so that
 * dependencies deploy before their dependents.
 *
 * Tolerates `dependsOn` arriving in compose long (object) form or missing
 * entirely — the services array comes straight from the request body, so it
 * may not have gone through parseCompose's normalization. Circular
 * dependencies are broken silently (deploy order among cycle members is
 * then arbitrary, but every member is still emitted once). Dependencies on
 * unknown or skipped services are ignored.
 *
 * @param {Array<Object>} services - service configs ({ name, dependsOn?, skip? })
 * @returns {Array<Object>} non-skipped services in dependency-first order
 */
function topoSort(services) {
  const graph = new Map();
  const nameMap = new Map();
  for (const svc of services) {
    if (svc.skip) continue;
    // Normalize: array as-is, object -> its keys, anything else -> [].
    const deps = Array.isArray(svc.dependsOn)
      ? svc.dependsOn
      : Object.keys(svc.dependsOn && typeof svc.dependsOn === 'object' ? svc.dependsOn : {});
    graph.set(svc.name, deps);
    nameMap.set(svc.name, svc);
  }

  const sorted = [];
  const visited = new Set();
  const visiting = new Set();

  function visit(name) {
    if (visited.has(name)) return;
    if (visiting.has(name)) return; // circular — just break the cycle here
    visiting.add(name);
    for (const dep of (graph.get(name) || [])) {
      if (graph.has(dep)) visit(dep); // skip deps that aren't deployable services
    }
    visiting.delete(name);
    visited.add(name);
    if (nameMap.has(name)) sorted.push(nameMap.get(name));
  }

  for (const name of graph.keys()) visit(name);
  return sorted;
}
|
|
|
|
// POST /import-compose — parse YAML and return preview
|
|
// POST /import-compose — parse a pasted compose YAML and return a deploy preview.
// Body: { yaml: string, stackName?: string }. The stack name is sanitized to
// [a-zA-Z0-9_-], capped at 32 chars, and falls back to 'stack' if empty.
router.post('/import-compose', asyncHandler(async (req, res) => {
  const { yaml: composeText, stackName } = req.body;
  if (typeof composeText !== 'string' || composeText.length === 0) {
    throw new ValidationError('yaml field is required (string)');
  }
  const safeName = (stackName || 'stack').replace(/[^a-zA-Z0-9_-]/g, '').substring(0, 32) || 'stack';
  const preview = parseCompose(composeText, safeName);
  res.json({ success: true, ...preview });
}, 'compose-import'));
|
|
|
|
// POST /deploy-compose — deploy parsed services
|
|
// POST /deploy-compose — deploy previously-parsed services in dependency order.
// Body: { services, networks?, stackName?, subdomainPrefix? }
// Flow: create stack networks → topo-sort services → per service: pull image
// (tolerating pull failure if the image exists locally), build container config,
// replace any stale container of the same name, start it, and — when it exposes
// a host port — register it in services.json for web access. Per-item outcomes
// are collected in `results` rather than failing the whole request.
router.post('/deploy-compose', asyncHandler(async (req, res) => {
  const { services, networks, stackName, subdomainPrefix } = req.body;
  if (!services || !Array.isArray(services) || services.length === 0) {
    throw new ValidationError('services array is required');
  }
  // Sanitize the prefix and fall back to 'stack' so container names and
  // subdomains are never empty (mirrors the /import-compose sanitization).
  const prefix = (subdomainPrefix || stackName || 'stack').replace(/[^a-zA-Z0-9-]/g, '').substring(0, 16) || 'stack';
  const results = [];

  // Create networks first so containers can attach on creation.
  if (networks && networks.length > 0) {
    for (const net of networks) {
      try {
        await docker.client.createNetwork({ Name: `${prefix}_${net}`, Driver: 'bridge' });
        results.push({ type: 'network', name: net, status: 'created' });
      } catch (e) {
        if (e.statusCode === 409) {
          // Already exists — fine, reuse it.
          results.push({ type: 'network', name: net, status: 'exists' });
        } else {
          results.push({ type: 'network', name: net, status: 'failed', error: e.message });
        }
      }
    }
  }

  // Sort by dependency order (skipped services excluded up front).
  const sorted = topoSort(services.filter(s => !s.skip));

  for (const svc of sorted) {
    const containerName = `${DOCKER.CONTAINER_PREFIX}${prefix}-${svc.name}`;
    const subdomain = `${prefix}-${svc.name}`;
    try {
      // Pull image; if the pull fails but the image is present locally, proceed.
      try {
        await docker.pull(svc.image);
      } catch (pullErr) {
        const images = await docker.client.listImages({ filters: { reference: [svc.image] } });
        if (images.length === 0) throw new Error(`Image ${svc.image} not found: ${pullErr.message}`);
      }

      // Build container config
      const containerConfig = {
        Image: svc.image,
        name: containerName,
        ExposedPorts: {},
        HostConfig: {
          PortBindings: {},
          // Translate host paths for the current platform (e.g. WSL/Windows).
          Binds: (svc.volumes || []).map(v => {
            const [hostPath, ...rest] = v.split(':');
            const translated = platformPaths.toDockerMountPath(hostPath);
            return rest.length > 0 ? `${translated}:${rest.join(':')}` : translated;
          }),
          RestartPolicy: { Name: svc.restart || 'unless-stopped' },
          LogConfig: DOCKER.LOG_CONFIG,
        },
        Env: Object.entries(svc.environment || {}).map(([k, v]) => `${k}=${v}`),
        Labels: svc.labels || {},
      };

      // Port mappings
      if (svc.ports) {
        for (const p of svc.ports) {
          const key = `${p.container}/${p.protocol || 'tcp'}`;
          containerConfig.ExposedPorts[key] = {};
          containerConfig.HostConfig.PortBindings[key] = [{ HostPort: String(p.host) }];
        }
      }

      // Resource limits: memory is in MiB; reservation set to half the limit.
      if (svc.resources?.memory) {
        containerConfig.HostConfig.Memory = Math.round(svc.resources.memory * 1024 * 1024);
        containerConfig.HostConfig.MemoryReservation = Math.round(svc.resources.memory * 1024 * 1024 * 0.5);
      }
      if (svc.resources?.cpus) {
        containerConfig.HostConfig.NanoCpus = Math.round(svc.resources.cpus * 1e9);
      }

      // Extra Linux capabilities
      if (svc.capAdd) containerConfig.HostConfig.CapAdd = svc.capAdd;

      // Networks: dockerode's create API takes a single NetworkMode, so only
      // the first declared network is attached here.
      if (svc.networks && svc.networks.length > 0) {
        containerConfig.HostConfig.NetworkMode = `${prefix}_${svc.networks[0]}`;
      }

      // Remove any stale container with the same name (best-effort; the brief
      // pause lets the daemon finish releasing the name).
      try {
        const existing = docker.client.getContainer(containerName);
        await existing.remove({ force: true });
        await new Promise(r => setTimeout(r, 1000));
      } catch (_) {}

      const container = await docker.client.createContainer(containerConfig);
      await container.start();

      // First published host port determines web accessibility.
      const mainPort = svc.ports?.[0]?.host || null;

      // Add to services.json if it has a port (i.e., is web-accessible)
      if (mainPort) {
        const ip = siteConfig.dnsServerIp || 'localhost';
        const serviceUrl = buildServiceUrl(subdomain);

        await addServiceToConfig({
          id: subdomain,
          name: `${stackName || prefix}: ${svc.name}`,
          logo: '/assets/docker.png',
          url: serviceUrl,
          containerId: container.id,
          appTemplate: null,
          routingMode: siteConfig.routingMode,
          deployedAt: new Date().toISOString(),
          deploymentManifest: {
            templateId: null,
            composeStack: stackName || prefix,
            config: { subdomain, port: mainPort, ip }
          }
        });
      }

      results.push({ type: 'container', name: svc.name, containerId: container.id, status: 'deployed', subdomain: mainPort ? subdomain : null });
    } catch (e) {
      // One failed service shouldn't abort the rest of the stack.
      log.error('compose', `Failed to deploy service ${svc.name}`, { error: e.message });
      results.push({ type: 'container', name: svc.name, status: 'failed', error: e.message });
    }
  }

  // Report skipped (build-only) services so the UI can show why.
  for (const svc of services.filter(s => s.skip)) {
    results.push({ type: 'container', name: svc.name, status: 'skipped', reason: svc.reason });
  }

  res.json({ success: true, results, stackName: stackName || prefix });
}, 'compose-deploy'));
|
|
|
|
// DELETE /compose-stack/:stackName — remove an entire stack
|
|
// DELETE /compose-stack/:stackName — remove an entire stack.
// Force-removes every container labeled `sami.compose-stack=<stackName>`
// (collecting per-container errors rather than aborting) and drops the
// stack's entries from services.json.
router.delete('/compose-stack/:stackName', asyncHandler(async (req, res) => {
  const { stackName } = req.params;
  if (!stackName) throw new ValidationError('stackName is required');

  const containers = await docker.client.listContainers({ all: true, filters: { label: [`sami.compose-stack=${stackName}`] } });
  const removed = [];

  for (const c of containers) {
    try {
      const container = docker.client.getContainer(c.Id);
      await container.remove({ force: true });
      removed.push({ name: c.Names?.[0], id: c.Id });
    } catch (e) {
      removed.push({ name: c.Names?.[0], id: c.Id, error: e.message });
    }
  }

  // Filter inside the update callback so the read-modify-write is atomic with
  // respect to other writers (the previous read-then-update could clobber
  // concurrent changes to services.json).
  await servicesStateManager.update(data => {
    data.services = (data.services || []).filter(s => {
      const manifest = s.deploymentManifest;
      return !(manifest && manifest.composeStack === stackName);
    });
  });

  res.json({ success: true, removed, count: removed.length });
}, 'compose-stack-delete'));
|
|
|
|
return router;
|
|
};
|