feat: add 7 new features — exec shell, SSE events, compose import, docker resources, resource limits, email notifications, auto-updates

- Container exec/shell via WebSocket + xterm.js (subtle >_ button on cards)
- Live dashboard updates via SSE (resource alerts, health changes, update notices)
- Docker Compose import with YAML parsing, preview, and dependency-ordered deploy
- Volume & network management modal with disk usage overview
- CPU/memory resource limits on deploy and live update
- Email SMTP notifications (nodemailer) alongside Discord/Telegram/ntfy
- Scheduled auto-update scheduler with maintenance windows (daily/weekly/monthly)

New deps: ws, js-yaml, nodemailer

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-04-05 16:15:14 -07:00
parent b60e7e40d0
commit bdf3f247b1
30 changed files with 2423 additions and 313 deletions

View File

@@ -14,14 +14,17 @@
"express": "^4.22.1",
"express-rate-limit": "^7.5.1",
"helmet": "^8.1.0",
"js-yaml": "^4.1.1",
"jsonwebtoken": "^9.0.2",
"lru-cache": "^10.4.3",
"nodemailer": "^8.0.4",
"otplib": "^12.0.1",
"png-to-ico": "^2.1.8",
"proper-lockfile": "^4.1.2",
"qrcode": "^1.5.3",
"sharp": "^0.33.5",
"validator": "^13.11.0"
"validator": "^13.11.0",
"ws": "^8.20.0"
},
"devDependencies": {
"eslint": "^8.57.1",
@@ -61,6 +64,7 @@
"integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@babel/code-frame": "^7.29.0",
"@babel/generator": "^7.29.0",
@@ -605,26 +609,6 @@
"url": "https://opencollective.com/eslint"
}
},
"node_modules/@eslint/eslintrc/node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
"dev": true,
"license": "Python-2.0"
},
"node_modules/@eslint/eslintrc/node_modules/js-yaml": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
"integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"dev": true,
"license": "MIT",
"dependencies": {
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/@eslint/js": {
"version": "8.57.1",
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz",
@@ -1100,6 +1084,30 @@
"node": ">=8"
}
},
"node_modules/@istanbuljs/load-nyc-config/node_modules/argparse": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
"integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
"dev": true,
"license": "MIT",
"dependencies": {
"sprintf-js": "~1.0.2"
}
},
"node_modules/@istanbuljs/load-nyc-config/node_modules/js-yaml": {
"version": "3.14.2",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
"integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
"dev": true,
"license": "MIT",
"dependencies": {
"argparse": "^1.0.7",
"esprima": "^4.0.0"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/@istanbuljs/schema": {
"version": "0.1.3",
"resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz",
@@ -1805,6 +1813,7 @@
"integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -1894,14 +1903,10 @@
}
},
"node_modules/argparse": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
"integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
"dev": true,
"license": "MIT",
"dependencies": {
"sprintf-js": "~1.0.2"
}
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
"license": "Python-2.0"
},
"node_modules/array-flatten": {
"version": "1.1.1",
@@ -2188,6 +2193,7 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.9.0",
"caniuse-lite": "^1.0.30001759",
@@ -3007,6 +3013,7 @@
"deprecated": "This version is no longer supported. Please see https://eslint.org/version-support for other options.",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.6.1",
@@ -3087,13 +3094,6 @@
"url": "https://opencollective.com/eslint"
}
},
"node_modules/eslint/node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
"dev": true,
"license": "Python-2.0"
},
"node_modules/eslint/node_modules/escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
@@ -3124,19 +3124,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/eslint/node_modules/js-yaml": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
"integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"dev": true,
"license": "MIT",
"dependencies": {
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/eslint/node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
@@ -4795,14 +4782,12 @@
"license": "MIT"
},
"node_modules/js-yaml": {
"version": "3.14.2",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.2.tgz",
"integrity": "sha512-PMSmkqxr106Xa156c2M265Z+FTrPl+oxd/rgOQy2tijQeK5TxQ43psO1ZCwhVOSdnn+RzkzlRz/eY4BgJBYVpg==",
"dev": true,
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
"integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"license": "MIT",
"dependencies": {
"argparse": "^1.0.7",
"esprima": "^4.0.0"
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
@@ -5257,6 +5242,15 @@
"dev": true,
"license": "MIT"
},
"node_modules/nodemailer": {
"version": "8.0.4",
"resolved": "https://registry.npmjs.org/nodemailer/-/nodemailer-8.0.4.tgz",
"integrity": "sha512-k+jf6N8PfQJ0Fe8ZhJlgqU5qJU44Lpvp2yvidH3vp1lPnVQMgi4yEEMPXg5eJS1gFIJTVq1NHBk7Ia9ARdSBdQ==",
"license": "MIT-0",
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/normalize-path": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
@@ -6918,6 +6912,27 @@
"node": "^12.13.0 || ^14.15.0 || >=16.0.0"
}
},
"node_modules/ws": {
"version": "8.20.0",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz",
"integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==",
"license": "MIT",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": ">=5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
},
"node_modules/y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",

View File

@@ -19,14 +19,17 @@
"express": "^4.22.1",
"express-rate-limit": "^7.5.1",
"helmet": "^8.1.0",
"js-yaml": "^4.1.1",
"jsonwebtoken": "^9.0.2",
"lru-cache": "^10.4.3",
"nodemailer": "^8.0.4",
"otplib": "^12.0.1",
"png-to-ico": "^2.1.8",
"proper-lockfile": "^4.1.2",
"qrcode": "^1.5.3",
"sharp": "^0.33.5",
"validator": "^13.11.0"
"validator": "^13.11.0",
"ws": "^8.20.0"
},
"devDependencies": {
"eslint": "^8.57.1",

View File

@@ -0,0 +1,334 @@
const express = require('express');
const yaml = require('js-yaml');
const { DOCKER, REGEX } = require('../../constants');
const { ValidationError } = require('../../errors');
const platformPaths = require('../../platform-paths');
/**
* Docker Compose import routes
* Parse and deploy services from docker-compose.yml
* @param {Object} deps
*/
module.exports = function({ docker, caddy, servicesStateManager, portLockManager, asyncHandler, log, siteConfig, buildDomain, buildServiceUrl, addServiceToConfig, dns, notification }) {
const router = express.Router();
/**
* Parse a compose YAML string into DashCaddy-compatible service configs
*/
function parseCompose(yamlStr, stackName) {
let doc;
try {
doc = yaml.load(yamlStr);
} catch (e) {
throw new ValidationError(`Invalid YAML: ${e.message}`);
}
if (!doc || !doc.services || typeof doc.services !== 'object') {
throw new ValidationError('No services found in compose file');
}
const services = [];
const networks = Object.keys(doc.networks || {});
const volumes = Object.keys(doc.volumes || {});
for (const [name, svc] of Object.entries(doc.services)) {
if (!svc.image) {
// Build-based services can't be imported without the build context
services.push({ name, skip: true, reason: 'No image specified (build-only service)' });
continue;
}
const parsed = {
name,
image: svc.image,
ports: [],
volumes: [],
environment: {},
restart: svc.restart || 'unless-stopped',
networks: svc.networks || [],
dependsOn: svc.depends_on || [],
labels: { 'sami.managed': 'true', 'sami.compose-stack': stackName, 'sami.compose-service': name },
resources: {},
};
// Parse ports
if (svc.ports) {
for (const p of svc.ports) {
const str = String(p);
// Handle "8080:80", "8080:80/tcp", "127.0.0.1:8080:80"
const parts = str.split(':');
if (parts.length === 2) {
parsed.ports.push({ host: parts[0], container: parts[1].split('/')[0], protocol: parts[1].includes('/') ? parts[1].split('/')[1] : 'tcp' });
} else if (parts.length === 3) {
parsed.ports.push({ host: parts[1], container: parts[2].split('/')[0], protocol: parts[2].includes('/') ? parts[2].split('/')[1] : 'tcp' });
}
}
}
// Parse volumes
if (svc.volumes) {
for (const v of svc.volumes) {
if (typeof v === 'string') {
parsed.volumes.push(v);
} else if (v.source && v.target) {
const mode = v.read_only ? 'ro' : 'rw';
parsed.volumes.push(`${v.source}:${v.target}:${mode}`);
}
}
}
// Parse environment
if (svc.environment) {
if (Array.isArray(svc.environment)) {
for (const e of svc.environment) {
const [key, ...val] = String(e).split('=');
parsed.environment[key] = val.join('=');
}
} else {
parsed.environment = { ...svc.environment };
}
}
// Parse env_file entries (note: we record them but can't resolve file contents)
if (svc.env_file) {
parsed.envFileWarning = 'env_file references found — variables not imported (paste them as environment vars)';
}
// Resource limits
if (svc.deploy?.resources?.limits) {
const lim = svc.deploy.resources.limits;
if (lim.cpus) parsed.resources.cpus = parseFloat(lim.cpus);
if (lim.memory) {
const mem = String(lim.memory).toLowerCase();
if (mem.endsWith('g')) parsed.resources.memory = parseFloat(mem) * 1024;
else if (mem.endsWith('m')) parsed.resources.memory = parseFloat(mem);
else parsed.resources.memory = parseFloat(mem) / (1024 * 1024); // assume bytes
}
}
// Legacy mem_limit / cpus
if (svc.mem_limit) {
const mem = String(svc.mem_limit).toLowerCase();
if (mem.endsWith('g')) parsed.resources.memory = parseFloat(mem) * 1024;
else if (mem.endsWith('m')) parsed.resources.memory = parseFloat(mem);
}
if (svc.cpus) parsed.resources.cpus = parseFloat(svc.cpus);
// Cap-add
if (svc.cap_add) parsed.capAdd = svc.cap_add;
services.push(parsed);
}
return { services, networks, volumes, stackName };
}
/**
* Topological sort based on depends_on
*/
function topoSort(services) {
const graph = new Map();
const nameMap = new Map();
for (const svc of services) {
if (svc.skip) continue;
graph.set(svc.name, svc.dependsOn || []);
nameMap.set(svc.name, svc);
}
const sorted = [];
const visited = new Set();
const visiting = new Set();
function visit(name) {
if (visited.has(name)) return;
if (visiting.has(name)) return; // circular — just break
visiting.add(name);
for (const dep of (graph.get(name) || [])) {
if (graph.has(dep)) visit(dep);
}
visiting.delete(name);
visited.add(name);
if (nameMap.has(name)) sorted.push(nameMap.get(name));
}
for (const name of graph.keys()) visit(name);
return sorted;
}
// POST /import-compose — parse YAML and return preview
router.post('/import-compose', asyncHandler(async (req, res) => {
const { yaml: yamlStr, stackName } = req.body;
if (!yamlStr || typeof yamlStr !== 'string') {
throw new ValidationError('yaml field is required (string)');
}
const name = (stackName || 'stack').replace(/[^a-zA-Z0-9_-]/g, '').substring(0, 32) || 'stack';
const result = parseCompose(yamlStr, name);
res.json({ success: true, ...result });
}, 'compose-import'));
// POST /deploy-compose — deploy parsed services
router.post('/deploy-compose', asyncHandler(async (req, res) => {
const { services, networks, stackName, subdomainPrefix } = req.body;
if (!services || !Array.isArray(services) || services.length === 0) {
throw new ValidationError('services array is required');
}
const prefix = (subdomainPrefix || stackName || 'stack').replace(/[^a-zA-Z0-9-]/g, '').substring(0, 16);
const results = [];
// Create networks first
if (networks && networks.length > 0) {
for (const net of networks) {
try {
await docker.client.createNetwork({ Name: `${prefix}_${net}`, Driver: 'bridge' });
results.push({ type: 'network', name: net, status: 'created' });
} catch (e) {
if (e.statusCode === 409) {
results.push({ type: 'network', name: net, status: 'exists' });
} else {
results.push({ type: 'network', name: net, status: 'failed', error: e.message });
}
}
}
}
// Sort by dependency order
const sorted = topoSort(services.filter(s => !s.skip));
for (const svc of sorted) {
const containerName = `${DOCKER.CONTAINER_PREFIX}${prefix}-${svc.name}`;
const subdomain = `${prefix}-${svc.name}`;
try {
// Pull image
try {
await docker.pull(svc.image);
} catch (pullErr) {
// Check if local
const images = await docker.client.listImages({ filters: { reference: [svc.image] } });
if (images.length === 0) throw new Error(`Image ${svc.image} not found: ${pullErr.message}`);
}
// Build container config
const containerConfig = {
Image: svc.image,
name: containerName,
ExposedPorts: {},
HostConfig: {
PortBindings: {},
Binds: (svc.volumes || []).map(v => {
const [hostPath, ...rest] = v.split(':');
const translated = platformPaths.toDockerMountPath(hostPath);
return rest.length > 0 ? `${translated}:${rest.join(':')}` : translated;
}),
RestartPolicy: { Name: svc.restart || 'unless-stopped' },
LogConfig: DOCKER.LOG_CONFIG,
},
Env: Object.entries(svc.environment || {}).map(([k, v]) => `${k}=${v}`),
Labels: svc.labels || {},
};
// Ports
if (svc.ports) {
for (const p of svc.ports) {
const key = `${p.container}/${p.protocol || 'tcp'}`;
containerConfig.ExposedPorts[key] = {};
containerConfig.HostConfig.PortBindings[key] = [{ HostPort: String(p.host) }];
}
}
// Resources
if (svc.resources?.memory) {
containerConfig.HostConfig.Memory = Math.round(svc.resources.memory * 1024 * 1024);
containerConfig.HostConfig.MemoryReservation = Math.round(svc.resources.memory * 1024 * 1024 * 0.5);
}
if (svc.resources?.cpus) {
containerConfig.HostConfig.NanoCpus = Math.round(svc.resources.cpus * 1e9);
}
// Capabilities
if (svc.capAdd) containerConfig.HostConfig.CapAdd = svc.capAdd;
// Networks
if (svc.networks && svc.networks.length > 0) {
containerConfig.HostConfig.NetworkMode = `${prefix}_${svc.networks[0]}`;
}
// Remove stale container with same name
try {
const existing = docker.client.getContainer(containerName);
await existing.remove({ force: true });
await new Promise(r => setTimeout(r, 1000));
} catch (_) {}
const container = await docker.client.createContainer(containerConfig);
await container.start();
// Determine port for Caddy/service registration
const mainPort = svc.ports?.[0]?.host || null;
// Add to services.json if it has a port (i.e., is web-accessible)
if (mainPort) {
const ip = siteConfig.dnsServerIp || 'localhost';
const serviceUrl = buildServiceUrl(subdomain);
await addServiceToConfig({
id: subdomain,
name: `${stackName || prefix}: ${svc.name}`,
logo: '/assets/docker.png',
url: serviceUrl,
containerId: container.id,
appTemplate: null,
routingMode: siteConfig.routingMode,
deployedAt: new Date().toISOString(),
deploymentManifest: {
templateId: null,
composeStack: stackName || prefix,
config: { subdomain, port: mainPort, ip }
}
});
}
results.push({ type: 'container', name: svc.name, containerId: container.id, status: 'deployed', subdomain: mainPort ? subdomain : null });
} catch (e) {
log.error('compose', `Failed to deploy service ${svc.name}`, { error: e.message });
results.push({ type: 'container', name: svc.name, status: 'failed', error: e.message });
}
}
// Skipped services
for (const svc of services.filter(s => s.skip)) {
results.push({ type: 'container', name: svc.name, status: 'skipped', reason: svc.reason });
}
res.json({ success: true, results, stackName: stackName || prefix });
}, 'compose-deploy'));
// DELETE /compose-stack/:stackName — remove an entire stack
router.delete('/compose-stack/:stackName', asyncHandler(async (req, res) => {
const { stackName } = req.params;
if (!stackName) throw new ValidationError('stackName is required');
const containers = await docker.client.listContainers({ all: true, filters: { label: [`sami.compose-stack=${stackName}`] } });
const removed = [];
for (const c of containers) {
try {
const container = docker.client.getContainer(c.Id);
await container.remove({ force: true });
removed.push({ name: c.Names[0], id: c.Id });
} catch (e) {
removed.push({ name: c.Names[0], id: c.Id, error: e.message });
}
}
// Remove from services.json
const services = await servicesStateManager.read();
const updated = (services.services || []).filter(s => {
const manifest = s.deploymentManifest;
return !(manifest && manifest.composeStack === stackName);
});
await servicesStateManager.update(data => { data.services = updated; });
res.json({ success: true, removed, count: removed.length });
}, 'compose-stack-delete'));
return router;
};

View File

@@ -170,6 +170,18 @@ module.exports = function({ docker, caddy, credentialManager, servicesStateManag
containerConfig.HostConfig.CapAdd = processedTemplate.docker.capabilities;
}
// Resource limits (CPU and memory), applied at container-create time.
// memory is MB, cpus a fractional core count — assumed numeric; TODO confirm
// validation happens upstream before this point.
if (userConfig.resources) {
  if (userConfig.resources.memory) {
    const memBytes = Math.round(userConfig.resources.memory * 1024 * 1024); // MB to bytes
    containerConfig.HostConfig.Memory = memBytes;
    containerConfig.HostConfig.MemoryReservation = Math.round(memBytes * 0.5); // soft limit = 50%
  }
  if (userConfig.resources.cpus) {
    containerConfig.HostConfig.NanoCpus = Math.round(userConfig.resources.cpus * 1e9);
  }
}
try {
log.info('docker', 'Pulling image', { image: processedTemplate.docker.image });
await docker.pull(processedTemplate.docker.image);

View File

@@ -4,6 +4,7 @@ const initDeploy = require('./deploy');
const initRemoval = require('./removal');
const initTemplates = require('./templates');
const initRestore = require('./restore');
const initCompose = require('./compose');
/**
* Apps routes aggregator
@@ -44,6 +45,7 @@ module.exports = function(ctx) {
router.use(initRemoval(subCtx));
router.use(initTemplates(subCtx));
router.use(initRestore(subCtx));
router.use(initCompose(subCtx));
return router;
};

View File

@@ -190,6 +190,36 @@ module.exports = function({ docker, log, asyncHandler }) {
success(res, { logs: logs.toString() });
}, 'container-logs'));
// Update resource limits on a running container.
// Body: { memory?: number (MB), cpus?: number }. Omitted fields are left
// untouched; 0 means "unlimited".
router.put('/:id/resources', asyncHandler(async (req, res) => {
  const container = await getVerifiedContainer(req.params.id);
  const { memory, cpus } = req.body; // assumed numeric — TODO confirm upstream validation
  const updateConfig = {};
  if (memory !== undefined) {
    updateConfig.Memory = memory > 0 ? Math.round(memory * 1024 * 1024) : 0; // MB to bytes, 0 = unlimited
    // Soft (reservation) limit pinned at 50% of the hard limit
    updateConfig.MemoryReservation = memory > 0 ? Math.round(memory * 1024 * 1024 * 0.5) : 0;
  }
  if (cpus !== undefined) {
    updateConfig.NanoCpus = cpus > 0 ? Math.round(cpus * 1e9) : 0; // 0 = unlimited
  }
  // NOTE(review): Docker can reject a Memory update when the container's
  // existing memoryswap limit is below the new Memory value — may need to
  // include MemorySwap in the update; confirm against the Engine API.
  await container.update(updateConfig);
  success(res, { message: 'Resource limits updated' });
}, 'container-resources'));
// Get the current resource limits for a container (memory values in MB)
router.get('/:id/resources', asyncHandler(async (req, res) => {
  const container = await getVerifiedContainer(req.params.id);
  const { HostConfig: hostConfig } = await container.inspect();
  // 0 means "unlimited" — reported as-is rather than converted
  const toMB = (bytes) => (bytes ? Math.round(bytes / 1024 / 1024) : 0);
  success(res, {
    memory: toMB(hostConfig.Memory),
    memoryReservation: toMB(hostConfig.MemoryReservation),
    cpus: hostConfig.NanoCpus ? hostConfig.NanoCpus / 1e9 : 0,
  });
}, 'container-resources-get'));
// Delete container
router.delete('/:id', asyncHandler(async (req, res) => {
const container = await getVerifiedContainer(req.params.id);

View File

@@ -0,0 +1,113 @@
const express = require('express');
const { success } = require('../response-helpers');
const { ValidationError } = require('../errors');
/**
* Docker resources route factory (volumes, networks, disk usage)
* @param {Object} deps
* @param {Object} deps.docker - Docker client wrapper
* @param {Function} deps.asyncHandler - Async route handler wrapper
* @returns {express.Router}
*/
module.exports = function({ docker, asyncHandler }) {
const router = express.Router();
// ===== VOLUMES =====
router.get('/volumes', asyncHandler(async (req, res) => {
const result = await docker.client.listVolumes();
const volumes = (result.Volumes || []).map(v => ({
name: v.Name,
driver: v.Driver,
mountpoint: v.Mountpoint,
scope: v.Scope,
created: v.CreatedAt,
labels: v.Labels || {},
}));
success(res, { volumes, count: volumes.length });
}, 'docker-volumes-list'));
router.post('/volumes', asyncHandler(async (req, res) => {
const { name, driver } = req.body;
if (!name || !/^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,127}$/.test(name)) {
throw new ValidationError('Invalid volume name');
}
const volume = await docker.client.createVolume({
Name: name,
Driver: driver || 'local',
});
success(res, { message: `Volume "${name}" created`, volume: { name: volume.name } });
}, 'docker-volumes-create'));
router.delete('/volumes/:name', asyncHandler(async (req, res) => {
const volume = docker.client.getVolume(req.params.name);
await volume.remove({ force: req.query.force === 'true' });
success(res, { message: `Volume "${req.params.name}" removed` });
}, 'docker-volumes-delete'));
// ===== NETWORKS =====
router.get('/networks', asyncHandler(async (req, res) => {
const networkList = await docker.client.listNetworks();
const networks = networkList.map(n => ({
id: n.Id.substring(0, 12),
name: n.Name,
driver: n.Driver,
scope: n.Scope,
internal: n.Internal,
containers: Object.keys(n.Containers || {}).length,
created: n.Created,
}));
success(res, { networks, count: networks.length });
}, 'docker-networks-list'));
router.post('/networks', asyncHandler(async (req, res) => {
const { name, driver } = req.body;
if (!name || !/^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,63}$/.test(name)) {
throw new ValidationError('Invalid network name');
}
const network = await docker.client.createNetwork({
Name: name,
Driver: driver || 'bridge',
});
success(res, { message: `Network "${name}" created`, id: network.id });
}, 'docker-networks-create'));
router.delete('/networks/:id', asyncHandler(async (req, res) => {
const network = docker.client.getNetwork(req.params.id);
await network.remove();
success(res, { message: 'Network removed' });
}, 'docker-networks-delete'));
// ===== DISK USAGE =====
router.get('/disk-usage', asyncHandler(async (req, res) => {
const df = await docker.client.df();
const summary = {
images: {
count: (df.Images || []).length,
size: (df.Images || []).reduce((sum, i) => sum + (i.Size || 0), 0),
reclaimable: (df.Images || []).filter(i => i.Containers === 0).reduce((sum, i) => sum + (i.Size || 0), 0),
},
containers: {
count: (df.Containers || []).length,
running: (df.Containers || []).filter(c => c.State === 'running').length,
size: (df.Containers || []).reduce((sum, c) => sum + (c.SizeRw || 0), 0),
},
volumes: {
count: (df.Volumes || []).length,
size: (df.Volumes || []).reduce((sum, v) => sum + (v.UsageData?.Size || 0), 0),
reclaimable: (df.Volumes || []).filter(v => v.UsageData?.RefCount === 0).reduce((sum, v) => sum + (v.UsageData?.Size || 0), 0),
},
buildCache: {
count: (df.BuildCache || []).length,
size: (df.BuildCache || []).reduce((sum, b) => sum + (b.Size || 0), 0),
reclaimable: (df.BuildCache || []).filter(b => !b.InUse).reduce((sum, b) => sum + (b.Size || 0), 0),
},
};
summary.totalSize = summary.images.size + summary.containers.size + summary.volumes.size + summary.buildCache.size;
success(res, summary);
}, 'docker-disk-usage'));
return router;
};

View File

@@ -0,0 +1,111 @@
const express = require('express');
/**
* Server-Sent Events route factory
* Pushes real-time updates to connected dashboard clients
* @param {Object} deps - Dependencies
* @param {Object} deps.resourceMonitor - Container resource monitor
* @param {Object} deps.healthChecker - Health checker
* @param {Object} deps.updateManager - Update manager
* @param {Function} deps.logError - Error logging function
* @returns {express.Router}
*/
module.exports = function({ resourceMonitor, healthChecker, updateManager, logError }) {
const router = express.Router();
const clients = new Set();
function broadcast(event, data) {
const msg = `event: ${event}\ndata: ${JSON.stringify(data)}\n\n`;
for (const res of clients) {
try { res.write(msg); } catch (_) { clients.delete(res); }
}
}
// --- Wire up EventEmitter listeners ---
// Resource monitor events
if (resourceMonitor) {
resourceMonitor.on('alert', (data) => {
broadcast('resource-alert', data);
});
resourceMonitor.on('auto-restart', (data) => {
broadcast('auto-restart', data);
});
}
// Health checker events
if (healthChecker) {
healthChecker.on('status-check', (data) => {
broadcast('status-change', {
serviceId: data.serviceId,
name: data.name,
status: data.status,
responseTime: data.responseTime,
timestamp: data.timestamp
});
});
healthChecker.on('incident-created', (data) => {
broadcast('incident', { type: 'created', ...data });
});
healthChecker.on('incident-resolved', (data) => {
broadcast('incident', { type: 'resolved', ...data });
});
}
// Update manager events
if (updateManager) {
updateManager.on('update-available', (data) => {
broadcast('update-available', data);
});
updateManager.on('update-start', (data) => {
broadcast('update-start', data);
});
updateManager.on('update-complete', (data) => {
broadcast('update-complete', data);
});
updateManager.on('update-failed', (data) => {
broadcast('update-failed', data);
});
updateManager.on('auto-update-start', (data) => {
broadcast('auto-update-start', data);
});
updateManager.on('auto-update-complete', (data) => {
broadcast('auto-update-complete', data);
});
}
// SSE endpoint
router.get('/stream', (req, res) => {
res.writeHead(200, {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'X-Accel-Buffering': 'no',
});
// Send initial connected event
res.write(`event: connected\ndata: ${JSON.stringify({ clients: clients.size + 1 })}\n\n`);
clients.add(res);
// Heartbeat every 30s
const heartbeat = setInterval(() => {
try { res.write(': heartbeat\n\n'); } catch (_) { cleanup(); }
}, 30000);
function cleanup() {
clearInterval(heartbeat);
clients.delete(res);
}
req.on('close', cleanup);
req.on('error', cleanup);
});
// Client count (useful for debugging)
router.get('/clients', (req, res) => {
res.json({ success: true, count: clients.size });
});
return router;
};

View File

@@ -0,0 +1,124 @@
const { WebSocketServer } = require('ws');
const Docker = require('dockerode');
const url = require('url');
const docker = new Docker();
/**
* Attach WebSocket server for container exec/shell
* Route: ws://host/ws/exec/:containerId
* @param {http.Server} server - The HTTP server instance
* @param {Object} log - Logger
*/
module.exports = function attachExecWS(server, log) {
const wss = new WebSocketServer({ noServer: true });
server.on('upgrade', (req, socket, head) => {
const parsed = url.parse(req.url, true);
const match = parsed.pathname.match(/^\/ws\/exec\/([a-zA-Z0-9_.-]+)$/);
if (!match) return; // Not our route — let other handlers deal with it
const containerId = decodeURIComponent(match[1]);
wss.handleUpgrade(req, socket, head, (ws) => {
handleExec(ws, containerId, log);
});
});
return wss;
};
/**
 * Drive one interactive exec session: verify the container is running, pick
 * a shell, then bridge raw bytes between the Docker exec stream and the
 * WebSocket until either side closes.
 * @param {WebSocket} ws - Accepted client socket
 * @param {string} containerId - Docker container ID or name
 * @param {Object} log - Logger
 */
async function handleExec(ws, containerId, log) {
  let execStream = null;
  let execInstance = null;
  try {
    const container = docker.getContainer(containerId);
    // Verify container exists and is running (inspect throws if it doesn't exist)
    const info = await container.inspect();
    if (!info.State.Running) {
      ws.send(JSON.stringify({ type: 'error', message: 'Container is not running' }));
      ws.close();
      return;
    }
    // Detect available shell — prefer bash, fall back to sh.
    // NOTE(review): assumes the image ships a `which` binary; if it doesn't,
    // the probe fails silently and we stay on /bin/sh.
    let shell = '/bin/sh';
    try {
      const bashCheck = await container.exec({ Cmd: ['which', 'bash'], AttachStdout: true });
      const bashStream = await bashCheck.start();
      const chunks = [];
      // Collect probe output until the stream ends.
      // NOTE(review): no timeout — a stream that never emits 'end' would stall
      // the handshake; confirm dockerode always ends short-lived exec streams.
      await new Promise((resolve) => {
        bashStream.on('data', (chunk) => chunks.push(chunk));
        bashStream.on('end', resolve);
      });
      if (chunks.length > 0 && Buffer.concat(chunks).toString().includes('/bash')) {
        shell = '/bin/bash';
      }
    } catch (_) {}
    // Start the interactive TTY exec session
    execInstance = await container.exec({
      Cmd: [shell],
      AttachStdin: true,
      AttachStdout: true,
      AttachStderr: true,
      Tty: true,
    });
    execStream = await execInstance.start({ hijack: true, stdin: true, Tty: true });
    ws.send(JSON.stringify({ type: 'connected', shell, containerId }));
    // Docker → WebSocket: TTY output forwarded as raw binary frames
    execStream.on('data', (chunk) => {
      if (ws.readyState === ws.OPEN) {
        ws.send(chunk);
      }
    });
    // Shell exited — tell the client, then close the socket
    execStream.on('end', () => {
      if (ws.readyState === ws.OPEN) {
        ws.send(JSON.stringify({ type: 'exit' }));
        ws.close();
      }
    });
    // WebSocket → Docker: keystrokes, plus JSON control frames (resize)
    ws.on('message', (data) => {
      if (!execStream.writable) return;
      try {
        // Check for control messages (JSON).
        // NOTE(review): terminal input that literally begins with '{"type":'
        // and parses as a resize message is intercepted here instead of being
        // written to the shell — unlikely collision, but possible.
        const str = data.toString();
        if (str.startsWith('{"type":')) {
          const msg = JSON.parse(str);
          if (msg.type === 'resize' && execInstance && msg.cols && msg.rows) {
            execInstance.resize({ h: msg.rows, w: msg.cols }).catch(() => {});
            return;
          }
        }
      } catch (_) {}
      // Regular terminal input
      execStream.write(data);
    });
    // Tear down the exec stream when the client goes away
    ws.on('close', () => {
      if (execStream) {
        try { execStream.destroy(); } catch (_) {}
      }
    });
    ws.on('error', (err) => {
      log.warn('exec', 'WebSocket error', { containerId, error: err.message });
      if (execStream) {
        try { execStream.destroy(); } catch (_) {}
      }
    });
  } catch (err) {
    // Covers inspect failures (missing container) and exec start failures
    log.error('exec', 'Failed to start exec session', { containerId, error: err.message });
    if (ws.readyState === ws.OPEN) {
      ws.send(JSON.stringify({ type: 'error', message: err.message }));
      ws.close();
    }
  }
}

View File

@@ -1,5 +1,6 @@
const express = require('express');
const { validateURL, validateToken } = require('../input-validator');
const validatorLib = require('validator');
const { paginate, parsePaginationParams } = require('../pagination');
const { ValidationError } = require('../errors');
@@ -32,6 +33,12 @@ module.exports = function({ notification, asyncHandler }) {
enabled: notificationConfig.providers.ntfy?.enabled || false,
configured: !!notificationConfig.providers.ntfy?.topic,
serverUrl: notificationConfig.providers.ntfy?.serverUrl || 'https://ntfy.sh'
},
email: {
enabled: notificationConfig.providers.email?.enabled || false,
configured: !!(notificationConfig.providers.email?.host && notificationConfig.providers.email?.to),
host: notificationConfig.providers.email?.host || '',
from: notificationConfig.providers.email?.from || ''
}
},
events: notificationConfig.events,
@@ -74,6 +81,19 @@ module.exports = function({ notification, asyncHandler }) {
throw new ValidationError('Invalid ntfy topic (alphanumeric, hyphens, underscores only, max 64 chars)');
}
}
if (providers.email?.to) {
const emails = providers.email.to.split(',').map(e => e.trim());
for (const email of emails) {
if (!validatorLib.isEmail(email)) {
throw new ValidationError(`Invalid email address: ${email}`);
}
}
}
if (providers.email?.host && typeof providers.email.host === 'string') {
if (!validatorLib.isFQDN(providers.email.host) && !validatorLib.isIP(providers.email.host)) {
throw new ValidationError('Invalid SMTP host');
}
}
}
// Update enabled state
@@ -101,6 +121,12 @@ module.exports = function({ notification, asyncHandler }) {
...providers.ntfy
};
}
if (providers.email) {
notificationConfig.providers.email = {
...notificationConfig.providers.email,
...providers.email
};
}
}
// Update events
@@ -144,6 +170,9 @@ module.exports = function({ notification, asyncHandler }) {
case 'ntfy':
result = await notification.sendNtfy('Test Notification', 'This is a test notification from DashCaddy.', 'info');
break;
case 'email':
result = await notification.sendEmail('Test Notification', 'This is a test notification from DashCaddy.', 'info');
break;
default:
throw new ValidationError('Unknown provider');
}

View File

@@ -59,6 +59,12 @@ module.exports = function({ updateManager, selfUpdater, asyncHandler, logError }
res.json({ success: true, message: 'Auto-update configured' });
}, 'updates-auto-update'));
// Get auto-update configuration
// Read-only endpoint exposing the per-container auto-update configuration.
router.get('/updates/auto-update', asyncHandler(async (req, res) => {
  res.json({ success: true, config: updateManager.getAutoUpdateConfig() });
}, 'updates-auto-update-config'));
// Schedule update
router.post('/updates/schedule/:containerId', asyncHandler(async (req, res) => {
const { scheduledTime } = req.body;

View File

@@ -52,6 +52,11 @@ process.on('uncaughtException', (error) => {
environment: process.env.NODE_ENV || 'production'
});
// Attach WebSocket exec handler
const attachExecWS = require('./routes/exec');
attachExecWS(server, log);
log.info('server', 'WebSocket exec handler attached');
// Start feature modules
const resourceMonitor = require('./resource-monitor');
const backupManager = require('./backup-manager');

View File

@@ -66,6 +66,8 @@ const errorLogsRoutes = require('../routes/errorlogs');
const licenseRoutes = require('../routes/license');
const recipesRoutes = require('../routes/recipes');
const themesRoutes = require('../routes/themes');
const dockerResourcesRoutes = require('../routes/docker-resources');
const eventsRoutes = require('../routes/events');
// Constants
const { APP } = require('../constants');
@@ -419,6 +421,16 @@ async function createApp() {
}));
apiRouter.use('/recipes', recipesRoutes(ctx));
apiRouter.use(themesRoutes({ asyncHandler: ctx.asyncHandler }));
apiRouter.use('/docker', dockerResourcesRoutes({
docker: ctx.docker,
asyncHandler: ctx.asyncHandler
}));
apiRouter.use('/events', eventsRoutes({
resourceMonitor: ctx.resourceMonitor,
healthChecker: ctx.healthChecker,
updateManager: ctx.updateManager,
logError: ctx.logError
}));
// Inline API routes
apiRouter.get('/health', (req, res) => {

View File

@@ -27,19 +27,22 @@ class UpdateManager extends EventEmitter {
}
/**
* Start update checking
* Start update checking and auto-update scheduler
*/
start() {
if (this.checking) return;
console.log('[UpdateManager] Starting update checks');
this.checking = true;
// Initial check
this.checkForUpdates();
// Schedule periodic checks
this.checkInterval = setInterval(() => this.checkForUpdates(), CHECK_INTERVAL);
// Start auto-update scheduler (checks every hour)
this.startAutoUpdateScheduler();
}
/**
@@ -47,14 +50,18 @@ class UpdateManager extends EventEmitter {
*/
stop() {
if (!this.checking) return;
console.log('[UpdateManager] Stopping update checks');
this.checking = false;
if (this.checkInterval) {
clearInterval(this.checkInterval);
this.checkInterval = null;
}
if (this.autoUpdateInterval) {
clearInterval(this.autoUpdateInterval);
this.autoUpdateInterval = null;
}
}
/**
@@ -823,6 +830,92 @@ class UpdateManager extends EventEmitter {
return lines.join('\n') || 'No changelog available';
}
/**
* Start the auto-update scheduler — runs hourly, applies updates in maintenance windows
*/
startAutoUpdateScheduler() {
const AUTO_CHECK_INTERVAL = 60 * 60 * 1000; // 1 hour
// Delay first run by 10 minutes to let containers start
setTimeout(() => this.runAutoUpdates(), 10 * 60 * 1000);
this.autoUpdateInterval = setInterval(() => this.runAutoUpdates(), AUTO_CHECK_INTERVAL);
const count = Object.values(this.config.autoUpdate || {}).filter(c => c.enabled).length;
if (count > 0) {
console.log(`[UpdateManager] Auto-update scheduler started (${count} container(s) configured)`);
}
}
/**
* Execute auto-updates for all configured containers
*/
async runAutoUpdates() {
const autoConfig = this.config.autoUpdate || {};
const now = new Date();
const hour = now.getHours();
const dayOfWeek = now.getDay(); // 0 = Sunday
const dayOfMonth = now.getDate();
for (const [containerId, cfg] of Object.entries(autoConfig)) {
if (!cfg.enabled) continue;
// Check maintenance window (e.g., "02:00-05:00")
if (cfg.maintenanceWindow) {
const [startStr, endStr] = cfg.maintenanceWindow.split('-').map(s => s.trim());
const startHour = parseInt(startStr);
const endHour = parseInt(endStr);
if (startHour <= endHour) {
if (hour < startHour || hour >= endHour) continue;
} else {
// Wraps midnight (e.g., "22:00-04:00")
if (hour < startHour && hour >= endHour) continue;
}
} else {
// Default: only run between 2AM and 4AM
if (hour < 2 || hour >= 4) continue;
}
// Check schedule
const shouldRun =
cfg.schedule === 'daily' ||
(cfg.schedule === 'weekly' && dayOfWeek === 0) || // Sunday
(cfg.schedule === 'monthly' && dayOfMonth === 1);
if (!shouldRun) continue;
// Check if already ran today
const lastRun = cfg.lastAutoUpdate ? new Date(cfg.lastAutoUpdate) : null;
if (lastRun && lastRun.toDateString() === now.toDateString()) continue;
// Check if this container has an available update
const update = this.availableUpdates.get(containerId);
if (!update) continue;
console.log(`[UpdateManager] Auto-updating ${update.containerName} (schedule: ${cfg.schedule})`);
this.emit('auto-update-start', { containerId, containerName: update.containerName, schedule: cfg.schedule });
try {
const result = await this.updateContainer(containerId, { autoRollback: cfg.autoRollback !== false });
cfg.lastAutoUpdate = now.toISOString();
this.saveConfig();
console.log(`[UpdateManager] Auto-update completed for ${update.containerName}`);
this.emit('auto-update-complete', { containerId, containerName: update.containerName, result });
} catch (error) {
console.error(`[UpdateManager] Auto-update failed for ${update.containerName}:`, error.message);
cfg.lastAutoUpdate = now.toISOString(); // Don't retry same day
this.saveConfig();
this.emit('auto-update-failed', { containerId, containerName: update.containerName, error: error.message });
}
}
}
/**
* Get auto-update configuration for all containers
*/
getAutoUpdateConfig() {
return this.config.autoUpdate || {};
}
/**
* Configure auto-update for a container
*/