feat: add 7 new features — exec shell, SSE events, compose import, docker resources, resource limits, email notifications, auto-updates

- Container exec/shell via WebSocket + xterm.js (subtle >_ button on cards)
- Live dashboard updates via SSE (resource alerts, health changes, update notices)
- Docker Compose import with YAML parsing, preview, and dependency-ordered deploy
- Volume & network management modal with disk usage overview
- CPU/memory resource limits on deploy and live update
- Email SMTP notifications (nodemailer) alongside Discord/Telegram/ntfy
- Scheduled auto-update scheduler with maintenance windows (daily/weekly/monthly)

New deps: ws, js-yaml, nodemailer

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-04-05 16:15:14 -07:00
parent b60e7e40d0
commit bdf3f247b1
30 changed files with 2423 additions and 313 deletions

View File

@@ -0,0 +1,334 @@
const express = require('express');
const yaml = require('js-yaml');
const { DOCKER, REGEX } = require('../../constants');
const { ValidationError } = require('../../errors');
const platformPaths = require('../../platform-paths');
/**
* Docker Compose import routes
* Parse and deploy services from docker-compose.yml
* @param {Object} deps
*/
module.exports = function({ docker, caddy, servicesStateManager, portLockManager, asyncHandler, log, siteConfig, buildDomain, buildServiceUrl, addServiceToConfig, dns, notification }) {
const router = express.Router();
/**
 * Parse a compose YAML string into DashCaddy-compatible service configs
 *
 * Handles short- and long-syntax ports, string and object volume entries,
 * array and map environment blocks, deploy/legacy resource limits, and
 * cap_add. Services without an image (build-only) are returned with
 * skip: true so callers can report them instead of failing the import.
 *
 * @param {string} yamlStr - Raw docker-compose YAML text
 * @param {string} stackName - Sanitized stack name (used in labels)
 * @returns {{services: Array<Object>, networks: string[], volumes: string[], stackName: string}}
 * @throws {ValidationError} if the YAML is invalid or contains no services
 */
function parseCompose(yamlStr, stackName) {
  // Convert a compose memory value ("2g", "2gb", "512m", "256k", or raw
  // bytes) to MB. Compose allows an optional trailing "b" on byte units.
  function memToMB(value) {
    const mem = String(value).toLowerCase().trim();
    const normalized = mem.endsWith('b') ? mem.slice(0, -1) : mem; // "2gb" -> "2g"
    if (normalized.endsWith('g')) return parseFloat(normalized) * 1024;
    if (normalized.endsWith('m')) return parseFloat(normalized);
    if (normalized.endsWith('k')) return parseFloat(normalized) / 1024;
    return parseFloat(normalized) / (1024 * 1024); // no suffix: assume bytes
  }
  let doc;
  try {
    doc = yaml.load(yamlStr);
  } catch (e) {
    throw new ValidationError(`Invalid YAML: ${e.message}`);
  }
  if (!doc || !doc.services || typeof doc.services !== 'object') {
    throw new ValidationError('No services found in compose file');
  }
  const services = [];
  const networks = Object.keys(doc.networks || {});
  const volumes = Object.keys(doc.volumes || {});
  for (const [name, svc] of Object.entries(doc.services)) {
    if (!svc.image) {
      // Build-based services can't be imported without the build context
      services.push({ name, skip: true, reason: 'No image specified (build-only service)' });
      continue;
    }
    // depends_on may be a list or a map (long syntax with conditions);
    // normalize to an array of service names so topoSort can iterate it.
    const dependsOn = Array.isArray(svc.depends_on)
      ? svc.depends_on
      : Object.keys(svc.depends_on || {});
    const parsed = {
      name,
      image: svc.image,
      ports: [],
      volumes: [],
      environment: {},
      restart: svc.restart || 'unless-stopped',
      networks: svc.networks || [],
      dependsOn,
      labels: { 'sami.managed': 'true', 'sami.compose-stack': stackName, 'sami.compose-service': name },
      resources: {},
    };
    // Parse ports
    if (svc.ports) {
      for (const p of svc.ports) {
        // Long syntax: { target: 80, published: 8080, protocol: 'udp' }
        if (p && typeof p === 'object') {
          if (p.target && p.published) {
            parsed.ports.push({ host: String(p.published), container: String(p.target), protocol: p.protocol || 'tcp' });
          }
          continue;
        }
        // Short syntax: "8080:80", "8080:80/tcp", "127.0.0.1:8080:80".
        // A bare container port ("80") has no fixed host port — skipped.
        const str = String(p);
        const parts = str.split(':');
        if (parts.length === 2) {
          const [container, protocol = 'tcp'] = parts[1].split('/');
          parsed.ports.push({ host: parts[0], container, protocol });
        } else if (parts.length === 3) {
          const [container, protocol = 'tcp'] = parts[2].split('/');
          parsed.ports.push({ host: parts[1], container, protocol });
        }
      }
    }
    // Parse volumes
    if (svc.volumes) {
      for (const v of svc.volumes) {
        if (typeof v === 'string') {
          parsed.volumes.push(v);
        } else if (v.source && v.target) {
          // Long syntax: { source, target, read_only }
          const mode = v.read_only ? 'ro' : 'rw';
          parsed.volumes.push(`${v.source}:${v.target}:${mode}`);
        }
      }
    }
    // Parse environment (list of "KEY=value" strings or a key/value map)
    if (svc.environment) {
      if (Array.isArray(svc.environment)) {
        for (const e of svc.environment) {
          // Split on the first "=" only; values may themselves contain "=".
          const [key, ...val] = String(e).split('=');
          parsed.environment[key] = val.join('=');
        }
      } else {
        parsed.environment = { ...svc.environment };
      }
    }
    // Parse env_file entries (note: we record them but can't resolve file contents)
    if (svc.env_file) {
      parsed.envFileWarning = 'env_file references found — variables not imported (paste them as environment vars)';
    }
    // Resource limits (compose v3 deploy syntax); memory normalized to MB
    if (svc.deploy?.resources?.limits) {
      const lim = svc.deploy.resources.limits;
      if (lim.cpus) parsed.resources.cpus = parseFloat(lim.cpus);
      if (lim.memory) parsed.resources.memory = memToMB(lim.memory);
    }
    // Legacy mem_limit / cpus (compose v2 top-level keys)
    if (svc.mem_limit) parsed.resources.memory = memToMB(svc.mem_limit);
    if (svc.cpus) parsed.resources.cpus = parseFloat(svc.cpus);
    // Cap-add
    if (svc.cap_add) parsed.capAdd = svc.cap_add;
    services.push(parsed);
  }
  return { services, networks, volumes, stackName };
}
/**
 * Order services so each appears after everything it depends on
 * (depth-first topological sort over depends_on edges).
 *
 * Skipped services are excluded from the result. Circular dependencies
 * are broken silently rather than rejected, and dependencies that are
 * not part of the deployable set are ignored.
 *
 * @param {Array<Object>} services - Parsed service configs
 * @returns {Array<Object>} deployable services in dependency order
 */
function topoSort(services) {
  const byName = new Map();
  for (const svc of services) {
    if (!svc.skip) byName.set(svc.name, svc);
  }
  const ordered = [];
  const done = new Set();
  const inProgress = new Set();
  const visit = (name) => {
    if (done.has(name)) return;
    if (inProgress.has(name)) return; // cycle — break it silently
    inProgress.add(name);
    const deps = byName.get(name).dependsOn || [];
    for (const dep of deps) {
      if (byName.has(dep)) visit(dep);
    }
    inProgress.delete(name);
    done.add(name);
    ordered.push(byName.get(name));
  };
  for (const name of byName.keys()) visit(name);
  return ordered;
}
// POST /import-compose — validate and parse a compose YAML payload, returning
// a deploy preview (services, networks, volumes) without touching Docker.
router.post('/import-compose', asyncHandler(async (req, res) => {
  const body = req.body;
  const yamlStr = body.yaml;
  if (typeof yamlStr !== 'string' || yamlStr.length === 0) {
    throw new ValidationError('yaml field is required (string)');
  }
  // Sanitize the stack name to a safe identifier; fall back to "stack".
  const rawName = body.stackName || 'stack';
  const safeName = rawName.replace(/[^a-zA-Z0-9_-]/g, '').substring(0, 32) || 'stack';
  res.json({ success: true, ...parseCompose(yamlStr, safeName) });
}, 'compose-import'));
// POST /deploy-compose — deploy parsed services.
// Body: { services, networks, stackName, subdomainPrefix } — typically the
// output of /import-compose, but it arrives from the client, so port/volume
// values are caller-controlled.
// Flow: create networks → deploy containers in dependency order → report
// per-item results (type network/container; status created/exists/deployed/
// failed/skipped). Individual failures are recorded, not fatal.
router.post('/deploy-compose', asyncHandler(async (req, res) => {
  const { services, networks, stackName, subdomainPrefix } = req.body;
  if (!services || !Array.isArray(services) || services.length === 0) {
    throw new ValidationError('services array is required');
  }
  // Sanitized prefix shared by network names, container names, and subdomains.
  const prefix = (subdomainPrefix || stackName || 'stack').replace(/[^a-zA-Z0-9-]/g, '').substring(0, 16);
  const results = [];
  // Create networks first so containers can attach to them at start.
  if (networks && networks.length > 0) {
    for (const net of networks) {
      try {
        await docker.client.createNetwork({ Name: `${prefix}_${net}`, Driver: 'bridge' });
        results.push({ type: 'network', name: net, status: 'created' });
      } catch (e) {
        // 409 = network already exists; treat as success rather than failure.
        if (e.statusCode === 409) {
          results.push({ type: 'network', name: net, status: 'exists' });
        } else {
          results.push({ type: 'network', name: net, status: 'failed', error: e.message });
        }
      }
    }
  }
  // Sort by dependency order (depends_on), excluding build-only services.
  const sorted = topoSort(services.filter(s => !s.skip));
  for (const svc of sorted) {
    const containerName = `${DOCKER.CONTAINER_PREFIX}${prefix}-${svc.name}`;
    const subdomain = `${prefix}-${svc.name}`;
    try {
      // Pull image
      try {
        await docker.pull(svc.image);
      } catch (pullErr) {
        // Pull failed — fall back to a locally cached image if one exists.
        const images = await docker.client.listImages({ filters: { reference: [svc.image] } });
        if (images.length === 0) throw new Error(`Image ${svc.image} not found: ${pullErr.message}`);
      }
      // Build container config
      const containerConfig = {
        Image: svc.image,
        name: containerName,
        ExposedPorts: {},
        HostConfig: {
          PortBindings: {},
          // Translate the host path for the current platform; preserve any
          // container-path/mode suffix after the first ":".
          Binds: (svc.volumes || []).map(v => {
            const [hostPath, ...rest] = v.split(':');
            const translated = platformPaths.toDockerMountPath(hostPath);
            return rest.length > 0 ? `${translated}:${rest.join(':')}` : translated;
          }),
          RestartPolicy: { Name: svc.restart || 'unless-stopped' },
          LogConfig: DOCKER.LOG_CONFIG,
        },
        Env: Object.entries(svc.environment || {}).map(([k, v]) => `${k}=${v}`),
        Labels: svc.labels || {},
      };
      // Ports: expose each container port and bind the requested host port.
      if (svc.ports) {
        for (const p of svc.ports) {
          const key = `${p.container}/${p.protocol || 'tcp'}`;
          containerConfig.ExposedPorts[key] = {};
          containerConfig.HostConfig.PortBindings[key] = [{ HostPort: String(p.host) }];
        }
      }
      // Resources: memory is in MB; reservation is a soft limit at 50% of the cap.
      if (svc.resources?.memory) {
        containerConfig.HostConfig.Memory = Math.round(svc.resources.memory * 1024 * 1024);
        containerConfig.HostConfig.MemoryReservation = Math.round(svc.resources.memory * 1024 * 1024 * 0.5);
      }
      if (svc.resources?.cpus) {
        containerConfig.HostConfig.NanoCpus = Math.round(svc.resources.cpus * 1e9);
      }
      // Capabilities
      if (svc.capAdd) containerConfig.HostConfig.CapAdd = svc.capAdd;
      // Networks: only the first listed network is used (NetworkMode takes one).
      if (svc.networks && svc.networks.length > 0) {
        containerConfig.HostConfig.NetworkMode = `${prefix}_${svc.networks[0]}`;
      }
      // Remove stale container with same name (best-effort; the brief pause
      // lets Docker release the name before re-creating).
      try {
        const existing = docker.client.getContainer(containerName);
        await existing.remove({ force: true });
        await new Promise(r => setTimeout(r, 1000));
      } catch (_) {}
      const container = await docker.client.createContainer(containerConfig);
      await container.start();
      // Determine port for Caddy/service registration
      const mainPort = svc.ports?.[0]?.host || null;
      // Add to services.json if it has a port (i.e., is web-accessible)
      if (mainPort) {
        const ip = siteConfig.dnsServerIp || 'localhost';
        const serviceUrl = buildServiceUrl(subdomain);
        await addServiceToConfig({
          id: subdomain,
          name: `${stackName || prefix}: ${svc.name}`,
          logo: '/assets/docker.png',
          url: serviceUrl,
          containerId: container.id,
          appTemplate: null,
          routingMode: siteConfig.routingMode,
          deployedAt: new Date().toISOString(),
          // Manifest ties the service back to its stack for bulk removal.
          deploymentManifest: {
            templateId: null,
            composeStack: stackName || prefix,
            config: { subdomain, port: mainPort, ip }
          }
        });
      }
      results.push({ type: 'container', name: svc.name, containerId: container.id, status: 'deployed', subdomain: mainPort ? subdomain : null });
    } catch (e) {
      log.error('compose', `Failed to deploy service ${svc.name}`, { error: e.message });
      results.push({ type: 'container', name: svc.name, status: 'failed', error: e.message });
    }
  }
  // Skipped (build-only) services are echoed back so the UI can show them.
  for (const svc of services.filter(s => s.skip)) {
    results.push({ type: 'container', name: svc.name, status: 'skipped', reason: svc.reason });
  }
  res.json({ success: true, results, stackName: stackName || prefix });
}, 'compose-deploy'));
// DELETE /compose-stack/:stackName — remove an entire stack
router.delete('/compose-stack/:stackName', asyncHandler(async (req, res) => {
const { stackName } = req.params;
if (!stackName) throw new ValidationError('stackName is required');
const containers = await docker.client.listContainers({ all: true, filters: { label: [`sami.compose-stack=${stackName}`] } });
const removed = [];
for (const c of containers) {
try {
const container = docker.client.getContainer(c.Id);
await container.remove({ force: true });
removed.push({ name: c.Names[0], id: c.Id });
} catch (e) {
removed.push({ name: c.Names[0], id: c.Id, error: e.message });
}
}
// Remove from services.json
const services = await servicesStateManager.read();
const updated = (services.services || []).filter(s => {
const manifest = s.deploymentManifest;
return !(manifest && manifest.composeStack === stackName);
});
await servicesStateManager.update(data => { data.services = updated; });
res.json({ success: true, removed, count: removed.length });
}, 'compose-stack-delete'));
return router;
};

View File

@@ -170,6 +170,18 @@ module.exports = function({ docker, caddy, credentialManager, servicesStateManag
containerConfig.HostConfig.CapAdd = processedTemplate.docker.capabilities;
}
// Resource limits (CPU and memory)
if (userConfig.resources) {
if (userConfig.resources.memory) {
const memBytes = Math.round(userConfig.resources.memory * 1024 * 1024); // MB to bytes
containerConfig.HostConfig.Memory = memBytes;
containerConfig.HostConfig.MemoryReservation = Math.round(memBytes * 0.5); // soft limit = 50%
}
if (userConfig.resources.cpus) {
containerConfig.HostConfig.NanoCpus = Math.round(userConfig.resources.cpus * 1e9);
}
}
try {
log.info('docker', 'Pulling image', { image: processedTemplate.docker.image });
await docker.pull(processedTemplate.docker.image);

View File

@@ -4,6 +4,7 @@ const initDeploy = require('./deploy');
const initRemoval = require('./removal');
const initTemplates = require('./templates');
const initRestore = require('./restore');
const initCompose = require('./compose');
/**
* Apps routes aggregator
@@ -44,6 +45,7 @@ module.exports = function(ctx) {
router.use(initRemoval(subCtx));
router.use(initTemplates(subCtx));
router.use(initRestore(subCtx));
router.use(initCompose(subCtx));
return router;
};

View File

@@ -190,6 +190,36 @@ module.exports = function({ docker, log, asyncHandler }) {
success(res, { logs: logs.toString() });
}, 'container-logs'));
// Update resource limits on a running container
// PUT /:id/resources — body: { memory?: number (MB, 0 = unlimited),
//                              cpus?: number (0 = unlimited) }
// Omitted fields are left unchanged on the container.
// NOTE(review): memory/cpus are not type-checked — a non-numeric value
// becomes NaN and is passed straight to the Docker update API; consider
// validating. Also, setting Memory without MemorySwap can be rejected by
// Docker when a smaller memoryswap limit is already set — confirm against
// the Docker Engine API before relying on this for swap-limited hosts.
router.put('/:id/resources', asyncHandler(async (req, res) => {
  const container = await getVerifiedContainer(req.params.id);
  const { memory, cpus } = req.body;
  const updateConfig = {};
  if (memory !== undefined) {
    updateConfig.Memory = memory > 0 ? Math.round(memory * 1024 * 1024) : 0; // MB to bytes, 0 = unlimited
    // Soft limit pinned at 50% of the hard cap, mirroring deploy-time behavior.
    updateConfig.MemoryReservation = memory > 0 ? Math.round(memory * 1024 * 1024 * 0.5) : 0;
  }
  if (cpus !== undefined) {
    updateConfig.NanoCpus = cpus > 0 ? Math.round(cpus * 1e9) : 0; // 0 = unlimited
  }
  await container.update(updateConfig);
  success(res, { message: 'Resource limits updated' });
}, 'container-resources'));
// Get resource limits for a container
// GET /:id/resources — reads HostConfig from inspect and converts back to
// the units the PUT endpoint accepts (MB / fractional CPUs); 0 = unlimited.
router.get('/:id/resources', asyncHandler(async (req, res) => {
  const container = await getVerifiedContainer(req.params.id);
  const info = await container.inspect();
  const hc = info.HostConfig;
  success(res, {
    memory: hc.Memory ? Math.round(hc.Memory / 1024 / 1024) : 0, // bytes to MB
    memoryReservation: hc.MemoryReservation ? Math.round(hc.MemoryReservation / 1024 / 1024) : 0,
    cpus: hc.NanoCpus ? hc.NanoCpus / 1e9 : 0,
  });
}, 'container-resources-get'));
// Delete container
router.delete('/:id', asyncHandler(async (req, res) => {
const container = await getVerifiedContainer(req.params.id);

View File

@@ -0,0 +1,113 @@
const express = require('express');
const { success } = require('../response-helpers');
const { ValidationError } = require('../errors');
/**
* Docker resources route factory (volumes, networks, disk usage)
* @param {Object} deps
* @param {Object} deps.docker - Docker client wrapper
* @param {Function} deps.asyncHandler - Async route handler wrapper
* @returns {express.Router}
*/
module.exports = function({ docker, asyncHandler }) {
const router = express.Router();
// ===== VOLUMES =====
// GET /volumes — list all Docker volumes in a trimmed-down shape.
router.get('/volumes', asyncHandler(async (req, res) => {
  const listing = await docker.client.listVolumes();
  const volumes = [];
  for (const v of listing.Volumes || []) {
    volumes.push({
      name: v.Name,
      driver: v.Driver,
      mountpoint: v.Mountpoint,
      scope: v.Scope,
      created: v.CreatedAt,
      labels: v.Labels || {},
    });
  }
  success(res, { volumes, count: volumes.length });
}, 'docker-volumes-list'));
// POST /volumes — create a named volume; driver defaults to "local".
router.post('/volumes', asyncHandler(async (req, res) => {
  const { name, driver } = req.body;
  const namePattern = /^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,127}$/;
  if (!name || !namePattern.test(name)) {
    throw new ValidationError('Invalid volume name');
  }
  const created = await docker.client.createVolume({
    Name: name,
    Driver: driver || 'local',
  });
  success(res, { message: `Volume "${name}" created`, volume: { name: created.name } });
}, 'docker-volumes-create'));
// DELETE /volumes/:name — remove a volume; ?force=true forces removal.
router.delete('/volumes/:name', asyncHandler(async (req, res) => {
  const target = docker.client.getVolume(req.params.name);
  const force = req.query.force === 'true';
  await target.remove({ force });
  success(res, { message: `Volume "${req.params.name}" removed` });
}, 'docker-volumes-delete'));
// ===== NETWORKS =====
// GET /networks — list Docker networks with short ids and container counts.
router.get('/networks', asyncHandler(async (req, res) => {
  const raw = await docker.client.listNetworks();
  const networks = raw.map((n) => {
    return {
      id: n.Id.substring(0, 12),
      name: n.Name,
      driver: n.Driver,
      scope: n.Scope,
      internal: n.Internal,
      containers: Object.keys(n.Containers || {}).length,
      created: n.Created,
    };
  });
  success(res, { networks, count: networks.length });
}, 'docker-networks-list'));
// POST /networks — create a network; driver defaults to "bridge".
router.post('/networks', asyncHandler(async (req, res) => {
  const { name, driver } = req.body;
  const namePattern = /^[a-zA-Z0-9][a-zA-Z0-9_.-]{0,63}$/;
  if (!name || !namePattern.test(name)) {
    throw new ValidationError('Invalid network name');
  }
  const created = await docker.client.createNetwork({
    Name: name,
    Driver: driver || 'bridge',
  });
  success(res, { message: `Network "${name}" created`, id: created.id });
}, 'docker-networks-create'));
// DELETE /networks/:id — remove a network by id or name.
router.delete('/networks/:id', asyncHandler(async (req, res) => {
  const target = docker.client.getNetwork(req.params.id);
  await target.remove();
  success(res, { message: 'Network removed' });
}, 'docker-networks-delete'));
// ===== DISK USAGE =====
router.get('/disk-usage', asyncHandler(async (req, res) => {
const df = await docker.client.df();
const summary = {
images: {
count: (df.Images || []).length,
size: (df.Images || []).reduce((sum, i) => sum + (i.Size || 0), 0),
reclaimable: (df.Images || []).filter(i => i.Containers === 0).reduce((sum, i) => sum + (i.Size || 0), 0),
},
containers: {
count: (df.Containers || []).length,
running: (df.Containers || []).filter(c => c.State === 'running').length,
size: (df.Containers || []).reduce((sum, c) => sum + (c.SizeRw || 0), 0),
},
volumes: {
count: (df.Volumes || []).length,
size: (df.Volumes || []).reduce((sum, v) => sum + (v.UsageData?.Size || 0), 0),
reclaimable: (df.Volumes || []).filter(v => v.UsageData?.RefCount === 0).reduce((sum, v) => sum + (v.UsageData?.Size || 0), 0),
},
buildCache: {
count: (df.BuildCache || []).length,
size: (df.BuildCache || []).reduce((sum, b) => sum + (b.Size || 0), 0),
reclaimable: (df.BuildCache || []).filter(b => !b.InUse).reduce((sum, b) => sum + (b.Size || 0), 0),
},
};
summary.totalSize = summary.images.size + summary.containers.size + summary.volumes.size + summary.buildCache.size;
success(res, summary);
}, 'docker-disk-usage'));
return router;
};

View File

@@ -0,0 +1,111 @@
const express = require('express');
/**
* Server-Sent Events route factory
* Pushes real-time updates to connected dashboard clients
* @param {Object} deps - Dependencies
* @param {Object} deps.resourceMonitor - Container resource monitor
* @param {Object} deps.healthChecker - Health checker
* @param {Object} deps.updateManager - Update manager
* @param {Function} deps.logError - Error logging function
* @returns {express.Router}
*/
module.exports = function({ resourceMonitor, healthChecker, updateManager, logError }) {
const router = express.Router();
const clients = new Set();
/**
 * Push one SSE message to every connected client.
 * Clients whose write throws are dropped from the pool.
 */
function broadcast(event, data) {
  const payload = `event: ${event}\ndata: ${JSON.stringify(data)}\n\n`;
  for (const client of clients) {
    try {
      client.write(payload);
    } catch (_) {
      clients.delete(client);
    }
  }
}
// --- Wire up EventEmitter listeners ---
// Resource monitor: forward alert/auto-restart payloads to the dashboard.
if (resourceMonitor) {
  resourceMonitor.on('alert', (data) => broadcast('resource-alert', data));
  resourceMonitor.on('auto-restart', (data) => broadcast('auto-restart', data));
}
// Health checker: trim status checks down to the fields clients need,
// and tag incident events as created/resolved.
if (healthChecker) {
  healthChecker.on('status-check', (data) => {
    const { serviceId, name, status, responseTime, timestamp } = data;
    broadcast('status-change', { serviceId, name, status, responseTime, timestamp });
  });
  healthChecker.on('incident-created', (data) => broadcast('incident', { type: 'created', ...data }));
  healthChecker.on('incident-resolved', (data) => broadcast('incident', { type: 'resolved', ...data }));
}
// Update manager: each event is rebroadcast verbatim under its own name.
if (updateManager) {
  const passthroughEvents = [
    'update-available',
    'update-start',
    'update-complete',
    'update-failed',
    'auto-update-start',
    'auto-update-complete',
  ];
  for (const evt of passthroughEvents) {
    updateManager.on(evt, (data) => broadcast(evt, data));
  }
}
// SSE endpoint — holds the response open and streams events until the client
// disconnects. A comment-line heartbeat keeps intermediaries from timing out;
// X-Accel-Buffering disables nginx response buffering.
router.get('/stream', (req, res) => {
  res.writeHead(200, {
    'Content-Type': 'text/event-stream',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'X-Accel-Buffering': 'no',
  });
  // Greet the client first (count includes this connection), then register it.
  res.write(`event: connected\ndata: ${JSON.stringify({ clients: clients.size + 1 })}\n\n`);
  clients.add(res);
  // Heartbeat every 30s; a failed write means the client is gone.
  const heartbeat = setInterval(() => {
    try {
      res.write(': heartbeat\n\n');
    } catch (_) {
      cleanup();
    }
  }, 30000);
  function cleanup() {
    clearInterval(heartbeat);
    clients.delete(res);
  }
  req.on('close', cleanup);
  req.on('error', cleanup);
});
// Client count (useful for debugging)
router.get('/clients', (req, res) => {
res.json({ success: true, count: clients.size });
});
return router;
};

View File

@@ -0,0 +1,124 @@
const { WebSocketServer } = require('ws');
const Docker = require('dockerode');
const url = require('url');
const docker = new Docker();
/**
 * Attach WebSocket server for container exec/shell
 * Route: ws://host/ws/exec/:containerId
 *
 * SECURITY NOTE(review): no authentication or session check is visible on
 * this upgrade path — any client that can reach the server can open a shell
 * inside a container. Confirm auth is enforced upstream (proxy/middleware)
 * before exposing this endpoint.
 *
 * Registers an 'upgrade' listener that only claims URLs matching
 * /ws/exec/<id>; all other upgrade requests are left untouched for any
 * other upgrade handlers on the same server.
 * @param {http.Server} server - The HTTP server instance
 * @param {Object} log - Logger
 * @returns {WebSocketServer} the noServer WebSocketServer instance
 */
module.exports = function attachExecWS(server, log) {
  const wss = new WebSocketServer({ noServer: true });
  server.on('upgrade', (req, socket, head) => {
    const parsed = url.parse(req.url, true);
    const match = parsed.pathname.match(/^\/ws\/exec\/([a-zA-Z0-9_.-]+)$/);
    if (!match) return; // Not our route — let other handlers deal with it
    const containerId = decodeURIComponent(match[1]);
    wss.handleUpgrade(req, socket, head, (ws) => {
      handleExec(ws, containerId, log);
    });
  });
  return wss;
};
/**
 * Run an interactive shell inside a container and bridge it to a WebSocket.
 *
 * Protocol: frames in both directions are raw terminal bytes, except JSON
 * control frames from the client ({"type":"resize",cols,rows}) which resize
 * the TTY. The server emits JSON status frames: connected / exit / error.
 *
 * @param {WebSocket} ws - Accepted WebSocket connection
 * @param {string} containerId - Target container id or name (pre-validated by the route regex)
 * @param {Object} log - Logger
 */
async function handleExec(ws, containerId, log) {
  let execStream = null;
  let execInstance = null;
  try {
    const container = docker.getContainer(containerId);
    // Verify container exists and is running
    const info = await container.inspect();
    if (!info.State.Running) {
      ws.send(JSON.stringify({ type: 'error', message: 'Container is not running' }));
      ws.close();
      return;
    }
    // Detect available shell: prefer bash, fall back to /bin/sh.
    let shell = '/bin/sh';
    try {
      const bashCheck = await container.exec({ Cmd: ['which', 'bash'], AttachStdout: true });
      const bashStream = await bashCheck.start();
      const chunks = [];
      await new Promise((resolve) => {
        bashStream.on('data', (chunk) => chunks.push(chunk));
        bashStream.on('end', resolve);
      });
      // NOTE(review): without Tty this exec output is presumably Docker's
      // multiplexed format (8-byte frame headers); the substring match on
      // '/bash' still works on the concatenated buffer — confirm.
      if (chunks.length > 0 && Buffer.concat(chunks).toString().includes('/bash')) {
        shell = '/bin/bash';
      }
    } catch (_) {}
    // Interactive TTY session attached on stdin/stdout/stderr.
    execInstance = await container.exec({
      Cmd: [shell],
      AttachStdin: true,
      AttachStdout: true,
      AttachStderr: true,
      Tty: true,
    });
    execStream = await execInstance.start({ hijack: true, stdin: true, Tty: true });
    ws.send(JSON.stringify({ type: 'connected', shell, containerId }));
    // Docker → WebSocket
    execStream.on('data', (chunk) => {
      if (ws.readyState === ws.OPEN) {
        ws.send(chunk);
      }
    });
    // Shell exited: notify the client, then close the socket.
    execStream.on('end', () => {
      if (ws.readyState === ws.OPEN) {
        ws.send(JSON.stringify({ type: 'exit' }));
        ws.close();
      }
    });
    // WebSocket → Docker
    ws.on('message', (data) => {
      if (!execStream.writable) return;
      try {
        // Check for control messages (JSON); anything that fails parsing
        // falls through and is treated as terminal input.
        const str = data.toString();
        if (str.startsWith('{"type":')) {
          const msg = JSON.parse(str);
          if (msg.type === 'resize' && execInstance && msg.cols && msg.rows) {
            execInstance.resize({ h: msg.rows, w: msg.cols }).catch(() => {});
            return;
          }
        }
      } catch (_) {}
      // Regular terminal input
      execStream.write(data);
    });
    // Tear down the exec stream when the client disconnects or errors.
    ws.on('close', () => {
      if (execStream) {
        try { execStream.destroy(); } catch (_) {}
      }
    });
    ws.on('error', (err) => {
      log.warn('exec', 'WebSocket error', { containerId, error: err.message });
      if (execStream) {
        try { execStream.destroy(); } catch (_) {}
      }
    });
  } catch (err) {
    log.error('exec', 'Failed to start exec session', { containerId, error: err.message });
    if (ws.readyState === ws.OPEN) {
      ws.send(JSON.stringify({ type: 'error', message: err.message }));
      ws.close();
    }
  }
}

View File

@@ -1,5 +1,6 @@
const express = require('express');
const { validateURL, validateToken } = require('../input-validator');
const validatorLib = require('validator');
const { paginate, parsePaginationParams } = require('../pagination');
const { ValidationError } = require('../errors');
@@ -32,6 +33,12 @@ module.exports = function({ notification, asyncHandler }) {
enabled: notificationConfig.providers.ntfy?.enabled || false,
configured: !!notificationConfig.providers.ntfy?.topic,
serverUrl: notificationConfig.providers.ntfy?.serverUrl || 'https://ntfy.sh'
},
email: {
enabled: notificationConfig.providers.email?.enabled || false,
configured: !!(notificationConfig.providers.email?.host && notificationConfig.providers.email?.to),
host: notificationConfig.providers.email?.host || '',
from: notificationConfig.providers.email?.from || ''
}
},
events: notificationConfig.events,
@@ -74,6 +81,19 @@ module.exports = function({ notification, asyncHandler }) {
throw new ValidationError('Invalid ntfy topic (alphanumeric, hyphens, underscores only, max 64 chars)');
}
}
if (providers.email?.to) {
const emails = providers.email.to.split(',').map(e => e.trim());
for (const email of emails) {
if (!validatorLib.isEmail(email)) {
throw new ValidationError(`Invalid email address: ${email}`);
}
}
}
if (providers.email?.host && typeof providers.email.host === 'string') {
if (!validatorLib.isFQDN(providers.email.host) && !validatorLib.isIP(providers.email.host)) {
throw new ValidationError('Invalid SMTP host');
}
}
}
// Update enabled state
@@ -101,6 +121,12 @@ module.exports = function({ notification, asyncHandler }) {
...providers.ntfy
};
}
if (providers.email) {
notificationConfig.providers.email = {
...notificationConfig.providers.email,
...providers.email
};
}
}
// Update events
@@ -144,6 +170,9 @@ module.exports = function({ notification, asyncHandler }) {
case 'ntfy':
result = await notification.sendNtfy('Test Notification', 'This is a test notification from DashCaddy.', 'info');
break;
case 'email':
result = await notification.sendEmail('Test Notification', 'This is a test notification from DashCaddy.', 'info');
break;
default:
throw new ValidationError('Unknown provider');
}

View File

@@ -59,6 +59,12 @@ module.exports = function({ updateManager, selfUpdater, asyncHandler, logError }
res.json({ success: true, message: 'Auto-update configured' });
}, 'updates-auto-update'));
// Get auto-update configuration
// GET /updates/auto-update — returns whatever updateManager.getAutoUpdateConfig()
// provides, wrapped as { success, config }; the shape is not validated here.
router.get('/updates/auto-update', asyncHandler(async (req, res) => {
  const config = updateManager.getAutoUpdateConfig();
  res.json({ success: true, config });
}, 'updates-auto-update-config'));
// Schedule update
router.post('/updates/schedule/:containerId', asyncHandler(async (req, res) => {
const { scheduledTime } = req.body;