Initial commit: DashCaddy v1.0
Full codebase including API server (32 modules + routes), dashboard frontend, DashCA certificate distribution, installer script, and deployment skills.
This commit is contained in:
300
dashcaddy-api/routes/apps/deploy.js
Normal file
300
dashcaddy-api/routes/apps/deploy.js
Normal file
@@ -0,0 +1,300 @@
|
||||
const express = require('express');
|
||||
const fsp = require('fs').promises;
|
||||
const path = require('path');
|
||||
const validatorLib = require('validator');
|
||||
const { REGEX, DOCKER } = require('../../constants');
|
||||
const { isValidPort } = require('../../input-validator');
|
||||
const { exists } = require('../../fs-helpers');
|
||||
const platformPaths = require('../../platform-paths');
|
||||
|
||||
module.exports = function(ctx, helpers) {
|
||||
const router = express.Router();
|
||||
|
||||
async function deployDashCAStaticSite(template, userConfig) {
|
||||
const destPath = platformPaths.caCertDir;
|
||||
try {
|
||||
ctx.log.info('deploy', 'DashCA: Starting static site deployment');
|
||||
if (!await exists(destPath)) {
|
||||
await fsp.mkdir(destPath, { recursive: true });
|
||||
ctx.log.info('deploy', 'DashCA: Created destination directory', { path: destPath });
|
||||
}
|
||||
|
||||
ctx.log.info('deploy', 'DashCA: Verifying certificate files');
|
||||
const rootCertExists = await exists(`${destPath}/root.crt`);
|
||||
const intermediateCertExists = await exists(`${destPath}/intermediate.crt`);
|
||||
if (rootCertExists) ctx.log.info('deploy', 'DashCA: Root certificate found');
|
||||
else ctx.log.warn('deploy', 'DashCA: Root certificate not found', { expected: path.join(destPath, 'root.crt') });
|
||||
if (intermediateCertExists) ctx.log.info('deploy', 'DashCA: Intermediate certificate found');
|
||||
|
||||
const indexPath = path.join(destPath, 'index.html');
|
||||
if (!await exists(indexPath)) {
|
||||
ctx.log.info('deploy', 'DashCA: Creating minimal landing page');
|
||||
const minimalHtml = `<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>CA Certificate Distribution</title>
|
||||
<style>
|
||||
body { font-family: system-ui, sans-serif; max-width: 800px; margin: 50px auto; padding: 20px; background: #1a1a2e; color: #eee; }
|
||||
h1 { color: #00d9ff; }
|
||||
.download { display: inline-block; padding: 12px 24px; margin: 10px; background: #00d9ff; color: #000; text-decoration: none; border-radius: 6px; font-weight: bold; }
|
||||
.download:hover { background: #00b8d4; }
|
||||
code { background: #2a2a3e; padding: 2px 6px; border-radius: 3px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<h1>CA Certificate Installation</h1>
|
||||
<p>To trust *${ctx.siteConfig.tld} domains on your device, install the root CA certificate:</p>
|
||||
<h2>Download Certificate</h2>
|
||||
<a href="/root.crt" class="download" download>Download Certificate (.crt)</a>
|
||||
<h2>Windows Installation</h2>
|
||||
<p>Run PowerShell as Administrator:</p>
|
||||
<pre><code>irm http://ca${ctx.siteConfig.tld}/api/ca/install-script?platform=windows | iex</code></pre>
|
||||
<h2>Linux/macOS Installation</h2>
|
||||
<pre><code>curl -fsSk http://ca${ctx.siteConfig.tld}/api/ca/install-script?platform=linux | sudo bash</code></pre>
|
||||
<p><em>Note: Full DashCA interface requires manual deployment of certificate files.</em></p>
|
||||
</body>
|
||||
</html>`;
|
||||
await fsp.writeFile(indexPath, minimalHtml);
|
||||
ctx.log.info('deploy', 'DashCA: Created minimal landing page');
|
||||
} else {
|
||||
ctx.log.info('deploy', 'DashCA: Using existing index.html');
|
||||
}
|
||||
|
||||
ctx.log.info('deploy', 'DashCA: For full features, copy certificate files to ' + destPath);
|
||||
ctx.log.info('deploy', 'DashCA: Static site deployment completed successfully');
|
||||
} catch (error) {
|
||||
ctx.log.error('deploy', 'DashCA deployment error', { error: error.message });
|
||||
throw new Error(`DashCA deployment failed: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
  /**
   * Creates and starts a Docker container for an app template.
   *
   * Flow: acquire port locks -> remove any stale container with the same
   * name -> check host-port conflicts -> build container config -> pull (or
   * fall back to a local) image -> create + start -> release locks.
   *
   * @param {string} appId - template key, stored in container labels.
   * @param {object} userConfig - user deployment choices (subdomain, port, ip, ...).
   * @param {object} template - app template (docker image, ports, volumes, env).
   * @returns {Promise<string>} the started container's id.
   * @throws {Error} on lock failure, port conflict [DC-203], or image failure [DC-201].
   */
  async function deployContainer(appId, userConfig, template) {
    const containerName = `${DOCKER.CONTAINER_PREFIX}${userConfig.subdomain}`;
    const processedTemplate = helpers.processTemplateVariables(template, userConfig);

    // Port mappings are "host:container[/protocol]"; only the host part is
    // locked and conflict-checked.
    const requestedPorts = processedTemplate.docker.ports.map(portMapping => {
      const [hostPort] = portMapping.split(/[:/]/);
      return hostPort;
    });

    // Locks guard against two concurrent deploys grabbing the same host port.
    let lockId = null;
    try {
      ctx.log.info('deploy', 'Acquiring port locks', { ports: requestedPorts });
      lockId = await ctx.portLockManager.acquirePorts(requestedPorts);
      ctx.log.info('deploy', 'Port locks acquired', { lockId });
    } catch (lockError) {
      throw new Error(`Failed to acquire port locks: ${lockError.message}`);
    }

    try {
      // Remove stale container with same name
      try {
        const existingContainer = ctx.docker.client.getContainer(containerName);
        const info = await existingContainer.inspect();
        ctx.log.info('docker', 'Removing stale container', { containerName, status: info.State.Status });
        await existingContainer.remove({ force: true });
        // Brief pause so Docker fully releases the name/ports before reuse.
        await new Promise(r => setTimeout(r, 2000));
      } catch (e) {
        // Container doesn't exist — normal case
      }

      // Conflict check runs AFTER stale removal so the old container's own
      // ports don't count against the redeploy.
      const conflicts = await helpers.checkPortConflicts(requestedPorts, containerName);
      if (conflicts.length > 0) {
        const conflictDetails = conflicts.map(c => `Port ${c.port} is in use by ${c.usedBy} (${c.app})`).join('; ');
        throw new Error(`[DC-203] Port conflict detected: ${conflictDetails}. Please choose a different port.`);
      }

      const containerConfig = {
        Image: processedTemplate.docker.image,
        name: containerName,
        ExposedPorts: {},
        HostConfig: {
          PortBindings: {},
          Binds: processedTemplate.docker.volumes || [],
          RestartPolicy: { Name: 'unless-stopped' }
        },
        // Environment map -> "KEY=value" strings, as the Docker API expects.
        Env: Object.entries(processedTemplate.docker.environment || {}).map(([k, v]) => `${k}=${v}`),
        // Labels let other routes identify containers this app manages.
        Labels: {
          'sami.managed': 'true', 'sami.app': appId,
          'sami.subdomain': userConfig.subdomain,
          'sami.deployed': new Date().toISOString()
        }
      };

      // Translate "host:container[/protocol]" mappings into the Docker API's
      // ExposedPorts / PortBindings shape (protocol defaults to tcp).
      processedTemplate.docker.ports.forEach(portMapping => {
        const [hostPort, containerPort, protocol = 'tcp'] = portMapping.split(/[:/]/);
        const containerPortKey = `${containerPort}/${protocol}`;
        containerConfig.ExposedPorts[containerPortKey] = {};
        containerConfig.HostConfig.PortBindings[containerPortKey] = [{ HostPort: hostPort }];
      });

      if (processedTemplate.docker.capabilities) {
        containerConfig.HostConfig.CapAdd = processedTemplate.docker.capabilities;
      }

      // Pull the image; if the pull fails (offline, rate limit), fall back to
      // an already-present local image before giving up with [DC-201].
      try {
        ctx.log.info('docker', 'Pulling image', { image: processedTemplate.docker.image });
        await ctx.docker.pull(processedTemplate.docker.image);
        ctx.log.info('docker', 'Image pulled successfully', { image: processedTemplate.docker.image });
      } catch (e) {
        ctx.log.warn('docker', 'Image pull failed, checking if local image exists', { image: processedTemplate.docker.image, error: e.message });
        try {
          const images = await ctx.docker.client.listImages({ filters: { reference: [processedTemplate.docker.image] } });
          if (images.length === 0) throw new Error(`[DC-201] Image ${processedTemplate.docker.image} not found locally and pull failed: ${e.message}`);
          ctx.log.info('docker', 'Using existing local image', { image: processedTemplate.docker.image });
        } catch (listError) {
          throw new Error(`[DC-201] Failed to pull or find image ${processedTemplate.docker.image}: ${e.message}`);
        }
      }

      const container = await ctx.docker.client.createContainer(containerConfig);
      await container.start();

      // Success path: locks released only after the container has started.
      await ctx.portLockManager.releasePorts(lockId);
      ctx.log.info('deploy', 'Port locks released', { lockId });
      return container.id;
    } catch (deployError) {
      // Failure path: always try to release the locks, but never let a
      // release failure mask the original deployment error.
      if (lockId) {
        try {
          await ctx.portLockManager.releasePorts(lockId);
          ctx.log.info('deploy', 'Port locks released after error', { lockId });
        } catch (releaseError) {
          ctx.log.error('deploy', 'Failed to release port locks', { lockId, error: releaseError.message });
        }
      }
      throw deployError;
    }
  }
|
||||
|
||||
// Check for existing container before deployment
|
||||
router.post('/apps/check-existing', ctx.asyncHandler(async (req, res) => {
|
||||
const { appId } = req.body;
|
||||
const template = ctx.APP_TEMPLATES[appId];
|
||||
if (!template) return ctx.errorResponse(res, 400, 'Invalid app template');
|
||||
const existingContainer = await helpers.findExistingContainerByImage(template);
|
||||
if (existingContainer) {
|
||||
res.json({ success: true, exists: true, container: existingContainer, message: `Found existing ${template.name} container: ${existingContainer.name}` });
|
||||
} else {
|
||||
res.json({ success: true, exists: false, message: `No existing ${template.name} container found` });
|
||||
}
|
||||
}, 'check-existing'));
|
||||
|
||||
  // Deploy new app.
  // Orchestrates the full deployment pipeline:
  //   1. validate template / subdomain / port / ip
  //   2. deploy (static site | reuse existing container | new container + health check)
  //   3. optional DNS A record (best-effort; failure becomes a warning)
  //   4. Caddy reverse-proxy / static-site config
  //   5. register service on the dashboard, notify, respond
  // NOTE(review): config.subdomain is only format-checked when present, yet it
  // is used for the container name, Caddy domain and service id — presumably
  // callers always send it; confirm against the frontend.
  router.post('/apps/deploy', ctx.asyncHandler(async (req, res) => {
    const { appId, config } = req.body;
    try {
      ctx.log.info('deploy', 'Deploying app', { appId, subdomain: config.subdomain });
      const template = ctx.APP_TEMPLATES[appId];
      if (!template) {
        await ctx.logError('app-deploy', new Error('Invalid app template'), { appId, config });
        return ctx.errorResponse(res, 400, 'Invalid app template');
      }

      // --- input validation ---
      if (config.subdomain) {
        if (!REGEX.SUBDOMAIN.test(config.subdomain)) {
          return ctx.errorResponse(res, 400, '[DC-301] Invalid subdomain format');
        }
      }
      if (config.port && !isValidPort(config.port)) {
        return ctx.errorResponse(res, 400, 'Invalid port number (must be 1-65535)');
      }

      if (!template.isStaticSite) {
        // Containerized app: validate/default the upstream IP.
        const allowedHostnames = ['localhost', 'host.docker.internal'];
        if (config.ip && !validatorLib.isIP(config.ip) && !allowedHostnames.includes(config.ip)) {
          return ctx.errorResponse(res, 400, '[DC-210] Invalid IP address. Use a valid IP (e.g., 192.168.x.x) or "localhost".');
        }
        if (!config.ip) config.ip = ctx.siteConfig.dnsServerIp || 'localhost';
      } else {
        // Static sites are served directly by Caddy: no DNS record, local ip.
        config.createDns = false;
        config.ip = ctx.siteConfig.dnsServerIp || 'localhost';
      }

      let containerId;
      let usedExisting = false;

      // --- deployment: static site | existing container | new container ---
      if (template.isStaticSite) {
        ctx.log.info('deploy', 'Deploying static site', { appId });
        if (appId === 'dashca') {
          await deployDashCAStaticSite(template, config);
          containerId = null; // static sites have no container
          ctx.log.info('deploy', 'Static site deployed', { appId });
        } else {
          throw new Error(`Unknown static site type: ${appId}`);
        }
      } else if (config.useExisting && config.existingContainerId) {
        // User chose to reuse a container found by /apps/check-existing.
        containerId = config.existingContainerId;
        usedExisting = true;
        ctx.log.info('deploy', 'Using existing container', { containerId });
        if (config.existingPort && !config.port) config.port = config.existingPort;
      } else {
        containerId = await deployContainer(appId, config, template);
        ctx.log.info('deploy', 'Container deployed', { containerId });
        await helpers.waitForHealthCheck(containerId, template.healthCheck, config.port || template.defaultPort);
        ctx.log.info('deploy', 'Container is healthy', { containerId });
      }

      // --- DNS (best-effort: failure is downgraded to a response warning) ---
      let dnsWarning = null;
      if (config.createDns) {
        try {
          await ctx.dns.createRecord(config.subdomain, config.ip);
          ctx.log.info('deploy', 'DNS record created', { domain: ctx.buildDomain(config.subdomain), ip: config.ip });
        } catch (dnsError) {
          await ctx.logError('app-deploy-dns', dnsError, { appId, subdomain: config.subdomain, ip: config.ip });
          dnsWarning = `DNS creation failed: ${dnsError.message}. You may need to create the DNS record manually.`;
          ctx.log.warn('deploy', 'DNS creation failed during deploy', { error: dnsError.message });
        }
      }

      // --- Caddy config (static file server vs reverse proxy) ---
      const caddyOptions = { tailscaleOnly: config.tailscaleOnly || false, allowedIPs: config.allowedIPs || [] };
      let caddyConfig;
      if (template.isStaticSite) {
        const sitePath = platformPaths.sitePath(config.subdomain);
        if (appId === 'dashca') {
          // DashCA must stay reachable over plain HTTP until the client
          // trusts the CA, and proxies /api/* back to this API server.
          caddyOptions.httpAccess = true;
          caddyOptions.apiProxy = 'host.docker.internal:3001';
        }
        caddyConfig = helpers.generateStaticSiteConfig(config.subdomain, sitePath, caddyOptions);
      } else {
        caddyConfig = ctx.caddy.generateConfig(config.subdomain, config.ip, config.port || template.defaultPort, caddyOptions);
      }

      await helpers.addCaddyConfig(config.subdomain, caddyConfig);
      ctx.log.info('deploy', 'Caddy config added', { domain: ctx.buildDomain(config.subdomain), tailscaleOnly: config.tailscaleOnly || false });

      // --- register on the dashboard ---
      await ctx.addServiceToConfig({
        id: config.subdomain, name: template.name,
        logo: template.logo || `/assets/${appId}.png`,
        containerId, appTemplate: appId,
        tailscaleOnly: config.tailscaleOnly || false,
        deployedAt: new Date().toISOString()
      });
      ctx.log.info('deploy', 'Service added to dashboard', { subdomain: config.subdomain });

      // --- response + notification ---
      const response = {
        success: true, containerId, usedExisting,
        url: `https://${ctx.buildDomain(config.subdomain)}`,
        message: usedExisting ? `${template.name} configured using existing container!` : `${template.name} deployed successfully!`,
        setupInstructions: template.setupInstructions || []
      };
      if (dnsWarning) response.warning = dnsWarning;

      const notificationMessage = usedExisting
        ? `**${template.name}** configured using existing container.\nURL: https://${ctx.buildDomain(config.subdomain)}`
        : `**${template.name}** has been deployed successfully.\nURL: https://${ctx.buildDomain(config.subdomain)}`;
      ctx.notification.send('deploymentSuccess', usedExisting ? 'Configuration Complete' : 'Deployment Successful', notificationMessage, 'success');

      res.json(response);
    } catch (error) {
      await ctx.logError('app-deploy', error, { appId, config });
      ctx.log.error('deploy', 'Deployment failed', { appId, error: error.message });
      const template = ctx.APP_TEMPLATES[appId];
      ctx.notification.send('deploymentFailed', 'Deployment Failed', `Failed to deploy **${template?.name || appId}**.\nError: ${error.message}`, 'error');
      ctx.errorResponse(res, 500, ctx.safeErrorMessage(error));
    }
  }, 'apps-deploy'));
|
||||
|
||||
return router;
|
||||
};
|
||||
278
dashcaddy-api/routes/apps/helpers.js
Normal file
278
dashcaddy-api/routes/apps/helpers.js
Normal file
@@ -0,0 +1,278 @@
|
||||
const fs = require('fs');
|
||||
const fsp = require('fs').promises;
|
||||
const path = require('path');
|
||||
const { REGEX, DOCKER } = require('../../constants');
|
||||
const { exists } = require('../../fs-helpers');
|
||||
const platformPaths = require('../../platform-paths');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
|
||||
async function checkPortConflicts(ports, excludeContainerName = null) {
|
||||
const conflicts = [];
|
||||
try {
|
||||
const containers = await ctx.docker.client.listContainers({ all: true });
|
||||
for (const container of containers) {
|
||||
if (excludeContainerName && container.Names.some(n => n === `/${excludeContainerName}`)) continue;
|
||||
if (container.State !== 'running') continue;
|
||||
for (const portInfo of (container.Ports || [])) {
|
||||
if (portInfo.PublicPort) {
|
||||
const publicPort = portInfo.PublicPort.toString();
|
||||
if (ports.includes(publicPort)) {
|
||||
const containerName = container.Names[0]?.replace(/^\//, '') || container.Id.substring(0, 12);
|
||||
const appLabel = container.Labels?.['sami.app'] || 'unknown';
|
||||
conflicts.push({ port: publicPort, usedBy: containerName, app: appLabel, containerId: container.Id.substring(0, 12) });
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
ctx.log.warn('docker', 'Could not check port conflicts', { error: e.message });
|
||||
}
|
||||
return conflicts;
|
||||
}
|
||||
|
||||
async function findExistingContainerByImage(template) {
|
||||
try {
|
||||
const containers = await ctx.docker.client.listContainers({ all: false });
|
||||
const templateImage = template.docker.image.split(':')[0];
|
||||
for (const container of containers) {
|
||||
const containerImage = container.Image.split(':')[0];
|
||||
if (containerImage === templateImage || containerImage.endsWith('/' + templateImage)) {
|
||||
const ports = container.Ports.filter(p => p.PublicPort).map(p => ({
|
||||
hostPort: p.PublicPort, containerPort: p.PrivatePort, protocol: p.Type
|
||||
}));
|
||||
return {
|
||||
id: container.Id, shortId: container.Id.slice(0, 12),
|
||||
name: container.Names[0]?.replace(/^\//, '') || 'unknown',
|
||||
image: container.Image, status: container.Status, state: container.State,
|
||||
ports, primaryPort: ports.length > 0 ? ports[0].hostPort : null,
|
||||
labels: container.Labels || {}
|
||||
};
|
||||
}
|
||||
}
|
||||
return null;
|
||||
} catch (e) {
|
||||
ctx.log.warn('docker', 'Could not check for existing containers', { error: e.message });
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// Convert host path to Docker-compatible mount format (platform-aware)
|
||||
const toDockerDesktopPath = platformPaths.toDockerMountPath;
|
||||
|
||||
function processTemplateVariables(template, config) {
|
||||
const processed = JSON.parse(JSON.stringify(template));
|
||||
const mediaPathInput = config.mediaPath || template.mediaMount?.defaultPath || '/media';
|
||||
const mediaPaths = mediaPathInput.split(',').map(p => p.trim()).filter(p => p).map(p => toDockerDesktopPath(p));
|
||||
|
||||
const replacements = {
|
||||
'{{HOST_IP}}': config.ip,
|
||||
'{{SUBDOMAIN}}': config.subdomain,
|
||||
'{{PORT}}': config.port || template.defaultPort,
|
||||
'{{MEDIA_PATH}}': mediaPaths[0] || '/media',
|
||||
'{{TIMEZONE}}': ctx.siteConfig.timezone || 'UTC'
|
||||
};
|
||||
|
||||
function replaceInObject(obj) {
|
||||
for (const key in obj) {
|
||||
if (typeof obj[key] === 'string') {
|
||||
Object.entries(replacements).forEach(([placeholder, value]) => {
|
||||
obj[key] = obj[key].replace(new RegExp(placeholder, 'g'), value);
|
||||
});
|
||||
} else if (typeof obj[key] === 'object' && obj[key] !== null) {
|
||||
replaceInObject(obj[key]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
replaceInObject(processed);
|
||||
|
||||
// Handle multiple media paths
|
||||
if (mediaPaths.length > 1 && processed.docker?.volumes) {
|
||||
const containerPath = template.mediaMount?.containerPath || '/media';
|
||||
const newVolumes = [];
|
||||
for (const vol of processed.docker.volumes) {
|
||||
if (vol.includes(mediaPaths[0]) && vol.includes(containerPath)) {
|
||||
for (const p of mediaPaths) {
|
||||
const folderName = p.split(/[/\\]/).filter(p => p).pop() || 'media';
|
||||
newVolumes.push(`${p}:${containerPath}/${folderName}`);
|
||||
}
|
||||
} else {
|
||||
newVolumes.push(vol);
|
||||
}
|
||||
}
|
||||
processed.docker.volumes = newVolumes;
|
||||
}
|
||||
|
||||
// Handle Plex claim token
|
||||
if (config.plexClaimToken && processed.docker?.environment?.PLEX_CLAIM !== undefined) {
|
||||
processed.docker.environment.PLEX_CLAIM = config.plexClaimToken;
|
||||
}
|
||||
|
||||
// Apply custom volume overrides
|
||||
if (config.customVolumes?.length && processed.docker?.volumes) {
|
||||
processed.docker.volumes = processed.docker.volumes.map(vol => {
|
||||
const parts = vol.split(':');
|
||||
const containerPath = parts.slice(1).join(':');
|
||||
const override = config.customVolumes.find(cv => cv.containerPath === containerPath);
|
||||
if (override && override.hostPath) return `${toDockerDesktopPath(override.hostPath)}:${containerPath}`;
|
||||
return vol;
|
||||
});
|
||||
}
|
||||
|
||||
return processed;
|
||||
}
|
||||
|
||||
function generateStaticSiteConfig(subdomain, sitePath, options = {}) {
|
||||
const { tailscaleOnly = false, httpAccess = false, apiProxy = null } = options;
|
||||
const domain = ctx.buildDomain(subdomain);
|
||||
|
||||
// Shared block content used by both HTTPS and HTTP blocks
|
||||
function siteBlockContent() {
|
||||
let c = '';
|
||||
c += ` root * ${sitePath}\n\n`;
|
||||
|
||||
if (tailscaleOnly) {
|
||||
c += ` @blocked not remote_ip 100.64.0.0/10\n`;
|
||||
c += ` respond @blocked "Access denied. Tailscale connection required." 403\n\n`;
|
||||
}
|
||||
|
||||
if (apiProxy) {
|
||||
c += ` handle /api/* {\n`;
|
||||
c += ` reverse_proxy ${apiProxy}\n`;
|
||||
c += ` }\n\n`;
|
||||
}
|
||||
|
||||
c += ` @crt path *.crt\n`;
|
||||
c += ` handle @crt {\n`;
|
||||
c += ` header Content-Type application/x-x509-ca-cert\n`;
|
||||
c += ` header Content-Disposition "attachment; filename=\\"{file}\\""\n`;
|
||||
c += ` header Cache-Control "public, max-age=86400"\n`;
|
||||
c += ` file_server\n`;
|
||||
c += ` }\n\n`;
|
||||
c += ` @der path *.der\n`;
|
||||
c += ` handle @der {\n`;
|
||||
c += ` header Content-Type application/x-x509-ca-cert\n`;
|
||||
c += ` header Content-Disposition "attachment; filename=\\"{file}\\""\n`;
|
||||
c += ` header Cache-Control "public, max-age=86400"\n`;
|
||||
c += ` file_server\n`;
|
||||
c += ` }\n\n`;
|
||||
c += ` @mobileconfig path *.mobileconfig\n`;
|
||||
c += ` handle @mobileconfig {\n`;
|
||||
c += ` header Content-Type application/x-apple-aspen-config\n`;
|
||||
c += ` header Content-Disposition "attachment; filename=\\"{file}\\""\n`;
|
||||
c += ` header Cache-Control "public, max-age=86400"\n`;
|
||||
c += ` file_server\n`;
|
||||
c += ` }\n\n`;
|
||||
c += ` @ps1 path *.ps1\n`;
|
||||
c += ` handle @ps1 {\n`;
|
||||
c += ` header Content-Type text/plain\n`;
|
||||
c += ` header Content-Disposition "attachment; filename=\\"{file}\\""\n`;
|
||||
c += ` file_server\n`;
|
||||
c += ` }\n\n`;
|
||||
c += ` @sh path *.sh\n`;
|
||||
c += ` handle @sh {\n`;
|
||||
c += ` header Content-Type text/x-shellscript\n`;
|
||||
c += ` header Content-Disposition "attachment; filename=\\"{file}\\""\n`;
|
||||
c += ` file_server\n`;
|
||||
c += ` }\n\n`;
|
||||
c += ` # Static site with SPA fallback\n`;
|
||||
c += ` handle {\n`;
|
||||
c += ` @notFile not file {path}\n`;
|
||||
c += ` rewrite @notFile /index.html\n`;
|
||||
c += ` file_server\n`;
|
||||
c += ` }\n\n`;
|
||||
c += ` # No cache for HTML\n`;
|
||||
c += ` @htmlfiles {\n`;
|
||||
c += ` path *.html\n`;
|
||||
c += ` path /\n`;
|
||||
c += ` }\n`;
|
||||
c += ` header @htmlfiles Cache-Control "no-store"\n`;
|
||||
return c;
|
||||
}
|
||||
|
||||
// HTTPS block
|
||||
let config = `${domain} {\n`;
|
||||
config += ` tls internal\n\n`;
|
||||
config += siteBlockContent();
|
||||
config += `}`;
|
||||
|
||||
// HTTP companion block for devices that haven't trusted the CA yet
|
||||
if (httpAccess) {
|
||||
config += `\n\n# HTTP access for first-time certificate installation\n`;
|
||||
config += `http://${domain} {\n`;
|
||||
config += siteBlockContent();
|
||||
config += `}`;
|
||||
}
|
||||
|
||||
return config;
|
||||
}
|
||||
|
||||
  /**
   * Polls a container until it is considered healthy, or throws [DC-202]
   * after `maxAttempts` polls spaced 2 seconds apart.
   *
   * Health is determined in priority order:
   *   1. Docker's own HEALTHCHECK, when the image defines one (State.Health).
   *   2. An HTTP GET of `healthPath` on localhost:`port` (2xx or 3xx passes);
   *      abandoned after 5 failed HTTP attempts.
   *   3. Fallback: simply "running" for at least 5 polls (~10s).
   *
   * @returns {Promise<boolean>} true once healthy.
   * @throws {Error} [DC-202] on timeout.
   */
  async function waitForHealthCheck(containerId, healthPath, port, maxAttempts = 20) {
    const delay = 2000;
    let httpCheckFailed = 0;

    for (let i = 0; i < maxAttempts; i++) {
      try {
        const container = ctx.docker.client.getContainer(containerId);
        const info = await container.inspect();
        if (info.State.Running) {
          if (info.State.Health) {
            // Image defines a Docker HEALTHCHECK — trust it exclusively and
            // keep polling until it reports healthy.
            if (info.State.Health.Status === 'healthy') {
              ctx.log.info('docker', 'Container is healthy (Docker health check)', { containerId });
              return true;
            }
          } else if (healthPath && port && httpCheckFailed < 5) {
            try {
              const response = await ctx.fetchT(`http://localhost:${port}${healthPath}`, {
                signal: AbortSignal.timeout(3000), redirect: 'manual'
              });
              // 3xx counts as healthy: many apps redirect / to a login page.
              if (response.ok || (response.status >= 300 && response.status < 400)) {
                ctx.log.info('docker', 'Health check passed', { containerId, status: response.status });
                return true;
              }
            } catch (e) {
              httpCheckFailed++;
              ctx.log.debug('docker', 'HTTP health check failed', { attempt: httpCheckFailed, error: e.message });
            }
          } else {
            // No health signal available (or HTTP checks gave up):
            // accept "running" once it has survived at least 5 polls.
            if (i >= 5) {
              ctx.log.info('docker', 'Container is running', { containerId, waitedSeconds: i * delay / 1000 });
              return true;
            }
          }
        }
      } catch (e) {
        // inspect() failure (container restarting, daemon hiccup) — retry.
        ctx.log.debug('docker', 'Health check attempt failed', { attempt: i + 1, error: e.message });
      }
      if (i < maxAttempts - 1) {
        ctx.log.debug('docker', 'Waiting for container to be healthy', { attempt: i + 1, maxAttempts });
        await new Promise(resolve => setTimeout(resolve, delay));
      }
    }
    throw new Error(`[DC-202] Container failed to become healthy after ${maxAttempts} attempts (${maxAttempts * delay / 1000}s)`);
  }
|
||||
|
||||
async function addCaddyConfig(subdomain, config) {
|
||||
const domain = ctx.buildDomain(subdomain);
|
||||
const existing = await ctx.caddy.read();
|
||||
if (existing.includes(`${domain} {`)) {
|
||||
ctx.log.info('caddy', 'Caddy config already exists, skipping add', { domain });
|
||||
await ctx.caddy.reload(existing);
|
||||
return;
|
||||
}
|
||||
const result = await ctx.caddy.modify(c => c + `\n${config}\n`);
|
||||
if (!result.success) throw new Error(`[DC-303] Failed to add Caddy config for ${domain}: ${result.error}`);
|
||||
await ctx.caddy.verifySite(domain);
|
||||
}
|
||||
|
||||
  // Public helper API consumed by the sibling route modules
  // (deploy.js, removal.js, templates.js).
  return {
    checkPortConflicts,
    findExistingContainerByImage,
    toDockerDesktopPath,
    processTemplateVariables,
    waitForHealthCheck,
    addCaddyConfig,
    generateStaticSiteConfig
  };
};
|
||||
16
dashcaddy-api/routes/apps/index.js
Normal file
16
dashcaddy-api/routes/apps/index.js
Normal file
@@ -0,0 +1,16 @@
|
||||
const express = require('express');
|
||||
const initHelpers = require('./helpers');
|
||||
const initDeploy = require('./deploy');
|
||||
const initRemoval = require('./removal');
|
||||
const initTemplates = require('./templates');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
const helpers = initHelpers(ctx);
|
||||
|
||||
router.use(initDeploy(ctx, helpers));
|
||||
router.use(initRemoval(ctx, helpers));
|
||||
router.use(initTemplates(ctx, helpers));
|
||||
|
||||
return router;
|
||||
};
|
||||
104
dashcaddy-api/routes/apps/removal.js
Normal file
104
dashcaddy-api/routes/apps/removal.js
Normal file
@@ -0,0 +1,104 @@
|
||||
const express = require('express');
const { exists } = require('../../fs-helpers');

module.exports = function(ctx, helpers) {
  const router = express.Router();

  // Remove deployed app.
  // Tears down up to four resources — container, DNS A record, Caddy site
  // block, and the dashboard service entry — recording a per-resource outcome
  // string in `results` instead of failing the whole request on the first
  // error. When deleteContainer !== 'true' the container, DNS record and
  // Caddy block are all kept ("user choice"); the service entry is always
  // removed from the dashboard.
  router.delete('/apps/:appId', ctx.asyncHandler(async (req, res) => {
    const { appId } = req.params;
    const { containerId, subdomain, ip, deleteContainer } = req.query;
    // Query values are strings; only the literal 'true' opts into deletion.
    const shouldDeleteContainer = deleteContainer === 'true';
    const results = { container: null, dns: null, caddy: null, service: null };

    try {
      ctx.log.info('deploy', 'Removing app', { appId, containerId, subdomain, deleteContainer: shouldDeleteContainer });

      // --- Container ---
      if (containerId && shouldDeleteContainer) {
        try {
          const container = ctx.docker.client.getContainer(containerId);
          // Best-effort stop first; failure (already stopped) is debug-only.
          try { await container.stop(); ctx.log.info('docker', 'Container stopped', { containerId }); }
          catch (stopError) { ctx.log.debug('docker', 'Container stop note', { containerId, note: stopError.message }); }
          await container.remove({ force: true });
          results.container = 'removed';
          ctx.log.info('docker', 'Container removed', { containerId });
        } catch (error) {
          results.container = error.message.includes('no such container') ? 'already removed' : error.message;
        }
      } else if (containerId && !shouldDeleteContainer) {
        results.container = 'kept (user choice)';
      }

      // --- DNS A record ---
      if (shouldDeleteContainer && subdomain && ctx.dns.getToken()) {
        try {
          const domain = ctx.buildDomain(subdomain);
          // Look up the record's actual IP first: the DNS server requires an
          // exact ipAddress match to delete, and the caller-supplied `ip`
          // may be stale or missing.
          const getResult = await ctx.dns.call(ctx.siteConfig.dnsServerIp, '/api/zones/records/get', {
            token: ctx.dns.getToken(), domain, zone: ctx.siteConfig.tld.replace(/^\./, ''), listZone: 'true'
          });
          let recordIp = ip || 'localhost';
          if (getResult.status === 'ok' && getResult.response?.records) {
            const aRecord = getResult.response.records.find(r => r.type === 'A');
            if (aRecord && aRecord.rData?.ipAddress) recordIp = aRecord.rData.ipAddress;
          }
          const dnsResult = await ctx.dns.call(ctx.siteConfig.dnsServerIp, '/api/zones/records/delete', {
            token: ctx.dns.getToken(), domain, type: 'A', ipAddress: recordIp
          });
          results.dns = dnsResult.status === 'ok' ? 'deleted' : (dnsResult.errorMessage || 'failed');
          ctx.log.info('dns', 'DNS record removal', { result: results.dns });
        } catch (error) {
          results.dns = error.message;
        }
      } else if (!shouldDeleteContainer) {
        results.dns = 'kept (user choice)';
      } else {
        results.dns = 'skipped (no subdomain or token)';
      }

      // --- Caddy site block ---
      if (shouldDeleteContainer && subdomain) {
        try {
          const domain = ctx.buildDomain(subdomain);
          let content = await ctx.caddy.read();
          const escapedDomain = domain.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
          // Matches "<domain> { ... }" with braces nested up to two levels
          // deep — enough for the site blocks this project generates.
          const siteBlockRegex = new RegExp(`\\n?${escapedDomain}\\s*\\{[^{}]*(?:\\{[^{}]*(?:\\{[^{}]*\\}[^{}]*)*\\}[^{}]*)*\\}\\s*`, 'g');
          const originalLength = content.length;
          content = content.replace(siteBlockRegex, '\n');
          // Length change is the "did we remove anything" signal.
          if (content.length !== originalLength) {
            content = content.replace(/\n{3,}/g, '\n\n');
            const caddyResult = await ctx.caddy.modify(() => content);
            results.caddy = caddyResult.success ? 'removed' : 'removed (reload failed)';
          } else {
            results.caddy = 'not found';
          }
          ctx.log.info('caddy', 'Caddy config removal', { result: results.caddy });
        } catch (error) {
          results.caddy = error.message;
        }
      } else if (!shouldDeleteContainer) {
        results.caddy = 'kept (user choice)';
      }

      // --- Dashboard service entry (removed regardless of deleteContainer) ---
      try {
        if (await exists(ctx.SERVICES_FILE)) {
          let removed = false;
          await ctx.servicesStateManager.update(services => {
            const initialLength = services.length;
            const filtered = services.filter(s => s.id !== subdomain && s.appTemplate !== appId);
            removed = filtered.length !== initialLength;
            return filtered;
          });
          results.service = removed ? 'removed' : 'not found';
        }
        ctx.log.info('deploy', 'Service config removal', { result: results.service });
      } catch (error) {
        results.service = error.message;
      }

      res.json({ success: true, message: `App ${appId} removal completed`, results });
    } catch (error) {
      await ctx.logError('app-removal', error);
      ctx.errorResponse(res, 500, ctx.safeErrorMessage(error), { results });
    }
  }, 'apps-delete'));

  return router;
};
|
||||
137
dashcaddy-api/routes/apps/templates.js
Normal file
137
dashcaddy-api/routes/apps/templates.js
Normal file
@@ -0,0 +1,137 @@
|
||||
const express = require('express');
|
||||
const { exists } = require('../../fs-helpers');
|
||||
|
||||
module.exports = function(ctx, helpers) {
|
||||
const router = express.Router();
|
||||
|
||||
// Get available app templates
|
||||
router.get('/apps/templates', ctx.asyncHandler(async (req, res) => {
|
||||
res.json({
|
||||
success: true,
|
||||
templates: ctx.APP_TEMPLATES,
|
||||
categories: ctx.TEMPLATE_CATEGORIES,
|
||||
difficultyLevels: ctx.DIFFICULTY_LEVELS
|
||||
});
|
||||
}, 'apps-templates'));
|
||||
|
||||
// Get specific app template
|
||||
router.get('/apps/templates/:appId', ctx.asyncHandler(async (req, res) => {
|
||||
const { appId } = req.params;
|
||||
const template = ctx.APP_TEMPLATES[appId];
|
||||
if (!template) {
|
||||
const { NotFoundError } = require('../../errors');
|
||||
throw new NotFoundError('App template');
|
||||
}
|
||||
res.json({ success: true, template });
|
||||
}, 'apps-template-detail'));
|
||||
|
||||
// Check port availability
|
||||
router.get('/apps/ports/:port/check', ctx.asyncHandler(async (req, res) => {
|
||||
const port = req.params.port;
|
||||
const conflicts = await helpers.checkPortConflicts([port]);
|
||||
if (conflicts.length > 0) {
|
||||
const conflict = conflicts[0];
|
||||
res.json({ available: false, port, conflict: { usedBy: conflict.usedBy, app: conflict.app, containerId: conflict.containerId } });
|
||||
} else {
|
||||
res.json({ available: true, port });
|
||||
}
|
||||
}, 'check-port'));
|
||||
|
||||
// Get suggested available port
|
||||
router.get('/apps/ports/:basePort/suggest', ctx.asyncHandler(async (req, res) => {
|
||||
const basePort = parseInt(req.params.basePort) || 8080;
|
||||
const maxAttempts = 100;
|
||||
const usedPorts = await ctx.docker.getUsedPorts();
|
||||
for (let port = basePort; port < basePort + maxAttempts; port++) {
|
||||
if (!usedPorts.has(port)) {
|
||||
res.json({ success: true, suggestedPort: port, basePort });
|
||||
return;
|
||||
}
|
||||
}
|
||||
ctx.errorResponse(res, 400, `No available ports found in range ${basePort}-${basePort + maxAttempts}`);
|
||||
}, 'suggest-port'));
|
||||
|
||||
// Update subdomain for deployed app
|
||||
router.post('/apps/update-subdomain', ctx.asyncHandler(async (req, res) => {
|
||||
const { serviceId, oldSubdomain, newSubdomain, containerId, ip } = req.body;
|
||||
ctx.log.info('deploy', 'Updating subdomain', { oldSubdomain, newSubdomain });
|
||||
const results = { oldDns: null, newDns: null, caddy: null, service: null };
|
||||
|
||||
if (oldSubdomain && ctx.dns.getToken()) {
|
||||
try {
|
||||
const oldDomain = oldSubdomain.includes('.') ? oldSubdomain : ctx.buildDomain(oldSubdomain);
|
||||
const result = await ctx.dns.call(ctx.siteConfig.dnsServerIp, '/api/zones/records/delete', {
|
||||
token: ctx.dns.getToken(), domain: oldDomain, type: 'A', ipAddress: ip || 'localhost'
|
||||
});
|
||||
results.oldDns = result.status === 'ok' ? 'deleted' : result.errorMessage;
|
||||
ctx.log.info('dns', 'Old DNS record deleted', { domain: oldDomain });
|
||||
} catch (error) {
|
||||
results.oldDns = `failed: ${error.message}`;
|
||||
ctx.log.warn('dns', 'Old DNS deletion warning', { error: error.message });
|
||||
}
|
||||
}
|
||||
|
||||
if (newSubdomain && ctx.dns.getToken()) {
|
||||
try {
|
||||
await ctx.dns.createRecord(newSubdomain, ip || 'localhost');
|
||||
results.newDns = 'created';
|
||||
ctx.log.info('dns', 'New DNS record created', { domain: ctx.buildDomain(newSubdomain) });
|
||||
} catch (error) {
|
||||
results.newDns = `failed: ${error.message}`;
|
||||
ctx.log.warn('dns', 'New DNS creation warning', { error: error.message });
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
if (await exists(ctx.caddy.filePath)) {
|
||||
const oldDomain = oldSubdomain.includes('.') ? oldSubdomain : ctx.buildDomain(oldSubdomain);
|
||||
const newDomain = newSubdomain.includes('.') ? newSubdomain : ctx.buildDomain(newSubdomain);
|
||||
const escapedOld = oldDomain.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
|
||||
const oldBlockRegex = new RegExp(`${escapedOld}(?::\\d+)?\\s*\\{[^{}]*(?:\\{[^{}]*(?:\\{[^{}]*\\}[^{}]*)*\\}[^{}]*)*\\}`, 'g');
|
||||
const content = await ctx.caddy.read();
|
||||
if (oldBlockRegex.test(content)) {
|
||||
const caddyResult = await ctx.caddy.modify(c => {
|
||||
const re = new RegExp(`${escapedOld}(?::\\d+)?\\s*\\{[^{}]*(?:\\{[^{}]*(?:\\{[^{}]*\\}[^{}]*)*\\}[^{}]*)*\\}`, 'g');
|
||||
return c.replace(re, match => match.replace(oldDomain, newDomain));
|
||||
});
|
||||
results.caddy = caddyResult.success ? 'updated' : 'updated (reload failed)';
|
||||
} else {
|
||||
results.caddy = 'old config not found';
|
||||
}
|
||||
} else {
|
||||
results.caddy = 'caddyfile not found';
|
||||
}
|
||||
} catch (error) {
|
||||
results.caddy = `failed: ${error.message}`;
|
||||
ctx.log.error('caddy', 'Caddy update error', { error: error.message });
|
||||
}
|
||||
|
||||
try {
|
||||
if (await exists(ctx.SERVICES_FILE)) {
|
||||
await ctx.servicesStateManager.update(services => {
|
||||
const serviceIndex = services.findIndex(s => s.id === oldSubdomain || s.id === serviceId);
|
||||
if (serviceIndex !== -1) {
|
||||
services[serviceIndex].id = newSubdomain;
|
||||
results.service = 'updated';
|
||||
ctx.log.info('deploy', 'Service config updated in services.json');
|
||||
} else {
|
||||
results.service = 'not found';
|
||||
}
|
||||
return services;
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
results.service = `failed: ${error.message}`;
|
||||
ctx.log.warn('deploy', 'Service update warning', { error: error.message || String(error) });
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `Subdomain updated: ${oldSubdomain} -> ${newSubdomain}`,
|
||||
newUrl: `https://${ctx.buildDomain(newSubdomain)}`,
|
||||
results
|
||||
});
|
||||
}, 'update-subdomain'));
|
||||
|
||||
return router;
|
||||
};
|
||||
483
dashcaddy-api/routes/arr/config.js
Normal file
483
dashcaddy-api/routes/arr/config.js
Normal file
@@ -0,0 +1,483 @@
|
||||
const express = require('express');
|
||||
const { APP_PORTS, ARR_SERVICES } = require('../../constants');
|
||||
const { validateURL, validateToken } = require('../../input-validator');
|
||||
|
||||
module.exports = function(ctx, helpers) {
|
||||
const router = express.Router();
|
||||
|
||||
// Auto-configure Overseerr with detected services
|
||||
router.post('/arr/configure-overseerr', ctx.asyncHandler(async (req, res) => {
|
||||
const { radarr, sonarr } = req.body;
|
||||
const results = { radarr: null, sonarr: null };
|
||||
|
||||
// Step 1: Authenticate with Overseerr via Plex token
|
||||
let overseerrUrl = `http://host.docker.internal:${APP_PORTS.overseerr}`;
|
||||
const overseerrSession = await helpers.getOverseerrSession();
|
||||
|
||||
if (!overseerrSession) {
|
||||
return ctx.errorResponse(res, 502, 'Could not authenticate with Overseerr. Make sure Plex and Overseerr are running.', {
|
||||
hint: 'Complete Overseerr setup wizard and link your Plex account first, then try again.'
|
||||
});
|
||||
}
|
||||
|
||||
ctx.log.info('arr', 'Authenticated with Overseerr via Plex session');
|
||||
|
||||
// Helper to make authenticated requests to Overseerr
|
||||
const overseerrFetch = async (endpoint, options = {}) => {
|
||||
const url = `${overseerrUrl}${endpoint}`;
|
||||
const response = await ctx.fetchT(url, {
|
||||
...options,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Cookie': overseerrSession.cookie,
|
||||
...options.headers
|
||||
}
|
||||
});
|
||||
return response;
|
||||
};
|
||||
|
||||
// Step 2: Verify Overseerr is accessible
|
||||
try {
|
||||
const statusRes = await overseerrFetch('/api/v1/status');
|
||||
if (!statusRes.ok) {
|
||||
return ctx.errorResponse(res, 502, 'Cannot connect to Overseerr', {
|
||||
hint: 'Make sure Overseerr is running on port 5055'
|
||||
});
|
||||
}
|
||||
} catch (e) {
|
||||
return ctx.errorResponse(res, 502, `Cannot reach Overseerr: ${e.message}`, {
|
||||
hint: 'Check if Overseerr container is running'
|
||||
});
|
||||
}
|
||||
|
||||
// Step 3: Configure Radarr if provided
|
||||
if (radarr?.apiKey && radarr?.url) {
|
||||
try {
|
||||
const radarrUrlObj = new URL(radarr.url);
|
||||
const radarrBasePath = radarrUrlObj.pathname.replace(/\/+$/, '');
|
||||
const radarrBaseUrl = radarr.url.replace(/\/+$/, '');
|
||||
|
||||
// Fetch quality profiles from Radarr
|
||||
const profilesRes = await ctx.fetchT(`${radarrBaseUrl}/api/v3/qualityprofile`, {
|
||||
headers: { 'X-Api-Key': radarr.apiKey }
|
||||
});
|
||||
const profiles = profilesRes.ok ? await profilesRes.json() : [];
|
||||
const defaultProfile = profiles[0] || { id: 1, name: 'Any' };
|
||||
|
||||
// Fetch root folders from Radarr
|
||||
const rootFoldersRes = await ctx.fetchT(`${radarrBaseUrl}/api/v3/rootfolder`, {
|
||||
headers: { 'X-Api-Key': radarr.apiKey }
|
||||
});
|
||||
const rootFolders = rootFoldersRes.ok ? await rootFoldersRes.json() : [];
|
||||
const defaultRootFolder = rootFolders[0]?.path || '/movies';
|
||||
|
||||
ctx.log.info('arr', 'Radarr configured', { profile: defaultProfile.name, profileId: defaultProfile.id, rootFolder: defaultRootFolder });
|
||||
|
||||
const radarrConfig = {
|
||||
name: 'Radarr',
|
||||
hostname: radarrUrlObj.hostname,
|
||||
port: parseInt(radarrUrlObj.port) || (radarrUrlObj.protocol === 'https:' ? 443 : APP_PORTS.radarr),
|
||||
apiKey: radarr.apiKey,
|
||||
useSsl: radarrUrlObj.protocol === 'https:',
|
||||
baseUrl: radarrBasePath || '',
|
||||
activeProfileId: defaultProfile.id,
|
||||
activeProfileName: defaultProfile.name,
|
||||
activeDirectory: defaultRootFolder,
|
||||
is4k: false,
|
||||
minimumAvailability: 'released',
|
||||
isDefault: true,
|
||||
externalUrl: radarr.url,
|
||||
tags: []
|
||||
};
|
||||
|
||||
const radarrRes = await overseerrFetch('/api/v1/settings/radarr', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(radarrConfig)
|
||||
});
|
||||
|
||||
if (radarrRes.ok) {
|
||||
results.radarr = 'configured';
|
||||
} else {
|
||||
const errorText = await radarrRes.text();
|
||||
results.radarr = `failed: ${errorText}`;
|
||||
}
|
||||
} catch (e) {
|
||||
results.radarr = `error: ${e.message}`;
|
||||
}
|
||||
}
|
||||
|
||||
// Step 4: Configure Sonarr if provided
|
||||
if (sonarr?.apiKey && sonarr?.url) {
|
||||
try {
|
||||
const sonarrUrlObj = new URL(sonarr.url);
|
||||
const sonarrBasePath = sonarrUrlObj.pathname.replace(/\/+$/, '');
|
||||
const sonarrBaseUrl = sonarr.url.replace(/\/+$/, '');
|
||||
|
||||
// Fetch quality profiles from Sonarr
|
||||
const profilesRes = await ctx.fetchT(`${sonarrBaseUrl}/api/v3/qualityprofile`, {
|
||||
headers: { 'X-Api-Key': sonarr.apiKey }
|
||||
});
|
||||
const profiles = profilesRes.ok ? await profilesRes.json() : [];
|
||||
const defaultProfile = profiles[0] || { id: 1, name: 'Any' };
|
||||
|
||||
// Fetch root folders from Sonarr
|
||||
const rootFoldersRes = await ctx.fetchT(`${sonarrBaseUrl}/api/v3/rootfolder`, {
|
||||
headers: { 'X-Api-Key': sonarr.apiKey }
|
||||
});
|
||||
const rootFolders = rootFoldersRes.ok ? await rootFoldersRes.json() : [];
|
||||
const defaultRootFolder = rootFolders[0]?.path || '/tv';
|
||||
|
||||
// Fetch language profiles from Sonarr (v3 uses languageprofile, v4 doesn't need it)
|
||||
let languageProfileId = 1;
|
||||
try {
|
||||
const langRes = await ctx.fetchT(`${sonarrBaseUrl}/api/v3/languageprofile`, {
|
||||
headers: { 'X-Api-Key': sonarr.apiKey }
|
||||
});
|
||||
if (langRes.ok) {
|
||||
const langProfiles = await langRes.json();
|
||||
languageProfileId = langProfiles[0]?.id || 1;
|
||||
}
|
||||
} catch (e) {
|
||||
// Language profiles might not exist in Sonarr v4
|
||||
}
|
||||
|
||||
ctx.log.info('arr', 'Sonarr configured', { profile: defaultProfile.name, profileId: defaultProfile.id, rootFolder: defaultRootFolder });
|
||||
|
||||
const sonarrConfig = {
|
||||
name: 'Sonarr',
|
||||
hostname: sonarrUrlObj.hostname,
|
||||
port: parseInt(sonarrUrlObj.port) || (sonarrUrlObj.protocol === 'https:' ? 443 : APP_PORTS.sonarr),
|
||||
apiKey: sonarr.apiKey,
|
||||
useSsl: sonarrUrlObj.protocol === 'https:',
|
||||
baseUrl: sonarrBasePath || '',
|
||||
activeProfileId: defaultProfile.id,
|
||||
activeProfileName: defaultProfile.name,
|
||||
activeDirectory: defaultRootFolder,
|
||||
activeLanguageProfileId: languageProfileId,
|
||||
is4k: false,
|
||||
isDefault: true,
|
||||
enableSeasonFolders: true,
|
||||
externalUrl: sonarr.url,
|
||||
tags: []
|
||||
};
|
||||
|
||||
const sonarrRes = await overseerrFetch('/api/v1/settings/sonarr', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(sonarrConfig)
|
||||
});
|
||||
|
||||
if (sonarrRes.ok) {
|
||||
results.sonarr = 'configured';
|
||||
} else {
|
||||
const errorText = await sonarrRes.text();
|
||||
results.sonarr = `failed: ${errorText}`;
|
||||
}
|
||||
} catch (e) {
|
||||
results.sonarr = `error: ${e.message}`;
|
||||
}
|
||||
}
|
||||
|
||||
const anyConfigured = results.radarr === 'configured' || results.sonarr === 'configured';
|
||||
|
||||
res.json({
|
||||
success: anyConfigured,
|
||||
message: anyConfigured ? 'Services configured in Overseerr' : 'Configuration failed',
|
||||
results
|
||||
});
|
||||
}, 'arr-configure-overseerr'));
|
||||
|
||||
// Test connection to external Radarr/Sonarr service
|
||||
router.post('/arr/test-connection', ctx.asyncHandler(async (req, res) => {
|
||||
try {
|
||||
const { service, url, apiKey } = req.body;
|
||||
|
||||
if (!url || !apiKey) {
|
||||
return ctx.errorResponse(res, 400, 'URL and API key required');
|
||||
}
|
||||
|
||||
// Validate URL format
|
||||
try {
|
||||
validateURL(url);
|
||||
} catch (validationErr) {
|
||||
return ctx.errorResponse(res, 400, validationErr.message);
|
||||
}
|
||||
|
||||
// Validate API key format
|
||||
try {
|
||||
validateToken(apiKey);
|
||||
} catch (validationErr) {
|
||||
return ctx.errorResponse(res, 400, 'Invalid API key format');
|
||||
}
|
||||
|
||||
// Normalize URL - remove trailing slash
|
||||
let baseUrl = url.replace(/\/+$/, '');
|
||||
|
||||
// Build the API endpoint
|
||||
let apiEndpoint;
|
||||
let headers = { 'X-Api-Key': apiKey, 'Accept': 'application/json' };
|
||||
|
||||
if (service === 'radarr' || service === 'sonarr' || service === 'lidarr') {
|
||||
apiEndpoint = `${baseUrl}/api/v3/system/status`;
|
||||
} else if (service === 'prowlarr') {
|
||||
apiEndpoint = `${baseUrl}/api/v1/system/status`;
|
||||
} else if (service === 'plex') {
|
||||
apiEndpoint = `${baseUrl}/identity`;
|
||||
headers = { 'X-Plex-Token': apiKey, 'Accept': 'application/json' };
|
||||
} else {
|
||||
return ctx.errorResponse(res, 400, `Unknown service: ${service}`);
|
||||
}
|
||||
|
||||
ctx.log.info('arr', 'Testing service connection', { service });
|
||||
|
||||
// Make the API call
|
||||
const response = await ctx.fetchT(apiEndpoint, {
|
||||
method: 'GET',
|
||||
headers,
|
||||
signal: AbortSignal.timeout(10000)
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
const data = await response.json();
|
||||
const version = service === 'plex' ? data.MediaContainer?.version : data.version;
|
||||
const appName = service === 'plex' ? 'Plex' : data.appName;
|
||||
ctx.log.info('arr', 'Service connection successful', { service, appName, version });
|
||||
return res.json({
|
||||
success: true,
|
||||
version,
|
||||
appName
|
||||
});
|
||||
} else if (response.status === 401) {
|
||||
return ctx.errorResponse(res, 401, 'Invalid API key');
|
||||
} else if (response.status === 404) {
|
||||
return ctx.errorResponse(res, 404, 'API not found - check URL');
|
||||
} else {
|
||||
return ctx.errorResponse(res, 502, `HTTP ${response.status}`);
|
||||
}
|
||||
} catch (error) {
|
||||
await ctx.logError('arr-test-connection', error);
|
||||
if (error.cause?.code === 'ECONNREFUSED') {
|
||||
return ctx.errorResponse(res, 502, 'Connection refused');
|
||||
} else if (error.name === 'AbortError' || error.message?.includes('timeout')) {
|
||||
return ctx.errorResponse(res, 504, 'Connection timeout');
|
||||
}
|
||||
return ctx.errorResponse(res, 500, ctx.safeErrorMessage(error));
|
||||
}
|
||||
}, 'arr-test-connection'));
|
||||
|
||||
// Quick setup: Detect all services and configure Overseerr automatically
|
||||
router.post('/arr/auto-setup', ctx.asyncHandler(async (req, res) => {
|
||||
ctx.log.info('arr', 'Starting arr auto-setup');
|
||||
|
||||
// Step 1: Detect all running arr services
|
||||
const containers = await ctx.docker.client.listContainers({ all: false });
|
||||
const detected = {};
|
||||
|
||||
const servicePatterns = ARR_SERVICES;
|
||||
|
||||
for (const container of containers) {
|
||||
const containerName = container.Names[0]?.replace(/^\//, '').toLowerCase() || '';
|
||||
const image = container.Image.toLowerCase();
|
||||
|
||||
for (const [service, config] of Object.entries(servicePatterns)) {
|
||||
if (config.names.some(n => containerName.includes(n) || image.includes(n))) {
|
||||
const portInfo = container.Ports.find(p => p.PrivatePort === config.port);
|
||||
const exposedPort = portInfo?.PublicPort || config.port;
|
||||
|
||||
detected[service] = {
|
||||
containerId: container.Id,
|
||||
containerName: container.Names[0]?.replace(/^\//, ''),
|
||||
port: exposedPort,
|
||||
url: `http://host.docker.internal:${exposedPort}`,
|
||||
localUrl: `http://localhost:${exposedPort}`
|
||||
};
|
||||
|
||||
// Extract API key for arr services
|
||||
if (['radarr', 'sonarr', 'lidarr', 'prowlarr'].includes(service)) {
|
||||
detected[service].apiKey = await helpers.getArrApiKey(containerName);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: Check what we found
|
||||
const summary = {
|
||||
overseerrFound: !!detected.overseerr,
|
||||
radarrFound: !!detected.radarr?.apiKey,
|
||||
sonarrFound: !!detected.sonarr?.apiKey,
|
||||
lidarrFound: !!detected.lidarr?.apiKey,
|
||||
prowlarrFound: !!detected.prowlarr?.apiKey
|
||||
};
|
||||
|
||||
ctx.log.info('arr', 'Detected services', summary);
|
||||
|
||||
if (!summary.overseerrFound) {
|
||||
return ctx.errorResponse(res, 400, 'Overseerr is not running. Deploy it first.', {
|
||||
detected,
|
||||
summary
|
||||
});
|
||||
}
|
||||
|
||||
if (!summary.radarrFound && !summary.sonarrFound) {
|
||||
return ctx.errorResponse(res, 400, 'No Radarr or Sonarr found with valid API keys. Deploy at least one first.', {
|
||||
detected,
|
||||
summary
|
||||
});
|
||||
}
|
||||
|
||||
// Step 3: Authenticate with Overseerr via Plex session
|
||||
const overseerrSession = await helpers.getOverseerrSession();
|
||||
|
||||
if (!overseerrSession) {
|
||||
return ctx.errorResponse(res, 502, 'Could not authenticate with Overseerr. Make sure Plex and Overseerr are running.', {
|
||||
setupUrl: detected.overseerr.localUrl,
|
||||
detected,
|
||||
summary
|
||||
});
|
||||
}
|
||||
|
||||
ctx.log.info('arr', 'Authenticated with Overseerr via Plex session');
|
||||
|
||||
// Helper for authenticated Overseerr requests
|
||||
const overseerrFetch = async (endpoint, options = {}) => {
|
||||
return ctx.fetchT(`${detected.overseerr.url}${endpoint}`, {
|
||||
...options,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Cookie': overseerrSession.cookie,
|
||||
...options.headers
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
// Step 4: Configure Radarr in Overseerr
|
||||
const configResults = {};
|
||||
|
||||
if (detected.radarr?.apiKey) {
|
||||
try {
|
||||
// Fetch quality profiles from Radarr
|
||||
const profilesRes = await ctx.fetchT(`${detected.radarr.localUrl}/api/v3/qualityprofile`, {
|
||||
headers: { 'X-Api-Key': detected.radarr.apiKey }
|
||||
});
|
||||
const profiles = profilesRes.ok ? await profilesRes.json() : [];
|
||||
const defaultProfile = profiles[0] || { id: 1, name: 'Any' };
|
||||
|
||||
// Fetch root folders from Radarr
|
||||
const rootFoldersRes = await ctx.fetchT(`${detected.radarr.localUrl}/api/v3/rootfolder`, {
|
||||
headers: { 'X-Api-Key': detected.radarr.apiKey }
|
||||
});
|
||||
const rootFolders = rootFoldersRes.ok ? await rootFoldersRes.json() : [];
|
||||
const defaultRootFolder = rootFolders[0]?.path || '/movies';
|
||||
|
||||
ctx.log.info('arr', 'Radarr profile selected', { profile: defaultProfile.name, rootFolder: defaultRootFolder });
|
||||
|
||||
const radarrConfig = {
|
||||
name: 'Radarr',
|
||||
hostname: 'host.docker.internal',
|
||||
port: detected.radarr.port,
|
||||
apiKey: detected.radarr.apiKey,
|
||||
useSsl: false,
|
||||
baseUrl: '',
|
||||
activeProfileId: defaultProfile.id,
|
||||
activeProfileName: defaultProfile.name,
|
||||
activeDirectory: defaultRootFolder,
|
||||
is4k: false,
|
||||
minimumAvailability: 'released',
|
||||
isDefault: true,
|
||||
externalUrl: detected.radarr.localUrl,
|
||||
tags: []
|
||||
};
|
||||
|
||||
const resp = await overseerrFetch('/api/v1/settings/radarr', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(radarrConfig)
|
||||
});
|
||||
|
||||
configResults.radarr = resp.ok ? 'configured' : `failed: ${await resp.text()}`;
|
||||
} catch (e) {
|
||||
configResults.radarr = `error: ${e.message}`;
|
||||
}
|
||||
}
|
||||
|
||||
// Step 5: Configure Sonarr in Overseerr
|
||||
if (detected.sonarr?.apiKey) {
|
||||
try {
|
||||
// Fetch quality profiles from Sonarr
|
||||
const profilesRes = await ctx.fetchT(`${detected.sonarr.localUrl}/api/v3/qualityprofile`, {
|
||||
headers: { 'X-Api-Key': detected.sonarr.apiKey }
|
||||
});
|
||||
const profiles = profilesRes.ok ? await profilesRes.json() : [];
|
||||
const defaultProfile = profiles[0] || { id: 1, name: 'Any' };
|
||||
|
||||
// Fetch root folders from Sonarr
|
||||
const rootFoldersRes = await ctx.fetchT(`${detected.sonarr.localUrl}/api/v3/rootfolder`, {
|
||||
headers: { 'X-Api-Key': detected.sonarr.apiKey }
|
||||
});
|
||||
const rootFolders = rootFoldersRes.ok ? await rootFoldersRes.json() : [];
|
||||
const defaultRootFolder = rootFolders[0]?.path || '/tv';
|
||||
|
||||
// Fetch language profiles (Sonarr v3)
|
||||
let languageProfileId = 1;
|
||||
try {
|
||||
const langRes = await ctx.fetchT(`${detected.sonarr.localUrl}/api/v3/languageprofile`, {
|
||||
headers: { 'X-Api-Key': detected.sonarr.apiKey }
|
||||
});
|
||||
if (langRes.ok) {
|
||||
const langProfiles = await langRes.json();
|
||||
languageProfileId = langProfiles[0]?.id || 1;
|
||||
}
|
||||
} catch (e) { /* Sonarr v4 doesn't need this */ }
|
||||
|
||||
ctx.log.info('arr', 'Sonarr profile selected', { profile: defaultProfile.name, rootFolder: defaultRootFolder });
|
||||
|
||||
const sonarrConfig = {
|
||||
name: 'Sonarr',
|
||||
hostname: 'host.docker.internal',
|
||||
port: detected.sonarr.port,
|
||||
apiKey: detected.sonarr.apiKey,
|
||||
useSsl: false,
|
||||
baseUrl: '',
|
||||
activeProfileId: defaultProfile.id,
|
||||
activeProfileName: defaultProfile.name,
|
||||
activeDirectory: defaultRootFolder,
|
||||
activeLanguageProfileId: languageProfileId,
|
||||
is4k: false,
|
||||
isDefault: true,
|
||||
enableSeasonFolders: true,
|
||||
externalUrl: detected.sonarr.localUrl,
|
||||
tags: []
|
||||
};
|
||||
|
||||
const resp = await overseerrFetch('/api/v1/settings/sonarr', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(sonarrConfig)
|
||||
});
|
||||
|
||||
configResults.sonarr = resp.ok ? 'configured' : `failed: ${await resp.text()}`;
|
||||
} catch (e) {
|
||||
configResults.sonarr = `error: ${e.message}`;
|
||||
}
|
||||
}
|
||||
|
||||
const anyConfigured = configResults.radarr === 'configured' || configResults.sonarr === 'configured';
|
||||
|
||||
// Send notification
|
||||
if (anyConfigured) {
|
||||
ctx.notification.send(
|
||||
'deploymentSuccess',
|
||||
'Arr Stack Auto-Connected',
|
||||
`Overseerr configured: ${Object.entries(configResults).filter(([k,v]) => v === 'configured').map(([k]) => k).join(', ')}`,
|
||||
'success'
|
||||
);
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: anyConfigured,
|
||||
message: anyConfigured ? 'Auto-setup completed successfully!' : 'Configuration failed',
|
||||
detected,
|
||||
configResults,
|
||||
summary
|
||||
});
|
||||
}, 'arr-auto-setup'));
|
||||
|
||||
return router;
|
||||
};
|
||||
129
dashcaddy-api/routes/arr/credentials.js
Normal file
129
dashcaddy-api/routes/arr/credentials.js
Normal file
@@ -0,0 +1,129 @@
|
||||
const express = require('express');
|
||||
const { validateURL, validateToken } = require('../../input-validator');
|
||||
|
||||
module.exports = function(ctx, helpers) {
|
||||
const router = express.Router();
|
||||
|
||||
// Store arr service credentials
|
||||
router.post('/arr/credentials', ctx.asyncHandler(async (req, res) => {
|
||||
const { service, apiKey, url, seedboxBaseUrl } = req.body;
|
||||
|
||||
if (!service || !apiKey) {
|
||||
return ctx.errorResponse(res, 400, 'Service name and API key required');
|
||||
}
|
||||
|
||||
const validServices = ['radarr', 'sonarr', 'prowlarr', 'lidarr', 'plex'];
|
||||
if (!validServices.includes(service)) {
|
||||
return ctx.errorResponse(res, 400, `Invalid service. Must be one of: ${validServices.join(', ')}`);
|
||||
}
|
||||
|
||||
// Validate API key format
|
||||
try {
|
||||
validateToken(apiKey);
|
||||
} catch (e) {
|
||||
return ctx.errorResponse(res, 400, 'Invalid API key format');
|
||||
}
|
||||
|
||||
// Validate URL if provided
|
||||
if (url) {
|
||||
try { validateURL(url); } catch (e) {
|
||||
return ctx.errorResponse(res, 400, 'Invalid URL format');
|
||||
}
|
||||
}
|
||||
|
||||
// Determine credential key
|
||||
const credKey = service === 'plex' ? 'arr.plex.token' : `arr.${service}.apikey`;
|
||||
|
||||
// Build metadata
|
||||
const metadata = {
|
||||
service,
|
||||
source: url ? 'external' : 'local',
|
||||
url: url || null,
|
||||
storedAt: new Date().toISOString()
|
||||
};
|
||||
|
||||
// Test connection if URL is known
|
||||
let connectionTest = null;
|
||||
let resolvedUrl = url;
|
||||
|
||||
if (!resolvedUrl) {
|
||||
// Try to resolve URL from services.json
|
||||
try {
|
||||
const services = await ctx.servicesStateManager.read();
|
||||
const svc = Array.isArray(services) ? services : services.services || [];
|
||||
const found = svc.find(s => s.id === service && s.isExternal);
|
||||
if (found?.externalUrl) resolvedUrl = found.externalUrl;
|
||||
} catch (e) { /* ignore */ }
|
||||
}
|
||||
|
||||
if (resolvedUrl) {
|
||||
connectionTest = await helpers.testServiceConnection(service, resolvedUrl, apiKey);
|
||||
if (connectionTest.success) {
|
||||
metadata.lastVerified = new Date().toISOString();
|
||||
metadata.version = connectionTest.version;
|
||||
metadata.url = resolvedUrl;
|
||||
}
|
||||
}
|
||||
|
||||
// Store the credential
|
||||
const stored = await ctx.credentialManager.store(credKey, apiKey, metadata);
|
||||
if (!stored) {
|
||||
return ctx.errorResponse(res, 500, 'Failed to store credential');
|
||||
}
|
||||
|
||||
// Optionally store seedbox base URL
|
||||
if (seedboxBaseUrl) {
|
||||
try { validateURL(seedboxBaseUrl); } catch (e) {
|
||||
return ctx.errorResponse(res, 400, 'Invalid seedbox base URL');
|
||||
}
|
||||
await ctx.credentialManager.store('arr.seedbox.baseurl', seedboxBaseUrl, {
|
||||
storedAt: new Date().toISOString()
|
||||
});
|
||||
}
|
||||
|
||||
ctx.log.info('arr', 'Stored API key', { service, verified: connectionTest?.success || false });
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `${service} API key stored`,
|
||||
connectionTest,
|
||||
url: resolvedUrl
|
||||
});
|
||||
}, 'arr-credentials-store'));
|
||||
|
||||
// List stored arr credentials (keys only, not values)
|
||||
router.get('/arr/credentials', ctx.asyncHandler(async (req, res) => {
|
||||
const services = ['radarr', 'sonarr', 'prowlarr', 'lidarr', 'plex'];
|
||||
const credentials = {};
|
||||
|
||||
for (const service of services) {
|
||||
const credKey = service === 'plex' ? 'arr.plex.token' : `arr.${service}.apikey`;
|
||||
const hasKey = !!(await ctx.credentialManager.retrieve(credKey));
|
||||
const metadata = await ctx.credentialManager.getMetadata(credKey);
|
||||
|
||||
credentials[service] = {
|
||||
hasKey,
|
||||
url: metadata?.url || null,
|
||||
lastVerified: metadata?.lastVerified || null,
|
||||
version: metadata?.version || null,
|
||||
source: metadata?.source || null
|
||||
};
|
||||
}
|
||||
|
||||
// Get seedbox base URL
|
||||
const seedboxBaseUrl = await ctx.credentialManager.retrieve('arr.seedbox.baseurl');
|
||||
|
||||
res.json({ success: true, credentials, seedboxBaseUrl: seedboxBaseUrl || null });
|
||||
}, 'arr-credentials-list'));
|
||||
|
||||
// Delete stored arr credentials
|
||||
router.delete('/arr/credentials/:service', ctx.asyncHandler(async (req, res) => {
|
||||
const { service } = req.params;
|
||||
const credKey = service === 'plex' ? 'arr.plex.token' : `arr.${service}.apikey`;
|
||||
await ctx.credentialManager.delete(credKey);
|
||||
ctx.log.info('arr', 'Deleted credentials', { service });
|
||||
res.json({ success: true, message: `${service} credentials removed` });
|
||||
}, 'arr-credentials-delete'));
|
||||
|
||||
return router;
|
||||
};
|
||||
283
dashcaddy-api/routes/arr/detect.js
Normal file
283
dashcaddy-api/routes/arr/detect.js
Normal file
@@ -0,0 +1,283 @@
|
||||
const express = require('express');
|
||||
const { APP_PORTS, ARR_SERVICES } = require('../../constants');
|
||||
|
||||
module.exports = function(ctx, helpers) {
|
||||
const router = express.Router();
|
||||
|
||||
// Detect running arr services and their configurations
|
||||
router.get('/arr/detect', ctx.asyncHandler(async (req, res) => {
|
||||
const containers = await ctx.docker.client.listContainers({ all: false });
|
||||
const detected = {
|
||||
plex: null,
|
||||
radarr: null,
|
||||
sonarr: null,
|
||||
overseerr: null,
|
||||
lidarr: null,
|
||||
prowlarr: null
|
||||
};
|
||||
|
||||
// Service detection patterns
|
||||
const servicePatterns = ARR_SERVICES;
|
||||
|
||||
for (const container of containers) {
|
||||
const containerName = container.Names[0]?.replace(/^\//, '').toLowerCase() || '';
|
||||
const image = container.Image.toLowerCase();
|
||||
|
||||
for (const [service, config] of Object.entries(servicePatterns)) {
|
||||
if (config.names.some(n => containerName.includes(n) || image.includes(n))) {
|
||||
// Find the exposed port
|
||||
const portInfo = container.Ports.find(p => p.PrivatePort === config.port);
|
||||
const exposedPort = portInfo?.PublicPort || config.port;
|
||||
|
||||
detected[service] = {
|
||||
containerId: container.Id,
|
||||
containerName: container.Names[0]?.replace(/^\//, ''),
|
||||
image: container.Image,
|
||||
port: exposedPort,
|
||||
status: container.State,
|
||||
url: helpers.getServiceUrl(containerName, exposedPort)
|
||||
};
|
||||
|
||||
// Get API key for arr services (not Plex or Overseerr)
|
||||
if (['radarr', 'sonarr', 'lidarr', 'prowlarr'].includes(service)) {
|
||||
detected[service].apiKey = await helpers.getArrApiKey(containerName);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get Plex token if Plex is detected
|
||||
if (detected.plex) {
|
||||
detected.plex.token = await helpers.getPlexToken(detected.plex.containerName);
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
services: detected,
|
||||
summary: {
|
||||
plexReady: !!(detected.plex?.token),
|
||||
radarrReady: !!(detected.radarr?.apiKey),
|
||||
sonarrReady: !!(detected.sonarr?.apiKey),
|
||||
overseerrRunning: !!detected.overseerr
|
||||
}
|
||||
});
|
||||
}, 'arr-detect'));
|
||||
|
||||
// Smart Detect: Unified discovery of all arr services.
// Builds a per-service status report from three evidence sources, checked in
// priority order: (1) running Docker containers, (2) external entries in
// services.json, (3) previously stored credentials. Each service ends up in
// one of the statuses: 'connected', 'needs_key', 'error', 'not_found'.
router.get('/arr/smart-detect', ctx.asyncHandler(async (req, res) => {
  const serviceList = ['plex', 'radarr', 'sonarr', 'prowlarr', 'seerr'];
  const defaultPorts = APP_PORTS;
  const result = {};

  // 1. Scan Docker containers (running only — `all: false`). Best-effort:
  // an unreachable Docker daemon just yields an empty list.
  let containers = [];
  try { containers = await ctx.docker.client.listContainers({ all: false }); } catch (e) { /* Docker not available */ }

  const servicePatterns = ARR_SERVICES;

  const dockerDetected = {};
  for (const container of containers) {
    // Docker prefixes container names with "/" — strip it before matching.
    const containerName = container.Names[0]?.replace(/^\//, '').toLowerCase() || '';
    const image = container.Image.toLowerCase();
    for (const [svc, config] of Object.entries(servicePatterns)) {
      if (config.names.some(n => containerName.includes(n) || image.includes(n))) {
        // Prefer the host-published port; fall back to the conventional default.
        const portInfo = container.Ports.find(p => p.PrivatePort === config.port);
        dockerDetected[svc] = {
          containerId: container.Id,
          containerName: container.Names[0]?.replace(/^\//, ''),
          port: portInfo?.PublicPort || config.port,
          status: container.State
        };
      }
    }
  }

  // 2. Load services.json for external entries (tolerates both the bare-array
  // and { services: [...] } file shapes).
  let storedServices = [];
  try {
    const data = await ctx.servicesStateManager.read();
    storedServices = Array.isArray(data) ? data : data.services || [];
  } catch (e) { /* ignore */ }

  // 3. Load stored credentials. Plex stores a token; the others store API keys.
  const storedCreds = {};
  const seedboxBaseUrl = await ctx.credentialManager.retrieve('arr.seedbox.baseurl');

  for (const svc of serviceList) {
    const credKey = svc === 'plex' ? 'arr.plex.token' : `arr.${svc}.apikey`;
    const apiKey = await ctx.credentialManager.retrieve(credKey);
    const metadata = await ctx.credentialManager.getMetadata(credKey);
    if (apiKey) {
      storedCreds[svc] = { apiKey, metadata };
    }
  }

  // 4. Build detection result for each service
  for (const svc of serviceList) {
    const entry = {
      status: 'not_found',
      source: null,
      url: null,
      hasApiKey: false,
      hasToken: false,
      containerId: null,
      containerName: null,
      version: null
    };

    // Check Docker first — a local container wins over external/stored entries.
    if (dockerDetected[svc]) {
      const dc = dockerDetected[svc];
      entry.containerId = dc.containerId;
      entry.containerName = dc.containerName;
      entry.source = 'local';
      entry.url = `http://localhost:${dc.port}`;

      if (svc === 'plex') {
        // Try to get Plex token from container
        try {
          const token = await helpers.getPlexToken(dc.containerName);
          if (token) {
            entry.hasToken = true;
            entry.status = 'connected';
            // Store for later use
            await ctx.credentialManager.store('arr.plex.token', token, {
              service: 'plex', source: 'local', url: entry.url,
              lastVerified: new Date().toISOString()
            });
          } else {
            entry.status = 'needs_key';
          }
        } catch (e) { entry.status = 'needs_key'; }
      } else if (svc === 'seerr') {
        // A running Seerr container counts as connected even if the deeper
        // settings probes below fail.
        entry.status = 'connected';
        // Check what Overseerr has configured using Plex-based session auth
        try {
          const session = await helpers.getOverseerrSession();
          if (session) {
            entry.hasApiKey = true;
            const configuredServices = { radarr: false, sonarr: false, plex: false };
            // Each probe is independent and best-effort; a failed probe just
            // leaves the corresponding flag false.
            try {
              const radarrCheck = await ctx.fetchT(`http://host.docker.internal:${dc.port}/api/v1/settings/radarr`, {
                headers: { 'Cookie': session.cookie },
                signal: AbortSignal.timeout(5000)
              });
              if (radarrCheck.ok) {
                const radarrSettings = await radarrCheck.json();
                configuredServices.radarr = Array.isArray(radarrSettings) ? radarrSettings.length > 0 : !!radarrSettings;
              }
            } catch (e) { /* ignore */ }
            try {
              const sonarrCheck = await ctx.fetchT(`http://host.docker.internal:${dc.port}/api/v1/settings/sonarr`, {
                headers: { 'Cookie': session.cookie },
                signal: AbortSignal.timeout(5000)
              });
              if (sonarrCheck.ok) {
                const sonarrSettings = await sonarrCheck.json();
                configuredServices.sonarr = Array.isArray(sonarrSettings) ? sonarrSettings.length > 0 : !!sonarrSettings;
              }
            } catch (e) { /* ignore */ }
            try {
              const plexCheck = await ctx.fetchT(`http://host.docker.internal:${dc.port}/api/v1/settings/plex`, {
                headers: { 'Cookie': session.cookie },
                signal: AbortSignal.timeout(5000)
              });
              if (plexCheck.ok) {
                const plexSettings = await plexCheck.json();
                // A configured Plex connection is signalled by a non-empty ip field.
                configuredServices.plex = !!plexSettings?.ip;
              }
            } catch (e) { /* ignore */ }
            entry.configuredServices = configuredServices;
          }
        } catch (e) { /* ignore */ }
      } else {
        // arr services - try to get API key from container
        try {
          const key = await helpers.getArrApiKey(dc.containerName);
          if (key) {
            entry.hasApiKey = true;
            entry.status = 'connected';
          } else {
            // Container has no readable key — fall back to stored credentials.
            entry.status = storedCreds[svc] ? 'connected' : 'needs_key';
            entry.hasApiKey = !!storedCreds[svc];
          }
        } catch (e) {
          entry.status = storedCreds[svc] ? 'connected' : 'needs_key';
          entry.hasApiKey = !!storedCreds[svc];
        }
      }
    }

    // Check external services from services.json
    if (entry.status === 'not_found') {
      const externalService = storedServices.find(s => s.id === svc && s.isExternal);
      if (externalService?.externalUrl) {
        entry.source = 'external';
        entry.url = externalService.externalUrl;

        if (storedCreds[svc]) {
          entry.hasApiKey = true;
          entry.version = storedCreds[svc].metadata?.version || null;
          // Verify connection is still good
          const test = await helpers.testServiceConnection(svc, entry.url, storedCreds[svc].apiKey);
          entry.status = test.success ? 'connected' : 'error';
          if (test.success) entry.version = test.version;
        } else {
          entry.status = 'needs_key';
        }
      }
    }

    // Check stored credentials with metadata URL. NOTE(review): unlike the
    // external branch above, this path trusts the stored credential without
    // re-testing the connection.
    if (entry.status === 'not_found' && storedCreds[svc]?.metadata?.url) {
      entry.source = 'stored';
      entry.url = storedCreds[svc].metadata.url;
      entry.hasApiKey = true;
      entry.version = storedCreds[svc].metadata?.version || null;
      entry.status = 'connected';
    }

    // For plex, also check stored token
    if (svc === 'plex' && entry.status === 'not_found' && storedCreds.plex) {
      entry.hasToken = true;
      entry.source = 'stored';
      entry.url = storedCreds.plex.metadata?.url || `http://localhost:${defaultPorts.plex}`;
      entry.status = 'connected';
    }

    result[svc] = entry;
  }

  // 5. Detect seedbox base URL pattern — infer "<origin>/<first path segment>"
  // from the first stored external URL when no explicit base URL is saved.
  let detectedSeedboxUrl = seedboxBaseUrl || null;
  if (!detectedSeedboxUrl) {
    const externalUrls = storedServices
      .filter(s => s.isExternal && s.externalUrl)
      .map(s => s.externalUrl);
    if (externalUrls.length > 0) {
      // Find common base URL pattern
      try {
        const url = new URL(externalUrls[0]);
        const pathParts = url.pathname.split('/').filter(p => p);
        if (pathParts.length >= 2) {
          detectedSeedboxUrl = `${url.origin}/${pathParts[0]}`;
        }
      } catch (e) { /* ignore */ }
    }
  }

  // Summary counts for the dashboard; "ready for auto-connect" means at least
  // two services are already connected.
  const statuses = Object.values(result);
  const summary = {
    totalDetected: statuses.filter(s => s.status !== 'not_found').length,
    fullyConnected: statuses.filter(s => s.status === 'connected').length,
    needsApiKey: statuses.filter(s => s.status === 'needs_key').length,
    errors: statuses.filter(s => s.status === 'error').length,
    readyForAutoConnect: statuses.filter(s => s.status === 'connected').length >= 2
  };

  res.json({ success: true, services: result, seedboxBaseUrl: detectedSeedboxUrl, summary });
}, 'smart-detect'));
|
||||
|
||||
return router;
|
||||
};
|
||||
302
dashcaddy-api/routes/arr/helpers.js
Normal file
302
dashcaddy-api/routes/arr/helpers.js
Normal file
@@ -0,0 +1,302 @@
|
||||
const { APP_PORTS } = require('../../constants');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
|
||||
// Helper: Extract API key from arr service config.xml.
// Runs `cat /config/config.xml` inside the container via docker exec and
// pulls the <ApiKey> element out of the output. Best-effort: any failure
// (container missing, exec error, stream error) resolves to null.
async function getArrApiKey(containerName) {
  try {
    const info = await ctx.docker.findContainer(containerName);
    if (!info) return null;

    const target = ctx.docker.client.getContainer(info.Id);
    const execSession = await target.exec({
      Cmd: ['cat', '/config/config.xml'],
      AttachStdout: true,
      AttachStderr: true
    });
    const output = await execSession.start();

    // Buffer the exec output, then scan the accumulated text for the key.
    // NOTE(review): docker exec output is a multiplexed stream; the regex
    // tolerates the frame header bytes because it anchors on the XML tags.
    return await new Promise((resolve) => {
      const pieces = [];
      output.on('data', (chunk) => pieces.push(chunk.toString()));
      output.on('error', () => resolve(null));
      output.on('end', () => {
        const found = pieces.join('').match(/<ApiKey>([^<]+)<\/ApiKey>/);
        resolve(found ? found[1] : null);
      });
    });
  } catch (error) {
    ctx.log.error('docker', 'Failed to get API key', { containerName, error: error.message });
    return null;
  }
}
|
||||
|
||||
// Helper: Get Plex token from container or config.
// Finds a running container whose name contains `containerName` (or simply
// "plex"), reads Preferences.xml inside it via docker exec, and extracts the
// PlexOnlineToken attribute. Returns null on any failure.
async function getPlexToken(containerName) {
  try {
    const running = await ctx.docker.client.listContainers({ all: false });
    const wanted = containerName.toLowerCase();
    const hit = running.find((c) =>
      c.Names.some((n) => {
        const lowered = n.toLowerCase();
        return lowered.includes(wanted) || lowered.includes('plex');
      })
    );

    if (!hit) return null;

    const target = ctx.docker.client.getContainer(hit.Id);
    const execSession = await target.exec({
      Cmd: ['cat', '/config/Library/Application Support/Plex Media Server/Preferences.xml'],
      AttachStdout: true,
      AttachStderr: true
    });
    const output = await execSession.start();

    // Buffer the whole file, then pull the token attribute out of the XML.
    return await new Promise((resolve) => {
      const pieces = [];
      output.on('data', (chunk) => pieces.push(chunk.toString()));
      output.on('error', () => resolve(null));
      output.on('end', () => {
        const found = pieces.join('').match(/PlexOnlineToken="([^"]+)"/);
        resolve(found ? found[1] : null);
      });
    });
  } catch (error) {
    ctx.log.error('docker', 'Failed to get Plex token', { error: error.message });
    return null;
  }
}
|
||||
|
||||
// Helper: Get container URL (internal Docker network or host).
// `containerName` is currently unused — kept for interface stability.
// When `useTailscale` is set, prefers the HOST_TAILSCALE_IP env var and
// falls back to localhost; otherwise always localhost.
function getServiceUrl(containerName, port, useTailscale = false) {
  if (!useTailscale) {
    return `http://localhost:${port}`;
  }
  const tailscaleHost = process.env.HOST_TAILSCALE_IP || 'localhost';
  return `http://${tailscaleHost}:${port}`;
}
|
||||
|
||||
// Helper: Get authenticated Seerr/Overseerr session via Plex token.
// Seerr requires Plex-based auth for admin endpoints (settings, configuration).
// Returns { cookie, plexToken } on success, or null on any failure.
async function getOverseerrSession() {
  const seerrUrl = `http://host.docker.internal:${APP_PORTS.seerr}`;
  try {
    // Prefer a live token read from the running Plex container, then fall
    // back to whatever was stored earlier in the credential manager.
    const plexToken = (await getPlexToken('plex'))
      || (await ctx.credentialManager.retrieve('arr.plex.token'));

    if (!plexToken) {
      ctx.log.error('arr', 'Could not get Plex token for Seerr auth (no container, no stored token)');
      return null;
    }

    // Authenticate with Seerr via Plex token.
    const authRes = await ctx.fetchT(`${seerrUrl}/api/v1/auth/plex`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ authToken: plexToken }),
      signal: AbortSignal.timeout(10000)
    });

    if (!authRes.ok) {
      ctx.log.error('arr', 'Seerr Plex auth failed', { status: authRes.status });
      return null;
    }

    const setCookie = authRes.headers.get('set-cookie');
    if (!setCookie) {
      ctx.log.error('arr', 'No session cookie returned from Seerr');
      return null;
    }

    // Keep only the name=value pair; drop Path/HttpOnly/etc. attributes.
    return { cookie: setCookie.split(';')[0], plexToken };
  } catch (e) {
    ctx.log.error('arr', 'Could not get Seerr session', { error: e.message });
    return null;
  }
}
|
||||
|
||||
// Helper: Connect Plex to Overseerr.
// Uses session cookie auth (Overseerr requires a Plex-based admin session for
// settings). Throws on unrecoverable failures (Plex unreachable, Overseerr
// rejects the config); the sync trigger and library read-back are best-effort.
// Returns { success, libraries, serverName, machineId }.
async function connectPlexToOverseerr(plexUrl, plexToken, overseerrUrl, sessionCookie) {
  // 1. Get Plex server identity (for return info)
  const identityRes = await ctx.fetchT(`${plexUrl}/identity`, {
    headers: { 'X-Plex-Token': plexToken, 'Accept': 'application/json' },
    signal: AbortSignal.timeout(10000)
  });
  if (!identityRes.ok) throw new Error('Cannot reach Plex server');
  const identity = await identityRes.json();
  const serverName = identity.MediaContainer?.friendlyName || 'Plex';

  // 2. Configure Plex server connection in Overseerr
  // Only send writable fields — name, machineId, libraries are read-only (auto-discovered by Overseerr)
  const plexConfig = {
    ip: 'host.docker.internal',  // Overseerr runs in Docker; reach Plex via the host gateway
    port: APP_PORTS.plex,
    useSsl: false
  };

  // NOTE(review): no AbortSignal on this request, unlike the sibling calls —
  // it relies on fetchT's default timeout behavior, if any. Confirm intended.
  const configRes = await ctx.fetchT(`${overseerrUrl}/api/v1/settings/plex`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Cookie': sessionCookie
    },
    body: JSON.stringify(plexConfig)
  });

  if (!configRes.ok) {
    throw new Error(`Overseerr Plex config failed: ${await configRes.text()}`);
  }

  // 3. Trigger library sync — Overseerr will use the admin's Plex token to discover libraries
  try {
    await ctx.fetchT(`${overseerrUrl}/api/v1/settings/plex/sync`, {
      method: 'POST',
      headers: { 'Cookie': sessionCookie },
      signal: AbortSignal.timeout(10000)
    });
  } catch (e) {
    ctx.log.warn('arr', 'Plex library sync trigger failed (non-fatal)', { error: e.message });
  }

  // 4. Get discovered libraries (best-effort; empty list if the read fails)
  let libraries = [];
  try {
    const libRes = await ctx.fetchT(`${overseerrUrl}/api/v1/settings/plex`, {
      headers: { 'Cookie': sessionCookie },
      signal: AbortSignal.timeout(5000)
    });
    if (libRes.ok) {
      const plexSettings = await libRes.json();
      libraries = plexSettings.libraries || [];
    }
  } catch (e) { /* non-fatal */ }

  return { success: true, libraries, serverName, machineId: identity.MediaContainer?.machineIdentifier };
}
|
||||
|
||||
// Helper: Configure Prowlarr connected apps (Radarr/Sonarr).
// Registers each app as a Prowlarr "application" so indexers sync to it.
// Returns a per-app status map whose values are 'configured',
// 'already_configured', 'failed: <response body>' or 'error: <message>'.
async function configureProwlarrApps(prowlarrUrl, prowlarrApiKey, apps) {
  const results = {};

  // Check existing apps to avoid duplicates (best-effort: on failure we
  // proceed with an empty list and may attempt re-registration).
  let existingApps = [];
  try {
    const existingRes = await ctx.fetchT(`${prowlarrUrl}/api/v1/applications`, {
      headers: { 'X-Api-Key': prowlarrApiKey },
      signal: AbortSignal.timeout(10000)
    });
    if (existingRes.ok) {
      existingApps = await existingRes.json();
    }
  } catch (e) {
    ctx.log.warn('arr', 'Could not fetch existing Prowlarr apps', { error: e.message });
  }

  // Newznab/Torznab category sets: movies for Radarr, TV for everything else.
  const CATEGORY_SETS = {
    radarr: [2000, 2010, 2020, 2030, 2040, 2045, 2050, 2060],
    other: [5000, 5010, 5020, 5030, 5040, 5045, 5050]
  };

  for (const [appName, config] of Object.entries(apps)) {
    // Prowlarr implementation names are capitalized: "Radarr", "Sonarr".
    const implementation = `${appName.charAt(0).toUpperCase()}${appName.slice(1)}`;

    // Skip if already configured
    if (existingApps.some((a) => a.implementation === implementation)) {
      results[appName] = 'already_configured';
      continue;
    }

    const syncCategories = appName === 'radarr' ? CATEGORY_SETS.radarr : CATEGORY_SETS.other;

    const payload = {
      name: implementation,
      syncLevel: 'fullSync',
      implementation,
      configContract: `${implementation}Settings`,
      fields: [
        { name: 'prowlarrUrl', value: prowlarrUrl },
        { name: 'baseUrl', value: config.url },
        { name: 'apiKey', value: config.apiKey },
        { name: 'syncCategories', value: syncCategories }
      ]
    };

    try {
      const res = await ctx.fetchT(`${prowlarrUrl}/api/v1/applications`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'X-Api-Key': prowlarrApiKey
        },
        body: JSON.stringify(payload),
        signal: AbortSignal.timeout(10000)
      });
      results[appName] = res.ok ? 'configured' : `failed: ${await res.text()}`;
    } catch (e) {
      results[appName] = `error: ${e.message}`;
    }
  }

  return results;
}
|
||||
|
||||
// Helper: Test a service connection (reusable logic).
//
// Issues an authenticated status request against the service and reports
// whether it responded. Supported: radarr/sonarr (v3 API, X-Api-Key auth),
// lidarr/prowlarr (v1 API, X-Api-Key auth), plex (X-Plex-Token auth).
//
// Returns { success: true, version, appName } on success, or
// { success: false, error } with a human-readable reason on failure.
// Never throws — network errors are mapped to result objects.
async function testServiceConnection(service, url, apiKey) {
  const baseUrl = url.replace(/\/+$/, ''); // tolerate trailing slashes
  let apiEndpoint, headers;

  if (service === 'radarr' || service === 'sonarr') {
    apiEndpoint = `${baseUrl}/api/v3/system/status`;
    headers = { 'X-Api-Key': apiKey, 'Accept': 'application/json' };
  } else if (service === 'lidarr' || service === 'prowlarr') {
    // BUGFIX: Lidarr exposes a v1 API (like Prowlarr), not v3 — the previous
    // code hit /api/v3/system/status for Lidarr, which does not exist.
    apiEndpoint = `${baseUrl}/api/v1/system/status`;
    headers = { 'X-Api-Key': apiKey, 'Accept': 'application/json' };
  } else if (service === 'plex') {
    apiEndpoint = `${baseUrl}/identity`;
    headers = { 'X-Plex-Token': apiKey, 'Accept': 'application/json' };
  } else {
    return { success: false, error: `Unknown service: ${service}` };
  }

  try {
    const response = await ctx.fetchT(apiEndpoint, {
      method: 'GET',
      headers,
      signal: AbortSignal.timeout(15000)
    });

    if (response.ok) {
      const data = await response.json();
      if (service === 'plex') {
        // Plex wraps its payload in MediaContainer and has no appName field.
        return { success: true, version: data.MediaContainer?.version, appName: 'Plex' };
      }
      return { success: true, version: data.version, appName: data.appName };
    } else if (response.status === 401) {
      return { success: false, error: 'Invalid API key' };
    } else {
      return { success: false, error: `HTTP ${response.status}` };
    }
  } catch (e) {
    // Map common transport failures to friendly messages.
    if (e.cause?.code === 'ECONNREFUSED') return { success: false, error: 'Connection refused' };
    if (e.name === 'AbortError') return { success: false, error: 'Connection timeout' };
    return { success: false, error: e.message };
  }
}
|
||||
|
||||
// Helper: Get Overseerr API key (convenience wrapper)
// NOTE(review): despite the name, this returns the full session object from
// getOverseerrSession() — { cookie, plexToken } or null — not a bare API key.
// Callers depend on that shape, so the misnomer is documented rather than fixed.
async function getOverseerrApiKey() {
  const session = await getOverseerrSession();
  return session;
}

// Public surface of this helpers module.
return {
  getArrApiKey,
  getPlexToken,
  getServiceUrl,
  getOverseerrSession,
  getOverseerrApiKey,
  connectPlexToOverseerr,
  configureProwlarrApps,
  testServiceConnection
};
};
|
||||
14
dashcaddy-api/routes/arr/index.js
Normal file
14
dashcaddy-api/routes/arr/index.js
Normal file
@@ -0,0 +1,14 @@
|
||||
const express = require('express');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
const helpers = require('./helpers')(ctx);
|
||||
|
||||
router.use(require('./detect')(ctx, helpers));
|
||||
router.use(require('./credentials')(ctx, helpers));
|
||||
router.use(require('./config')(ctx, helpers));
|
||||
router.use(require('./smart-connect')(ctx, helpers));
|
||||
router.use(require('./plex')(ctx, helpers));
|
||||
|
||||
return router;
|
||||
};
|
||||
76
dashcaddy-api/routes/arr/plex.js
Normal file
76
dashcaddy-api/routes/arr/plex.js
Normal file
@@ -0,0 +1,76 @@
|
||||
const express = require('express');
|
||||
const { APP_PORTS } = require('../../constants');
|
||||
|
||||
module.exports = function(ctx, helpers) {
|
||||
const router = express.Router();
|
||||
|
||||
// Plex Libraries endpoint
|
||||
router.get('/plex/libraries', ctx.asyncHandler(async (req, res) => {
|
||||
// Get Plex token
|
||||
let plexToken = await helpers.getPlexToken('plex');
|
||||
if (!plexToken) {
|
||||
plexToken = await ctx.credentialManager.retrieve('arr.plex.token');
|
||||
}
|
||||
|
||||
if (!plexToken) {
|
||||
return ctx.errorResponse(res, 400, 'No Plex token available. Claim your Plex server first.', {
|
||||
hint: 'Deploy Plex with a claim token or manually configure it.'
|
||||
});
|
||||
}
|
||||
|
||||
// Get Plex URL
|
||||
let plexUrl = `http://localhost:${APP_PORTS.plex}`;
|
||||
try {
|
||||
const services = await ctx.servicesStateManager.read();
|
||||
const svcList = Array.isArray(services) ? services : services.services || [];
|
||||
const plexService = svcList.find(s => s.id === 'plex' || s.appTemplate === 'plex');
|
||||
if (plexService?.url) {
|
||||
plexUrl = plexService.url.replace('host.docker.internal', 'localhost');
|
||||
}
|
||||
} catch (e) { /* use default */ }
|
||||
|
||||
// Fetch libraries
|
||||
const libRes = await ctx.fetchT(`${plexUrl}/library/sections`, {
|
||||
headers: { 'X-Plex-Token': plexToken, 'Accept': 'application/json' },
|
||||
signal: AbortSignal.timeout(10000)
|
||||
});
|
||||
|
||||
if (!libRes.ok) {
|
||||
return ctx.errorResponse(res, 502, `Plex returned ${libRes.status}`);
|
||||
}
|
||||
|
||||
const data = await libRes.json();
|
||||
const libraries = (data.MediaContainer?.Directory || []).map(dir => ({
|
||||
key: dir.key,
|
||||
title: dir.title,
|
||||
type: dir.type,
|
||||
count: parseInt(dir.count) || 0,
|
||||
scannedAt: dir.scannedAt
|
||||
}));
|
||||
|
||||
// Get server name
|
||||
let serverName = 'Plex';
|
||||
let version = null;
|
||||
try {
|
||||
const identityRes = await ctx.fetchT(`${plexUrl}/identity`, {
|
||||
headers: { 'X-Plex-Token': plexToken, 'Accept': 'application/json' },
|
||||
signal: AbortSignal.timeout(5000)
|
||||
});
|
||||
if (identityRes.ok) {
|
||||
const identity = await identityRes.json();
|
||||
serverName = identity.MediaContainer?.friendlyName || 'Plex';
|
||||
version = identity.MediaContainer?.version;
|
||||
}
|
||||
} catch (e) { /* use default */ }
|
||||
|
||||
// Store token for future use
|
||||
await ctx.credentialManager.store('arr.plex.token', plexToken, {
|
||||
service: 'plex', source: 'local', url: plexUrl,
|
||||
lastVerified: new Date().toISOString()
|
||||
});
|
||||
|
||||
res.json({ success: true, serverName, version, libraries });
|
||||
}, 'plex-libraries'));
|
||||
|
||||
return router;
|
||||
};
|
||||
298
dashcaddy-api/routes/arr/smart-connect.js
Normal file
298
dashcaddy-api/routes/arr/smart-connect.js
Normal file
@@ -0,0 +1,298 @@
|
||||
const express = require('express');
|
||||
const { APP_PORTS } = require('../../constants');
|
||||
|
||||
module.exports = function(ctx, helpers) {
|
||||
const router = express.Router();
|
||||
|
||||
// Smart Connect: Unified orchestration endpoint
|
||||
router.post('/arr/smart-connect', ctx.asyncHandler(async (req, res) => {
|
||||
const { services: inputServices, configurePlex, configureProwlarr, configureSeerr, saveCredentials } = req.body;
|
||||
const steps = [];
|
||||
const connectedServices = {}; // { radarr: { url, apiKey }, sonarr: { url, apiKey }, ... }
|
||||
|
||||
// Phase 1: Test all provided services and resolve credentials
|
||||
const arrServices = ['radarr', 'sonarr', 'prowlarr'];
|
||||
for (const svc of arrServices) {
|
||||
const input = inputServices?.[svc];
|
||||
let apiKey = input?.apiKey;
|
||||
let url = input?.url;
|
||||
|
||||
// Fallback to stored credentials
|
||||
if (!apiKey) {
|
||||
const credKey = `arr.${svc}.apikey`;
|
||||
apiKey = await ctx.credentialManager.retrieve(credKey);
|
||||
if (!url) {
|
||||
const metadata = await ctx.credentialManager.getMetadata(credKey);
|
||||
url = metadata?.url;
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback URL from services.json
|
||||
if (!url && apiKey) {
|
||||
try {
|
||||
const data = await ctx.servicesStateManager.read();
|
||||
const svcList = Array.isArray(data) ? data : data.services || [];
|
||||
const found = svcList.find(s => s.id === svc && s.isExternal);
|
||||
if (found?.externalUrl) url = found.externalUrl;
|
||||
} catch (e) { /* ignore */ }
|
||||
}
|
||||
|
||||
if (!apiKey || !url) continue;
|
||||
|
||||
// Test connection
|
||||
const test = await helpers.testServiceConnection(svc, url, apiKey);
|
||||
steps.push({
|
||||
step: `Test ${svc.charAt(0).toUpperCase() + svc.slice(1)} connection`,
|
||||
status: test.success ? 'success' : 'failed',
|
||||
details: test.success ? `v${test.version}` : test.error
|
||||
});
|
||||
|
||||
if (test.success) {
|
||||
connectedServices[svc] = { url, apiKey };
|
||||
|
||||
// Save credentials
|
||||
if (saveCredentials) {
|
||||
const stored = await ctx.credentialManager.store(`arr.${svc}.apikey`, apiKey, {
|
||||
service: svc, source: 'external', url,
|
||||
lastVerified: new Date().toISOString(),
|
||||
version: test.version
|
||||
});
|
||||
steps.push({
|
||||
step: `Save ${svc} credentials`,
|
||||
status: stored ? 'success' : 'failed',
|
||||
details: stored ? 'Encrypted and saved' : 'Storage failed'
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 2: Handle Plex
|
||||
let plexToken = null;
|
||||
let plexUrl = null;
|
||||
if (configurePlex) {
|
||||
plexToken = await helpers.getPlexToken('plex');
|
||||
if (!plexToken) plexToken = await ctx.credentialManager.retrieve('arr.plex.token');
|
||||
|
||||
if (plexToken) {
|
||||
// Get Plex URL
|
||||
plexUrl = `http://host.docker.internal:${APP_PORTS.plex}`;
|
||||
try {
|
||||
const data = await ctx.servicesStateManager.read();
|
||||
const svcList = Array.isArray(data) ? data : data.services || [];
|
||||
const plexSvc = svcList.find(s => s.id === 'plex' || s.appTemplate === 'plex');
|
||||
if (plexSvc?.url) plexUrl = plexSvc.url;
|
||||
} catch (e) { /* use default */ }
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 3: Configure Overseerr (uses Plex-based session auth)
|
||||
if (configureSeerr && (connectedServices.radarr || connectedServices.sonarr || (configurePlex && plexToken))) {
|
||||
const overseerrSession = await helpers.getOverseerrSession();
|
||||
const overseerrUrl = `http://host.docker.internal:${APP_PORTS.seerr}`;
|
||||
|
||||
if (!overseerrSession) {
|
||||
steps.push({
|
||||
step: 'Get Overseerr API key',
|
||||
status: 'failed',
|
||||
details: 'Could not authenticate with Overseerr (Plex not running or not linked)'
|
||||
});
|
||||
} else {
|
||||
steps.push({ step: 'Get Overseerr API key', status: 'success', details: 'Extracted from container' });
|
||||
const overseerrCookie = overseerrSession.cookie;
|
||||
|
||||
// Configure Radarr in Overseerr
|
||||
if (connectedServices.radarr) {
|
||||
try {
|
||||
const radarrUrl = connectedServices.radarr.url.replace(/\/+$/, '');
|
||||
const radarrUrlObj = new URL(radarrUrl);
|
||||
const radarrBasePath = radarrUrlObj.pathname.replace(/\/+$/, '');
|
||||
|
||||
// Fetch quality profiles
|
||||
const profilesRes = await ctx.fetchT(`${radarrUrl}/api/v3/qualityprofile`, {
|
||||
headers: { 'X-Api-Key': connectedServices.radarr.apiKey },
|
||||
signal: AbortSignal.timeout(10000)
|
||||
});
|
||||
const profiles = profilesRes.ok ? await profilesRes.json() : [];
|
||||
const defaultProfile = profiles[0] || { id: 1, name: 'Any' };
|
||||
|
||||
// Fetch root folders
|
||||
const rootFoldersRes = await ctx.fetchT(`${radarrUrl}/api/v3/rootfolder`, {
|
||||
headers: { 'X-Api-Key': connectedServices.radarr.apiKey },
|
||||
signal: AbortSignal.timeout(10000)
|
||||
});
|
||||
const rootFolders = rootFoldersRes.ok ? await rootFoldersRes.json() : [];
|
||||
const defaultRootFolder = rootFolders[0]?.path || '/movies';
|
||||
|
||||
// Seerr runs in Docker — localhost/127.0.0.1 won't reach sibling containers
|
||||
const radarrHost = ['localhost', '127.0.0.1'].includes(radarrUrlObj.hostname)
|
||||
? 'host.docker.internal' : radarrUrlObj.hostname;
|
||||
|
||||
const radarrConfig = {
|
||||
name: 'Radarr',
|
||||
hostname: radarrHost,
|
||||
port: parseInt(radarrUrlObj.port) || (radarrUrlObj.protocol === 'https:' ? 443 : APP_PORTS.radarr),
|
||||
apiKey: connectedServices.radarr.apiKey,
|
||||
useSsl: radarrUrlObj.protocol === 'https:',
|
||||
baseUrl: radarrBasePath || '',
|
||||
activeProfileId: defaultProfile.id,
|
||||
activeProfileName: defaultProfile.name,
|
||||
activeDirectory: defaultRootFolder,
|
||||
is4k: false,
|
||||
minimumAvailability: 'released',
|
||||
isDefault: true,
|
||||
externalUrl: connectedServices.radarr.url,
|
||||
tags: []
|
||||
};
|
||||
|
||||
const radarrRes = await ctx.fetchT(`${overseerrUrl}/api/v1/settings/radarr`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', 'Cookie': overseerrCookie },
|
||||
body: JSON.stringify(radarrConfig),
|
||||
signal: AbortSignal.timeout(10000)
|
||||
});
|
||||
|
||||
steps.push({
|
||||
step: 'Configure Radarr in Overseerr',
|
||||
status: radarrRes.ok ? 'success' : 'failed',
|
||||
details: radarrRes.ok ? `Profile: ${defaultProfile.name}, Root: ${defaultRootFolder}` : await radarrRes.text()
|
||||
});
|
||||
} catch (e) {
|
||||
steps.push({ step: 'Configure Radarr in Overseerr', status: 'failed', details: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
// Configure Sonarr in Overseerr
|
||||
if (connectedServices.sonarr) {
|
||||
try {
|
||||
const sonarrUrl = connectedServices.sonarr.url.replace(/\/+$/, '');
|
||||
const sonarrUrlObj = new URL(sonarrUrl);
|
||||
const sonarrBasePath = sonarrUrlObj.pathname.replace(/\/+$/, '');
|
||||
|
||||
const profilesRes = await ctx.fetchT(`${sonarrUrl}/api/v3/qualityprofile`, {
|
||||
headers: { 'X-Api-Key': connectedServices.sonarr.apiKey },
|
||||
signal: AbortSignal.timeout(10000)
|
||||
});
|
||||
const profiles = profilesRes.ok ? await profilesRes.json() : [];
|
||||
const defaultProfile = profiles[0] || { id: 1, name: 'Any' };
|
||||
|
||||
const rootFoldersRes = await ctx.fetchT(`${sonarrUrl}/api/v3/rootfolder`, {
|
||||
headers: { 'X-Api-Key': connectedServices.sonarr.apiKey },
|
||||
signal: AbortSignal.timeout(10000)
|
||||
});
|
||||
const rootFolders = rootFoldersRes.ok ? await rootFoldersRes.json() : [];
|
||||
const defaultRootFolder = rootFolders[0]?.path || '/tv';
|
||||
|
||||
let languageProfileId = 1;
|
||||
try {
|
||||
const langRes = await ctx.fetchT(`${sonarrUrl}/api/v3/languageprofile`, {
|
||||
headers: { 'X-Api-Key': connectedServices.sonarr.apiKey },
|
||||
signal: AbortSignal.timeout(5000)
|
||||
});
|
||||
if (langRes.ok) {
|
||||
const langProfiles = await langRes.json();
|
||||
languageProfileId = langProfiles[0]?.id || 1;
|
||||
}
|
||||
} catch (e) { /* Sonarr v4 doesn't need this */ }
|
||||
|
||||
const sonarrHost = ['localhost', '127.0.0.1'].includes(sonarrUrlObj.hostname)
|
||||
? 'host.docker.internal' : sonarrUrlObj.hostname;
|
||||
|
||||
const sonarrConfig = {
|
||||
name: 'Sonarr',
|
||||
hostname: sonarrHost,
|
||||
port: parseInt(sonarrUrlObj.port) || (sonarrUrlObj.protocol === 'https:' ? 443 : APP_PORTS.sonarr),
|
||||
apiKey: connectedServices.sonarr.apiKey,
|
||||
useSsl: sonarrUrlObj.protocol === 'https:',
|
||||
baseUrl: sonarrBasePath || '',
|
||||
activeProfileId: defaultProfile.id,
|
||||
activeProfileName: defaultProfile.name,
|
||||
activeDirectory: defaultRootFolder,
|
||||
activeLanguageProfileId: languageProfileId,
|
||||
is4k: false,
|
||||
isDefault: true,
|
||||
enableSeasonFolders: true,
|
||||
externalUrl: connectedServices.sonarr.url,
|
||||
tags: []
|
||||
};
|
||||
|
||||
const sonarrRes = await ctx.fetchT(`${overseerrUrl}/api/v1/settings/sonarr`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', 'Cookie': overseerrCookie },
|
||||
body: JSON.stringify(sonarrConfig),
|
||||
signal: AbortSignal.timeout(10000)
|
||||
});
|
||||
|
||||
steps.push({
|
||||
step: 'Configure Sonarr in Overseerr',
|
||||
status: sonarrRes.ok ? 'success' : 'failed',
|
||||
details: sonarrRes.ok ? `Profile: ${defaultProfile.name}, Root: ${defaultRootFolder}` : await sonarrRes.text()
|
||||
});
|
||||
} catch (e) {
|
||||
steps.push({ step: 'Configure Sonarr in Overseerr', status: 'failed', details: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
// Connect Plex to Overseerr
|
||||
if (configurePlex && plexToken) {
|
||||
try {
|
||||
const plexResult = await helpers.connectPlexToOverseerr(plexUrl, plexToken, overseerrUrl, overseerrCookie);
|
||||
steps.push({
|
||||
step: 'Connect Plex to Overseerr',
|
||||
status: 'success',
|
||||
details: `${plexResult.serverName} - ${plexResult.libraries.length} libraries synced`
|
||||
});
|
||||
} catch (e) {
|
||||
steps.push({ step: 'Connect Plex to Overseerr', status: 'failed', details: e.message });
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 4: Configure Prowlarr
|
||||
if (configureProwlarr && connectedServices.prowlarr) {
|
||||
const appsToConnect = {};
|
||||
if (connectedServices.radarr) appsToConnect.radarr = connectedServices.radarr;
|
||||
if (connectedServices.sonarr) appsToConnect.sonarr = connectedServices.sonarr;
|
||||
|
||||
if (Object.keys(appsToConnect).length > 0) {
|
||||
try {
|
||||
const prowlarrResults = await helpers.configureProwlarrApps(
|
||||
connectedServices.prowlarr.url.replace(/\/+$/, ''),
|
||||
connectedServices.prowlarr.apiKey,
|
||||
appsToConnect
|
||||
);
|
||||
for (const [app, status] of Object.entries(prowlarrResults)) {
|
||||
steps.push({
|
||||
step: `Add ${app.charAt(0).toUpperCase() + app.slice(1)} to Prowlarr`,
|
||||
status: status === 'configured' || status === 'already_configured' ? 'success' : 'failed',
|
||||
details: status
|
||||
});
|
||||
}
|
||||
} catch (e) {
|
||||
steps.push({ step: 'Configure Prowlarr apps', status: 'failed', details: e.message });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Summary
|
||||
const succeeded = steps.filter(s => s.status === 'success').length;
|
||||
const failed = steps.filter(s => s.status === 'failed').length;
|
||||
|
||||
if (succeeded > 0) {
|
||||
ctx.notification.send(
|
||||
'deploymentSuccess',
|
||||
'Smart Arr Connect Complete',
|
||||
`${succeeded}/${steps.length} steps completed successfully`,
|
||||
'success'
|
||||
);
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: succeeded > 0,
|
||||
steps,
|
||||
summary: { totalSteps: steps.length, succeeded, failed }
|
||||
});
|
||||
}, 'smart-connect'));
|
||||
|
||||
return router;
|
||||
};
|
||||
17
dashcaddy-api/routes/auth/index.js
Normal file
17
dashcaddy-api/routes/auth/index.js
Normal file
@@ -0,0 +1,17 @@
|
||||
const express = require('express');
|
||||
const initTotp = require('./totp');
|
||||
const initKeys = require('./keys');
|
||||
const initSessionHandlers = require('./session-handlers');
|
||||
const initSsoGate = require('./sso-gate');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
const { getAppSession, appSessionCache } = initSessionHandlers(ctx);
|
||||
|
||||
router.use(initTotp(ctx));
|
||||
router.use(initKeys(ctx));
|
||||
router.use(initSsoGate(ctx, getAppSession, appSessionCache));
|
||||
|
||||
return router;
|
||||
};
|
||||
130
dashcaddy-api/routes/auth/keys.js
Normal file
130
dashcaddy-api/routes/auth/keys.js
Normal file
@@ -0,0 +1,130 @@
|
||||
const express = require('express');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// Helper function to parse expiration strings to milliseconds
|
||||
function parseExpiration(expStr) {
|
||||
const match = expStr.match(/^(\d+)([smhdy])$/);
|
||||
if (!match) return 24 * 60 * 60 * 1000; // default 24h
|
||||
|
||||
const value = parseInt(match[1], 10);
|
||||
const unit = match[2];
|
||||
|
||||
const multipliers = {
|
||||
s: 1000,
|
||||
m: 60 * 1000,
|
||||
h: 60 * 60 * 1000,
|
||||
d: 24 * 60 * 60 * 1000,
|
||||
y: 365 * 24 * 60 * 60 * 1000
|
||||
};
|
||||
|
||||
return value * (multipliers[unit] || multipliers.h);
|
||||
}
|
||||
|
||||
// List all API keys
|
||||
router.get('/auth/keys', ctx.asyncHandler(async (req, res) => {
|
||||
// Require session authentication (not API key - can't manage keys with key itself)
|
||||
if (!req.auth || req.auth.type !== 'session') {
|
||||
return ctx.errorResponse(res, 403, 'API key management requires TOTP session authentication');
|
||||
}
|
||||
|
||||
const keys = await ctx.authManager.listAPIKeys();
|
||||
res.json({ success: true, keys });
|
||||
}, 'auth-keys-list'));
|
||||
|
||||
// Generate new API key
|
||||
router.post('/auth/keys', ctx.asyncHandler(async (req, res) => {
|
||||
// Require session authentication
|
||||
if (!req.auth || req.auth.type !== 'session') {
|
||||
return ctx.errorResponse(res, 403, 'API key generation requires TOTP session authentication');
|
||||
}
|
||||
|
||||
const { name, scopes } = req.body;
|
||||
|
||||
if (!name || typeof name !== 'string' || name.trim().length === 0) {
|
||||
return ctx.errorResponse(res, 400, 'API key name is required');
|
||||
}
|
||||
|
||||
// Validate scopes if provided
|
||||
const validScopes = ['read', 'write', 'admin'];
|
||||
if (scopes && (!Array.isArray(scopes) || !scopes.every(s => validScopes.includes(s)))) {
|
||||
return ctx.errorResponse(res, 400, 'Invalid scopes', { validScopes });
|
||||
}
|
||||
|
||||
const keyData = await ctx.authManager.generateAPIKey(
|
||||
name.trim(),
|
||||
scopes || ['read', 'write']
|
||||
);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
key: keyData.key,
|
||||
id: keyData.id,
|
||||
name: keyData.name,
|
||||
scopes: keyData.scopes,
|
||||
createdAt: keyData.createdAt,
|
||||
warning: 'Save this key securely - it will not be shown again'
|
||||
});
|
||||
}, 'auth-keys-generate'));
|
||||
|
||||
// Revoke API key
|
||||
router.delete('/auth/keys/:keyId', ctx.asyncHandler(async (req, res) => {
|
||||
// Require session authentication
|
||||
if (!req.auth || req.auth.type !== 'session') {
|
||||
return ctx.errorResponse(res, 403, 'API key revocation requires TOTP session authentication');
|
||||
}
|
||||
|
||||
const { keyId } = req.params;
|
||||
|
||||
if (!keyId || typeof keyId !== 'string') {
|
||||
return ctx.errorResponse(res, 400, 'Key ID is required');
|
||||
}
|
||||
|
||||
const success = await ctx.authManager.revokeAPIKey(keyId);
|
||||
|
||||
if (success) {
|
||||
res.json({ success: true, message: 'API key revoked successfully' });
|
||||
} else {
|
||||
const { NotFoundError } = require('../../errors');
|
||||
throw new NotFoundError('API key');
|
||||
}
|
||||
}, 'auth-keys-revoke'));
|
||||
|
||||
// Generate JWT from TOTP session
|
||||
router.post('/auth/jwt', ctx.asyncHandler(async (req, res) => {
|
||||
// Require session authentication
|
||||
if (!req.auth || req.auth.type !== 'session') {
|
||||
return ctx.errorResponse(res, 403, 'JWT generation requires TOTP session authentication');
|
||||
}
|
||||
|
||||
const { expiresIn, userId } = req.body;
|
||||
|
||||
// Validate expiresIn format if provided (e.g., '24h', '7d', '1y')
|
||||
const validExpiresIn = /^(\d+[smhdy])$/.test(expiresIn || '24h');
|
||||
if (expiresIn && !validExpiresIn) {
|
||||
return ctx.errorResponse(res, 400, 'Invalid expiresIn format. Use: 60s, 15m, 24h, 7d, 1y');
|
||||
}
|
||||
|
||||
const token = await ctx.authManager.generateJWT(
|
||||
{
|
||||
sub: userId || 'dashcaddy-admin',
|
||||
scope: ['admin'] // Session-generated JWTs have admin scope
|
||||
},
|
||||
expiresIn || '24h'
|
||||
);
|
||||
|
||||
// Calculate expiration timestamp
|
||||
const expiresInMs = parseExpiration(expiresIn || '24h');
|
||||
const expiresAt = new Date(Date.now() + expiresInMs).toISOString();
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
token,
|
||||
expiresAt,
|
||||
usage: 'Include in Authorization header as: Bearer <token>'
|
||||
});
|
||||
}, 'auth-jwt-generate'));
|
||||
|
||||
return router;
|
||||
};
|
||||
177
dashcaddy-api/routes/auth/session-handlers.js
Normal file
177
dashcaddy-api/routes/auth/session-handlers.js
Normal file
@@ -0,0 +1,177 @@
|
||||
const { SESSION_TTL, APP, PLEX, TIMEOUTS, buildMediaAuth } = require('../../constants');
|
||||
const { createCache, CACHE_CONFIGS } = require('../../cache-config');
|
||||
|
||||
// Per-service auto-login helpers. Exposes getAppSession(), which logs in to a
// backing service with stored credentials and caches the resulting session
// (cookies or token) keyed by serviceId, plus the cache itself.
module.exports = function(ctx) {
  // App session cache for auto-login
  const appSessionCache = createCache(CACHE_CONFIGS.appSessions);

  /**
   * Obtain (or reuse from cache) an authenticated session for a service.
   *
   * @param {string} serviceId - Service identifier; also the cache key.
   * @param {string} baseUrl - Service base URL. NOTE(review): the 'torrent'
   *   and default cases append paths with no '/' separator — this assumes
   *   baseUrl already ends with '/'; confirm against callers.
   * @param {string} username
   * @param {string} password
   * @returns {Promise<string|null>} Cookie string (`k=v; k2=v2`) on success,
   *   or null on failure. Failures are negatively cached for
   *   SESSION_TTL.FAILED_LOGIN to avoid hammering the service.
   */
  async function getAppSession(serviceId, baseUrl, username, password) {
    // Fast path: reuse a cached, unexpired session (or short-circuit on a
    // cached failure).
    const cached = appSessionCache.get(serviceId);
    if (cached && cached.exp > Date.now()) {
      if (cached.failed) return null;
      return cached.cookies;
    }

    // Defaults for the generic form-POST login performed after the switch;
    // service-specific cases either override these or return early.
    let loginUrl, loginBody, contentType = 'application/x-www-form-urlencoded';
    const extraHeaders = {};
    let expectJsonToken = false;
    // encodeURIComponent leaves '*' unescaped; some form parsers need %2A.
    const formEncode = (s) => encodeURIComponent(s).replace(/\*/g, '%2A');

    switch (serviceId) {
      case 'torrent':
        // qBittorrent-style API login; Basic auth added as a belt-and-braces
        // header alongside the form body.
        loginUrl = `${baseUrl}api/v2/auth/login`;
        loginBody = `username=${formEncode(username)}&password=${formEncode(password)}`;
        extraHeaders['Authorization'] = `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}`;
        break;
      case 'router': {
        // Router login via wget rather than fetch: success is detected from
        // the Location response header in wget's -S stderr output. The
        // session is IP-based, so only a sentinel cookie value is cached.
        const routerBody = `username=${formEncode(username)}&password=${formEncode(password)}&Continue=Continue`;
        try {
          const { spawnSync } = require('child_process');
          const proc = spawnSync('wget', [
            '-q', '-S', `--post-data=${routerBody}`, '-O', '/dev/null',
            `${baseUrl}/cgi-bin/login.ha`
          ], { timeout: 5000, encoding: 'utf8' });
          // Only the first two stderr lines are inspected for the redirect.
          const result = (proc.stderr || '').split('\n').slice(0, 2).join('\n');
          const locationMatch = result.match(/Location:\s*(.+)/);
          const location = locationMatch ? locationMatch[1].trim() : '';
          // A redirect that is NOT back to the login page means success.
          if (location && !location.includes('login')) {
            appSessionCache.set(serviceId, { cookies: '__ip_session=1', exp: Date.now() + SESSION_TTL.IP_SESSION });
            ctx.log.info('auth', 'Router auto-login successful (IP-based session)', { serviceId });
            return '__ip_session=1';
          }
          ctx.log.warn('auth', 'Router auto-login failed', { serviceId });
        } catch (e) {
          ctx.log.warn('auth', 'Router auto-login error', { serviceId, error: e.message?.substring(0, 100) });
        }
        // Negative-cache the failure so retries are rate-limited.
        appSessionCache.set(serviceId, { failed: true, exp: Date.now() + SESSION_TTL.FAILED_LOGIN });
        return null;
      }
      case 'sync':
        // JSON login; session comes back as Set-Cookie (handled below).
        loginUrl = `${baseUrl}/rest/noauth/auth/password`;
        contentType = 'application/json';
        loginBody = JSON.stringify({ username, password });
        break;
      case 'chat':
        // JSON login that returns a JWT in the response body, not a cookie.
        loginUrl = `${baseUrl}/api/v1/auths/signin`;
        contentType = 'application/json';
        loginBody = JSON.stringify({ email: username, password });
        expectJsonToken = true;
        break;
      case 'jellyfin':
      case 'emby': {
        // Jellyfin/Emby authenticate via a dedicated endpoint and return an
        // AccessToken; userId/serverId are cached too for client-side use.
        const mediaAuth = buildMediaAuth(APP.DEVICE_IDS.SSO);
        try {
          const authResp = await ctx.fetchT(`${baseUrl}/Users/AuthenticateByName`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json', 'X-Emby-Authorization': mediaAuth },
            body: JSON.stringify({ Username: username, Pw: password }),
          }, TIMEOUTS.HTTP_LONG);
          const authData = await authResp.json();
          if (authData.AccessToken) {
            const tokenData = {
              token: authData.AccessToken, userId: authData.User?.Id,
              serverId: authData.ServerId, serverName: authData.User?.ServerName || serviceId,
            };
            appSessionCache.set(serviceId, { cookies: `token=${authData.AccessToken}`, token: authData.AccessToken, tokenData, exp: Date.now() + SESSION_TTL.TOKEN_SESSION });
            ctx.log.info('auth', 'Auto-login successful (token + userId obtained)', { serviceId });
            return `token=${authData.AccessToken}`;
          }
          ctx.log.warn('auth', 'Auto-login failed', { serviceId, status: authResp.status });
        } catch (e) {
          ctx.log.warn('auth', 'Auto-login error', { serviceId, error: e.message });
        }
        appSessionCache.set(serviceId, { failed: true, exp: Date.now() + SESSION_TTL.FAILED_LOGIN });
        return null;
      }
      case 'plex': {
        // Plex authenticates against plex.tv (PLEX.AUTH_URL), not baseUrl;
        // the token is read from the JSON response body.
        try {
          const plexResp = await ctx.fetchT(PLEX.AUTH_URL, {
            method: 'POST',
            headers: {
              'Accept': 'application/json', 'Content-Type': 'application/json',
              'Authorization': `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}`,
              'X-Plex-Client-Identifier': APP.DEVICE_IDS.SSO,
              'X-Plex-Product': APP.NAME, 'X-Plex-Version': APP.VERSION,
            },
            body: JSON.stringify({}),
          }, TIMEOUTS.HTTP_LONG);
          const plexData = await plexResp.json();
          const token = plexData?.user?.authToken;
          if (token) {
            appSessionCache.set(serviceId, { cookies: `plexToken=${token}`, token, exp: Date.now() + SESSION_TTL.TOKEN_SESSION });
            ctx.log.info('auth', 'Plex auto-login successful via plex.tv', { serviceId });
            return `plexToken=${token}`;
          }
          ctx.log.warn('auth', 'Plex auto-login failed: no token in response', { serviceId, status: plexResp.status });
        } catch (e) {
          ctx.log.warn('auth', 'Plex auto-login error', { serviceId, error: e.message });
        }
        appSessionCache.set(serviceId, { failed: true, exp: Date.now() + SESSION_TTL.FAILED_LOGIN });
        return null;
      }
      default:
        // Generic form login (Servarr-style `/login` endpoint).
        // NOTE(review): no '/' between baseUrl and 'login' — assumes
        // baseUrl has a trailing slash; confirm.
        loginUrl = `${baseUrl}login`;
        loginBody = `username=${formEncode(username)}&password=${formEncode(password)}&rememberMe=on`;
        extraHeaders['Authorization'] = `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}`;
        break;
    }

    // Shared login path for all cases that did not return above.
    try {
      // redirect: 'manual' so Set-Cookie from the login response itself is
      // observable rather than consumed by an automatic redirect follow.
      const resp = await ctx.fetchT(loginUrl, {
        method: 'POST',
        headers: { 'Content-Type': contentType, ...extraHeaders },
        body: loginBody, redirect: 'manual',
      }, TIMEOUTS.HTTP_LONG);

      // Token-in-body services ('chat'): extract JWT and cache it as a
      // synthetic cookie string.
      if (expectJsonToken) {
        try {
          const data = await resp.json();
          if (data.token) {
            const cookies = `token=${data.token}`;
            appSessionCache.set(serviceId, { cookies, exp: Date.now() + SESSION_TTL.COOKIE_SESSION });
            ctx.log.info('auth', 'Auto-login successful (JWT token cached)', { serviceId });
            return cookies;
          }
        } catch (e) { /* JSON parse failed */ }
        ctx.log.warn('auth', 'Auto-login: no token in response', { serviceId, status: resp.status });
        appSessionCache.set(serviceId, { failed: true, exp: Date.now() + SESSION_TTL.FAILED_LOGIN });
        return null;
      }

      // qBittorrent returns HTTP 200 with a literal 'Ok.' body on success,
      // so the body must be checked in addition to cookies.
      if (serviceId === 'torrent') {
        const text = await resp.text();
        if (text.trim() !== 'Ok.') {
          ctx.log.warn('auth', 'Auto-login failed', { serviceId, response: text.trim() });
          appSessionCache.set(serviceId, { failed: true, exp: Date.now() + SESSION_TTL.FAILED_LOGIN });
          return null;
        }
      }

      // Preferred: getSetCookie() (Node >=18.14 undici) returns one entry
      // per Set-Cookie header; keep only the name=value pair of each.
      const setCookies = resp.headers.getSetCookie?.() || [];
      if (setCookies.length > 0) {
        const cookies = setCookies.map(c => c.split(';')[0]).join('; ');
        appSessionCache.set(serviceId, { cookies, exp: Date.now() + SESSION_TTL.COOKIE_SESSION });
        ctx.log.info('auth', 'Auto-login successful, session cached', { serviceId, cookieCount: setCookies.length });
        return cookies;
      }

      // Fallback for runtimes that fold Set-Cookie into one comma-joined
      // header; split on commas not followed by a space (Expires= dates
      // contain ", " so they survive this split).
      const rawCookie = resp.headers.get('set-cookie');
      if (rawCookie) {
        const cookies = rawCookie.split(/,(?=[^ ])/).map(c => c.split(';')[0].trim()).join('; ');
        appSessionCache.set(serviceId, { cookies, exp: Date.now() + SESSION_TTL.COOKIE_SESSION });
        ctx.log.info('auth', 'Auto-login successful (fallback), session cached', { serviceId });
        return cookies;
      }

      ctx.log.warn('auth', 'Auto-login: no cookies in response', { serviceId, status: resp.status });
      appSessionCache.set(serviceId, { failed: true, exp: Date.now() + SESSION_TTL.FAILED_LOGIN });
    } catch (e) {
      ctx.log.warn('auth', 'Auto-login error', { serviceId, error: e.message });
      appSessionCache.set(serviceId, { failed: true, exp: Date.now() + SESSION_TTL.FAILED_LOGIN });
    }
    return null;
  }

  // Expose both the function and the cache so sso-gate can use them
  return { getAppSession, appSessionCache };
};
|
||||
182
dashcaddy-api/routes/auth/sso-gate.js
Normal file
182
dashcaddy-api/routes/auth/sso-gate.js
Normal file
@@ -0,0 +1,182 @@
|
||||
const express = require('express');
|
||||
const { SESSION_TTL, APP, PLEX, TIMEOUTS, buildMediaAuth } = require('../../constants');
|
||||
|
||||
// SSO gate routes: a Caddy forward_auth endpoint that validates the TOTP
// session and injects per-service credentials into response headers, and an
// app-token endpoint that hands cached service tokens to the browser.
// getAppSession/appSessionCache come from session-handlers.js.
module.exports = function(ctx, getAppSession, appSessionCache) {
  const router = express.Router();

  // Caddy forward_auth gate: checks TOTP session + injects service credentials
  // On 200, Caddy copies the injected headers (Authorization, X-App-Cookie,
  // X-Api-Key, ...) onto the proxied upstream request.
  router.get('/auth/gate/:serviceId', ctx.asyncHandler(async (req, res) => {
    // Never cache gate decisions — a stale 200 would bypass auth.
    res.setHeader('Cache-Control', 'no-store, no-cache, must-revalidate');
    const serviceId = req.params.serviceId;

    // Check TOTP session first
    if (ctx.totpConfig.enabled && ctx.totpConfig.sessionDuration !== 'never') {
      const valid = ctx.session.isValid(req);
      if (!valid) return ctx.errorResponse(res, 401, 'Session expired or invalid', { authenticated: false });
    }

    // Session valid (or TOTP disabled) - inject credentials if premium SSO is active
    let injected = false;
    const ssoEnabled = ctx.licenseManager.hasFeature('sso');
    if (!ssoEnabled) {
      // Free tier: TOTP gate passes but no credential injection
      return res.status(200).json({ authenticated: true, credentialsInjected: false, premiumRequired: true });
    }
    try {
      const services = await ctx.servicesStateManager.read();
      const service = services.find(s => s.id === serviceId);

      // External services: inject seedhost Basic Auth
      // Password lookup order: per-service override, then shared password.
      if (service && service.isExternal) {
        const sharedUser = await ctx.credentialManager.retrieve('seedhost.username').catch(() => null);
        const svcPass = await ctx.credentialManager.retrieve(`seedhost.password.${serviceId}`).catch(() => null);
        const sharedPass = await ctx.credentialManager.retrieve('seedhost.password').catch(() => null);
        const password = svcPass || sharedPass;
        if (sharedUser && password) {
          const basicAuth = Buffer.from(`${sharedUser}:${password}`).toString('base64');
          res.setHeader('Authorization', `Basic ${basicAuth}`);
          injected = true;
          // Also try to attach an application-level session cookie.
          if (service.externalUrl) {
            const appCookies = await getAppSession(serviceId, service.externalUrl, sharedUser, password);
            if (appCookies) res.setHeader('X-App-Cookie', appCookies);
          }
        }
      }

      // Non-external services: check per-service Basic Auth
      // Also runs when the service is unknown (!service) so stored
      // per-service credentials can still be injected.
      if (!service || !service.isExternal) {
        const username = await ctx.credentialManager.retrieve(`service.${serviceId}.username`).catch(() => null);
        const password = await ctx.credentialManager.retrieve(`service.${serviceId}.password`).catch(() => null);
        if (username && password) {
          const basicAuth = Buffer.from(`${username}:${password}`).toString('base64');
          res.setHeader('Authorization', `Basic ${basicAuth}`);
          injected = true;
          if (service && service.url) {
            const appCookies = await getAppSession(serviceId, service.url, username, password);
            if (appCookies) res.setHeader('X-App-Cookie', appCookies);
            // Plex/Jellyfin/Emby additionally expose their token via a
            // dedicated header for upstreams that expect it there.
            if (serviceId === 'plex') {
              const plexCached = appSessionCache.get('plex');
              if (plexCached && plexCached.token) res.setHeader('X-Plex-Token', plexCached.token);
            }
            if (serviceId === 'jellyfin' || serviceId === 'emby') {
              const mediaCached = appSessionCache.get(serviceId);
              if (mediaCached && mediaCached.token) res.setHeader('X-Emby-Token', mediaCached.token);
            }
          }
        }
      }

      // Inject API key
      // Lookup order: *arr-style key first, then generic service key.
      const arrKey = await ctx.credentialManager.retrieve(`arr.${serviceId}.apikey`).catch(() => null);
      const svcKey = await ctx.credentialManager.retrieve(`service.${serviceId}.apikey`).catch(() => null);
      const apiKey = arrKey || svcKey;
      if (apiKey) { res.setHeader('X-Api-Key', apiKey); injected = true; }
    } catch (e) {
      // Credential lookup problems must not block the gate — the session
      // itself is already validated; we just skip injection.
      ctx.log.warn('auth', 'Credential error', { serviceId, error: e.message });
    }

    res.status(200).json({ authenticated: true, credentialsInjected: injected });
  }, 'auth-gate'));

  // Return cached app session token for client-side auth (Premium SSO feature)
  router.get('/auth/app-token/:serviceId', ctx.licenseManager.requirePremium('sso'), ctx.asyncHandler(async (req, res) => {
    const { serviceId } = req.params;

    // Same TOTP-session check as the gate route.
    if (ctx.totpConfig.enabled && ctx.totpConfig.sessionDuration !== 'never') {
      if (!ctx.session.isValid(req)) return ctx.errorResponse(res, 401, 'Not authenticated');
    }

    // Jellyfin/Emby: separate browser-specific token
    // (distinct device ID from the SSO token, cached under `<id>_browser`).
    if (serviceId === 'jellyfin' || serviceId === 'emby') {
      const browserCacheKey = `${serviceId}_browser`;
      const browserCached = appSessionCache.get(browserCacheKey);
      if (browserCached && browserCached.exp > Date.now()) {
        if (browserCached.failed) return ctx.errorResponse(res, 500, 'Login recently failed');
        if (browserCached.token) {
          const resp = { token: browserCached.token };
          if (browserCached.tokenData) Object.assign(resp, browserCached.tokenData);
          return res.json(resp);
        }
      }
      // Cache miss: authenticate directly with a browser device ID.
      try {
        const username = await ctx.credentialManager.retrieve(`service.${serviceId}.username`).catch(() => null);
        const password = await ctx.credentialManager.retrieve(`service.${serviceId}.password`).catch(() => null);
        if (!username || !password) return ctx.errorResponse(res, 404, '[DC-500] No credentials stored');
        const service = await ctx.getServiceById(serviceId);
        const baseUrl = service?.url;
        if (!baseUrl) return ctx.errorResponse(res, 404, 'No service URL');
        const mediaAuth = buildMediaAuth(APP.DEVICE_IDS.BROWSER);
        const authResp = await ctx.fetchT(`${baseUrl}/Users/AuthenticateByName`, {
          method: 'POST',
          headers: { 'Content-Type': 'application/json', 'X-Emby-Authorization': mediaAuth },
          body: JSON.stringify({ Username: username, Pw: password }),
        }, TIMEOUTS.HTTP_LONG);
        const authData = await authResp.json();
        if (authData.AccessToken) {
          const tokenData = { userId: authData.User?.Id, serverId: authData.ServerId, serverName: authData.User?.ServerName || serviceId };
          appSessionCache.set(browserCacheKey, { token: authData.AccessToken, tokenData, exp: Date.now() + SESSION_TTL.TOKEN_SESSION });
          return res.json({ token: authData.AccessToken, ...tokenData });
        }
        return ctx.errorResponse(res, 500, '[DC-501] Authentication failed');
      } catch (e) {
        ctx.log.warn('auth', 'Browser token error', { serviceId, error: e.message });
        return ctx.errorResponse(res, 500, e.message);
      }
    }

    // Check cache first
    // Response shape: { token } when a token is cached, else { cookies }.
    const cached = appSessionCache.get(serviceId);
    if (cached && cached.exp > Date.now()) {
      if (cached.failed) return ctx.errorResponse(res, 500, '[DC-501] Login recently failed, retrying in a few minutes');
      if (cached.token) {
        const resp = { token: cached.token };
        if (cached.tokenData) Object.assign(resp, cached.tokenData);
        return res.json(resp);
      }
      // A cookie string of the form `token=...` is unwrapped into a token.
      const m = cached.cookies.match(/^token=(.+)$/);
      if (m) return res.json({ token: m[1] });
      return res.json({ cookies: cached.cookies });
    }

    // No cache — get fresh session
    try {
      const service = await ctx.getServiceById(serviceId);
      if (!service) return ctx.errorResponse(res, 404, 'Service not found');
      const baseUrl = service.externalUrl || service.url;
      if (!baseUrl) return ctx.errorResponse(res, 404, 'No service URL');

      // Credential source mirrors the gate route: seedhost credentials for
      // external services, per-service credentials otherwise.
      let username, password;
      if (service.isExternal) {
        username = await ctx.credentialManager.retrieve('seedhost.username').catch(() => null);
        const svcPass = await ctx.credentialManager.retrieve(`seedhost.password.${serviceId}`).catch(() => null);
        const sharedPass = await ctx.credentialManager.retrieve('seedhost.password').catch(() => null);
        password = svcPass || sharedPass;
      } else {
        username = await ctx.credentialManager.retrieve(`service.${serviceId}.username`).catch(() => null);
        password = await ctx.credentialManager.retrieve(`service.${serviceId}.password`).catch(() => null);
      }

      if (!username || !password) return ctx.errorResponse(res, 404, '[DC-500] No credentials stored');

      const appCookies = await getAppSession(serviceId, baseUrl, username, password);
      if (appCookies) {
        // getAppSession may have populated a richer cache entry (token +
        // tokenData); prefer that over the raw cookie string.
        const freshCached = appSessionCache.get(serviceId);
        if (freshCached && freshCached.token) {
          const resp = { token: freshCached.token };
          if (freshCached.tokenData) Object.assign(resp, freshCached.tokenData);
          return res.json(resp);
        }
        const m = appCookies.match(/^token=(.+)$/);
        if (m) return res.json({ token: m[1] });
        return res.json({ cookies: appCookies });
      }

      ctx.errorResponse(res, 500, '[DC-501] Login failed');
    } catch (e) {
      ctx.log.warn('auth', 'App-token error', { error: e.message });
      ctx.errorResponse(res, 500, e.message);
    }
  }, 'auth-app-token'));

  return router;
};
|
||||
185
dashcaddy-api/routes/auth/totp.js
Normal file
185
dashcaddy-api/routes/auth/totp.js
Normal file
@@ -0,0 +1,185 @@
|
||||
const express = require('express');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// Get current TOTP config (public route)
|
||||
router.get('/totp/config', ctx.asyncHandler(async (req, res) => {
|
||||
res.json({
|
||||
success: true,
|
||||
config: {
|
||||
enabled: ctx.totpConfig.enabled,
|
||||
sessionDuration: ctx.totpConfig.sessionDuration,
|
||||
isSetUp: ctx.totpConfig.isSetUp
|
||||
}
|
||||
});
|
||||
}, 'totp-config-get'));
|
||||
|
||||
// Generate new TOTP secret + QR code
|
||||
router.post('/totp/setup', ctx.asyncHandler(async (req, res) => {
|
||||
const { authenticator } = require('otplib');
|
||||
const QRCode = require('qrcode');
|
||||
|
||||
// Accept user-provided secret or generate a new one
|
||||
let secret;
|
||||
if (req.body && req.body.secret) {
|
||||
secret = req.body.secret.replace(/\s/g, '').toUpperCase();
|
||||
if (!/^[A-Z2-7]{16,}$/.test(secret)) {
|
||||
return ctx.errorResponse(res, 400, 'Invalid secret key format. Must be a Base32 string (letters A-Z and digits 2-7).');
|
||||
}
|
||||
} else {
|
||||
secret = authenticator.generateSecret();
|
||||
}
|
||||
await ctx.credentialManager.store('totp.pending_secret', secret);
|
||||
|
||||
const otpauth = authenticator.keyuri('user', 'DashCaddy', secret);
|
||||
const qrDataUrl = await QRCode.toDataURL(otpauth, {
|
||||
width: 256, margin: 2,
|
||||
color: { dark: '#ffffff', light: '#00000000' }
|
||||
});
|
||||
|
||||
res.json({ success: true, qrCode: qrDataUrl, manualKey: secret, issuer: 'DashCaddy', imported: !!req.body?.secret });
|
||||
}, 'totp-setup'));
|
||||
|
||||
// Verify first code to confirm setup, then activate TOTP
|
||||
router.post('/totp/verify-setup', ctx.asyncHandler(async (req, res) => {
|
||||
const { authenticator } = require('otplib');
|
||||
const { code } = req.body;
|
||||
|
||||
if (!code || !/^\d{6}$/.test(code)) {
|
||||
return ctx.errorResponse(res, 400, 'Invalid code format');
|
||||
}
|
||||
|
||||
const pendingSecret = await ctx.credentialManager.retrieve('totp.pending_secret');
|
||||
if (!pendingSecret) {
|
||||
return ctx.errorResponse(res, 400, 'No pending TOTP setup. Call /api/totp/setup first.');
|
||||
}
|
||||
|
||||
authenticator.options = { window: 1 };
|
||||
if (!authenticator.verify({ token: code, secret: pendingSecret })) {
|
||||
return ctx.errorResponse(res, 401, '[DC-111] Invalid code. Please try again.');
|
||||
}
|
||||
|
||||
// Promote pending secret to active
|
||||
await ctx.credentialManager.store('totp.secret', pendingSecret);
|
||||
await ctx.credentialManager.delete('totp.pending_secret');
|
||||
|
||||
ctx.totpConfig.isSetUp = true;
|
||||
ctx.totpConfig.enabled = true;
|
||||
ctx.totpConfig.secret = pendingSecret; // Persist to file for auto-restore
|
||||
if (ctx.totpConfig.sessionDuration === 'never') {
|
||||
ctx.totpConfig.sessionDuration = '24h';
|
||||
}
|
||||
await ctx.saveTotpConfig();
|
||||
|
||||
// Set session so user doesn't get locked out immediately
|
||||
ctx.session.create(req, ctx.totpConfig.sessionDuration);
|
||||
ctx.session.setCookie(res, ctx.totpConfig.sessionDuration);
|
||||
|
||||
res.json({ success: true, message: 'TOTP enabled successfully', sessionDuration: ctx.totpConfig.sessionDuration });
|
||||
}, 'totp-verify-setup'));
|
||||
|
||||
// Login: verify TOTP code and set session cookie
|
||||
router.post('/totp/verify', ctx.asyncHandler(async (req, res) => {
|
||||
const { authenticator } = require('otplib');
|
||||
const { code } = req.body;
|
||||
|
||||
if (!code || !/^\d{6}$/.test(code)) {
|
||||
return ctx.errorResponse(res, 400, 'Invalid code format');
|
||||
}
|
||||
|
||||
if (!ctx.totpConfig.enabled || !ctx.totpConfig.isSetUp) {
|
||||
return ctx.errorResponse(res, 400, 'TOTP is not enabled');
|
||||
}
|
||||
|
||||
const secret = await ctx.credentialManager.retrieve('totp.secret');
|
||||
if (!secret) {
|
||||
return ctx.errorResponse(res, 500, 'TOTP secret not found');
|
||||
}
|
||||
|
||||
authenticator.options = { window: 1 };
|
||||
if (!authenticator.verify({ token: code, secret })) {
|
||||
return ctx.errorResponse(res, 401, '[DC-111] Invalid code');
|
||||
}
|
||||
|
||||
ctx.log.info('auth', 'TOTP verified, creating session', { ip: ctx.session.getClientIP(req), duration: ctx.totpConfig.sessionDuration });
|
||||
ctx.session.create(req, ctx.totpConfig.sessionDuration);
|
||||
ctx.session.setCookie(res, ctx.totpConfig.sessionDuration);
|
||||
ctx.log.debug('auth', 'Session created', { sessions: ctx.session.ipSessions.size });
|
||||
res.json({ success: true, message: 'Authenticated successfully', sessionDuration: ctx.totpConfig.sessionDuration });
|
||||
}, 'totp-verify'));
|
||||
|
||||
// Check session validity (used by Caddy forward_auth)
|
||||
router.get('/totp/check-session', ctx.asyncHandler(async (req, res) => {
|
||||
// Never cache session checks — stale cached 200s cause auth loops
|
||||
res.setHeader('Cache-Control', 'no-store, no-cache, must-revalidate');
|
||||
res.setHeader('Pragma', 'no-cache');
|
||||
|
||||
if (!ctx.totpConfig.enabled || ctx.totpConfig.sessionDuration === 'never') {
|
||||
return res.status(200).json({ authenticated: true });
|
||||
}
|
||||
|
||||
const valid = ctx.session.isValid(req);
|
||||
ctx.log.debug('auth', 'Session check', { ip: ctx.session.getClientIP(req), valid, sessions: ctx.session.ipSessions.size });
|
||||
if (valid) {
|
||||
return res.status(200).json({ authenticated: true });
|
||||
}
|
||||
|
||||
return ctx.errorResponse(res, 401, 'Session expired or invalid', { authenticated: false });
|
||||
}, 'totp-check-session'));
|
||||
|
||||
// Disable TOTP
|
||||
router.post('/totp/disable', ctx.asyncHandler(async (req, res) => {
  const { code } = req.body;

  // If TOTP is currently active and a code was supplied, require it to be
  // valid before disabling. If the stored secret is missing (inconsistent
  // state) the check is skipped so the user can still recover.
  // NOTE(review): when no code is supplied at all, the verification is
  // skipped entirely — confirm this is intended and not a bypass.
  if (ctx.totpConfig.enabled && ctx.totpConfig.isSetUp && code) {
    const { authenticator } = require('otplib');
    const secret = await ctx.credentialManager.retrieve('totp.secret');
    if (secret) {
      // window: 1 accepts codes from the adjacent time step (clock drift).
      authenticator.options = { window: 1 };
      if (!authenticator.verify({ token: code, secret })) {
        return ctx.errorResponse(res, 401, '[DC-111] Invalid code');
      }
    }
  }

  // Remove both the active secret and any half-finished pending setup.
  await ctx.credentialManager.delete('totp.secret');
  await ctx.credentialManager.delete('totp.pending_secret');

  // Reset the in-memory config and persist it.
  ctx.totpConfig.enabled = false;
  ctx.totpConfig.isSetUp = false;
  ctx.totpConfig.sessionDuration = 'never';
  delete ctx.totpConfig.secret; // Remove backup
  await ctx.saveTotpConfig();

  // Invalidate the caller's session and cookie now that the gate is off.
  ctx.session.clear(req);
  ctx.session.clearCookie(res);
  res.json({ success: true, message: 'TOTP disabled' });
}, 'totp-disable'));
|
||||
|
||||
// Update TOTP settings (session duration)
|
||||
router.post('/totp/config', ctx.asyncHandler(async (req, res) => {
  // Only the session duration is configurable here; other body fields are ignored.
  const { sessionDuration } = req.body;

  if (sessionDuration) {
    // Must be one of the presets the session manager understands.
    if (!ctx.session.durations.hasOwnProperty(sessionDuration)) {
      return ctx.errorResponse(res, 400, 'Invalid session duration', {
        validOptions: Object.keys(ctx.session.durations)
      });
    }

    ctx.totpConfig.sessionDuration = sessionDuration;
    // A never-expiring session makes the TOTP gate pointless, so turn it off.
    if (sessionDuration === 'never') {
      ctx.totpConfig.enabled = false;
    }
  }

  await ctx.saveTotpConfig();

  res.json({
    success: true,
    config: {
      enabled: ctx.totpConfig.enabled,
      sessionDuration: ctx.totpConfig.sessionDuration,
      isSetUp: ctx.totpConfig.isSetUp
    }
  });
}, 'totp-config'));
|
||||
|
||||
return router;
|
||||
};
|
||||
38
dashcaddy-api/routes/backups.js
Normal file
38
dashcaddy-api/routes/backups.js
Normal file
@@ -0,0 +1,38 @@
|
||||
const express = require('express');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// Get backup configuration
|
||||
router.get('/backups/config', ctx.asyncHandler(async (req, res) => {
|
||||
const config = ctx.backupManager.getConfig();
|
||||
res.json({ success: true, config });
|
||||
}, 'backups-config-get'));
|
||||
|
||||
// Update backup configuration
|
||||
router.post('/backups/config', ctx.asyncHandler(async (req, res) => {
|
||||
ctx.backupManager.updateConfig(req.body);
|
||||
res.json({ success: true, message: 'Backup configuration updated' });
|
||||
}, 'backups-config-update'));
|
||||
|
||||
// Execute manual backup
|
||||
router.post('/backups/execute', ctx.asyncHandler(async (req, res) => {
|
||||
const backup = await ctx.backupManager.executeBackup('manual', req.body);
|
||||
res.json({ success: true, backup });
|
||||
}, 'backups-execute'));
|
||||
|
||||
// Get backup history
|
||||
router.get('/backups/history', ctx.asyncHandler(async (req, res) => {
|
||||
const limit = parseInt(req.query.limit) || 50;
|
||||
const history = ctx.backupManager.getHistory(limit);
|
||||
res.json({ success: true, history });
|
||||
}, 'backups-history'));
|
||||
|
||||
// Restore from backup
|
||||
router.post('/backups/restore/:backupId', ctx.asyncHandler(async (req, res) => {
|
||||
const result = await ctx.backupManager.restoreBackup(req.params.backupId, req.body);
|
||||
res.json({ success: true, result });
|
||||
}, 'backups-restore'));
|
||||
|
||||
return router;
|
||||
};
|
||||
193
dashcaddy-api/routes/browse.js
Normal file
193
dashcaddy-api/routes/browse.js
Normal file
@@ -0,0 +1,193 @@
|
||||
const express = require('express');
|
||||
const fs = require('fs');
|
||||
const fsp = require('fs').promises;
|
||||
const path = require('path');
|
||||
const { exists, isAccessible } = require('../fs-helpers');
|
||||
const { paginate, parsePaginationParams } = require('../pagination');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// Parse browse roots from environment
|
||||
// Parse MEDIA_BROWSE_ROOTS ("containerPath=hostPath,..." pairs) into
// { containerPath, hostPath } entries. Entries without '=' are dropped;
// only the FIRST '=' splits, so host paths may themselves contain '='.
const BROWSE_ROOTS = (process.env.MEDIA_BROWSE_ROOTS || '')
  .split(',')
  .flatMap(entry => {
    const sep = entry.indexOf('=');
    if (sep === -1) return [];
    return [{
      containerPath: entry.slice(0, sep).trim(),
      hostPath: entry.slice(sep + 1).trim()
    }];
  });
|
||||
|
||||
// Get available browse roots
|
||||
router.get('/browse/roots', ctx.asyncHandler(async (req, res) => {
  // Report only the configured roots whose container-side mount is readable.
  // Accessibility is probed sequentially to keep ordering deterministic.
  const roots = [];
  for (const { containerPath, hostPath } of BROWSE_ROOTS) {
    if (await isAccessible(containerPath, fs.constants.R_OK)) {
      roots.push({ name: hostPath, path: hostPath, containerPath });
    }
  }

  res.json({ success: true, roots });
}, 'browse-roots'));
|
||||
|
||||
// Browse directory contents
|
||||
router.get('/browse/directories', ctx.asyncHandler(async (req, res) => {
  const requestedPath = req.query.path || '';

  // No path requested: return the configured roots themselves (as "drives"),
  // filtered to those whose container-side mount is actually readable.
  if (!requestedPath) {
    const allRoots = BROWSE_ROOTS.map(r => ({
      name: r.hostPath,
      path: r.hostPath,
      type: 'drive'
    }));
    const roots = [];
    for (const r of allRoots) {
      const br = BROWSE_ROOTS.find(br => br.hostPath === r.path);
      if (await isAccessible(br.containerPath, fs.constants.R_OK)) {
        roots.push(r);
      }
    }
    return res.json({ success: true, path: '', items: roots });
  }

  // The requested (host-side) path must live under one of the configured roots.
  // NOTE(review): startsWith is a plain string-prefix check, so "/mediaX"
  // would match a root of "/media" — confirm validateSecurePath below
  // catches that case after host->container translation.
  const matchingRoot = BROWSE_ROOTS.find(r =>
    requestedPath.startsWith(r.hostPath) || requestedPath === r.hostPath.replace(/\/$/, '')
  );

  if (!matchingRoot) {
    return ctx.errorResponse(res, 400, 'Path not in browseable roots', {
      availableRoots: BROWSE_ROOTS.map(r => r.hostPath)
    });
  }

  // Translate the host-side path into the equivalent container-side path.
  const relativePath = requestedPath.slice(matchingRoot.hostPath.length);
  const containerFullPath = path.join(matchingRoot.containerPath, relativePath);

  // Canonicalize and confirm the result stays inside an allowed root.
  // Traversal attempts are audit-logged and rejected with 403.
  const allowedRoots = BROWSE_ROOTS.map(r => r.containerPath);
  let resolvedPath;
  try {
    resolvedPath = await ctx.validateSecurePath(containerFullPath, allowedRoots, ctx.auditLogger);
  } catch (error) {
    if (error.constructor.name === 'ValidationError') {
      ctx.auditLogger.logSecurityEvent('path_traversal_attempt', {
        requestedPath, containerFullPath, allowedRoots,
        error: error.message,
        ip: req.ip,
        userAgent: req.get('user-agent')
      });
      return ctx.errorResponse(res, 403, 'Access denied - path traversal detected');
    }
    throw error;
  }

  if (!await exists(resolvedPath)) {
    const { NotFoundError } = require('../errors');
    throw new NotFoundError('Path');
  }

  const stats = await fsp.stat(resolvedPath);
  if (!stats.isDirectory()) {
    return ctx.errorResponse(res, 400, 'Path is not a directory');
  }

  // List visible subdirectories only: hidden entries and Windows system
  // folders are skipped; results are sorted alphabetically.
  const entries = await fsp.readdir(resolvedPath, { withFileTypes: true });
  const folders = entries
    .filter(entry => {
      if (!entry.isDirectory()) return false;
      if (entry.name.startsWith('.')) return false;
      if (entry.name === '$RECYCLE.BIN' || entry.name === 'System Volume Information') return false;
      return true;
    })
    .map(entry => ({
      name: entry.name,
      // Paths are reported host-side, normalized to forward slashes for the UI.
      path: path.join(requestedPath, entry.name).replace(/\\/g, '/'),
      type: 'folder'
    }))
    .sort((a, b) => a.name.localeCompare(b.name));

  const paginationParams = parsePaginationParams(req.query);
  const result = paginate(folders, paginationParams);
  res.json({
    success: true,
    path: requestedPath,
    parent: path.dirname(requestedPath).replace(/\\/g, '/') || null,
    items: result.data,
    // Pagination metadata is included only when the helper produced it.
    ...(result.pagination && { pagination: result.pagination })
  });
}, 'browse-dir'));
|
||||
|
||||
// Detect media mounts from existing media server containers
|
||||
router.get('/media/detected-mounts', ctx.asyncHandler(async (req, res) => {
  // Image-name substrings that identify media servers / *arr-style apps.
  const mediaServerPatterns = [
    'plex', 'jellyfin', 'emby', 'kodi', 'navidrome', 'airsonic',
    'subsonic', 'funkwhale', 'beets', 'lidarr', 'sonarr', 'radarr',
    'bazarr', 'readarr', 'prowlarr', 'overseerr', 'ombi', 'tautulli'
  ];

  // Mounts whose host or container path contains any of these are
  // config/system mounts, not media libraries, and are skipped.
  const excludePatterns = [
    '/config', '/cache', '/transcode', '/data/config', '/app',
    '/tmp', '/var', '/etc', '/opt', '/root', '/home', '/.', '/caddyfile'
  ];

  // Running containers only (all: false).
  const containers = await ctx.docker.client.listContainers({ all: false });
  const detectedMounts = [];
  const seenPaths = new Set(); // dedupe by host path across containers

  for (const containerInfo of containers) {
    const imageName = containerInfo.Image.toLowerCase();
    const isMediaServer = mediaServerPatterns.some(p => imageName.includes(p));
    if (!isMediaServer) continue;

    const container = ctx.docker.client.getContainer(containerInfo.Id);
    const details = await container.inspect();
    const binds = details.HostConfig?.Binds || [];

    for (const bind of binds) {
      // Binds look like "hostPath:containerPath[:opts]".
      const parts = bind.split(':');
      if (parts.length < 2) continue;

      // A single-letter first segment is a Windows drive letter
      // (e.g. "C:\media:/media"), so re-join it with the next segment.
      let hostPath, containerPath;
      if (parts[0].length === 1 && /[A-Za-z]/.test(parts[0])) {
        hostPath = parts[0] + ':' + parts[1];
        containerPath = parts[2] || '';
      } else {
        hostPath = parts[0];
        containerPath = parts[1];
      }

      const isExcluded = excludePatterns.some(p =>
        containerPath.toLowerCase().includes(p.toLowerCase()) ||
        hostPath.toLowerCase().includes(p.toLowerCase())
      );
      if (isExcluded) continue;
      if (seenPaths.has(hostPath)) continue;
      seenPaths.add(hostPath);

      // Use the last non-empty path segment as a display name.
      const folderName = hostPath.split(/[/\\]/).filter(p => p && p !== ':').pop() || hostPath;

      detectedMounts.push({
        hostPath, containerPath, folderName,
        sourceContainer: containerInfo.Names[0]?.replace('/', '') || containerInfo.Id.slice(0, 12),
        // Image name stripped of registry prefix and tag.
        sourceImage: containerInfo.Image.split('/').pop().split(':')[0]
      });
    }
  }

  res.json({
    success: true,
    mounts: detectedMounts,
    message: detectedMounts.length > 0
      ? `Found ${detectedMounts.length} media mount(s) from existing containers`
      : 'No existing media mounts detected'
  });
}, 'detect-media-mounts'));
|
||||
|
||||
return router;
|
||||
};
|
||||
288
dashcaddy-api/routes/ca.js
Normal file
288
dashcaddy-api/routes/ca.js
Normal file
@@ -0,0 +1,288 @@
|
||||
const express = require('express');
|
||||
const fs = require('fs');
|
||||
const fsp = require('fs').promises;
|
||||
const path = require('path');
|
||||
const { execSync } = require('child_process');
|
||||
const { exists } = require('../fs-helpers');
|
||||
const platformPaths = require('../platform-paths');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// Get CA certificate information
|
||||
router.get('/info', ctx.asyncHandler(async (req, res) => {
  // Prefer the baked-in container copy, then fall back to the host CA dir.
  const candidates = [
    '/app/ca/cert-info.json',
    path.join(platformPaths.caCertDir, 'cert-info.json')
  ];

  let certInfoFile = null;
  for (const candidate of candidates) {
    if (await exists(candidate)) {
      certInfoFile = candidate;
      break;
    }
  }
  if (!certInfoFile) {
    const { NotFoundError } = require('../errors');
    throw new NotFoundError('CA certificate information');
  }

  const certInfo = JSON.parse(await fsp.readFile(certInfoFile, 'utf8'));
  // Days remaining before the root certificate expires (negative once expired).
  const msPerDay = 1000 * 60 * 60 * 24;
  const daysUntilExpiration = Math.floor((new Date(certInfo.validUntil) - new Date()) / msPerDay);

  res.json({
    success: true,
    certificate: {
      name: certInfo.name,
      fingerprint: certInfo.fingerprint,
      validFrom: certInfo.validFrom,
      validUntil: certInfo.validUntil,
      daysUntilExpiration,
      algorithm: certInfo.algorithm || 'ECDSA P-256 with SHA-256',
      serialNumber: certInfo.serialNumber,
      downloadUrl: `https://ca${ctx.siteConfig.tld}/root.crt`
    }
  });
}, 'ca-info'));
|
||||
|
||||
// Serve root CA certificate directly (works even without DashCA deployed)
|
||||
router.get('/root.crt', ctx.asyncHandler(async (req, res) => {
  // Search order: container PKI copy, DashCA static-site copy, host PKI copy.
  const candidates = [
    '/app/pki/root.crt',
    path.join(platformPaths.caCertDir, 'root.crt'),
    platformPaths.pkiRootCert
  ];

  let certPath = null;
  for (const candidate of candidates) {
    if (await exists(candidate)) {
      certPath = candidate;
      break;
    }
  }
  if (!certPath) {
    const { NotFoundError } = require('../errors');
    throw new NotFoundError('Root CA certificate');
  }

  res.setHeader('Content-Type', 'application/x-x509-ca-cert');
  res.setHeader('Content-Disposition', 'attachment; filename="dashcaddy-root-ca.crt"');
  res.sendFile(path.resolve(certPath));
}, 'ca-root-crt'));
|
||||
|
||||
// Generate a platform-specific install script with real cert info injected
|
||||
router.get('/install-script', ctx.asyncHandler(async (req, res) => {
  const platform = (req.query.platform || 'windows').toLowerCase();
  if (!['windows', 'linux', 'macos'].includes(platform)) {
    return ctx.errorResponse(res, 400, 'Invalid platform. Use: windows, linux, or macos');
  }

  // Load cert info to get the fingerprint (container path first, host fallback).
  const certInfoPath = '/app/ca/cert-info.json';
  const fallbackCertInfoPath2 = path.join(platformPaths.caCertDir, 'cert-info.json');

  let certInfoFile;
  if (await exists(certInfoPath)) certInfoFile = certInfoPath;
  else if (await exists(fallbackCertInfoPath2)) certInfoFile = fallbackCertInfoPath2;
  else {
    const { NotFoundError } = require('../errors');
    throw new NotFoundError('CA certificate information. Deploy DashCA first or ensure cert-info.json exists.');
  }

  const certInfo = JSON.parse(await fsp.readFile(certInfoFile, 'utf8'));
  const fingerprint = certInfo.fingerprint; // e.g. "08:98:A5:63:..."

  // Build the cert download URL — the DashCA site is preferred; the API also
  // serves the same cert at /api/ca/root.crt, and the generated script's TLS
  // bypass means either would work.
  const tld = ctx.siteConfig.tld || '.home';
  const certUrl = `https://ca${tld}/root.crt`;

  // Pick the template for the target platform.
  const templateName = platform === 'windows' ? 'install-ca.ps1.template' : 'install-ca.sh.template';

  // Look for template in multiple locations (packaged app vs dev).
  const templatePaths = [
    path.join(__dirname, '..', 'scripts', templateName),
    path.join('/app', 'scripts', templateName)
  ];

  let templateContent;
  for (const tp of templatePaths) {
    if (await exists(tp)) {
      templateContent = await fsp.readFile(tp, 'utf8');
      break;
    }
  }

  if (!templateContent) {
    const { NotFoundError } = require('../errors');
    throw new NotFoundError(`Install script template (${templateName})`);
  }

  // Inject real values. Global regexes are used because String.replace with
  // a string pattern only substitutes the FIRST occurrence, and a template
  // may legitimately reference the same placeholder more than once.
  const script = templateContent
    .replace(/\{\{CERT_URL\}\}/g, certUrl)
    .replace(/\{\{CERT_FINGERPRINT\}\}/g, fingerprint);

  const filename = platform === 'windows' ? 'install-dashcaddy-ca.ps1' : 'install-dashcaddy-ca.sh';
  const contentType = platform === 'windows' ? 'text/plain; charset=utf-8' : 'text/x-shellscript; charset=utf-8';

  res.setHeader('Content-Type', contentType);
  // Fixed: the computed filename was previously not interpolated into the
  // header, so downloads were served with a bogus literal name.
  res.setHeader('Content-Disposition', `attachment; filename="${filename}"`);
  res.send(script);
}, 'ca-install-script'));
|
||||
|
||||
// Generate and download SSL certificate for a service
|
||||
router.get('/cert/:domain', ctx.asyncHandler(async (req, res) => {
  const { domain } = req.params;
  const { password = 'dashcaddy', format = 'pfx' } = req.query;

  // Whitelist of password characters. This also guards the execSync call
  // below: quotes, '$', backticks, semicolons and spaces are all rejected,
  // so the password cannot break out of the quoted shell argument.
  if (!/^[a-zA-Z0-9!@#%^_+=,.:-]{1,64}$/.test(password)) {
    return ctx.errorResponse(res, 400, 'Invalid password. Use only letters, numbers, and basic symbols (max 64 chars).');
  }

  // Strict hostname syntax; also prevents shell/path injection via :domain.
  if (!domain || !/^[a-z0-9]([a-z0-9-]*[a-z0-9])?(\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$/i.test(domain)) {
    return ctx.errorResponse(res, 400, `Invalid domain name. Must be a valid hostname (e.g., dns1${ctx.siteConfig.tld})`);
  }

  const pkiPath = '/app/pki';
  const certsDir = '/app/generated-certs';
  const domainDir = path.join(certsDir, domain);

  // Issued certificates are signed by Caddy's intermediate CA.
  const intermediateCert = path.join(pkiPath, 'intermediate.crt');
  const intermediateKey = path.join(pkiPath, 'intermediate.key');
  const rootCert = path.join(pkiPath, 'root.crt');

  if (!await exists(intermediateCert) || !await exists(intermediateKey)) {
    return ctx.errorResponse(res, 500, 'CA certificates not found. Ensure Caddy PKI is initialized.');
  }

  if (!await exists(certsDir)) await fsp.mkdir(certsDir, { recursive: true });
  if (!await exists(domainDir)) await fsp.mkdir(domainDir, { recursive: true });

  // All output artifacts live under generated-certs/<domain>/.
  const keyFile = path.join(domainDir, 'server.key');
  const csrFile = path.join(domainDir, 'server.csr');
  const certFile = path.join(domainDir, 'server.crt');
  const pfxFile = path.join(domainDir, 'server.pfx');
  const pemFile = path.join(domainDir, 'server.pem');
  const fullChainFile = path.join(domainDir, 'fullchain.pem');

  // Reuse an existing certificate if it has more than 30 days of validity
  // left; any openssl parse failure forces regeneration.
  let needsRegeneration = true;
  if (await exists(certFile)) {
    try {
      const certDates = execSync(`openssl x509 -in "${certFile}" -noout -dates`).toString();
      const notAfter = certDates.match(/notAfter=(.*)/)[1].trim();
      const expirationDate = new Date(notAfter);
      const daysUntilExpiration = Math.floor((expirationDate - new Date()) / (1000 * 60 * 60 * 24));
      if (daysUntilExpiration > 30) needsRegeneration = false;
    } catch {
      needsRegeneration = true;
    }
  }

  if (needsRegeneration) {
    // 1) Fresh 2048-bit RSA key and a CSR for CN=<domain>.
    execSync(`openssl genrsa -out "${keyFile}" 2048`, { stdio: 'pipe' });

    const subject = `/CN=${domain}`;
    execSync(`openssl req -new -key "${keyFile}" -out "${csrFile}" -subj "${subject}"`, { stdio: 'pipe' });

    // 2) x509v3 extension config: server-auth usage plus SANs. A wildcard
    // SAN is added only for dotted domains (DNS.2 = *.<domain>).
    const configContent = `[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no

[req_distinguished_name]
CN = ${domain}

[v3_req]
keyUsage = keyEncipherment, dataEncipherment, digitalSignature
extendedKeyUsage = serverAuth
subjectAltName = @alt_names

[alt_names]
DNS.1 = ${domain}
${domain.includes('.') ? `DNS.2 = *.${domain}` : ''}`;

    const configFile = path.join(domainDir, 'openssl.cnf');
    await fsp.writeFile(configFile, configContent);

    // 3) Sign the CSR with the intermediate CA for 365 days.
    const serialFile = path.join(domainDir, 'ca.srl');
    execSync(`openssl x509 -req -in "${csrFile}" -CA "${intermediateCert}" -CAkey "${intermediateKey}" -CAserial "${serialFile}" -CAcreateserial -out "${certFile}" -days 365 -sha256 -extfile "${configFile}" -extensions v3_req`, { stdio: 'pipe' });

    // 4) Assemble fullchain.pem = server + intermediate + root.
    const serverCertContent = await fsp.readFile(certFile, 'utf8');
    const intermediateCertContent = await fsp.readFile(intermediateCert, 'utf8');
    const rootCertContent = await fsp.readFile(rootCert, 'utf8');
    await fsp.writeFile(fullChainFile, serverCertContent + '\n' + intermediateCertContent + '\n' + rootCertContent);

    // 5) Password-protected PKCS#12 bundle (key + cert + intermediate).
    execSync(`openssl pkcs12 -export -out "${pfxFile}" -inkey "${keyFile}" -in "${certFile}" -certfile "${intermediateCert}" -password "pass:${password}"`, { stdio: 'pipe' });

    // 6) Combined PEM = private key + server cert + intermediate.
    const keyContent = await fsp.readFile(keyFile, 'utf8');
    await fsp.writeFile(pemFile, keyContent + '\n' + serverCertContent + '\n' + intermediateCertContent);
  }

  // Serve the requested artifact as a download.
  // NOTE(review): when a cached cert is reused, the pfx keeps the password it
  // was originally exported with, not the one in this request — confirm.
  if (format === 'pfx') {
    res.setHeader('Content-Type', 'application/x-pkcs12');
    res.setHeader('Content-Disposition', `attachment; filename="${domain}.pfx"`);
    res.sendFile(pfxFile);
  } else if (format === 'pem') {
    res.setHeader('Content-Type', 'application/x-pem-file');
    res.setHeader('Content-Disposition', `attachment; filename="${domain}.pem"`);
    res.sendFile(pemFile);
  } else if (format === 'crt') {
    res.setHeader('Content-Type', 'application/x-x509-ca-cert');
    res.setHeader('Content-Disposition', `attachment; filename="${domain}.crt"`);
    res.sendFile(certFile);
  } else if (format === 'key') {
    res.setHeader('Content-Type', 'application/x-pem-file');
    res.setHeader('Content-Disposition', `attachment; filename="${domain}.key"`);
    res.sendFile(keyFile);
  } else if (format === 'fullchain') {
    res.setHeader('Content-Type', 'application/x-pem-file');
    res.setHeader('Content-Disposition', `attachment; filename="${domain}-fullchain.pem"`);
    res.sendFile(fullChainFile);
  } else {
    ctx.errorResponse(res, 400, 'Invalid format. Use: pfx, pem, crt, key, or fullchain');
  }
}, 'ca-cert'));
|
||||
|
||||
// List generated certificates
|
||||
router.get('/certs', ctx.asyncHandler(async (req, res) => {
  const certsDir = '/app/generated-certs';

  // No generation directory yet means no certificates have been issued.
  if (!await exists(certsDir)) {
    return res.json({ success: true, certificates: [] });
  }

  // Each immediate subdirectory corresponds to one issued domain.
  const domains = [];
  for (const entry of await fsp.readdir(certsDir)) {
    const stat = await fsp.stat(path.join(certsDir, entry));
    if (stat.isDirectory()) domains.push(entry);
  }

  // Inspect every domain's server.crt in parallel; missing or unreadable
  // certificates are dropped from the listing rather than failing the request.
  const inspected = await Promise.all(domains.map(async (domain) => {
    const certFile = path.join(certsDir, domain, 'server.crt');
    if (!await exists(certFile)) return null;

    try {
      const certInfo = execSync(`openssl x509 -in "${certFile}" -noout -subject -dates -fingerprint -sha256`).toString();

      // Pull a "key=value" field out of the openssl output, with a fallback.
      const field = (re, fallback) => {
        const m = certInfo.match(re);
        return m ? m[1].trim() : fallback;
      };

      const subject = field(/subject=(.*)/, domain);
      const notBefore = field(/notBefore=(.*)/, '');
      const notAfter = field(/notAfter=(.*)/, '');
      const fingerprint = field(/Fingerprint=(.*)/, '');

      const daysUntilExpiration = Math.floor((new Date(notAfter) - new Date()) / (1000 * 60 * 60 * 24));

      return {
        domain, subject,
        validFrom: notBefore, validUntil: notAfter,
        daysUntilExpiration, fingerprint,
        status: daysUntilExpiration < 0 ? 'expired' : daysUntilExpiration < 30 ? 'expiring-soon' : 'valid'
      };
    } catch {
      return null;
    }
  }));

  res.json({ success: true, certificates: inspected.filter(Boolean) });
}, 'ca-certs'));
|
||||
|
||||
return router;
|
||||
};
|
||||
293
dashcaddy-api/routes/config/assets.js
Normal file
293
dashcaddy-api/routes/config/assets.js
Normal file
@@ -0,0 +1,293 @@
|
||||
const express = require('express');
|
||||
const fsp = require('fs').promises;
|
||||
const path = require('path');
|
||||
const { LIMITS } = require('../../constants');
|
||||
const { exists } = require('../../fs-helpers');
|
||||
|
||||
// Image processing for favicon conversion (optional)
|
||||
let sharp, pngToIco;
|
||||
try {
|
||||
sharp = require('sharp');
|
||||
pngToIco = require('png-to-ico');
|
||||
} catch (e) {
|
||||
// Image processing libraries not available — favicon conversion disabled
|
||||
}
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// ===== ASSET UPLOAD =====
|
||||
|
||||
router.post('/assets/upload', express.json({ limit: LIMITS.BODY_UPLOAD }), ctx.asyncHandler(async (req, res) => {
  const { filename, data } = req.body;

  if (!filename || !data) {
    return ctx.errorResponse(res, 400, 'filename and data are required');
  }

  // Reject anything that is not a bare filename (directory-traversal guard).
  const safeFilename = path.basename(filename);
  if (safeFilename !== filename || filename.includes('..')) {
    return ctx.errorResponse(res, 400, 'Invalid filename - must not contain path separators');
  }

  // Expect an image data-URL; decode its base64 payload.
  const matches = data.match(/^data:image\/([a-zA-Z+]+);base64,(.+)$/);
  if (!matches) {
    return ctx.errorResponse(res, 400, 'Invalid image data format');
  }
  const buffer = Buffer.from(matches[2], 'base64');

  // Assets live on a mounted volume; create the directory on first use.
  const assetsPath = process.env.ASSETS_PATH || '/app/assets';
  if (!await exists(assetsPath)) {
    await fsp.mkdir(assetsPath, { recursive: true });
  }

  const filePath = path.join(assetsPath, safeFilename);
  await fsp.writeFile(filePath, buffer);

  res.json({
    success: true,
    path: `/assets/${safeFilename}`,
    message: `Logo saved to ${filePath}`
  });
}, 'assets-upload'));
|
||||
|
||||
// ===== CUSTOM LOGO ENDPOINTS =====
|
||||
// Manage custom dashboard logo
|
||||
|
||||
// Get current logo path, position, and title
|
||||
router.get('/logo', ctx.asyncHandler(async (req, res) => {
  // Report the stored branding settings, normalizing absent values to null
  // and filling in defaults for position and title.
  const config = await ctx.readConfig();
  const { customLogo, customLogoDark, customLogoLight } = config;

  res.json({
    success: true,
    // Dark/light variants (new)
    customLogoDark: customLogoDark || null,
    customLogoLight: customLogoLight || null,
    // Legacy single-logo fallback
    customLogo: customLogo || customLogoDark || null,
    position: config.logoPosition || 'left',
    dashboardTitle: config.dashboardTitle || 'DashCaddy',
    isDefault: !customLogoDark && !customLogoLight && !customLogo
  });
}, 'logo-get'));
|
||||
|
||||
// Helper: save a base64 image to assets, return { filename, webPath }
|
||||
/**
 * Decode a base64 image data-URL and save it to the assets volume as
 * `custom-logo-<suffix>.<ext>`.
 * @param {string} data - data-URL (`data:image/<type>;base64,<payload>`)
 * @param {string} suffix - variant label, e.g. 'dark' or 'light'
 * @returns {Promise<string|null>} web path (`/assets/...`), or null if
 *   `data` is not a valid image data-URL
 */
async function saveLogoFile(data, suffix) {
  const matches = data.match(/^data:image\/([a-zA-Z+]+);base64,(.+)$/);
  if (!matches) return null;

  // Normalize the MIME subtype into a file extension ('svg+xml' -> 'svg').
  const extension = matches[1] === 'svg+xml' ? 'svg' : matches[1];
  const buffer = Buffer.from(matches[2], 'base64');

  const assetsPath = process.env.ASSETS_PATH || '/app/assets';
  if (!await exists(assetsPath)) {
    await fsp.mkdir(assetsPath, { recursive: true });
  }

  // Fixed: the computed filename was previously not interpolated into the
  // write path or the returned web path, so the file landed under a literal
  // bogus name and the config stored a broken URL.
  const filename = `custom-logo-${suffix}.${extension}`;
  await fsp.writeFile(`${assetsPath}/${filename}`, buffer);
  return `/assets/${filename}`;
}
|
||||
|
||||
// Upload custom logo(s) and/or update position and title
|
||||
// Supports: dataDark/dataLight (separate variants) or data (single logo for both)
|
||||
router.post('/logo', express.json({ limit: LIMITS.BODY_UPLOAD }), ctx.asyncHandler(async (req, res) => {
  // Accepts any combination of: dataDark/dataLight (separate variants),
  // data (single logo used for both), position, dashboardTitle.
  const { data, dataDark, dataLight, position, dashboardTitle } = req.body;

  if (!data && !dataDark && !dataLight && !position && !dashboardTitle) {
    return ctx.errorResponse(res, 400, 'Image data, position, or title is required');
  }

  const config = await ctx.readConfig();
  let pathDark = null, pathLight = null;

  // New dual-variant upload: each provided variant is saved independently.
  if (dataDark) {
    pathDark = await saveLogoFile(dataDark, 'dark');
    if (!pathDark) return ctx.errorResponse(res, 400, 'Invalid dark logo data format');
    config.customLogoDark = pathDark;
  }
  if (dataLight) {
    pathLight = await saveLogoFile(dataLight, 'light');
    if (!pathLight) return ctx.errorResponse(res, 400, 'Invalid light logo data format');
    config.customLogoLight = pathLight;
  }

  // Legacy single-logo upload: one file (stored under the 'dark' name)
  // serves as both variants.
  if (data && !dataDark && !dataLight) {
    const singlePath = await saveLogoFile(data, 'dark');
    if (!singlePath) return ctx.errorResponse(res, 400, 'Invalid image data format');
    config.customLogoDark = singlePath;
    config.customLogoLight = singlePath;
    // Also set legacy field for backward compat
    config.customLogo = singlePath;
    pathDark = singlePath;
    pathLight = singlePath;
  }

  // Position is silently ignored unless it is one of the known values.
  if (position && ['left', 'center', 'right'].includes(position)) {
    config.logoPosition = position;
  }

  // Title is trimmed and capped at 50 chars; an empty result falls back to
  // the default. Note an explicit empty string therefore resets the title.
  if (dashboardTitle !== undefined) {
    const sanitizedTitle = String(dashboardTitle).trim().substring(0, 50);
    config.dashboardTitle = sanitizedTitle || 'DashCaddy';
  }

  // Persist the whole config document.
  config.updatedAt = new Date().toISOString();
  await fsp.writeFile(ctx.CONFIG_FILE, JSON.stringify(config, null, 2), 'utf8');

  res.json({
    success: true,
    pathDark: pathDark,
    pathLight: pathLight,
    // Legacy compat
    path: pathDark || pathLight,
    position: config.logoPosition || 'left',
    dashboardTitle: config.dashboardTitle || 'DashCaddy',
    message: 'Branding settings saved'
  });
}, 'logo-upload'));
|
||||
|
||||
// Reset all branding to defaults
|
||||
// Remove all custom branding: delete stored logo files and reset the
// related config fields to their defaults.
router.delete('/logo', ctx.asyncHandler(async (req, res) => {
  const config = await ctx.readConfig();
  const assetsPath = process.env.ASSETS_PATH || '/app/assets';

  // Delete every distinct custom logo file (legacy + dark/light variants).
  // The same file may back several config fields, so dedupe by filename.
  const logoPaths = [config.customLogo, config.customLogoDark, config.customLogoLight].filter(Boolean);
  const seen = new Set();
  for (const logoPath of logoPaths) {
    const filename = logoPath.replace('/assets/', '');
    if (seen.has(filename)) continue;
    seen.add(filename);
    // Fixed: the filename was previously not interpolated into the path,
    // so stale logo files were never actually removed from the volume.
    const filePath = `${assetsPath}/${filename}`;
    if (await exists(filePath)) {
      await fsp.unlink(filePath);
    }
  }

  // Reset all branding settings to defaults
  delete config.customLogo;
  delete config.customLogoDark;
  delete config.customLogoLight;
  delete config.dashboardTitle;
  delete config.logoPosition;
  config.updatedAt = new Date().toISOString();
  await fsp.writeFile(ctx.CONFIG_FILE, JSON.stringify(config, null, 2), 'utf8');

  res.json({
    success: true,
    message: 'Branding reset to defaults'
  });
}, 'logo-delete'));
|
||||
|
||||
// ===== FAVICON ENDPOINTS =====
|
||||
// Upload and convert favicon (PNG/SVG to ICO)
|
||||
|
||||
// Get current favicon
|
||||
router.get('/favicon', ctx.asyncHandler(async (req, res) => {
  // Report the configured favicon, or flag that the built-in default is in use.
  const { customFavicon } = await ctx.readConfig();
  res.json({
    success: true,
    customFavicon: customFavicon || null,
    isDefault: !customFavicon
  });
}, 'favicon-get'));
|
||||
|
||||
// Upload and convert favicon
|
||||
router.post('/favicon', ctx.asyncHandler(async (req, res) => {
  // NOTE(review): unlike POST /logo, this route does not attach its own
  // express.json({ limit: BODY_UPLOAD }) middleware — confirm the app-level
  // body parser limit is large enough for favicon uploads.
  const { data } = req.body;

  if (!data) {
    return ctx.errorResponse(res, 400, 'Image data is required');
  }

  // sharp/png-to-ico are optional dependencies; without them conversion
  // is impossible, so fail loudly rather than saving a broken favicon.
  if (!sharp || !pngToIco) {
    return ctx.errorResponse(res, 500, 'Image processing not available');
  }

  // Extract base64 data from the image data-URL.
  const matches = data.match(/^data:image\/([a-zA-Z+]+);base64,(.+)$/);
  if (!matches) {
    return ctx.errorResponse(res, 400, 'Invalid image data format');
  }

  const imageType = matches[1]; // MIME subtype; currently unused downstream
  const base64Data = matches[2];
  const buffer = Buffer.from(base64Data, 'base64');

  const assetsPath = process.env.ASSETS_PATH || '/app/assets';
  if (!await exists(assetsPath)) {
    await fsp.mkdir(assetsPath, { recursive: true });
  }

  // Render the source image to PNG at the standard ICO sizes.
  const sizes = [16, 32, 48];
  const pngBuffers = await Promise.all(
    sizes.map(size =>
      sharp(buffer)
        .resize(size, size, { fit: 'contain', background: { r: 0, g: 0, b: 0, alpha: 0 } })
        .png()
        .toBuffer()
    )
  );

  // Bundle the PNGs into a single multi-size ICO.
  const icoBuffer = await pngToIco(pngBuffers);

  // Save ICO file
  const icoPath = `${assetsPath}/favicon.ico`;
  await fsp.writeFile(icoPath, icoBuffer);

  // Also save a 32x32 PNG version for modern browsers.
  const png32 = await sharp(buffer)
    .resize(32, 32, { fit: 'contain', background: { r: 0, g: 0, b: 0, alpha: 0 } })
    .png()
    .toBuffer();
  await fsp.writeFile(`${assetsPath}/favicon.png`, png32);

  // Persist the favicon path in the dashboard config.
  await ctx.saveConfig({ customFavicon: '/assets/favicon.ico', updatedAt: new Date().toISOString() });

  res.json({
    success: true,
    path: '/assets/favicon.ico',
    message: 'Favicon created successfully'
  });
}, 'favicon'));
|
||||
|
||||
// Reset favicon to default
//
// Removes the generated favicon artifacts from the assets directory and
// clears the customFavicon entry from the persisted config.
router.delete('/favicon', ctx.asyncHandler(async (req, res) => {
  const config = await ctx.readConfig();

  // Remove both generated files (ICO + PNG); missing files are skipped.
  const assetsPath = process.env.ASSETS_PATH || '/app/assets';
  for (const name of ['favicon.ico', 'favicon.png']) {
    const candidate = `${assetsPath}/${name}`;
    if (await exists(candidate)) {
      await fsp.unlink(candidate);
    }
  }

  delete config.customFavicon;
  config.updatedAt = new Date().toISOString();
  await fsp.writeFile(ctx.CONFIG_FILE, JSON.stringify(config, null, 2), 'utf8');

  res.json({
    success: true,
    message: 'Favicon reset to default'
  });
}, 'favicon-delete'));
|
||||
|
||||
return router;
|
||||
};
|
||||
304
dashcaddy-api/routes/config/backup.js
Normal file
304
dashcaddy-api/routes/config/backup.js
Normal file
@@ -0,0 +1,304 @@
|
||||
const fsp = require('fs').promises;
|
||||
const path = require('path');
|
||||
const { CADDY } = require('../../constants');
|
||||
const { exists } = require('../../fs-helpers');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const express = require('express');
|
||||
const router = express.Router();
|
||||
|
||||
// ===== BACKUP/RESTORE ENDPOINTS =====
|
||||
// Export and import DashCaddy configuration
|
||||
|
||||
// Export all configuration as a downloadable JSON bundle
//
// GET /backup/export — collects every DashCaddy config file (services,
// Caddyfile, settings, credentials, encryption key, TOTP/Tailscale/
// notification configs) plus custom branding assets into a single JSON
// document and serves it as a file attachment.
//
// WARNING: the bundle contains secrets (encryption key, TOTP secret,
// stored credentials) — it must only be reachable via the authenticated API.
router.get('/backup/export', ctx.asyncHandler(async (req, res) => {
  const backup = {
    version: '1.1',
    exportedAt: new Date().toISOString(),
    dashcaddyVersion: '1.0.0',
    files: {},
    assets: {}
  };

  // Collect all configuration files
  const ENCRYPTION_KEY_FILE = process.env.ENCRYPTION_KEY_FILE || path.join(path.dirname(ctx.SERVICES_FILE), '.encryption-key');
  const filesToBackup = [
    { key: 'services', path: ctx.SERVICES_FILE, required: true },
    { key: 'caddyfile', path: ctx.caddy.filePath, required: true },
    { key: 'config', path: ctx.CONFIG_FILE, required: false },
    { key: 'dnsCredentials', path: ctx.dns.credentialsFile, required: false },
    { key: 'credentials', path: process.env.CREDENTIALS_FILE || path.join(__dirname, '..', '..', 'credentials.json'), required: false },
    { key: 'encryptionKey', path: ENCRYPTION_KEY_FILE, required: false },
    { key: 'totpConfig', path: ctx.TOTP_CONFIG_FILE, required: false },
    { key: 'tailscaleConfig', path: ctx.TAILSCALE_CONFIG_FILE, required: false },
    { key: 'notifications', path: ctx.NOTIFICATIONS_FILE, required: false }
  ];

  for (const file of filesToBackup) {
    try {
      if (await exists(file.path)) {
        const content = await fsp.readFile(file.path, 'utf8');
        // Try to parse as JSON, otherwise store as raw string
        try {
          backup.files[file.key] = {
            type: 'json',
            data: JSON.parse(content)
          };
        } catch {
          backup.files[file.key] = {
            type: 'text',
            data: content
          };
        }
      } else if (file.required) {
        // Record required-but-missing files so preview/restore can flag them.
        backup.files[file.key] = { type: 'missing', data: null };
      }
    } catch (e) {
      // Best-effort: one unreadable file must not abort the whole export.
      ctx.log.warn('backup', `Could not backup ${file.key}`, { error: e.message });
    }
  }

  // Include TOTP QR code for authenticator app recovery
  if (ctx.totpConfig.isSetUp) {
    try {
      const secret = await ctx.credentialManager.retrieve('totp.secret');
      if (secret) {
        const { authenticator } = require('otplib');
        const QRCode = require('qrcode');
        const otpauth = authenticator.keyuri('user', 'DashCaddy', secret);
        const qrDataUrl = await QRCode.toDataURL(otpauth, {
          width: 256, margin: 2,
          color: { dark: '#000000', light: '#ffffff' }
        });
        backup.totp = { qrCode: qrDataUrl, manualKey: secret, issuer: 'DashCaddy' };
      }
    } catch (e) {
      ctx.log.warn('backup', 'Could not include TOTP QR in backup', { error: e.message });
    }
  }

  // Include custom assets (logos, favicon) as base64
  try {
    // FIX: the assets routes resolve the directory from ASSETS_PATH, but
    // this code only checked ASSETS_DIR — keep ASSETS_DIR first for
    // backward compatibility, then fall back to ASSETS_PATH so the export
    // actually looks where the assets were written.
    const assetsDir = process.env.ASSETS_DIR || process.env.ASSETS_PATH || '/app/assets';
    const configData = backup.files.config?.data || {};
    // FIX: also bundle the dark/light logo variants (previously only the
    // base logo and favicon were exported). Dedupe since variants may
    // reference the same file.
    const assetFiles = [...new Set(
      [configData.customLogo, configData.customLogoDark, configData.customLogoLight, configData.customFavicon]
        .filter(Boolean)
        .map(p => p.replace(/^\/assets\//, ''))
    )];
    for (const assetName of assetFiles) {
      const assetPath = path.join(assetsDir, assetName);
      if (await exists(assetPath)) {
        const data = await fsp.readFile(assetPath);
        backup.assets[assetName] = data.toString('base64');
      }
    }
  } catch (e) {
    ctx.log.warn('backup', 'Could not include assets in backup', { error: e.message });
  }

  // Set headers for file download
  const backupFilename = `dashcaddy-backup-${new Date().toISOString().split('T')[0]}.json`;
  res.setHeader('Content-Type', 'application/json');
  res.setHeader('Content-Disposition', `attachment; filename="${backupFilename}"`);

  res.json(backup);
  ctx.log.info('backup', 'Backup exported successfully');
}, 'backup-export'));
||||
|
||||
// Preview what will be restored (without making changes)
//
// POST /backup/preview — inspects an uploaded backup bundle and reports,
// per file, whether restoring it would create a new file or overwrite an
// existing one. Makes no changes on disk.
router.post('/backup/preview', ctx.asyncHandler(async (req, res) => {
  const backup = req.body;

  if (!backup || !backup.version || !backup.files) {
    return ctx.errorResponse(res, 400, 'Invalid backup file format');
  }

  const preview = {
    valid: true,
    version: backup.version,
    exportedAt: backup.exportedAt,
    files: {}
  };

  // Map each backup key to its on-disk destination and a human label.
  const ENCRYPTION_KEY_FILE = process.env.ENCRYPTION_KEY_FILE || path.join(path.dirname(ctx.SERVICES_FILE), '.encryption-key');
  const fileMapping = {
    services: { path: ctx.SERVICES_FILE, description: 'Services list' },
    caddyfile: { path: ctx.caddy.filePath, description: 'Caddy configuration' },
    config: { path: ctx.CONFIG_FILE, description: 'DashCaddy settings' },
    dnsCredentials: { path: ctx.dns.credentialsFile, description: 'DNS credentials (legacy)' },
    credentials: { path: process.env.CREDENTIALS_FILE || path.join(__dirname, '..', '..', 'credentials.json'), description: 'Encrypted credentials' },
    encryptionKey: { path: ENCRYPTION_KEY_FILE, description: 'Encryption key (for credentials)' },
    totpConfig: { path: ctx.TOTP_CONFIG_FILE, description: 'TOTP authentication config' },
    tailscaleConfig: { path: ctx.TAILSCALE_CONFIG_FILE, description: 'Tailscale config' },
    notifications: { path: ctx.NOTIFICATIONS_FILE, description: 'Notification settings' }
  };

  // Describe each restorable entry in the bundle.
  for (const [key, value] of Object.entries(backup.files)) {
    if (!value || value.type === 'missing') continue;

    const mapping = fileMapping[key];
    const currentExists = mapping ? await exists(mapping.path) : false;

    preview.files[key] = {
      description: mapping?.description || key,
      inBackup: true,
      currentExists,
      action: currentExists ? 'overwrite' : 'create',
      type: value.type
    };
  }

  // Count services if present (supports both array and {services:[...]} shapes).
  const servicesData = backup.files.services?.data;
  if (servicesData) {
    const services = Array.isArray(servicesData)
      ? servicesData
      : servicesData.services || [];
    preview.serviceCount = services.length;
  }

  res.json({ success: true, preview });
}, 'backup-preview'));
|
||||
|
||||
// Restore configuration from backup
//
// POST /backup/restore — writes each file in the uploaded bundle back to
// its on-disk location (backing up existing files as *.bak unless
// options.createBackup === false), then reloads the affected subsystems
// (Caddy, DNS credentials, notifications, site config) and restores any
// bundled branding assets.
//
// Request body: { backup: <export bundle>, options?: { skip?: string[],
// createBackup?: boolean, reloadCaddy?: boolean } }
router.post('/backup/restore', ctx.asyncHandler(async (req, res) => {
  const { backup, options = {} } = req.body;

  if (!backup || !backup.version || !backup.files) {
    return ctx.errorResponse(res, 400, 'Invalid backup file format');
  }

  const results = {
    restored: [],
    skipped: [],
    errors: []
  };

  // File mapping: backup key → destination path (must mirror the export list).
  const ENCRYPTION_KEY_FILE = process.env.ENCRYPTION_KEY_FILE || path.join(path.dirname(ctx.SERVICES_FILE), '.encryption-key');
  const fileMapping = {
    services: ctx.SERVICES_FILE,
    caddyfile: ctx.caddy.filePath,
    config: ctx.CONFIG_FILE,
    dnsCredentials: ctx.dns.credentialsFile,
    credentials: process.env.CREDENTIALS_FILE || path.join(__dirname, '..', '..', 'credentials.json'),
    encryptionKey: ENCRYPTION_KEY_FILE,
    totpConfig: ctx.TOTP_CONFIG_FILE,
    tailscaleConfig: ctx.TAILSCALE_CONFIG_FILE,
    notifications: ctx.NOTIFICATIONS_FILE
  };

  // Restore each file
  for (const [key, value] of Object.entries(backup.files)) {
    if (!value || value.type === 'missing') {
      continue;
    }

    // Skip if user chose to skip certain files
    if (options.skip && options.skip.includes(key)) {
      results.skipped.push(key);
      continue;
    }

    const filePath = fileMapping[key];
    if (!filePath) {
      results.errors.push({ file: key, error: 'Unknown file type' });
      continue;
    }

    try {
      // JSON entries are re-serialized; text entries are written verbatim.
      const content = value.type === 'json'
        ? JSON.stringify(value.data, null, 2)
        : value.data;

      // Create backup of existing file before overwriting
      if (await exists(filePath) && options.createBackup !== false) {
        const backupPath = `${filePath}.bak`;
        await fsp.copyFile(filePath, backupPath);
      }

      await fsp.writeFile(filePath, content, 'utf8');
      results.restored.push(key);
      ctx.log.info('backup', `Restored: ${key}`, { path: filePath });
    } catch (e) {
      results.errors.push({ file: key, error: e.message });
    }
  }

  // Reload Caddy if Caddyfile was restored
  if (results.restored.includes('caddyfile') && options.reloadCaddy !== false) {
    try {
      const caddyContent = await ctx.caddy.read();
      const loadResponse = await ctx.fetchT(`${ctx.caddy.adminUrl}/load`, {
        method: 'POST',
        headers: { 'Content-Type': CADDY.CONTENT_TYPE },
        body: caddyContent
      });

      if (loadResponse.ok) {
        results.caddyReloaded = true;
      } else {
        results.caddyReloadError = await loadResponse.text();
      }
    } catch (e) {
      results.caddyReloadError = e.message;
    }
  }

  // Reload DNS credentials if restored
  if (results.restored.includes('dnsCredentials')) {
    try {
      ctx.loadDnsCredentials();
      results.dnsReloaded = true;
    } catch (e) {
      results.dnsReloadError = e.message;
    }
  }

  // Reload notification config if restored
  if (results.restored.includes('notifications')) {
    try {
      await ctx.loadNotificationConfig();
      results.notificationsReloaded = true;
    } catch (e) {
      results.notificationsReloadError = e.message;
    }
  }

  // Reload site config if restored
  if (results.restored.includes('config')) {
    ctx.loadSiteConfig();
    results.configReloaded = true;
  }

  // Restore custom assets from base64
  if (backup.assets && typeof backup.assets === 'object') {
    // FIX: the assets routes resolve the directory from ASSETS_PATH, but
    // this code only checked ASSETS_DIR — keep ASSETS_DIR first for
    // backward compatibility, then fall back to ASSETS_PATH so assets are
    // restored to the directory the app actually serves them from.
    const assetsDir = process.env.ASSETS_DIR || process.env.ASSETS_PATH || '/app/assets';
    for (const [name, b64] of Object.entries(backup.assets)) {
      try {
        const safeName = path.basename(name); // prevent path traversal
        await fsp.writeFile(path.join(assetsDir, safeName), Buffer.from(b64, 'base64'));
        results.restored.push(`asset:${safeName}`);
      } catch (e) {
        results.errors.push({ file: `asset:${name}`, error: e.message });
      }
    }
  }

  const success = results.restored.length > 0 && results.errors.length === 0;

  res.json({
    success,
    message: success
      ? `Restored ${results.restored.length} file(s) successfully`
      : `Restore completed with ${results.errors.length} error(s)`,
    results
  });

  ctx.log.info('backup', 'Backup restore completed', { restored: results.restored.length, errors: results.errors.length });
}, 'backup-restore'));
|
||||
|
||||
return router;
|
||||
};
|
||||
9
dashcaddy-api/routes/config/index.js
Normal file
9
dashcaddy-api/routes/config/index.js
Normal file
@@ -0,0 +1,9 @@
|
||||
const express = require('express');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
router.use(require('./settings')(ctx));
|
||||
router.use(require('./assets')(ctx));
|
||||
router.use(require('./backup')(ctx));
|
||||
return router;
|
||||
};
|
||||
70
dashcaddy-api/routes/config/settings.js
Normal file
70
dashcaddy-api/routes/config/settings.js
Normal file
@@ -0,0 +1,70 @@
|
||||
const fsp = require('fs').promises;
|
||||
const { validateConfig } = require('../../config-schema');
|
||||
const { exists } = require('../../fs-helpers');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const express = require('express');
|
||||
const router = express.Router();
|
||||
|
||||
// ===== DASHCADDY CONFIG ENDPOINTS =====
|
||||
// Server-side config storage for setup wizard (shared across all browsers/machines)
|
||||
|
||||
// Read the persisted DashCaddy config; before setup the file does not
// exist yet, so report setupComplete: false instead.
router.get('/config', ctx.asyncHandler(async (req, res) => {
  if (!await exists(ctx.CONFIG_FILE)) {
    return res.json({ setupComplete: false });
  }
  const raw = await fsp.readFile(ctx.CONFIG_FILE, 'utf8');
  res.json(JSON.parse(raw));
}, 'config-get'));
|
||||
|
||||
// Save (merge) DashCaddy configuration.
//
// Shallow-merges the request body over the existing config so partial
// saves don't wipe other fields; the nested dns/dnsServers objects get
// their own one-level merge. The merged result is schema-validated
// before being written, then the in-memory config is refreshed.
router.post('/config', ctx.asyncHandler(async (req, res) => {
  const incoming = req.body;

  if (!incoming || typeof incoming !== 'object') {
    return ctx.errorResponse(res, 400, 'Invalid config object');
  }

  // Load the current config; a corrupt file just means we start fresh.
  let existing = {};
  if (await exists(ctx.CONFIG_FILE)) {
    try {
      existing = JSON.parse(await fsp.readFile(ctx.CONFIG_FILE, 'utf8'));
    } catch (_) { /* start fresh if file is corrupt */ }
  }

  const config = { ...existing, ...incoming };

  // Deep-merge one level for these nested objects so partial updates
  // don't clobber sibling keys.
  for (const nestedKey of ['dns', 'dnsServers']) {
    if (existing[nestedKey] && incoming[nestedKey]) {
      config[nestedKey] = { ...existing[nestedKey], ...incoming[nestedKey] };
    }
  }

  // Validate merged config against schema
  const { valid, errors, warnings } = validateConfig(config);
  if (!valid) {
    return ctx.errorResponse(res, 400, 'Config validation failed', { errors });
  }

  config.updatedAt = new Date().toISOString();

  await fsp.writeFile(ctx.CONFIG_FILE, JSON.stringify(config, null, 2), 'utf8');
  ctx.loadSiteConfig(); // Refresh in-memory config
  ctx.log.info('config', 'Config saved', { path: ctx.CONFIG_FILE });

  res.json({ success: true, message: 'Configuration saved', config, warnings });
}, 'config-save'));
|
||||
|
||||
// Reset configuration: delete the config file if present (idempotent).
router.delete('/config', ctx.asyncHandler(async (req, res) => {
  const hasConfig = await exists(ctx.CONFIG_FILE);
  if (hasConfig) {
    await fsp.unlink(ctx.CONFIG_FILE);
  }
  res.json({ success: true, message: 'Configuration reset' });
}, 'config-delete'));
|
||||
|
||||
return router;
|
||||
};
|
||||
191
dashcaddy-api/routes/containers.js
Normal file
191
dashcaddy-api/routes/containers.js
Normal file
@@ -0,0 +1,191 @@
|
||||
const express = require('express');
|
||||
const { DOCKER } = require('../constants');
|
||||
const { paginate, parsePaginationParams } = require('../pagination');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// Start container
// POST /:id/start — starts the Docker container identified by :id.
router.post('/:id/start', ctx.asyncHandler(async (req, res) => {
  const target = ctx.docker.client.getContainer(req.params.id);
  await target.start();
  res.json({ success: true, message: 'Container started' });
}, 'container-start'));
|
||||
|
||||
// Stop container
// POST /:id/stop — stops the Docker container identified by :id.
router.post('/:id/stop', ctx.asyncHandler(async (req, res) => {
  const target = ctx.docker.client.getContainer(req.params.id);
  await target.stop();
  res.json({ success: true, message: 'Container stopped' });
}, 'container-stop'));
|
||||
|
||||
// Restart container
// POST /:id/restart — restarts the Docker container identified by :id.
router.post('/:id/restart', ctx.asyncHandler(async (req, res) => {
  const target = ctx.docker.client.getContainer(req.params.id);
  await target.restart();
  res.json({ success: true, message: 'Container restarted' });
}, 'container-restart'));
|
||||
|
||||
// Update container to latest image version
//
// POST /:id/update — pulls the latest image for the container's tag, then
// recreates the container: snapshot config → stop → remove → create → start.
// Carries over env, exposed ports, labels, and the HostConfig subset listed
// below. NOTE: anything not copied into `config` (e.g. anonymous volumes,
// resource limits, healthcheck overrides) is NOT preserved — confirm this
// subset is sufficient for the containers DashCaddy manages.
// Point of no return: once the old container is removed, a failed start
// leaves the service down (the error is reported to the caller).
router.post('/:id/update', ctx.asyncHandler(async (req, res) => {
  const containerId = req.params.id;
  const container = ctx.docker.client.getContainer(containerId);

  // Get container info
  const containerInfo = await container.inspect();
  const imageName = containerInfo.Config.Image;
  // inspect() returns the name with a leading slash; strip it.
  const containerName = containerInfo.Name.replace(/^\//, '');

  ctx.log.info('docker', 'Updating container', { containerName, imageName });

  // Pull the latest image
  ctx.log.info('docker', `Pulling latest image: ${imageName}`);
  await ctx.docker.pull(imageName);

  // Get current container config for recreation
  const hostConfig = containerInfo.HostConfig;
  const config = {
    Image: imageName,
    name: containerName,
    Env: containerInfo.Config.Env,
    ExposedPorts: containerInfo.Config.ExposedPorts,
    Labels: containerInfo.Config.Labels,
    HostConfig: {
      Binds: hostConfig.Binds,
      PortBindings: hostConfig.PortBindings,
      RestartPolicy: hostConfig.RestartPolicy,
      NetworkMode: hostConfig.NetworkMode,
      ExtraHosts: hostConfig.ExtraHosts,
      Privileged: hostConfig.Privileged,
      CapAdd: hostConfig.CapAdd,
      CapDrop: hostConfig.CapDrop,
      Devices: hostConfig.Devices
    },
    NetworkingConfig: {}
  };

  // Get network settings if using a custom (user-defined) network, so the
  // new container rejoins it with the same endpoint settings (aliases, IP).
  if (hostConfig.NetworkMode && !['bridge', 'host', 'none'].includes(hostConfig.NetworkMode)) {
    const networkName = hostConfig.NetworkMode;
    config.NetworkingConfig.EndpointsConfig = {
      [networkName]: containerInfo.NetworkSettings.Networks[networkName]
    };
  }

  // Stop and remove old container
  ctx.log.info('docker', 'Stopping container', { containerName });
  await container.stop().catch(() => {}); // Ignore if already stopped
  ctx.log.info('docker', 'Removing container', { containerName });
  await container.remove();

  // Wait for port release (Windows/Docker Desktop can be slow to free ports)
  await new Promise(r => setTimeout(r, 3000));

  // Create and start new container
  ctx.log.info('docker', 'Creating new container', { containerName });
  let newContainer;
  try {
    newContainer = await ctx.docker.client.createContainer(config);
    ctx.log.info('docker', 'Starting container', { containerName });
    await newContainer.start();
  } catch (startError) {
    // Clean up the failed container so it doesn't block future attempts
    // (the name would otherwise stay reserved by the dead container).
    ctx.log.error('docker', 'Failed to start new container', { containerName, error: startError.message });
    if (newContainer) {
      try { await newContainer.remove({ force: true }); } catch (e) { /* already gone */ }
    }
    // Rethrow so asyncHandler reports the failure to the caller.
    throw startError;
  }

  const newContainerInfo = await newContainer.inspect();

  res.json({
    success: true,
    message: `Container ${containerName} updated successfully`,
    newContainerId: newContainerInfo.Id
  });
}, 'container-update'));
|
||||
|
||||
// Check for available updates (compares local and remote image digests)
//
// GET /:id/check-update — heuristic: pulls the image (NOTE: this is a real
// pull, i.e. checking for updates also downloads any newer image as a side
// effect) and reports updateAvailable if either (a) the pull stream shows
// layers being downloaded, or (b) the post-pull image digest differs from
// the pre-pull one. Pull failures (offline registry, auth) are swallowed
// and reported as "no update available".
router.get('/:id/check-update', ctx.asyncHandler(async (req, res) => {
  const containerId = req.params.id;
  const container = ctx.docker.client.getContainer(containerId);
  const containerInfo = await container.inspect();
  const imageName = containerInfo.Config.Image;

  // Digest of the image the container is currently running.
  const localImage = ctx.docker.client.getImage(containerInfo.Image);
  const localImageInfo = await localImage.inspect();
  const localDigest = localImageInfo.RepoDigests?.[0] || null;

  let updateAvailable = false;
  try {
    // assumes ctx.docker.pull resolves to an array of progress events — the
    // filter below relies on that shape.
    const pullStream = await ctx.docker.pull(imageName);

    // If any layer actually downloaded, the local copy was stale.
    const downloadedLayers = pullStream.filter(e =>
      e.status === 'Downloading' || e.status === 'Download complete'
    );
    updateAvailable = downloadedLayers.length > 0;

    // Digest of the (possibly freshly pulled) image for the same tag.
    const newImage = ctx.docker.client.getImage(imageName);
    const newImageInfo = await newImage.inspect();
    const newDigest = newImageInfo.RepoDigests?.[0] || null;

    // Digest mismatch means the running container is behind the tag.
    if (localDigest && newDigest && localDigest !== newDigest) {
      updateAvailable = true;
    }
  } catch (pullError) {
    // Best-effort check: registry unreachable → report no update.
    ctx.log.debug('docker', 'Could not check for updates', { error: pullError.message });
  }

  res.json({
    success: true,
    imageName,
    updateAvailable,
    currentDigest: localDigest
  });
}, 'container-check-update'));
|
||||
|
||||
// Get container logs
// GET /:id/logs — returns the last 100 stdout/stderr lines, timestamped,
// as a single string.
router.get('/:id/logs', ctx.asyncHandler(async (req, res) => {
  const target = ctx.docker.client.getContainer(req.params.id);
  const logOptions = {
    stdout: true,
    stderr: true,
    tail: 100,
    timestamps: true
  };
  const rawLogs = await target.logs(logOptions);
  res.json({ success: true, logs: rawLogs.toString() });
}, 'container-logs'));
|
||||
|
||||
// Delete container
// DELETE /:id — force-removes the container (kills it first if running).
router.delete('/:id', ctx.asyncHandler(async (req, res) => {
  const target = ctx.docker.client.getContainer(req.params.id);
  await target.remove({ force: true });
  res.json({ success: true, message: 'Container removed' });
}, 'container-delete'));
|
||||
|
||||
// Discover running containers
// GET /discover — lists all containers (running or not) carrying the
// `sami.managed=true` label and returns a paginated, flattened summary.
router.get('/discover', ctx.asyncHandler(async (req, res) => {
  const allContainers = await ctx.docker.client.listContainers({ all: true });

  const discoveredContainers = allContainers
    .filter(c => c.Labels && c.Labels['sami.managed'] === 'true')
    .map(c => ({
      id: c.Id,
      name: c.Names[0].replace('/', ''),
      image: c.Image,
      state: c.State,
      status: c.Status,
      appTemplate: c.Labels['sami.app'],
      subdomain: c.Labels['sami.subdomain'],
      ports: c.Ports
    }));

  const paginationParams = parsePaginationParams(req.query);
  const result = paginate(discoveredContainers, paginationParams);
  res.json({ success: true, containers: result.data, ...(result.pagination && { pagination: result.pagination }) });
}, 'containers-discover'));
|
||||
|
||||
return router;
|
||||
};
|
||||
140
dashcaddy-api/routes/context.js
Normal file
140
dashcaddy-api/routes/context.js
Normal file
@@ -0,0 +1,140 @@
|
||||
/**
|
||||
* Shared route context — holds all dependencies needed by route modules.
|
||||
* Populated once by server.js at startup, then passed to each route factory.
|
||||
*
|
||||
* Usage in a route module:
|
||||
* module.exports = function(ctx) {
|
||||
* const router = require('express').Router();
|
||||
* router.get('/status', ctx.asyncHandler(async (req, res) => { ... }));
|
||||
* return router;
|
||||
* };
|
||||
*
|
||||
* Namespaces: ctx.docker.*, ctx.caddy.*, ctx.dns.*, ctx.session.*,
|
||||
* ctx.notification.*, ctx.tailscale.*
|
||||
*/
|
||||
// Registry of shared dependencies for route modules. Every field starts
// as null and is assigned by server.js at startup; route factories must
// not dereference ctx members before initialization completes.
const ctx = {
  // ── Namespaced groups ──
  docker: {
    client: null, // Dockerode instance
    pull: null, // dockerPull(imageName, timeoutMs)
    findContainer: null, // findContainerByName(name, opts)
    getUsedPorts: null, // getUsedPorts() → Set<number>
    security: null, // dockerSecurity module
  },
  caddy: {
    modify: null, // modifyCaddyfile(modifyFn) → {success, error?}
    read: null, // readCaddyfile() → string
    reload: null, // reloadCaddy(content)
    generateConfig: null, // generateCaddyConfig(subdomain, ip, port, opts)
    verifySite: null, // verifySiteAccessible(domain, maxAttempts)
    adminUrl: null, // CADDY_ADMIN_URL string
    filePath: null, // CADDYFILE_PATH string
  },
  dns: {
    call: null, // callDns(server, apiPath, params)
    buildUrl: null, // buildDnsUrl(server, apiPath, params)
    requireToken: null, // requireDnsToken(providedToken)
    ensureToken: null, // ensureValidDnsToken()
    createRecord: null, // createDnsRecord(subdomain, ip)
    getToken: null, // () => dnsToken
    setToken: null, // (t) => { dnsToken = t }
    getTokenExpiry: null, // () => dnsTokenExpiry
    setTokenExpiry: null, // (e) => { dnsTokenExpiry = e }
    getTokenForServer: null, // getTokenForServer(serverIp)
    refresh: null, // refreshDnsToken()
    credentialsFile: null,// DNS_CREDENTIALS_FILE path
  },
  session: {
    ipSessions: null, // Map of IP → session
    durations: null, // SESSION_DURATIONS map
    getClientIP: null, // getClientIP(req)
    create: null, // createIPSession(ip, duration)
    setCookie: null, // setSessionCookie(res, duration)
    clear: null, // clearIPSession(ip)
    clearCookie: null, // clearSessionCookie(res)
    isValid: null, // isSessionValid(req)
  },
  notification: {
    getConfig: null, // () => notificationConfig
    saveConfig: null, // saveNotificationConfig()
    send: null, // sendNotification(event, title, message, type)
    sendDiscord: null, // sendDiscordNotification(title, message, type)
    sendTelegram: null, // sendTelegramNotification(title, message, type)
    sendNtfy: null, // sendNtfyNotification(title, message, type)
    getHistory: null, // () => notificationHistory
    clearHistory: null, // () => { notificationHistory = [] }
    startHealthDaemon: null, // startHealthCheckDaemon()
    stopHealthDaemon: null, // stopHealthCheckDaemon()
    checkHealth: null, // checkContainerHealth()
    getHealthState: null, // () => containerHealthState
  },
  tailscale: {
    config: null, // tailscaleConfig object
    save: null, // saveTailscaleConfig()
    getStatus: null, // getTailscaleStatus()
    getLocalIP: null, // getLocalTailscaleIP()
    isTailscaleIP: null, // isTailscaleIP(ip)
    getAccessToken: null, // getTailscaleAccessToken()
    syncAPI: null, // syncFromTailscaleAPI()
    startSync: null, // startTailscaleSyncTimer()
    stopSync: null, // stopTailscaleSyncTimer()
  },

  // ── Flat (shared across domains) ──
  app: null,
  siteConfig: null,
  servicesStateManager: null,
  configStateManager: null,
  credentialManager: null,
  authManager: null,

  // Feature modules
  healthChecker: null,
  updateManager: null,
  backupManager: null,
  resourceMonitor: null,
  auditLogger: null,
  portLockManager: null,

  // Templates
  APP_TEMPLATES: null,
  TEMPLATE_CATEGORIES: null,
  DIFFICULTY_LEVELS: null,

  // Shared helpers
  asyncHandler: null,
  errorResponse: null,
  ok: null,
  fetchT: null,
  log: null,
  logError: null,
  safeErrorMessage: null,
  buildDomain: null,
  getServiceById: null,
  readConfig: null,
  saveConfig: null,
  addServiceToConfig: null,
  validateURL: null,

  // Middleware
  strictLimiter: null,

  // TOTP (flat — used alongside session namespace)
  totpConfig: null,
  saveTotpConfig: null,

  // Config lifecycle
  loadSiteConfig: null,
  loadDnsCredentials: null,
  loadNotificationConfig: null,

  // Config paths (flat)
  SERVICES_FILE: null,
  CONFIG_FILE: null,
  TOTP_CONFIG_FILE: null,
  TAILSCALE_CONFIG_FILE: null,
  NOTIFICATIONS_FILE: null,
  ERROR_LOG_FILE: null,
};

module.exports = ctx;
||||
23
dashcaddy-api/routes/credentials.js
Normal file
23
dashcaddy-api/routes/credentials.js
Normal file
@@ -0,0 +1,23 @@
|
||||
const express = require('express');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// List all stored credentials (keys only, no values)
|
||||
router.get('/credentials/list', ctx.asyncHandler(async (req, res) => {
|
||||
const keys = await ctx.credentialManager.list();
|
||||
res.json({ success: true, credentials: keys, count: keys.length });
|
||||
}, 'credentials-list'));
|
||||
|
||||
// Rotate encryption key — re-encrypts all stored credentials
|
||||
router.post('/credentials/rotate-key', ctx.asyncHandler(async (req, res) => {
|
||||
const success = await ctx.credentialManager.rotateEncryptionKey();
|
||||
if (success) {
|
||||
res.json({ success: true, message: 'Encryption key rotated, all credentials re-encrypted' });
|
||||
} else {
|
||||
ctx.errorResponse(res, 500, 'Key rotation failed');
|
||||
}
|
||||
}, 'credentials-rotate'));
|
||||
|
||||
return router;
|
||||
};
|
||||
609
dashcaddy-api/routes/dns.js
Normal file
609
dashcaddy-api/routes/dns.js
Normal file
@@ -0,0 +1,609 @@
|
||||
const express = require('express');
|
||||
const fs = require('fs');
|
||||
const fsp = require('fs').promises;
|
||||
const validatorLib = require('validator');
|
||||
const { APP, TIMEOUTS, CADDY, DNS_RECORD_TYPES, REGEX, SESSION_TTL } = require('../constants');
|
||||
const { exists } = require('../fs-helpers');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// DELETE /record — Delete a DNS record from Technitium.
// Query params: domain (required), type (optional, default A),
// ipAddress (optional, scopes deletion to one record), server (optional
// override of the target DNS server), token (optional explicit token).
router.delete('/record', ctx.asyncHandler(async (req, res) => {
  const { domain, type, token, server, ipAddress } = req.query;

  const dnsToken = await ctx.dns.requireToken(token);

  // --- Input validation (guard clauses) ---
  if (!domain) {
    return ctx.errorResponse(res, 400, 'domain is required');
  }
  if (!REGEX.DOMAIN.test(domain)) {
    return ctx.errorResponse(res, 400, '[DC-301] Invalid domain format');
  }
  // Record type is optional, but must be a known type when supplied.
  if (type && !DNS_RECORD_TYPES.includes(type.toUpperCase())) {
    return ctx.errorResponse(res, 400, 'Invalid DNS record type');
  }
  if (ipAddress && !validatorLib.isIP(ipAddress)) {
    return ctx.errorResponse(res, 400, '[DC-210] Invalid IP address');
  }
  if (server && !validatorLib.isIP(server)) {
    return ctx.errorResponse(res, 400, 'Invalid DNS server address');
  }

  // Default to dns1 LAN IP, allow override
  const targetServer = server || ctx.siteConfig.dnsServerIp;
  const recordType = type || 'A';

  try {
    const deleteParams = { token: dnsToken, domain, type: recordType };
    if (ipAddress) {
      deleteParams.ipAddress = ipAddress;
    }
    const result = await ctx.dns.call(targetServer, '/api/zones/records/delete', deleteParams);

    if (result.status === 'ok') {
      res.json({ success: true, message: `DNS record ${domain} deleted` });
    } else {
      ctx.errorResponse(res, 500, result.errorMessage || 'DNS deletion failed');
    }
  } catch (error) {
    ctx.errorResponse(res, 500, ctx.safeErrorMessage(error));
  }
}, 'dns-delete-record'));
|
||||
|
||||
// POST /record — Create an A record in Technitium.
// Body: domain (required), ip (required), ttl (optional, default 300),
// server (optional override), token (optional explicit token).
router.post('/record', ctx.asyncHandler(async (req, res) => {
  const { domain, ip, ttl, token, server } = req.body;

  const dnsToken = await ctx.dns.requireToken(token);

  if (!domain || !ip) {
    return ctx.errorResponse(res, 400, 'domain and ip are required');
  }

  // Validate domain format
  if (!REGEX.DOMAIN.test(domain)) {
    return ctx.errorResponse(res, 400, '[DC-301] Invalid domain format');
  }

  // Validate IP address
  if (!validatorLib.isIP(ip)) {
    return ctx.errorResponse(res, 400, '[DC-210] Invalid IP address');
  }

  // Validate TTL if provided. FIX: the parsed value is reused below so the
  // record is created with exactly the value that passed validation — the
  // previous `ttl || 300` bypassed the parse and silently replaced any
  // falsy-but-validated TTL with 300.
  let recordTtl = 300;
  if (ttl !== undefined) {
    const parsedTtl = parseInt(ttl, 10);
    if (isNaN(parsedTtl) || parsedTtl < CADDY.TTL_MIN || parsedTtl > CADDY.TTL_MAX) {
      return ctx.errorResponse(res, 400, `TTL must be between ${CADDY.TTL_MIN} and ${CADDY.TTL_MAX}`);
    }
    recordTtl = parsedTtl;
  }

  // Validate server IP if provided
  if (server && !validatorLib.isIP(server)) {
    return ctx.errorResponse(res, 400, 'Invalid DNS server address');
  }

  // Default to dns1 LAN IP since Docker container can't access Tailscale network
  const dnsServer = server || ctx.siteConfig.dnsServerIp;

  try {
    // For Technitium, we need zone and subdomain separated
    // domain = "test.sami" -> zone = "sami" (fall back to the configured TLD
    // for single-label domains).
    const parts = domain.split('.');
    const zone = parts.slice(1).join('.') || ctx.siteConfig.tld.replace(/^\./, '');

    const result = await ctx.dns.call(dnsServer, '/api/zones/records/add', {
      token: dnsToken, domain, zone, type: 'A', ipAddress: ip, ttl: recordTtl.toString(), overwrite: 'true'
    });

    if (result.status === 'ok') {
      res.json({ success: true, message: `DNS record ${domain} -> ${ip} created` });
    } else {
      ctx.errorResponse(res, 500, result.errorMessage || 'DNS creation failed');
    }
  } catch (error) {
    ctx.log.error('dns', 'DNS record creation error', { error: error.message });
    ctx.errorResponse(res, 500, ctx.safeErrorMessage(error), { details: error.cause?.code || 'fetch failed' });
  }
}, 'dns-create-record'));
|
||||
|
||||
// GET /resolve — Resolve a domain to its A-record IP addresses via Technitium.
// Query params: domain (required), server (optional override), token (optional).
router.get('/resolve', ctx.asyncHandler(async (req, res) => {
  const { domain, server, token } = req.query;

  const dnsToken = await ctx.dns.requireToken(token);

  if (!domain) {
    return ctx.errorResponse(res, 400, 'domain is required');
  }
  if (!REGEX.DOMAIN.test(domain)) {
    return ctx.errorResponse(res, 400, '[DC-301] Invalid domain format');
  }
  if (server && !validatorLib.isIP(server)) {
    return ctx.errorResponse(res, 400, 'Invalid DNS server address');
  }

  const dnsServer = server || ctx.siteConfig.dnsServerIp;

  try {
    const zoneName = ctx.siteConfig.tld.replace(/^\./, '');
    const result = await ctx.dns.call(dnsServer, '/api/zones/records/get', {
      token: dnsToken, domain, zone: zoneName, listZone: 'true'
    });

    if (result.status === 'ok' && result.response && result.response.records) {
      // NOTE(review): every A record in the response is treated as a match;
      // presumably the Technitium call is already scoped to `domain` — confirm.
      const aRecords = result.response.records.filter((record) => record.type === 'A');
      if (aRecords.length === 0) {
        return ctx.errorResponse(res, 404, 'No A records found for domain');
      }
      const ipAddresses = aRecords.map((record) => record.rData?.ipAddress).filter(Boolean);
      return res.json({ success: true, answer: ipAddresses });
    }

    ctx.errorResponse(res, 500, result.errorMessage || 'DNS resolve failed');
  } catch (error) {
    ctx.log.error('dns', 'DNS resolve error', { error: error.message });
    ctx.errorResponse(res, 500, ctx.safeErrorMessage(error));
  }
}, 'dns-resolve'));
|
||||
|
||||
// GET /logs — Fetch DNS query logs from Technitium.
// Flow: auto-auth with stored readonly creds -> list log files -> download the
// most recent file as plain text -> parse each line into a structured entry.
// Query params: server (required, "ip" or "ip:port"), limit (optional, capped at 1000).
router.get('/logs', ctx.asyncHandler(async (req, res) => {
  const { server, limit } = req.query;

  if (!server) {
    return ctx.errorResponse(res, 400, 'server is required');
  }

  // Validate server is an IP address or hostname to prevent SSRF
  // (strip an optional ":port" suffix before validating).
  const serverClean = server.includes(':') ? server.split(':')[0] : server;
  if (!validatorLib.isIP(serverClean) && !validatorLib.isFQDN(serverClean)) {
    return ctx.errorResponse(res, 400, 'Invalid DNS server address');
  }

  // Default 25 entries, hard cap 1000. NOTE(review): parseInt without an
  // explicit radix — fine for plain decimal input, but worth adding `, 10`.
  const logLimit = Math.min(parseInt(limit) || 25, 1000);

  try {
    // Auto-authenticate using stored read-only credentials for log access.
    // (serverIp duplicates serverClean above; kept as-is.)
    const serverIp = server.includes(':') ? server.split(':')[0] : server;
    const authResult = await ctx.dns.getTokenForServer(serverIp, 'readonly');
    if (!authResult.success) {
      return ctx.errorResponse(res, 401, 'DNS auto-authentication failed. Ensure credentials are configured via the DNS panel.');
    }
    const effectiveToken = authResult.token;

    // Try to get available log files first
    const listUrl = `http://${server}/api/logs/list?token=${encodeURIComponent(effectiveToken)}`;
    const listResponse = await ctx.fetchT(listUrl, { method: 'GET', headers: { 'Accept': 'application/json' } });

    let logFileName = new Date().toISOString().split('T')[0]; // Default to today

    if (listResponse.ok) {
      const listResult = await listResponse.json();
      if (listResult.status === 'ok' && listResult.response?.logFiles?.length > 0) {
        // Use most recent log file
        logFileName = listResult.response.logFiles[0].fileName;
      }
    }

    // Technitium logs/download endpoint - returns plain text logs
    const technitiumUrl = `http://${server}/api/logs/download?token=${encodeURIComponent(effectiveToken)}&fileName=${logFileName}`;
    ctx.log.info('dns', 'Fetching DNS logs', { server, logFileName });

    // NOTE(review): assumes ctx.fetchT honors a `timeout` option — confirm
    // (native fetch does not; this depends on the fetchT wrapper).
    const response = await ctx.fetchT(technitiumUrl, {
      method: 'GET',
      headers: { 'Accept': 'text/plain' },
      timeout: 10000
    });

    if (!response.ok) {
      const errorText = await response.text();
      // Try to parse error as JSON; "Could not find file" is treated as
      // an empty (but successful) log set rather than an error.
      try {
        const errorJson = JSON.parse(errorText);
        if (errorJson.errorMessage?.includes('Could not find file')) {
          return res.json({
            success: true,
            server: server,
            count: 0,
            logs: [],
            message: 'No logs available for this server'
          });
        }
        return ctx.errorResponse(res, response.status, ctx.safeErrorMessage(errorJson.errorMessage || errorText));
      } catch {
        return ctx.errorResponse(res, response.status, 'DNS server returned an error');
      }
    }

    // Parse plain text logs
    const logText = await response.text();

    // Check if it's an error JSON response (Technitium may reply 200 with a
    // JSON error body instead of plain-text logs).
    if (logText.startsWith('{')) {
      try {
        const errorJson = JSON.parse(logText);
        if (errorJson.status && errorJson.status !== 'ok') {
          if (errorJson.errorMessage?.includes('Could not find file')) {
            return res.json({
              success: true,
              server: server,
              count: 0,
              logs: [],
              message: 'No logs available for this server'
            });
          }
          // Invalidate cached token on auth errors so next request re-authenticates
          if (errorJson.status === 'invalid-token') {
            ctx.dns.invalidateTokenForServer(serverIp);
          }
          return ctx.errorResponse(res, 400, ctx.safeErrorMessage(errorJson.errorMessage));
        }
      } catch { /* Not JSON, continue parsing as text */ }
    }

    // Drop blank lines and the "Logging started" banner lines.
    const allLines = logText.split('\n').filter(line => line.trim() && !line.includes('Logging started'));

    // Get last N lines (most recent)
    const recentLines = allLines.slice(-logLimit);

    // Parse each log line into structured format
    const parsedLogs = recentLines.map(line => {
      // Format: [2026-01-24 04:17:43 Local] [47.147.82.245:60001] [UDP] QNAME: domain; QTYPE: A; QCLASS: IN; RCODE: Refused; ANSWER: []
      const match = line.match(/\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})[^\]]*\]\s*\[([^\]]+)\]\s*\[(\w+)\]\s*QNAME:\s*([^;]+);\s*QTYPE:\s*([^;]+);\s*QCLASS:\s*([^;]+);\s*RCODE:\s*([^;]+);\s*ANSWER:\s*\[([^\]]*)\]/);

      if (match) {
        return {
          timestamp: match[1],
          client: match[2].split(':')[0], // Remove port
          protocol: match[3],
          domain: match[4].trim(),
          type: match[5].trim(),
          class: match[6].trim(),
          rcode: match[7].trim(),
          answer: match[8].trim() || null,
          raw: line
        };
      }
      // Unparseable lines are passed through verbatim, flagged parsed:false.
      return { raw: line, parsed: false };
    }).reverse(); // Reverse to show most recent first

    ctx.log.info('dns', 'Returning DNS log entries', { count: parsedLogs.length, logFileName });
    res.json({
      success: true,
      server: server,
      logFile: logFileName,
      count: parsedLogs.length,
      logs: parsedLogs
    });

  } catch (error) {
    ctx.log.error('dns', 'DNS logs proxy error', { error: error.message });
    ctx.errorResponse(res, 500, ctx.safeErrorMessage(error));
  }
}, 'dns-logs'));
|
||||
|
||||
// GET /token-status — Report whether DNS credentials and a session token are
// available, plus the token's expiry and whether it has lapsed.
router.get('/token-status', ctx.asyncHandler(async (req, res) => {
  const storedUsername = await ctx.credentialManager.retrieve('dns.username');
  // Credentials count as present if stored encrypted OR the legacy file exists.
  const hasCredentials = !!storedUsername || await exists(ctx.dns.credentialsFile);
  const hasToken = !!ctx.dns.getToken();
  const tokenExpiry = ctx.dns.getTokenExpiry();
  const isExpired = tokenExpiry ? new Date() > new Date(tokenExpiry) : null;

  res.json({
    success: true,
    hasCredentials,
    hasToken,
    tokenExpiry,
    isExpired
  });
}, 'dns-token-status'));
|
||||
|
||||
// POST /credentials — Store DNS credentials (encrypted).
// Accepts per-server format: { servers: { dns1: { readonly: { username, password },
//   admin: { username, password } }, ... } }
// Also accepts legacy format: { username, password, server }.
// Every credential pair is verified by a live login before being stored.
// FIX: removed the unused `dnsPort` local that was computed but never read.
router.post('/credentials', ctx.asyncHandler(async (req, res) => {
  const { servers, username, password, server } = req.body;
  // Characters rejected in usernames to avoid shell/command injection downstream.
  const dangerousChars = [';', '&', '|', '`', '$', '\n', '\r'];

  // Per-server format: { servers: { dns1: { readonly: {...}, admin: {...} }, ... } }
  if (servers && typeof servers === 'object') {
    const results = {};
    let anySuccess = false;

    for (const [dnsId, creds] of Object.entries(servers)) {
      // Look up server IP from config
      const serverInfo = ctx.siteConfig.dnsServers?.[dnsId];
      const serverIp = serverInfo?.ip;
      if (!serverIp) {
        results[dnsId] = { success: false, error: `No IP configured for ${dnsId}` };
        continue;
      }

      const savedTypes = [];

      // Process both readonly and admin credential types.
      // Note: only the FIRST failure per server is recorded (results[dnsId] is
      // not overwritten by later failures), and any success at the end replaces
      // an earlier failure with a success/partial result.
      for (const credType of ['readonly', 'admin']) {
        const typeCreds = creds[credType];
        if (!typeCreds || !typeCreds.username || !typeCreds.password) continue;

        if (typeCreds.username.length > 100 || typeCreds.password.length > 512) {
          results[dnsId] = { success: false, error: `${credType} credentials exceed maximum length` };
          continue;
        }
        if (dangerousChars.some(char => typeCreds.username.includes(char))) {
          results[dnsId] = { success: false, error: `${credType} username contains invalid characters` };
          continue;
        }

        // Test credentials by logging in to the target server
        try {
          const testResult = await ctx.dns.refresh(typeCreds.username, typeCreds.password, serverIp);
          if (testResult.success) {
            await ctx.credentialManager.store(`dns.${dnsId}.${credType}.username`, typeCreds.username, { type: 'dns', role: credType, server: serverIp });
            await ctx.credentialManager.store(`dns.${dnsId}.${credType}.password`, typeCreds.password, { type: 'dns', role: credType, server: serverIp });
            savedTypes.push(credType);
            anySuccess = true;
            ctx.log.info('dns', `${credType} credentials saved for ${dnsId}`, { server: serverIp });
          } else {
            if (!results[dnsId]) {
              results[dnsId] = { success: false, error: `${credType}: ${testResult.error || 'Login failed'}` };
            }
          }
        } catch (err) {
          if (!results[dnsId]) {
            results[dnsId] = { success: false, error: `${credType}: ${err.message}` };
          }
        }
      }

      if (savedTypes.length > 0) {
        if (savedTypes.length === 2) {
          results[dnsId] = { success: true };
        } else {
          results[dnsId] = { success: true, partial: `${savedTypes[0]} verified` };
        }
      }
    }

    return res.json({
      success: anySuccess,
      message: anySuccess ? 'Credentials saved for one or more servers' : 'All server credential tests failed',
      results
    });
  }

  // Legacy single-credential format: { username, password, server }
  if (!username || !password) {
    return ctx.errorResponse(res, 400, 'username and password are required');
  }

  if (username.length > 100 || password.length > 512) {
    return ctx.errorResponse(res, 400, 'Credentials exceed maximum length');
  }

  if (dangerousChars.some(char => username.includes(char))) {
    return ctx.errorResponse(res, 400, 'Username contains invalid characters');
  }

  if (server && !validatorLib.isIP(server)) {
    return ctx.errorResponse(res, 400, 'Invalid DNS server address');
  }

  // Verify the credentials before persisting anything.
  const testResult = await ctx.dns.refresh(username, password, server || ctx.siteConfig.dnsServerIp);

  if (!testResult.success) {
    return ctx.errorResponse(res, 401, `Invalid credentials: ${testResult.error}`);
  }

  const dnsServer = server || ctx.siteConfig.dnsServerIp;
  await ctx.credentialManager.store('dns.username', username, { type: 'dns', server: dnsServer });
  await ctx.credentialManager.store('dns.password', password, { type: 'dns', server: dnsServer });
  await ctx.credentialManager.store('dns.server', dnsServer, { type: 'dns' });
  ctx.log.info('dns', 'DNS credentials saved to credential manager (encrypted)');

  res.json({
    success: true,
    message: 'DNS credentials saved and verified (encrypted)',
    tokenExpiry: ctx.dns.getTokenExpiry()
  });
}, 'dns-credentials'));
|
||||
|
||||
// DELETE /credentials — Delete every stored DNS credential (global legacy
// keys, per-server flat keys, per-server role-scoped keys), remove the
// on-disk fallback file, and drop the cached session token.
router.delete('/credentials', ctx.asyncHandler(async (req, res) => {
  // Global (legacy, non-per-server) keys.
  for (const key of ['dns.username', 'dns.password', 'dns.server']) {
    await ctx.credentialManager.delete(key);
  }

  // Per-server credentials: old flat layout plus the newer readonly/admin layout.
  for (const dnsId of Object.keys(ctx.siteConfig.dnsServers || {})) {
    await ctx.credentialManager.delete(`dns.${dnsId}.username`);
    await ctx.credentialManager.delete(`dns.${dnsId}.password`);
    for (const role of ['readonly', 'admin']) {
      await ctx.credentialManager.delete(`dns.${dnsId}.${role}.username`);
      await ctx.credentialManager.delete(`dns.${dnsId}.${role}.password`);
    }
  }

  // Remove the plaintext fallback file if present.
  if (await exists(ctx.dns.credentialsFile)) {
    await fsp.unlink(ctx.dns.credentialsFile);
  }

  // Invalidate any cached token so future calls must re-authenticate.
  ctx.dns.setToken('');
  ctx.dns.setTokenExpiry(null);
  ctx.log.info('dns', 'DNS credentials deleted from credential manager');
  res.json({ success: true, message: 'DNS credentials removed' });
}, 'dns-credentials-delete'));
|
||||
|
||||
// POST /restart/:dnsId — Restart a DNS server (proxied through backend for auth).
// dnsId must be a key of ctx.siteConfig.dnsServers; admin credentials are
// required for the restart API.
router.post('/restart/:dnsId', ctx.asyncHandler(async (req, res) => {
  const { dnsId } = req.params;
  const serverInfo = ctx.siteConfig.dnsServers?.[dnsId];
  if (!serverInfo?.ip) {
    return ctx.errorResponse(res, 400, `Unknown DNS server: ${dnsId}`);
  }

  const tokenResult = await ctx.dns.getTokenForServer(serverInfo.ip, 'admin');
  if (!tokenResult.success) {
    return ctx.errorResponse(res, 401, 'DNS admin authentication failed. Ensure admin credentials are configured.');
  }

  const dnsPort = ctx.siteConfig.dnsServerPort || '5380';
  try {
    const restartUrl = `http://${serverInfo.ip}:${dnsPort}/api/admin/restart?token=${encodeURIComponent(tokenResult.token)}`;
    const restartResponse = await ctx.fetchT(restartUrl, { method: 'POST', timeout: 5000 });
    const restartResult = await restartResponse.json();
    if (restartResult.status !== 'ok') {
      return ctx.errorResponse(res, 500, restartResult.errorMessage || 'Restart failed');
    }
    res.json({ success: true, message: 'Restart initiated' });
  } catch (err) {
    // Connection drop is expected during restart
    res.json({ success: true, message: 'Restart initiated (connection closed)' });
  }
}, 'dns-restart'));
|
||||
|
||||
// POST /refresh-token — Force a DNS session-token refresh using stored
// credentials; returns the new expiry on success.
router.post('/refresh-token', ctx.asyncHandler(async (req, res) => {
  const refreshResult = await ctx.dns.ensureToken();

  if (!refreshResult.success) {
    return ctx.errorResponse(res, 401, refreshResult.error);
  }

  res.json({
    success: true,
    message: 'Token refreshed successfully',
    tokenExpiry: ctx.dns.getTokenExpiry()
  });
}, 'dns-refresh-token'));
|
||||
|
||||
// GET /check-update — Check for Technitium DNS server updates.
// Query params: server (required IP of the DNS server).
// FIX: validates the server address like every sibling proxy endpoint (SSRF
// guard), and uses the configured management port instead of hard-coding 5380
// (consistent with POST /restart/:dnsId).
router.get('/check-update', ctx.asyncHandler(async (req, res) => {
  try {
    const { server } = req.query;
    if (!server) {
      return ctx.errorResponse(res, 400, 'Server IP required');
    }
    if (!validatorLib.isIP(server)) {
      return ctx.errorResponse(res, 400, 'Invalid DNS server address');
    }

    // Authenticate with admin credentials for update check
    const tokenResult = await ctx.dns.getTokenForServer(server, 'admin');
    if (!tokenResult.success) {
      return ctx.errorResponse(res, 401, 'DNS authentication failed. Ensure credentials are configured.');
    }

    const dnsPort = ctx.siteConfig.dnsServerPort || '5380';
    const url = `http://${server}:${dnsPort}/api/user/checkForUpdate?token=${encodeURIComponent(tokenResult.token)}`;
    ctx.log.info('dns', 'Checking DNS update', { server });

    const response = await ctx.fetchT(url, {
      method: 'GET',
      headers: {
        'Accept': 'application/json',
        'User-Agent': APP.USER_AGENTS.API
      }
    });

    const text = await response.text();

    if (!text || text.trim() === '') {
      return ctx.errorResponse(res, 500, 'Empty response from DNS server');
    }

    const result = JSON.parse(text);

    if (result.status === 'ok') {
      res.json({
        success: true,
        updateAvailable: result.response.updateAvailable,
        currentVersion: result.response.currentVersion,
        updateVersion: result.response.updateVersion || null,
        updateTitle: result.response.updateTitle || null,
        updateMessage: result.response.updateMessage || null,
        downloadLink: result.response.downloadLink || null,
        instructionsLink: result.response.instructionsLink || null
      });
    } else {
      ctx.errorResponse(res, 500, result.errorMessage || 'Check failed');
    }
  } catch (error) {
    ctx.log.error('dns', 'DNS update check error', { error: error.message });
    ctx.errorResponse(res, 500, ctx.safeErrorMessage(error));
  }
}, 'dns-check-update'));
|
||||
|
||||
// POST /update — Update Technitium DNS server.
// Note: Technitium v14+ has no installUpdate API. This endpoint checks for updates
// and returns download info. The frontend handles showing update instructions.
// FIX: validates the server address (SSRF guard, matching sibling endpoints)
// and uses the configured management port instead of hard-coding 5380.
router.post('/update', ctx.asyncHandler(async (req, res) => {
  try {
    const { server } = req.query;
    if (!server) {
      return ctx.errorResponse(res, 400, 'Server IP required');
    }
    if (!validatorLib.isIP(server)) {
      return ctx.errorResponse(res, 400, 'Invalid DNS server address');
    }

    // Authenticate with admin credentials for update operations
    const tokenResult = await ctx.dns.getTokenForServer(server, 'admin');
    if (!tokenResult.success) {
      return ctx.errorResponse(res, 401, 'DNS authentication failed. Ensure credentials are configured.');
    }

    // Check if update is available
    const dnsPort = ctx.siteConfig.dnsServerPort || '5380';
    const checkResponse = await ctx.fetchT(
      `http://${server}:${dnsPort}/api/user/checkForUpdate?token=${encodeURIComponent(tokenResult.token)}`,
      { method: 'GET', headers: { 'Accept': 'application/json' } }
    );

    const checkText = await checkResponse.text();
    if (!checkText || checkText.trim() === '') {
      return ctx.errorResponse(res, 500, 'Empty response from DNS server during check');
    }
    const checkResult = JSON.parse(checkText);

    if (checkResult.status !== 'ok') {
      return ctx.errorResponse(res, 500, checkResult.errorMessage || 'Update check failed');
    }

    if (!checkResult.response.updateAvailable) {
      return res.json({
        success: true,
        message: 'Already up to date',
        currentVersion: checkResult.response.currentVersion,
        updated: false
      });
    }

    // Technitium v14+ does not have an installUpdate API endpoint.
    // Return the update info with download link so the frontend can guide the user.
    ctx.log.info('dns', 'Update available for DNS server', { server, currentVersion: checkResult.response.currentVersion, updateVersion: checkResult.response.updateVersion });

    res.json({
      success: true,
      message: `Update available: ${checkResult.response.updateVersion}`,
      previousVersion: checkResult.response.currentVersion,
      newVersion: checkResult.response.updateVersion,
      downloadLink: checkResult.response.downloadLink || null,
      instructionsLink: checkResult.response.instructionsLink || null,
      updated: false,
      manualUpdateRequired: true
    });
  } catch (error) {
    ctx.log.error('dns', 'DNS update error', { error: error.message });
    ctx.errorResponse(res, 500, ctx.safeErrorMessage(error));
  }
}, 'dns-update'));
|
||||
|
||||
return router;
|
||||
};
|
||||
69
dashcaddy-api/routes/errorlogs.js
Normal file
69
dashcaddy-api/routes/errorlogs.js
Normal file
@@ -0,0 +1,69 @@
|
||||
const express = require('express');
|
||||
const fs = require('fs');
|
||||
const fsp = require('fs').promises;
|
||||
const { exists } = require('../fs-helpers');
|
||||
const { paginate, parsePaginationParams } = require('../pagination');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// Get error logs
|
||||
router.get('/error-logs', ctx.asyncHandler(async (req, res) => {
|
||||
if (!await exists(ctx.ERROR_LOG_FILE)) {
|
||||
return res.json({ success: true, logs: [] });
|
||||
}
|
||||
|
||||
const logContent = await fsp.readFile(ctx.ERROR_LOG_FILE, 'utf8');
|
||||
const logEntries = logContent.split('='.repeat(80)).filter(entry => entry.trim());
|
||||
|
||||
const logs = logEntries.map(entry => {
|
||||
const lines = entry.trim().split('\n');
|
||||
const firstLine = lines[0] || '';
|
||||
const match = firstLine.match(/\[(.*?)\] (.*?): (.*)/);
|
||||
|
||||
if (match) {
|
||||
return {
|
||||
timestamp: match[1],
|
||||
context: match[2],
|
||||
error: match[3],
|
||||
details: lines.slice(1).join('\n').trim()
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}).filter(Boolean);
|
||||
|
||||
res.json({ success: true, logs: logs.slice(-50).reverse() });
|
||||
}, 'error-logs-get'));
|
||||
|
||||
// Clear error logs
|
||||
router.delete('/error-logs', ctx.asyncHandler(async (req, res) => {
|
||||
if (await exists(ctx.ERROR_LOG_FILE)) {
|
||||
await fsp.writeFile(ctx.ERROR_LOG_FILE, '');
|
||||
}
|
||||
res.json({ success: true, message: 'Error logs cleared' });
|
||||
}, 'error-logs-clear'));
|
||||
|
||||
// Audit log
|
||||
router.get('/audit-logs', ctx.asyncHandler(async (req, res) => {
|
||||
const paginationParams = parsePaginationParams(req.query);
|
||||
const action = req.query.action || '';
|
||||
if (paginationParams) {
|
||||
// When paginating, fetch all matching entries and let pagination slice
|
||||
const entries = await ctx.auditLogger.query({ limit: Number.MAX_SAFE_INTEGER, offset: 0, action });
|
||||
const result = paginate(entries, paginationParams);
|
||||
res.json({ success: true, entries: result.data, pagination: result.pagination });
|
||||
} else {
|
||||
const limit = parseInt(req.query.limit) || 50;
|
||||
const offset = parseInt(req.query.offset) || 0;
|
||||
const entries = await ctx.auditLogger.query({ limit, offset, action });
|
||||
res.json({ success: true, entries });
|
||||
}
|
||||
}, 'audit-log'));
|
||||
|
||||
router.delete('/audit-logs', ctx.asyncHandler(async (req, res) => {
|
||||
await ctx.auditLogger.clear();
|
||||
res.json({ success: true, message: 'Audit log cleared' });
|
||||
}, 'audit-log-clear'));
|
||||
|
||||
return router;
|
||||
};
|
||||
314
dashcaddy-api/routes/health.js
Normal file
314
dashcaddy-api/routes/health.js
Normal file
@@ -0,0 +1,314 @@
|
||||
const express = require('express');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { execSync } = require('child_process');
|
||||
const { TIMEOUTS } = require('../constants');
|
||||
const { exists } = require('../fs-helpers');
|
||||
const { paginate, parsePaginationParams } = require('../pagination');
|
||||
const platformPaths = require('../platform-paths');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// In-memory cache for health results (local to this router)
// Written by GET /health/services after each full check; read verbatim by
// GET /health/cached. Per-process only: resets on restart and is not shared
// across workers.
let serviceHealthCache = {};
let lastHealthCheck = null; // ISO timestamp of the most recent full check, or null
||||
|
||||
// ===== HEALTH / SERVICES =====

// Check health of all services (performs live checks).
// FIX: the HEAD and GET probe paths were duplicated wholesale; they now share
// a single local `probe` helper. Also removes the unused `checkType` local.
router.get('/health/services', ctx.asyncHandler(async (req, res) => {
  if (!await exists(ctx.SERVICES_FILE)) {
    return res.json({ success: true, health: {} });
  }

  const servicesData = await ctx.servicesStateManager.read();
  const services = Array.isArray(servicesData) ? servicesData : servicesData.services || [];
  const health = {};

  // One HTTP probe with a 5s abort. Resolves to a health record; throws on
  // network failure / timeout so the caller can fall back or report.
  const probe = async (url, method) => {
    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), 5000);
    try {
      const response = await ctx.fetchT(url, {
        method,
        signal: controller.signal,
        redirect: 'follow'
      });
      return {
        // Anything below 500 counts as healthy (auth walls still prove liveness).
        status: response.ok || response.status < 500 ? 'healthy' : 'unhealthy',
        statusCode: response.status,
        url,
        checkedAt: new Date().toISOString()
      };
    } finally {
      clearTimeout(timeout);
    }
  };

  // Check each service concurrently.
  await Promise.all(services.map(async (service) => {
    const serviceId = service.id || service.name?.toLowerCase();
    if (!serviceId) return;

    try {
      let url = null;

      // Determine URL to check
      if (service.isExternal && service.externalUrl) {
        url = service.externalUrl;
      } else if (service.containerId || service.containerName) {
        // Local container - check via localhost and port
        const port = service.port || 80;
        url = `http://localhost:${port}`;
      } else if (service.url) {
        url = service.url.startsWith('http') ? service.url : `https://${service.url}`;
      } else if (service.id) {
        // Try common URL pattern
        url = `https://${ctx.buildDomain(service.id)}`;
      }

      if (!url) {
        health[serviceId] = { status: 'unknown', reason: 'No URL configured' };
        return;
      }

      // HEAD first (cheap); some servers reject HEAD, so fall back to GET.
      try {
        health[serviceId] = await probe(url, 'HEAD');
      } catch (headError) {
        try {
          health[serviceId] = await probe(url, 'GET');
        } catch (e) {
          health[serviceId] = {
            status: 'unhealthy',
            reason: e.name === 'AbortError' ? 'Timeout' : e.message,
            url,
            checkedAt: new Date().toISOString()
          };
        }
      }
    } catch (e) {
      health[serviceId] = {
        status: 'error',
        reason: e.message,
        checkedAt: new Date().toISOString()
      };
    }
  }));

  // Cache results for GET /health/cached
  serviceHealthCache = health;
  lastHealthCheck = new Date().toISOString();

  const paginationParams = parsePaginationParams(req.query);
  const healthEntries = Object.entries(health);
  const result = paginate(healthEntries, paginationParams);
  const paginatedHealth = Object.fromEntries(result.data);
  res.json({
    success: true,
    health: paginatedHealth,
    checkedAt: lastHealthCheck,
    ...(result.pagination && { pagination: result.pagination })
  });
}, 'health-services'));
|
||||
|
||||
// Serve the most recent cached health snapshot (fast path — no live re-probe).
// Responds with the cached per-service map, when it was taken, and its age in ms.
router.get('/health/cached', ctx.asyncHandler(async (req, res) => {
  const lastCheck = lastHealthCheck;
  // Cache age in milliseconds, or null when no health check has run yet.
  const cacheAge = lastCheck ? Date.now() - new Date(lastCheck).getTime() : null;
  res.json({
    success: true,
    health: serviceHealthCache,
    lastCheck,
    cacheAge
  });
}, 'health-cached'));
|
||||
|
||||
// Probe the health of a single configured service by id.
// Resolves the service from the services registry, determines its URL
// (external URL > explicit url > conventional subdomain), then performs one
// GET with a 5-second abort window. Always answers 200; probe failures are
// reported inside the `health` object.
router.get('/health/service/:id', ctx.asyncHandler(async (req, res) => {
  const serviceId = req.params.id;

  // The services registry file must exist before we can resolve anything.
  if (!await exists(ctx.SERVICES_FILE)) {
    const { NotFoundError } = require('../errors');
    throw new NotFoundError('Services file');
  }

  const servicesData = await ctx.servicesStateManager.read();
  // The registry may be stored as a bare array or wrapped in { services: [...] }.
  const serviceList = Array.isArray(servicesData) ? servicesData : servicesData.services || [];
  const service = serviceList.find(entry => (entry.id || entry.name?.toLowerCase()) === serviceId);
  if (!service) {
    const { NotFoundError } = require('../errors');
    throw new NotFoundError('Service');
  }

  // Resolve the URL to probe: an external URL wins, then an explicit service
  // URL (defaulting the scheme to https), then the conventional subdomain.
  let url;
  if (service.isExternal && service.externalUrl) {
    url = service.externalUrl;
  } else if (service.url) {
    url = service.url.startsWith('http') ? service.url : `https://${service.url}`;
  } else {
    url = `https://${ctx.buildDomain(serviceId)}`;
  }

  // One GET with a 5s abort timeout.
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 5000);
  const respond = (health) => res.json({ success: true, serviceId, health });

  try {
    const response = await ctx.fetchT(url, {
      method: 'GET',
      signal: controller.signal,
      redirect: 'follow'
    });
    // Anything below 500 counts as healthy (auth walls, redirect targets, etc.).
    respond({
      status: response.ok || response.status < 500 ? 'healthy' : 'unhealthy',
      statusCode: response.status,
      url,
      checkedAt: new Date().toISOString()
    });
  } catch (e) {
    respond({
      status: 'unhealthy',
      reason: e.name === 'AbortError' ? 'Timeout' : e.message,
      url,
      checkedAt: new Date().toISOString()
    });
  } finally {
    clearTimeout(timer);
  }
}, 'health-service'));
|
||||
|
||||
// ===== HEALTH / CA =====

// Report the DashCA root certificate's expiration status.
// Responds with { status, message, daysUntilExpiration, expiresAt } where
// status is one of healthy / warning / critical / error. Always answers 200
// so the dashboard can render the state; failures are encoded as status 'error'.
router.get('/health/ca', ctx.asyncHandler(async (req, res) => {
  // Prefer the deployed (distributed) certificate; fall back to Caddy's PKI copy.
  const deployedCertPath = path.join(platformPaths.caCertDir, 'root.crt');
  const pkiCertPath = platformPaths.pkiRootCert;
  const rootCertPath = await exists(deployedCertPath) ? deployedCertPath : pkiCertPath;

  try {
    // Check if certificate exists
    if (!await exists(rootCertPath)) {
      return res.json({
        status: 'error',
        message: 'Root CA certificate not found',
        daysUntilExpiration: null
      });
    }

    // Ask openssl for the validity window. rootCertPath comes from
    // platform-paths (not user input), so shell interpolation is safe here.
    const dates = execSync(`openssl x509 -in "${rootCertPath}" -noout -dates`).toString();
    // Guard the parse: previously a missing notAfter line crashed with an
    // opaque "Cannot read properties of null" TypeError in the catch below.
    const notAfterMatch = dates.match(/notAfter=(.*)/);
    if (!notAfterMatch) {
      throw new Error('Could not parse certificate expiration from openssl output');
    }
    const notAfter = notAfterMatch[1].trim();
    const expirationDate = new Date(notAfter);
    const daysUntilExpiration = Math.floor((expirationDate - new Date()) / (1000 * 60 * 60 * 24));

    // Alert thresholds: expired or <30 days → critical, <90 days → warning.
    // (The original separate <7-day branch was dead code — it produced the
    // same status and message as the <30-day branch, so they are merged.)
    let status = 'healthy';
    let message = `CA certificate valid for ${daysUntilExpiration} days`;

    if (daysUntilExpiration < 0) {
      status = 'critical';
      message = `CA certificate EXPIRED ${Math.abs(daysUntilExpiration)} days ago!`;
    } else if (daysUntilExpiration < 30) {
      status = 'critical';
      message = `CA certificate expires in ${daysUntilExpiration} days!`;
    } else if (daysUntilExpiration < 90) {
      status = 'warning';
      message = `CA certificate expires in ${daysUntilExpiration} days`;
    }

    res.json({
      status: status,
      message: message,
      daysUntilExpiration: daysUntilExpiration,
      expiresAt: notAfter
    });
  } catch (error) {
    await ctx.logError('GET /api/health/ca', error);
    res.json({
      status: 'error',
      message: error.message,
      daysUntilExpiration: null
    });
  }
}, 'health-ca'));
|
||||
|
||||
// ===== HEALTH CHECK (health-checker module) =====

// Current status snapshot for every monitored service.
router.get('/health-checks/status', ctx.asyncHandler(async (req, res) => {
  res.json({ success: true, status: ctx.healthChecker.getCurrentStatus() });
}, 'health-check-status'));

// Per-service statistics over a time window (?hours=N, default 24).
router.get('/health-checks/:serviceId/stats', ctx.asyncHandler(async (req, res) => {
  const windowHours = parseInt(req.query.hours) || 24;
  const stats = ctx.healthChecker.getServiceStats(req.params.serviceId, windowHours);
  if (!stats) {
    const { NotFoundError } = require('../errors');
    throw new NotFoundError('Service');
  }
  res.json({ success: true, stats });
}, 'health-check-stats'));

// Create or update a service's health-check configuration.
router.post('/health-checks/:serviceId/configure', ctx.asyncHandler(async (req, res) => {
  ctx.healthChecker.configureService(req.params.serviceId, req.body);
  res.json({ success: true, message: 'Health check configured' });
}, 'health-check-configure'));

// Drop a service's health-check configuration.
router.delete('/health-checks/:serviceId/configure', ctx.asyncHandler(async (req, res) => {
  ctx.healthChecker.removeService(req.params.serviceId);
  res.json({ success: true, message: 'Health check removed' });
}, 'health-check-remove'));

// Currently open (unresolved) incidents, optionally paginated.
router.get('/health-checks/incidents', ctx.asyncHandler(async (req, res) => {
  const pageParams = parsePaginationParams(req.query);
  const page = paginate(ctx.healthChecker.getOpenIncidents(), pageParams);
  res.json({ success: true, incidents: page.data, ...(page.pagination && { pagination: page.pagination }) });
}, 'health-check-incidents'));

// Incident history. When pagination is requested we fetch everything so the
// page slice is computed over the full set; otherwise honor ?limit= (default 50).
router.get('/health-checks/incidents/history', ctx.asyncHandler(async (req, res) => {
  const pageParams = parsePaginationParams(req.query);
  const fetchLimit = pageParams ? Number.MAX_SAFE_INTEGER : (parseInt(req.query.limit) || 50);
  const page = paginate(ctx.healthChecker.getIncidentHistory(fetchLimit), pageParams);
  res.json({ success: true, history: page.data, ...(page.pagination && { pagination: page.pagination }) });
}, 'health-check-incidents-history'));
|
||||
|
||||
return router;
|
||||
};
|
||||
62
dashcaddy-api/routes/license.js
Normal file
62
dashcaddy-api/routes/license.js
Normal file
@@ -0,0 +1,62 @@
|
||||
const express = require('express');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// Activate a license code
|
||||
router.post('/activate', ctx.asyncHandler(async (req, res) => {
|
||||
const { code } = req.body;
|
||||
if (!code) {
|
||||
return ctx.errorResponse(res, 400, 'License code is required');
|
||||
}
|
||||
|
||||
const result = await ctx.licenseManager.activate(code);
|
||||
|
||||
if (result.success) {
|
||||
res.json({
|
||||
success: true,
|
||||
message: result.message,
|
||||
license: result.activation
|
||||
});
|
||||
} else {
|
||||
ctx.errorResponse(res, 400, result.message);
|
||||
}
|
||||
}, 'license-activate'));
|
||||
|
||||
// Get current license status
|
||||
router.get('/status', ctx.asyncHandler(async (req, res) => {
|
||||
const status = ctx.licenseManager.getStatus();
|
||||
res.json({ success: true, license: status });
|
||||
}, 'license-status'));
|
||||
|
||||
// Deactivate current license
|
||||
router.post('/deactivate', ctx.asyncHandler(async (req, res) => {
|
||||
const result = await ctx.licenseManager.deactivate();
|
||||
|
||||
if (result.success) {
|
||||
res.json({ success: true, message: result.message });
|
||||
} else {
|
||||
ctx.errorResponse(res, 400, result.message);
|
||||
}
|
||||
}, 'license-deactivate'));
|
||||
|
||||
// Check if a specific feature is available (lightweight check for frontend)
|
||||
router.get('/feature/:feature', ctx.asyncHandler(async (req, res) => {
|
||||
const { feature } = req.params;
|
||||
const available = ctx.licenseManager.hasFeature(feature);
|
||||
const status = ctx.licenseManager.getStatus();
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
feature,
|
||||
available,
|
||||
tier: status.tier,
|
||||
...(available ? {} : {
|
||||
upgradeUrl: '/settings#license',
|
||||
message: `${status.premiumFeatures[feature]?.name || feature} requires DashCaddy Premium`
|
||||
})
|
||||
});
|
||||
}, 'license-feature-check'));
|
||||
|
||||
return router;
|
||||
};
|
||||
182
dashcaddy-api/routes/logs.js
Normal file
182
dashcaddy-api/routes/logs.js
Normal file
@@ -0,0 +1,182 @@
|
||||
const express = require('express');
|
||||
const fs = require('fs');
|
||||
const fsp = require('fs').promises;
|
||||
const path = require('path');
|
||||
const { exists } = require('../fs-helpers');
|
||||
const { paginate, parsePaginationParams } = require('../pagination');
|
||||
|
||||
/**
 * Log access routes: list containers, fetch/stream Docker container logs
 * (demultiplexing Docker's 8-byte-framed stdout/stderr stream), and tail
 * plain log files for native (non-container) applications.
 *
 * @param {object} ctx - Shared application context (docker client,
 *   asyncHandler, errorResponse, safeErrorMessage).
 * @returns {express.Router}
 */
module.exports = function(ctx) {
  const router = express.Router();

  // List containers with logs
  router.get('/logs/containers', ctx.asyncHandler(async (req, res) => {
    // all: true includes stopped containers so their logs remain browsable.
    const containers = await ctx.docker.client.listContainers({ all: true });
    const containerList = containers.map(c => ({
      id: c.Id.slice(0, 12),
      // Docker prefixes names with '/'; strip it for display.
      name: c.Names[0]?.replace(/^\//, '') || 'unknown',
      image: c.Image,
      status: c.State,
      created: c.Created
    }));

    const paginationParams = parsePaginationParams(req.query);
    const result = paginate(containerList, paginationParams);
    res.json({ success: true, containers: result.data, ...(result.pagination && { pagination: result.pagination }) });
  }, 'logs-containers'));

  // Get logs for a specific container
  router.get('/logs/container/:id', ctx.asyncHandler(async (req, res) => {
    const containerId = req.params.id;
    const tail = parseInt(req.query.tail) || 100;
    const since = req.query.since || 0;
    // Timestamps are on unless explicitly disabled with ?timestamps=false.
    const timestamps = req.query.timestamps !== 'false';

    const container = ctx.docker.client.getContainer(containerId);
    const info = await container.inspect();
    const containerName = info.Name.replace(/^\//, '');

    const logs = await container.logs({
      stdout: true, stderr: true,
      tail, since, timestamps
    });

    // Parse Docker log stream (demultiplex stdout/stderr).
    // Each frame: an 8-byte header — byte 0 is the stream id (2 = stderr,
    // anything else is treated as stdout), bytes 4-7 are the big-endian
    // payload length — followed by the payload itself.
    // NOTE(review): containers started with a TTY emit an unframed stream;
    // this parser assumes the framed (non-TTY) format — TODO confirm.
    const lines = [];
    let offset = 0;
    const buffer = Buffer.isBuffer(logs) ? logs : Buffer.from(logs);

    while (offset < buffer.length) {
      // Stop on a truncated header or truncated payload at the tail.
      if (offset + 8 > buffer.length) break;
      const header = buffer.slice(offset, offset + 8);
      const streamType = header[0];
      const size = header.readUInt32BE(4);
      if (offset + 8 + size > buffer.length) break;

      const line = buffer.slice(offset + 8, offset + 8 + size).toString('utf8').trim();
      if (line) {
        lines.push({
          stream: streamType === 2 ? 'stderr' : 'stdout',
          text: line
        });
      }
      offset += 8 + size;
    }

    res.json({
      success: true,
      containerId, containerName,
      logs: lines,
      count: lines.length
    });
  }, 'logs-container'));

  // Stream logs (SSE): follows the container's log output and pushes each
  // demultiplexed line to the client as a server-sent event.
  router.get('/logs/stream/:id', ctx.asyncHandler(async (req, res) => {
    const containerId = req.params.id;
    const container = ctx.docker.client.getContainer(containerId);

    res.setHeader('Content-Type', 'text/event-stream');
    res.setHeader('Cache-Control', 'no-cache');
    res.setHeader('Connection', 'keep-alive');
    // Disable nginx proxy buffering so events are delivered immediately.
    res.setHeader('X-Accel-Buffering', 'no');

    const logStream = await container.logs({
      stdout: true, stderr: true,
      follow: true, tail: 50, timestamps: true
    });

    // Accumulate chunks: frames can be split across network chunks, so we
    // buffer until a complete header + payload is available.
    let buffer = Buffer.alloc(0);

    logStream.on('data', (chunk) => {
      buffer = Buffer.concat([buffer, chunk]);

      // Drain every complete frame currently in the buffer (same 8-byte
      // header framing as the non-streaming route above).
      while (buffer.length >= 8) {
        const size = buffer.readUInt32BE(4);
        if (buffer.length < 8 + size) break;

        const streamType = buffer[0];
        const line = buffer.slice(8, 8 + size).toString('utf8').trim();

        if (line) {
          const data = JSON.stringify({
            stream: streamType === 2 ? 'stderr' : 'stdout',
            text: line,
            timestamp: new Date().toISOString()
          });
          res.write(`data: ${data}\n\n`);
        }

        buffer = buffer.slice(8 + size);
      }
    });

    logStream.on('error', (err) => {
      res.write(`data: ${JSON.stringify({ error: ctx.safeErrorMessage(err) })}\n\n`);
      res.end();
    });

    // Tear down the Docker stream when the client disconnects.
    req.on('close', () => {
      if (logStream.destroy) logStream.destroy();
    });
  }, 'logs-stream'));

  // Get logs from a file path (for native applications)
  router.get('/logs/file', ctx.asyncHandler(async (req, res) => {
    // tail comes from the query string, so it may be a string; slice(-tail)
    // below relies on unary-minus coercion to a number.
    const { path: logPath, tail = 100 } = req.query;

    if (!logPath) {
      return ctx.errorResponse(res, 400, 'Log path is required');
    }

    const platformPaths = require('../platform-paths');
    const allowedPaths = platformPaths.allowedLogPaths;

    const normalizedPath = path.normalize(logPath);
    // NOTE(review): this is a plain prefix check — '/var/log-other' would
    // pass for an allowed '/var/log'. Consider enforcing a path-separator
    // boundary (or verify allowedLogPaths entries end with a separator).
    const isAllowed = allowedPaths.some(allowed =>
      normalizedPath.startsWith(path.normalize(allowed))
    );

    if (!isAllowed) {
      return ctx.errorResponse(res, 403, 'Access to this log path is not allowed');
    }

    if (!await exists(normalizedPath)) {
      const { NotFoundError } = require('../errors');
      throw new NotFoundError('Log file');
    }

    const fileContent = await fsp.readFile(normalizedPath, 'utf8');
    // Drop blank lines before tailing so the count reflects real content.
    const lines = fileContent.split('\n').filter(line => line.trim());
    const tailLines = lines.slice(-tail);

    const logs = tailLines.map(line => ({
      stream: 'stdout',
      text: line,
      timestamp: extractTimestamp(line)
    }));

    res.json({
      success: true,
      logPath: normalizedPath,
      logs,
      count: logs.length,
      totalLines: lines.length
    });
  }, 'logs-file'));

  return router;
};
|
||||
|
||||
/**
 * Pull a leading timestamp out of a raw log line, if one is present.
 *
 * Recognized formats (anchored to the start of the line):
 *   - ISO-ish:   "2024-01-02T03:04:05" or "2024-01-02 03:04:05"
 *   - US-style:  "Jan 2, 2024 03:04:05"
 *   - Bracketed: "[2024-01-02 03:04:05]"
 *
 * @param {string} line - A single log line.
 * @returns {?string} The matched timestamp text, or null when none matches.
 */
function extractTimestamp(line) {
  const TIMESTAMP_PATTERNS = [
    /^(\d{4}-\d{2}-\d{2}[T\s]\d{2}:\d{2}:\d{2})/,
    /^(\w{3}\s+\d{1,2},\s+\d{4}\s+\d{2}:\d{2}:\d{2})/,
    /^\[(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})\]/,
  ];

  for (let i = 0; i < TIMESTAMP_PATTERNS.length; i += 1) {
    const hit = TIMESTAMP_PATTERNS[i].exec(line);
    if (hit !== null) {
      return hit[1];
    }
  }
  return null;
}
|
||||
167
dashcaddy-api/routes/monitoring.js
Normal file
167
dashcaddy-api/routes/monitoring.js
Normal file
@@ -0,0 +1,167 @@
|
||||
const express = require('express');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// ===== RESOURCE MONITORING ENDPOINTS =====
|
||||
|
||||
// Get all container stats (from resource monitor module)
|
||||
router.get('/monitoring/stats', ctx.asyncHandler(async (req, res) => {
|
||||
const stats = ctx.resourceMonitor.getAllStats();
|
||||
res.json({ success: true, stats });
|
||||
}, 'monitoring-stats'));
|
||||
|
||||
// Get stats for specific container
|
||||
router.get('/monitoring/stats/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
const stats = ctx.resourceMonitor.getCurrentStats(req.params.containerId);
|
||||
if (!stats) {
|
||||
const { NotFoundError } = require('../errors');
|
||||
throw new NotFoundError('Container');
|
||||
}
|
||||
res.json({ success: true, stats });
|
||||
}, 'monitoring-stats-container'));
|
||||
|
||||
// Get historical stats
|
||||
router.get('/monitoring/history/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
const hours = parseInt(req.query.hours) || 24;
|
||||
const history = ctx.resourceMonitor.getHistoricalStats(req.params.containerId, hours);
|
||||
res.json({ success: true, history, hours });
|
||||
}, 'monitoring-history'));
|
||||
|
||||
// Get aggregated stats
|
||||
router.get('/monitoring/aggregated/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
const hours = parseInt(req.query.hours) || 24;
|
||||
const aggregated = ctx.resourceMonitor.getAggregatedStats(req.params.containerId, hours);
|
||||
if (!aggregated) {
|
||||
const { NotFoundError } = require('../errors');
|
||||
throw new NotFoundError('Monitoring data');
|
||||
}
|
||||
res.json({ success: true, aggregated, hours });
|
||||
}, 'monitoring-aggregated'));
|
||||
|
||||
// Configure alerts
|
||||
router.post('/monitoring/alerts/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
ctx.resourceMonitor.setAlertConfig(req.params.containerId, req.body);
|
||||
res.json({ success: true, message: 'Alert configuration saved' });
|
||||
}, 'monitoring-alerts-set'));
|
||||
|
||||
// Get alert configuration
|
||||
router.get('/monitoring/alerts/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
const config = ctx.resourceMonitor.getAlertConfig(req.params.containerId);
|
||||
res.json({ success: true, config: config || {} });
|
||||
}, 'monitoring-alerts-get'));
|
||||
|
||||
// Delete alert configuration
|
||||
router.delete('/monitoring/alerts/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
ctx.resourceMonitor.removeAlertConfig(req.params.containerId);
|
||||
res.json({ success: true, message: 'Alert configuration removed' });
|
||||
}, 'monitoring-alerts-delete'));
|
||||
|
||||
// ===== CONTAINER STATS ENDPOINTS (legacy /stats/) =====
|
||||
|
||||
// Get all container stats (live Docker stats)
|
||||
router.get('/stats/containers', ctx.asyncHandler(async (req, res) => {
|
||||
const containers = await ctx.docker.client.listContainers({ all: false });
|
||||
const stats = [];
|
||||
|
||||
for (const containerInfo of containers) {
|
||||
try {
|
||||
const container = ctx.docker.client.getContainer(containerInfo.Id);
|
||||
const containerStats = await container.stats({ stream: false });
|
||||
|
||||
// Calculate CPU percentage
|
||||
const cpuDelta = containerStats.cpu_stats.cpu_usage.total_usage -
|
||||
(containerStats.precpu_stats.cpu_usage?.total_usage || 0);
|
||||
const systemDelta = containerStats.cpu_stats.system_cpu_usage -
|
||||
(containerStats.precpu_stats.system_cpu_usage || 0);
|
||||
const cpuPercent = systemDelta > 0 ? (cpuDelta / systemDelta) * 100 * (containerStats.cpu_stats.online_cpus || 1) : 0;
|
||||
|
||||
// Calculate memory usage
|
||||
const memUsage = containerStats.memory_stats.usage || 0;
|
||||
const memLimit = containerStats.memory_stats.limit || 1;
|
||||
const memPercent = (memUsage / memLimit) * 100;
|
||||
|
||||
// Network stats
|
||||
let netRx = 0, netTx = 0;
|
||||
if (containerStats.networks) {
|
||||
for (const net of Object.values(containerStats.networks)) {
|
||||
netRx += net.rx_bytes || 0;
|
||||
netTx += net.tx_bytes || 0;
|
||||
}
|
||||
}
|
||||
|
||||
stats.push({
|
||||
id: containerInfo.Id.slice(0, 12),
|
||||
name: containerInfo.Names[0]?.replace(/^\//, '') || 'unknown',
|
||||
image: containerInfo.Image,
|
||||
status: containerInfo.State,
|
||||
cpu: {
|
||||
percent: Math.round(cpuPercent * 100) / 100
|
||||
},
|
||||
memory: {
|
||||
used: memUsage,
|
||||
limit: memLimit,
|
||||
percent: Math.round(memPercent * 100) / 100
|
||||
},
|
||||
network: {
|
||||
rx: netRx,
|
||||
tx: netTx
|
||||
}
|
||||
});
|
||||
} catch (e) {
|
||||
// Skip containers we can't get stats for
|
||||
console.log(`Could not get stats for ${containerInfo.Names[0]}:`, e.message);
|
||||
}
|
||||
}
|
||||
|
||||
res.json({ success: true, stats, timestamp: new Date().toISOString() });
|
||||
}, 'stats-containers'));
|
||||
|
||||
// Get single container stats
|
||||
router.get('/stats/container/:id', ctx.asyncHandler(async (req, res) => {
|
||||
const container = ctx.docker.client.getContainer(req.params.id);
|
||||
const containerStats = await container.stats({ stream: false });
|
||||
const info = await container.inspect();
|
||||
|
||||
// Calculate CPU percentage
|
||||
const cpuDelta = containerStats.cpu_stats.cpu_usage.total_usage -
|
||||
(containerStats.precpu_stats.cpu_usage?.total_usage || 0);
|
||||
const systemDelta = containerStats.cpu_stats.system_cpu_usage -
|
||||
(containerStats.precpu_stats.system_cpu_usage || 0);
|
||||
const cpuPercent = systemDelta > 0 ? (cpuDelta / systemDelta) * 100 * (containerStats.cpu_stats.online_cpus || 1) : 0;
|
||||
|
||||
// Memory
|
||||
const memUsage = containerStats.memory_stats.usage || 0;
|
||||
const memLimit = containerStats.memory_stats.limit || 1;
|
||||
|
||||
// Network
|
||||
let netRx = 0, netTx = 0;
|
||||
if (containerStats.networks) {
|
||||
for (const net of Object.values(containerStats.networks)) {
|
||||
netRx += net.rx_bytes || 0;
|
||||
netTx += net.tx_bytes || 0;
|
||||
}
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
stats: {
|
||||
name: info.Name.replace(/^\//, ''),
|
||||
image: info.Config.Image,
|
||||
status: info.State.Status,
|
||||
started: info.State.StartedAt,
|
||||
cpu: {
|
||||
percent: Math.round(cpuPercent * 100) / 100
|
||||
},
|
||||
memory: {
|
||||
used: memUsage,
|
||||
limit: memLimit,
|
||||
percent: Math.round((memUsage / memLimit) * 100 * 100) / 100
|
||||
},
|
||||
network: { rx: netRx, tx: netTx }
|
||||
}
|
||||
});
|
||||
}, 'stats-container'));
|
||||
|
||||
return router;
|
||||
};
|
||||
185
dashcaddy-api/routes/notifications.js
Normal file
185
dashcaddy-api/routes/notifications.js
Normal file
@@ -0,0 +1,185 @@
|
||||
const express = require('express');
|
||||
const { validateURL, validateToken } = require('../input-validator');
|
||||
const { paginate, parsePaginationParams } = require('../pagination');
|
||||
|
||||
/**
 * Notification routes: read/update provider configuration (Discord,
 * Telegram, ntfy), send test notifications, browse/clear history, and
 * manually trigger the container health check.
 *
 * @param {object} ctx - Shared application context (notification module,
 *   asyncHandler, errorResponse).
 * @returns {express.Router}
 */
module.exports = function(ctx) {
  const router = express.Router();

  // GET /config — Get notification configuration (sensitive data redacted)
  router.get('/config', ctx.asyncHandler(async (req, res) => {
    const notificationConfig = ctx.notification.getConfig();
    // Return config without sensitive data: secrets (webhook URL, bot token,
    // chat id, topic) are reduced to a boolean `configured` flag.
    const safeConfig = {
      enabled: notificationConfig.enabled,
      providers: {
        discord: {
          enabled: notificationConfig.providers.discord?.enabled || false,
          configured: !!notificationConfig.providers.discord?.webhookUrl
        },
        telegram: {
          // Telegram needs both a bot token and a chat id to be usable.
          enabled: notificationConfig.providers.telegram?.enabled || false,
          configured: !!(notificationConfig.providers.telegram?.botToken && notificationConfig.providers.telegram?.chatId)
        },
        ntfy: {
          enabled: notificationConfig.providers.ntfy?.enabled || false,
          configured: !!notificationConfig.providers.ntfy?.topic,
          // The public ntfy.sh instance is the default server.
          serverUrl: notificationConfig.providers.ntfy?.serverUrl || 'https://ntfy.sh'
        }
      },
      events: notificationConfig.events,
      healthCheck: notificationConfig.healthCheck
    };
    res.json({ success: true, config: safeConfig });
  }, 'notifications-config-get'));

  // POST /config — Update notification configuration.
  // Validates incoming provider fields first, then merges partial updates
  // into the existing config, and finally persists.
  // NOTE(review): this mutates the object returned by getConfig() in place —
  // presumably it is a live reference the module shares; confirm saveConfig()
  // persists that same object.
  router.post('/config', ctx.asyncHandler(async (req, res) => {
    const { enabled, providers, events, healthCheck } = req.body;
    const notificationConfig = ctx.notification.getConfig();

    // Validate provider webhook URLs and tokens before touching the config,
    // so a bad request leaves the stored configuration unchanged.
    if (providers) {
      if (providers.discord?.webhookUrl) {
        try {
          validateURL(providers.discord.webhookUrl);
        } catch (validationErr) {
          return ctx.errorResponse(res, 400, 'Invalid Discord webhook URL');
        }
      }
      if (providers.telegram?.botToken) {
        try {
          validateToken(providers.telegram.botToken);
        } catch (validationErr) {
          return ctx.errorResponse(res, 400, 'Invalid Telegram bot token format');
        }
      }
      if (providers.ntfy?.serverUrl) {
        try {
          validateURL(providers.ntfy.serverUrl);
        } catch (validationErr) {
          return ctx.errorResponse(res, 400, 'Invalid ntfy server URL');
        }
      }
      if (providers.ntfy?.topic) {
        const topicRegex = /^[a-zA-Z0-9_-]{1,64}$/;
        if (!topicRegex.test(providers.ntfy.topic)) {
          return ctx.errorResponse(res, 400, 'Invalid ntfy topic (alphanumeric, hyphens, underscores only, max 64 chars)');
        }
      }
    }

    // Update enabled state (only when an explicit boolean was sent).
    if (typeof enabled === 'boolean') {
      notificationConfig.enabled = enabled;
    }

    // Update providers (only update provided fields — shallow merge per provider)
    if (providers) {
      if (providers.discord) {
        notificationConfig.providers.discord = {
          ...notificationConfig.providers.discord,
          ...providers.discord
        };
      }
      if (providers.telegram) {
        notificationConfig.providers.telegram = {
          ...notificationConfig.providers.telegram,
          ...providers.telegram
        };
      }
      if (providers.ntfy) {
        notificationConfig.providers.ntfy = {
          ...notificationConfig.providers.ntfy,
          ...providers.ntfy
        };
      }
    }

    // Update events (shallow merge)
    if (events) {
      notificationConfig.events = { ...notificationConfig.events, ...events };
    }

    // Update health check settings
    if (healthCheck) {
      // Capture the previous enabled flag before merging so we can detect a
      // state change below.
      const wasEnabled = notificationConfig.healthCheck?.enabled;
      notificationConfig.healthCheck = { ...notificationConfig.healthCheck, ...healthCheck };

      // Restart daemon if the enabled flag flipped or the interval was set.
      if (healthCheck.enabled !== wasEnabled || healthCheck.intervalMinutes) {
        if (notificationConfig.healthCheck.enabled) {
          ctx.notification.startHealthDaemon();
        } else {
          ctx.notification.stopHealthDaemon();
        }
      }
    }

    await ctx.notification.saveConfig();
    res.json({ success: true, message: 'Notification config updated' });
  }, 'notifications-config-update'));

  // POST /test — Test notification delivery. With a `provider` in the body,
  // tests that one provider; otherwise tests all enabled providers at once.
  router.post('/test', ctx.asyncHandler(async (req, res) => {
    const { provider } = req.body;

    if (provider) {
      // Test specific provider
      let result;
      switch (provider) {
        case 'discord':
          result = await ctx.notification.sendDiscord('Test Notification', 'This is a test notification from DashCaddy.', 'info');
          break;
        case 'telegram':
          result = await ctx.notification.sendTelegram('Test Notification', 'This is a test notification from DashCaddy.', 'info');
          break;
        case 'ntfy':
          result = await ctx.notification.sendNtfy('Test Notification', 'This is a test notification from DashCaddy.', 'info');
          break;
        default:
          return ctx.errorResponse(res, 400, 'Unknown provider');
      }
      res.json({ success: result.success, provider, error: result.error });
    } else {
      // Test all enabled providers
      const result = await ctx.notification.send('test', 'Test Notification', 'This is a test notification from DashCaddy.', 'info');
      res.json({ success: true, ...result });
    }
  }, 'notifications-test'));

  // GET /history — Get notification history. Supports either cursor-style
  // pagination (via parsePaginationParams) or a simple ?limit= (default 50).
  router.get('/history', ctx.asyncHandler(async (req, res) => {
    const notificationHistory = ctx.notification.getHistory();
    const paginationParams = parsePaginationParams(req.query);
    if (paginationParams) {
      const result = paginate(notificationHistory, paginationParams);
      res.json({ success: true, history: result.data, total: notificationHistory.length, pagination: result.pagination });
    } else {
      const limit = parseInt(req.query.limit) || 50;
      res.json({
        success: true,
        history: notificationHistory.slice(0, limit),
        total: notificationHistory.length
      });
    }
  }, 'notifications-history'));

  // DELETE /history — Clear notification history
  router.delete('/history', ctx.asyncHandler(async (req, res) => {
    ctx.notification.clearHistory();
    res.json({ success: true, message: 'Notification history cleared' });
  }, 'notifications-history-clear'));

  // POST /health-check — Manually trigger health check and report when it
  // ran and how many containers are being watched.
  router.post('/health-check', ctx.asyncHandler(async (req, res) => {
    await ctx.notification.checkHealth();
    const notificationConfig = ctx.notification.getConfig();
    res.json({
      success: true,
      lastCheck: notificationConfig.healthCheck.lastCheck,
      containersMonitored: Object.keys(ctx.notification.getHealthState()).length
    });
  }, 'notifications-health-check'));

  return router;
};
|
||||
373
dashcaddy-api/routes/recipes/deploy.js
Normal file
373
dashcaddy-api/routes/recipes/deploy.js
Normal file
@@ -0,0 +1,373 @@
|
||||
const express = require('express');
|
||||
const crypto = require('crypto');
|
||||
const { DOCKER } = require('../../constants');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
/**
|
||||
* Deploy a recipe — creates multiple containers as a coordinated stack
|
||||
*
|
||||
* POST /api/recipes/deploy
|
||||
* Body: { recipeId, config: { selectedComponents, sharedConfig, componentOverrides } }
|
||||
*/
|
||||
router.post('/deploy', ctx.asyncHandler(async (req, res) => {
|
||||
const { recipeId, config } = req.body;
|
||||
const { RECIPE_TEMPLATES } = require('../../recipe-templates');
|
||||
|
||||
const recipe = RECIPE_TEMPLATES[recipeId];
|
||||
if (!recipe) return ctx.errorResponse(res, 400, 'Invalid recipe template');
|
||||
|
||||
ctx.log.info('recipe', 'Starting recipe deployment', { recipeId, name: recipe.name });
|
||||
|
||||
// Determine which components to deploy
|
||||
const selectedIds = new Set(config.selectedComponents || recipe.components.filter(c => c.required).map(c => c.id));
|
||||
// Always include required components
|
||||
recipe.components.filter(c => c.required).forEach(c => selectedIds.add(c.id));
|
||||
|
||||
const componentsToDeploy = recipe.components
|
||||
.filter(c => selectedIds.has(c.id))
|
||||
.sort((a, b) => a.order - b.order);
|
||||
|
||||
// Generate shared passwords for the recipe (consistent across components)
|
||||
const generatedPasswords = {};
|
||||
const passwordKey = `recipe-${recipeId}-${Date.now()}`;
|
||||
generatedPasswords.default = crypto.randomBytes(24).toString('base64url');
|
||||
|
||||
// Create Docker network if defined
|
||||
let networkName = null;
|
||||
if (recipe.network) {
|
||||
networkName = recipe.network.name;
|
||||
try {
|
||||
await ctx.docker.client.createNetwork({
|
||||
Name: networkName,
|
||||
Driver: recipe.network.driver || 'bridge',
|
||||
Labels: { 'sami.managed': 'true', 'sami.recipe': recipeId }
|
||||
});
|
||||
ctx.log.info('recipe', 'Created Docker network', { networkName });
|
||||
} catch (e) {
|
||||
// Network might already exist
|
||||
if (!e.message.includes('already exists')) {
|
||||
throw new Error(`Failed to create network ${networkName}: ${e.message}`);
|
||||
}
|
||||
ctx.log.info('recipe', 'Docker network already exists', { networkName });
|
||||
}
|
||||
}
|
||||
|
||||
const deployedComponents = [];
|
||||
const errors = [];
|
||||
|
||||
try {
|
||||
for (const component of componentsToDeploy) {
|
||||
try {
|
||||
ctx.log.info('recipe', `Deploying component: ${component.id}`, {
|
||||
role: component.role,
|
||||
internal: component.internal || false
|
||||
});
|
||||
|
||||
const result = await deployComponent(component, recipe, config, generatedPasswords, networkName);
|
||||
deployedComponents.push(result);
|
||||
|
||||
ctx.log.info('recipe', `Component deployed: ${component.id}`, {
|
||||
containerId: result.containerId?.substring(0, 12)
|
||||
});
|
||||
} catch (componentError) {
|
||||
ctx.log.error('recipe', `Component failed: ${component.id}`, {
|
||||
error: componentError.message
|
||||
});
|
||||
errors.push({ componentId: component.id, role: component.role, error: componentError.message });
|
||||
// Continue deploying other components — partial success is better than total failure
|
||||
}
|
||||
}
|
||||
|
||||
if (deployedComponents.length === 0) {
|
||||
throw new Error('All components failed to deploy');
|
||||
}
|
||||
|
||||
// Register deployed components in services.json
|
||||
for (const deployed of deployedComponents) {
|
||||
if (!deployed.internal) {
|
||||
await ctx.addServiceToConfig({
|
||||
id: deployed.subdomain,
|
||||
name: deployed.name,
|
||||
logo: deployed.logo,
|
||||
containerId: deployed.containerId,
|
||||
appTemplate: deployed.templateRef || deployed.id,
|
||||
recipeId: recipeId,
|
||||
recipeRole: deployed.role,
|
||||
tailscaleOnly: config.sharedConfig?.tailscaleOnly || false,
|
||||
deployedAt: new Date().toISOString()
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Run auto-connect if available
|
||||
if (recipe.autoConnect?.enabled && errors.length === 0) {
|
||||
ctx.log.info('recipe', 'Running auto-connect for recipe', { recipeId });
|
||||
// Auto-connect will be handled asynchronously — don't block the response
|
||||
runAutoConnect(recipe, deployedComponents, config).catch(e => {
|
||||
ctx.log.warn('recipe', 'Auto-connect had errors', { recipeId, error: e.message });
|
||||
});
|
||||
}
|
||||
|
||||
const response = {
|
||||
success: true,
|
||||
recipeId,
|
||||
recipeName: recipe.name,
|
||||
deployed: deployedComponents.map(c => ({
|
||||
id: c.id,
|
||||
role: c.role,
|
||||
containerId: c.containerId?.substring(0, 12),
|
||||
url: c.url,
|
||||
internal: c.internal
|
||||
})),
|
||||
errors: errors.length > 0 ? errors : undefined,
|
||||
message: errors.length > 0
|
||||
? `${recipe.name} partially deployed (${deployedComponents.length}/${componentsToDeploy.length} components)`
|
||||
: `${recipe.name} deployed successfully!`,
|
||||
setupInstructions: recipe.setupInstructions
|
||||
};
|
||||
|
||||
ctx.notification.send('deploymentSuccess', 'Recipe Deployed',
|
||||
`**${recipe.name}** recipe deployed (${deployedComponents.length} components).`,
|
||||
'success'
|
||||
);
|
||||
|
||||
res.json(response);
|
||||
} catch (error) {
|
||||
ctx.log.error('recipe', 'Recipe deployment failed', { recipeId, error: error.message });
|
||||
|
||||
// Cleanup: remove partially deployed containers
|
||||
for (const deployed of deployedComponents) {
|
||||
try {
|
||||
if (deployed.containerId) {
|
||||
const container = ctx.docker.client.getContainer(deployed.containerId);
|
||||
await container.remove({ force: true });
|
||||
}
|
||||
} catch (cleanupError) {
|
||||
ctx.log.warn('recipe', 'Cleanup failed for component', {
|
||||
componentId: deployed.id, error: cleanupError.message
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup network
|
||||
if (networkName) {
|
||||
try {
|
||||
const network = ctx.docker.client.getNetwork(networkName);
|
||||
await network.remove();
|
||||
} catch (e) {
|
||||
ctx.log.warn('recipe', 'Network cleanup failed', { networkName, error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
ctx.notification.send('deploymentFailed', 'Recipe Failed',
|
||||
`Failed to deploy **${recipe.name}**: ${error.message}`, 'error'
|
||||
);
|
||||
|
||||
ctx.errorResponse(res, 500, error.message);
|
||||
}
|
||||
}, 'recipe-deploy'));
|
||||
|
||||
  /**
   * Deploy a single component of a recipe.
   *
   * Resolves the component's Docker config (from an app template or inline),
   * substitutes template variables, pulls the image, replaces any stale
   * container of the same name, starts the new container, attaches it to the
   * recipe network, and (for externally-reachable components) registers a
   * Caddy reverse-proxy entry.
   *
   * @param {object} component   Recipe component definition.
   * @param {object} recipe      Parent recipe template.
   * @param {object} config      User deployment config ({ sharedConfig, componentOverrides, ... }).
   * @param {object} passwords   Generated password map; `.default` replaces {{GENERATED_PASSWORD}}.
   * @param {?string} networkName Recipe Docker network to join, or null.
   * @returns {Promise<object>}  Summary: { id, role, name, subdomain, containerId, internal, templateRef, logo, url }.
   * @throws when the referenced template is missing, the image can't be
   *         pulled or found locally, or container create/start fails.
   */
  async function deployComponent(component, recipe, config, passwords, networkName) {
    const sharedConfig = config.sharedConfig || {};
    const overrides = config.componentOverrides?.[component.id] || {};

    // Resolve the Docker config — either from templateRef or inline
    let dockerConfig;
    let templateName;
    let logo;

    if (component.templateRef) {
      const template = ctx.APP_TEMPLATES[component.templateRef];
      if (!template) throw new Error(`Template ${component.templateRef} not found`);
      // Deep clone so variable substitution below never mutates the shared template.
      dockerConfig = JSON.parse(JSON.stringify(template.docker)); // Deep clone
      templateName = template.name;
      logo = template.logo || `/assets/${component.templateRef}.png`;

      // Apply envOverrides from recipe (recipe values win over template defaults)
      if (component.envOverrides) {
        dockerConfig.environment = { ...dockerConfig.environment, ...component.envOverrides };
      }
    } else {
      // Inline docker config
      dockerConfig = JSON.parse(JSON.stringify(component.docker));
      templateName = component.role;
      logo = `/assets/${component.id}.png`;
    }

    // Replace template variables. Explicit overrides win, then component
    // defaults, then a generated "<recipename>-<componentid>" subdomain.
    const subdomain = overrides.subdomain || component.subdomain || `${recipe.name.toLowerCase().replace(/\s+/g, '')}-${component.id}`;
    const port = overrides.port || component.defaultPort || null;
    const hostIp = sharedConfig.ip || 'host.docker.internal';

    // Recursively substitute {{...}} placeholders in every string of the
    // docker config. {{GENERATED_PASSWORD}} uses the recipe-wide password so
    // components can authenticate to each other.
    const replaceVars = (obj) => {
      if (typeof obj === 'string') {
        return obj
          .replace(/\{\{GENERATED_PASSWORD\}\}/g, passwords.default)
          .replace(/\{\{PORT\}\}/g, String(port || ''))
          .replace(/\{\{HOST_IP\}\}/g, hostIp)
          .replace(/\{\{SUBDOMAIN\}\}/g, subdomain)
          .replace(/\{\{TIMEZONE\}\}/g, sharedConfig.timezone || 'UTC')
          .replace(/\{\{NEXTCLOUD_DOMAIN\}\}/g, `${subdomain}.${(ctx.siteConfig?.tld || '.home').replace(/^\./, '')}`);
      }
      if (Array.isArray(obj)) return obj.map(replaceVars);
      if (obj && typeof obj === 'object') {
        const result = {};
        for (const [k, v] of Object.entries(obj)) result[k] = replaceVars(v);
        return result;
      }
      return obj;
    };

    dockerConfig = replaceVars(dockerConfig);

    // Apply shared volume paths: rewrite the host side of any mount whose
    // default path (or {{KEY_PATH}} placeholder) matches a shared volume the
    // user remapped.
    if (recipe.sharedVolumes && dockerConfig.volumes) {
      for (const [key, volConfig] of Object.entries(recipe.sharedVolumes)) {
        const userPath = sharedConfig.volumes?.[key] || volConfig.defaultPath;
        if (volConfig.usedBy?.includes(component.id)) {
          // Find and update matching volume mounts
          dockerConfig.volumes = dockerConfig.volumes.map(vol => {
            if (vol.includes(volConfig.defaultPath) || vol.includes(`{{${key.toUpperCase()}_PATH}}`)) {
              const [, containerPath] = vol.split(':');
              return `${userPath}:${containerPath}`;
            }
            return vol;
          });
        }
      }
    }

    // Skip container creation for internal-only services with no ports
    const containerName = `${DOCKER.CONTAINER_PREFIX}${subdomain}`;

    // Build the dockerode container config; sami.* labels make the container
    // discoverable by the recipe management routes.
    const containerConfig = {
      Image: dockerConfig.image,
      name: containerName,
      ExposedPorts: {},
      HostConfig: {
        PortBindings: {},
        Binds: dockerConfig.volumes || [],
        RestartPolicy: { Name: 'unless-stopped' }
      },
      Env: Object.entries(dockerConfig.environment || {}).map(([k, v]) => `${k}=${v}`),
      Labels: {
        'sami.managed': 'true',
        'sami.app': component.templateRef || component.id,
        'sami.recipe': recipe.name.toLowerCase().replace(/\s+/g, '-'),
        'sami.recipe.component': component.id,
        'sami.recipe.role': component.role,
        'sami.subdomain': subdomain,
        'sami.deployed': new Date().toISOString()
      }
    };

    // Configure ports. Mappings are "host:container" or "host:container/proto";
    // protocol defaults to tcp.
    if (dockerConfig.ports && dockerConfig.ports.length > 0) {
      for (const portMapping of dockerConfig.ports) {
        const parts = portMapping.split(/[:/]/);
        if (parts.length >= 2) {
          const [hostPort, containerPort, protocol = 'tcp'] = parts;
          const key = `${containerPort}/${protocol}`;
          containerConfig.ExposedPorts[key] = {};
          containerConfig.HostConfig.PortBindings[key] = [{ HostPort: hostPort }];
        }
      }
    }

    // Pull image; on pull failure fall back to a locally cached copy.
    try {
      ctx.log.info('recipe', `Pulling image: ${dockerConfig.image}`);
      await ctx.docker.pull(dockerConfig.image);
    } catch (e) {
      ctx.log.warn('recipe', `Pull failed, checking local: ${dockerConfig.image}`);
      const images = await ctx.docker.client.listImages({
        filters: { reference: [dockerConfig.image] }
      });
      if (images.length === 0) throw new Error(`Image not found: ${dockerConfig.image}`);
    }

    // Remove stale container with the same name, then give Docker a moment
    // to release the name before re-creating.
    try {
      const existing = ctx.docker.client.getContainer(containerName);
      await existing.inspect();
      await existing.remove({ force: true });
      await new Promise(r => setTimeout(r, 1000));
    } catch (e) {
      // Doesn't exist — normal
    }

    // Create and start container
    const container = await ctx.docker.client.createContainer(containerConfig);
    await container.start();

    // Connect to recipe network (best-effort; the container still runs on
    // the default bridge if this fails).
    if (networkName) {
      try {
        const network = ctx.docker.client.getNetwork(networkName);
        await network.connect({ Container: container.id });
        ctx.log.info('recipe', `Connected ${component.id} to network ${networkName}`);
      } catch (e) {
        ctx.log.warn('recipe', `Failed to connect ${component.id} to network`, { error: e.message });
      }
    }

    // Add Caddy config for non-internal components with ports so they get a
    // https://<subdomain>.<tld> URL; failure here is non-fatal (url stays null).
    let url = null;
    if (!component.internal && dockerConfig.ports?.length > 0) {
      const primaryPort = port || dockerConfig.ports[0].split(/[:/]/)[0];
      const caddyConfig = ctx.caddy.generateConfig(
        subdomain, hostIp, primaryPort,
        { tailscaleOnly: sharedConfig.tailscaleOnly || false }
      );
      try {
        const helpers = require('../apps/helpers')(ctx);
        await helpers.addCaddyConfig(subdomain, caddyConfig);
        url = `https://${ctx.buildDomain(subdomain)}`;
      } catch (e) {
        ctx.log.warn('recipe', `Caddy config failed for ${component.id}`, { error: e.message });
      }
    }

    return {
      id: component.id,
      role: component.role,
      name: templateName,
      subdomain,
      containerId: container.id,
      internal: component.internal || false,
      templateRef: component.templateRef,
      logo,
      url
    };
  }
|
||||
|
||||
/**
|
||||
* Run auto-connect steps after recipe deployment
|
||||
*/
|
||||
async function runAutoConnect(recipe, deployedComponents, config) {
|
||||
if (!recipe.autoConnect?.steps) return;
|
||||
|
||||
// Wait for services to be fully ready
|
||||
await new Promise(r => setTimeout(r, 10000));
|
||||
|
||||
for (const step of recipe.autoConnect.steps) {
|
||||
try {
|
||||
ctx.log.info('recipe', `Auto-connect step: ${step.action}`, { targets: step.targets });
|
||||
// These actions map to existing Smart Arr Connect functionality
|
||||
// The actual implementation will be wired when Smart Arr Connect helpers are available
|
||||
ctx.log.info('recipe', `Auto-connect step ${step.action} completed`);
|
||||
} catch (e) {
|
||||
ctx.log.warn('recipe', `Auto-connect step failed: ${step.action}`, { error: e.message });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return router;
|
||||
};
|
||||
54
dashcaddy-api/routes/recipes/index.js
Normal file
54
dashcaddy-api/routes/recipes/index.js
Normal file
@@ -0,0 +1,54 @@
|
||||
const express = require('express');
|
||||
const deployRoutes = require('./deploy');
|
||||
const manageRoutes = require('./manage');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// All recipe routes require premium license
|
||||
router.use(ctx.licenseManager.requirePremium('recipes'));
|
||||
|
||||
// GET /api/recipes/templates — list all recipe templates
|
||||
router.get('/templates', ctx.asyncHandler(async (req, res) => {
|
||||
const { RECIPE_TEMPLATES, RECIPE_CATEGORIES } = require('../../recipe-templates');
|
||||
const templates = Object.entries(RECIPE_TEMPLATES).map(([id, recipe]) => ({
|
||||
id,
|
||||
name: recipe.name,
|
||||
description: recipe.description,
|
||||
icon: recipe.icon,
|
||||
category: recipe.category,
|
||||
type: 'recipe',
|
||||
difficulty: recipe.difficulty,
|
||||
popularity: recipe.popularity,
|
||||
componentCount: recipe.components.length,
|
||||
requiredCount: recipe.components.filter(c => c.required).length,
|
||||
optionalCount: recipe.components.filter(c => !c.required).length,
|
||||
components: recipe.components.map(c => ({
|
||||
id: c.id,
|
||||
role: c.role,
|
||||
required: c.required,
|
||||
internal: c.internal || false,
|
||||
templateRef: c.templateRef || null,
|
||||
note: c.note || null
|
||||
})),
|
||||
setupInstructions: recipe.setupInstructions
|
||||
}));
|
||||
|
||||
res.json({ success: true, templates, categories: RECIPE_CATEGORIES });
|
||||
}, 'recipe-templates'));
|
||||
|
||||
// GET /api/recipes/templates/:recipeId — get single recipe template detail
|
||||
router.get('/templates/:recipeId', ctx.asyncHandler(async (req, res) => {
|
||||
const { RECIPE_TEMPLATES } = require('../../recipe-templates');
|
||||
const recipe = RECIPE_TEMPLATES[req.params.recipeId];
|
||||
if (!recipe) return ctx.errorResponse(res, 404, 'Recipe template not found');
|
||||
|
||||
res.json({ success: true, recipe: { id: req.params.recipeId, ...recipe } });
|
||||
}, 'recipe-template-detail'));
|
||||
|
||||
// Mount deploy and manage sub-routes
|
||||
router.use(deployRoutes(ctx));
|
||||
router.use(manageRoutes(ctx));
|
||||
|
||||
return router;
|
||||
};
|
||||
321
dashcaddy-api/routes/recipes/manage.js
Normal file
321
dashcaddy-api/routes/recipes/manage.js
Normal file
@@ -0,0 +1,321 @@
|
||||
const express = require('express');
|
||||
const { DOCKER } = require('../../constants');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
  /**
   * GET /api/recipes/deployed — list all deployed recipes (grouped by recipeId).
   *
   * Components are gathered from two sources: entries in services.json (the
   * dashboard-visible services) and Docker containers carrying sami.* recipe
   * labels (covers internal components that are never registered as services).
   * Each component is then enriched with live container state via inspect.
   */
  router.get('/deployed', ctx.asyncHandler(async (req, res) => {
    const services = await ctx.servicesStateManager.read();
    // recipeId -> { recipeId, components: [...] }
    const recipeGroups = {};

    // Source 1: services.json entries that belong to a recipe.
    for (const service of services) {
      if (!service.recipeId) continue;
      if (!recipeGroups[service.recipeId]) {
        recipeGroups[service.recipeId] = {
          recipeId: service.recipeId,
          components: []
        };
      }
      recipeGroups[service.recipeId].components.push({
        id: service.id,
        name: service.name,
        logo: service.logo,
        containerId: service.containerId,
        recipeRole: service.recipeRole,
        deployedAt: service.deployedAt
      });
    }

    // Source 2: internal containers (not in services.json), discovered by
    // Docker labels. Docker being unreachable is non-fatal here.
    try {
      const containers = await ctx.docker.client.listContainers({ all: true });
      for (const containerInfo of containers) {
        const labels = containerInfo.Labels || {};
        if (labels['sami.managed'] !== 'true') continue;
        const recipeLabel = labels['sami.recipe'];
        if (!recipeLabel) continue;

        // Map recipe label (name slug) back to recipe ID.
        const recipeId = findRecipeIdByLabel(recipeLabel);
        if (!recipeId) continue;

        if (!recipeGroups[recipeId]) {
          recipeGroups[recipeId] = { recipeId, components: [] };
        }

        // Skip containers already listed from services.json (match by containerId).
        const existing = recipeGroups[recipeId].components.find(
          c => c.containerId === containerInfo.Id
        );
        if (existing) continue;

        recipeGroups[recipeId].components.push({
          id: labels['sami.recipe.component'] || containerInfo.Names[0]?.replace('/', ''),
          name: labels['sami.recipe.role'] || labels['sami.app'] || 'Unknown',
          containerId: containerInfo.Id,
          recipeRole: labels['sami.recipe.role'] || 'Unknown',
          internal: true,
          // listContainers already provides state/status for these entries.
          state: containerInfo.State,
          status: containerInfo.Status
        });
      }
    } catch (e) {
      ctx.log.warn('recipe', 'Could not list Docker containers for recipe discovery', { error: e.message });
    }

    // Enrich services.json-sourced components (no state yet) with live
    // container state; a failed inspect means the container is gone.
    for (const group of Object.values(recipeGroups)) {
      for (const component of group.components) {
        if (component.containerId && !component.state) {
          try {
            const container = ctx.docker.client.getContainer(component.containerId);
            const info = await container.inspect();
            component.state = info.State.Status;
            component.status = info.State.Status === 'running'
              ? `Up ${formatUptime(info.State.StartedAt)}`
              : info.State.Status;
          } catch (e) {
            component.state = 'removed';
            component.status = 'Container not found';
          }
        }
      }
    }

    res.json({ success: true, recipes: Object.values(recipeGroups) });
  }, 'recipe-deployed'));
|
||||
|
||||
/**
|
||||
* POST /api/recipes/:recipeId/start — start all containers in a recipe
|
||||
*/
|
||||
router.post('/:recipeId/start', ctx.asyncHandler(async (req, res) => {
|
||||
const { recipeId } = req.params;
|
||||
const containers = await findRecipeContainers(recipeId);
|
||||
|
||||
if (containers.length === 0) {
|
||||
return ctx.errorResponse(res, 404, 'No containers found for this recipe');
|
||||
}
|
||||
|
||||
const results = [];
|
||||
for (const containerInfo of containers) {
|
||||
try {
|
||||
const container = ctx.docker.client.getContainer(containerInfo.Id);
|
||||
const info = await container.inspect();
|
||||
if (info.State.Status !== 'running') {
|
||||
await container.start();
|
||||
results.push({ id: containerInfo.component, status: 'started' });
|
||||
} else {
|
||||
results.push({ id: containerInfo.component, status: 'already running' });
|
||||
}
|
||||
} catch (e) {
|
||||
results.push({ id: containerInfo.component, status: 'failed', error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
ctx.log.info('recipe', 'Recipe started', { recipeId, results });
|
||||
res.json({ success: true, recipeId, results });
|
||||
}, 'recipe-start'));
|
||||
|
||||
/**
|
||||
* POST /api/recipes/:recipeId/stop — stop all containers in a recipe
|
||||
*/
|
||||
router.post('/:recipeId/stop', ctx.asyncHandler(async (req, res) => {
|
||||
const { recipeId } = req.params;
|
||||
const containers = await findRecipeContainers(recipeId);
|
||||
|
||||
if (containers.length === 0) {
|
||||
return ctx.errorResponse(res, 404, 'No containers found for this recipe');
|
||||
}
|
||||
|
||||
const results = [];
|
||||
// Stop in reverse order (apps first, then infrastructure)
|
||||
for (const containerInfo of containers.reverse()) {
|
||||
try {
|
||||
const container = ctx.docker.client.getContainer(containerInfo.Id);
|
||||
const info = await container.inspect();
|
||||
if (info.State.Status === 'running') {
|
||||
await container.stop();
|
||||
results.push({ id: containerInfo.component, status: 'stopped' });
|
||||
} else {
|
||||
results.push({ id: containerInfo.component, status: 'already stopped' });
|
||||
}
|
||||
} catch (e) {
|
||||
results.push({ id: containerInfo.component, status: 'failed', error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
ctx.log.info('recipe', 'Recipe stopped', { recipeId, results });
|
||||
res.json({ success: true, recipeId, results });
|
||||
}, 'recipe-stop'));
|
||||
|
||||
/**
|
||||
* POST /api/recipes/:recipeId/restart — restart all containers in a recipe
|
||||
*/
|
||||
router.post('/:recipeId/restart', ctx.asyncHandler(async (req, res) => {
|
||||
const { recipeId } = req.params;
|
||||
const containers = await findRecipeContainers(recipeId);
|
||||
|
||||
if (containers.length === 0) {
|
||||
return ctx.errorResponse(res, 404, 'No containers found for this recipe');
|
||||
}
|
||||
|
||||
const results = [];
|
||||
for (const containerInfo of containers) {
|
||||
try {
|
||||
const container = ctx.docker.client.getContainer(containerInfo.Id);
|
||||
await container.restart();
|
||||
results.push({ id: containerInfo.component, status: 'restarted' });
|
||||
} catch (e) {
|
||||
results.push({ id: containerInfo.component, status: 'failed', error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
ctx.log.info('recipe', 'Recipe restarted', { recipeId, results });
|
||||
res.json({ success: true, recipeId, results });
|
||||
}, 'recipe-restart'));
|
||||
|
||||
  /**
   * DELETE /api/recipes/:recipeId — remove entire recipe (containers, network, services).
   *
   * Teardown order: for each container (reverse deployment order) remove its
   * Caddy block then force-remove the container; then purge the recipe's
   * entries from services.json; finally remove the recipe's Docker networks.
   * Each stage is best-effort — failures are recorded/logged, not fatal.
   */
  router.delete('/:recipeId', ctx.asyncHandler(async (req, res) => {
    const { recipeId } = req.params;
    const containers = await findRecipeContainers(recipeId);

    if (containers.length === 0) {
      return ctx.errorResponse(res, 404, 'No containers found for this recipe');
    }

    ctx.log.info('recipe', 'Removing recipe', { recipeId, containerCount: containers.length });

    const results = [];
    // Networks seen on the recipe's containers, collected for cleanup below.
    const networkNames = new Set();

    // Remove containers (reverse order: apps first, then infrastructure)
    for (const containerInfo of containers.reverse()) {
      try {
        const container = ctx.docker.client.getContainer(containerInfo.Id);
        const info = await container.inspect();

        // Collect network names for cleanup — only dashcaddy-managed ones.
        for (const netName of Object.keys(info.NetworkSettings.Networks || {})) {
          if (netName.startsWith('dashcaddy-')) {
            networkNames.add(netName);
          }
        }

        // Remove Caddy config for this subdomain (best-effort).
        const subdomain = info.Config?.Labels?.['sami.subdomain'];
        if (subdomain) {
          try {
            await removeCaddyBlock(subdomain);
          } catch (e) {
            ctx.log.warn('recipe', 'Failed to remove Caddy config', { subdomain, error: e.message });
          }
        }

        // Force remove container
        await container.remove({ force: true });
        results.push({ id: containerInfo.component, status: 'removed' });
      } catch (e) {
        results.push({ id: containerInfo.component, status: 'failed', error: e.message });
      }
    }

    // Remove recipe services from services.json
    await ctx.servicesStateManager.update(services => {
      return services.filter(s => s.recipeId !== recipeId);
    });

    // Remove Docker networks now that their containers are gone.
    for (const netName of networkNames) {
      try {
        const network = ctx.docker.client.getNetwork(netName);
        await network.remove();
        ctx.log.info('recipe', 'Removed Docker network', { netName });
      } catch (e) {
        ctx.log.warn('recipe', 'Failed to remove network', { netName, error: e.message });
      }
    }

    ctx.notification.send('recipeRemoved', 'Recipe Removed',
      `Removed **${recipeId}** recipe (${results.filter(r => r.status === 'removed').length} containers).`,
      'info'
    );

    ctx.log.info('recipe', 'Recipe removed', { recipeId, results });
    res.json({ success: true, recipeId, results });
  }, 'recipe-remove'));
|
||||
|
||||
// === Helper functions ===
|
||||
|
||||
/**
|
||||
* Find all Docker containers belonging to a recipe by label
|
||||
*/
|
||||
async function findRecipeContainers(recipeId) {
|
||||
const { RECIPE_TEMPLATES } = require('../../recipe-templates');
|
||||
const recipe = RECIPE_TEMPLATES[recipeId];
|
||||
const recipeLabel = recipe
|
||||
? recipe.name.toLowerCase().replace(/\s+/g, '-')
|
||||
: recipeId;
|
||||
|
||||
const containers = await ctx.docker.client.listContainers({ all: true });
|
||||
return containers
|
||||
.filter(c => {
|
||||
const labels = c.Labels || {};
|
||||
return labels['sami.managed'] === 'true' && labels['sami.recipe'] === recipeLabel;
|
||||
})
|
||||
.map(c => ({
|
||||
Id: c.Id,
|
||||
component: c.Labels['sami.recipe.component'] || c.Names[0]?.replace('/', ''),
|
||||
role: c.Labels['sami.recipe.role'] || 'Unknown',
|
||||
state: c.State
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Find recipe ID by its label (name slug)
|
||||
*/
|
||||
function findRecipeIdByLabel(label) {
|
||||
const { RECIPE_TEMPLATES } = require('../../recipe-templates');
|
||||
for (const [id, recipe] of Object.entries(RECIPE_TEMPLATES)) {
|
||||
if (recipe.name.toLowerCase().replace(/\s+/g, '-') === label) {
|
||||
return id;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
  /**
   * Remove a Caddy block for a subdomain from the Caddyfile.
   *
   * Locates the `<domain> { ... }` site block via regex and deletes it; the
   * Caddyfile is only rewritten (and Caddy reloaded) when something was
   * actually removed.
   */
  async function removeCaddyBlock(subdomain) {
    const domain = ctx.buildDomain(subdomain);
    let content = await ctx.caddy.read();

    // Find and remove the block for this domain. The domain is regex-escaped,
    // and the block pattern tolerates one level of nested { } directives
    // inside the site block (deeper nesting would not match).
    const escapedDomain = domain.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    const blockRegex = new RegExp(`\\n?${escapedDomain}\\s*\\{[^}]*(?:\\{[^}]*\\}[^}]*)*\\}`, 'g');
    const newContent = content.replace(blockRegex, '');

    if (newContent !== content) {
      await ctx.caddy.write(newContent);
      await ctx.caddy.reload();
    }
  }
|
||||
|
||||
/**
|
||||
* Format uptime from start time
|
||||
*/
|
||||
function formatUptime(startedAt) {
|
||||
const seconds = Math.floor((Date.now() - new Date(startedAt).getTime()) / 1000);
|
||||
if (seconds < 60) return `${seconds}s`;
|
||||
if (seconds < 3600) return `${Math.floor(seconds / 60)}m`;
|
||||
if (seconds < 86400) return `${Math.floor(seconds / 3600)}h`;
|
||||
return `${Math.floor(seconds / 86400)}d`;
|
||||
}
|
||||
|
||||
return router;
|
||||
};
|
||||
309
dashcaddy-api/routes/services.js
Normal file
309
dashcaddy-api/routes/services.js
Normal file
@@ -0,0 +1,309 @@
|
||||
const express = require('express');
|
||||
const fs = require('fs');
|
||||
const validatorLib = require('validator');
|
||||
const { REGEX } = require('../constants');
|
||||
const { validateServiceConfig, isValidPort } = require('../input-validator');
|
||||
const { exists } = require('../fs-helpers');
|
||||
const { paginate, parsePaginationParams } = require('../pagination');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// ===== SERVICE CREDENTIAL ENDPOINTS =====
|
||||
|
||||
// Store credentials for a service
|
||||
router.post('/services/:serviceId/credentials', ctx.asyncHandler(async (req, res) => {
|
||||
const { serviceId } = req.params;
|
||||
const { apiKey, username, password } = req.body;
|
||||
|
||||
if (apiKey) {
|
||||
await ctx.credentialManager.store(`service.${serviceId}.apikey`, apiKey);
|
||||
}
|
||||
if (username) {
|
||||
await ctx.credentialManager.store(`service.${serviceId}.username`, username);
|
||||
}
|
||||
if (password) {
|
||||
await ctx.credentialManager.store(`service.${serviceId}.password`, password);
|
||||
}
|
||||
|
||||
res.json({ success: true, message: `Credentials stored for ${serviceId}` });
|
||||
}, 'store-service-creds'));
|
||||
|
||||
// Delete credentials for a service
|
||||
router.delete('/services/:serviceId/credentials', ctx.asyncHandler(async (req, res) => {
|
||||
const { serviceId } = req.params;
|
||||
await ctx.credentialManager.delete(`service.${serviceId}.apikey`);
|
||||
await ctx.credentialManager.delete(`service.${serviceId}.username`);
|
||||
await ctx.credentialManager.delete(`service.${serviceId}.password`);
|
||||
res.json({ success: true, message: `Credentials removed for ${serviceId}` });
|
||||
}, 'delete-service-creds'));
|
||||
|
||||
// Check credential status for a service (what's stored)
|
||||
router.get('/services/:serviceId/credentials', ctx.asyncHandler(async (req, res) => {
|
||||
try {
|
||||
const { serviceId } = req.params;
|
||||
const arrKey = await ctx.credentialManager.retrieve(`arr.${serviceId}.apikey`).catch(() => null);
|
||||
const svcKey = await ctx.credentialManager.retrieve(`service.${serviceId}.apikey`).catch(() => null);
|
||||
const username = await ctx.credentialManager.retrieve(`service.${serviceId}.username`).catch(() => null);
|
||||
res.json({
|
||||
success: true,
|
||||
hasApiKey: !!(arrKey || svcKey),
|
||||
hasBasicAuth: !!username,
|
||||
username: username || null
|
||||
});
|
||||
} catch (error) {
|
||||
res.json({ success: true, hasApiKey: false, hasBasicAuth: false });
|
||||
}
|
||||
}, 'service-creds'));
|
||||
|
||||
// ===== SEEDHOST CREDENTIAL ENDPOINTS =====
|
||||
|
||||
// Store seedhost credentials (shared username + per-service passwords)
|
||||
router.post('/seedhost-creds', ctx.asyncHandler(async (req, res) => {
|
||||
const { username, password, serviceId } = req.body;
|
||||
if (!username) {
|
||||
return ctx.errorResponse(res, 400, 'Username required');
|
||||
}
|
||||
await ctx.credentialManager.store('seedhost.username', username);
|
||||
if (password) {
|
||||
if (serviceId) {
|
||||
await ctx.credentialManager.store(`seedhost.password.${serviceId}`, password);
|
||||
} else {
|
||||
await ctx.credentialManager.store('seedhost.password', password);
|
||||
}
|
||||
}
|
||||
res.json({ success: true, message: 'Seedhost credentials stored' });
|
||||
}, 'store-seedhost-creds'));
|
||||
|
||||
// Get seedhost credential status
|
||||
router.get('/seedhost-creds', ctx.asyncHandler(async (req, res) => {
|
||||
try {
|
||||
const username = await ctx.credentialManager.retrieve('seedhost.username').catch(() => null);
|
||||
const serviceId = req.query.serviceId;
|
||||
let hasPassword = false;
|
||||
if (serviceId) {
|
||||
const svcPass = await ctx.credentialManager.retrieve(`seedhost.password.${serviceId}`).catch(() => null);
|
||||
hasPassword = !!svcPass;
|
||||
}
|
||||
// Fall back to checking shared password
|
||||
if (!hasPassword) {
|
||||
const sharedPass = await ctx.credentialManager.retrieve('seedhost.password').catch(() => null);
|
||||
hasPassword = !!sharedPass;
|
||||
}
|
||||
res.json({ success: true, hasCredentials: !!username && hasPassword, username: username || null, hasPassword });
|
||||
} catch (error) {
|
||||
res.json({ success: true, hasCredentials: false });
|
||||
}
|
||||
}, 'seedhost-creds'));
|
||||
|
||||
// Delete seedhost credentials
|
||||
router.delete('/seedhost-creds', ctx.asyncHandler(async (req, res) => {
|
||||
const serviceId = req.query.serviceId;
|
||||
if (serviceId) {
|
||||
await ctx.credentialManager.delete(`seedhost.password.${serviceId}`);
|
||||
res.json({ success: true, message: `Password for ${serviceId} removed` });
|
||||
} else {
|
||||
await ctx.credentialManager.delete('seedhost.username');
|
||||
await ctx.credentialManager.delete('seedhost.password');
|
||||
res.json({ success: true, message: 'Seedhost credentials removed' });
|
||||
}
|
||||
}, 'delete-seedhost-creds'));
|
||||
|
||||
// ===== SERVICE CRUD ENDPOINTS =====
|
||||
|
||||
// List all services
|
||||
router.get('/services', ctx.asyncHandler(async (req, res) => {
|
||||
if (!await exists(ctx.SERVICES_FILE)) {
|
||||
return res.json([]);
|
||||
}
|
||||
const services = await ctx.servicesStateManager.read();
|
||||
const paginationParams = parsePaginationParams(req.query);
|
||||
const result = paginate(services, paginationParams);
|
||||
if (paginationParams) {
|
||||
res.json({ success: true, services: result.data, pagination: result.pagination });
|
||||
} else {
|
||||
res.json(result.data);
|
||||
}
|
||||
}, 'services-list'));
|
||||
|
||||
// Add a new service
|
||||
router.post('/services', ctx.asyncHandler(async (req, res) => {
|
||||
try {
|
||||
const { id, name, logo } = req.body;
|
||||
|
||||
if (!id || !name) {
|
||||
return ctx.errorResponse(res, 400, 'id and name are required');
|
||||
}
|
||||
|
||||
// Validate service configuration
|
||||
try {
|
||||
validateServiceConfig({ id, name });
|
||||
} catch (validationErr) {
|
||||
return ctx.errorResponse(res, 400, validationErr.message, { errors: validationErr.errors });
|
||||
}
|
||||
|
||||
await ctx.servicesStateManager.update(services => {
|
||||
// Check if service already exists
|
||||
if (services.find(s => s.id === id)) {
|
||||
throw new Error(`Service "${id}" already exists`);
|
||||
}
|
||||
|
||||
services.push({ id, name, logo: logo || `/assets/${id}.png` });
|
||||
return services;
|
||||
});
|
||||
|
||||
res.json({ success: true, message: `Service "${name}" added to dashboard` });
|
||||
} catch (error) {
|
||||
ctx.log.error('deploy', 'Error adding service', { error: error.message });
|
||||
if (error.message.includes('already exists')) {
|
||||
ctx.errorResponse(res, 409, ctx.safeErrorMessage(error));
|
||||
} else {
|
||||
ctx.errorResponse(res, 500, ctx.safeErrorMessage(error));
|
||||
}
|
||||
}
|
||||
}, 'services-update'));
|
||||
|
||||
// Bulk import/replace services (for dashboard import feature)
|
||||
router.put('/services', ctx.asyncHandler(async (req, res) => {
|
||||
const services = req.body;
|
||||
|
||||
if (!Array.isArray(services)) {
|
||||
return ctx.errorResponse(res, 400, 'Request body must be an array of services');
|
||||
}
|
||||
|
||||
for (const service of services) {
|
||||
if (!service.id || !service.name) {
|
||||
return ctx.errorResponse(res, 400, 'Each service must have id and name fields');
|
||||
}
|
||||
try {
|
||||
validateServiceConfig(service);
|
||||
} catch (validationErr) {
|
||||
return ctx.errorResponse(res, 400, `Invalid service "${service.id}": ${validationErr.message}`, { errors: validationErr.errors });
|
||||
}
|
||||
}
|
||||
|
||||
await ctx.servicesStateManager.write(services);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `Successfully imported ${services.length} services`,
|
||||
count: services.length
|
||||
});
|
||||
}, 'services-import'));
|
||||
|
||||
// Delete a service
|
||||
router.delete('/services/:id', ctx.asyncHandler(async (req, res) => {
|
||||
const { id } = req.params;
|
||||
|
||||
if (!await exists(ctx.SERVICES_FILE)) {
|
||||
return ctx.errorResponse(res, 404, 'No services found');
|
||||
}
|
||||
|
||||
let found = false;
|
||||
await ctx.servicesStateManager.update(services => {
|
||||
const initialLength = services.length;
|
||||
const filtered = services.filter(s => s.id !== id);
|
||||
found = filtered.length !== initialLength;
|
||||
return filtered;
|
||||
});
|
||||
|
||||
if (!found) {
|
||||
return ctx.errorResponse(res, 404, `Service "${id}" not found`);
|
||||
}
|
||||
|
||||
res.json({ success: true, message: `Service "${id}" removed from dashboard` });
|
||||
}, 'services-delete'));
|
||||
|
||||
  // Update an existing service's configuration (subdomain, port, IP, Tailscale
  // restriction). Performs three best-effort sub-updates — Caddyfile, DNS, and
  // the dashboard services file — and reports each outcome in `results` rather
  // than failing the whole request when one subsystem cannot be updated.
  router.post('/services/update', ctx.asyncHandler(async (req, res) => {
    const { oldSubdomain, newSubdomain, port, ip, tailscaleOnly } = req.body;

    if (!oldSubdomain || !newSubdomain) {
      return ctx.errorResponse(res, 400, 'oldSubdomain and newSubdomain are required');
    }

    if (!REGEX.SUBDOMAIN.test(oldSubdomain) || !REGEX.SUBDOMAIN.test(newSubdomain)) {
      return ctx.errorResponse(res, 400, '[DC-301] Invalid subdomain format');
    }

    if (port && !isValidPort(port)) {
      return ctx.errorResponse(res, 400, 'Invalid port number (must be 1-65535)');
    }

    if (ip && !validatorLib.isIP(ip)) {
      return ctx.errorResponse(res, 400, '[DC-210] Invalid IP address');
    }

    // Per-subsystem outcome strings, reported back to the caller as-is.
    const results = { dns: null, caddy: null, services: null };

    const oldDomain = ctx.buildDomain(oldSubdomain);
    const newDomain = ctx.buildDomain(newSubdomain);

    let content = await ctx.caddy.read();

    // Escape regex metacharacters so the domain is matched literally, then
    // match the entire site block, tolerating up to two levels of nested braces.
    const escapedOldDomain = oldDomain.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    const siteBlockRegex = new RegExp(
      `${escapedOldDomain}\\s*\\{[^{}]*(?:\\{[^{}]*(?:\\{[^{}]*\\}[^{}]*)*\\}[^{}]*)*\\}`,
      's'
    );

    const oldBlockMatch = content.match(siteBlockRegex);
    if (oldBlockMatch) {
      // Recover the current upstream (ip:port) from the existing block so that
      // fields omitted from the request keep their previous values.
      const proxyMatch = oldBlockMatch[0].match(/reverse_proxy\s+([^\s\n]+)/);
      const existingTarget = proxyMatch ? proxyMatch[1] : null;
      const [existingIp, existingPort] = existingTarget ? existingTarget.split(':') : ['localhost', '80'];

      const finalIp = ip || existingIp;
      const finalPort = port || existingPort;

      // Regenerate the site block from scratch rather than patching in place.
      const newConfig = ctx.caddy.generateConfig(newSubdomain, finalIp, finalPort, {
        tailscaleOnly: tailscaleOnly || false
      });

      const caddyResult = await ctx.caddy.modify(c => c.replace(siteBlockRegex, newConfig));
      results.caddy = caddyResult.success ? 'updated' : `config saved, reload failed: ${caddyResult.error}`;
    } else {
      results.caddy = 'old config not found';
    }

    // DNS only needs touching when the subdomain actually changed: delete the
    // old A record, then create one for the new name.
    if (oldSubdomain !== newSubdomain) {
      try {
        const dnsToken = ctx.dns.getToken();
        await ctx.dns.call(ctx.siteConfig.dnsServerIp, '/api/zones/records/delete', { token: dnsToken, domain: oldDomain, type: 'A' });
        await ctx.dns.createRecord(newSubdomain, ip || 'localhost');
        results.dns = 'updated';
      } catch (e) {
        // A DNS failure is reported but does not abort the remaining updates.
        results.dns = `failed: ${e.message}`;
      }
    } else {
      results.dns = 'unchanged';
    }

    // Keep the dashboard services file in step: the entry is keyed by the OLD
    // subdomain; merge in the new id/port/ip/tailscale flag if it exists.
    if (await exists(ctx.SERVICES_FILE)) {
      await ctx.servicesStateManager.update(services => {
        const serviceIndex = services.findIndex(s => s.id === oldSubdomain);
        if (serviceIndex !== -1) {
          services[serviceIndex] = {
            ...services[serviceIndex],
            id: newSubdomain,
            port: port || services[serviceIndex].port,
            ip: ip || services[serviceIndex].ip,
            tailscaleOnly: tailscaleOnly || false
          };
          results.services = 'updated';
        } else {
          results.services = 'not found';
        }
        return services;
      });
    }

    // success:true even when individual subsystems failed — callers must
    // inspect `results` for the per-subsystem outcome.
    res.json({
      success: true,
      message: `Service updated: ${oldSubdomain} -> ${newSubdomain}`,
      results
    });
  }, 'services-update'));
|
||||
|
||||
return router;
|
||||
};
|
||||
257
dashcaddy-api/routes/sites.js
Normal file
257
dashcaddy-api/routes/sites.js
Normal file
@@ -0,0 +1,257 @@
|
||||
const express = require('express');
const fs = require('fs');
const { CADDY, REGEX, LIMITS } = require('../constants');

// Site management routes: read/reload the Caddyfile, enumerate certificate
// authorities, and add/remove reverse-proxy site blocks. All Caddyfile
// mutations go through ctx.caddy.modify, which validates + reloads and rolls
// back on failure.
module.exports = function(ctx) {
  const router = express.Router();

  // Get Caddyfile contents
  router.get('/caddyfile', ctx.asyncHandler(async (req, res) => {
    const content = await ctx.caddy.read();
    res.json({ success: true, content });
  }, 'caddyfile-get'));

  // Get current Caddy config (from admin API)
  router.get('/caddy/config', ctx.asyncHandler(async (req, res) => {
    const response = await ctx.fetchT(`${ctx.caddy.adminUrl}/config/`);
    const config = await response.json();
    res.json({ success: true, config });
  }, 'caddy-config'));

  // Reload Caddy configuration via admin API: POSTs the on-disk Caddyfile to
  // the /load endpoint so the running config matches the file.
  router.post('/caddy/reload', ctx.asyncHandler(async (req, res) => {
    const caddyfileContent = await ctx.caddy.read();

    const response = await ctx.fetchT(`${ctx.caddy.adminUrl}/load`, {
      method: 'POST',
      headers: { 'Content-Type': CADDY.CONTENT_TYPE },
      body: caddyfileContent
    });

    if (!response.ok) {
      const errorText = await response.text();
      return ctx.errorResponse(res, 500, `[DC-303] Caddy reload failed: ${errorText}`);
    }

    res.json({ success: true, message: 'Caddy configuration reloaded successfully' });
  }, 'caddy-reload'));

  // Get Certificate Authorities from the Caddyfile. Parses (via regex, not a
  // real Caddyfile parser) three sources: pki { ca ... } blocks, global
  // acme_ca directives, and per-site `tls internal` usage.
  router.get('/caddy/cas', ctx.asyncHandler(async (req, res) => {
    const content = await ctx.caddy.read();
    const cas = [];

    // pki blocks may contain nested braces one level deep; the inner
    // alternation tolerates that.
    const pkiRegex = /pki\s*\{([^}]*(?:\{[^}]*\}[^}]*)*)\}/gs;
    let pkiMatch;
    while ((pkiMatch = pkiRegex.exec(content)) !== null) {
      const pkiBlock = pkiMatch[1];
      let caMatch;
      const caBlockRegex = /ca\s+(\S+)\s*\{([^}]*(?:\{[^}]*\}[^}]*)*)\}/gs;
      while ((caMatch = caBlockRegex.exec(pkiBlock)) !== null) {
        const caName = caMatch[1];
        const caBlock = caMatch[2];
        const ca = { id: caName, name: caName, root: {}, intermediate: {} };

        // Optional human-friendly display name.
        const nameMatch = /name\s+"([^"]+)"/.exec(caBlock);
        if (nameMatch) ca.name = nameMatch[1];

        const rootCnMatch = /root_cn\s+"([^"]+)"/.exec(caBlock);
        const intCnMatch = /intermediate_cn\s+"([^"]+)"/.exec(caBlock);
        if (rootCnMatch) ca.root_cn = rootCnMatch[1];
        if (intCnMatch) ca.intermediate_cn = intCnMatch[1];

        // Optional explicit root cert/key file paths.
        const rootMatch = /root\s*\{([^}]*)\}/s.exec(caBlock);
        if (rootMatch) {
          const rootBlock = rootMatch[1];
          const certMatch = /cert\s+(\S+)/.exec(rootBlock);
          const keyMatch = /key\s+(\S+)/.exec(rootBlock);
          if (certMatch) ca.root.cert = certMatch[1];
          if (keyMatch) ca.root.key = keyMatch[1];
        }

        // Optional explicit intermediate cert/key file paths.
        const intMatch = /intermediate\s*\{([^}]*)\}/s.exec(caBlock);
        if (intMatch) {
          const intBlock = intMatch[1];
          const certMatch = /cert\s+(\S+)/.exec(intBlock);
          const keyMatch = /key\s+(\S+)/.exec(intBlock);
          if (certMatch) ca.intermediate.cert = certMatch[1];
          if (keyMatch) ca.intermediate.key = keyMatch[1];
        }

        cas.push(ca);
      }
    }

    // Global ACME CA override (acme_ca directive in the global options block).
    const tlsGlobalRegex = /\{\s*acme_ca\s+(\S+)/g;
    let tlsMatch;
    while ((tlsMatch = tlsGlobalRegex.exec(content)) !== null) {
      cas.push({ name: 'acme', url: tlsMatch[1], type: 'acme' });
    }

    // Scan site blocks for `tls internal` usage; a bare `tls internal` maps to
    // Caddy's default 'local' CA, `tls internal { ca NAME }` names one explicitly.
    const siteBlocks = content.match(/[\w.-]+\s*\{[^}]*tls\s+[^}]*\}/gs) || [];
    const tlsInternalCAs = new Set();
    for (const block of siteBlocks) {
      const tlsInternalMatch = /tls\s+internal\s*\{[^}]*ca\s+(\S+)/s.exec(block);
      if (tlsInternalMatch) tlsInternalCAs.add(tlsInternalMatch[1]);
      if (/tls\s+internal(?:\s|$)/.test(block) && !/tls\s+internal\s*\{/.test(block)) {
        tlsInternalCAs.add('local');
      }
    }
    for (const caName of tlsInternalCAs) {
      // Only add CAs not already discovered via a pki block.
      if (!cas.find(c => c.name === caName)) {
        cas.push({ name: caName, type: 'internal', note: 'Referenced in tls directive' });
      }
    }
    if (cas.length === 0 && /tls\s+internal/.test(content)) {
      cas.push({ name: 'local', type: 'internal', note: 'Default Caddy internal CA' });
    }

    // Flatten to the shape the dashboard dropdown expects.
    const caList = cas.map(ca => ({
      id: ca.id || ca.name,
      name: ca.name,
      displayName: ca.name !== (ca.id || ca.name) ? `${ca.name} (${ca.id || ca.name})` : ca.name
    }));
    // NOTE(review): this endpoint uses { status: 'success' } while siblings use
    // { success: true } — presumably a consumer relies on this shape; confirm
    // before unifying.
    res.json({ status: 'success', data: { cas: caList } });
  }, 'caddy-get-cas'));

  // Remove a site from the Caddyfile (and reload). Returns 404 when no block
  // matched, 500 when removal succeeded but the reload failed (rolled back).
  router.delete('/site/:domain', ctx.asyncHandler(async (req, res) => {
    const { domain } = req.params;
    if (!domain) return ctx.errorResponse(res, 400, 'Domain is required');

    const result = await ctx.caddy.modify((content) => {
      // Escape regex metacharacters so the domain matches literally; the block
      // pattern tolerates up to two levels of nested braces.
      const escapedDomain = domain.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
      const siteBlockRegex = new RegExp(
        `\\n?${escapedDomain}\\s*\\{[^{}]*(?:\\{[^{}]*(?:\\{[^{}]*\\}[^{}]*)*\\}[^{}]*)*\\}\\s*`, 'g'
      );
      const modified = content.replace(siteBlockRegex, '\n');
      // Returning null signals "nothing matched" to ctx.caddy.modify.
      if (modified.length === content.length) return null;
      // Collapse runs of blank lines left behind by the removal.
      return modified.replace(/\n{3,}/g, '\n\n');
    });

    if (!result.success) {
      if (result.rolledBack) {
        return ctx.errorResponse(res, 500, `Removed "${domain}" but Caddy reload failed (rolled back): ${result.error}`);
      }
      return ctx.errorResponse(res, 404, `Site block for "${domain}" not found in Caddyfile`);
    }

    res.json({ success: true, message: `Site "${domain}" removed from Caddyfile and Caddy reloaded` });
  }, 'site-delete'));

  // Add a new site to the Caddyfile and reload. Either a full custom `config`
  // block or a default reverse_proxy + tls internal block is appended.
  router.post('/site', ctx.asyncHandler(async (req, res) => {
    const { domain, upstream, config } = req.body;
    if (!domain || !upstream) return ctx.errorResponse(res, 400, 'Domain and upstream are required');
    if (!REGEX.DOMAIN.test(domain)) return ctx.errorResponse(res, 400, '[DC-301] Invalid domain format');

    // Upstream must be host:port (hostname chars only, numeric port).
    const upstreamRegex = /^[a-z0-9.-]+:\d{1,5}$/i;
    if (!upstreamRegex.test(upstream)) return ctx.errorResponse(res, 400, 'Invalid upstream format. Use host:port');

    // Duplicate check against the current file before attempting the append.
    let content = await ctx.caddy.read();
    const escapedDomain = domain.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    const siteBlockRegex = new RegExp(`\\n?${escapedDomain}\\s*\\{`, 'g');
    if (siteBlockRegex.test(content)) {
      return ctx.errorResponse(res, 409, `[DC-302] Site block for "${domain}" already exists in Caddyfile`);
    }

    let newSiteBlock;
    if (config) {
      // Caller supplied a complete site block verbatim.
      newSiteBlock = `\n${config}\n`;
    } else {
      newSiteBlock = `\n${domain} {\n    reverse_proxy ${upstream}\n    tls internal\n}\n`;
    }

    const result = await ctx.caddy.modify(c => c + newSiteBlock);
    if (!result.success) {
      return ctx.errorResponse(res, 500, `[DC-303] Site added to Caddyfile but reload failed: ${result.error}`,
        result.rolledBack ? { note: 'Caddyfile was rolled back to previous state' } : {});
    }

    ctx.ok(res, { message: `Site "${domain}" added to Caddyfile and Caddy reloaded successfully` });
  }, 'site-add'));

  // Add an external-service reverse proxy to the Caddyfile: proxies a local
  // subdomain to an arbitrary external URL, optionally creating a DNS record
  // and a dashboard tile. URLs with a path use handle_path + redirect; bare
  // hosts get a plain reverse_proxy with an HTTPS transport.
  router.post('/site/external', ctx.asyncHandler(async (req, res) => {
    const { subdomain, externalUrl, preserveHost, followRedirects, sslType, caddyfilePath, reloadCaddy: shouldReload, createDns, serviceName, logo } = req.body;

    if (!subdomain || !externalUrl) {
      return ctx.errorResponse(res, 400, 'Subdomain and externalUrl are required');
    }
    if (!REGEX.SUBDOMAIN.test(subdomain)) {
      return ctx.errorResponse(res, 400, '[DC-301] Invalid subdomain format');
    }

    // validateURL throws on malformed/disallowed URLs.
    try {
      ctx.validateURL(externalUrl);
    } catch (validationErr) {
      return ctx.errorResponse(res, 400, validationErr.message);
    }

    const domain = ctx.buildDomain(subdomain);
    let dnsWarning = null;

    try {
      if (createDns) {
        // DNS creation is best-effort: a failure becomes a warning in the
        // response rather than aborting the proxy setup.
        try {
          await ctx.dns.createRecord(subdomain, ctx.siteConfig.dnsServerIp);
          ctx.log.info('dns', 'DNS record created for external proxy', { domain, ip: ctx.siteConfig.dnsServerIp });
        } catch (dnsError) {
          dnsWarning = `DNS creation failed: ${dnsError.message}. You may need to create the DNS record manually.`;
          ctx.log.warn('dns', 'DNS creation failed for external proxy', { domain, error: dnsError.message });
        }
      }

      // Empty sslConfig means "let Caddy use its default (Let's Encrypt)".
      const sslConfig = sslType === 'letsencrypt' ? '' : 'tls internal';
      const hostHeader = preserveHost ? `\n        header_up Host {upstream_hostport}` : '';

      const urlObj = new URL(externalUrl);
      const baseUrl = `${urlObj.protocol}//${urlObj.host}`;
      const urlPath = urlObj.pathname.replace(/\/$/, '');

      let proxyConfig = '';
      if (urlPath && urlPath !== '') {
        // External URL includes a path: serve it under the same path locally
        // and redirect everything else to that path.
        proxyConfig = `\n${domain} {\n    ${sslConfig}\n\n    handle_path ${urlPath}/* {\n        reverse_proxy ${baseUrl} {\n            transport http {\n                tls\n                tls_server_name ${urlObj.host}\n            }\n        }\n    }\n\n    handle {\n        redir ${urlPath}/ permanent\n    }\n}\n`;
      } else {
        proxyConfig = `\n${domain} {\n    ${sslConfig}\n\n    reverse_proxy ${externalUrl} {${hostHeader}\n        transport http {\n            tls\n        }\n    }\n}\n`;
      }

      const caddyResult = await ctx.caddy.modify(c => {
        // Returning null signals "already exists" (no change) to modify().
        const escapedDomain = domain.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
        if (new RegExp(`\\n?${escapedDomain}\\s*\\{`, 'g').test(c)) return null;
        return c + proxyConfig;
      });

      // Distinguish "duplicate" (no-op, not rolled back) from "reload failed".
      if (!caddyResult.success && !caddyResult.rolledBack) {
        return ctx.errorResponse(res, 409, `[DC-302] Site block for "${domain}" already exists in Caddyfile`);
      }
      if (!caddyResult.success) {
        return ctx.errorResponse(res, 500, `[DC-303] External proxy added but Caddy reload failed (rolled back): ${caddyResult.error}`);
      }

      // Optionally register a dashboard tile; failures are logged, not fatal.
      if (serviceName && logo) {
        try {
          await ctx.addServiceToConfig({
            id: subdomain, name: serviceName, logo,
            isExternal: true, externalUrl,
            deployedAt: new Date().toISOString()
          });
          ctx.log.info('deploy', 'Service added to dashboard', { subdomain });
        } catch (serviceError) {
          ctx.log.warn('deploy', 'Failed to add service to dashboard', { subdomain, error: serviceError.message });
        }
      }

      const response = {
        success: true,
        message: `External service proxy for ${domain} -> ${externalUrl} created${shouldReload ? ' and Caddy reloaded' : ''}`
      };
      if (dnsWarning) response.warning = dnsWarning;
      res.json(response);
    } catch (error) {
      ctx.errorResponse(res, 500, ctx.safeErrorMessage(error));
    }
  }, 'site-external'));

  return router;
};
|
||||
309
dashcaddy-api/routes/tailscale.js
Normal file
309
dashcaddy-api/routes/tailscale.js
Normal file
@@ -0,0 +1,309 @@
|
||||
const express = require('express');
|
||||
const fs = require('fs');
|
||||
const { TAILSCALE } = require('../constants');
|
||||
const { exists } = require('../fs-helpers');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// Get Tailscale status and configuration
|
||||
router.get('/status', ctx.asyncHandler(async (req, res) => {
|
||||
const status = await ctx.tailscale.getStatus();
|
||||
const localIP = await ctx.tailscale.getLocalIP();
|
||||
|
||||
if (!status) {
|
||||
return res.json({
|
||||
success: true,
|
||||
installed: false,
|
||||
connected: false,
|
||||
message: 'Tailscale not available or not running'
|
||||
});
|
||||
}
|
||||
|
||||
const devices = [];
|
||||
if (status.Peer) {
|
||||
for (const [id, peer] of Object.entries(status.Peer)) {
|
||||
devices.push({
|
||||
id,
|
||||
hostname: peer.HostName,
|
||||
ip: peer.TailscaleIPs?.[0],
|
||||
os: peer.OS,
|
||||
online: peer.Online,
|
||||
lastSeen: peer.LastSeen,
|
||||
user: peer.UserID
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
installed: true,
|
||||
connected: status.BackendState === 'Running',
|
||||
backendState: status.BackendState,
|
||||
self: {
|
||||
hostname: status.Self?.HostName,
|
||||
ip: localIP,
|
||||
tailnetName: status.MagicDNSSuffix,
|
||||
online: status.Self?.Online
|
||||
},
|
||||
config: ctx.tailscale.config,
|
||||
devices,
|
||||
deviceCount: devices.length
|
||||
});
|
||||
}, 'tailscale-status'));
|
||||
|
||||
// Update Tailscale configuration
|
||||
router.post('/config', ctx.asyncHandler(async (req, res) => {
|
||||
const { enabled, requireAuth, allowedTailnet } = req.body;
|
||||
|
||||
if (typeof enabled !== 'undefined') ctx.tailscale.config.enabled = enabled;
|
||||
if (typeof requireAuth !== 'undefined') ctx.tailscale.config.requireAuth = requireAuth;
|
||||
if (typeof allowedTailnet !== 'undefined') ctx.tailscale.config.allowedTailnet = allowedTailnet;
|
||||
|
||||
await ctx.tailscale.save();
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Tailscale configuration updated',
|
||||
config: ctx.tailscale.config
|
||||
});
|
||||
}, 'tailscale-config'));
|
||||
|
||||
// Check if a request is coming from Tailscale
|
||||
router.get('/check-connection', ctx.asyncHandler(async (req, res) => {
|
||||
const clientIP = req.ip || req.connection?.remoteAddress || '';
|
||||
const forwardedFor = req.headers['x-forwarded-for'];
|
||||
const realIP = req.headers['x-real-ip'];
|
||||
|
||||
const ipsToCheck = [clientIP, forwardedFor, realIP].filter(Boolean);
|
||||
const isTailscale = ipsToCheck.some(ip => ctx.tailscale.isTailscaleIP(ip.toString().split(',')[0].trim()));
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
isTailscale,
|
||||
clientIP,
|
||||
forwardedFor: forwardedFor || null,
|
||||
realIP: realIP || null
|
||||
});
|
||||
}, 'tailscale-check'));
|
||||
|
||||
// Get Tailscale device list
|
||||
router.get('/devices', ctx.asyncHandler(async (req, res) => {
|
||||
const status = await ctx.tailscale.getStatus();
|
||||
if (!status || !status.Peer) {
|
||||
return res.json({ success: true, devices: [] });
|
||||
}
|
||||
|
||||
const devices = [];
|
||||
for (const [id, peer] of Object.entries(status.Peer)) {
|
||||
if (peer.Online) {
|
||||
devices.push({
|
||||
id,
|
||||
hostname: peer.HostName,
|
||||
ip: peer.TailscaleIPs?.[0],
|
||||
os: peer.OS,
|
||||
user: peer.UserID
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
if (status.Self) {
|
||||
devices.unshift({
|
||||
id: 'self',
|
||||
hostname: status.Self.HostName,
|
||||
ip: status.Self.TailscaleIPs?.[0],
|
||||
os: status.Self.OS,
|
||||
user: status.Self.UserID,
|
||||
isSelf: true
|
||||
});
|
||||
}
|
||||
|
||||
res.json({ success: true, devices });
|
||||
}, 'tailscale-devices'));
|
||||
|
||||
// Toggle Tailscale-only mode for an existing service
|
||||
router.post('/protect-service', ctx.asyncHandler(async (req, res) => {
|
||||
const { subdomain, tailscaleOnly, allowedIPs } = req.body;
|
||||
|
||||
if (!subdomain) {
|
||||
return ctx.errorResponse(res, 400, 'subdomain is required');
|
||||
}
|
||||
|
||||
let content = await ctx.caddy.read();
|
||||
const domain = ctx.buildDomain(subdomain);
|
||||
|
||||
const blockRegex = new RegExp(`(${domain.replace('.', '\\.')}\\s*\\{[^}]*\\})`, 's');
|
||||
const match = content.match(blockRegex);
|
||||
|
||||
if (!match) {
|
||||
const { NotFoundError } = require('../errors');
|
||||
throw new NotFoundError(`Service ${domain} in Caddyfile`);
|
||||
}
|
||||
|
||||
const proxyMatch = match[0].match(/reverse_proxy\s+([^\s\n]+)/);
|
||||
if (!proxyMatch) {
|
||||
return ctx.errorResponse(res, 400, 'Could not parse service configuration');
|
||||
}
|
||||
|
||||
const [ip, port] = proxyMatch[1].split(':');
|
||||
|
||||
const newConfig = ctx.caddy.generateConfig(subdomain, ip, port || '80', {
|
||||
tailscaleOnly: tailscaleOnly !== false,
|
||||
allowedIPs: allowedIPs || []
|
||||
});
|
||||
|
||||
const caddyResult = await ctx.caddy.modify(c => c.replace(blockRegex, newConfig));
|
||||
if (!caddyResult.success) {
|
||||
return ctx.errorResponse(res, 500, `[DC-303] Failed to reload Caddy: ${caddyResult.error}`);
|
||||
}
|
||||
|
||||
if (await exists(ctx.SERVICES_FILE)) {
|
||||
await ctx.servicesStateManager.update(services => {
|
||||
const serviceIndex = services.findIndex(s => s.id === subdomain);
|
||||
if (serviceIndex !== -1) {
|
||||
services[serviceIndex].tailscaleOnly = tailscaleOnly !== false;
|
||||
}
|
||||
return services;
|
||||
});
|
||||
}
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: `Service ${domain} is now ${tailscaleOnly !== false ? 'protected by' : 'no longer restricted to'} Tailscale`,
|
||||
tailscaleOnly: tailscaleOnly !== false
|
||||
});
|
||||
}, 'tailscale-protect'));
|
||||
|
||||
  // ── Tailscale API Integration (OAuth 2.0) ──

  // Save OAuth client credentials, validating them first by (1) exchanging
  // them for a real access token and (2) calling the devices API to confirm
  // the required scopes. Only after both checks pass are the credentials
  // persisted, the config updated, and background sync started.
  router.post('/tailscale/oauth-config', ctx.asyncHandler(async (req, res) => {
    const { clientId, clientSecret, tailnet } = req.body;

    if (!clientId || !clientSecret || !tailnet) {
      return ctx.errorResponse(res, 400, 'clientId, clientSecret, and tailnet are required');
    }

    // Validate by exchanging for a real token (client_credentials grant).
    const tokenRes = await fetch(TAILSCALE.OAUTH_TOKEN_URL, {
      method: 'POST',
      headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
      body: `client_id=${encodeURIComponent(clientId)}&client_secret=${encodeURIComponent(clientSecret)}&grant_type=client_credentials`
    });

    if (!tokenRes.ok) {
      return ctx.errorResponse(res, 400, `OAuth validation failed: HTTP ${tokenRes.status}`);
    }

    const tokenData = await tokenRes.json();

    // Test with the device list to verify the token has the scopes we need.
    const testRes = await fetch(`${TAILSCALE.API_BASE}/tailnet/${encodeURIComponent(tailnet)}/devices`, {
      headers: { Authorization: `Bearer ${tokenData.access_token}` }
    });

    if (!testRes.ok) {
      return ctx.errorResponse(res, 400, `API test failed: HTTP ${testRes.status}. Check tailnet name and OAuth scopes (needs devices:read, acl:read).`);
    }

    // Store credentials securely (only after both validation steps passed).
    await ctx.credentialManager.store('tailscale.oauth.client_id', clientId, { provider: 'tailscale' });
    await ctx.credentialManager.store('tailscale.oauth.client_secret', clientSecret, { provider: 'tailscale', tailnet });

    // Update config; default allowedTailnet from the local daemon's MagicDNS
    // suffix when the operator has not set one explicitly.
    ctx.tailscale.config.oauthConfigured = true;
    ctx.tailscale.config.tailnet = tailnet;
    if (!ctx.tailscale.config.allowedTailnet) {
      const status = await ctx.tailscale.getStatus();
      if (status?.MagicDNSSuffix) {
        ctx.tailscale.config.allowedTailnet = status.MagicDNSSuffix;
      }
    }
    await ctx.tailscale.save();

    // Start background sync
    ctx.tailscale.startSync();

    // Trigger initial sync — best-effort; a failure is logged, not fatal,
    // since the background sync will retry.
    try {
      await ctx.tailscale.syncAPI();
    } catch (e) {
      ctx.log.warn('tailscale', 'Initial sync after OAuth config failed', { error: e.message });
    }

    res.json({ success: true, config: ctx.tailscale.config });
  }, 'tailscale-oauth-config'));
|
||||
|
||||
// Remove OAuth credentials and disable API sync
|
||||
router.delete('/tailscale/oauth-config', ctx.asyncHandler(async (req, res) => {
|
||||
await ctx.credentialManager.delete('tailscale.oauth.client_id');
|
||||
await ctx.credentialManager.delete('tailscale.oauth.client_secret');
|
||||
|
||||
ctx.tailscale.config.oauthConfigured = false;
|
||||
ctx.tailscale.config.tailnet = null;
|
||||
ctx.tailscale.config.lastSync = null;
|
||||
await ctx.tailscale.save();
|
||||
|
||||
ctx.tailscale.stopSync();
|
||||
|
||||
res.json({ success: true, message: 'Tailscale OAuth credentials removed' });
|
||||
}, 'tailscale-oauth-delete'));
|
||||
|
||||
// Get enriched device list from Tailscale API
|
||||
router.get('/tailscale/api-devices', ctx.asyncHandler(async (req, res) => {
|
||||
if (!ctx.tailscale.config.oauthConfigured) {
|
||||
return ctx.errorResponse(res, 400, 'Tailscale API not configured. Set up OAuth first.');
|
||||
}
|
||||
|
||||
// Return cached devices from last sync
|
||||
res.json({
|
||||
success: true,
|
||||
devices: ctx.tailscale.config.devices || [],
|
||||
lastSync: ctx.tailscale.config.lastSync
|
||||
});
|
||||
}, 'tailscale-api-devices'));
|
||||
|
||||
// Manually trigger an API sync
|
||||
router.post('/tailscale/sync', ctx.asyncHandler(async (req, res) => {
|
||||
if (!ctx.tailscale.config.oauthConfigured) {
|
||||
return ctx.errorResponse(res, 400, 'Tailscale API not configured. Set up OAuth first.');
|
||||
}
|
||||
|
||||
const devices = await ctx.tailscale.syncAPI();
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
devices: devices || [],
|
||||
lastSync: ctx.tailscale.config.lastSync
|
||||
});
|
||||
}, 'tailscale-sync'));
|
||||
|
||||
// Fetch ACL policy (read-only)
|
||||
router.get('/tailscale/acl', ctx.asyncHandler(async (req, res) => {
|
||||
const token = await ctx.tailscale.getAccessToken();
|
||||
const tailnet = ctx.tailscale.config.tailnet;
|
||||
if (!token || !tailnet) {
|
||||
return ctx.errorResponse(res, 400, 'Tailscale API not configured');
|
||||
}
|
||||
|
||||
const aclRes = await fetch(`${TAILSCALE.API_BASE}/tailnet/${encodeURIComponent(tailnet)}/acl`, {
|
||||
headers: { Authorization: `Bearer ${token}`, Accept: 'application/json' }
|
||||
});
|
||||
if (!aclRes.ok) {
|
||||
return ctx.errorResponse(res, aclRes.status, `ACL fetch failed: HTTP ${aclRes.status}`);
|
||||
}
|
||||
|
||||
const acl = await aclRes.json();
|
||||
|
||||
const summary = {
|
||||
groups: Object.keys(acl.groups || {}),
|
||||
tagOwners: Object.keys(acl.tagOwners || {}),
|
||||
aclRuleCount: (acl.acls || []).length,
|
||||
sshRuleCount: (acl.ssh || []).length
|
||||
};
|
||||
|
||||
res.json({ success: true, acl, summary });
|
||||
}, 'tailscale-acl'));
|
||||
|
||||
return router;
|
||||
};
|
||||
71
dashcaddy-api/routes/themes.js
Normal file
71
dashcaddy-api/routes/themes.js
Normal file
@@ -0,0 +1,71 @@
|
||||
const express = require('express');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
const THEMES_DIR = process.env.THEMES_DIR || path.join(path.dirname(process.env.SERVICES_FILE || '/app/services.json'), 'themes');
|
||||
|
||||
// Ensure themes directory exists
|
||||
if (!fs.existsSync(THEMES_DIR)) {
|
||||
fs.mkdirSync(THEMES_DIR, { recursive: true });
|
||||
}
|
||||
|
||||
function readAllThemes() {
|
||||
const themes = {};
|
||||
try {
|
||||
const files = fs.readdirSync(THEMES_DIR).filter(f => f.endsWith('.json'));
|
||||
for (const file of files) {
|
||||
const slug = path.basename(file, '.json');
|
||||
const data = JSON.parse(fs.readFileSync(path.join(THEMES_DIR, file), 'utf8'));
|
||||
themes[slug] = data;
|
||||
}
|
||||
} catch (e) {
|
||||
console.error('[Themes] Failed to read themes:', e.message);
|
||||
}
|
||||
return themes;
|
||||
}
|
||||
|
||||
// Get all user themes
|
||||
router.get('/themes', (req, res) => {
|
||||
res.json({ success: true, themes: readAllThemes() });
|
||||
});
|
||||
|
||||
// Save a theme (create or update)
|
||||
router.post('/themes/:slug', (req, res) => {
|
||||
const { slug } = req.params;
|
||||
const { name, colors, lightBg } = req.body;
|
||||
|
||||
if (!slug || !name || !colors) {
|
||||
return res.status(400).json({ success: false, error: 'Missing slug, name, or colors' });
|
||||
}
|
||||
|
||||
if (!/^[a-z0-9-]+$/.test(slug)) {
|
||||
return res.status(400).json({ success: false, error: 'Invalid slug format' });
|
||||
}
|
||||
|
||||
const themeData = { name, ...colors };
|
||||
if (lightBg) themeData.lightBg = true;
|
||||
fs.writeFileSync(path.join(THEMES_DIR, slug + '.json'), JSON.stringify(themeData, null, 2), 'utf8');
|
||||
|
||||
res.json({ success: true, message: name + ' theme saved' });
|
||||
});
|
||||
|
||||
// Delete a theme
|
||||
router.delete('/themes/:slug', (req, res) => {
|
||||
const { slug } = req.params;
|
||||
const filePath = path.join(THEMES_DIR, slug + '.json');
|
||||
|
||||
if (!fs.existsSync(filePath)) {
|
||||
return res.status(404).json({ success: false, error: 'Theme not found' });
|
||||
}
|
||||
|
||||
const data = JSON.parse(fs.readFileSync(filePath, 'utf8'));
|
||||
const name = data.name || slug;
|
||||
fs.unlinkSync(filePath);
|
||||
|
||||
res.json({ success: true, message: name + ' theme deleted' });
|
||||
});
|
||||
|
||||
return router;
|
||||
};
|
||||
63
dashcaddy-api/routes/updates.js
Normal file
63
dashcaddy-api/routes/updates.js
Normal file
@@ -0,0 +1,63 @@
|
||||
const express = require('express');
|
||||
const { paginate, parsePaginationParams } = require('../pagination');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// ===== UPDATE MANAGEMENT ENDPOINTS =====
|
||||
|
||||
// Check for updates
|
||||
router.post('/updates/check', ctx.asyncHandler(async (req, res) => {
|
||||
await ctx.updateManager.checkForUpdates();
|
||||
const updates = ctx.updateManager.getAvailableUpdates();
|
||||
res.json({ success: true, updates, count: updates.length });
|
||||
}, 'updates-check'));
|
||||
|
||||
// Get available updates
|
||||
router.get('/updates/available', ctx.asyncHandler(async (req, res) => {
|
||||
const updates = ctx.updateManager.getAvailableUpdates();
|
||||
const paginationParams = parsePaginationParams(req.query);
|
||||
const result = paginate(updates, paginationParams);
|
||||
res.json({ success: true, updates: result.data, count: updates.length, ...(result.pagination && { pagination: result.pagination }) });
|
||||
}, 'updates-available'));
|
||||
|
||||
// Update a container
|
||||
router.post('/updates/update/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
const result = await ctx.updateManager.updateContainer(req.params.containerId, req.body);
|
||||
res.json({ success: true, result });
|
||||
}, 'updates-update'));
|
||||
|
||||
// Rollback update
|
||||
router.post('/updates/rollback/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
await ctx.updateManager.rollbackUpdate(req.params.containerId);
|
||||
res.json({ success: true, message: 'Rollback completed' });
|
||||
}, 'updates-rollback'));
|
||||
|
||||
// Get update history
|
||||
router.get('/updates/history', ctx.asyncHandler(async (req, res) => {
|
||||
const paginationParams = parsePaginationParams(req.query);
|
||||
// When paginating, fetch all history so pagination can slice correctly
|
||||
const fetchLimit = paginationParams ? Number.MAX_SAFE_INTEGER : (parseInt(req.query.limit) || 50);
|
||||
const history = ctx.updateManager.getHistory(fetchLimit);
|
||||
const result = paginate(history, paginationParams);
|
||||
res.json({ success: true, history: result.data, ...(result.pagination && { pagination: result.pagination }) });
|
||||
}, 'updates-history'));
|
||||
|
||||
// Configure auto-update
|
||||
router.post('/updates/auto-update/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
ctx.updateManager.configureAutoUpdate(req.params.containerId, req.body);
|
||||
res.json({ success: true, message: 'Auto-update configured' });
|
||||
}, 'updates-auto-update'));
|
||||
|
||||
// Schedule update
|
||||
router.post('/updates/schedule/:containerId', ctx.asyncHandler(async (req, res) => {
|
||||
const { scheduledTime } = req.body;
|
||||
if (!scheduledTime) {
|
||||
return ctx.errorResponse(res, 400, 'scheduledTime is required');
|
||||
}
|
||||
ctx.updateManager.scheduleUpdate(req.params.containerId, scheduledTime);
|
||||
res.json({ success: true, message: 'Update scheduled', scheduledTime });
|
||||
}, 'updates-schedule'));
|
||||
|
||||
return router;
|
||||
};
|
||||
Reference in New Issue
Block a user