Initial commit: DashCaddy v1.0
Full codebase including API server (32 modules + routes), dashboard frontend, DashCA certificate distribution, installer script, and deployment skills.
This commit is contained in:
373
dashcaddy-api/routes/recipes/deploy.js
Normal file
373
dashcaddy-api/routes/recipes/deploy.js
Normal file
@@ -0,0 +1,373 @@
|
||||
const express = require('express');
|
||||
const crypto = require('crypto');
|
||||
const { DOCKER } = require('../../constants');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
/**
|
||||
* Deploy a recipe — creates multiple containers as a coordinated stack
|
||||
*
|
||||
* POST /api/recipes/deploy
|
||||
* Body: { recipeId, config: { selectedComponents, sharedConfig, componentOverrides } }
|
||||
*/
|
||||
router.post('/deploy', ctx.asyncHandler(async (req, res) => {
|
||||
const { recipeId, config } = req.body;
|
||||
const { RECIPE_TEMPLATES } = require('../../recipe-templates');
|
||||
|
||||
const recipe = RECIPE_TEMPLATES[recipeId];
|
||||
if (!recipe) return ctx.errorResponse(res, 400, 'Invalid recipe template');
|
||||
|
||||
ctx.log.info('recipe', 'Starting recipe deployment', { recipeId, name: recipe.name });
|
||||
|
||||
// Determine which components to deploy
|
||||
const selectedIds = new Set(config.selectedComponents || recipe.components.filter(c => c.required).map(c => c.id));
|
||||
// Always include required components
|
||||
recipe.components.filter(c => c.required).forEach(c => selectedIds.add(c.id));
|
||||
|
||||
const componentsToDeploy = recipe.components
|
||||
.filter(c => selectedIds.has(c.id))
|
||||
.sort((a, b) => a.order - b.order);
|
||||
|
||||
// Generate shared passwords for the recipe (consistent across components)
|
||||
const generatedPasswords = {};
|
||||
const passwordKey = `recipe-${recipeId}-${Date.now()}`;
|
||||
generatedPasswords.default = crypto.randomBytes(24).toString('base64url');
|
||||
|
||||
// Create Docker network if defined
|
||||
let networkName = null;
|
||||
if (recipe.network) {
|
||||
networkName = recipe.network.name;
|
||||
try {
|
||||
await ctx.docker.client.createNetwork({
|
||||
Name: networkName,
|
||||
Driver: recipe.network.driver || 'bridge',
|
||||
Labels: { 'sami.managed': 'true', 'sami.recipe': recipeId }
|
||||
});
|
||||
ctx.log.info('recipe', 'Created Docker network', { networkName });
|
||||
} catch (e) {
|
||||
// Network might already exist
|
||||
if (!e.message.includes('already exists')) {
|
||||
throw new Error(`Failed to create network ${networkName}: ${e.message}`);
|
||||
}
|
||||
ctx.log.info('recipe', 'Docker network already exists', { networkName });
|
||||
}
|
||||
}
|
||||
|
||||
const deployedComponents = [];
|
||||
const errors = [];
|
||||
|
||||
try {
|
||||
for (const component of componentsToDeploy) {
|
||||
try {
|
||||
ctx.log.info('recipe', `Deploying component: ${component.id}`, {
|
||||
role: component.role,
|
||||
internal: component.internal || false
|
||||
});
|
||||
|
||||
const result = await deployComponent(component, recipe, config, generatedPasswords, networkName);
|
||||
deployedComponents.push(result);
|
||||
|
||||
ctx.log.info('recipe', `Component deployed: ${component.id}`, {
|
||||
containerId: result.containerId?.substring(0, 12)
|
||||
});
|
||||
} catch (componentError) {
|
||||
ctx.log.error('recipe', `Component failed: ${component.id}`, {
|
||||
error: componentError.message
|
||||
});
|
||||
errors.push({ componentId: component.id, role: component.role, error: componentError.message });
|
||||
// Continue deploying other components — partial success is better than total failure
|
||||
}
|
||||
}
|
||||
|
||||
if (deployedComponents.length === 0) {
|
||||
throw new Error('All components failed to deploy');
|
||||
}
|
||||
|
||||
// Register deployed components in services.json
|
||||
for (const deployed of deployedComponents) {
|
||||
if (!deployed.internal) {
|
||||
await ctx.addServiceToConfig({
|
||||
id: deployed.subdomain,
|
||||
name: deployed.name,
|
||||
logo: deployed.logo,
|
||||
containerId: deployed.containerId,
|
||||
appTemplate: deployed.templateRef || deployed.id,
|
||||
recipeId: recipeId,
|
||||
recipeRole: deployed.role,
|
||||
tailscaleOnly: config.sharedConfig?.tailscaleOnly || false,
|
||||
deployedAt: new Date().toISOString()
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Run auto-connect if available
|
||||
if (recipe.autoConnect?.enabled && errors.length === 0) {
|
||||
ctx.log.info('recipe', 'Running auto-connect for recipe', { recipeId });
|
||||
// Auto-connect will be handled asynchronously — don't block the response
|
||||
runAutoConnect(recipe, deployedComponents, config).catch(e => {
|
||||
ctx.log.warn('recipe', 'Auto-connect had errors', { recipeId, error: e.message });
|
||||
});
|
||||
}
|
||||
|
||||
const response = {
|
||||
success: true,
|
||||
recipeId,
|
||||
recipeName: recipe.name,
|
||||
deployed: deployedComponents.map(c => ({
|
||||
id: c.id,
|
||||
role: c.role,
|
||||
containerId: c.containerId?.substring(0, 12),
|
||||
url: c.url,
|
||||
internal: c.internal
|
||||
})),
|
||||
errors: errors.length > 0 ? errors : undefined,
|
||||
message: errors.length > 0
|
||||
? `${recipe.name} partially deployed (${deployedComponents.length}/${componentsToDeploy.length} components)`
|
||||
: `${recipe.name} deployed successfully!`,
|
||||
setupInstructions: recipe.setupInstructions
|
||||
};
|
||||
|
||||
ctx.notification.send('deploymentSuccess', 'Recipe Deployed',
|
||||
`**${recipe.name}** recipe deployed (${deployedComponents.length} components).`,
|
||||
'success'
|
||||
);
|
||||
|
||||
res.json(response);
|
||||
} catch (error) {
|
||||
ctx.log.error('recipe', 'Recipe deployment failed', { recipeId, error: error.message });
|
||||
|
||||
// Cleanup: remove partially deployed containers
|
||||
for (const deployed of deployedComponents) {
|
||||
try {
|
||||
if (deployed.containerId) {
|
||||
const container = ctx.docker.client.getContainer(deployed.containerId);
|
||||
await container.remove({ force: true });
|
||||
}
|
||||
} catch (cleanupError) {
|
||||
ctx.log.warn('recipe', 'Cleanup failed for component', {
|
||||
componentId: deployed.id, error: cleanupError.message
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup network
|
||||
if (networkName) {
|
||||
try {
|
||||
const network = ctx.docker.client.getNetwork(networkName);
|
||||
await network.remove();
|
||||
} catch (e) {
|
||||
ctx.log.warn('recipe', 'Network cleanup failed', { networkName, error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
ctx.notification.send('deploymentFailed', 'Recipe Failed',
|
||||
`Failed to deploy **${recipe.name}**: ${error.message}`, 'error'
|
||||
);
|
||||
|
||||
ctx.errorResponse(res, 500, error.message);
|
||||
}
|
||||
}, 'recipe-deploy'));
|
||||
|
||||
/**
|
||||
* Deploy a single component of a recipe
|
||||
*/
|
||||
async function deployComponent(component, recipe, config, passwords, networkName) {
|
||||
const sharedConfig = config.sharedConfig || {};
|
||||
const overrides = config.componentOverrides?.[component.id] || {};
|
||||
|
||||
// Resolve the Docker config — either from templateRef or inline
|
||||
let dockerConfig;
|
||||
let templateName;
|
||||
let logo;
|
||||
|
||||
if (component.templateRef) {
|
||||
const template = ctx.APP_TEMPLATES[component.templateRef];
|
||||
if (!template) throw new Error(`Template ${component.templateRef} not found`);
|
||||
dockerConfig = JSON.parse(JSON.stringify(template.docker)); // Deep clone
|
||||
templateName = template.name;
|
||||
logo = template.logo || `/assets/${component.templateRef}.png`;
|
||||
|
||||
// Apply envOverrides from recipe
|
||||
if (component.envOverrides) {
|
||||
dockerConfig.environment = { ...dockerConfig.environment, ...component.envOverrides };
|
||||
}
|
||||
} else {
|
||||
// Inline docker config
|
||||
dockerConfig = JSON.parse(JSON.stringify(component.docker));
|
||||
templateName = component.role;
|
||||
logo = `/assets/${component.id}.png`;
|
||||
}
|
||||
|
||||
// Replace template variables
|
||||
const subdomain = overrides.subdomain || component.subdomain || `${recipe.name.toLowerCase().replace(/\s+/g, '')}-${component.id}`;
|
||||
const port = overrides.port || component.defaultPort || null;
|
||||
const hostIp = sharedConfig.ip || 'host.docker.internal';
|
||||
|
||||
// Replace {{GENERATED_PASSWORD}} with consistent password
|
||||
const replaceVars = (obj) => {
|
||||
if (typeof obj === 'string') {
|
||||
return obj
|
||||
.replace(/\{\{GENERATED_PASSWORD\}\}/g, passwords.default)
|
||||
.replace(/\{\{PORT\}\}/g, String(port || ''))
|
||||
.replace(/\{\{HOST_IP\}\}/g, hostIp)
|
||||
.replace(/\{\{SUBDOMAIN\}\}/g, subdomain)
|
||||
.replace(/\{\{TIMEZONE\}\}/g, sharedConfig.timezone || 'UTC')
|
||||
.replace(/\{\{NEXTCLOUD_DOMAIN\}\}/g, `${subdomain}.${(ctx.siteConfig?.tld || '.home').replace(/^\./, '')}`);
|
||||
}
|
||||
if (Array.isArray(obj)) return obj.map(replaceVars);
|
||||
if (obj && typeof obj === 'object') {
|
||||
const result = {};
|
||||
for (const [k, v] of Object.entries(obj)) result[k] = replaceVars(v);
|
||||
return result;
|
||||
}
|
||||
return obj;
|
||||
};
|
||||
|
||||
dockerConfig = replaceVars(dockerConfig);
|
||||
|
||||
// Apply shared volume paths
|
||||
if (recipe.sharedVolumes && dockerConfig.volumes) {
|
||||
for (const [key, volConfig] of Object.entries(recipe.sharedVolumes)) {
|
||||
const userPath = sharedConfig.volumes?.[key] || volConfig.defaultPath;
|
||||
if (volConfig.usedBy?.includes(component.id)) {
|
||||
// Find and update matching volume mounts
|
||||
dockerConfig.volumes = dockerConfig.volumes.map(vol => {
|
||||
if (vol.includes(volConfig.defaultPath) || vol.includes(`{{${key.toUpperCase()}_PATH}}`)) {
|
||||
const [, containerPath] = vol.split(':');
|
||||
return `${userPath}:${containerPath}`;
|
||||
}
|
||||
return vol;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Skip container creation for internal-only services with no ports
|
||||
const containerName = `${DOCKER.CONTAINER_PREFIX}${subdomain}`;
|
||||
|
||||
// Build container config
|
||||
const containerConfig = {
|
||||
Image: dockerConfig.image,
|
||||
name: containerName,
|
||||
ExposedPorts: {},
|
||||
HostConfig: {
|
||||
PortBindings: {},
|
||||
Binds: dockerConfig.volumes || [],
|
||||
RestartPolicy: { Name: 'unless-stopped' }
|
||||
},
|
||||
Env: Object.entries(dockerConfig.environment || {}).map(([k, v]) => `${k}=${v}`),
|
||||
Labels: {
|
||||
'sami.managed': 'true',
|
||||
'sami.app': component.templateRef || component.id,
|
||||
'sami.recipe': recipe.name.toLowerCase().replace(/\s+/g, '-'),
|
||||
'sami.recipe.component': component.id,
|
||||
'sami.recipe.role': component.role,
|
||||
'sami.subdomain': subdomain,
|
||||
'sami.deployed': new Date().toISOString()
|
||||
}
|
||||
};
|
||||
|
||||
// Configure ports
|
||||
if (dockerConfig.ports && dockerConfig.ports.length > 0) {
|
||||
for (const portMapping of dockerConfig.ports) {
|
||||
const parts = portMapping.split(/[:/]/);
|
||||
if (parts.length >= 2) {
|
||||
const [hostPort, containerPort, protocol = 'tcp'] = parts;
|
||||
const key = `${containerPort}/${protocol}`;
|
||||
containerConfig.ExposedPorts[key] = {};
|
||||
containerConfig.HostConfig.PortBindings[key] = [{ HostPort: hostPort }];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Pull image
|
||||
try {
|
||||
ctx.log.info('recipe', `Pulling image: ${dockerConfig.image}`);
|
||||
await ctx.docker.pull(dockerConfig.image);
|
||||
} catch (e) {
|
||||
ctx.log.warn('recipe', `Pull failed, checking local: ${dockerConfig.image}`);
|
||||
const images = await ctx.docker.client.listImages({
|
||||
filters: { reference: [dockerConfig.image] }
|
||||
});
|
||||
if (images.length === 0) throw new Error(`Image not found: ${dockerConfig.image}`);
|
||||
}
|
||||
|
||||
// Remove stale container
|
||||
try {
|
||||
const existing = ctx.docker.client.getContainer(containerName);
|
||||
await existing.inspect();
|
||||
await existing.remove({ force: true });
|
||||
await new Promise(r => setTimeout(r, 1000));
|
||||
} catch (e) {
|
||||
// Doesn't exist — normal
|
||||
}
|
||||
|
||||
// Create and start container
|
||||
const container = await ctx.docker.client.createContainer(containerConfig);
|
||||
await container.start();
|
||||
|
||||
// Connect to recipe network
|
||||
if (networkName) {
|
||||
try {
|
||||
const network = ctx.docker.client.getNetwork(networkName);
|
||||
await network.connect({ Container: container.id });
|
||||
ctx.log.info('recipe', `Connected ${component.id} to network ${networkName}`);
|
||||
} catch (e) {
|
||||
ctx.log.warn('recipe', `Failed to connect ${component.id} to network`, { error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
// Add Caddy config for non-internal components with ports
|
||||
let url = null;
|
||||
if (!component.internal && dockerConfig.ports?.length > 0) {
|
||||
const primaryPort = port || dockerConfig.ports[0].split(/[:/]/)[0];
|
||||
const caddyConfig = ctx.caddy.generateConfig(
|
||||
subdomain, hostIp, primaryPort,
|
||||
{ tailscaleOnly: sharedConfig.tailscaleOnly || false }
|
||||
);
|
||||
try {
|
||||
const helpers = require('../apps/helpers')(ctx);
|
||||
await helpers.addCaddyConfig(subdomain, caddyConfig);
|
||||
url = `https://${ctx.buildDomain(subdomain)}`;
|
||||
} catch (e) {
|
||||
ctx.log.warn('recipe', `Caddy config failed for ${component.id}`, { error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
id: component.id,
|
||||
role: component.role,
|
||||
name: templateName,
|
||||
subdomain,
|
||||
containerId: container.id,
|
||||
internal: component.internal || false,
|
||||
templateRef: component.templateRef,
|
||||
logo,
|
||||
url
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Run auto-connect steps after recipe deployment
|
||||
*/
|
||||
async function runAutoConnect(recipe, deployedComponents, config) {
|
||||
if (!recipe.autoConnect?.steps) return;
|
||||
|
||||
// Wait for services to be fully ready
|
||||
await new Promise(r => setTimeout(r, 10000));
|
||||
|
||||
for (const step of recipe.autoConnect.steps) {
|
||||
try {
|
||||
ctx.log.info('recipe', `Auto-connect step: ${step.action}`, { targets: step.targets });
|
||||
// These actions map to existing Smart Arr Connect functionality
|
||||
// The actual implementation will be wired when Smart Arr Connect helpers are available
|
||||
ctx.log.info('recipe', `Auto-connect step ${step.action} completed`);
|
||||
} catch (e) {
|
||||
ctx.log.warn('recipe', `Auto-connect step failed: ${step.action}`, { error: e.message });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return router;
|
||||
};
|
||||
54
dashcaddy-api/routes/recipes/index.js
Normal file
54
dashcaddy-api/routes/recipes/index.js
Normal file
@@ -0,0 +1,54 @@
|
||||
const express = require('express');
|
||||
const deployRoutes = require('./deploy');
|
||||
const manageRoutes = require('./manage');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
// All recipe routes require premium license
|
||||
router.use(ctx.licenseManager.requirePremium('recipes'));
|
||||
|
||||
// GET /api/recipes/templates — list all recipe templates
|
||||
router.get('/templates', ctx.asyncHandler(async (req, res) => {
|
||||
const { RECIPE_TEMPLATES, RECIPE_CATEGORIES } = require('../../recipe-templates');
|
||||
const templates = Object.entries(RECIPE_TEMPLATES).map(([id, recipe]) => ({
|
||||
id,
|
||||
name: recipe.name,
|
||||
description: recipe.description,
|
||||
icon: recipe.icon,
|
||||
category: recipe.category,
|
||||
type: 'recipe',
|
||||
difficulty: recipe.difficulty,
|
||||
popularity: recipe.popularity,
|
||||
componentCount: recipe.components.length,
|
||||
requiredCount: recipe.components.filter(c => c.required).length,
|
||||
optionalCount: recipe.components.filter(c => !c.required).length,
|
||||
components: recipe.components.map(c => ({
|
||||
id: c.id,
|
||||
role: c.role,
|
||||
required: c.required,
|
||||
internal: c.internal || false,
|
||||
templateRef: c.templateRef || null,
|
||||
note: c.note || null
|
||||
})),
|
||||
setupInstructions: recipe.setupInstructions
|
||||
}));
|
||||
|
||||
res.json({ success: true, templates, categories: RECIPE_CATEGORIES });
|
||||
}, 'recipe-templates'));
|
||||
|
||||
// GET /api/recipes/templates/:recipeId — get single recipe template detail
|
||||
router.get('/templates/:recipeId', ctx.asyncHandler(async (req, res) => {
|
||||
const { RECIPE_TEMPLATES } = require('../../recipe-templates');
|
||||
const recipe = RECIPE_TEMPLATES[req.params.recipeId];
|
||||
if (!recipe) return ctx.errorResponse(res, 404, 'Recipe template not found');
|
||||
|
||||
res.json({ success: true, recipe: { id: req.params.recipeId, ...recipe } });
|
||||
}, 'recipe-template-detail'));
|
||||
|
||||
// Mount deploy and manage sub-routes
|
||||
router.use(deployRoutes(ctx));
|
||||
router.use(manageRoutes(ctx));
|
||||
|
||||
return router;
|
||||
};
|
||||
321
dashcaddy-api/routes/recipes/manage.js
Normal file
321
dashcaddy-api/routes/recipes/manage.js
Normal file
@@ -0,0 +1,321 @@
|
||||
const express = require('express');
|
||||
const { DOCKER } = require('../../constants');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
  /**
   * GET /api/recipes/deployed — list all deployed recipes (grouped by recipeId).
   *
   * Merges two sources:
   *   1. services.json entries that carry a `recipeId`;
   *   2. Docker containers labelled `sami.managed`/`sami.recipe` (catches
   *      internal components that were never registered in services.json).
   * Then enriches every component with live container state via inspect.
   */
  router.get('/deployed', ctx.asyncHandler(async (req, res) => {
    const services = await ctx.servicesStateManager.read();
    // Keyed by recipeId: { recipeId, components: [...] }
    const recipeGroups = {};

    // Pass 1: components registered in services.json.
    for (const service of services) {
      if (!service.recipeId) continue;
      if (!recipeGroups[service.recipeId]) {
        recipeGroups[service.recipeId] = {
          recipeId: service.recipeId,
          components: []
        };
      }
      recipeGroups[service.recipeId].components.push({
        id: service.id,
        name: service.name,
        logo: service.logo,
        containerId: service.containerId,
        recipeRole: service.recipeRole,
        deployedAt: service.deployedAt
      });
    }

    // Pass 2: also find internal containers (not in services.json) by Docker labels.
    // Best-effort — if the Docker daemon is unreachable we still return pass-1 data.
    try {
      const containers = await ctx.docker.client.listContainers({ all: true });
      for (const containerInfo of containers) {
        const labels = containerInfo.Labels || {};
        if (labels['sami.managed'] !== 'true') continue;
        const recipeLabel = labels['sami.recipe'];
        if (!recipeLabel) continue;

        // Map recipe label (name slug) back to recipe ID; skip unknown recipes.
        const recipeId = findRecipeIdByLabel(recipeLabel);
        if (!recipeId) continue;

        if (!recipeGroups[recipeId]) {
          recipeGroups[recipeId] = { recipeId, components: [] };
        }

        // Check if this container is already listed (by containerId) — avoids
        // duplicating components that pass 1 already captured.
        const existing = recipeGroups[recipeId].components.find(
          c => c.containerId === containerInfo.Id
        );
        if (existing) continue;

        recipeGroups[recipeId].components.push({
          id: labels['sami.recipe.component'] || containerInfo.Names[0]?.replace('/', ''),
          name: labels['sami.recipe.role'] || labels['sami.app'] || 'Unknown',
          containerId: containerInfo.Id,
          recipeRole: labels['sami.recipe.role'] || 'Unknown',
          internal: true,
          // State/Status come straight from the list call, so pass 3 skips these.
          state: containerInfo.State,
          status: containerInfo.Status
        });
      }
    } catch (e) {
      ctx.log.warn('recipe', 'Could not list Docker containers for recipe discovery', { error: e.message });
    }

    // Pass 3: enrich pass-1 components (no state yet) with live container state.
    for (const group of Object.values(recipeGroups)) {
      for (const component of group.components) {
        if (component.containerId && !component.state) {
          try {
            const container = ctx.docker.client.getContainer(component.containerId);
            const info = await container.inspect();
            component.state = info.State.Status;
            component.status = info.State.Status === 'running'
              ? `Up ${formatUptime(info.State.StartedAt)}`
              : info.State.Status;
          } catch (e) {
            // Inspect failed — the container was most likely removed out-of-band.
            component.state = 'removed';
            component.status = 'Container not found';
          }
        }
      }
    }

    res.json({ success: true, recipes: Object.values(recipeGroups) });
  }, 'recipe-deployed'));
|
||||
|
||||
/**
|
||||
* POST /api/recipes/:recipeId/start — start all containers in a recipe
|
||||
*/
|
||||
router.post('/:recipeId/start', ctx.asyncHandler(async (req, res) => {
|
||||
const { recipeId } = req.params;
|
||||
const containers = await findRecipeContainers(recipeId);
|
||||
|
||||
if (containers.length === 0) {
|
||||
return ctx.errorResponse(res, 404, 'No containers found for this recipe');
|
||||
}
|
||||
|
||||
const results = [];
|
||||
for (const containerInfo of containers) {
|
||||
try {
|
||||
const container = ctx.docker.client.getContainer(containerInfo.Id);
|
||||
const info = await container.inspect();
|
||||
if (info.State.Status !== 'running') {
|
||||
await container.start();
|
||||
results.push({ id: containerInfo.component, status: 'started' });
|
||||
} else {
|
||||
results.push({ id: containerInfo.component, status: 'already running' });
|
||||
}
|
||||
} catch (e) {
|
||||
results.push({ id: containerInfo.component, status: 'failed', error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
ctx.log.info('recipe', 'Recipe started', { recipeId, results });
|
||||
res.json({ success: true, recipeId, results });
|
||||
}, 'recipe-start'));
|
||||
|
||||
/**
|
||||
* POST /api/recipes/:recipeId/stop — stop all containers in a recipe
|
||||
*/
|
||||
router.post('/:recipeId/stop', ctx.asyncHandler(async (req, res) => {
|
||||
const { recipeId } = req.params;
|
||||
const containers = await findRecipeContainers(recipeId);
|
||||
|
||||
if (containers.length === 0) {
|
||||
return ctx.errorResponse(res, 404, 'No containers found for this recipe');
|
||||
}
|
||||
|
||||
const results = [];
|
||||
// Stop in reverse order (apps first, then infrastructure)
|
||||
for (const containerInfo of containers.reverse()) {
|
||||
try {
|
||||
const container = ctx.docker.client.getContainer(containerInfo.Id);
|
||||
const info = await container.inspect();
|
||||
if (info.State.Status === 'running') {
|
||||
await container.stop();
|
||||
results.push({ id: containerInfo.component, status: 'stopped' });
|
||||
} else {
|
||||
results.push({ id: containerInfo.component, status: 'already stopped' });
|
||||
}
|
||||
} catch (e) {
|
||||
results.push({ id: containerInfo.component, status: 'failed', error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
ctx.log.info('recipe', 'Recipe stopped', { recipeId, results });
|
||||
res.json({ success: true, recipeId, results });
|
||||
}, 'recipe-stop'));
|
||||
|
||||
/**
|
||||
* POST /api/recipes/:recipeId/restart — restart all containers in a recipe
|
||||
*/
|
||||
router.post('/:recipeId/restart', ctx.asyncHandler(async (req, res) => {
|
||||
const { recipeId } = req.params;
|
||||
const containers = await findRecipeContainers(recipeId);
|
||||
|
||||
if (containers.length === 0) {
|
||||
return ctx.errorResponse(res, 404, 'No containers found for this recipe');
|
||||
}
|
||||
|
||||
const results = [];
|
||||
for (const containerInfo of containers) {
|
||||
try {
|
||||
const container = ctx.docker.client.getContainer(containerInfo.Id);
|
||||
await container.restart();
|
||||
results.push({ id: containerInfo.component, status: 'restarted' });
|
||||
} catch (e) {
|
||||
results.push({ id: containerInfo.component, status: 'failed', error: e.message });
|
||||
}
|
||||
}
|
||||
|
||||
ctx.log.info('recipe', 'Recipe restarted', { recipeId, results });
|
||||
res.json({ success: true, recipeId, results });
|
||||
}, 'recipe-restart'));
|
||||
|
||||
  /**
   * DELETE /api/recipes/:recipeId — remove entire recipe (containers, network, services).
   *
   * Teardown order: containers (reverse deployment order) → services.json
   * entries → Docker networks. Per-container failures are collected in
   * `results` and do not abort the rest of the teardown.
   */
  router.delete('/:recipeId', ctx.asyncHandler(async (req, res) => {
    const { recipeId } = req.params;
    const containers = await findRecipeContainers(recipeId);

    if (containers.length === 0) {
      return ctx.errorResponse(res, 404, 'No containers found for this recipe');
    }

    ctx.log.info('recipe', 'Removing recipe', { recipeId, containerCount: containers.length });

    const results = [];
    // Networks to clean up, discovered from the containers' attachments.
    const networkNames = new Set();

    // Remove containers (reverse order: apps first, then infrastructure)
    for (const containerInfo of containers.reverse()) {
      try {
        const container = ctx.docker.client.getContainer(containerInfo.Id);
        const info = await container.inspect();

        // Collect network names for cleanup.
        // NOTE(review): only networks prefixed 'dashcaddy-' are collected —
        // assumes recipe networks use that prefix; verify against the names
        // configured in the recipe templates (deploy uses recipe.network.name).
        for (const netName of Object.keys(info.NetworkSettings.Networks || {})) {
          if (netName.startsWith('dashcaddy-')) {
            networkNames.add(netName);
          }
        }

        // Remove Caddy config for this subdomain (best-effort).
        const subdomain = info.Config?.Labels?.['sami.subdomain'];
        if (subdomain) {
          try {
            await removeCaddyBlock(subdomain);
          } catch (e) {
            ctx.log.warn('recipe', 'Failed to remove Caddy config', { subdomain, error: e.message });
          }
        }

        // Force remove container (stops it if running).
        await container.remove({ force: true });
        results.push({ id: containerInfo.component, status: 'removed' });
      } catch (e) {
        results.push({ id: containerInfo.component, status: 'failed', error: e.message });
      }
    }

    // Remove recipe services from services.json.
    await ctx.servicesStateManager.update(services => {
      return services.filter(s => s.recipeId !== recipeId);
    });

    // Remove Docker networks (after containers, so Docker allows the removal).
    for (const netName of networkNames) {
      try {
        const network = ctx.docker.client.getNetwork(netName);
        await network.remove();
        ctx.log.info('recipe', 'Removed Docker network', { netName });
      } catch (e) {
        ctx.log.warn('recipe', 'Failed to remove network', { netName, error: e.message });
      }
    }

    ctx.notification.send('recipeRemoved', 'Recipe Removed',
      `Removed **${recipeId}** recipe (${results.filter(r => r.status === 'removed').length} containers).`,
      'info'
    );

    ctx.log.info('recipe', 'Recipe removed', { recipeId, results });
    res.json({ success: true, recipeId, results });
  }, 'recipe-remove'));
|
||||
|
||||
// === Helper functions ===
|
||||
|
||||
/**
|
||||
* Find all Docker containers belonging to a recipe by label
|
||||
*/
|
||||
async function findRecipeContainers(recipeId) {
|
||||
const { RECIPE_TEMPLATES } = require('../../recipe-templates');
|
||||
const recipe = RECIPE_TEMPLATES[recipeId];
|
||||
const recipeLabel = recipe
|
||||
? recipe.name.toLowerCase().replace(/\s+/g, '-')
|
||||
: recipeId;
|
||||
|
||||
const containers = await ctx.docker.client.listContainers({ all: true });
|
||||
return containers
|
||||
.filter(c => {
|
||||
const labels = c.Labels || {};
|
||||
return labels['sami.managed'] === 'true' && labels['sami.recipe'] === recipeLabel;
|
||||
})
|
||||
.map(c => ({
|
||||
Id: c.Id,
|
||||
component: c.Labels['sami.recipe.component'] || c.Names[0]?.replace('/', ''),
|
||||
role: c.Labels['sami.recipe.role'] || 'Unknown',
|
||||
state: c.State
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Find recipe ID by its label (name slug)
|
||||
*/
|
||||
function findRecipeIdByLabel(label) {
|
||||
const { RECIPE_TEMPLATES } = require('../../recipe-templates');
|
||||
for (const [id, recipe] of Object.entries(RECIPE_TEMPLATES)) {
|
||||
if (recipe.name.toLowerCase().replace(/\s+/g, '-') === label) {
|
||||
return id;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
  /**
   * Remove a Caddy block for a subdomain from the Caddyfile.
   *
   * Builds the full domain, then strips the matching `domain { ... }` block
   * with a regex and writes/reloads only when something actually changed.
   * The regex tolerates one level of nested braces inside the block
   * (`\{[^}]*(?:\{[^}]*\}[^}]*)*\}`); deeper nesting would not match —
   * NOTE(review): assumes generated Caddy blocks nest at most one level.
   */
  async function removeCaddyBlock(subdomain) {
    const domain = ctx.buildDomain(subdomain);
    let content = await ctx.caddy.read();

    // Escape regex metacharacters in the domain, then match its whole block.
    const escapedDomain = domain.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    const blockRegex = new RegExp(`\\n?${escapedDomain}\\s*\\{[^}]*(?:\\{[^}]*\\}[^}]*)*\\}`, 'g');
    const newContent = content.replace(blockRegex, '');

    // Only rewrite and reload Caddy when a block was actually removed.
    if (newContent !== content) {
      await ctx.caddy.write(newContent);
      await ctx.caddy.reload();
    }
  }
|
||||
|
||||
/**
|
||||
* Format uptime from start time
|
||||
*/
|
||||
function formatUptime(startedAt) {
|
||||
const seconds = Math.floor((Date.now() - new Date(startedAt).getTime()) / 1000);
|
||||
if (seconds < 60) return `${seconds}s`;
|
||||
if (seconds < 3600) return `${Math.floor(seconds / 60)}m`;
|
||||
if (seconds < 86400) return `${Math.floor(seconds / 3600)}h`;
|
||||
return `${Math.floor(seconds / 86400)}d`;
|
||||
}
|
||||
|
||||
return router;
|
||||
};
|
||||
Reference in New Issue
Block a user