Initial commit: DashCaddy v1.0
Full codebase including API server (32 modules + routes), dashboard frontend, DashCA certificate distribution, installer script, and deployment skills.
This commit is contained in:
373
dashcaddy-api/routes/recipes/deploy.js
Normal file
373
dashcaddy-api/routes/recipes/deploy.js
Normal file
@@ -0,0 +1,373 @@
|
||||
const express = require('express');
|
||||
const crypto = require('crypto');
|
||||
const { DOCKER } = require('../../constants');
|
||||
|
||||
module.exports = function(ctx) {
|
||||
const router = express.Router();
|
||||
|
||||
/**
 * Deploy a recipe — creates multiple containers as a coordinated stack
 *
 * POST /api/recipes/deploy
 * Body: { recipeId, config: { selectedComponents, sharedConfig, componentOverrides } }
 *
 * Responds 400 for an unknown recipeId. Components are deployed in `order`;
 * a single component failure does not abort the run (partial success is
 * reported in the response), but if EVERY component fails the deployment is
 * rolled back — containers are force-removed and the recipe network is
 * removed only if this request created it — and a 500 is returned.
 */
router.post('/deploy', ctx.asyncHandler(async (req, res) => {
  // Default config to {} so a body without it fails validation, not destructuring.
  const { recipeId, config = {} } = req.body;
  const { RECIPE_TEMPLATES } = require('../../recipe-templates');

  const recipe = RECIPE_TEMPLATES[recipeId];
  if (!recipe) return ctx.errorResponse(res, 400, 'Invalid recipe template');

  ctx.log.info('recipe', 'Starting recipe deployment', { recipeId, name: recipe.name });

  // Determine which components to deploy
  const selectedIds = new Set(config.selectedComponents || recipe.components.filter(c => c.required).map(c => c.id));
  // Always include required components
  recipe.components.filter(c => c.required).forEach(c => selectedIds.add(c.id));

  const componentsToDeploy = recipe.components
    .filter(c => selectedIds.has(c.id))
    .sort((a, b) => a.order - b.order);

  // Generate shared passwords for the recipe (consistent across components)
  const generatedPasswords = {};
  generatedPasswords.default = crypto.randomBytes(24).toString('base64url');

  // Create Docker network if defined. networkCreated tracks whether WE
  // created it during this request, so failure cleanup never deletes a
  // pre-existing network that other deployments may depend on.
  let networkName = null;
  let networkCreated = false;
  if (recipe.network) {
    networkName = recipe.network.name;
    try {
      await ctx.docker.client.createNetwork({
        Name: networkName,
        Driver: recipe.network.driver || 'bridge',
        Labels: { 'sami.managed': 'true', 'sami.recipe': recipeId }
      });
      networkCreated = true;
      ctx.log.info('recipe', 'Created Docker network', { networkName });
    } catch (e) {
      // Network might already exist (optional chaining: e.message may be undefined)
      if (!e.message?.includes('already exists')) {
        throw new Error(`Failed to create network ${networkName}: ${e.message}`);
      }
      ctx.log.info('recipe', 'Docker network already exists', { networkName });
    }
  }

  const deployedComponents = [];
  const errors = [];

  try {
    for (const component of componentsToDeploy) {
      try {
        ctx.log.info('recipe', `Deploying component: ${component.id}`, {
          role: component.role,
          internal: component.internal || false
        });

        const result = await deployComponent(component, recipe, config, generatedPasswords, networkName);
        deployedComponents.push(result);

        ctx.log.info('recipe', `Component deployed: ${component.id}`, {
          containerId: result.containerId?.substring(0, 12)
        });
      } catch (componentError) {
        ctx.log.error('recipe', `Component failed: ${component.id}`, {
          error: componentError.message
        });
        errors.push({ componentId: component.id, role: component.role, error: componentError.message });
        // Continue deploying other components — partial success is better than total failure
      }
    }

    if (deployedComponents.length === 0) {
      throw new Error('All components failed to deploy');
    }

    // Register deployed (non-internal) components in services.json
    for (const deployed of deployedComponents) {
      if (!deployed.internal) {
        await ctx.addServiceToConfig({
          id: deployed.subdomain,
          name: deployed.name,
          logo: deployed.logo,
          containerId: deployed.containerId,
          appTemplate: deployed.templateRef || deployed.id,
          recipeId: recipeId,
          recipeRole: deployed.role,
          tailscaleOnly: config.sharedConfig?.tailscaleOnly || false,
          deployedAt: new Date().toISOString()
        });
      }
    }

    // Run auto-connect if available (only on a fully clean deployment)
    if (recipe.autoConnect?.enabled && errors.length === 0) {
      ctx.log.info('recipe', 'Running auto-connect for recipe', { recipeId });
      // Auto-connect will be handled asynchronously — don't block the response
      runAutoConnect(recipe, deployedComponents, config).catch(e => {
        ctx.log.warn('recipe', 'Auto-connect had errors', { recipeId, error: e.message });
      });
    }

    const response = {
      success: true,
      recipeId,
      recipeName: recipe.name,
      deployed: deployedComponents.map(c => ({
        id: c.id,
        role: c.role,
        containerId: c.containerId?.substring(0, 12),
        url: c.url,
        internal: c.internal
      })),
      errors: errors.length > 0 ? errors : undefined,
      message: errors.length > 0
        ? `${recipe.name} partially deployed (${deployedComponents.length}/${componentsToDeploy.length} components)`
        : `${recipe.name} deployed successfully!`,
      setupInstructions: recipe.setupInstructions
    };

    ctx.notification.send('deploymentSuccess', 'Recipe Deployed',
      `**${recipe.name}** recipe deployed (${deployedComponents.length} components).`,
      'success'
    );

    res.json(response);
  } catch (error) {
    ctx.log.error('recipe', 'Recipe deployment failed', { recipeId, error: error.message });

    // Cleanup: remove partially deployed containers
    for (const deployed of deployedComponents) {
      try {
        if (deployed.containerId) {
          const container = ctx.docker.client.getContainer(deployed.containerId);
          await container.remove({ force: true });
        }
      } catch (cleanupError) {
        ctx.log.warn('recipe', 'Cleanup failed for component', {
          componentId: deployed.id, error: cleanupError.message
        });
      }
    }

    // Cleanup network — but only if this request created it; a pre-existing
    // network may be in use by previously deployed recipes.
    if (networkName && networkCreated) {
      try {
        const network = ctx.docker.client.getNetwork(networkName);
        await network.remove();
      } catch (e) {
        ctx.log.warn('recipe', 'Network cleanup failed', { networkName, error: e.message });
      }
    }

    ctx.notification.send('deploymentFailed', 'Recipe Failed',
      `Failed to deploy **${recipe.name}**: ${error.message}`, 'error'
    );

    ctx.errorResponse(res, 500, error.message);
  }
}, 'recipe-deploy'));
|
||||
|
||||
/**
 * Deploy a single component of a recipe.
 *
 * Resolves the component's Docker config (from an app template via
 * `templateRef`, or inline `component.docker`), substitutes the `{{...}}`
 * template variables, remaps shared volume host paths, pulls the image
 * (falling back to a local copy if the pull fails), replaces any stale
 * container with the same name, starts the new container, attaches it to
 * the recipe network, and — for non-internal components with ports —
 * registers a Caddy reverse-proxy entry.
 *
 * @param {object} component    Recipe component definition
 * @param {object} recipe       Parent recipe template
 * @param {object} config       User-supplied deployment config
 * @param {object} passwords    Generated shared secrets ({ default })
 * @param {?string} networkName Recipe Docker network to join, if any
 * @returns {Promise<object>} { id, role, name, subdomain, containerId, internal, templateRef, logo, url }
 * @throws {Error} if the template reference is unknown, or the image cannot
 *                 be pulled and is not present locally
 */
async function deployComponent(component, recipe, config, passwords, networkName) {
  const sharedConfig = config.sharedConfig || {};
  const overrides = config.componentOverrides?.[component.id] || {};

  // Resolve the Docker config — either from templateRef or inline
  let dockerConfig;
  let templateName;
  let logo;

  if (component.templateRef) {
    const template = ctx.APP_TEMPLATES[component.templateRef];
    if (!template) throw new Error(`Template ${component.templateRef} not found`);
    dockerConfig = JSON.parse(JSON.stringify(template.docker)); // Deep clone
    templateName = template.name;
    logo = template.logo || `/assets/${component.templateRef}.png`;

    // Apply envOverrides from recipe
    if (component.envOverrides) {
      dockerConfig.environment = { ...dockerConfig.environment, ...component.envOverrides };
    }
  } else {
    // Inline docker config
    dockerConfig = JSON.parse(JSON.stringify(component.docker));
    templateName = component.role;
    logo = `/assets/${component.id}.png`;
  }

  // Values feeding the template-variable substitution below
  const subdomain = overrides.subdomain || component.subdomain || `${recipe.name.toLowerCase().replace(/\s+/g, '')}-${component.id}`;
  const port = overrides.port || component.defaultPort || null;
  const hostIp = sharedConfig.ip || 'host.docker.internal';

  // Recursively replace {{...}} placeholders in every string of the config.
  // The same generated password is used for all components of the recipe.
  const replaceVars = (obj) => {
    if (typeof obj === 'string') {
      return obj
        .replace(/\{\{GENERATED_PASSWORD\}\}/g, passwords.default)
        .replace(/\{\{PORT\}\}/g, String(port || ''))
        .replace(/\{\{HOST_IP\}\}/g, hostIp)
        .replace(/\{\{SUBDOMAIN\}\}/g, subdomain)
        .replace(/\{\{TIMEZONE\}\}/g, sharedConfig.timezone || 'UTC')
        .replace(/\{\{NEXTCLOUD_DOMAIN\}\}/g, `${subdomain}.${(ctx.siteConfig?.tld || '.home').replace(/^\./, '')}`);
    }
    if (Array.isArray(obj)) return obj.map(replaceVars);
    if (obj && typeof obj === 'object') {
      const result = {};
      for (const [k, v] of Object.entries(obj)) result[k] = replaceVars(v);
      return result;
    }
    return obj;
  };

  dockerConfig = replaceVars(dockerConfig);

  // Apply user-chosen host paths for the recipe's shared volumes
  if (recipe.sharedVolumes && dockerConfig.volumes) {
    for (const [key, volConfig] of Object.entries(recipe.sharedVolumes)) {
      const userPath = sharedConfig.volumes?.[key] || volConfig.defaultPath;
      if (volConfig.usedBy?.includes(component.id)) {
        // Find and update matching volume mounts
        dockerConfig.volumes = dockerConfig.volumes.map(vol => {
          if (vol.includes(volConfig.defaultPath) || vol.includes(`{{${key.toUpperCase()}_PATH}}`)) {
            // Keep the container path AND any mode suffix (e.g. ":ro") —
            // destructuring only the second segment would drop the suffix.
            const [, ...containerParts] = vol.split(':');
            return `${userPath}:${containerParts.join(':')}`;
          }
          return vol;
        });
      }
    }
  }

  // Container name carries the managed prefix so it can be identified later
  const containerName = `${DOCKER.CONTAINER_PREFIX}${subdomain}`;

  // Build container config
  const containerConfig = {
    Image: dockerConfig.image,
    name: containerName,
    ExposedPorts: {},
    HostConfig: {
      PortBindings: {},
      Binds: dockerConfig.volumes || [],
      RestartPolicy: { Name: 'unless-stopped' }
    },
    Env: Object.entries(dockerConfig.environment || {}).map(([k, v]) => `${k}=${v}`),
    Labels: {
      'sami.managed': 'true',
      'sami.app': component.templateRef || component.id,
      'sami.recipe': recipe.name.toLowerCase().replace(/\s+/g, '-'),
      'sami.recipe.component': component.id,
      'sami.recipe.role': component.role,
      'sami.subdomain': subdomain,
      'sami.deployed': new Date().toISOString()
    }
  };

  // Configure ports — mappings look like "host:container" or "host:container/proto"
  if (dockerConfig.ports && dockerConfig.ports.length > 0) {
    for (const portMapping of dockerConfig.ports) {
      const parts = portMapping.split(/[:/]/);
      if (parts.length >= 2) {
        const [hostPort, containerPort, protocol = 'tcp'] = parts;
        const key = `${containerPort}/${protocol}`;
        containerConfig.ExposedPorts[key] = {};
        containerConfig.HostConfig.PortBindings[key] = [{ HostPort: hostPort }];
      }
    }
  }

  // Pull image; tolerate pull failure if a local copy exists (offline hosts)
  try {
    ctx.log.info('recipe', `Pulling image: ${dockerConfig.image}`);
    await ctx.docker.pull(dockerConfig.image);
  } catch (e) {
    ctx.log.warn('recipe', `Pull failed, checking local: ${dockerConfig.image}`);
    const images = await ctx.docker.client.listImages({
      filters: { reference: [dockerConfig.image] }
    });
    if (images.length === 0) throw new Error(`Image not found: ${dockerConfig.image}`);
  }

  // Remove stale container with the same name, then wait briefly for the
  // Docker daemon to release the name before re-creating it
  try {
    const existing = ctx.docker.client.getContainer(containerName);
    await existing.inspect();
    await existing.remove({ force: true });
    await new Promise(r => setTimeout(r, 1000));
  } catch (e) {
    // Doesn't exist — normal
  }

  // Create and start container
  const container = await ctx.docker.client.createContainer(containerConfig);
  await container.start();

  // Connect to recipe network (best-effort: a failure here shouldn't kill
  // an otherwise running container)
  if (networkName) {
    try {
      const network = ctx.docker.client.getNetwork(networkName);
      await network.connect({ Container: container.id });
      ctx.log.info('recipe', `Connected ${component.id} to network ${networkName}`);
    } catch (e) {
      ctx.log.warn('recipe', `Failed to connect ${component.id} to network`, { error: e.message });
    }
  }

  // Add Caddy config for non-internal components with ports
  let url = null;
  if (!component.internal && dockerConfig.ports?.length > 0) {
    const primaryPort = port || dockerConfig.ports[0].split(/[:/]/)[0];
    const caddyConfig = ctx.caddy.generateConfig(
      subdomain, hostIp, primaryPort,
      { tailscaleOnly: sharedConfig.tailscaleOnly || false }
    );
    try {
      const helpers = require('../apps/helpers')(ctx);
      await helpers.addCaddyConfig(subdomain, caddyConfig);
      url = `https://${ctx.buildDomain(subdomain)}`;
    } catch (e) {
      // Best-effort: the container runs even if the reverse proxy entry failed
      ctx.log.warn('recipe', `Caddy config failed for ${component.id}`, { error: e.message });
    }
  }

  return {
    id: component.id,
    role: component.role,
    name: templateName,
    subdomain,
    containerId: container.id,
    internal: component.internal || false,
    templateRef: component.templateRef,
    logo,
    url
  };
}
|
||||
|
||||
/**
 * Run auto-connect steps after recipe deployment.
 *
 * Waits a fixed grace period for the freshly started services, then walks
 * the recipe's autoConnect steps one by one. Each step is isolated: a
 * failing step is logged as a warning and the remaining steps still run.
 *
 * @param {object} recipe             Recipe template (reads autoConnect.steps)
 * @param {object[]} deployedComponents Deployed component summaries (unused for now)
 * @param {object} config             Deployment config (unused for now)
 */
async function runAutoConnect(recipe, deployedComponents, config) {
  const steps = recipe.autoConnect?.steps;
  if (!steps) return;

  // Grace period so the services are fully ready before connecting them
  await new Promise((resolve) => setTimeout(resolve, 10000));

  for (const step of steps) {
    try {
      ctx.log.info('recipe', `Auto-connect step: ${step.action}`, { targets: step.targets });
      // These actions map to existing Smart Arr Connect functionality
      // The actual implementation will be wired when Smart Arr Connect helpers are available
      ctx.log.info('recipe', `Auto-connect step ${step.action} completed`);
    } catch (stepError) {
      ctx.log.warn('recipe', `Auto-connect step failed: ${step.action}`, { error: stepError.message });
    }
  }
}
|
||||
|
||||
return router;
|
||||
};
|
||||
Reference in New Issue
Block a user