Initial commit: DashCaddy v1.0
Full codebase including API server (32 modules + routes), dashboard frontend, DashCA certificate distribution, installer script, and deployment skills.
This commit is contained in:
911
dashcaddy-api/update-manager.js
Normal file
911
dashcaddy-api/update-manager.js
Normal file
@@ -0,0 +1,911 @@
|
||||
/**
|
||||
* Update Management Module
|
||||
* Checks for Docker image updates, manages update scheduling,
|
||||
* and provides rollback capabilities
|
||||
*/
|
||||
|
||||
const Docker = require('dockerode');
|
||||
const EventEmitter = require('events');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const https = require('https');
|
||||
|
||||
// Shared dockerode client; talks to the local Docker socket by default.
const docker = new Docker();

// Persistence locations and polling cadence, all overridable via environment.
const UPDATE_CONFIG_FILE = process.env.UPDATE_CONFIG_FILE || path.join(__dirname, 'update-config.json');
const UPDATE_HISTORY_FILE = process.env.UPDATE_HISTORY_FILE || path.join(__dirname, 'update-history.json');
const CHECK_INTERVAL = parseInt(process.env.UPDATE_CHECK_INTERVAL || '3600000', 10); // 1 hour
|
||||
|
||||
class UpdateManager extends EventEmitter {
|
||||
constructor() {
  super();
  // Persisted auto-update configuration (see loadConfig()).
  this.config = this.loadConfig();
  // Persisted record of past update attempts (see loadHistory()).
  this.history = this.loadHistory();
  // containerId -> update descriptor for containers with a newer image.
  this.availableUpdates = new Map();
  // True while periodic checking is active.
  this.checking = false;
  // Timer handle for the periodic check loop (null when stopped).
  this.checkInterval = null;
}
|
||||
|
||||
/**
|
||||
* Start update checking
|
||||
*/
|
||||
start() {
|
||||
if (this.checking) return;
|
||||
|
||||
console.log('[UpdateManager] Starting update checks');
|
||||
this.checking = true;
|
||||
|
||||
// Initial check
|
||||
this.checkForUpdates();
|
||||
|
||||
// Schedule periodic checks
|
||||
this.checkInterval = setInterval(() => this.checkForUpdates(), CHECK_INTERVAL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop update checking
|
||||
*/
|
||||
stop() {
|
||||
if (!this.checking) return;
|
||||
|
||||
console.log('[UpdateManager] Stopping update checks');
|
||||
this.checking = false;
|
||||
|
||||
if (this.checkInterval) {
|
||||
clearInterval(this.checkInterval);
|
||||
this.checkInterval = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Check for updates for all containers.
 *
 * Inspects every container (running or not), asks the registry for the
 * latest digest of its image, and records an update descriptor in
 * this.availableUpdates when they differ. Emits 'update-available' for
 * each detected update. Per-container failures are logged and skipped;
 * the method itself never rejects.
 */
async checkForUpdates() {
  try {
    const containers = await docker.listContainers({ all: true });

    for (const containerInfo of containers) {
      try {
        const container = docker.getContainer(containerInfo.Id);
        const inspect = await container.inspect();

        const imageName = inspect.Config.Image;
        // NOTE(review): inspect.Image is the LOCAL image ID, while
        // getLatestImageDigest() returns the registry's MANIFEST digest —
        // these are different identifiers and may never compare equal,
        // so this can report updates for up-to-date images. Comparing
        // against inspect.RepoDigests would be more accurate; confirm
        // before relying on this comparison.
        const currentDigest = inspect.Image;

        // Check if update available
        const latestDigest = await this.getLatestImageDigest(imageName);

        if (latestDigest && latestDigest !== currentDigest) {
          this.availableUpdates.set(containerInfo.Id, {
            containerId: containerInfo.Id,
            containerName: containerInfo.Names[0].replace(/^\//, ''),
            imageName,
            currentDigest: currentDigest.substring(0, 12),
            latestDigest: latestDigest.substring(0, 12),
            currentTag: this.extractTag(imageName),
            detectedAt: new Date().toISOString()
          });

          this.emit('update-available', this.availableUpdates.get(containerInfo.Id));
        } else {
          // Up to date (or digest unavailable): clear any stale entry.
          this.availableUpdates.delete(containerInfo.Id);
        }
      } catch (error) {
        // One broken container must not abort the whole scan.
        console.error(`[UpdateManager] Error checking ${containerInfo.Names[0]}:`, error.message);
      }
    }

    console.log(`[UpdateManager] Found ${this.availableUpdates.size} updates available`);
  } catch (error) {
    console.error('[UpdateManager] Error checking for updates:', error.message);
  }
}
|
||||
|
||||
/**
|
||||
* Get latest image digest from registry
|
||||
*/
|
||||
async getLatestImageDigest(imageName) {
|
||||
try {
|
||||
// Parse image name
|
||||
const [repository, tag] = imageName.split(':');
|
||||
const imageTag = tag || 'latest';
|
||||
|
||||
// For Docker Hub images
|
||||
if (!repository.includes('/') || repository.split('/').length === 2) {
|
||||
return await this.getDockerHubDigest(repository, imageTag);
|
||||
}
|
||||
|
||||
// For other registries (would need authentication)
|
||||
console.warn(`[UpdateManager] Custom registry not yet supported: ${repository}`);
|
||||
return null;
|
||||
} catch (error) {
|
||||
console.error(`[UpdateManager] Error getting digest for ${imageName}:`, error.message);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get image digest from Docker Hub
|
||||
*/
|
||||
async getDockerHubDigest(repository, tag) {
|
||||
return new Promise((resolve, reject) => {
|
||||
// Normalize repository name
|
||||
const repo = repository.includes('/') ? repository : `library/${repository}`;
|
||||
|
||||
const options = {
|
||||
hostname: 'registry-1.docker.io',
|
||||
path: `/v2/${repo}/manifests/${tag}`,
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Accept': 'application/vnd.docker.distribution.manifest.v2+json'
|
||||
}
|
||||
};
|
||||
|
||||
const req = https.request(options, (res) => {
|
||||
if (res.statusCode === 401) {
|
||||
// Need to authenticate
|
||||
const authHeader = res.headers['www-authenticate'];
|
||||
const authUrl = this.parseAuthHeader(authHeader);
|
||||
|
||||
if (authUrl) {
|
||||
this.authenticateAndGetDigest(authUrl, options).then(resolve).catch(reject);
|
||||
} else {
|
||||
reject(new Error('Authentication required but no auth URL found'));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
const digest = res.headers['docker-content-digest'];
|
||||
resolve(digest || null);
|
||||
});
|
||||
|
||||
req.on('error', reject);
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse authentication header
|
||||
*/
|
||||
parseAuthHeader(header) {
|
||||
if (!header) return null;
|
||||
|
||||
const match = header.match(/Bearer realm="([^"]+)"/);
|
||||
if (!match) return null;
|
||||
|
||||
const url = new URL(match[1]);
|
||||
const params = header.match(/service="([^"]+)"/);
|
||||
if (params) url.searchParams.set('service', params[1]);
|
||||
|
||||
const scope = header.match(/scope="([^"]+)"/);
|
||||
if (scope) url.searchParams.set('scope', scope[1]);
|
||||
|
||||
return url.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Authenticate and get digest
|
||||
*/
|
||||
async authenticateAndGetDigest(authUrl, originalOptions) {
|
||||
return new Promise((resolve, reject) => {
|
||||
https.get(authUrl, (res) => {
|
||||
let data = '';
|
||||
res.on('data', chunk => data += chunk);
|
||||
res.on('end', () => {
|
||||
try {
|
||||
const auth = JSON.parse(data);
|
||||
const token = auth.token || auth.access_token;
|
||||
|
||||
if (!token) {
|
||||
reject(new Error('No token in auth response'));
|
||||
return;
|
||||
}
|
||||
|
||||
// Retry original request with token
|
||||
const options = {
|
||||
...originalOptions,
|
||||
headers: {
|
||||
...originalOptions.headers,
|
||||
'Authorization': `Bearer ${token}`
|
||||
}
|
||||
};
|
||||
|
||||
const req = https.request(options, (res) => {
|
||||
const digest = res.headers['docker-content-digest'];
|
||||
resolve(digest || null);
|
||||
});
|
||||
|
||||
req.on('error', reject);
|
||||
req.end();
|
||||
} catch (error) {
|
||||
reject(error);
|
||||
}
|
||||
});
|
||||
}).on('error', reject);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract tag from image name
|
||||
*/
|
||||
extractTag(imageName) {
|
||||
const parts = imageName.split(':');
|
||||
return parts.length > 1 ? parts[parts.length - 1] : 'latest';
|
||||
}
|
||||
|
||||
/**
 * Update a container.
 *
 * Full flow: snapshot the current container (for rollback), pull the
 * latest image for its tag, recreate the container with the same config,
 * verify the replacement, remove the old image, and record the outcome
 * in history. On failure the error is recorded, an automatic rollback is
 * attempted (unless options.autoRollback === false), and the error is
 * rethrown.
 *
 * @param {string} containerId - ID of the container to update
 * @param {object} options - { verifyTimeout?: number (ms, default 60000),
 *                             autoRollback?: boolean (default true) }
 * @returns {Promise<object>} the success history entry
 * @throws {Error} when any step of the update fails
 */
async updateContainer(containerId, options = {}) {
  const startTime = Date.now();

  console.log(`[UpdateManager] Starting update for container ${containerId}`);
  this.emit('update-start', { containerId, timestamp: new Date().toISOString() });

  try {
    const container = docker.getContainer(containerId);
    const inspect = await container.inspect();

    const imageName = inspect.Config.Image;
    const containerName = inspect.Name.replace(/^\//, '');
    const oldImageId = inspect.Image;

    // Get old image digest for rollback
    let oldImageDigest = null;
    try {
      const oldImage = docker.getImage(oldImageId);
      const oldImageInspect = await oldImage.inspect();
      // Prefer the immutable registry digest; fall back to the local ID.
      oldImageDigest = oldImageInspect.RepoDigests?.[0] || oldImageId;
      console.log(`[UpdateManager] Stored old image digest: ${oldImageDigest.substring(0, 40)}...`);
    } catch (error) {
      // Best-effort: a missing digest only limits rollback precision.
      console.warn(`[UpdateManager] Could not get old image digest: ${error.message}`);
    }

    // Create backup of current state (consumed by rollbackUpdate()).
    const backup = {
      containerId,
      containerName,
      imageName,
      imageId: oldImageId,
      imageDigest: oldImageDigest,
      config: inspect.Config,
      hostConfig: inspect.HostConfig,
      networkSettings: inspect.NetworkSettings,
      timestamp: new Date().toISOString()
    };

    // Pull latest image
    console.log(`[UpdateManager] Pulling latest image: ${imageName}`);
    await this.pullImage(imageName);

    // Stop container
    console.log(`[UpdateManager] Stopping container: ${containerName}`);
    await container.stop();

    // Remove old container
    console.log(`[UpdateManager] Removing old container: ${containerName}`);
    await container.remove();

    // Create new container with same configuration.
    // NOTE(review): the spread of backup.config re-applies config.Image,
    // which here equals imageName, so the explicit Image is redundant
    // (but harmless).
    console.log(`[UpdateManager] Creating new container: ${containerName}`);
    const newContainer = await docker.createContainer({
      name: containerName,
      Image: imageName,
      ...backup.config,
      HostConfig: backup.hostConfig
    });

    // Start new container
    console.log(`[UpdateManager] Starting new container: ${containerName}`);
    await newContainer.start();

    // Extended verification with health checks and port accessibility
    console.log(`[UpdateManager] Performing extended verification...`);
    await this.verifyContainerExtended(newContainer, inspect, options.verifyTimeout || 60000);

    // Get new image ID
    const newInspect = await newContainer.inspect();
    const newImageId = newInspect.Image;

    // Remove old image only after successful verification
    if (oldImageId !== newImageId) {
      try {
        console.log(`[UpdateManager] Removing old image: ${oldImageId.substring(0, 12)}`);
        const oldImage = docker.getImage(oldImageId);
        await oldImage.remove({ force: false });
        console.log(`[UpdateManager] Old image removed successfully`);
      } catch (error) {
        console.warn(`[UpdateManager] Could not remove old image (may be in use): ${error.message}`);
      }
    }

    const duration = Date.now() - startTime;

    const historyEntry = {
      containerId: newContainer.id,
      containerName,
      imageName,
      oldImageId: oldImageId.substring(0, 12),
      newImageId: newImageId.substring(0, 12),
      timestamp: new Date().toISOString(),
      duration,
      status: 'success',
      backup
    };

    this.addToHistory(historyEntry);
    // The container was replaced, so its pending-update entry is stale.
    this.availableUpdates.delete(containerId);

    this.emit('update-complete', historyEntry);
    console.log(`[UpdateManager] Update completed in ${duration}ms`);

    return historyEntry;
  } catch (error) {
    const duration = Date.now() - startTime;

    const historyEntry = {
      containerId,
      timestamp: new Date().toISOString(),
      duration,
      status: 'failed',
      error: error.message
    };

    this.addToHistory(historyEntry);
    this.emit('update-failed', historyEntry);

    // Attempt rollback
    if (options.autoRollback !== false) {
      console.log(`[UpdateManager] Attempting rollback for ${containerId}`);
      try {
        await this.rollbackUpdate(containerId);
      } catch (rollbackError) {
        console.error(`[UpdateManager] Rollback failed:`, rollbackError.message);
      }
    }

    throw error;
  }
}
|
||||
|
||||
/**
|
||||
* Pull Docker image
|
||||
*/
|
||||
async pullImage(imageName) {
|
||||
return new Promise((resolve, reject) => {
|
||||
docker.pull(imageName, (err, stream) => {
|
||||
if (err) {
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
|
||||
docker.modem.followProgress(stream, (err, output) => {
|
||||
if (err) {
|
||||
reject(err);
|
||||
} else {
|
||||
resolve(output);
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify container is running and healthy
|
||||
*/
|
||||
async verifyContainer(container, timeout = 30000) {
|
||||
const startTime = Date.now();
|
||||
|
||||
while (Date.now() - startTime < timeout) {
|
||||
try {
|
||||
const inspect = await container.inspect();
|
||||
|
||||
if (inspect.State.Running) {
|
||||
// Check health if health check is configured
|
||||
if (inspect.State.Health) {
|
||||
if (inspect.State.Health.Status === 'healthy') {
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
// No health check, just verify it's running
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Wait before checking again
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
} catch (error) {
|
||||
throw new Error(`Container verification failed: ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error('Container verification timeout');
|
||||
}
|
||||
|
||||
/**
 * Extended container verification with health checks and port accessibility.
 *
 * Polls every 2 seconds: first requires State.Running, then uses the Docker
 * health check when one exists; otherwise probes the first published port
 * over HTTP, or simply requires the container to stay running for several
 * cycles when no ports are published.
 *
 * @param {object} container - Docker container object
 * @param {object} oldInspect - Old container inspect data for port comparison
 * @param {number} timeout - Verification timeout in milliseconds (default: 60000)
 * @returns {Promise<boolean>} true once the container is judged healthy
 * @throws {Error} when all attempts are exhausted without success
 */
async verifyContainerExtended(container, oldInspect, timeout = 60000) {
  const startTime = Date.now();
  const maxAttempts = Math.floor(timeout / 2000); // Check every 2 seconds
  let lastError = null;

  console.log(`[UpdateManager] Extended verification with ${maxAttempts} attempts over ${timeout/1000}s`);

  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      const inspect = await container.inspect();

      // Step 1: Verify container is running
      if (!inspect.State.Running) {
        lastError = 'Container is not running';
        throw new Error(lastError);
      }

      // Step 2: Check Docker health check if available
      if (inspect.State.Health) {
        if (inspect.State.Health.Status === 'healthy') {
          console.log(`[UpdateManager] Container health check: healthy`);
          return true;
        } else if (inspect.State.Health.Status === 'unhealthy') {
          lastError = 'Container health check failed (unhealthy)';
          throw new Error(lastError);
        }
        // Status is 'starting' - continue waiting
        console.log(`[UpdateManager] Health check status: ${inspect.State.Health.Status} (attempt ${attempt + 1}/${maxAttempts})`);
      } else {
        // Step 3: No Docker health check - verify HTTP port accessibility
        const ports = this.extractPorts(inspect);

        if (ports.length > 0) {
          // Try to access the first HTTP port.
          // NOTE(review): assumes the published port is reachable on
          // localhost from this process and speaks HTTP — confirm for
          // non-HTTP services or remote Docker daemons.
          const primaryPort = ports[0];
          const testUrl = `http://localhost:${primaryPort.hostPort}`;

          try {
            const response = await fetch(testUrl, {
              signal: AbortSignal.timeout(3000),
              redirect: 'manual'
            });

            // Accept 2xx, 3xx, 4xx as "accessible" (server is responding)
            if (response.status >= 200 && response.status < 500) {
              console.log(`[UpdateManager] Port ${primaryPort.hostPort} is accessible (HTTP ${response.status})`);

              // Wait a bit more to ensure stability: require the probe to
              // succeed on a later cycle (attempt >= 2) before passing.
              if (attempt >= 2) {
                console.log(`[UpdateManager] Container verified successfully`);
                return true;
              }
            }
          } catch (fetchError) {
            lastError = `Port ${primaryPort.hostPort} not accessible: ${fetchError.message}`;
            console.log(`[UpdateManager] ${lastError} (attempt ${attempt + 1}/${maxAttempts})`);
          }
        } else {
          // No ports exposed - just verify it's running for a few cycles
          if (attempt >= 5) {
            console.log(`[UpdateManager] Container running without exposed ports (verified)`);
            return true;
          }
        }
      }

      // Wait before next attempt
      if (attempt < maxAttempts - 1) {
        await new Promise(resolve => setTimeout(resolve, 2000));
      }
    } catch (error) {
      // Hard failures thrown above and transient inspect errors both land
      // here; keep retrying until attempts run out, remembering the reason.
      lastError = error.message;
      console.log(`[UpdateManager] Verification attempt ${attempt + 1} failed: ${lastError}`);

      if (attempt < maxAttempts - 1) {
        await new Promise(resolve => setTimeout(resolve, 2000));
      }
    }
  }

  // Verification failed
  const duration = Date.now() - startTime;
  throw new Error(`Extended verification failed after ${duration}ms: ${lastError || 'timeout'}`);
}
|
||||
|
||||
/**
|
||||
* Extract port mappings from container inspect data
|
||||
* @param {object} inspect - Container inspect data
|
||||
* @returns {Array} Array of port mappings
|
||||
*/
|
||||
extractPorts(inspect) {
|
||||
const ports = [];
|
||||
|
||||
if (inspect.NetworkSettings && inspect.NetworkSettings.Ports) {
|
||||
for (const [containerPort, bindings] of Object.entries(inspect.NetworkSettings.Ports)) {
|
||||
if (bindings && bindings.length > 0) {
|
||||
for (const binding of bindings) {
|
||||
if (binding.HostPort) {
|
||||
ports.push({
|
||||
containerPort: containerPort.split('/')[0],
|
||||
hostPort: binding.HostPort,
|
||||
protocol: containerPort.split('/')[1] || 'tcp'
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ports;
|
||||
}
|
||||
|
||||
/**
|
||||
* Rollback to previous version
|
||||
*/
|
||||
async rollbackUpdate(containerId) {
|
||||
console.log(`[UpdateManager] Rolling back container ${containerId}`);
|
||||
|
||||
// Find last successful update in history
|
||||
const lastUpdate = this.history
|
||||
.filter(h => h.containerId === containerId && h.status === 'success' && h.backup)
|
||||
.pop();
|
||||
|
||||
if (!lastUpdate || !lastUpdate.backup) {
|
||||
throw new Error('No backup found for rollback');
|
||||
}
|
||||
|
||||
const backup = lastUpdate.backup;
|
||||
|
||||
try {
|
||||
// Stop and remove current container
|
||||
try {
|
||||
const container = docker.getContainer(containerId);
|
||||
await container.stop();
|
||||
await container.remove();
|
||||
} catch (error) {
|
||||
// Container might not exist, continue
|
||||
}
|
||||
|
||||
// Recreate container from backup
|
||||
const newContainer = await docker.createContainer({
|
||||
name: backup.containerName,
|
||||
Image: backup.imageName,
|
||||
...backup.config,
|
||||
HostConfig: backup.hostConfig
|
||||
});
|
||||
|
||||
await newContainer.start();
|
||||
|
||||
console.log(`[UpdateManager] Rollback completed for ${backup.containerName}`);
|
||||
this.emit('rollback-complete', { containerId, containerName: backup.containerName });
|
||||
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.error(`[UpdateManager] Rollback failed:`, error.message);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedule update for maintenance window
|
||||
*/
|
||||
scheduleUpdate(containerId, scheduledTime) {
|
||||
const delay = new Date(scheduledTime).getTime() - Date.now();
|
||||
|
||||
if (delay < 0) {
|
||||
throw new Error('Scheduled time must be in the future');
|
||||
}
|
||||
|
||||
setTimeout(() => {
|
||||
this.updateContainer(containerId).catch(error => {
|
||||
console.error(`[UpdateManager] Scheduled update failed:`, error.message);
|
||||
});
|
||||
}, delay);
|
||||
|
||||
console.log(`[UpdateManager] Update scheduled for ${containerId} at ${scheduledTime}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get available updates
|
||||
*/
|
||||
getAvailableUpdates() {
|
||||
return Array.from(this.availableUpdates.values());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get update history
|
||||
*/
|
||||
getHistory(limit = 50) {
|
||||
return this.history.slice(-limit).reverse();
|
||||
}
|
||||
|
||||
/**
 * Get changelog and release information from Docker Hub.
 *
 * Never rejects: on failure a reduced object with an `error` field and a
 * fallback changelog message is returned, so API routes can always render
 * something.
 *
 * @param {string} imageName - Docker image name (e.g., "nginx:latest" or "linuxserver/plex")
 * @returns {Object} Changelog information including tags, description, and URLs
 */
async getChangelog(imageName) {
  try {
    // Parse image name
    const [fullRepo, tag] = imageName.split(':');
    const imageTag = tag || 'latest';

    // Normalize repository name for Docker Hub API
    // (official images live under the implicit "library" namespace).
    let repo = fullRepo;
    let namespace = 'library';

    if (fullRepo.includes('/')) {
      const parts = fullRepo.split('/');
      namespace = parts[0];
      repo = parts.slice(1).join('/');
    }

    const repoPath = namespace === 'library' ? repo : `${namespace}/${repo}`;

    // Fetch repository info from Docker Hub API (null on failure)
    const repoInfo = await this.fetchDockerHubRepo(repoPath, namespace === 'library');

    // Fetch available tags ([] on failure)
    const tags = await this.fetchDockerHubTags(repoPath, namespace === 'library');

    // Build the Docker Hub URL (official images use /_/<repo>)
    const hubUrl = namespace === 'library'
      ? `https://hub.docker.com/_/${repo}`
      : `https://hub.docker.com/r/${namespace}/${repo}`;

    return {
      imageName,
      currentTag: imageTag,
      repository: {
        name: repoPath,
        description: repoInfo?.description || 'No description available',
        shortDescription: repoInfo?.description?.substring(0, 200) || '',
        starCount: repoInfo?.star_count || 0,
        pullCount: repoInfo?.pull_count || 0,
        lastUpdated: repoInfo?.last_updated || null
      },
      tags: tags.slice(0, 10).map(t => ({
        name: t.name,
        lastPushed: t.last_pushed || t.tag_last_pushed,
        digest: t.digest?.substring(0, 12) || 'unknown',
        size: t.full_size || t.size || 0
      })),
      urls: {
        dockerHub: hubUrl,
        tags: `${hubUrl}/tags`,
        dockerfile: repoInfo?.dockerfile_url || null
      },
      changelog: this.formatChangelog(repoInfo, tags, imageTag)
    };
  } catch (error) {
    console.error(`[UpdateManager] Error fetching changelog for ${imageName}:`, error.message);

    // Return basic info even on error
    const [fullRepo] = imageName.split(':');
    const repoPath = fullRepo.includes('/') ? fullRepo : `library/${fullRepo}`;

    return {
      imageName,
      error: error.message,
      urls: {
        dockerHub: `https://hub.docker.com/r/${repoPath.replace('library/', '_/')}`,
      },
      changelog: 'Unable to fetch changelog. Visit Docker Hub for details.'
    };
  }
}
|
||||
|
||||
/**
|
||||
* Fetch repository info from Docker Hub
|
||||
*/
|
||||
async fetchDockerHubRepo(repoPath, isLibrary) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const apiPath = isLibrary
|
||||
? `/v2/repositories/library/${repoPath}`
|
||||
: `/v2/repositories/${repoPath}`;
|
||||
|
||||
const options = {
|
||||
hostname: 'hub.docker.com',
|
||||
path: apiPath,
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Accept': 'application/json',
|
||||
'User-Agent': 'DashCaddy/1.0'
|
||||
}
|
||||
};
|
||||
|
||||
const req = https.request(options, (res) => {
|
||||
let data = '';
|
||||
res.on('data', chunk => data += chunk);
|
||||
res.on('end', () => {
|
||||
try {
|
||||
if (res.statusCode === 200) {
|
||||
resolve(JSON.parse(data));
|
||||
} else {
|
||||
resolve(null);
|
||||
}
|
||||
} catch (e) {
|
||||
resolve(null);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
req.on('error', () => resolve(null));
|
||||
req.setTimeout(10000, () => {
|
||||
req.destroy();
|
||||
resolve(null);
|
||||
});
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch available tags from Docker Hub
|
||||
*/
|
||||
async fetchDockerHubTags(repoPath, isLibrary) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const apiPath = isLibrary
|
||||
? `/v2/repositories/library/${repoPath}/tags?page_size=20&ordering=last_updated`
|
||||
: `/v2/repositories/${repoPath}/tags?page_size=20&ordering=last_updated`;
|
||||
|
||||
const options = {
|
||||
hostname: 'hub.docker.com',
|
||||
path: apiPath,
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Accept': 'application/json',
|
||||
'User-Agent': 'DashCaddy/1.0'
|
||||
}
|
||||
};
|
||||
|
||||
const req = https.request(options, (res) => {
|
||||
let data = '';
|
||||
res.on('data', chunk => data += chunk);
|
||||
res.on('end', () => {
|
||||
try {
|
||||
if (res.statusCode === 200) {
|
||||
const parsed = JSON.parse(data);
|
||||
resolve(parsed.results || []);
|
||||
} else {
|
||||
resolve([]);
|
||||
}
|
||||
} catch (e) {
|
||||
resolve([]);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
req.on('error', () => resolve([]));
|
||||
req.setTimeout(10000, () => {
|
||||
req.destroy();
|
||||
resolve([]);
|
||||
});
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Format changelog from repo info and tags
|
||||
*/
|
||||
formatChangelog(repoInfo, tags, currentTag) {
|
||||
const lines = [];
|
||||
|
||||
if (repoInfo?.description) {
|
||||
lines.push(`**${repoInfo.description.split('\n')[0]}**`);
|
||||
lines.push('');
|
||||
}
|
||||
|
||||
// Find current and latest tags
|
||||
const latestTag = tags.find(t => t.name === 'latest');
|
||||
const currentTagInfo = tags.find(t => t.name === currentTag);
|
||||
|
||||
if (latestTag?.last_pushed || latestTag?.tag_last_pushed) {
|
||||
const lastUpdated = new Date(latestTag.last_pushed || latestTag.tag_last_pushed);
|
||||
lines.push(`Latest update: ${lastUpdated.toLocaleDateString()}`);
|
||||
}
|
||||
|
||||
if (tags.length > 0) {
|
||||
lines.push('');
|
||||
lines.push('Recent tags:');
|
||||
tags.slice(0, 5).forEach(t => {
|
||||
const date = t.last_pushed || t.tag_last_pushed;
|
||||
const dateStr = date ? new Date(date).toLocaleDateString() : 'unknown';
|
||||
lines.push(` - ${t.name} (${dateStr})`);
|
||||
});
|
||||
}
|
||||
|
||||
if (repoInfo?.pull_count) {
|
||||
lines.push('');
|
||||
lines.push(`Total pulls: ${repoInfo.pull_count.toLocaleString()}`);
|
||||
}
|
||||
|
||||
return lines.join('\n') || 'No changelog available';
|
||||
}
|
||||
|
||||
/**
|
||||
* Configure auto-update for a container
|
||||
*/
|
||||
configureAutoUpdate(containerId, config) {
|
||||
if (!this.config.autoUpdate) {
|
||||
this.config.autoUpdate = {};
|
||||
}
|
||||
|
||||
this.config.autoUpdate[containerId] = {
|
||||
enabled: config.enabled !== false,
|
||||
schedule: config.schedule || 'weekly',
|
||||
maintenanceWindow: config.maintenanceWindow,
|
||||
autoRollback: config.autoRollback !== false,
|
||||
securityOnly: config.securityOnly || false
|
||||
};
|
||||
|
||||
this.saveConfig();
|
||||
}
|
||||
|
||||
/**
|
||||
* Add entry to history
|
||||
*/
|
||||
addToHistory(entry) {
|
||||
this.history.push(entry);
|
||||
|
||||
// Keep only last 100 entries
|
||||
if (this.history.length > 100) {
|
||||
this.history = this.history.slice(-100);
|
||||
}
|
||||
|
||||
this.saveHistory();
|
||||
}
|
||||
|
||||
/**
|
||||
* Load configuration
|
||||
*/
|
||||
loadConfig() {
|
||||
try {
|
||||
if (fs.existsSync(UPDATE_CONFIG_FILE)) {
|
||||
return JSON.parse(fs.readFileSync(UPDATE_CONFIG_FILE, 'utf8'));
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('[UpdateManager] Error loading config:', error.message);
|
||||
}
|
||||
return { autoUpdate: {} };
|
||||
}
|
||||
|
||||
/**
|
||||
* Save configuration
|
||||
*/
|
||||
saveConfig() {
|
||||
try {
|
||||
fs.writeFileSync(UPDATE_CONFIG_FILE, JSON.stringify(this.config, null, 2));
|
||||
} catch (error) {
|
||||
console.error('[UpdateManager] Error saving config:', error.message);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Load history
|
||||
*/
|
||||
loadHistory() {
|
||||
try {
|
||||
if (fs.existsSync(UPDATE_HISTORY_FILE)) {
|
||||
return JSON.parse(fs.readFileSync(UPDATE_HISTORY_FILE, 'utf8'));
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('[UpdateManager] Error loading history:', error.message);
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
/**
|
||||
* Save history
|
||||
*/
|
||||
saveHistory() {
|
||||
try {
|
||||
fs.writeFileSync(UPDATE_HISTORY_FILE, JSON.stringify(this.history, null, 2));
|
||||
} catch (error) {
|
||||
console.error('[UpdateManager] Error saving history:', error.message);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Export singleton instance
// require() caches modules, so every consumer shares this one manager
// (and its single periodic check loop).
module.exports = new UpdateManager();
|
||||
Reference in New Issue
Block a user