src/lib/queues/builder.ts (new file, 256 lines)
@@ -0,0 +1,256 @@
import crypto from 'crypto';
import * as buildpacks from '../buildPacks';
import * as importers from '../importers';
import { dockerInstance } from '../docker';
import { asyncExecShell, createDirectories, getDomain, getEngine, saveBuildLog } from '../common';
import { configureProxyForApplication, reloadHaproxy } from '../haproxy';
import * as db from '$lib/database';
import { decrypt } from '$lib/crypto';
import {
	copyBaseConfigurationFiles,
	makeLabelForStandaloneApplication,
	setDefaultConfiguration
} from '$lib/buildPacks/common';
import { letsEncrypt } from '$lib/letsencrypt';

export default async function (job) {
	/*
	Edge cases:
	1 - Change build pack and redeploy, what should happen?
	*/
	let {
		id: applicationId,
		repository,
		branch,
		buildPack,
		name,
		destinationDocker,
		destinationDockerId,
		gitSource,
		build_id: buildId,
		configHash,
		port,
		installCommand,
		buildCommand,
		startCommand,
		fqdn,
		baseDirectory,
		publishDirectory,
		projectId,
		secrets,
		type,
		pullmergeRequestId = null,
		sourceBranch = null,
		settings
	} = job.data;
	const { debug } = settings;

	let imageId = applicationId;
	let domain = getDomain(fqdn);
	const isHttps = fqdn.startsWith('https://');

	// Previews, we need to get the source branch and set subdomain
	if (pullmergeRequestId) {
		branch = sourceBranch;
		domain = `${pullmergeRequestId}.${domain}`;
		imageId = `${applicationId}-${pullmergeRequestId}`;
	}

	let deployNeeded = true;
	let destinationType;

	if (destinationDockerId) {
		destinationType = 'docker';
	}
	// Not implemented yet
	// if (destinationKubernetesId) {
	// 	destinationType = 'kubernetes'
	// }

	if (destinationType === 'docker') {
		const docker = dockerInstance({ destinationDocker });
		const host = getEngine(destinationDocker.engine);

		const build = await db.createBuild({
			id: buildId,
			applicationId,
			destinationDockerId: destinationDocker.id,
			gitSourceId: gitSource.id,
			githubAppId: gitSource.githubApp?.id,
			gitlabAppId: gitSource.gitlabApp?.id,
			type
		});

		const { workdir, repodir } = await createDirectories({ repository, buildId: build.id });

		const configuration = await setDefaultConfiguration(job.data);

		buildPack = configuration.buildPack;
		port = configuration.port;
		installCommand = configuration.installCommand;
		startCommand = configuration.startCommand;
		buildCommand = configuration.buildCommand;
		publishDirectory = configuration.publishDirectory;

		let commit = await importers[gitSource.type]({
			applicationId,
			debug,
			workdir,
			repodir,
			githubAppId: gitSource.githubApp?.id,
			gitlabAppId: gitSource.gitlabApp?.id,
			repository,
			branch,
			buildId: build.id,
			apiUrl: gitSource.apiUrl,
			projectId,
			deployKeyId: gitSource.gitlabApp?.deployKeyId || null,
			privateSshKey: decrypt(gitSource.gitlabApp?.privateSshKey) || null
		});
		let tag = commit.slice(0, 7);
		if (pullmergeRequestId) {
			tag = `${commit.slice(0, 7)}-${pullmergeRequestId}`;
		}

		try {
			await db.prisma.build.update({ where: { id: build.id }, data: { commit } });
		} catch (err) {
			console.log(err);
		}

		if (!pullmergeRequestId) {
			const currentHash = crypto
				.createHash('sha256')
				.update(
					JSON.stringify({
						buildPack,
						port,
						installCommand,
						buildCommand,
						startCommand,
						secrets,
						branch,
						repository,
						fqdn
					})
				)
				.digest('hex');

			if (configHash !== currentHash) {
				await db.prisma.application.update({
					where: { id: applicationId },
					data: { configHash: currentHash }
				});
				deployNeeded = true;
				if (configHash) {
					saveBuildLog({ line: 'Configuration changed.', buildId, applicationId });
				}
			} else {
				deployNeeded = false;
			}
		} else {
			deployNeeded = true;
		}
		const image = await docker.engine.getImage(`${applicationId}:${tag}`);

		let imageFound = false;
		try {
			await image.inspect();
			imageFound = true;
		} catch (error) {
			// Image has not been built for this commit yet.
		}
		if (!imageFound || deployNeeded) {
			await copyBaseConfigurationFiles(buildPack, workdir, buildId, applicationId);
			if (buildpacks[buildPack])
				await buildpacks[buildPack]({
					buildId: build.id,
					applicationId,
					domain,
					name,
					type,
					pullmergeRequestId,
					buildPack,
					repository,
					branch,
					projectId,
					publishDirectory,
					debug,
					commit,
					tag,
					workdir,
					docker,
					port,
					installCommand,
					buildCommand,
					startCommand,
					baseDirectory,
					secrets
				});
			else {
				saveBuildLog({ line: `Build pack ${buildPack} not found`, buildId, applicationId });
				throw new Error(`Build pack ${buildPack} not found.`);
			}
			deployNeeded = true;
		} else {
			deployNeeded = false;
			saveBuildLog({ line: 'Nothing changed.', buildId, applicationId });
		}

		// Deploy to Docker Engine
		try {
			await asyncExecShell(`DOCKER_HOST=${host} docker stop -t 0 ${imageId}`);
			await asyncExecShell(`DOCKER_HOST=${host} docker rm ${imageId}`);
		} catch (error) {
			//
		}
		const envs = [];
		if (secrets.length > 0) {
			secrets.forEach((secret) => {
				if (!secret.isBuildSecret) {
					envs.push(`--env ${secret.name}=${secret.value}`);
				}
			});
		}
		const labels = makeLabelForStandaloneApplication({
			applicationId,
			fqdn,
			name,
			type,
			pullmergeRequestId,
			buildPack,
			repository,
			branch,
			projectId,
			port,
			commit,
			installCommand,
			buildCommand,
			startCommand,
			baseDirectory,
			publishDirectory
		});
		saveBuildLog({ line: 'Deployment started.', buildId, applicationId });
		const { stderr } = await asyncExecShell(
			`DOCKER_HOST=${host} docker run ${envs.join(' ')} ${labels.join(
				' '
			)} --name ${imageId} --network ${docker.network} --restart always -d ${applicationId}:${tag}`
		);
		if (stderr) console.log(stderr);
		saveBuildLog({ line: 'Deployment successful!', buildId, applicationId });

		if (destinationDockerId && destinationDocker.isCoolifyProxyUsed) {
			saveBuildLog({ line: 'Proxy configuration started!', buildId, applicationId });
			await configureProxyForApplication({ domain, imageId, applicationId, port });
			if (isHttps) await letsEncrypt({ domain, id: applicationId });
			await reloadHaproxy(destinationDocker.engine);
			saveBuildLog({ line: 'Proxy configuration successful!', buildId, applicationId });
		} else {
			saveBuildLog({
				line: 'Coolify Proxy is not configured for this destination. Nothing else to do.',
				buildId,
				applicationId
			});
		}
	}
}
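The `coolify.configuration` label consumed by the proxy and SSL queues below is produced by makeLabelForStandaloneApplication, which lives in $lib/buildPacks/common and is not part of this diff. A minimal sketch of what that helper plausibly returns, assuming it serialises the configuration as base64 JSON (the decode side is visible in proxy.ts and ssl.ts) and that each entry is a complete `--label` flag, since builder.ts splices `labels.join(' ')` straight into the docker run command:

// Sketch only; the real helper is not shown in this diff, so the body here is an assumption.
function makeLabelForStandaloneApplicationSketch(configuration: {
	applicationId: string;
	fqdn: string;
	port: number;
	pullmergeRequestId?: string | null;
	[key: string]: unknown;
}): string[] {
	// proxy.ts/ssl.ts decode this with Buffer.from(label, 'base64') and JSON.parse.
	const encoded = Buffer.from(JSON.stringify(configuration)).toString('base64');
	return [
		'--label coolify.managed=true',
		'--label coolify.type=standalone-application',
		`--label coolify.configuration=${encoded}`
	];
}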
src/lib/queues/cleanup.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
import { dev } from '$app/env';
import { asyncExecShell, getEngine } from '$lib/common';
import { prisma } from '$lib/database';

export default async function () {
	if (!dev) {
		const destinationDockers = await prisma.destinationDocker.findMany();
		for (const destinationDocker of destinationDockers) {
			const host = getEngine(destinationDocker.engine);
			try {
				await asyncExecShell(`DOCKER_HOST=${host} docker container prune -f`);
			} catch (error) {
				console.log(error);
			}
			try {
				await asyncExecShell(`DOCKER_HOST=${host} docker image prune -f`);
			} catch (error) {
				console.log(error);
			}
		}
	}
}
src/lib/queues/index.ts (new file, 174 lines)
@@ -0,0 +1,174 @@
import * as Bullmq from 'bullmq';
import { default as ProdBullmq, Job, QueueEvents, QueueScheduler } from 'bullmq';
import cuid from 'cuid';
import { dev } from '$app/env';
import { prisma } from '$lib/database';

import builder from './builder';
import logger from './logger';
import cleanup from './cleanup';
import proxy from './proxy';
import ssl from './ssl';
import sslrenewal from './sslrenewal';

import { asyncExecShell, saveBuildLog } from '$lib/common';

let { Queue, Worker } = Bullmq;
let redisHost = 'localhost';

if (!dev) {
	Queue = ProdBullmq.Queue;
	Worker = ProdBullmq.Worker;
	redisHost = 'coolify-redis';
}

const connectionOptions = {
	connection: {
		host: redisHost
	}
};

const cron = async () => {
	new QueueScheduler('proxy', connectionOptions);
	new QueueScheduler('cleanup', connectionOptions);
	new QueueScheduler('ssl', connectionOptions);
	new QueueScheduler('sslRenew', connectionOptions);

	const queue = {
		proxy: new Queue('proxy', { ...connectionOptions }),
		cleanup: new Queue('cleanup', { ...connectionOptions }),
		ssl: new Queue('ssl', { ...connectionOptions }),
		sslRenew: new Queue('sslRenew', { ...connectionOptions })
	};
	await queue.proxy.drain();
	await queue.cleanup.drain();
	await queue.ssl.drain();
	await queue.sslRenew.drain();

	new Worker(
		'proxy',
		async () => {
			await proxy();
		},
		{
			...connectionOptions
		}
	);

	new Worker(
		'ssl',
		async () => {
			await ssl();
		},
		{
			...connectionOptions
		}
	);

	new Worker(
		'cleanup',
		async () => {
			await cleanup();
		},
		{
			...connectionOptions
		}
	);

	new Worker(
		'sslRenew',
		async () => {
			await sslrenewal();
		},
		{
			...connectionOptions
		}
	);

	await queue.proxy.add('proxy', {}, { repeat: { every: 10000 } });
	// await queue.ssl.add('ssl', {}, { repeat: { every: 10000 } });
	await queue.cleanup.add('cleanup', {}, { repeat: { every: 3600000 } });
	await queue.sslRenew.add('sslRenew', {}, { repeat: { every: 1800000 } });

	const events = {
		proxy: new QueueEvents('proxy', { ...connectionOptions }),
		ssl: new QueueEvents('ssl', { ...connectionOptions })
	};

	events.proxy.on('completed', (data) => {
		// console.log(data)
	});
	events.ssl.on('completed', (data) => {
		// console.log(data)
	});
};
cron().catch((error) => {
	console.log('cron failed to start');
	console.log(error);
});

const buildQueueName = dev ? cuid() : 'build_queue';
const buildQueue = new Queue(buildQueueName, connectionOptions);
const buildWorker = new Worker(buildQueueName, async (job) => await builder(job), {
	concurrency: 2,
	...connectionOptions
});

buildWorker.on('completed', async (job: Bullmq.Job) => {
	try {
		await prisma.build.update({ where: { id: job.data.build_id }, data: { status: 'success' } });
	} catch (err) {
		console.log(err);
	} finally {
		await asyncExecShell(`rm -fr ${job.data.workdir}`);
	}
	return;
});

buildWorker.on('failed', async (job: Bullmq.Job, failedReason) => {
	console.log(failedReason);
	try {
		await prisma.build.update({ where: { id: job.data.build_id }, data: { status: 'failed' } });
	} catch (error) {
		console.log(error);
	} finally {
		await asyncExecShell(`rm -fr ${job.data.workdir}`);
	}
	saveBuildLog({ line: 'Failed build!', buildId: job.data.build_id, applicationId: job.data.id });
	saveBuildLog({
		line: `Reason: ${failedReason.toString()}`,
		buildId: job.data.build_id,
		applicationId: job.data.id
	});
});

// const letsEncryptQueueName = dev ? cuid() : 'letsencrypt_queue'
// const letsEncryptQueue = new Queue(letsEncryptQueueName, connectionOptions)

// const letsEncryptWorker = new Worker(letsEncryptQueueName, async (job) => await letsencrypt(job), {
//   concurrency: 1,
//   ...connectionOptions
// })
// letsEncryptWorker.on('completed', async () => {
//   // TODO: Save letsencrypt logs as build logs!
//   console.log('[DEBUG] Lets Encrypt job completed')
// })

// letsEncryptWorker.on('failed', async (job: Job, failedReason: string) => {
//   try {
//     await prisma.applicationSettings.updateMany({ where: { applicationId: job.data.id }, data: { forceSSL: false } })
//   } catch (error) {
//     console.log(error)
//   }
//   console.log('[DEBUG] Lets Encrypt job failed')
//   console.log(failedReason)
// })

const buildLogQueueName = dev ? cuid() : 'log_queue';
const buildLogQueue = new Queue(buildLogQueueName, connectionOptions);
const buildLogWorker = new Worker(buildLogQueueName, async (job) => await logger(job), {
	concurrency: 1,
	...connectionOptions
});

export { buildQueue, buildLogQueue };
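index.ts only exports buildQueue and buildLogQueue; the call sites that enqueue builds are elsewhere in the repository and not part of this diff. A minimal sketch of how a deploy endpoint might enqueue a job, assuming the SvelteKit `$lib` alias resolves this module and that the payload carries the same fields builder.ts destructures from job.data (`build_id`, `id`, `gitSource`, `destinationDocker`, `secrets`, `settings`, ...); `queueDeployment` and the `application` object are illustrative, not code from this change:

// Sketch only; assumes `application` was loaded from Prisma with its relations attached.
import cuid from 'cuid';
import { buildQueue } from '$lib/queues';

async function queueDeployment(application: any, type: string): Promise<string> {
	const build_id = cuid();
	// builder.ts reads job.data.build_id, job.data.id, job.data.settings, etc.
	await buildQueue.add(build_id, { ...application, build_id, type });
	return build_id;
}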
src/lib/queues/logger.ts (new file, 8 lines)
@@ -0,0 +1,8 @@
import { prisma } from '$lib/database';
import { dev } from '$app/env';

export default async function (job) {
	const { line, applicationId, buildId } = job.data;
	if (dev) console.debug(`[${applicationId}] ${line}`);
	await prisma.buildLog.create({ data: { line, buildId, time: Number(job.id), applicationId } });
}
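logger.ts persists `time: Number(job.id)`, which only makes sense if the producer sets the BullMQ jobId to something numeric. saveBuildLog is imported from $lib/common throughout this diff but its body is not shown; a plausible sketch under the assumption that it enqueues onto buildLogQueue with a timestamp jobId (the function name and option choice here are assumptions, not the real implementation):

// Sketch only; the real saveBuildLog lives in $lib/common.
import { buildLogQueue } from '$lib/queues';

async function saveBuildLogSketch({ line, buildId, applicationId }) {
	await buildLogQueue.add(
		'log',
		{ line, buildId, applicationId },
		{ jobId: `${Date.now()}` } // logger.ts converts this back with Number(job.id)
	);
}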
src/lib/queues/proxy.ts (new file, 64 lines)
@@ -0,0 +1,64 @@
import { getDomain } from '$lib/common';
import { prisma } from '$lib/database';
import { dockerInstance } from '$lib/docker';
import {
	checkContainer,
	configureCoolifyProxyOn,
	configureProxyForApplication,
	forceSSLOnApplication,
	reloadHaproxy,
	startCoolifyProxy
} from '$lib/haproxy';
import * as db from '$lib/database';

export default async function () {
	try {
		// Check destination containers and configure proxy if needed
		const destinationDockers = await prisma.destinationDocker.findMany({});
		for (const destination of destinationDockers) {
			if (destination.isCoolifyProxyUsed) {
				const docker = dockerInstance({ destinationDocker: destination });
				const containers = await docker.engine.listContainers();
				const configurations = containers.filter(
					(container) => container.Labels['coolify.managed']
				);
				for (const configuration of configurations) {
					const parsedConfiguration = JSON.parse(
						Buffer.from(configuration.Labels['coolify.configuration'], 'base64').toString()
					);
					if (configuration.Labels['coolify.type'] === 'standalone-application') {
						const { fqdn, applicationId, port, pullmergeRequestId } = parsedConfiguration;
						if (fqdn) {
							const domain = getDomain(fqdn);
							await configureProxyForApplication({
								domain,
								imageId: pullmergeRequestId
									? `${applicationId}-${pullmergeRequestId}`
									: applicationId,
								applicationId,
								port
							});
							const isHttps = fqdn.startsWith('https://');
							if (isHttps) await forceSSLOnApplication({ domain });
						}
					}
				}
			}
		}
		// Check Coolify FQDN and configure proxy if needed
		const { fqdn } = await db.listSettings();
		if (fqdn) {
			const domain = getDomain(fqdn);
			const found = await checkContainer('/var/run/docker.sock', 'coolify-haproxy');
			if (!found) await startCoolifyProxy('/var/run/docker.sock');
			await configureCoolifyProxyOn({ domain });
			const isHttps = fqdn.startsWith('https://');
			if (isHttps) await forceSSLOnApplication({ domain });
		}
	} catch (error) {
		console.log(error);
		throw error;
	} finally {
		// await reloadHaproxy('/var/run/docker.sock');
	}
}
src/lib/queues/ssl.ts (new file, 69 lines)
@@ -0,0 +1,69 @@
import { asyncExecShell, getDomain, getEngine } from '$lib/common';
import { prisma } from '$lib/database';
import { dockerInstance } from '$lib/docker';
import { forceSSLOnApplication } from '$lib/haproxy';
import * as db from '$lib/database';
import { dev } from '$app/env';

export default async function () {
	try {
		const destinationDockers = await prisma.destinationDocker.findMany({});
		for (const destination of destinationDockers) {
			if (destination.isCoolifyProxyUsed) {
				const docker = dockerInstance({ destinationDocker: destination });
				const containers = await docker.engine.listContainers();
				const configurations = containers.filter(
					(container) => container.Labels['coolify.managed']
				);
				for (const configuration of configurations) {
					const parsedConfiguration = JSON.parse(
						Buffer.from(configuration.Labels['coolify.configuration'], 'base64').toString()
					);
					if (configuration.Labels['coolify.type'] === 'standalone-application') {
						const { fqdn } = parsedConfiguration;
						if (fqdn) {
							const domain = getDomain(fqdn);
							const isHttps = fqdn.startsWith('https://');
							if (isHttps) {
								if (dev) {
									console.log('DEV MODE: SSL is enabled');
								} else {
									const host = getEngine(destination.engine);
									await asyncExecShell(
										`DOCKER_HOST=${host} docker run --rm --name certbot -p 9080:9080 -v "coolify-letsencrypt:/etc/letsencrypt" certbot/certbot --logs-dir /etc/letsencrypt/logs certonly --standalone --preferred-challenges http --http-01-address 0.0.0.0 --http-01-port 9080 -d ${domain} --agree-tos --non-interactive --register-unsafely-without-email`
									);
									const { stderr } = await asyncExecShell(
										`DOCKER_HOST=${host} docker run --rm --name bash -v "coolify-letsencrypt:/etc/letsencrypt" -v "coolify-ssl-certs:/app/ssl" alpine:latest cat /etc/letsencrypt/live/${domain}/fullchain.pem /etc/letsencrypt/live/${domain}/privkey.pem > /app/ssl/${domain}.pem`
									);
									if (stderr) throw new Error(stderr);
								}
							}
						}
					}
				}
			}
		}
		const { fqdn } = await db.listSettings();
		if (fqdn) {
			const domain = getDomain(fqdn);
			const isHttps = fqdn.startsWith('https://');
			if (isHttps) {
				if (dev) {
					console.log('DEV MODE: SSL is enabled');
				} else {
					await asyncExecShell(
						`docker run --rm --name certbot -p 9080:9080 -v "coolify-letsencrypt:/etc/letsencrypt" certbot/certbot --logs-dir /etc/letsencrypt/logs certonly --standalone --preferred-challenges http --http-01-address 0.0.0.0 --http-01-port 9080 -d ${domain} --agree-tos --non-interactive --register-unsafely-without-email`
					);

					const { stderr } = await asyncExecShell(
						`docker run --rm -v "coolify-letsencrypt:/etc/letsencrypt" -v "coolify-ssl-certs:/app/ssl" alpine:latest cat /etc/letsencrypt/live/${domain}/fullchain.pem /etc/letsencrypt/live/${domain}/privkey.pem > /app/ssl/${domain}.pem`
					);
					if (stderr) throw new Error(stderr);
				}
			}
		}
	} catch (error) {
		console.log(error);
		throw error;
	}
}
src/lib/queues/sslrenewal.ts (new file, 11 lines)
@@ -0,0 +1,11 @@
import { asyncExecShell } from '$lib/common';

export default async function () {
	try {
		return await asyncExecShell(
			`docker run --rm --name certbot-renewal -v "coolify-letsencrypt:/etc/letsencrypt" certbot/certbot --logs-dir /etc/letsencrypt/logs renew`
		);
	} catch (error) {
		throw error;
	}
}