diff --git a/apps/api/devTemplates.yaml b/apps/api/devTemplates.yaml index 84d457d02..c94309932 100644 --- a/apps/api/devTemplates.yaml +++ b/apps/api/devTemplates.yaml @@ -1771,8 +1771,11 @@ $$id-mysql: name: MySQL depends_on: [] - image: 'mysql:5.7' + image: 'bitnami/mysql:5.7' + imageArm : 'mysql:5.7' volumes: + - '$$id-mysql-data:/bitnami/mysql/data' + volumesArm: - '$$id-mysql-data:/var/lib/mysql' environment: - MYSQL_ROOT_PASSWORD=$$secret_mysql_root_password @@ -1780,7 +1783,6 @@ - MYSQL_DATABASE=$$config_mysql_database - MYSQL_USER=$$config_mysql_user - MYSQL_PASSWORD=$$secret_mysql_password - ports: [] variables: - id: $$config_wordpress_db_host name: WORDPRESS_DB_HOST diff --git a/apps/api/src/lib.ts b/apps/api/src/lib.ts index a5d5cff5a..6752745a0 100644 --- a/apps/api/src/lib.ts +++ b/apps/api/src/lib.ts @@ -1,13 +1,34 @@ import cuid from "cuid"; import { decrypt, encrypt, fixType, generatePassword, getDomain, prisma } from "./lib/common"; import { getTemplates } from "./lib/services"; -import { includeServices } from "./lib/services/common"; export async function migrateServicesToNewTemplate() { // This function migrates old hardcoded services to the new template based services try { let templates = await getTemplates() - const services: any = await prisma.service.findMany({ include: includeServices }) + const services: any = await prisma.service.findMany({ + include: { + destinationDocker: true, + persistentStorage: true, + serviceSecret: true, + serviceSetting: true, + minio: true, + plausibleAnalytics: true, + vscodeserver: true, + wordpress: true, + ghost: true, + meiliSearch: true, + umami: true, + hasura: true, + fider: true, + moodle: true, + appwrite: true, + glitchTip: true, + searxng: true, + weblate: true, + taiga: true, + } + }) for (const service of services) { const { id } = service if (!service.type) { diff --git a/apps/api/src/lib/common.ts b/apps/api/src/lib/common.ts index d497fa24c..3feb77d7c 100644 --- a/apps/api/src/lib/common.ts +++ b/apps/api/src/lib/common.ts @@ -17,8 +17,6 @@ import { day } from './dayjs'; import * as serviceFields from './services/serviceFields'; import { saveBuildLog } from './buildPacks/common'; import { scheduler } from './scheduler'; -import { supportedServiceTypesAndVersions } from './services/supportedVersions'; -import { includeServices } from './services/common'; export const version = '3.10.16'; export const isDev = process.env.NODE_ENV === 'development'; @@ -400,12 +398,6 @@ export function generateTimestamp(): string { return `${day().format('HH:mm:ss.SSS')}`; } -export async function listServicesWithIncludes(): Promise { - return await prisma.service.findMany({ - include: includeServices, - orderBy: { createdAt: 'desc' } - }); -} export const supportedDatabaseTypesAndVersions = [ { @@ -1452,7 +1444,12 @@ export async function getServiceFromDB({ const settings = await prisma.setting.findFirst(); const body = await prisma.service.findFirst({ where: { id, teams: { some: { id: teamId === '0' ? 
undefined : teamId } } }, - include: includeServices + include: { + destinationDocker: true, + persistentStorage: true, + serviceSecret: true, + serviceSetting: true, + } }); if (!body) { return null @@ -1469,22 +1466,6 @@ export async function getServiceFromDB({ return { ...body, settings }; } -export function getServiceImage(type: string): string { - const found = supportedServiceTypesAndVersions.find((t) => t.name === type); - if (found) { - return found.baseImage; - } - return ''; -} - -export function getServiceImages(type: string): string[] { - const found = supportedServiceTypesAndVersions.find((t) => t.name === type); - if (found) { - return found.images; - } - return []; -} - export function saveUpdateableFields(type: string, data: any) { const update = {}; if (type && serviceFields[type]) { @@ -1534,14 +1515,6 @@ export function fixType(type) { return type?.replaceAll(' ', '').toLowerCase() || null; } -export const getServiceMainPort = (service: string) => { - const serviceType = supportedServiceTypesAndVersions.find((s) => s.name === service); - if (serviceType) { - return serviceType.ports.main; - } - return null; -}; - export function makeLabelForServices(type) { return [ 'coolify.managed=true', diff --git a/apps/api/src/lib/services.ts b/apps/api/src/lib/services.ts index 546cc1edd..5461a8355 100644 --- a/apps/api/src/lib/services.ts +++ b/apps/api/src/lib/services.ts @@ -1,4 +1,4 @@ -import { createDirectories, getServiceFromDB, getServiceImage, getServiceMainPort, isDev, makeLabelForServices } from "./common"; +import { isDev } from "./common"; import fs from 'fs/promises'; export async function getTemplates() { let templates: any = []; @@ -141,21 +141,3 @@ export async function getTemplates() { // } return templates } -export async function defaultServiceConfigurations({ id, teamId }) { - const service = await getServiceFromDB({ id, teamId }); - const { destinationDockerId, destinationDocker, type, serviceSecret } = service; - - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort(type); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - - const image = getServiceImage(type); - let secrets = []; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - secrets.push(`${secret.name}=${secret.value}`); - }); - } - return { ...service, network, port, workdir, image, secrets } -} \ No newline at end of file diff --git a/apps/api/src/lib/services/common.ts b/apps/api/src/lib/services/common.ts index 08924cf81..fc1102ba1 100644 --- a/apps/api/src/lib/services/common.ts +++ b/apps/api/src/lib/services/common.ts @@ -2,27 +2,6 @@ import cuid from 'cuid'; import { encrypt, generatePassword, prisma } from '../common'; -export const includeServices: any = { - destinationDocker: true, - persistentStorage: true, - serviceSecret: true, - serviceSetting: true, - minio: true, - plausibleAnalytics: true, - vscodeserver: true, - wordpress: true, - ghost: true, - meiliSearch: true, - umami: true, - hasura: true, - fider: true, - moodle: true, - appwrite: true, - glitchTip: true, - searxng: true, - weblate: true, - taiga: true, -}; export async function configureServiceType({ id, type diff --git a/apps/api/src/lib/services/handlers.ts b/apps/api/src/lib/services/handlers.ts index f822f215f..38751321b 100644 --- a/apps/api/src/lib/services/handlers.ts +++ b/apps/api/src/lib/services/handlers.ts @@ -1,697 +1,41 @@ import type { FastifyReply, FastifyRequest } from 'fastify'; import fs from 
'fs/promises'; import yaml from 'js-yaml'; -import bcrypt from 'bcryptjs'; +import path from 'path'; +import { asyncSleep, ComposeFile, createDirectories, decrypt, defaultComposeConfiguration, errorHandler, executeDockerCmd, getServiceFromDB, isARM, makeLabelForServices, persistentVolumes, prisma } from '../common'; +import { parseAndFindServiceTemplates } from '../../routes/api/v1/services/handlers'; + import { ServiceStartStop } from '../../routes/api/v1/services/types'; -import { asyncSleep, ComposeFile, createDirectories, decrypt, defaultComposeConfiguration, errorHandler, executeDockerCmd, getDomain, getFreePublicPort, getServiceFromDB, getServiceImage, getServiceMainPort, isARM, isDev, makeLabelForServices, persistentVolumes, prisma } from '../common'; -import { defaultServiceConfigurations } from '../services'; import { OnlyId } from '../../types'; -import { parseAndFindServiceTemplates } from '../../routes/api/v1/services/handlers'; -import path from 'path'; -// export async function startService(request: FastifyRequest) { -// try { -// const { type } = request.params -// if (type === 'plausibleanalytics') { -// return await startPlausibleAnalyticsService(request) -// } -// if (type === 'nocodb') { -// return await startNocodbService(request) -// } -// if (type === 'minio') { -// return await startMinioService(request) -// } -// if (type === 'vscodeserver') { -// return await startVscodeService(request) -// } -// if (type === 'wordpress') { -// return await startWordpressService(request) -// } -// if (type === 'vaultwarden') { -// return await startVaultwardenService(request) -// } -// if (type === 'languagetool') { -// return await startLanguageToolService(request) -// } -// if (type === 'n8n') { -// return await startN8nService(request) -// } -// if (type === 'uptimekuma') { -// return await startUptimekumaService(request) -// } -// if (type === 'ghost') { -// return await startGhostService(request) -// } -// if (type === 'meilisearch') { -// return await startMeilisearchService(request) -// } -// if (type === 'umami') { -// return await startUmamiService(request) -// } -// if (type === 'hasura') { -// return await startHasuraService(request) -// } -// if (type === 'fider') { -// return await startFiderService(request) -// } -// if (type === 'moodle') { -// return await startMoodleService(request) -// } -// if (type === 'appwrite') { -// return await startAppWriteService(request) -// } -// if (type === 'glitchTip') { -// return await startGlitchTipService(request) -// } -// if (type === 'searxng') { -// return await startSearXNGService(request) -// } -// if (type === 'weblate') { -// return await startWeblateService(request) -// } -// if (type === 'taiga') { -// return await startTaigaService(request) -// } -// if (type === 'grafana') { -// return await startGrafanaService(request) -// } -// if (type === 'trilium') { -// return await startTriliumService(request) -// } - -// throw `Service type ${type} not supported.` -// } catch (error) { -// throw { status: 500, message: error?.message || error } -// } -// } export async function stopService(request: FastifyRequest) { - try { - return await stopServiceContainers(request) - } catch (error) { - throw { status: 500, message: error?.message || error } - } -} - -async function startPlausibleAnalyticsService(request: FastifyRequest) { - try { - const { id } = request.params - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - type, - version, - fqdn, - destinationDockerId, - 
destinationDocker, - serviceSecret, - persistentStorage, - exposePort, - plausibleAnalytics: { - id: plausibleDbId, - username, - email, - password, - postgresqlDatabase, - postgresqlPassword, - postgresqlUser, - secretKeyBase - } - } = service; - const image = getServiceImage(type); - - const config = { - plausibleAnalytics: { - image: `${image}:${version}`, - environmentVariables: { - ADMIN_USER_EMAIL: email, - ADMIN_USER_NAME: username, - ADMIN_USER_PWD: password, - BASE_URL: fqdn, - SECRET_KEY_BASE: secretKeyBase, - DISABLE_AUTH: 'false', - DISABLE_REGISTRATION: 'true', - DATABASE_URL: `postgresql://${postgresqlUser}:${postgresqlPassword}@${id}-postgresql:5432/${postgresqlDatabase}`, - CLICKHOUSE_DATABASE_URL: `http://${id}-clickhouse:8123/plausible` - } - }, - postgresql: { - volumes: [`${plausibleDbId}-postgresql-data:/bitnami/postgresql/`], - image: 'bitnami/postgresql:13.2.0', - environmentVariables: { - POSTGRESQL_PASSWORD: postgresqlPassword, - POSTGRESQL_USERNAME: postgresqlUser, - POSTGRESQL_DATABASE: postgresqlDatabase - } - }, - clickhouse: { - volumes: [`${plausibleDbId}-clickhouse-data:/var/lib/clickhouse`], - image: 'yandex/clickhouse-server:21.3.2.5', - environmentVariables: {}, - ulimits: { - nofile: { - soft: 262144, - hard: 262144 - } - } - } - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.plausibleAnalytics.environmentVariables[secret.name] = secret.value; - }); - } - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('plausibleanalytics'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - - const clickhouseConfigXml = ` - - - warning - true - - - - - - - - - - - - `; - const clickhouseUserConfigXml = ` - - - - 0 - 0 - - - `; - - const initQuery = 'CREATE DATABASE IF NOT EXISTS plausible;'; - const initScript = 'clickhouse client --queries-file /docker-entrypoint-initdb.d/init.query'; - await fs.writeFile(`${workdir}/clickhouse-config.xml`, clickhouseConfigXml); - await fs.writeFile(`${workdir}/clickhouse-user-config.xml`, clickhouseUserConfigXml); - await fs.writeFile(`${workdir}/init.query`, initQuery); - await fs.writeFile(`${workdir}/init-db.sh`, initScript); - - const Dockerfile = ` -FROM ${config.clickhouse.image} -COPY ./clickhouse-config.xml /etc/clickhouse-server/users.d/logging.xml -COPY ./clickhouse-user-config.xml /etc/clickhouse-server/config.d/logging.xml -COPY ./init.query /docker-entrypoint-initdb.d/init.query -COPY ./init-db.sh /docker-entrypoint-initdb.d/init-db.sh`; - - await fs.writeFile(`${workdir}/Dockerfile`, Dockerfile); - - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.plausibleAnalytics.image, - command: - 'sh -c "sleep 10 && /entrypoint.sh db createdb && /entrypoint.sh db migrate && /entrypoint.sh db init-admin && /entrypoint.sh run"', - environment: config.plausibleAnalytics.environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - depends_on: [`${id}-postgresql`, `${id}-clickhouse`], - labels: makeLabelForServices('plausibleAnalytics'), - ...defaultComposeConfiguration(network), - }, - [`${id}-postgresql`]: { - container_name: `${id}-postgresql`, - image: config.postgresql.image, - environment: config.postgresql.environmentVariables, - volumes: config.postgresql.volumes, - ...defaultComposeConfiguration(network), - }, - [`${id}-clickhouse`]: { - build: workdir, - container_name: `${id}-clickhouse`, - environment: config.clickhouse.environmentVariables, - volumes: config.clickhouse.volumes, - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startNocodbService(request: FastifyRequest) { try { const { id } = request.params; const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = - service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('nocodb'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - nocodb: { - image: `${image}:${version}`, - volumes: [`${id}-nc:/usr/app/data`], - environmentVariables: {} - } - - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.nocodb.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.nocodb.image, - volumes: config.nocodb.volumes, - environment: config.nocodb.environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - labels: makeLabelForServices('nocodb'), - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startMinioService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - type, - version, - fqdn, - destinationDockerId, - destinationDocker, - persistentStorage, - exposePort, - minio: { rootUser, rootUserPassword, apiFqdn }, - serviceSecret - } = service; - - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('minio'); - - const { service: { destinationDocker: { remoteEngine, engine, remoteIpAddress } } } = await prisma.minio.findUnique({ where: { serviceId: id }, include: { service: { include: { destinationDocker: true } } } }) - const publicPort = await getFreePublicPort({ id, remoteEngine, engine, remoteIpAddress }); - - const consolePort = 9001; - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - minio: { - image: `${image}:${version}`, - volumes: [`${id}-minio-data:/data`], - environmentVariables: { - MINIO_SERVER_URL: apiFqdn, - MINIO_DOMAIN: getDomain(fqdn), - MINIO_ROOT_USER: rootUser, - MINIO_ROOT_PASSWORD: rootUserPassword, - MINIO_BROWSER_REDIRECT_URL: fqdn - } - } - - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.minio.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.minio.image, - command: `server /data --console-address ":${consolePort}"`, - environment: config.minio.environmentVariables, - volumes: config.minio.volumes, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - labels: makeLabelForServices('minio'), - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - await prisma.minio.update({ where: { serviceId: id }, data: { publicPort } }); - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startVscodeService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - type, - version, - destinationDockerId, - destinationDocker, - serviceSecret, - persistentStorage, - exposePort, - vscodeserver: { password } - } = service; - - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('vscodeserver'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - vscodeserver: { - image: `${image}:${version}`, - volumes: [`${id}-vscodeserver-data:/home/coder`], - environmentVariables: { - PASSWORD: password - } - } - - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.vscodeserver.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.vscodeserver.image, - environment: config.vscodeserver.environmentVariables, - volumes: config.vscodeserver.volumes, - ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), - labels: makeLabelForServices('vscodeServer'), - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - - const changePermissionOn = persistentStorage.map((p) => p.path); - if (changePermissionOn.length > 0) { + const { destinationDockerId } = await getServiceFromDB({ id, teamId }); + if (destinationDockerId) { await executeDockerCmd({ - dockerId: destinationDocker.id, command: `docker exec -u root ${id} chown -R 1000:1000 ${changePermissionOn.join( - ' ' - )}` + dockerId: destinationDockerId, + command: `docker ps -a --filter 'label=com.docker.compose.project=${id}' --format {{.ID}}|xargs -r -n 1 docker stop -t 0` }) + await executeDockerCmd({ + dockerId: destinationDockerId, + command: `docker ps -a --filter 'label=com.docker.compose.project=${id}' --format {{.ID}}|xargs -r -n 1 docker rm --force` + }) + return {} } - return {} + throw { status: 500, message: 'Could not stop containers.' 
} } catch ({ status, message }) { return errorHandler({ status, message }) } } - -async function startWordpressService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - arch, - type, - version, - destinationDockerId, - serviceSecret, - destinationDocker, - persistentStorage, - exposePort, - wordpress: { - mysqlDatabase, - mysqlHost, - mysqlPort, - mysqlUser, - mysqlPassword, - extraConfig, - mysqlRootUser, - mysqlRootUserPassword, - ownMysql - } - } = service; - - const network = destinationDockerId && destinationDocker.network; - const image = getServiceImage(type); - const port = getServiceMainPort('wordpress'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const config = { - wordpress: { - image: `${image}:${version}`, - volumes: [`${id}-wordpress-data:/var/www/html`], - environmentVariables: { - WORDPRESS_DB_HOST: ownMysql ? `${mysqlHost}:${mysqlPort}` : `${id}-mysql`, - WORDPRESS_DB_USER: mysqlUser, - WORDPRESS_DB_PASSWORD: mysqlPassword, - WORDPRESS_DB_NAME: mysqlDatabase, - WORDPRESS_CONFIG_EXTRA: extraConfig - } - }, - mysql: { - image: `bitnami/mysql:5.7`, - volumes: [`${id}-mysql-data:/bitnami/mysql/data`], - environmentVariables: { - MYSQL_ROOT_PASSWORD: mysqlRootUserPassword, - MYSQL_ROOT_USER: mysqlRootUser, - MYSQL_USER: mysqlUser, - MYSQL_PASSWORD: mysqlPassword, - MYSQL_DATABASE: mysqlDatabase - } - } - }; - if (isARM(arch)) { - config.mysql.image = 'mysql:5.7' - config.mysql.volumes = [`${id}-mysql-data:/var/lib/mysql`] - } - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.wordpress.environmentVariables[secret.name] = secret.value; - }); - } - - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.wordpress.image, - environment: config.wordpress.environmentVariables, - volumes: config.wordpress.volumes, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - labels: makeLabelForServices('wordpress'), - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - if (!ownMysql) { - composeFile.services[id].depends_on = [`${id}-mysql`]; - composeFile.services[`${id}-mysql`] = { - container_name: `${id}-mysql`, - image: config.mysql.image, - volumes: config.mysql.volumes, - environment: config.mysql.environmentVariables, - ...defaultComposeConfiguration(network), - }; - } - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startVaultwardenService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = - service; - - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('vaultwarden'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - vaultwarden: { - image: `${image}:${version}`, - volumes: [`${id}-vaultwarden-data:/data/`], - environmentVariables: {} - } - - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.vaultwarden.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.vaultwarden.image, - environment: config.vaultwarden.environmentVariables, - volumes: config.vaultwarden.volumes, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - labels: makeLabelForServices('vaultWarden'), - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startLanguageToolService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = - service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('languagetool'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - languagetool: { - image: `${image}:${version}`, - volumes: [`${id}-ngrams:/ngrams`], - environmentVariables: {} - } - }; - - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.languagetool.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.languagetool.image, - environment: config.languagetool.environmentVariables, - ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), - volumes: config.languagetool.volumes, - labels: makeLabelForServices('languagetool'), - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - export async function startService(request: FastifyRequest) { try { const { id } = request.params; const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); + const arm = isARM(service.arch) + console.log(arm) const { type, destinationDockerId, destinationDocker, persistentStorage } = service; @@ -701,14 +45,26 @@ export async function startService(request: FastifyRequest) { const config = {}; for (const service in template.services) { let newEnvironments = [] - if (template.services[service]?.environment?.length > 0) { - for (const environment of template.services[service].environment) { - const [env, value] = environment.split("="); - if (!value.startsWith('$$secret') && value !== '') { - newEnvironments.push(`${env}=${value}`) + if (arm) { + if (template.services[service]?.environmentArm?.length > 0) { + for (const environment of template.services[service].environmentArm) { + const [env, value] = environment.split("="); + if (!value.startsWith('$$secret') && value !== '') { + newEnvironments.push(`${env}=${value}`) + } + } + } + } else { + if (template.services[service]?.environment?.length > 0) { + for (const environment of template.services[service].environment) { + const [env, value] = environment.split("="); + 
if (!value.startsWith('$$secret') && value !== '') { + newEnvironments.push(`${env}=${value}`) + } } } } + const secrets = await prisma.serviceSecret.findMany({ where: { serviceId: id } }) for (const secret of secrets) { const { name, value } = secret @@ -725,10 +81,10 @@ export async function startService(request: FastifyRequest) { build: template.services[service].build || undefined, command: template.services[service].command, entrypoint: template.services[service]?.entrypoint, - image: template.services[service].image, + image: arm ? template.services[service].imageArm : template.services[service].image, expose: template.services[service].ports, // ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), - volumes: template.services[service].volumes, + volumes: arm ? template.services[service].volumesArm : template.services[service].volumes, environment: newEnvironments, depends_on: template.services[service]?.depends_on, ulimits: template.services[service]?.ulimits, @@ -777,1157 +133,6 @@ export async function startService(request: FastifyRequest) { return errorHandler({ status, message }) } } - -async function startUptimekumaService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = - service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('uptimekuma'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - uptimekuma: { - image: `${image}:${version}`, - volumes: [`${id}-uptimekuma:/app/data`], - environmentVariables: {} - } - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.uptimekuma.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.uptimekuma.image, - volumes: config.uptimekuma.volumes, - environment: config.uptimekuma.environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - labels: makeLabelForServices('uptimekuma'), - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startGhostService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - type, - version, - destinationDockerId, - destinationDocker, - serviceSecret, - persistentStorage, - exposePort, - fqdn, - ghost: { - defaultEmail, - defaultPassword, - mariadbRootUser, - mariadbRootUserPassword, - mariadbDatabase, - mariadbPassword, - mariadbUser - } - } = service; - const network = destinationDockerId && destinationDocker.network; - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - const domain = getDomain(fqdn); - const port = getServiceMainPort('ghost'); - const isHttps = fqdn.startsWith('https://'); - const config = { - ghost: { - image: `${image}:${version}`, - volumes: [`${id}-ghost:/bitnami/ghost`], - environmentVariables: { - url: fqdn, - GHOST_HOST: domain, - GHOST_ENABLE_HTTPS: isHttps ? 'yes' : 'no', - GHOST_EMAIL: defaultEmail, - GHOST_PASSWORD: defaultPassword, - GHOST_DATABASE_HOST: `${id}-mariadb`, - GHOST_DATABASE_USER: mariadbUser, - GHOST_DATABASE_PASSWORD: mariadbPassword, - GHOST_DATABASE_NAME: mariadbDatabase, - GHOST_DATABASE_PORT_NUMBER: 3306 - } - }, - mariadb: { - image: `bitnami/mariadb:latest`, - volumes: [`${id}-mariadb:/bitnami/mariadb`], - environmentVariables: { - MARIADB_USER: mariadbUser, - MARIADB_PASSWORD: mariadbPassword, - MARIADB_DATABASE: mariadbDatabase, - MARIADB_ROOT_USER: mariadbRootUser, - MARIADB_ROOT_PASSWORD: mariadbRootUserPassword - } - } - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.ghost.environmentVariables[secret.name] = secret.value; - }); - } - - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.ghost.image, - volumes: config.ghost.volumes, - environment: config.ghost.environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - labels: makeLabelForServices('ghost'), - depends_on: [`${id}-mariadb`], - ...defaultComposeConfiguration(network), - }, - [`${id}-mariadb`]: { - container_name: `${id}-mariadb`, - image: config.mariadb.image, - volumes: config.mariadb.volumes, - environment: config.mariadb.environmentVariables, - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startMeilisearchService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - meiliSearch: { masterKey } - } = service; - const { type, version, destinationDockerId, destinationDocker, - serviceSecret, exposePort, persistentStorage } = service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('meilisearch'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - meilisearch: { - image: `${image}:${version}`, - volumes: [`${id}-datams:/meili_data/data.ms`, `${id}-data:/meili_data `], - environmentVariables: { - MEILI_MASTER_KEY: masterKey - } - } - }; - - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.meilisearch.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.meilisearch.image, - environment: config.meilisearch.environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - volumes: config.meilisearch.volumes, - labels: makeLabelForServices('meilisearch'), - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startUmamiService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - type, - version, - destinationDockerId, - destinationDocker, - serviceSecret, - persistentStorage, - exposePort, - umami: { - umamiAdminPassword, - postgresqlUser, - postgresqlPassword, - postgresqlDatabase, - hashSalt - } - } = service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('umami'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - umami: { - image: `${image}:${version}`, - environmentVariables: { - DATABASE_URL: `postgresql://${postgresqlUser}:${postgresqlPassword}@${id}-postgresql:5432/${postgresqlDatabase}`, - DATABASE_TYPE: 'postgresql', - HASH_SALT: hashSalt - } - }, - postgresql: { - image: 'postgres:12-alpine', - volumes: [`${id}-postgresql-data:/var/lib/postgresql/data`], - environmentVariables: { - POSTGRES_USER: postgresqlUser, - POSTGRES_PASSWORD: postgresqlPassword, - POSTGRES_DB: postgresqlDatabase - } - } - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.umami.environmentVariables[secret.name] = secret.value; - }); - } - - const initDbSQL = ` - -- CreateTable -CREATE TABLE "account" ( - "user_id" SERIAL NOT NULL, - "username" VARCHAR(255) NOT NULL, - "password" VARCHAR(60) NOT NULL, - "is_admin" BOOLEAN NOT NULL DEFAULT false, - "created_at" TIMESTAMPTZ(6) DEFAULT CURRENT_TIMESTAMP, - "updated_at" TIMESTAMPTZ(6) DEFAULT CURRENT_TIMESTAMP, - - PRIMARY KEY ("user_id") -); - --- CreateTable -CREATE TABLE "event" ( - "event_id" SERIAL NOT NULL, - "website_id" INTEGER NOT NULL, - "session_id" INTEGER NOT NULL, - "created_at" TIMESTAMPTZ(6) DEFAULT CURRENT_TIMESTAMP, - "url" VARCHAR(500) NOT NULL, - "event_type" VARCHAR(50) NOT NULL, - "event_value" VARCHAR(50) NOT NULL, - - PRIMARY KEY ("event_id") -); - --- CreateTable -CREATE TABLE "pageview" ( - "view_id" SERIAL NOT NULL, - "website_id" INTEGER NOT NULL, - "session_id" INTEGER NOT NULL, - "created_at" TIMESTAMPTZ(6) DEFAULT CURRENT_TIMESTAMP, - "url" VARCHAR(500) NOT NULL, - "referrer" VARCHAR(500), - - PRIMARY KEY ("view_id") -); - --- CreateTable -CREATE TABLE "session" ( - "session_id" SERIAL NOT NULL, - "session_uuid" UUID NOT NULL, - "website_id" INTEGER NOT NULL, - "created_at" TIMESTAMPTZ(6) DEFAULT CURRENT_TIMESTAMP, - "hostname" VARCHAR(100), - "browser" VARCHAR(20), - "os" VARCHAR(20), - "device" VARCHAR(20), - "screen" VARCHAR(11), - "language" VARCHAR(35), - "country" CHAR(2), - - PRIMARY KEY ("session_id") -); - --- CreateTable -CREATE TABLE "website" ( - "website_id" SERIAL NOT NULL, - "website_uuid" UUID NOT NULL, - "user_id" INTEGER NOT NULL, - "name" VARCHAR(100) NOT NULL, - "domain" VARCHAR(500), - "share_id" VARCHAR(64), - "created_at" 
TIMESTAMPTZ(6) DEFAULT CURRENT_TIMESTAMP, - - PRIMARY KEY ("website_id") -); - --- CreateIndex -CREATE UNIQUE INDEX "account.username_unique" ON "account"("username"); - --- CreateIndex -CREATE INDEX "event_created_at_idx" ON "event"("created_at"); - --- CreateIndex -CREATE INDEX "event_session_id_idx" ON "event"("session_id"); - --- CreateIndex -CREATE INDEX "event_website_id_idx" ON "event"("website_id"); - --- CreateIndex -CREATE INDEX "pageview_created_at_idx" ON "pageview"("created_at"); - --- CreateIndex -CREATE INDEX "pageview_session_id_idx" ON "pageview"("session_id"); - --- CreateIndex -CREATE INDEX "pageview_website_id_created_at_idx" ON "pageview"("website_id", "created_at"); - --- CreateIndex -CREATE INDEX "pageview_website_id_idx" ON "pageview"("website_id"); - --- CreateIndex -CREATE INDEX "pageview_website_id_session_id_created_at_idx" ON "pageview"("website_id", "session_id", "created_at"); - --- CreateIndex -CREATE UNIQUE INDEX "session.session_uuid_unique" ON "session"("session_uuid"); - --- CreateIndex -CREATE INDEX "session_created_at_idx" ON "session"("created_at"); - --- CreateIndex -CREATE INDEX "session_website_id_idx" ON "session"("website_id"); - --- CreateIndex -CREATE UNIQUE INDEX "website.website_uuid_unique" ON "website"("website_uuid"); - --- CreateIndex -CREATE UNIQUE INDEX "website.share_id_unique" ON "website"("share_id"); - --- CreateIndex -CREATE INDEX "website_user_id_idx" ON "website"("user_id"); - --- AddForeignKey -ALTER TABLE "event" ADD FOREIGN KEY ("session_id") REFERENCES "session"("session_id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "event" ADD FOREIGN KEY ("website_id") REFERENCES "website"("website_id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "pageview" ADD FOREIGN KEY ("session_id") REFERENCES "session"("session_id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "pageview" ADD FOREIGN KEY ("website_id") REFERENCES "website"("website_id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "session" ADD FOREIGN KEY ("website_id") REFERENCES "website"("website_id") ON DELETE CASCADE ON UPDATE CASCADE; - --- AddForeignKey -ALTER TABLE "website" ADD FOREIGN KEY ("user_id") REFERENCES "account"("user_id") ON DELETE CASCADE ON UPDATE CASCADE; - - insert into account (username, password, is_admin) values ('admin', '${bcrypt.hashSync( - umamiAdminPassword, - 10 - )}', true);`; - await fs.writeFile(`${workdir}/schema.postgresql.sql`, initDbSQL); - const Dockerfile = ` - FROM ${config.postgresql.image} - COPY ./schema.postgresql.sql /docker-entrypoint-initdb.d/schema.postgresql.sql`; - await fs.writeFile(`${workdir}/Dockerfile`, Dockerfile); - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.umami.image, - environment: config.umami.environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - labels: makeLabelForServices('umami'), - depends_on: [`${id}-postgresql`], - ...defaultComposeConfiguration(network), - }, - [`${id}-postgresql`]: { - build: workdir, - container_name: `${id}-postgresql`, - environment: config.postgresql.environmentVariables, - volumes: config.postgresql.volumes, - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startHasuraService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - type, - version, - destinationDockerId, - destinationDocker, - persistentStorage, - serviceSecret, - exposePort, - hasura: { postgresqlUser, postgresqlPassword, postgresqlDatabase } - } = service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('hasura'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - hasura: { - image: `${image}:${version}`, - environmentVariables: { - HASURA_GRAPHQL_METADATA_DATABASE_URL: `postgresql://${postgresqlUser}:${postgresqlPassword}@${id}-postgresql:5432/${postgresqlDatabase}` - } - }, - postgresql: { - image: 'postgres:12-alpine', - volumes: [`${id}-postgresql-data:/var/lib/postgresql/data`], - environmentVariables: { - POSTGRES_USER: postgresqlUser, - POSTGRES_PASSWORD: postgresqlPassword, - POSTGRES_DB: postgresqlDatabase - } - } - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.hasura.environmentVariables[secret.name] = secret.value; - }); - } - - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.hasura.image, - environment: config.hasura.environmentVariables, - labels: makeLabelForServices('hasura'), - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - depends_on: [`${id}-postgresql`], - ...defaultComposeConfiguration(network), - }, - [`${id}-postgresql`]: { - image: config.postgresql.image, - container_name: `${id}-postgresql`, - environment: config.postgresql.environmentVariables, - volumes: config.postgresql.volumes, - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startFiderService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - type, - version, - fqdn, - destinationDockerId, - destinationDocker, - serviceSecret, - persistentStorage, - exposePort, - fider: { - postgresqlUser, - postgresqlPassword, - postgresqlDatabase, - jwtSecret, - emailNoreply, - emailMailgunApiKey, - emailMailgunDomain, - emailMailgunRegion, - emailSmtpHost, - emailSmtpPort, - emailSmtpUser, - emailSmtpPassword, - emailSmtpEnableStartTls - } - } = service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('fider'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - const config = { - fider: { - image: `${image}:${version}`, - environmentVariables: { - BASE_URL: fqdn, - DATABASE_URL: `postgresql://${postgresqlUser}:${postgresqlPassword}@${id}-postgresql:5432/${postgresqlDatabase}?sslmode=disable`, - JWT_SECRET: `${jwtSecret.replace(/\$/g, '$$$')}`, - EMAIL_NOREPLY: emailNoreply, - EMAIL_MAILGUN_API: emailMailgunApiKey, - EMAIL_MAILGUN_REGION: emailMailgunRegion, - EMAIL_MAILGUN_DOMAIN: emailMailgunDomain, - EMAIL_SMTP_HOST: emailSmtpHost, - EMAIL_SMTP_PORT: emailSmtpPort, - EMAIL_SMTP_USER: emailSmtpUser, - EMAIL_SMTP_PASSWORD: emailSmtpPassword, - EMAIL_SMTP_ENABLE_STARTTLS: emailSmtpEnableStartTls - } - }, - postgresql: { - image: 'postgres:12-alpine', - volumes: [`${id}-postgresql-data:/var/lib/postgresql/data`], - environmentVariables: { - POSTGRES_USER: postgresqlUser, - POSTGRES_PASSWORD: postgresqlPassword, - POSTGRES_DB: postgresqlDatabase - } - } - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.fider.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.fider.image, - environment: config.fider.environmentVariables, - labels: makeLabelForServices('fider'), - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - depends_on: [`${id}-postgresql`], - ...defaultComposeConfiguration(network), - }, - [`${id}-postgresql`]: { - image: config.postgresql.image, - container_name: `${id}-postgresql`, - environment: config.postgresql.environmentVariables, - volumes: config.postgresql.volumes, - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startAppWriteService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const { version, fqdn, destinationDocker, secrets, exposePort, network, port, workdir, image, appwrite } = await defaultServiceConfigurations({ id, teamId }) - - const { - opensslKeyV1, - executorSecret, - mariadbHost, - mariadbPort, - mariadbUser, - mariadbPassword, - mariadbRootUser, - mariadbRootUserPassword, - mariadbDatabase - } = appwrite; - - const dockerCompose = { - [id]: { - image: `${image}:${version}`, - container_name: id, - labels: makeLabelForServices('appwrite'), - ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), - volumes: [ - `${id}-uploads:/storage/uploads:rw`, - `${id}-cache:/storage/cache:rw`, - `${id}-config:/storage/config:rw`, - `${id}-certificates:/storage/certificates:rw`, - `${id}-functions:/storage/functions:rw` - ], - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - `${id}-influxdb`, - ], - environment: [ - "_APP_ENV=production", - "_APP_LOCALE=en", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_DOMAIN=${fqdn}`, - `_APP_DOMAIN_TARGET=${fqdn}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `_APP_INFLUXDB_HOST=${id}-influxdb`, - "_APP_INFLUXDB_PORT=8086", - `_APP_EXECUTOR_SECRET=${executorSecret}`, - `_APP_EXECUTOR_HOST=http://${id}-executor/v1`, - `_APP_STATSD_HOST=${id}-telegraf`, - "_APP_STATSD_PORT=8125", - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-realtime`]: { - image: `${image}:${version}`, - container_name: `${id}-realtime`, - entrypoint: "realtime", - labels: makeLabelForServices('appwrite'), - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker-audits`]: { - image: `${image}:${version}`, - container_name: `${id}-worker-audits`, - labels: makeLabelForServices('appwrite'), - entrypoint: "worker-audits", - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `_APP_DB_HOST=${mariadbHost}`, - 
`_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker-webhooks`]: { - image: `${image}:${version}`, - container_name: `${id}-worker-webhooks`, - labels: makeLabelForServices('appwrite'), - entrypoint: "worker-webhooks", - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker-deletes`]: { - image: `${image}:${version}`, - container_name: `${id}-worker-deletes`, - labels: makeLabelForServices('appwrite'), - entrypoint: "worker-deletes", - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - ], - volumes: [ - `${id}-uploads:/storage/uploads:rw`, - `${id}-cache:/storage/cache:rw`, - `${id}-config:/storage/config:rw`, - `${id}-certificates:/storage/certificates:rw`, - `${id}-functions:/storage/functions:rw`, - `${id}-builds:/storage/builds:rw`, - ], - "environment": [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `_APP_EXECUTOR_SECRET=${executorSecret}`, - `_APP_EXECUTOR_HOST=http://${id}-executor/v1`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker-databases`]: { - image: `${image}:${version}`, - container_name: `${id}-worker-databases`, - labels: makeLabelForServices('appwrite'), - entrypoint: "worker-databases", - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker-builds`]: { - image: `${image}:${version}`, - container_name: `${id}-worker-builds`, - labels: makeLabelForServices('appwrite'), - entrypoint: "worker-builds", - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_EXECUTOR_SECRET=${executorSecret}`, - `_APP_EXECUTOR_HOST=http://${id}-executor/v1`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker-certificates`]: { - image: `${image}:${version}`, - container_name: `${id}-worker-certificates`, - labels: makeLabelForServices('appwrite'), - entrypoint: "worker-certificates", - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - ], - volumes: [ - `${id}-config:/storage/config:rw`, - `${id}-certificates:/storage/certificates:rw`, - ], - environment: [ - "_APP_ENV=production", - 
`_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_DOMAIN=${fqdn}`, - `_APP_DOMAIN_TARGET=${fqdn}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker-functions`]: { - image: `${image}:${version}`, - container_name: `${id}-worker-functions`, - labels: makeLabelForServices('appwrite'), - entrypoint: "worker-functions", - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - `${id}-executor` - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `_APP_EXECUTOR_SECRET=${executorSecret}`, - `_APP_EXECUTOR_HOST=http://${id}-executor/v1`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-executor`]: { - image: `${image}:${version}`, - container_name: `${id}-executor`, - labels: makeLabelForServices('appwrite'), - entrypoint: "executor", - stop_signal: "SIGINT", - volumes: [ - `${id}-functions:/storage/functions:rw`, - `${id}-builds:/storage/builds:rw`, - "/var/run/docker.sock:/var/run/docker.sock", - "/tmp:/tmp:rw" - ], - depends_on: [ - `${id}-mariadb`, - `${id}-redis`, - `${id}` - ], - environment: [ - "_APP_ENV=production", - `_APP_EXECUTOR_SECRET=${executorSecret}`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker-mails`]: { - image: `${image}:${version}`, - container_name: `${id}-worker-mails`, - labels: makeLabelForServices('appwrite'), - entrypoint: "worker-mails", - depends_on: [ - `${id}-redis`, - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker-messaging`]: { - image: `${image}:${version}`, - container_name: `${id}-worker-messaging`, - labels: makeLabelForServices('appwrite'), - entrypoint: "worker-messaging", - depends_on: [ - `${id}-redis`, - ], - environment: [ - "_APP_ENV=production", - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-maintenance`]: { - image: `${image}:${version}`, - container_name: `${id}-maintenance`, - labels: makeLabelForServices('appwrite'), - entrypoint: "maintenance", - depends_on: [ - `${id}-redis`, - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_DOMAIN=${fqdn}`, - `_APP_DOMAIN_TARGET=${fqdn}`, - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-schedule`]: { - image: `${image}:${version}`, - container_name: `${id}-schedule`, - labels: makeLabelForServices('appwrite'), - entrypoint: "schedule", - 
depends_on: [ - `${id}-redis`, - ], - environment: [ - "_APP_ENV=production", - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-mariadb`]: { - image: "mariadb:10.7", - container_name: `${id}-mariadb`, - labels: makeLabelForServices('appwrite'), - volumes: [ - `${id}-mariadb:/var/lib/mysql:rw` - ], - environment: [ - `MYSQL_ROOT_USER=${mariadbRootUser}`, - `MYSQL_ROOT_PASSWORD=${mariadbRootUserPassword}`, - `MYSQL_USER=${mariadbUser}`, - `MYSQL_PASSWORD=${mariadbPassword}`, - `MYSQL_DATABASE=${mariadbDatabase}`, - `OPEN_RUNTIMES_NETWORK=${network}`, - ], - command: "mysqld --innodb-flush-method=fsync", - ...defaultComposeConfiguration(network), - }, - [`${id}-redis`]: { - image: "redis:6.2-alpine", - container_name: `${id}-redis`, - command: `redis-server --maxmemory 512mb --maxmemory-policy allkeys-lru --maxmemory-samples 5\n`, - volumes: [ - `${id}-redis:/data:rw` - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-usage-timeseries`]: { - image: `${image}:${version}`, - container_name: `${id}-usage`, - labels: makeLabelForServices('appwrite'), - entrypoint: "usage --type=timeseries", - depends_on: [ - `${id}-mariadb`, - `${id}-influxdb`, - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `_APP_INFLUXDB_HOST=${id}-influxdb`, - "_APP_INFLUXDB_PORT=8086", - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-usage-database`]: { - image: `${image}:${version}`, - container_name: `${id}-usage-database`, - labels: makeLabelForServices('appwrite'), - entrypoint: "usage --type=database", - depends_on: [ - `${id}-mariadb`, - `${id}-influxdb`, - ], - environment: [ - "_APP_ENV=production", - `_APP_OPENSSL_KEY_V1=${opensslKeyV1}`, - `_APP_DB_HOST=${mariadbHost}`, - `_APP_DB_PORT=${mariadbPort}`, - `_APP_DB_SCHEMA=${mariadbDatabase}`, - `_APP_DB_USER=${mariadbUser}`, - `_APP_DB_PASS=${mariadbPassword}`, - `_APP_INFLUXDB_HOST=${id}-influxdb`, - "_APP_INFLUXDB_PORT=8086", - `_APP_REDIS_HOST=${id}-redis`, - "_APP_REDIS_PORT=6379", - `OPEN_RUNTIMES_NETWORK=${network}`, - ...secrets - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-influxdb`]: { - image: "appwrite/influxdb:1.5.0", - container_name: `${id}-influxdb`, - volumes: [ - `${id}-influxdb:/var/lib/influxdb:rw` - ], - ...defaultComposeConfiguration(network), - }, - [`${id}-telegraf`]: { - image: "appwrite/telegraf:1.4.0", - container_name: `${id}-telegraf`, - environment: [ - `_APP_INFLUXDB_HOST=${id}-influxdb`, - "_APP_INFLUXDB_PORT=8086", - `OPEN_RUNTIMES_NETWORK=${network}`, - ], - ...defaultComposeConfiguration(network), - } - }; - const composeFile: any = { - version: '3.8', - services: dockerCompose, - networks: { - [network]: { - external: true - } - }, - volumes: { - [`${id}-uploads`]: { - name: `${id}-uploads` - }, - [`${id}-cache`]: { - name: `${id}-cache` - }, - [`${id}-config`]: { - name: `${id}-config` - }, - [`${id}-certificates`]: { - name: `${id}-certificates` - }, - [`${id}-functions`]: { - name: `${id}-functions` - }, - [`${id}-builds`]: { - name: `${id}-builds` - }, - [`${id}-mariadb`]: { - name: `${id}-mariadb` - }, - [`${id}-redis`]: { - name: `${id}-redis` 
- }, - [`${id}-influxdb`]: { - name: `${id}-influxdb` - } - } - - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} async function startServiceContainers(dockerId, composeFileDestination) { try { await executeDockerCmd({ dockerId, command: `docker compose -f ${composeFileDestination} pull` }) @@ -1938,905 +143,6 @@ async function startServiceContainers(dockerId, composeFileDestination) { await asyncSleep(1000); await executeDockerCmd({ dockerId, command: `docker compose -f ${composeFileDestination} up -d` }) } -async function stopServiceContainers(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const { destinationDockerId } = await getServiceFromDB({ id, teamId }); - if (destinationDockerId) { - await executeDockerCmd({ - dockerId: destinationDockerId, - command: `docker ps -a --filter 'label=com.docker.compose.project=${id}' --format {{.ID}}|xargs -r -n 1 docker stop -t 0` - }) - await executeDockerCmd({ - dockerId: destinationDockerId, - command: `docker ps -a --filter 'label=com.docker.compose.project=${id}' --format {{.ID}}|xargs -r -n 1 docker rm --force` - }) - return {} - } - throw { status: 500, message: 'Could not stop containers.' } - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} -async function startMoodleService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - type, - version, - fqdn, - destinationDockerId, - destinationDocker, - serviceSecret, - persistentStorage, - exposePort, - moodle: { - defaultUsername, - defaultPassword, - defaultEmail, - mariadbRootUser, - mariadbRootUserPassword, - mariadbDatabase, - mariadbPassword, - mariadbUser - } - } = service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('moodle'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - const config = { - moodle: { - image: `${image}:${version}`, - volumes: [`${id}-data:/bitnami/moodle`], - environmentVariables: { - MOODLE_USERNAME: defaultUsername, - MOODLE_PASSWORD: defaultPassword, - MOODLE_EMAIL: defaultEmail, - MOODLE_DATABASE_HOST: `${id}-mariadb`, - MOODLE_DATABASE_USER: mariadbUser, - MOODLE_DATABASE_PASSWORD: mariadbPassword, - MOODLE_DATABASE_NAME: mariadbDatabase, - MOODLE_REVERSEPROXY: 'yes' - } - }, - mariadb: { - image: 'bitnami/mariadb:latest', - volumes: [`${id}-mariadb-data:/bitnami/mariadb`], - environmentVariables: { - MARIADB_USER: mariadbUser, - MARIADB_PASSWORD: mariadbPassword, - MARIADB_DATABASE: mariadbDatabase, - MARIADB_ROOT_USER: mariadbRootUser, - MARIADB_ROOT_PASSWORD: mariadbRootUserPassword - } - } - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.moodle.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.moodle.image, - environment: config.moodle.environmentVariables, - volumes: config.moodle.volumes, - labels: 
makeLabelForServices('moodle'), - ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), - depends_on: [`${id}-mariadb`], - ...defaultComposeConfiguration(network), - }, - [`${id}-mariadb`]: { - container_name: `${id}-mariadb`, - image: config.mariadb.image, - environment: config.mariadb.environmentVariables, - volumes: config.mariadb.volumes, - ...defaultComposeConfiguration(network), - depends_on: [] - } - - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startGlitchTipService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - type, - version, - fqdn, - destinationDockerId, - destinationDocker, - serviceSecret, - persistentStorage, - exposePort, - glitchTip: { - postgresqlDatabase, - postgresqlPassword, - postgresqlUser, - secretKeyBase, - defaultEmail, - defaultUsername, - defaultPassword, - defaultFromEmail, - emailSmtpHost, - emailSmtpPort, - emailSmtpUser, - emailSmtpPassword, - emailSmtpUseTls, - emailSmtpUseSsl, - emailBackend, - mailgunApiKey, - sendgridApiKey, - enableOpenUserRegistration, - } - } = service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('glitchTip'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - glitchTip: { - image: `${image}:${version}`, - environmentVariables: { - PORT: port, - GLITCHTIP_DOMAIN: fqdn, - SECRET_KEY: secretKeyBase, - DATABASE_URL: `postgresql://${postgresqlUser}:${postgresqlPassword}@${id}-postgresql:5432/${postgresqlDatabase}`, - REDIS_URL: `redis://${id}-redis:6379/0`, - DEFAULT_FROM_EMAIL: defaultFromEmail, - EMAIL_HOST: emailSmtpHost, - EMAIL_PORT: emailSmtpPort, - EMAIL_HOST_USER: emailSmtpUser, - EMAIL_HOST_PASSWORD: emailSmtpPassword, - EMAIL_USE_TLS: emailSmtpUseTls ? 'True' : 'False', - EMAIL_USE_SSL: emailSmtpUseSsl ? 'True' : 'False', - EMAIL_BACKEND: emailBackend, - MAILGUN_API_KEY: mailgunApiKey, - SENDGRID_API_KEY: sendgridApiKey, - ENABLE_OPEN_USER_REGISTRATION: enableOpenUserRegistration, - DJANGO_SUPERUSER_EMAIL: defaultEmail, - DJANGO_SUPERUSER_USERNAME: defaultUsername, - DJANGO_SUPERUSER_PASSWORD: defaultPassword, - } - }, - postgresql: { - image: 'postgres:14-alpine', - volumes: [`${id}-postgresql-data:/var/lib/postgresql/data`], - environmentVariables: { - POSTGRES_USER: postgresqlUser, - POSTGRES_PASSWORD: postgresqlPassword, - POSTGRES_DB: postgresqlDatabase - } - }, - redis: { - image: 'redis:7-alpine', - volumes: [`${id}-redis-data:/data`], - } - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.glitchTip.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.glitchTip.image, - environment: config.glitchTip.environmentVariables, - labels: makeLabelForServices('glitchTip'), - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - depends_on: [`${id}-postgresql`, `${id}-redis`], - ...defaultComposeConfiguration(network), - }, - [`${id}-worker`]: { - container_name: `${id}-worker`, - image: config.glitchTip.image, - command: './bin/run-celery-with-beat.sh', - environment: config.glitchTip.environmentVariables, - depends_on: [`${id}-postgresql`, `${id}-redis`], - ...defaultComposeConfiguration(network), - }, - [`${id}-setup`]: { - container_name: `${id}-setup`, - image: config.glitchTip.image, - command: 'sh -c "(./manage.py migrate || true) && (./manage.py createsuperuser --noinput || true)"', - environment: config.glitchTip.environmentVariables, - networks: [network], - restart: "no", - depends_on: [`${id}-postgresql`, `${id}-redis`] - }, - [`${id}-postgresql`]: { - image: config.postgresql.image, - container_name: `${id}-postgresql`, - environment: config.postgresql.environmentVariables, - volumes: config.postgresql.volumes, - ...defaultComposeConfiguration(network), - }, - [`${id}-redis`]: { - image: config.redis.image, - container_name: `${id}-redis`, - volumes: config.redis.volumes, - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} pull` }) - await executeDockerCmd({ dockerId: destinationDocker.id, command: `docker compose -f ${composeFileDestination} up --build -d` }) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startSearXNGService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage, fqdn, searxng: { secretKey, redisPassword } } = - service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('searxng'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - searxng: { - image: `${image}:${version}`, - volumes: [`${id}-searxng:/etc/searxng`], - environmentVariables: { - SEARXNG_BASE_URL: `${fqdn}` - }, - }, - redis: { - image: 'redis:7-alpine', - } - }; - - const settingsYml = ` - # see https://docs.searxng.org/admin/engines/settings.html#use-default-settings - use_default_settings: true - server: - secret_key: ${secretKey} - limiter: true - image_proxy: true - ui: - static_use_hash: true - redis: - url: redis://:${redisPassword}@${id}-redis:6379/0` - - const Dockerfile = ` - FROM ${config.searxng.image} - COPY ./settings.yml /etc/searxng/settings.yml`; - - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.searxng.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - build: workdir, - container_name: id, - volumes: config.searxng.volumes, - environment: config.searxng.environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - labels: makeLabelForServices('searxng'), - cap_drop: ['ALL'], - cap_add: ['CHOWN', 'SETGID', 'SETUID', 'DAC_OVERRIDE'], - depends_on: [`${id}-redis`], - ...defaultComposeConfiguration(network), - }, - [`${id}-redis`]: { - container_name: `${id}-redis`, - image: config.redis.image, - command: `redis-server --requirepass ${redisPassword} --save "" --appendonly "no"`, - labels: makeLabelForServices('searxng'), - cap_drop: ['ALL'], - cap_add: ['SETGID', 'SETUID', 'DAC_OVERRIDE'], - ...defaultComposeConfiguration(network), - }, - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await fs.writeFile(`${workdir}/Dockerfile`, Dockerfile); - await fs.writeFile(`${workdir}/settings.yml`, settingsYml); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - - -async function startWeblateService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - weblate: { adminPassword, postgresqlHost, postgresqlPort, postgresqlUser, postgresqlPassword, postgresqlDatabase } - } = service; - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage, fqdn } = - service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('weblate'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - weblate: { - image: `${image}:${version}`, - volumes: [`${id}-data:/app/data`], - environmentVariables: { - WEBLATE_SITE_DOMAIN: getDomain(fqdn), - WEBLATE_ADMIN_PASSWORD: adminPassword, - POSTGRES_PASSWORD: postgresqlPassword, - POSTGRES_USER: postgresqlUser, - POSTGRES_DATABASE: postgresqlDatabase, - POSTGRES_HOST: postgresqlHost, - POSTGRES_PORT: postgresqlPort, - REDIS_HOST: `${id}-redis`, - } - }, - postgresql: { - image: `postgres:14-alpine`, - volumes: [`${id}-postgresql-data:/var/lib/postgresql/data`], - environmentVariables: { - POSTGRES_PASSWORD: postgresqlPassword, - POSTGRES_USER: postgresqlUser, - POSTGRES_DB: postgresqlDatabase, - POSTGRES_HOST: postgresqlHost, - POSTGRES_PORT: postgresqlPort, - } - }, - redis: { - image: `redis:6-alpine`, - volumes: [`${id}-redis-data:/data`], - } - - }; - - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.weblate.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.weblate.image, - environment: config.weblate.environmentVariables, - ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), - volumes: config.weblate.volumes, - labels: makeLabelForServices('weblate'), - ...defaultComposeConfiguration(network), - }, - [`${id}-postgresql`]: { - container_name: `${id}-postgresql`, - image: config.postgresql.image, - environment: config.postgresql.environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - volumes: config.postgresql.volumes, - labels: makeLabelForServices('weblate'), - ...defaultComposeConfiguration(network), - }, - [`${id}-redis`]: { - container_name: `${id}-redis`, - image: config.redis.image, - ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), - volumes: config.redis.volumes, - labels: makeLabelForServices('weblate'), - ...defaultComposeConfiguration(network), - } - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startTaigaService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { - taiga: { secretKey, djangoAdminUser, djangoAdminPassword, erlangSecret, rabbitMQUser, rabbitMQPassword, postgresqlHost, postgresqlPort, postgresqlUser, postgresqlPassword, postgresqlDatabase } - } = service; - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage, fqdn } = - service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('taiga'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const isHttps = fqdn.startsWith('https://'); - const superUserEntrypoint = `#!/bin/sh - set -e - python manage.py makemigrations - python manage.py migrate - - if [ "$DJANGO_SUPERUSER_USERNAME" ] - then - python manage.py createsuperuser \ - --noinput \ - --username $DJANGO_SUPERUSER_USERNAME \ - --email $DJANGO_SUPERUSER_EMAIL - fi - exec "$@"`; - const entrypoint = `#!/bin/sh - set -e - - /taiga-back/docker/entrypoint_superuser.sh || echo "Superuser creation failed, but continue" - /taiga-back/docker/entrypoint.sh - - exec "$@"`; - - const DockerfileBack = ` - FROM taigaio/taiga-back:latest - COPY ./entrypoint_superuser.sh /taiga-back/docker/entrypoint_superuser.sh - COPY ./entrypoint_coolify.sh /taiga-back/docker/entrypoint_coolify.sh - RUN ["chmod", "+x", "/taiga-back/docker/entrypoint_superuser.sh"] - RUN ["chmod", "+x", "/taiga-back/docker/entrypoint_coolify.sh"] - RUN ["chmod", "+x", "/taiga-back/docker/entrypoint.sh"]`; - - const DockerfileGateway = ` - FROM nginx:1.19-alpine - COPY ./nginx.conf /etc/nginx/conf.d/default.conf`; - - const nginxConf = `server { - listen 80 default_server; - - client_max_body_size 100M; - charset utf-8; - - # Frontend - location / { - proxy_pass http://${id}-taiga-front/; - proxy_pass_header Server; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Scheme $scheme; - } - - # API - location /api/ { - proxy_pass http://${id}-taiga-back:8000/api/; - proxy_pass_header Server; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Scheme $scheme; - } - - # Admin - location /admin/ { - proxy_pass http://${id}-taiga-back:8000/admin/; - proxy_pass_header Server; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Scheme $scheme; - } - - # Static - location 
/static/ { - alias /taiga/static/; - } - - # Media - location /_protected/ { - internal; - alias /taiga/media/; - add_header Content-disposition "attachment"; - } - - # Unprotected section - location /media/exports/ { - alias /taiga/media/exports/; - add_header Content-disposition "attachment"; - } - - location /media/ { - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Scheme $scheme; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_pass http://${id}-taiga-protected:8003/; - proxy_redirect off; - } - - # Events - location /events { - proxy_pass http://${id}-taiga-events:8888/events; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - proxy_connect_timeout 7d; - proxy_send_timeout 7d; - proxy_read_timeout 7d; - } - }` - await fs.writeFile(`${workdir}/entrypoint_superuser.sh`, superUserEntrypoint); - await fs.writeFile(`${workdir}/entrypoint_coolify.sh`, entrypoint); - await fs.writeFile(`${workdir}/DockerfileBack`, DockerfileBack); - await fs.writeFile(`${workdir}/DockerfileGateway`, DockerfileGateway); - await fs.writeFile(`${workdir}/nginx.conf`, nginxConf); - - const config = { - ['taiga-gateway']: { - volumes: [`${id}-static-data:/taiga-back/static`, `${id}-media-data:/taiga-back/media`], - }, - ['taiga-front']: { - image: `${image}:${version}`, - environmentVariables: { - TAIGA_URL: fqdn, - TAIGA_WEBSOCKETS_URL: isHttps ? `wss://${getDomain(fqdn)}` : `ws://${getDomain(fqdn)}`, - TAIGA_SUBPATH: "", - PUBLIC_REGISTER_ENABLED: isDev ? "true" : "false", - } - }, - ['taiga-back']: { - volumes: [`${id}-static-data:/taiga-back/static`, `${id}-media-data:/taiga-back/media`], - environmentVariables: { - POSTGRES_DB: postgresqlDatabase, - POSTGRES_HOST: postgresqlHost, - POSTGRES_PORT: postgresqlPort, - POSTGRES_USER: postgresqlUser, - POSTGRES_PASSWORD: postgresqlPassword, - TAIGA_SECRET_KEY: secretKey, - TAIGA_SITES_SCHEME: isHttps ? 'https' : 'http', - TAIGA_SITES_DOMAIN: getDomain(fqdn), - TAIGA_SUBPATH: "", - EVENTS_PUSH_BACKEND_URL: `amqp://${rabbitMQUser}:${rabbitMQPassword}@${id}-taiga-rabbitmq:5672/taiga`, - CELERY_BROKER_URL: `amqp://${rabbitMQUser}:${rabbitMQPassword}@${id}-taiga-rabbitmq:5672/taiga`, - RABBITMQ_USER: rabbitMQUser, - RABBITMQ_PASS: rabbitMQPassword, - ENABLE_TELEMETRY: "False", - DJANGO_SUPERUSER_EMAIL: `admin@${getDomain(fqdn)}`, - DJANGO_SUPERUSER_PASSWORD: djangoAdminPassword, - DJANGO_SUPERUSER_USERNAME: djangoAdminUser, - PUBLIC_REGISTER_ENABLED: isDev ? "True" : "False", - SESSION_COOKIE_SECURE: isDev ? "False" : "True", - CSRF_COOKIE_SECURE: isDev ? "False" : "True", - - } - }, - ['taiga-async']: { - image: `taigaio/taiga-back:latest`, - volumes: [`${id}-static-data:/taiga-back/static`, `${id}-media-data:/taiga-back/media`], - environmentVariables: { - POSTGRES_DB: postgresqlDatabase, - POSTGRES_HOST: postgresqlHost, - POSTGRES_PORT: postgresqlPort, - POSTGRES_USER: postgresqlUser, - POSTGRES_PASSWORD: postgresqlPassword, - TAIGA_SECRET_KEY: secretKey, - TAIGA_SITES_SCHEME: isHttps ? 
'https' : 'http', - TAIGA_SITES_DOMAIN: getDomain(fqdn), - TAIGA_SUBPATH: "", - RABBITMQ_USER: rabbitMQUser, - RABBITMQ_PASS: rabbitMQPassword, - ENABLE_TELEMETRY: "False", - } - }, - ['taiga-rabbitmq']: { - image: `rabbitmq:3.8-management-alpine`, - volumes: [`${id}-events:/var/lib/rabbitmq`], - environmentVariables: { - RABBITMQ_ERLANG_COOKIE: erlangSecret, - RABBITMQ_DEFAULT_USER: rabbitMQUser, - RABBITMQ_DEFAULT_PASS: rabbitMQPassword, - RABBITMQ_DEFAULT_VHOST: 'taiga' - } - }, - ['taiga-protected']: { - image: `taigaio/taiga-protected:latest`, - environmentVariables: { - MAX_AGE: 360, - SECRET_KEY: secretKey, - TAIGA_URL: fqdn - } - }, - ['taiga-events']: { - image: `taigaio/taiga-events:latest`, - environmentVariables: { - RABBITMQ_URL: `amqp://${rabbitMQUser}:${rabbitMQPassword}@${id}-taiga-rabbitmq:5672/taiga`, - RABBITMQ_USER: rabbitMQUser, - RABBITMQ_PASS: rabbitMQPassword, - TAIGA_SECRET_KEY: secretKey, - } - }, - - postgresql: { - image: `postgres:12.3`, - volumes: [`${id}-postgresql-data:/var/lib/postgresql/data`], - environmentVariables: { - POSTGRES_PASSWORD: postgresqlPassword, - POSTGRES_USER: postgresqlUser, - POSTGRES_DB: postgresqlDatabase - } - } - }; - - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config['taiga-back'].environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - build: { - context: '.', - dockerfile: 'DockerfileGateway', - }, - container_name: id, - volumes: config['taiga-gateway'].volumes, - labels: makeLabelForServices('taiga'), - ...defaultComposeConfiguration(network), - }, - [`${id}-taiga-front`]: { - container_name: `${id}-taiga-front`, - image: config['taiga-front'].image, - environment: config['taiga-front'].environmentVariables, - labels: makeLabelForServices('taiga'), - ...defaultComposeConfiguration(network), - }, - [`${id}-taiga-back`]: { - build: { - context: '.', - dockerfile: 'DockerfileBack', - }, - entrypoint: '/taiga-back/docker/entrypoint_coolify.sh', - container_name: `${id}-taiga-back`, - environment: config['taiga-back'].environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}), - volumes: config['taiga-back'].volumes, - labels: makeLabelForServices('taiga'), - ...defaultComposeConfiguration(network), - }, - - [`${id}-async`]: { - container_name: `${id}-taiga-async`, - image: config['taiga-async'].image, - entrypoint: ["/taiga-back/docker/async_entrypoint.sh"], - environment: config['taiga-async'].environmentVariables, - volumes: config['taiga-async'].volumes, - labels: makeLabelForServices('taiga'), - ...defaultComposeConfiguration(network), - }, - [`${id}-taiga-rabbitmq`]: { - container_name: `${id}-taiga-rabbitmq`, - image: config['taiga-rabbitmq'].image, - volumes: config['taiga-rabbitmq'].volumes, - environment: config['taiga-rabbitmq'].environmentVariables, - labels: makeLabelForServices('taiga'), - ...defaultComposeConfiguration(network), - }, - [`${id}-taiga-protected`]: { - container_name: `${id}-taiga-protected`, - image: config['taiga-protected'].image, - environment: config['taiga-protected'].environmentVariables, - labels: makeLabelForServices('taiga'), - ...defaultComposeConfiguration(network), - }, - [`${id}-taiga-events`]: { - container_name: `${id}-taiga-events`, - image: config['taiga-events'].image, - environment: config['taiga-events'].environmentVariables, - labels: makeLabelForServices('taiga'), - ...defaultComposeConfiguration(network), - }, - [`${id}-postgresql`]: { - container_name: `${id}-postgresql`, - image: config.postgresql.image, - environment: config.postgresql.environmentVariables, - ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}), - volumes: config.postgresql.volumes, - labels: makeLabelForServices('taiga'), - ...defaultComposeConfiguration(network), - }, - - }, - networks: { - [network]: { - external: true - } - }, - volumes: volumeMounts - }; - const composeFileDestination = `${workdir}/docker-compose.yaml`; - await fs.writeFile(composeFileDestination, yaml.dump(composeFile)); - - await startServiceContainers(destinationDocker.id, composeFileDestination) - return {} - } catch ({ status, message }) { - return errorHandler({ status, message }) - } -} - -async function startGrafanaService(request: FastifyRequest) { - try { - const { id } = request.params; - const teamId = request.user.teamId; - const service = await getServiceFromDB({ id, teamId }); - const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } = - service; - const network = destinationDockerId && destinationDocker.network; - const port = getServiceMainPort('grafana'); - - const { workdir } = await createDirectories({ repository: type, buildId: id }); - const image = getServiceImage(type); - - const config = { - grafana: { - image: `${image}:${version}`, - volumes: [`${id}-grafana:/var/lib/grafana`], - environmentVariables: {} - } - }; - if (serviceSecret.length > 0) { - serviceSecret.forEach((secret) => { - config.grafana.environmentVariables[secret.name] = secret.value; - }); - } - const { volumeMounts } = persistentVolumes(id, persistentStorage, config) - const composeFile: ComposeFile = { - version: '3.8', - services: { - [id]: { - container_name: id, - image: config.grafana.image, - volumes: config.grafana.volumes, - environment: config.grafana.environmentVariables, - ...(exposePort ? 
{ ports: [`${exposePort}:${port}`] } : {}),
-                    labels: makeLabelForServices('grafana'),
-                    ...defaultComposeConfiguration(network),
-                }
-            },
-            networks: {
-                [network]: {
-                    external: true
-                }
-            },
-            volumes: volumeMounts
-        };
-        const composeFileDestination = `${workdir}/docker-compose.yaml`;
-        await fs.writeFile(composeFileDestination, yaml.dump(composeFile));
-        await startServiceContainers(destinationDocker.id, composeFileDestination)
-        return {}
-    } catch ({ status, message }) {
-        return errorHandler({ status, message })
-    }
-}
-async function startTriliumService(request: FastifyRequest) {
-    try {
-        const { id } = request.params;
-        const teamId = request.user.teamId;
-        const service = await getServiceFromDB({ id, teamId });
-        const { type, version, destinationDockerId, destinationDocker, serviceSecret, exposePort, persistentStorage } =
-            service;
-        const network = destinationDockerId && destinationDocker.network;
-        const port = getServiceMainPort('trilium');
-
-        const { workdir } = await createDirectories({ repository: type, buildId: id });
-        const image = getServiceImage(type);
-
-        const config = {
-            trilium: {
-                image: `${image}:${version}`,
-                volumes: [`${id}-trilium:/home/node/trilium-data`],
-                environmentVariables: {}
-            }
-        };
-        if (serviceSecret.length > 0) {
-            serviceSecret.forEach((secret) => {
-                config.trilium.environmentVariables[secret.name] = secret.value;
-            });
-        }
-        const { volumeMounts } = persistentVolumes(id, persistentStorage, config)
-        const composeFile: ComposeFile = {
-            version: '3.8',
-            services: {
-                [id]: {
-                    container_name: id,
-                    image: config.trilium.image,
-                    volumes: config.trilium.volumes,
-                    environment: config.trilium.environmentVariables,
-                    ...(exposePort ? { ports: [`${exposePort}:${port}`] } : {}),
-                    labels: makeLabelForServices('trilium'),
-                    ...defaultComposeConfiguration(network),
-                }
-            },
-            networks: {
-                [network]: {
-                    external: true
-                }
-            },
-            volumes: volumeMounts
-        };
-        const composeFileDestination = `${workdir}/docker-compose.yaml`;
-        await fs.writeFile(composeFileDestination, yaml.dump(composeFile));
-        await startServiceContainers(destinationDocker.id, composeFileDestination)
-        return {}
-    } catch ({ status, message }) {
-        return errorHandler({ status, message })
-    }
-}
-
 export async function migrateAppwriteDB(request: FastifyRequest, reply: FastifyReply) {
     try {
         const { id } = request.params
diff --git a/apps/api/src/routes/api/v1/services/index.ts b/apps/api/src/routes/api/v1/services/index.ts
index 65fb52f57..830687bf7 100644
--- a/apps/api/src/routes/api/v1/services/index.ts
+++ b/apps/api/src/routes/api/v1/services/index.ts
@@ -70,11 +70,9 @@ const root: FastifyPluginAsync = async (fastify): Promise => {
     fastify.post('/:id/configuration/destination', async (request, reply) => await saveServiceDestination(request, reply));
     fastify.get('/:id/usage', async (request) => await getServiceUsage(request));
-    // fastify.get('/:id/logs', async (request) => await getServiceLogs(request));
     fastify.get('/:id/logs/:containerId', async (request) => await getServiceLogs(request));
     fastify.post('/:id/start', async (request) => await startService(request));
-    fastify.post('/:id/:type/start', async (request) => await startService(request));
     fastify.post('/:id/:type/stop', async (request) => await stopService(request));
     fastify.post('/:id/:type/settings', async (request, reply) => await setSettingsService(request, reply));
diff --git a/apps/api/src/routes/webhooks/traefik/handlers.ts b/apps/api/src/routes/webhooks/traefik/handlers.ts
index e741b5f6f..f89bcea46 100644
--- a/apps/api/src/routes/webhooks/traefik/handlers.ts
+++ b/apps/api/src/routes/webhooks/traefik/handlers.ts
@@ -1,7 +1,6 @@
 import { FastifyRequest } from "fastify";
 import { errorHandler, getDomain, isDev, prisma, executeDockerCmd, fixType } from "../../../lib/common";
 import { supportedServiceTypesAndVersions } from "../../../lib/services/supportedVersions";
-import { includeServices } from "../../../lib/services/common";
 import { TraefikOtherConfiguration } from "./types";
 import { OnlyId } from "../../../types";
 import { getTemplates } from "../../../lib/services";
@@ -363,7 +362,12 @@ export async function traefikConfiguration(request, reply) {
     }
     const services: any = await prisma.service.findMany({
         where: { destinationDocker: { remoteEngine: false } },
-        include: includeServices,
+        include: {
+            destinationDocker: true,
+            persistentStorage: true,
+            serviceSecret: true,
+            serviceSetting: true,
+        },
         orderBy: { createdAt: 'desc' },
     });
@@ -849,7 +853,12 @@ export async function remoteTraefikConfiguration(request: FastifyRequest
     }
     const services: any = await prisma.service.findMany({
         where: { destinationDocker: { id } },
-        include: includeServices,
+        include: {
+            destinationDocker: true,
+            persistentStorage: true,
+            serviceSecret: true,
+            serviceSetting: true,
+        },
         orderBy: { createdAt: 'desc' }
     });
diff --git a/apps/ui/src/routes/services/[id]/index.svelte b/apps/ui/src/routes/services/[id]/index.svelte
index 53d9281a6..4490a1284 100644
--- a/apps/ui/src/routes/services/[id]/index.svelte
+++ b/apps/ui/src/routes/services/[id]/index.svelte
@@ -228,7 +228,7 @@ class:loading={loading.cleanup}>Cleanup Unnecessary Database Logs
     {/if}
-    {#if service.type === 'appwrite' && $status.service.isRunning}
+    {#if service.type === 'appwrite' && $status.service.overallStatus === 'healthy'}