diff --git a/public/svgs/anythingllm.svg b/public/svgs/anythingllm.svg
new file mode 100644
index 000000000..1c25f8711
--- /dev/null
+++ b/public/svgs/anythingllm.svg
@@ -0,0 +1,166 @@
+
+
+
diff --git a/public/svgs/argilla.png b/public/svgs/argilla.png
new file mode 100644
index 000000000..3ead32785
Binary files /dev/null and b/public/svgs/argilla.png differ
diff --git a/public/svgs/infisical.png b/public/svgs/infisical.png
new file mode 100644
index 000000000..48eddae78
Binary files /dev/null and b/public/svgs/infisical.png differ
diff --git a/public/svgs/langfuse.png b/public/svgs/langfuse.png
new file mode 100644
index 000000000..8dec0fe4a
Binary files /dev/null and b/public/svgs/langfuse.png differ
diff --git a/public/svgs/litellm.svg b/public/svgs/litellm.svg
new file mode 100644
index 000000000..01830c3f6
--- /dev/null
+++ b/public/svgs/litellm.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/public/svgs/prefect.png b/public/svgs/prefect.png
new file mode 100644
index 000000000..2f87ec0d7
Binary files /dev/null and b/public/svgs/prefect.png differ
diff --git a/public/svgs/qdrant.png b/public/svgs/qdrant.png
new file mode 100644
index 000000000..ecb2a56d5
Binary files /dev/null and b/public/svgs/qdrant.png differ
diff --git a/public/svgs/searxng.svg b/public/svgs/searxng.svg
new file mode 100644
index 000000000..b94fe3728
--- /dev/null
+++ b/public/svgs/searxng.svg
@@ -0,0 +1,56 @@
+
+
diff --git a/public/svgs/unstructured.png b/public/svgs/unstructured.png
new file mode 100644
index 000000000..a6ec855b6
Binary files /dev/null and b/public/svgs/unstructured.png differ
diff --git a/public/svgs/weaviate.png b/public/svgs/weaviate.png
new file mode 100644
index 000000000..134294253
Binary files /dev/null and b/public/svgs/weaviate.png differ
diff --git a/templates/compose/anythingllm.yaml b/templates/compose/anythingllm.yaml
new file mode 100644
index 000000000..952fe5ce5
--- /dev/null
+++ b/templates/compose/anythingllm.yaml
@@ -0,0 +1,39 @@
+# documentation: https://docs.anythingllm.com/installation-docker/overview
+# slogan: AnythingLLM is the easiest to use, all-in-one AI application that can do RAG, AI Agents, and much more with no code or infrastructure headaches.
+# tags: lowcode, nocode, ai, llm, openai, anthropic, machine-learning, rag, agents, chatbot, api, team, bot
+# logo: svgs/anythingllm.svg
+
+version: "3"
+
+volumes:
+ storage_dir: null
+ hot_dir: null
+ outputs_dir: null
+
+services:
+ anything-llm:
+ image: mintplexlabs/anythingllm
+ environment:
+ - SERVICE_FQDN_ANYTHINGLLM_3001
+ - STORAGE_DIR=/app/server/storage
+ - DISABLE_TELEMETRY=true
+ - PASSWORDLOWERCASE=1
+ - PASSWORDMAXCHAR=250
+ - PASSWORDMINCHAR=6
+ - PASSWORDNUMERIC=1
+ - PASSWORDREQUIREMENTS=1
+ - PASSWORDSYMBOL=1
+ - PASSWORDUPPERCASE=1
+ - SIG_KEY=${SERVICE_PASSWORD_SIGKEY}
+ - SIG_SALT=${SERVICE_PASSWORD_SIGSALT}
+ - JWT_SECRET=${SERVICE_PASSWORD_JWTSECRET}
+ - AUTH_TOKEN=${SERVICE_PASSWORD_AUTHTOKEN}
+      # (removed duplicate AUTH_TOKEN entry; SERVICE_PASSWORD_AUTHTOKEN above is authoritative)
+ - SERVER_PORT=${SERVER_PORT:-3001}
+ cap_add:
+ - SYS_ADMIN
+ volumes:
+ - "storage_dir:/app/server/storage"
+ - "hot_dir:/app/collector/hotdir"
+ - "outputs_dir:/app/collector/outputs"
+ user: "${UID:-1000}:${GID:-1000}"
diff --git a/templates/compose/argilla.yaml b/templates/compose/argilla.yaml
new file mode 100644
index 000000000..3eedf1386
--- /dev/null
+++ b/templates/compose/argilla.yaml
@@ -0,0 +1,136 @@
+# documentation: https://docs.argilla.io/latest/
+# slogan: Argilla is a collaboration tool for AI engineers and domain experts who need to build high-quality datasets for their projects.
+# tags: workflow, orchestration, data-pipeline, python, argilla, ai, elasticsearch, datasets, data, machine-learning, data-science, nlp
+# logo: svgs/argilla.png
+# port: 6900
+
+version: "3"
+
+services:
+ argilla:
+ image: "argilla/argilla-server:v2.2.0"
+ restart: always
+ environment:
+ - SERVICE_FQDN_ARGILLA_6900
+ - ARGILLA_HOME_PATH=/var/lib/argilla
+ - ARGILLA_ELASTICSEARCH=http://elasticsearch:9200
+      - ARGILLA_DATABASE_URL=postgresql+asyncpg://${SERVICE_USER_POSTGRES}:${SERVICE_PASSWORD_POSTGRES}@postgres:5432/${POSTGRES_DB:-argilla}
+ - ARGILLA_REDIS_URL=redis://redis:6379/0
+      - ARGILLA_AUTH_SECRET_KEY=${SERVICE_PASSWORD_AUTHSECRET}
+ - ARGILLA_ENABLE_TELEMETRY=0
+ - HF_HUB_DISABLE_TELEMETRY=1
+ - REINDEX_DATASETS=1
+ - DEFAULT_USER_ENABLED=true
+ - USERNAME=${ARGILLA_USERNAME:-argilla}
+      - PASSWORD=${SERVICE_PASSWORD_ARGILLA}
+      - API_KEY=${SERVICE_PASSWORD_APIKEY}
+      - DEFAULT_USER_PASSWORD=${SERVICE_PASSWORD_ARGILLA}
+      - DEFAULT_USER_API_KEY=${SERVICE_PASSWORD_APIKEY}
+ depends_on:
+ elasticsearch:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ postgres:
+ condition: service_healthy
+ volumes:
+ - "argilla_data:/var/lib/argilla"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - 'python -c "import requests as r;r.get(\"http://localhost:6900/api/_status\").raise_for_status()"'
+ interval: 10s
+ timeout: 10s
+ retries: 5
+ worker:
+ image: "argilla/argilla-server:v2.2.0"
+ restart: always
+ environment:
+ - ARGILLA_HOME_PATH=/var/lib/argilla
+ - ARGILLA_ELASTICSEARCH=http://elasticsearch:9200
+      - ARGILLA_DATABASE_URL=postgresql+asyncpg://${SERVICE_USER_POSTGRES}:${SERVICE_PASSWORD_POSTGRES}@postgres:5432/${POSTGRES_DB:-argilla}
+ - ARGILLA_REDIS_URL=redis://redis:6379/0
+      - ARGILLA_AUTH_SECRET_KEY=${SERVICE_PASSWORD_AUTHSECRET}
+ - ARGILLA_ENABLE_TELEMETRY=0
+ - HF_HUB_DISABLE_TELEMETRY=1
+ - REINDEX_DATASETS=1
+ - DEFAULT_USER_ENABLED=true
+ - USERNAME=${ARGILLA_USERNAME:-argilla}
+      - PASSWORD=${SERVICE_PASSWORD_ARGILLA}
+      - API_KEY=${SERVICE_PASSWORD_APIKEY}
+      - DEFAULT_USER_PASSWORD=${SERVICE_PASSWORD_ARGILLA}
+      - DEFAULT_USER_API_KEY=${SERVICE_PASSWORD_APIKEY}
+ - BACKGROUND_NUM_WORKERS=${BACKGROUND_NUM_WORKERS:-1}
+ depends_on:
+ elasticsearch:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ postgres:
+ condition: service_healthy
+ volumes:
+ - "argilla_data:/var/lib/argilla"
+ command: "sh -c 'python -m argilla_server worker --num-workers $${BACKGROUND_NUM_WORKERS}'"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - pwd
+ interval: 10s
+ timeout: 10s
+ retries: 5
+ postgres:
+ image: "postgres:14"
+ restart: always
+ environment:
+ - POSTGRES_USER=${SERVICE_USER_POSTGRES}
+ - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
+ - POSTGRES_DB=${POSTGRES_DB:-argilla}
+ volumes:
+ - "pg_data:/var/lib/postgresql/data"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "pg_isready -h localhost -U $${POSTGRES_USER} -d $${POSTGRES_DB}"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ redis:
+ image: "redis:7"
+ restart: always
+ volumes:
+ - "redis_data:/data"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "redis-cli -h localhost -p 6379 ping"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ elasticsearch:
+ image: "docker.elastic.co/elasticsearch/elasticsearch:8.12.2"
+ restart: always
+ environment:
+ - node.name=elasticsearch
+ - cluster.name=es-argilla-local
+ - discovery.type=single-node
+ - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
+ - cluster.routing.allocation.disk.threshold_enabled=false
+ - xpack.security.enabled=false
+ ulimits:
+ memlock:
+ soft: -1
+ hard: -1
+ volumes:
+ - "elasticsearch_data:/usr/share/elasticsearch/data/"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "curl --silent --fail http://elasticsearch:9200"
+ interval: 10s
+ timeout: 10s
+ retries: 5
+volumes:
+ argilla_data: null
+  elasticsearch_data: null
+ redis_data: null
+ pg_data: null
diff --git a/templates/compose/infisical.yaml b/templates/compose/infisical.yaml
new file mode 100644
index 000000000..a1d4e9e8e
--- /dev/null
+++ b/templates/compose/infisical.yaml
@@ -0,0 +1,75 @@
+# documentation: https://infisical.com/docs/documentation/getting-started/introduction
+# slogan: Infisical is the open source secret management platform that developers use to centralize their application configuration and secrets like API keys and database credentials.
+# tags: security, environment, secrets, infisical, database, configuration, secret, api, keys, auth, encryption
+# logo: svgs/infisical.png
+# port: 8080
+
+version: "3"
+
+services:
+ backend:
+ restart: always
+ depends_on:
+ redis:
+ condition: service_healthy
+ db-migration:
+ condition: service_completed_successfully
+ image: "infisical/infisical:latest-postgres"
+ environment:
+ - SERVICE_FQDN_BACKEND_8080
+ - SITE_URL=${SERVICE_FQDN_BACKEND_8080}
+ - NODE_ENV=production
+ - ENCRYPTION_KEY=${SERVICE_PASSWORD_ENCRYPTIONKEY}
+ - AUTH_SECRET=${SERVICE_REALBASE64_64_AUTHSECRET}
+      - DB_CONNECTION_URI=postgres://${SERVICE_USER_POSTGRES}:${SERVICE_PASSWORD_POSTGRES}@db:5432/${POSTGRES_DB:-infisical}
+ - REDIS_URL=redis://redis:6379
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "wget --no-verbose --tries=1 --spider http://127.0.0.1:8080/api/status || exit 1"
+ redis:
+ image: "redis:7"
+ environment:
+ - ALLOW_EMPTY_PASSWORD=yes
+ restart: always
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "redis-cli -h localhost -p 6379 ping"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ volumes:
+ - redis_data:/data
+ db:
+ image: "postgres:14-alpine"
+ restart: always
+ volumes:
+ - pg_data:/var/lib/postgresql/data
+ environment:
+ - POSTGRES_USER=${SERVICE_USER_POSTGRES}
+ - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
+ - POSTGRES_DB=${POSTGRES_DB:-infisical}
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "pg_isready -h localhost -U $${POSTGRES_USER} -d $${POSTGRES_DB}"
+ interval: 5s
+ timeout: 10s
+ retries: 10
+ db-migration:
+ depends_on:
+ db:
+ condition: service_healthy
+ image: "infisical/infisical:latest-postgres"
+ command: "npm run migration:latest"
+ restart: on-failure
+ environment:
+ - POSTGRES_USER=${SERVICE_USER_POSTGRES}
+ - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
+ - POSTGRES_DB=${POSTGRES_DB:-infisical}
+ - DB_CONNECTION_URI=postgres://${SERVICE_USER_POSTGRES}:${SERVICE_PASSWORD_POSTGRES}@db:5432/${POSTGRES_DB:-infisical}
+ - REDIS_URL=redis://redis:6379
+volumes:
+ pg_data:
+ redis_data:
diff --git a/templates/compose/langfuse.yaml b/templates/compose/langfuse.yaml
new file mode 100644
index 000000000..56753cf49
--- /dev/null
+++ b/templates/compose/langfuse.yaml
@@ -0,0 +1,54 @@
+# documentation: https://langfuse.com/docs
+# slogan: Langfuse is an open-source LLM engineering platform that helps teams collaboratively debug, analyze, and iterate on their LLM applications.
+# tags: ai, qdrant, weaviate, langchain, openai, gpt, llm, lmops, langfuse, llmops, tracing, observation, metrics
+# logo: svgs/langfuse.png
+# port: 3000
+
+version: "3"
+
+volumes:
+ pg_data: null
+
+services:
+ langfuse:
+ image: langfuse/langfuse:2
+ restart: always
+ environment:
+ - SERVICE_FQDN_LANGFUSE_3000
+ - DATABASE_URL=postgresql://${SERVICE_USER_POSTGRES}:${SERVICE_PASSWORD_POSTGRES}@postgres:5432/${POSTGRES_DB:-langfuse}
+ - DIRECT_URL=postgresql://${SERVICE_USER_POSTGRES}:${SERVICE_PASSWORD_POSTGRES}@postgres:5432/${POSTGRES_DB:-langfuse}
+ - SALT=$SERVICE_PASSWORD_SALT
+ - AUTH_DISABLE_SIGNUP=${AUTH_DISABLE_SIGNUP:-false}
+ - NEXTAUTH_URL=$SERVICE_FQDN_LANGFUSE_3000
+ - NEXTAUTH_SECRET=${SERVICE_BASE64_64_NEXTAUTHSECRET}
+ - TELEMETRY_ENABLED=${TELEMETRY_ENABLED:-false}
+ - LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES=${LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES:-false}
+ - HOSTNAME=0.0.0.0
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - "-q"
+ - "--spider"
+ - "http://127.0.0.1:3000/api/public/health"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ depends_on:
+ postgres:
+ condition: service_healthy
+ postgres:
+ image: "postgres:16-alpine"
+ environment:
+ - POSTGRES_DB=${POSTGRES_DB:-langfuse}
+ - POSTGRES_PASSWORD=$SERVICE_PASSWORD_POSTGRES
+ - POSTGRES_USER=$SERVICE_USER_POSTGRES
+ volumes:
+ - "pg_data:/var/lib/postgresql/data"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "pg_isready -h localhost -U $${POSTGRES_USER} -d $${POSTGRES_DB}"
+ interval: 5s
+ timeout: 5s
+ retries: 10
diff --git a/templates/compose/litellm.yaml b/templates/compose/litellm.yaml
new file mode 100644
index 000000000..bf9063165
--- /dev/null
+++ b/templates/compose/litellm.yaml
@@ -0,0 +1,174 @@
+# documentation: https://docs.litellm.ai
+# slogan: Call all LLM APIs using the OpenAI format. Use Bedrock, Azure, OpenAI, Cohere, Anthropic, Ollama, Sagemaker, HuggingFace, Replicate, Groq (100+ LLMs)
+# tags: ai, qdrant, weaviate, langchain, openai, gpt, llm, lmops, anthropic, cohere, ollama, sagemaker, huggingface, replicate, groq
+# logo: svgs/litellm.svg
+# port: 4000
+
+version: "3"
+
+volumes:
+ pg_data: null
+ redis_data: null
+
+services:
+ litellm:
+ image: "ghcr.io/berriai/litellm-database:main-stable"
+ restart: always
+ depends_on:
+ postgres:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ environment:
+ - SERVICE_FQDN_LITELLM_4000
+ - LITELLM_LOG=ERROR
+ - LITELLM_MODE=PRODUCTION
+ - "LITELLM_MASTER_KEY=${SERVICE_PASSWORD_MASTERKEY}"
+ - "UI_USERNAME=${SERVICE_USER_UI}"
+ - "UI_PASSWORD=${SERVICE_PASSWORD_UI}"
+ - "DATABASE_URL=postgresql://${SERVICE_USER_POSTGRES}:${SERVICE_PASSWORD_POSTGRES}@postgres:5432/${POSTGRES_DB:-litellm}"
+ - REDIS_HOST=redis
+ - REDIS_PORT=6379
+ - "POSTGRES_USER=${SERVICE_USER_POSTGRES}"
+ - "POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}"
+ - "POSTGRES_DB=${POSTGRES_DB:-litellm}"
+ - "OPENAI_API_KEY=${OPENAI_API_KEY}"
+ - "OPENAI_API_BASE=${OPENAI_API_BASE}"
+ - "ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}"
+ - "ANTHROPIC_API_BASE=${ANTHROPIC_API_BASE}"
+ - "VOYAGE_API_KEY=${VOYAGE_API_KEY}"
+ - "VOYAGE_API_BASE=${VOYAGE_API_BASE}"
+ volumes:
+ - type: bind
+ source: ./litellm-config.yaml
+ target: /app/config.yaml
+ content: |
+ general_settings:
+ proxy_batch_write_at: 60
+
+ router_settings:
+ redis_host: os.environ/REDIS_HOST
+ redis_port: os.environ/REDIS_PORT
+ redis_password: os.environ/REDIS_PASSWORD
+ enable_pre_call_check: true
+
+ litellm_settings:
+ set_verbose: false
+ json_logs: true
+ log_raw_request_response: true
+ # turn_off_message_logging: false
+ # redact_user_api_key_info: false
+ service_callback: ["prometheus_system"]
+ drop_params: true
+ # max_budget: 100
+ # budget_duration: 30d
+ num_retries: 3
+ request_timeout: 600
+ telemetry: false
+ cache: true
+ cache_params:
+ type: redis
+ host: os.environ/REDIS_HOST
+ port: os.environ/REDIS_PORT
+ password: os.environ/REDIS_PASSWORD
+ namespace: "litellm_cache"
+ ttl: 600
+ success_callback:
+ # - "langfuse"
+ - "prometheus"
+ failure_callback:
+ # - "langfuse"
+ - "prometheus"
+ model_list:
+ # OpenAI
+ - model_name: gpt-4
+ litellm_params:
+ model: openai/gpt-4
+ api_key: os.environ/OPENAI_API_KEY
+ api_base: os.environ/OPENAI_API_BASE
+ - model_name: gpt-4o
+ litellm_params:
+ model: openai/gpt-4o
+ api_key: os.environ/OPENAI_API_KEY
+ api_base: os.environ/OPENAI_API_BASE
+ - model_name: gpt-4o-mini
+ litellm_params:
+ model: openai/gpt-4o-mini
+ api_key: os.environ/OPENAI_API_KEY
+ api_base: os.environ/OPENAI_API_BASE
+ # Anthropic
+ - model_name: claude-3-haiku
+ litellm_params:
+ model: claude-3-haiku-20240307
+ api_key: "os.environ/ANTHROPIC_API_KEY"
+ api_base: "os.environ/ANTHROPIC_API_BASE"
+ - model_name: claude-3.5-sonnet
+ litellm_params:
+ model: claude-3-5-sonnet-20240620
+ api_key: "os.environ/ANTHROPIC_API_KEY"
+ api_base: "os.environ/ANTHROPIC_API_BASE"
+ # VoyageAI
+ - model_name: voyage-law-2
+ model_info:
+ output_vector_size: 1024
+ litellm_params:
+ model: voyage/voyage-law-2
+ api_key: "os.environ/VOYAGE_API_KEY"
+ api_base: "os.environ/VOYAGE_API_BASE"
+ # rpm: 300
+ # tpm: 1000000
+ - model_name: voyage-multilingual-2
+ model_info:
+ mode: embedding
+ max_tokens: 32000
+ max_input_tokens: 32000
+ output_vector_size: 1024
+ litellm_params:
+ model: voyage/voyage-multilingual-2
+ api_key: "os.environ/VOYAGE_API_KEY"
+ api_base: "os.environ/VOYAGE_API_BASE"
+ input_cost_per_token: 0.00000012
+ output_cost_per_token: 0
+ # rpm: 300
+ # tpm: 1000000
+ healthcheck:
+ test:
+ - CMD
+ - python
+ - "-c"
+ - "import requests as r;r.get('http://127.0.0.1:4000/health/liveliness').raise_for_status()"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ command:
+ - "--config"
+ - /app/config.yaml
+ - "--port"
+ - "4000"
+ - "--num_workers"
+ - "8"
+ postgres:
+ image: "postgres:16-alpine"
+ environment:
+ - POSTGRES_DB=${POSTGRES_DB:-litellm}
+ - POSTGRES_PASSWORD=$SERVICE_PASSWORD_POSTGRES
+ - POSTGRES_USER=$SERVICE_USER_POSTGRES
+ volumes:
+ - "pg_data:/var/lib/postgresql/data"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "pg_isready -h localhost -U $${POSTGRES_USER} -d $${POSTGRES_DB}"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ redis:
+ image: redis:7-alpine
+ command: redis-server --appendonly yes
+ volumes:
+ - redis_data:/data
+ healthcheck:
+ test: ["CMD", "redis-cli", "ping"]
+ interval: 5s
+ timeout: 5s
+ retries: 3
diff --git a/templates/compose/prefect.yaml b/templates/compose/prefect.yaml
new file mode 100644
index 000000000..8d6c13271
--- /dev/null
+++ b/templates/compose/prefect.yaml
@@ -0,0 +1,87 @@
+# documentation: https://www.prefect.io/
+# slogan: Prefect is an orchestration and observability platform that empowers developers to build and scale workflows quickly.
+# tags: workflow, orchestration, data-pipeline, python, automation, data-processing, data-integration, etl
+# logo: svgs/prefect.png
+# port: 4200
+
+version: "3"
+
+volumes:
+ pg_data:
+
+services:
+ prefect:
+ image: "prefecthq/prefect:3-latest"
+ restart: always
+ depends_on:
+ postgresql:
+ condition: service_healthy
+ environment:
+ - SERVICE_FQDN_PREFECT_4200
+ - PREFECT_API_DATABASE_CONNECTION_URL=postgresql+asyncpg://${SERVICE_USER_POSTGRES}:${SERVICE_PASSWORD_POSTGRES}@postgresql:5432/${POSTGRES_DB:-prefect}
+ - PREFECT_API_KEY=${SERVICE_PASSWORD_APIKEY}
+ - PREFECT_EXPERIMENTAL_WARN=true
+ - PREFECT_EXPERIMENTAL_ENABLE_SCHEDULE_CONCURRENCY=true
+ - PREFECT_RUNNER_SERVER_ENABLE=true
+ - PREFECT_DEFAULT_WORK_POOL_NAME=${DEFAULT_POOL_NAME:-default}
+ command:
+ - prefect
+ - server
+ - start
+ - "--host"
+ - 0.0.0.0
+ - "--port"
+ - "4200"
+ healthcheck:
+ test:
+ - CMD
+ - python
+ - "-c"
+ - "import requests as r;r.get('http://127.0.0.1:4200/api/health').raise_for_status()"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ postgresql:
+ image: "postgres:16-alpine"
+ restart: always
+ volumes:
+ - "pg_data:/var/lib/postgresql/data"
+ environment:
+ - POSTGRES_USER=$SERVICE_USER_POSTGRES
+ - POSTGRES_PASSWORD=$SERVICE_PASSWORD_POSTGRES
+ - POSTGRES_DB=${POSTGRES_DB:-prefect}
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ agent:
+ image: "prefecthq/prefect:3-python3.12"
+ restart: always
+ depends_on:
+ prefect:
+ condition: service_healthy
+ entrypoint:
+ - /opt/prefect/entrypoint.sh
+ - prefect
+ - worker
+ - start
+ - "--pool=$${DEFAULT_POOL_NAME}"
+ - "--with-healthcheck"
+ - "--name=$${DEFAULT_WORKER_NAME}"
+ - "--limit=$${DEFAULT_POOL_LIMIT}"
+ environment:
+ - PREFECT_API_URL=http://prefect:4200/api
+ - PREFECT_API_KEY=${SERVICE_PASSWORD_APIKEY}
+ - DEFAULT_POOL_NAME=${DEFAULT_POOL_NAME:-default}
+ - DEFAULT_POOL_LIMIT=${DEFAULT_POOL_LIMIT:-1}
+ - DEFAULT_WORKER_NAME=${DEFAULT_WORKER_NAME:-worker1}
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - pwd
+ interval: 5s
+ timeout: 5s
+ retries: 3
diff --git a/templates/compose/qdrant.yaml b/templates/compose/qdrant.yaml
new file mode 100644
index 000000000..2ec040624
--- /dev/null
+++ b/templates/compose/qdrant.yaml
@@ -0,0 +1,27 @@
+# documentation: https://qdrant.tech/documentation/
+# slogan: Qdrant is a vector similarity search engine that provides a production-ready service with a convenient API to store, search, and manage points (i.e. vectors) with an additional payload.
+# tags: ai, vector-database, semantic-search, machine-learning, bm25, embeddings, llm
+# logo: svgs/qdrant.png
+# port: 6333
+
+version: "3"
+
+volumes:
+ qdrant_storage: null
+
+services:
+ qdrant:
+ image: "qdrant/qdrant:latest"
+ restart: always
+ environment:
+ - SERVICE_FQDN_QDRANT_6333
+ - QDRANT__SERVICE__API_KEY=${SERVICE_PASSWORD_QDRANTAPIKEY}
+ volumes:
+ - "qdrant_storage:/qdrant/storage"
+ healthcheck:
+ test:
+ - CMD-SHELL
+ - bash -c ':> /dev/tcp/127.0.0.1/6333' || exit 1
+ interval: 5s
+ timeout: 5s
+ retries: 3
diff --git a/templates/compose/searxng.yaml b/templates/compose/searxng.yaml
new file mode 100644
index 000000000..30d5e923b
--- /dev/null
+++ b/templates/compose/searxng.yaml
@@ -0,0 +1,78 @@
+# documentation: https://docs.searxng.org
+# slogan: SearXNG is a free internet metasearch engine which aggregates results from more than 70 search services.
+# tags: search, google, engine, images, documents, rss, proxy, news, web, api
+# logo: svgs/searxng.svg
+# port: 8080
+
+version: "3"
+
+volumes:
+ redis_data: null
+
+services:
+ searxng:
+ image: searxng/searxng
+ restart: always
+ depends_on:
+ redis:
+ condition: service_healthy
+ environment:
+ - SERVICE_FQDN_SEARXNG_8080
+ - INSTANCE_NAME=coolify
+ - BASE_URL=${SERVICE_FQDN_SEARXNG_8080}
+ - SEARXNG_URL=${SERVICE_FQDN_SEARXNG_8080}
+ - SEARXNG_BIND_ADDRESS=0.0.0.0
+ - SEARXNG_SECRET=${SERVICE_PASSWORD_SEARXNGSECRET}
+ - SEARXNG_REDIS_URL=redis://redis:6379/0
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - "-q"
+ - "--spider"
+ - "http://127.0.0.1:8080/healthz"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+ volumes:
+ - type: bind
+ source: ./settings.yml
+ target: /etc/searxng/settings.yml
+ content: |
+ # see https://docs.searxng.org/admin/settings/settings.html#settings-use-default-settings
+ use_default_settings: true
+ server:
+ limiter: false
+ image_proxy: true
+ search:
+ formats:
+ - html
+ - csv
+ - json
+ - rss
+ ui:
+ static_use_hash: true
+ - type: bind
+ source: ./limiter.toml
+ target: /etc/searxng/limiter.toml
+ content: |
+ # This configuration file updates the default configuration file
+ # See https://github.com/searxng/searxng/blob/master/searx/botdetection/limiter.toml
+
+ [botdetection.ip_limit]
+ # activate link_token method in the ip_limit method
+ link_token = true
+
+ redis:
+ image: "redis:7"
+ restart: always
+ volumes:
+ - "redis_data:/data"
+ healthcheck:
+ test:
+ - CMD
+ - redis-cli
+ - ping
+ interval: 5s
+ timeout: 5s
+ retries: 3
diff --git a/templates/compose/unstructured.yaml b/templates/compose/unstructured.yaml
new file mode 100644
index 000000000..1b8ea07f7
--- /dev/null
+++ b/templates/compose/unstructured.yaml
@@ -0,0 +1,24 @@
+# documentation: https://github.com/Unstructured-IO/unstructured-api?tab=readme-ov-file#--general-pre-processing-pipeline-for-documents
+# slogan: Unstructured provides a platform and tools to ingest and process unstructured documents for Retrieval Augmented Generation (RAG) and model fine-tuning.
+# tags: workflow, orchestration, data-pipeline, python, data, machine-learning, data-science, nlp, unstructured, ocr, data-extraction
+# logo: svgs/unstructured.png
+# port: 8000
+
+version: "3"
+
+services:
+ unstructured:
+ image: "downloads.unstructured.io/unstructured-io/unstructured-api:latest"
+ environment:
+ - SERVICE_FQDN_UNSTRUCTURED_8000
+ - "UNSTRUCTURED_API_KEY=${SERVICE_PASSWORD_APIKEY}"
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - "-qO-"
+ - "--spider"
+ - "http://0.0.0.0:8000/healthcheck"
+ interval: 15s
+ timeout: 15s
+ retries: 3
diff --git a/templates/compose/weaviate.yaml b/templates/compose/weaviate.yaml
new file mode 100644
index 000000000..54c767ff7
--- /dev/null
+++ b/templates/compose/weaviate.yaml
@@ -0,0 +1,50 @@
+# documentation: https://weaviate.io/developers/weaviate
+# slogan: Weaviate is an open-source vector database that stores both objects and vectors, allowing for combining vector search with structured filtering.
+# tags: ai, vector-database, semantic-search, machine-learning, bm25, embeddings, llm
+# logo: svgs/weaviate.png
+# port: 8080
+
+version: "3"
+
+services:
+ weaviate:
+ image: "cr.weaviate.io/semitechnologies/weaviate:1.26.4"
+ restart: always
+ volumes:
+ - "weaviate_data:/var/lib/weaviate"
+ command:
+ - "--host"
+ - 0.0.0.0
+ - "--port"
+ - "8080"
+ - "--scheme"
+ - http
+ environment:
+ - SERVICE_FQDN_WEAVIATE_8080
+ - DISABLE_TELEMETRY=true
+ - QUERY_DEFAULTS_LIMIT=1000
+ - LOG_LEVEL=info
+ - GOMEMLIMIT=${GOMEMLIMIT:-1024MiB}
+ - GOMAXPROCS=${GOMAXPROCS:-2}
+ - AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=false
+ - AUTHORIZATION_ADMINLIST_USERS=${AUTHORIZATION_ADMINLIST_USERS:-admin@example.com}
+ - AUTHENTICATION_APIKEY_USERS=${AUTHENTICATION_APIKEY_USERS:-admin@example.com}
+ - AUTHENTICATION_APIKEY_ENABLED=true
+ - "AUTHENTICATION_APIKEY_ALLOWED_KEYS=${SERVICE_PASSWORD_APIKEYS}"
+ - PERSISTENCE_DATA_PATH=/var/lib/weaviate
+ - DEFAULT_VECTORIZER_MODULE=none
+ - ENABLE_MODULES=${ENABLE_MODULES:-text2vec-openai,generative-openai,qna-openai}
+ - CLUSTER_HOSTNAME=node1
+ healthcheck:
+ test:
+ - CMD
+ - wget
+ - "-q"
+ - "--spider"
+ - "http://localhost:8080/v1/.well-known/ready"
+ interval: 5s
+ timeout: 5s
+ retries: 3
+
+volumes:
+ weaviate_data: null