Fixed litellm

Author: Alex Renoki
Date: 2024-10-04 07:09:58 +03:00
parent 97ee34f1a1
commit f4d650b03f


@@ -4,13 +4,15 @@
# logo: svgs/litellm.svg
# port: 4000
version: "3"
volumes:
  pg-data: null
  redis-data: null
  pg_data: null
  redis_data: null
services:
  litellm:
    image: "ghcr.io/berriai/litellm-database:main-latest"
    image: "ghcr.io/berriai/litellm-database:main-stable"
    restart: always
    depends_on:
      postgres:
@@ -18,9 +20,10 @@ services:
      redis:
        condition: service_healthy
    environment:
      - SERVICE_FQDN_LITELLM_4000
      - LITELLM_LOG=ERROR
      - LITELLM_MODE=PRODUCTION
      - "LITELLM_MASTER_KEY=${SERVICE_PASSWORD_32_LITELLM_MASTER_KEY}"
      - "LITELLM_MASTER_KEY=${SERVICE_PASSWORD_MASTERKEY}"
      - "UI_USERNAME=${SERVICE_USER_UI}"
      - "UI_PASSWORD=${SERVICE_PASSWORD_UI}"
      - "DATABASE_URL=postgresql://${SERVICE_USER_POSTGRES}:${SERVICE_PASSWORD_POSTGRES}@postgres:5432/${POSTGRES_DB:-litellm}"
@@ -36,16 +39,14 @@ services:
- "VOYAGE_API_KEY=${VOYAGE_API_KEY}"
- "VOYAGE_API_BASE=${VOYAGE_API_BASE}"
volumes:
- "./litellm-config.yaml:/app/config.yaml"
- type: bind
source: ./docker/app/config.yaml
source: ./litellm-config.yaml
target: /app/config.yaml
content: |
general_settings:
proxy_batch_write_at: 60
router_settings:
routing_strategy: usage-based-routing-v2
redis_host: os.environ/REDIS_HOST
redis_port: os.environ/REDIS_PORT
redis_password: os.environ/REDIS_PASSWORD
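Once the container is up with the config mounted at /app/config.yaml, the proxy's readiness endpoint is a quick way to confirm it parsed the file and can reach its database. A hedged sketch using `requests`; the URL is assumed, and the `/health/readiness` path is taken from LiteLLM's proxy documentation:

```python
import requests

BASE_URL = "http://localhost:4000"  # assumed; use the service FQDN in practice
MASTER_KEY = "sk-REPLACE-WITH-LITELLM_MASTER_KEY"

# /health/readiness reports whether the proxy started and connected to its DB.
r = requests.get(
    f"{BASE_URL}/health/readiness",
    headers={"Authorization": f"Bearer {MASTER_KEY}"},
    timeout=10,
)
r.raise_for_status()
print(r.json())
```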
@@ -57,6 +58,7 @@ services:
            log_raw_request_response: true
            # turn_off_message_logging: false
            # redact_user_api_key_info: false
            service_callback: ["prometheus_system"]
            drop_params: true
            # max_budget: 100
            # budget_duration: 30d
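The commented `max_budget` / `budget_duration` lines are global settings; spend limits can also be attached to individual virtual keys issued by the proxy. A sketch of minting such a key via the `/key/generate` endpoint, assuming that endpoint and its `models`/`max_budget`/`duration` fields behave as described in LiteLLM's key-management docs:

```python
import requests

BASE_URL = "http://localhost:4000"  # assumed
MASTER_KEY = "sk-REPLACE-WITH-LITELLM_MASTER_KEY"

# Ask the proxy for a scoped virtual key with its own budget
# (request fields assumed from the LiteLLM key-management API).
r = requests.post(
    f"{BASE_URL}/key/generate",
    headers={"Authorization": f"Bearer {MASTER_KEY}"},
    json={"models": ["gpt-3.5-turbo"], "max_budget": 10, "duration": "30d"},
    timeout=10,
)
r.raise_for_status()
print(r.json()["key"])
```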
@@ -71,19 +73,14 @@ services:
              password: os.environ/REDIS_PASSWORD
              namespace: "litellm_cache"
              ttl: 600
            # success_callback:
            # - "langfuse"
            # - "prometheus"
            # failure_callback:
            # - "langfuse"
            # - "prometheus"
            success_callback:
              # - "langfuse"
              - "prometheus"
            failure_callback:
              # - "langfuse"
              - "prometheus"
          model_list:
            # OpenAI
            - model_name: gpt-3.5-turbo
              litellm_params:
                model: openai/gpt-3.5-turbo
                api_key: os.environ/OPENAI_API_KEY
                api_base: os.environ/OPENAI_API_BASE
            - model_name: gpt-4
              litellm_params:
                model: openai/gpt-4
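With the `prometheus` success and failure callbacks switched on, the proxy exposes request and failure counters for scraping. A sketch that pulls the metrics text, assuming the standard `/metrics` path is exposed and readable with the master key:

```python
import requests

BASE_URL = "http://localhost:4000"  # assumed
MASTER_KEY = "sk-REPLACE-WITH-LITELLM_MASTER_KEY"

# Fetch the Prometheus exposition text and keep only the proxy's own series
# (metric names assumed to start with the litellm_ prefix).
r = requests.get(
    f"{BASE_URL}/metrics",
    headers={"Authorization": f"Bearer {MASTER_KEY}"},
    timeout=10,
)
for line in r.text.splitlines():
    if line.startswith("litellm_"):
        print(line)
```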
@@ -118,8 +115,8 @@ services:
                model: voyage/voyage-law-2
                api_key: "os.environ/VOYAGE_API_KEY"
                api_base: "os.environ/VOYAGE_API_BASE"
                rpm: 300
                tpm: 1000000
                # rpm: 300
                # tpm: 1000000
            - model_name: voyage-multilingual-2
              model_info:
                mode: embedding
@@ -132,8 +129,8 @@ services:
                api_base: "os.environ/VOYAGE_API_BASE"
                input_cost_per_token: 0.00000012
                output_cost_per_token: 0
                rpm: 300
                tpm: 1000000
                # rpm: 300
                # tpm: 1000000
    healthcheck:
      test:
        - CMD
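The voyage entries are registered as embedding models (`mode: embedding`), so they are served through the proxy's OpenAI-compatible embeddings route. A minimal sketch, under the same assumed URL and key placeholders as above:

```python
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:4000",              # assumed
    api_key="sk-REPLACE-WITH-LITELLM_MASTER_KEY",  # LITELLM_MASTER_KEY
)

# Request embeddings from one of the voyage model_name entries above.
emb = client.embeddings.create(
    model="voyage-multilingual-2",
    input=["litellm routes this to the Voyage API"],
)
print(len(emb.data[0].embedding))
```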
@@ -157,7 +154,7 @@ services:
      - POSTGRES_PASSWORD=$SERVICE_PASSWORD_POSTGRES
      - POSTGRES_USER=$SERVICE_USER_POSTGRES
    volumes:
      - "pg-data:/var/lib/postgresql/data"
      - "pg_data:/var/lib/postgresql/data"
    healthcheck:
      test:
        - CMD-SHELL
@@ -169,7 +166,7 @@ services:
    image: redis:7-alpine
    command: redis-server --appendonly yes
    volumes:
      - redis-data:/data
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
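The same Redis instance backs both the usage-based router and the response cache configured above. A quick connectivity check from another container on the compose network, assuming the `redis` Python client and the same REDIS_* variables the proxy receives:

```python
import os
import redis

# Assumed: run on the same compose network, with REDIS_HOST defaulting to the
# service name "redis" when the variable is not set.
r = redis.Redis(
    host=os.environ.get("REDIS_HOST", "redis"),
    port=int(os.environ.get("REDIS_PORT", 6379)),
    password=os.environ.get("REDIS_PASSWORD"),
)
print(r.ping())  # True mirrors the healthcheck's `redis-cli ping`
```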