From b8f25406cd3f6d1f293082427797ceecf73c398a Mon Sep 17 00:00:00 2001
From: Andras Bacsai
Date: Tue, 27 Feb 2024 15:44:19 +0100
Subject: [PATCH] Refactor code to improve performance and readability

---
 templates/compose/supabase.yaml | 885 ++++++++++++++++++++++++++++++++
 1 file changed, 885 insertions(+)
 create mode 100644 templates/compose/supabase.yaml

diff --git a/templates/compose/supabase.yaml b/templates/compose/supabase.yaml
new file mode 100644
index 000000000..fe4bed55c
--- /dev/null
+++ b/templates/compose/supabase.yaml
@@ -0,0 +1,885 @@
+# ignore: true
+# documentation:
+# slogan:
+# tags:
+# logo:
+
+services:
+  supabase-studio:
+    image: supabase/studio:20240205-b145c86
+    healthcheck:
+      test:
+        [
+          "CMD",
+          "node",
+          "-e",
+          "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})",
+        ]
+      timeout: 5s
+      interval: 5s
+      retries: 3
+    depends_on:
+      supabase-analytics:
+        condition: service_healthy
+    environment:
+      - SERVICE_FQDN_SUPABASE
+      - HOSTNAME=0.0.0.0
+      - STUDIO_PG_META_URL=http://meta:8080
+      - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
+
+      - DEFAULT_ORGANIZATION_NAME=${STUDIO_DEFAULT_ORGANIZATION:-Default Organization}
+      - DEFAULT_PROJECT_NAME=${STUDIO_DEFAULT_PROJECT:-Default Project}
+
+      - SUPABASE_URL=http://kong:8000
+      - SUPABASE_PUBLIC_URL=${SERVICE_FQDN_SUPABASE}
+      - SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogImFub24iLAogICJpc3MiOiAic3VwYWJhc2UiLAogICJpYXQiOiAxNzA4OTg4NDAwLAogICJleHAiOiAxODY2ODQxMjAwCn0.jCDqsoXGT58JnAjf27KOowNQsokkk0aR7rdbGG18P-8
+      - SUPABASE_SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogInNlcnZpY2Vfcm9sZSIsCiAgImlzcyI6ICJzdXBhYmFzZSIsCiAgImlhdCI6IDE3MDg5ODg0MDAsCiAgImV4cCI6IDE4NjY4NDEyMDAKfQ.GA7yF2BmqTzqGkP_oqDdJAQVt0djjIxGYuhE0zFDJV4
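+      # NOTE: the two keys above are HS256 JWTs whose payloads carry the
+      # "anon" and "service_role" roles; they must be signed with the same
+      # JWT_SECRET configured on supabase-db below. If you rotate that secret,
+      # re-sign and replace both tokens, otherwise API calls will be rejected.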
+
+      - LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
+      - LOGFLARE_URL=http://supabase-analytics:4000
+      - NEXT_PUBLIC_ENABLE_LOGS=true
+      # Comment out to use the BigQuery backend for analytics
+      - NEXT_ANALYTICS_BACKEND_PROVIDER=postgres
+      # Uncomment to use the BigQuery backend for analytics
+      # NEXT_ANALYTICS_BACKEND_PROVIDER=bigquery
+  supabase-db:
+    image: supabase/postgres:15.1.0.147
+    healthcheck:
+      test: pg_isready -U postgres -h localhost
+      interval: 5s
+      timeout: 5s
+      retries: 10
+    depends_on:
+      supabase-vector:
+        condition: service_healthy
+    command:
+      - postgres
+      - -c
+      - config_file=/etc/postgresql/postgresql.conf
+      - -c
+      - log_min_messages=fatal
+    restart: unless-stopped
+    environment:
+      - POSTGRES_HOST=/var/run/postgresql
+      - PGPORT=${POSTGRES_PORT:-5432}
+      - POSTGRES_PORT=${POSTGRES_PORT:-5432}
+      - PGPASSWORD=${SERVICE_PASSWORD_POSTGRES}
+      - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
+      - POSTGRES_USER=${SERVICE_USER_POSTGRES}
+      - PGDATABASE=${POSTGRES_DB:-supabase}
+      - POSTGRES_DB=${POSTGRES_DB:-supabase}
+      - JWT_SECRET=oasfhtfwevsna8e7wo3mca0d8x5aw2btk8on0eh4
+      - JWT_EXP=${JWT_EXPIRY:-3600}
+    volumes:
+      - supabase-db-data:/var/lib/postgresql/data
+      - type: bind
+        source: ./volumes/db/realtime.sql
+        target: /docker-entrypoint-initdb.d/migrations/99-realtime.sql
+        content: |
+          \set pguser `echo "$SERVICE_USER_POSTGRES"`
+
+          create schema if not exists _realtime;
+          alter schema _realtime owner to :pguser;
+      - type: bind
+        source: ./volumes/db/webhooks.sql
+        target: /docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql
+        content: |
+          BEGIN;
+          -- Create pg_net extension
+          CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
+          -- Create supabase_functions schema
+          CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
+          GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
+          ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
+          ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
+          ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
+          -- supabase_functions.migrations definition
+          CREATE TABLE supabase_functions.migrations (
+            version text PRIMARY KEY,
+            inserted_at timestamptz NOT NULL DEFAULT NOW()
+          );
+          -- Initial supabase_functions migration
+          INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
+          -- supabase_functions.hooks definition
+          CREATE TABLE supabase_functions.hooks (
+            id bigserial PRIMARY KEY,
+            hook_table_id integer NOT NULL,
+            hook_name text NOT NULL,
+            created_at timestamptz NOT NULL DEFAULT NOW(),
+            request_id bigint
+          );
+          CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
+          CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
+          COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
+          CREATE FUNCTION supabase_functions.http_request()
+            RETURNS trigger
+            LANGUAGE plpgsql
+          AS $function$
+            DECLARE
+              request_id bigint;
+              payload jsonb;
+              url text := TG_ARGV[0]::text;
+              method text := TG_ARGV[1]::text;
+              headers jsonb DEFAULT '{}'::jsonb;
+              params jsonb DEFAULT '{}'::jsonb;
+              timeout_ms integer DEFAULT 1000;
+            BEGIN
+              IF url IS NULL OR url = 'null' THEN
+                RAISE EXCEPTION 'url argument is missing';
+              END IF;
+
+              IF method IS NULL OR method = 'null' THEN
+                RAISE EXCEPTION 'method argument is missing';
+              END IF;
+
+              IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
+                headers = '{"Content-Type": "application/json"}'::jsonb;
+              ELSE
+                headers = TG_ARGV[2]::jsonb;
+              END IF;
+
+              IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
+                params = '{}'::jsonb;
+              ELSE
+                params = TG_ARGV[3]::jsonb;
+              END IF;
+
+              IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
+                timeout_ms = 1000;
+              ELSE
+                timeout_ms = TG_ARGV[4]::integer;
+              END IF;
+
+              CASE
+                WHEN method = 'GET' THEN
+                  SELECT http_get INTO request_id FROM net.http_get(
+                    url,
+                    params,
+                    headers,
+                    timeout_ms
+                  );
+                WHEN method = 'POST' THEN
+                  payload = jsonb_build_object(
+                    'old_record', OLD,
+                    'record', NEW,
+                    'type', TG_OP,
+                    'table', TG_TABLE_NAME,
+                    'schema', TG_TABLE_SCHEMA
+                  );
+
+                  SELECT http_post INTO request_id FROM net.http_post(
+                    url,
+                    payload,
+                    params,
+                    headers,
+                    timeout_ms
+                  );
+                ELSE
+                  RAISE EXCEPTION 'method argument % is invalid', method;
+              END CASE;
+
+              INSERT INTO supabase_functions.hooks
+                (hook_table_id, hook_name, request_id)
+              VALUES
+                (TG_RELID, TG_NAME, request_id);
+
+              RETURN NEW;
+            END
+          $function$;
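+          -- Usage sketch (illustrative only, not part of the template): the five
+          -- trigger arguments arrive as TG_ARGV[0..4] and map to url, method,
+          -- headers, params, and timeout_ms. A webhook on a hypothetical
+          -- public.todos table would look like:
+          --
+          --   CREATE TRIGGER todos_webhook
+          --     AFTER INSERT OR UPDATE OR DELETE ON public.todos
+          --     FOR EACH ROW
+          --     EXECUTE FUNCTION supabase_functions.http_request(
+          --       'https://example.com/hook', 'POST',
+          --       '{"Content-Type": "application/json"}', '{}', '1000'
+          --     );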
+          -- Supabase super admin
+          DO
+          $$
+          BEGIN
+            IF NOT EXISTS (
+              SELECT 1
+              FROM pg_roles
+              WHERE rolname = 'supabase_functions_admin'
+            )
+            THEN
+              CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
+            END IF;
+          END
+          $$;
+          GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
+          GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
+          GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
+          ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
+          ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
+          ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
+          ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
+          GRANT supabase_functions_admin TO postgres;
+          -- Remove unused supabase_pg_net_admin role
+          DO
+          $$
+          BEGIN
+            IF EXISTS (
+              SELECT 1
+              FROM pg_roles
+              WHERE rolname = 'supabase_pg_net_admin'
+            )
+            THEN
+              REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
+              DROP OWNED BY supabase_pg_net_admin;
+              DROP ROLE supabase_pg_net_admin;
+            END IF;
+          END
+          $$;
+          -- pg_net grants when extension is already enabled
+          DO
+          $$
+          BEGIN
+            IF EXISTS (
+              SELECT 1
+              FROM pg_extension
+              WHERE extname = 'pg_net'
+            )
+            THEN
+              GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+              REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+              REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+              GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+              GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+            END IF;
+          END
+          $$;
+          -- Event trigger for pg_net
+          CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
+          RETURNS event_trigger
+          LANGUAGE plpgsql
+          AS $$
+          BEGIN
+            IF EXISTS (
+              SELECT 1
+              FROM pg_event_trigger_ddl_commands() AS ev
+              JOIN pg_extension AS ext
+              ON ev.objid = ext.oid
+              WHERE ext.extname = 'pg_net'
+            )
+            THEN
+              GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+              REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+              REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+              GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+              GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+            END IF;
+          END;
+          $$;
+          COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
+          DO
+          $$
+          BEGIN
+            IF NOT EXISTS (
+              SELECT 1
+              FROM pg_event_trigger
+              WHERE evtname = 'issue_pg_net_access'
+            ) THEN
+              CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end
+                WHEN TAG IN ('CREATE EXTENSION')
+                EXECUTE PROCEDURE extensions.grant_pg_net_access();
+            END IF;
+          END
+          $$;
+          INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
+          ALTER function supabase_functions.http_request() SECURITY DEFINER;
+          ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
+          REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
+          GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
+          COMMIT;
+      - type: bind
+        source: ./volumes/db/roles.sql
+        target: /docker-entrypoint-initdb.d/init-scripts/99-roles.sql
+        content: |
+          -- NOTE: change to your own passwords for production environments
+          \set pgpass `echo "$POSTGRES_PASSWORD"`
+
+          ALTER USER authenticator WITH PASSWORD :'pgpass';
+          ALTER USER pgbouncer WITH PASSWORD :'pgpass';
+          ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
+          ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
+          ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
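+          -- (The backtick form above is psql's \set command substitution: the
+          -- container's POSTGRES_PASSWORD env var is read once at init time and
+          -- spliced into each statement as the quoted literal :'pgpass'.)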
+      - type: bind
+        source: ./volumes/db/jwt.sql
+        target: /docker-entrypoint-initdb.d/init-scripts/99-jwt.sql
+        content: |
+          \set jwt_secret `echo "$JWT_SECRET"`
+          \set jwt_exp `echo "$JWT_EXP"`
+
+          ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
+          ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';
+
+      - type: bind
+        source: ./volumes/db/logs.sql
+        target: /docker-entrypoint-initdb.d/migrations/99-logs.sql
+        content: |
+          \set pguser `echo "$SERVICE_USER_POSTGRES"`
+
+          create schema if not exists _analytics;
+          alter schema _analytics owner to :pguser;
+  supabase-analytics:
+    image: supabase/logflare:1.4.0
+    healthcheck:
+      test: ["CMD", "curl", "http://localhost:4000/health"]
+      timeout: 5s
+      interval: 5s
+      retries: 10
+    restart: unless-stopped
+    depends_on:
+      supabase-db:
+        condition: service_healthy
+    # Uncomment to use the BigQuery backend for analytics
+    # volumes:
+    #   - type: bind
+    #     source: ${PWD}/gcloud.json
+    #     target: /opt/app/rel/logflare/bin/gcloud.json
+    #     read_only: true
+    environment:
+      - LOGFLARE_NODE_HOST=127.0.0.1
+      - DB_USERNAME=supabase_admin
+      - DB_DATABASE=${POSTGRES_DB:-supabase}
+      - DB_HOSTNAME=${POSTGRES_HOST:-supabase-db}
+      - DB_PORT=${POSTGRES_PORT:-5432}
+      - DB_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
+      - DB_SCHEMA=_analytics
+      - LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
+      - LOGFLARE_SINGLE_TENANT=true
+      - LOGFLARE_SUPABASE_MODE=true
+
+      # Comment out these variables to use the BigQuery backend for analytics
+      - POSTGRES_BACKEND_URL=postgresql://supabase_admin:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOST:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-supabase}
+      - POSTGRES_BACKEND_SCHEMA=_analytics
+      - LOGFLARE_FEATURE_FLAG_OVERRIDE=multibackend=true
+
+      # Uncomment to use the BigQuery backend for analytics
+      # GOOGLE_PROJECT_ID=${GOOGLE_PROJECT_ID}
+      # GOOGLE_PROJECT_NUMBER=${GOOGLE_PROJECT_NUMBER}
+  supabase-vector:
+    image: timberio/vector:0.28.1-alpine
+    healthcheck:
+      test:
+        [
+          "CMD",
+          "wget",
+          "--no-verbose",
+          "--tries=1",
+          "--spider",
+          "http://supabase-vector:9001/health",
+        ]
+      timeout: 5s
+      interval: 5s
+      retries: 3
+    volumes:
+      - type: bind
+        source: ./volumes/logs/vector.yml
+        target: /etc/vector/vector.yml
+        read_only: true
+        content: |
+          api:
+            enabled: true
+            address: 0.0.0.0:9001
+
+          sources:
+            docker_host:
+              type: docker_logs
+              exclude_containers:
+                - supabase-vector
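+          # NOTE: the docker_logs source above collects container logs from the
+          # Docker daemon itself, so the vector container needs access to the
+          # Docker socket (typically /var/run/docker.sock mounted read-only).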
+
+          transforms:
+            project_logs:
+              type: remap
+              inputs:
+                - docker_host
+              source: |-
+                .project = "default"
+                .event_message = del(.message)
+                .appname = del(.container_name)
+                del(.container_created_at)
+                del(.container_id)
+                del(.source_type)
+                del(.stream)
+                del(.label)
+                del(.image)
+                del(.host)
+            router:
+              type: route
+              inputs:
+                - project_logs
+              route:
+                kong: '.appname == "supabase-kong"'
+                auth: '.appname == "supabase-auth"'
+                rest: '.appname == "supabase-rest"'
+                realtime: '.appname == "supabase-realtime"'
+                storage: '.appname == "supabase-storage"'
+                functions: '.appname == "supabase-functions"'
+                db: '.appname == "supabase-db"'
+            # Ignore non-nginx errors, since they are related to Kong booting up
+            kong_logs:
+              type: remap
+              inputs:
+                - router.kong
+              source: |-
+                req, err = parse_nginx_log(.event_message, "combined")
+                if err == null {
+                  .timestamp = req.timestamp
+                  .metadata.request.headers.referer = req.referer
+                  .metadata.request.headers.user_agent = req.agent
+                  .metadata.request.headers.cf_connecting_ip = req.client
+                  .metadata.request.method = req.method
+                  .metadata.request.path = req.path
+                  .metadata.request.protocol = req.protocol
+                  .metadata.response.status_code = req.status
+                }
+                if err != null {
+                  abort
+                }
+            # Ignore non-nginx errors, since they are related to Kong booting up
+            kong_err:
+              type: remap
+              inputs:
+                - router.kong
+              source: |-
+                .metadata.request.method = "GET"
+                .metadata.response.status_code = 200
+                parsed, err = parse_nginx_log(.event_message, "error")
+                if err == null {
+                  .timestamp = parsed.timestamp
+                  .severity = parsed.severity
+                  .metadata.request.host = parsed.host
+                  .metadata.request.headers.cf_connecting_ip = parsed.client
+                  url, err = split(parsed.request, " ")
+                  if err == null {
+                    .metadata.request.method = url[0]
+                    .metadata.request.path = url[1]
+                    .metadata.request.protocol = url[2]
+                  }
+                }
+                if err != null {
+                  abort
+                }
+            # GoTrue logs are structured JSON strings that the frontend parses directly, but we keep metadata for consistency
+            auth_logs:
+              type: remap
+              inputs:
+                - router.auth
+              source: |-
+                parsed, err = parse_json(.event_message)
+                if err == null {
+                  .metadata.timestamp = parsed.time
+                  .metadata = merge!(.metadata, parsed)
+                }
+            # PostgREST logs are structured, so we separate the timestamp from the message using a regex
+            rest_logs:
+              type: remap
+              inputs:
+                - router.rest
+              source: |-
+                parsed, err = parse_regex(.event_message, r'^(?P