# ignore: true
# documentation:
# slogan:
# tags:
# logo:

services:
  supabase-studio:
    image: supabase/studio:20240205-b145c86
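    # The healthcheck below polls Studio's /api/profile endpoint with a small node
    # one-liner and throws on any non-200 response, marking the container unhealthy.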
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      supabase-analytics:
        condition: service_healthy
    environment:
      - SERVICE_FQDN_SUPABASE
      - HOSTNAME=0.0.0.0
      - STUDIO_PG_META_URL=http://meta:8080
      - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}

      - DEFAULT_ORGANIZATION_NAME=${STUDIO_DEFAULT_ORGANIZATION:-Default Organization}
      - DEFAULT_PROJECT_NAME=${STUDIO_DEFAULT_PROJECT:-Default Project}

      - SUPABASE_URL=http://kong:8000
      - SUPABASE_PUBLIC_URL=${SERVICE_FQDN_SUPABASE}
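      # The two keys below are long-lived anon and service_role JWTs hardcoded into this
      # template; they are presumably signed with the JWT_SECRET used further down and
      # should be regenerated together with it for any real deployment.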
      - SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogImFub24iLAogICJpc3MiOiAic3VwYWJhc2UiLAogICJpYXQiOiAxNzA4OTg4NDAwLAogICJleHAiOiAxODY2ODQxMjAwCn0.jCDqsoXGT58JnAjf27KOowNQsokkk0aR7rdbGG18P-8
      - SUPABASE_SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogInNlcnZpY2Vfcm9sZSIsCiAgImlzcyI6ICJzdXBhYmFzZSIsCiAgImlhdCI6IDE3MDg5ODg0MDAsCiAgImV4cCI6IDE4NjY4NDEyMDAKfQ.GA7yF2BmqTzqGkP_oqDdJAQVt0djjIxGYuhE0zFDJV4

      - LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
      - LOGFLARE_URL=http://supabase-analytics:4000
      - NEXT_PUBLIC_ENABLE_LOGS=true
      # Comment out the next line to use the Big Query backend for analytics
      - NEXT_ANALYTICS_BACKEND_PROVIDER=postgres
      # Uncomment to use the Big Query backend for analytics
      # NEXT_ANALYTICS_BACKEND_PROVIDER=bigquery
  supabase-db:
    image: supabase/postgres:15.1.0.147
    healthcheck:
      test: pg_isready -U postgres -h localhost
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      supabase-vector:
        condition: service_healthy
    command:
      - postgres
      - -c
      - config_file=/etc/postgresql/postgresql.conf
      - -c
      - log_min_messages=fatal
    restart: unless-stopped
    environment:
      - POSTGRES_HOST=/var/run/postgresql
      - PGPORT=${POSTGRES_PORT:-5432}
      - POSTGRES_PORT=${POSTGRES_PORT:-5432}
      - PGPASSWORD=${SERVICE_PASSWORD_POSTGRES}
      - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
      - POSTGRES_USER=${SERVICE_USER_POSTGRES}
      - PGDATABASE=${POSTGRES_DB:-supabase}
      - POSTGRES_DB=${POSTGRES_DB:-supabase}
      - JWT_SECRET=oasfhtfwevsna8e7wo3mca0d8x5aw2btk8on0eh4
      - JWT_EXP=${JWT_EXPIRY:-3600}
    volumes:
      - supabase-db-data:/var/lib/postgresql/data
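      # The bind mounts below carry an inline `content:` key, which appears to be a
      # Coolify extension: the given file content is written to `source` on the host
      # before being mounted into the container.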
      - type: bind
        source: ./volumes/db/realtime.sql
        target: /docker-entrypoint-initdb.d/migrations/99-realtime.sql
        content: |
          \set pguser `echo "$SERVICE_USER_POSTGRES"`

          create schema if not exists _realtime;
          alter schema _realtime owner to :pguser;
      - type: bind
        source: ./volumes/db/webhooks.sql
        target: /docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql
        content: |
          BEGIN;
          -- Create pg_net extension
          CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
          -- Create supabase_functions schema
          CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
          GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
          ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
          ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
          ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
          -- supabase_functions.migrations definition
          CREATE TABLE supabase_functions.migrations (
            version text PRIMARY KEY,
            inserted_at timestamptz NOT NULL DEFAULT NOW()
          );
          -- Initial supabase_functions migration
          INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
          -- supabase_functions.hooks definition
          CREATE TABLE supabase_functions.hooks (
            id bigserial PRIMARY KEY,
            hook_table_id integer NOT NULL,
            hook_name text NOT NULL,
            created_at timestamptz NOT NULL DEFAULT NOW(),
            request_id bigint
          );
          CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
          CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
          COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
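          -- http_request() is meant to be attached to table triggers: it reads
          -- (url, method, headers, params, timeout_ms) from TG_ARGV, forwards the row
          -- change through pg_net (net.http_get / net.http_post) and records the async
          -- request id in supabase_functions.hooks.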
          CREATE FUNCTION supabase_functions.http_request()
            RETURNS trigger
            LANGUAGE plpgsql
          AS $function$
            DECLARE
              request_id bigint;
              payload jsonb;
              url text := TG_ARGV[0]::text;
              method text := TG_ARGV[1]::text;
              headers jsonb DEFAULT '{}'::jsonb;
              params jsonb DEFAULT '{}'::jsonb;
              timeout_ms integer DEFAULT 1000;
            BEGIN
              IF url IS NULL OR url = 'null' THEN
                RAISE EXCEPTION 'url argument is missing';
              END IF;

              IF method IS NULL OR method = 'null' THEN
                RAISE EXCEPTION 'method argument is missing';
              END IF;

              IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
                headers = '{"Content-Type": "application/json"}'::jsonb;
              ELSE
                headers = TG_ARGV[2]::jsonb;
              END IF;

              IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
                params = '{}'::jsonb;
              ELSE
                params = TG_ARGV[3]::jsonb;
              END IF;

              IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
                timeout_ms = 1000;
              ELSE
                timeout_ms = TG_ARGV[4]::integer;
              END IF;

              CASE
                WHEN method = 'GET' THEN
                  SELECT http_get INTO request_id FROM net.http_get(
                    url,
                    params,
                    headers,
                    timeout_ms
                  );
                WHEN method = 'POST' THEN
                  payload = jsonb_build_object(
                    'old_record', OLD,
                    'record', NEW,
                    'type', TG_OP,
                    'table', TG_TABLE_NAME,
                    'schema', TG_TABLE_SCHEMA
                  );

                  SELECT http_post INTO request_id FROM net.http_post(
                    url,
                    payload,
                    params,
                    headers,
                    timeout_ms
                  );
                ELSE
                  RAISE EXCEPTION 'method argument % is invalid', method;
              END CASE;

              INSERT INTO supabase_functions.hooks
                (hook_table_id, hook_name, request_id)
              VALUES
                (TG_RELID, TG_NAME, request_id);

              RETURN NEW;
            END
          $function$;
          -- Supabase super admin
          DO
          $$
          BEGIN
            IF NOT EXISTS (
              SELECT 1
              FROM pg_roles
              WHERE rolname = 'supabase_functions_admin'
            )
            THEN
              CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
            END IF;
          END
          $$;
          GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
          GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
          GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
          ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
          ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
          ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
          ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
          GRANT supabase_functions_admin TO postgres;
          -- Remove unused supabase_pg_net_admin role
          DO
          $$
          BEGIN
            IF EXISTS (
              SELECT 1
              FROM pg_roles
              WHERE rolname = 'supabase_pg_net_admin'
            )
            THEN
              REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
              DROP OWNED BY supabase_pg_net_admin;
              DROP ROLE supabase_pg_net_admin;
            END IF;
          END
          $$;
          -- pg_net grants when extension is already enabled
          DO
          $$
          BEGIN
            IF EXISTS (
              SELECT 1
              FROM pg_extension
              WHERE extname = 'pg_net'
            )
            THEN
              GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
              REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
              REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
              GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
              GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
            END IF;
          END
          $$;
          -- Event trigger for pg_net
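          -- The event trigger function below re-applies the grants above whenever the
          -- pg_net extension is (re)created via CREATE EXTENSION.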
          CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
            RETURNS event_trigger
            LANGUAGE plpgsql
          AS $$
          BEGIN
            IF EXISTS (
              SELECT 1
              FROM pg_event_trigger_ddl_commands() AS ev
              JOIN pg_extension AS ext
                ON ev.objid = ext.oid
              WHERE ext.extname = 'pg_net'
            )
            THEN
              GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
              ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
              ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
              REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
              REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
              GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
              GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
            END IF;
          END;
          $$;
          COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
          DO
          $$
          BEGIN
            IF NOT EXISTS (
              SELECT 1
              FROM pg_event_trigger
              WHERE evtname = 'issue_pg_net_access'
            ) THEN
              CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
                EXECUTE PROCEDURE extensions.grant_pg_net_access();
            END IF;
          END
          $$;
          INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
          ALTER function supabase_functions.http_request() SECURITY DEFINER;
          ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
          REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
          GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
          COMMIT;
      - type: bind
        source: ./volumes/db/roles.sql
        target: /docker-entrypoint-initdb.d/init-scripts/99-roles.sql
        content: |
          -- NOTE: change to your own passwords for production environments
          \set pgpass `echo "$POSTGRES_PASSWORD"`

          ALTER USER authenticator WITH PASSWORD :'pgpass';
          ALTER USER pgbouncer WITH PASSWORD :'pgpass';
          ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
          ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
          ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
      - type: bind
        source: ./volumes/db/jwt.sql
        target: /docker-entrypoint-initdb.d/init-scripts/99-jwt.sql
        content: |
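          -- Expose the JWT secret and expiry as database-scoped settings (app.settings.*),
          -- presumably so SQL code can read them back via current_setting().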
          \set jwt_secret `echo "$JWT_SECRET"`
          \set jwt_exp `echo "$JWT_EXP"`

          ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
          ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';

      - type: bind
        source: ./volumes/db/logs.sql
        target: /docker-entrypoint-initdb.d/migrations/99-logs.sql
        content: |
          \set pguser `echo "$SERVICE_USER_POSTGRES"`

          create schema if not exists _analytics;
          alter schema _analytics owner to :pguser;
  supabase-analytics:
    image: supabase/logflare:1.4.0
    healthcheck:
      test: ["CMD", "curl", "http://localhost:4000/health"]
      timeout: 5s
      interval: 5s
      retries: 10
    restart: unless-stopped
    depends_on:
      supabase-db:
        condition: service_healthy
    # Uncomment to use the Big Query backend for analytics
    # volumes:
    #   - type: bind
    #     source: ${PWD}/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    environment:
      - LOGFLARE_NODE_HOST=127.0.0.1
      - DB_USERNAME=supabase_admin
      - DB_DATABASE=${POSTGRES_DB:-supabase}
      - DB_HOSTNAME=${POSTGRES_HOST:-supabase-db}
      - DB_PORT=${POSTGRES_PORT:-5432}
      - DB_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
      - DB_SCHEMA=_analytics
      - LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
      - LOGFLARE_SINGLE_TENANT=true
      - LOGFLARE_SUPABASE_MODE=true

      # Comment out these variables to use the Big Query backend for analytics
      - POSTGRES_BACKEND_URL=postgresql://supabase_admin:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOST:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-supabase}
      - POSTGRES_BACKEND_SCHEMA=_analytics
      - LOGFLARE_FEATURE_FLAG_OVERRIDE=multibackend=true

      # Uncomment to use the Big Query backend for analytics
      # GOOGLE_PROJECT_ID=${GOOGLE_PROJECT_ID}
      # GOOGLE_PROJECT_NUMBER=${GOOGLE_PROJECT_NUMBER}
  supabase-vector:
    image: timberio/vector:0.28.1-alpine
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://supabase-vector:9001/health",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    volumes:
      - type: bind
        source: ./volumes/logs/vector.yml
        target: /etc/vector/vector.yml
        read_only: true
        content: |
          api:
            enabled: true
            address: 0.0.0.0:9001

          sources:
            docker_host:
              type: docker_logs
              exclude_containers:
                - supabase-vector

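          # Pipeline overview: the docker_logs source feeds project_logs, which tags and
          # trims each record; the router splits events by container name, the transforms
          # below normalise each service's format, and the sinks ship everything to
          # Logflare (supabase-analytics) over HTTP.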
          transforms:
            project_logs:
              type: remap
              inputs:
                - docker_host
              source: |-
                .project = "default"
                .event_message = del(.message)
                .appname = del(.container_name)
                del(.container_created_at)
                del(.container_id)
                del(.source_type)
                del(.stream)
                del(.label)
                del(.image)
                del(.host)
                del(.stream)
            router:
              type: route
              inputs:
                - project_logs
              route:
                kong: '.appname == "supabase-kong"'
                auth: '.appname == "supabase-auth"'
                rest: '.appname == "supabase-rest"'
                realtime: '.appname == "supabase-realtime"'
                storage: '.appname == "supabase-storage"'
                functions: '.appname == "supabase-functions"'
                db: '.appname == "supabase-db"'
            # Ignores non-nginx errors since they are related to Kong booting up
            kong_logs:
              type: remap
              inputs:
                - router.kong
              source: |-
                req, err = parse_nginx_log(.event_message, "combined")
                if err == null {
                  .timestamp = req.timestamp
                  .metadata.request.headers.referer = req.referer
                  .metadata.request.headers.user_agent = req.agent
                  .metadata.request.headers.cf_connecting_ip = req.client
                  .metadata.request.method = req.method
                  .metadata.request.path = req.path
                  .metadata.request.protocol = req.protocol
                  .metadata.response.status_code = req.status
                }
                if err != null {
                  abort
                }
            # Ignores non-nginx errors since they are related to Kong booting up
            kong_err:
              type: remap
              inputs:
                - router.kong
              source: |-
                .metadata.request.method = "GET"
                .metadata.response.status_code = 200
                parsed, err = parse_nginx_log(.event_message, "error")
                if err == null {
                  .timestamp = parsed.timestamp
                  .severity = parsed.severity
                  .metadata.request.host = parsed.host
                  .metadata.request.headers.cf_connecting_ip = parsed.client
                  url, err = split(parsed.request, " ")
                  if err == null {
                    .metadata.request.method = url[0]
                    .metadata.request.path = url[1]
                    .metadata.request.protocol = url[2]
                  }
                }
                if err != null {
                  abort
                }
            # GoTrue logs are structured JSON strings which the frontend parses directly, but we keep metadata for consistency
            auth_logs:
              type: remap
              inputs:
                - router.auth
              source: |-
                parsed, err = parse_json(.event_message)
                if err == null {
                  .metadata.timestamp = parsed.time
                  .metadata = merge!(.metadata, parsed)
                }
            # PostgREST logs are structured, so we separate the timestamp from the message using a regex
            rest_logs:
              type: remap
              inputs:
                - router.rest
              source: |-
                parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
                if err == null {
                  .event_message = parsed.msg
                  .timestamp = to_timestamp!(parsed.time)
                  .metadata.host = .project
                }
            # Realtime logs are structured, so we parse the severity level using a regex (the time is ignored because it has no date)
            realtime_logs:
              type: remap
              inputs:
                - router.realtime
              source: |-
                .metadata.project = del(.project)
                .metadata.external_id = .metadata.project
                parsed, err = parse_regex(.event_message, r'^(?P<time>\d+:\d+:\d+\.\d+) \[(?P<level>\w+)\] (?P<msg>.*)$')
                if err == null {
                  .event_message = parsed.msg
                  .metadata.level = parsed.level
                }
            # Storage logs may contain JSON objects, so we parse them for completeness
            storage_logs:
              type: remap
              inputs:
                - router.storage
              source: |-
                .metadata.project = del(.project)
                .metadata.tenantId = .metadata.project
                parsed, err = parse_json(.event_message)
                if err == null {
                  .event_message = parsed.msg
                  .metadata.level = parsed.level
                  .metadata.timestamp = parsed.time
                  .metadata.context[0].host = parsed.hostname
                  .metadata.context[0].pid = parsed.pid
                }
            # Postgres logs some messages to stderr, which we map to the warning severity level
            db_logs:
              type: remap
              inputs:
                - router.db
              source: |-
                .metadata.host = "db-default"
                .metadata.parsed.timestamp = .timestamp

                parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)

                if err != null || parsed == null {
                  .metadata.parsed.error_severity = "info"
                }
                if parsed != null {
                  .metadata.parsed.error_severity = parsed.level
                }
                if .metadata.parsed.error_severity == "info" {
                  .metadata.parsed.error_severity = "log"
                }
                .metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)

          sinks:
            logflare_auth:
              type: 'http'
              inputs:
                - auth_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY}'
            logflare_realtime:
              type: 'http'
              inputs:
                - realtime_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY}'
            logflare_rest:
              type: 'http'
              inputs:
                - rest_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY}'
            logflare_db:
              type: 'http'
              inputs:
                - db_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              # We must route this sink through Kong because ingesting logs before Logflare is fully
              # initialised leads to broken queries from Studio. This relies on the containers being
              # started in the following order: vector > db > logflare > kong
              uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY}'
            logflare_functions:
              type: 'http'
              inputs:
                - router.functions
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY}'
            logflare_storage:
              type: 'http'
              inputs:
                - storage_logs
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY}'
            logflare_kong:
              type: 'http'
              inputs:
                - kong_logs
                - kong_err
              encoding:
                codec: 'json'
              method: 'post'
              request:
                retry_max_duration_secs: 10
              uri: 'http://supabase-analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY}'

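      # Vector tails container logs through the Docker socket mounted read-only below;
      # this is what the docker_logs source above consumes.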
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - LOGFLARE_API_KEY=${SERVICE_PASSWORD_LOGFLARE}
    command: ["--config", "etc/vector/vector.yml"]
  supabase-kong:
    image: kong:2.8.1
    # https://unix.stackexchange.com/a/294837
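    # The entrypoint expands the environment variables referenced in ~/temp.yml (the
    # declarative config mounted below) into ~/kong.yml before handing off to Kong's
    # stock entrypoint.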
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
    depends_on:
      supabase-analytics:
        condition: service_healthy
    environment:
      - KONG_DATABASE="off"
      - KONG_DECLARATIVE_CONFIG=/home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      - KONG_DNS_ORDER=LAST,A,CNAME
      - KONG_PLUGINS=request-transformer,cors,key-auth,acl,basic-auth
      - KONG_NGINX_PROXY_PROXY_BUFFER_SIZE=160k
      - KONG_NGINX_PROXY_PROXY_BUFFERS=64 160k
      - SUPABASE_ANON_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogImFub24iLAogICJpc3MiOiAic3VwYWJhc2UiLAogICJpYXQiOiAxNzA4OTg4NDAwLAogICJleHAiOiAxODY2ODQxMjAwCn0.jCDqsoXGT58JnAjf27KOowNQsokkk0aR7rdbGG18P-8
      - SUPABASE_SERVICE_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogInNlcnZpY2Vfcm9sZSIsCiAgImlzcyI6ICJzdXBhYmFzZSIsCiAgImlhdCI6IDE3MDg5ODg0MDAsCiAgImV4cCI6IDE4NjY4NDEyMDAKfQ.GA7yF2BmqTzqGkP_oqDdJAQVt0djjIxGYuhE0zFDJV4
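      # Default dashboard credentials; the same admin/admin pair is duplicated in the
      # basicauth_credentials section of the kong.yml template below, so change both
      # for anything public-facing.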
      - DASHBOARD_USERNAME=admin
      - DASHBOARD_PASSWORD=admin
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - type: bind
        source: ./volumes/api/kong.yml
        target: /home/kong/temp.yml
        content: |
          _format_version: '2.1'
          _transform: true

          ###
          ### Consumers / Users
          ###
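          # The key-auth credentials below are the anon and service_role JWTs; they have
          # to match the SUPABASE_ANON_KEY / SUPABASE_SERVICE_KEY values passed to the
          # other services, otherwise apikey-authenticated requests will be rejected.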
          consumers:
            - username: DASHBOARD
            - username: anon
              keyauth_credentials:
                - key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogImFub24iLAogICJpc3MiOiAic3VwYWJhc2UiLAogICJpYXQiOiAxNzA4OTg4NDAwLAogICJleHAiOiAxODY2ODQxMjAwCn0.jCDqsoXGT58JnAjf27KOowNQsokkk0aR7rdbGG18P-8
            - username: service_role
              keyauth_credentials:
                - key: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogInNlcnZpY2Vfcm9sZSIsCiAgImlzcyI6ICJzdXBhYmFzZSIsCiAgImlhdCI6IDE3MDg5ODg0MDAsCiAgImV4cCI6IDE4NjY4NDEyMDAKfQ.GA7yF2BmqTzqGkP_oqDdJAQVt0djjIxGYuhE0zFDJV4

          ###
          ### Access Control List
          ###
          acls:
            - consumer: anon
              group: anon
            - consumer: service_role
              group: admin

          ###
          ### Dashboard credentials
          ###
          basicauth_credentials:
            - consumer: DASHBOARD
              username: admin
              password: admin

          ###
          ### API Routes
          ###
          services:

            ## Open Auth routes
            - name: auth-v1-open
              url: http://auth:9999/verify
              routes:
                - name: auth-v1-open
                  strip_path: true
                  paths:
                    - /auth/v1/verify
              plugins:
                - name: cors
            - name: auth-v1-open-callback
              url: http://auth:9999/callback
              routes:
                - name: auth-v1-open-callback
                  strip_path: true
                  paths:
                    - /auth/v1/callback
              plugins:
                - name: cors
            - name: auth-v1-open-authorize
              url: http://auth:9999/authorize
              routes:
                - name: auth-v1-open-authorize
                  strip_path: true
                  paths:
                    - /auth/v1/authorize
              plugins:
                - name: cors

            ## Secure Auth routes
            - name: auth-v1
              _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
              url: http://auth:9999/
              routes:
                - name: auth-v1-all
                  strip_path: true
                  paths:
                    - /auth/v1/
              plugins:
                - name: cors
                - name: key-auth
                  config:
                    hide_credentials: false
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
                      - anon

            ## Secure REST routes
            - name: rest-v1
              _comment: 'PostgREST: /rest/v1/* -> http://supabase-rest:3000/*'
              url: http://supabase-rest:3000/
              routes:
                - name: rest-v1-all
                  strip_path: true
                  paths:
                    - /rest/v1/
              plugins:
                - name: cors
                - name: key-auth
                  config:
                    hide_credentials: true
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
                      - anon

            ## Secure GraphQL routes
            - name: graphql-v1
              _comment: 'PostgREST: /graphql/v1/* -> http://supabase-rest:3000/rpc/graphql'
              url: http://supabase-rest:3000/rpc/graphql
              routes:
                - name: graphql-v1-all
                  strip_path: true
                  paths:
                    - /graphql/v1
              plugins:
                - name: cors
                - name: key-auth
                  config:
                    hide_credentials: true
                - name: request-transformer
                  config:
                    add:
                      headers:
                        - Content-Profile:graphql_public
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
                      - anon

            ## Secure Realtime routes
            - name: realtime-v1
              _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
              url: http://realtime-dev.supabase-realtime:4000/socket/
              routes:
                - name: realtime-v1-all
                  strip_path: true
                  paths:
                    - /realtime/v1/
              plugins:
                - name: cors
                - name: key-auth
                  config:
                    hide_credentials: false
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin
                      - anon

            ## Storage routes: the storage server manages its own auth
            - name: storage-v1
              _comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
              url: http://storage:5000/
              routes:
                - name: storage-v1-all
                  strip_path: true
                  paths:
                    - /storage/v1/
              plugins:
                - name: cors

            ## Edge Functions routes
            - name: functions-v1
              _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
              url: http://functions:9000/
              routes:
                - name: functions-v1-all
                  strip_path: true
                  paths:
                    - /functions/v1/
              plugins:
                - name: cors

            ## Analytics routes
            - name: analytics-v1
              _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
              url: http://supabase-analytics:4000/
              routes:
                - name: analytics-v1-all
                  strip_path: true
                  paths:
                    - /analytics/v1/

            ## Secure Database routes
            - name: meta
              _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
              url: http://meta:8080/
              routes:
                - name: meta-all
                  strip_path: true
                  paths:
                    - /pg/
              plugins:
                - name: key-auth
                  config:
                    hide_credentials: false
                - name: acl
                  config:
                    hide_groups_header: true
                    allow:
                      - admin

            ## Protected Dashboard - catch all remaining routes
            - name: dashboard
              _comment: 'Studio: /* -> http://studio:3000/*'
              url: http://supabase-studio:3000/
              routes:
                - name: dashboard-all
                  strip_path: true
                  paths:
                    - /
              plugins:
                - name: cors
                - name: basic-auth
                  config:
                    hide_credentials: true
  supabase-rest:
    image: postgrest/postgrest:v12.0.1
    depends_on:
      supabase-db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      supabase-analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      - PGRST_DB_URI=postgres://authenticator:${SERVICE_PASSWORD_POSTGRES}@${POSTGRES_HOST:-supabase-db}:${POSTGRES_PORT:-5432}/${POSTGRES_DB:-supabase}
      - PGRST_DB_SCHEMAS=${PGRST_DB_SCHEMAS:-public}
      - PGRST_DB_ANON_ROLE=anon
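      # This hardcoded secret must match the JWT_SECRET configured on supabase-db (and
      # the secret the anon/service_role keys were signed with), otherwise PostgREST
      # will reject tokens issued by the rest of the stack.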
      - PGRST_JWT_SECRET=oasfhtfwevsna8e7wo3mca0d8x5aw2btk8on0eh4
      - PGRST_DB_USE_LEGACY_GUCS="false"
      - PGRST_APP_SETTINGS_JWT_SECRET=oasfhtfwevsna8e7wo3mca0d8x5aw2btk8on0eh4
      - PGRST_APP_SETTINGS_JWT_EXP=${JWT_EXPIRY:-3600}
    command: "postgrest"