name: supabase

services:
  studio:
    container_name: supabase-studio
    image: supabase/studio:2025.10.01-sha-8460121
    restart: unless-stopped
    healthcheck:
      test:
        [
          'CMD',
          'node',
          '-e',
          "fetch('http://studio:3000/api/platform/profile').then((r) => {if (r.status !== 200) throw new Error(r.status)})",
        ]
      timeout: 10s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      # Binds nestjs listener to both IPv4 and IPv6 network interfaces
      HOSTNAME: '::'
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PG_META_CRYPTO_KEY: ${PG_META_CRYPTO_KEY}
      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}
      SUPABASE_URL: http://kong:8000
      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      AUTH_JWT_SECRET: ${JWT_SECRET}
      LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
      LOGFLARE_URL: http://analytics:4000
      # Quoted so the consumer receives the literal string "true" rather than a YAML boolean
      NEXT_PUBLIC_ENABLE_LOGS: 'true'
      # Comment to use Big Query backend for analytics
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
      # Uncomment to use Big Query backend for analytics
      # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery

  kong:
    container_name: supabase-kong
    image: kong:2.8.1
    restart: unless-stopped
    ports:
      # Quoted: unquoted host:container mappings risk YAML scalar misparsing
      - '${KONG_HTTP_PORT}:8000/tcp'
      - '${KONG_HTTPS_PORT}:8443/tcp'
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - ./volumes/api/kong.yml:/home/kong/temp.yml:ro,z
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      # 'off' must be quoted — bare off is a YAML 1.1 boolean
      KONG_DATABASE: 'off'
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
    # Expands env vars into the declarative config before starting Kong.
    # https://unix.stackexchange.com/a/294837
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'

  auth:
    container_name: supabase-auth
    image: supabase/gotrue:v2.180.0
    restart: unless-stopped
    healthcheck:
      test:
        [
          'CMD',
          'wget',
          '--no-verbose',
          '--tries=1',
          '--spider',
          'http://localhost:9999/health',
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      GOTRUE_API_HOST: 0.0.0.0
      GOTRUE_API_PORT: 9999
      API_EXTERNAL_URL: ${API_EXTERNAL_URL}

      GOTRUE_DB_DRIVER: postgres
      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}

      GOTRUE_SITE_URL: ${SITE_URL}
      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}

      GOTRUE_JWT_ADMIN_ROLES: service_role
      GOTRUE_JWT_AUD: authenticated
      GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
      GOTRUE_JWT_SECRET: ${JWT_SECRET}

      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
      GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
      GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: ${ENABLE_SECURE_EMAIL_CHANGE_ENABLED}

      # Uncomment to bypass nonce check in ID Token flow.
      # Commonly set to true when using Google Sign In on mobile.
      # GOTRUE_EXTERNAL_SKIP_NONCE_CHECK: true
      # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true

      # GOTRUE_SMTP_MAX_FREQUENCY: 1s
      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
      GOTRUE_SMTP_HOST: ${SMTP_HOST}
      GOTRUE_SMTP_PORT: ${SMTP_PORT}
      GOTRUE_SMTP_USER: ${SMTP_USER}
      GOTRUE_SMTP_PASS: ${SMTP_PASS}
      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
      GOTRUE_MAILER_TEMPLATES_INVITE: '${MAILER_TEMPLATES_INVITE}'
      GOTRUE_MAILER_TEMPLATES_CONFIRMATION: '${MAILER_TEMPLATES_CONFIRMATION}'
      GOTRUE_MAILER_TEMPLATES_RECOVERY: '${MAILER_TEMPLATES_RECOVERY}'
      GOTRUE_MAILER_TEMPLATES_MAGIC_LINK: '${MAILER_TEMPLATES_MAGIC_LINK}'
      GOTRUE_MAILER_TEMPLATES_EMAIL_CHANGE: '${MAILER_TEMPLATES_EMAIL_CHANGE}'
      GOTRUE_MAILER_SUBJECTS_CONFIRMATION: '${MAILER_SUBJECTS_CONFIRMATION}'
      GOTRUE_MAILER_SUBJECTS_RECOVERY: '${MAILER_SUBJECTS_RECOVERY}'
      GOTRUE_MAILER_SUBJECTS_MAGIC_LINK: '${MAILER_SUBJECTS_MAGIC_LINK}'
      GOTRUE_MAILER_SUBJECTS_EMAIL_CHANGE: '${MAILER_SUBJECTS_EMAIL_CHANGE}'
      GOTRUE_MAILER_SUBJECTS_INVITE: '${MAILER_SUBJECTS_INVITE}'

      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
      GOTRUE_SMS_OTP_LENGTH: ${SMS_OTP_LENGTH}

      # Uncomment to enable custom access token hook.
      # Please see: https://supabase.com/docs/guides/auth/auth-hooks
      # for full list of hooks and additional details about custom_access_token_hook

      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED: "true"
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI: "pg-functions://postgres/public/custom_access_token_hook"
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_SECRETS: ""

      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED: "true"
      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/mfa_verification_attempt"

      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED: "true"
      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI: "pg-functions://postgres/public/password_verification_attempt"

      # GOTRUE_HOOK_SEND_SMS_ENABLED: "false"
      # GOTRUE_HOOK_SEND_SMS_URI: "pg-functions://postgres/public/custom_access_token_hook"
      # GOTRUE_HOOK_SEND_SMS_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"

      # GOTRUE_HOOK_SEND_EMAIL_ENABLED: "false"
      # GOTRUE_HOOK_SEND_EMAIL_URI: "http://host.docker.internal:54321/functions/v1/email_sender"
      # GOTRUE_HOOK_SEND_EMAIL_SECRETS: "v1,whsec_VGhpcyBpcyBhbiBleGFtcGxlIG9mIGEgc2hvcnRlciBCYXNlNjQgc3RyaW5n"

  rest:
    container_name: supabase-rest
    image: postgrest/postgrest:v13.0.7
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: 'false'
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
    command: ['postgrest']

  realtime:
    # This container name looks inconsistent but is correct because
    # realtime constructs tenant id by parsing the subdomain
    container_name: realtime-dev.supabase-realtime
    image: supabase/realtime:v2.51.11
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          'CMD',
          'curl',
          '-sSfL',
          '--head',
          '-o',
          '/dev/null',
          '-H',
          'Authorization: Bearer ${ANON_KEY}',
          'http://localhost:4000/api/tenants/realtime-dev/health',
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      PORT: 4000
      DB_HOST: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_USER: supabase_admin
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_NAME: ${POSTGRES_DB}
      DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
      DB_ENC_KEY: supabaserealtime
      API_JWT_SECRET: ${JWT_SECRET}
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      ERL_AFLAGS: -proto_dist inet_tcp
      DNS_NODES: "''"
      RLIMIT_NOFILE: '10000'
      APP_NAME: realtime
      # Quoted so the consumer receives literal "true" strings
      SEED_SELF_HOST: 'true'
      RUN_JANITOR: 'true'

  # To use S3 backed storage:
  # docker compose -f docker-compose.yml -f docker-compose.s3.yml up
  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v1.28.0
    restart: unless-stopped
    volumes:
      - ./volumes/storage:/var/lib/storage:z
    healthcheck:
      test:
        [
          'CMD',
          'wget',
          '--no-verbose',
          '--tries=1',
          '--spider',
          'http://storage:5000/status',
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      TUS_URL_PATH: '/storage/v1/upload/resumable'
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: s3
      GLOBAL_S3_BUCKET: stub
      GLOBAL_S3_ENDPOINT: 'http://minio:9000'
      GLOBAL_S3_PROTOCOL: http
      GLOBAL_S3_FORCE_PATH_STYLE: 'true'
      AWS_ACCESS_KEY_ID: '${USER_MINIO}'
      AWS_SECRET_ACCESS_KEY: '${PASSWORD_MINIO}'
      AWS_DEFAULT_REGION: stub
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      ENABLE_IMAGE_TRANSFORMATION: 'true'
      IMGPROXY_URL: http://imgproxy:5001
    labels:
      - traefik.enable=true
      - traefik.http.routers.supabase-storage.rule=Host(`${STORAGE_HOST}`) && PathPrefix(`/storage/v1`)
      - traefik.http.routers.supabase-storage.entrypoints=https
      - traefik.http.routers.supabase-storage.tls.certresolver=letsencrypt
      - traefik.http.middlewares.strip-storage-prefix.stripPrefix.prefixes=/storage/v1
      - traefik.http.services.supabase-storage.loadbalancer.server.port=5000
      - traefik.http.middlewares.supabase-storage-cors.headers.accesscontrolalloworiginlist=*
      - traefik.http.middlewares.supabase-storage-cors.headers.accesscontrolallowmethods=*
      - traefik.http.middlewares.supabase-storage-cors.headers.accesscontrolallowheaders=*
      - traefik.http.middlewares.supabase-storage-cors.headers.accesscontrolmaxage=86400
      - traefik.http.middlewares.supabase-storage-cors.headers.addvaryheader=true
      - traefik.http.routers.supabase-storage.middlewares=supabase-storage-cors,strip-storage-prefix

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0
    restart: unless-stopped
    volumes:
      - ./volumes/storage:/var/lib/storage:z
    healthcheck:
      test: ['CMD', 'imgproxy', 'health']
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ':5001'
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: 'true'
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}

  minio:
    image: minio/minio
    ports:
      - '9000:9000'
      - '9001:9001'
    environment:
      MINIO_ROOT_USER: '${USER_MINIO}'
      MINIO_ROOT_PASSWORD: '${PASSWORD_MINIO}'
    command: 'server --console-address ":9001" /data'
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://minio:9000/minio/health/live']
      interval: 2s
      timeout: 10s
      retries: 5
    volumes:
      - './volumes/storage:/data:z'

  minio-createbucket:
    image: minio/mc
    restart: 'no'
    environment:
      MINIO_ROOT_USER: '${USER_MINIO}'
      MINIO_ROOT_PASSWORD: '${PASSWORD_MINIO}'
    depends_on:
      minio:
        condition: service_healthy
    # One-shot job: create the storage bucket if it does not exist, then exit
    entrypoint: >
      /bin/sh -c "
      /usr/bin/mc alias set supa-minio http://minio:9000 ${USER_MINIO} ${PASSWORD_MINIO};
      /usr/bin/mc mb --ignore-existing supa-minio/stub;
      exit 0;
      "

  meta:
    container_name: supabase-meta
    image: supabase/postgres-meta:v0.91.6
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PG_META_PORT: 8080
      PG_META_DB_HOST: ${POSTGRES_HOST}
      PG_META_DB_PORT: ${POSTGRES_PORT}
      PG_META_DB_NAME: ${POSTGRES_DB}
      PG_META_DB_USER: supabase_admin
      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
      CRYPTO_KEY: ${PG_META_CRYPTO_KEY}

  functions:
    container_name: supabase-edge-functions
    image: supabase/edge-runtime:v1.69.6
    restart: unless-stopped
    volumes:
      - ./volumes/functions:/home/deno/functions:Z
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      JWT_SECRET: ${JWT_SECRET}
      SUPABASE_URL: http://kong:8000
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      # TODO: Allow configuring VERIFY_JWT per function.
      # This PR might help: https://github.com/supabase/cli/pull/786
      VERIFY_JWT: '${FUNCTIONS_VERIFY_JWT}'
    command: ['start', '--main-service', '/home/deno/functions/main']

  analytics:
    container_name: supabase-analytics
    image: supabase/logflare:1.22.6
    restart: unless-stopped
    ports:
      - '4000:4000'
    # Uncomment to use Big Query backend for analytics
    # volumes:
    #   - type: bind
    #     source: ${PWD}/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    healthcheck:
      test: ['CMD', 'curl', 'http://localhost:4000/health']
      timeout: 5s
      interval: 5s
      retries: 10
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
    environment:
      LOGFLARE_NODE_HOST: 127.0.0.1
      DB_USERNAME: supabase_admin
      DB_DATABASE: _supabase
      DB_HOSTNAME: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_SCHEMA: _analytics
      LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
      LOGFLARE_PRIVATE_ACCESS_TOKEN: ${LOGFLARE_PRIVATE_ACCESS_TOKEN}
      LOGFLARE_SINGLE_TENANT: 'true'
      LOGFLARE_SUPABASE_MODE: 'true'
      LOGFLARE_MIN_CLUSTER_SIZE: 1
      # Comment variables to use Big Query backend for analytics
      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
      POSTGRES_BACKEND_SCHEMA: _analytics
      LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
      # Uncomment to use Big Query backend for analytics
      # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
      # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}

  # Comment out everything below this point if you are using an external Postgres database
  db:
    container_name: supabase-db
    image: supabase/postgres:15.8.1.085
    restart: unless-stopped
    volumes:
      - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
      # Must be superuser to create event trigger
      - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
      # Must be superuser to alter reserved role
      - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
      # Initialize the database settings with JWT_SECRET and JWT_EXP
      - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
      # PGDATA directory is persisted between restarts
      - ./volumes/db/data:/var/lib/postgresql/data:Z
      # Changes required for internal supabase data such as _analytics
      - ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
      # Changes required for Analytics support
      - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
      # Changes required for Pooler support
      - ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
      # Use named volume to persist pgsodium decryption key between restarts
      - db-config:/etc/postgresql-custom
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', 'postgres', '-h', 'localhost']
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      vector:
        condition: service_healthy
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
    command:
      [
        'postgres',
        '-c',
        'config_file=/etc/postgresql/postgresql.conf',
        '-c',
        'log_min_messages=fatal', # prevents Realtime polling queries from appearing in logs
      ]

  vector:
    container_name: supabase-vector
    image: timberio/vector:0.28.1-alpine
    restart: unless-stopped
    volumes:
      - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
      - '/var/run/docker.sock:/var/run/docker.sock:ro,z'
    healthcheck:
      test:
        [
          'CMD',
          'wget',
          '--no-verbose',
          '--tries=1',
          '--spider',
          'http://vector:9001/health',
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
    command: ['--config', '/etc/vector/vector.yml']
    security_opt:
      - 'label=disable'

  # Update the DATABASE_URL if you are using an external Postgres database
  supavisor:
    container_name: supabase-pooler
    image: supabase/supavisor:2.7.0
    restart: unless-stopped
    ports:
      - '${POSTGRES_PORT}:5432'
      - '${POOLER_PROXY_PORT_TRANSACTION}:6543'
    volumes:
      - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z
    healthcheck:
      test:
        [
          'CMD',
          'curl',
          '-sSfL',
          '--head',
          '-o',
          '/dev/null',
          'http://127.0.0.1:4000/api/health',
        ]
      interval: 10s
      timeout: 5s
      retries: 5
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PORT: 4000
      POSTGRES_PORT: ${POSTGRES_PORT}
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/_supabase
      CLUSTER_POSTGRES: 'true'
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      VAULT_ENC_KEY: ${VAULT_ENC_KEY}
      API_JWT_SECRET: ${JWT_SECRET}
      METRICS_JWT_SECRET: ${JWT_SECRET}
      REGION: local
      ERL_AFLAGS: -proto_dist inet_tcp
      POOLER_TENANT_ID: ${POOLER_TENANT_ID}
      POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
      POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
      POOLER_POOL_MODE: transaction
      DB_POOL_SIZE: ${POOLER_DB_POOL_SIZE}
    command:
      [
        '/bin/sh',
        '-c',
        '/app/bin/migrate && /app/bin/supavisor eval "$$(cat /etc/pooler/pooler.exs)" && /app/bin/server',
      ]

  templates-server:
    image: 'caddy:2.9.1'
    volumes:
      - './volumes/templates:/templates'
    entrypoint: 'caddy file-server -r /templates --listen :80'

volumes:
  db-config: