  # Comment out everything below this point if you are using an external Postgres database
  db:
    container_name: supabase-db
    image: supabase/postgres:15.8.1.060
    restart: unless-stopped
    volumes:
      - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
      # Must be superuser to create event trigger
      - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
      # Must be superuser to alter reserved role
      - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
      # Initialize the database settings with JWT_SECRET and JWT_EXP
      - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
      # PGDATA directory is persisted between restarts
      - ./volumes/db/data:/var/lib/postgresql/data:Z
      # Changes required for internal supabase data such as _analytics
      - ./volumes/db/_supabase.sql:/docker-entrypoint-initdb.d/migrations/97-_supabase.sql:Z
      # Changes required for Analytics support
      - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
      # Changes required for Pooler support
      - ./volumes/db/pooler.sql:/docker-entrypoint-initdb.d/migrations/99-pooler.sql:Z
      # Use named volume to persist pgsodium decryption key between restarts
      - db-config:/etc/postgresql-custom
      - ./volumes/db/schema-authelia.sh:/docker-entrypoint-initdb.d/schema-authelia.sh
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "postgres", "-h", "localhost"]
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      vector:
        condition: service_healthy
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD:?error}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
      AUTHELIA_SCHEMA: authelia
    command:
      [
        "postgres",
        "-c",
        "config_file=/etc/postgresql/postgresql.conf",
        "-c",
        "log_min_messages=fatal", # prevents Realtime polling queries from appearing in logs
      ]

  vector:
    container_name: supabase-vector
    image: timberio/vector:0.28.1-alpine
    restart: unless-stopped
    volumes:
      - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro,z
      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro,z
    environment:
      LOGFLARE_PUBLIC_ACCESS_TOKEN: ${LOGFLARE_PUBLIC_ACCESS_TOKEN}
    command: ["--config", "/etc/vector/vector.yml"]
    security_opt:
      - "label=disable"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9001/health"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Update the DATABASE_URL if you are using an external Postgres database
  supavisor:
    container_name: supabase-pooler
    image: supabase/supavisor:2.5.7
    restart: unless-stopped
    # ports:
    #   - ${POSTGRES_PORT}:5432
    #   - ${POOLER_PROXY_PORT_TRANSACTION}:6543
    volumes:
      - ./volumes/pooler/pooler.exs:/etc/pooler/pooler.exs:ro,z
    healthcheck:
      test: ["CMD", "curl", "-sSfL", "--head", "-o", "/dev/null", "http://127.0.0.1:4000/api/health"]
      interval: 10s
      timeout: 5s
      retries: 5
    depends_on:
      db:
        condition: service_healthy
      analytics:
        condition: service_healthy
    environment:
      PORT: 4000
      POSTGRES_PORT: ${POSTGRES_PORT}
      POSTGRES_DB: ${POSTGRES_DB}
      POSTGRES_USER: ${POSTGRES_USER}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@db:${POSTGRES_PORT}/supabase
      CLUSTER_POSTGRES: true
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      VAULT_ENC_KEY: ${VAULT_ENC_KEY}
      API_JWT_SECRET: ${JWT_SECRET}
      METRICS_JWT_SECRET: ${JWT_SECRET}
      REGION: local
      ERL_AFLAGS: -proto_dist inet_tcp
      POOLER_TENANT_ID: ${POOLER_TENANT_ID}
      POOLER_DEFAULT_POOL_SIZE: ${POOLER_DEFAULT_POOL_SIZE}
      POOLER_MAX_CLIENT_CONN: ${POOLER_MAX_CLIENT_CONN}
      POOLER_POOL_MODE: transaction
      DB_POOL_SIZE: ${POOLER_DB_POOL_SIZE}
    command: ["/bin/sh", "-c", '/app/bin/migrate && /app/bin/supavisor eval "$$(cat /etc/pooler/pooler.exs)" && /app/bin/server']
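
  # If you comment out the db service above and point the stack at an external Postgres
  # instance, the DATABASE_URL under supavisor must be updated as well. A minimal sketch,
  # assuming an external host named external-db.example.com that is reachable from this
  # compose network and reuses the supabase_admin role (the hostname is a placeholder,
  # not part of this file; adjust host, port, and database name to your setup):
  #
  #   environment:
  #     DATABASE_URL: ecto://supabase_admin:${POSTGRES_PASSWORD}@external-db.example.com:${POSTGRES_PORT}/supabase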