Highly ambitious ATProtocol AppView service and SDKs

update docs in api, extract system slice to env var, slice -> slices, and some other cleanup

+10 -10
.github/workflows/api.yml
··· 32 32 run: nix flake check --refresh 33 33 34 34 - name: Build Rust binary 35 - run: nix build .#slice 35 + run: nix build .#slices 36 36 37 37 - name: Build Docker image 38 - run: nix build .#sliceImg 38 + run: nix build .#slicesImg 39 39 40 40 - name: Load Docker image 41 41 run: docker load < result ··· 58 58 echo "Checking PostgreSQL status..." 59 59 docker ps | grep postgres-test 60 60 docker logs postgres-test 61 - 61 + 62 62 # Test PostgreSQL connection 63 63 echo "Testing PostgreSQL connection..." 64 64 docker exec postgres-test psql -U slice -d slice_test -c "SELECT version();" 65 - 65 + 66 66 # Start the Slice container with PostgreSQL backend 67 67 echo "Starting Slice container..." 68 68 docker run -d --name slice-test -p 8080:8080 \ ··· 74 74 # Wait for the service to start and show logs 75 75 echo "Waiting for service to start..." 76 76 sleep 10 77 - 77 + 78 78 echo "Checking all containers (including exited)..." 79 79 docker ps -a | grep -E "(slice-test|postgres-test)" || true 80 - 80 + 81 81 echo "Checking Slice container status..." 82 82 docker ps | grep slice-test || echo "Slice container not running!" 83 - 83 + 84 84 echo "Slice container logs:" 85 85 docker logs slice-test 2>&1 || echo "Failed to get logs" 86 - 86 + 87 87 echo "Checking if slice container exited:" 88 88 docker inspect slice-test --format='{{.State.Status}}' || echo "Container not found" 89 89 docker inspect slice-test --format='{{.State.ExitCode}}' || echo "No exit code" 90 - 90 + 91 91 # Additional wait 92 92 sleep 10 93 - 93 + 94 94 echo "Final Slice container logs:" 95 95 docker logs slice-test 2>&1 || echo "Failed to get final logs" 96 96
+4 -1
api/.env.example
··· 1 1 # Database configuration 2 - DATABASE_URL=postgresql://slice:slice@localhost:5432/slice 2 + DATABASE_URL=postgresql://slices:slices@localhost:5432/slices 3 3 4 4 # Server configuration 5 5 PORT=3000 ··· 9 9 10 10 # AT Protocol relay endpoint for syncing data 11 11 RELAY_ENDPOINT=https://relay1.us-west.bsky.network 12 + 13 + # System slice URI 14 + SYSTEM_SLICE_URI=at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z 12 15 13 16 # Logging level 14 17 RUST_LOG=debug
+103 -29
api/CLAUDE.md
··· 1 - ### OAuth 2.0 Endpoints with AIP 1 + # CLAUDE.md 2 + 3 + This file provides guidance to Claude Code (claude.ai/code) when working with 4 + code in this repository. 5 + 6 + ## Project Overview 7 + 8 + Slices is an AT Protocol (ATProto) indexing and querying service that allows 9 + developers to create custom slices (subsets) of the ATProto network data. It 10 + indexes records from the Bluesky/ATProto network via Jetstream, validates them 11 + against Lexicon schemas, and provides flexible querying capabilities through an 12 + XRPC API. 2 13 3 - The AIP server implements the following OAuth 2.0 endpoints: 14 + ## Development Setup 4 15 5 - - `GET ${AIP_BASE_URL}/oauth/authorize` - Authorization endpoint for OAuth flows 6 - - `POST ${AIP_BASE_URL}/oauth/token` - Token endpoint for exchanging 7 - authorization codes for access tokens 8 - - `POST ${AIP_BASE_URL}/oauth/par` - Pushed Authorization Request endpoint 9 - (RFC 9126) 10 - - `POST ${AIP_BASE_URL}/oauth/clients/register` - Dynamic Client Registration 11 - endpoint (RFC 7591) 12 - - `GET ${AIP_BASE_URL}/oauth/atp/callback` - ATProtocol OAuth callback handler 13 - - `GET ${AIP_BASE_URL}/.well-known/oauth-authorization-server` - OAuth server 14 - metadata discovery (RFC 8414) 15 - - `GET ${AIP_BASE_URL}/.well-known/oauth-protected-resource` - Protected 16 - resource metadata 17 - - `GET ${AIP_BASE_URL}/.well-known/jwks.json` - JSON Web Key Set for token 18 - verification 19 - - `GET ${AIP_BASE_URL}/oauth/userinfo` - introspection endpoint returning claims 20 - info where sub is the user's atproto did 21 - - `GET ${AIP_BASE_URL}/api/atproto/session` - returns atproto session data 16 + ### Database Connection 17 + 18 + The application uses PostgreSQL. You can connect to the database using: 19 + 20 + 1. 
**Docker Compose** (recommended for local development): 21 + ```bash 22 + docker-compose up postgres 23 + ``` 24 + This starts PostgreSQL on port 5432 with: 25 + - Database: `slices` 26 + - User: `slices` 27 + - Password: `slices` 28 + 29 + 2. **Environment Variables** (.env file): Create an `api/.env` file (copy from 30 + `api/.env.example`): 31 + ``` 32 + DATABASE_URL=postgresql://slices:slices@localhost:5432/slices 33 + SYSTEM_SLICE_URI=at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z 34 + AUTH_BASE_URL=http://localhost:8081 35 + RELAY_ENDPOINT=https://relay1.us-west.bsky.network 36 + ``` 37 + 38 + ## Common Development Commands 39 + 40 + ```bash 41 + # Type checking and validation 42 + cargo check 43 + 44 + # Run development server 45 + cargo run 46 + 47 + # Run sync script 48 + ./scripts/sync.sh http://localhost:3000 <token> # Local dev 49 + ./scripts/sync.sh https://api.slices.network <token> # Production 50 + 51 + # Database setup 52 + sqlx database create 53 + 54 + # Database migrations 55 + sqlx migrate run 56 + sqlx migrate add <migration_name> 57 + 58 + # sqlx query cache (run after changing queries) 59 + cargo sqlx prepare 60 + 61 + # Build for production 62 + cargo build --release 63 + ``` 64 + 65 + ## High-Level Architecture 66 + 67 + ### Data Flow 68 + 69 + 1. **Real-time Indexing:** Jetstream → JetstreamConsumer → Lexicon Validation → 70 + Database → Index 71 + 2. **XRPC Query:** HTTP Request → OAuth Verification → Dynamic Handler → 72 + Database Query → Response 73 + 3. 
**Background Sync:** Trigger → Job Queue → SyncService → ATProto Relay → 74 + Validation → Database 75 + 76 + ### Key Architectural Decisions 77 + 78 + - **Single-table design** for records using PostgreSQL with JSONB for 79 + flexibility across arbitrary lexicons 80 + - **Dynamic XRPC endpoint generation** - routes like 81 + `/{collection}.createRecord` are generated at runtime 82 + - **Dual indexing strategy** - real-time via Jetstream and bulk sync via 83 + background jobs 84 + - **Cursor-based pagination** using `base64(sort_value::indexed_at::cid)` for 85 + stable pagination 86 + - **OAuth DPoP authentication** integrated with AIP server for ATProto 87 + authentication 88 + 89 + ### Module Organization 90 + 91 + - `src/api/` - HTTP handlers for XRPC endpoints (actors, records, oauth, sync, 92 + etc.) 93 + - `src/main.rs` - Application entry point, server setup, Jetstream startup 94 + - `src/database.rs` - All database operations, query building, cursor pagination 95 + - `src/jetstream.rs` - Real-time event processing from ATProto firehose 96 + - `src/sync.rs` - Bulk synchronization operations with ATProto relay 97 + - `src/auth.rs` - OAuth verification and DPoP authentication setup 98 + - `src/errors.rs` - Error type definitions (reference for new errors) 22 99 23 100 ## Error Handling 24 101 25 102 All error strings must use this format: 26 103 27 - error-aip-<domain>-<number> <message>: <details> 104 + error-slices-<domain>-<number> <message>: <details> 28 105 29 106 Example errors: 30 107 31 - - error-slice-resolve-1 Multiple DIDs resolved for method 32 - - error-slice-plc-1 HTTP request failed: https://google.com/ Not Found 33 - - error-slice-key-1 Error decoding key: invalid 108 + - error-slices-resolve-1 Multiple DIDs resolved for method 109 + - error-slices-plc-1 HTTP request failed: https://google.com/ Not Found 110 + - error-slices-key-1 Error decoding key: invalid 34 111 35 112 Errors should be represented as enums using the `thiserror` library when 
36 113 possible using `src/errors.rs` as a reference and example. ··· 48 125 49 126 ## HTTP Handler Organization 50 127 51 - HTTP handlers should be organized as Rust source files in the `src/http` 52 - directory and should have the `handler_` prefix. Each handler should have it's 53 - own request and response types and helper functionality. 54 - 55 - Example handler: `handler_index.rs` 128 + HTTP handlers should be organized as Rust source files in the `src/api` 129 + directory. Each handler should have its own request and response types and 130 + helper functionality. 56 131 57 132 - After updating, run `cargo check` to fix errors and warnings 58 133 - Don't use dead code, if it's not used remove it 59 - - Ise htmx and hyperscript when possible, if not javascript in script tag is ok
+1 -1
api/Cargo.lock
··· 2624 2624 checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" 2625 2625 2626 2626 [[package]] 2627 - name = "slice" 2627 + name = "slices" 2628 2628 version = "0.1.0" 2629 2629 dependencies = [ 2630 2630 "anyhow",
+1 -1
api/Cargo.toml
··· 1 1 [package] 2 - name = "slice" 2 + name = "slices" 3 3 version = "0.1.0" 4 4 edition = "2024" 5 5
api/docs/lexicons_spec.md context/lexicons_spec.md
+14 -14
api/flake.nix
··· 1 1 { 2 - description = "API service for Slice"; 2 + description = "API service for Slices"; 3 3 4 4 inputs = { 5 5 nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; ··· 43 43 inherit src; 44 44 version = "0.1.0"; 45 45 strictDeps = true; 46 - pname = "slice"; 47 - name = "slice"; 46 + pname = "slices"; 47 + name = "slices"; 48 48 buildInputs = with pkgs; [ 49 49 openssl 50 50 pkg-config ··· 71 71 # Fix for linker issues in Nix 72 72 CC = "${pkgs.stdenv.cc}/bin/cc"; 73 73 74 - cargoExtraArgs = "--bin slice"; 74 + cargoExtraArgs = "--bin slices"; 75 75 }; 76 76 77 77 # Build cargo artifacts 78 78 cargoArtifacts = craneLib.buildDepsOnly commonArgs; 79 79 80 80 # Build the package 81 - slice = craneLib.buildPackage (commonArgs // { 81 + slices = craneLib.buildPackage (commonArgs // { 82 82 cargoArtifacts = cargoArtifacts; 83 83 doCheck = false; 84 84 CARGO_PROFILE = "release"; ··· 86 86 87 87 # Copy migration files 88 88 migrationFiles = pkgs.stdenv.mkDerivation { 89 - name = "slice-migrations"; 89 + name = "slices-migrations"; 90 90 src = ./migrations; 91 91 installPhase = '' 92 92 mkdir -p $out/migrations ··· 96 96 97 97 # Copy script files 98 98 scriptFiles = pkgs.stdenv.mkDerivation { 99 - name = "slice-scripts"; 99 + name = "slices-scripts"; 100 100 src = ./scripts; 101 101 installPhase = '' 102 102 mkdir -p $out/scripts ··· 107 107 108 108 # Common OCI labels 109 109 ociLabels = { 110 - "org.opencontainers.image.title" = "slice"; 110 + "org.opencontainers.image.title" = "slices"; 111 111 "org.opencontainers.image.description" = "API service for Slices"; 112 112 "org.opencontainers.image.version" = "0.1.0"; 113 113 "org.opencontainers.image.authors" = "Slices Social"; ··· 115 115 }; 116 116 117 117 # Docker image for deployment 118 - sliceImg = pkgs.dockerTools.buildImage { 119 - name = "slice"; 118 + slicesImg = pkgs.dockerTools.buildImage { 119 + name = "slices"; 120 120 tag = "latest"; 121 121 fromImage = pkgs.dockerTools.pullImage { 122 122 imageName = 
"alpine"; ··· 126 126 copyToRoot = pkgs.buildEnv { 127 127 name = "image-root"; 128 128 paths = [ 129 - slice 129 + slices 130 130 pkgs.cacert 131 131 pkgs.postgresql 132 132 pkgs.deno ··· 136 136 }; 137 137 138 138 config = { 139 - Cmd = [ "/bin/slice" ]; 139 + Cmd = [ "/bin/slices" ]; 140 140 Env = [ 141 141 "RUST_BACKTRACE=1" 142 142 "RUST_LOG=info" ··· 151 151 152 152 in 153 153 { 154 - inherit slice sliceImg; 155 - default = slice; 154 + inherit slices slicesImg; 155 + default = slices; 156 156 }; 157 157 in 158 158 {
+1
api/fly.toml
··· 13 13 HTTP_PORT="8080" 14 14 RUST_LOG="debug" 15 15 AUTH_BASE_URL="https://slices-aip.fly.dev" 16 + SYSTEM_SLICE_URI="at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z" 16 17 17 18 [http_service] 18 19 internal_port = 8080
-33
api/scripts/prod_sync.sh
··· 1 - #!/bin/bash 2 - 3 - # Check if ACCESS_TOKEN environment variable is set 4 - # if [ -z "$ACCESS_TOKEN" ]; then 5 - # echo "❌ Error: ACCESS_TOKEN environment variable is required" 6 - # echo "Usage: ACCESS_TOKEN=your_token ./prod_sync.sh" 7 - # exit 1 8 - # fi 9 - 10 - echo "🔄 Testing Production Sync Endpoint..." 11 - 12 - echo "🎯 Syncing slice collections with specific repos" 13 - curl -s -X POST https://slices-api.fly.dev/xrpc/network.slices.slice.startSync \ 14 - -H "Content-Type: application/json" \ 15 - -H "Authorization: Bearer $ACCESS_TOKEN" \ 16 - -d '{ 17 - "slice": "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z", 18 - "collections": [ 19 - "network.slices.slice", 20 - "network.slices.lexicon", 21 - "network.slices.actor.profile" 22 - ], 23 - "externalCollections": [ 24 - "app.bsky.actor.profile" 25 - ], 26 - "repos": [ 27 - "did:plc:bcgltzqazw5tb6k2g3ttenbj" 28 - ], 29 - "skipValidation": true 30 - }' | jq '.' 31 - 32 - echo "" 33 - echo "✅ Production sync started!"
+59
api/scripts/sync.sh
··· 1 + #!/bin/bash 2 + 3 + # Usage: ./sync.sh <api_endpoint> [bearer_token] 4 + # Examples: 5 + # ./sync.sh http://localhost:3000 <token> 6 + # ./sync.sh https://slices-api.fly.dev <token> 7 + # ACCESS_TOKEN=<token> ./sync.sh https://slices-api.fly.dev 8 + 9 + if [ -z "$1" ]; then 10 + echo "Usage: $0 <api_endpoint> [bearer_token]" 11 + echo "" 12 + echo "Examples:" 13 + echo " $0 http://localhost:3000 <token>" 14 + echo " $0 https://slices-api.fly.dev <token>" 15 + echo " ACCESS_TOKEN=<token> $0 https://slices-api.fly.dev" 16 + exit 1 17 + fi 18 + 19 + API_ENDPOINT="$1" 20 + 21 + # Get bearer token from argument or environment variable 22 + if [ -n "$2" ]; then 23 + TOKEN="$2" 24 + elif [ -n "$ACCESS_TOKEN" ]; then 25 + TOKEN="$ACCESS_TOKEN" 26 + else 27 + echo "❌ Error: Bearer token is required" 28 + echo "Provide it as second argument or set ACCESS_TOKEN environment variable" 29 + exit 1 30 + fi 31 + 32 + echo "🔄 Starting Sync..." 33 + echo "🌐 API Endpoint: $API_ENDPOINT" 34 + echo "" 35 + 36 + # Get system slice URI from environment or use default 37 + SYSTEM_SLICE_URI="${SYSTEM_SLICE_URI:-at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z}" 38 + 39 + echo "🎯 Syncing slice collections with specific repos" 40 + curl -s -X POST "$API_ENDPOINT/xrpc/network.slices.slice.startSync" \ 41 + -H "Content-Type: application/json" \ 42 + -H "Authorization: Bearer $TOKEN" \ 43 + -d "{ 44 + \"slice\": \"$SYSTEM_SLICE_URI\", 45 + \"collections\": [ 46 + \"network.slices.actor.profile\", 47 + \"network.slices.slice\", 48 + \"network.slices.lexicon\", 49 + \"network.slices.waitlist.invite\", 50 + \"network.slices.waitlist.request\" 51 + ], 52 + \"externalCollections\": [ 53 + \"app.bsky.actor.profile\" 54 + ], 55 + \"skipValidation\": true 56 + }" | jq '.' 57 + 58 + echo "" 59 + echo "✅ Sync complete!"
-33
api/scripts/test_sync.sh
··· 1 - #!/bin/bash 2 - 3 - if [ -z "$1" ]; then 4 - echo "Usage: $0 <bearer_token>" 5 - exit 1 6 - fi 7 - 8 - TOKEN="$1" 9 - 10 - echo "🔄 Testing Sync Endpoint..." 11 - 12 - echo "🎯 Syncing specific collections with specific repos" 13 - curl -s -X POST http://localhost:3000/xrpc/network.slices.slice.startSync \ 14 - -H "Content-Type: application/json" \ 15 - -H "Authorization: Bearer $TOKEN" \ 16 - -d '{ 17 - "slice": "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z", 18 - "collections": [ 19 - "network.slices.actor.profile", 20 - "network.slices.slice", 21 - "network.slices.lexicon" 22 - ], 23 - "externalCollections": [ 24 - "app.bsky.actor.profile" 25 - ], 26 - "repos": [ 27 - "did:plc:bcgltzqazw5tb6k2g3ttenbj" 28 - ], 29 - "skipValidation": true 30 - }' | jq '.' 31 - 32 - echo "" 33 - echo "✅ Sync test complete!"
+2 -2
api/src/api/xrpc_dynamic.rs
··· 568 568 569 569 // For network.slices.lexicon collection, validate against the system slice 570 570 let validation_slice_uri = if collection == "network.slices.lexicon" { 571 - "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z" 571 + &state.config.system_slice_uri 572 572 } else { 573 573 &slice_uri 574 574 }; ··· 684 684 685 685 // For network.slices.lexicon collection, validate against the system slice 686 686 let validation_slice_uri = if collection == "network.slices.lexicon" { 687 - "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z" 687 + &state.config.system_slice_uri 688 688 } else { 689 689 &slice_uri 690 690 };
+17 -17
api/src/errors.rs
··· 8 8 9 9 #[derive(Error, Debug)] 10 10 pub enum DatabaseError { 11 - #[error("error-slice-database-1 SQL query failed: {0}")] 11 + #[error("error-slices-database-1 SQL query failed: {0}")] 12 12 SqlQuery(#[from] sqlx::Error), 13 13 14 - #[error("error-slice-database-2 Record not found: {uri}")] 14 + #[error("error-slices-database-2 Record not found: {uri}")] 15 15 RecordNotFound { uri: String }, 16 16 } 17 17 18 18 #[derive(Error, Debug)] 19 19 pub enum SyncError { 20 - #[error("error-slice-sync-1 HTTP request failed: {0}")] 20 + #[error("error-slices-sync-1 HTTP request failed: {0}")] 21 21 HttpRequest(#[from] reqwest::Error), 22 22 23 - #[error("error-slice-sync-2 Database operation failed: {0}")] 23 + #[error("error-slices-sync-2 Database operation failed: {0}")] 24 24 Database(#[from] DatabaseError), 25 25 26 - #[error("error-slice-sync-3 JSON parsing failed: {0}")] 26 + #[error("error-slices-sync-3 JSON parsing failed: {0}")] 27 27 JsonParse(#[from] serde_json::Error), 28 28 29 - #[error("error-slice-sync-4 Failed to list repos for collection: {status}")] 29 + #[error("error-slices-sync-4 Failed to list repos for collection: {status}")] 30 30 ListRepos { status: u16 }, 31 31 32 - #[error("error-slice-sync-5 Failed to list records: {status}")] 32 + #[error("error-slices-sync-5 Failed to list records: {status}")] 33 33 ListRecords { status: u16 }, 34 34 35 - #[error("error-slice-sync-6 Task join failed: {0}")] 35 + #[error("error-slices-sync-6 Task join failed: {0}")] 36 36 TaskJoin(#[from] tokio::task::JoinError), 37 37 38 - #[error("error-slice-sync-7 Generic error: {0}")] 38 + #[error("error-slices-sync-7 Generic error: {0}")] 39 39 Generic(String), 40 40 } 41 41 42 42 #[derive(Error, Debug)] 43 43 pub enum AppError { 44 - #[error("error-slice-app-1 Database connection failed: {0}")] 44 + #[error("error-slices-app-1 Database connection failed: {0}")] 45 45 DatabaseConnection(#[from] sqlx::Error), 46 46 47 - #[error("error-slice-app-2 Migration failed: 
{0}")] 47 + #[error("error-slices-app-2 Migration failed: {0}")] 48 48 Migration(#[from] sqlx::migrate::MigrateError), 49 49 50 - #[error("error-slice-app-3 Server bind failed: {0}")] 50 + #[error("error-slices-app-3 Server bind failed: {0}")] 51 51 ServerBind(#[from] std::io::Error), 52 52 53 - #[error("error-slice-app-4 Internal server error: {0}")] 53 + #[error("error-slices-app-4 Internal server error: {0}")] 54 54 Internal(String), 55 55 56 - #[error("error-slice-app-5 Resource not found: {0}")] 56 + #[error("error-slices-app-5 Resource not found: {0}")] 57 57 NotFound(String), 58 58 59 - #[error("error-slice-app-6 Bad request: {0}")] 59 + #[error("error-slices-app-6 Bad request: {0}")] 60 60 BadRequest(String), 61 61 } 62 62 63 63 #[derive(Error, Debug)] 64 64 pub enum SliceError { 65 - #[error("error-slice-jetstream-1 Jetstream error: {message}")] 65 + #[error("error-slices-jetstream-1 Jetstream error: {message}")] 66 66 JetstreamError { message: String }, 67 67 68 - #[error("error-slice-database Database error: {0}")] 68 + #[error("error-slices-database-3 Database error: {0}")] 69 69 Database(#[from] DatabaseError), 70 70 71 71 }
+5
api/src/main.rs
··· 31 31 pub struct Config { 32 32 pub auth_base_url: String, 33 33 pub relay_endpoint: String, 34 + pub system_slice_uri: String, 34 35 } 35 36 36 37 #[derive(Clone)] ··· 76 77 let relay_endpoint = env::var("RELAY_ENDPOINT") 77 78 .unwrap_or_else(|_| "https://relay1.us-west.bsky.network".to_string()); 78 79 80 + let system_slice_uri = env::var("SYSTEM_SLICE_URI") 81 + .unwrap_or_else(|_| "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z".to_string()); 82 + 79 83 let config = Config { 80 84 auth_base_url, 81 85 relay_endpoint, 86 + system_slice_uri, 82 87 }; 83 88 84 89 // Initialize global logger
+4 -4
docker-compose.yml
··· 4 4 postgres: 5 5 image: postgres:15 6 6 environment: 7 - POSTGRES_DB: slice 8 - POSTGRES_USER: slice 9 - POSTGRES_PASSWORD: slice 7 + POSTGRES_DB: slices 8 + POSTGRES_USER: slices 9 + POSTGRES_PASSWORD: slices 10 10 ports: 11 11 - "5432:5432" 12 12 volumes: 13 13 - postgres_data:/var/lib/postgresql/data 14 14 - ./schema.sql:/docker-entrypoint-initdb.d/01-schema.sql 15 15 healthcheck: 16 - test: ["CMD-SHELL", "pg_isready -U slice -d slice"] 16 + test: ["CMD-SHELL", "pg_isready -U slices -d slices"] 17 17 interval: 5s 18 18 timeout: 5s 19 19 retries: 5