Highly ambitious ATProtocol AppView service and SDKs

update docs in api, extract system slice URI to env var, rename slice -> slices, and some other cleanup

+10 -10
.github/workflows/api.yml
··· 32 run: nix flake check --refresh 33 34 - name: Build Rust binary 35 - run: nix build .#slice 36 37 - name: Build Docker image 38 - run: nix build .#sliceImg 39 40 - name: Load Docker image 41 run: docker load < result ··· 58 echo "Checking PostgreSQL status..." 59 docker ps | grep postgres-test 60 docker logs postgres-test 61 - 62 # Test PostgreSQL connection 63 echo "Testing PostgreSQL connection..." 64 docker exec postgres-test psql -U slice -d slice_test -c "SELECT version();" 65 - 66 # Start the Slice container with PostgreSQL backend 67 echo "Starting Slice container..." 68 docker run -d --name slice-test -p 8080:8080 \ ··· 74 # Wait for the service to start and show logs 75 echo "Waiting for service to start..." 76 sleep 10 77 - 78 echo "Checking all containers (including exited)..." 79 docker ps -a | grep -E "(slice-test|postgres-test)" || true 80 - 81 echo "Checking Slice container status..." 82 docker ps | grep slice-test || echo "Slice container not running!" 83 - 84 echo "Slice container logs:" 85 docker logs slice-test 2>&1 || echo "Failed to get logs" 86 - 87 echo "Checking if slice container exited:" 88 docker inspect slice-test --format='{{.State.Status}}' || echo "Container not found" 89 docker inspect slice-test --format='{{.State.ExitCode}}' || echo "No exit code" 90 - 91 # Additional wait 92 sleep 10 93 - 94 echo "Final Slice container logs:" 95 docker logs slice-test 2>&1 || echo "Failed to get final logs" 96
··· 32 run: nix flake check --refresh 33 34 - name: Build Rust binary 35 + run: nix build .#slices 36 37 - name: Build Docker image 38 + run: nix build .#slicesImg 39 40 - name: Load Docker image 41 run: docker load < result ··· 58 echo "Checking PostgreSQL status..." 59 docker ps | grep postgres-test 60 docker logs postgres-test 61 + 62 # Test PostgreSQL connection 63 echo "Testing PostgreSQL connection..." 64 docker exec postgres-test psql -U slice -d slice_test -c "SELECT version();" 65 + 66 # Start the Slice container with PostgreSQL backend 67 echo "Starting Slice container..." 68 docker run -d --name slice-test -p 8080:8080 \ ··· 74 # Wait for the service to start and show logs 75 echo "Waiting for service to start..." 76 sleep 10 77 + 78 echo "Checking all containers (including exited)..." 79 docker ps -a | grep -E "(slice-test|postgres-test)" || true 80 + 81 echo "Checking Slice container status..." 82 docker ps | grep slice-test || echo "Slice container not running!" 83 + 84 echo "Slice container logs:" 85 docker logs slice-test 2>&1 || echo "Failed to get logs" 86 + 87 echo "Checking if slice container exited:" 88 docker inspect slice-test --format='{{.State.Status}}' || echo "Container not found" 89 docker inspect slice-test --format='{{.State.ExitCode}}' || echo "No exit code" 90 + 91 # Additional wait 92 sleep 10 93 + 94 echo "Final Slice container logs:" 95 docker logs slice-test 2>&1 || echo "Failed to get final logs" 96
+4 -1
api/.env.example
··· 1 # Database configuration 2 - DATABASE_URL=postgresql://slice:slice@localhost:5432/slice 3 4 # Server configuration 5 PORT=3000 ··· 9 10 # AT Protocol relay endpoint for syncing data 11 RELAY_ENDPOINT=https://relay1.us-west.bsky.network 12 13 # Logging level 14 RUST_LOG=debug
··· 1 # Database configuration 2 + DATABASE_URL=postgresql://slices:slices@localhost:5432/slices 3 4 # Server configuration 5 PORT=3000 ··· 9 10 # AT Protocol relay endpoint for syncing data 11 RELAY_ENDPOINT=https://relay1.us-west.bsky.network 12 + 13 + # System slice URI 14 + SYSTEM_SLICE_URI=at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z 15 16 # Logging level 17 RUST_LOG=debug
+103 -29
api/CLAUDE.md
··· 1 - ### OAuth 2.0 Endpoints with AIP 2 3 - The AIP server implements the following OAuth 2.0 endpoints: 4 5 - - `GET ${AIP_BASE_URL}/oauth/authorize` - Authorization endpoint for OAuth flows 6 - - `POST ${AIP_BASE_URL}/oauth/token` - Token endpoint for exchanging 7 - authorization codes for access tokens 8 - - `POST ${AIP_BASE_URL}/oauth/par` - Pushed Authorization Request endpoint 9 - (RFC 9126) 10 - - `POST ${AIP_BASE_URL}/oauth/clients/register` - Dynamic Client Registration 11 - endpoint (RFC 7591) 12 - - `GET ${AIP_BASE_URL}/oauth/atp/callback` - ATProtocol OAuth callback handler 13 - - `GET ${AIP_BASE_URL}/.well-known/oauth-authorization-server` - OAuth server 14 - metadata discovery (RFC 8414) 15 - - `GET ${AIP_BASE_URL}/.well-known/oauth-protected-resource` - Protected 16 - resource metadata 17 - - `GET ${AIP_BASE_URL}/.well-known/jwks.json` - JSON Web Key Set for token 18 - verification 19 - - `GET ${AIP_BASE_URL}/oauth/userinfo` - introspection endpoint returning claims 20 - info where sub is the user's atproto did 21 - - `GET ${AIP_BASE_URL}/api/atproto/session` - returns atproto session data 22 23 ## Error Handling 24 25 All error strings must use this format: 26 27 - error-aip-<domain>-<number> <message>: <details> 28 29 Example errors: 30 31 - - error-slice-resolve-1 Multiple DIDs resolved for method 32 - - error-slice-plc-1 HTTP request failed: https://google.com/ Not Found 33 - - error-slice-key-1 Error decoding key: invalid 34 35 Errors should be represented as enums using the `thiserror` library when 36 possible using `src/errors.rs` as a reference and example. ··· 48 49 ## HTTP Handler Organization 50 51 - HTTP handlers should be organized as Rust source files in the `src/http` 52 - directory and should have the `handler_` prefix. Each handler should have it's 53 - own request and response types and helper functionality. 
54 - 55 - Example handler: `handler_index.rs` 56 57 - After updating, run `cargo check` to fix errors and warnings 58 - Don't use dead code, if it's not used remove it 59 - - Ise htmx and hyperscript when possible, if not javascript in script tag is ok
··· 1 + # CLAUDE.md 2 + 3 + This file provides guidance to Claude Code (claude.ai/code) when working with 4 + code in this repository. 5 + 6 + ## Project Overview 7 + 8 + Slices is an AT Protocol (ATProto) indexing and querying service that allows 9 + developers to create custom slices (subsets) of the ATProto network data. It 10 + indexes records from the Bluesky/ATProto network via Jetstream, validates them 11 + against Lexicon schemas, and provides flexible querying capabilities through an 12 + XRPC API. 13 14 + ## Development Setup 15 16 + ### Database Connection 17 + 18 + The application uses PostgreSQL. You can connect to the database using: 19 + 20 + 1. **Docker Compose** (recommended for local development): 21 + ```bash 22 + docker-compose up postgres 23 + ``` 24 + This starts PostgreSQL on port 5432 with: 25 + - Database: `slices` 26 + - User: `slices` 27 + - Password: `slices` 28 + 29 + 2. **Environment Variables** (.env file): Create an `api/.env` file (copy from 30 + `api/.env.example`): 31 + ``` 32 + DATABASE_URL=postgresql://slices:slices@localhost:5432/slices 33 + SYSTEM_SLICE_URI=at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z 34 + AUTH_BASE_URL=http://localhost:8081 35 + RELAY_ENDPOINT=https://relay1.us-west.bsky.network 36 + ``` 37 + 38 + ## Common Development Commands 39 + 40 + ```bash 41 + # Type checking and validation 42 + cargo check 43 + 44 + # Run development server 45 + cargo run 46 + 47 + # Run sync script 48 + ./scripts/sync.sh http://localhost:3000 <token> # Local dev 49 + ./scripts/sync.sh https://api.slices.network <token> # Production 50 + 51 + # Database setup 52 + sqlx database create 53 + 54 + # Database migrations 55 + sqlx migrate run 56 + sqlx migrate add <migration_name> 57 + 58 + # sqlx query cache (run after changing queries) 59 + cargo sqlx prepare 60 + 61 + # Build for production 62 + cargo build --release 63 + ``` 64 + 65 + ## High-Level Architecture 66 + 67 + ### Data Flow 68 + 69 + 1. 
**Real-time Indexing:** Jetstream → JetstreamConsumer → Lexicon Validation → 70 + Database → Index 71 + 2. **XRPC Query:** HTTP Request → OAuth Verification → Dynamic Handler → 72 + Database Query → Response 73 + 3. **Background Sync:** Trigger → Job Queue → SyncService → ATProto Relay → 74 + Validation → Database 75 + 76 + ### Key Architectural Decisions 77 + 78 + - **Single-table design** for records using PostgreSQL with JSONB for 79 + flexibility across arbitrary lexicons 80 + - **Dynamic XRPC endpoint generation** - routes like 81 + `/{collection}.createRecord` are generated at runtime 82 + - **Dual indexing strategy** - real-time via Jetstream and bulk sync via 83 + background jobs 84 + - **Cursor-based pagination** using `base64(sort_value::indexed_at::cid)` for 85 + stable pagination 86 + - **OAuth DPoP authentication** integrated with AIP server for ATProto 87 + authentication 88 + 89 + ### Module Organization 90 + 91 + - `src/api/` - HTTP handlers for XRPC endpoints (actors, records, oauth, sync, 92 + etc.) 
93 + - `src/main.rs` - Application entry point, server setup, Jetstream startup 94 + - `src/database.rs` - All database operations, query building, cursor pagination 95 + - `src/jetstream.rs` - Real-time event processing from ATProto firehose 96 + - `src/sync.rs` - Bulk synchronization operations with ATProto relay 97 + - `src/auth.rs` - OAuth verification and DPoP authentication setup 98 + - `src/errors.rs` - Error type definitions (reference for new errors) 99 100 ## Error Handling 101 102 All error strings must use this format: 103 104 + error-slices-<domain>-<number> <message>: <details> 105 106 Example errors: 107 108 + - error-slices-resolve-1 Multiple DIDs resolved for method 109 + - error-slices-plc-1 HTTP request failed: https://google.com/ Not Found 110 + - error-slices-key-1 Error decoding key: invalid 111 112 Errors should be represented as enums using the `thiserror` library when 113 possible using `src/errors.rs` as a reference and example. ··· 125 126 ## HTTP Handler Organization 127 128 + HTTP handlers should be organized as Rust source files in the `src/api` 129 + directory. Each handler should have its own request and response types and 130 + helper functionality. 131 132 - After updating, run `cargo check` to fix errors and warnings 133 - Don't use dead code, if it's not used remove it
+1 -1
api/Cargo.lock
··· 2624 checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" 2625 2626 [[package]] 2627 - name = "slice" 2628 version = "0.1.0" 2629 dependencies = [ 2630 "anyhow",
··· 2624 checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" 2625 2626 [[package]] 2627 + name = "slices" 2628 version = "0.1.0" 2629 dependencies = [ 2630 "anyhow",
+1 -1
api/Cargo.toml
··· 1 [package] 2 - name = "slice" 3 version = "0.1.0" 4 edition = "2024" 5
··· 1 [package] 2 + name = "slices" 3 version = "0.1.0" 4 edition = "2024" 5
api/docs/lexicons_spec.md context/lexicons_spec.md
+14 -14
api/flake.nix
··· 1 { 2 - description = "API service for Slice"; 3 4 inputs = { 5 nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; ··· 43 inherit src; 44 version = "0.1.0"; 45 strictDeps = true; 46 - pname = "slice"; 47 - name = "slice"; 48 buildInputs = with pkgs; [ 49 openssl 50 pkg-config ··· 71 # Fix for linker issues in Nix 72 CC = "${pkgs.stdenv.cc}/bin/cc"; 73 74 - cargoExtraArgs = "--bin slice"; 75 }; 76 77 # Build cargo artifacts 78 cargoArtifacts = craneLib.buildDepsOnly commonArgs; 79 80 # Build the package 81 - slice = craneLib.buildPackage (commonArgs // { 82 cargoArtifacts = cargoArtifacts; 83 doCheck = false; 84 CARGO_PROFILE = "release"; ··· 86 87 # Copy migration files 88 migrationFiles = pkgs.stdenv.mkDerivation { 89 - name = "slice-migrations"; 90 src = ./migrations; 91 installPhase = '' 92 mkdir -p $out/migrations ··· 96 97 # Copy script files 98 scriptFiles = pkgs.stdenv.mkDerivation { 99 - name = "slice-scripts"; 100 src = ./scripts; 101 installPhase = '' 102 mkdir -p $out/scripts ··· 107 108 # Common OCI labels 109 ociLabels = { 110 - "org.opencontainers.image.title" = "slice"; 111 "org.opencontainers.image.description" = "API service for Slices"; 112 "org.opencontainers.image.version" = "0.1.0"; 113 "org.opencontainers.image.authors" = "Slices Social"; ··· 115 }; 116 117 # Docker image for deployment 118 - sliceImg = pkgs.dockerTools.buildImage { 119 - name = "slice"; 120 tag = "latest"; 121 fromImage = pkgs.dockerTools.pullImage { 122 imageName = "alpine"; ··· 126 copyToRoot = pkgs.buildEnv { 127 name = "image-root"; 128 paths = [ 129 - slice 130 pkgs.cacert 131 pkgs.postgresql 132 pkgs.deno ··· 136 }; 137 138 config = { 139 - Cmd = [ "/bin/slice" ]; 140 Env = [ 141 "RUST_BACKTRACE=1" 142 "RUST_LOG=info" ··· 151 152 in 153 { 154 - inherit slice sliceImg; 155 - default = slice; 156 }; 157 in 158 {
··· 1 { 2 + description = "API service for Slices"; 3 4 inputs = { 5 nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; ··· 43 inherit src; 44 version = "0.1.0"; 45 strictDeps = true; 46 + pname = "slices"; 47 + name = "slices"; 48 buildInputs = with pkgs; [ 49 openssl 50 pkg-config ··· 71 # Fix for linker issues in Nix 72 CC = "${pkgs.stdenv.cc}/bin/cc"; 73 74 + cargoExtraArgs = "--bin slices"; 75 }; 76 77 # Build cargo artifacts 78 cargoArtifacts = craneLib.buildDepsOnly commonArgs; 79 80 # Build the package 81 + slices = craneLib.buildPackage (commonArgs // { 82 cargoArtifacts = cargoArtifacts; 83 doCheck = false; 84 CARGO_PROFILE = "release"; ··· 86 87 # Copy migration files 88 migrationFiles = pkgs.stdenv.mkDerivation { 89 + name = "slices-migrations"; 90 src = ./migrations; 91 installPhase = '' 92 mkdir -p $out/migrations ··· 96 97 # Copy script files 98 scriptFiles = pkgs.stdenv.mkDerivation { 99 + name = "slices-scripts"; 100 src = ./scripts; 101 installPhase = '' 102 mkdir -p $out/scripts ··· 107 108 # Common OCI labels 109 ociLabels = { 110 + "org.opencontainers.image.title" = "slices"; 111 "org.opencontainers.image.description" = "API service for Slices"; 112 "org.opencontainers.image.version" = "0.1.0"; 113 "org.opencontainers.image.authors" = "Slices Social"; ··· 115 }; 116 117 # Docker image for deployment 118 + slicesImg = pkgs.dockerTools.buildImage { 119 + name = "slices"; 120 tag = "latest"; 121 fromImage = pkgs.dockerTools.pullImage { 122 imageName = "alpine"; ··· 126 copyToRoot = pkgs.buildEnv { 127 name = "image-root"; 128 paths = [ 129 + slices 130 pkgs.cacert 131 pkgs.postgresql 132 pkgs.deno ··· 136 }; 137 138 config = { 139 + Cmd = [ "/bin/slices" ]; 140 Env = [ 141 "RUST_BACKTRACE=1" 142 "RUST_LOG=info" ··· 151 152 in 153 { 154 + inherit slices slicesImg; 155 + default = slices; 156 }; 157 in 158 {
+1
api/fly.toml
··· 13 HTTP_PORT="8080" 14 RUST_LOG="debug" 15 AUTH_BASE_URL="https://slices-aip.fly.dev" 16 17 [http_service] 18 internal_port = 8080
··· 13 HTTP_PORT="8080" 14 RUST_LOG="debug" 15 AUTH_BASE_URL="https://slices-aip.fly.dev" 16 + SYSTEM_SLICE_URI="at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z" 17 18 [http_service] 19 internal_port = 8080
-33
api/scripts/prod_sync.sh
··· 1 - #!/bin/bash 2 - 3 - # Check if ACCESS_TOKEN environment variable is set 4 - # if [ -z "$ACCESS_TOKEN" ]; then 5 - # echo "❌ Error: ACCESS_TOKEN environment variable is required" 6 - # echo "Usage: ACCESS_TOKEN=your_token ./prod_sync.sh" 7 - # exit 1 8 - # fi 9 - 10 - echo "🔄 Testing Production Sync Endpoint..." 11 - 12 - echo "🎯 Syncing slice collections with specific repos" 13 - curl -s -X POST https://slices-api.fly.dev/xrpc/network.slices.slice.startSync \ 14 - -H "Content-Type: application/json" \ 15 - -H "Authorization: Bearer $ACCESS_TOKEN" \ 16 - -d '{ 17 - "slice": "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z", 18 - "collections": [ 19 - "network.slices.slice", 20 - "network.slices.lexicon", 21 - "network.slices.actor.profile" 22 - ], 23 - "externalCollections": [ 24 - "app.bsky.actor.profile" 25 - ], 26 - "repos": [ 27 - "did:plc:bcgltzqazw5tb6k2g3ttenbj" 28 - ], 29 - "skipValidation": true 30 - }' | jq '.' 31 - 32 - echo "" 33 - echo "✅ Production sync started!"
···
+59
api/scripts/sync.sh
···
··· 1 + #!/bin/bash 2 + 3 + # Usage: ./sync.sh <api_endpoint> [bearer_token] 4 + # Examples: 5 + # ./sync.sh http://localhost:3000 <token> 6 + # ./sync.sh https://slices-api.fly.dev <token> 7 + # ACCESS_TOKEN=<token> ./sync.sh https://slices-api.fly.dev 8 + 9 + if [ -z "$1" ]; then 10 + echo "Usage: $0 <api_endpoint> [bearer_token]" 11 + echo "" 12 + echo "Examples:" 13 + echo " $0 http://localhost:3000 <token>" 14 + echo " $0 https://slices-api.fly.dev <token>" 15 + echo " ACCESS_TOKEN=<token> $0 https://slices-api.fly.dev" 16 + exit 1 17 + fi 18 + 19 + API_ENDPOINT="$1" 20 + 21 + # Get bearer token from argument or environment variable 22 + if [ -n "$2" ]; then 23 + TOKEN="$2" 24 + elif [ -n "$ACCESS_TOKEN" ]; then 25 + TOKEN="$ACCESS_TOKEN" 26 + else 27 + echo "❌ Error: Bearer token is required" 28 + echo "Provide it as second argument or set ACCESS_TOKEN environment variable" 29 + exit 1 30 + fi 31 + 32 + echo "🔄 Starting Sync..." 33 + echo "🌐 API Endpoint: $API_ENDPOINT" 34 + echo "" 35 + 36 + # Get system slice URI from environment or use default 37 + SYSTEM_SLICE_URI="${SYSTEM_SLICE_URI:-at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z}" 38 + 39 + echo "🎯 Syncing slice collections with specific repos" 40 + curl -s -X POST "$API_ENDPOINT/xrpc/network.slices.slice.startSync" \ 41 + -H "Content-Type: application/json" \ 42 + -H "Authorization: Bearer $TOKEN" \ 43 + -d "{ 44 + \"slice\": \"$SYSTEM_SLICE_URI\", 45 + \"collections\": [ 46 + \"network.slices.actor.profile\", 47 + \"network.slices.slice\", 48 + \"network.slices.lexicon\", 49 + \"network.slices.waitlist.invite\", 50 + \"network.slices.waitlist.request\" 51 + ], 52 + \"externalCollections\": [ 53 + \"app.bsky.actor.profile\" 54 + ], 55 + \"skipValidation\": true 56 + }" | jq '.' 57 + 58 + echo "" 59 + echo "✅ Sync complete!"
-33
api/scripts/test_sync.sh
··· 1 - #!/bin/bash 2 - 3 - if [ -z "$1" ]; then 4 - echo "Usage: $0 <bearer_token>" 5 - exit 1 6 - fi 7 - 8 - TOKEN="$1" 9 - 10 - echo "🔄 Testing Sync Endpoint..." 11 - 12 - echo "🎯 Syncing specific collections with specific repos" 13 - curl -s -X POST http://localhost:3000/xrpc/network.slices.slice.startSync \ 14 - -H "Content-Type: application/json" \ 15 - -H "Authorization: Bearer $TOKEN" \ 16 - -d '{ 17 - "slice": "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z", 18 - "collections": [ 19 - "network.slices.actor.profile", 20 - "network.slices.slice", 21 - "network.slices.lexicon" 22 - ], 23 - "externalCollections": [ 24 - "app.bsky.actor.profile" 25 - ], 26 - "repos": [ 27 - "did:plc:bcgltzqazw5tb6k2g3ttenbj" 28 - ], 29 - "skipValidation": true 30 - }' | jq '.' 31 - 32 - echo "" 33 - echo "✅ Sync test complete!"
···
+2 -2
api/src/api/xrpc_dynamic.rs
··· 568 569 // For network.slices.lexicon collection, validate against the system slice 570 let validation_slice_uri = if collection == "network.slices.lexicon" { 571 - "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z" 572 } else { 573 &slice_uri 574 }; ··· 684 685 // For network.slices.lexicon collection, validate against the system slice 686 let validation_slice_uri = if collection == "network.slices.lexicon" { 687 - "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z" 688 } else { 689 &slice_uri 690 };
··· 568 569 // For network.slices.lexicon collection, validate against the system slice 570 let validation_slice_uri = if collection == "network.slices.lexicon" { 571 + &state.config.system_slice_uri 572 } else { 573 &slice_uri 574 }; ··· 684 685 // For network.slices.lexicon collection, validate against the system slice 686 let validation_slice_uri = if collection == "network.slices.lexicon" { 687 + &state.config.system_slice_uri 688 } else { 689 &slice_uri 690 };
+17 -17
api/src/errors.rs
··· 8 9 #[derive(Error, Debug)] 10 pub enum DatabaseError { 11 - #[error("error-slice-database-1 SQL query failed: {0}")] 12 SqlQuery(#[from] sqlx::Error), 13 14 - #[error("error-slice-database-2 Record not found: {uri}")] 15 RecordNotFound { uri: String }, 16 } 17 18 #[derive(Error, Debug)] 19 pub enum SyncError { 20 - #[error("error-slice-sync-1 HTTP request failed: {0}")] 21 HttpRequest(#[from] reqwest::Error), 22 23 - #[error("error-slice-sync-2 Database operation failed: {0}")] 24 Database(#[from] DatabaseError), 25 26 - #[error("error-slice-sync-3 JSON parsing failed: {0}")] 27 JsonParse(#[from] serde_json::Error), 28 29 - #[error("error-slice-sync-4 Failed to list repos for collection: {status}")] 30 ListRepos { status: u16 }, 31 32 - #[error("error-slice-sync-5 Failed to list records: {status}")] 33 ListRecords { status: u16 }, 34 35 - #[error("error-slice-sync-6 Task join failed: {0}")] 36 TaskJoin(#[from] tokio::task::JoinError), 37 38 - #[error("error-slice-sync-7 Generic error: {0}")] 39 Generic(String), 40 } 41 42 #[derive(Error, Debug)] 43 pub enum AppError { 44 - #[error("error-slice-app-1 Database connection failed: {0}")] 45 DatabaseConnection(#[from] sqlx::Error), 46 47 - #[error("error-slice-app-2 Migration failed: {0}")] 48 Migration(#[from] sqlx::migrate::MigrateError), 49 50 - #[error("error-slice-app-3 Server bind failed: {0}")] 51 ServerBind(#[from] std::io::Error), 52 53 - #[error("error-slice-app-4 Internal server error: {0}")] 54 Internal(String), 55 56 - #[error("error-slice-app-5 Resource not found: {0}")] 57 NotFound(String), 58 59 - #[error("error-slice-app-6 Bad request: {0}")] 60 BadRequest(String), 61 } 62 63 #[derive(Error, Debug)] 64 pub enum SliceError { 65 - #[error("error-slice-jetstream-1 Jetstream error: {message}")] 66 JetstreamError { message: String }, 67 68 - #[error("error-slice-database Database error: {0}")] 69 Database(#[from] DatabaseError), 70 71 }
··· 8 9 #[derive(Error, Debug)] 10 pub enum DatabaseError { 11 + #[error("error-slices-database-1 SQL query failed: {0}")] 12 SqlQuery(#[from] sqlx::Error), 13 14 + #[error("error-slices-database-2 Record not found: {uri}")] 15 RecordNotFound { uri: String }, 16 } 17 18 #[derive(Error, Debug)] 19 pub enum SyncError { 20 + #[error("error-slices-sync-1 HTTP request failed: {0}")] 21 HttpRequest(#[from] reqwest::Error), 22 23 + #[error("error-slices-sync-2 Database operation failed: {0}")] 24 Database(#[from] DatabaseError), 25 26 + #[error("error-slices-sync-3 JSON parsing failed: {0}")] 27 JsonParse(#[from] serde_json::Error), 28 29 + #[error("error-slices-sync-4 Failed to list repos for collection: {status}")] 30 ListRepos { status: u16 }, 31 32 + #[error("error-slices-sync-5 Failed to list records: {status}")] 33 ListRecords { status: u16 }, 34 35 + #[error("error-slices-sync-6 Task join failed: {0}")] 36 TaskJoin(#[from] tokio::task::JoinError), 37 38 + #[error("error-slices-sync-7 Generic error: {0}")] 39 Generic(String), 40 } 41 42 #[derive(Error, Debug)] 43 pub enum AppError { 44 + #[error("error-slices-app-1 Database connection failed: {0}")] 45 DatabaseConnection(#[from] sqlx::Error), 46 47 + #[error("error-slices-app-2 Migration failed: {0}")] 48 Migration(#[from] sqlx::migrate::MigrateError), 49 50 + #[error("error-slices-app-3 Server bind failed: {0}")] 51 ServerBind(#[from] std::io::Error), 52 53 + #[error("error-slices-app-4 Internal server error: {0}")] 54 Internal(String), 55 56 + #[error("error-slices-app-5 Resource not found: {0}")] 57 NotFound(String), 58 59 + #[error("error-slices-app-6 Bad request: {0}")] 60 BadRequest(String), 61 } 62 63 #[derive(Error, Debug)] 64 pub enum SliceError { 65 + #[error("error-slices-jetstream-1 Jetstream error: {message}")] 66 JetstreamError { message: String }, 67 68 + #[error("error-slices-database Database error: {0}")] 69 Database(#[from] DatabaseError), 70 71 }
+5
api/src/main.rs
··· 31 pub struct Config { 32 pub auth_base_url: String, 33 pub relay_endpoint: String, 34 } 35 36 #[derive(Clone)] ··· 76 let relay_endpoint = env::var("RELAY_ENDPOINT") 77 .unwrap_or_else(|_| "https://relay1.us-west.bsky.network".to_string()); 78 79 let config = Config { 80 auth_base_url, 81 relay_endpoint, 82 }; 83 84 // Initialize global logger
··· 31 pub struct Config { 32 pub auth_base_url: String, 33 pub relay_endpoint: String, 34 + pub system_slice_uri: String, 35 } 36 37 #[derive(Clone)] ··· 77 let relay_endpoint = env::var("RELAY_ENDPOINT") 78 .unwrap_or_else(|_| "https://relay1.us-west.bsky.network".to_string()); 79 80 + let system_slice_uri = env::var("SYSTEM_SLICE_URI") 81 + .unwrap_or_else(|_| "at://did:plc:bcgltzqazw5tb6k2g3ttenbj/network.slices.slice/3lymhd4jhrd2z".to_string()); 82 + 83 let config = Config { 84 auth_base_url, 85 relay_endpoint, 86 + system_slice_uri, 87 }; 88 89 // Initialize global logger
+4 -4
docker-compose.yml
··· 4 postgres: 5 image: postgres:15 6 environment: 7 - POSTGRES_DB: slice 8 - POSTGRES_USER: slice 9 - POSTGRES_PASSWORD: slice 10 ports: 11 - "5432:5432" 12 volumes: 13 - postgres_data:/var/lib/postgresql/data 14 - ./schema.sql:/docker-entrypoint-initdb.d/01-schema.sql 15 healthcheck: 16 - test: ["CMD-SHELL", "pg_isready -U slice -d slice"] 17 interval: 5s 18 timeout: 5s 19 retries: 5
··· 4 postgres: 5 image: postgres:15 6 environment: 7 + POSTGRES_DB: slices 8 + POSTGRES_USER: slices 9 + POSTGRES_PASSWORD: slices 10 ports: 11 - "5432:5432" 12 volumes: 13 - postgres_data:/var/lib/postgresql/data 14 - ./schema.sql:/docker-entrypoint-initdb.d/01-schema.sql 15 healthcheck: 16 + test: ["CMD-SHELL", "pg_isready -U slices -d slices"] 17 interval: 5s 18 timeout: 5s 19 retries: 5