Your music, beautifully tracked. All yours. (coming soon) teal.fm
teal-fm atproto

Compare changes

Choose any two refs to compare.

+10 -7
.env.template
··· 2 2 NODE_ENV=development 3 3 PORT=3000 4 4 HOST=0.0.0.0 5 - PUBLIC_URL=A publicly accessible url for aqua 5 + PUBLIC_URL= # A publicly accessible url for aqua 6 6 DB_USER=postgres 7 7 DB_PASSWORD=supersecurepassword123987 8 8 DB_NAME=teal 9 9 DATABASE_URL="postgresql://${DB_USER}:${DB_PASSWORD}@localhost:5432/${DB_NAME}" 10 10 DOCKER_DB_URL="postgresql://${DB_USER}:${DB_PASSWORD}@host.docker.internal:5432/${DB_NAME}" 11 - #This is not currently being used fully so can just use this default pubkey for now 11 + # `cargo run --bin teal gen-key` to generate a new pubkey 12 12 DID_WEB_PUBKEY=zQ3sheEnMKhEK87PSu4P2mjAevViqHcjKmgxBWsDQPjLRM9wP 13 - CLIENT_ADDRESS=A publicly accessible host for amethyst like amethyst.teal.fm 14 - PUBLIC_DID_WEB=did:web:{aqua's PUBLIC_URL goes here after did:web:} 13 + CLIENT_ADDRESS= # A publicly accessible host for amethyst like amethyst.teal.fm 14 + PUBLIC_DID_WEB= # did:web:{aqua's PUBLIC_URL goes here after did:web:} 15 + 16 + # amethyst 17 + EXPO_PUBLIC_DID_WEB= # same as PUBLIC_DID_WEB 18 + EXPO_PUBLIC_BASE_URL= # same as CLIENT_ADDRESS but with http scheme like https://amethyst.teal.fm 15 19 16 - #amethyst 17 - EXPO_PUBLIC_DID_WEB=same as PUBLIC_DID_WEB 18 - EXPO_PUBLIC_BASE_URL=same as CLIENT_ADDRESS but with http scheme like https://amethyst.teal.fm 20 + SQLX_OFFLINE=true 21 + SQLX_OFFLINE_DIR="./.sqlx"
-96
.github/dependabot.yml
··· 1 - version: 2 2 - updates: 3 - # Enable version updates for npm (Node.js dependencies) 4 - - package-ecosystem: "npm" 5 - directory: "/" 6 - schedule: 7 - interval: "weekly" 8 - day: "monday" 9 - time: "09:00" 10 - open-pull-requests-limit: 10 11 - assignees: 12 - - "@me" 13 - reviewers: 14 - - "@me" 15 - commit-message: 16 - prefix: "deps" 17 - include: "scope" 18 - 19 - # Enable version updates for Cargo (Rust dependencies) - services 20 - - package-ecosystem: "cargo" 21 - directory: "/services" 22 - schedule: 23 - interval: "weekly" 24 - day: "monday" 25 - time: "10:00" 26 - open-pull-requests-limit: 5 27 - assignees: 28 - - "@me" 29 - reviewers: 30 - - "@me" 31 - commit-message: 32 - prefix: "deps(rust)" 33 - include: "scope" 34 - 35 - # Enable version updates for Cargo (Rust dependencies) - aqua app 36 - - package-ecosystem: "cargo" 37 - directory: "/apps/aqua" 38 - schedule: 39 - interval: "weekly" 40 - day: "monday" 41 - time: "10:30" 42 - open-pull-requests-limit: 5 43 - assignees: 44 - - "@me" 45 - reviewers: 46 - - "@me" 47 - commit-message: 48 - prefix: "deps(rust)" 49 - include: "scope" 50 - 51 - # Enable version updates for GitHub Actions 52 - - package-ecosystem: "github-actions" 53 - directory: "/" 54 - schedule: 55 - interval: "weekly" 56 - day: "monday" 57 - time: "11:00" 58 - open-pull-requests-limit: 5 59 - assignees: 60 - - "@me" 61 - reviewers: 62 - - "@me" 63 - commit-message: 64 - prefix: "deps(actions)" 65 - include: "scope" 66 - 67 - # Enable version updates for Docker 68 - - package-ecosystem: "docker" 69 - directory: "/apps/aqua" 70 - schedule: 71 - interval: "weekly" 72 - day: "tuesday" 73 - time: "09:00" 74 - open-pull-requests-limit: 3 75 - assignees: 76 - - "@me" 77 - reviewers: 78 - - "@me" 79 - commit-message: 80 - prefix: "deps(docker)" 81 - include: "scope" 82 - 83 - - package-ecosystem: "docker" 84 - directory: "/services/cadet" 85 - schedule: 86 - interval: "weekly" 87 - day: "tuesday" 88 - time: "09:30" 89 - open-pull-requests-limit: 3 90 - assignees: 91 - - "@me" 92 - reviewers: 93 - - "@me" 94 - commit-message: 95 - prefix: "deps(docker)" 96 - include: "scope"
+91 -90
.github/workflows/ci.yml
··· 11 11 env: 12 12 CARGO_TERM_COLOR: always 13 13 SQLX_OFFLINE: true 14 + SQLX_OFFLINE_DIR: "./.sqlx" 14 15 15 16 jobs: 16 17 setup-and-build: ··· 30 31 setup-node: "true" 31 32 cache-key-suffix: "ci-build" 32 33 34 + - name: Setup SQLx offline files 35 + run: ./scripts/setup-sqlx-offline.sh 36 + 33 37 - name: Build Node packages 34 38 run: pnpm build 35 39 36 40 - name: Build Rust services (x86_64) 37 41 run: | 38 - cd services 39 42 cargo build --release --all-features 40 43 41 44 - name: Build Rust apps (x86_64) ··· 43 46 cd apps/aqua 44 47 cargo build --release --all-features 45 48 49 + - name: Collect executables (x86_64) 50 + run: | 51 + mkdir -p artifacts/x86_64 52 + # Copy service executables 53 + if [ -d "services/target/release" ]; then 54 + find services/target/release -maxdepth 1 -type f -executable ! -name "*.d" ! -name "*-*" -exec cp {} artifacts/x86_64/ \; 55 + fi 56 + # Copy app executables 57 + if [ -d "apps/aqua/target/release" ]; then 58 + find apps/aqua/target/release -maxdepth 1 -type f -executable ! -name "*.d" ! -name "*-*" -exec cp {} artifacts/x86_64/ \; 59 + fi 60 + echo "x86_64 executables:" 61 + ls -la artifacts/x86_64/ || echo "No executables found" 62 + 46 63 - name: Upload Node build artifacts 47 64 uses: actions/upload-artifact@v4 48 65 with: ··· 57 74 with: 58 75 name: rust-builds-x86_64 59 76 path: | 60 - target/release/ 61 - apps/aqua/target/release/ 77 + artifacts/x86_64/ 62 78 retention-days: 1 63 79 64 80 rust-cross-compile: ··· 80 96 lexicons-only-rust: "true" 81 97 cache-key-suffix: "cross-${{ matrix.target }}" 82 98 99 + - name: Setup SQLx offline files 100 + run: ./scripts/setup-sqlx-offline.sh 101 + 83 102 - name: Install cross-compilation tools 84 103 run: | 85 - cargo install cross 104 + cargo install cross --git https://github.com/cross-rs/cross 86 105 rustup target add ${{ matrix.target }} 106 + # Set up environment for cross-compilation 107 + echo "PKG_CONFIG_ALLOW_CROSS=1" >> $GITHUB_ENV 108 + echo "CROSS_NO_WARNINGS=0" >> $GITHUB_ENV 87 109 88 110 - name: Cross-compile services 89 111 run: | 90 - cd services 91 112 cross build --release --all-features --target ${{ matrix.target }} 92 113 93 - - name: Cross-compile apps 114 + - name: Collect cross-compiled executables 94 115 run: | 95 - cd apps/aqua 96 - cross build --release --all-features --target ${{ matrix.target }} 116 + mkdir -p artifacts/${{ matrix.target }} 117 + # Copy service executables 118 + if [ -d "services/target/${{ matrix.target }}/release" ]; then 119 + find services/target/${{ matrix.target }}/release -maxdepth 1 -type f -executable ! -name "*.d" ! -name "*-*" -exec cp {} artifacts/${{ matrix.target }}/ \; 120 + fi 121 + # Copy app executables 122 + if [ -d "apps/aqua/target/${{ matrix.target }}/release" ]; then 123 + find apps/aqua/target/${{ matrix.target }}/release -maxdepth 1 -type f -executable ! -name "*.d" ! -name "*-*" -exec cp {} artifacts/${{ matrix.target }}/ \; 124 + fi 125 + echo "Cross-compiled executables for ${{ matrix.target }}:" 126 + ls -la artifacts/${{ matrix.target }}/ || echo "No executables found" 97 127 98 128 - name: Upload cross-compiled artifacts 99 129 uses: actions/upload-artifact@v4 100 130 with: 101 131 name: rust-builds-${{ matrix.target }} 102 132 path: | 103 - target/${{ matrix.target }}/release/ 104 - apps/aqua/target/${{ matrix.target }}/release/ 133 + artifacts/${{ matrix.target }}/ 105 134 retention-days: 1 106 135 107 - rust-quality: 108 - name: Rust Quality Checks 109 - runs-on: ubuntu-latest 110 - needs: setup-and-build 111 - steps: 112 - - name: Checkout repository 113 - uses: actions/checkout@v4 136 + # disabled b/c it's triggered on autogenerated content 137 + # and can't find a way around it rn 138 + 139 + # rust-quality: 140 + # name: Rust Quality Checks 141 + # runs-on: ubuntu-latest 142 + # needs: setup-and-build 143 + # steps: 144 + # - name: Checkout repository 145 + # uses: actions/checkout@v4 146 + 147 + # - name: Setup environment 148 + # uses: ./.github/actions/setup 149 + # with: 150 + # setup-rust: "true" 151 + # setup-node: "true" 152 + # lexicons-only-rust: "true" 153 + # cache-key-suffix: "ci-build" 114 154 115 - - name: Setup environment 116 - uses: ./.github/actions/setup 117 - with: 118 - setup-rust: "true" 119 - setup-node: "true" 120 - lexicons-only-rust: "true" 121 - cache-key-suffix: "ci-build" 155 + # - name: Setup SQLx offline files 156 + # run: ./scripts/setup-sqlx-offline.sh 122 157 123 - - name: Check Rust formatting 124 - run: | 125 - cd services && cargo fmt --all -- --check 126 - cd ../apps/aqua && cargo fmt --all -- --check 158 + # # - name: Check Rust formatting 159 + # # run: | 160 + # # cargo fmt --all -- --check 127 161 128 - - name: Run Clippy 129 - run: | 130 - cd services && cargo clippy --all-targets --all-features -- -D warnings 131 - cd ../apps/aqua && cargo clippy --all-targets --all-features -- -D warnings 162 + # - name: Run Clippy 163 + # run: | 164 + # cargo clippy --all-targets --all-features --workspace --exclude types -- -D warnings 132 165 133 - - name: Run Rust tests 134 - run: | 135 - cd services && cargo test --all-features 166 + # - name: Run Rust tests 167 + # run: | 168 + # cargo test --all-features 136 169 137 - node-quality: 138 - name: Node.js Quality Checks 139 - runs-on: ubuntu-latest 140 - needs: setup-and-build 141 - steps: 142 - - name: Checkout repository 143 - uses: actions/checkout@v4 170 + # node-quality: 171 + # name: Node.js Quality Checks 172 + # runs-on: ubuntu-latest 173 + # needs: setup-and-build 174 + # steps: 175 + # - name: Checkout repository 176 + # uses: actions/checkout@v4 144 177 145 - - name: Setup environment 146 - uses: ./.github/actions/setup 147 - with: 148 - setup-node: "true" 149 - cache-key-suffix: "ci-build" 178 + # - name: Setup environment 179 + # uses: ./.github/actions/setup 180 + # with: 181 + # setup-node: "true" 182 + # cache-key-suffix: "ci-build" 150 183 151 - - name: Download Node build artifacts 152 - uses: actions/download-artifact@v4 153 - with: 154 - name: node-builds 155 - path: . 184 + # - name: Download Node build artifacts 185 + # uses: actions/download-artifact@v4 186 + # with: 187 + # name: node-builds 188 + # path: . 156 189 157 - - name: Type check 158 - run: pnpm typecheck 190 + # # - name: Type check 191 + # # run: pnpm typecheck 159 192 160 - - name: Lint and format check 161 - run: pnpm fix --check 193 + # - name: Lint and format check 194 + # run: pnpm fix --check 162 195 163 - - name: Run tests 164 - run: pnpm test 196 + # - name: Run tests 197 + # run: pnpm test 165 198 166 199 lexicon-validation: 167 200 name: Lexicon Validation ··· 182 215 run: | 183 216 pnpm lex:gen 184 217 git diff --exit-code || (echo "Lexicon files are out of sync. Run 'pnpm lex:gen' locally." && exit 1) 185 - 186 - security-audit: 187 - name: Security Audit 188 - runs-on: ubuntu-latest 189 - steps: 190 - - name: Checkout repository 191 - uses: actions/checkout@v4 192 - 193 - - name: Setup environment 194 - uses: ./.github/actions/setup 195 - with: 196 - setup-rust: "true" 197 - setup-node: "true" 198 - rust-components: "rustfmt,clippy" 199 - cache-key-suffix: "security" 200 - 201 - - name: Install and configure cargo-audit 202 - run: | 203 - cargo install cargo-audit 204 - cargo audit fetch 205 - 206 - - name: Run Rust security audit 207 - run: | 208 - for dir in services/ apps/aqua/; do 209 - if [ -f "$dir/Cargo.toml" ]; then 210 - echo "Running security audit for $dir" 211 - (cd "$dir" && cargo audit --deny-warnings --deny-unmaintained) 212 - fi 213 - done 214 - 215 - - name: Run Node.js security audit 216 - run: pnpm audit --audit-level=high
-110
.github/workflows/security.yml
··· 1 - # yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json 2 - 3 - name: Security 4 - 5 - on: 6 - push: 7 - branches: [main, develop] 8 - pull_request: 9 - branches: [main, develop] 10 - schedule: 11 - # Run security checks daily at 2 AM sunday 12 - - cron: "0 2 * * 0" 13 - 14 - jobs: 15 - codeql-analysis: 16 - name: CodeQL Analysis 17 - runs-on: ubuntu-latest 18 - permissions: 19 - actions: read 20 - contents: read 21 - security-events: write 22 - 23 - steps: 24 - - name: Checkout repository 25 - uses: actions/checkout@v4 26 - 27 - - name: Initialize CodeQL 28 - uses: github/codeql-action/init@v3 29 - with: 30 - languages: "javascript,typescript,rust" 31 - queries: security-extended,security-and-quality 32 - 33 - - name: Setup environment for all languages 34 - uses: ./.github/actions/setup 35 - with: 36 - setup-node: "true" 37 - setup-rust: "true" 38 - 39 - - name: Perform a full build for CodeQL 40 - run: | 41 - echo "Building Node.js projects..." 42 - pnpm build 43 - echo "Building Rust projects..." 44 - (cd services && cargo build --all-features) 45 - (cd apps/aqua && cargo build --all-features) 46 - 47 - - name: Perform CodeQL Analysis 48 - uses: github/codeql-action/analyze@v3 49 - 50 - docker-security-scan: 51 - name: Docker Security Scan 52 - runs-on: ubuntu-latest 53 - if: github.event_name == 'push' || github.event_name == 'schedule' 54 - strategy: 55 - matrix: 56 - service: [aqua, cadet] 57 - steps: 58 - - name: Checkout repository 59 - uses: actions/checkout@v4 60 - 61 - - name: Setup environment 62 - uses: ./.github/actions/setup 63 - with: 64 - setup-node: "true" 65 - lexicons-only-rust: "true" 66 - 67 - - name: Set up Docker Buildx 68 - uses: docker/setup-buildx-action@v3 69 - 70 - - name: Build Docker image 71 - uses: docker/build-push-action@v5 72 - with: 73 - context: . 74 - file: ${{ matrix.service == 'aqua' && './apps/aqua/Dockerfile' || './services/cadet/Dockerfile' }} 75 - load: true 76 - tags: ${{ matrix.service }}:latest 77 - cache-from: type=gha,scope=${{ matrix.service }} 78 - cache-to: type=gha,mode=max,scope=${{ matrix.service }} 79 - 80 - - name: Run Trivy vulnerability scanner 81 - uses: aquasecurity/trivy-action@master 82 - with: 83 - image-ref: "${{ matrix.service }}:latest" 84 - format: "sarif" 85 - output: "trivy-results-${{ matrix.service }}.sarif" 86 - severity: "CRITICAL,HIGH" 87 - exit-code: "1" 88 - 89 - - name: Upload Trivy scan results to GitHub Security tab 90 - uses: github/codeql-action/upload-sarif@v3 91 - if: always() 92 - with: 93 - sarif_file: "trivy-results-${{ matrix.service }}.sarif" 94 - 95 - secrets-scan: 96 - name: Secrets Scan 97 - runs-on: ubuntu-latest 98 - steps: 99 - - name: Checkout repository 100 - uses: actions/checkout@v4 101 - with: 102 - fetch-depth: 0 103 - 104 - - name: Run TruffleHog OSS 105 - uses: trufflesecurity/trufflehog@main 106 - with: 107 - path: ./ 108 - base: main 109 - head: HEAD 110 - extra_args: --debug --only-verified
+3
.gitignore
··· 65 65 vendor/**/*.d.ts 66 66 vendor/**/dist/ 67 67 vendor/**/node_modules/ 68 + 69 + # claude 70 + .claude
-3
.vscode/settings.json
··· 1 - { 2 - "deno.enable": false 3 - }
+208 -169
Cargo.lock
··· 124 124 "atmst", 125 125 "atrium-api", 126 126 "axum", 127 - "base64", 127 + "base64 0.22.1", 128 128 "chrono", 129 129 "clap", 130 130 "dotenvy", ··· 166 166 checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" 167 167 168 168 [[package]] 169 + name = "async-compression" 170 + version = "0.4.27" 171 + source = "registry+https://github.com/rust-lang/crates.io-index" 172 + checksum = "ddb939d66e4ae03cee6091612804ba446b12878410cfa17f785f4dd67d4014e8" 173 + dependencies = [ 174 + "flate2", 175 + "futures-core", 176 + "memchr", 177 + "pin-project-lite", 178 + "tokio", 179 + ] 180 + 181 + [[package]] 169 182 name = "async-lock" 170 183 version = "3.4.0" 171 184 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 234 247 "atrium-common", 235 248 "atrium-xrpc", 236 249 "chrono", 237 - "http", 250 + "http 1.3.1", 238 251 "ipld-core", 239 252 "langtag", 240 253 "regex", ··· 267 280 source = "registry+https://github.com/rust-lang/crates.io-index" 268 281 checksum = "0216ad50ce34e9ff982e171c3659e65dedaa2ed5ac2994524debdc9a9647ffa8" 269 282 dependencies = [ 270 - "http", 283 + "http 1.3.1", 271 284 "serde", 272 285 "serde_html_form", 273 286 "serde_json", ··· 315 328 "bytes", 316 329 "form_urlencoded", 317 330 "futures-util", 318 - "http", 331 + "http 1.3.1", 319 332 "http-body", 320 333 "http-body-util", 321 334 "hyper", ··· 348 361 dependencies = [ 349 362 "bytes", 350 363 "futures-core", 351 - "http", 364 + "http 1.3.1", 352 365 "http-body", 353 366 "http-body-util", 354 367 "mime", ··· 409 422 410 423 [[package]] 411 424 name = "base64" 425 + version = "0.21.7" 426 + source = "registry+https://github.com/rust-lang/crates.io-index" 427 + checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" 428 + 429 + [[package]] 430 + name = "base64" 412 431 version = "0.22.1" 413 432 source = "registry+https://github.com/rust-lang/crates.io-index" 414 433 checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" ··· 555 574 "async-trait", 556 575 "atmst", 557 576 "atrium-api", 558 - "base64", 577 + "base64 0.22.1", 559 578 "chrono", 560 579 "cid 0.11.1", 561 580 "dotenvy", ··· 577 596 "sqlx", 578 597 "time", 579 598 "tokio", 580 - "tokio-tungstenite", 599 + "tokio-tungstenite 0.24.0", 581 600 "tracing", 582 601 "tracing-subscriber", 583 602 "types", ··· 871 890 checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" 872 891 873 892 [[package]] 893 + name = "crc32fast" 894 + version = "1.5.0" 895 + source = "registry+https://github.com/rust-lang/crates.io-index" 896 + checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" 897 + dependencies = [ 898 + "cfg-if", 899 + ] 900 + 901 + [[package]] 874 902 name = "crossbeam-channel" 875 903 version = "0.5.15" 876 904 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1258 1286 ] 1259 1287 1260 1288 [[package]] 1289 + name = "flate2" 1290 + version = "1.1.2" 1291 + source = "registry+https://github.com/rust-lang/crates.io-index" 1292 + checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" 1293 + dependencies = [ 1294 + "crc32fast", 1295 + "miniz_oxide", 1296 + ] 1297 + 1298 + [[package]] 1261 1299 name = "flume" 1262 1300 version = "0.11.1" 1263 1301 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 1280 1318 version = "0.1.5" 1281 1319 source = "registry+https://github.com/rust-lang/crates.io-index" 1282 1320 checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" 1283 - 1284 - [[package]] 1285 - name = "foreign-types" 1286 - version = "0.3.2" 1287 - source = "registry+https://github.com/rust-lang/crates.io-index" 1288 - checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" 1289 - dependencies = [ 1290 - "foreign-types-shared", 1291 - ] 1292 - 1293 - [[package]] 1294 - name = "foreign-types-shared" 1295 - version = "0.1.1" 1296 - source = "registry+https://github.com/rust-lang/crates.io-index" 1297 - checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" 1298 1321 1299 1322 [[package]] 1300 1323 name = "form_urlencoded" ··· 1497 1520 "fnv", 1498 1521 "futures-core", 1499 1522 "futures-sink", 1500 - "http", 1523 + "http 1.3.1", 1501 1524 "indexmap", 1502 1525 "slab", 1503 1526 "tokio", ··· 1572 1595 1573 1596 [[package]] 1574 1597 name = "http" 1598 + version = "0.2.12" 1599 + source = "registry+https://github.com/rust-lang/crates.io-index" 1600 + checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" 1601 + dependencies = [ 1602 + "bytes", 1603 + "fnv", 1604 + "itoa", 1605 + ] 1606 + 1607 + [[package]] 1608 + name = "http" 1575 1609 version = "1.3.1" 1576 1610 source = "registry+https://github.com/rust-lang/crates.io-index" 1577 1611 checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" ··· 1588 1622 checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" 1589 1623 dependencies = [ 1590 1624 "bytes", 1591 - "http", 1625 + "http 1.3.1", 1592 1626 ] 1593 1627 1594 1628 [[package]] ··· 1599 1633 dependencies = [ 1600 1634 "bytes", 1601 1635 "futures-core", 1602 - "http", 1636 + "http 1.3.1", 1603 1637 "http-body", 1604 1638 "pin-project-lite", 1605 1639 ] ··· 1626 1660 "futures-channel", 1627 1661 "futures-util", 1628 1662 "h2", 1629 - "http", 1663 + "http 1.3.1", 1630 1664 "http-body", 1631 1665 "httparse", 1632 1666 "httpdate", ··· 1643 1677 source = "registry+https://github.com/rust-lang/crates.io-index" 1644 1678 checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" 1645 1679 dependencies = [ 1646 - "http", 1680 + "http 1.3.1", 1647 1681 "hyper", 1648 1682 "hyper-util", 1649 - "rustls", 1650 - "rustls-native-certs", 1683 + "rustls 0.23.31", 1684 + "rustls-native-certs 0.8.1", 1651 1685 "rustls-pki-types", 1652 1686 "tokio", 1653 - "tokio-rustls", 1687 + "tokio-rustls 0.26.2", 1654 1688 "tower-service", 1655 1689 "webpki-roots 1.0.2", 1656 1690 ] 1657 1691 1658 1692 [[package]] 1659 - name = "hyper-tls" 1660 - version = "0.6.0" 1661 - source = "registry+https://github.com/rust-lang/crates.io-index" 1662 - checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" 1663 - dependencies = [ 1664 - "bytes", 1665 - "http-body-util", 1666 - "hyper", 1667 - "hyper-util", 1668 - "native-tls", 1669 - "tokio", 1670 - "tokio-native-tls", 1671 - "tower-service", 1672 - ] 1673 - 1674 - [[package]] 1675 1693 name = "hyper-util" 1676 1694 version = "0.1.16" 1677 1695 source = "registry+https://github.com/rust-lang/crates.io-index" 1678 1696 checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" 1679 1697 dependencies = [ 1680 - "base64", 1698 + "base64 0.22.1", 1681 1699 "bytes", 1682 1700 "futures-channel", 1683 1701 "futures-core", 1684 1702 "futures-util", 1685 - "http", 1703 + "http 1.3.1", 1686 1704 "http-body", 1687 1705 "hyper", 1688 1706 "ipnet", ··· 1690 1708 "percent-encoding", 1691 1709 "pin-project-lite", 1692 1710 "socket2 0.6.0", 1693 - "system-configuration", 1694 1711 "tokio", 1695 1712 "tower-service", 1696 1713 "tracing", 1697 - "windows-registry", 1698 1714 ] 1699 1715 1700 1716 [[package]] ··· 2236 2252 source = "registry+https://github.com/rust-lang/crates.io-index" 2237 2253 checksum = "dd7399781913e5393588a8d8c6a2867bf85fb38eaf2502fdce465aad2dc6f034" 2238 2254 dependencies = [ 2239 - "base64", 2255 + "base64 0.22.1", 2240 2256 "http-body-util", 2241 2257 "hyper", 2242 2258 "hyper-rustls", ··· 2330 2346 "bytes", 2331 2347 "encoding_rs", 2332 2348 "futures-util", 2333 - "http", 2349 + "http 1.3.1", 2334 2350 "httparse", 2335 2351 "memchr", 2336 2352 "mime", ··· 2445 2461 ] 2446 2462 2447 2463 [[package]] 2448 - name = "native-tls" 2449 - version = "0.2.14" 2450 - source = "registry+https://github.com/rust-lang/crates.io-index" 2451 - checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" 2452 - dependencies = [ 2453 - "libc", 2454 - "log", 2455 - "openssl", 2456 - "openssl-probe", 2457 - "openssl-sys", 2458 - "schannel", 2459 - "security-framework 2.11.1", 2460 - "security-framework-sys", 2461 - "tempfile", 2462 - ] 2463 - 2464 - [[package]] 2465 2464 name = "nom" 2466 2465 version = "7.1.3" 2467 2466 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 2593 2592 checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" 2594 2593 2595 2594 [[package]] 2596 - name = "openssl" 2597 - version = "0.10.73" 2598 - source = "registry+https://github.com/rust-lang/crates.io-index" 2599 - checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" 2600 - dependencies = [ 2601 - "bitflags 2.9.1", 2602 - "cfg-if", 2603 - "foreign-types", 2604 - "libc", 2605 - "once_cell", 2606 - "openssl-macros", 2607 - "openssl-sys", 2608 - ] 2609 - 2610 - [[package]] 2611 - name = "openssl-macros" 2612 - version = "0.1.1" 2613 - source = "registry+https://github.com/rust-lang/crates.io-index" 2614 - checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" 2615 - dependencies = [ 2616 - "proc-macro2", 2617 - "quote", 2618 - "syn 2.0.104", 2619 - ] 2620 - 2621 - [[package]] 2622 2595 name = "openssl-probe" 2623 2596 version = "0.1.6" 2624 2597 source = "registry+https://github.com/rust-lang/crates.io-index" 2625 2598 checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" 2626 2599 2627 2600 [[package]] 2628 - name = "openssl-sys" 2629 - version = "0.9.109" 2630 - source = "registry+https://github.com/rust-lang/crates.io-index" 2631 - checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" 2632 - dependencies = [ 2633 - "cc", 2634 - "libc", 2635 - "pkg-config", 2636 - "vcpkg", 2637 - ] 2638 - 2639 - [[package]] 2640 2601 name = "option-ext" 2641 2602 version = "0.2.0" 2642 2603 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 2859 2820 "quinn-proto", 2860 2821 "quinn-udp", 2861 2822 "rustc-hash 2.1.1", 2862 - "rustls", 2823 + "rustls 0.23.31", 2863 2824 "socket2 0.5.10", 2864 2825 "thiserror 2.0.12", 2865 2826 "tokio", ··· 2879 2840 "rand 0.9.2", 2880 2841 "ring", 2881 2842 "rustc-hash 2.1.1", 2882 - "rustls", 2843 + "rustls 0.23.31", 2883 2844 "rustls-pki-types", 2884 2845 "slab", 2885 2846 "thiserror 2.0.12", ··· 3089 3050 source = "registry+https://github.com/rust-lang/crates.io-index" 3090 3051 checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" 3091 3052 dependencies = [ 3092 - "base64", 3053 + "async-compression", 3054 + "base64 0.22.1", 3093 3055 "bytes", 3094 - "encoding_rs", 3095 3056 "futures-core", 3096 - "h2", 3097 - "http", 3057 + "futures-util", 3058 + "http 1.3.1", 3098 3059 "http-body", 3099 3060 "http-body-util", 3100 3061 "hyper", 3101 3062 "hyper-rustls", 3102 - "hyper-tls", 3103 3063 "hyper-util", 3104 3064 "js-sys", 3105 3065 "log", 3106 - "mime", 3107 - "native-tls", 3108 3066 "percent-encoding", 3109 3067 "pin-project-lite", 3110 3068 "quinn", 3111 - "rustls", 3069 + "rustls 0.23.31", 3112 3070 "rustls-pki-types", 3113 3071 "serde", 3114 3072 "serde_json", 3115 3073 "serde_urlencoded", 3116 3074 "sync_wrapper", 3117 3075 "tokio", 3118 - "tokio-native-tls", 3119 - "tokio-rustls", 3076 + "tokio-rustls 0.26.2", 3077 + "tokio-util", 3120 3078 "tower", 3121 3079 "tower-http", 3122 3080 "tower-service", 3123 3081 "url", 3124 3082 "wasm-bindgen", 3125 3083 "wasm-bindgen-futures", 3084 + "wasm-streams", 3126 3085 "web-sys", 3127 3086 "webpki-roots 1.0.2", 3128 3087 ] ··· 3163 3122 [[package]] 3164 3123 name = "rocketman" 3165 3124 version = "0.2.3" 3125 + source = "registry+https://github.com/rust-lang/crates.io-index" 3126 + checksum = "9928fe43979c19ff1f46f7920c30b76dfcead7a4d571c9836c4d02da8587f844" 3166 3127 dependencies = [ 3167 3128 "anyhow", 3168 3129 "async-trait", ··· 3170 3131 "derive_builder", 3171 3132 "flume", 3172 3133 "futures-util", 3173 - "metrics 0.23.1", 3134 + "metrics 0.24.2", 3174 3135 "rand 0.8.5", 3175 3136 "serde", 3176 3137 "serde_json", 3177 3138 "tokio", 3178 - "tokio-tungstenite", 3139 + "tokio-tungstenite 0.20.1", 3179 3140 "tracing", 3180 3141 "tracing-subscriber", 3181 3142 "url", ··· 3257 3218 3258 3219 [[package]] 3259 3220 name = "rustls" 3221 + version = "0.21.12" 3222 + source = "registry+https://github.com/rust-lang/crates.io-index" 3223 + checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" 3224 + dependencies = [ 3225 + "log", 3226 + "ring", 3227 + "rustls-webpki 0.101.7", 3228 + "sct", 3229 + ] 3230 + 3231 + [[package]] 3232 + name = "rustls" 3260 3233 version = "0.23.31" 3261 3234 source = "registry+https://github.com/rust-lang/crates.io-index" 3262 3235 checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" ··· 3265 3238 "once_cell", 3266 3239 "ring", 3267 3240 "rustls-pki-types", 3268 - "rustls-webpki", 3241 + "rustls-webpki 0.103.4", 3269 3242 "subtle", 3270 3243 "zeroize", 3271 3244 ] 3272 3245 3273 3246 [[package]] 3274 3247 name = "rustls-native-certs" 3248 + version = "0.6.3" 3249 + source = "registry+https://github.com/rust-lang/crates.io-index" 3250 + checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" 3251 + dependencies = [ 3252 + "openssl-probe", 3253 + "rustls-pemfile", 3254 + "schannel", 3255 + "security-framework 2.11.1", 3256 + ] 3257 + 3258 + [[package]] 3259 + name = "rustls-native-certs" 3275 3260 version = "0.8.1" 3276 3261 source = "registry+https://github.com/rust-lang/crates.io-index" 3277 3262 checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" ··· 3283 3268 ] 3284 3269 3285 3270 [[package]] 3271 + name = "rustls-pemfile" 3272 + version = "1.0.4" 3273 + source = "registry+https://github.com/rust-lang/crates.io-index" 3274 + checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" 3275 + dependencies = [ 3276 + "base64 0.21.7", 3277 + ] 3278 + 3279 + [[package]] 3286 3280 name = "rustls-pki-types" 3287 3281 version = "1.12.0" 3288 3282 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 3290 3284 dependencies = [ 3291 3285 "web-time", 3292 3286 "zeroize", 3287 + ] 3288 + 3289 + [[package]] 3290 + name = "rustls-webpki" 3291 + version = "0.101.7" 3292 + source = "registry+https://github.com/rust-lang/crates.io-index" 3293 + checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" 3294 + dependencies = [ 3295 + "ring", 3296 + "untrusted", 3293 3297 ] 3294 3298 3295 3299 [[package]] ··· 3336 3340 version = "1.2.0" 3337 3341 source = "registry+https://github.com/rust-lang/crates.io-index" 3338 3342 checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" 3343 + 3344 + [[package]] 3345 + name = "sct" 3346 + version = "0.7.1" 3347 + source = "registry+https://github.com/rust-lang/crates.io-index" 3348 + checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" 3349 + dependencies = [ 3350 + "ring", 3351 + "untrusted", 3352 + ] 3339 3353 3340 3354 [[package]] 3341 3355 name = "sec1" ··· 3646 3660 source = "registry+https://github.com/rust-lang/crates.io-index" 3647 3661 checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6" 3648 3662 dependencies = [ 3649 - "base64", 3663 + "base64 0.22.1", 3650 3664 "bytes", 3651 3665 "crc", 3652 3666 "crossbeam-queue", ··· 3663 3677 "memchr", 3664 3678 "once_cell", 3665 3679 "percent-encoding", 3680 + "rustls 0.23.31", 3666 3681 "serde", 3667 3682 "serde_json", 3668 3683 "sha2", ··· 3674 3689 "tracing", 3675 3690 "url", 3676 3691 "uuid", 3692 + "webpki-roots 0.26.11", 3677 3693 ] 3678 3694 3679 3695 [[package]] ··· 3721 3737 checksum = "aa003f0038df784eb8fecbbac13affe3da23b45194bd57dba231c8f48199c526" 3722 3738 dependencies = [ 3723 3739 "atoi", 3724 - "base64", 3740 + "base64 0.22.1", 3725 3741 "bitflags 2.9.1", 3726 3742 "byteorder", 3727 3743 "bytes", ··· 3765 3781 checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46" 3766 3782 dependencies = [ 3767 3783 "atoi", 3768 - "base64", 3784 + "base64 0.22.1", 3769 3785 "bitflags 2.9.1", 3770 3786 "byteorder", 3771 3787 "crc", ··· 3943 3959 ] 3944 3960 3945 3961 [[package]] 3946 - name = "system-configuration" 3947 - version = "0.6.1" 3948 - source = "registry+https://github.com/rust-lang/crates.io-index" 3949 - checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" 3950 - dependencies = [ 3951 - "bitflags 2.9.1", 3952 - "core-foundation 0.9.4", 3953 - "system-configuration-sys", 3954 - ] 3955 - 3956 - [[package]] 3957 - name = "system-configuration-sys" 3958 - version = "0.6.0" 3959 - source = "registry+https://github.com/rust-lang/crates.io-index" 3960 - checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" 3961 - dependencies = [ 3962 - "core-foundation-sys", 3963 - "libc", 3964 - ] 3965 - 3966 - [[package]] 3967 3962 name = "tagptr" 3968 3963 version = "0.2.0" 3969 3964 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 4140 4135 ] 4141 4136 4142 4137 [[package]] 4143 - name = "tokio-native-tls" 4144 - version = "0.3.1" 4138 + name = "tokio-rustls" 4139 + version = "0.24.1" 4145 4140 source = "registry+https://github.com/rust-lang/crates.io-index" 4146 - checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" 4141 + checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" 4147 4142 dependencies = [ 4148 - "native-tls", 4143 + "rustls 0.21.12", 4149 4144 "tokio", 4150 4145 ] 4151 4146 ··· 4155 4150 source = "registry+https://github.com/rust-lang/crates.io-index" 4156 4151 checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" 4157 4152 dependencies = [ 4158 - "rustls", 4153 + "rustls 0.23.31", 4159 4154 "tokio", 4160 4155 ] 4161 4156 ··· 4172 4167 4173 4168 [[package]] 4174 4169 name = "tokio-tungstenite" 4170 + version = "0.20.1" 4171 + source = "registry+https://github.com/rust-lang/crates.io-index" 4172 + checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" 4173 + dependencies = [ 4174 + "futures-util", 4175 + "log", 4176 + "rustls 0.21.12", 4177 + "rustls-native-certs 0.6.3", 4178 + "tokio", 4179 + "tokio-rustls 0.24.1", 4180 + "tungstenite 0.20.1", 4181 + "webpki-roots 0.25.4", 4182 + ] 4183 + 4184 + [[package]] 4185 + name = "tokio-tungstenite" 4175 4186 version = "0.24.0" 4176 4187 source = "registry+https://github.com/rust-lang/crates.io-index" 4177 4188 checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9" 4178 4189 dependencies = [ 4179 4190 "futures-util", 4180 4191 "log", 4181 - "rustls", 4192 + "rustls 0.23.31", 4182 4193 "rustls-pki-types", 4183 4194 "tokio", 4184 - "tokio-rustls", 4185 - "tungstenite", 4195 + "tokio-rustls 0.26.2", 4196 + "tungstenite 0.24.0", 4186 4197 "webpki-roots 0.26.11", 4187 4198 ] 4188 4199 ··· 4250 4261 "bitflags 2.9.1", 4251 4262 "bytes", 4252 4263 "futures-util", 4253 - "http", 4264 + "http 1.3.1", 4254 4265 "http-body", 4255 4266 "iri-string", 4256 4267 "pin-project-lite", ··· 4352 4363 4353 4364 [[package]] 4354 4365 name = "tungstenite" 4366 + version = "0.20.1" 4367 + source = "registry+https://github.com/rust-lang/crates.io-index" 4368 + checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" 4369 + dependencies = [ 4370 + "byteorder", 4371 + "bytes", 4372 + "data-encoding", 4373 + "http 0.2.12", 4374 + "httparse", 4375 + "log", 4376 + "rand 0.8.5", 4377 + "rustls 0.21.12", 4378 + "sha1", 4379 + "thiserror 1.0.69", 4380 + "url", 4381 + "utf-8", 4382 + ] 4383 + 4384 + [[package]] 4385 + name = "tungstenite" 4355 4386 version = "0.24.0" 4356 4387 source = "registry+https://github.com/rust-lang/crates.io-index" 4357 4388 checksum = "18e5b8366ee7a95b16d32197d0b2604b43a0be89dc5fac9f8e96ccafbaedda8a" ··· 4359 4390 "byteorder", 4360 4391 "bytes", 4361 4392 "data-encoding", 4362 - "http", 4393 + "http 1.3.1", 4363 4394 "httparse", 4364 4395 "log", 4365 4396 "rand 0.8.5", 4366 - "rustls", 4397 + "rustls 0.23.31", 4367 4398 "rustls-pki-types", 4368 4399 "sha1", 4369 4400 "thiserror 1.0.69", ··· 4383 4414 "atrium-api", 4384 4415 "atrium-xrpc", 4385 4416 "chrono", 4386 - "http", 4417 + "http 1.3.1", 4387 4418 "ipld-core", 4388 4419 "langtag", 4389 4420 "regex", ··· 4648 4679 ] 4649 4680 4650 4681 [[package]] 4682 + name = "wasm-streams" 4683 + version = "0.4.2" 4684 + source = "registry+https://github.com/rust-lang/crates.io-index" 4685 + checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" 4686 + dependencies = [ 4687 + "futures-util", 4688 + "js-sys", 4689 + "wasm-bindgen", 4690 + "wasm-bindgen-futures", 4691 + "web-sys", 4692 + ] 4693 + 4694 + [[package]] 4651 4695 name = "web-sys" 4652 4696 version = "0.3.77" 4653 4697 source = "registry+https://github.com/rust-lang/crates.io-index" ··· 4666 4710 "js-sys", 4667 4711 "wasm-bindgen", 4668 4712 ] 4713 + 4714 + [[package]] 4715 + name = "webpki-roots" 4716 + version = "0.25.4" 4717 + source = "registry+https://github.com/rust-lang/crates.io-index" 4718 + checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" 4669 4719 4670 4720 [[package]] 4671 4721 name = "webpki-roots" ··· 4855 4905 dependencies = [ 4856 4906 "windows-core 0.61.2", 4857 4907 "windows-link", 4858 - ] 4859 - 4860 - [[package]] 4861 - name = "windows-registry" 4862 - version = "0.5.3" 4863 - source = "registry+https://github.com/rust-lang/crates.io-index" 4864 - checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" 4865 - dependencies = [ 4866 - "windows-link", 4867 - "windows-result 0.3.4", 4868 - "windows-strings", 4869 4908 ] 4870 4909 4871 4910 [[package]]
+27 -11
Cargo.toml
··· 1 1 [workspace] 2 - members = [ 3 - "apps/aqua", 4 - "services/cadet", 5 - "services/rocketman", 6 - "tools/teal-cli", 7 - ] 2 + members = ["apps/aqua", "services/cadet", "tools/teal-cli"] 3 + default-members = ["services/types"] 8 4 resolver = "2" 9 5 10 6 [workspace.dependencies] 11 7 # Shared dependencies 12 - tokio = { version = "1.0", features = ["rt-multi-thread", "macros"] } 8 + tokio = { version = "1.0", features = [ 9 + "rt-multi-thread", 10 + "macros", 11 + "time", 12 + "net", 13 + "sync", 14 + ] } 13 15 axum = { version = "0.8", features = ["macros"] } 14 16 tower-http = { version = "0.6", features = ["cors"] } 15 - sqlx = { version = "0.8", features = ["runtime-tokio", "postgres", "uuid"] } 17 + sqlx = { version = "0.8", features = [ 18 + "runtime-tokio", 19 + "postgres", 20 + "uuid", 21 + "tls-rustls", 22 + ] } 16 23 serde = { version = "1.0", features = ["derive"] } 17 24 anyhow = "1.0" 18 25 serde_json = "1.0" 19 26 tracing = "0.1" 20 27 tracing-subscriber = "0.3" 21 28 metrics = "0.23" 22 - reqwest = { version = "0.12", features = ["json", "rustls-tls"] } 29 + reqwest = { version = "0.12", default-features = false, features = [ 30 + "json", 31 + "rustls-tls", 32 + "stream", 33 + "gzip", 34 + ] } 23 35 url = "2.5" 24 36 rand = "0.8" 25 37 flume = "0.11" 26 38 async-trait = "0.1" 27 39 time = "0.3" 28 40 dotenvy = "0.15" 29 - tokio-tungstenite = { version = "*", features = ["rustls-tls-webpki-roots"] } 41 + tokio-tungstenite = { version = "*", default-features = false, features = [ 42 + "rustls-tls-webpki-roots", 43 + "connect", 44 + "handshake", 45 + ] } 30 46 atrium-api = "0.25" 31 47 chrono = "0.4" 32 48 uuid = { version = "1.0", features = ["v4", "serde"] } 33 49 types = { path = "services/types" } 34 - rocketman = { path = "services/rocketman" } 50 + rocketman = "0.2.3" 35 51 36 52 # CAR and IPLD dependencies 37 53 iroh-car = "0.5"
+18
Cross.toml
··· 1 + [build.env] 2 + passthrough = [ 3 + "CARGO_HOME", 4 + "CARGO_TARGET_DIR", 5 + "SQLX_OFFLINE", 6 + "PKG_CONFIG_ALLOW_CROSS", 7 + ] 8 + 9 + [target.aarch64-unknown-linux-gnu] 10 + image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:main" 11 + 12 + [target.aarch64-unknown-linux-gnu.env] 13 + passthrough = [ 14 + "CARGO_HOME", 15 + "CARGO_TARGET_DIR", 16 + "SQLX_OFFLINE", 17 + "PKG_CONFIG_ALLOW_CROSS", 18 + ]
+20
apps/aqua/Cross.toml
··· 1 + [build.env] 2 + passthrough = [ 3 + "CARGO_HOME", 4 + "CARGO_TARGET_DIR", 5 + "SQLX_OFFLINE", 6 + "PKG_CONFIG_ALLOW_CROSS", 7 + ] 8 + 9 + [target.aarch64-unknown-linux-gnu] 10 + image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:main" 11 + 12 + [target.aarch64-unknown-linux-gnu.env] 13 + passthrough = ["CARGO_HOME", "CARGO_TARGET_DIR", "SQLX_OFFLINE"] 14 + # Allow cross-compilation of native dependencies 15 + PKG_CONFIG_ALLOW_CROSS = "1" 16 + # Use static linking to reduce runtime dependencies 17 + RUSTFLAGS = "-C target-feature=+crt-static -C link-arg=-s" 18 + # Disable problematic features that might require OpenSSL 19 + CC_aarch64_unknown_linux_gnu = "aarch64-linux-gnu-gcc" 20 + CXX_aarch64_unknown_linux_gnu = "aarch64-linux-gnu-g++"
-24
compose.db-test.yml
··· 1 - version: "3.8" 2 - 3 - services: 4 - postgres: 5 - image: postgres:latest 6 - container_name: postgres_test_db 7 - environment: 8 - POSTGRES_USER: postgres 9 - POSTGRES_PASSWORD: testpass123 10 - POSTGRES_DB: teal_test 11 - ports: 12 - - "5433:5432" 13 - volumes: 14 - - postgres_test_data:/var/lib/postgresql/data 15 - networks: 16 - - test_network 17 - command: postgres -c log_statement=all -c log_destination=stderr 18 - 19 - networks: 20 - test_network: 21 - driver: bridge 22 - 23 - volumes: 24 - postgres_test_data:
-355
docs/migration-troubleshooting.md
··· 1 - # Migration Troubleshooting Guide 2 - 3 - ## Common Migration Issues and Solutions 4 - 5 - ### Issue: "cannot drop function because other objects depend on it" 6 - 7 - **Error Message:** 8 - ``` 9 - error: while executing migration 20241220000008: error returned from database: cannot drop function extract_discriminant(text) because other objects depend on it 10 - ``` 11 - 12 - **Cause:** 13 - This error occurs when trying to drop database functions that have dependent objects (views, other functions, triggers, etc.) without properly handling the dependencies. 14 - 15 - **Solution:** 16 - 17 - #### Option 1: Fix the Migration (Recommended) 18 - Update the problematic migration to handle dependencies properly: 19 - 20 - 1. **Edit the migration file** (e.g., `20241220000008_fix_discriminant_case_sensitivity.sql`): 21 - 22 - ```sql 23 - -- Drop dependent views first, then functions, then recreate everything 24 - DROP VIEW IF EXISTS discriminant_analysis CASCADE; 25 - DROP VIEW IF EXISTS discriminant_stats CASCADE; 26 - 27 - -- Drop existing functions with CASCADE to handle dependencies 28 - DROP FUNCTION IF EXISTS extract_discriminant(TEXT) CASCADE; 29 - DROP FUNCTION IF EXISTS get_base_name(TEXT) CASCADE; 30 - DROP FUNCTION IF EXISTS extract_edition_discriminant(TEXT) CASCADE; 31 - 32 - -- Then recreate functions and views... 33 - ``` 34 - 35 - 2. **Reset the migration state** if the migration was partially applied: 36 - 37 - ```bash 38 - # Connect to your database and reset the specific migration 39 - psql $DATABASE_URL -c "DELETE FROM _sqlx_migrations WHERE version = '20241220000008';" 40 - 41 - # Or reset all migrations and start fresh (WARNING: This drops all data) 42 - psql $DATABASE_URL -c "DROP SCHEMA public CASCADE; CREATE SCHEMA public;" 43 - ``` 44 - 45 - 3. **Run migrations again**: 46 - ```bash 47 - cd services 48 - DATABASE_URL="your_database_url" sqlx migrate run 49 - ``` 50 - 51 - #### Option 2: Manual Dependency Cleanup 52 - If you can't modify the migration file: 53 - 54 - 1. **Identify dependencies**: 55 - ```sql 56 - -- Find objects that depend on the function 57 - SELECT 58 - p.proname as function_name, 59 - d.objid, 60 - d.classid::regclass as object_type, 61 - d.refobjid 62 - FROM pg_depend d 63 - JOIN pg_proc p ON d.refobjid = p.oid 64 - WHERE p.proname = 'extract_discriminant'; 65 - ``` 66 - 67 - 2. **Drop dependencies manually**: 68 - ```sql 69 - -- Drop dependent views 70 - DROP VIEW IF EXISTS discriminant_analysis CASCADE; 71 - DROP VIEW IF EXISTS discriminant_stats CASCADE; 72 - DROP VIEW IF EXISTS track_variants CASCADE; 73 - DROP VIEW IF EXISTS release_variants CASCADE; 74 - 75 - -- Drop the functions 76 - DROP FUNCTION IF EXISTS extract_discriminant(TEXT) CASCADE; 77 - DROP FUNCTION IF EXISTS get_base_name(TEXT) CASCADE; 78 - DROP FUNCTION IF EXISTS extract_edition_discriminant(TEXT) CASCADE; 79 - ``` 80 - 81 - 3. **Continue with migration**: 82 - ```bash 83 - DATABASE_URL="your_database_url" sqlx migrate run 84 - ``` 85 - 86 - ### Issue: "migration was previously applied but has been modified" 87 - 88 - **Error Message:** 89 - ``` 90 - error: migration 20241220000008 was previously applied but has been modified 91 - ``` 92 - 93 - **Cause:** 94 - The migration file has been changed after it was already applied to the database. 95 - 96 - **Solutions:** 97 - 98 - #### Option 1: Reset Migration State 99 - ```bash 100 - # Remove the specific migration from tracking 101 - psql $DATABASE_URL -c "DELETE FROM _sqlx_migrations WHERE version = '20241220000008';" 102 - 103 - # Run migrations again 104 - DATABASE_URL="your_database_url" sqlx migrate run 105 - ``` 106 - 107 - #### Option 2: Create a New Migration 108 - ```bash 109 - # Create a new migration with your changes 110 - sqlx migrate add fix_discriminant_case_sensitivity_v2 111 - 112 - # Copy your changes to the new migration file 113 - # Run the new migration 114 - DATABASE_URL="your_database_url" sqlx migrate run 115 - ``` 116 - 117 - #### Option 3: Full Reset (WARNING: Destroys all data) 118 - ```bash 119 - # Connect to database and reset everything 120 - psql $DATABASE_URL -c "DROP SCHEMA public CASCADE; CREATE SCHEMA public;" 121 - 122 - # Run all migrations from scratch 123 - DATABASE_URL="your_database_url" sqlx migrate run 124 - ``` 125 - 126 - ### Issue: "No such file or directory" when running migrations 127 - 128 - **Error Message:** 129 - ``` 130 - error: while resolving migrations: No such file or directory (os error 2) 131 - ``` 132 - 133 - **Cause:** 134 - The migration directory is not found in the expected location. 135 - 136 - **Solutions:** 137 - 138 - #### Option 1: Check Migration Directory Location 139 - ```bash 140 - # Check where sqlx expects migrations 141 - cat services/.sqlx/.sqlxrc 142 - 143 - # Ensure migrations exist in the correct location 144 - ls -la services/migrations/ 145 - ``` 146 - 147 - #### Option 2: Copy Migrations to Correct Location 148 - ```bash 149 - # If migrations are in wrong location, copy them 150 - cp migrations/*.sql services/migrations/ 151 - 152 - # Or create symlink 153 - ln -s ../migrations services/migrations 154 - ``` 155 - 156 - #### Option 3: Update sqlx Configuration 157 - Edit `services/.sqlx/.sqlxrc`: 158 - ```toml 159 - [database] 160 - url = "postgres://localhost/teal" 161 - migrations = "../migrations" # Update path as needed 162 - ``` 163 - 164 - ### Issue: Database Connection Problems 165 - 166 - **Error Messages:** 167 - - `Connection refused (os error 61)` 168 - - `password authentication failed` 169 - - `database "teal_test" does not exist` 170 - 171 - **Solutions:** 172 - 173 - #### Connection Refused 174 - ```bash 175 - # Check if database is running 176 - docker ps | grep postgres 177 - 178 - # Start database if needed 179 - docker-compose -f compose.db-test.yml up -d 180 - 181 - # Wait for database to start 182 - sleep 5 183 - ``` 184 - 185 - #### Authentication Issues 186 - ```bash 187 - # Check connection string format 188 - DATABASE_URL="postgres://username:password@host:port/database" 189 - 190 - # Example for test database 191 - DATABASE_URL="postgres://postgres:testpass123@localhost:5433/teal_test" 192 - ``` 193 - 194 - #### Database Doesn't Exist 195 - ```bash 196 - # Create database 197 - docker exec postgres_container psql -U postgres -c "CREATE DATABASE teal_test;" 198 - 199 - # Or recreate test environment 200 - docker-compose -f compose.db-test.yml down 201 - docker-compose -f compose.db-test.yml up -d 202 - ``` 203 - 204 - ## Migration Best Practices 205 - 206 - ### 1. Handle Dependencies Properly 207 - Always use `CASCADE` when dropping objects with dependencies: 208 - ```sql 209 - DROP FUNCTION function_name(args) CASCADE; 210 - DROP VIEW view_name CASCADE; 211 - ``` 212 - 213 - ### 2. Test Migrations Locally 214 - ```bash 215 - # Use test database for migration testing 216 - DATABASE_URL="postgres://localhost:5433/teal_test" sqlx migrate run 217 - 218 - # Verify results 219 - psql "postgres://localhost:5433/teal_test" -c "SELECT extract_discriminant('Test (Example)');" 220 - ``` 221 - 222 - ### 3. Backup Before Major Migrations 223 - ```bash 224 - # Create backup 225 - pg_dump $DATABASE_URL > backup_before_migration.sql 226 - 227 - # Apply migrations 228 - sqlx migrate run 229 - 230 - # Restore if needed 231 - psql $DATABASE_URL < backup_before_migration.sql 232 - ``` 233 - 234 - ### 4. Version Control Migration Files 235 - - Never modify applied migrations 236 - - Create new migrations for changes 237 - - Use descriptive migration names 238 - - Include rollback instructions in comments 239 - 240 - ### 5. Migration File Structure 241 - ```sql 242 - -- Migration: descriptive_name 243 - -- Purpose: Brief description of what this migration does 244 - -- Dependencies: List any required prior migrations 245 - -- Rollback: Instructions for manual rollback if needed 246 - 247 - -- Drop dependencies first 248 - DROP VIEW IF EXISTS dependent_view CASCADE; 249 - 250 - -- Make changes 251 - CREATE OR REPLACE FUNCTION new_function() ...; 252 - 253 - -- Recreate dependencies 254 - CREATE VIEW dependent_view AS ...; 255 - 256 - -- Update existing data if needed 257 - UPDATE table_name SET column = new_value WHERE condition; 258 - 259 - -- Add comments 260 - COMMENT ON FUNCTION new_function IS 'Description of function purpose'; 261 - ``` 262 - 263 - ## Emergency Recovery 264 - 265 - ### Complete Database Reset 266 - If migrations are completely broken: 267 - 268 - ```bash 269 - # 1. Stop all services 270 - docker-compose down 271 - 272 - # 2. Remove database volume (WARNING: Destroys all data) 273 - docker volume rm teal_postgres_data 274 - 275 - # 3. Start fresh 276 - docker-compose up -d postgres 277 - 278 - # 4. Wait for database to initialize 279 - sleep 10 280 - 281 - # 5. Run all migrations from scratch 282 - DATABASE_URL="your_database_url" sqlx migrate run 283 - ``` 284 - 285 - ### Partial Recovery 286 - If only discriminant system is broken: 287 - 288 - ```sql 289 - -- Remove discriminant-related objects 290 - DROP VIEW IF EXISTS discriminant_analysis CASCADE; 291 - DROP VIEW IF EXISTS discriminant_stats CASCADE; 292 - DROP VIEW IF EXISTS track_variants CASCADE; 293 - DROP VIEW IF EXISTS release_variants CASCADE; 294 - DROP FUNCTION IF EXISTS extract_discriminant(TEXT) CASCADE; 295 - DROP FUNCTION IF EXISTS get_base_name(TEXT) CASCADE; 296 - DROP FUNCTION IF EXISTS extract_edition_discriminant(TEXT) CASCADE; 297 - 298 - -- Remove discriminant columns 299 - ALTER TABLE plays DROP COLUMN IF EXISTS track_discriminant; 300 - ALTER TABLE plays DROP COLUMN IF EXISTS release_discriminant; 301 - ALTER TABLE recordings DROP COLUMN IF EXISTS discriminant; 302 - ALTER TABLE releases DROP COLUMN IF EXISTS discriminant; 303 - 304 - -- Mark discriminant migrations as not applied 305 - DELETE FROM _sqlx_migrations WHERE version >= '20241220000006'; 306 - 307 - -- Re-run discriminant migrations 308 - ``` 309 - 310 - ## Getting Help 311 - 312 - ### Debug Information to Collect 313 - When reporting migration issues, include: 314 - 315 - 1. **Error message** (full stack trace) 316 - 2. **Migration file content** that's causing issues 317 - 3. **Database state**: 318 - ```sql 319 - SELECT version FROM _sqlx_migrations ORDER BY version; 320 - \df extract_discriminant 321 - \dv discriminant_* 322 - ``` 323 - 4. **Environment details**: 324 - - Database version: `SELECT version();` 325 - - Operating system 326 - - sqlx version: `cargo sqlx --version` 327 - 328 - ### Useful Debugging Commands 329 - ```sql 330 - -- Check applied migrations 331 - SELECT * FROM _sqlx_migrations ORDER BY version; 332 - 333 - -- Check function definitions 334 - \df+ extract_discriminant 335 - 336 - -- Check view definitions 337 - \d+ discriminant_analysis 338 - 339 - -- Check table schemas 340 - \d+ plays 341 - \d+ recordings 342 - \d+ releases 343 - 344 - -- Test function directly 345 - SELECT extract_discriminant('Test (Example)'); 346 - ``` 347 - 348 - ## Contact and Support 349 - 350 - For persistent migration issues: 351 - 1. Check this troubleshooting guide first 352 - 2. Review the specific migration file causing issues 353 - 3. Try solutions in order of preference (fix migration โ†’ manual cleanup โ†’ reset) 354 - 4. Create minimal reproduction case for complex issues 355 - 5. Document exact steps that led to the error for support requests
-1
lexicons/fm.teal.alpha/feed/play.json
··· 19 19 }, 20 20 "trackMbId": { 21 21 "type": "string", 22 - 23 22 "description": "The Musicbrainz ID of the track" 24 23 }, 25 24 "recordingMbId": {
+54
lexicons/fm.teal.alpha/feed/social/defs.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "fm.teal.alpha.feed.social.defs", 4 + "description": "This lexicon is in a not officially released state. It is subject to change. | Misc. items related to the social feed..", 5 + "defs": { 6 + "trackView": { 7 + "trackName": { 8 + "type": "string", 9 + "minLength": 1, 10 + "maxLength": 256, 11 + "maxGraphemes": 2560, 12 + "description": "The name of the track" 13 + }, 14 + "trackMbId": { 15 + "type": "string", 16 + "description": "The Musicbrainz ID of the track" 17 + }, 18 + "recordingMbId": { 19 + "type": "string", 20 + "description": "The Musicbrainz recording ID of the track" 21 + }, 22 + "artistNames": { 23 + "type": "array", 24 + "items": { 25 + "type": "string", 26 + "minLength": 1, 27 + "maxLength": 256, 28 + "maxGraphemes": 2560 29 + }, 30 + "description": "Array of artist names in order of original appearance. Prefer using 'artists'." 31 + }, 32 + "artistMbIds": { 33 + "type": "array", 34 + "items": { "type": "string" }, 35 + "description": "Array of Musicbrainz artist IDs. Prefer using 'artists'." 36 + }, 37 + "artists": { 38 + "type": "array", 39 + "items": { "type": "ref", "ref": "fm.teal.alpha.feed.defs#artist" }, 40 + "description": "Array of artists in order of original appearance." 41 + }, 42 + "releaseName": { 43 + "type": "string", 44 + "maxLength": 256, 45 + "maxGraphemes": 2560, 46 + "description": "The name of the release/album" 47 + }, 48 + "releaseMbId": { 49 + "type": "string", 50 + "description": "The Musicbrainz release ID" 51 + } 52 + } 53 + } 54 + }
+24
lexicons/fm.teal.alpha/feed/social/like.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "fm.teal.alpha.feed.social.like", 4 + "description": "This lexicon is in a not officially released state. It is subject to change. | The action of 'Liking' a Teal.fm post.", 5 + "defs": { 6 + "main": { 7 + "type": "record", 8 + "description": "Record containing a like for a teal.fm post.", 9 + "key": "tid", 10 + "record": { 11 + "type": "object", 12 + "required": ["subject", "createdAt"], 13 + "properties": { 14 + "subject": { "type": "ref", "ref": "com.atproto.repo.strongRef" }, 15 + "createdAt": { 16 + "type": "string", 17 + "format": "datetime", 18 + "description": "Client-declared timestamp when this post was originally created." 19 + } 20 + } 21 + } 22 + } 23 + } 24 + }
+30
lexicons/fm.teal.alpha/feed/social/playlist.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "fm.teal.alpha.feed.social.playlist", 4 + "description": "This lexicon is in a not officially released state. It is subject to change. | A teal.fm playlist, representing a list of tracks.", 5 + "defs": { 6 + "main": { 7 + "type": "record", 8 + "description": "Record containing the playlist metadata.", 9 + "key": "tid", 10 + "record": { 11 + "type": "object", 12 + "required": ["name", "createdAt"], 13 + "properties": { 14 + "name": { 15 + "type": "string", 16 + "description": "Display name for the playlist, required.", 17 + "minLength": 1, 18 + "maxLength": 50 19 + }, 20 + "description": { "type": "string", "maxLength": 5000 }, 21 + "createdAt": { 22 + "type": "string", 23 + "format": "datetime", 24 + "description": "Client-declared timestamp when this playlist was originally created." 25 + } 26 + } 27 + } 28 + } 29 + } 30 + }
+32
lexicons/fm.teal.alpha/feed/social/playlistItem.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "fm.teal.alpha.feed.social.playlistItem", 4 + "description": "This lexicon is in a not officially released state. It is subject to change. | A teal.fm playlist item.", 5 + "defs": { 6 + "main": { 7 + "type": "record", 8 + "description": "Record containing a playlist item for a teal.fm playlist.", 9 + "key": "tid", 10 + "record": { 11 + "type": "object", 12 + "required": ["subject", "createdAt", "trackName"], 13 + "properties": { 14 + "subject": { "type": "record", "ref": "com.atproto.repo.strongRef" }, 15 + "createdAt": { 16 + "type": "string", 17 + "format": "datetime", 18 + "description": "Client-declared timestamp when this post was originally created." 19 + }, 20 + "track": { 21 + "type": "ref", 22 + "ref": "fm.teal.alpha.feed.social.defs#trackView" 23 + }, 24 + "order": { 25 + "type": "integer", 26 + "description": "The order of the track in the playlist" 27 + } 28 + } 29 + } 30 + } 31 + } 32 + }
+104
lexicons/fm.teal.alpha/feed/social/post.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "fm.teal.alpha.feed.social.post", 4 + "description": "This lexicon is in a not officially released state. It is subject to change. | Record containing a teal.fm post. Teal.fm posts include a track that is connected to the post, and could have some text. Replies, by default, have the same track as the parent post.", 5 + "defs": { 6 + "main": { 7 + "type": "record", 8 + "description": "Record containing a teal.fm post.", 9 + "key": "tid", 10 + "record": { 11 + "type": "object", 12 + "required": ["text", "createdAt"], 13 + "properties": { 14 + "text": { 15 + "type": "string", 16 + "maxLength": 3000, 17 + "maxGraphemes": 300, 18 + "description": "The primary post content. May be an empty string, if there are embeds." 19 + }, 20 + 21 + "trackName": { 22 + "type": "string", 23 + "minLength": 1, 24 + "maxLength": 256, 25 + "maxGraphemes": 2560, 26 + "description": "The name of the track" 27 + }, 28 + "trackMbId": { 29 + "type": "string", 30 + "description": "The Musicbrainz ID of the track" 31 + }, 32 + "recordingMbId": { 33 + "type": "string", 34 + "description": "The Musicbrainz recording ID of the track" 35 + }, 36 + "duration": { 37 + "type": "integer", 38 + "description": "The duration of the track in seconds" 39 + }, 40 + "artistNames": { 41 + "type": "array", 42 + "items": { 43 + "type": "string", 44 + "minLength": 1, 45 + "maxLength": 256, 46 + "maxGraphemes": 2560 47 + }, 48 + "description": "The names of the artists" 49 + }, 50 + "artistMbIds": { 51 + "type": "array", 52 + "items": { "type": "string" }, 53 + "description": "The Musicbrainz IDs of the artists" 54 + }, 55 + "releaseName": { 56 + "type": "string", 57 + "maxLength": 256, 58 + "maxGraphemes": 2560, 59 + "description": "The name of the release/album" 60 + }, 61 + "releaseMbId": { 62 + "type": "string", 63 + "description": "The Musicbrainz ID of the release/album" 64 + }, 65 + "isrc": { 66 + "type": "string", 67 + "description": "The ISRC code associated with the recording" 68 + }, 69 + "reply": { "type": "ref", "ref": "#replyRef" }, 70 + "facets": { 71 + "type": "array", 72 + "description": "Rich text facets, which may include mentions, links, and other features.", 73 + "items": { "type": "ref", "ref": "fm.teal.alpha.richtext.facet" } 74 + }, 75 + "langs": { 76 + "type": "array", 77 + "description": "Indicates human language of post primary text content.", 78 + "maxLength": 3, 79 + "items": { "type": "string", "format": "language" } 80 + }, 81 + "tags": { 82 + "type": "array", 83 + "description": "Additional hashtags, in addition to any included in post text and facets.", 84 + "maxLength": 8, 85 + "items": { "type": "string", "maxLength": 640, "maxGraphemes": 64 } 86 + }, 87 + "createdAt": { 88 + "type": "string", 89 + "format": "datetime", 90 + "description": "Client-declared timestamp when this post was originally created." 91 + } 92 + } 93 + } 94 + }, 95 + "replyRef": { 96 + "type": "object", 97 + "required": ["root", "parent"], 98 + "properties": { 99 + "root": { "type": "ref", "ref": "com.atproto.repo.strongRef" }, 100 + "parent": { "type": "ref", "ref": "com.atproto.repo.strongRef" } 101 + } 102 + } 103 + } 104 + }
+24
lexicons/fm.teal.alpha/feed/social/repost.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "fm.teal.alpha.feed.social.repost", 4 + "description": "This lexicon is in a not officially released state. It is subject to change. | The action of 'Reposting' a Teal.fm post.", 5 + "defs": { 6 + "main": { 7 + "type": "record", 8 + "description": "Record containing a repost for a teal.fm post.", 9 + "key": "tid", 10 + "record": { 11 + "type": "object", 12 + "required": ["subject", "createdAt"], 13 + "properties": { 14 + "subject": { "type": "ref", "ref": "com.atproto.repo.strongRef" }, 15 + "createdAt": { 16 + "type": "string", 17 + "format": "datetime", 18 + "description": "Client-declared timestamp when this post was originally created." 19 + } 20 + } 21 + } 22 + } 23 + } 24 + }
+24
lexicons/fm.teal.alpha/richtext/facet.json
··· 1 + { 2 + "lexicon": 1, 3 + "id": "fm.teal.alpha.richtext.facet", 4 + "defs": { 5 + "main": { 6 + "type": "object", 7 + "description": "Annotation of a sub-string within rich text.", 8 + "required": ["index", "features"], 9 + "properties": { 10 + "index": { "type": "ref", "ref": "app.bsky.richtext.facet#byteSlice" }, 11 + "features": { 12 + "type": "array", 13 + "items": { 14 + "type": "union", 15 + "refs": [ 16 + "app.bsky.richtext.facet#mention", 17 + "app.bsky.richtext.facet#link" 18 + ] 19 + } 20 + } 21 + } 22 + } 23 + } 24 + }
-4
pnpm-lock.yaml
··· 254 254 255 255 services/cadet: {} 256 256 257 - services/rocketman: {} 258 - 259 257 services/satellite: {} 260 - 261 - services/types: {} 262 258 263 259 tools/lexicon-cli: 264 260 dependencies:
+69
scripts/setup-sqlx-offline.sh
··· 1 + #!/bin/bash 2 + 3 + # Script to copy .sqlx files to all Rust projects that use SQLx 4 + # This is needed for offline SQLx builds (SQLX_OFFLINE=true) 5 + 6 + set -e 7 + 8 + # Get the script directory (should be in teal/scripts/) 9 + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 + PROJECT_ROOT="$(dirname "$SCRIPT_DIR")" 11 + 12 + # Source .sqlx directory 13 + SQLX_SOURCE="$PROJECT_ROOT/.sqlx" 14 + 15 + # List of projects that use SQLx (relative to project root) 16 + SQLX_PROJECTS=( 17 + "apps/aqua" 18 + "services/cadet" 19 + "services/satellite" 20 + ) 21 + 22 + echo "๐Ÿ”ง Setting up SQLx offline files..." 23 + 24 + # Check if source .sqlx directory exists 25 + if [ ! -d "$SQLX_SOURCE" ]; then 26 + echo "โŒ Source .sqlx directory not found at: $SQLX_SOURCE" 27 + echo " Make sure you've run 'cargo sqlx prepare' from the services directory first." 28 + exit 1 29 + fi 30 + 31 + # Copy .sqlx files to each project that needs them 32 + for project in "${SQLX_PROJECTS[@]}"; do 33 + project_path="$PROJECT_ROOT/$project" 34 + target_sqlx="$project_path/.sqlx" 35 + 36 + if [ ! -d "$project_path" ]; then 37 + echo "โš ๏ธ Project directory not found: $project_path (skipping)" 38 + continue 39 + fi 40 + 41 + # Check if project actually uses SQLx 42 + if [ ! -f "$project_path/Cargo.toml" ]; then 43 + echo "โš ๏ธ No Cargo.toml found in $project (skipping)" 44 + continue 45 + fi 46 + 47 + if ! grep -q "sqlx" "$project_path/Cargo.toml"; then 48 + echo "โš ๏ธ Project $project doesn't appear to use SQLx (skipping)" 49 + continue 50 + fi 51 + 52 + echo "๐Ÿ“ฆ Copying .sqlx files to $project..." 53 + 54 + # Remove existing .sqlx directory if it exists 55 + if [ -d "$target_sqlx" ]; then 56 + rm -rf "$target_sqlx" 57 + fi 58 + 59 + # Copy the .sqlx directory 60 + cp -r "$SQLX_SOURCE" "$target_sqlx" 61 + 62 + echo " โœ… Copied $(ls -1 "$target_sqlx" | wc -l) query files" 63 + done 64 + 65 + echo "โœ… SQLx offline setup complete!" 66 + echo "" 67 + echo "Note: If you add new SQL queries or modify existing ones, you'll need to:" 68 + echo "1. Run 'cargo sqlx prepare' from the services directory" 69 + echo "2. Run this script again to update all project copies"
+21
services/Cargo.lock
··· 2836 2836 dependencies = [ 2837 2837 "aws-lc-rs", 2838 2838 "once_cell", 2839 + "ring", 2839 2840 "rustls-pki-types", 2840 2841 "rustls-webpki", 2841 2842 "subtle", ··· 3236 3237 "memchr", 3237 3238 "once_cell", 3238 3239 "percent-encoding", 3240 + "rustls", 3239 3241 "serde", 3240 3242 "serde_json", 3241 3243 "sha2", ··· 3247 3249 "tracing", 3248 3250 "url", 3249 3251 "uuid", 3252 + "webpki-roots 0.26.11", 3250 3253 ] 3251 3254 3252 3255 [[package]] ··· 4169 4172 dependencies = [ 4170 4173 "js-sys", 4171 4174 "wasm-bindgen", 4175 + ] 4176 + 4177 + [[package]] 4178 + name = "webpki-roots" 4179 + version = "0.26.11" 4180 + source = "registry+https://github.com/rust-lang/crates.io-index" 4181 + checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" 4182 + dependencies = [ 4183 + "webpki-roots 1.0.2", 4184 + ] 4185 + 4186 + [[package]] 4187 + name = "webpki-roots" 4188 + version = "1.0.2" 4189 + source = "registry+https://github.com/rust-lang/crates.io-index" 4190 + checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" 4191 + dependencies = [ 4192 + "rustls-pki-types", 4172 4193 ] 4173 4194 4174 4195 [[package]]
+5 -4
services/Cargo.toml
··· 1 1 [workspace] 2 - members = ["cadet", "rocketman", "satellite", "types"] 2 + members = ["cadet", "satellite", "types"] 3 3 resolver = "2" 4 4 5 5 [workspace.dependencies] ··· 12 12 "postgres", 13 13 "uuid", 14 14 "chrono", 15 + "tls-rustls", 15 16 ] } 16 17 serde = { version = "1.0", features = ["derive"] } 17 18 anyhow = "1.0" ··· 19 20 tracing = "0.1" 20 21 tracing-subscriber = "0.3" 21 22 metrics = "0.23" 22 - reqwest = { version = "0.12", features = ["json"] } 23 + reqwest.workspace = true 23 24 url = "2.5" 24 25 rand = "0.8" 25 26 flume = "0.11" 26 27 async-trait = "0.1" 27 28 time = "0.3" 28 29 dotenvy = "0.15" 29 - tokio-tungstenite = "0.24" 30 + tokio-tungstenite.workspace = true 30 31 atrium-api = "0.25" 31 32 chrono = { version = "0.4", features = ["serde"] } 32 33 uuid = { version = "1.0", features = ["v4", "serde"] } 33 34 types = { path = "types" } 34 - rocketman = { path = "rocketman" } 35 + rocketman = "0.2.5" 35 36 36 37 # CAR and IPLD dependencies 37 38 iroh-car = "0.4"
+20
services/Cross.toml
··· 1 + [build.env] 2 + passthrough = [ 3 + "CARGO_HOME", 4 + "CARGO_TARGET_DIR", 5 + "SQLX_OFFLINE", 6 + "PKG_CONFIG_ALLOW_CROSS", 7 + ] 8 + 9 + [target.aarch64-unknown-linux-gnu] 10 + image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:main" 11 + 12 + [target.aarch64-unknown-linux-gnu.env] 13 + passthrough = ["CARGO_HOME", "CARGO_TARGET_DIR", "SQLX_OFFLINE"] 14 + # Allow cross-compilation of native dependencies 15 + PKG_CONFIG_ALLOW_CROSS = "1" 16 + # Use static linking to reduce runtime dependencies 17 + RUSTFLAGS = "-C target-feature=+crt-static -C link-arg=-s" 18 + # Disable problematic features that might require OpenSSL 19 + CC_aarch64_unknown_linux_gnu = "aarch64-linux-gnu-gcc" 20 + CXX_aarch64_unknown_linux_gnu = "aarch64-linux-gnu-g++"
+4 -6
services/cadet/src/ingestors/car/car_import.rs
··· 768 768 769 769 // Test that we can decode the records 770 770 for cid in importer.cids() { 771 - if let Ok(ipld) = importer.decode_cbor(&cid) { 772 - if let Ipld::Map(map) = &ipld { 773 - if let Some(Ipld::String(record_type)) = map.get("$type") { 774 - assert!(record_type.starts_with("fm.teal.alpha.")); 775 - println!("Found Teal record: {}", record_type); 776 - } 771 + if let Ok(Ipld::Map(map)) = importer.decode_cbor(&cid) { 772 + if let Some(Ipld::String(record_type)) = map.get("$type") { 773 + assert!(record_type.starts_with("fm.teal.alpha.")); 774 + println!("Found Teal record: {}", record_type); 777 775 } 778 776 } 779 777 }
-34
services/rocketman/Cargo.toml
··· 1 - [package] 2 - name = "rocketman" 3 - version = "0.2.3" 4 - edition = "2021" 5 - 6 - license = "MIT" 7 - authors = ["Natalie B. <nat@natalie.sh>"] 8 - repository = "https://github.com/espeon/cadet" 9 - 10 - readme = "readme.md" 11 - 12 - description = "A modular(ish) jetstream consumer." 13 - 14 - [dependencies] 15 - tokio.workspace = true 16 - tokio-tungstenite.workspace = true 17 - futures-util = "0.3" 18 - url.workspace = true 19 - rand.workspace = true 20 - tracing.workspace = true 21 - tracing-subscriber.workspace = true 22 - metrics.workspace = true 23 - derive_builder = "0.20.2" 24 - bon = "3.3.2" 25 - serde = { workspace = true, features = ["derive"] } 26 - serde_json.workspace = true 27 - flume.workspace = true 28 - anyhow.workspace = true 29 - async-trait.workspace = true 30 - zstd = { version = "0.13.3", optional = true } 31 - 32 - [features] 33 - default = ["zstd"] 34 - zstd = ["dep:zstd"]
-76
services/rocketman/examples/spew-bsky-posts.rs
··· 1 - use async_trait::async_trait; 2 - use rocketman::{ 3 - connection::JetstreamConnection, 4 - handler, 5 - ingestion::LexiconIngestor, 6 - options::JetstreamOptions, 7 - types::event::{Commit, Event}, 8 - }; 9 - use serde_json::Value; 10 - use std::{collections::HashMap, sync::Arc, sync::Mutex}; 11 - 12 - #[tokio::main] 13 - async fn main() { 14 - // init the builder 15 - let opts = JetstreamOptions::builder() 16 - // your EXACT nsids 17 - .wanted_collections(vec!["app.bsky.feed.post".to_string()]) 18 - .build(); 19 - // create the jetstream connector 20 - let jetstream = JetstreamConnection::new(opts); 21 - 22 - // create your ingestors 23 - let mut ingestors: HashMap<String, Box<dyn LexiconIngestor + Send + Sync>> = HashMap::new(); 24 - ingestors.insert( 25 - // your EXACT nsid 26 - "app.bsky.feed.post".to_string(), 27 - Box::new(MyCoolIngestor), 28 - ); 29 - 30 - // tracks the last message we've processed 31 - let cursor: Arc<Mutex<Option<u64>>> = Arc::new(Mutex::new(None)); 32 - 33 - // get channels 34 - let msg_rx = jetstream.get_msg_rx(); 35 - let reconnect_tx = jetstream.get_reconnect_tx(); 36 - 37 - // spawn a task to process messages from the queue. 38 - // this is a simple implementation, you can use a more complex one based on needs. 39 - let c_cursor = cursor.clone(); 40 - tokio::spawn(async move { 41 - while let Ok(message) = msg_rx.recv_async().await { 42 - if let Err(e) = 43 - handler::handle_message(message, &ingestors, reconnect_tx.clone(), c_cursor.clone()) 44 - .await 45 - { 46 - eprintln!("Error processing message: {}", e); 47 - }; 48 - } 49 - }); 50 - 51 - // connect to jetstream 52 - // retries internally, but may fail if there is an extreme error. 53 - if let Err(e) = jetstream.connect(cursor.clone()).await { 54 - eprintln!("Failed to connect to Jetstream: {}", e); 55 - std::process::exit(1); 56 - } 57 - } 58 - 59 - pub struct MyCoolIngestor; 60 - 61 - /// A cool ingestor implementation. Will just print the message. Does not do verification. 62 - #[async_trait] 63 - impl LexiconIngestor for MyCoolIngestor { 64 - async fn ingest(&self, message: Event<Value>) -> anyhow::Result<()> { 65 - if let Some(Commit { 66 - record: Some(record), 67 - .. 68 - }) = message.commit 69 - { 70 - if let Some(Value::String(text)) = record.get("text") { 71 - println!("{text:?}"); 72 - } 73 - } 74 - Ok(()) 75 - } 76 - }
-11
services/rocketman/package.json
··· 1 - { 2 - "name": "@repo/rocketman", 3 - "private": true, 4 - "scripts": { 5 - "build": "cargo build --release", 6 - "build:rust": "cargo build --release", 7 - "dev": "cargo watch -x 'run'", 8 - "test": "cargo test", 9 - "test:rust": "cargo test" 10 - } 11 - }
-74
services/rocketman/readme.md
··· 1 - ## Rocketman 2 - 3 - A modular(ish) jetstream consumer. Backed by Tungstenite. 4 - 5 - 6 - ### Installation 7 - ```toml 8 - [dependencies] 9 - rocketman = "latest" # pyt the latest version here 10 - tokio = { version = "1", features = ["macros", "rt-multi-thread"] } 11 - ``` 12 - ### Usage 13 - ```rs 14 - #[tokio::main] 15 - async fn main() { 16 - // init the builder 17 - let opts = JetstreamOptions::builder() 18 - // your EXACT nsids 19 - .wanted_collections(vec!["com.example.cool.nsid".to_string()]) 20 - .build(); 21 - // create the jetstream connector 22 - let jetstream = JetstreamConnection::new(opts); 23 - 24 - // create your ingestors 25 - let mut ingestors: HashMap<String, Box<dyn LexiconIngestor + Send + Sync>> = HashMap::new(); 26 - ingestors.insert( 27 - // your EXACT nsid 28 - "com.example.cool.nsid".to_string(), 29 - Box::new(MyCoolIngestor), 30 - ); 31 - 32 - 33 - // tracks the last message we've processed 34 - let cursor: Arc<Mutex<Option<u64>>> = Arc::new(Mutex::new(None)); 35 - 36 - // get channels 37 - let msg_rx = jetstream.get_msg_rx(); 38 - let reconnect_tx = jetstream.get_reconnect_tx(); 39 - 40 - // spawn a task to process messages from the queue. 41 - // this is a simple implementation, you can use a more complex one based on needs. 42 - let c_cursor = cursor.clone(); 43 - tokio::spawn(async move { 44 - while let Ok(message) = msg_rx.recv_async().await { 45 - if let Err(e) = 46 - handler::handle_message(message, &ingestors, reconnect_tx.clone(), c_cursor.clone()) 47 - .await 48 - { 49 - error!("Error processing message: {}", e); 50 - }; 51 - } 52 - }); 53 - 54 - // connect to jetstream 55 - // retries internally, but may fail if there is an extreme error. 56 - if let Err(e) = jetstream.connect(cursor.clone()).await { 57 - error!("Failed to connect to Jetstream: {}", e); 58 - std::process::exit(1); 59 - } 60 - } 61 - 62 - pub struct MyCoolIngestor; 63 - 64 - /// A cool ingestor implementation. Will just print the message. Does not do verification. 65 - impl LexiconIngestor for MyCoolIngestor { 66 - async fn ingest(&self, message: Event<Value>) -> Result<()> { 67 - info!("{:?}", message); 68 - // Process message for default lexicon. 69 - Ok(()) 70 - } 71 - } 72 - ``` 73 - ### gratz 74 - Based heavily on [phil's jetstream consumer on atcosm constellation.](https://github.com/atcosm/links/blob/main/constellation/src/consumer/jetstream.rs)
-335
services/rocketman/src/connection.rs
··· 1 - use flume::{Receiver, Sender}; 2 - use futures_util::StreamExt; 3 - use metrics::{counter, describe_counter, describe_histogram, histogram, Unit}; 4 - use std::cmp::{max, min}; 5 - use std::sync::{Arc, Mutex}; 6 - use std::time::Instant; 7 - use tokio::time::{sleep, Duration}; 8 - use tokio_tungstenite::{connect_async, tungstenite::Message}; 9 - use tracing::{error, info}; 10 - use url::Url; 11 - 12 - use crate::options::JetstreamOptions; 13 - use crate::time::system_time::SystemTimeProvider; 14 - use crate::time::TimeProvider; 15 - 16 - pub struct JetstreamConnection { 17 - pub opts: JetstreamOptions, 18 - reconnect_tx: flume::Sender<()>, 19 - reconnect_rx: flume::Receiver<()>, 20 - msg_tx: flume::Sender<Message>, 21 - msg_rx: flume::Receiver<Message>, 22 - } 23 - 24 - impl JetstreamConnection { 25 - pub fn new(opts: JetstreamOptions) -> Self { 26 - let (reconnect_tx, reconnect_rx) = flume::bounded(opts.bound); 27 - let (msg_tx, msg_rx) = flume::bounded(opts.bound); 28 - Self { 29 - opts, 30 - reconnect_tx, 31 - reconnect_rx, 32 - msg_tx, 33 - msg_rx, 34 - } 35 - } 36 - 37 - pub fn get_reconnect_tx(&self) -> Sender<()> { 38 - self.reconnect_tx.clone() 39 - } 40 - 41 - pub fn get_msg_rx(&self) -> Receiver<Message> { 42 - self.msg_rx.clone() 43 - } 44 - 45 - fn build_ws_url(&self, cursor: Arc<Mutex<Option<u64>>>) -> String { 46 - let mut url = Url::parse(&self.opts.ws_url.to_string()).unwrap(); 47 - 48 - // Append query params 49 - if let Some(ref cols) = self.opts.wanted_collections { 50 - for col in cols { 51 - url.query_pairs_mut().append_pair("wantedCollections", col); 52 - } 53 - } 54 - if let Some(ref dids) = self.opts.wanted_dids { 55 - for did in dids { 56 - url.query_pairs_mut().append_pair("wantedDids", did); 57 - } 58 - } 59 - if let Some(cursor) = cursor.lock().unwrap().as_ref() { 60 - url.query_pairs_mut() 61 - .append_pair("cursor", &cursor.to_string()); 62 - } 63 - #[cfg(feature = "zstd")] 64 - if self.opts.compress { 65 - url.query_pairs_mut().append_pair("compress", "true"); 66 - } 67 - 68 - url.to_string() 69 - } 70 - 71 - pub async fn connect( 72 - &self, 73 - cursor: Arc<Mutex<Option<u64>>>, 74 - ) -> Result<(), Box<dyn std::error::Error>> { 75 - describe_counter!( 76 - "jetstream.connection.attempt", 77 - Unit::Count, 78 - "attempts to connect to jetstream service" 79 - ); 80 - describe_counter!( 81 - "jetstream.connection.error", 82 - Unit::Count, 83 - "errors connecting to jetstream service" 84 - ); 85 - describe_histogram!( 86 - "jetstream.connection.duration", 87 - Unit::Seconds, 88 - "Time connected to jetstream service" 89 - ); 90 - describe_counter!( 91 - "jetstream.connection.reconnect", 92 - Unit::Count, 93 - "reconnects to jetstream service" 94 - ); 95 - let mut retry_interval = 1; 96 - 97 - let time_provider = SystemTimeProvider::new(); 98 - 99 - let mut start_time = time_provider.now(); 100 - 101 - loop { 102 - counter!("jetstream.connection.attempt").increment(1); 103 - info!("Connecting to {}", self.opts.ws_url); 104 - let start = Instant::now(); 105 - 106 - let ws_url = self.build_ws_url(cursor.clone()); 107 - 108 - match connect_async(ws_url).await { 109 - Ok((ws_stream, response)) => { 110 - let elapsed = start.elapsed(); 111 - info!("Connected. HTTP status: {}", response.status()); 112 - 113 - let (_, mut read) = ws_stream.split(); 114 - 115 - loop { 116 - // Inner loop to handle messages, reconnect signals, and receive timeout 117 - let receive_timeout = 118 - sleep(Duration::from_secs(self.opts.timeout_time_sec as u64)); 119 - tokio::pin!(receive_timeout); 120 - 121 - loop { 122 - tokio::select! { 123 - message_result = read.next() => { 124 - match message_result { 125 - Some(message) => { 126 - // Reset timeout on message received 127 - receive_timeout.as_mut().reset(tokio::time::Instant::now() + Duration::from_secs(self.opts.timeout_time_sec as u64)); 128 - 129 - histogram!("jetstream.connection.duration").record(elapsed.as_secs_f64()); 130 - match message { 131 - Ok(message) => { 132 - if let Err(err) = self.msg_tx.send_async(message).await { 133 - counter!("jetstream.error").increment(1); 134 - error!("Failed to queue message: {}", err); 135 - } 136 - } 137 - Err(e) => { 138 - counter!("jetstream.error").increment(1); 139 - error!("Error: {}", e); 140 - } 141 - } 142 - } 143 - None => { 144 - info!("Stream closed by server."); 145 - counter!("jetstream.connection.reconnect").increment(1); 146 - break; // Stream ended, break inner loop to reconnect 147 - } 148 - } 149 - } 150 - _ = self.reconnect_rx.recv_async() => { 151 - info!("Reconnect signal received."); 152 - counter!("jetstream.connection.reconnect").increment(1); 153 - break; 154 - } 155 - _ = &mut receive_timeout => { 156 - // last final poll, just in case 157 - match read.next().await { 158 - Some(Ok(message)) => { 159 - if let Err(err) = self.msg_tx.send_async(message).await { 160 - counter!("jetstream.error").increment(1); 161 - error!("Failed to queue message: {}", err); 162 - } 163 - // Reset timeout to continue 164 - receive_timeout.as_mut().reset(tokio::time::Instant::now() + Duration::from_secs(self.opts.timeout_time_sec as u64)); 165 - } 166 - Some(Err(e)) => { 167 - counter!("jetstream.error").increment(1); 168 - error!("Error receiving message during final poll: {}", e); 169 - counter!("jetstream.connection.reconnect").increment(1); 170 - break; 171 - } 172 - None => { 173 - info!("No commits received in {} seconds, reconnecting.", self.opts.timeout_time_sec); 174 - counter!("jetstream.connection.reconnect").increment(1); 175 - break; 176 - } 177 - } 178 - } 179 - } 180 - } 181 - } 182 - } 183 - Err(e) => { 184 - let elapsed_time = time_provider.elapsed(start_time); 185 - // reset if time connected > the time we set 186 - if elapsed_time.as_secs() > self.opts.max_retry_interval_seconds { 187 - retry_interval = 0; 188 - start_time = time_provider.now(); 189 - } 190 - counter!("jetstream.connection.error").increment(1); 191 - error!("Connection error: {}", e); 192 - } 193 - } 194 - 195 - let sleep_time = max(1, min(self.opts.max_retry_interval_seconds, retry_interval)); 196 - info!("Reconnecting in {} seconds...", sleep_time); 197 - sleep(Duration::from_secs(sleep_time)).await; 198 - 199 - if retry_interval > self.opts.max_retry_interval_seconds { 200 - retry_interval = self.opts.max_retry_interval_seconds; 201 - } else { 202 - retry_interval *= 2; 203 - } 204 - } 205 - } 206 - 207 - pub fn force_reconnect(&self) -> Result<(), flume::SendError<()>> { 208 - info!("Force reconnect requested."); 209 - self.reconnect_tx.send(()) // Send a reconnect signal 210 - } 211 - } 212 - 213 - #[cfg(test)] 214 - mod tests { 215 - use super::*; 216 - use std::sync::{Arc, Mutex}; 217 - use tokio::task; 218 - use tokio::time::{timeout, Duration}; 219 - use tokio_tungstenite::tungstenite::Message; 220 - 221 - #[test] 222 - fn test_build_ws_url() { 223 - let opts = JetstreamOptions { 224 - wanted_collections: Some(vec!["col1".to_string(), "col2".to_string()]), 225 - wanted_dids: Some(vec!["did1".to_string()]), 226 - ..Default::default() 227 - }; 228 - let connection = JetstreamConnection::new(opts); 229 - 230 - let test = Arc::new(Mutex::new(Some(8373))); 231 - 232 - let url = connection.build_ws_url(test); 233 - 234 - assert!(url.starts_with("wss://")); 235 - assert!(url.contains("cursor=8373")); 236 - assert!(url.contains("wantedCollections=col1")); 237 - assert!(url.contains("wantedCollections=col2")); 238 - assert!(url.contains("wantedDids=did1")); 239 - } 240 - 241 - #[tokio::test] 242 - async fn test_force_reconnect() { 243 - let opts = JetstreamOptions::default(); 244 - let connection = JetstreamConnection::new(opts); 245 - 246 - // Spawn a task to listen for the reconnect signal 247 - let reconnect_rx = connection.reconnect_rx.clone(); 248 - let recv_task = task::spawn(async move { 249 - reconnect_rx 250 - .recv_async() 251 - .await 252 - .expect("Failed to receive reconnect signal"); 253 - }); 254 - 255 - connection 256 - .force_reconnect() 257 - .expect("Failed to send reconnect signal"); 258 - 259 - // Ensure reconnect signal was received 260 - assert!(recv_task.await.is_ok()); 261 - } 262 - 263 - #[tokio::test] 264 - async fn test_message_queue() { 265 - let opts = JetstreamOptions::default(); 266 - let connection = JetstreamConnection::new(opts); 267 - 268 - let msg_rx = connection.get_msg_rx(); 269 - let msg = Message::Text("test message".into()); 270 - 271 - // Send a message to the queue 272 - connection 273 - .msg_tx 274 - .send_async(msg.clone()) 275 - .await 276 - .expect("Failed to send message"); 277 - 278 - // Receive and verify the message 279 - let received = msg_rx 280 - .recv_async() 281 - .await 282 - .expect("Failed to receive message"); 283 - assert_eq!(received, msg); 284 - } 285 - 286 - #[tokio::test] 287 - async fn test_connection_retries_on_failure() { 288 - let opts = JetstreamOptions::default(); 289 - let connection = Arc::new(JetstreamConnection::new(opts)); 290 - 291 - let cursor = Arc::new(Mutex::new(None)); 292 - 293 - // Timeout to prevent infinite loop 294 - let result = timeout(Duration::from_secs(3), connection.connect(cursor)).await; 295 - 296 - assert!(result.is_err(), "Expected timeout due to retry logic"); 297 - } 298 - 299 - #[tokio::test] 300 - async fn test_reconnect_after_receive_timeout() { 301 - use tokio::net::TcpListener; 302 - use tokio_tungstenite::accept_async; 303 - 304 - let opts = JetstreamOptions { 305 - ws_url: crate::endpoints::JetstreamEndpoints::Custom("ws://127.0.0.1:9001".to_string()), 306 - bound: 5, 307 - max_retry_interval_seconds: 1, 308 - ..Default::default() 309 - }; 310 - let connection = JetstreamConnection::new(opts); 311 - let cursor = Arc::new(Mutex::new(None)); 312 - 313 - // set up dummy "websocket" 314 - let listener = TcpListener::bind("127.0.0.1:9001") 315 - .await 316 - .expect("Failed to bind"); 317 - let server_handle = tokio::spawn(async move { 318 - if let Ok((stream, _)) = listener.accept().await { 319 - let ws_stream = accept_async(stream).await.expect("Failed to accept"); 320 - // send nothing 321 - tokio::time::sleep(Duration::from_secs(6)).await; 322 - drop(ws_stream); 323 - } 324 - }); 325 - 326 - // spawn, then run for >30 seconds to trigger reconnect 327 - let connect_handle = tokio::spawn(async move { 328 - tokio::time::timeout(Duration::from_secs(5), connection.connect(cursor)) 329 - .await 330 - .ok(); 331 - }); 332 - 333 - let _ = tokio::join!(server_handle, connect_handle); 334 - } 335 - }
-65
services/rocketman/src/endpoints.rs
··· 1 - use std::fmt::{Display, Formatter, Result}; 2 - 3 - #[derive(Debug, Clone, PartialEq, Eq, Hash)] 4 - pub enum JetstreamEndpointLocations { 5 - UsEast, 6 - UsWest, 7 - } 8 - 9 - impl Display for JetstreamEndpointLocations { 10 - fn fmt(&self, f: &mut Formatter<'_>) -> Result { 11 - write!( 12 - f, 13 - "{}", 14 - match self { 15 - Self::UsEast => "us-east", 16 - Self::UsWest => "us-west", 17 - } 18 - ) 19 - } 20 - } 21 - 22 - #[derive(Debug, Clone, PartialEq, Eq, Hash)] 23 - pub enum JetstreamEndpoints { 24 - Public(JetstreamEndpointLocations, i8), 25 - Custom(String), 26 - } 27 - 28 - impl Display for JetstreamEndpoints { 29 - fn fmt(&self, f: &mut Formatter<'_>) -> Result { 30 - match self { 31 - Self::Public(location, id) => write!( 32 - f, 33 - "wss://jetstream{}.{}.bsky.network/subscribe", 34 - id, location 35 - ), 36 - Self::Custom(url) => write!(f, "{}", url), 37 - } 38 - } 39 - } 40 - 41 - impl Default for JetstreamEndpoints { 42 - fn default() -> Self { 43 - Self::Public(JetstreamEndpointLocations::UsEast, 2) 44 - } 45 - } 46 - 47 - #[cfg(test)] 48 - mod tests { 49 - use super::*; 50 - 51 - #[test] 52 - fn test_display_public() { 53 - let endpoint = JetstreamEndpoints::Public(JetstreamEndpointLocations::UsEast, 2); 54 - assert_eq!( 55 - endpoint.to_string(), 56 - "wss://jetstream2.us-east.bsky.network/subscribe" 57 - ); 58 - } 59 - 60 - #[test] 61 - fn test_display_custom() { 62 - let endpoint = JetstreamEndpoints::Custom("wss://custom.bsky.network/subscribe".into()); 63 - assert_eq!(endpoint.to_string(), "wss://custom.bsky.network/subscribe"); 64 - } 65 - }
-1
services/rocketman/src/err.rs
··· 1 - // TODO: error types instead of using anyhow
-452
services/rocketman/src/handler.rs
··· 1 - use anyhow::Result; 2 - use flume::Sender; 3 - use metrics::{counter, describe_counter, Unit}; 4 - use serde_json::Value; 5 - use std::{ 6 - collections::HashMap, 7 - sync::{Arc, Mutex}, 8 - }; 9 - use tokio_tungstenite::tungstenite::{Error, Message}; 10 - use tracing::{debug, error}; 11 - 12 - #[cfg(feature = "zstd")] 13 - use std::io::Cursor as IoCursor; 14 - #[cfg(feature = "zstd")] 15 - use std::sync::LazyLock; 16 - #[cfg(feature = "zstd")] 17 - use zstd::dict::DecoderDictionary; 18 - 19 - use crate::{ 20 - ingestion::LexiconIngestor, 21 - types::event::{Event, Kind}, 22 - }; 23 - 24 - /// The custom `zstd` dictionary used for decoding compressed Jetstream messages. 25 - /// 26 - /// Sourced from the [official Bluesky Jetstream repo.](https://github.com/bluesky-social/jetstream/tree/main/pkg/models) 27 - #[cfg(feature = "zstd")] 28 - static ZSTD_DICTIONARY: LazyLock<DecoderDictionary> = 29 - LazyLock::new(|| DecoderDictionary::copy(include_bytes!("../zstd/dictionary"))); 30 - 31 - pub async fn handle_message( 32 - message: Message, 33 - ingestors: &HashMap<String, Box<dyn LexiconIngestor + Send + Sync>>, 34 - reconnect_tx: Sender<()>, 35 - cursor: Arc<Mutex<Option<u64>>>, 36 - ) -> Result<()> { 37 - describe_counter!( 38 - "jetstream.event", 39 - Unit::Count, 40 - "number of event ingest attempts" 41 - ); 42 - describe_counter!( 43 - "jetstream.event.parse", 44 - Unit::Count, 45 - "events that were successfully processed" 46 - ); 47 - describe_counter!( 48 - "jetstream.event.fail", 49 - Unit::Count, 50 - "events that could not be read" 51 - ); 52 - describe_counter!("jetstream.error", Unit::Count, "errors encountered"); 53 - match message { 54 - Message::Text(text) => { 55 - debug!("Text message received"); 56 - counter!("jetstream.event").increment(1); 57 - let envelope: Event<Value> = serde_json::from_str(&text).map_err(|e| { 58 - anyhow::anyhow!("Failed to parse message: {} with json string {}", e, text) 59 - })?; 60 - debug!("envelope: {:?}", envelope); 61 - handle_envelope(envelope, cursor, ingestors).await?; 62 - Ok(()) 63 - } 64 - #[cfg(feature = "zstd")] 65 - Message::Binary(bytes) => { 66 - debug!("Binary message received"); 67 - counter!("jetstream.event").increment(1); 68 - let decoder = zstd::stream::Decoder::with_prepared_dictionary( 69 - IoCursor::new(bytes), 70 - &ZSTD_DICTIONARY, 71 - )?; 72 - let envelope: Event<Value> = serde_json::from_reader(decoder) 73 - .map_err(|e| anyhow::anyhow!("Failed to parse binary message: {}", e))?; 74 - debug!("envelope: {:?}", envelope); 75 - handle_envelope(envelope, cursor, ingestors).await?; 76 - Ok(()) 77 - } 78 - #[cfg(not(feature = "zstd"))] 79 - Message::Binary(_) => { 80 - debug!("Binary message received"); 81 - Err(anyhow::anyhow!( 82 - "binary message received but zstd feature is not enabled" 83 - )) 84 - } 85 - Message::Close(_) => { 86 - debug!("Server closed connection"); 87 - if let Err(e) = reconnect_tx.send(()) { 88 - counter!("jetstream.event.parse.error", "error" => "failed_to_send_reconnect_signal").increment(1); 89 - error!("Failed to send reconnect signal: {}", e); 90 - } 91 - Err(Error::ConnectionClosed.into()) 92 - } 93 - _ => Ok(()), 94 - } 95 - } 96 - 97 - async fn handle_envelope( 98 - envelope: Event<Value>, 99 - cursor: Arc<Mutex<Option<u64>>>, 100 - ingestors: &HashMap<String, Box<dyn LexiconIngestor + Send + Sync>>, 101 - ) -> Result<()> { 102 - if let Some(ref time_us) = envelope.time_us { 103 - debug!("Time: {}", time_us); 104 - if let Some(cursor) = cursor.lock().unwrap().as_mut() { 105 - debug!("Cursor: {}", cursor); 106 - if time_us > cursor { 107 - debug!("Cursor is behind, resetting"); 108 - *cursor = *time_us; 109 - } 110 - } 111 - } 112 - 113 - match envelope.kind { 114 - Kind::Commit => match extract_commit_nsid(&envelope) { 115 - Ok(nsid) => { 116 - if let Some(fun) = ingestors.get(&nsid) { 117 - match fun.ingest(envelope).await { 118 - Ok(_) => { 119 - counter!("jetstream.event.parse.commit", "nsid" => nsid).increment(1) 120 - } 121 - Err(e) => { 122 - error!("Error ingesting commit with nsid {}: {}", nsid, e); 123 - counter!("jetstream.error").increment(1); 124 - counter!("jetstream.event.fail").increment(1); 125 - } 126 - } 127 - } 128 - } 129 - Err(e) => error!("Error parsing commit: {}", e), 130 - }, 131 - Kind::Identity => { 132 - counter!("jetstream.event.parse.identity").increment(1); 133 - } 134 - Kind::Account => { 135 - counter!("jetstream.event.parse.account").increment(1); 136 - } 137 - Kind::Unknown(kind) => { 138 - counter!("jetstream.event.parse.unknown", "kind" => kind).increment(1); 139 - } 140 - } 141 - Ok(()) 142 - } 143 - 144 - fn extract_commit_nsid(envelope: &Event<Value>) -> anyhow::Result<String> { 145 - // if the type is not a commit 146 - if envelope.commit.is_none() { 147 - return Err(anyhow::anyhow!( 148 - "Message has no commit, so there is no nsid attached." 149 - )); 150 - } else if let Some(ref commit) = envelope.commit { 151 - return Ok(commit.collection.clone()); 152 - } 153 - 154 - Err(anyhow::anyhow!("Failed to extract nsid: unknown error")) 155 - } 156 - 157 - #[cfg(test)] 158 - mod tests { 159 - use super::*; 160 - use crate::types::event::Event; 161 - use anyhow::Result; 162 - use async_trait::async_trait; 163 - use flume::{Receiver, Sender}; 164 - use serde_json::json; 165 - use std::{ 166 - collections::HashMap, 167 - sync::{Arc, Mutex}, 168 - }; 169 - use tokio_tungstenite::tungstenite::Message; 170 - 171 - // Dummy ingestor that records if it was called. 172 - struct DummyIngestor { 173 - pub called: Arc<Mutex<bool>>, 174 - } 175 - 176 - #[async_trait] 177 - impl crate::ingestion::LexiconIngestor for DummyIngestor { 178 - async fn ingest(&self, _event: Event<serde_json::Value>) -> Result<(), anyhow::Error> { 179 - let mut called = self.called.lock().unwrap(); 180 - *called = true; 181 - Ok(()) 182 - } 183 - } 184 - 185 - // Dummy ingestor that always returns an error. 186 - struct ErrorIngestor; 187 - 188 - #[async_trait] 189 - impl crate::ingestion::LexiconIngestor for ErrorIngestor { 190 - async fn ingest(&self, _event: Event<serde_json::Value>) -> Result<(), anyhow::Error> { 191 - Err(anyhow::anyhow!("Ingest error")) 192 - } 193 - } 194 - 195 - // Helper to create a reconnect channel. 196 - fn setup_reconnect_channel() -> (Sender<()>, Receiver<()>) { 197 - flume::unbounded() 198 - } 199 - 200 - #[tokio::test] 201 - async fn test_valid_commit_success() { 202 - let (reconnect_tx, _reconnect_rx) = setup_reconnect_channel(); 203 - let cursor = Arc::new(Mutex::new(Some(100))); 204 - let called_flag = Arc::new(Mutex::new(false)); 205 - 206 - // Create a valid commit event JSON. 207 - let event_json = json!({ 208 - "did": "did:example:123", 209 - "time_us": 200, 210 - "kind": "commit", 211 - "commit": { 212 - "rev": "1", 213 - "operation": "create", 214 - "collection": "ns1", 215 - "rkey": "rkey1", 216 - "record": { "foo": "bar" }, 217 - "cid": "cid123" 218 - }, 219 - }) 220 - .to_string(); 221 - 222 - let mut ingestors: HashMap< 223 - String, 224 - Box<dyn crate::ingestion::LexiconIngestor + Send + Sync>, 225 - > = HashMap::new(); 226 - ingestors.insert( 227 - "ns1".to_string(), 228 - Box::new(DummyIngestor { 229 - called: called_flag.clone(), 230 - }), 231 - ); 232 - 233 - let result = handle_message( 234 - Message::Text(event_json), 235 - &ingestors, 236 - reconnect_tx, 237 - cursor.clone(), 238 - ) 239 - .await; 240 - assert!(result.is_ok()); 241 - // Check that the ingestor was called. 242 - assert!(*called_flag.lock().unwrap()); 243 - // Verify that the cursor got updated. 244 - assert_eq!(*cursor.lock().unwrap(), Some(200)); 245 - } 246 - 247 - #[cfg(feature = "zstd")] 248 - #[tokio::test] 249 - async fn test_binary_valid_commit() { 250 - let (reconnect_tx, _reconnect_rx) = setup_reconnect_channel(); 251 - let cursor = Arc::new(Mutex::new(Some(100))); 252 - let called_flag = Arc::new(Mutex::new(false)); 253 - 254 - let uncompressed_json = json!({ 255 - "did": "did:example:123", 256 - "time_us": 200, 257 - "kind": "commit", 258 - "commit": { 259 - "rev": "1", 260 - "operation": "create", 261 - "collection": "ns1", 262 - "rkey": "rkey1", 263 - "record": { "foo": "bar" }, 264 - "cid": "cid123" 265 - }, 266 - }) 267 - .to_string(); 268 - 269 - let compressed_dest: IoCursor<Vec<u8>> = IoCursor::new(vec![]); 270 - let mut encoder = zstd::Encoder::with_prepared_dictionary( 271 - compressed_dest, 272 - &zstd::dict::EncoderDictionary::copy(include_bytes!("../zstd/dictionary"), 0), 273 - ) 274 - .unwrap(); 275 - std::io::copy( 276 - &mut IoCursor::new(uncompressed_json.as_bytes()), 277 - &mut encoder, 278 - ) 279 - .unwrap(); 280 - let compressed_dest = encoder.finish().unwrap(); 281 - 282 - let mut ingestors: HashMap< 283 - String, 284 - Box<dyn crate::ingestion::LexiconIngestor + Send + Sync>, 285 - > = HashMap::new(); 286 - ingestors.insert( 287 - "ns1".to_string(), 288 - Box::new(DummyIngestor { 289 - called: called_flag.clone(), 290 - }), 291 - ); 292 - 293 - let result = handle_message( 294 - Message::Binary(compressed_dest.into_inner()), 295 - &ingestors, 296 - reconnect_tx, 297 - cursor.clone(), 298 - ) 299 - .await; 300 - 301 - assert!(result.is_ok()); 302 - // Check that the ingestor was called. 303 - assert!(*called_flag.lock().unwrap()); 304 - // Verify that the cursor got updated. 305 - assert_eq!(*cursor.lock().unwrap(), Some(200)); 306 - } 307 - 308 - #[tokio::test] 309 - async fn test_commit_ingest_failure() { 310 - let (reconnect_tx, _reconnect_rx) = setup_reconnect_channel(); 311 - let cursor = Arc::new(Mutex::new(Some(100))); 312 - 313 - // Valid commit event with an ingestor that fails. 314 - let event_json = json!({ 315 - "did": "did:example:123", 316 - "time_us": 300, 317 - "kind": "commit", 318 - "commit": { 319 - "rev": "1", 320 - "operation": "create", 321 - "collection": "ns_error", 322 - "rkey": "rkey1", 323 - "record": { "foo": "bar" }, 324 - "cid": "cid123" 325 - }, 326 - "identity": null 327 - }) 328 - .to_string(); 329 - 330 - let mut ingestors: HashMap< 331 - String, 332 - Box<dyn crate::ingestion::LexiconIngestor + Send + Sync>, 333 - > = HashMap::new(); 334 - ingestors.insert("ns_error".to_string(), Box::new(ErrorIngestor)); 335 - 336 - // Even though ingestion fails, handle_message returns Ok(()). 337 - let result = handle_message( 338 - Message::Text(event_json), 339 - &ingestors, 340 - reconnect_tx, 341 - cursor.clone(), 342 - ) 343 - .await; 344 - assert!(result.is_ok()); 345 - // Cursor should still update because it comes before the ingest call. 346 - assert_eq!(*cursor.lock().unwrap(), Some(300)); 347 - } 348 - 349 - #[tokio::test] 350 - async fn test_identity_message() { 351 - let (reconnect_tx, _reconnect_rx) = setup_reconnect_channel(); 352 - let cursor = Arc::new(Mutex::new(None)); 353 - // Valid identity event. 354 - let event_json = json!({ 355 - "did": "did:example:123", 356 - "time_us": 150, 357 - "kind": "identity", 358 - "commit": null, 359 - "identity": { 360 - "did": "did:example:123", 361 - "handle": "user", 362 - "seq": 1, 363 - "time": "2025-01-01T00:00:00Z" 364 - } 365 - }) 366 - .to_string(); 367 - let ingestors: HashMap<String, Box<dyn crate::ingestion::LexiconIngestor + Send + Sync>> = 368 - HashMap::new(); 369 - 370 - let result = 371 - handle_message(Message::Text(event_json), &ingestors, reconnect_tx, cursor).await; 372 - assert!(result.is_ok()); 373 - } 374 - 375 - #[tokio::test] 376 - async fn test_close_message() { 377 - let (reconnect_tx, reconnect_rx) = setup_reconnect_channel(); 378 - let cursor = Arc::new(Mutex::new(None)); 379 - let ingestors: HashMap<String, Box<dyn crate::ingestion::LexiconIngestor + Send + Sync>> = 380 - HashMap::new(); 381 - 382 - let result = handle_message(Message::Close(None), &ingestors, reconnect_tx, cursor).await; 383 - // Should return an error due to connection close. 384 - assert!(result.is_err()); 385 - // Verify that a reconnect signal was sent. 386 - let signal = reconnect_rx.recv_async().await; 387 - assert!(signal.is_ok()); 388 - } 389 - 390 - #[tokio::test] 391 - async fn test_invalid_json() { 392 - let (reconnect_tx, _reconnect_rx) = setup_reconnect_channel(); 393 - let cursor = Arc::new(Mutex::new(None)); 394 - let ingestors: HashMap<String, Box<dyn crate::ingestion::LexiconIngestor + Send + Sync>> = 395 - HashMap::new(); 396 - 397 - let invalid_json = "this is not json".to_string(); 398 - let result = handle_message( 399 - Message::Text(invalid_json), 400 - &ingestors, 401 - reconnect_tx, 402 - cursor, 403 - ) 404 - .await; 405 - assert!(result.is_err()); 406 - } 407 - 408 - #[tokio::test] 409 - async fn test_cursor_not_updated_if_lower() { 410 - let (reconnect_tx, _reconnect_rx) = setup_reconnect_channel(); 411 - // Set an initial cursor value. 412 - let cursor = Arc::new(Mutex::new(Some(300))); 413 - let event_json = json!({ 414 - "did": "did:example:123", 415 - "time_us": 200, 416 - "kind": "commit", 417 - "commit": { 418 - "rev": "1", 419 - "operation": "create", 420 - "collection": "ns1", 421 - "rkey": "rkey1", 422 - "record": { "foo": "bar" }, 423 - "cid": "cid123" 424 - }, 425 - "identity": null 426 - }) 427 - .to_string(); 428 - 429 - // Use a dummy ingestor that does nothing. 430 - let mut ingestors: HashMap< 431 - String, 432 - Box<dyn crate::ingestion::LexiconIngestor + Send + Sync>, 433 - > = HashMap::new(); 434 - ingestors.insert( 435 - "ns1".to_string(), 436 - Box::new(DummyIngestor { 437 - called: Arc::new(Mutex::new(false)), 438 - }), 439 - ); 440 - 441 - let result = handle_message( 442 - Message::Text(event_json), 443 - &ingestors, 444 - reconnect_tx, 445 - cursor.clone(), 446 - ) 447 - .await; 448 - assert!(result.is_ok()); 449 - // Cursor should remain unchanged. 450 - assert_eq!(*cursor.lock().unwrap(), Some(300)); 451 - } 452 - }
-22
services/rocketman/src/ingestion.rs
··· 1 - use anyhow::Result; 2 - use async_trait::async_trait; 3 - use serde_json::Value; 4 - use tracing::info; 5 - 6 - use crate::types::event::Event; 7 - 8 - #[async_trait] 9 - pub trait LexiconIngestor { 10 - async fn ingest(&self, message: Event<Value>) -> Result<()>; 11 - } 12 - 13 - pub struct DefaultLexiconIngestor; 14 - 15 - #[async_trait] 16 - impl LexiconIngestor for DefaultLexiconIngestor { 17 - async fn ingest(&self, message: Event<Value>) -> Result<()> { 18 - info!("Default lexicon processing: {:?}", message); 19 - // Process message for default lexicon. 20 - Ok(()) 21 - } 22 - }
-8
services/rocketman/src/lib.rs
··· 1 - // lib.rs 2 - pub mod connection; 3 - pub mod endpoints; 4 - pub mod handler; 5 - pub mod ingestion; 6 - pub mod options; 7 - pub mod time; 8 - pub mod types;
-40
services/rocketman/src/options.rs
··· 1 - use bon::Builder; 2 - 3 - use crate::endpoints::JetstreamEndpoints; 4 - 5 - #[derive(Builder, Debug)] 6 - pub struct JetstreamOptions { 7 - #[builder(default)] 8 - pub ws_url: JetstreamEndpoints, 9 - #[builder(default)] 10 - pub max_retry_interval_seconds: u64, 11 - #[builder(default)] 12 - pub connection_success_time_seconds: u64, 13 - #[builder(default)] 14 - pub bound: usize, 15 - #[builder(default)] 16 - pub timeout_time_sec: usize, 17 - #[cfg(feature = "zstd")] 18 - #[builder(default = true)] 19 - pub compress: bool, 20 - pub wanted_collections: Option<Vec<String>>, 21 - pub wanted_dids: Option<Vec<String>>, 22 - pub cursor: Option<String>, 23 - } 24 - 25 - impl Default for JetstreamOptions { 26 - fn default() -> Self { 27 - Self { 28 - ws_url: JetstreamEndpoints::default(), 29 - max_retry_interval_seconds: 120, 30 - connection_success_time_seconds: 60, 31 - bound: 65536, 32 - timeout_time_sec: 40, 33 - #[cfg(feature = "zstd")] 34 - compress: true, 35 - wanted_collections: None, 36 - wanted_dids: None, 37 - cursor: None, 38 - } 39 - } 40 - }
-11
services/rocketman/src/time/mod.rs
··· 1 - use std::time::{Duration, Instant, SystemTime}; 2 - 3 - pub mod system_time; 4 - 5 - pub trait TimeProvider { 6 - fn new() -> Self; 7 - fn now(&self) -> SystemTime; // Get the current time 8 - fn elapsed(&self, earlier: SystemTime) -> Duration; // Calculate the elapsed time. 9 - fn instant_now(&self) -> Instant; // For compatibility with your existing code (if needed) 10 - fn instant_elapsed(&self, earlier: Instant) -> Duration; 11 - }
-28
services/rocketman/src/time/system_time.rs
··· 1 - use std::time::{Duration, Instant, SystemTime}; 2 - 3 - use super::TimeProvider; 4 - 5 - #[derive(Default, Clone, Copy)] // Add these derives for ease of use 6 - pub struct SystemTimeProvider; // No fields needed, just a marker type 7 - 8 - impl TimeProvider for SystemTimeProvider { 9 - fn new() -> Self { 10 - Self 11 - } 12 - 13 - fn now(&self) -> SystemTime { 14 - SystemTime::now() 15 - } 16 - 17 - fn elapsed(&self, earlier: SystemTime) -> Duration { 18 - earlier.elapsed().unwrap_or_else(|_| Duration::from_secs(0)) 19 - } 20 - 21 - fn instant_now(&self) -> Instant { 22 - Instant::now() 23 - } 24 - 25 - fn instant_elapsed(&self, earlier: Instant) -> Duration { 26 - earlier.elapsed() 27 - } 28 - }
-116
services/rocketman/src/types/event.rs
··· 1 - use serde::{Deserialize, Deserializer, Serialize}; 2 - 3 - #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] 4 - #[serde(rename_all = "lowercase")] 5 - pub enum Kind { 6 - Account, 7 - Identity, 8 - Commit, 9 - Unknown(String), 10 - } 11 - 12 - #[derive(Debug, Serialize, Deserialize)] 13 - #[serde(rename_all = "snake_case")] 14 - pub struct Event<T> { 15 - pub did: String, 16 - pub time_us: Option<u64>, 17 - pub kind: Kind, 18 - pub commit: Option<Commit<T>>, 19 - pub identity: Option<Identity>, 20 - } 21 - 22 - #[derive(Debug, Serialize, Deserialize)] 23 - pub struct Identity { 24 - did: String, 25 - handle: Option<String>, 26 - seq: u64, 27 - time: String, 28 - } 29 - 30 - #[derive(Debug, Serialize, Deserialize)] 31 - #[serde(rename_all = "lowercase")] 32 - enum AccountStatus { 33 - TakenDown, 34 - Suspended, 35 - Deleted, 36 - Activated, 37 - } 38 - 39 - #[derive(Debug, Serialize, Deserialize)] 40 - pub struct Account { 41 - did: String, 42 - handle: String, 43 - seq: u64, 44 - time: String, 45 - status: AccountStatus, 46 - } 47 - 48 - #[derive(Debug, Serialize)] 49 - #[serde(rename_all = "camelCase")] 50 - pub struct Commit<T> { 51 - pub rev: String, 52 - pub operation: Operation, 53 - pub collection: String, 54 - pub rkey: String, 55 - pub record: Option<T>, 56 - pub cid: Option<String>, 57 - } 58 - 59 - #[derive(Debug, Serialize, Deserialize)] 60 - #[serde(rename_all = "lowercase")] 61 - pub enum Operation { 62 - Create, 63 - Update, 64 - Delete, 65 - } 66 - 67 - /// Enforce that record is None only when operation is 'delete' 68 - impl<'de, T> Deserialize<'de> for Commit<T> 69 - where 70 - T: Deserialize<'de>, 71 - { 72 - fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> 73 - where 74 - D: Deserializer<'de>, 75 - { 76 - // Helper struct to perform the deserialization. 77 - #[derive(Deserialize)] 78 - #[serde(rename_all = "camelCase")] 79 - struct Helper<T> { 80 - rev: String, 81 - operation: Operation, 82 - collection: String, 83 - rkey: String, 84 - record: Option<T>, 85 - cid: Option<String>, 86 - } 87 - 88 - let helper = Helper::deserialize(deserializer)?; 89 - 90 - match helper.operation { 91 - Operation::Delete => { 92 - if helper.record.is_some() || helper.cid.is_some() { 93 - return Err(<D::Error as serde::de::Error>::custom( 94 - "record and cid must be null when operation is delete", 95 - )); 96 - } 97 - } 98 - _ => { 99 - if helper.record.is_none() || helper.cid.is_none() { 100 - return Err(<D::Error as serde::de::Error>::custom( 101 - "record and cid must be present unless operation is delete", 102 - )); 103 - } 104 - } 105 - } 106 - 107 - Ok(Commit { 108 - rev: helper.rev, 109 - operation: helper.operation, 110 - collection: helper.collection, 111 - rkey: helper.rkey, 112 - record: helper.record, 113 - cid: helper.cid, 114 - }) 115 - } 116 - }
-1
services/rocketman/src/types/mod.rs
··· 1 - pub mod event;
services/rocketman/zstd/dictionary

This is a binary file and will not be displayed.

+1 -1
tools/teal-cli/src/main.rs
··· 31 31 force: bool, 32 32 33 33 /// Output format: json, multibase, or files 34 - #[arg(short, long, default_value = "files")] 34 + #[arg(long, default_value = "files")] 35 35 format: String, 36 36 }, 37 37