···
on:
  workflow_dispatch:
  push:
+    branches:
+      - main
+    tags:
+      - 'v*'

env:
  REGISTRY: ghcr.io
···

jobs:
  build-and-push-image:
-    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        include:
+          - arch: amd64
+            runner: ubuntu-latest
+          - arch: arm64
+            runner: ubuntu-24.04-arm
+    runs-on: ${{ matrix.runner }}
    # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
    permissions:
      contents: read
      packages: write
      attestations: write
      id-token: write
-    #
+    outputs:
+      digest-amd64: ${{ matrix.arch == 'amd64' && steps.push.outputs.digest || '' }}
+      digest-arm64: ${{ matrix.arch == 'arm64' && steps.push.outputs.digest || '' }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Log in to the Container registry
-        uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
+        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
      - name: Extract metadata (tags, labels) for Docker
        id: meta
···
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
-            type=sha
-            type=sha,format=long
+            type=raw,value=latest,enable={{is_default_branch}},suffix=-${{ matrix.arch }}
+            type=sha,suffix=-${{ matrix.arch }}
+            type=sha,format=long,suffix=-${{ matrix.arch }}
+            type=semver,pattern={{version}},suffix=-${{ matrix.arch }}
+            type=semver,pattern={{major}}.{{minor}},suffix=-${{ matrix.arch }}

      # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
      # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
      # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
      - name: Build and push Docker image
        id: push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

+  publish-manifest:
+    needs: build-and-push-image
+    runs-on: ubuntu-latest
+    permissions:
+      packages: write
+      attestations: write
+      id-token: write
+    steps:
+      - name: Log in to the Container registry
+        uses: docker/login-action@v3
+        with:
+          registry: ${{ env.REGISTRY }}
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}

+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          tags: |
+            type=raw,value=latest,enable={{is_default_branch}}
+            type=sha
+            type=sha,format=long
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}

+      - name: Create and push manifest
+        run: |
+          # Split tags into an array
+          readarray -t tags <<< "${{ steps.meta.outputs.tags }}"

+          # Create and push manifest for each tag
+          for tag in "${tags[@]}"; do
+            docker buildx imagetools create -t "$tag" \
+              "${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}@${{ needs.build-and-push-image.outputs.digest-amd64 }}" \
+              "${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}@${{ needs.build-and-push-image.outputs.digest-arm64 }}"
+          done

      # This step generates an artifact attestation for the image, which is an unforgeable statement about where and how it was built. It increases supply chain security for people who consume the image. For more information, see "[AUTOTITLE](/actions/security-guides/using-artifact-attestations-to-establish-provenance-for-builds)."
      - name: Generate artifact attestation
        uses: actions/attest-build-provenance@v1
        with:
          subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME}}
-          subject-digest: ${{ steps.push.outputs.digest }}
-          push-to-registry: true
+          subject-digest: ${{ needs.build-and-push-image.outputs.digest-amd64 }}
+          push-to-registry: true
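Once both jobs have run, each tag pushed by the manifest job should resolve to a manifest list that references the per-architecture images by digest. As a quick sanity check (a sketch only; substitute the actual `ghcr.io/<owner>/<image>` name your `IMAGE_NAME` resolves to):

```bash
# Inspect the manifest list behind a tag; the output should include
# entries for both linux/amd64 and linux/arm64.
docker buildx imagetools inspect ghcr.io/<owner>/<image>:latest
```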
···
# Cocoon

> [!WARNING]
-You should not use this PDS. You should not rely on this code as a reference for a PDS implementation. You should not trust this code. Using this PDS implementation may result in data loss, corruption, etc.
+I migrated and have been running my main account on this PDS for months now without issue; however, I am still not responsible if things go awry, particularly during account migration. Please use caution.

Cocoon is a PDS implementation in Go. It is highly experimental, and is not ready for any production use.

+## Quick Start with Docker Compose

+### Prerequisites

+- Docker and Docker Compose installed
+- A domain name pointing to your server (for automatic HTTPS)
+- Ports 80 and 443 open in your firewall (e.g. UFW)

+### Installation

+1. **Clone the repository**
+   ```bash
+   git clone https://github.com/haileyok/cocoon.git
+   cd cocoon
+   ```

+2. **Create your configuration file**
+   ```bash
+   cp .env.example .env
+   ```

+3. **Edit `.env` with your settings**

+   Required settings:
+   ```bash
+   COCOON_DID="did:web:your-domain.com"
+   COCOON_HOSTNAME="your-domain.com"
+   COCOON_CONTACT_EMAIL="you@example.com"
+   COCOON_RELAYS="https://bsky.network"

+   # Generate with: openssl rand -hex 16
+   COCOON_ADMIN_PASSWORD="your-secure-password"

+   # Generate with: openssl rand -hex 32
+   COCOON_SESSION_SECRET="your-session-secret"
+   ```

+4. **Start the services**
+   ```bash
+   # Pull pre-built image from GitHub Container Registry
+   docker-compose pull
+   docker-compose up -d
+   ```

+   Or build locally:
+   ```bash
+   docker-compose build
+   docker-compose up -d
+   ```

+   **For PostgreSQL deployment:**
+   ```bash
+   # Add POSTGRES_PASSWORD to your .env file first!
+   docker-compose -f docker-compose.postgres.yaml up -d
+   ```

+5. **Get your invite code**

+   On first run, an invite code is automatically created. View it with:
+   ```bash
+   docker-compose logs create-invite
+   ```

+   Or check the saved file:
+   ```bash
+   cat keys/initial-invite-code.txt
+   ```

+   **IMPORTANT**: Save this invite code! You'll need it to create your first account.

+6. **Monitor the services**
+   ```bash
+   docker-compose logs -f
+   ```

+### What Gets Set Up

+The Docker Compose setup includes:

+- **init-keys**: Automatically generates cryptographic keys (rotation key and JWK) on first run
+- **cocoon**: The main PDS service running on port 8080
+- **create-invite**: Automatically creates an initial invite code after Cocoon starts (first run only)
+- **caddy**: Reverse proxy with automatic HTTPS via Let's Encrypt

+### Data Persistence

+The following directories will be created automatically:

+- `./keys/` - Cryptographic keys (generated automatically)
+  - `rotation.key` - PDS rotation key
+  - `jwk.key` - JWK private key
+  - `initial-invite-code.txt` - Your first invite code (first run only)
+- `./data/` - SQLite database and blockstore
+- Docker volumes for Caddy configuration and certificates
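Because all of this state lives in bind mounts next to the compose file, a manual cold backup can be as simple as archiving those directories. A minimal sketch (assuming the default SQLite setup and that you run it from the repository root; stopping the stack first avoids copying the database mid-write):

```bash
# Stop the stack, archive the keys and data directories, then restart.
docker-compose down
tar czf "cocoon-backup-$(date +%F).tar.gz" keys/ data/
docker-compose up -d
```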

+### Optional Configuration

+#### Database Configuration

+By default, Cocoon uses SQLite which requires no additional setup. For production deployments with higher traffic, you can use PostgreSQL:

+```bash
+# Database type: sqlite (default) or postgres
+COCOON_DB_TYPE="postgres"

+# PostgreSQL connection string (required if db-type is postgres)
+# Format: postgres://user:password@host:port/database?sslmode=disable
+COCOON_DATABASE_URL="postgres://cocoon:password@localhost:5432/cocoon?sslmode=disable"

+# Or use the standard DATABASE_URL environment variable
+DATABASE_URL="postgres://cocoon:password@localhost:5432/cocoon?sslmode=disable"
+```

+For SQLite (default):
+```bash
+COCOON_DB_TYPE="sqlite"
+COCOON_DB_NAME="/data/cocoon/cocoon.db"
+```

+> **Note**: When using PostgreSQL, database backups to S3 are not handled by Cocoon. Use `pg_dump` or your database provider's backup solution instead.
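For example, a one-off dump against the compose-managed database could look like the sketch below (assuming the PostgreSQL service in `docker-compose.postgres.yaml` is named `postgres` and uses the `cocoon` user and database from the connection string above; adjust to match your compose file):

```bash
# Dump the cocoon database to a local SQL file; -T disables the TTY so
# the shell redirect works.
docker-compose -f docker-compose.postgres.yaml exec -T postgres \
  pg_dump -U cocoon cocoon > "cocoon-$(date +%F).sql"
```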

+#### SMTP Email Settings
+```bash
+COCOON_SMTP_USER="your-smtp-username"
+COCOON_SMTP_PASS="your-smtp-password"
+COCOON_SMTP_HOST="smtp.example.com"
+COCOON_SMTP_PORT="587"
+COCOON_SMTP_EMAIL="noreply@example.com"
+COCOON_SMTP_NAME="Cocoon PDS"
+```

+#### S3 Storage

+Cocoon supports S3-compatible storage for both database backups (SQLite only) and blob storage (images, videos, etc.):

+```bash
+# Enable S3 backups (SQLite databases only - hourly backups)
+COCOON_S3_BACKUPS_ENABLED=true

+# Enable S3 for blob storage (images, videos, etc.)
+# When enabled, blobs are stored in S3 instead of the database
+COCOON_S3_BLOBSTORE_ENABLED=true

+# S3 configuration (works with AWS S3, MinIO, Cloudflare R2, etc.)
+COCOON_S3_REGION="us-east-1"
+COCOON_S3_BUCKET="your-bucket"
+COCOON_S3_ENDPOINT="https://s3.amazonaws.com"
+COCOON_S3_ACCESS_KEY="your-access-key"
+COCOON_S3_SECRET_KEY="your-secret-key"

+# Optional: CDN/public URL for blob redirects
+# When set, com.atproto.sync.getBlob redirects to this URL instead of proxying
+COCOON_S3_CDN_URL="https://cdn.example.com"
+```

+**Blob Storage Options:**
+- `COCOON_S3_BLOBSTORE_ENABLED=false` (default): Blobs stored in the database
+- `COCOON_S3_BLOBSTORE_ENABLED=true`: Blobs stored in S3 bucket under `blobs/{did}/{cid}`

+**Blob Serving Options:**
+- Without `COCOON_S3_CDN_URL`: Blobs are proxied through the PDS server
+- With `COCOON_S3_CDN_URL`: `getBlob` returns a 302 redirect to `{CDN_URL}/blobs/{did}/{cid}`

+> **Tip**: For Cloudflare R2, you can use the public bucket URL as the CDN URL. For AWS S3, you can use CloudFront or the S3 bucket URL directly if public access is enabled.
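One rough way to confirm which serving mode is active is to request a blob through `com.atproto.sync.getBlob` and look at the response status. A sketch only (substitute your hostname and a real `did`/`cid` pair):

```bash
# Prints the HTTP status and any redirect target. Expect 200 when blobs are
# proxied, or 302 with {CDN_URL}/blobs/{did}/{cid} when COCOON_S3_CDN_URL is set.
curl -s -o /dev/null -w '%{http_code} %{redirect_url}\n' \
  "https://your-domain.com/xrpc/com.atproto.sync.getBlob?did=did:plc:xxx&cid=bafyrei..."
```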

+### Management Commands

+Create an invite code:
+```bash
+docker exec cocoon-pds /cocoon create-invite-code --uses 1
+```

+Reset a user's password:
+```bash
+docker exec cocoon-pds /cocoon reset-password --did "did:plc:xxx"
+```

+### Updating

+```bash
+docker-compose pull
+docker-compose up -d
+```

## Implemented Endpoints

> [!NOTE]
···

### Identity

-- [ ] `com.atproto.identity.getRecommendedDidCredentials`
-- [ ] `com.atproto.identity.requestPlcOperationSignature`
+- [x] `com.atproto.identity.getRecommendedDidCredentials`
+- [x] `com.atproto.identity.requestPlcOperationSignature`
- [x] `com.atproto.identity.resolveHandle`
-- [ ] `com.atproto.identity.signPlcOperation`
-- [ ] `com.atproto.identity.submitPlcOperation`
+- [x] `com.atproto.identity.signPlcOperation`
+- [x] `com.atproto.identity.submitPlcOperation`
- [x] `com.atproto.identity.updateHandle`

### Repo
···
- [x] `com.atproto.repo.deleteRecord`
- [x] `com.atproto.repo.describeRepo`
- [x] `com.atproto.repo.getRecord`
-- [x] `com.atproto.repo.importRepo` (Works "okay". You still have to handle PLC operations on your own when migrating. Use with extreme caution.)
+- [x] `com.atproto.repo.importRepo` (Works "okay". Use with extreme caution.)
- [x] `com.atproto.repo.listRecords`
-- [ ] `com.atproto.repo.listMissingBlobs`
+- [x] `com.atproto.repo.listMissingBlobs`

### Server

···
- [x] `com.atproto.server.createInviteCode`
- [x] `com.atproto.server.createInviteCodes`
- [x] `com.atproto.server.deactivateAccount`
-- [ ] `com.atproto.server.deleteAccount`
+- [x] `com.atproto.server.deleteAccount`
- [x] `com.atproto.server.deleteSession`
- [x] `com.atproto.server.describeServer`
- [ ] `com.atproto.server.getAccountInviteCodes`
-- [ ] `com.atproto.server.getServiceAuth`
+- [x] `com.atproto.server.getServiceAuth`
- ~~[ ] `com.atproto.server.listAppPasswords`~~ - not going to add app passwords
- [x] `com.atproto.server.refreshSession`
-- [ ] `com.atproto.server.requestAccountDelete`
+- [x] `com.atproto.server.requestAccountDelete`
- [x] `com.atproto.server.requestEmailConfirmation`
- [x] `com.atproto.server.requestEmailUpdate`
- [x] `com.atproto.server.requestPasswordReset`
-- [ ] `com.atproto.server.reserveSigningKey`
+- [x] `com.atproto.server.reserveSigningKey`
- [x] `com.atproto.server.resetPassword`
- ~~[ ] `com.atproto.server.revokeAppPassword`~~ - not going to add app passwords
- [x] `com.atproto.server.updateEmail`
···

### Other

-- [ ] `com.atproto.label.queryLabels`
+- [x] `com.atproto.label.queryLabels`
- [x] `com.atproto.moderation.createReport` (Note: this should be handled by proxying, not actually implemented in the PDS)
- [x] `app.bsky.actor.getPreferences`
- [x] `app.bsky.actor.putPreferences`
···
	}

	proof := extractProof(headers)
-
	if proof == "" {
		return nil, nil
	}
···

	nonce, _ := claims["nonce"].(string)
	if nonce == "" {
-		// WARN: this _must_ be `use_dpop_nonce` for clients know they should make another request
+		// reference impl checks if self.nonce is not null before returning an error, but we always have a
+		// nonce so we do not bother checking
		return nil, ErrUseDpopNonce
	}

	if nonce != "" && !dm.nonce.Check(nonce) {
-		// WARN: this _must_ be `use_dpop_nonce` so that clients will fetch a new nonce
+		// dpop nonce mismatch
		return nil, ErrUseDpopNonce
	}

···
}

func extractProof(headers http.Header) string {
-	dpopHeaders := headers["Dpop"]
+	dpopHeaders := headers.Values("dpop")
	switch len(dpopHeaders) {
	case 0:
		return ""
···
package server

import (
+	"context"

	"github.com/haileyok/cocoon/models"
)

-func (s *Server) getActorByHandle(handle string) (*models.Actor, error) {
+func (s *Server) getActorByHandle(ctx context.Context, handle string) (*models.Actor, error) {
	var actor models.Actor
-	if err := s.db.First(&actor, models.Actor{Handle: handle}).Error; err != nil {
+	if err := s.db.First(ctx, &actor, models.Actor{Handle: handle}).Error; err != nil {
		return nil, err
	}
	return &actor, nil
}

-func (s *Server) getRepoByEmail(email string) (*models.Repo, error) {
+func (s *Server) getRepoByEmail(ctx context.Context, email string) (*models.Repo, error) {
	var repo models.Repo
-	if err := s.db.First(&repo, models.Repo{Email: email}).Error; err != nil {
+	if err := s.db.First(ctx, &repo, models.Repo{Email: email}).Error; err != nil {
		return nil, err
	}
	return &repo, nil
}

-func (s *Server) getRepoActorByEmail(email string) (*models.RepoActor, error) {
+func (s *Server) getRepoActorByEmail(ctx context.Context, email string) (*models.RepoActor, error) {
	var repo models.RepoActor
-	if err := s.db.Raw("SELECT r.*, a.* FROM repos r LEFT JOIN actors a ON r.did = a.did WHERE r.email= ?", nil, email).Scan(&repo).Error; err != nil {
+	if err := s.db.Raw(ctx, "SELECT r.*, a.* FROM repos r LEFT JOIN actors a ON r.did = a.did WHERE r.email= ?", nil, email).Scan(&repo).Error; err != nil {
		return nil, err
	}
	return &repo, nil
}

-func (s *Server) getRepoActorByDid(did string) (*models.RepoActor, error) {
+func (s *Server) getRepoActorByDid(ctx context.Context, did string) (*models.RepoActor, error) {
	var repo models.RepoActor
-	if err := s.db.Raw("SELECT r.*, a.* FROM repos r LEFT JOIN actors a ON r.did = a.did WHERE r.did = ?", nil, did).Scan(&repo).Error; err != nil {
+	if err := s.db.Raw(ctx, "SELECT r.*, a.* FROM repos r LEFT JOIN actors a ON r.did = a.did WHERE r.did = ?", nil, did).Scan(&repo).Error; err != nil {
		return nil, err
	}
	return &repo, nil
server/handle_account.go (+4 -2)
···

func (s *Server) handleAccount(e echo.Context) error {
	ctx := e.Request().Context()
+	logger := s.logger.With("name", "handleAccount")

	repo, sess, err := s.getSessionRepoOrErr(e)
	if err != nil {
		return e.Redirect(303, "/account/signin")
···
	oldestPossibleSession := time.Now().Add(constants.ConfidentialClientSessionLifetime)

	var tokens []provider.OauthToken
-	if err := s.db.Raw("SELECT * FROM oauth_tokens WHERE sub = ? AND created_at < ? ORDER BY created_at ASC", nil, repo.Repo.Did, oldestPossibleSession).Scan(&tokens).Error; err != nil {
-		s.logger.Error("couldnt fetch oauth sessions for account", "did", repo.Repo.Did, "error", err)
+	if err := s.db.Raw(ctx, "SELECT * FROM oauth_tokens WHERE sub = ? AND created_at < ? ORDER BY created_at ASC", nil, repo.Repo.Did, oldestPossibleSession).Scan(&tokens).Error; err != nil {
+		logger.Error("couldnt fetch oauth sessions for account", "did", repo.Repo.Did, "error", err)
		sess.AddFlash("Unable to fetch sessions. See server logs for more details.", "error")
		sess.Save(e.Request(), e.Response())
		return e.Render(200, "account.html", map[string]any{
server/handle_account_revoke.go (+8 -5)
···55 "github.com/labstack/echo/v4"
66)
7788-type AccountRevokeRequest struct {
88+type AccountRevokeInput struct {
99 Token string `form:"token"`
1010}
11111212func (s *Server) handleAccountRevoke(e echo.Context) error {
1313- var req AccountRevokeRequest
1313+ ctx := e.Request().Context()
+	logger := s.logger.With("name", "handleAccountRevoke")

+	var req AccountRevokeInput
	if err := e.Bind(&req); err != nil {
-		s.logger.Error("could not bind account revoke request", "error", err)
+		logger.Error("could not bind account revoke request", "error", err)
		return helpers.ServerError(e, nil)
	}

···
		return e.Redirect(303, "/account/signin")
	}

-	if err := s.db.Exec("DELETE FROM oauth_tokens WHERE sub = ? AND token = ?", nil, repo.Repo.Did, req.Token).Error; err != nil {
-		s.logger.Error("couldnt delete oauth session for account", "did", repo.Repo.Did, "token", req.Token, "error", err)
+	if err := s.db.Exec(ctx, "DELETE FROM oauth_tokens WHERE sub = ? AND token = ?", nil, repo.Repo.Did, req.Token).Error; err != nil {
+		logger.Error("couldnt delete oauth session for account", "did", repo.Repo.Did, "token", req.Token, "error", err)
		sess.AddFlash("Unable to revoke session. See server logs for more details.", "error")
		sess.Save(e.Request(), e.Response())
		return e.Redirect(303, "/account")
server/handle_account_signin.go (+68 -16)
···

import (
	"errors"
+	"fmt"
	"strings"
+	"time"

	"github.com/bluesky-social/indigo/atproto/syntax"
	"github.com/gorilla/sessions"
···
	"gorm.io/gorm"
)

-type OauthSigninRequest struct {
-	Username    string `form:"username"`
-	Password    string `form:"password"`
-	QueryParams string `form:"query_params"`
+type OauthSigninInput struct {
+	Username        string `form:"username"`
+	Password        string `form:"password"`
+	AuthFactorToken string `form:"token"`
+	QueryParams     string `form:"query_params"`
}

func (s *Server) getSessionRepoOrErr(e echo.Context) (*models.RepoActor, *sessions.Session, error) {
+	ctx := e.Request().Context()

	sess, err := session.Get("session", e)
	if err != nil {
		return nil, nil, err
···
		return nil, sess, errors.New("did was not set in session")
	}

-	repo, err := s.getRepoActorByDid(did)
+	repo, err := s.getRepoActorByDid(ctx, did)
	if err != nil {
		return nil, sess, err
	}
···
func getFlashesFromSession(e echo.Context, sess *sessions.Session) map[string]any {
	defer sess.Save(e.Request(), e.Response())
	return map[string]any{
-		"errors":    sess.Flashes("error"),
-		"successes": sess.Flashes("success"),
+		"errors":        sess.Flashes("error"),
+		"successes":     sess.Flashes("success"),
+		"tokenrequired": sess.Flashes("tokenrequired"),
	}
}

···
}

func (s *Server) handleAccountSigninPost(e echo.Context) error {
-	var req OauthSigninRequest
+	ctx := e.Request().Context()
+	logger := s.logger.With("name", "handleAccountSigninPost")

+	var req OauthSigninInput
	if err := e.Bind(&req); err != nil {
-		s.logger.Error("error binding sign in req", "error", err)
+		logger.Error("error binding sign in req", "error", err)
		return helpers.ServerError(e, nil)
	}

···
		idtype = "handle"
	} else {
		idtype = "email"
+	}

+	queryParams := ""
+	if req.QueryParams != "" {
+		queryParams = fmt.Sprintf("?%s", req.QueryParams)
	}

	// TODO: we should make this a helper since we do it for the base create_session as well
···
	var err error
	switch idtype {
	case "did":
-		err = s.db.Raw("SELECT r.*, a.* FROM repos r LEFT JOIN actors a ON r.did = a.did WHERE r.did = ?", nil, req.Username).Scan(&repo).Error
+		err = s.db.Raw(ctx, "SELECT r.*, a.* FROM repos r LEFT JOIN actors a ON r.did = a.did WHERE r.did = ?", nil, req.Username).Scan(&repo).Error
	case "handle":
-		err = s.db.Raw("SELECT r.*, a.* FROM actors a LEFT JOIN repos r ON a.did = r.did WHERE a.handle = ?", nil, req.Username).Scan(&repo).Error
+		err = s.db.Raw(ctx, "SELECT r.*, a.* FROM actors a LEFT JOIN repos r ON a.did = r.did WHERE a.handle = ?", nil, req.Username).Scan(&repo).Error
	case "email":
-		err = s.db.Raw("SELECT r.*, a.* FROM repos r LEFT JOIN actors a ON r.did = a.did WHERE r.email = ?", nil, req.Username).Scan(&repo).Error
+		err = s.db.Raw(ctx, "SELECT r.*, a.* FROM repos r LEFT JOIN actors a ON r.did = a.did WHERE r.email = ?", nil, req.Username).Scan(&repo).Error
	}
	if err != nil {
		if err == gorm.ErrRecordNotFound {
···
			sess.AddFlash("Something went wrong!", "error")
		}
		sess.Save(e.Request(), e.Response())
-		return e.Redirect(303, "/account/signin")
+		return e.Redirect(303, "/account/signin"+queryParams)
	}

	if err := bcrypt.CompareHashAndPassword([]byte(repo.Password), []byte(req.Password)); err != nil {
···
			sess.AddFlash("Something went wrong!", "error")
		}
		sess.Save(e.Request(), e.Response())
-		return e.Redirect(303, "/account/signin")
+		return e.Redirect(303, "/account/signin"+queryParams)
+	}

+	// if the repo requires a 2FA token and one hasn't been provided, send a code and prompt for one
+	if repo.TwoFactorType != models.TwoFactorTypeNone && req.AuthFactorToken == "" {
+		err = s.createAndSendTwoFactorCode(ctx, repo)
+		if err != nil {
+			sess.AddFlash("Something went wrong!", "error")
+			sess.Save(e.Request(), e.Response())
+			return e.Redirect(303, "/account/signin"+queryParams)
+		}

+		sess.AddFlash("requires 2FA token", "tokenrequired")
+		sess.Save(e.Request(), e.Response())
+		return e.Redirect(303, "/account/signin"+queryParams)
+	}

+	// if 2FA is required, now check that the one provided is valid
+	if repo.TwoFactorType != models.TwoFactorTypeNone {
+		if repo.TwoFactorCode == nil || repo.TwoFactorCodeExpiresAt == nil {
+			err = s.createAndSendTwoFactorCode(ctx, repo)
+			if err != nil {
+				sess.AddFlash("Something went wrong!", "error")
+				sess.Save(e.Request(), e.Response())
+				return e.Redirect(303, "/account/signin"+queryParams)
+			}

+			sess.AddFlash("requires 2FA token", "tokenrequired")
+			sess.Save(e.Request(), e.Response())
+			return e.Redirect(303, "/account/signin"+queryParams)
+		}

+		if *repo.TwoFactorCode != req.AuthFactorToken {
+			return helpers.InvalidTokenError(e)
+		}

+		if time.Now().UTC().After(*repo.TwoFactorCodeExpiresAt) {
+			return helpers.ExpiredTokenError(e)
+		}
	}

	sess.Options = &sessions.Options{
···
		return err
	}

-	if req.QueryParams != "" {
-		return e.Redirect(303, "/oauth/authorize?"+req.QueryParams)
+	if queryParams != "" {
+		return e.Redirect(303, "/oauth/authorize"+queryParams)
	} else {
		return e.Redirect(303, "/account")
	}
server/handle_actor_put_preferences.go (+3 -1)
···
// This is kinda lame. Not great to implement app.bsky in the pds, but alas

func (s *Server) handleActorPutPreferences(e echo.Context) error {
+	ctx := e.Request().Context()

	repo := e.Get("repo").(*models.RepoActor)

	var prefs map[string]any
···
		return err
	}

-	if err := s.db.Exec("UPDATE repos SET preferences = ? WHERE did = ?", nil, b, repo.Repo.Did).Error; err != nil {
+	if err := s.db.Exec(ctx, "UPDATE repos SET preferences = ? WHERE did = ?", nil, b, repo.Repo.Did).Error; err != nil {
		return err
	}
