fork of whitequark.org/git-pages with mods for tangled

Compare changes

Choose any two refs to compare.

+9 -9
.forgejo/workflows/ci.yaml
··· 9 9 check: 10 10 runs-on: codeberg-small-lazy 11 11 container: 12 - image: docker.io/library/node:24-trixie-slim@sha256:45babd1b4ce0349fb12c4e24bf017b90b96d52806db32e001e3013f341bef0fe 12 + image: docker.io/library/node:24-trixie-slim@sha256:fcdfd7bcd8f641c8c76a8950343c73912d68ba341e8dd1074e663b784d3e76f4 13 13 steps: 14 14 - name: Check out source code 15 - uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 15 + uses: https://code.forgejo.org/actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 16 16 - name: Set up toolchain 17 - uses: https://code.forgejo.org/actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6 17 + uses: https://code.forgejo.org/actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 18 18 with: 19 19 go-version: '>=1.25.0' 20 20 - name: Install dependencies ··· 36 36 needs: [check] 37 37 runs-on: codeberg-medium-lazy 38 38 container: 39 - image: docker.io/library/node:24-trixie-slim@sha256:ef4ca6d078dd18322059a1f051225f7bbfc2bb60c16cbb5d8a1ba2cc8964fe8a 39 + image: docker.io/library/node:24-trixie-slim@sha256:fcdfd7bcd8f641c8c76a8950343c73912d68ba341e8dd1074e663b784d3e76f4 40 40 steps: 41 41 - name: Check out source code 42 - uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 42 + uses: https://code.forgejo.org/actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 43 43 - name: Set up toolchain 44 - uses: https://code.forgejo.org/actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6 44 + uses: https://code.forgejo.org/actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 45 45 with: 46 46 go-version: '>=1.25.0' 47 47 - name: Install dependencies ··· 58 58 build linux arm64 59 59 build darwin arm64 60 60 - name: Create release 61 - uses: https://code.forgejo.org/actions/forgejo-release@v2.7.3 61 + uses: 
https://code.forgejo.org/actions/forgejo-release@fc0488c944626f9265d87fbc4dd6c08f78014c63 # v2.7.3 62 62 with: 63 63 tag: ${{ startsWith(forge.event.ref, 'refs/tags/v') && forge.ref_name || 'latest' }} 64 64 release-dir: assets ··· 71 71 needs: [check] 72 72 runs-on: codeberg-medium-lazy 73 73 container: 74 - image: docker.io/library/node:24-trixie-slim@sha256:ef4ca6d078dd18322059a1f051225f7bbfc2bb60c16cbb5d8a1ba2cc8964fe8a 74 + image: docker.io/library/node:24-trixie-slim@sha256:fcdfd7bcd8f641c8c76a8950343c73912d68ba341e8dd1074e663b784d3e76f4 75 75 steps: 76 76 - name: Install dependencies 77 77 run: | 78 78 apt-get -y update 79 79 apt-get -y install buildah ca-certificates 80 80 - name: Check out source code 81 - uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 81 + uses: https://code.forgejo.org/actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 82 82 - name: Build container 83 83 run: | 84 84 printf '[storage]\ndriver="vfs"\nrunroot="/run/containers/storage"\ngraphroot="/var/lib/containers/storage"\n' | tee /etc/containers/storage.conf
+1
.gitignore
··· 4 4 /data 5 5 /config*.toml* 6 6 /git-pages 7 + nixos.qcow2
+3 -3
Dockerfile
··· 3 3 RUN apk --no-cache add ca-certificates 4 4 5 5 # Build supervisor. 6 - FROM docker.io/library/golang:1.25-alpine@sha256:aee43c3ccbf24fdffb7295693b6e33b21e01baec1b2a55acc351fde345e9ec34 AS supervisor-builder 6 + FROM docker.io/library/golang:1.25-alpine@sha256:d3f0cf7723f3429e3f9ed846243970b20a2de7bae6a5b66fc5914e228d831bbb AS supervisor-builder 7 7 RUN apk --no-cache add git 8 8 WORKDIR /build 9 9 RUN git clone https://github.com/ochinchina/supervisord . && \ ··· 12 12 go clean -cache -modcache 13 13 14 14 # Build Caddy with S3 storage backend. 15 - FROM docker.io/library/caddy:2.10.2-builder@sha256:53f91ad7c5f1ab9a607953199b7c1e10920c570ae002aef913d68ed7464fb19f AS caddy-builder 15 + FROM docker.io/library/caddy:2.10.2-builder@sha256:6e3ed727ce8695fc58e0a8de8e5d11888f6463c430ea5b40e0b5f679ab734868 AS caddy-builder 16 16 RUN xcaddy build ${CADDY_VERSION} \ 17 17 --with=github.com/ss098/certmagic-s3@v0.0.0-20250922022452-8af482af5f39 && \ 18 18 go clean -cache -modcache 19 19 20 20 # Build git-pages. 21 - FROM docker.io/library/golang:1.25-alpine@sha256:aee43c3ccbf24fdffb7295693b6e33b21e01baec1b2a55acc351fde345e9ec34 AS git-pages-builder 21 + FROM docker.io/library/golang:1.25-alpine@sha256:d3f0cf7723f3429e3f9ed846243970b20a2de7bae6a5b66fc5914e228d831bbb AS git-pages-builder 22 22 RUN apk --no-cache add git 23 23 WORKDIR /build 24 24 COPY go.mod go.sum ./
-14
LICENSE-0BSD.txt
··· 1 - Copyright (C) git-pages contributors 2 - Copyright (C) Catherine 'whitequark' 3 - 4 - Permission to use, copy, modify, and/or distribute this software for 5 - any purpose with or without fee is hereby granted. 6 - 7 - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN 12 - AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 13 - OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 -
+14
LICENSE.txt
··· 1 + Copyright (C) git-pages contributors 2 + Copyright (C) Catherine 'whitequark' 3 + 4 + Permission to use, copy, modify, and/or distribute this software for 5 + any purpose with or without fee is hereby granted. 6 + 7 + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN 12 + AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 13 + OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 +
+9 -2
README.md
··· 70 70 - The `POST` method requires an `application/json` body containing a Forgejo/Gitea/Gogs/GitHub webhook event payload. Requests where the `ref` key contains anything other than `refs/heads/pages` are ignored, and only the `pages` branch is used. The `repository.clone_url` key contains a repository URL to be shallowly cloned. 71 71 - If the received contents is empty, performs the same action as `DELETE`. 72 72 * In response to a `DELETE` request, the server unpublishes a site. The URL of the request must be the root URL of the site that is being unpublished. Site data remains stored for an indeterminate period of time, but becomes completely inaccessible. 73 + * If a `Dry-Run: yes` header is provided with a `PUT`, `DELETE`, or `POST` request, only the authorization checks are run; no destructive updates are made. Note that this functionality was added in _git-pages_ v0.2.0. 73 74 * All updates to site content are atomic (subject to consistency guarantees of the storage backend). That is, there is an instantaneous moment during an update before which the server will return the old content and after which it will return the new content. 74 75 * Files with a certain name, when placed in the root of a site, have special functions: 75 76 - [Netlify `_redirects`][_redirects] file can be used to specify HTTP redirect and rewrite rules. The _git-pages_ implementation currently does not support placeholders, query parameters, or conditions, and may differ from Netlify in other minor ways. If you find that a supported `_redirects` file feature does not work the same as on Netlify, please file an issue. (Note that _git-pages_ does not perform URL normalization; `/foo` and `/foo/` are *not* the same, unlike with Netlify.) 76 77 - [Netlify `_headers`][_headers] file can be used to specify custom HTTP response headers (if allowlisted by configuration). In particular, this is useful to enable [CORS requests][cors]. 
The _git-pages_ implementation may differ from Netlify in minor ways; if you find that a `_headers` file feature does not work the same as on Netlify, please file an issue. 78 + * Support for SHA-256 Git hashes is [limited by go-git][go-git-sha256]; once go-git implements the required features, _git-pages_ will automatically gain support for SHA-256 Git hashes. Note that shallow clones (used by _git-pages_ to conserve bandwidth if available) aren't supported yet in the Git protocol as of 2025. 77 79 78 80 [_redirects]: https://docs.netlify.com/manage/routing/redirects/overview/ 79 81 [_headers]: https://docs.netlify.com/manage/routing/headers/ 80 82 [cors]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/CORS 83 + [go-git-sha256]: https://github.com/go-git/go-git/issues/706 81 84 82 85 83 86 Authorization ··· 91 94 2. **DNS Challenge:** If the method is `PUT`, `DELETE`, `POST`, and a well-formed `Authorization:` header is provided containing a `<token>`, and a TXT record lookup at `_git-pages-challenge.<host>` returns a record whose concatenated value equals `SHA256("<host> <token>")`, the request is authorized. 92 95 - **`Pages` scheme:** Request includes an `Authorization: Pages <token>` header. 93 96 - **`Basic` scheme:** Request includes an `Authorization: Basic <basic>` header, where `<basic>` is equal to `Base64("Pages:<token>")`. (Useful for non-Forgejo forges.) 94 - 3. **DNS Allowlist:** If the method is `PUT` or `POST`, and a TXT record lookup at `_git-pages-repository.<host>` returns a set of well-formed absolute URLs, and (for `PUT` requests) the body contains a repository URL, and the requested clone URLs is contained in this set of URLs, the request is authorized. 97 + 3. 
**DNS Allowlist:** If the method is `PUT` or `POST`, and the request URL is `scheme://<user>.<host>/`, and a TXT record lookup at `_git-pages-repository.<host>` returns a set of well-formed absolute URLs, and (for `PUT` requests) the body contains a repository URL, and the requested clone URLs is contained in this set of URLs, the request is authorized. 95 98 4. **Wildcard Match (content):** If the method is `POST`, and a `[[wildcard]]` configuration section exists where the suffix of a hostname (compared label-wise) is equal to `[[wildcard]].domain`, and (for `PUT` requests) the body contains a repository URL, and the requested clone URL is a *matching* clone URL, the request is authorized. 96 99 - **Index repository:** If the request URL is `scheme://<user>.<host>/`, a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, where `<project>` is computed by templating each element of `[[wildcard]].index-repos` with `<user>`, and `[[wildcard]]` is the section where the match occurred. 97 100 - **Project repository:** If the request URL is `scheme://<user>.<host>/<project>/`, a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, and `[[wildcard]]` is the section where the match occurred. ··· 115 118 * If `SENTRY_DSN` environment variable is set, panics are reported to Sentry. 116 119 * If `SENTRY_DSN` and `SENTRY_LOGS=1` environment variables are set, logs are uploaded to Sentry. 117 120 * If `SENTRY_DSN` and `SENTRY_TRACING=1` environment variables are set, traces are uploaded to Sentry. 121 + * Optional syslog integration allows transmitting application logs to a syslog daemon. 
When present, the `SYSLOG_ADDR` environment variable enables the integration, and the variable's value is used to configure the absolute path to a Unix socket (usually located at `/dev/log` on Unix systems) or a network address of one of the following formats: 122 + * for TLS over TCP: `tcp+tls://host:port`; 123 + * for plain TCP: `tcp://host:port`; 124 + * for UDP: `udp://host:port`. 118 125 119 126 120 127 Architecture (v2) ··· 160 167 License 161 168 ------- 162 169 163 - [0-clause BSD](LICENSE-0BSD.txt) 170 + [0-clause BSD](LICENSE.txt)
+6 -2
conf/config.example.toml
··· 2 2 # as the intrinsic default value. 3 3 4 4 log-format = "text" 5 + log-level = "info" 5 6 6 7 [server] 7 8 # Use "-" to disable the handler. ··· 15 16 index-repos = ["<user>.codeberg.page", "pages"] 16 17 index-repo-branch = "main" 17 18 authorization = "forgejo" 18 - fallback-proxy-to = "https://codeberg.page" 19 + 20 + [fallback] # non-default section 21 + proxy-to = "https://codeberg.page" 22 + insecure = false 19 23 20 24 [storage] 21 25 type = "fs" ··· 23 27 [storage.fs] 24 28 root = "./data" 25 29 26 - [storage.s3] # non-default bucket configuration 30 + [storage.s3] # non-default section 27 31 endpoint = "play.min.io" 28 32 access-key-id = "Q3AM3UQ867SPQQA43P2F" 29 33 secret-access-key = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+39 -2
flake.nix
··· 43 43 "-s -w" 44 44 ]; 45 45 46 - vendorHash = "sha256-oVXELOXbRTzzU8pUGNE4K552thlZXGAX7qpv6ETwz6o="; 46 + vendorHash = "sha256-oFKS3ciZyuzzMYg7g3idbssHfDdNYXzNjAXB6XDzMJg="; 47 47 }; 48 48 in 49 49 { ··· 63 63 inherit git-pages; 64 64 default = git-pages; 65 65 }; 66 + 67 + apps.vm = 68 + let 69 + guestSystem = if pkgs.stdenv.hostPlatform.isAarch64 then "aarch64-linux" else "x86_64-linux"; 70 + in 71 + { 72 + type = "app"; 73 + program = 74 + (pkgs.writeShellApplication { 75 + name = "vm"; 76 + text = '' 77 + exec ${ 78 + pkgs.lib.getExe 79 + (import ./nix/vm.nix { 80 + inherit nixpkgs self; 81 + system = guestSystem; 82 + hostSystem = system; 83 + }).config.system.build.vm 84 + } 85 + ''; 86 + }) 87 + + /bin/vm; 88 + }; 66 89 } 67 - ); 90 + ) 91 + // { 92 + nixosModules.default = 93 + { 94 + lib, 95 + pkgs, 96 + ... 97 + }: 98 + { 99 + imports = [ ./nix/module.nix ]; 100 + services.git-pages.package = 101 + lib.mkDefault 102 + self.packages.${pkgs.stdenv.hostPlatform.system}.git-pages; 103 + }; 104 + }; 68 105 }
+17 -14
go.mod
··· 4 4 5 5 require ( 6 6 codeberg.org/git-pages/go-headers v1.1.0 7 + codeberg.org/git-pages/go-slog-syslog v0.0.0-20251122144254-06c45d430fb9 7 8 github.com/KimMachineGun/automemlimit v0.7.5 8 9 github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 9 10 github.com/creasty/defaults v1.8.0 10 - github.com/getsentry/sentry-go v0.36.2 11 - github.com/getsentry/sentry-go/slog v0.36.2 12 - github.com/go-git/go-billy/v6 v6.0.0-20251026101908-623011986e70 13 - github.com/go-git/go-git/v6 v6.0.0-20251029213217-0bbfc0875edd 11 + github.com/getsentry/sentry-go v0.40.0 12 + github.com/getsentry/sentry-go/slog v0.40.0 13 + github.com/go-git/go-billy/v6 v6.0.0-20251126203821-7f9c95185ee0 14 + github.com/go-git/go-git/v6 v6.0.0-20251128074608-48f817f57805 14 15 github.com/klauspost/compress v1.18.1 15 16 github.com/maypok86/otter/v2 v2.2.1 16 - github.com/minio/minio-go/v7 v7.0.95 17 + github.com/minio/minio-go/v7 v7.0.97 17 18 github.com/pelletier/go-toml/v2 v2.2.4 18 19 github.com/pquerna/cachecontrol v0.2.0 19 20 github.com/prometheus/client_golang v1.23.2 20 - github.com/samber/slog-multi v1.5.0 21 + github.com/samber/slog-multi v1.6.0 21 22 github.com/tj/go-redirects v0.0.0-20200911105812-fd1ba1020b37 22 23 github.com/valyala/fasttemplate v1.2.2 23 24 google.golang.org/protobuf v1.36.10 ··· 29 30 github.com/beorn7/perks v1.0.1 // indirect 30 31 github.com/cespare/xxhash/v2 v2.3.0 // indirect 31 32 github.com/cloudflare/circl v1.6.1 // indirect 32 - github.com/cyphar/filepath-securejoin v0.5.0 // indirect 33 + github.com/cyphar/filepath-securejoin v0.6.1 // indirect 33 34 github.com/dustin/go-humanize v1.0.1 // indirect 34 35 github.com/emirpasic/gods v1.18.1 // indirect 35 36 github.com/go-git/gcfg/v2 v2.0.2 // indirect 36 37 github.com/go-ini/ini v1.67.0 // indirect 37 - github.com/goccy/go-json v0.10.5 // indirect 38 38 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect 39 39 github.com/google/uuid v1.6.0 // indirect 40 40 
github.com/kevinburke/ssh_config v1.4.0 // indirect 41 41 github.com/klauspost/cpuid/v2 v2.3.0 // indirect 42 - github.com/minio/crc64nvme v1.0.2 // indirect 42 + github.com/klauspost/crc32 v1.3.0 // indirect 43 + github.com/leodido/go-syslog/v4 v4.3.0 // indirect 44 + github.com/minio/crc64nvme v1.1.0 // indirect 43 45 github.com/minio/md5-simd v1.1.2 // indirect 44 46 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 45 47 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect ··· 50 52 github.com/prometheus/common v0.66.1 // indirect 51 53 github.com/prometheus/procfs v0.16.1 // indirect 52 54 github.com/rs/xid v1.6.0 // indirect 53 - github.com/samber/lo v1.51.0 // indirect 55 + github.com/samber/lo v1.52.0 // indirect 54 56 github.com/samber/slog-common v0.19.0 // indirect 55 57 github.com/sergi/go-diff v1.4.0 // indirect 56 58 github.com/tinylib/msgp v1.3.0 // indirect 57 59 github.com/tj/assert v0.0.3 // indirect 58 60 github.com/valyala/bytebufferpool v1.0.0 // indirect 59 61 go.yaml.in/yaml/v2 v2.4.2 // indirect 60 - golang.org/x/crypto v0.43.0 // indirect 61 - golang.org/x/net v0.46.0 // indirect 62 - golang.org/x/sys v0.37.0 // indirect 63 - golang.org/x/text v0.30.0 // indirect 62 + golang.org/x/crypto v0.45.0 // indirect 63 + golang.org/x/net v0.47.0 // indirect 64 + golang.org/x/sys v0.38.0 // indirect 65 + golang.org/x/text v0.31.0 // indirect 66 + gopkg.in/yaml.v3 v3.0.1 // indirect 64 67 )
+34 -32
go.sum
··· 1 - codeberg.org/git-pages/go-headers v1.0.0 h1:hvGU97hQdXaT5HwCpZJWQdg7akvtOBCSUNL4u2a5uTs= 2 - codeberg.org/git-pages/go-headers v1.0.0/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts= 3 1 codeberg.org/git-pages/go-headers v1.1.0 h1:rk7/SOSsn+XuL7PUQZFYUaWKHEaj6K8mXmUV9rF2VxE= 4 2 codeberg.org/git-pages/go-headers v1.1.0/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts= 3 + codeberg.org/git-pages/go-slog-syslog v0.0.0-20251122144254-06c45d430fb9 h1:xfPDg8ThBt3+t+C+pvM3bEH4ePUzP5t5kY2v19TqgKc= 4 + codeberg.org/git-pages/go-slog-syslog v0.0.0-20251122144254-06c45d430fb9/go.mod h1:8NPSXbYcVb71qqNM5cIgn1/uQgMisLbu2dVD1BNxsUw= 5 5 github.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk= 6 6 github.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM= 7 7 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= ··· 22 22 github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= 23 23 github.com/creasty/defaults v1.8.0 h1:z27FJxCAa0JKt3utc0sCImAEb+spPucmKoOdLHvHYKk= 24 24 github.com/creasty/defaults v1.8.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= 25 - github.com/cyphar/filepath-securejoin v0.5.0 h1:hIAhkRBMQ8nIeuVwcAoymp7MY4oherZdAxD+m0u9zaw= 26 - github.com/cyphar/filepath-securejoin v0.5.0/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= 25 + github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE= 26 + github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc= 27 27 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 28 28 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 29 29 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= ··· 33 33 github.com/elazarl/goproxy v1.7.2/go.mod 
h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= 34 34 github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= 35 35 github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= 36 - github.com/getsentry/sentry-go v0.36.2 h1:uhuxRPTrUy0dnSzTd0LrYXlBYygLkKY0hhlG5LXarzM= 37 - github.com/getsentry/sentry-go v0.36.2/go.mod h1:p5Im24mJBeruET8Q4bbcMfCQ+F+Iadc4L48tB1apo2c= 38 - github.com/getsentry/sentry-go/slog v0.36.2 h1:PM27JHFE3lsE8fgI/cOueEOtjiktnC3Za2o5oL9PbJQ= 39 - github.com/getsentry/sentry-go/slog v0.36.2/go.mod h1:aVFAxnpA3FEtZeSBhBFAnWOlqhiLjaaoOZ0bmBN9IHo= 36 + github.com/getsentry/sentry-go v0.40.0 h1:VTJMN9zbTvqDqPwheRVLcp0qcUcM+8eFivvGocAaSbo= 37 + github.com/getsentry/sentry-go v0.40.0/go.mod h1:eRXCoh3uvmjQLY6qu63BjUZnaBu5L5WhMV1RwYO8W5s= 38 + github.com/getsentry/sentry-go/slog v0.40.0 h1:uR2EPL9w6uHw3XB983IAqzqM9mP+fjJpNY9kfob3/Z8= 39 + github.com/getsentry/sentry-go/slog v0.40.0/go.mod h1:ArRaP+0rsbnJGyvZwYDo/vDQT/YBbOQeOlO+DGW+F9s= 40 40 github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= 41 41 github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= 42 42 github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= 43 43 github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= 44 44 github.com/go-git/gcfg/v2 v2.0.2 h1:MY5SIIfTGGEMhdA7d7JePuVVxtKL7Hp+ApGDJAJ7dpo= 45 45 github.com/go-git/gcfg/v2 v2.0.2/go.mod h1:/lv2NsxvhepuMrldsFilrgct6pxzpGdSRC13ydTLSLs= 46 - github.com/go-git/go-billy/v6 v6.0.0-20251026101908-623011986e70 h1:TWpNrg9JPxp0q+KG0hoFGBulPIP/kMK1b0mDqjdEB/s= 47 - github.com/go-git/go-billy/v6 v6.0.0-20251026101908-623011986e70/go.mod h1:TpCYxdQ0tWZkrnAkd7yqK+z1C8RKcyjcaYAJNAcnUnM= 46 + github.com/go-git/go-billy/v6 v6.0.0-20251126203821-7f9c95185ee0 h1:eY5aB2GXiVdgTueBcqsBt53WuJTRZAuCdIS/86Pcq5c= 47 + github.com/go-git/go-billy/v6 v6.0.0-20251126203821-7f9c95185ee0/go.mod 
h1:0NjwVNrwtVFZBReAp5OoGklGJIgJFEbVyHneAr4lc8k= 48 48 github.com/go-git/go-git-fixtures/v5 v5.1.1 h1:OH8i1ojV9bWfr0ZfasfpgtUXQHQyVS8HXik/V1C099w= 49 49 github.com/go-git/go-git-fixtures/v5 v5.1.1/go.mod h1:Altk43lx3b1ks+dVoAG2300o5WWUnktvfY3VI6bcaXU= 50 - github.com/go-git/go-git/v6 v6.0.0-20251029213217-0bbfc0875edd h1:pn6+tR4O8McyqEr2MbQwqcySovpG8jDd11F/jQ6aAfA= 51 - github.com/go-git/go-git/v6 v6.0.0-20251029213217-0bbfc0875edd/go.mod h1:z9pQiXCfyOZIs/8qa5zmozzbcsDPtGN91UD7+qeX3hk= 50 + github.com/go-git/go-git/v6 v6.0.0-20251128074608-48f817f57805 h1:jxQ3BzYeErNRvlI/4+0mpwqMzvB4g97U+ksfgvrUEbY= 51 + github.com/go-git/go-git/v6 v6.0.0-20251128074608-48f817f57805/go.mod h1:dIwT3uWK1ooHInyVnK2JS5VfQ3peVGYaw2QPqX7uFvs= 52 52 github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= 53 53 github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= 54 - github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= 55 - github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= 56 54 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= 57 55 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= 58 56 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= ··· 66 64 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= 67 65 github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= 68 66 github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= 67 + github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM= 68 + github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw= 69 69 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 70 70 github.com/kr/pretty 
v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 71 71 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= ··· 75 75 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 76 76 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 77 77 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 78 + github.com/leodido/go-syslog/v4 v4.3.0 h1:bbSpI/41bYK9iSdlYzcwvlxuLOE8yi4VTFmedtnghdA= 79 + github.com/leodido/go-syslog/v4 v4.3.0/go.mod h1:eJ8rUfDN5OS6dOkCOBYlg2a+hbAg6pJa99QXXgMrd98= 78 80 github.com/maypok86/otter/v2 v2.2.1 h1:hnGssisMFkdisYcvQ8L019zpYQcdtPse+g0ps2i7cfI= 79 81 github.com/maypok86/otter/v2 v2.2.1/go.mod h1:1NKY9bY+kB5jwCXBJfE59u+zAwOt6C7ni1FTlFFMqVs= 80 - github.com/minio/crc64nvme v1.0.2 h1:6uO1UxGAD+kwqWWp7mBFsi5gAse66C4NXO8cmcVculg= 81 - github.com/minio/crc64nvme v1.0.2/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= 82 + github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q= 83 + github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= 82 84 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= 83 85 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= 84 - github.com/minio/minio-go/v7 v7.0.95 h1:ywOUPg+PebTMTzn9VDsoFJy32ZuARN9zhB+K3IYEvYU= 85 - github.com/minio/minio-go/v7 v7.0.95/go.mod h1:wOOX3uxS334vImCNRVyIDdXX9OsXDm89ToynKgqUKlo= 86 + github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ= 87 + github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk= 86 88 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 87 89 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 88 90 github.com/pbnjay/memory 
v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= ··· 113 115 github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= 114 116 github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= 115 117 github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= 116 - github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI= 117 - github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= 118 + github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw= 119 + github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= 118 120 github.com/samber/slog-common v0.19.0 h1:fNcZb8B2uOLooeYwFpAlKjkQTUafdjfqKcwcC89G9YI= 119 121 github.com/samber/slog-common v0.19.0/go.mod h1:dTz+YOU76aH007YUU0DffsXNsGFQRQllPQh9XyNoA3M= 120 - github.com/samber/slog-multi v1.5.0 h1:UDRJdsdb0R5vFQFy3l26rpX3rL3FEPJTJ2yKVjoiT1I= 121 - github.com/samber/slog-multi v1.5.0/go.mod h1:im2Zi3mH/ivSY5XDj6LFcKToRIWPw1OcjSVSdXt+2d0= 122 + github.com/samber/slog-multi v1.6.0 h1:i1uBY+aaln6ljwdf7Nrt4Sys8Kk6htuYuXDHWJsHtZg= 123 + github.com/samber/slog-multi v1.6.0/go.mod h1:qTqzmKdPpT0h4PFsTN5rYRgLwom1v+fNGuIrl1Xnnts= 122 124 github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw= 123 125 github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= 124 126 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= ··· 140 142 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 141 143 go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= 142 144 go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= 143 - golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= 144 - golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= 145 - 
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= 146 - golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= 147 - golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= 148 - golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= 149 - golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= 150 - golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= 151 - golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= 152 - golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= 145 + golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= 146 + golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= 147 + golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= 148 + golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= 149 + golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= 150 + golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= 151 + golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= 152 + golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= 153 + golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= 154 + golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= 153 155 google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= 154 156 google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= 155 157 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+127
nix/module.nix
··· 1 + { 2 + config, 3 + lib, 4 + pkgs, 5 + ... 6 + }: 7 + 8 + with lib; 9 + 10 + let 11 + cfg = config.services.git-pages; 12 + configFile = pkgs.writeText "git-pages-config.toml" cfg.configFile; 13 + in 14 + { 15 + options.services.git-pages = { 16 + enable = mkEnableOption "git-pages static site server"; 17 + 18 + package = mkOption { 19 + type = types.package; 20 + description = "The git-pages package to use."; 21 + }; 22 + 23 + user = mkOption { 24 + type = types.str; 25 + default = "git-pages"; 26 + description = "User under which git-pages runs."; 27 + }; 28 + 29 + group = mkOption { 30 + type = types.str; 31 + default = "git-pages"; 32 + description = "Group under which git-pages runs."; 33 + }; 34 + 35 + dataDir = mkOption { 36 + type = types.path; 37 + default = "/var/lib/git-pages"; 38 + description = "Directory where git-pages stores its data."; 39 + }; 40 + 41 + configFile = mkOption { 42 + type = types.lines; 43 + default = '' 44 + [server] 45 + pages = "tcp/:3000" 46 + caddy = "tcp/:3001" 47 + metrics = "tcp/:3002" 48 + 49 + [storage] 50 + type = "fs" 51 + 52 + [storage.fs] 53 + root = "${cfg.dataDir}/data" 54 + 55 + [limits] 56 + max-site-size = "128M" 57 + ''; 58 + }; 59 + 60 + openFirewall = mkOption { 61 + type = types.bool; 62 + default = false; 63 + description = "Whether to open the firewall for git-pages ports."; 64 + }; 65 + 66 + ports = { 67 + pages = mkOption { 68 + type = types.port; 69 + default = 3000; 70 + description = "Port for the main pages server."; 71 + }; 72 + 73 + caddy = mkOption { 74 + type = types.port; 75 + default = 3001; 76 + description = "Port for the Caddy integration endpoint."; 77 + }; 78 + 79 + metrics = mkOption { 80 + type = types.port; 81 + default = 3002; 82 + description = "Port for Prometheus metrics."; 83 + }; 84 + }; 85 + }; 86 + 87 + config = mkIf cfg.enable { 88 + users.users.${cfg.user} = { 89 + isSystemUser = true; 90 + group = cfg.group; 91 + home = cfg.dataDir; 92 + createHome = true; 93 + description 
= "git-pages service user"; 94 + }; 95 + 96 + users.groups.${cfg.group} = { }; 97 + 98 + systemd.services.git-pages = { 99 + description = "git-pages static site server"; 100 + after = [ "network.target" ]; 101 + wantedBy = [ "multi-user.target" ]; 102 + 103 + serviceConfig = { 104 + Type = "simple"; 105 + User = cfg.user; 106 + Group = cfg.group; 107 + WorkingDirectory = cfg.dataDir; 108 + ExecStart = "${cfg.package}/bin/git-pages -config ${configFile}"; 109 + Restart = "on-failure"; 110 + RestartSec = 5; 111 + }; 112 + }; 113 + 114 + systemd.tmpfiles.rules = [ 115 + "d '${cfg.dataDir}' 0750 ${cfg.user} ${cfg.group} - -" 116 + "d '${cfg.dataDir}/data' 0750 ${cfg.user} ${cfg.group} - -" 117 + ]; 118 + 119 + networking.firewall = mkIf cfg.openFirewall { 120 + allowedTCPPorts = with cfg.ports; [ 121 + pages 122 + caddy 123 + metrics 124 + ]; 125 + }; 126 + }; 127 + }
+94
nix/vm.nix
··· 1 + { 2 + nixpkgs, 3 + system, 4 + hostSystem, 5 + self, 6 + }: 7 + nixpkgs.lib.nixosSystem { 8 + inherit system; 9 + modules = [ 10 + self.nixosModules.default 11 + ( 12 + { 13 + lib, 14 + config, 15 + pkgs, 16 + ... 17 + }: 18 + { 19 + virtualisation.vmVariant.virtualisation = { 20 + host.pkgs = import nixpkgs { system = hostSystem; }; 21 + 22 + graphics = false; 23 + memorySize = 2048; 24 + diskSize = 10 * 1024; 25 + cores = 2; 26 + forwardPorts = [ 27 + # ssh 28 + { 29 + from = "host"; 30 + host.port = 2222; 31 + guest.port = 22; 32 + } 33 + # git-pages main server 34 + { 35 + from = "host"; 36 + host.port = 3000; 37 + guest.port = 3000; 38 + } 39 + # git-pages caddy integration 40 + { 41 + from = "host"; 42 + host.port = 3001; 43 + guest.port = 3001; 44 + } 45 + # git-pages metrics 46 + { 47 + from = "host"; 48 + host.port = 3002; 49 + guest.port = 3002; 50 + } 51 + ]; 52 + }; 53 + 54 + networking.firewall.enable = false; 55 + time.timeZone = "Europe/London"; 56 + services.getty.autologinUser = "root"; 57 + environment.systemPackages = with pkgs; [ 58 + curl 59 + vim 60 + git 61 + htop 62 + ]; 63 + 64 + services.git-pages = { 65 + enable = true; 66 + dataDir = "/var/lib/git-pages"; 67 + configFile = '' 68 + [server] 69 + pages = "tcp/0.0.0.0:3000" 70 + caddy = "tcp/0.0.0.0:3001" 71 + metrics = "tcp/0.0.0.0:3002" 72 + 73 + [storage] 74 + type = "fs" 75 + 76 + [storage.fs] 77 + root = "/var/lib/git-pages/data" 78 + 79 + # Example wildcard configuration for development 80 + [[wildcard]] 81 + domain = "*.localhost" 82 + clone-url = "https://github.com/{domain}.git" 83 + authorization = "" 84 + ''; 85 + }; 86 + 87 + users = { 88 + users.${config.services.git-pages.user}.uid = 777; 89 + groups.${config.services.git-pages.group}.gid = 777; 90 + }; 91 + } 92 + ) 93 + ]; 94 + }
+25 -16
src/auth.go
··· 6 6 "encoding/json" 7 7 "errors" 8 8 "fmt" 9 - "log" 10 9 "net" 11 10 "net/http" 12 11 "net/url" ··· 32 31 return false 33 32 } 34 33 35 - func authorizeInsecure() *Authorization { 34 + func authorizeInsecure(r *http.Request) *Authorization { 36 35 if config.Insecure { // for testing only 37 - log.Println("auth: INSECURE mode") 36 + logc.Println(r.Context(), "auth: INSECURE mode") 38 37 return &Authorization{ 39 38 repoURLs: nil, 40 39 branch: "pages", ··· 159 158 return nil, err 160 159 } 161 160 161 + projectName, err := GetProjectName(r) 162 + if err != nil { 163 + return nil, err 164 + } 165 + 162 166 allowlistHostname := fmt.Sprintf("_git-pages-repository.%s", host) 163 167 records, err := net.LookupTXT(allowlistHostname) 164 168 if err != nil { ··· 166 170 fmt.Sprintf("failed to look up DNS repository allowlist: %s TXT", allowlistHostname)} 167 171 } 168 172 173 + if projectName != ".index" { 174 + return nil, AuthError{http.StatusUnauthorized, 175 + "DNS repository allowlist only authorizes index site"} 176 + } 177 + 169 178 var ( 170 179 repoURLs []string 171 180 errs []error ··· 266 275 } 267 276 268 277 if len(dnsRecords) > 0 { 269 - log.Printf("auth: %s TXT/CNAME: %q\n", host, dnsRecords) 278 + logc.Printf(r.Context(), "auth: %s TXT/CNAME: %q\n", host, dnsRecords) 270 279 } 271 280 272 281 for _, dnsRecord := range dnsRecords { ··· 314 323 func AuthorizeMetadataRetrieval(r *http.Request) (*Authorization, error) { 315 324 causes := []error{AuthError{http.StatusUnauthorized, "unauthorized"}} 316 325 317 - auth := authorizeInsecure() 326 + auth := authorizeInsecure(r) 318 327 if auth != nil { 319 328 return auth, nil 320 329 } ··· 325 334 } else if err != nil { // bad request 326 335 return nil, err 327 336 } else { 328 - log.Println("auth: DNS challenge") 337 + logc.Println(r.Context(), "auth: DNS challenge") 329 338 return auth, nil 330 339 } 331 340 ··· 336 345 } else if err != nil { // bad request 337 346 return nil, err 338 347 } else { 339 - 
log.Printf("auth: wildcard %s\n", pattern.GetHost()) 348 + logc.Printf(r.Context(), "auth: wildcard %s\n", pattern.GetHost()) 340 349 return auth, nil 341 350 } 342 351 } ··· 348 357 } else if err != nil { // bad request 349 358 return nil, err 350 359 } else { 351 - log.Printf("auth: codeberg %s\n", r.Host) 360 + logc.Printf(r.Context(), "auth: codeberg %s\n", r.Host) 352 361 return auth, nil 353 362 } 354 363 } ··· 366 375 return nil, err 367 376 } 368 377 369 - auth := authorizeInsecure() 378 + auth := authorizeInsecure(r) 370 379 if auth != nil { 371 380 return auth, nil 372 381 } ··· 378 387 } else if err != nil { // bad request 379 388 return nil, err 380 389 } else { 381 - log.Println("auth: DNS challenge: allow *") 390 + logc.Println(r.Context(), "auth: DNS challenge: allow *") 382 391 return auth, nil 383 392 } 384 393 ··· 390 399 } else if err != nil { // bad request 391 400 return nil, err 392 401 } else { 393 - log.Printf("auth: DNS allowlist: allow %v\n", auth.repoURLs) 402 + logc.Printf(r.Context(), "auth: DNS allowlist: allow %v\n", auth.repoURLs) 394 403 return auth, nil 395 404 } 396 405 } ··· 404 413 } else if err != nil { // bad request 405 414 return nil, err 406 415 } else { 407 - log.Printf("auth: wildcard %s: allow %v\n", pattern.GetHost(), auth.repoURLs) 416 + logc.Printf(r.Context(), "auth: wildcard %s: allow %v\n", pattern.GetHost(), auth.repoURLs) 408 417 return auth, nil 409 418 } 410 419 } ··· 416 425 } else if err != nil { // bad request 417 426 return nil, err 418 427 } else { 419 - log.Printf("auth: codeberg %s: allow %v branch %s\n", 428 + logc.Printf(r.Context(), "auth: codeberg %s: allow %v branch %s\n", 420 429 r.Host, auth.repoURLs, auth.branch) 421 430 return auth, nil 422 431 } ··· 633 642 return nil, err 634 643 } 635 644 636 - auth := authorizeInsecure() 645 + auth := authorizeInsecure(r) 637 646 if auth != nil { 638 647 return auth, nil 639 648 } ··· 645 654 } else if err != nil { // bad request 646 655 return nil, err 647 
656 } else { 648 - log.Printf("auth: forge token: allow\n") 657 + logc.Printf(r.Context(), "auth: forge token: allow\n") 649 658 return auth, nil 650 659 } 651 660 ··· 659 668 } else if err != nil { // bad request 660 669 return nil, err 661 670 } else { 662 - log.Println("auth: DNS challenge") 671 + logc.Println(r.Context(), "auth: DNS challenge") 663 672 return auth, nil 664 673 } 665 674 }
+5
src/backend.go
··· 11 11 ) 12 12 13 13 var ErrObjectNotFound = errors.New("not found") 14 + var ErrDomainFrozen = errors.New("domain administratively frozen") 14 15 15 16 func splitBlobName(name string) []string { 16 17 algo, hash, found := strings.Cut(name, "-") ··· 76 77 77 78 // Creates a domain. This allows us to start serving content for the domain. 78 79 CreateDomain(ctx context.Context, domain string) error 80 + 81 + // Freeze or thaw a domain. This allows a site to be administratively locked, e.g. if it 82 + // is discovered serving abusive content. 83 + FreezeDomain(ctx context.Context, domain string, freeze bool) error 79 84 } 80 85 81 86 func CreateBackend(config *StorageConfig) (backend Backend, err error) {
+38 -1
src/backend_fs.go
··· 208 208 return nil 209 209 } 210 210 211 + func domainFrozenMarkerName(domain string) string { 212 + return filepath.Join(domain, ".frozen") 213 + } 214 + 215 + func (fs *FSBackend) checkDomainFrozen(ctx context.Context, domain string) error { 216 + if _, err := fs.siteRoot.Stat(domainFrozenMarkerName(domain)); err == nil { 217 + return ErrDomainFrozen 218 + } else if !errors.Is(err, os.ErrNotExist) { 219 + return fmt.Errorf("stat: %w", err) 220 + } else { 221 + return nil 222 + } 223 + } 224 + 211 225 func (fs *FSBackend) CommitManifest(ctx context.Context, name string, manifest *Manifest) error { 226 + domain := filepath.Dir(name) 227 + if err := fs.checkDomainFrozen(ctx, domain); err != nil { 228 + return err 229 + } 230 + 212 231 manifestData := EncodeManifest(manifest) 213 232 manifestHashName := stagedManifestName(manifestData) 214 233 ··· 216 235 return fmt.Errorf("manifest not staged") 217 236 } 218 237 219 - if err := fs.siteRoot.MkdirAll(filepath.Dir(name), 0o755); err != nil { 238 + if err := fs.siteRoot.MkdirAll(domain, 0o755); err != nil { 220 239 return fmt.Errorf("mkdir: %w", err) 221 240 } 222 241 ··· 228 247 } 229 248 230 249 func (fs *FSBackend) DeleteManifest(ctx context.Context, name string) error { 250 + domain := filepath.Dir(name) 251 + if err := fs.checkDomainFrozen(ctx, domain); err != nil { 252 + return err 253 + } 254 + 231 255 err := fs.siteRoot.Remove(name) 232 256 if errors.Is(err, os.ErrNotExist) { 233 257 return nil ··· 250 274 func (fs *FSBackend) CreateDomain(ctx context.Context, domain string) error { 251 275 return nil // no-op 252 276 } 277 + 278 + func (fs *FSBackend) FreezeDomain(ctx context.Context, domain string, freeze bool) error { 279 + if freeze { 280 + return fs.siteRoot.WriteFile(domainFrozenMarkerName(domain), []byte{}, 0o644) 281 + } else { 282 + err := fs.siteRoot.Remove(domainFrozenMarkerName(domain)) 283 + if errors.Is(err, os.ErrNotExist) { 284 + return nil 285 + } else { 286 + return err 287 + } 288 + } 289 
+ }
+116 -40
src/backend_s3.go
··· 6 6 "crypto/sha256" 7 7 "fmt" 8 8 "io" 9 - "log" 10 9 "net/http" 11 10 "path" 12 11 "strings" ··· 36 35 manifestCacheEvictionsCount prometheus.Counter 37 36 38 37 s3GetObjectDurationSeconds *prometheus.HistogramVec 39 - s3GetObjectErrorsCount *prometheus.CounterVec 38 + s3GetObjectResponseCount *prometheus.CounterVec 40 39 ) 41 40 42 41 func initS3BackendMetrics() { ··· 96 95 NativeHistogramMaxBucketNumber: 100, 97 96 NativeHistogramMinResetDuration: 10 * time.Minute, 98 97 }, []string{"kind"}) 99 - s3GetObjectErrorsCount = promauto.NewCounterVec(prometheus.CounterOpts{ 100 - Name: "git_pages_s3_get_object_errors_count", 101 - Help: "Count of s3:GetObject errors", 102 - }, []string{"object_kind"}) 98 + s3GetObjectResponseCount = promauto.NewCounterVec(prometheus.CounterOpts{ 99 + Name: "git_pages_s3_get_object_responses_count", 100 + Help: "Count of s3:GetObject responses", 101 + }, []string{"kind", "code"}) 103 102 } 104 103 105 104 // Blobs can be safely cached indefinitely. They only need to be evicted to preserve memory. 
··· 144 143 options.Weigher = weigher 145 144 } 146 145 if config.MaxStale != 0 { 147 - options.RefreshCalculator = otter.RefreshWriting[K, V](time.Duration(config.MaxAge)) 146 + options.RefreshCalculator = otter.RefreshWriting[K, V]( 147 + time.Duration(config.MaxAge)) 148 148 } 149 149 if config.MaxAge != 0 || config.MaxStale != 0 { 150 - options.ExpiryCalculator = otter.ExpiryWriting[K, V](time.Duration(config.MaxAge + config.MaxStale)) 150 + options.ExpiryCalculator = otter.ExpiryWriting[K, V]( 151 + time.Duration(config.MaxAge + config.MaxStale)) 151 152 } 152 153 return options 153 154 } ··· 170 171 if err != nil { 171 172 return nil, err 172 173 } else if !exists { 173 - log.Printf("s3: create bucket %s\n", bucket) 174 + logc.Printf(ctx, "s3: create bucket %s\n", bucket) 174 175 175 176 err = client.MakeBucket(ctx, bucket, 176 177 minio.MakeBucketOptions{Region: config.Region}) ··· 236 237 minio.StatObjectOptions{}) 237 238 if err != nil { 238 239 if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" { 239 - log.Printf("s3 feature %q: disabled", feature) 240 + logc.Printf(ctx, "s3 feature %q: disabled", feature) 240 241 return false, nil 241 242 } else { 242 243 return false, err 243 244 } 244 245 } 245 - log.Printf("s3 feature %q: enabled", feature) 246 + logc.Printf(ctx, "s3 feature %q: enabled", feature) 246 247 return true, nil 247 248 } 248 249 ··· 250 251 if err != nil { 251 252 err = fmt.Errorf("getting s3 backend feature %q: %w", feature, err) 252 253 ObserveError(err) 253 - log.Print(err) 254 + logc.Println(ctx, err) 254 255 return false 255 256 } 256 257 return isOn ··· 268 269 reader io.ReadSeeker, size uint64, mtime time.Time, err error, 269 270 ) { 270 271 loader := func(ctx context.Context, name string) (*CachedBlob, error) { 271 - log.Printf("s3: get blob %s\n", name) 272 + logc.Printf(ctx, "s3: get blob %s\n", name) 272 273 273 274 startTime := time.Now() 274 275 ··· 297 298 return &CachedBlob{data, stat.LastModified}, nil 298 
299 } 299 300 301 + observer := func(ctx context.Context, name string) (*CachedBlob, error) { 302 + cached, err := loader(ctx, name) 303 + var code = "OK" 304 + if resp, ok := err.(minio.ErrorResponse); ok { 305 + code = resp.Code 306 + } 307 + s3GetObjectResponseCount.With(prometheus.Labels{"kind": "blob", "code": code}).Inc() 308 + return cached, err 309 + } 310 + 300 311 var cached *CachedBlob 301 - cached, err = s3.blobCache.Get(ctx, name, otter.LoaderFunc[string, *CachedBlob](loader)) 312 + cached, err = s3.blobCache.Get(ctx, name, otter.LoaderFunc[string, *CachedBlob](observer)) 302 313 if err != nil { 303 314 if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" { 304 - s3GetObjectErrorsCount.With(prometheus.Labels{"object_kind": "blob"}).Inc() 305 315 err = fmt.Errorf("%w: %s", ErrObjectNotFound, errResp.Key) 306 316 } 307 317 } else { ··· 313 323 } 314 324 315 325 func (s3 *S3Backend) PutBlob(ctx context.Context, name string, data []byte) error { 316 - log.Printf("s3: put blob %s (%s)\n", name, datasize.ByteSize(len(data)).HumanReadable()) 326 + logc.Printf(ctx, "s3: put blob %s (%s)\n", name, datasize.ByteSize(len(data)).HumanReadable()) 317 327 318 328 _, err := s3.client.StatObject(ctx, s3.bucket, blobObjectName(name), 319 329 minio.GetObjectOptions{}) ··· 325 335 return err 326 336 } else { 327 337 ObserveData(ctx, "blob.status", "created") 328 - log.Printf("s3: put blob %s (created)\n", name) 338 + logc.Printf(ctx, "s3: put blob %s (created)\n", name) 329 339 return nil 330 340 } 331 341 } else { ··· 333 343 } 334 344 } else { 335 345 ObserveData(ctx, "blob.status", "exists") 336 - log.Printf("s3: put blob %s (exists)\n", name) 346 + logc.Printf(ctx, "s3: put blob %s (exists)\n", name) 337 347 blobsDedupedCount.Inc() 338 348 blobsDedupedBytes.Add(float64(len(data))) 339 349 return nil ··· 341 351 } 342 352 343 353 func (s3 *S3Backend) DeleteBlob(ctx context.Context, name string) error { 344 - log.Printf("s3: delete blob %s\n", name) 
354 + logc.Printf(ctx, "s3: delete blob %s\n", name) 345 355 346 356 return s3.client.RemoveObject(ctx, s3.bucket, blobObjectName(name), 347 357 minio.RemoveObjectOptions{}) ··· 356 366 } 357 367 358 368 func (s3 *S3Backend) ListManifests(ctx context.Context) (manifests []string, err error) { 359 - log.Print("s3: list manifests") 369 + logc.Print(ctx, "s3: list manifests") 360 370 361 371 ctx, cancel := context.WithCancel(ctx) 362 372 defer cancel() ··· 387 397 s3 *S3Backend 388 398 } 389 399 390 - func (l s3ManifestLoader) Load(ctx context.Context, key string) (*CachedManifest, error) { 400 + func (l s3ManifestLoader) Load( 401 + ctx context.Context, key string, 402 + ) ( 403 + *CachedManifest, error, 404 + ) { 391 405 return l.load(ctx, key, nil) 392 406 } 393 407 394 - func (l s3ManifestLoader) Reload(ctx context.Context, key string, oldValue *CachedManifest) (*CachedManifest, error) { 408 + func (l s3ManifestLoader) Reload( 409 + ctx context.Context, key string, oldValue *CachedManifest, 410 + ) ( 411 + *CachedManifest, error, 412 + ) { 395 413 return l.load(ctx, key, oldValue) 396 414 } 397 415 398 - func (l s3ManifestLoader) load(ctx context.Context, name string, oldManifest *CachedManifest) (*CachedManifest, error) { 416 + func (l s3ManifestLoader) load( 417 + ctx context.Context, name string, oldManifest *CachedManifest, 418 + ) ( 419 + *CachedManifest, error, 420 + ) { 421 + logc.Printf(ctx, "s3: get manifest %s\n", name) 422 + 399 423 loader := func() (*CachedManifest, error) { 400 - log.Printf("s3: get manifest %s\n", name) 401 - 402 - startTime := time.Now() 403 - 404 424 opts := minio.GetObjectOptions{} 405 425 if oldManifest != nil && oldManifest.etag != "" { 406 426 opts.SetMatchETagExcept(oldManifest.etag) ··· 426 446 if err != nil { 427 447 return nil, err 428 448 } 429 - 430 - s3GetObjectDurationSeconds. 431 - With(prometheus.Labels{"kind": "manifest"}). 
432 - Observe(time.Since(startTime).Seconds()) 433 449 434 450 return &CachedManifest{manifest, uint32(len(data)), stat.LastModified, stat.ETag, nil}, nil 435 451 } 436 452 437 - var cached *CachedManifest 438 - cached, err := loader() 453 + observer := func() (*CachedManifest, error) { 454 + cached, err := loader() 455 + var code = "OK" 456 + if resp, ok := err.(minio.ErrorResponse); ok { 457 + code = resp.Code 458 + } 459 + s3GetObjectResponseCount.With(prometheus.Labels{"kind": "manifest", "code": code}).Inc() 460 + return cached, err 461 + } 462 + 463 + startTime := time.Now() 464 + cached, err := observer() 465 + s3GetObjectDurationSeconds. 466 + With(prometheus.Labels{"kind": "manifest"}). 467 + Observe(time.Since(startTime).Seconds()) 468 + 439 469 if err != nil { 440 - if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" { 441 - s3GetObjectErrorsCount.With(prometheus.Labels{"object_kind": "manifest"}).Inc() 470 + errResp := minio.ToErrorResponse(err) 471 + if errResp.Code == "NoSuchKey" { 442 472 err = fmt.Errorf("%w: %s", ErrObjectNotFound, errResp.Key) 443 473 return &CachedManifest{nil, 1, time.Time{}, "", err}, nil 444 474 } else if errResp.StatusCode == http.StatusNotModified && oldManifest != nil { ··· 476 506 477 507 func (s3 *S3Backend) StageManifest(ctx context.Context, manifest *Manifest) error { 478 508 data := EncodeManifest(manifest) 479 - log.Printf("s3: stage manifest %x\n", sha256.Sum256(data)) 509 + logc.Printf(ctx, "s3: stage manifest %x\n", sha256.Sum256(data)) 480 510 481 511 _, err := s3.client.PutObject(ctx, s3.bucket, stagedManifestObjectName(data), 482 512 bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{}) 483 513 return err 484 514 } 485 515 516 + func domainFrozenObjectName(domain string) string { 517 + return manifestObjectName(fmt.Sprintf("%s/.frozen", domain)) 518 + } 519 + 520 + func (s3 *S3Backend) checkDomainFrozen(ctx context.Context, domain string) error { 521 + _, err := 
s3.client.GetObject(ctx, s3.bucket, domainFrozenObjectName(domain), 522 + minio.GetObjectOptions{}) 523 + if err == nil { 524 + return ErrDomainFrozen 525 + } else if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" { 526 + return nil 527 + } else { 528 + return err 529 + } 530 + } 531 + 486 532 func (s3 *S3Backend) CommitManifest(ctx context.Context, name string, manifest *Manifest) error { 487 533 data := EncodeManifest(manifest) 488 - log.Printf("s3: commit manifest %x -> %s", sha256.Sum256(data), name) 534 + logc.Printf(ctx, "s3: commit manifest %x -> %s", sha256.Sum256(data), name) 535 + 536 + _, domain, _ := strings.Cut(name, "/") 537 + if err := s3.checkDomainFrozen(ctx, domain); err != nil { 538 + return err 539 + } 489 540 490 541 // Remove staged object unconditionally (whether commit succeeded or failed), since 491 542 // the upper layer has to retry the complete operation anyway. ··· 504 555 } 505 556 506 557 func (s3 *S3Backend) DeleteManifest(ctx context.Context, name string) error { 507 - log.Printf("s3: delete manifest %s\n", name) 558 + logc.Printf(ctx, "s3: delete manifest %s\n", name) 559 + 560 + _, domain, _ := strings.Cut(name, "/") 561 + if err := s3.checkDomainFrozen(ctx, domain); err != nil { 562 + return err 563 + } 508 564 509 565 err := s3.client.RemoveObject(ctx, s3.bucket, manifestObjectName(name), 510 566 minio.RemoveObjectOptions{}) ··· 517 573 } 518 574 519 575 func (s3 *S3Backend) CheckDomain(ctx context.Context, domain string) (exists bool, err error) { 520 - log.Printf("s3: check domain %s\n", domain) 576 + logc.Printf(ctx, "s3: check domain %s\n", domain) 521 577 522 578 _, err = s3.client.StatObject(ctx, s3.bucket, domainCheckObjectName(domain), 523 579 minio.StatObjectOptions{}) ··· 548 604 } 549 605 550 606 func (s3 *S3Backend) CreateDomain(ctx context.Context, domain string) error { 551 - log.Printf("s3: create domain %s\n", domain) 607 + logc.Printf(ctx, "s3: create domain %s\n", domain) 552 608 553 609 _, 
err := s3.client.PutObject(ctx, s3.bucket, domainCheckObjectName(domain), 554 610 &bytes.Reader{}, 0, minio.PutObjectOptions{}) 555 611 return err 556 612 } 613 + 614 + func (s3 *S3Backend) FreezeDomain(ctx context.Context, domain string, freeze bool) error { 615 + if freeze { 616 + logc.Printf(ctx, "s3: freeze domain %s\n", domain) 617 + 618 + _, err := s3.client.PutObject(ctx, s3.bucket, domainFrozenObjectName(domain), 619 + &bytes.Reader{}, 0, minio.PutObjectOptions{}) 620 + return err 621 + } else { 622 + logc.Printf(ctx, "s3: thaw domain %s\n", domain) 623 + 624 + err := s3.client.RemoveObject(ctx, s3.bucket, domainFrozenObjectName(domain), 625 + minio.RemoveObjectOptions{}) 626 + if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" { 627 + return nil 628 + } else { 629 + return err 630 + } 631 + } 632 + }
+37 -30
src/caddy.go
··· 1 1 package git_pages 2 2 3 3 import ( 4 + "context" 4 5 "crypto/tls" 5 6 "fmt" 6 - "log" 7 7 "net" 8 8 "net/http" 9 9 "net/url" ··· 22 22 // this isn't really what git-pages is designed for, and object store accesses can cost money. 23 23 // [^1]: https://letsencrypt.org/2025/07/01/issuing-our-first-ip-address-certificate 24 24 if ip := net.ParseIP(domain); ip != nil { 25 - log.Println("caddy:", domain, 404, "(bare IP)") 25 + logc.Println(r.Context(), "caddy:", domain, 404, "(bare IP)") 26 26 w.WriteHeader(http.StatusNotFound) 27 27 return 28 28 } ··· 35 35 // Pages v2, which would under some circumstances return certificates with subjectAltName 36 36 // not valid for the SNI. Go's TLS stack makes `tls.Dial` return an error for these, 37 37 // thankfully making it unnecessary to examine X.509 certificates manually here.) 38 - for _, wildcardConfig := range config.Wildcard { 39 - if wildcardConfig.FallbackProxyTo == "" { 40 - continue 41 - } 42 - fallbackURL, err := url.Parse(wildcardConfig.FallbackProxyTo) 43 - if err != nil { 44 - continue 45 - } 46 - if fallbackURL.Scheme != "https" { 47 - continue 48 - } 49 - connectHost := fallbackURL.Host 50 - if fallbackURL.Port() != "" { 51 - connectHost += ":" + fallbackURL.Port() 52 - } else { 53 - connectHost += ":443" 54 - } 55 - log.Printf("caddy: check TLS %s", fallbackURL) 56 - connection, err := tls.Dial("tcp", connectHost, &tls.Config{ServerName: domain}) 57 - if err != nil { 58 - continue 59 - } 60 - connection.Close() 61 - found = true 62 - break 38 + found, err = tryDialWithSNI(r.Context(), domain) 39 + if err != nil { 40 + logc.Printf(r.Context(), "caddy err: check SNI: %s\n", err) 63 41 } 64 42 } 65 43 66 44 if found { 67 - log.Println("caddy:", domain, 200) 45 + logc.Println(r.Context(), "caddy:", domain, 200) 68 46 w.WriteHeader(http.StatusOK) 69 47 } else if err == nil { 70 - log.Println("caddy:", domain, 404) 48 + logc.Println(r.Context(), "caddy:", domain, 404) 71 49 w.WriteHeader(http.StatusNotFound) 
72 50 } else { 73 - log.Println("caddy:", domain, 500) 51 + logc.Println(r.Context(), "caddy:", domain, 500) 74 52 w.WriteHeader(http.StatusInternalServerError) 75 53 fmt.Fprintln(w, err) 76 54 } 77 55 } 56 + 57 + func tryDialWithSNI(ctx context.Context, domain string) (bool, error) { 58 + if config.Fallback.ProxyTo == "" { 59 + return false, nil 60 + } 61 + 62 + fallbackURL, err := url.Parse(config.Fallback.ProxyTo) 63 + if err != nil { 64 + return false, err 65 + } 66 + if fallbackURL.Scheme != "https" { 67 + return false, nil 68 + } 69 + 70 + connectHost := fallbackURL.Host 71 + if fallbackURL.Port() != "" { 72 + connectHost += ":" + fallbackURL.Port() 73 + } else { 74 + connectHost += ":443" 75 + } 76 + 77 + logc.Printf(ctx, "caddy: check TLS %s", fallbackURL) 78 + connection, err := tls.Dial("tcp", connectHost, &tls.Config{ServerName: domain}) 79 + if err != nil { 80 + return false, err 81 + } 82 + connection.Close() 83 + return true, nil 84 + }
+6 -6
src/collect.go
··· 22 22 23 23 appendFile := func(header *tar.Header, data []byte, transform Transform) (err error) { 24 24 switch transform { 25 - case Transform_None: 26 - case Transform_Zstandard: 25 + case Transform_Identity: 26 + case Transform_Zstd: 27 27 data, err = zstdDecoder.DecodeAll(data, []byte{}) 28 28 if err != nil { 29 29 return err ··· 53 53 header.Typeflag = tar.TypeDir 54 54 header.Mode = 0755 55 55 header.ModTime = manifestMtime 56 - err = appendFile(&header, nil, Transform_None) 56 + err = appendFile(&header, nil, Transform_Identity) 57 57 58 58 case Type_InlineFile: 59 59 header.Typeflag = tar.TypeReg ··· 79 79 header.Typeflag = tar.TypeSymlink 80 80 header.Mode = 0644 81 81 header.ModTime = manifestMtime 82 - err = appendFile(&header, entry.GetData(), Transform_None) 82 + err = appendFile(&header, entry.GetData(), Transform_Identity) 83 83 84 84 default: 85 85 return fmt.Errorf("unexpected entry type") ··· 95 95 Typeflag: tar.TypeReg, 96 96 Mode: 0644, 97 97 ModTime: manifestMtime, 98 - }, []byte(redirects), Transform_None) 98 + }, []byte(redirects), Transform_Identity) 99 99 if err != nil { 100 100 return err 101 101 } ··· 107 107 Typeflag: tar.TypeReg, 108 108 Mode: 0644, 109 109 ModTime: manifestMtime, 110 - }, []byte(headers), Transform_None) 110 + }, []byte(headers), Transform_Identity) 111 111 if err != nil { 112 112 return err 113 113 }
+12 -7
src/config.go
··· 38 38 Insecure bool `toml:"-" env:"insecure"` 39 39 Features []string `toml:"features"` 40 40 LogFormat string `toml:"log-format" default:"text"` 41 + LogLevel string `toml:"log-level" default:"info"` 41 42 Server ServerConfig `toml:"server"` 42 43 Wildcard []WildcardConfig `toml:"wildcard"` 44 + Fallback FallbackConfig `toml:"fallback"` 43 45 Storage StorageConfig `toml:"storage"` 44 46 Limits LimitsConfig `toml:"limits"` 45 47 Observability ObservabilityConfig `toml:"observability"` ··· 52 54 } 53 55 54 56 type WildcardConfig struct { 55 - Domain string `toml:"domain"` 56 - CloneURL string `toml:"clone-url"` 57 - IndexRepos []string `toml:"index-repos" default:"[]"` 58 - IndexRepoBranch string `toml:"index-repo-branch" default:"pages"` 59 - Authorization string `toml:"authorization"` 60 - FallbackProxyTo string `toml:"fallback-proxy-to"` 61 - FallbackInsecure bool `toml:"fallback-insecure"` 57 + Domain string `toml:"domain"` 58 + CloneURL string `toml:"clone-url"` 59 + IndexRepos []string `toml:"index-repos" default:"[]"` 60 + IndexRepoBranch string `toml:"index-repo-branch" default:"pages"` 61 + Authorization string `toml:"authorization"` 62 + } 63 + 64 + type FallbackConfig struct { 65 + ProxyTo string `toml:"proxy-to"` 66 + Insecure bool `toml:"insecure"` 62 67 } 63 68 64 69 type CacheConfig struct {
+9 -3
src/extract.go
··· 59 59 } 60 60 61 61 manifestEntry.Type = Type_InlineFile.Enum() 62 - manifestEntry.Size = proto.Int64(header.Size) 63 62 manifestEntry.Data = fileData 63 + manifestEntry.Transform = Transform_Identity.Enum() 64 + manifestEntry.OriginalSize = proto.Int64(header.Size) 65 + manifestEntry.CompressedSize = proto.Int64(header.Size) 64 66 65 67 case tar.TypeSymlink: 66 68 manifestEntry.Type = Type_Symlink.Enum() 67 - manifestEntry.Size = proto.Int64(header.Size) 68 69 manifestEntry.Data = []byte(header.Linkname) 70 + manifestEntry.Transform = Transform_Identity.Enum() 71 + manifestEntry.OriginalSize = proto.Int64(header.Size) 72 + manifestEntry.CompressedSize = proto.Int64(header.Size) 69 73 70 74 case tar.TypeDir: 71 75 manifestEntry.Type = Type_Directory.Enum() ··· 150 154 } else { 151 155 manifestEntry.Type = Type_InlineFile.Enum() 152 156 } 153 - manifestEntry.Size = proto.Int64(int64(file.UncompressedSize64)) 154 157 manifestEntry.Data = fileData 158 + manifestEntry.Transform = Transform_Identity.Enum() 159 + manifestEntry.OriginalSize = proto.Int64(int64(file.UncompressedSize64)) 160 + manifestEntry.CompressedSize = proto.Int64(int64(file.UncompressedSize64)) 155 161 } else { 156 162 manifestEntry.Type = Type_Directory.Enum() 157 163 }
+176 -42
src/fetch.go
··· 2 2 3 3 import ( 4 4 "context" 5 + "errors" 5 6 "fmt" 6 7 "io" 8 + "maps" 9 + "net/url" 7 10 "os" 11 + "slices" 8 12 13 + "github.com/c2h5oh/datasize" 9 14 "github.com/go-git/go-billy/v6/osfs" 10 15 "github.com/go-git/go-git/v6" 11 16 "github.com/go-git/go-git/v6/plumbing" 12 17 "github.com/go-git/go-git/v6/plumbing/cache" 13 18 "github.com/go-git/go-git/v6/plumbing/filemode" 14 19 "github.com/go-git/go-git/v6/plumbing/object" 20 + "github.com/go-git/go-git/v6/plumbing/protocol/packp" 21 + "github.com/go-git/go-git/v6/plumbing/transport" 15 22 "github.com/go-git/go-git/v6/storage/filesystem" 16 23 "google.golang.org/protobuf/proto" 17 24 ) 18 25 19 - func FetchRepository(ctx context.Context, repoURL string, branch string) (*Manifest, error) { 26 + func FetchRepository( 27 + ctx context.Context, repoURL string, branch string, oldManifest *Manifest, 28 + ) ( 29 + *Manifest, error, 30 + ) { 20 31 span, ctx := ObserveFunction(ctx, "FetchRepository", 21 32 "git.repository", repoURL, "git.branch", branch) 22 33 defer span.Finish() 23 34 24 - baseDir, err := os.MkdirTemp("", "fetchRepo") 35 + parsedRepoURL, err := url.Parse(repoURL) 25 36 if err != nil { 26 - return nil, fmt.Errorf("mkdtemp: %w", err) 37 + return nil, fmt.Errorf("URL parse: %w", err) 27 38 } 28 - defer os.RemoveAll(baseDir) 39 + 40 + var repo *git.Repository 41 + var storer *filesystem.Storage 42 + for _, filter := range []packp.Filter{packp.FilterBlobNone(), packp.Filter("")} { 43 + var tempDir string 44 + tempDir, err = os.MkdirTemp("", "fetchRepo") 45 + if err != nil { 46 + return nil, fmt.Errorf("mkdtemp: %w", err) 47 + } 48 + defer os.RemoveAll(tempDir) 29 49 30 - fs := osfs.New(baseDir, osfs.WithBoundOS()) 31 - cache := cache.NewObjectLRUDefault() 32 - storer := filesystem.NewStorageWithOptions(fs, cache, filesystem.Options{ 33 - ExclusiveAccess: true, 34 - LargeObjectThreshold: int64(config.Limits.GitLargeObjectThreshold.Bytes()), 35 - }) 36 - repo, err := git.CloneContext(ctx, storer, nil, 
&git.CloneOptions{ 37 - Bare: true, 38 - URL: repoURL, 39 - ReferenceName: plumbing.ReferenceName(branch), 40 - SingleBranch: true, 41 - Depth: 1, 42 - Tags: git.NoTags, 43 - }) 50 + storer = filesystem.NewStorageWithOptions( 51 + osfs.New(tempDir, osfs.WithBoundOS()), 52 + cache.NewObjectLRUDefault(), 53 + filesystem.Options{ 54 + ExclusiveAccess: true, 55 + LargeObjectThreshold: int64(config.Limits.GitLargeObjectThreshold.Bytes()), 56 + }, 57 + ) 58 + repo, err = git.CloneContext(ctx, storer, nil, &git.CloneOptions{ 59 + Bare: true, 60 + URL: repoURL, 61 + ReferenceName: plumbing.ReferenceName(branch), 62 + SingleBranch: true, 63 + Depth: 1, 64 + Tags: git.NoTags, 65 + Filter: filter, 66 + }) 67 + if err != nil { 68 + logc.Printf(ctx, "clone err: %s %s filter=%q\n", repoURL, branch, filter) 69 + continue 70 + } else { 71 + logc.Printf(ctx, "clone ok: %s %s filter=%q\n", repoURL, branch, filter) 72 + break 73 + } 74 + } 44 75 if err != nil { 45 76 return nil, fmt.Errorf("git clone: %w", err) 46 77 } ··· 63 94 walker := object.NewTreeWalker(tree, true, make(map[plumbing.Hash]bool)) 64 95 defer walker.Close() 65 96 66 - manifest := Manifest{ 97 + // Create a manifest for the tree object corresponding to `branch`, but do not populate it 98 + // with data yet; instead, record all the blobs we'll need. 
99 + manifest := &Manifest{ 67 100 RepoUrl: proto.String(repoURL), 68 101 Branch: proto.String(branch), 69 102 Commit: proto.String(ref.Hash().String()), ··· 71 104 "": {Type: Type_Directory.Enum()}, 72 105 }, 73 106 } 107 + blobsNeeded := map[plumbing.Hash]*Entry{} 74 108 for { 75 109 name, entry, err := walker.Next() 76 110 if err == io.EOF { ··· 78 112 } else if err != nil { 79 113 return nil, fmt.Errorf("git walker: %w", err) 80 114 } else { 81 - manifestEntry := Entry{} 82 - if entry.Mode.IsFile() { 83 - blob, err := repo.BlobObject(entry.Hash) 84 - if err != nil { 85 - return nil, fmt.Errorf("git blob %s: %w", name, err) 86 - } 87 - 88 - reader, err := blob.Reader() 89 - if err != nil { 90 - return nil, fmt.Errorf("git blob open: %w", err) 91 - } 92 - defer reader.Close() 93 - 94 - data, err := io.ReadAll(reader) 95 - if err != nil { 96 - return nil, fmt.Errorf("git blob read: %w", err) 97 - } 98 - 115 + manifestEntry := &Entry{} 116 + if existingManifestEntry, found := blobsNeeded[entry.Hash]; found { 117 + // If the same blob is present twice, we only need to fetch it once (and both 118 + // instances will alias the same `Entry` structure in the manifest). 
119 + manifestEntry = existingManifestEntry 120 + } else if entry.Mode.IsFile() { 121 + blobsNeeded[entry.Hash] = manifestEntry 99 122 if entry.Mode == filemode.Symlink { 100 123 manifestEntry.Type = Type_Symlink.Enum() 101 124 } else { 102 125 manifestEntry.Type = Type_InlineFile.Enum() 103 126 } 104 - manifestEntry.Size = proto.Int64(blob.Size) 105 - manifestEntry.Data = data 127 + manifestEntry.GitHash = proto.String(entry.Hash.String()) 106 128 } else if entry.Mode == filemode.Dir { 107 129 manifestEntry.Type = Type_Directory.Enum() 108 130 } else { 109 - AddProblem(&manifest, name, "unsupported mode %#o", entry.Mode) 131 + AddProblem(manifest, name, "unsupported mode %#o", entry.Mode) 110 132 continue 111 133 } 112 - manifest.Contents[name] = &manifestEntry 134 + manifest.Contents[name] = manifestEntry 113 135 } 114 136 } 115 - return &manifest, nil 137 + 138 + // Collect checkout statistics. 139 + var dataBytesFromOldManifest int64 140 + var dataBytesFromGitCheckout int64 141 + var dataBytesFromGitTransport int64 142 + 143 + // First, see if we can extract the blobs from the old manifest. This is the preferred option 144 + // because it avoids both network transfers and recompression. Note that we do not request 145 + // blobs from the backend under any circumstances to avoid creating a blob existence oracle. 146 + for _, oldManifestEntry := range oldManifest.GetContents() { 147 + if hash, ok := plumbing.FromHex(oldManifestEntry.GetGitHash()); ok { 148 + if manifestEntry, found := blobsNeeded[hash]; found { 149 + manifestEntry.Reset() 150 + proto.Merge(manifestEntry, oldManifestEntry) 151 + dataBytesFromOldManifest += oldManifestEntry.GetOriginalSize() 152 + delete(blobsNeeded, hash) 153 + } 154 + } 155 + } 156 + 157 + // Second, fill the manifest entries with data from the git checkout we just made. 158 + // This will only succeed if a `blob:none` filter isn't supported and we got a full 159 + // clone despite asking for a partial clone. 
160 + for hash, manifestEntry := range blobsNeeded { 161 + if err := readGitBlob(repo, hash, manifestEntry); err == nil { 162 + dataBytesFromGitCheckout += manifestEntry.GetOriginalSize() 163 + delete(blobsNeeded, hash) 164 + } 165 + } 166 + 167 + // Third, if we still don't have data for some manifest entries, re-establish a git transport 168 + // and request the missing blobs (only) from the server. 169 + if len(blobsNeeded) > 0 { 170 + client, err := transport.Get(parsedRepoURL.Scheme) 171 + if err != nil { 172 + return nil, fmt.Errorf("git transport: %w", err) 173 + } 174 + 175 + endpoint, err := transport.NewEndpoint(repoURL) 176 + if err != nil { 177 + return nil, fmt.Errorf("git endpoint: %w", err) 178 + } 179 + 180 + session, err := client.NewSession(storer, endpoint, nil) 181 + if err != nil { 182 + return nil, fmt.Errorf("git session: %w", err) 183 + } 184 + 185 + connection, err := session.Handshake(ctx, transport.UploadPackService) 186 + if err != nil { 187 + return nil, fmt.Errorf("git connection: %w", err) 188 + } 189 + defer connection.Close() 190 + 191 + if err := connection.Fetch(ctx, &transport.FetchRequest{ 192 + Wants: slices.Collect(maps.Keys(blobsNeeded)), 193 + Depth: 1, 194 + // Git CLI behaves like this, even if the wants above are references to blobs. 195 + Filter: "blob:none", 196 + }); err != nil && !errors.Is(err, transport.ErrNoChange) { 197 + return nil, fmt.Errorf("git blob fetch request: %w", err) 198 + } 199 + 200 + // All remaining blobs should now be available. 
201 + for hash, manifestEntry := range blobsNeeded { 202 + if err := readGitBlob(repo, hash, manifestEntry); err != nil { 203 + return nil, err 204 + } 205 + dataBytesFromGitTransport += manifestEntry.GetOriginalSize() 206 + delete(blobsNeeded, hash) 207 + } 208 + } 209 + 210 + logc.Printf(ctx, 211 + "fetch: %s from old manifest, %s from git checkout, %s from git transport\n", 212 + datasize.ByteSize(dataBytesFromOldManifest).HR(), 213 + datasize.ByteSize(dataBytesFromGitCheckout).HR(), 214 + datasize.ByteSize(dataBytesFromGitTransport).HR(), 215 + ) 216 + 217 + return manifest, nil 218 + } 219 + 220 + func readGitBlob(repo *git.Repository, hash plumbing.Hash, entry *Entry) error { 221 + blob, err := repo.BlobObject(hash) 222 + if err != nil { 223 + return fmt.Errorf("git blob %s: %w", hash, err) 224 + } 225 + 226 + reader, err := blob.Reader() 227 + if err != nil { 228 + return fmt.Errorf("git blob open: %w", err) 229 + } 230 + defer reader.Close() 231 + 232 + data, err := io.ReadAll(reader) 233 + if err != nil { 234 + return fmt.Errorf("git blob read: %w", err) 235 + } 236 + 237 + switch entry.GetType() { 238 + case Type_InlineFile, Type_Symlink: 239 + // okay 240 + default: 241 + panic(fmt.Errorf("readGitBlob encountered invalid entry: %v, %v", 242 + entry.GetType(), entry.GetTransform())) 243 + } 244 + 245 + entry.Data = data 246 + entry.Transform = Transform_Identity.Enum() 247 + entry.OriginalSize = proto.Int64(blob.Size) 248 + entry.CompressedSize = proto.Int64(blob.Size) 249 + return nil 116 250 }
+54
src/log.go
··· 1 + package git_pages 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "log/slog" 7 + "os" 8 + "runtime" 9 + "strings" 10 + "time" 11 + ) 12 + 13 + var logc slogWithCtx 14 + 15 + type slogWithCtx struct{} 16 + 17 + func (l slogWithCtx) log(ctx context.Context, level slog.Level, msg string) { 18 + if ctx == nil { 19 + ctx = context.Background() 20 + } 21 + logger := slog.Default() 22 + if !logger.Enabled(ctx, level) { 23 + return 24 + } 25 + 26 + var pcs [1]uintptr 27 + // skip [runtime.Callers, this method, method calling this method] 28 + runtime.Callers(3, pcs[:]) 29 + 30 + record := slog.NewRecord(time.Now(), level, strings.TrimRight(msg, "\n"), pcs[0]) 31 + logger.Handler().Handle(ctx, record) 32 + } 33 + 34 + func (l slogWithCtx) Print(ctx context.Context, v ...any) { 35 + l.log(ctx, slog.LevelInfo, fmt.Sprint(v...)) 36 + } 37 + 38 + func (l slogWithCtx) Printf(ctx context.Context, format string, v ...any) { 39 + l.log(ctx, slog.LevelInfo, fmt.Sprintf(format, v...)) 40 + } 41 + 42 + func (l slogWithCtx) Println(ctx context.Context, v ...any) { 43 + l.log(ctx, slog.LevelInfo, fmt.Sprintln(v...)) 44 + } 45 + 46 + func (l slogWithCtx) Fatalf(ctx context.Context, format string, v ...any) { 47 + l.log(ctx, slog.LevelError, fmt.Sprintf(format, v...)) 48 + os.Exit(1) 49 + } 50 + 51 + func (l slogWithCtx) Fatalln(ctx context.Context, v ...any) { 52 + l.log(ctx, slog.LevelError, fmt.Sprintln(v...)) 53 + os.Exit(1) 54 + }
+138 -63
src/main.go
··· 2 2 3 3 import ( 4 4 "context" 5 + "crypto/tls" 5 6 "errors" 6 7 "flag" 7 8 "fmt" ··· 10 11 "log/slog" 11 12 "net" 12 13 "net/http" 14 + "net/http/httputil" 13 15 "net/url" 14 16 "os" 15 17 "runtime/debug" ··· 22 24 23 25 var config *Config 24 26 var wildcards []*WildcardPattern 27 + var fallback http.Handler 25 28 var backend Backend 26 29 27 - func configureFeatures() (err error) { 30 + func configureFeatures(ctx context.Context) (err error) { 28 31 if len(config.Features) > 0 { 29 - log.Println("features:", strings.Join(config.Features, ", ")) 32 + logc.Println(ctx, "features:", strings.Join(config.Features, ", ")) 30 33 } 31 34 return 32 35 } 33 36 34 - func configureMemLimit() (err error) { 37 + func configureMemLimit(ctx context.Context) (err error) { 35 38 // Avoid being OOM killed by not garbage collecting early enough. 36 39 memlimitBefore := datasize.ByteSize(debug.SetMemoryLimit(-1)) 37 40 automemlimit.SetGoMemLimitWithOpts( ··· 46 49 ) 47 50 memlimitAfter := datasize.ByteSize(debug.SetMemoryLimit(-1)) 48 51 if memlimitBefore == memlimitAfter { 49 - log.Println("memlimit: now", memlimitBefore.HR()) 52 + logc.Println(ctx, "memlimit: now", memlimitBefore.HR()) 50 53 } else { 51 - log.Println("memlimit: was", memlimitBefore.HR(), "now", memlimitAfter.HR()) 54 + logc.Println(ctx, "memlimit: was", memlimitBefore.HR(), "now", memlimitAfter.HR()) 52 55 } 53 56 return 54 57 } 55 58 56 - func configureWildcards() (err error) { 59 + func configureWildcards(_ context.Context) (err error) { 57 60 newWildcards, err := TranslateWildcards(config.Wildcard) 58 61 if err != nil { 59 62 return err ··· 63 66 } 64 67 } 65 68 66 - func listen(name string, listen string) net.Listener { 69 + func configureFallback(_ context.Context) (err error) { 70 + if config.Fallback.ProxyTo != "" { 71 + var fallbackURL *url.URL 72 + fallbackURL, err = url.Parse(config.Fallback.ProxyTo) 73 + if err != nil { 74 + err = fmt.Errorf("fallback: %w", err) 75 + return 76 + } 77 + 78 + fallback 
= &httputil.ReverseProxy{ 79 + Rewrite: func(r *httputil.ProxyRequest) { 80 + r.SetURL(fallbackURL) 81 + r.Out.Host = r.In.Host 82 + r.Out.Header["X-Forwarded-For"] = r.In.Header["X-Forwarded-For"] 83 + }, 84 + Transport: &http.Transport{ 85 + TLSClientConfig: &tls.Config{ 86 + InsecureSkipVerify: config.Fallback.Insecure, 87 + }, 88 + }, 89 + } 90 + } 91 + return 92 + } 93 + 94 + func listen(ctx context.Context, name string, listen string) net.Listener { 67 95 if listen == "-" { 68 96 return nil 69 97 } 70 98 71 99 protocol, address, ok := strings.Cut(listen, "/") 72 100 if !ok { 73 - log.Fatalf("%s: %s: malformed endpoint", name, listen) 101 + logc.Fatalf(ctx, "%s: %s: malformed endpoint", name, listen) 74 102 } 75 103 76 104 listener, err := net.Listen(protocol, address) 77 105 if err != nil { 78 - log.Fatalf("%s: %s\n", name, err) 106 + logc.Fatalf(ctx, "%s: %s\n", name, err) 79 107 } 80 108 81 109 return listener ··· 85 113 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 86 114 defer func() { 87 115 if err := recover(); err != nil { 88 - log.Printf("panic: %s %s %s: %s\n%s", 116 + logc.Printf(r.Context(), "panic: %s %s %s: %s\n%s", 89 117 r.Method, r.Host, r.URL.Path, err, string(debug.Stack())) 90 118 http.Error(w, 91 119 fmt.Sprintf("internal server error: %s", err), ··· 97 125 }) 98 126 } 99 127 100 - func serve(listener net.Listener, handler http.Handler) { 128 + func serve(ctx context.Context, listener net.Listener, handler http.Handler) { 101 129 if listener != nil { 102 130 handler = panicHandler(handler) 103 131 ··· 107 135 if config.Feature("serve-h2c") { 108 136 server.Protocols.SetUnencryptedHTTP2(true) 109 137 } 110 - log.Fatalln(server.Serve(listener)) 138 + logc.Fatalln(ctx, server.Serve(listener)) 111 139 } 112 140 } 113 141 ··· 118 146 case 1: 119 147 return arg 120 148 default: 121 - log.Fatalf("webroot argument must be either 'domain.tld' or 'domain.tld/dir") 149 + logc.Fatalln(context.Background(), 150 + "webroot 
argument must be either 'domain.tld' or 'domain.tld/dir") 122 151 return "" 123 152 } 124 153 } ··· 130 159 } else { 131 160 writer, err = os.Create(flag.Arg(0)) 132 161 if err != nil { 133 - log.Fatalln(err) 162 + logc.Fatalln(context.Background(), err) 134 163 } 135 164 } 136 165 return ··· 141 170 fmt.Fprintf(os.Stderr, "(server) "+ 142 171 "git-pages [-config <file>|-no-config]\n") 143 172 fmt.Fprintf(os.Stderr, "(admin) "+ 144 - "git-pages {-run-migration <name>}\n") 173 + "git-pages {-run-migration <name>|-freeze-domain <domain>|-unfreeze-domain <domain>}\n") 145 174 fmt.Fprintf(os.Stderr, "(info) "+ 146 175 "git-pages {-print-config-env-vars|-print-config}\n") 147 176 fmt.Fprintf(os.Stderr, "(cli) "+ ··· 150 179 } 151 180 152 181 func Main() { 182 + ctx := context.Background() 183 + 153 184 flag.Usage = usage 154 185 printConfigEnvVars := flag.Bool("print-config-env-vars", false, 155 186 "print every recognized configuration environment variable and exit") ··· 169 200 "write archive for `site` (either 'domain.tld' or 'domain.tld/dir') in tar format") 170 201 updateSite := flag.String("update-site", "", 171 202 "update `site` (either 'domain.tld' or 'domain.tld/dir') from archive or repository URL") 203 + freezeDomain := flag.String("freeze-domain", "", 204 + "prevent any site uploads to a given `domain`") 205 + unfreezeDomain := flag.String("unfreeze-domain", "", 206 + "allow site uploads to a `domain` again after it has been frozen") 172 207 flag.Parse() 173 208 174 209 var cliOperations int 210 + if *runMigration != "" { 211 + cliOperations += 1 212 + } 175 213 if *getBlob != "" { 176 214 cliOperations += 1 177 215 } ··· 181 219 if *getArchive != "" { 182 220 cliOperations += 1 183 221 } 222 + if *updateSite != "" { 223 + cliOperations += 1 224 + } 225 + if *freezeDomain != "" { 226 + cliOperations += 1 227 + } 228 + if *unfreezeDomain != "" { 229 + cliOperations += 1 230 + } 184 231 if cliOperations > 1 { 185 - log.Fatalln("-get-blob, -get-manifest, and 
-get-archive are mutually exclusive") 232 + logc.Fatalln(ctx, "-get-blob, -get-manifest, -get-archive, -update-site, -freeze, and -unfreeze are mutually exclusive") 186 233 } 187 234 188 235 if *configTomlPath != "" && *noConfig { 189 - log.Fatalln("-no-config and -config are mutually exclusive") 236 + logc.Fatalln(ctx, "-no-config and -config are mutually exclusive") 190 237 } 191 238 192 239 if *printConfigEnvVars { ··· 199 246 *configTomlPath = "config.toml" 200 247 } 201 248 if config, err = Configure(*configTomlPath); err != nil { 202 - log.Fatalln("config:", err) 249 + logc.Fatalln(ctx, "config:", err) 203 250 } 204 251 205 252 if *printConfig { ··· 211 258 defer FiniObservability() 212 259 213 260 if err = errors.Join( 214 - configureFeatures(), 215 - configureMemLimit(), 216 - configureWildcards(), 261 + configureFeatures(ctx), 262 + configureMemLimit(ctx), 263 + configureWildcards(ctx), 264 + configureFallback(ctx), 217 265 ); err != nil { 218 - log.Fatalln(err) 266 + logc.Fatalln(ctx, err) 219 267 } 220 268 221 269 switch { 222 270 case *runMigration != "": 223 271 if backend, err = CreateBackend(&config.Storage); err != nil { 224 - log.Fatalln(err) 272 + logc.Fatalln(ctx, err) 225 273 } 226 274 227 - if err := RunMigration(context.Background(), *runMigration); err != nil { 228 - log.Fatalln(err) 275 + if err := RunMigration(ctx, *runMigration); err != nil { 276 + logc.Fatalln(ctx, err) 229 277 } 230 278 231 279 case *getBlob != "": 232 280 if backend, err = CreateBackend(&config.Storage); err != nil { 233 - log.Fatalln(err) 281 + logc.Fatalln(ctx, err) 234 282 } 235 283 236 - reader, _, _, err := backend.GetBlob(context.Background(), *getBlob) 284 + reader, _, _, err := backend.GetBlob(ctx, *getBlob) 237 285 if err != nil { 238 - log.Fatalln(err) 286 + logc.Fatalln(ctx, err) 239 287 } 240 288 io.Copy(fileOutputArg(), reader) 241 289 242 290 case *getManifest != "": 243 291 if backend, err = CreateBackend(&config.Storage); err != nil { 244 - 
log.Fatalln(err) 292 + logc.Fatalln(ctx, err) 245 293 } 246 294 247 295 webRoot := webRootArg(*getManifest) 248 - manifest, _, err := backend.GetManifest(context.Background(), webRoot, GetManifestOptions{}) 296 + manifest, _, err := backend.GetManifest(ctx, webRoot, GetManifestOptions{}) 249 297 if err != nil { 250 - log.Fatalln(err) 298 + logc.Fatalln(ctx, err) 251 299 } 252 300 fmt.Fprintln(fileOutputArg(), ManifestDebugJSON(manifest)) 253 301 254 302 case *getArchive != "": 255 303 if backend, err = CreateBackend(&config.Storage); err != nil { 256 - log.Fatalln(err) 304 + logc.Fatalln(ctx, err) 257 305 } 258 306 259 307 webRoot := webRootArg(*getArchive) 260 308 manifest, manifestMtime, err := 261 - backend.GetManifest(context.Background(), webRoot, GetManifestOptions{}) 309 + backend.GetManifest(ctx, webRoot, GetManifestOptions{}) 262 310 if err != nil { 263 - log.Fatalln(err) 311 + logc.Fatalln(ctx, err) 264 312 } 265 - CollectTar(context.Background(), fileOutputArg(), manifest, manifestMtime) 313 + CollectTar(ctx, fileOutputArg(), manifest, manifestMtime) 266 314 267 315 case *updateSite != "": 268 316 if backend, err = CreateBackend(&config.Storage); err != nil { 269 - log.Fatalln(err) 317 + logc.Fatalln(ctx, err) 270 318 } 271 319 272 320 if flag.NArg() != 1 { 273 - log.Fatalln("update source must be provided as the argument") 321 + logc.Fatalln(ctx, "update source must be provided as the argument") 274 322 } 275 323 276 324 sourceURL, err := url.Parse(flag.Arg(0)) 277 325 if err != nil { 278 - log.Fatalln(err) 326 + logc.Fatalln(ctx, err) 279 327 } 280 328 281 329 var result UpdateResult 282 330 if sourceURL.Scheme == "" { 283 331 file, err := os.Open(sourceURL.Path) 284 332 if err != nil { 285 - log.Fatalln(err) 333 + logc.Fatalln(ctx, err) 286 334 } 287 335 defer file.Close() 288 336 ··· 301 349 } 302 350 303 351 webRoot := webRootArg(*updateSite) 304 - result = UpdateFromArchive(context.Background(), webRoot, contentType, file) 352 + result = 
UpdateFromArchive(ctx, webRoot, contentType, file) 305 353 } else { 306 354 branch := "pages" 307 355 if sourceURL.Fragment != "" { ··· 309 357 } 310 358 311 359 webRoot := webRootArg(*updateSite) 312 - result = UpdateFromRepository(context.Background(), webRoot, sourceURL.String(), branch) 360 + result = UpdateFromRepository(ctx, webRoot, sourceURL.String(), branch) 313 361 } 314 362 315 363 switch result.outcome { 316 364 case UpdateError: 317 - log.Printf("error: %s\n", result.err) 365 + logc.Printf(ctx, "error: %s\n", result.err) 318 366 os.Exit(2) 319 367 case UpdateTimeout: 320 - log.Println("timeout") 368 + logc.Println(ctx, "timeout") 321 369 os.Exit(1) 322 370 case UpdateCreated: 323 - log.Println("created") 371 + logc.Println(ctx, "created") 324 372 case UpdateReplaced: 325 - log.Println("replaced") 373 + logc.Println(ctx, "replaced") 326 374 case UpdateDeleted: 327 - log.Println("deleted") 375 + logc.Println(ctx, "deleted") 328 376 case UpdateNoChange: 329 - log.Println("no-change") 377 + logc.Println(ctx, "no-change") 378 + } 379 + 380 + case *freezeDomain != "" || *unfreezeDomain != "": 381 + var domain string 382 + var freeze bool 383 + if *freezeDomain != "" { 384 + domain = *freezeDomain 385 + freeze = true 386 + } else { 387 + domain = *unfreezeDomain 388 + freeze = false 389 + } 390 + 391 + if backend, err = CreateBackend(&config.Storage); err != nil { 392 + logc.Fatalln(ctx, err) 393 + } 394 + 395 + if err = backend.FreezeDomain(ctx, domain, freeze); err != nil { 396 + logc.Fatalln(ctx, err) 397 + } 398 + if freeze { 399 + log.Println("frozen") 400 + } else { 401 + log.Println("thawed") 330 402 } 331 403 332 404 default: ··· 339 411 // The backend is not recreated (this is intentional as it allows preserving the cache). 
340 412 OnReload(func() { 341 413 if newConfig, err := Configure(*configTomlPath); err != nil { 342 - log.Println("config: reload err:", err) 414 + logc.Println(ctx, "config: reload err:", err) 343 415 } else { 344 416 // From https://go.dev/ref/mem: 345 417 // > A read r of a memory location x holding a value that is not larger than ··· 349 421 // > concurrent write. 350 422 config = newConfig 351 423 if err = errors.Join( 352 - configureFeatures(), 353 - configureMemLimit(), 354 - configureWildcards(), 424 + configureFeatures(ctx), 425 + configureMemLimit(ctx), 426 + configureWildcards(ctx), 427 + configureFallback(ctx), 355 428 ); err != nil { 356 429 // At this point the configuration is in an in-between, corrupted state, so 357 430 // the only reasonable choice is to crash. 358 - log.Fatalln("config: reload fail:", err) 431 + logc.Fatalln(ctx, "config: reload fail:", err) 359 432 } else { 360 - log.Println("config: reload ok") 433 + logc.Println(ctx, "config: reload ok") 361 434 } 362 435 } 363 436 }) ··· 366 439 // spends some time initializing (which the S3 backend does) a proxy like Caddy can race 367 440 // with git-pages on startup and return errors for requests that would have been served 368 441 // just 0.5s later. 
369 - pagesListener := listen("pages", config.Server.Pages) 370 - caddyListener := listen("caddy", config.Server.Caddy) 371 - metricsListener := listen("metrics", config.Server.Metrics) 442 + pagesListener := listen(ctx, "pages", config.Server.Pages) 443 + caddyListener := listen(ctx, "caddy", config.Server.Caddy) 444 + metricsListener := listen(ctx, "metrics", config.Server.Metrics) 372 445 373 446 if backend, err = CreateBackend(&config.Storage); err != nil { 374 - log.Fatalln(err) 447 + logc.Fatalln(ctx, err) 375 448 } 376 449 backend = NewObservedBackend(backend) 377 450 378 - go serve(pagesListener, ObserveHTTPHandler(http.HandlerFunc(ServePages))) 379 - go serve(caddyListener, ObserveHTTPHandler(http.HandlerFunc(ServeCaddy))) 380 - go serve(metricsListener, promhttp.Handler()) 451 + go serve(ctx, pagesListener, ObserveHTTPHandler(http.HandlerFunc(ServePages))) 452 + go serve(ctx, caddyListener, ObserveHTTPHandler(http.HandlerFunc(ServeCaddy))) 453 + go serve(ctx, metricsListener, promhttp.Handler()) 381 454 382 455 if config.Insecure { 383 - log.Println("serve: ready (INSECURE)") 456 + logc.Println(ctx, "serve: ready (INSECURE)") 384 457 } else { 385 - log.Println("serve: ready") 458 + logc.Println(ctx, "serve: ready") 386 459 } 387 - select {} 460 + 461 + WaitForInterrupt() 462 + logc.Println(ctx, "serve: exiting") 388 463 } 389 464 }
+44 -32
src/manifest.go
··· 8 8 "crypto/sha256" 9 9 "errors" 10 10 "fmt" 11 - "log" 12 11 "mime" 13 12 "net/http" 14 13 "path" ··· 145 144 for path, entry := range manifest.Contents { 146 145 if entry.GetType() == Type_Directory || entry.GetType() == Type_Symlink { 147 146 // no Content-Type 148 - } else if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_None { 147 + } else if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_Identity { 149 148 contentType := mime.TypeByExtension(filepath.Ext(path)) 150 149 if contentType == "" { 151 - contentType = http.DetectContentType(entry.Data[:512]) 150 + contentType = http.DetectContentType(entry.Data[:min(512, len(entry.Data))]) 152 151 } 153 152 entry.ContentType = proto.String(contentType) 154 - } else { 153 + } else if entry.GetContentType() == "" { 155 154 panic(fmt.Errorf("DetectContentType encountered invalid entry: %v, %v", 156 155 entry.GetType(), entry.GetTransform())) 157 156 } 158 157 } 159 158 } 160 159 161 - // The `clauspost/compress/zstd` package recommends reusing a compressor to avoid repeated 160 + // The `klauspost/compress/zstd` package recommends reusing a compressor to avoid repeated 162 161 // allocations of internal buffers. 
163 162 var zstdEncoder, _ = zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBetterCompression)) 164 163 ··· 167 166 span, _ := ObserveFunction(ctx, "CompressFiles") 168 167 defer span.Finish() 169 168 170 - var originalSize, compressedSize int64 169 + var originalSize int64 170 + var compressedSize int64 171 171 for _, entry := range manifest.Contents { 172 - if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_None { 173 - mtype := getMediaType(entry.GetContentType()) 174 - if strings.HasPrefix(mtype, "video/") || strings.HasPrefix(mtype, "audio/") { 172 + if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_Identity { 173 + mediaType := getMediaType(entry.GetContentType()) 174 + if strings.HasPrefix(mediaType, "video/") || strings.HasPrefix(mediaType, "audio/") { 175 175 continue 176 176 } 177 - originalSize += entry.GetSize() 178 - compressedData := zstdEncoder.EncodeAll(entry.GetData(), make([]byte, 0, entry.GetSize())) 179 - if len(compressedData) < int(*entry.Size) { 177 + compressedData := zstdEncoder.EncodeAll(entry.GetData(), 178 + make([]byte, 0, entry.GetOriginalSize())) 179 + if int64(len(compressedData)) < entry.GetOriginalSize() { 180 180 entry.Data = compressedData 181 - entry.Size = proto.Int64(int64(len(entry.Data))) 182 - entry.Transform = Transform_Zstandard.Enum() 181 + entry.Transform = Transform_Zstd.Enum() 182 + entry.CompressedSize = proto.Int64(int64(len(entry.Data))) 183 183 } 184 - compressedSize += entry.GetSize() 185 184 } 185 + originalSize += entry.GetOriginalSize() 186 + compressedSize += entry.GetCompressedSize() 186 187 } 187 188 manifest.OriginalSize = proto.Int64(originalSize) 188 189 manifest.CompressedSize = proto.Int64(compressedSize) 189 190 190 191 if originalSize != 0 { 191 192 spaceSaving := (float64(originalSize) - float64(compressedSize)) / float64(originalSize) 192 - log.Printf("compress: saved %.2f percent (%s to %s)", 193 + logc.Printf(ctx, "compress: saved %.2f percent 
(%s to %s)", 193 194 spaceSaving*100.0, 194 195 datasize.ByteSize(originalSize).HR(), 195 196 datasize.ByteSize(compressedSize).HR(), ··· 205 206 func PrepareManifest(ctx context.Context, manifest *Manifest) error { 206 207 // Parse Netlify-style `_redirects` 207 208 if err := ProcessRedirectsFile(manifest); err != nil { 208 - log.Printf("redirects err: %s\n", err) 209 + logc.Printf(ctx, "redirects err: %s\n", err) 209 210 } else if len(manifest.Redirects) > 0 { 210 - log.Printf("redirects ok: %d rules\n", len(manifest.Redirects)) 211 + logc.Printf(ctx, "redirects ok: %d rules\n", len(manifest.Redirects)) 211 212 } 212 213 213 214 // Parse Netlify-style `_headers` 214 215 if err := ProcessHeadersFile(manifest); err != nil { 215 - log.Printf("headers err: %s\n", err) 216 + logc.Printf(ctx, "headers err: %s\n", err) 216 217 } else if len(manifest.Headers) > 0 { 217 - log.Printf("headers ok: %d rules\n", len(manifest.Headers)) 218 + logc.Printf(ctx, "headers ok: %d rules\n", len(manifest.Headers)) 218 219 } 219 220 220 221 // Sniff content type like `http.ServeContent` ··· 247 248 CompressedSize: manifest.CompressedSize, 248 249 StoredSize: proto.Int64(0), 249 250 } 250 - extObjectSizes := make(map[string]int64) 251 251 for name, entry := range manifest.Contents { 252 252 cannotBeInlined := entry.GetType() == Type_InlineFile && 253 - entry.GetSize() > int64(config.Limits.MaxInlineFileSize.Bytes()) 253 + entry.GetCompressedSize() > int64(config.Limits.MaxInlineFileSize.Bytes()) 254 254 if cannotBeInlined { 255 255 dataHash := sha256.Sum256(entry.Data) 256 256 extManifest.Contents[name] = &Entry{ 257 - Type: Type_ExternalFile.Enum(), 258 - Size: entry.Size, 259 - Data: fmt.Appendf(nil, "sha256-%x", dataHash), 260 - Transform: entry.Transform, 261 - ContentType: entry.ContentType, 257 + Type: Type_ExternalFile.Enum(), 258 + OriginalSize: entry.OriginalSize, 259 + CompressedSize: entry.CompressedSize, 260 + Data: fmt.Appendf(nil, "sha256-%x", dataHash), 261 + Transform: 
entry.Transform, 262 + ContentType: entry.ContentType, 263 + GitHash: entry.GitHash, 262 264 } 263 - extObjectSizes[string(dataHash[:])] = entry.GetSize() 264 265 } else { 265 266 extManifest.Contents[name] = entry 266 267 } 267 268 } 268 - // `extObjectMap` stores size once per object, deduplicating it 269 - for _, storedSize := range extObjectSizes { 270 - *extManifest.StoredSize += storedSize 269 + 270 + // Compute the deduplicated storage size. 271 + var blobSizes = make(map[string]int64) 272 + for _, entry := range manifest.Contents { 273 + if entry.GetType() == Type_ExternalFile { 274 + blobSizes[string(entry.Data)] = entry.GetCompressedSize() 275 + } 276 + } 277 + for _, blobSize := range blobSizes { 278 + *extManifest.StoredSize += blobSize 271 279 } 272 280 273 281 // Upload the resulting manifest and the blob it references. ··· 303 311 } 304 312 305 313 if err := backend.CommitManifest(ctx, name, &extManifest); err != nil { 306 - return nil, fmt.Errorf("commit manifest: %w", err) 314 + if errors.Is(err, ErrDomainFrozen) { 315 + return nil, err 316 + } else { 317 + return nil, fmt.Errorf("commit manifest: %w", err) 318 + } 307 319 } 308 320 309 321 return &extManifest, nil
+3 -4
src/migrate.go
··· 3 3 import ( 4 4 "context" 5 5 "fmt" 6 - "log" 7 6 "slices" 8 7 "strings" 9 8 ) ··· 19 18 20 19 func createDomainMarkers(ctx context.Context) error { 21 20 if backend.HasFeature(ctx, FeatureCheckDomainMarker) { 22 - log.Print("store already has domain markers") 21 + logc.Print(ctx, "store already has domain markers") 23 22 return nil 24 23 } 25 24 ··· 36 35 } 37 36 } 38 37 for idx, domain := range domains { 39 - log.Printf("(%d / %d) creating domain %s", idx+1, len(domains), domain) 38 + logc.Printf(ctx, "(%d / %d) creating domain %s", idx+1, len(domains), domain) 40 39 if err := backend.CreateDomain(ctx, domain); err != nil { 41 40 return fmt.Errorf("creating domain %s: %w", domain, err) 42 41 } ··· 44 43 if err := backend.EnableFeature(ctx, FeatureCheckDomainMarker); err != nil { 45 44 return err 46 45 } 47 - log.Printf("created markers for %d domains", len(domains)) 46 + logc.Printf(ctx, "created markers for %d domains", len(domains)) 48 47 return nil 49 48 }
+65 -5
src/observe.go
··· 12 12 "os" 13 13 "runtime/debug" 14 14 "strconv" 15 + "strings" 16 + "sync" 15 17 "time" 16 18 17 19 slogmulti "github.com/samber/slog-multi" 20 + 21 + syslog "codeberg.org/git-pages/go-slog-syslog" 18 22 19 23 "github.com/prometheus/client_golang/prometheus" 20 24 "github.com/prometheus/client_golang/prometheus/promauto" ··· 41 45 }, []string{"method"}) 42 46 ) 43 47 48 + var syslogHandler syslog.Handler 49 + 44 50 func hasSentry() bool { 45 51 return os.Getenv("SENTRY_DSN") != "" 46 52 } ··· 55 61 56 62 logHandlers := []slog.Handler{} 57 63 64 + logLevel := slog.LevelInfo 65 + switch strings.ToLower(config.LogLevel) { 66 + case "debug": 67 + logLevel = slog.LevelDebug 68 + case "info": 69 + logLevel = slog.LevelInfo 70 + case "warn": 71 + logLevel = slog.LevelWarn 72 + case "error": 73 + logLevel = slog.LevelError 74 + default: 75 + log.Println("unknown log level", config.LogLevel) 76 + } 77 + 58 78 switch config.LogFormat { 59 79 case "none": 60 80 // nothing to do 61 81 case "text": 62 82 logHandlers = append(logHandlers, 63 - slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{})) 83 + slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: logLevel})) 64 84 case "json": 65 85 logHandlers = append(logHandlers, 66 - slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{})) 86 + slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: logLevel})) 67 87 default: 68 88 log.Println("unknown log format", config.LogFormat) 69 89 } 70 90 91 + if syslogAddr := os.Getenv("SYSLOG_ADDR"); syslogAddr != "" { 92 + var err error 93 + syslogHandler, err = syslog.NewHandler(&syslog.HandlerOptions{ 94 + Address: syslogAddr, 95 + AppName: "git-pages", 96 + StructuredDataID: "git-pages", 97 + }) 98 + if err != nil { 99 + log.Fatalf("syslog: %v", err) 100 + } 101 + logHandlers = append(logHandlers, syslogHandler) 102 + } 103 + 71 104 if hasSentry() { 72 105 enableLogs := false 73 106 if value, err := strconv.ParseBool(os.Getenv("SENTRY_LOGS")); err == nil { ··· 80 113 } 81 
114 82 115 options := sentry.ClientOptions{} 116 + options.DisableTelemetryBuffer = !config.Feature("sentry-telemetry-buffer") 83 117 options.Environment = environment 84 118 options.EnableLogs = enableLogs 85 119 options.EnableTracing = enableTracing ··· 118 152 if enableLogs { 119 153 logHandlers = append(logHandlers, sentryslog.Option{ 120 154 AddSource: true, 155 + LogLevel: levelsFromMinimum(logLevel), 121 156 }.NewSentryHandler(context.Background())) 122 157 } 123 158 } ··· 125 160 slog.SetDefault(slog.New(slogmulti.Fanout(logHandlers...))) 126 161 } 127 162 163 + // From sentryslog, because for some reason they don't make it public. 164 + func levelsFromMinimum(minLevel slog.Level) []slog.Level { 165 + allLevels := []slog.Level{slog.LevelDebug, slog.LevelInfo, slog.LevelWarn, slog.LevelError, sentryslog.LevelFatal} 166 + var result []slog.Level 167 + for _, level := range allLevels { 168 + if level >= minLevel { 169 + result = append(result, level) 170 + } 171 + } 172 + return result 173 + } 174 + 128 175 func FiniObservability() { 176 + var wg sync.WaitGroup 177 + timeout := 2 * time.Second 178 + if syslogHandler != nil { 179 + wg.Go(func() { syslogHandler.Flush(timeout) }) 180 + } 129 181 if hasSentry() { 130 - sentry.Flush(2 * time.Second) 182 + wg.Go(func() { sentry.Flush(timeout) }) 131 183 } 184 + wg.Wait() 132 185 } 133 186 134 187 func ObserveError(err error) { ··· 364 417 } 365 418 366 419 func (backend *observedBackend) CheckDomain(ctx context.Context, domain string) (found bool, err error) { 367 - span, ctx := ObserveFunction(ctx, "CheckDomain", "manifest.domain", domain) 420 + span, ctx := ObserveFunction(ctx, "CheckDomain", "domain.name", domain) 368 421 found, err = backend.inner.CheckDomain(ctx, domain) 369 422 span.Finish() 370 423 return 371 424 } 372 425 373 426 func (backend *observedBackend) CreateDomain(ctx context.Context, domain string) (err error) { 374 - span, ctx := ObserveFunction(ctx, "CreateDomain", "manifest.domain", domain) 427 
+ span, ctx := ObserveFunction(ctx, "CreateDomain", "domain.name", domain) 375 428 err = backend.inner.CreateDomain(ctx, domain) 376 429 span.Finish() 377 430 return 378 431 } 432 + 433 + func (backend *observedBackend) FreezeDomain(ctx context.Context, domain string, freeze bool) (err error) { 434 + span, ctx := ObserveFunction(ctx, "FreezeDomain", "domain.name", domain, "domain.frozen", freeze) 435 + err = backend.inner.FreezeDomain(ctx, domain, freeze) 436 + span.Finish() 437 + return 438 + }
+71 -17
src/pages.go
··· 8 8 "errors" 9 9 "fmt" 10 10 "io" 11 - "log" 12 11 "maps" 13 12 "net/http" 14 13 "net/url" 15 14 "os" 16 15 "path" 16 + "slices" 17 17 "strconv" 18 18 "strings" 19 19 "time" ··· 27 27 const notFoundPage = "404.html" 28 28 29 29 var ( 30 + serveEncodingCount = promauto.NewCounterVec(prometheus.CounterOpts{ 31 + Name: "git_pages_serve_encoding_count", 32 + Help: "Count of blob transform vs negotiated encoding", 33 + }, []string{"transform", "negotiated"}) 34 + 30 35 siteUpdatesCount = promauto.NewCounterVec(prometheus.CounterOpts{ 31 36 Name: "git_pages_site_updates", 32 37 Help: "Count of site updates in total", ··· 131 136 result := <-indexManifestCh 132 137 manifest, manifestMtime, err = result.manifest, result.manifestMtime, result.err 133 138 if manifest == nil && errors.Is(err, ErrObjectNotFound) { 134 - if found, fallbackErr := HandleWildcardFallback(w, r); found { 135 - return fallbackErr 139 + if fallback != nil { 140 + logc.Printf(r.Context(), "fallback: %s via %s", host, config.Fallback.ProxyTo) 141 + fallback.ServeHTTP(w, r) 142 + return nil 136 143 } else { 137 144 w.WriteHeader(http.StatusNotFound) 138 145 fmt.Fprintf(w, "site not found\n") ··· 300 307 acceptedEncodings := parseHTTPEncodings(r.Header.Get("Accept-Encoding")) 301 308 negotiatedEncoding := true 302 309 switch entry.GetTransform() { 303 - case Transform_None: 304 - if acceptedEncodings.Negotiate("identity") != "identity" { 310 + case Transform_Identity: 311 + switch acceptedEncodings.Negotiate("identity") { 312 + case "identity": 313 + serveEncodingCount. 314 + With(prometheus.Labels{"transform": "identity", "negotiated": "identity"}). 315 + Inc() 316 + default: 305 317 negotiatedEncoding = false 318 + serveEncodingCount. 319 + With(prometheus.Labels{"transform": "identity", "negotiated": "failure"}). 
320 + Inc() 306 321 } 307 - case Transform_Zstandard: 322 + case Transform_Zstd: 308 323 supported := []string{"zstd", "identity"} 309 324 if entry.ContentType == nil { 310 325 // If Content-Type is unset, `http.ServeContent` will try to sniff ··· 315 330 case "zstd": 316 331 // Set Content-Length ourselves since `http.ServeContent` only sets 317 332 // it if Content-Encoding is unset or if it's a range request. 318 - w.Header().Set("Content-Length", strconv.FormatInt(*entry.Size, 10)) 333 + w.Header().Set("Content-Length", strconv.FormatInt(entry.GetCompressedSize(), 10)) 319 334 w.Header().Set("Content-Encoding", "zstd") 335 + serveEncodingCount. 336 + With(prometheus.Labels{"transform": "zstd", "negotiated": "zstd"}). 337 + Inc() 320 338 case "identity": 321 339 compressedData, _ := io.ReadAll(reader) 322 340 decompressedData, err := zstdDecoder.DecodeAll(compressedData, []byte{}) ··· 326 344 return err 327 345 } 328 346 reader = bytes.NewReader(decompressedData) 347 + serveEncodingCount. 348 + With(prometheus.Labels{"transform": "zstd", "negotiated": "identity"}). 349 + Inc() 329 350 default: 330 351 negotiatedEncoding = false 352 + serveEncodingCount. 353 + With(prometheus.Labels{"transform": "zstd", "negotiated": "failure"}). 354 + Inc() 331 355 } 332 356 default: 333 357 return fmt.Errorf("unexpected transform") ··· 383 407 return nil 384 408 } 385 409 410 + func checkDryRun(w http.ResponseWriter, r *http.Request) bool { 411 + // "Dry run" requests are used to non-destructively check if the request would have 412 + // successfully been authorized. 
413 + if r.Header.Get("Dry-Run") != "" { 414 + fmt.Fprintln(w, "dry-run ok") 415 + return true 416 + } 417 + return false 418 + } 419 + 386 420 func putPage(w http.ResponseWriter, r *http.Request) error { 387 421 var result UpdateResult 388 422 ··· 402 436 defer cancel() 403 437 404 438 contentType := getMediaType(r.Header.Get("Content-Type")) 405 - 406 - if contentType == "application/x-www-form-urlencoded" { 439 + switch contentType { 440 + case "application/x-www-form-urlencoded": 407 441 auth, err := AuthorizeUpdateFromRepository(r) 408 442 if err != nil { 409 443 return err ··· 428 462 return err 429 463 } 430 464 465 + if checkDryRun(w, r) { 466 + return nil 467 + } 468 + 431 469 result = UpdateFromRepository(updateCtx, webRoot, repoURL, branch) 432 - } else { 470 + 471 + default: 433 472 _, err := AuthorizeUpdateFromArchive(r) 434 473 if err != nil { 435 474 return err 475 + } 476 + 477 + if checkDryRun(w, r) { 478 + return nil 436 479 } 437 480 438 481 // request body contains archive ··· 448 491 w.WriteHeader(http.StatusUnsupportedMediaType) 449 492 } else if errors.Is(result.err, ErrArchiveTooLarge) { 450 493 w.WriteHeader(http.StatusRequestEntityTooLarge) 494 + } else if errors.Is(result.err, ErrDomainFrozen) { 495 + w.WriteHeader(http.StatusForbidden) 451 496 } else { 452 497 w.WriteHeader(http.StatusServiceUnavailable) 453 498 } ··· 496 541 return err 497 542 } 498 543 544 + if checkDryRun(w, r) { 545 + return nil 546 + } 547 + 499 548 err = backend.DeleteManifest(r.Context(), makeWebRoot(host, projectName)) 500 549 if err != nil { 501 550 w.WriteHeader(http.StatusInternalServerError) ··· 596 645 return err 597 646 } 598 647 648 + if checkDryRun(w, r) { 649 + return nil 650 + } 651 + 599 652 resultChan := make(chan UpdateResult) 600 653 go func(ctx context.Context) { 601 654 ctx, cancel := context.WithTimeout(ctx, time.Duration(config.Limits.UpdateTimeout)) ··· 623 676 w.WriteHeader(http.StatusGatewayTimeout) 624 677 fmt.Fprintln(w, "update timeout") 
625 678 case UpdateNoChange: 626 - w.WriteHeader(http.StatusOK) 627 679 fmt.Fprintln(w, "unchanged") 628 680 case UpdateCreated: 629 - w.WriteHeader(http.StatusOK) 630 681 fmt.Fprintln(w, "created") 631 682 case UpdateReplaced: 632 - w.WriteHeader(http.StatusOK) 633 683 fmt.Fprintln(w, "replaced") 634 684 case UpdateDeleted: 635 - w.WriteHeader(http.StatusOK) 636 685 fmt.Fprintln(w, "deleted") 637 686 } 638 687 if result.manifest != nil { ··· 652 701 // any intentional deviation is an opportunity to miss an issue that will affect our 653 702 // visitors but not our health checks. 654 703 if r.Header.Get("Health-Check") == "" { 655 - log.Println("pages:", r.Method, r.Host, r.URL, r.Header.Get("Content-Type")) 704 + logc.Println(r.Context(), "pages:", r.Method, r.Host, r.URL, r.Header.Get("Content-Type")) 656 705 if region := os.Getenv("FLY_REGION"); region != "" { 657 706 machine_id := os.Getenv("FLY_MACHINE_ID") 658 707 w.Header().Add("Server", fmt.Sprintf("git-pages (fly.io; %s; %s)", region, machine_id)) ··· 666 715 ObserveData(r.Context(), "server.name", hostname) 667 716 } 668 717 } 718 + } 719 + allowedMethods := []string{"OPTIONS", "HEAD", "GET", "PUT", "DELETE", "POST"} 720 + if r.Method == "OPTIONS" || !slices.Contains(allowedMethods, r.Method) { 721 + w.Header().Add("Allow", strings.Join(allowedMethods, ", ")) 669 722 } 670 723 err := error(nil) 671 724 switch r.Method { 672 725 // REST API 726 + case http.MethodOptions: 727 + // no preflight options 673 728 case http.MethodHead, http.MethodGet: 674 729 err = getPage(w, r) 675 730 case http.MethodPut: ··· 680 735 case http.MethodPost: 681 736 err = postPage(w, r) 682 737 default: 683 - w.Header().Add("Allow", "HEAD, GET, PUT, DELETE, POST") 684 738 http.Error(w, "method not allowed", http.StatusMethodNotAllowed) 685 739 err = fmt.Errorf("method %s not allowed", r.Method) 686 740 } ··· 695 749 http.Error(w, message, http.StatusRequestEntityTooLarge) 696 750 err = errors.New(message) 697 751 } 698 - 
log.Println("pages err:", err) 752 + logc.Println(r.Context(), "pages err:", err) 699 753 } 700 754 }
+49 -21
src/schema.pb.go
··· 81 81 return file_schema_proto_rawDescGZIP(), []int{0} 82 82 } 83 83 84 + // Transformation names should match HTTP `Accept-Encoding:` header. 84 85 type Transform int32 85 86 86 87 const ( 87 88 // No transformation. 88 - Transform_None Transform = 0 89 + Transform_Identity Transform = 0 89 90 // Zstandard compression. 90 - Transform_Zstandard Transform = 1 91 + Transform_Zstd Transform = 1 91 92 ) 92 93 93 94 // Enum value maps for Transform. 94 95 var ( 95 96 Transform_name = map[int32]string{ 96 - 0: "None", 97 - 1: "Zstandard", 97 + 0: "Identity", 98 + 1: "Zstd", 98 99 } 99 100 Transform_value = map[string]int32{ 100 - "None": 0, 101 - "Zstandard": 1, 101 + "Identity": 0, 102 + "Zstd": 1, 102 103 } 103 104 ) 104 105 ··· 133 134 state protoimpl.MessageState `protogen:"open.v1"` 134 135 Type *Type `protobuf:"varint,1,opt,name=type,enum=Type" json:"type,omitempty"` 135 136 // Only present for `type == InlineFile` and `type == ExternalFile`. 136 - // For transformed entries, refers to the post-transformation (compressed) size. 137 - Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` 137 + // For transformed entries, refers to the pre-transformation (decompressed) size; otherwise 138 + // equal to `compressed_size`. 139 + OriginalSize *int64 `protobuf:"varint,7,opt,name=original_size,json=originalSize" json:"original_size,omitempty"` 140 + // Only present for `type == InlineFile` and `type == ExternalFile`. 141 + // For transformed entries, refers to the post-transformation (compressed) size; otherwise 142 + // equal to `original_size`. 143 + CompressedSize *int64 `protobuf:"varint,2,opt,name=compressed_size,json=compressedSize" json:"compressed_size,omitempty"` 138 144 // Meaning depends on `type`: 139 145 // - If `type == InlineFile`, contains file data. 
140 146 // - If `type == ExternalFile`, contains blob name (an otherwise unspecified ··· 147 153 Transform *Transform `protobuf:"varint,4,opt,name=transform,enum=Transform" json:"transform,omitempty"` 148 154 // Only present for `type == InlineFile` and `type == ExternalFile`. 149 155 // Currently, optional (not present on certain legacy manifests). 150 - ContentType *string `protobuf:"bytes,5,opt,name=content_type,json=contentType" json:"content_type,omitempty"` 156 + ContentType *string `protobuf:"bytes,5,opt,name=content_type,json=contentType" json:"content_type,omitempty"` 157 + // May be present for `type == InlineFile` and `type == ExternalFile`. 158 + // Used to reduce the amount of work being done during git checkouts. 159 + // The type of hash used is determined by the length: 160 + // - 40 bytes: SHA1DC (as hex) 161 + // - 64 bytes: SHA256 (as hex) 162 + GitHash *string `protobuf:"bytes,6,opt,name=git_hash,json=gitHash" json:"git_hash,omitempty"` 151 163 unknownFields protoimpl.UnknownFields 152 164 sizeCache protoimpl.SizeCache 153 165 } ··· 189 201 return Type_Invalid 190 202 } 191 203 192 - func (x *Entry) GetSize() int64 { 193 - if x != nil && x.Size != nil { 194 - return *x.Size 204 + func (x *Entry) GetOriginalSize() int64 { 205 + if x != nil && x.OriginalSize != nil { 206 + return *x.OriginalSize 207 + } 208 + return 0 209 + } 210 + 211 + func (x *Entry) GetCompressedSize() int64 { 212 + if x != nil && x.CompressedSize != nil { 213 + return *x.CompressedSize 195 214 } 196 215 return 0 197 216 } ··· 207 226 if x != nil && x.Transform != nil { 208 227 return *x.Transform 209 228 } 210 - return Transform_None 229 + return Transform_Identity 211 230 } 212 231 213 232 func (x *Entry) GetContentType() string { 214 233 if x != nil && x.ContentType != nil { 215 234 return *x.ContentType 235 + } 236 + return "" 237 + } 238 + 239 + func (x *Entry) GetGitHash() string { 240 + if x != nil && x.GitHash != nil { 241 + return *x.GitHash 216 242 } 217 243 return 
"" 218 244 } ··· 568 594 569 595 const file_schema_proto_rawDesc = "" + 570 596 "\n" + 571 - "\fschema.proto\"\x97\x01\n" + 597 + "\fschema.proto\"\xec\x01\n" + 572 598 "\x05Entry\x12\x19\n" + 573 - "\x04type\x18\x01 \x01(\x0e2\x05.TypeR\x04type\x12\x12\n" + 574 - "\x04size\x18\x02 \x01(\x03R\x04size\x12\x12\n" + 599 + "\x04type\x18\x01 \x01(\x0e2\x05.TypeR\x04type\x12#\n" + 600 + "\roriginal_size\x18\a \x01(\x03R\foriginalSize\x12'\n" + 601 + "\x0fcompressed_size\x18\x02 \x01(\x03R\x0ecompressedSize\x12\x12\n" + 575 602 "\x04data\x18\x03 \x01(\fR\x04data\x12(\n" + 576 603 "\ttransform\x18\x04 \x01(\x0e2\n" + 577 604 ".TransformR\ttransform\x12!\n" + 578 - "\fcontent_type\x18\x05 \x01(\tR\vcontentType\"`\n" + 605 + "\fcontent_type\x18\x05 \x01(\tR\vcontentType\x12\x19\n" + 606 + "\bgit_hash\x18\x06 \x01(\tR\agitHash\"`\n" + 579 607 "\fRedirectRule\x12\x12\n" + 580 608 "\x04from\x18\x01 \x01(\tR\x04from\x12\x0e\n" + 581 609 "\x02to\x18\x02 \x01(\tR\x02to\x12\x16\n" + ··· 614 642 "\n" + 615 643 "InlineFile\x10\x02\x12\x10\n" + 616 644 "\fExternalFile\x10\x03\x12\v\n" + 617 - "\aSymlink\x10\x04*$\n" + 618 - "\tTransform\x12\b\n" + 619 - "\x04None\x10\x00\x12\r\n" + 620 - "\tZstandard\x10\x01B,Z*codeberg.org/git-pages/git-pages/git_pagesb\beditionsp\xe8\a" 645 + "\aSymlink\x10\x04*#\n" + 646 + "\tTransform\x12\f\n" + 647 + "\bIdentity\x10\x00\x12\b\n" + 648 + "\x04Zstd\x10\x01B,Z*codeberg.org/git-pages/git-pages/git_pagesb\beditionsp\xe8\a" 621 649 622 650 var ( 623 651 file_schema_proto_rawDescOnce sync.Once
+19 -7
src/schema.proto
··· 15 15 Symlink = 4; 16 16 } 17 17 18 + // Transformation names should match HTTP `Accept-Encoding:` header. 18 19 enum Transform { 19 20 // No transformation. 20 - None = 0; 21 + Identity = 0; 21 22 // Zstandard compression. 22 - Zstandard = 1; 23 + Zstd = 1; 23 24 } 24 25 25 26 message Entry { 26 27 Type type = 1; 27 28 // Only present for `type == InlineFile` and `type == ExternalFile`. 28 - // For transformed entries, refers to the post-transformation (compressed) size. 29 - int64 size = 2; 29 + // For transformed entries, refers to the pre-transformation (decompressed) size; otherwise 30 + // equal to `compressed_size`. 31 + int64 original_size = 7; 32 + // Only present for `type == InlineFile` and `type == ExternalFile`. 33 + // For transformed entries, refers to the post-transformation (compressed) size; otherwise 34 + // equal to `original_size`. 35 + int64 compressed_size = 2; 30 36 // Meaning depends on `type`: 31 37 // * If `type == InlineFile`, contains file data. 32 38 // * If `type == ExternalFile`, contains blob name (an otherwise unspecified ··· 40 46 // Only present for `type == InlineFile` and `type == ExternalFile`. 41 47 // Currently, optional (not present on certain legacy manifests). 42 48 string content_type = 5; 49 + // May be present for `type == InlineFile` and `type == ExternalFile`. 50 + // Used to reduce the amount of work being done during git checkouts. 51 + // The type of hash used is determined by the length: 52 + // * 40 bytes: SHA1DC (as hex) 53 + // * 64 bytes: SHA256 (as hex) 54 + string git_hash = 6; 43 55 } 44 56 45 57 // See https://docs.netlify.com/manage/routing/redirects/overview/ for details. 
··· 75 87 76 88 // Contents 77 89 map<string, Entry> contents = 4; 78 - int64 original_size = 10; // total size of entries before compression 79 - int64 compressed_size = 5; // simple sum of each `entry.size` 80 - int64 stored_size = 8; // total size of (deduplicated) external objects 90 + int64 original_size = 10; // sum of each `entry.original_size` 91 + int64 compressed_size = 5; // sum of each `entry.compressed_size` 92 + int64 stored_size = 8; // sum of deduplicated `entry.compressed_size` for external files only 81 93 82 94 // Netlify-style `_redirects` and `_headers` 83 95 repeated RedirectRule redirects = 6;
+6
src/signal_other.go
··· 5 5 func OnReload(handler func()) { 6 6 // not implemented 7 7 } 8 + 9 + func WaitForInterrupt() { 10 + for { 11 + // Ctrl+C not supported 12 + } 13 + }
+7
src/signal_posix.go
··· 18 18 } 19 19 }() 20 20 } 21 + 22 + func WaitForInterrupt() { 23 + sigint := make(chan os.Signal, 1) 24 + signal.Notify(sigint, syscall.SIGINT) 25 + <-sigint 26 + signal.Stop(sigint) 27 + }
+13 -11
src/update.go
··· 5 5 "errors" 6 6 "fmt" 7 7 "io" 8 - "log" 9 8 "strings" 10 9 ) 11 10 ··· 71 70 status = "unchanged" 72 71 } 73 72 if newManifest.Commit != nil { 74 - log.Printf("update %s ok: %s %s", webRoot, status, *newManifest.Commit) 73 + logc.Printf(ctx, "update %s ok: %s %s", webRoot, status, *newManifest.Commit) 75 74 } else { 76 - log.Printf("update %s ok: %s", webRoot, status) 75 + logc.Printf(ctx, "update %s ok: %s", webRoot, status) 77 76 } 78 77 } else { 79 - log.Printf("update %s err: %s", webRoot, err) 78 + logc.Printf(ctx, "update %s err: %s", webRoot, err) 80 79 } 81 80 82 81 return UpdateResult{outcome, newManifest, err} ··· 91 90 span, ctx := ObserveFunction(ctx, "UpdateFromRepository", "repo.url", repoURL) 92 91 defer span.Finish() 93 92 94 - log.Printf("update %s: %s %s\n", webRoot, repoURL, branch) 93 + logc.Printf(ctx, "update %s: %s %s\n", webRoot, repoURL, branch) 95 94 96 - manifest, err := FetchRepository(ctx, repoURL, branch) 95 + oldManifest, _, _ := backend.GetManifest(ctx, webRoot, GetManifestOptions{}) 96 + // Ignore errors; worst case we have to re-fetch all of the blobs. 97 + 98 + manifest, err := FetchRepository(ctx, repoURL, branch, oldManifest) 97 99 if errors.Is(err, context.DeadlineExceeded) { 98 100 result = UpdateResult{UpdateTimeout, nil, fmt.Errorf("update timeout")} 99 101 } else if err != nil { ··· 119 121 120 122 switch contentType { 121 123 case "application/x-tar": 122 - log.Printf("update %s: (tar)", webRoot) 124 + logc.Printf(ctx, "update %s: (tar)", webRoot) 123 125 manifest, err = ExtractTar(reader) // yellow? 124 126 case "application/x-tar+gzip": 125 - log.Printf("update %s: (tar.gz)", webRoot) 127 + logc.Printf(ctx, "update %s: (tar.gz)", webRoot) 126 128 manifest, err = ExtractTarGzip(reader) // definitely yellow. 
127 129 case "application/x-tar+zstd": 128 - log.Printf("update %s: (tar.zst)", webRoot) 130 + logc.Printf(ctx, "update %s: (tar.zst)", webRoot) 129 131 manifest, err = ExtractTarZstd(reader) 130 132 case "application/zip": 131 - log.Printf("update %s: (zip)", webRoot) 133 + logc.Printf(ctx, "update %s: (zip)", webRoot) 132 134 manifest, err = ExtractZip(reader) 133 135 default: 134 136 err = errArchiveFormat 135 137 } 136 138 137 139 if err != nil { 138 - log.Printf("update %s err: %s", webRoot, err) 140 + logc.Printf(ctx, "update %s err: %s", webRoot, err) 139 141 result = UpdateResult{UpdateError, nil, err} 140 142 } else { 141 143 result = Update(ctx, webRoot, manifest)
-55
src/wildcard.go
··· 1 1 package git_pages 2 2 3 3 import ( 4 - "crypto/tls" 5 4 "fmt" 6 - "log" 7 - "net/http" 8 - "net/http/httputil" 9 - "net/url" 10 5 "slices" 11 6 "strings" 12 7 ··· 19 14 IndexRepos []*fasttemplate.Template 20 15 IndexBranch string 21 16 Authorization bool 22 - FallbackURL *url.URL 23 - Fallback http.Handler 24 17 } 25 18 26 19 func (pattern *WildcardPattern) GetHost() string { ··· 79 72 return repoURLs, branch 80 73 } 81 74 82 - func (pattern *WildcardPattern) IsFallbackFor(host string) bool { 83 - if pattern.Fallback == nil { 84 - return false 85 - } 86 - _, found := pattern.Matches(host) 87 - return found 88 - } 89 - 90 - func HandleWildcardFallback(w http.ResponseWriter, r *http.Request) (bool, error) { 91 - host, err := GetHost(r) 92 - if err != nil { 93 - return false, err 94 - } 95 - 96 - for _, pattern := range wildcards { 97 - if pattern.IsFallbackFor(host) { 98 - log.Printf("proxy: %s via %s", pattern.GetHost(), pattern.FallbackURL) 99 - pattern.Fallback.ServeHTTP(w, r) 100 - return true, nil 101 - } 102 - } 103 - return false, nil 104 - } 105 - 106 75 func TranslateWildcards(configs []WildcardConfig) ([]*WildcardPattern, error) { 107 76 var wildcardPatterns []*WildcardPattern 108 77 for _, config := range configs { ··· 135 104 } 136 105 } 137 106 138 - var fallbackURL *url.URL 139 - var fallback http.Handler 140 - if config.FallbackProxyTo != "" { 141 - fallbackURL, err = url.Parse(config.FallbackProxyTo) 142 - if err != nil { 143 - return nil, fmt.Errorf("wildcard pattern: fallback URL: %w", err) 144 - } 145 - 146 - fallback = &httputil.ReverseProxy{ 147 - Rewrite: func(r *httputil.ProxyRequest) { 148 - r.SetURL(fallbackURL) 149 - r.Out.Host = r.In.Host 150 - r.Out.Header["X-Forwarded-For"] = r.In.Header["X-Forwarded-For"] 151 - }, 152 - Transport: &http.Transport{ 153 - TLSClientConfig: &tls.Config{ 154 - InsecureSkipVerify: config.FallbackInsecure, 155 - }, 156 - }, 157 - } 158 - } 159 - 160 107 wildcardPatterns = append(wildcardPatterns, 
&WildcardPattern{ 161 108 Domain: strings.Split(config.Domain, "."), 162 109 CloneURL: cloneURLTemplate, 163 110 IndexRepos: indexRepoTemplates, 164 111 IndexBranch: indexRepoBranch, 165 112 Authorization: authorization, 166 - FallbackURL: fallbackURL, 167 - Fallback: fallback, 168 113 }) 169 114 } 170 115 return wildcardPatterns, nil