+9
-9
.forgejo/workflows/ci.yaml
+9
-9
.forgejo/workflows/ci.yaml
···
9
check:
10
runs-on: codeberg-small-lazy
11
container:
12
-
image: docker.io/library/node:24-trixie-slim@sha256:45babd1b4ce0349fb12c4e24bf017b90b96d52806db32e001e3013f341bef0fe
13
steps:
14
- name: Check out source code
15
-
uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
16
- name: Set up toolchain
17
-
uses: https://code.forgejo.org/actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6
18
with:
19
go-version: '>=1.25.0'
20
- name: Install dependencies
···
36
needs: [check]
37
runs-on: codeberg-medium-lazy
38
container:
39
-
image: docker.io/library/node:24-trixie-slim@sha256:ef4ca6d078dd18322059a1f051225f7bbfc2bb60c16cbb5d8a1ba2cc8964fe8a
40
steps:
41
- name: Check out source code
42
-
uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
43
- name: Set up toolchain
44
-
uses: https://code.forgejo.org/actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6
45
with:
46
go-version: '>=1.25.0'
47
- name: Install dependencies
···
58
build linux arm64
59
build darwin arm64
60
- name: Create release
61
-
uses: https://code.forgejo.org/actions/forgejo-release@v2.7.3
62
with:
63
tag: ${{ startsWith(forge.event.ref, 'refs/tags/v') && forge.ref_name || 'latest' }}
64
release-dir: assets
···
71
needs: [check]
72
runs-on: codeberg-medium-lazy
73
container:
74
-
image: docker.io/library/node:24-trixie-slim@sha256:ef4ca6d078dd18322059a1f051225f7bbfc2bb60c16cbb5d8a1ba2cc8964fe8a
75
steps:
76
- name: Install dependencies
77
run: |
78
apt-get -y update
79
apt-get -y install buildah ca-certificates
80
- name: Check out source code
81
-
uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
82
- name: Build container
83
run: |
84
printf '[storage]\ndriver="vfs"\nrunroot="/run/containers/storage"\ngraphroot="/var/lib/containers/storage"\n' | tee /etc/containers/storage.conf
···
9
check:
10
runs-on: codeberg-small-lazy
11
container:
12
+
image: docker.io/library/node:24-trixie-slim@sha256:fcdfd7bcd8f641c8c76a8950343c73912d68ba341e8dd1074e663b784d3e76f4
13
steps:
14
- name: Check out source code
15
+
uses: https://code.forgejo.org/actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
16
- name: Set up toolchain
17
+
uses: https://code.forgejo.org/actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
18
with:
19
go-version: '>=1.25.0'
20
- name: Install dependencies
···
36
needs: [check]
37
runs-on: codeberg-medium-lazy
38
container:
39
+
image: docker.io/library/node:24-trixie-slim@sha256:fcdfd7bcd8f641c8c76a8950343c73912d68ba341e8dd1074e663b784d3e76f4
40
steps:
41
- name: Check out source code
42
+
uses: https://code.forgejo.org/actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
43
- name: Set up toolchain
44
+
uses: https://code.forgejo.org/actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
45
with:
46
go-version: '>=1.25.0'
47
- name: Install dependencies
···
58
build linux arm64
59
build darwin arm64
60
- name: Create release
61
+
uses: https://code.forgejo.org/actions/forgejo-release@fc0488c944626f9265d87fbc4dd6c08f78014c63 # v2.7.3
62
with:
63
tag: ${{ startsWith(forge.event.ref, 'refs/tags/v') && forge.ref_name || 'latest' }}
64
release-dir: assets
···
71
needs: [check]
72
runs-on: codeberg-medium-lazy
73
container:
74
+
image: docker.io/library/node:24-trixie-slim@sha256:fcdfd7bcd8f641c8c76a8950343c73912d68ba341e8dd1074e663b784d3e76f4
75
steps:
76
- name: Install dependencies
77
run: |
78
apt-get -y update
79
apt-get -y install buildah ca-certificates
80
- name: Check out source code
81
+
uses: https://code.forgejo.org/actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
82
- name: Build container
83
run: |
84
printf '[storage]\ndriver="vfs"\nrunroot="/run/containers/storage"\ngraphroot="/var/lib/containers/storage"\n' | tee /etc/containers/storage.conf
+1
.gitignore
+1
.gitignore
+3
-3
Dockerfile
+3
-3
Dockerfile
···
3
RUN apk --no-cache add ca-certificates
4
5
# Build supervisor.
6
-
FROM docker.io/library/golang:1.25-alpine@sha256:aee43c3ccbf24fdffb7295693b6e33b21e01baec1b2a55acc351fde345e9ec34 AS supervisor-builder
7
RUN apk --no-cache add git
8
WORKDIR /build
9
RUN git clone https://github.com/ochinchina/supervisord . && \
···
12
go clean -cache -modcache
13
14
# Build Caddy with S3 storage backend.
15
-
FROM docker.io/library/caddy:2.10.2-builder@sha256:53f91ad7c5f1ab9a607953199b7c1e10920c570ae002aef913d68ed7464fb19f AS caddy-builder
16
RUN xcaddy build ${CADDY_VERSION} \
17
--with=github.com/ss098/certmagic-s3@v0.0.0-20250922022452-8af482af5f39 && \
18
go clean -cache -modcache
19
20
# Build git-pages.
21
-
FROM docker.io/library/golang:1.25-alpine@sha256:aee43c3ccbf24fdffb7295693b6e33b21e01baec1b2a55acc351fde345e9ec34 AS git-pages-builder
22
RUN apk --no-cache add git
23
WORKDIR /build
24
COPY go.mod go.sum ./
···
3
RUN apk --no-cache add ca-certificates
4
5
# Build supervisor.
6
+
FROM docker.io/library/golang:1.25-alpine@sha256:d3f0cf7723f3429e3f9ed846243970b20a2de7bae6a5b66fc5914e228d831bbb AS supervisor-builder
7
RUN apk --no-cache add git
8
WORKDIR /build
9
RUN git clone https://github.com/ochinchina/supervisord . && \
···
12
go clean -cache -modcache
13
14
# Build Caddy with S3 storage backend.
15
+
FROM docker.io/library/caddy:2.10.2-builder@sha256:6e3ed727ce8695fc58e0a8de8e5d11888f6463c430ea5b40e0b5f679ab734868 AS caddy-builder
16
RUN xcaddy build ${CADDY_VERSION} \
17
--with=github.com/ss098/certmagic-s3@v0.0.0-20250922022452-8af482af5f39 && \
18
go clean -cache -modcache
19
20
# Build git-pages.
21
+
FROM docker.io/library/golang:1.25-alpine@sha256:d3f0cf7723f3429e3f9ed846243970b20a2de7bae6a5b66fc5914e228d831bbb AS git-pages-builder
22
RUN apk --no-cache add git
23
WORKDIR /build
24
COPY go.mod go.sum ./
-14
LICENSE-0BSD.txt
-14
LICENSE-0BSD.txt
···
1
-
Copyright (C) git-pages contributors
2
-
Copyright (C) Catherine 'whitequark'
3
-
4
-
Permission to use, copy, modify, and/or distribute this software for
5
-
any purpose with or without fee is hereby granted.
6
-
7
-
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8
-
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9
-
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10
-
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11
-
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
12
-
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13
-
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14
-
···
+14
LICENSE.txt
+14
LICENSE.txt
···
···
1
+
Copyright (C) git-pages contributors
2
+
Copyright (C) Catherine 'whitequark'
3
+
4
+
Permission to use, copy, modify, and/or distribute this software for
5
+
any purpose with or without fee is hereby granted.
6
+
7
+
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8
+
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9
+
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10
+
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11
+
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
12
+
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13
+
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14
+
+9
-2
README.md
+9
-2
README.md
···
70
- The `POST` method requires an `application/json` body containing a Forgejo/Gitea/Gogs/GitHub webhook event payload. Requests where the `ref` key contains anything other than `refs/heads/pages` are ignored, and only the `pages` branch is used. The `repository.clone_url` key contains a repository URL to be shallowly cloned.
71
- If the received contents are empty, performs the same action as `DELETE`.
72
* In response to a `DELETE` request, the server unpublishes a site. The URL of the request must be the root URL of the site that is being unpublished. Site data remains stored for an indeterminate period of time, but becomes completely inaccessible.
73
* All updates to site content are atomic (subject to consistency guarantees of the storage backend). That is, there is an instantaneous moment during an update before which the server will return the old content and after which it will return the new content.
74
* Files with a certain name, when placed in the root of a site, have special functions:
75
- [Netlify `_redirects`][_redirects] file can be used to specify HTTP redirect and rewrite rules. The _git-pages_ implementation currently does not support placeholders, query parameters, or conditions, and may differ from Netlify in other minor ways. If you find that a supported `_redirects` file feature does not work the same as on Netlify, please file an issue. (Note that _git-pages_ does not perform URL normalization; `/foo` and `/foo/` are *not* the same, unlike with Netlify.)
76
- [Netlify `_headers`][_headers] file can be used to specify custom HTTP response headers (if allowlisted by configuration). In particular, this is useful to enable [CORS requests][cors]. The _git-pages_ implementation may differ from Netlify in minor ways; if you find that a `_headers` file feature does not work the same as on Netlify, please file an issue.
77
78
[_redirects]: https://docs.netlify.com/manage/routing/redirects/overview/
79
[_headers]: https://docs.netlify.com/manage/routing/headers/
80
[cors]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/CORS
81
82
83
Authorization
···
91
2. **DNS Challenge:** If the method is `PUT`, `DELETE`, `POST`, and a well-formed `Authorization:` header is provided containing a `<token>`, and a TXT record lookup at `_git-pages-challenge.<host>` returns a record whose concatenated value equals `SHA256("<host> <token>")`, the request is authorized.
92
- **`Pages` scheme:** Request includes an `Authorization: Pages <token>` header.
93
- **`Basic` scheme:** Request includes an `Authorization: Basic <basic>` header, where `<basic>` is equal to `Base64("Pages:<token>")`. (Useful for non-Forgejo forges.)
94
-
3. **DNS Allowlist:** If the method is `PUT` or `POST`, and a TXT record lookup at `_git-pages-repository.<host>` returns a set of well-formed absolute URLs, and (for `PUT` requests) the body contains a repository URL, and the requested clone URL is contained in this set of URLs, the request is authorized.
95
4. **Wildcard Match (content):** If the method is `POST`, and a `[[wildcard]]` configuration section exists where the suffix of a hostname (compared label-wise) is equal to `[[wildcard]].domain`, and (for `PUT` requests) the body contains a repository URL, and the requested clone URL is a *matching* clone URL, the request is authorized.
96
- **Index repository:** If the request URL is `scheme://<user>.<host>/`, a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, where `<project>` is computed by templating each element of `[[wildcard]].index-repos` with `<user>`, and `[[wildcard]]` is the section where the match occurred.
97
- **Project repository:** If the request URL is `scheme://<user>.<host>/<project>/`, a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, and `[[wildcard]]` is the section where the match occurred.
···
115
* If `SENTRY_DSN` environment variable is set, panics are reported to Sentry.
116
* If `SENTRY_DSN` and `SENTRY_LOGS=1` environment variables are set, logs are uploaded to Sentry.
117
* If `SENTRY_DSN` and `SENTRY_TRACING=1` environment variables are set, traces are uploaded to Sentry.
118
119
120
Architecture (v2)
···
160
License
161
-------
162
163
-
[0-clause BSD](LICENSE-0BSD.txt)
···
70
- The `POST` method requires an `application/json` body containing a Forgejo/Gitea/Gogs/GitHub webhook event payload. Requests where the `ref` key contains anything other than `refs/heads/pages` are ignored, and only the `pages` branch is used. The `repository.clone_url` key contains a repository URL to be shallowly cloned.
71
- If the received contents are empty, performs the same action as `DELETE`.
72
* In response to a `DELETE` request, the server unpublishes a site. The URL of the request must be the root URL of the site that is being unpublished. Site data remains stored for an indeterminate period of time, but becomes completely inaccessible.
73
+
* If a `Dry-Run: yes` header is provided with a `PUT`, `DELETE`, or `POST` request, only the authorization checks are run; no destructive updates are made. Note that this functionality was added in _git-pages_ v0.2.0.
74
* All updates to site content are atomic (subject to consistency guarantees of the storage backend). That is, there is an instantaneous moment during an update before which the server will return the old content and after which it will return the new content.
75
* Files with a certain name, when placed in the root of a site, have special functions:
76
- [Netlify `_redirects`][_redirects] file can be used to specify HTTP redirect and rewrite rules. The _git-pages_ implementation currently does not support placeholders, query parameters, or conditions, and may differ from Netlify in other minor ways. If you find that a supported `_redirects` file feature does not work the same as on Netlify, please file an issue. (Note that _git-pages_ does not perform URL normalization; `/foo` and `/foo/` are *not* the same, unlike with Netlify.)
77
- [Netlify `_headers`][_headers] file can be used to specify custom HTTP response headers (if allowlisted by configuration). In particular, this is useful to enable [CORS requests][cors]. The _git-pages_ implementation may differ from Netlify in minor ways; if you find that a `_headers` file feature does not work the same as on Netlify, please file an issue.
78
+
* Support for SHA-256 Git hashes is [limited by go-git][go-git-sha256]; once go-git implements the required features, _git-pages_ will automatically gain support for SHA-256 Git hashes. Note that shallow clones (used by _git-pages_ to conserve bandwidth if available) aren't supported yet in the Git protocol as of 2025.
79
80
[_redirects]: https://docs.netlify.com/manage/routing/redirects/overview/
81
[_headers]: https://docs.netlify.com/manage/routing/headers/
82
[cors]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Guides/CORS
83
+
[go-git-sha256]: https://github.com/go-git/go-git/issues/706
84
85
86
Authorization
···
94
2. **DNS Challenge:** If the method is `PUT`, `DELETE`, `POST`, and a well-formed `Authorization:` header is provided containing a `<token>`, and a TXT record lookup at `_git-pages-challenge.<host>` returns a record whose concatenated value equals `SHA256("<host> <token>")`, the request is authorized.
95
- **`Pages` scheme:** Request includes an `Authorization: Pages <token>` header.
96
- **`Basic` scheme:** Request includes an `Authorization: Basic <basic>` header, where `<basic>` is equal to `Base64("Pages:<token>")`. (Useful for non-Forgejo forges.)
97
+
3. **DNS Allowlist:** If the method is `PUT` or `POST`, and the request URL is `scheme://<user>.<host>/`, and a TXT record lookup at `_git-pages-repository.<host>` returns a set of well-formed absolute URLs, and (for `PUT` requests) the body contains a repository URL, and the requested clone URL is contained in this set of URLs, the request is authorized.
98
4. **Wildcard Match (content):** If the method is `POST`, and a `[[wildcard]]` configuration section exists where the suffix of a hostname (compared label-wise) is equal to `[[wildcard]].domain`, and (for `PUT` requests) the body contains a repository URL, and the requested clone URL is a *matching* clone URL, the request is authorized.
99
- **Index repository:** If the request URL is `scheme://<user>.<host>/`, a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, where `<project>` is computed by templating each element of `[[wildcard]].index-repos` with `<user>`, and `[[wildcard]]` is the section where the match occurred.
100
- **Project repository:** If the request URL is `scheme://<user>.<host>/<project>/`, a *matching* clone URL is computed by templating `[[wildcard]].clone-url` with `<user>` and `<project>`, and `[[wildcard]]` is the section where the match occurred.
···
118
* If `SENTRY_DSN` environment variable is set, panics are reported to Sentry.
119
* If `SENTRY_DSN` and `SENTRY_LOGS=1` environment variables are set, logs are uploaded to Sentry.
120
* If `SENTRY_DSN` and `SENTRY_TRACING=1` environment variables are set, traces are uploaded to Sentry.
121
+
* Optional syslog integration allows transmitting application logs to a syslog daemon. When present, the `SYSLOG_ADDR` environment variable enables the integration, and the variable's value is used to configure the absolute path to a Unix socket (usually located at `/dev/log` on Unix systems) or a network address of one of the following formats:
122
+
* for TLS over TCP: `tcp+tls://host:port`;
123
+
* for plain TCP: `tcp://host:port`;
124
+
* for UDP: `udp://host:port`.
125
126
127
Architecture (v2)
···
167
License
168
-------
169
170
+
[0-clause BSD](LICENSE.txt)
+6
-2
conf/config.example.toml
+6
-2
conf/config.example.toml
···
2
# as the intrinsic default value.
3
4
log-format = "text"
5
6
[server]
7
# Use "-" to disable the handler.
···
15
index-repos = ["<user>.codeberg.page", "pages"]
16
index-repo-branch = "main"
17
authorization = "forgejo"
18
-
fallback-proxy-to = "https://codeberg.page"
19
20
[storage]
21
type = "fs"
···
23
[storage.fs]
24
root = "./data"
25
26
-
[storage.s3] # non-default bucket configuration
27
endpoint = "play.min.io"
28
access-key-id = "Q3AM3UQ867SPQQA43P2F"
29
secret-access-key = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
···
2
# as the intrinsic default value.
3
4
log-format = "text"
5
+
log-level = "info"
6
7
[server]
8
# Use "-" to disable the handler.
···
16
index-repos = ["<user>.codeberg.page", "pages"]
17
index-repo-branch = "main"
18
authorization = "forgejo"
19
+
20
+
[fallback] # non-default section
21
+
proxy-to = "https://codeberg.page"
22
+
insecure = false
23
24
[storage]
25
type = "fs"
···
27
[storage.fs]
28
root = "./data"
29
30
+
[storage.s3] # non-default section
31
endpoint = "play.min.io"
32
access-key-id = "Q3AM3UQ867SPQQA43P2F"
33
secret-access-key = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+39
-2
flake.nix
+39
-2
flake.nix
···
43
"-s -w"
44
];
45
46
+
vendorHash = "sha256-oFKS3ciZyuzzMYg7g3idbssHfDdNYXzNjAXB6XDzMJg=";
47
};
48
in
49
{
···
63
inherit git-pages;
64
default = git-pages;
65
};
66
+
67
+
apps.vm =
68
+
let
69
+
guestSystem = if pkgs.stdenv.hostPlatform.isAarch64 then "aarch64-linux" else "x86_64-linux";
70
+
in
71
+
{
72
+
type = "app";
73
+
program =
74
+
(pkgs.writeShellApplication {
75
+
name = "vm";
76
+
text = ''
77
+
exec ${
78
+
pkgs.lib.getExe
79
+
(import ./nix/vm.nix {
80
+
inherit nixpkgs self;
81
+
system = guestSystem;
82
+
hostSystem = system;
83
+
}).config.system.build.vm
84
+
}
85
+
'';
86
+
})
87
+
+ /bin/vm;
88
+
};
89
}
90
+
)
91
+
// {
92
+
nixosModules.default =
93
+
{
94
+
lib,
95
+
pkgs,
96
+
...
97
+
}:
98
+
{
99
+
imports = [ ./nix/module.nix ];
100
+
services.git-pages.package =
101
+
lib.mkDefault
102
+
self.packages.${pkgs.stdenv.hostPlatform.system}.git-pages;
103
+
};
104
+
};
105
}
+17
-14
go.mod
+17
-14
go.mod
···
4
5
require (
6
codeberg.org/git-pages/go-headers v1.1.0
7
github.com/KimMachineGun/automemlimit v0.7.5
8
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
9
github.com/creasty/defaults v1.8.0
10
-
github.com/getsentry/sentry-go v0.36.2
11
-
github.com/getsentry/sentry-go/slog v0.36.2
12
-
github.com/go-git/go-billy/v6 v6.0.0-20251026101908-623011986e70
13
-
github.com/go-git/go-git/v6 v6.0.0-20251029213217-0bbfc0875edd
14
github.com/klauspost/compress v1.18.1
15
github.com/maypok86/otter/v2 v2.2.1
16
-
github.com/minio/minio-go/v7 v7.0.95
17
github.com/pelletier/go-toml/v2 v2.2.4
18
github.com/pquerna/cachecontrol v0.2.0
19
github.com/prometheus/client_golang v1.23.2
20
-
github.com/samber/slog-multi v1.5.0
21
github.com/tj/go-redirects v0.0.0-20200911105812-fd1ba1020b37
22
github.com/valyala/fasttemplate v1.2.2
23
google.golang.org/protobuf v1.36.10
···
29
github.com/beorn7/perks v1.0.1 // indirect
30
github.com/cespare/xxhash/v2 v2.3.0 // indirect
31
github.com/cloudflare/circl v1.6.1 // indirect
32
-
github.com/cyphar/filepath-securejoin v0.5.0 // indirect
33
github.com/dustin/go-humanize v1.0.1 // indirect
34
github.com/emirpasic/gods v1.18.1 // indirect
35
github.com/go-git/gcfg/v2 v2.0.2 // indirect
36
github.com/go-ini/ini v1.67.0 // indirect
37
-
github.com/goccy/go-json v0.10.5 // indirect
38
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
39
github.com/google/uuid v1.6.0 // indirect
40
github.com/kevinburke/ssh_config v1.4.0 // indirect
41
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
42
-
github.com/minio/crc64nvme v1.0.2 // indirect
43
github.com/minio/md5-simd v1.1.2 // indirect
44
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
45
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
···
50
github.com/prometheus/common v0.66.1 // indirect
51
github.com/prometheus/procfs v0.16.1 // indirect
52
github.com/rs/xid v1.6.0 // indirect
53
-
github.com/samber/lo v1.51.0 // indirect
54
github.com/samber/slog-common v0.19.0 // indirect
55
github.com/sergi/go-diff v1.4.0 // indirect
56
github.com/tinylib/msgp v1.3.0 // indirect
57
github.com/tj/assert v0.0.3 // indirect
58
github.com/valyala/bytebufferpool v1.0.0 // indirect
59
go.yaml.in/yaml/v2 v2.4.2 // indirect
60
-
golang.org/x/crypto v0.43.0 // indirect
61
-
golang.org/x/net v0.46.0 // indirect
62
-
golang.org/x/sys v0.37.0 // indirect
63
-
golang.org/x/text v0.30.0 // indirect
64
)
···
4
5
require (
6
codeberg.org/git-pages/go-headers v1.1.0
7
+
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251122144254-06c45d430fb9
8
github.com/KimMachineGun/automemlimit v0.7.5
9
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
10
github.com/creasty/defaults v1.8.0
11
+
github.com/getsentry/sentry-go v0.40.0
12
+
github.com/getsentry/sentry-go/slog v0.40.0
13
+
github.com/go-git/go-billy/v6 v6.0.0-20251126203821-7f9c95185ee0
14
+
github.com/go-git/go-git/v6 v6.0.0-20251128074608-48f817f57805
15
github.com/klauspost/compress v1.18.1
16
github.com/maypok86/otter/v2 v2.2.1
17
+
github.com/minio/minio-go/v7 v7.0.97
18
github.com/pelletier/go-toml/v2 v2.2.4
19
github.com/pquerna/cachecontrol v0.2.0
20
github.com/prometheus/client_golang v1.23.2
21
+
github.com/samber/slog-multi v1.6.0
22
github.com/tj/go-redirects v0.0.0-20200911105812-fd1ba1020b37
23
github.com/valyala/fasttemplate v1.2.2
24
google.golang.org/protobuf v1.36.10
···
30
github.com/beorn7/perks v1.0.1 // indirect
31
github.com/cespare/xxhash/v2 v2.3.0 // indirect
32
github.com/cloudflare/circl v1.6.1 // indirect
33
+
github.com/cyphar/filepath-securejoin v0.6.1 // indirect
34
github.com/dustin/go-humanize v1.0.1 // indirect
35
github.com/emirpasic/gods v1.18.1 // indirect
36
github.com/go-git/gcfg/v2 v2.0.2 // indirect
37
github.com/go-ini/ini v1.67.0 // indirect
38
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
39
github.com/google/uuid v1.6.0 // indirect
40
github.com/kevinburke/ssh_config v1.4.0 // indirect
41
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
42
+
github.com/klauspost/crc32 v1.3.0 // indirect
43
+
github.com/leodido/go-syslog/v4 v4.3.0 // indirect
44
+
github.com/minio/crc64nvme v1.1.0 // indirect
45
github.com/minio/md5-simd v1.1.2 // indirect
46
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
47
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
···
52
github.com/prometheus/common v0.66.1 // indirect
53
github.com/prometheus/procfs v0.16.1 // indirect
54
github.com/rs/xid v1.6.0 // indirect
55
+
github.com/samber/lo v1.52.0 // indirect
56
github.com/samber/slog-common v0.19.0 // indirect
57
github.com/sergi/go-diff v1.4.0 // indirect
58
github.com/tinylib/msgp v1.3.0 // indirect
59
github.com/tj/assert v0.0.3 // indirect
60
github.com/valyala/bytebufferpool v1.0.0 // indirect
61
go.yaml.in/yaml/v2 v2.4.2 // indirect
62
+
golang.org/x/crypto v0.45.0 // indirect
63
+
golang.org/x/net v0.47.0 // indirect
64
+
golang.org/x/sys v0.38.0 // indirect
65
+
golang.org/x/text v0.31.0 // indirect
66
+
gopkg.in/yaml.v3 v3.0.1 // indirect
67
)
+34
-32
go.sum
+34
-32
go.sum
···
1
-
codeberg.org/git-pages/go-headers v1.0.0 h1:hvGU97hQdXaT5HwCpZJWQdg7akvtOBCSUNL4u2a5uTs=
2
-
codeberg.org/git-pages/go-headers v1.0.0/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts=
3
codeberg.org/git-pages/go-headers v1.1.0 h1:rk7/SOSsn+XuL7PUQZFYUaWKHEaj6K8mXmUV9rF2VxE=
4
codeberg.org/git-pages/go-headers v1.1.0/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts=
5
github.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk=
6
github.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
7
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
···
22
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
23
github.com/creasty/defaults v1.8.0 h1:z27FJxCAa0JKt3utc0sCImAEb+spPucmKoOdLHvHYKk=
24
github.com/creasty/defaults v1.8.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM=
25
-
github.com/cyphar/filepath-securejoin v0.5.0 h1:hIAhkRBMQ8nIeuVwcAoymp7MY4oherZdAxD+m0u9zaw=
26
-
github.com/cyphar/filepath-securejoin v0.5.0/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
27
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
28
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
29
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
···
33
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
34
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
35
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
36
-
github.com/getsentry/sentry-go v0.36.2 h1:uhuxRPTrUy0dnSzTd0LrYXlBYygLkKY0hhlG5LXarzM=
37
-
github.com/getsentry/sentry-go v0.36.2/go.mod h1:p5Im24mJBeruET8Q4bbcMfCQ+F+Iadc4L48tB1apo2c=
38
-
github.com/getsentry/sentry-go/slog v0.36.2 h1:PM27JHFE3lsE8fgI/cOueEOtjiktnC3Za2o5oL9PbJQ=
39
-
github.com/getsentry/sentry-go/slog v0.36.2/go.mod h1:aVFAxnpA3FEtZeSBhBFAnWOlqhiLjaaoOZ0bmBN9IHo=
40
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
41
github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
42
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
43
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
44
github.com/go-git/gcfg/v2 v2.0.2 h1:MY5SIIfTGGEMhdA7d7JePuVVxtKL7Hp+ApGDJAJ7dpo=
45
github.com/go-git/gcfg/v2 v2.0.2/go.mod h1:/lv2NsxvhepuMrldsFilrgct6pxzpGdSRC13ydTLSLs=
46
-
github.com/go-git/go-billy/v6 v6.0.0-20251026101908-623011986e70 h1:TWpNrg9JPxp0q+KG0hoFGBulPIP/kMK1b0mDqjdEB/s=
47
-
github.com/go-git/go-billy/v6 v6.0.0-20251026101908-623011986e70/go.mod h1:TpCYxdQ0tWZkrnAkd7yqK+z1C8RKcyjcaYAJNAcnUnM=
48
github.com/go-git/go-git-fixtures/v5 v5.1.1 h1:OH8i1ojV9bWfr0ZfasfpgtUXQHQyVS8HXik/V1C099w=
49
github.com/go-git/go-git-fixtures/v5 v5.1.1/go.mod h1:Altk43lx3b1ks+dVoAG2300o5WWUnktvfY3VI6bcaXU=
50
-
github.com/go-git/go-git/v6 v6.0.0-20251029213217-0bbfc0875edd h1:pn6+tR4O8McyqEr2MbQwqcySovpG8jDd11F/jQ6aAfA=
51
-
github.com/go-git/go-git/v6 v6.0.0-20251029213217-0bbfc0875edd/go.mod h1:z9pQiXCfyOZIs/8qa5zmozzbcsDPtGN91UD7+qeX3hk=
52
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
53
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
54
-
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
55
-
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
56
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
57
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
58
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
···
66
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
67
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
68
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
69
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
70
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
71
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
···
75
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
76
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
77
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
78
github.com/maypok86/otter/v2 v2.2.1 h1:hnGssisMFkdisYcvQ8L019zpYQcdtPse+g0ps2i7cfI=
79
github.com/maypok86/otter/v2 v2.2.1/go.mod h1:1NKY9bY+kB5jwCXBJfE59u+zAwOt6C7ni1FTlFFMqVs=
80
-
github.com/minio/crc64nvme v1.0.2 h1:6uO1UxGAD+kwqWWp7mBFsi5gAse66C4NXO8cmcVculg=
81
-
github.com/minio/crc64nvme v1.0.2/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
82
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
83
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
84
-
github.com/minio/minio-go/v7 v7.0.95 h1:ywOUPg+PebTMTzn9VDsoFJy32ZuARN9zhB+K3IYEvYU=
85
-
github.com/minio/minio-go/v7 v7.0.95/go.mod h1:wOOX3uxS334vImCNRVyIDdXX9OsXDm89ToynKgqUKlo=
86
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
87
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
88
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
···
113
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
114
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
115
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
116
-
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
117
-
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
118
github.com/samber/slog-common v0.19.0 h1:fNcZb8B2uOLooeYwFpAlKjkQTUafdjfqKcwcC89G9YI=
119
github.com/samber/slog-common v0.19.0/go.mod h1:dTz+YOU76aH007YUU0DffsXNsGFQRQllPQh9XyNoA3M=
120
-
github.com/samber/slog-multi v1.5.0 h1:UDRJdsdb0R5vFQFy3l26rpX3rL3FEPJTJ2yKVjoiT1I=
121
-
github.com/samber/slog-multi v1.5.0/go.mod h1:im2Zi3mH/ivSY5XDj6LFcKToRIWPw1OcjSVSdXt+2d0=
122
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
123
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
124
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
···
140
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
141
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
142
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
143
-
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
144
-
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
145
-
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
146
-
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
147
-
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
148
-
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
149
-
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
150
-
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
151
-
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
152
-
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
153
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
154
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
155
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
···
1
codeberg.org/git-pages/go-headers v1.1.0 h1:rk7/SOSsn+XuL7PUQZFYUaWKHEaj6K8mXmUV9rF2VxE=
2
codeberg.org/git-pages/go-headers v1.1.0/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts=
3
+
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251122144254-06c45d430fb9 h1:xfPDg8ThBt3+t+C+pvM3bEH4ePUzP5t5kY2v19TqgKc=
4
+
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251122144254-06c45d430fb9/go.mod h1:8NPSXbYcVb71qqNM5cIgn1/uQgMisLbu2dVD1BNxsUw=
5
github.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk=
6
github.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
7
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
···
22
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
23
github.com/creasty/defaults v1.8.0 h1:z27FJxCAa0JKt3utc0sCImAEb+spPucmKoOdLHvHYKk=
24
github.com/creasty/defaults v1.8.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM=
25
+
github.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE=
26
+
github.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=
27
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
28
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
29
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
···
33
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
34
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
35
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
36
+
github.com/getsentry/sentry-go v0.40.0 h1:VTJMN9zbTvqDqPwheRVLcp0qcUcM+8eFivvGocAaSbo=
37
+
github.com/getsentry/sentry-go v0.40.0/go.mod h1:eRXCoh3uvmjQLY6qu63BjUZnaBu5L5WhMV1RwYO8W5s=
38
+
github.com/getsentry/sentry-go/slog v0.40.0 h1:uR2EPL9w6uHw3XB983IAqzqM9mP+fjJpNY9kfob3/Z8=
39
+
github.com/getsentry/sentry-go/slog v0.40.0/go.mod h1:ArRaP+0rsbnJGyvZwYDo/vDQT/YBbOQeOlO+DGW+F9s=
40
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
41
github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
42
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
43
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
44
github.com/go-git/gcfg/v2 v2.0.2 h1:MY5SIIfTGGEMhdA7d7JePuVVxtKL7Hp+ApGDJAJ7dpo=
45
github.com/go-git/gcfg/v2 v2.0.2/go.mod h1:/lv2NsxvhepuMrldsFilrgct6pxzpGdSRC13ydTLSLs=
46
+
github.com/go-git/go-billy/v6 v6.0.0-20251126203821-7f9c95185ee0 h1:eY5aB2GXiVdgTueBcqsBt53WuJTRZAuCdIS/86Pcq5c=
47
+
github.com/go-git/go-billy/v6 v6.0.0-20251126203821-7f9c95185ee0/go.mod h1:0NjwVNrwtVFZBReAp5OoGklGJIgJFEbVyHneAr4lc8k=
48
github.com/go-git/go-git-fixtures/v5 v5.1.1 h1:OH8i1ojV9bWfr0ZfasfpgtUXQHQyVS8HXik/V1C099w=
49
github.com/go-git/go-git-fixtures/v5 v5.1.1/go.mod h1:Altk43lx3b1ks+dVoAG2300o5WWUnktvfY3VI6bcaXU=
50
+
github.com/go-git/go-git/v6 v6.0.0-20251128074608-48f817f57805 h1:jxQ3BzYeErNRvlI/4+0mpwqMzvB4g97U+ksfgvrUEbY=
51
+
github.com/go-git/go-git/v6 v6.0.0-20251128074608-48f817f57805/go.mod h1:dIwT3uWK1ooHInyVnK2JS5VfQ3peVGYaw2QPqX7uFvs=
52
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
53
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
54
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
55
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
56
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
···
64
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
65
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
66
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
67
+
github.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM=
68
+
github.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw=
69
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
70
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
71
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
···
75
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
76
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
77
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
78
+
github.com/leodido/go-syslog/v4 v4.3.0 h1:bbSpI/41bYK9iSdlYzcwvlxuLOE8yi4VTFmedtnghdA=
79
+
github.com/leodido/go-syslog/v4 v4.3.0/go.mod h1:eJ8rUfDN5OS6dOkCOBYlg2a+hbAg6pJa99QXXgMrd98=
80
github.com/maypok86/otter/v2 v2.2.1 h1:hnGssisMFkdisYcvQ8L019zpYQcdtPse+g0ps2i7cfI=
81
github.com/maypok86/otter/v2 v2.2.1/go.mod h1:1NKY9bY+kB5jwCXBJfE59u+zAwOt6C7ni1FTlFFMqVs=
82
+
github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q=
83
+
github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
84
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
85
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
86
+
github.com/minio/minio-go/v7 v7.0.97 h1:lqhREPyfgHTB/ciX8k2r8k0D93WaFqxbJX36UZq5occ=
87
+
github.com/minio/minio-go/v7 v7.0.97/go.mod h1:re5VXuo0pwEtoNLsNuSr0RrLfT/MBtohwdaSmPPSRSk=
88
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
89
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
90
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
···
115
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
116
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
117
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
118
+
github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=
119
+
github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
120
github.com/samber/slog-common v0.19.0 h1:fNcZb8B2uOLooeYwFpAlKjkQTUafdjfqKcwcC89G9YI=
121
github.com/samber/slog-common v0.19.0/go.mod h1:dTz+YOU76aH007YUU0DffsXNsGFQRQllPQh9XyNoA3M=
122
+
github.com/samber/slog-multi v1.6.0 h1:i1uBY+aaln6ljwdf7Nrt4Sys8Kk6htuYuXDHWJsHtZg=
123
+
github.com/samber/slog-multi v1.6.0/go.mod h1:qTqzmKdPpT0h4PFsTN5rYRgLwom1v+fNGuIrl1Xnnts=
124
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
125
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
126
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
···
142
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
143
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
144
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
145
+
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
146
+
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
147
+
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
148
+
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
149
+
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
150
+
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
151
+
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
152
+
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
153
+
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
154
+
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
155
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
156
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
157
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+127
nix/module.nix
+127
nix/module.nix
···
···
1
+
{
2
+
config,
3
+
lib,
4
+
pkgs,
5
+
...
6
+
}:
7
+
8
+
with lib;
9
+
10
+
let
11
+
cfg = config.services.git-pages;
12
+
configFile = pkgs.writeText "git-pages-config.toml" cfg.configFile;
13
+
in
14
+
{
15
+
options.services.git-pages = {
16
+
enable = mkEnableOption "git-pages static site server";
17
+
18
+
package = mkOption {
19
+
type = types.package;
20
+
description = "The git-pages package to use.";
21
+
};
22
+
23
+
user = mkOption {
24
+
type = types.str;
25
+
default = "git-pages";
26
+
description = "User under which git-pages runs.";
27
+
};
28
+
29
+
group = mkOption {
30
+
type = types.str;
31
+
default = "git-pages";
32
+
description = "Group under which git-pages runs.";
33
+
};
34
+
35
+
dataDir = mkOption {
36
+
type = types.path;
37
+
default = "/var/lib/git-pages";
38
+
description = "Directory where git-pages stores its data.";
39
+
};
40
+
41
+
configFile = mkOption {
42
+
type = types.lines;
43
+
default = ''
44
+
[server]
45
+
pages = "tcp/:3000"
46
+
caddy = "tcp/:3001"
47
+
metrics = "tcp/:3002"
48
+
49
+
[storage]
50
+
type = "fs"
51
+
52
+
[storage.fs]
53
+
root = "${cfg.dataDir}/data"
54
+
55
+
[limits]
56
+
max-site-size = "128M"
57
+
'';
58
+
};
59
+
60
+
openFirewall = mkOption {
61
+
type = types.bool;
62
+
default = false;
63
+
description = "Whether to open the firewall for git-pages ports.";
64
+
};
65
+
66
+
ports = {
67
+
pages = mkOption {
68
+
type = types.port;
69
+
default = 3000;
70
+
description = "Port for the main pages server.";
71
+
};
72
+
73
+
caddy = mkOption {
74
+
type = types.port;
75
+
default = 3001;
76
+
description = "Port for the Caddy integration endpoint.";
77
+
};
78
+
79
+
metrics = mkOption {
80
+
type = types.port;
81
+
default = 3002;
82
+
description = "Port for Prometheus metrics.";
83
+
};
84
+
};
85
+
};
86
+
87
+
config = mkIf cfg.enable {
88
+
users.users.${cfg.user} = {
89
+
isSystemUser = true;
90
+
group = cfg.group;
91
+
home = cfg.dataDir;
92
+
createHome = true;
93
+
description = "git-pages service user";
94
+
};
95
+
96
+
users.groups.${cfg.group} = { };
97
+
98
+
systemd.services.git-pages = {
99
+
description = "git-pages static site server";
100
+
after = [ "network.target" ];
101
+
wantedBy = [ "multi-user.target" ];
102
+
103
+
serviceConfig = {
104
+
Type = "simple";
105
+
User = cfg.user;
106
+
Group = cfg.group;
107
+
WorkingDirectory = cfg.dataDir;
108
+
ExecStart = "${cfg.package}/bin/git-pages -config ${configFile}";
109
+
Restart = "on-failure";
110
+
RestartSec = 5;
111
+
};
112
+
};
113
+
114
+
systemd.tmpfiles.rules = [
115
+
"d '${cfg.dataDir}' 0750 ${cfg.user} ${cfg.group} - -"
116
+
"d '${cfg.dataDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
117
+
];
118
+
119
+
networking.firewall = mkIf cfg.openFirewall {
120
+
allowedTCPPorts = with cfg.ports; [
121
+
pages
122
+
caddy
123
+
metrics
124
+
];
125
+
};
126
+
};
127
+
}
+94
nix/vm.nix
+94
nix/vm.nix
···
···
1
+
{
2
+
nixpkgs,
3
+
system,
4
+
hostSystem,
5
+
self,
6
+
}:
7
+
nixpkgs.lib.nixosSystem {
8
+
inherit system;
9
+
modules = [
10
+
self.nixosModules.default
11
+
(
12
+
{
13
+
lib,
14
+
config,
15
+
pkgs,
16
+
...
17
+
}:
18
+
{
19
+
virtualisation.vmVariant.virtualisation = {
20
+
host.pkgs = import nixpkgs { system = hostSystem; };
21
+
22
+
graphics = false;
23
+
memorySize = 2048;
24
+
diskSize = 10 * 1024;
25
+
cores = 2;
26
+
forwardPorts = [
27
+
# ssh
28
+
{
29
+
from = "host";
30
+
host.port = 2222;
31
+
guest.port = 22;
32
+
}
33
+
# git-pages main server
34
+
{
35
+
from = "host";
36
+
host.port = 3000;
37
+
guest.port = 3000;
38
+
}
39
+
# git-pages caddy integration
40
+
{
41
+
from = "host";
42
+
host.port = 3001;
43
+
guest.port = 3001;
44
+
}
45
+
# git-pages metrics
46
+
{
47
+
from = "host";
48
+
host.port = 3002;
49
+
guest.port = 3002;
50
+
}
51
+
];
52
+
};
53
+
54
+
networking.firewall.enable = false;
55
+
time.timeZone = "Europe/London";
56
+
services.getty.autologinUser = "root";
57
+
environment.systemPackages = with pkgs; [
58
+
curl
59
+
vim
60
+
git
61
+
htop
62
+
];
63
+
64
+
services.git-pages = {
65
+
enable = true;
66
+
dataDir = "/var/lib/git-pages";
67
+
configFile = ''
68
+
[server]
69
+
pages = "tcp/0.0.0.0:3000"
70
+
caddy = "tcp/0.0.0.0:3001"
71
+
metrics = "tcp/0.0.0.0:3002"
72
+
73
+
[storage]
74
+
type = "fs"
75
+
76
+
[storage.fs]
77
+
root = "/var/lib/git-pages/data"
78
+
79
+
# Example wildcard configuration for development
80
+
[[wildcard]]
81
+
domain = "*.localhost"
82
+
clone-url = "https://github.com/{domain}.git"
83
+
authorization = ""
84
+
'';
85
+
};
86
+
87
+
users = {
88
+
users.${config.services.git-pages.user}.uid = 777;
89
+
groups.${config.services.git-pages.group}.gid = 777;
90
+
};
91
+
}
92
+
)
93
+
];
94
+
}
+25
-16
src/auth.go
+25
-16
src/auth.go
···
6
"encoding/json"
7
"errors"
8
"fmt"
9
-
"log"
10
"net"
11
"net/http"
12
"net/url"
···
32
return false
33
}
34
35
-
func authorizeInsecure() *Authorization {
36
if config.Insecure { // for testing only
37
-
log.Println("auth: INSECURE mode")
38
return &Authorization{
39
repoURLs: nil,
40
branch: "pages",
···
159
return nil, err
160
}
161
162
allowlistHostname := fmt.Sprintf("_git-pages-repository.%s", host)
163
records, err := net.LookupTXT(allowlistHostname)
164
if err != nil {
···
166
fmt.Sprintf("failed to look up DNS repository allowlist: %s TXT", allowlistHostname)}
167
}
168
169
var (
170
repoURLs []string
171
errs []error
···
266
}
267
268
if len(dnsRecords) > 0 {
269
-
log.Printf("auth: %s TXT/CNAME: %q\n", host, dnsRecords)
270
}
271
272
for _, dnsRecord := range dnsRecords {
···
314
func AuthorizeMetadataRetrieval(r *http.Request) (*Authorization, error) {
315
causes := []error{AuthError{http.StatusUnauthorized, "unauthorized"}}
316
317
-
auth := authorizeInsecure()
318
if auth != nil {
319
return auth, nil
320
}
···
325
} else if err != nil { // bad request
326
return nil, err
327
} else {
328
-
log.Println("auth: DNS challenge")
329
return auth, nil
330
}
331
···
336
} else if err != nil { // bad request
337
return nil, err
338
} else {
339
-
log.Printf("auth: wildcard %s\n", pattern.GetHost())
340
return auth, nil
341
}
342
}
···
348
} else if err != nil { // bad request
349
return nil, err
350
} else {
351
-
log.Printf("auth: codeberg %s\n", r.Host)
352
return auth, nil
353
}
354
}
···
366
return nil, err
367
}
368
369
-
auth := authorizeInsecure()
370
if auth != nil {
371
return auth, nil
372
}
···
378
} else if err != nil { // bad request
379
return nil, err
380
} else {
381
-
log.Println("auth: DNS challenge: allow *")
382
return auth, nil
383
}
384
···
390
} else if err != nil { // bad request
391
return nil, err
392
} else {
393
-
log.Printf("auth: DNS allowlist: allow %v\n", auth.repoURLs)
394
return auth, nil
395
}
396
}
···
404
} else if err != nil { // bad request
405
return nil, err
406
} else {
407
-
log.Printf("auth: wildcard %s: allow %v\n", pattern.GetHost(), auth.repoURLs)
408
return auth, nil
409
}
410
}
···
416
} else if err != nil { // bad request
417
return nil, err
418
} else {
419
-
log.Printf("auth: codeberg %s: allow %v branch %s\n",
420
r.Host, auth.repoURLs, auth.branch)
421
return auth, nil
422
}
···
633
return nil, err
634
}
635
636
-
auth := authorizeInsecure()
637
if auth != nil {
638
return auth, nil
639
}
···
645
} else if err != nil { // bad request
646
return nil, err
647
} else {
648
-
log.Printf("auth: forge token: allow\n")
649
return auth, nil
650
}
651
···
659
} else if err != nil { // bad request
660
return nil, err
661
} else {
662
-
log.Println("auth: DNS challenge")
663
return auth, nil
664
}
665
}
···
6
"encoding/json"
7
"errors"
8
"fmt"
9
"net"
10
"net/http"
11
"net/url"
···
31
return false
32
}
33
34
+
func authorizeInsecure(r *http.Request) *Authorization {
35
if config.Insecure { // for testing only
36
+
logc.Println(r.Context(), "auth: INSECURE mode")
37
return &Authorization{
38
repoURLs: nil,
39
branch: "pages",
···
158
return nil, err
159
}
160
161
+
projectName, err := GetProjectName(r)
162
+
if err != nil {
163
+
return nil, err
164
+
}
165
+
166
allowlistHostname := fmt.Sprintf("_git-pages-repository.%s", host)
167
records, err := net.LookupTXT(allowlistHostname)
168
if err != nil {
···
170
fmt.Sprintf("failed to look up DNS repository allowlist: %s TXT", allowlistHostname)}
171
}
172
173
+
if projectName != ".index" {
174
+
return nil, AuthError{http.StatusUnauthorized,
175
+
"DNS repository allowlist only authorizes index site"}
176
+
}
177
+
178
var (
179
repoURLs []string
180
errs []error
···
275
}
276
277
if len(dnsRecords) > 0 {
278
+
logc.Printf(r.Context(), "auth: %s TXT/CNAME: %q\n", host, dnsRecords)
279
}
280
281
for _, dnsRecord := range dnsRecords {
···
323
func AuthorizeMetadataRetrieval(r *http.Request) (*Authorization, error) {
324
causes := []error{AuthError{http.StatusUnauthorized, "unauthorized"}}
325
326
+
auth := authorizeInsecure(r)
327
if auth != nil {
328
return auth, nil
329
}
···
334
} else if err != nil { // bad request
335
return nil, err
336
} else {
337
+
logc.Println(r.Context(), "auth: DNS challenge")
338
return auth, nil
339
}
340
···
345
} else if err != nil { // bad request
346
return nil, err
347
} else {
348
+
logc.Printf(r.Context(), "auth: wildcard %s\n", pattern.GetHost())
349
return auth, nil
350
}
351
}
···
357
} else if err != nil { // bad request
358
return nil, err
359
} else {
360
+
logc.Printf(r.Context(), "auth: codeberg %s\n", r.Host)
361
return auth, nil
362
}
363
}
···
375
return nil, err
376
}
377
378
+
auth := authorizeInsecure(r)
379
if auth != nil {
380
return auth, nil
381
}
···
387
} else if err != nil { // bad request
388
return nil, err
389
} else {
390
+
logc.Println(r.Context(), "auth: DNS challenge: allow *")
391
return auth, nil
392
}
393
···
399
} else if err != nil { // bad request
400
return nil, err
401
} else {
402
+
logc.Printf(r.Context(), "auth: DNS allowlist: allow %v\n", auth.repoURLs)
403
return auth, nil
404
}
405
}
···
413
} else if err != nil { // bad request
414
return nil, err
415
} else {
416
+
logc.Printf(r.Context(), "auth: wildcard %s: allow %v\n", pattern.GetHost(), auth.repoURLs)
417
return auth, nil
418
}
419
}
···
425
} else if err != nil { // bad request
426
return nil, err
427
} else {
428
+
logc.Printf(r.Context(), "auth: codeberg %s: allow %v branch %s\n",
429
r.Host, auth.repoURLs, auth.branch)
430
return auth, nil
431
}
···
642
return nil, err
643
}
644
645
+
auth := authorizeInsecure(r)
646
if auth != nil {
647
return auth, nil
648
}
···
654
} else if err != nil { // bad request
655
return nil, err
656
} else {
657
+
logc.Printf(r.Context(), "auth: forge token: allow\n")
658
return auth, nil
659
}
660
···
668
} else if err != nil { // bad request
669
return nil, err
670
} else {
671
+
logc.Println(r.Context(), "auth: DNS challenge")
672
return auth, nil
673
}
674
}
+5
src/backend.go
+5
src/backend.go
···
11
)
12
13
var ErrObjectNotFound = errors.New("not found")
14
15
func splitBlobName(name string) []string {
16
algo, hash, found := strings.Cut(name, "-")
···
76
77
// Creates a domain. This allows us to start serving content for the domain.
78
CreateDomain(ctx context.Context, domain string) error
79
}
80
81
func CreateBackend(config *StorageConfig) (backend Backend, err error) {
···
11
)
12
13
var ErrObjectNotFound = errors.New("not found")
14
+
var ErrDomainFrozen = errors.New("domain administratively frozen")
15
16
func splitBlobName(name string) []string {
17
algo, hash, found := strings.Cut(name, "-")
···
77
78
// Creates a domain. This allows us to start serving content for the domain.
79
CreateDomain(ctx context.Context, domain string) error
80
+
81
+
// Freeze or thaw a domain. This allows a site to be administratively locked, e.g. if it
82
+
// is discovered serving abusive content.
83
+
FreezeDomain(ctx context.Context, domain string, freeze bool) error
84
}
85
86
func CreateBackend(config *StorageConfig) (backend Backend, err error) {
+38
-1
src/backend_fs.go
+38
-1
src/backend_fs.go
···
208
return nil
209
}
210
211
func (fs *FSBackend) CommitManifest(ctx context.Context, name string, manifest *Manifest) error {
212
manifestData := EncodeManifest(manifest)
213
manifestHashName := stagedManifestName(manifestData)
214
···
216
return fmt.Errorf("manifest not staged")
217
}
218
219
-
if err := fs.siteRoot.MkdirAll(filepath.Dir(name), 0o755); err != nil {
220
return fmt.Errorf("mkdir: %w", err)
221
}
222
···
228
}
229
230
func (fs *FSBackend) DeleteManifest(ctx context.Context, name string) error {
231
err := fs.siteRoot.Remove(name)
232
if errors.Is(err, os.ErrNotExist) {
233
return nil
···
250
func (fs *FSBackend) CreateDomain(ctx context.Context, domain string) error {
251
return nil // no-op
252
}
···
208
return nil
209
}
210
211
+
func domainFrozenMarkerName(domain string) string {
212
+
return filepath.Join(domain, ".frozen")
213
+
}
214
+
215
+
func (fs *FSBackend) checkDomainFrozen(ctx context.Context, domain string) error {
216
+
if _, err := fs.siteRoot.Stat(domainFrozenMarkerName(domain)); err == nil {
217
+
return ErrDomainFrozen
218
+
} else if !errors.Is(err, os.ErrNotExist) {
219
+
return fmt.Errorf("stat: %w", err)
220
+
} else {
221
+
return nil
222
+
}
223
+
}
224
+
225
func (fs *FSBackend) CommitManifest(ctx context.Context, name string, manifest *Manifest) error {
226
+
domain := filepath.Dir(name)
227
+
if err := fs.checkDomainFrozen(ctx, domain); err != nil {
228
+
return err
229
+
}
230
+
231
manifestData := EncodeManifest(manifest)
232
manifestHashName := stagedManifestName(manifestData)
233
···
235
return fmt.Errorf("manifest not staged")
236
}
237
238
+
if err := fs.siteRoot.MkdirAll(domain, 0o755); err != nil {
239
return fmt.Errorf("mkdir: %w", err)
240
}
241
···
247
}
248
249
func (fs *FSBackend) DeleteManifest(ctx context.Context, name string) error {
250
+
domain := filepath.Dir(name)
251
+
if err := fs.checkDomainFrozen(ctx, domain); err != nil {
252
+
return err
253
+
}
254
+
255
err := fs.siteRoot.Remove(name)
256
if errors.Is(err, os.ErrNotExist) {
257
return nil
···
274
func (fs *FSBackend) CreateDomain(ctx context.Context, domain string) error {
275
return nil // no-op
276
}
277
+
278
+
func (fs *FSBackend) FreezeDomain(ctx context.Context, domain string, freeze bool) error {
279
+
if freeze {
280
+
return fs.siteRoot.WriteFile(domainFrozenMarkerName(domain), []byte{}, 0o644)
281
+
} else {
282
+
err := fs.siteRoot.Remove(domainFrozenMarkerName(domain))
283
+
if errors.Is(err, os.ErrNotExist) {
284
+
return nil
285
+
} else {
286
+
return err
287
+
}
288
+
}
289
+
}
+116
-40
src/backend_s3.go
+116
-40
src/backend_s3.go
···
6
"crypto/sha256"
7
"fmt"
8
"io"
9
-
"log"
10
"net/http"
11
"path"
12
"strings"
···
36
manifestCacheEvictionsCount prometheus.Counter
37
38
s3GetObjectDurationSeconds *prometheus.HistogramVec
39
-
s3GetObjectErrorsCount *prometheus.CounterVec
40
)
41
42
func initS3BackendMetrics() {
···
96
NativeHistogramMaxBucketNumber: 100,
97
NativeHistogramMinResetDuration: 10 * time.Minute,
98
}, []string{"kind"})
99
-
s3GetObjectErrorsCount = promauto.NewCounterVec(prometheus.CounterOpts{
100
-
Name: "git_pages_s3_get_object_errors_count",
101
-
Help: "Count of s3:GetObject errors",
102
-
}, []string{"object_kind"})
103
}
104
105
// Blobs can be safely cached indefinitely. They only need to be evicted to preserve memory.
···
144
options.Weigher = weigher
145
}
146
if config.MaxStale != 0 {
147
-
options.RefreshCalculator = otter.RefreshWriting[K, V](time.Duration(config.MaxAge))
148
}
149
if config.MaxAge != 0 || config.MaxStale != 0 {
150
-
options.ExpiryCalculator = otter.ExpiryWriting[K, V](time.Duration(config.MaxAge + config.MaxStale))
151
}
152
return options
153
}
···
170
if err != nil {
171
return nil, err
172
} else if !exists {
173
-
log.Printf("s3: create bucket %s\n", bucket)
174
175
err = client.MakeBucket(ctx, bucket,
176
minio.MakeBucketOptions{Region: config.Region})
···
236
minio.StatObjectOptions{})
237
if err != nil {
238
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
239
-
log.Printf("s3 feature %q: disabled", feature)
240
return false, nil
241
} else {
242
return false, err
243
}
244
}
245
-
log.Printf("s3 feature %q: enabled", feature)
246
return true, nil
247
}
248
···
250
if err != nil {
251
err = fmt.Errorf("getting s3 backend feature %q: %w", feature, err)
252
ObserveError(err)
253
-
log.Print(err)
254
return false
255
}
256
return isOn
···
268
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
269
) {
270
loader := func(ctx context.Context, name string) (*CachedBlob, error) {
271
-
log.Printf("s3: get blob %s\n", name)
272
273
startTime := time.Now()
274
···
297
return &CachedBlob{data, stat.LastModified}, nil
298
}
299
300
var cached *CachedBlob
301
-
cached, err = s3.blobCache.Get(ctx, name, otter.LoaderFunc[string, *CachedBlob](loader))
302
if err != nil {
303
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
304
-
s3GetObjectErrorsCount.With(prometheus.Labels{"object_kind": "blob"}).Inc()
305
err = fmt.Errorf("%w: %s", ErrObjectNotFound, errResp.Key)
306
}
307
} else {
···
313
}
314
315
func (s3 *S3Backend) PutBlob(ctx context.Context, name string, data []byte) error {
316
-
log.Printf("s3: put blob %s (%s)\n", name, datasize.ByteSize(len(data)).HumanReadable())
317
318
_, err := s3.client.StatObject(ctx, s3.bucket, blobObjectName(name),
319
minio.GetObjectOptions{})
···
325
return err
326
} else {
327
ObserveData(ctx, "blob.status", "created")
328
-
log.Printf("s3: put blob %s (created)\n", name)
329
return nil
330
}
331
} else {
···
333
}
334
} else {
335
ObserveData(ctx, "blob.status", "exists")
336
-
log.Printf("s3: put blob %s (exists)\n", name)
337
blobsDedupedCount.Inc()
338
blobsDedupedBytes.Add(float64(len(data)))
339
return nil
···
341
}
342
343
func (s3 *S3Backend) DeleteBlob(ctx context.Context, name string) error {
344
-
log.Printf("s3: delete blob %s\n", name)
345
346
return s3.client.RemoveObject(ctx, s3.bucket, blobObjectName(name),
347
minio.RemoveObjectOptions{})
···
356
}
357
358
func (s3 *S3Backend) ListManifests(ctx context.Context) (manifests []string, err error) {
359
-
log.Print("s3: list manifests")
360
361
ctx, cancel := context.WithCancel(ctx)
362
defer cancel()
···
387
s3 *S3Backend
388
}
389
390
-
func (l s3ManifestLoader) Load(ctx context.Context, key string) (*CachedManifest, error) {
391
return l.load(ctx, key, nil)
392
}
393
394
-
func (l s3ManifestLoader) Reload(ctx context.Context, key string, oldValue *CachedManifest) (*CachedManifest, error) {
395
return l.load(ctx, key, oldValue)
396
}
397
398
-
func (l s3ManifestLoader) load(ctx context.Context, name string, oldManifest *CachedManifest) (*CachedManifest, error) {
399
loader := func() (*CachedManifest, error) {
400
-
log.Printf("s3: get manifest %s\n", name)
401
-
402
-
startTime := time.Now()
403
-
404
opts := minio.GetObjectOptions{}
405
if oldManifest != nil && oldManifest.etag != "" {
406
opts.SetMatchETagExcept(oldManifest.etag)
···
426
if err != nil {
427
return nil, err
428
}
429
-
430
-
s3GetObjectDurationSeconds.
431
-
With(prometheus.Labels{"kind": "manifest"}).
432
-
Observe(time.Since(startTime).Seconds())
433
434
return &CachedManifest{manifest, uint32(len(data)), stat.LastModified, stat.ETag, nil}, nil
435
}
436
437
-
var cached *CachedManifest
438
-
cached, err := loader()
439
if err != nil {
440
-
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
441
-
s3GetObjectErrorsCount.With(prometheus.Labels{"object_kind": "manifest"}).Inc()
442
err = fmt.Errorf("%w: %s", ErrObjectNotFound, errResp.Key)
443
return &CachedManifest{nil, 1, time.Time{}, "", err}, nil
444
} else if errResp.StatusCode == http.StatusNotModified && oldManifest != nil {
···
476
477
func (s3 *S3Backend) StageManifest(ctx context.Context, manifest *Manifest) error {
478
data := EncodeManifest(manifest)
479
-
log.Printf("s3: stage manifest %x\n", sha256.Sum256(data))
480
481
_, err := s3.client.PutObject(ctx, s3.bucket, stagedManifestObjectName(data),
482
bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})
483
return err
484
}
485
486
func (s3 *S3Backend) CommitManifest(ctx context.Context, name string, manifest *Manifest) error {
487
data := EncodeManifest(manifest)
488
-
log.Printf("s3: commit manifest %x -> %s", sha256.Sum256(data), name)
489
490
// Remove staged object unconditionally (whether commit succeeded or failed), since
491
// the upper layer has to retry the complete operation anyway.
···
504
}
505
506
func (s3 *S3Backend) DeleteManifest(ctx context.Context, name string) error {
507
-
log.Printf("s3: delete manifest %s\n", name)
508
509
err := s3.client.RemoveObject(ctx, s3.bucket, manifestObjectName(name),
510
minio.RemoveObjectOptions{})
···
517
}
518
519
func (s3 *S3Backend) CheckDomain(ctx context.Context, domain string) (exists bool, err error) {
520
-
log.Printf("s3: check domain %s\n", domain)
521
522
_, err = s3.client.StatObject(ctx, s3.bucket, domainCheckObjectName(domain),
523
minio.StatObjectOptions{})
···
548
}
549
550
func (s3 *S3Backend) CreateDomain(ctx context.Context, domain string) error {
551
-
log.Printf("s3: create domain %s\n", domain)
552
553
_, err := s3.client.PutObject(ctx, s3.bucket, domainCheckObjectName(domain),
554
&bytes.Reader{}, 0, minio.PutObjectOptions{})
555
return err
556
}
···
6
"crypto/sha256"
7
"fmt"
8
"io"
9
"net/http"
10
"path"
11
"strings"
···
35
manifestCacheEvictionsCount prometheus.Counter
36
37
s3GetObjectDurationSeconds *prometheus.HistogramVec
38
+
s3GetObjectResponseCount *prometheus.CounterVec
39
)
40
41
func initS3BackendMetrics() {
···
95
NativeHistogramMaxBucketNumber: 100,
96
NativeHistogramMinResetDuration: 10 * time.Minute,
97
}, []string{"kind"})
98
+
s3GetObjectResponseCount = promauto.NewCounterVec(prometheus.CounterOpts{
99
+
Name: "git_pages_s3_get_object_responses_count",
100
+
Help: "Count of s3:GetObject responses",
101
+
}, []string{"kind", "code"})
102
}
103
104
// Blobs can be safely cached indefinitely. They only need to be evicted to preserve memory.
···
143
options.Weigher = weigher
144
}
145
if config.MaxStale != 0 {
146
+
options.RefreshCalculator = otter.RefreshWriting[K, V](
147
+
time.Duration(config.MaxAge))
148
}
149
if config.MaxAge != 0 || config.MaxStale != 0 {
150
+
options.ExpiryCalculator = otter.ExpiryWriting[K, V](
151
+
time.Duration(config.MaxAge + config.MaxStale))
152
}
153
return options
154
}
···
171
if err != nil {
172
return nil, err
173
} else if !exists {
174
+
logc.Printf(ctx, "s3: create bucket %s\n", bucket)
175
176
err = client.MakeBucket(ctx, bucket,
177
minio.MakeBucketOptions{Region: config.Region})
···
237
minio.StatObjectOptions{})
238
if err != nil {
239
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
240
+
logc.Printf(ctx, "s3 feature %q: disabled", feature)
241
return false, nil
242
} else {
243
return false, err
244
}
245
}
246
+
logc.Printf(ctx, "s3 feature %q: enabled", feature)
247
return true, nil
248
}
249
···
251
if err != nil {
252
err = fmt.Errorf("getting s3 backend feature %q: %w", feature, err)
253
ObserveError(err)
254
+
logc.Println(ctx, err)
255
return false
256
}
257
return isOn
···
269
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
270
) {
271
loader := func(ctx context.Context, name string) (*CachedBlob, error) {
272
+
logc.Printf(ctx, "s3: get blob %s\n", name)
273
274
startTime := time.Now()
275
···
298
return &CachedBlob{data, stat.LastModified}, nil
299
}
300
301
+
observer := func(ctx context.Context, name string) (*CachedBlob, error) {
302
+
cached, err := loader(ctx, name)
303
+
var code = "OK"
304
+
if resp, ok := err.(minio.ErrorResponse); ok {
305
+
code = resp.Code
306
+
}
307
+
s3GetObjectResponseCount.With(prometheus.Labels{"kind": "blob", "code": code}).Inc()
308
+
return cached, err
309
+
}
310
+
311
var cached *CachedBlob
312
+
cached, err = s3.blobCache.Get(ctx, name, otter.LoaderFunc[string, *CachedBlob](observer))
313
if err != nil {
314
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
315
err = fmt.Errorf("%w: %s", ErrObjectNotFound, errResp.Key)
316
}
317
} else {
···
323
}
324
325
func (s3 *S3Backend) PutBlob(ctx context.Context, name string, data []byte) error {
326
+
logc.Printf(ctx, "s3: put blob %s (%s)\n", name, datasize.ByteSize(len(data)).HumanReadable())
327
328
_, err := s3.client.StatObject(ctx, s3.bucket, blobObjectName(name),
329
minio.GetObjectOptions{})
···
335
return err
336
} else {
337
ObserveData(ctx, "blob.status", "created")
338
+
logc.Printf(ctx, "s3: put blob %s (created)\n", name)
339
return nil
340
}
341
} else {
···
343
}
344
} else {
345
ObserveData(ctx, "blob.status", "exists")
346
+
logc.Printf(ctx, "s3: put blob %s (exists)\n", name)
347
blobsDedupedCount.Inc()
348
blobsDedupedBytes.Add(float64(len(data)))
349
return nil
···
351
}
352
353
func (s3 *S3Backend) DeleteBlob(ctx context.Context, name string) error {
354
+
logc.Printf(ctx, "s3: delete blob %s\n", name)
355
356
return s3.client.RemoveObject(ctx, s3.bucket, blobObjectName(name),
357
minio.RemoveObjectOptions{})
···
366
}
367
368
func (s3 *S3Backend) ListManifests(ctx context.Context) (manifests []string, err error) {
369
+
logc.Print(ctx, "s3: list manifests")
370
371
ctx, cancel := context.WithCancel(ctx)
372
defer cancel()
···
397
s3 *S3Backend
398
}
399
400
+
func (l s3ManifestLoader) Load(
401
+
ctx context.Context, key string,
402
+
) (
403
+
*CachedManifest, error,
404
+
) {
405
return l.load(ctx, key, nil)
406
}
407
408
+
func (l s3ManifestLoader) Reload(
409
+
ctx context.Context, key string, oldValue *CachedManifest,
410
+
) (
411
+
*CachedManifest, error,
412
+
) {
413
return l.load(ctx, key, oldValue)
414
}
415
416
+
func (l s3ManifestLoader) load(
417
+
ctx context.Context, name string, oldManifest *CachedManifest,
418
+
) (
419
+
*CachedManifest, error,
420
+
) {
421
+
logc.Printf(ctx, "s3: get manifest %s\n", name)
422
+
423
loader := func() (*CachedManifest, error) {
424
opts := minio.GetObjectOptions{}
425
if oldManifest != nil && oldManifest.etag != "" {
426
opts.SetMatchETagExcept(oldManifest.etag)
···
446
if err != nil {
447
return nil, err
448
}
449
450
return &CachedManifest{manifest, uint32(len(data)), stat.LastModified, stat.ETag, nil}, nil
451
}
452
453
+
observer := func() (*CachedManifest, error) {
454
+
cached, err := loader()
455
+
var code = "OK"
456
+
if resp, ok := err.(minio.ErrorResponse); ok {
457
+
code = resp.Code
458
+
}
459
+
s3GetObjectResponseCount.With(prometheus.Labels{"kind": "manifest", "code": code}).Inc()
460
+
return cached, err
461
+
}
462
+
463
+
startTime := time.Now()
464
+
cached, err := observer()
465
+
s3GetObjectDurationSeconds.
466
+
With(prometheus.Labels{"kind": "manifest"}).
467
+
Observe(time.Since(startTime).Seconds())
468
+
469
if err != nil {
470
+
errResp := minio.ToErrorResponse(err)
471
+
if errResp.Code == "NoSuchKey" {
472
err = fmt.Errorf("%w: %s", ErrObjectNotFound, errResp.Key)
473
return &CachedManifest{nil, 1, time.Time{}, "", err}, nil
474
} else if errResp.StatusCode == http.StatusNotModified && oldManifest != nil {
···
506
507
func (s3 *S3Backend) StageManifest(ctx context.Context, manifest *Manifest) error {
508
data := EncodeManifest(manifest)
509
+
logc.Printf(ctx, "s3: stage manifest %x\n", sha256.Sum256(data))
510
511
_, err := s3.client.PutObject(ctx, s3.bucket, stagedManifestObjectName(data),
512
bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})
513
return err
514
}
515
516
+
func domainFrozenObjectName(domain string) string {
517
+
return manifestObjectName(fmt.Sprintf("%s/.frozen", domain))
518
+
}
519
+
520
+
func (s3 *S3Backend) checkDomainFrozen(ctx context.Context, domain string) error {
521
+
_, err := s3.client.GetObject(ctx, s3.bucket, domainFrozenObjectName(domain),
522
+
minio.GetObjectOptions{})
523
+
if err == nil {
524
+
return ErrDomainFrozen
525
+
} else if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
526
+
return nil
527
+
} else {
528
+
return err
529
+
}
530
+
}
531
+
532
func (s3 *S3Backend) CommitManifest(ctx context.Context, name string, manifest *Manifest) error {
533
data := EncodeManifest(manifest)
534
+
logc.Printf(ctx, "s3: commit manifest %x -> %s", sha256.Sum256(data), name)
535
+
536
+
_, domain, _ := strings.Cut(name, "/")
537
+
if err := s3.checkDomainFrozen(ctx, domain); err != nil {
538
+
return err
539
+
}
540
541
// Remove staged object unconditionally (whether commit succeeded or failed), since
542
// the upper layer has to retry the complete operation anyway.
···
555
}
556
557
func (s3 *S3Backend) DeleteManifest(ctx context.Context, name string) error {
558
+
logc.Printf(ctx, "s3: delete manifest %s\n", name)
559
+
560
+
_, domain, _ := strings.Cut(name, "/")
561
+
if err := s3.checkDomainFrozen(ctx, domain); err != nil {
562
+
return err
563
+
}
564
565
err := s3.client.RemoveObject(ctx, s3.bucket, manifestObjectName(name),
566
minio.RemoveObjectOptions{})
···
573
}
574
575
func (s3 *S3Backend) CheckDomain(ctx context.Context, domain string) (exists bool, err error) {
576
+
logc.Printf(ctx, "s3: check domain %s\n", domain)
577
578
_, err = s3.client.StatObject(ctx, s3.bucket, domainCheckObjectName(domain),
579
minio.StatObjectOptions{})
···
604
}
605
606
func (s3 *S3Backend) CreateDomain(ctx context.Context, domain string) error {
607
+
logc.Printf(ctx, "s3: create domain %s\n", domain)
608
609
_, err := s3.client.PutObject(ctx, s3.bucket, domainCheckObjectName(domain),
610
&bytes.Reader{}, 0, minio.PutObjectOptions{})
611
return err
612
}
613
+
614
+
func (s3 *S3Backend) FreezeDomain(ctx context.Context, domain string, freeze bool) error {
615
+
if freeze {
616
+
logc.Printf(ctx, "s3: freeze domain %s\n", domain)
617
+
618
+
_, err := s3.client.PutObject(ctx, s3.bucket, domainFrozenObjectName(domain),
619
+
&bytes.Reader{}, 0, minio.PutObjectOptions{})
620
+
return err
621
+
} else {
622
+
logc.Printf(ctx, "s3: thaw domain %s\n", domain)
623
+
624
+
err := s3.client.RemoveObject(ctx, s3.bucket, domainFrozenObjectName(domain),
625
+
minio.RemoveObjectOptions{})
626
+
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
627
+
return nil
628
+
} else {
629
+
return err
630
+
}
631
+
}
632
+
}
+37
-30
src/caddy.go
+37
-30
src/caddy.go
···
1
package git_pages
2
3
import (
4
"crypto/tls"
5
"fmt"
6
-
"log"
7
"net"
8
"net/http"
9
"net/url"
···
22
// this isn't really what git-pages is designed for, and object store accesses can cost money.
23
// [^1]: https://letsencrypt.org/2025/07/01/issuing-our-first-ip-address-certificate
24
if ip := net.ParseIP(domain); ip != nil {
25
-
log.Println("caddy:", domain, 404, "(bare IP)")
26
w.WriteHeader(http.StatusNotFound)
27
return
28
}
···
35
// Pages v2, which would under some circumstances return certificates with subjectAltName
36
// not valid for the SNI. Go's TLS stack makes `tls.Dial` return an error for these,
37
// thankfully making it unnecessary to examine X.509 certificates manually here.)
38
-
for _, wildcardConfig := range config.Wildcard {
39
-
if wildcardConfig.FallbackProxyTo == "" {
40
-
continue
41
-
}
42
-
fallbackURL, err := url.Parse(wildcardConfig.FallbackProxyTo)
43
-
if err != nil {
44
-
continue
45
-
}
46
-
if fallbackURL.Scheme != "https" {
47
-
continue
48
-
}
49
-
connectHost := fallbackURL.Host
50
-
if fallbackURL.Port() != "" {
51
-
connectHost += ":" + fallbackURL.Port()
52
-
} else {
53
-
connectHost += ":443"
54
-
}
55
-
log.Printf("caddy: check TLS %s", fallbackURL)
56
-
connection, err := tls.Dial("tcp", connectHost, &tls.Config{ServerName: domain})
57
-
if err != nil {
58
-
continue
59
-
}
60
-
connection.Close()
61
-
found = true
62
-
break
63
}
64
}
65
66
if found {
67
-
log.Println("caddy:", domain, 200)
68
w.WriteHeader(http.StatusOK)
69
} else if err == nil {
70
-
log.Println("caddy:", domain, 404)
71
w.WriteHeader(http.StatusNotFound)
72
} else {
73
-
log.Println("caddy:", domain, 500)
74
w.WriteHeader(http.StatusInternalServerError)
75
fmt.Fprintln(w, err)
76
}
77
}
···
1
package git_pages
2
3
import (
4
+
"context"
5
"crypto/tls"
6
"fmt"
7
"net"
8
"net/http"
9
"net/url"
···
22
// this isn't really what git-pages is designed for, and object store accesses can cost money.
23
// [^1]: https://letsencrypt.org/2025/07/01/issuing-our-first-ip-address-certificate
24
if ip := net.ParseIP(domain); ip != nil {
25
+
logc.Println(r.Context(), "caddy:", domain, 404, "(bare IP)")
26
w.WriteHeader(http.StatusNotFound)
27
return
28
}
···
35
// Pages v2, which would under some circumstances return certificates with subjectAltName
36
// not valid for the SNI. Go's TLS stack makes `tls.Dial` return an error for these,
37
// thankfully making it unnecessary to examine X.509 certificates manually here.)
38
+
found, err = tryDialWithSNI(r.Context(), domain)
39
+
if err != nil {
40
+
logc.Printf(r.Context(), "caddy err: check SNI: %s\n", err)
41
}
42
}
43
44
if found {
45
+
logc.Println(r.Context(), "caddy:", domain, 200)
46
w.WriteHeader(http.StatusOK)
47
} else if err == nil {
48
+
logc.Println(r.Context(), "caddy:", domain, 404)
49
w.WriteHeader(http.StatusNotFound)
50
} else {
51
+
logc.Println(r.Context(), "caddy:", domain, 500)
52
w.WriteHeader(http.StatusInternalServerError)
53
fmt.Fprintln(w, err)
54
}
55
}
56
+
57
+
func tryDialWithSNI(ctx context.Context, domain string) (bool, error) {
58
+
if config.Fallback.ProxyTo == "" {
59
+
return false, nil
60
+
}
61
+
62
+
fallbackURL, err := url.Parse(config.Fallback.ProxyTo)
63
+
if err != nil {
64
+
return false, err
65
+
}
66
+
if fallbackURL.Scheme != "https" {
67
+
return false, nil
68
+
}
69
+
70
+
connectHost := fallbackURL.Host
71
+
if fallbackURL.Port() != "" {
72
+
connectHost += ":" + fallbackURL.Port()
73
+
} else {
74
+
connectHost += ":443"
75
+
}
76
+
77
+
logc.Printf(ctx, "caddy: check TLS %s", fallbackURL)
78
+
connection, err := tls.Dial("tcp", connectHost, &tls.Config{ServerName: domain})
79
+
if err != nil {
80
+
return false, err
81
+
}
82
+
connection.Close()
83
+
return true, nil
84
+
}
+6
-6
src/collect.go
+6
-6
src/collect.go
···
22
23
appendFile := func(header *tar.Header, data []byte, transform Transform) (err error) {
24
switch transform {
25
-
case Transform_None:
26
-
case Transform_Zstandard:
27
data, err = zstdDecoder.DecodeAll(data, []byte{})
28
if err != nil {
29
return err
···
53
header.Typeflag = tar.TypeDir
54
header.Mode = 0755
55
header.ModTime = manifestMtime
56
-
err = appendFile(&header, nil, Transform_None)
57
58
case Type_InlineFile:
59
header.Typeflag = tar.TypeReg
···
79
header.Typeflag = tar.TypeSymlink
80
header.Mode = 0644
81
header.ModTime = manifestMtime
82
-
err = appendFile(&header, entry.GetData(), Transform_None)
83
84
default:
85
return fmt.Errorf("unexpected entry type")
···
95
Typeflag: tar.TypeReg,
96
Mode: 0644,
97
ModTime: manifestMtime,
98
-
}, []byte(redirects), Transform_None)
99
if err != nil {
100
return err
101
}
···
107
Typeflag: tar.TypeReg,
108
Mode: 0644,
109
ModTime: manifestMtime,
110
-
}, []byte(headers), Transform_None)
111
if err != nil {
112
return err
113
}
···
22
23
appendFile := func(header *tar.Header, data []byte, transform Transform) (err error) {
24
switch transform {
25
+
case Transform_Identity:
26
+
case Transform_Zstd:
27
data, err = zstdDecoder.DecodeAll(data, []byte{})
28
if err != nil {
29
return err
···
53
header.Typeflag = tar.TypeDir
54
header.Mode = 0755
55
header.ModTime = manifestMtime
56
+
err = appendFile(&header, nil, Transform_Identity)
57
58
case Type_InlineFile:
59
header.Typeflag = tar.TypeReg
···
79
header.Typeflag = tar.TypeSymlink
80
header.Mode = 0644
81
header.ModTime = manifestMtime
82
+
err = appendFile(&header, entry.GetData(), Transform_Identity)
83
84
default:
85
return fmt.Errorf("unexpected entry type")
···
95
Typeflag: tar.TypeReg,
96
Mode: 0644,
97
ModTime: manifestMtime,
98
+
}, []byte(redirects), Transform_Identity)
99
if err != nil {
100
return err
101
}
···
107
Typeflag: tar.TypeReg,
108
Mode: 0644,
109
ModTime: manifestMtime,
110
+
}, []byte(headers), Transform_Identity)
111
if err != nil {
112
return err
113
}
+12
-7
src/config.go
+12
-7
src/config.go
···
38
Insecure bool `toml:"-" env:"insecure"`
39
Features []string `toml:"features"`
40
LogFormat string `toml:"log-format" default:"text"`
41
Server ServerConfig `toml:"server"`
42
Wildcard []WildcardConfig `toml:"wildcard"`
43
Storage StorageConfig `toml:"storage"`
44
Limits LimitsConfig `toml:"limits"`
45
Observability ObservabilityConfig `toml:"observability"`
···
52
}
53
54
type WildcardConfig struct {
55
-
Domain string `toml:"domain"`
56
-
CloneURL string `toml:"clone-url"`
57
-
IndexRepos []string `toml:"index-repos" default:"[]"`
58
-
IndexRepoBranch string `toml:"index-repo-branch" default:"pages"`
59
-
Authorization string `toml:"authorization"`
60
-
FallbackProxyTo string `toml:"fallback-proxy-to"`
61
-
FallbackInsecure bool `toml:"fallback-insecure"`
62
}
63
64
type CacheConfig struct {
···
38
Insecure bool `toml:"-" env:"insecure"`
39
Features []string `toml:"features"`
40
LogFormat string `toml:"log-format" default:"text"`
41
+
LogLevel string `toml:"log-level" default:"info"`
42
Server ServerConfig `toml:"server"`
43
Wildcard []WildcardConfig `toml:"wildcard"`
44
+
Fallback FallbackConfig `toml:"fallback"`
45
Storage StorageConfig `toml:"storage"`
46
Limits LimitsConfig `toml:"limits"`
47
Observability ObservabilityConfig `toml:"observability"`
···
54
}
55
56
type WildcardConfig struct {
57
+
Domain string `toml:"domain"`
58
+
CloneURL string `toml:"clone-url"`
59
+
IndexRepos []string `toml:"index-repos" default:"[]"`
60
+
IndexRepoBranch string `toml:"index-repo-branch" default:"pages"`
61
+
Authorization string `toml:"authorization"`
62
+
}
63
+
64
+
type FallbackConfig struct {
65
+
ProxyTo string `toml:"proxy-to"`
66
+
Insecure bool `toml:"insecure"`
67
}
68
69
type CacheConfig struct {
+9
-3
src/extract.go
+9
-3
src/extract.go
···
59
}
60
61
manifestEntry.Type = Type_InlineFile.Enum()
62
-
manifestEntry.Size = proto.Int64(header.Size)
63
manifestEntry.Data = fileData
64
65
case tar.TypeSymlink:
66
manifestEntry.Type = Type_Symlink.Enum()
67
-
manifestEntry.Size = proto.Int64(header.Size)
68
manifestEntry.Data = []byte(header.Linkname)
69
70
case tar.TypeDir:
71
manifestEntry.Type = Type_Directory.Enum()
···
150
} else {
151
manifestEntry.Type = Type_InlineFile.Enum()
152
}
153
-
manifestEntry.Size = proto.Int64(int64(file.UncompressedSize64))
154
manifestEntry.Data = fileData
155
} else {
156
manifestEntry.Type = Type_Directory.Enum()
157
}
···
59
}
60
61
manifestEntry.Type = Type_InlineFile.Enum()
62
manifestEntry.Data = fileData
63
+
manifestEntry.Transform = Transform_Identity.Enum()
64
+
manifestEntry.OriginalSize = proto.Int64(header.Size)
65
+
manifestEntry.CompressedSize = proto.Int64(header.Size)
66
67
case tar.TypeSymlink:
68
manifestEntry.Type = Type_Symlink.Enum()
69
manifestEntry.Data = []byte(header.Linkname)
70
+
manifestEntry.Transform = Transform_Identity.Enum()
71
+
manifestEntry.OriginalSize = proto.Int64(header.Size)
72
+
manifestEntry.CompressedSize = proto.Int64(header.Size)
73
74
case tar.TypeDir:
75
manifestEntry.Type = Type_Directory.Enum()
···
154
} else {
155
manifestEntry.Type = Type_InlineFile.Enum()
156
}
157
manifestEntry.Data = fileData
158
+
manifestEntry.Transform = Transform_Identity.Enum()
159
+
manifestEntry.OriginalSize = proto.Int64(int64(file.UncompressedSize64))
160
+
manifestEntry.CompressedSize = proto.Int64(int64(file.UncompressedSize64))
161
} else {
162
manifestEntry.Type = Type_Directory.Enum()
163
}
+176
-42
src/fetch.go
+176
-42
src/fetch.go
···
2
3
import (
4
"context"
5
"fmt"
6
"io"
7
"os"
8
9
"github.com/go-git/go-billy/v6/osfs"
10
"github.com/go-git/go-git/v6"
11
"github.com/go-git/go-git/v6/plumbing"
12
"github.com/go-git/go-git/v6/plumbing/cache"
13
"github.com/go-git/go-git/v6/plumbing/filemode"
14
"github.com/go-git/go-git/v6/plumbing/object"
15
"github.com/go-git/go-git/v6/storage/filesystem"
16
"google.golang.org/protobuf/proto"
17
)
18
19
-
func FetchRepository(ctx context.Context, repoURL string, branch string) (*Manifest, error) {
20
span, ctx := ObserveFunction(ctx, "FetchRepository",
21
"git.repository", repoURL, "git.branch", branch)
22
defer span.Finish()
23
24
-
baseDir, err := os.MkdirTemp("", "fetchRepo")
25
if err != nil {
26
-
return nil, fmt.Errorf("mkdtemp: %w", err)
27
}
28
-
defer os.RemoveAll(baseDir)
29
30
-
fs := osfs.New(baseDir, osfs.WithBoundOS())
31
-
cache := cache.NewObjectLRUDefault()
32
-
storer := filesystem.NewStorageWithOptions(fs, cache, filesystem.Options{
33
-
ExclusiveAccess: true,
34
-
LargeObjectThreshold: int64(config.Limits.GitLargeObjectThreshold.Bytes()),
35
-
})
36
-
repo, err := git.CloneContext(ctx, storer, nil, &git.CloneOptions{
37
-
Bare: true,
38
-
URL: repoURL,
39
-
ReferenceName: plumbing.ReferenceName(branch),
40
-
SingleBranch: true,
41
-
Depth: 1,
42
-
Tags: git.NoTags,
43
-
})
44
if err != nil {
45
return nil, fmt.Errorf("git clone: %w", err)
46
}
···
63
walker := object.NewTreeWalker(tree, true, make(map[plumbing.Hash]bool))
64
defer walker.Close()
65
66
-
manifest := Manifest{
67
RepoUrl: proto.String(repoURL),
68
Branch: proto.String(branch),
69
Commit: proto.String(ref.Hash().String()),
···
71
"": {Type: Type_Directory.Enum()},
72
},
73
}
74
for {
75
name, entry, err := walker.Next()
76
if err == io.EOF {
···
78
} else if err != nil {
79
return nil, fmt.Errorf("git walker: %w", err)
80
} else {
81
-
manifestEntry := Entry{}
82
-
if entry.Mode.IsFile() {
83
-
blob, err := repo.BlobObject(entry.Hash)
84
-
if err != nil {
85
-
return nil, fmt.Errorf("git blob %s: %w", name, err)
86
-
}
87
-
88
-
reader, err := blob.Reader()
89
-
if err != nil {
90
-
return nil, fmt.Errorf("git blob open: %w", err)
91
-
}
92
-
defer reader.Close()
93
-
94
-
data, err := io.ReadAll(reader)
95
-
if err != nil {
96
-
return nil, fmt.Errorf("git blob read: %w", err)
97
-
}
98
-
99
if entry.Mode == filemode.Symlink {
100
manifestEntry.Type = Type_Symlink.Enum()
101
} else {
102
manifestEntry.Type = Type_InlineFile.Enum()
103
}
104
-
manifestEntry.Size = proto.Int64(blob.Size)
105
-
manifestEntry.Data = data
106
} else if entry.Mode == filemode.Dir {
107
manifestEntry.Type = Type_Directory.Enum()
108
} else {
109
-
AddProblem(&manifest, name, "unsupported mode %#o", entry.Mode)
110
continue
111
}
112
-
manifest.Contents[name] = &manifestEntry
113
}
114
}
115
-
return &manifest, nil
116
}
···
2
3
import (
4
"context"
5
+
"errors"
6
"fmt"
7
"io"
8
+
"maps"
9
+
"net/url"
10
"os"
11
+
"slices"
12
13
+
"github.com/c2h5oh/datasize"
14
"github.com/go-git/go-billy/v6/osfs"
15
"github.com/go-git/go-git/v6"
16
"github.com/go-git/go-git/v6/plumbing"
17
"github.com/go-git/go-git/v6/plumbing/cache"
18
"github.com/go-git/go-git/v6/plumbing/filemode"
19
"github.com/go-git/go-git/v6/plumbing/object"
20
+
"github.com/go-git/go-git/v6/plumbing/protocol/packp"
21
+
"github.com/go-git/go-git/v6/plumbing/transport"
22
"github.com/go-git/go-git/v6/storage/filesystem"
23
"google.golang.org/protobuf/proto"
24
)
25
26
+
func FetchRepository(
27
+
ctx context.Context, repoURL string, branch string, oldManifest *Manifest,
28
+
) (
29
+
*Manifest, error,
30
+
) {
31
span, ctx := ObserveFunction(ctx, "FetchRepository",
32
"git.repository", repoURL, "git.branch", branch)
33
defer span.Finish()
34
35
+
parsedRepoURL, err := url.Parse(repoURL)
36
if err != nil {
37
+
return nil, fmt.Errorf("URL parse: %w", err)
38
}
39
+
40
+
var repo *git.Repository
41
+
var storer *filesystem.Storage
42
+
for _, filter := range []packp.Filter{packp.FilterBlobNone(), packp.Filter("")} {
43
+
var tempDir string
44
+
tempDir, err = os.MkdirTemp("", "fetchRepo")
45
+
if err != nil {
46
+
return nil, fmt.Errorf("mkdtemp: %w", err)
47
+
}
48
+
defer os.RemoveAll(tempDir)
49
50
+
storer = filesystem.NewStorageWithOptions(
51
+
osfs.New(tempDir, osfs.WithBoundOS()),
52
+
cache.NewObjectLRUDefault(),
53
+
filesystem.Options{
54
+
ExclusiveAccess: true,
55
+
LargeObjectThreshold: int64(config.Limits.GitLargeObjectThreshold.Bytes()),
56
+
},
57
+
)
58
+
repo, err = git.CloneContext(ctx, storer, nil, &git.CloneOptions{
59
+
Bare: true,
60
+
URL: repoURL,
61
+
ReferenceName: plumbing.ReferenceName(branch),
62
+
SingleBranch: true,
63
+
Depth: 1,
64
+
Tags: git.NoTags,
65
+
Filter: filter,
66
+
})
67
+
if err != nil {
68
+
logc.Printf(ctx, "clone err: %s %s filter=%q\n", repoURL, branch, filter)
69
+
continue
70
+
} else {
71
+
logc.Printf(ctx, "clone ok: %s %s filter=%q\n", repoURL, branch, filter)
72
+
break
73
+
}
74
+
}
75
if err != nil {
76
return nil, fmt.Errorf("git clone: %w", err)
77
}
···
94
walker := object.NewTreeWalker(tree, true, make(map[plumbing.Hash]bool))
95
defer walker.Close()
96
97
+
// Create a manifest for the tree object corresponding to `branch`, but do not populate it
98
+
// with data yet; instead, record all the blobs we'll need.
99
+
manifest := &Manifest{
100
RepoUrl: proto.String(repoURL),
101
Branch: proto.String(branch),
102
Commit: proto.String(ref.Hash().String()),
···
104
"": {Type: Type_Directory.Enum()},
105
},
106
}
107
+
blobsNeeded := map[plumbing.Hash]*Entry{}
108
for {
109
name, entry, err := walker.Next()
110
if err == io.EOF {
···
112
} else if err != nil {
113
return nil, fmt.Errorf("git walker: %w", err)
114
} else {
115
+
manifestEntry := &Entry{}
116
+
if existingManifestEntry, found := blobsNeeded[entry.Hash]; found {
117
+
// If the same blob is present twice, we only need to fetch it once (and both
118
+
// instances will alias the same `Entry` structure in the manifest).
119
+
manifestEntry = existingManifestEntry
120
+
} else if entry.Mode.IsFile() {
121
+
blobsNeeded[entry.Hash] = manifestEntry
122
if entry.Mode == filemode.Symlink {
123
manifestEntry.Type = Type_Symlink.Enum()
124
} else {
125
manifestEntry.Type = Type_InlineFile.Enum()
126
}
127
+
manifestEntry.GitHash = proto.String(entry.Hash.String())
128
} else if entry.Mode == filemode.Dir {
129
manifestEntry.Type = Type_Directory.Enum()
130
} else {
131
+
AddProblem(manifest, name, "unsupported mode %#o", entry.Mode)
132
continue
133
}
134
+
manifest.Contents[name] = manifestEntry
135
}
136
}
137
+
138
+
// Collect checkout statistics.
139
+
var dataBytesFromOldManifest int64
140
+
var dataBytesFromGitCheckout int64
141
+
var dataBytesFromGitTransport int64
142
+
143
+
// First, see if we can extract the blobs from the old manifest. This is the preferred option
144
+
// because it avoids both network transfers and recompression. Note that we do not request
145
+
// blobs from the backend under any circumstances to avoid creating a blob existence oracle.
146
+
for _, oldManifestEntry := range oldManifest.GetContents() {
147
+
if hash, ok := plumbing.FromHex(oldManifestEntry.GetGitHash()); ok {
148
+
if manifestEntry, found := blobsNeeded[hash]; found {
149
+
manifestEntry.Reset()
150
+
proto.Merge(manifestEntry, oldManifestEntry)
151
+
dataBytesFromOldManifest += oldManifestEntry.GetOriginalSize()
152
+
delete(blobsNeeded, hash)
153
+
}
154
+
}
155
+
}
156
+
157
+
// Second, fill the manifest entries with data from the git checkout we just made.
158
+
// This will only succeed if a `blob:none` filter isn't supported and we got a full
159
+
// clone despite asking for a partial clone.
160
+
for hash, manifestEntry := range blobsNeeded {
161
+
if err := readGitBlob(repo, hash, manifestEntry); err == nil {
162
+
dataBytesFromGitCheckout += manifestEntry.GetOriginalSize()
163
+
delete(blobsNeeded, hash)
164
+
}
165
+
}
166
+
167
+
// Third, if we still don't have data for some manifest entries, re-establish a git transport
168
+
// and request the missing blobs (only) from the server.
169
+
if len(blobsNeeded) > 0 {
170
+
client, err := transport.Get(parsedRepoURL.Scheme)
171
+
if err != nil {
172
+
return nil, fmt.Errorf("git transport: %w", err)
173
+
}
174
+
175
+
endpoint, err := transport.NewEndpoint(repoURL)
176
+
if err != nil {
177
+
return nil, fmt.Errorf("git endpoint: %w", err)
178
+
}
179
+
180
+
session, err := client.NewSession(storer, endpoint, nil)
181
+
if err != nil {
182
+
return nil, fmt.Errorf("git session: %w", err)
183
+
}
184
+
185
+
connection, err := session.Handshake(ctx, transport.UploadPackService)
186
+
if err != nil {
187
+
return nil, fmt.Errorf("git connection: %w", err)
188
+
}
189
+
defer connection.Close()
190
+
191
+
if err := connection.Fetch(ctx, &transport.FetchRequest{
192
+
Wants: slices.Collect(maps.Keys(blobsNeeded)),
193
+
Depth: 1,
194
+
// Git CLI behaves like this, even if the wants above are references to blobs.
195
+
Filter: "blob:none",
196
+
}); err != nil && !errors.Is(err, transport.ErrNoChange) {
197
+
return nil, fmt.Errorf("git blob fetch request: %w", err)
198
+
}
199
+
200
+
// All remaining blobs should now be available.
201
+
for hash, manifestEntry := range blobsNeeded {
202
+
if err := readGitBlob(repo, hash, manifestEntry); err != nil {
203
+
return nil, err
204
+
}
205
+
dataBytesFromGitTransport += manifestEntry.GetOriginalSize()
206
+
delete(blobsNeeded, hash)
207
+
}
208
+
}
209
+
210
+
logc.Printf(ctx,
211
+
"fetch: %s from old manifest, %s from git checkout, %s from git transport\n",
212
+
datasize.ByteSize(dataBytesFromOldManifest).HR(),
213
+
datasize.ByteSize(dataBytesFromGitCheckout).HR(),
214
+
datasize.ByteSize(dataBytesFromGitTransport).HR(),
215
+
)
216
+
217
+
return manifest, nil
218
+
}
219
+
220
+
func readGitBlob(repo *git.Repository, hash plumbing.Hash, entry *Entry) error {
221
+
blob, err := repo.BlobObject(hash)
222
+
if err != nil {
223
+
return fmt.Errorf("git blob %s: %w", hash, err)
224
+
}
225
+
226
+
reader, err := blob.Reader()
227
+
if err != nil {
228
+
return fmt.Errorf("git blob open: %w", err)
229
+
}
230
+
defer reader.Close()
231
+
232
+
data, err := io.ReadAll(reader)
233
+
if err != nil {
234
+
return fmt.Errorf("git blob read: %w", err)
235
+
}
236
+
237
+
switch entry.GetType() {
238
+
case Type_InlineFile, Type_Symlink:
239
+
// okay
240
+
default:
241
+
panic(fmt.Errorf("readGitBlob encountered invalid entry: %v, %v",
242
+
entry.GetType(), entry.GetTransform()))
243
+
}
244
+
245
+
entry.Data = data
246
+
entry.Transform = Transform_Identity.Enum()
247
+
entry.OriginalSize = proto.Int64(blob.Size)
248
+
entry.CompressedSize = proto.Int64(blob.Size)
249
+
return nil
250
}
+54
src/log.go
+54
src/log.go
···
···
1
+
package git_pages
2
+
3
+
import (
4
+
"context"
5
+
"fmt"
6
+
"log/slog"
7
+
"os"
8
+
"runtime"
9
+
"strings"
10
+
"time"
11
+
)
12
+
13
+
var logc slogWithCtx
14
+
15
+
type slogWithCtx struct{}
16
+
17
+
func (l slogWithCtx) log(ctx context.Context, level slog.Level, msg string) {
18
+
if ctx == nil {
19
+
ctx = context.Background()
20
+
}
21
+
logger := slog.Default()
22
+
if !logger.Enabled(ctx, level) {
23
+
return
24
+
}
25
+
26
+
var pcs [1]uintptr
27
+
// skip [runtime.Callers, this method, method calling this method]
28
+
runtime.Callers(3, pcs[:])
29
+
30
+
record := slog.NewRecord(time.Now(), level, strings.TrimRight(msg, "\n"), pcs[0])
31
+
logger.Handler().Handle(ctx, record)
32
+
}
33
+
34
+
func (l slogWithCtx) Print(ctx context.Context, v ...any) {
35
+
l.log(ctx, slog.LevelInfo, fmt.Sprint(v...))
36
+
}
37
+
38
+
func (l slogWithCtx) Printf(ctx context.Context, format string, v ...any) {
39
+
l.log(ctx, slog.LevelInfo, fmt.Sprintf(format, v...))
40
+
}
41
+
42
+
func (l slogWithCtx) Println(ctx context.Context, v ...any) {
43
+
l.log(ctx, slog.LevelInfo, fmt.Sprintln(v...))
44
+
}
45
+
46
+
func (l slogWithCtx) Fatalf(ctx context.Context, format string, v ...any) {
47
+
l.log(ctx, slog.LevelError, fmt.Sprintf(format, v...))
48
+
os.Exit(1)
49
+
}
50
+
51
+
func (l slogWithCtx) Fatalln(ctx context.Context, v ...any) {
52
+
l.log(ctx, slog.LevelError, fmt.Sprintln(v...))
53
+
os.Exit(1)
54
+
}
+138
-63
src/main.go
+138
-63
src/main.go
···
2
3
import (
4
"context"
5
"errors"
6
"flag"
7
"fmt"
···
10
"log/slog"
11
"net"
12
"net/http"
13
"net/url"
14
"os"
15
"runtime/debug"
···
22
23
var config *Config
24
var wildcards []*WildcardPattern
25
var backend Backend
26
27
-
func configureFeatures() (err error) {
28
if len(config.Features) > 0 {
29
-
log.Println("features:", strings.Join(config.Features, ", "))
30
}
31
return
32
}
33
34
-
func configureMemLimit() (err error) {
35
// Avoid being OOM killed by not garbage collecting early enough.
36
memlimitBefore := datasize.ByteSize(debug.SetMemoryLimit(-1))
37
automemlimit.SetGoMemLimitWithOpts(
···
46
)
47
memlimitAfter := datasize.ByteSize(debug.SetMemoryLimit(-1))
48
if memlimitBefore == memlimitAfter {
49
-
log.Println("memlimit: now", memlimitBefore.HR())
50
} else {
51
-
log.Println("memlimit: was", memlimitBefore.HR(), "now", memlimitAfter.HR())
52
}
53
return
54
}
55
56
-
func configureWildcards() (err error) {
57
newWildcards, err := TranslateWildcards(config.Wildcard)
58
if err != nil {
59
return err
···
63
}
64
}
65
66
-
func listen(name string, listen string) net.Listener {
67
if listen == "-" {
68
return nil
69
}
70
71
protocol, address, ok := strings.Cut(listen, "/")
72
if !ok {
73
-
log.Fatalf("%s: %s: malformed endpoint", name, listen)
74
}
75
76
listener, err := net.Listen(protocol, address)
77
if err != nil {
78
-
log.Fatalf("%s: %s\n", name, err)
79
}
80
81
return listener
···
85
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
86
defer func() {
87
if err := recover(); err != nil {
88
-
log.Printf("panic: %s %s %s: %s\n%s",
89
r.Method, r.Host, r.URL.Path, err, string(debug.Stack()))
90
http.Error(w,
91
fmt.Sprintf("internal server error: %s", err),
···
97
})
98
}
99
100
-
func serve(listener net.Listener, handler http.Handler) {
101
if listener != nil {
102
handler = panicHandler(handler)
103
···
107
if config.Feature("serve-h2c") {
108
server.Protocols.SetUnencryptedHTTP2(true)
109
}
110
-
log.Fatalln(server.Serve(listener))
111
}
112
}
113
···
118
case 1:
119
return arg
120
default:
121
-
log.Fatalf("webroot argument must be either 'domain.tld' or 'domain.tld/dir")
122
return ""
123
}
124
}
···
130
} else {
131
writer, err = os.Create(flag.Arg(0))
132
if err != nil {
133
-
log.Fatalln(err)
134
}
135
}
136
return
···
141
fmt.Fprintf(os.Stderr, "(server) "+
142
"git-pages [-config <file>|-no-config]\n")
143
fmt.Fprintf(os.Stderr, "(admin) "+
144
-
"git-pages {-run-migration <name>}\n")
145
fmt.Fprintf(os.Stderr, "(info) "+
146
"git-pages {-print-config-env-vars|-print-config}\n")
147
fmt.Fprintf(os.Stderr, "(cli) "+
···
150
}
151
152
func Main() {
153
flag.Usage = usage
154
printConfigEnvVars := flag.Bool("print-config-env-vars", false,
155
"print every recognized configuration environment variable and exit")
···
169
"write archive for `site` (either 'domain.tld' or 'domain.tld/dir') in tar format")
170
updateSite := flag.String("update-site", "",
171
"update `site` (either 'domain.tld' or 'domain.tld/dir') from archive or repository URL")
172
flag.Parse()
173
174
var cliOperations int
175
if *getBlob != "" {
176
cliOperations += 1
177
}
···
181
if *getArchive != "" {
182
cliOperations += 1
183
}
184
if cliOperations > 1 {
185
-
log.Fatalln("-get-blob, -get-manifest, and -get-archive are mutually exclusive")
186
}
187
188
if *configTomlPath != "" && *noConfig {
189
-
log.Fatalln("-no-config and -config are mutually exclusive")
190
}
191
192
if *printConfigEnvVars {
···
199
*configTomlPath = "config.toml"
200
}
201
if config, err = Configure(*configTomlPath); err != nil {
202
-
log.Fatalln("config:", err)
203
}
204
205
if *printConfig {
···
211
defer FiniObservability()
212
213
if err = errors.Join(
214
-
configureFeatures(),
215
-
configureMemLimit(),
216
-
configureWildcards(),
217
); err != nil {
218
-
log.Fatalln(err)
219
}
220
221
switch {
222
case *runMigration != "":
223
if backend, err = CreateBackend(&config.Storage); err != nil {
224
-
log.Fatalln(err)
225
}
226
227
-
if err := RunMigration(context.Background(), *runMigration); err != nil {
228
-
log.Fatalln(err)
229
}
230
231
case *getBlob != "":
232
if backend, err = CreateBackend(&config.Storage); err != nil {
233
-
log.Fatalln(err)
234
}
235
236
-
reader, _, _, err := backend.GetBlob(context.Background(), *getBlob)
237
if err != nil {
238
-
log.Fatalln(err)
239
}
240
io.Copy(fileOutputArg(), reader)
241
242
case *getManifest != "":
243
if backend, err = CreateBackend(&config.Storage); err != nil {
244
-
log.Fatalln(err)
245
}
246
247
webRoot := webRootArg(*getManifest)
248
-
manifest, _, err := backend.GetManifest(context.Background(), webRoot, GetManifestOptions{})
249
if err != nil {
250
-
log.Fatalln(err)
251
}
252
fmt.Fprintln(fileOutputArg(), ManifestDebugJSON(manifest))
253
254
case *getArchive != "":
255
if backend, err = CreateBackend(&config.Storage); err != nil {
256
-
log.Fatalln(err)
257
}
258
259
webRoot := webRootArg(*getArchive)
260
manifest, manifestMtime, err :=
261
-
backend.GetManifest(context.Background(), webRoot, GetManifestOptions{})
262
if err != nil {
263
-
log.Fatalln(err)
264
}
265
-
CollectTar(context.Background(), fileOutputArg(), manifest, manifestMtime)
266
267
case *updateSite != "":
268
if backend, err = CreateBackend(&config.Storage); err != nil {
269
-
log.Fatalln(err)
270
}
271
272
if flag.NArg() != 1 {
273
-
log.Fatalln("update source must be provided as the argument")
274
}
275
276
sourceURL, err := url.Parse(flag.Arg(0))
277
if err != nil {
278
-
log.Fatalln(err)
279
}
280
281
var result UpdateResult
282
if sourceURL.Scheme == "" {
283
file, err := os.Open(sourceURL.Path)
284
if err != nil {
285
-
log.Fatalln(err)
286
}
287
defer file.Close()
288
···
301
}
302
303
webRoot := webRootArg(*updateSite)
304
-
result = UpdateFromArchive(context.Background(), webRoot, contentType, file)
305
} else {
306
branch := "pages"
307
if sourceURL.Fragment != "" {
···
309
}
310
311
webRoot := webRootArg(*updateSite)
312
-
result = UpdateFromRepository(context.Background(), webRoot, sourceURL.String(), branch)
313
}
314
315
switch result.outcome {
316
case UpdateError:
317
-
log.Printf("error: %s\n", result.err)
318
os.Exit(2)
319
case UpdateTimeout:
320
-
log.Println("timeout")
321
os.Exit(1)
322
case UpdateCreated:
323
-
log.Println("created")
324
case UpdateReplaced:
325
-
log.Println("replaced")
326
case UpdateDeleted:
327
-
log.Println("deleted")
328
case UpdateNoChange:
329
-
log.Println("no-change")
330
}
331
332
default:
···
339
// The backend is not recreated (this is intentional as it allows preserving the cache).
340
OnReload(func() {
341
if newConfig, err := Configure(*configTomlPath); err != nil {
342
-
log.Println("config: reload err:", err)
343
} else {
344
// From https://go.dev/ref/mem:
345
// > A read r of a memory location x holding a value that is not larger than
···
349
// > concurrent write.
350
config = newConfig
351
if err = errors.Join(
352
-
configureFeatures(),
353
-
configureMemLimit(),
354
-
configureWildcards(),
355
); err != nil {
356
// At this point the configuration is in an in-between, corrupted state, so
357
// the only reasonable choice is to crash.
358
-
log.Fatalln("config: reload fail:", err)
359
} else {
360
-
log.Println("config: reload ok")
361
}
362
}
363
})
···
366
// spends some time initializing (which the S3 backend does) a proxy like Caddy can race
367
// with git-pages on startup and return errors for requests that would have been served
368
// just 0.5s later.
369
-
pagesListener := listen("pages", config.Server.Pages)
370
-
caddyListener := listen("caddy", config.Server.Caddy)
371
-
metricsListener := listen("metrics", config.Server.Metrics)
372
373
if backend, err = CreateBackend(&config.Storage); err != nil {
374
-
log.Fatalln(err)
375
}
376
backend = NewObservedBackend(backend)
377
378
-
go serve(pagesListener, ObserveHTTPHandler(http.HandlerFunc(ServePages)))
379
-
go serve(caddyListener, ObserveHTTPHandler(http.HandlerFunc(ServeCaddy)))
380
-
go serve(metricsListener, promhttp.Handler())
381
382
if config.Insecure {
383
-
log.Println("serve: ready (INSECURE)")
384
} else {
385
-
log.Println("serve: ready")
386
}
387
-
select {}
388
}
389
}
···
2
3
import (
4
"context"
5
+
"crypto/tls"
6
"errors"
7
"flag"
8
"fmt"
···
11
"log/slog"
12
"net"
13
"net/http"
14
+
"net/http/httputil"
15
"net/url"
16
"os"
17
"runtime/debug"
···
24
25
var config *Config
26
var wildcards []*WildcardPattern
27
+
var fallback http.Handler
28
var backend Backend
29
30
+
func configureFeatures(ctx context.Context) (err error) {
31
if len(config.Features) > 0 {
32
+
logc.Println(ctx, "features:", strings.Join(config.Features, ", "))
33
}
34
return
35
}
36
37
+
func configureMemLimit(ctx context.Context) (err error) {
38
// Avoid being OOM killed by not garbage collecting early enough.
39
memlimitBefore := datasize.ByteSize(debug.SetMemoryLimit(-1))
40
automemlimit.SetGoMemLimitWithOpts(
···
49
)
50
memlimitAfter := datasize.ByteSize(debug.SetMemoryLimit(-1))
51
if memlimitBefore == memlimitAfter {
52
+
logc.Println(ctx, "memlimit: now", memlimitBefore.HR())
53
} else {
54
+
logc.Println(ctx, "memlimit: was", memlimitBefore.HR(), "now", memlimitAfter.HR())
55
}
56
return
57
}
58
59
+
func configureWildcards(_ context.Context) (err error) {
60
newWildcards, err := TranslateWildcards(config.Wildcard)
61
if err != nil {
62
return err
···
66
}
67
}
68
69
+
func configureFallback(_ context.Context) (err error) {
70
+
if config.Fallback.ProxyTo != "" {
71
+
var fallbackURL *url.URL
72
+
fallbackURL, err = url.Parse(config.Fallback.ProxyTo)
73
+
if err != nil {
74
+
err = fmt.Errorf("fallback: %w", err)
75
+
return
76
+
}
77
+
78
+
fallback = &httputil.ReverseProxy{
79
+
Rewrite: func(r *httputil.ProxyRequest) {
80
+
r.SetURL(fallbackURL)
81
+
r.Out.Host = r.In.Host
82
+
r.Out.Header["X-Forwarded-For"] = r.In.Header["X-Forwarded-For"]
83
+
},
84
+
Transport: &http.Transport{
85
+
TLSClientConfig: &tls.Config{
86
+
InsecureSkipVerify: config.Fallback.Insecure,
87
+
},
88
+
},
89
+
}
90
+
}
91
+
return
92
+
}
93
+
94
+
func listen(ctx context.Context, name string, listen string) net.Listener {
95
if listen == "-" {
96
return nil
97
}
98
99
protocol, address, ok := strings.Cut(listen, "/")
100
if !ok {
101
+
logc.Fatalf(ctx, "%s: %s: malformed endpoint", name, listen)
102
}
103
104
listener, err := net.Listen(protocol, address)
105
if err != nil {
106
+
logc.Fatalf(ctx, "%s: %s\n", name, err)
107
}
108
109
return listener
···
113
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
114
defer func() {
115
if err := recover(); err != nil {
116
+
logc.Printf(r.Context(), "panic: %s %s %s: %s\n%s",
117
r.Method, r.Host, r.URL.Path, err, string(debug.Stack()))
118
http.Error(w,
119
fmt.Sprintf("internal server error: %s", err),
···
125
})
126
}
127
128
+
func serve(ctx context.Context, listener net.Listener, handler http.Handler) {
129
if listener != nil {
130
handler = panicHandler(handler)
131
···
135
if config.Feature("serve-h2c") {
136
server.Protocols.SetUnencryptedHTTP2(true)
137
}
138
+
logc.Fatalln(ctx, server.Serve(listener))
139
}
140
}
141
···
146
case 1:
147
return arg
148
default:
149
+
logc.Fatalln(context.Background(),
150
+
"webroot argument must be either 'domain.tld' or 'domain.tld/dir")
151
return ""
152
}
153
}
···
159
} else {
160
writer, err = os.Create(flag.Arg(0))
161
if err != nil {
162
+
logc.Fatalln(context.Background(), err)
163
}
164
}
165
return
···
170
fmt.Fprintf(os.Stderr, "(server) "+
171
"git-pages [-config <file>|-no-config]\n")
172
fmt.Fprintf(os.Stderr, "(admin) "+
173
+
"git-pages {-run-migration <name>|-freeze-domain <domain>|-unfreeze-domain <domain>}\n")
174
fmt.Fprintf(os.Stderr, "(info) "+
175
"git-pages {-print-config-env-vars|-print-config}\n")
176
fmt.Fprintf(os.Stderr, "(cli) "+
···
179
}
180
181
func Main() {
182
+
ctx := context.Background()
183
+
184
flag.Usage = usage
185
printConfigEnvVars := flag.Bool("print-config-env-vars", false,
186
"print every recognized configuration environment variable and exit")
···
200
"write archive for `site` (either 'domain.tld' or 'domain.tld/dir') in tar format")
201
updateSite := flag.String("update-site", "",
202
"update `site` (either 'domain.tld' or 'domain.tld/dir') from archive or repository URL")
203
+
freezeDomain := flag.String("freeze-domain", "",
204
+
"prevent any site uploads to a given `domain`")
205
+
unfreezeDomain := flag.String("unfreeze-domain", "",
206
+
"allow site uploads to a `domain` again after it has been frozen")
207
flag.Parse()
208
209
var cliOperations int
210
+
if *runMigration != "" {
211
+
cliOperations += 1
212
+
}
213
if *getBlob != "" {
214
cliOperations += 1
215
}
···
219
if *getArchive != "" {
220
cliOperations += 1
221
}
222
+
if *updateSite != "" {
223
+
cliOperations += 1
224
+
}
225
+
if *freezeDomain != "" {
226
+
cliOperations += 1
227
+
}
228
+
if *unfreezeDomain != "" {
229
+
cliOperations += 1
230
+
}
231
if cliOperations > 1 {
232
+
logc.Fatalln(ctx, "-get-blob, -get-manifest, -get-archive, -update-site, -freeze, and -unfreeze are mutually exclusive")
233
}
234
235
if *configTomlPath != "" && *noConfig {
236
+
logc.Fatalln(ctx, "-no-config and -config are mutually exclusive")
237
}
238
239
if *printConfigEnvVars {
···
246
*configTomlPath = "config.toml"
247
}
248
if config, err = Configure(*configTomlPath); err != nil {
249
+
logc.Fatalln(ctx, "config:", err)
250
}
251
252
if *printConfig {
···
258
defer FiniObservability()
259
260
if err = errors.Join(
261
+
configureFeatures(ctx),
262
+
configureMemLimit(ctx),
263
+
configureWildcards(ctx),
264
+
configureFallback(ctx),
265
); err != nil {
266
+
logc.Fatalln(ctx, err)
267
}
268
269
switch {
270
case *runMigration != "":
271
if backend, err = CreateBackend(&config.Storage); err != nil {
272
+
logc.Fatalln(ctx, err)
273
}
274
275
+
if err := RunMigration(ctx, *runMigration); err != nil {
276
+
logc.Fatalln(ctx, err)
277
}
278
279
case *getBlob != "":
280
if backend, err = CreateBackend(&config.Storage); err != nil {
281
+
logc.Fatalln(ctx, err)
282
}
283
284
+
reader, _, _, err := backend.GetBlob(ctx, *getBlob)
285
if err != nil {
286
+
logc.Fatalln(ctx, err)
287
}
288
io.Copy(fileOutputArg(), reader)
289
290
case *getManifest != "":
291
if backend, err = CreateBackend(&config.Storage); err != nil {
292
+
logc.Fatalln(ctx, err)
293
}
294
295
webRoot := webRootArg(*getManifest)
296
+
manifest, _, err := backend.GetManifest(ctx, webRoot, GetManifestOptions{})
297
if err != nil {
298
+
logc.Fatalln(ctx, err)
299
}
300
fmt.Fprintln(fileOutputArg(), ManifestDebugJSON(manifest))
301
302
case *getArchive != "":
303
if backend, err = CreateBackend(&config.Storage); err != nil {
304
+
logc.Fatalln(ctx, err)
305
}
306
307
webRoot := webRootArg(*getArchive)
308
manifest, manifestMtime, err :=
309
+
backend.GetManifest(ctx, webRoot, GetManifestOptions{})
310
if err != nil {
311
+
logc.Fatalln(ctx, err)
312
}
313
+
CollectTar(ctx, fileOutputArg(), manifest, manifestMtime)
314
315
case *updateSite != "":
316
if backend, err = CreateBackend(&config.Storage); err != nil {
317
+
logc.Fatalln(ctx, err)
318
}
319
320
if flag.NArg() != 1 {
321
+
logc.Fatalln(ctx, "update source must be provided as the argument")
322
}
323
324
sourceURL, err := url.Parse(flag.Arg(0))
325
if err != nil {
326
+
logc.Fatalln(ctx, err)
327
}
328
329
var result UpdateResult
330
if sourceURL.Scheme == "" {
331
file, err := os.Open(sourceURL.Path)
332
if err != nil {
333
+
logc.Fatalln(ctx, err)
334
}
335
defer file.Close()
336
···
349
}
350
351
webRoot := webRootArg(*updateSite)
352
+
result = UpdateFromArchive(ctx, webRoot, contentType, file)
353
} else {
354
branch := "pages"
355
if sourceURL.Fragment != "" {
···
357
}
358
359
webRoot := webRootArg(*updateSite)
360
+
result = UpdateFromRepository(ctx, webRoot, sourceURL.String(), branch)
361
}
362
363
switch result.outcome {
364
case UpdateError:
365
+
logc.Printf(ctx, "error: %s\n", result.err)
366
os.Exit(2)
367
case UpdateTimeout:
368
+
logc.Println(ctx, "timeout")
369
os.Exit(1)
370
case UpdateCreated:
371
+
logc.Println(ctx, "created")
372
case UpdateReplaced:
373
+
logc.Println(ctx, "replaced")
374
case UpdateDeleted:
375
+
logc.Println(ctx, "deleted")
376
case UpdateNoChange:
377
+
logc.Println(ctx, "no-change")
378
+
}
379
+
380
+
case *freezeDomain != "" || *unfreezeDomain != "":
381
+
var domain string
382
+
var freeze bool
383
+
if *freezeDomain != "" {
384
+
domain = *freezeDomain
385
+
freeze = true
386
+
} else {
387
+
domain = *unfreezeDomain
388
+
freeze = false
389
+
}
390
+
391
+
if backend, err = CreateBackend(&config.Storage); err != nil {
392
+
logc.Fatalln(ctx, err)
393
+
}
394
+
395
+
if err = backend.FreezeDomain(ctx, domain, freeze); err != nil {
396
+
logc.Fatalln(ctx, err)
397
+
}
398
+
if freeze {
399
+
log.Println("frozen")
400
+
} else {
401
+
log.Println("thawed")
402
}
403
404
default:
···
411
// The backend is not recreated (this is intentional as it allows preserving the cache).
412
OnReload(func() {
413
if newConfig, err := Configure(*configTomlPath); err != nil {
414
+
logc.Println(ctx, "config: reload err:", err)
415
} else {
416
// From https://go.dev/ref/mem:
417
// > A read r of a memory location x holding a value that is not larger than
···
421
// > concurrent write.
422
config = newConfig
423
if err = errors.Join(
424
+
configureFeatures(ctx),
425
+
configureMemLimit(ctx),
426
+
configureWildcards(ctx),
427
+
configureFallback(ctx),
428
); err != nil {
429
// At this point the configuration is in an in-between, corrupted state, so
430
// the only reasonable choice is to crash.
431
+
logc.Fatalln(ctx, "config: reload fail:", err)
432
} else {
433
+
logc.Println(ctx, "config: reload ok")
434
}
435
}
436
})
···
439
// spends some time initializing (which the S3 backend does) a proxy like Caddy can race
440
// with git-pages on startup and return errors for requests that would have been served
441
// just 0.5s later.
442
+
pagesListener := listen(ctx, "pages", config.Server.Pages)
443
+
caddyListener := listen(ctx, "caddy", config.Server.Caddy)
444
+
metricsListener := listen(ctx, "metrics", config.Server.Metrics)
445
446
if backend, err = CreateBackend(&config.Storage); err != nil {
447
+
logc.Fatalln(ctx, err)
448
}
449
backend = NewObservedBackend(backend)
450
451
+
go serve(ctx, pagesListener, ObserveHTTPHandler(http.HandlerFunc(ServePages)))
452
+
go serve(ctx, caddyListener, ObserveHTTPHandler(http.HandlerFunc(ServeCaddy)))
453
+
go serve(ctx, metricsListener, promhttp.Handler())
454
455
if config.Insecure {
456
+
logc.Println(ctx, "serve: ready (INSECURE)")
457
} else {
458
+
logc.Println(ctx, "serve: ready")
459
}
460
+
461
+
WaitForInterrupt()
462
+
logc.Println(ctx, "serve: exiting")
463
}
464
}
+44
-32
src/manifest.go
+44
-32
src/manifest.go
···
8
"crypto/sha256"
9
"errors"
10
"fmt"
11
-
"log"
12
"mime"
13
"net/http"
14
"path"
···
145
for path, entry := range manifest.Contents {
146
if entry.GetType() == Type_Directory || entry.GetType() == Type_Symlink {
147
// no Content-Type
148
-
} else if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_None {
149
contentType := mime.TypeByExtension(filepath.Ext(path))
150
if contentType == "" {
151
-
contentType = http.DetectContentType(entry.Data[:512])
152
}
153
entry.ContentType = proto.String(contentType)
154
-
} else {
155
panic(fmt.Errorf("DetectContentType encountered invalid entry: %v, %v",
156
entry.GetType(), entry.GetTransform()))
157
}
158
}
159
}
160
161
-
// The `clauspost/compress/zstd` package recommends reusing a compressor to avoid repeated
162
// allocations of internal buffers.
163
var zstdEncoder, _ = zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
164
···
167
span, _ := ObserveFunction(ctx, "CompressFiles")
168
defer span.Finish()
169
170
-
var originalSize, compressedSize int64
171
for _, entry := range manifest.Contents {
172
-
if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_None {
173
-
mtype := getMediaType(entry.GetContentType())
174
-
if strings.HasPrefix(mtype, "video/") || strings.HasPrefix(mtype, "audio/") {
175
continue
176
}
177
-
originalSize += entry.GetSize()
178
-
compressedData := zstdEncoder.EncodeAll(entry.GetData(), make([]byte, 0, entry.GetSize()))
179
-
if len(compressedData) < int(*entry.Size) {
180
entry.Data = compressedData
181
-
entry.Size = proto.Int64(int64(len(entry.Data)))
182
-
entry.Transform = Transform_Zstandard.Enum()
183
}
184
-
compressedSize += entry.GetSize()
185
}
186
}
187
manifest.OriginalSize = proto.Int64(originalSize)
188
manifest.CompressedSize = proto.Int64(compressedSize)
189
190
if originalSize != 0 {
191
spaceSaving := (float64(originalSize) - float64(compressedSize)) / float64(originalSize)
192
-
log.Printf("compress: saved %.2f percent (%s to %s)",
193
spaceSaving*100.0,
194
datasize.ByteSize(originalSize).HR(),
195
datasize.ByteSize(compressedSize).HR(),
···
205
func PrepareManifest(ctx context.Context, manifest *Manifest) error {
206
// Parse Netlify-style `_redirects`
207
if err := ProcessRedirectsFile(manifest); err != nil {
208
-
log.Printf("redirects err: %s\n", err)
209
} else if len(manifest.Redirects) > 0 {
210
-
log.Printf("redirects ok: %d rules\n", len(manifest.Redirects))
211
}
212
213
// Parse Netlify-style `_headers`
214
if err := ProcessHeadersFile(manifest); err != nil {
215
-
log.Printf("headers err: %s\n", err)
216
} else if len(manifest.Headers) > 0 {
217
-
log.Printf("headers ok: %d rules\n", len(manifest.Headers))
218
}
219
220
// Sniff content type like `http.ServeContent`
···
247
CompressedSize: manifest.CompressedSize,
248
StoredSize: proto.Int64(0),
249
}
250
-
extObjectSizes := make(map[string]int64)
251
for name, entry := range manifest.Contents {
252
cannotBeInlined := entry.GetType() == Type_InlineFile &&
253
-
entry.GetSize() > int64(config.Limits.MaxInlineFileSize.Bytes())
254
if cannotBeInlined {
255
dataHash := sha256.Sum256(entry.Data)
256
extManifest.Contents[name] = &Entry{
257
-
Type: Type_ExternalFile.Enum(),
258
-
Size: entry.Size,
259
-
Data: fmt.Appendf(nil, "sha256-%x", dataHash),
260
-
Transform: entry.Transform,
261
-
ContentType: entry.ContentType,
262
}
263
-
extObjectSizes[string(dataHash[:])] = entry.GetSize()
264
} else {
265
extManifest.Contents[name] = entry
266
}
267
}
268
-
// `extObjectMap` stores size once per object, deduplicating it
269
-
for _, storedSize := range extObjectSizes {
270
-
*extManifest.StoredSize += storedSize
271
}
272
273
// Upload the resulting manifest and the blob it references.
···
303
}
304
305
if err := backend.CommitManifest(ctx, name, &extManifest); err != nil {
306
-
return nil, fmt.Errorf("commit manifest: %w", err)
307
}
308
309
return &extManifest, nil
···
8
"crypto/sha256"
9
"errors"
10
"fmt"
11
"mime"
12
"net/http"
13
"path"
···
144
for path, entry := range manifest.Contents {
145
if entry.GetType() == Type_Directory || entry.GetType() == Type_Symlink {
146
// no Content-Type
147
+
} else if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_Identity {
148
contentType := mime.TypeByExtension(filepath.Ext(path))
149
if contentType == "" {
150
+
contentType = http.DetectContentType(entry.Data[:min(512, len(entry.Data))])
151
}
152
entry.ContentType = proto.String(contentType)
153
+
} else if entry.GetContentType() == "" {
154
panic(fmt.Errorf("DetectContentType encountered invalid entry: %v, %v",
155
entry.GetType(), entry.GetTransform()))
156
}
157
}
158
}
159
160
+
// The `klauspost/compress/zstd` package recommends reusing a compressor to avoid repeated
161
// allocations of internal buffers.
162
var zstdEncoder, _ = zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
163
···
166
span, _ := ObserveFunction(ctx, "CompressFiles")
167
defer span.Finish()
168
169
+
var originalSize int64
170
+
var compressedSize int64
171
for _, entry := range manifest.Contents {
172
+
if entry.GetType() == Type_InlineFile && entry.GetTransform() == Transform_Identity {
173
+
mediaType := getMediaType(entry.GetContentType())
174
+
if strings.HasPrefix(mediaType, "video/") || strings.HasPrefix(mediaType, "audio/") {
175
continue
176
}
177
+
compressedData := zstdEncoder.EncodeAll(entry.GetData(),
178
+
make([]byte, 0, entry.GetOriginalSize()))
179
+
if int64(len(compressedData)) < entry.GetOriginalSize() {
180
entry.Data = compressedData
181
+
entry.Transform = Transform_Zstd.Enum()
182
+
entry.CompressedSize = proto.Int64(int64(len(entry.Data)))
183
}
184
}
185
+
originalSize += entry.GetOriginalSize()
186
+
compressedSize += entry.GetCompressedSize()
187
}
188
manifest.OriginalSize = proto.Int64(originalSize)
189
manifest.CompressedSize = proto.Int64(compressedSize)
190
191
if originalSize != 0 {
192
spaceSaving := (float64(originalSize) - float64(compressedSize)) / float64(originalSize)
193
+
logc.Printf(ctx, "compress: saved %.2f percent (%s to %s)",
194
spaceSaving*100.0,
195
datasize.ByteSize(originalSize).HR(),
196
datasize.ByteSize(compressedSize).HR(),
···
206
func PrepareManifest(ctx context.Context, manifest *Manifest) error {
207
// Parse Netlify-style `_redirects`
208
if err := ProcessRedirectsFile(manifest); err != nil {
209
+
logc.Printf(ctx, "redirects err: %s\n", err)
210
} else if len(manifest.Redirects) > 0 {
211
+
logc.Printf(ctx, "redirects ok: %d rules\n", len(manifest.Redirects))
212
}
213
214
// Parse Netlify-style `_headers`
215
if err := ProcessHeadersFile(manifest); err != nil {
216
+
logc.Printf(ctx, "headers err: %s\n", err)
217
} else if len(manifest.Headers) > 0 {
218
+
logc.Printf(ctx, "headers ok: %d rules\n", len(manifest.Headers))
219
}
220
221
// Sniff content type like `http.ServeContent`
···
248
CompressedSize: manifest.CompressedSize,
249
StoredSize: proto.Int64(0),
250
}
251
for name, entry := range manifest.Contents {
252
cannotBeInlined := entry.GetType() == Type_InlineFile &&
253
+
entry.GetCompressedSize() > int64(config.Limits.MaxInlineFileSize.Bytes())
254
if cannotBeInlined {
255
dataHash := sha256.Sum256(entry.Data)
256
extManifest.Contents[name] = &Entry{
257
+
Type: Type_ExternalFile.Enum(),
258
+
OriginalSize: entry.OriginalSize,
259
+
CompressedSize: entry.CompressedSize,
260
+
Data: fmt.Appendf(nil, "sha256-%x", dataHash),
261
+
Transform: entry.Transform,
262
+
ContentType: entry.ContentType,
263
+
GitHash: entry.GitHash,
264
}
265
} else {
266
extManifest.Contents[name] = entry
267
}
268
}
269
+
270
+
// Compute the deduplicated storage size.
271
+
var blobSizes = make(map[string]int64)
272
+
for _, entry := range manifest.Contents {
273
+
if entry.GetType() == Type_ExternalFile {
274
+
blobSizes[string(entry.Data)] = entry.GetCompressedSize()
275
+
}
276
+
}
277
+
for _, blobSize := range blobSizes {
278
+
*extManifest.StoredSize += blobSize
279
}
280
281
// Upload the resulting manifest and the blob it references.
···
311
}
312
313
if err := backend.CommitManifest(ctx, name, &extManifest); err != nil {
314
+
if errors.Is(err, ErrDomainFrozen) {
315
+
return nil, err
316
+
} else {
317
+
return nil, fmt.Errorf("commit manifest: %w", err)
318
+
}
319
}
320
321
return &extManifest, nil
+3
-4
src/migrate.go
+3
-4
src/migrate.go
···
3
import (
4
"context"
5
"fmt"
6
-
"log"
7
"slices"
8
"strings"
9
)
···
19
20
func createDomainMarkers(ctx context.Context) error {
21
if backend.HasFeature(ctx, FeatureCheckDomainMarker) {
22
-
log.Print("store already has domain markers")
23
return nil
24
}
25
···
36
}
37
}
38
for idx, domain := range domains {
39
-
log.Printf("(%d / %d) creating domain %s", idx+1, len(domains), domain)
40
if err := backend.CreateDomain(ctx, domain); err != nil {
41
return fmt.Errorf("creating domain %s: %w", domain, err)
42
}
···
44
if err := backend.EnableFeature(ctx, FeatureCheckDomainMarker); err != nil {
45
return err
46
}
47
-
log.Printf("created markers for %d domains", len(domains))
48
return nil
49
}
···
3
import (
4
"context"
5
"fmt"
6
"slices"
7
"strings"
8
)
···
18
19
func createDomainMarkers(ctx context.Context) error {
20
if backend.HasFeature(ctx, FeatureCheckDomainMarker) {
21
+
logc.Print(ctx, "store already has domain markers")
22
return nil
23
}
24
···
35
}
36
}
37
for idx, domain := range domains {
38
+
logc.Printf(ctx, "(%d / %d) creating domain %s", idx+1, len(domains), domain)
39
if err := backend.CreateDomain(ctx, domain); err != nil {
40
return fmt.Errorf("creating domain %s: %w", domain, err)
41
}
···
43
if err := backend.EnableFeature(ctx, FeatureCheckDomainMarker); err != nil {
44
return err
45
}
46
+
logc.Printf(ctx, "created markers for %d domains", len(domains))
47
return nil
48
}
+65
-5
src/observe.go
+65
-5
src/observe.go
···
12
"os"
13
"runtime/debug"
14
"strconv"
15
"time"
16
17
slogmulti "github.com/samber/slog-multi"
18
19
"github.com/prometheus/client_golang/prometheus"
20
"github.com/prometheus/client_golang/prometheus/promauto"
···
41
}, []string{"method"})
42
)
43
44
func hasSentry() bool {
45
return os.Getenv("SENTRY_DSN") != ""
46
}
···
55
56
logHandlers := []slog.Handler{}
57
58
switch config.LogFormat {
59
case "none":
60
// nothing to do
61
case "text":
62
logHandlers = append(logHandlers,
63
-
slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{}))
64
case "json":
65
logHandlers = append(logHandlers,
66
-
slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{}))
67
default:
68
log.Println("unknown log format", config.LogFormat)
69
}
70
71
if hasSentry() {
72
enableLogs := false
73
if value, err := strconv.ParseBool(os.Getenv("SENTRY_LOGS")); err == nil {
···
80
}
81
82
options := sentry.ClientOptions{}
83
options.Environment = environment
84
options.EnableLogs = enableLogs
85
options.EnableTracing = enableTracing
···
118
if enableLogs {
119
logHandlers = append(logHandlers, sentryslog.Option{
120
AddSource: true,
121
}.NewSentryHandler(context.Background()))
122
}
123
}
···
125
slog.SetDefault(slog.New(slogmulti.Fanout(logHandlers...)))
126
}
127
128
func FiniObservability() {
129
if hasSentry() {
130
-
sentry.Flush(2 * time.Second)
131
}
132
}
133
134
func ObserveError(err error) {
···
364
}
365
366
func (backend *observedBackend) CheckDomain(ctx context.Context, domain string) (found bool, err error) {
367
-
span, ctx := ObserveFunction(ctx, "CheckDomain", "manifest.domain", domain)
368
found, err = backend.inner.CheckDomain(ctx, domain)
369
span.Finish()
370
return
371
}
372
373
func (backend *observedBackend) CreateDomain(ctx context.Context, domain string) (err error) {
374
-
span, ctx := ObserveFunction(ctx, "CreateDomain", "manifest.domain", domain)
375
err = backend.inner.CreateDomain(ctx, domain)
376
span.Finish()
377
return
378
}
···
12
"os"
13
"runtime/debug"
14
"strconv"
15
+
"strings"
16
+
"sync"
17
"time"
18
19
slogmulti "github.com/samber/slog-multi"
20
+
21
+
syslog "codeberg.org/git-pages/go-slog-syslog"
22
23
"github.com/prometheus/client_golang/prometheus"
24
"github.com/prometheus/client_golang/prometheus/promauto"
···
45
}, []string{"method"})
46
)
47
48
+
var syslogHandler syslog.Handler
49
+
50
func hasSentry() bool {
51
return os.Getenv("SENTRY_DSN") != ""
52
}
···
61
62
logHandlers := []slog.Handler{}
63
64
+
logLevel := slog.LevelInfo
65
+
switch strings.ToLower(config.LogLevel) {
66
+
case "debug":
67
+
logLevel = slog.LevelDebug
68
+
case "info":
69
+
logLevel = slog.LevelInfo
70
+
case "warn":
71
+
logLevel = slog.LevelWarn
72
+
case "error":
73
+
logLevel = slog.LevelError
74
+
default:
75
+
log.Println("unknown log level", config.LogLevel)
76
+
}
77
+
78
switch config.LogFormat {
79
case "none":
80
// nothing to do
81
case "text":
82
logHandlers = append(logHandlers,
83
+
slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: logLevel}))
84
case "json":
85
logHandlers = append(logHandlers,
86
+
slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: logLevel}))
87
default:
88
log.Println("unknown log format", config.LogFormat)
89
}
90
91
+
if syslogAddr := os.Getenv("SYSLOG_ADDR"); syslogAddr != "" {
92
+
var err error
93
+
syslogHandler, err = syslog.NewHandler(&syslog.HandlerOptions{
94
+
Address: syslogAddr,
95
+
AppName: "git-pages",
96
+
StructuredDataID: "git-pages",
97
+
})
98
+
if err != nil {
99
+
log.Fatalf("syslog: %v", err)
100
+
}
101
+
logHandlers = append(logHandlers, syslogHandler)
102
+
}
103
+
104
if hasSentry() {
105
enableLogs := false
106
if value, err := strconv.ParseBool(os.Getenv("SENTRY_LOGS")); err == nil {
···
113
}
114
115
options := sentry.ClientOptions{}
116
+
options.DisableTelemetryBuffer = !config.Feature("sentry-telemetry-buffer")
117
options.Environment = environment
118
options.EnableLogs = enableLogs
119
options.EnableTracing = enableTracing
···
152
if enableLogs {
153
logHandlers = append(logHandlers, sentryslog.Option{
154
AddSource: true,
155
+
LogLevel: levelsFromMinimum(logLevel),
156
}.NewSentryHandler(context.Background()))
157
}
158
}
···
160
slog.SetDefault(slog.New(slogmulti.Fanout(logHandlers...)))
161
}
162
163
+
// From sentryslog, because for some reason they don't make it public.
164
+
func levelsFromMinimum(minLevel slog.Level) []slog.Level {
165
+
allLevels := []slog.Level{slog.LevelDebug, slog.LevelInfo, slog.LevelWarn, slog.LevelError, sentryslog.LevelFatal}
166
+
var result []slog.Level
167
+
for _, level := range allLevels {
168
+
if level >= minLevel {
169
+
result = append(result, level)
170
+
}
171
+
}
172
+
return result
173
+
}
174
+
175
func FiniObservability() {
176
+
var wg sync.WaitGroup
177
+
timeout := 2 * time.Second
178
+
if syslogHandler != nil {
179
+
wg.Go(func() { syslogHandler.Flush(timeout) })
180
+
}
181
if hasSentry() {
182
+
wg.Go(func() { sentry.Flush(timeout) })
183
}
184
+
wg.Wait()
185
}
186
187
func ObserveError(err error) {
···
417
}
418
419
func (backend *observedBackend) CheckDomain(ctx context.Context, domain string) (found bool, err error) {
420
+
span, ctx := ObserveFunction(ctx, "CheckDomain", "domain.name", domain)
421
found, err = backend.inner.CheckDomain(ctx, domain)
422
span.Finish()
423
return
424
}
425
426
func (backend *observedBackend) CreateDomain(ctx context.Context, domain string) (err error) {
427
+
span, ctx := ObserveFunction(ctx, "CreateDomain", "domain.name", domain)
428
err = backend.inner.CreateDomain(ctx, domain)
429
span.Finish()
430
return
431
}
432
+
433
+
func (backend *observedBackend) FreezeDomain(ctx context.Context, domain string, freeze bool) (err error) {
434
+
span, ctx := ObserveFunction(ctx, "FreezeDomain", "domain.name", domain, "domain.frozen", freeze)
435
+
err = backend.inner.FreezeDomain(ctx, domain, freeze)
436
+
span.Finish()
437
+
return
438
+
}
+71
-17
src/pages.go
+71
-17
src/pages.go
···
8
"errors"
9
"fmt"
10
"io"
11
-
"log"
12
"maps"
13
"net/http"
14
"net/url"
15
"os"
16
"path"
17
"strconv"
18
"strings"
19
"time"
···
27
const notFoundPage = "404.html"
28
29
var (
30
siteUpdatesCount = promauto.NewCounterVec(prometheus.CounterOpts{
31
Name: "git_pages_site_updates",
32
Help: "Count of site updates in total",
···
131
result := <-indexManifestCh
132
manifest, manifestMtime, err = result.manifest, result.manifestMtime, result.err
133
if manifest == nil && errors.Is(err, ErrObjectNotFound) {
134
-
if found, fallbackErr := HandleWildcardFallback(w, r); found {
135
-
return fallbackErr
136
} else {
137
w.WriteHeader(http.StatusNotFound)
138
fmt.Fprintf(w, "site not found\n")
···
300
acceptedEncodings := parseHTTPEncodings(r.Header.Get("Accept-Encoding"))
301
negotiatedEncoding := true
302
switch entry.GetTransform() {
303
-
case Transform_None:
304
-
if acceptedEncodings.Negotiate("identity") != "identity" {
305
negotiatedEncoding = false
306
}
307
-
case Transform_Zstandard:
308
supported := []string{"zstd", "identity"}
309
if entry.ContentType == nil {
310
// If Content-Type is unset, `http.ServeContent` will try to sniff
···
315
case "zstd":
316
// Set Content-Length ourselves since `http.ServeContent` only sets
317
// it if Content-Encoding is unset or if it's a range request.
318
-
w.Header().Set("Content-Length", strconv.FormatInt(*entry.Size, 10))
319
w.Header().Set("Content-Encoding", "zstd")
320
case "identity":
321
compressedData, _ := io.ReadAll(reader)
322
decompressedData, err := zstdDecoder.DecodeAll(compressedData, []byte{})
···
326
return err
327
}
328
reader = bytes.NewReader(decompressedData)
329
default:
330
negotiatedEncoding = false
331
}
332
default:
333
return fmt.Errorf("unexpected transform")
···
383
return nil
384
}
385
386
func putPage(w http.ResponseWriter, r *http.Request) error {
387
var result UpdateResult
388
···
402
defer cancel()
403
404
contentType := getMediaType(r.Header.Get("Content-Type"))
405
-
406
-
if contentType == "application/x-www-form-urlencoded" {
407
auth, err := AuthorizeUpdateFromRepository(r)
408
if err != nil {
409
return err
···
428
return err
429
}
430
431
result = UpdateFromRepository(updateCtx, webRoot, repoURL, branch)
432
-
} else {
433
_, err := AuthorizeUpdateFromArchive(r)
434
if err != nil {
435
return err
436
}
437
438
// request body contains archive
···
448
w.WriteHeader(http.StatusUnsupportedMediaType)
449
} else if errors.Is(result.err, ErrArchiveTooLarge) {
450
w.WriteHeader(http.StatusRequestEntityTooLarge)
451
} else {
452
w.WriteHeader(http.StatusServiceUnavailable)
453
}
···
496
return err
497
}
498
499
err = backend.DeleteManifest(r.Context(), makeWebRoot(host, projectName))
500
if err != nil {
501
w.WriteHeader(http.StatusInternalServerError)
···
596
return err
597
}
598
599
resultChan := make(chan UpdateResult)
600
go func(ctx context.Context) {
601
ctx, cancel := context.WithTimeout(ctx, time.Duration(config.Limits.UpdateTimeout))
···
623
w.WriteHeader(http.StatusGatewayTimeout)
624
fmt.Fprintln(w, "update timeout")
625
case UpdateNoChange:
626
-
w.WriteHeader(http.StatusOK)
627
fmt.Fprintln(w, "unchanged")
628
case UpdateCreated:
629
-
w.WriteHeader(http.StatusOK)
630
fmt.Fprintln(w, "created")
631
case UpdateReplaced:
632
-
w.WriteHeader(http.StatusOK)
633
fmt.Fprintln(w, "replaced")
634
case UpdateDeleted:
635
-
w.WriteHeader(http.StatusOK)
636
fmt.Fprintln(w, "deleted")
637
}
638
if result.manifest != nil {
···
652
// any intentional deviation is an opportunity to miss an issue that will affect our
653
// visitors but not our health checks.
654
if r.Header.Get("Health-Check") == "" {
655
-
log.Println("pages:", r.Method, r.Host, r.URL, r.Header.Get("Content-Type"))
656
if region := os.Getenv("FLY_REGION"); region != "" {
657
machine_id := os.Getenv("FLY_MACHINE_ID")
658
w.Header().Add("Server", fmt.Sprintf("git-pages (fly.io; %s; %s)", region, machine_id))
···
666
ObserveData(r.Context(), "server.name", hostname)
667
}
668
}
669
}
670
err := error(nil)
671
switch r.Method {
672
// REST API
673
case http.MethodHead, http.MethodGet:
674
err = getPage(w, r)
675
case http.MethodPut:
···
680
case http.MethodPost:
681
err = postPage(w, r)
682
default:
683
-
w.Header().Add("Allow", "HEAD, GET, PUT, DELETE, POST")
684
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
685
err = fmt.Errorf("method %s not allowed", r.Method)
686
}
···
695
http.Error(w, message, http.StatusRequestEntityTooLarge)
696
err = errors.New(message)
697
}
698
-
log.Println("pages err:", err)
699
}
700
}
···
8
"errors"
9
"fmt"
10
"io"
11
"maps"
12
"net/http"
13
"net/url"
14
"os"
15
"path"
16
+
"slices"
17
"strconv"
18
"strings"
19
"time"
···
27
const notFoundPage = "404.html"
28
29
var (
30
+
serveEncodingCount = promauto.NewCounterVec(prometheus.CounterOpts{
31
+
Name: "git_pages_serve_encoding_count",
32
+
Help: "Count of blob transform vs negotiated encoding",
33
+
}, []string{"transform", "negotiated"})
34
+
35
siteUpdatesCount = promauto.NewCounterVec(prometheus.CounterOpts{
36
Name: "git_pages_site_updates",
37
Help: "Count of site updates in total",
···
136
result := <-indexManifestCh
137
manifest, manifestMtime, err = result.manifest, result.manifestMtime, result.err
138
if manifest == nil && errors.Is(err, ErrObjectNotFound) {
139
+
if fallback != nil {
140
+
logc.Printf(r.Context(), "fallback: %s via %s", host, config.Fallback.ProxyTo)
141
+
fallback.ServeHTTP(w, r)
142
+
return nil
143
} else {
144
w.WriteHeader(http.StatusNotFound)
145
fmt.Fprintf(w, "site not found\n")
···
307
acceptedEncodings := parseHTTPEncodings(r.Header.Get("Accept-Encoding"))
308
negotiatedEncoding := true
309
switch entry.GetTransform() {
310
+
case Transform_Identity:
311
+
switch acceptedEncodings.Negotiate("identity") {
312
+
case "identity":
313
+
serveEncodingCount.
314
+
With(prometheus.Labels{"transform": "identity", "negotiated": "identity"}).
315
+
Inc()
316
+
default:
317
negotiatedEncoding = false
318
+
serveEncodingCount.
319
+
With(prometheus.Labels{"transform": "identity", "negotiated": "failure"}).
320
+
Inc()
321
}
322
+
case Transform_Zstd:
323
supported := []string{"zstd", "identity"}
324
if entry.ContentType == nil {
325
// If Content-Type is unset, `http.ServeContent` will try to sniff
···
330
case "zstd":
331
// Set Content-Length ourselves since `http.ServeContent` only sets
332
// it if Content-Encoding is unset or if it's a range request.
333
+
w.Header().Set("Content-Length", strconv.FormatInt(entry.GetCompressedSize(), 10))
334
w.Header().Set("Content-Encoding", "zstd")
335
+
serveEncodingCount.
336
+
With(prometheus.Labels{"transform": "zstd", "negotiated": "zstd"}).
337
+
Inc()
338
case "identity":
339
compressedData, _ := io.ReadAll(reader)
340
decompressedData, err := zstdDecoder.DecodeAll(compressedData, []byte{})
···
344
return err
345
}
346
reader = bytes.NewReader(decompressedData)
347
+
serveEncodingCount.
348
+
With(prometheus.Labels{"transform": "zstd", "negotiated": "identity"}).
349
+
Inc()
350
default:
351
negotiatedEncoding = false
352
+
serveEncodingCount.
353
+
With(prometheus.Labels{"transform": "zstd", "negotiated": "failure"}).
354
+
Inc()
355
}
356
default:
357
return fmt.Errorf("unexpected transform")
···
407
return nil
408
}
409
410
+
func checkDryRun(w http.ResponseWriter, r *http.Request) bool {
411
+
// "Dry run" requests are used to non-destructively check if the request would have
412
+
// successfully been authorized.
413
+
if r.Header.Get("Dry-Run") != "" {
414
+
fmt.Fprintln(w, "dry-run ok")
415
+
return true
416
+
}
417
+
return false
418
+
}
419
+
420
func putPage(w http.ResponseWriter, r *http.Request) error {
421
var result UpdateResult
422
···
436
defer cancel()
437
438
contentType := getMediaType(r.Header.Get("Content-Type"))
439
+
switch contentType {
440
+
case "application/x-www-form-urlencoded":
441
auth, err := AuthorizeUpdateFromRepository(r)
442
if err != nil {
443
return err
···
462
return err
463
}
464
465
+
if checkDryRun(w, r) {
466
+
return nil
467
+
}
468
+
469
result = UpdateFromRepository(updateCtx, webRoot, repoURL, branch)
470
+
471
+
default:
472
_, err := AuthorizeUpdateFromArchive(r)
473
if err != nil {
474
return err
475
+
}
476
+
477
+
if checkDryRun(w, r) {
478
+
return nil
479
}
480
481
// request body contains archive
···
491
w.WriteHeader(http.StatusUnsupportedMediaType)
492
} else if errors.Is(result.err, ErrArchiveTooLarge) {
493
w.WriteHeader(http.StatusRequestEntityTooLarge)
494
+
} else if errors.Is(result.err, ErrDomainFrozen) {
495
+
w.WriteHeader(http.StatusForbidden)
496
} else {
497
w.WriteHeader(http.StatusServiceUnavailable)
498
}
···
541
return err
542
}
543
544
+
if checkDryRun(w, r) {
545
+
return nil
546
+
}
547
+
548
err = backend.DeleteManifest(r.Context(), makeWebRoot(host, projectName))
549
if err != nil {
550
w.WriteHeader(http.StatusInternalServerError)
···
645
return err
646
}
647
648
+
if checkDryRun(w, r) {
649
+
return nil
650
+
}
651
+
652
resultChan := make(chan UpdateResult)
653
go func(ctx context.Context) {
654
ctx, cancel := context.WithTimeout(ctx, time.Duration(config.Limits.UpdateTimeout))
···
676
w.WriteHeader(http.StatusGatewayTimeout)
677
fmt.Fprintln(w, "update timeout")
678
case UpdateNoChange:
679
fmt.Fprintln(w, "unchanged")
680
case UpdateCreated:
681
fmt.Fprintln(w, "created")
682
case UpdateReplaced:
683
fmt.Fprintln(w, "replaced")
684
case UpdateDeleted:
685
fmt.Fprintln(w, "deleted")
686
}
687
if result.manifest != nil {
···
701
// any intentional deviation is an opportunity to miss an issue that will affect our
702
// visitors but not our health checks.
703
if r.Header.Get("Health-Check") == "" {
704
+
logc.Println(r.Context(), "pages:", r.Method, r.Host, r.URL, r.Header.Get("Content-Type"))
705
if region := os.Getenv("FLY_REGION"); region != "" {
706
machine_id := os.Getenv("FLY_MACHINE_ID")
707
w.Header().Add("Server", fmt.Sprintf("git-pages (fly.io; %s; %s)", region, machine_id))
···
715
ObserveData(r.Context(), "server.name", hostname)
716
}
717
}
718
+
}
719
+
allowedMethods := []string{"OPTIONS", "HEAD", "GET", "PUT", "DELETE", "POST"}
720
+
if r.Method == "OPTIONS" || !slices.Contains(allowedMethods, r.Method) {
721
+
w.Header().Add("Allow", strings.Join(allowedMethods, ", "))
722
}
723
err := error(nil)
724
switch r.Method {
725
// REST API
726
+
case http.MethodOptions:
727
+
// no preflight options
728
case http.MethodHead, http.MethodGet:
729
err = getPage(w, r)
730
case http.MethodPut:
···
735
case http.MethodPost:
736
err = postPage(w, r)
737
default:
738
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
739
err = fmt.Errorf("method %s not allowed", r.Method)
740
}
···
749
http.Error(w, message, http.StatusRequestEntityTooLarge)
750
err = errors.New(message)
751
}
752
+
logc.Println(r.Context(), "pages err:", err)
753
}
754
}
+49
-21
src/schema.pb.go
+49
-21
src/schema.pb.go
···
81
return file_schema_proto_rawDescGZIP(), []int{0}
82
}
83
84
type Transform int32
85
86
const (
87
// No transformation.
88
-
Transform_None Transform = 0
89
// Zstandard compression.
90
-
Transform_Zstandard Transform = 1
91
)
92
93
// Enum value maps for Transform.
94
var (
95
Transform_name = map[int32]string{
96
-
0: "None",
97
-
1: "Zstandard",
98
}
99
Transform_value = map[string]int32{
100
-
"None": 0,
101
-
"Zstandard": 1,
102
}
103
)
104
···
133
state protoimpl.MessageState `protogen:"open.v1"`
134
Type *Type `protobuf:"varint,1,opt,name=type,enum=Type" json:"type,omitempty"`
135
// Only present for `type == InlineFile` and `type == ExternalFile`.
136
-
// For transformed entries, refers to the post-transformation (compressed) size.
137
-
Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
138
// Meaning depends on `type`:
139
// - If `type == InlineFile`, contains file data.
140
// - If `type == ExternalFile`, contains blob name (an otherwise unspecified
···
147
Transform *Transform `protobuf:"varint,4,opt,name=transform,enum=Transform" json:"transform,omitempty"`
148
// Only present for `type == InlineFile` and `type == ExternalFile`.
149
// Currently, optional (not present on certain legacy manifests).
150
-
ContentType *string `protobuf:"bytes,5,opt,name=content_type,json=contentType" json:"content_type,omitempty"`
151
unknownFields protoimpl.UnknownFields
152
sizeCache protoimpl.SizeCache
153
}
···
189
return Type_Invalid
190
}
191
192
-
func (x *Entry) GetSize() int64 {
193
-
if x != nil && x.Size != nil {
194
-
return *x.Size
195
}
196
return 0
197
}
···
207
if x != nil && x.Transform != nil {
208
return *x.Transform
209
}
210
-
return Transform_None
211
}
212
213
func (x *Entry) GetContentType() string {
214
if x != nil && x.ContentType != nil {
215
return *x.ContentType
216
}
217
return ""
218
}
···
568
569
const file_schema_proto_rawDesc = "" +
570
"\n" +
571
-
"\fschema.proto\"\x97\x01\n" +
572
"\x05Entry\x12\x19\n" +
573
-
"\x04type\x18\x01 \x01(\x0e2\x05.TypeR\x04type\x12\x12\n" +
574
-
"\x04size\x18\x02 \x01(\x03R\x04size\x12\x12\n" +
575
"\x04data\x18\x03 \x01(\fR\x04data\x12(\n" +
576
"\ttransform\x18\x04 \x01(\x0e2\n" +
577
".TransformR\ttransform\x12!\n" +
578
-
"\fcontent_type\x18\x05 \x01(\tR\vcontentType\"`\n" +
579
"\fRedirectRule\x12\x12\n" +
580
"\x04from\x18\x01 \x01(\tR\x04from\x12\x0e\n" +
581
"\x02to\x18\x02 \x01(\tR\x02to\x12\x16\n" +
···
614
"\n" +
615
"InlineFile\x10\x02\x12\x10\n" +
616
"\fExternalFile\x10\x03\x12\v\n" +
617
-
"\aSymlink\x10\x04*$\n" +
618
-
"\tTransform\x12\b\n" +
619
-
"\x04None\x10\x00\x12\r\n" +
620
-
"\tZstandard\x10\x01B,Z*codeberg.org/git-pages/git-pages/git_pagesb\beditionsp\xe8\a"
621
622
var (
623
file_schema_proto_rawDescOnce sync.Once
···
81
return file_schema_proto_rawDescGZIP(), []int{0}
82
}
83
84
+
// Transformation names should match HTTP `Accept-Encoding:` header.
85
type Transform int32
86
87
const (
88
// No transformation.
89
+
Transform_Identity Transform = 0
90
// Zstandard compression.
91
+
Transform_Zstd Transform = 1
92
)
93
94
// Enum value maps for Transform.
95
var (
96
Transform_name = map[int32]string{
97
+
0: "Identity",
98
+
1: "Zstd",
99
}
100
Transform_value = map[string]int32{
101
+
"Identity": 0,
102
+
"Zstd": 1,
103
}
104
)
105
···
134
state protoimpl.MessageState `protogen:"open.v1"`
135
Type *Type `protobuf:"varint,1,opt,name=type,enum=Type" json:"type,omitempty"`
136
// Only present for `type == InlineFile` and `type == ExternalFile`.
137
+
// For transformed entries, refers to the pre-transformation (decompressed) size; otherwise
138
+
// equal to `compressed_size`.
139
+
OriginalSize *int64 `protobuf:"varint,7,opt,name=original_size,json=originalSize" json:"original_size,omitempty"`
140
+
// Only present for `type == InlineFile` and `type == ExternalFile`.
141
+
// For transformed entries, refers to the post-transformation (compressed) size; otherwise
142
+
// equal to `original_size`.
143
+
CompressedSize *int64 `protobuf:"varint,2,opt,name=compressed_size,json=compressedSize" json:"compressed_size,omitempty"`
144
// Meaning depends on `type`:
145
// - If `type == InlineFile`, contains file data.
146
// - If `type == ExternalFile`, contains blob name (an otherwise unspecified
···
153
Transform *Transform `protobuf:"varint,4,opt,name=transform,enum=Transform" json:"transform,omitempty"`
154
// Only present for `type == InlineFile` and `type == ExternalFile`.
155
// Currently, optional (not present on certain legacy manifests).
156
+
ContentType *string `protobuf:"bytes,5,opt,name=content_type,json=contentType" json:"content_type,omitempty"`
157
+
// May be present for `type == InlineFile` and `type == ExternalFile`.
158
+
// Used to reduce the amount of work being done during git checkouts.
159
+
// The type of hash used is determined by the length:
160
+
// - 40 bytes: SHA1DC (as hex)
161
+
// - 64 bytes: SHA256 (as hex)
162
+
GitHash *string `protobuf:"bytes,6,opt,name=git_hash,json=gitHash" json:"git_hash,omitempty"`
163
unknownFields protoimpl.UnknownFields
164
sizeCache protoimpl.SizeCache
165
}
···
201
return Type_Invalid
202
}
203
204
+
func (x *Entry) GetOriginalSize() int64 {
205
+
if x != nil && x.OriginalSize != nil {
206
+
return *x.OriginalSize
207
+
}
208
+
return 0
209
+
}
210
+
211
+
func (x *Entry) GetCompressedSize() int64 {
212
+
if x != nil && x.CompressedSize != nil {
213
+
return *x.CompressedSize
214
}
215
return 0
216
}
···
226
if x != nil && x.Transform != nil {
227
return *x.Transform
228
}
229
+
return Transform_Identity
230
}
231
232
func (x *Entry) GetContentType() string {
233
if x != nil && x.ContentType != nil {
234
return *x.ContentType
235
+
}
236
+
return ""
237
+
}
238
+
239
+
func (x *Entry) GetGitHash() string {
240
+
if x != nil && x.GitHash != nil {
241
+
return *x.GitHash
242
}
243
return ""
244
}
···
594
595
const file_schema_proto_rawDesc = "" +
596
"\n" +
597
+
"\fschema.proto\"\xec\x01\n" +
598
"\x05Entry\x12\x19\n" +
599
+
"\x04type\x18\x01 \x01(\x0e2\x05.TypeR\x04type\x12#\n" +
600
+
"\roriginal_size\x18\a \x01(\x03R\foriginalSize\x12'\n" +
601
+
"\x0fcompressed_size\x18\x02 \x01(\x03R\x0ecompressedSize\x12\x12\n" +
602
"\x04data\x18\x03 \x01(\fR\x04data\x12(\n" +
603
"\ttransform\x18\x04 \x01(\x0e2\n" +
604
".TransformR\ttransform\x12!\n" +
605
+
"\fcontent_type\x18\x05 \x01(\tR\vcontentType\x12\x19\n" +
606
+
"\bgit_hash\x18\x06 \x01(\tR\agitHash\"`\n" +
607
"\fRedirectRule\x12\x12\n" +
608
"\x04from\x18\x01 \x01(\tR\x04from\x12\x0e\n" +
609
"\x02to\x18\x02 \x01(\tR\x02to\x12\x16\n" +
···
642
"\n" +
643
"InlineFile\x10\x02\x12\x10\n" +
644
"\fExternalFile\x10\x03\x12\v\n" +
645
+
"\aSymlink\x10\x04*#\n" +
646
+
"\tTransform\x12\f\n" +
647
+
"\bIdentity\x10\x00\x12\b\n" +
648
+
"\x04Zstd\x10\x01B,Z*codeberg.org/git-pages/git-pages/git_pagesb\beditionsp\xe8\a"
649
650
var (
651
file_schema_proto_rawDescOnce sync.Once
+19
-7
src/schema.proto
+19
-7
src/schema.proto
···
15
Symlink = 4;
16
}
17
18
enum Transform {
19
// No transformation.
20
-
None = 0;
21
// Zstandard compression.
22
-
Zstandard = 1;
23
}
24
25
message Entry {
26
Type type = 1;
27
// Only present for `type == InlineFile` and `type == ExternalFile`.
28
-
// For transformed entries, refers to the post-transformation (compressed) size.
29
-
int64 size = 2;
30
// Meaning depends on `type`:
31
// * If `type == InlineFile`, contains file data.
32
// * If `type == ExternalFile`, contains blob name (an otherwise unspecified
···
40
// Only present for `type == InlineFile` and `type == ExternalFile`.
41
// Currently, optional (not present on certain legacy manifests).
42
string content_type = 5;
43
}
44
45
// See https://docs.netlify.com/manage/routing/redirects/overview/ for details.
···
75
76
// Contents
77
map<string, Entry> contents = 4;
78
-
int64 original_size = 10; // total size of entries before compression
79
-
int64 compressed_size = 5; // simple sum of each `entry.size`
80
-
int64 stored_size = 8; // total size of (deduplicated) external objects
81
82
// Netlify-style `_redirects` and `_headers`
83
repeated RedirectRule redirects = 6;
···
15
Symlink = 4;
16
}
17
18
+
// Transformation names should match HTTP `Accept-Encoding:` header.
19
enum Transform {
20
// No transformation.
21
+
Identity = 0;
22
// Zstandard compression.
23
+
Zstd = 1;
24
}
25
26
message Entry {
27
Type type = 1;
28
// Only present for `type == InlineFile` and `type == ExternalFile`.
29
+
// For transformed entries, refers to the pre-transformation (decompressed) size; otherwise
30
+
// equal to `compressed_size`.
31
+
int64 original_size = 7;
32
+
// Only present for `type == InlineFile` and `type == ExternalFile`.
33
+
// For transformed entries, refers to the post-transformation (compressed) size; otherwise
34
+
// equal to `original_size`.
35
+
int64 compressed_size = 2;
36
// Meaning depends on `type`:
37
// * If `type == InlineFile`, contains file data.
38
// * If `type == ExternalFile`, contains blob name (an otherwise unspecified
···
46
// Only present for `type == InlineFile` and `type == ExternalFile`.
47
// Currently, optional (not present on certain legacy manifests).
48
string content_type = 5;
49
+
// May be present for `type == InlineFile` and `type == ExternalFile`.
50
+
// Used to reduce the amount of work being done during git checkouts.
51
+
// The type of hash used is determined by the length:
52
+
// * 40 bytes: SHA1DC (as hex)
53
+
// * 64 bytes: SHA256 (as hex)
54
+
string git_hash = 6;
55
}
56
57
// See https://docs.netlify.com/manage/routing/redirects/overview/ for details.
···
87
88
// Contents
89
map<string, Entry> contents = 4;
90
+
int64 original_size = 10; // sum of each `entry.original_size`
91
+
int64 compressed_size = 5; // sum of each `entry.compressed_size`
92
+
int64 stored_size = 8; // sum of deduplicated `entry.compressed_size` for external files only
93
94
// Netlify-style `_redirects` and `_headers`
95
repeated RedirectRule redirects = 6;
+6
src/signal_other.go
+6
src/signal_other.go
+7
src/signal_posix.go
+7
src/signal_posix.go
+13
-11
src/update.go
+13
-11
src/update.go
···
5
"errors"
6
"fmt"
7
"io"
8
-
"log"
9
"strings"
10
)
11
···
71
status = "unchanged"
72
}
73
if newManifest.Commit != nil {
74
-
log.Printf("update %s ok: %s %s", webRoot, status, *newManifest.Commit)
75
} else {
76
-
log.Printf("update %s ok: %s", webRoot, status)
77
}
78
} else {
79
-
log.Printf("update %s err: %s", webRoot, err)
80
}
81
82
return UpdateResult{outcome, newManifest, err}
···
91
span, ctx := ObserveFunction(ctx, "UpdateFromRepository", "repo.url", repoURL)
92
defer span.Finish()
93
94
-
log.Printf("update %s: %s %s\n", webRoot, repoURL, branch)
95
96
-
manifest, err := FetchRepository(ctx, repoURL, branch)
97
if errors.Is(err, context.DeadlineExceeded) {
98
result = UpdateResult{UpdateTimeout, nil, fmt.Errorf("update timeout")}
99
} else if err != nil {
···
119
120
switch contentType {
121
case "application/x-tar":
122
-
log.Printf("update %s: (tar)", webRoot)
123
manifest, err = ExtractTar(reader) // yellow?
124
case "application/x-tar+gzip":
125
-
log.Printf("update %s: (tar.gz)", webRoot)
126
manifest, err = ExtractTarGzip(reader) // definitely yellow.
127
case "application/x-tar+zstd":
128
-
log.Printf("update %s: (tar.zst)", webRoot)
129
manifest, err = ExtractTarZstd(reader)
130
case "application/zip":
131
-
log.Printf("update %s: (zip)", webRoot)
132
manifest, err = ExtractZip(reader)
133
default:
134
err = errArchiveFormat
135
}
136
137
if err != nil {
138
-
log.Printf("update %s err: %s", webRoot, err)
139
result = UpdateResult{UpdateError, nil, err}
140
} else {
141
result = Update(ctx, webRoot, manifest)
···
5
"errors"
6
"fmt"
7
"io"
8
"strings"
9
)
10
···
70
status = "unchanged"
71
}
72
if newManifest.Commit != nil {
73
+
logc.Printf(ctx, "update %s ok: %s %s", webRoot, status, *newManifest.Commit)
74
} else {
75
+
logc.Printf(ctx, "update %s ok: %s", webRoot, status)
76
}
77
} else {
78
+
logc.Printf(ctx, "update %s err: %s", webRoot, err)
79
}
80
81
return UpdateResult{outcome, newManifest, err}
···
90
span, ctx := ObserveFunction(ctx, "UpdateFromRepository", "repo.url", repoURL)
91
defer span.Finish()
92
93
+
logc.Printf(ctx, "update %s: %s %s\n", webRoot, repoURL, branch)
94
95
+
oldManifest, _, _ := backend.GetManifest(ctx, webRoot, GetManifestOptions{})
96
+
// Ignore errors; worst case we have to re-fetch all of the blobs.
97
+
98
+
manifest, err := FetchRepository(ctx, repoURL, branch, oldManifest)
99
if errors.Is(err, context.DeadlineExceeded) {
100
result = UpdateResult{UpdateTimeout, nil, fmt.Errorf("update timeout")}
101
} else if err != nil {
···
121
122
switch contentType {
123
case "application/x-tar":
124
+
logc.Printf(ctx, "update %s: (tar)", webRoot)
125
manifest, err = ExtractTar(reader) // yellow?
126
case "application/x-tar+gzip":
127
+
logc.Printf(ctx, "update %s: (tar.gz)", webRoot)
128
manifest, err = ExtractTarGzip(reader) // definitely yellow.
129
case "application/x-tar+zstd":
130
+
logc.Printf(ctx, "update %s: (tar.zst)", webRoot)
131
manifest, err = ExtractTarZstd(reader)
132
case "application/zip":
133
+
logc.Printf(ctx, "update %s: (zip)", webRoot)
134
manifest, err = ExtractZip(reader)
135
default:
136
err = errArchiveFormat
137
}
138
139
if err != nil {
140
+
logc.Printf(ctx, "update %s err: %s", webRoot, err)
141
result = UpdateResult{UpdateError, nil, err}
142
} else {
143
result = Update(ctx, webRoot, manifest)
-55
src/wildcard.go
-55
src/wildcard.go
···
1
package git_pages
2
3
import (
4
-
"crypto/tls"
5
"fmt"
6
-
"log"
7
-
"net/http"
8
-
"net/http/httputil"
9
-
"net/url"
10
"slices"
11
"strings"
12
···
19
IndexRepos []*fasttemplate.Template
20
IndexBranch string
21
Authorization bool
22
-
FallbackURL *url.URL
23
-
Fallback http.Handler
24
}
25
26
func (pattern *WildcardPattern) GetHost() string {
···
79
return repoURLs, branch
80
}
81
82
-
func (pattern *WildcardPattern) IsFallbackFor(host string) bool {
83
-
if pattern.Fallback == nil {
84
-
return false
85
-
}
86
-
_, found := pattern.Matches(host)
87
-
return found
88
-
}
89
-
90
-
func HandleWildcardFallback(w http.ResponseWriter, r *http.Request) (bool, error) {
91
-
host, err := GetHost(r)
92
-
if err != nil {
93
-
return false, err
94
-
}
95
-
96
-
for _, pattern := range wildcards {
97
-
if pattern.IsFallbackFor(host) {
98
-
log.Printf("proxy: %s via %s", pattern.GetHost(), pattern.FallbackURL)
99
-
pattern.Fallback.ServeHTTP(w, r)
100
-
return true, nil
101
-
}
102
-
}
103
-
return false, nil
104
-
}
105
-
106
func TranslateWildcards(configs []WildcardConfig) ([]*WildcardPattern, error) {
107
var wildcardPatterns []*WildcardPattern
108
for _, config := range configs {
···
135
}
136
}
137
138
-
var fallbackURL *url.URL
139
-
var fallback http.Handler
140
-
if config.FallbackProxyTo != "" {
141
-
fallbackURL, err = url.Parse(config.FallbackProxyTo)
142
-
if err != nil {
143
-
return nil, fmt.Errorf("wildcard pattern: fallback URL: %w", err)
144
-
}
145
-
146
-
fallback = &httputil.ReverseProxy{
147
-
Rewrite: func(r *httputil.ProxyRequest) {
148
-
r.SetURL(fallbackURL)
149
-
r.Out.Host = r.In.Host
150
-
r.Out.Header["X-Forwarded-For"] = r.In.Header["X-Forwarded-For"]
151
-
},
152
-
Transport: &http.Transport{
153
-
TLSClientConfig: &tls.Config{
154
-
InsecureSkipVerify: config.FallbackInsecure,
155
-
},
156
-
},
157
-
}
158
-
}
159
-
160
wildcardPatterns = append(wildcardPatterns, &WildcardPattern{
161
Domain: strings.Split(config.Domain, "."),
162
CloneURL: cloneURLTemplate,
163
IndexRepos: indexRepoTemplates,
164
IndexBranch: indexRepoBranch,
165
Authorization: authorization,
166
-
FallbackURL: fallbackURL,
167
-
Fallback: fallback,
168
})
169
}
170
return wildcardPatterns, nil
···
1
package git_pages
2
3
import (
4
"fmt"
5
"slices"
6
"strings"
7
···
14
IndexRepos []*fasttemplate.Template
15
IndexBranch string
16
Authorization bool
17
}
18
19
func (pattern *WildcardPattern) GetHost() string {
···
72
return repoURLs, branch
73
}
74
75
func TranslateWildcards(configs []WildcardConfig) ([]*WildcardPattern, error) {
76
var wildcardPatterns []*WildcardPattern
77
for _, config := range configs {
···
104
}
105
}
106
107
wildcardPatterns = append(wildcardPatterns, &WildcardPattern{
108
Domain: strings.Split(config.Domain, "."),
109
CloneURL: cloneURLTemplate,
110
IndexRepos: indexRepoTemplates,
111
IndexBranch: indexRepoBranch,
112
Authorization: authorization,
113
})
114
}
115
return wildcardPatterns, nil