+15
-12
.forgejo/workflows/ci.yaml
+15
-12
.forgejo/workflows/ci.yaml
···
10
10
11
11
jobs:
12
12
check:
13
-
runs-on: codeberg-small-lazy
13
+
runs-on: debian-trixie
14
14
container:
15
-
image: docker.io/library/node:24-trixie-slim@sha256:fcdfd7bcd8f641c8c76a8950343c73912d68ba341e8dd1074e663b784d3e76f4
15
+
image: docker.io/library/node:24-trixie-slim@sha256:b05474903f463ce4064c09986525e6588c3e66c51b69be9c93a39fb359f883ce
16
16
steps:
17
17
- name: Check out source code
18
-
uses: https://code.forgejo.org/actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
18
+
uses: https://code.forgejo.org/actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
19
19
- name: Set up toolchain
20
20
uses: https://code.forgejo.org/actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
21
21
with:
···
28
28
- name: Build service
29
29
run: |
30
30
go build
31
+
- name: Run tests
32
+
run: |
33
+
go test ./...
31
34
- name: Run static analysis
32
35
run: |
33
-
go vet
34
-
staticcheck
36
+
go vet ./...
37
+
staticcheck ./...
35
38
36
39
release:
37
40
# IMPORTANT: This workflow step will not work without the Releases unit enabled!
38
41
if: ${{ forge.ref == 'refs/heads/main' || startsWith(forge.event.ref, 'refs/tags/v') }}
39
42
needs: [check]
40
-
runs-on: codeberg-medium-lazy
43
+
runs-on: debian-trixie
41
44
container:
42
-
image: docker.io/library/node:24-trixie-slim@sha256:fcdfd7bcd8f641c8c76a8950343c73912d68ba341e8dd1074e663b784d3e76f4
45
+
image: docker.io/library/node:24-trixie-slim@sha256:b05474903f463ce4064c09986525e6588c3e66c51b69be9c93a39fb359f883ce
43
46
steps:
44
47
- name: Check out source code
45
-
uses: https://code.forgejo.org/actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
48
+
uses: https://code.forgejo.org/actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
46
49
- name: Set up toolchain
47
50
uses: https://code.forgejo.org/actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
48
51
with:
···
72
75
package:
73
76
if: ${{ forge.ref == 'refs/heads/main' || startsWith(forge.event.ref, 'refs/tags/v') }}
74
77
needs: [check]
75
-
runs-on: codeberg-medium-lazy
78
+
runs-on: debian-trixie
76
79
container:
77
-
image: docker.io/library/node:24-trixie-slim@sha256:fcdfd7bcd8f641c8c76a8950343c73912d68ba341e8dd1074e663b784d3e76f4
80
+
image: docker.io/library/node:24-trixie-slim@sha256:b05474903f463ce4064c09986525e6588c3e66c51b69be9c93a39fb359f883ce
78
81
steps:
79
82
- name: Install dependencies
80
83
run: |
81
84
apt-get -y update
82
-
apt-get -y install buildah ca-certificates
85
+
apt-get -y install ca-certificates buildah qemu-user-binfmt
83
86
- name: Check out source code
84
-
uses: https://code.forgejo.org/actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
87
+
uses: https://code.forgejo.org/actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
85
88
- name: Authenticate with Docker
86
89
run: |
87
90
buildah login --authfile=/tmp/authfile-${FORGE}.json \
+8
-8
Dockerfile
+8
-8
Dockerfile
···
1
1
# Install CA certificates.
2
-
FROM docker.io/library/alpine:latest@sha256:4b7ce07002c69e8f3d704a9c5d6fd3053be500b7f1c69fc0d80990c2ad8dd412 AS ca-certificates-builder
2
+
FROM docker.io/library/alpine:3 AS ca-certificates-builder
3
3
RUN apk --no-cache add ca-certificates
4
4
5
5
# Build supervisor.
6
-
FROM docker.io/library/golang:1.25-alpine@sha256:d3f0cf7723f3429e3f9ed846243970b20a2de7bae6a5b66fc5914e228d831bbb AS supervisor-builder
6
+
FROM docker.io/library/golang:1.25-alpine@sha256:ac09a5f469f307e5da71e766b0bd59c9c49ea460a528cc3e6686513d64a6f1fb AS supervisor-builder
7
7
RUN apk --no-cache add git
8
8
WORKDIR /build
9
9
RUN git clone https://github.com/ochinchina/supervisord . && \
···
11
11
RUN GOBIN=/usr/bin go install -ldflags "-s -w"
12
12
13
13
# Build Caddy with S3 storage backend.
14
-
FROM docker.io/library/caddy:2.10.2-builder@sha256:6e3ed727ce8695fc58e0a8de8e5d11888f6463c430ea5b40e0b5f679ab734868 AS caddy-builder
14
+
FROM docker.io/library/caddy:2.10.2-builder@sha256:6644af24bde2b4dbb07eb57637051abd2aa713e9787fa1eb544c3f31a0620898 AS caddy-builder
15
15
RUN xcaddy build ${CADDY_VERSION} \
16
16
--with=github.com/ss098/certmagic-s3@v0.0.0-20250922022452-8af482af5f39
17
17
18
18
# Build git-pages.
19
-
FROM docker.io/library/golang:1.25-alpine@sha256:d3f0cf7723f3429e3f9ed846243970b20a2de7bae6a5b66fc5914e228d831bbb AS git-pages-builder
19
+
FROM docker.io/library/golang:1.25-alpine@sha256:ac09a5f469f307e5da71e766b0bd59c9c49ea460a528cc3e6686513d64a6f1fb AS git-pages-builder
20
20
RUN apk --no-cache add git
21
21
WORKDIR /build
22
22
COPY go.mod go.sum ./
···
26
26
RUN go build -ldflags "-s -w" -o git-pages .
27
27
28
28
# Compose git-pages and Caddy.
29
-
FROM docker.io/library/busybox:1.37.0-musl@sha256:ef13e7482851632be3faf5bd1d28d4727c0810901d564b35416f309975a12a30
29
+
FROM docker.io/library/busybox:1.37.0-musl@sha256:03db190ed4c1ceb1c55d179a0940e2d71d42130636a780272629735893292223
30
30
COPY --from=ca-certificates-builder /etc/ssl/cert.pem /etc/ssl/cert.pem
31
31
COPY --from=supervisor-builder /usr/bin/supervisord /bin/supervisord
32
32
COPY --from=caddy-builder /usr/bin/caddy /bin/caddy
···
36
36
RUN mkdir /app/data
37
37
COPY conf/supervisord.conf /app/supervisord.conf
38
38
COPY conf/Caddyfile /app/Caddyfile
39
-
COPY conf/config.example.toml /app/config.toml
39
+
COPY conf/config.docker.toml /app/config.toml
40
40
41
41
# Caddy ports:
42
42
EXPOSE 80/tcp 443/tcp 443/udp
···
46
46
# While the default command is to run git-pages standalone, the intended configuration
47
47
# is to use it with Caddy and store both site data and credentials to an S3-compatible
48
48
# object store.
49
-
# * In a standalone configuration, the default, git-caddy listens on port 3000 (http).
50
-
# * In a combined configuration, supervisord launches both git-caddy and Caddy, and
49
+
# * In a standalone configuration, the default, git-pages listens on port 3000 (http).
50
+
# * In a combined configuration, supervisord launches both git-pages and Caddy, and
51
51
# Caddy listens on ports 80 (http) and 443 (https).
52
52
CMD ["git-pages"]
53
53
# CMD ["supervisord"]
+8
-4
README.md
+8
-4
README.md
···
90
90
* Files with a certain name, when placed in the root of a site, have special functions:
91
91
- [Netlify `_redirects`][_redirects] file can be used to specify HTTP redirect and rewrite rules. The _git-pages_ implementation currently does not support placeholders, query parameters, or conditions, and may differ from Netlify in other minor ways. If you find that a supported `_redirects` file feature does not work the same as on Netlify, please file an issue. (Note that _git-pages_ does not perform URL normalization; `/foo` and `/foo/` are *not* the same, unlike with Netlify.)
92
92
- [Netlify `_headers`][_headers] file can be used to specify custom HTTP response headers (if allowlisted by configuration). In particular, this is useful to enable [CORS requests][cors]. The _git-pages_ implementation may differ from Netlify in minor ways; if you find that a `_headers` file feature does not work the same as on Netlify, please file an issue.
93
+
* Incremental updates can be made using `PUT` or `PATCH` requests where the body contains an archive (both tar and zip are supported).
94
+
- Any archive entry that is a symlink to `/git/pages/<git-sha256>` is replaced with an existing manifest entry for the same site whose git blob hash matches `<git-sha256>`. If there is no existing manifest entry with the specified git hash, the update fails with a `422 Unprocessable Entity`.
95
+
- For this error response only, if the negotiated content type is `application/vnd.git-pages.unresolved`, the response will contain the `<git-sha256>` of each unresolved reference, one per line.
93
96
* Support for SHA-256 Git hashes is [limited by go-git][go-git-sha256]; once go-git implements the required features, _git-pages_ will automatically gain support for SHA-256 Git hashes. Note that shallow clones (used by _git-pages_ to conserve bandwidth if available) aren't supported yet in the Git protocol as of 2025.
94
97
95
98
[_redirects]: https://docs.netlify.com/manage/routing/redirects/overview/
···
134
137
* If `SENTRY_DSN` environment variable is set, panics are reported to Sentry.
135
138
* If `SENTRY_DSN` and `SENTRY_LOGS=1` environment variables are set, logs are uploaded to Sentry.
136
139
* If `SENTRY_DSN` and `SENTRY_TRACING=1` environment variables are set, traces are uploaded to Sentry.
137
-
* Optional syslog integration allows transmitting application logs to a syslog daemon. When present, the `SYSLOG_ADDR` environment variable enables the integration, and the variable's value is used to configure the absolute path to a Unix socket (usually located at `/dev/log` on Unix systems) or a network address of one of the following formats:
138
-
* for TLS over TCP: `tcp+tls://host:port`;
139
-
* for plain TCP: `tcp://host:post`;
140
-
* for UDP: `udp://host:port`.
140
+
* Optional syslog integration allows transmitting application logs to a syslog daemon. When present, the `SYSLOG_ADDR` environment variable enables the integration, and the value is used to configure the syslog destination. The value must follow the format `family/address` and is usually one of the following:
141
+
* a Unix datagram socket: `unixgram//dev/log`;
142
+
* TLS over TCP: `tcp+tls/host:port`;
143
+
* plain TCP: `tcp/host:port`;
144
+
* UDP: `udp/host:port`.
141
145
142
146
143
147
Architecture (v2)
-6
conf/Caddyfile
-6
conf/Caddyfile
···
25
25
on_demand
26
26
}
27
27
28
-
# initial PUT/POST for a new domain has to happen over HTTP
29
-
@upgrade `method('GET') && protocol('http')`
30
-
redir @upgrade https://{host}{uri} 301
31
-
32
28
reverse_proxy http://localhost:3000
33
-
header Alt-Svc `h3=":443"; persist=1, h2=":443"; persist=1`
34
-
encode
35
29
}
+4
conf/config.docker.toml
+4
conf/config.docker.toml
+5
-6
conf/config.example.toml
+5
-6
conf/config.example.toml
···
2
2
# as the intrinsic default value.
3
3
4
4
log-format = "text"
5
-
log-level = "info"
6
5
7
6
[server]
8
7
# Use "-" to disable the handler.
9
-
pages = "tcp/:3000"
10
-
caddy = "tcp/:3001"
11
-
metrics = "tcp/:3002"
8
+
pages = "tcp/localhost:3000"
9
+
caddy = "tcp/localhost:3001"
10
+
metrics = "tcp/localhost:3002"
12
11
13
12
[[wildcard]] # non-default section
14
13
domain = "codeberg.page"
···
51
50
update-timeout = "60s"
52
51
max-heap-size-ratio = 0.5 # * RAM_size
53
52
forbidden-domains = []
54
-
# allowed-repository-url-prefixes = <nil>
53
+
allowed-repository-url-prefixes = []
55
54
allowed-custom-headers = ["X-Clacks-Overhead"]
56
55
57
56
[audit]
58
57
node-id = 0
59
58
collect = false
60
-
include-ip = false
59
+
include-ip = ""
61
60
notify-url = ""
62
61
63
62
[observability]
+24
flake.lock
+24
flake.lock
···
18
18
"type": "github"
19
19
}
20
20
},
21
+
"gomod2nix": {
22
+
"inputs": {
23
+
"flake-utils": [
24
+
"flake-utils"
25
+
],
26
+
"nixpkgs": [
27
+
"nixpkgs"
28
+
]
29
+
},
30
+
"locked": {
31
+
"lastModified": 1763982521,
32
+
"narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=",
33
+
"owner": "nix-community",
34
+
"repo": "gomod2nix",
35
+
"rev": "02e63a239d6eabd595db56852535992c898eba72",
36
+
"type": "github"
37
+
},
38
+
"original": {
39
+
"owner": "nix-community",
40
+
"repo": "gomod2nix",
41
+
"type": "github"
42
+
}
43
+
},
21
44
"nix-filter": {
22
45
"locked": {
23
46
"lastModified": 1757882181,
···
52
75
"root": {
53
76
"inputs": {
54
77
"flake-utils": "flake-utils",
78
+
"gomod2nix": "gomod2nix",
55
79
"nix-filter": "nix-filter",
56
80
"nixpkgs": "nixpkgs"
57
81
}
+19
-4
flake.nix
+19
-4
flake.nix
···
3
3
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
4
4
flake-utils.url = "github:numtide/flake-utils";
5
5
nix-filter.url = "github:numtide/nix-filter";
6
+
7
+
gomod2nix = {
8
+
url = "github:nix-community/gomod2nix";
9
+
inputs.nixpkgs.follows = "nixpkgs";
10
+
inputs.flake-utils.follows = "flake-utils";
11
+
};
6
12
};
7
13
8
14
outputs =
···
11
17
nixpkgs,
12
18
flake-utils,
13
19
nix-filter,
14
-
}:
20
+
...
21
+
}@inputs:
15
22
flake-utils.lib.eachDefaultSystem (
16
23
system:
17
24
let
18
-
pkgs = nixpkgs.legacyPackages.${system};
25
+
pkgs = import nixpkgs {
26
+
inherit system;
27
+
28
+
overlays = [
29
+
inputs.gomod2nix.overlays.default
30
+
];
31
+
};
19
32
20
-
git-pages = pkgs.buildGo125Module {
33
+
git-pages = pkgs.buildGoApplication {
21
34
pname = "git-pages";
22
35
version = "0";
23
36
···
43
56
"-s -w"
44
57
];
45
58
46
-
vendorHash = "sha256-40LyEXdJDpWPe9UvqM2siqXdpbae1ba7kN7FtySPpBc=";
59
+
go = pkgs.go_1_25;
60
+
modules = ./gomod2nix.toml;
47
61
};
48
62
in
49
63
{
···
56
70
57
71
packages = with pkgs; [
58
72
caddy
73
+
gomod2nix
59
74
];
60
75
};
61
76
+16
-12
go.mod
+16
-12
go.mod
···
3
3
go 1.25.0
4
4
5
5
require (
6
-
codeberg.org/git-pages/go-headers v1.1.0
7
-
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251122144254-06c45d430fb9
6
+
codeberg.org/git-pages/go-headers v1.1.1
7
+
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251207093707-892f654e80b7
8
8
github.com/KimMachineGun/automemlimit v0.7.5
9
9
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
10
10
github.com/creasty/defaults v1.8.0
11
+
github.com/dghubble/trie v0.1.0
11
12
github.com/fatih/color v1.18.0
12
13
github.com/getsentry/sentry-go v0.40.0
13
14
github.com/getsentry/sentry-go/slog v0.40.0
14
-
github.com/go-git/go-billy/v6 v6.0.0-20251126203821-7f9c95185ee0
15
-
github.com/go-git/go-git/v6 v6.0.0-20251128074608-48f817f57805
15
+
github.com/go-git/go-billy/v6 v6.0.0-20251217170237-e9738f50a3cd
16
+
github.com/go-git/go-git/v6 v6.0.0-20251231065035-29ae690a9f19
16
17
github.com/jpillora/backoff v1.0.0
17
18
github.com/kankanreno/go-snowflake v1.2.0
18
-
github.com/klauspost/compress v1.18.1
19
-
github.com/maypok86/otter/v2 v2.2.1
19
+
github.com/klauspost/compress v1.18.2
20
+
github.com/maypok86/otter/v2 v2.3.0
20
21
github.com/minio/minio-go/v7 v7.0.97
21
22
github.com/pelletier/go-toml/v2 v2.2.4
22
23
github.com/pquerna/cachecontrol v0.2.0
23
24
github.com/prometheus/client_golang v1.23.2
24
-
github.com/samber/slog-multi v1.6.0
25
+
github.com/samber/slog-multi v1.7.0
25
26
github.com/tj/go-redirects v0.0.0-20200911105812-fd1ba1020b37
26
27
github.com/valyala/fasttemplate v1.2.2
27
-
google.golang.org/protobuf v1.36.10
28
+
golang.org/x/net v0.48.0
29
+
google.golang.org/protobuf v1.36.11
28
30
)
29
31
30
32
require (
···
34
36
github.com/cespare/xxhash/v2 v2.3.0 // indirect
35
37
github.com/cloudflare/circl v1.6.1 // indirect
36
38
github.com/cyphar/filepath-securejoin v0.6.1 // indirect
39
+
github.com/davecgh/go-spew v1.1.1 // indirect
37
40
github.com/dustin/go-humanize v1.0.1 // indirect
38
41
github.com/emirpasic/gods v1.18.1 // indirect
39
42
github.com/go-git/gcfg/v2 v2.0.2 // indirect
···
53
56
github.com/philhofer/fwd v1.2.0 // indirect
54
57
github.com/pjbgf/sha1cd v0.5.0 // indirect
55
58
github.com/pkg/errors v0.9.1 // indirect
59
+
github.com/pmezard/go-difflib v1.0.0 // indirect
56
60
github.com/prometheus/client_model v0.6.2 // indirect
57
61
github.com/prometheus/common v0.66.1 // indirect
58
62
github.com/prometheus/procfs v0.16.1 // indirect
···
60
64
github.com/samber/lo v1.52.0 // indirect
61
65
github.com/samber/slog-common v0.19.0 // indirect
62
66
github.com/sergi/go-diff v1.4.0 // indirect
67
+
github.com/stretchr/testify v1.11.1 // indirect
63
68
github.com/tinylib/msgp v1.3.0 // indirect
64
69
github.com/tj/assert v0.0.3 // indirect
65
70
github.com/valyala/bytebufferpool v1.0.0 // indirect
66
71
go.yaml.in/yaml/v2 v2.4.2 // indirect
67
-
golang.org/x/crypto v0.45.0 // indirect
68
-
golang.org/x/net v0.47.0 // indirect
69
-
golang.org/x/sys v0.38.0 // indirect
70
-
golang.org/x/text v0.31.0 // indirect
72
+
golang.org/x/crypto v0.46.0 // indirect
73
+
golang.org/x/sys v0.39.0 // indirect
74
+
golang.org/x/text v0.32.0 // indirect
71
75
gopkg.in/yaml.v3 v3.0.1 // indirect
72
76
)
+30
-30
go.sum
+30
-30
go.sum
···
1
-
codeberg.org/git-pages/go-headers v1.1.0 h1:rk7/SOSsn+XuL7PUQZFYUaWKHEaj6K8mXmUV9rF2VxE=
2
-
codeberg.org/git-pages/go-headers v1.1.0/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts=
3
-
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251122144254-06c45d430fb9 h1:xfPDg8ThBt3+t+C+pvM3bEH4ePUzP5t5kY2v19TqgKc=
4
-
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251122144254-06c45d430fb9/go.mod h1:8NPSXbYcVb71qqNM5cIgn1/uQgMisLbu2dVD1BNxsUw=
1
+
codeberg.org/git-pages/go-headers v1.1.1 h1:fpIBELKo66Z2k+gCeYl5mCEXVQ99Lmx1iup1nbo2shE=
2
+
codeberg.org/git-pages/go-headers v1.1.1/go.mod h1:N4gwH0U3YPwmuyxqH7xBA8j44fTPX+vOEP7ejJVBPts=
3
+
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251207093707-892f654e80b7 h1:+rkrAxhNZo/eKEcKOqVOsF6ohAPv5amz0JLburOeRjs=
4
+
codeberg.org/git-pages/go-slog-syslog v0.0.0-20251207093707-892f654e80b7/go.mod h1:8NPSXbYcVb71qqNM5cIgn1/uQgMisLbu2dVD1BNxsUw=
5
5
github.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk=
6
6
github.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=
7
7
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
···
27
27
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
28
28
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
29
29
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
30
+
github.com/dghubble/trie v0.1.0 h1:kJnjBLFFElBwS60N4tkPvnLhnpcDxbBjIulgI8CpNGM=
31
+
github.com/dghubble/trie v0.1.0/go.mod h1:sOmnzfBNH7H92ow2292dDFWNsVQuh/izuD7otCYb1ak=
30
32
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
31
33
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
32
-
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
33
-
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
34
34
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
35
35
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
36
36
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
···
45
45
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
46
46
github.com/go-git/gcfg/v2 v2.0.2 h1:MY5SIIfTGGEMhdA7d7JePuVVxtKL7Hp+ApGDJAJ7dpo=
47
47
github.com/go-git/gcfg/v2 v2.0.2/go.mod h1:/lv2NsxvhepuMrldsFilrgct6pxzpGdSRC13ydTLSLs=
48
-
github.com/go-git/go-billy/v6 v6.0.0-20251126203821-7f9c95185ee0 h1:eY5aB2GXiVdgTueBcqsBt53WuJTRZAuCdIS/86Pcq5c=
49
-
github.com/go-git/go-billy/v6 v6.0.0-20251126203821-7f9c95185ee0/go.mod h1:0NjwVNrwtVFZBReAp5OoGklGJIgJFEbVyHneAr4lc8k=
50
-
github.com/go-git/go-git-fixtures/v5 v5.1.1 h1:OH8i1ojV9bWfr0ZfasfpgtUXQHQyVS8HXik/V1C099w=
51
-
github.com/go-git/go-git-fixtures/v5 v5.1.1/go.mod h1:Altk43lx3b1ks+dVoAG2300o5WWUnktvfY3VI6bcaXU=
52
-
github.com/go-git/go-git/v6 v6.0.0-20251128074608-48f817f57805 h1:jxQ3BzYeErNRvlI/4+0mpwqMzvB4g97U+ksfgvrUEbY=
53
-
github.com/go-git/go-git/v6 v6.0.0-20251128074608-48f817f57805/go.mod h1:dIwT3uWK1ooHInyVnK2JS5VfQ3peVGYaw2QPqX7uFvs=
48
+
github.com/go-git/go-billy/v6 v6.0.0-20251217170237-e9738f50a3cd h1:Gd/f9cGi/3h1JOPaa6er+CkKUGyGX2DBJdFbDKVO+R0=
49
+
github.com/go-git/go-billy/v6 v6.0.0-20251217170237-e9738f50a3cd/go.mod h1:d3XQcsHu1idnquxt48kAv+h+1MUiYKLH/e7LAzjP+pI=
50
+
github.com/go-git/go-git-fixtures/v5 v5.1.2-0.20251229094738-4b14af179146 h1:xYfxAopYyL44ot6dMBIb1Z1njFM0ZBQ99HdIB99KxLs=
51
+
github.com/go-git/go-git-fixtures/v5 v5.1.2-0.20251229094738-4b14af179146/go.mod h1:QE/75B8tBSLNGyUUbA9tw3EGHoFtYOtypa2h8YJxsWI=
52
+
github.com/go-git/go-git/v6 v6.0.0-20251231065035-29ae690a9f19 h1:0lz2eJScP8v5YZQsrEw+ggWC5jNySjg4bIZo5BIh6iI=
53
+
github.com/go-git/go-git/v6 v6.0.0-20251231065035-29ae690a9f19/go.mod h1:L+Evfcs7EdTqxwv854354cb6+++7TFL3hJn3Wy4g+3w=
54
54
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
55
55
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
56
56
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
···
65
65
github.com/kankanreno/go-snowflake v1.2.0/go.mod h1:6CZ+10PeVsFXKZUTYyJzPiRIjn1IXbInaWLCX/LDJ0g=
66
66
github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ=
67
67
github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=
68
-
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
69
-
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
68
+
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
69
+
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
70
70
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
71
71
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
72
72
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
···
88
88
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
89
89
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
90
90
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
91
-
github.com/maypok86/otter/v2 v2.2.1 h1:hnGssisMFkdisYcvQ8L019zpYQcdtPse+g0ps2i7cfI=
92
-
github.com/maypok86/otter/v2 v2.2.1/go.mod h1:1NKY9bY+kB5jwCXBJfE59u+zAwOt6C7ni1FTlFFMqVs=
91
+
github.com/maypok86/otter/v2 v2.3.0 h1:8H8AVVFUSzJwIegKwv1uF5aGitTY+AIrtktg7OcLs8w=
92
+
github.com/maypok86/otter/v2 v2.3.0/go.mod h1:XgIdlpmL6jYz882/CAx1E4C1ukfgDKSaw4mWq59+7l8=
93
93
github.com/minio/crc64nvme v1.1.0 h1:e/tAguZ+4cw32D+IO/8GSf5UVr9y+3eJcxZI2WOO/7Q=
94
94
github.com/minio/crc64nvme v1.1.0/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
95
95
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
···
130
130
github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
131
131
github.com/samber/slog-common v0.19.0 h1:fNcZb8B2uOLooeYwFpAlKjkQTUafdjfqKcwcC89G9YI=
132
132
github.com/samber/slog-common v0.19.0/go.mod h1:dTz+YOU76aH007YUU0DffsXNsGFQRQllPQh9XyNoA3M=
133
-
github.com/samber/slog-multi v1.6.0 h1:i1uBY+aaln6ljwdf7Nrt4Sys8Kk6htuYuXDHWJsHtZg=
134
-
github.com/samber/slog-multi v1.6.0/go.mod h1:qTqzmKdPpT0h4PFsTN5rYRgLwom1v+fNGuIrl1Xnnts=
133
+
github.com/samber/slog-multi v1.7.0 h1:GKhbkxU3ujkyMsefkuz4qvE6EcgtSuqjFisPnfdzVLI=
134
+
github.com/samber/slog-multi v1.7.0/go.mod h1:qTqzmKdPpT0h4PFsTN5rYRgLwom1v+fNGuIrl1Xnnts=
135
135
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
136
136
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
137
137
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
···
153
153
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
154
154
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
155
155
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
156
-
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
157
-
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
158
-
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
159
-
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
156
+
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
157
+
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
158
+
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
159
+
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
160
160
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
161
161
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
162
-
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
163
-
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
164
-
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
165
-
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
166
-
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
167
-
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
168
-
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
169
-
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
162
+
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
163
+
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
164
+
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
165
+
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
166
+
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
167
+
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
168
+
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
169
+
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
170
170
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
171
171
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
172
172
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+204
gomod2nix.toml
+204
gomod2nix.toml
···
1
+
schema = 3
2
+
3
+
[mod]
4
+
[mod."codeberg.org/git-pages/go-headers"]
5
+
version = "v1.1.1"
6
+
hash = "sha256-qgL7l1FHXxcBWhBnBLEI0yENd6P+frvwlKxEAXLA3VY="
7
+
[mod."codeberg.org/git-pages/go-slog-syslog"]
8
+
version = "v0.0.0-20251207093707-892f654e80b7"
9
+
hash = "sha256-ye+DBIyxqTEOViYRrQPWyGJCaLmyKSDwH5btlqDPizM="
10
+
[mod."github.com/KimMachineGun/automemlimit"]
11
+
version = "v0.7.5"
12
+
hash = "sha256-lH/ip9j2hbYUc2W/XIYve/5TScQPZtEZe3hu76CY//k="
13
+
[mod."github.com/Microsoft/go-winio"]
14
+
version = "v0.6.2"
15
+
hash = "sha256-tVNWDUMILZbJvarcl/E7tpSnkn7urqgSHa2Eaka5vSU="
16
+
[mod."github.com/ProtonMail/go-crypto"]
17
+
version = "v1.3.0"
18
+
hash = "sha256-TUG+C4MyeWglOmiwiW2/NUVurFHXLgEPRd3X9uQ1NGI="
19
+
[mod."github.com/beorn7/perks"]
20
+
version = "v1.0.1"
21
+
hash = "sha256-h75GUqfwJKngCJQVE5Ao5wnO3cfKD9lSIteoLp/3xJ4="
22
+
[mod."github.com/c2h5oh/datasize"]
23
+
version = "v0.0.0-20231215233829-aa82cc1e6500"
24
+
hash = "sha256-8MqL7xCvE6fIjanz2jwkaLP1OE5kLu62TOcQx452DHQ="
25
+
[mod."github.com/cespare/xxhash/v2"]
26
+
version = "v2.3.0"
27
+
hash = "sha256-7hRlwSR+fos1kx4VZmJ/7snR7zHh8ZFKX+qqqqGcQpY="
28
+
[mod."github.com/cloudflare/circl"]
29
+
version = "v1.6.1"
30
+
hash = "sha256-Dc69V12eIFnJoUNmwg6VKXHfAMijbAeEVSDe8AiOaLo="
31
+
[mod."github.com/creasty/defaults"]
32
+
version = "v1.8.0"
33
+
hash = "sha256-I1LE1cfOhMS5JxB7+fWTKieefw2Gge1UhIZh+A6pa6s="
34
+
[mod."github.com/cyphar/filepath-securejoin"]
35
+
version = "v0.6.1"
36
+
hash = "sha256-obqip8c1c9mjXFznyXF8aDnpcMw7ttzv+e28anCa/v0="
37
+
[mod."github.com/davecgh/go-spew"]
38
+
version = "v1.1.1"
39
+
hash = "sha256-nhzSUrE1fCkN0+RL04N4h8jWmRFPPPWbCuDc7Ss0akI="
40
+
[mod."github.com/dghubble/trie"]
41
+
version = "v0.1.0"
42
+
hash = "sha256-hVh7uYylpMCCSPcxl70hJTmzSwaA1MxBmJFBO5Xdncc="
43
+
[mod."github.com/dustin/go-humanize"]
44
+
version = "v1.0.1"
45
+
hash = "sha256-yuvxYYngpfVkUg9yAmG99IUVmADTQA0tMbBXe0Fq0Mc="
46
+
[mod."github.com/emirpasic/gods"]
47
+
version = "v1.18.1"
48
+
hash = "sha256-hGDKddjLj+5dn2woHtXKUdd49/3xdsqnhx7VEdCu1m4="
49
+
[mod."github.com/fatih/color"]
50
+
version = "v1.18.0"
51
+
hash = "sha256-pP5y72FSbi4j/BjyVq/XbAOFjzNjMxZt2R/lFFxGWvY="
52
+
[mod."github.com/getsentry/sentry-go"]
53
+
version = "v0.40.0"
54
+
hash = "sha256-mJ+EzM8WRzJ2Yp7ithDJNceU4+GbzQyi46yc8J8d13Y="
55
+
[mod."github.com/getsentry/sentry-go/slog"]
56
+
version = "v0.40.0"
57
+
hash = "sha256-uc9TpKiWMEpRbxwV2uGQeq1DDdZi+APOgu2StVzzEkw="
58
+
[mod."github.com/go-git/gcfg/v2"]
59
+
version = "v2.0.2"
60
+
hash = "sha256-icqMDeC/tEg/3979EuEN67Ml5KjdDA0R3QvR6iLLrSI="
61
+
[mod."github.com/go-git/go-billy/v6"]
62
+
version = "v6.0.0-20251217170237-e9738f50a3cd"
63
+
hash = "sha256-b2yunYcPUiLTU+Rr8qTBdsDEfsIhZDYmyqKW5udmpFY="
64
+
[mod."github.com/go-git/go-git/v6"]
65
+
version = "v6.0.0-20251224103503-78aff6aa5ea9"
66
+
hash = "sha256-kYjDqH0NZ+sxQnj5K8xKfO2WOVKtQ/7tWcqY6KYqAZE="
67
+
[mod."github.com/go-ini/ini"]
68
+
version = "v1.67.0"
69
+
hash = "sha256-V10ahGNGT+NLRdKUyRg1dos5RxLBXBk1xutcnquc/+4="
70
+
[mod."github.com/golang/groupcache"]
71
+
version = "v0.0.0-20241129210726-2c02b8208cf8"
72
+
hash = "sha256-AdLZ3dJLe/yduoNvZiXugZxNfmwJjNQyQGsIdzYzH74="
73
+
[mod."github.com/google/uuid"]
74
+
version = "v1.6.0"
75
+
hash = "sha256-VWl9sqUzdOuhW0KzQlv0gwwUQClYkmZwSydHG2sALYw="
76
+
[mod."github.com/jpillora/backoff"]
77
+
version = "v1.0.0"
78
+
hash = "sha256-uxHg68NN8hrwPCrPfLYYprZHf7dMyEoPoF46JFx0IHU="
79
+
[mod."github.com/kankanreno/go-snowflake"]
80
+
version = "v1.2.0"
81
+
hash = "sha256-713xGEqjwaUGIu2EHII5sldWmcquFpxZmte/7R/O6LA="
82
+
[mod."github.com/kevinburke/ssh_config"]
83
+
version = "v1.4.0"
84
+
hash = "sha256-UclxB7Ll1FZCgU2SrGkiGdr4CoSRJ127MNnZtxKTsvg="
85
+
[mod."github.com/klauspost/compress"]
86
+
version = "v1.18.2"
87
+
hash = "sha256-mRa+6qEi5joqQao13ZFogmq67rOQzHCVbCCjKA+HKEc="
88
+
[mod."github.com/klauspost/cpuid/v2"]
89
+
version = "v2.3.0"
90
+
hash = "sha256-50JhbQyT67BK38HIdJihPtjV7orYp96HknI2VP7A9Yc="
91
+
[mod."github.com/klauspost/crc32"]
92
+
version = "v1.3.0"
93
+
hash = "sha256-RsS/MDJbVzVB+i74whqABgwZJWMw+AutF6HhJBVgbag="
94
+
[mod."github.com/leodido/go-syslog/v4"]
95
+
version = "v4.3.0"
96
+
hash = "sha256-fCJ2rgrrPR/Ey/PoAsJhd8Sl8mblAnnMAmBuoWFBTgg="
97
+
[mod."github.com/mattn/go-colorable"]
98
+
version = "v0.1.13"
99
+
hash = "sha256-qb3Qbo0CELGRIzvw7NVM1g/aayaz4Tguppk9MD2/OI8="
100
+
[mod."github.com/mattn/go-isatty"]
101
+
version = "v0.0.20"
102
+
hash = "sha256-qhw9hWtU5wnyFyuMbKx+7RB8ckQaFQ8D+8GKPkN3HHQ="
103
+
[mod."github.com/maypok86/otter/v2"]
104
+
version = "v2.3.0"
105
+
hash = "sha256-ELzmi/s2WqDeUmzSGnfx+ys2Hs28XHqF7vlEzyRotIA="
106
+
[mod."github.com/minio/crc64nvme"]
107
+
version = "v1.1.0"
108
+
hash = "sha256-OwlE70X91WO4HdbpGsOaB4w12Qrk0duCpfLeAskiqY8="
109
+
[mod."github.com/minio/md5-simd"]
110
+
version = "v1.1.2"
111
+
hash = "sha256-vykcXvy2VBBAXnJott/XsGTT0gk2UL36JzZKfJ1KAUY="
112
+
[mod."github.com/minio/minio-go/v7"]
113
+
version = "v7.0.97"
114
+
hash = "sha256-IwF14tWVYjBi28jUG9iFYd4Lpbc7Fvyy0zRzEZ82UEE="
115
+
[mod."github.com/munnerz/goautoneg"]
116
+
version = "v0.0.0-20191010083416-a7dc8b61c822"
117
+
hash = "sha256-79URDDFenmGc9JZu+5AXHToMrtTREHb3BC84b/gym9Q="
118
+
[mod."github.com/pbnjay/memory"]
119
+
version = "v0.0.0-20210728143218-7b4eea64cf58"
120
+
hash = "sha256-QI+F1oPLOOtwNp8+m45OOoSfYFs3QVjGzE0rFdpF/IA="
121
+
[mod."github.com/pelletier/go-toml/v2"]
122
+
version = "v2.2.4"
123
+
hash = "sha256-8qQIPldbsS5RO8v/FW/se3ZsAyvLzexiivzJCbGRg2Q="
124
+
[mod."github.com/philhofer/fwd"]
125
+
version = "v1.2.0"
126
+
hash = "sha256-cGx2/0QQay46MYGZuamFmU0TzNaFyaO+J7Ddzlr/3dI="
127
+
[mod."github.com/pjbgf/sha1cd"]
128
+
version = "v0.5.0"
129
+
hash = "sha256-11XBkhdciQGsQ7jEMZ6PgphRKjruTSc7ZxfOwDuPCr8="
130
+
[mod."github.com/pkg/errors"]
131
+
version = "v0.9.1"
132
+
hash = "sha256-mNfQtcrQmu3sNg/7IwiieKWOgFQOVVe2yXgKBpe/wZw="
133
+
[mod."github.com/pmezard/go-difflib"]
134
+
version = "v1.0.0"
135
+
hash = "sha256-/FtmHnaGjdvEIKAJtrUfEhV7EVo5A/eYrtdnUkuxLDA="
136
+
[mod."github.com/pquerna/cachecontrol"]
137
+
version = "v0.2.0"
138
+
hash = "sha256-tuTERCFfwmqPepw/rs5cyv9fArCD30BqgjZqwMV+vzQ="
139
+
[mod."github.com/prometheus/client_golang"]
140
+
version = "v1.23.2"
141
+
hash = "sha256-3GD4fBFa1tJu8MS4TNP6r2re2eViUE+kWUaieIOQXCg="
142
+
[mod."github.com/prometheus/client_model"]
143
+
version = "v0.6.2"
144
+
hash = "sha256-q6Fh6v8iNJN9ypD47LjWmx66YITa3FyRjZMRsuRTFeQ="
145
+
[mod."github.com/prometheus/common"]
146
+
version = "v0.66.1"
147
+
hash = "sha256-bqHPaV9IV70itx63wqwgy2PtxMN0sn5ThVxDmiD7+Tk="
148
+
[mod."github.com/prometheus/procfs"]
149
+
version = "v0.16.1"
150
+
hash = "sha256-OBCvKlLW2obct35p0L9Q+1ZrxZjpTmbgHMP2rng9hpo="
151
+
[mod."github.com/rs/xid"]
152
+
version = "v1.6.0"
153
+
hash = "sha256-rJB7h3KuH1DPp5n4dY3MiGnV1Y96A10lf5OUl+MLkzU="
154
+
[mod."github.com/samber/lo"]
155
+
version = "v1.52.0"
156
+
hash = "sha256-xgMsPJv3rydHH10NZU8wz/DhK2VbbR8ymivOg1ChTp0="
157
+
[mod."github.com/samber/slog-common"]
158
+
version = "v0.19.0"
159
+
hash = "sha256-OYXVbZML7c3mFClVy8GEnNoWW+4OfcBsxWDtKh1u7B8="
160
+
[mod."github.com/samber/slog-multi"]
161
+
version = "v1.6.0"
162
+
hash = "sha256-uebbTcvsBP2LdOUIjDptES+HZOXxThnIt3+FKL0qJy4="
163
+
[mod."github.com/sergi/go-diff"]
164
+
version = "v1.4.0"
165
+
hash = "sha256-rs9NKpv/qcQEMRg7CmxGdP4HGuFdBxlpWf9LbA9wS4k="
166
+
[mod."github.com/stretchr/testify"]
167
+
version = "v1.11.1"
168
+
hash = "sha256-sWfjkuKJyDllDEtnM8sb/pdLzPQmUYWYtmeWz/5suUc="
169
+
[mod."github.com/tinylib/msgp"]
170
+
version = "v1.3.0"
171
+
hash = "sha256-PnpndO7k5Yl036vhWJGDsrcz0jsTX8sUiTqm/D3rAVw="
172
+
[mod."github.com/tj/assert"]
173
+
version = "v0.0.3"
174
+
hash = "sha256-4xhmZcHpUafabaXejE9ucVnGxG/txomvKzBg6cbkusg="
175
+
[mod."github.com/tj/go-redirects"]
176
+
version = "v0.0.0-20200911105812-fd1ba1020b37"
177
+
hash = "sha256-GpYpxdT4F7PkwGXLo7cYVcIRJrzd1sKHtFDH+bRb6Tk="
178
+
[mod."github.com/valyala/bytebufferpool"]
179
+
version = "v1.0.0"
180
+
hash = "sha256-I9FPZ3kCNRB+o0dpMwBnwZ35Fj9+ThvITn8a3Jr8mAY="
181
+
[mod."github.com/valyala/fasttemplate"]
182
+
version = "v1.2.2"
183
+
hash = "sha256-gp+lNXE8zjO+qJDM/YbS6V43HFsYP6PKn4ux1qa5lZ0="
184
+
[mod."go.yaml.in/yaml/v2"]
185
+
version = "v2.4.2"
186
+
hash = "sha256-oC8RWdf1zbMYCtmR0ATy/kCkhIwPR9UqFZSMOKLVF/A="
187
+
[mod."golang.org/x/crypto"]
188
+
version = "v0.46.0"
189
+
hash = "sha256-I8N/spcw3/h0DFA+V1WK38HctckWIB9ep93DEVCALxU="
190
+
[mod."golang.org/x/net"]
191
+
version = "v0.48.0"
192
+
hash = "sha256-oZpddsiJwWCH3Aipa+XXpy7G/xHY5fEagUSok7T0bXE="
193
+
[mod."golang.org/x/sys"]
194
+
version = "v0.39.0"
195
+
hash = "sha256-dxTBu/JAWUkPbjFIXXRFdhQWyn+YyEpIC+tWqGo0Y6U="
196
+
[mod."golang.org/x/text"]
197
+
version = "v0.32.0"
198
+
hash = "sha256-9PXtWBKKY9rG4AgjSP4N+I1DhepXhy8SF/vWSIDIoWs="
199
+
[mod."google.golang.org/protobuf"]
200
+
version = "v1.36.11"
201
+
hash = "sha256-7W+6jntfI/awWL3JP6yQedxqP5S9o3XvPgJ2XxxsIeE="
202
+
[mod."gopkg.in/yaml.v3"]
203
+
version = "v3.0.1"
204
+
hash = "sha256-FqL9TKYJ0XkNwJFnq9j0VvJ5ZUU1RvH/52h/f5bkYAU="
+3
-1
renovate.json
+3
-1
renovate.json
+13
-14
src/audit.go
+13
-14
src/audit.go
···
265
265
var _ Backend = (*auditedBackend)(nil)
266
266
267
267
func NewAuditedBackend(backend Backend) Backend {
268
-
if config.Feature("audit") {
269
-
return &auditedBackend{backend}
270
-
} else {
271
-
return backend
272
-
}
268
+
return &auditedBackend{backend}
273
269
}
274
270
275
271
// This function does not retry appending audit records; as such, if it returns an error,
···
378
374
return audited.Backend.DeleteManifest(ctx, name, opts)
379
375
}
380
376
381
-
func (audited *auditedBackend) FreezeDomain(ctx context.Context, domain string, freeze bool) (err error) {
382
-
var event AuditEvent
383
-
if freeze {
384
-
event = AuditEvent_FreezeDomain
385
-
} else {
386
-
event = AuditEvent_UnfreezeDomain
387
-
}
377
+
func (audited *auditedBackend) FreezeDomain(ctx context.Context, domain string) (err error) {
378
+
audited.appendNewAuditRecord(ctx, &AuditRecord{
379
+
Event: AuditEvent_FreezeDomain.Enum(),
380
+
Domain: proto.String(domain),
381
+
})
382
+
383
+
return audited.Backend.FreezeDomain(ctx, domain)
384
+
}
385
+
386
+
func (audited *auditedBackend) UnfreezeDomain(ctx context.Context, domain string) (err error) {
388
387
audited.appendNewAuditRecord(ctx, &AuditRecord{
389
-
Event: event.Enum(),
388
+
Event: AuditEvent_UnfreezeDomain.Enum(),
390
389
Domain: proto.String(domain),
391
390
})
392
391
393
-
return audited.Backend.FreezeDomain(ctx, domain, freeze)
392
+
return audited.Backend.UnfreezeDomain(ctx, domain)
394
393
}
+32
-7
src/auth.go
+32
-7
src/auth.go
···
12
12
"slices"
13
13
"strings"
14
14
"time"
15
+
16
+
"golang.org/x/net/idna"
15
17
)
16
18
17
19
type AuthError struct {
···
42
44
return nil
43
45
}
44
46
47
+
var idnaProfile = idna.New(idna.MapForLookup(), idna.BidiRule())
48
+
45
49
func GetHost(r *http.Request) (string, error) {
46
-
// FIXME: handle IDNA
47
50
host, _, err := net.SplitHostPort(r.Host)
48
51
if err != nil {
49
-
// dirty but the go stdlib doesn't have a "split port if present" function
50
52
host = r.Host
51
53
}
52
-
if strings.HasPrefix(host, ".") {
54
+
// this also rejects invalid characters and labels
55
+
host, err = idnaProfile.ToASCII(host)
56
+
if err != nil {
57
+
if config.Feature("relaxed-idna") {
58
+
// unfortunately, the go IDNA library has some significant issues around its
59
+
// Unicode TR46 implementation: https://github.com/golang/go/issues/76804
60
+
// we would like to allow *just* the _ here, but adding `idna.StrictDomainName(false)`
61
+
// would also accept domains like `*.foo.bar` which should clearly be disallowed.
62
+
// as a workaround, accept a domain name if it is valid with all `_` characters
63
+
// replaced with an alphanumeric character (we use `a`); this allows e.g. `foo_bar.xxx`
64
+
// and `foo__bar.xxx`, as well as `_foo.xxx` and `foo_.xxx`. labels starting with
65
+
// an underscore are explicitly rejected below.
66
+
_, err = idnaProfile.ToASCII(strings.ReplaceAll(host, "_", "a"))
67
+
}
68
+
if err != nil {
69
+
return "", AuthError{http.StatusBadRequest,
70
+
fmt.Sprintf("malformed host name %q", host)}
71
+
}
72
+
}
73
+
if strings.HasPrefix(host, ".") || strings.HasPrefix(host, "_") {
53
74
return "", AuthError{http.StatusBadRequest,
54
-
fmt.Sprintf("host name %q is reserved", host)}
75
+
fmt.Sprintf("reserved host name %q", host)}
55
76
}
56
77
host = strings.TrimSuffix(host, ".")
57
78
return host, nil
58
79
}
59
80
81
+
func IsValidProjectName(name string) bool {
82
+
return !strings.HasPrefix(name, ".") && !strings.Contains(name, "%")
83
+
}
84
+
60
85
func GetProjectName(r *http.Request) (string, error) {
61
86
// path must be either `/` or `/foo/` (`/foo` is accepted as an alias)
62
87
path := strings.TrimPrefix(strings.TrimSuffix(r.URL.Path, "/"), "/")
63
-
if path == ".index" || strings.HasPrefix(path, ".index/") {
88
+
if !IsValidProjectName(path) {
64
89
return "", AuthError{http.StatusBadRequest,
65
90
fmt.Sprintf("directory name %q is reserved", ".index")}
66
91
} else if strings.Contains(path, "/") {
···
436
461
}
437
462
438
463
func checkAllowedURLPrefix(repoURL string) error {
439
-
if config.Limits.AllowedRepositoryURLPrefixes != nil {
464
+
if len(config.Limits.AllowedRepositoryURLPrefixes) > 0 {
440
465
allowedPrefix := false
441
466
repoURL = strings.ToLower(repoURL)
442
467
for _, allowedRepoURLPrefix := range config.Limits.AllowedRepositoryURLPrefixes {
···
658
683
return auth, nil
659
684
}
660
685
661
-
if config.Limits.AllowedRepositoryURLPrefixes != nil {
686
+
if len(config.Limits.AllowedRepositoryURLPrefixes) > 0 {
662
687
causes = append(causes, AuthError{http.StatusUnauthorized, "DNS challenge not allowed"})
663
688
} else {
664
689
// DNS challenge gives absolute authority.
+28
-10
src/backend.go
+28
-10
src/backend.go
···
6
6
"fmt"
7
7
"io"
8
8
"iter"
9
-
"slices"
10
9
"strings"
11
10
"time"
12
11
)
···
17
16
var ErrDomainFrozen = errors.New("domain administratively frozen")
18
17
19
18
func splitBlobName(name string) []string {
20
-
algo, hash, found := strings.Cut(name, "-")
21
-
if found {
22
-
return slices.Concat([]string{algo}, splitBlobName(hash))
19
+
if algo, hash, found := strings.Cut(name, "-"); found {
20
+
return []string{algo, hash[0:2], hash[2:4], hash[4:]}
23
21
} else {
24
-
return []string{name[0:2], name[2:4], name[4:]}
22
+
panic("malformed blob name")
25
23
}
26
24
}
27
25
26
+
func joinBlobName(parts []string) string {
27
+
return fmt.Sprintf("%s-%s", parts[0], strings.Join(parts[1:], ""))
28
+
}
29
+
28
30
type BackendFeature string
29
31
30
32
const (
31
33
FeatureCheckDomainMarker BackendFeature = "check-domain-marker"
32
34
)
33
35
36
+
type BlobMetadata struct {
37
+
Name string
38
+
Size int64
39
+
LastModified time.Time
40
+
}
41
+
34
42
type GetManifestOptions struct {
35
43
// If true and the manifest is past the cache `MaxAge`, `GetManifest` blocks and returns
36
44
// a fresh object instead of revalidating in background and returning a stale object.
···
38
46
}
39
47
40
48
type ManifestMetadata struct {
49
+
Name string
50
+
Size int64
41
51
LastModified time.Time
42
52
ETag string
43
53
}
···
77
87
78
88
// Retrieve a blob. Returns `reader, size, mtime, err`.
79
89
GetBlob(ctx context.Context, name string) (
80
-
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
90
+
reader io.ReadSeeker, metadata BlobMetadata, err error,
81
91
)
82
92
83
93
// Store a blob. If a blob called `name` already exists, this function returns `nil` without
···
88
98
// Delete a blob. This is an unconditional operation that can break integrity of manifests.
89
99
DeleteBlob(ctx context.Context, name string) error
90
100
101
+
// Iterate through all blobs. Whether blobs that are newly added during iteration will appear
102
+
// in the results is unspecified.
103
+
EnumerateBlobs(ctx context.Context) iter.Seq2[BlobMetadata, error]
104
+
91
105
// Retrieve a manifest.
92
106
GetManifest(ctx context.Context, name string, opts GetManifestOptions) (
93
107
manifest *Manifest, metadata ManifestMetadata, err error,
···
110
124
// Delete a manifest.
111
125
DeleteManifest(ctx context.Context, name string, opts ModifyManifestOptions) error
112
126
113
-
// List all manifests.
114
-
ListManifests(ctx context.Context) (manifests []string, err error)
127
+
// Iterate through all manifests. Whether manifests that are newly added during iteration
128
+
// will appear in the results is unspecified.
129
+
EnumerateManifests(ctx context.Context) iter.Seq2[ManifestMetadata, error]
115
130
116
131
// Check whether a domain has any deployments.
117
132
CheckDomain(ctx context.Context, domain string) (found bool, err error)
···
119
134
// Create a domain. This allows us to start serving content for the domain.
120
135
CreateDomain(ctx context.Context, domain string) error
121
136
122
-
// Freeze or thaw a domain. This allows a site to be administratively locked, e.g. if it
137
+
// Freeze a domain. This allows a site to be administratively locked, e.g. if it
123
138
// is discovered serving abusive content.
124
-
FreezeDomain(ctx context.Context, domain string, freeze bool) error
139
+
FreezeDomain(ctx context.Context, domain string) error
140
+
141
+
// Thaw a domain. This removes the previously placed administrative lock (if any).
142
+
UnfreezeDomain(ctx context.Context, domain string) error
125
143
126
144
// Append a record to the audit log.
127
145
AppendAuditLog(ctx context.Context, id AuditID, record *AuditRecord) error
+65
-25
src/backend_fs.go
+65
-25
src/backend_fs.go
···
11
11
"os"
12
12
"path/filepath"
13
13
"strings"
14
-
"time"
15
14
)
16
15
17
16
type FSBackend struct {
···
118
117
func (fs *FSBackend) GetBlob(
119
118
ctx context.Context, name string,
120
119
) (
121
-
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
120
+
reader io.ReadSeeker, metadata BlobMetadata, err error,
122
121
) {
123
122
blobPath := filepath.Join(splitBlobName(name)...)
124
123
stat, err := fs.blobRoot.Stat(blobPath)
···
134
133
err = fmt.Errorf("open: %w", err)
135
134
return
136
135
}
137
-
return file, uint64(stat.Size()), stat.ModTime(), nil
136
+
return file, BlobMetadata{name, int64(stat.Size()), stat.ModTime()}, nil
138
137
}
139
138
140
139
func (fs *FSBackend) PutBlob(ctx context.Context, name string, data []byte) error {
···
182
181
return fs.blobRoot.Remove(blobPath)
183
182
}
184
183
185
-
func (fs *FSBackend) ListManifests(ctx context.Context) (manifests []string, err error) {
186
-
err = iofs.WalkDir(fs.siteRoot.FS(), ".",
187
-
func(path string, entry iofs.DirEntry, err error) error {
188
-
if strings.Count(path, "/") > 1 {
189
-
return iofs.SkipDir
190
-
}
191
-
_, project, _ := strings.Cut(path, "/")
192
-
if project == "" || strings.HasPrefix(project, ".") && project != ".index" {
184
+
func (fs *FSBackend) EnumerateBlobs(ctx context.Context) iter.Seq2[BlobMetadata, error] {
185
+
return func(yield func(BlobMetadata, error) bool) {
186
+
iofs.WalkDir(fs.blobRoot.FS(), ".",
187
+
func(path string, entry iofs.DirEntry, err error) error {
188
+
var metadata BlobMetadata
189
+
if err != nil {
190
+
// report error
191
+
} else if entry.IsDir() {
192
+
// skip directory
193
+
return nil
194
+
} else if info, err := entry.Info(); err != nil {
195
+
// report error
196
+
} else {
197
+
// report blob
198
+
metadata.Name = joinBlobName(strings.Split(path, "/"))
199
+
metadata.Size = info.Size()
200
+
metadata.LastModified = info.ModTime()
201
+
}
202
+
if !yield(metadata, err) {
203
+
return iofs.SkipAll
204
+
}
193
205
return nil
194
-
}
195
-
manifests = append(manifests, path)
196
-
return nil
197
-
})
198
-
return
206
+
})
207
+
}
199
208
}
200
209
201
210
func (fs *FSBackend) GetManifest(
···
387
396
}
388
397
}
389
398
399
+
func (fs *FSBackend) EnumerateManifests(ctx context.Context) iter.Seq2[ManifestMetadata, error] {
400
+
return func(yield func(ManifestMetadata, error) bool) {
401
+
iofs.WalkDir(fs.siteRoot.FS(), ".",
402
+
func(path string, entry iofs.DirEntry, err error) error {
403
+
_, project, _ := strings.Cut(path, "/")
404
+
var metadata ManifestMetadata
405
+
if err != nil {
406
+
// report error
407
+
} else if entry.IsDir() {
408
+
// skip directory
409
+
return nil
410
+
} else if project == "" || strings.HasPrefix(project, ".") && project != ".index" {
411
+
// skip internal
412
+
return nil
413
+
} else if info, err := entry.Info(); err != nil {
414
+
// report error
415
+
} else {
416
+
// report blob
417
+
metadata.Name = path
418
+
metadata.Size = info.Size()
419
+
metadata.LastModified = info.ModTime()
420
+
// not setting metadata.ETag since it is too costly
421
+
}
422
+
if !yield(metadata, err) {
423
+
return iofs.SkipAll
424
+
}
425
+
return nil
426
+
})
427
+
}
428
+
}
429
+
390
430
func (fs *FSBackend) CheckDomain(ctx context.Context, domain string) (bool, error) {
391
431
_, err := fs.siteRoot.Stat(domain)
392
432
if errors.Is(err, os.ErrNotExist) {
···
402
442
return nil // no-op
403
443
}
404
444
405
-
func (fs *FSBackend) FreezeDomain(ctx context.Context, domain string, freeze bool) error {
406
-
if freeze {
407
-
return fs.siteRoot.WriteFile(domainFrozenMarkerName(domain), []byte{}, 0o644)
445
+
func (fs *FSBackend) FreezeDomain(ctx context.Context, domain string) error {
446
+
return fs.siteRoot.WriteFile(domainFrozenMarkerName(domain), []byte{}, 0o644)
447
+
}
448
+
449
+
func (fs *FSBackend) UnfreezeDomain(ctx context.Context, domain string) error {
450
+
err := fs.siteRoot.Remove(domainFrozenMarkerName(domain))
451
+
if errors.Is(err, os.ErrNotExist) {
452
+
return nil
408
453
} else {
409
-
err := fs.siteRoot.Remove(domainFrozenMarkerName(domain))
410
-
if errors.Is(err, os.ErrNotExist) {
411
-
return nil
412
-
} else {
413
-
return err
414
-
}
454
+
return err
415
455
}
416
456
}
417
457
+87
-47
src/backend_s3.go
+87
-47
src/backend_s3.go
···
266
266
func (s3 *S3Backend) GetBlob(
267
267
ctx context.Context, name string,
268
268
) (
269
-
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
269
+
reader io.ReadSeeker, metadata BlobMetadata, err error,
270
270
) {
271
271
loader := func(ctx context.Context, name string) (*CachedBlob, error) {
272
272
logc.Printf(ctx, "s3: get blob %s\n", name)
···
316
316
}
317
317
} else {
318
318
reader = bytes.NewReader(cached.blob)
319
-
size = uint64(len(cached.blob))
320
-
mtime = cached.mtime
319
+
metadata.Name = name
320
+
metadata.Size = int64(len(cached.blob))
321
+
metadata.LastModified = cached.mtime
321
322
}
322
323
return
323
324
}
···
357
358
minio.RemoveObjectOptions{})
358
359
}
359
360
361
+
func (s3 *S3Backend) EnumerateBlobs(ctx context.Context) iter.Seq2[BlobMetadata, error] {
362
+
return func(yield func(BlobMetadata, error) bool) {
363
+
logc.Print(ctx, "s3: enumerate blobs")
364
+
365
+
ctx, cancel := context.WithCancel(ctx)
366
+
defer cancel()
367
+
368
+
prefix := "blob/"
369
+
for object := range s3.client.ListObjectsIter(ctx, s3.bucket, minio.ListObjectsOptions{
370
+
Prefix: prefix,
371
+
Recursive: true,
372
+
}) {
373
+
var metadata BlobMetadata
374
+
var err error
375
+
if err = object.Err; err == nil {
376
+
key := strings.TrimPrefix(object.Key, prefix)
377
+
if strings.HasSuffix(key, "/") {
378
+
continue // directory; skip
379
+
} else {
380
+
metadata.Name = joinBlobName(strings.Split(key, "/"))
381
+
metadata.Size = object.Size
382
+
metadata.LastModified = object.LastModified
383
+
}
384
+
}
385
+
if !yield(metadata, err) {
386
+
break
387
+
}
388
+
}
389
+
}
390
+
}
391
+
360
392
func manifestObjectName(name string) string {
361
393
return fmt.Sprintf("site/%s", name)
362
394
}
363
395
364
396
func stagedManifestObjectName(manifestData []byte) string {
365
397
return fmt.Sprintf("dirty/%x", sha256.Sum256(manifestData))
366
-
}
367
-
368
-
func (s3 *S3Backend) ListManifests(ctx context.Context) (manifests []string, err error) {
369
-
logc.Print(ctx, "s3: list manifests")
370
-
371
-
ctx, cancel := context.WithCancel(ctx)
372
-
defer cancel()
373
-
374
-
prefix := manifestObjectName("")
375
-
for object := range s3.client.ListObjectsIter(ctx, s3.bucket, minio.ListObjectsOptions{
376
-
Prefix: prefix,
377
-
Recursive: true,
378
-
}) {
379
-
if object.Err != nil {
380
-
return nil, object.Err
381
-
}
382
-
key := strings.TrimRight(strings.TrimPrefix(object.Key, prefix), "/")
383
-
if strings.Count(key, "/") > 1 {
384
-
continue
385
-
}
386
-
_, project, _ := strings.Cut(key, "/")
387
-
if project == "" || strings.HasPrefix(project, ".") && project != ".index" {
388
-
continue
389
-
}
390
-
manifests = append(manifests, key)
391
-
}
392
-
393
-
return
394
398
}
395
399
396
400
type s3ManifestLoader struct {
···
636
640
return err
637
641
}
638
642
643
+
func (s3 *S3Backend) EnumerateManifests(ctx context.Context) iter.Seq2[ManifestMetadata, error] {
644
+
return func(yield func(ManifestMetadata, error) bool) {
645
+
logc.Print(ctx, "s3: enumerate manifests")
646
+
647
+
ctx, cancel := context.WithCancel(ctx)
648
+
defer cancel()
649
+
650
+
prefix := "site/"
651
+
for object := range s3.client.ListObjectsIter(ctx, s3.bucket, minio.ListObjectsOptions{
652
+
Prefix: prefix,
653
+
Recursive: true,
654
+
}) {
655
+
var metadata ManifestMetadata
656
+
var err error
657
+
if err = object.Err; err == nil {
658
+
key := strings.TrimPrefix(object.Key, prefix)
659
+
_, project, _ := strings.Cut(key, "/")
660
+
if strings.HasSuffix(key, "/") {
661
+
continue // directory; skip
662
+
} else if project == "" || strings.HasPrefix(project, ".") && project != ".index" {
663
+
continue // internal; skip
664
+
} else {
665
+
metadata.Name = key
666
+
metadata.Size = object.Size
667
+
metadata.LastModified = object.LastModified
668
+
metadata.ETag = object.ETag
669
+
}
670
+
}
671
+
if !yield(metadata, err) {
672
+
break
673
+
}
674
+
}
675
+
}
676
+
}
677
+
639
678
func domainCheckObjectName(domain string) string {
640
679
return manifestObjectName(fmt.Sprintf("%s/.exists", domain))
641
680
}
···
679
718
return err
680
719
}
681
720
682
-
func (s3 *S3Backend) FreezeDomain(ctx context.Context, domain string, freeze bool) error {
683
-
if freeze {
684
-
logc.Printf(ctx, "s3: freeze domain %s\n", domain)
721
+
func (s3 *S3Backend) FreezeDomain(ctx context.Context, domain string) error {
722
+
logc.Printf(ctx, "s3: freeze domain %s\n", domain)
685
723
686
-
_, err := s3.client.PutObject(ctx, s3.bucket, domainFrozenObjectName(domain),
687
-
&bytes.Reader{}, 0, minio.PutObjectOptions{})
688
-
return err
724
+
_, err := s3.client.PutObject(ctx, s3.bucket, domainFrozenObjectName(domain),
725
+
&bytes.Reader{}, 0, minio.PutObjectOptions{})
726
+
return err
727
+
728
+
}
729
+
730
+
func (s3 *S3Backend) UnfreezeDomain(ctx context.Context, domain string) error {
731
+
logc.Printf(ctx, "s3: unfreeze domain %s\n", domain)
732
+
733
+
err := s3.client.RemoveObject(ctx, s3.bucket, domainFrozenObjectName(domain),
734
+
minio.RemoveObjectOptions{})
735
+
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
736
+
return nil
689
737
} else {
690
-
logc.Printf(ctx, "s3: thaw domain %s\n", domain)
691
-
692
-
err := s3.client.RemoveObject(ctx, s3.bucket, domainFrozenObjectName(domain),
693
-
minio.RemoveObjectOptions{})
694
-
if errResp := minio.ToErrorResponse(err); errResp.Code == "NoSuchKey" {
695
-
return nil
696
-
} else {
697
-
return err
698
-
}
738
+
return err
699
739
}
700
740
}
701
741
···
741
781
ctx context.Context, opts SearchAuditLogOptions,
742
782
) iter.Seq2[AuditID, error] {
743
783
return func(yield func(AuditID, error) bool) {
744
-
logc.Printf(ctx, "s3: query audit\n")
784
+
logc.Printf(ctx, "s3: search audit\n")
745
785
746
786
ctx, cancel := context.WithCancel(ctx)
747
787
defer cancel()
+7
-5
src/collect.go
+7
-5
src/collect.go
···
5
5
"context"
6
6
"fmt"
7
7
"io"
8
-
"time"
9
8
)
10
9
11
10
type Flusher interface {
···
66
65
67
66
case Type_ExternalFile:
68
67
var blobReader io.Reader
69
-
var blobMtime time.Time
68
+
var blobMetadata BlobMetadata
70
69
var blobData []byte
71
-
blobReader, _, blobMtime, err = backend.GetBlob(context, string(entry.Data))
70
+
blobReader, blobMetadata, err = backend.GetBlob(context, string(entry.Data))
71
+
if err != nil {
72
+
return
73
+
}
74
+
blobData, err = io.ReadAll(blobReader)
72
75
if err != nil {
73
76
return
74
77
}
75
-
blobData, _ = io.ReadAll(blobReader)
76
78
header.Typeflag = tar.TypeReg
77
79
header.Mode = 0644
78
-
header.ModTime = blobMtime
80
+
header.ModTime = blobMetadata.LastModified
79
81
err = appendFile(&header, blobData, entry.GetTransform())
80
82
81
83
case Type_Symlink:
+11
-9
src/config.go
+11
-9
src/config.go
···
63
63
Insecure bool `toml:"-" env:"insecure"`
64
64
Features []string `toml:"features"`
65
65
LogFormat string `toml:"log-format" default:"text"`
66
-
LogLevel string `toml:"log-level" default:"info"`
67
66
Server ServerConfig `toml:"server"`
68
67
Wildcard []WildcardConfig `toml:"wildcard"`
69
68
Fallback FallbackConfig `toml:"fallback"`
···
74
73
}
75
74
76
75
type ServerConfig struct {
77
-
Pages string `toml:"pages" default:"tcp/:3000"`
78
-
Caddy string `toml:"caddy" default:"tcp/:3001"`
79
-
Metrics string `toml:"metrics" default:"tcp/:3002"`
76
+
Pages string `toml:"pages" default:"tcp/localhost:3000"`
77
+
Caddy string `toml:"caddy" default:"tcp/localhost:3001"`
78
+
Metrics string `toml:"metrics" default:"tcp/localhost:3002"`
80
79
}
81
80
82
81
type WildcardConfig struct {
···
140
139
// List of domains unconditionally forbidden for uploads.
141
140
ForbiddenDomains []string `toml:"forbidden-domains" default:"[]"`
142
141
// List of allowed repository URL prefixes. Setting this option prohibits uploading archives.
143
-
AllowedRepositoryURLPrefixes []string `toml:"allowed-repository-url-prefixes"`
142
+
AllowedRepositoryURLPrefixes []string `toml:"allowed-repository-url-prefixes" default:"[]"`
144
143
// List of allowed custom headers. Header name must be in the MIME canonical form,
145
144
// e.g. `Foo-Bar`. Setting this option permits including this custom header in `_headers`,
146
145
// unless it is fundamentally unsafe.
···
152
151
NodeID int `toml:"node-id"`
153
152
// Whether audit reports should be stored whenever an audit event occurs.
154
153
Collect bool `toml:"collect"`
155
-
// Whether audit reports should include principal's IP address.
156
-
IncludeIPs bool `toml:"include-ip"`
154
+
// If not empty, includes the principal's IP address in audit reports, with the value specifying
155
+
// the source of the IP address. If the value is "X-Forwarded-For", the last item of the
156
+
// corresponding header field (assumed to be comma-separated) is used. If the value is
157
+
// "RemoteAddr", the connecting host's address is used. Any other value is disallowed.
158
+
IncludeIPs string `toml:"include-ip"`
157
159
// Endpoint to notify with a `GET /<notify-url>?<id>` whenever an audit event occurs.
158
160
NotifyURL *URL `toml:"notify-url"`
159
161
}
···
163
165
SlowResponseThreshold Duration `toml:"slow-response-threshold" default:"500ms"`
164
166
}
165
167
166
-
func (config *Config) DebugJSON() string {
167
-
result, err := json.MarshalIndent(config, "", " ")
168
+
func (config *Config) TOML() string {
169
+
result, err := toml.Marshal(config)
168
170
if err != nil {
169
171
panic(err)
170
172
}
+20
src/extract.go
+20
src/extract.go
···
9
9
"errors"
10
10
"fmt"
11
11
"io"
12
+
"math"
12
13
"os"
13
14
"strings"
14
15
···
144
145
return nil, UnresolvedRefError{missing}
145
146
}
146
147
148
+
// Ensure parent directories exist for all entries.
149
+
EnsureLeadingDirectories(manifest)
150
+
147
151
logc.Printf(ctx,
148
152
"reuse: %s recycled, %s transferred\n",
149
153
datasize.ByteSize(dataBytesRecycled).HR(),
···
153
157
return manifest, nil
154
158
}
155
159
160
+
// Used for zstd decompression inside zip files, it is recommended to share this.
161
+
var zstdDecomp = zstd.ZipDecompressor()
162
+
156
163
func ExtractZip(ctx context.Context, reader io.Reader, oldManifest *Manifest) (*Manifest, error) {
157
164
data, err := io.ReadAll(reader)
158
165
if err != nil {
···
163
170
if err != nil {
164
171
return nil, err
165
172
}
173
+
174
+
// Support zstd compression inside zip files.
175
+
archive.RegisterDecompressor(zstd.ZipMethodWinZip, zstdDecomp)
176
+
archive.RegisterDecompressor(zstd.ZipMethodPKWare, zstdDecomp)
166
177
167
178
// Detect and defuse zipbombs.
168
179
var totalSize uint64
169
180
for _, file := range archive.File {
181
+
if totalSize+file.UncompressedSize64 < totalSize {
182
+
// Would overflow
183
+
totalSize = math.MaxUint64
184
+
break
185
+
}
170
186
totalSize += file.UncompressedSize64
171
187
}
172
188
if totalSize > config.Limits.MaxSiteSize.Bytes() {
···
213
229
return nil, UnresolvedRefError{missing}
214
230
}
215
231
232
+
// Ensure parent directories exist for all entries.
233
+
EnsureLeadingDirectories(manifest)
234
+
216
235
logc.Printf(ctx,
217
236
"reuse: %s recycled, %s transferred\n",
218
237
datasize.ByteSize(dataBytesRecycled).HR(),
···
221
240
222
241
return manifest, nil
223
242
}
243
+
+19
-6
src/fetch.go
+19
-6
src/fetch.go
···
23
23
"google.golang.org/protobuf/proto"
24
24
)
25
25
26
+
var ErrRepositoryTooLarge = errors.New("repository too large")
27
+
26
28
func FetchRepository(
27
29
ctx context.Context, repoURL string, branch string, oldManifest *Manifest,
28
30
) (
···
57
59
repo, err = git.CloneContext(ctx, storer, nil, &git.CloneOptions{
58
60
Bare: true,
59
61
URL: repoURL,
60
-
ReferenceName: plumbing.ReferenceName(branch),
62
+
ReferenceName: plumbing.NewBranchReferenceName(branch),
61
63
SingleBranch: true,
62
64
Depth: 1,
63
65
Tags: git.NoTags,
···
152
154
// This will only succeed if a `blob:none` filter isn't supported and we got a full
153
155
// clone despite asking for a partial clone.
154
156
for hash, manifestEntry := range blobsNeeded {
155
-
if err := readGitBlob(repo, hash, manifestEntry); err == nil {
156
-
dataBytesTransferred += manifestEntry.GetOriginalSize()
157
+
if err := readGitBlob(repo, hash, manifestEntry, &dataBytesTransferred); err == nil {
157
158
delete(blobsNeeded, hash)
159
+
} else if errors.Is(err, ErrRepositoryTooLarge) {
160
+
return nil, err
158
161
}
159
162
}
160
163
···
193
196
194
197
// All remaining blobs should now be available.
195
198
for hash, manifestEntry := range blobsNeeded {
196
-
if err := readGitBlob(repo, hash, manifestEntry); err != nil {
199
+
if err := readGitBlob(repo, hash, manifestEntry, &dataBytesTransferred); err != nil {
197
200
return nil, err
198
201
}
199
-
dataBytesTransferred += manifestEntry.GetOriginalSize()
200
202
delete(blobsNeeded, hash)
201
203
}
202
204
}
···
210
212
return manifest, nil
211
213
}
212
214
213
-
func readGitBlob(repo *git.Repository, hash plumbing.Hash, entry *Entry) error {
215
+
func readGitBlob(
216
+
repo *git.Repository, hash plumbing.Hash, entry *Entry, bytesTransferred *int64,
217
+
) error {
214
218
blob, err := repo.BlobObject(hash)
215
219
if err != nil {
216
220
return fmt.Errorf("git blob %s: %w", hash, err)
···
239
243
entry.Transform = Transform_Identity.Enum()
240
244
entry.OriginalSize = proto.Int64(blob.Size)
241
245
entry.CompressedSize = proto.Int64(blob.Size)
246
+
247
+
*bytesTransferred += blob.Size
248
+
if uint64(*bytesTransferred) > config.Limits.MaxSiteSize.Bytes() {
249
+
return fmt.Errorf("%w: fetch exceeds %s limit",
250
+
ErrRepositoryTooLarge,
251
+
config.Limits.MaxSiteSize.HR(),
252
+
)
253
+
}
254
+
242
255
return nil
243
256
}
+88
src/garbage.go
+88
src/garbage.go
···
1
+
package git_pages
2
+
3
+
import (
4
+
"context"
5
+
"fmt"
6
+
7
+
"github.com/c2h5oh/datasize"
8
+
"github.com/dghubble/trie"
9
+
)
10
+
11
+
func trieReduce(data trie.Trier) (items, total int64) {
12
+
data.Walk(func(key string, value any) error {
13
+
items += 1
14
+
total += *value.(*int64)
15
+
return nil
16
+
})
17
+
return
18
+
}
19
+
20
+
func TraceGarbage(ctx context.Context) error {
21
+
allBlobs := trie.NewRuneTrie()
22
+
liveBlobs := trie.NewRuneTrie()
23
+
24
+
traceManifest := func(manifestName string, manifest *Manifest) error {
25
+
for _, entry := range manifest.GetContents() {
26
+
if entry.GetType() == Type_ExternalFile {
27
+
blobName := string(entry.Data)
28
+
if size := allBlobs.Get(blobName); size == nil {
29
+
return fmt.Errorf("%s: dangling reference %s", manifestName, blobName)
30
+
} else {
31
+
liveBlobs.Put(blobName, size)
32
+
}
33
+
}
34
+
}
35
+
return nil
36
+
}
37
+
38
+
// Enumerate all blobs.
39
+
for metadata, err := range backend.EnumerateBlobs(ctx) {
40
+
if err != nil {
41
+
return fmt.Errorf("trace blobs err: %w", err)
42
+
}
43
+
allBlobs.Put(metadata.Name, &metadata.Size)
44
+
}
45
+
46
+
// Enumerate blobs live via site manifests.
47
+
for metadata, err := range backend.EnumerateManifests(ctx) {
48
+
if err != nil {
49
+
return fmt.Errorf("trace sites err: %w", err)
50
+
}
51
+
manifest, _, err := backend.GetManifest(ctx, metadata.Name, GetManifestOptions{})
52
+
if err != nil {
53
+
return fmt.Errorf("trace sites err: %w", err)
54
+
}
55
+
err = traceManifest(metadata.Name, manifest)
56
+
if err != nil {
57
+
return fmt.Errorf("trace sites err: %w", err)
58
+
}
59
+
}
60
+
61
+
// Enumerate blobs live via audit records.
62
+
for auditID, err := range backend.SearchAuditLog(ctx, SearchAuditLogOptions{}) {
63
+
if err != nil {
64
+
return fmt.Errorf("trace audit err: %w", err)
65
+
}
66
+
auditRecord, err := backend.QueryAuditLog(ctx, auditID)
67
+
if err != nil {
68
+
return fmt.Errorf("trace audit err: %w", err)
69
+
}
70
+
if auditRecord.Manifest != nil {
71
+
err = traceManifest(auditID.String(), auditRecord.Manifest)
72
+
if err != nil {
73
+
return fmt.Errorf("trace audit err: %w", err)
74
+
}
75
+
}
76
+
}
77
+
78
+
allBlobsCount, allBlobsSize := trieReduce(allBlobs)
79
+
liveBlobsCount, liveBlobsSize := trieReduce(liveBlobs)
80
+
logc.Printf(ctx, "trace all: %d blobs, %s",
81
+
allBlobsCount, datasize.ByteSize(allBlobsSize).HR())
82
+
logc.Printf(ctx, "trace live: %d blobs, %s",
83
+
liveBlobsCount, datasize.ByteSize(liveBlobsSize).HR())
84
+
logc.Printf(ctx, "trace dead: %d blobs, %s",
85
+
allBlobsCount-liveBlobsCount, datasize.ByteSize(allBlobsSize-liveBlobsSize).HR())
86
+
87
+
return nil
88
+
}
+105
-17
src/http.go
+105
-17
src/http.go
···
2
2
3
3
import (
4
4
"cmp"
5
+
"fmt"
6
+
"net"
7
+
"net/http"
5
8
"regexp"
6
9
"slices"
7
10
"strconv"
···
10
13
11
14
var httpAcceptRegexp = regexp.MustCompile(`` +
12
15
// token optionally prefixed by whitespace
13
-
`^[ \t]*([a-zA-Z0-9$!#$%&'*+.^_\x60|~-]+)` +
16
+
`^[ \t]*([a-zA-Z0-9$!#$%&'*+./^_\x60|~-]+)` +
14
17
// quality value prefixed by a semicolon optionally surrounded by whitespace
15
18
`(?:[ \t]*;[ \t]*q=(0(?:\.[0-9]{1,3})?|1(?:\.0{1,3})?))?` +
16
19
// optional whitespace followed by comma or end of line
···
22
25
qval float64
23
26
}
24
27
25
-
type HTTPEncodings struct {
26
-
encodings []httpAcceptOffer
27
-
}
28
-
29
-
func ParseHTTPAcceptEncoding(headerValue string) (result HTTPEncodings) {
28
+
func parseGenericAcceptHeader(headerValue string) (result []httpAcceptOffer) {
30
29
for headerValue != "" {
31
30
matches := httpAcceptRegexp.FindStringSubmatch(headerValue)
32
31
if matches == nil {
33
-
return HTTPEncodings{}
32
+
return
34
33
}
35
-
enc := httpAcceptOffer{strings.ToLower(matches[1]), 1.0}
34
+
offer := httpAcceptOffer{strings.ToLower(matches[1]), 1.0}
36
35
if matches[2] != "" {
37
-
enc.qval, _ = strconv.ParseFloat(matches[2], 64)
36
+
offer.qval, _ = strconv.ParseFloat(matches[2], 64)
38
37
}
39
-
result.encodings = append(result.encodings, enc)
38
+
result = append(result, offer)
40
39
headerValue = headerValue[len(matches[0]):]
41
40
}
41
+
return
42
+
}
43
+
44
+
func preferredAcceptOffer(offers []httpAcceptOffer) string {
45
+
slices.SortStableFunc(offers, func(a, b httpAcceptOffer) int {
46
+
return -cmp.Compare(a.qval, b.qval)
47
+
})
48
+
for _, offer := range offers {
49
+
if offer.qval != 0 {
50
+
return offer.code
51
+
}
52
+
}
53
+
return ""
54
+
}
55
+
56
+
type HTTPContentTypes struct {
57
+
contentTypes []httpAcceptOffer
58
+
}
59
+
60
+
func ParseAcceptHeader(headerValue string) (result HTTPContentTypes) {
61
+
if headerValue == "" {
62
+
headerValue = "*/*"
63
+
}
64
+
result = HTTPContentTypes{parseGenericAcceptHeader(headerValue)}
65
+
return
66
+
}
67
+
68
+
func (e *HTTPContentTypes) Negotiate(offers ...string) string {
69
+
prefs := make(map[string]float64, len(offers))
70
+
for _, code := range offers {
71
+
prefs[code] = 0
72
+
}
73
+
for _, ctyp := range e.contentTypes {
74
+
if ctyp.code == "*/*" {
75
+
for code := range prefs {
76
+
prefs[code] = ctyp.qval
77
+
}
78
+
} else if _, ok := prefs[ctyp.code]; ok {
79
+
prefs[ctyp.code] = ctyp.qval
80
+
}
81
+
}
82
+
ctyps := make([]httpAcceptOffer, len(offers))
83
+
for idx, code := range offers {
84
+
ctyps[idx] = httpAcceptOffer{code, prefs[code]}
85
+
}
86
+
return preferredAcceptOffer(ctyps)
87
+
}
88
+
89
+
type HTTPEncodings struct {
90
+
encodings []httpAcceptOffer
91
+
}
92
+
93
+
func ParseAcceptEncodingHeader(headerValue string) (result HTTPEncodings) {
94
+
result = HTTPEncodings{parseGenericAcceptHeader(headerValue)}
42
95
if len(result.encodings) == 0 {
43
96
// RFC 9110 says (https://httpwg.org/specs/rfc9110.html#field.accept-encoding):
44
97
// "If no Accept-Encoding header field is in the request, any content
···
77
130
for idx, code := range offers {
78
131
encs[idx] = httpAcceptOffer{code, prefs[code]}
79
132
}
80
-
slices.SortStableFunc(encs, func(a, b httpAcceptOffer) int {
81
-
return -cmp.Compare(a.qval, b.qval)
82
-
})
83
-
for _, enc := range encs {
84
-
if enc.qval != 0 {
85
-
return enc.code
133
+
return preferredAcceptOffer(encs)
134
+
}
135
+
136
+
func chainHTTPMiddleware(middleware ...func(http.Handler) http.Handler) func(http.Handler) http.Handler {
137
+
return func(handler http.Handler) http.Handler {
138
+
for idx := len(middleware) - 1; idx >= 0; idx-- {
139
+
handler = middleware[idx](handler)
86
140
}
141
+
return handler
87
142
}
88
-
return ""
143
+
}
144
+
145
+
func remoteAddrMiddleware(handler http.Handler) http.Handler {
146
+
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
147
+
var readXForwardedFor bool
148
+
switch config.Audit.IncludeIPs {
149
+
case "X-Forwarded-For":
150
+
readXForwardedFor = true
151
+
case "RemoteAddr", "":
152
+
readXForwardedFor = false
153
+
default:
154
+
panic(fmt.Errorf("config.Audit.IncludeIPs is set to an unknown value (%q)",
155
+
config.Audit.IncludeIPs))
156
+
}
157
+
158
+
usingOriginalRemoteAddr := true
159
+
if readXForwardedFor {
160
+
forwardedFor := strings.Split(r.Header.Get("X-Forwarded-For"), ",")
161
+
if len(forwardedFor) > 0 {
162
+
remoteAddr := strings.TrimSpace(forwardedFor[len(forwardedFor)-1])
163
+
if remoteAddr != "" {
164
+
r.RemoteAddr = remoteAddr
165
+
usingOriginalRemoteAddr = false
166
+
}
167
+
}
168
+
}
169
+
if usingOriginalRemoteAddr {
170
+
if ipAddress, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
171
+
r.RemoteAddr = ipAddress
172
+
}
173
+
}
174
+
175
+
handler.ServeHTTP(w, r)
176
+
})
89
177
}
+109
-56
src/main.go
+109
-56
src/main.go
···
14
14
"net/http/httputil"
15
15
"net/url"
16
16
"os"
17
+
"path"
17
18
"runtime/debug"
18
19
"strings"
19
20
"time"
···
132
133
133
134
func serve(ctx context.Context, listener net.Listener, handler http.Handler) {
134
135
if listener != nil {
135
-
handler = panicHandler(handler)
136
-
137
136
server := http.Server{Handler: handler}
138
137
server.Protocols = new(http.Protocols)
139
138
server.Protocols.SetHTTP1(true)
···
172
171
fmt.Fprintf(os.Stderr, "Usage:\n")
173
172
fmt.Fprintf(os.Stderr, "(server) "+
174
173
"git-pages [-config <file>|-no-config]\n")
174
+
fmt.Fprintf(os.Stderr, "(info) "+
175
+
"git-pages {-print-config-env-vars|-print-config}\n")
176
+
fmt.Fprintf(os.Stderr, "(debug) "+
177
+
"git-pages {-list-blobs|-list-manifests}\n")
178
+
fmt.Fprintf(os.Stderr, "(debug) "+
179
+
"git-pages {-get-blob|-get-manifest|-get-archive|-update-site} <ref> [file]\n")
175
180
fmt.Fprintf(os.Stderr, "(admin) "+
176
-
"git-pages {-run-migration <name>|-freeze-domain <domain>|-unfreeze-domain <domain>}\n")
181
+
"git-pages {-freeze-domain <domain>|-unfreeze-domain <domain>}\n")
177
182
fmt.Fprintf(os.Stderr, "(audit) "+
178
183
"git-pages {-audit-log|-audit-read <id>|-audit-server <endpoint> <program> [args...]}\n")
179
-
fmt.Fprintf(os.Stderr, "(info) "+
180
-
"git-pages {-print-config-env-vars|-print-config}\n")
181
-
fmt.Fprintf(os.Stderr, "(cli) "+
182
-
"git-pages {-get-blob|-get-manifest|-get-archive|-update-site} <ref> [file]\n")
184
+
fmt.Fprintf(os.Stderr, "(maint) "+
185
+
"git-pages {-run-migration <name>|-trace-garbage}\n")
183
186
flag.PrintDefaults()
184
187
}
185
188
···
187
190
ctx := context.Background()
188
191
189
192
flag.Usage = usage
193
+
configTomlPath := flag.String("config", "",
194
+
"load configuration from `filename` (default: 'config.toml')")
195
+
noConfig := flag.Bool("no-config", false,
196
+
"run without configuration file (configure via environment variables)")
190
197
printConfigEnvVars := flag.Bool("print-config-env-vars", false,
191
198
"print every recognized configuration environment variable and exit")
192
199
printConfig := flag.Bool("print-config", false,
193
200
"print configuration as JSON and exit")
194
-
configTomlPath := flag.String("config", "",
195
-
"load configuration from `filename` (default: 'config.toml')")
196
-
noConfig := flag.Bool("no-config", false,
197
-
"run without configuration file (configure via environment variables)")
198
-
runMigration := flag.String("run-migration", "",
199
-
"run a store `migration` (one of: create-domain-markers)")
201
+
listBlobs := flag.Bool("list-blobs", false,
202
+
"enumerate every blob with its metadata")
203
+
listManifests := flag.Bool("list-manifests", false,
204
+
"enumerate every manifest with its metadata")
200
205
getBlob := flag.String("get-blob", "",
201
206
"write contents of `blob` ('sha256-xxxxxxx...xxx')")
202
207
getManifest := flag.String("get-manifest", "",
···
213
218
"display audit log")
214
219
auditRead := flag.String("audit-read", "",
215
220
"extract contents of audit record `id` to files '<id>-*'")
221
+
auditRollback := flag.String("audit-rollback", "",
222
+
"restore site from contents of audit record `id`")
216
223
auditServer := flag.String("audit-server", "",
217
224
"listen for notifications on `endpoint` and spawn a process for each audit event")
225
+
runMigration := flag.String("run-migration", "",
226
+
"run a store `migration` (one of: create-domain-markers)")
227
+
traceGarbage := flag.Bool("trace-garbage", false,
228
+
"estimate total size of unreachable blobs")
218
229
flag.Parse()
219
230
220
231
var cliOperations int
221
232
for _, selected := range []bool{
222
-
*runMigration != "",
233
+
*listBlobs,
234
+
*listManifests,
223
235
*getBlob != "",
224
236
*getManifest != "",
225
237
*getArchive != "",
···
228
240
*unfreezeDomain != "",
229
241
*auditLog,
230
242
*auditRead != "",
243
+
*auditRollback != "",
231
244
*auditServer != "",
245
+
*runMigration != "",
246
+
*traceGarbage,
232
247
} {
233
248
if selected {
234
249
cliOperations++
235
250
}
236
251
}
237
252
if cliOperations > 1 {
238
-
logc.Fatalln(ctx, "-get-blob, -get-manifest, -get-archive, -update-site, "+
239
-
"-freeze, -unfreeze, -audit-log, and -audit-read are mutually exclusive")
253
+
logc.Fatalln(ctx, "-list-blobs, -list-manifests, -get-blob, -get-manifest, -get-archive, "+
254
+
"-update-site, -freeze-domain, -unfreeze-domain, -audit-log, -audit-read, "+
255
+
"-audit-rollback, -audit-server, -run-migration, and -trace-garbage are "+
256
+
"mutually exclusive")
240
257
}
241
258
242
259
if *configTomlPath != "" && *noConfig {
···
257
274
}
258
275
259
276
if *printConfig {
260
-
fmt.Println(config.DebugJSON())
277
+
fmt.Println(config.TOML())
261
278
return
262
279
}
263
280
···
274
291
logc.Fatalln(ctx, err)
275
292
}
276
293
277
-
switch {
278
-
case *runMigration != "":
294
+
// The server has its own logic for creating the backend.
295
+
if cliOperations > 0 {
279
296
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
280
297
logc.Fatalln(ctx, err)
281
298
}
299
+
}
282
300
283
-
if err := RunMigration(ctx, *runMigration); err != nil {
284
-
logc.Fatalln(ctx, err)
301
+
switch {
302
+
case *listBlobs:
303
+
for metadata, err := range backend.EnumerateBlobs(ctx) {
304
+
if err != nil {
305
+
logc.Fatalln(ctx, err)
306
+
}
307
+
fmt.Fprintf(color.Output, "%s %s %s\n",
308
+
metadata.Name,
309
+
color.HiWhiteString(metadata.LastModified.UTC().Format(time.RFC3339)),
310
+
color.HiGreenString(fmt.Sprint(metadata.Size)),
311
+
)
285
312
}
286
313
287
-
case *getBlob != "":
288
-
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
289
-
logc.Fatalln(ctx, err)
314
+
case *listManifests:
315
+
for metadata, err := range backend.EnumerateManifests(ctx) {
316
+
if err != nil {
317
+
logc.Fatalln(ctx, err)
318
+
}
319
+
fmt.Fprintf(color.Output, "%s %s %s\n",
320
+
metadata.Name,
321
+
color.HiWhiteString(metadata.LastModified.UTC().Format(time.RFC3339)),
322
+
color.HiGreenString(fmt.Sprint(metadata.Size)),
323
+
)
290
324
}
291
325
292
-
reader, _, _, err := backend.GetBlob(ctx, *getBlob)
326
+
case *getBlob != "":
327
+
reader, _, err := backend.GetBlob(ctx, *getBlob)
293
328
if err != nil {
294
329
logc.Fatalln(ctx, err)
295
330
}
296
331
io.Copy(fileOutputArg(), reader)
297
332
298
333
case *getManifest != "":
299
-
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
300
-
logc.Fatalln(ctx, err)
301
-
}
302
-
303
334
webRoot := webRootArg(*getManifest)
304
335
manifest, _, err := backend.GetManifest(ctx, webRoot, GetManifestOptions{})
305
336
if err != nil {
···
308
339
fmt.Fprintln(fileOutputArg(), string(ManifestJSON(manifest)))
309
340
310
341
case *getArchive != "":
311
-
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
312
-
logc.Fatalln(ctx, err)
313
-
}
314
-
315
342
webRoot := webRootArg(*getArchive)
316
343
manifest, metadata, err :=
317
344
backend.GetManifest(ctx, webRoot, GetManifestOptions{})
···
325
352
case *updateSite != "":
326
353
ctx = WithPrincipal(ctx)
327
354
GetPrincipal(ctx).CliAdmin = proto.Bool(true)
328
-
329
-
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
330
-
logc.Fatalln(ctx, err)
331
-
}
332
355
333
356
if flag.NArg() != 1 {
334
357
logc.Fatalln(ctx, "update source must be provided as the argument")
···
404
427
freeze = false
405
428
}
406
429
407
-
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
408
-
logc.Fatalln(ctx, err)
409
-
}
410
-
411
-
if err = backend.FreezeDomain(ctx, domain, freeze); err != nil {
412
-
logc.Fatalln(ctx, err)
413
-
}
414
430
if freeze {
431
+
if err = backend.FreezeDomain(ctx, domain); err != nil {
432
+
logc.Fatalln(ctx, err)
433
+
}
415
434
logc.Println(ctx, "frozen")
416
435
} else {
436
+
if err = backend.UnfreezeDomain(ctx, domain); err != nil {
437
+
logc.Fatalln(ctx, err)
438
+
}
417
439
logc.Println(ctx, "thawed")
418
440
}
419
441
420
442
case *auditLog:
421
-
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
422
-
logc.Fatalln(ctx, err)
423
-
}
424
-
425
443
ch := make(chan *AuditRecord)
426
444
ids := []AuditID{}
427
445
for id, err := range backend.SearchAuditLog(ctx, SearchAuditLogOptions{}) {
···
456
474
}
457
475
458
476
case *auditRead != "":
459
-
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
460
-
logc.Fatalln(ctx, err)
461
-
}
462
-
463
477
id, err := ParseAuditID(*auditRead)
464
478
if err != nil {
465
479
logc.Fatalln(ctx, err)
···
474
488
logc.Fatalln(ctx, err)
475
489
}
476
490
477
-
case *auditServer != "":
478
-
if backend, err = CreateBackend(ctx, &config.Storage); err != nil {
491
+
case *auditRollback != "":
492
+
ctx = WithPrincipal(ctx)
493
+
GetPrincipal(ctx).CliAdmin = proto.Bool(true)
494
+
495
+
id, err := ParseAuditID(*auditRollback)
496
+
if err != nil {
497
+
logc.Fatalln(ctx, err)
498
+
}
499
+
500
+
record, err := backend.QueryAuditLog(ctx, id)
501
+
if err != nil {
502
+
logc.Fatalln(ctx, err)
503
+
}
504
+
505
+
if record.GetManifest() == nil || record.GetDomain() == "" || record.GetProject() == "" {
506
+
logc.Fatalln(ctx, "no manifest in audit record")
507
+
}
508
+
509
+
webRoot := path.Join(record.GetDomain(), record.GetProject())
510
+
err = backend.StageManifest(ctx, record.GetManifest())
511
+
if err != nil {
512
+
logc.Fatalln(ctx, err)
513
+
}
514
+
err = backend.CommitManifest(ctx, webRoot, record.GetManifest(), ModifyManifestOptions{})
515
+
if err != nil {
479
516
logc.Fatalln(ctx, err)
480
517
}
481
518
519
+
case *auditServer != "":
482
520
if flag.NArg() < 1 {
483
521
logc.Fatalln(ctx, "handler path not provided")
484
522
}
···
489
527
}
490
528
491
529
serve(ctx, listen(ctx, "audit", *auditServer), ObserveHTTPHandler(processor))
530
+
531
+
case *runMigration != "":
532
+
if err = RunMigration(ctx, *runMigration); err != nil {
533
+
logc.Fatalln(ctx, err)
534
+
}
535
+
536
+
case *traceGarbage:
537
+
if err = TraceGarbage(ctx); err != nil {
538
+
logc.Fatalln(ctx, err)
539
+
}
492
540
493
541
default:
494
542
// Hook a signal (SIGHUP on *nix, nothing on Windows) for reloading the configuration
···
537
585
}
538
586
backend = NewObservedBackend(backend)
539
587
540
-
go serve(ctx, pagesListener, ObserveHTTPHandler(http.HandlerFunc(ServePages)))
541
-
go serve(ctx, caddyListener, ObserveHTTPHandler(http.HandlerFunc(ServeCaddy)))
588
+
middleware := chainHTTPMiddleware(
589
+
panicHandler,
590
+
remoteAddrMiddleware,
591
+
ObserveHTTPHandler,
592
+
)
593
+
go serve(ctx, pagesListener, middleware(http.HandlerFunc(ServePages)))
594
+
go serve(ctx, caddyListener, middleware(http.HandlerFunc(ServeCaddy)))
542
595
go serve(ctx, metricsListener, promhttp.Handler())
543
596
544
597
if config.Insecure {
+39
-9
src/manifest.go
+39
-9
src/manifest.go
···
144
144
return fmt.Errorf("%s: %s", pathName, cause)
145
145
}
146
146
147
+
// EnsureLeadingDirectories adds directory entries for any parent directories
148
+
// that are implicitly referenced by files in the manifest but don't have
149
+
// explicit directory entries. (This can be the case if an archive is created
150
+
// via globs rather than including a whole directory.)
151
+
func EnsureLeadingDirectories(manifest *Manifest) {
152
+
for name := range manifest.Contents {
153
+
for dir := path.Dir(name); dir != "." && dir != ""; dir = path.Dir(dir) {
154
+
if _, exists := manifest.Contents[dir]; !exists {
155
+
AddDirectory(manifest, dir)
156
+
}
157
+
}
158
+
}
159
+
}
160
+
147
161
func GetProblemReport(manifest *Manifest) []string {
148
162
var report []string
149
163
for _, problem := range manifest.Problems {
150
164
report = append(report,
151
-
fmt.Sprintf("%s: %s", problem.GetPath(), problem.GetCause()))
165
+
fmt.Sprintf("/%s: %s", problem.GetPath(), problem.GetCause()))
152
166
}
153
167
return report
154
168
}
···
257
271
// At the moment, there isn't a good way to report errors except to log them on the terminal.
258
272
// (Perhaps in the future they could be exposed at `.git-pages/status.txt`?)
259
273
func PrepareManifest(ctx context.Context, manifest *Manifest) error {
260
-
// Parse Netlify-style `_redirects`
274
+
// Parse Netlify-style `_redirects`.
261
275
if err := ProcessRedirectsFile(manifest); err != nil {
262
276
logc.Printf(ctx, "redirects err: %s\n", err)
263
277
} else if len(manifest.Redirects) > 0 {
264
278
logc.Printf(ctx, "redirects ok: %d rules\n", len(manifest.Redirects))
265
279
}
266
280
267
-
// Parse Netlify-style `_headers`
281
+
// Check if any redirects are unreachable.
282
+
LintRedirects(manifest)
283
+
284
+
// Parse Netlify-style `_headers`.
268
285
if err := ProcessHeadersFile(manifest); err != nil {
269
286
logc.Printf(ctx, "headers err: %s\n", err)
270
287
} else if len(manifest.Headers) > 0 {
271
288
logc.Printf(ctx, "headers ok: %d rules\n", len(manifest.Headers))
272
289
}
273
290
274
-
// Sniff content type like `http.ServeContent`
291
+
// Sniff content type like `http.ServeContent`.
275
292
DetectContentType(manifest)
276
293
277
-
// Opportunistically compress blobs (must be done last)
294
+
// Opportunistically compress blobs (must be done last).
278
295
CompressFiles(ctx, manifest)
279
296
280
297
return nil
281
298
}
282
299
300
+
var ErrSiteTooLarge = errors.New("site too large")
283
301
var ErrManifestTooLarge = errors.New("manifest too large")
284
302
285
303
// Uploads inline file data over certain size to the storage backend. Returns a copy of
···
322
340
}
323
341
}
324
342
325
-
// Compute the deduplicated storage size.
326
-
var blobSizes = make(map[string]int64)
327
-
for _, entry := range manifest.Contents {
343
+
// Compute the total and deduplicated storage size.
344
+
totalSize := int64(0)
345
+
blobSizes := map[string]int64{}
346
+
for _, entry := range extManifest.Contents {
347
+
totalSize += entry.GetOriginalSize()
328
348
if entry.GetType() == Type_ExternalFile {
329
349
blobSizes[string(entry.Data)] = entry.GetCompressedSize()
330
350
}
331
351
}
352
+
if uint64(totalSize) > config.Limits.MaxSiteSize.Bytes() {
353
+
return nil, fmt.Errorf("%w: contents size %s exceeds %s limit",
354
+
ErrSiteTooLarge,
355
+
datasize.ByteSize(totalSize).HR(),
356
+
config.Limits.MaxSiteSize.HR(),
357
+
)
358
+
}
332
359
for _, blobSize := range blobSizes {
333
360
*extManifest.StoredSize += blobSize
334
361
}
···
350
377
wg := sync.WaitGroup{}
351
378
ch := make(chan error, len(extManifest.Contents))
352
379
for name, entry := range extManifest.Contents {
353
-
if entry.GetType() == Type_ExternalFile {
380
+
// Upload external entries (those that were decided as ineligible for being stored inline).
381
+
// If the entry in the original manifest is already an external reference, there's no need
382
+
// to externalize it (and no way for us to do so, since the entry only contains the blob name).
383
+
if entry.GetType() == Type_ExternalFile && manifest.Contents[name].GetType() == Type_InlineFile {
354
384
wg.Go(func() {
355
385
err := backend.PutBlob(ctx, string(entry.Data), manifest.Contents[name].Data)
356
386
if err != nil {
+7
-4
src/migrate.go
+7
-4
src/migrate.go
···
22
22
return nil
23
23
}
24
24
25
-
var manifests, domains []string
26
-
manifests, err := backend.ListManifests(ctx)
27
-
if err != nil {
28
-
return fmt.Errorf("list manifests: %w", err)
25
+
var manifests []string
26
+
for metadata, err := range backend.EnumerateManifests(ctx) {
27
+
if err != nil {
28
+
return fmt.Errorf("enum manifests: %w", err)
29
+
}
30
+
manifests = append(manifests, metadata.Name)
29
31
}
30
32
slices.Sort(manifests)
33
+
var domains []string
31
34
for _, manifest := range manifests {
32
35
domain, _, _ := strings.Cut(manifest, "/")
33
36
if len(domains) == 0 || domains[len(domains)-1] != domain {
+104
-70
src/observe.go
+104
-70
src/observe.go
···
13
13
"os"
14
14
"runtime/debug"
15
15
"strconv"
16
-
"strings"
17
16
"sync"
18
17
"time"
19
18
···
52
51
return os.Getenv("SENTRY_DSN") != ""
53
52
}
54
53
54
+
func chainSentryMiddleware(
55
+
middleware ...func(event *sentry.Event, hint *sentry.EventHint) *sentry.Event,
56
+
) func(event *sentry.Event, hint *sentry.EventHint) *sentry.Event {
57
+
return func(event *sentry.Event, hint *sentry.EventHint) *sentry.Event {
58
+
for idx := 0; idx < len(middleware) && event != nil; idx++ {
59
+
event = middleware[idx](event, hint)
60
+
}
61
+
return event
62
+
}
63
+
}
64
+
65
+
// sensitiveHTTPHeaders extends the list of sensitive headers defined in the Sentry Go SDK with our
// own application-specific header field names.
// Header fields listed here are removed from outgoing events by scrubSentryEvent.
// NOTE(review): keys presumably must match the form Sentry uses when populating
// event.Request.Headers (looks like canonical Title-Case) — verify against the SDK.
var sensitiveHTTPHeaders = map[string]struct{}{
	"Forge-Authorization": {},
}
70
+
71
+
// scrubSentryEvent removes sensitive HTTP header fields from the Sentry event.
72
+
func scrubSentryEvent(event *sentry.Event, hint *sentry.EventHint) *sentry.Event {
73
+
if event.Request != nil && event.Request.Headers != nil {
74
+
for key := range event.Request.Headers {
75
+
if _, ok := sensitiveHTTPHeaders[key]; ok {
76
+
delete(event.Request.Headers, key)
77
+
}
78
+
}
79
+
}
80
+
return event
81
+
}
82
+
83
+
// sampleSentryEvent returns a function that discards a Sentry event according to the sample rate,
84
+
// unless the associated HTTP request triggers a mutation or it took too long to produce a response,
85
+
// in which case the event is never discarded.
86
+
func sampleSentryEvent(sampleRate float64) func(*sentry.Event, *sentry.EventHint) *sentry.Event {
87
+
return func(event *sentry.Event, hint *sentry.EventHint) *sentry.Event {
88
+
newSampleRate := sampleRate
89
+
if event.Request != nil {
90
+
switch event.Request.Method {
91
+
case "PUT", "POST", "DELETE":
92
+
newSampleRate = 1
93
+
}
94
+
}
95
+
duration := event.Timestamp.Sub(event.StartTime)
96
+
threshold := time.Duration(config.Observability.SlowResponseThreshold)
97
+
if duration >= threshold {
98
+
newSampleRate = 1
99
+
}
100
+
if rand.Float64() < newSampleRate {
101
+
return event
102
+
}
103
+
return nil
104
+
}
105
+
}
106
+
55
107
func InitObservability() {
56
108
debug.SetPanicOnFault(true)
57
109
···
62
114
63
115
logHandlers := []slog.Handler{}
64
116
65
-
logLevel := slog.LevelInfo
66
-
switch strings.ToLower(config.LogLevel) {
67
-
case "debug":
68
-
logLevel = slog.LevelDebug
69
-
case "info":
70
-
logLevel = slog.LevelInfo
71
-
case "warn":
72
-
logLevel = slog.LevelWarn
73
-
case "error":
74
-
logLevel = slog.LevelError
75
-
default:
76
-
log.Println("unknown log level", config.LogLevel)
77
-
}
78
-
79
117
switch config.LogFormat {
80
118
case "none":
81
119
// nothing to do
82
120
case "text":
83
121
logHandlers = append(logHandlers,
84
-
slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: logLevel}))
122
+
slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{}))
85
123
case "json":
86
124
logHandlers = append(logHandlers,
87
-
slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: logLevel}))
125
+
slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{}))
88
126
default:
89
127
log.Println("unknown log format", config.LogFormat)
90
128
}
···
113
151
enableTracing = value
114
152
}
115
153
154
+
tracesSampleRate := 1.00
155
+
switch environment {
156
+
case "development", "staging":
157
+
default:
158
+
tracesSampleRate = 0.05
159
+
}
160
+
116
161
options := sentry.ClientOptions{}
117
162
options.DisableTelemetryBuffer = !config.Feature("sentry-telemetry-buffer")
118
163
options.Environment = environment
119
164
options.EnableLogs = enableLogs
120
165
options.EnableTracing = enableTracing
121
-
options.TracesSampleRate = 1
122
-
switch environment {
123
-
case "development", "staging":
124
-
default:
125
-
options.BeforeSendTransaction = func(event *sentry.Event, hint *sentry.EventHint) *sentry.Event {
126
-
sampleRate := 0.05
127
-
if trace, ok := event.Contexts["trace"]; ok {
128
-
if data, ok := trace["data"].(map[string]any); ok {
129
-
if method, ok := data["http.request.method"].(string); ok {
130
-
switch method {
131
-
case "PUT", "DELETE", "POST":
132
-
sampleRate = 1
133
-
default:
134
-
duration := event.Timestamp.Sub(event.StartTime)
135
-
threshold := time.Duration(config.Observability.SlowResponseThreshold)
136
-
if duration >= threshold {
137
-
sampleRate = 1
138
-
}
139
-
}
140
-
}
141
-
}
142
-
}
143
-
if rand.Float64() < sampleRate {
144
-
return event
145
-
}
146
-
return nil
147
-
}
148
-
}
166
+
options.TracesSampleRate = 1 // use our own custom sampling logic
167
+
options.BeforeSend = scrubSentryEvent
168
+
options.BeforeSendTransaction = chainSentryMiddleware(
169
+
sampleSentryEvent(tracesSampleRate),
170
+
scrubSentryEvent,
171
+
)
149
172
if err := sentry.Init(options); err != nil {
150
173
log.Fatalf("sentry: %s\n", err)
151
174
}
···
153
176
if enableLogs {
154
177
logHandlers = append(logHandlers, sentryslog.Option{
155
178
AddSource: true,
156
-
LogLevel: levelsFromMinimum(logLevel),
157
179
}.NewSentryHandler(context.Background()))
158
180
}
159
181
}
160
182
161
183
slog.SetDefault(slog.New(slogmulti.Fanout(logHandlers...)))
162
-
}
163
-
164
-
// From sentryslog, because for some reason they don't make it public.
165
-
func levelsFromMinimum(minLevel slog.Level) []slog.Level {
166
-
allLevels := []slog.Level{slog.LevelDebug, slog.LevelInfo, slog.LevelWarn, slog.LevelError, sentryslog.LevelFatal}
167
-
var result []slog.Level
168
-
for _, level := range allLevels {
169
-
if level >= minLevel {
170
-
result = append(result, level)
171
-
}
172
-
}
173
-
return result
174
184
}
175
185
176
186
func FiniObservability() {
···
344
354
func (backend *observedBackend) GetBlob(
345
355
ctx context.Context, name string,
346
356
) (
347
-
reader io.ReadSeeker, size uint64, mtime time.Time, err error,
357
+
reader io.ReadSeeker, metadata BlobMetadata, err error,
348
358
) {
349
359
span, ctx := ObserveFunction(ctx, "GetBlob", "blob.name", name)
350
-
if reader, size, mtime, err = backend.inner.GetBlob(ctx, name); err == nil {
351
-
ObserveData(ctx, "blob.size", size)
360
+
if reader, metadata, err = backend.inner.GetBlob(ctx, name); err == nil {
361
+
ObserveData(ctx, "blob.size", metadata.Size)
352
362
blobsRetrievedCount.Inc()
353
-
blobsRetrievedBytes.Add(float64(size))
363
+
blobsRetrievedBytes.Add(float64(metadata.Size))
354
364
}
355
365
span.Finish()
356
366
return
···
373
383
return
374
384
}
375
385
376
-
func (backend *observedBackend) ListManifests(ctx context.Context) (manifests []string, err error) {
377
-
span, ctx := ObserveFunction(ctx, "ListManifests")
378
-
manifests, err = backend.inner.ListManifests(ctx)
379
-
span.Finish()
380
-
return
386
+
func (backend *observedBackend) EnumerateBlobs(ctx context.Context) iter.Seq2[BlobMetadata, error] {
387
+
return func(yield func(BlobMetadata, error) bool) {
388
+
span, ctx := ObserveFunction(ctx, "EnumerateBlobs")
389
+
for metadata, err := range backend.inner.EnumerateBlobs(ctx) {
390
+
if !yield(metadata, err) {
391
+
break
392
+
}
393
+
}
394
+
span.Finish()
395
+
}
381
396
}
382
397
383
398
func (backend *observedBackend) GetManifest(
···
421
436
return
422
437
}
423
438
439
+
func (backend *observedBackend) EnumerateManifests(ctx context.Context) iter.Seq2[ManifestMetadata, error] {
440
+
return func(yield func(ManifestMetadata, error) bool) {
441
+
span, ctx := ObserveFunction(ctx, "EnumerateManifests")
442
+
for metadata, err := range backend.inner.EnumerateManifests(ctx) {
443
+
if !yield(metadata, err) {
444
+
break
445
+
}
446
+
}
447
+
span.Finish()
448
+
}
449
+
}
450
+
424
451
func (backend *observedBackend) CheckDomain(ctx context.Context, domain string) (found bool, err error) {
425
452
span, ctx := ObserveFunction(ctx, "CheckDomain", "domain.name", domain)
426
453
found, err = backend.inner.CheckDomain(ctx, domain)
···
435
462
return
436
463
}
437
464
438
-
func (backend *observedBackend) FreezeDomain(ctx context.Context, domain string, freeze bool) (err error) {
439
-
span, ctx := ObserveFunction(ctx, "FreezeDomain", "domain.name", domain, "domain.frozen", freeze)
440
-
err = backend.inner.FreezeDomain(ctx, domain, freeze)
465
+
func (backend *observedBackend) FreezeDomain(ctx context.Context, domain string) (err error) {
466
+
span, ctx := ObserveFunction(ctx, "FreezeDomain", "domain.name", domain)
467
+
err = backend.inner.FreezeDomain(ctx, domain)
468
+
span.Finish()
469
+
return
470
+
}
471
+
472
+
func (backend *observedBackend) UnfreezeDomain(ctx context.Context, domain string) (err error) {
473
+
span, ctx := ObserveFunction(ctx, "UnfreezeDomain", "domain.name", domain)
474
+
err = backend.inner.UnfreezeDomain(ctx, domain)
441
475
span.Finish()
442
476
return
443
477
}
+81
-45
src/pages.go
+81
-45
src/pages.go
···
9
9
"fmt"
10
10
"io"
11
11
"maps"
12
-
"net"
13
12
"net/http"
14
13
"net/url"
15
14
"os"
···
133
132
err = nil
134
133
sitePath = strings.TrimPrefix(r.URL.Path, "/")
135
134
if projectName, projectPath, hasProjectSlash := strings.Cut(sitePath, "/"); projectName != "" {
136
-
var projectManifest *Manifest
137
-
var projectMetadata ManifestMetadata
138
-
projectManifest, projectMetadata, err = backend.GetManifest(
139
-
r.Context(), makeWebRoot(host, projectName),
140
-
GetManifestOptions{BypassCache: bypassCache},
141
-
)
142
-
if err == nil {
143
-
if !hasProjectSlash {
144
-
writeRedirect(w, http.StatusFound, r.URL.Path+"/")
145
-
return nil
135
+
if IsValidProjectName(projectName) {
136
+
var projectManifest *Manifest
137
+
var projectMetadata ManifestMetadata
138
+
projectManifest, projectMetadata, err = backend.GetManifest(
139
+
r.Context(), makeWebRoot(host, projectName),
140
+
GetManifestOptions{BypassCache: bypassCache},
141
+
)
142
+
if err == nil {
143
+
if !hasProjectSlash {
144
+
writeRedirect(w, http.StatusFound, r.URL.Path+"/")
145
+
return nil
146
+
}
147
+
sitePath, manifest, metadata = projectPath, projectManifest, projectMetadata
146
148
}
147
-
sitePath, manifest, metadata = projectPath, projectManifest, projectMetadata
148
149
}
149
150
}
150
151
if manifest == nil && (err == nil || errors.Is(err, ErrObjectNotFound)) {
···
214
215
215
216
// we only offer `/.git-pages/archive.tar` and not the `.tar.gz`/`.tar.zst` variants
216
217
// because HTTP can already request compression using the `Content-Encoding` mechanism
217
-
acceptedEncodings := ParseHTTPAcceptEncoding(r.Header.Get("Accept-Encoding"))
218
+
acceptedEncodings := ParseAcceptEncodingHeader(r.Header.Get("Accept-Encoding"))
219
+
w.Header().Add("Vary", "Accept-Encoding")
218
220
negotiated := acceptedEncodings.Negotiate("zstd", "gzip", "identity")
219
221
if negotiated != "" {
220
222
w.Header().Set("Content-Encoding", negotiated)
···
245
247
entryPath := sitePath
246
248
entry := (*Entry)(nil)
247
249
appliedRedirect := false
248
-
status := 200
250
+
status := http.StatusOK
249
251
reader := io.ReadSeeker(nil)
250
252
mtime := time.Time{}
251
253
for {
···
263
265
redirectKind = RedirectForce
264
266
}
265
267
originalURL := (&url.URL{Host: r.Host}).ResolveReference(r.URL)
266
-
redirectURL, redirectStatus := ApplyRedirectRules(manifest, originalURL, redirectKind)
268
+
_, redirectURL, redirectStatus := ApplyRedirectRules(manifest, originalURL, redirectKind)
267
269
if Is3xxHTTPStatus(redirectStatus) {
268
270
writeRedirect(w, redirectStatus, redirectURL.String())
269
271
return nil
···
277
279
}
278
280
}
279
281
if entry == nil || entry.GetType() == Type_InvalidEntry {
280
-
status = 404
282
+
status = http.StatusNotFound
281
283
if entryPath != notFoundPage {
282
284
entryPath = notFoundPage
283
285
continue
···
293
295
w.WriteHeader(http.StatusNotModified)
294
296
return nil
295
297
} else {
296
-
reader, _, mtime, err = backend.GetBlob(r.Context(), string(entry.Data))
298
+
var metadata BlobMetadata
299
+
reader, metadata, err = backend.GetBlob(r.Context(), string(entry.Data))
297
300
if err != nil {
298
301
ObserveError(err) // all storage errors must be reported
299
302
w.WriteHeader(http.StatusInternalServerError)
300
303
fmt.Fprintf(w, "internal server error: %s\n", err)
301
304
return err
302
305
}
306
+
mtime = metadata.LastModified
303
307
w.Header().Set("ETag", etag)
304
308
}
305
309
} else if entry.GetType() == Type_Directory {
···
322
326
defer closer.Close()
323
327
}
324
328
325
-
offeredEncodings := []string{}
326
-
acceptedEncodings := ParseHTTPAcceptEncoding(r.Header.Get("Accept-Encoding"))
329
+
var offeredEncodings []string
330
+
acceptedEncodings := ParseAcceptEncodingHeader(r.Header.Get("Accept-Encoding"))
331
+
w.Header().Add("Vary", "Accept-Encoding")
327
332
negotiatedEncoding := true
328
333
switch entry.GetTransform() {
329
334
case Transform_Identity:
···
379
384
if !negotiatedEncoding {
380
385
w.Header().Set("Accept-Encoding", strings.Join(offeredEncodings, ", "))
381
386
w.WriteHeader(http.StatusNotAcceptable)
382
-
return fmt.Errorf("no supported content encodings (Accept-Encoding: %q)",
387
+
return fmt.Errorf("no supported content encodings (Accept-Encoding: %s)",
383
388
r.Header.Get("Accept-Encoding"))
384
389
}
385
390
···
414
419
io.Copy(w, reader)
415
420
}
416
421
} else {
417
-
// consider content fresh for 60 seconds (the same as the freshness interval of
418
-
// manifests in the S3 backend), and use stale content anyway as long as it's not
419
-
// older than a hour; while it is cheap to handle If-Modified-Since queries
420
-
// server-side, on the client `max-age=0, must-revalidate` causes every resource
421
-
// to block the page load every time
422
-
w.Header().Set("Cache-Control", "max-age=60, stale-while-revalidate=3600")
423
-
// see https://web.dev/articles/stale-while-revalidate for details
422
+
if _, hasCacheControl := w.Header()["Cache-Control"]; !hasCacheControl {
423
+
// consider content fresh for 60 seconds (the same as the freshness interval of
424
+
// manifests in the S3 backend), and use stale content anyway as long as it's not
425
+
// older than a hour; while it is cheap to handle If-Modified-Since queries
426
+
// server-side, on the client `max-age=0, must-revalidate` causes every resource
427
+
// to block the page load every time
428
+
w.Header().Set("Cache-Control", "max-age=60, stale-while-revalidate=3600")
429
+
// see https://web.dev/articles/stale-while-revalidate for details
430
+
}
424
431
425
432
// http.ServeContent handles conditional requests and range requests
426
433
http.ServeContent(w, r, entryPath, mtime, reader)
···
506
513
result = UpdateFromArchive(ctx, webRoot, contentType, reader)
507
514
}
508
515
509
-
return reportUpdateResult(w, result)
516
+
return reportUpdateResult(w, r, result)
510
517
}
511
518
512
519
func patchPage(w http.ResponseWriter, r *http.Request) error {
···
569
576
contentType := getMediaType(r.Header.Get("Content-Type"))
570
577
reader := http.MaxBytesReader(w, r.Body, int64(config.Limits.MaxSiteSize.Bytes()))
571
578
result := PartialUpdateFromArchive(ctx, webRoot, contentType, reader, parents)
572
-
return reportUpdateResult(w, result)
579
+
return reportUpdateResult(w, r, result)
573
580
}
574
581
575
-
func reportUpdateResult(w http.ResponseWriter, result UpdateResult) error {
582
+
func reportUpdateResult(w http.ResponseWriter, r *http.Request, result UpdateResult) error {
583
+
var unresolvedRefErr UnresolvedRefError
584
+
if result.outcome == UpdateError && errors.As(result.err, &unresolvedRefErr) {
585
+
offeredContentTypes := []string{"text/plain", "application/vnd.git-pages.unresolved"}
586
+
acceptedContentTypes := ParseAcceptHeader(r.Header.Get("Accept"))
587
+
switch acceptedContentTypes.Negotiate(offeredContentTypes...) {
588
+
default:
589
+
w.Header().Set("Accept", strings.Join(offeredContentTypes, ", "))
590
+
w.WriteHeader(http.StatusNotAcceptable)
591
+
return fmt.Errorf("no supported content types (Accept: %s)", r.Header.Get("Accept"))
592
+
case "application/vnd.git-pages.unresolved":
593
+
w.Header().Set("Content-Type", "application/vnd.git-pages.unresolved")
594
+
w.WriteHeader(http.StatusUnprocessableEntity)
595
+
for _, missingRef := range unresolvedRefErr.missing {
596
+
fmt.Fprintln(w, missingRef)
597
+
}
598
+
return nil
599
+
case "text/plain":
600
+
// handled below
601
+
}
602
+
}
603
+
576
604
switch result.outcome {
577
605
case UpdateError:
578
-
var unresolvedRefErr UnresolvedRefError
579
-
if errors.Is(result.err, ErrManifestTooLarge) {
580
-
w.WriteHeader(http.StatusRequestEntityTooLarge)
606
+
if errors.Is(result.err, ErrSiteTooLarge) {
607
+
w.WriteHeader(http.StatusUnprocessableEntity)
608
+
} else if errors.Is(result.err, ErrManifestTooLarge) {
609
+
w.WriteHeader(http.StatusUnprocessableEntity)
581
610
} else if errors.Is(result.err, errArchiveFormat) {
582
611
w.WriteHeader(http.StatusUnsupportedMediaType)
583
612
} else if errors.Is(result.err, ErrArchiveTooLarge) {
584
613
w.WriteHeader(http.StatusRequestEntityTooLarge)
614
+
} else if errors.Is(result.err, ErrRepositoryTooLarge) {
615
+
w.WriteHeader(http.StatusUnprocessableEntity)
585
616
} else if errors.Is(result.err, ErrMalformedPatch) {
586
617
w.WriteHeader(http.StatusUnprocessableEntity)
587
618
} else if errors.Is(result.err, ErrPreconditionFailed) {
···
741
772
result := UpdateFromRepository(ctx, webRoot, repoURL, auth.branch)
742
773
resultChan <- result
743
774
observeSiteUpdate("webhook", &result)
744
-
}(context.Background())
775
+
}(context.WithoutCancel(r.Context()))
745
776
746
777
var result UpdateResult
747
778
select {
···
782
813
783
814
func ServePages(w http.ResponseWriter, r *http.Request) {
784
815
r = r.WithContext(WithPrincipal(r.Context()))
785
-
if config.Audit.IncludeIPs {
786
-
if ipAddress, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
787
-
GetPrincipal(r.Context()).IpAddress = proto.String(ipAddress)
788
-
}
816
+
if config.Audit.IncludeIPs != "" {
817
+
GetPrincipal(r.Context()).IpAddress = proto.String(r.RemoteAddr)
789
818
}
790
819
// We want upstream health checks to be done as closely to the normal flow as possible;
791
820
// any intentional deviation is an opportunity to miss an issue that will affect our
792
821
// visitors but not our health checks.
793
822
if r.Header.Get("Health-Check") == "" {
794
-
logc.Println(r.Context(), "pages:", r.Method, r.Host, r.URL, r.Header.Get("Content-Type"))
823
+
var mediaType string
824
+
switch r.Method {
825
+
case "HEAD", "GET":
826
+
mediaType = r.Header.Get("Accept")
827
+
default:
828
+
mediaType = r.Header.Get("Content-Type")
829
+
}
830
+
logc.Println(r.Context(), "pages:", r.Method, r.Host, r.URL, mediaType)
795
831
if region := os.Getenv("FLY_REGION"); region != "" {
796
832
machine_id := os.Getenv("FLY_MACHINE_ID")
797
833
w.Header().Add("Server", fmt.Sprintf("git-pages (fly.io; %s; %s)", region, machine_id))
···
813
849
err := error(nil)
814
850
switch r.Method {
815
851
// REST API
816
-
case http.MethodOptions:
852
+
case "OPTIONS":
817
853
// no preflight options
818
-
case http.MethodHead, http.MethodGet:
854
+
case "HEAD", "GET":
819
855
err = getPage(w, r)
820
-
case http.MethodPut:
856
+
case "PUT":
821
857
err = putPage(w, r)
822
-
case http.MethodPatch:
858
+
case "PATCH":
823
859
err = patchPage(w, r)
824
-
case http.MethodDelete:
860
+
case "DELETE":
825
861
err = deletePage(w, r)
826
862
// webhook API
827
-
case http.MethodPost:
863
+
case "POST":
828
864
err = postPage(w, r)
829
865
default:
830
866
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
+55
src/pages_test.go
+55
src/pages_test.go
···
1
+
package git_pages
2
+
3
+
import (
4
+
"net/http"
5
+
"strings"
6
+
"testing"
7
+
)
8
+
9
+
func checkHost(t *testing.T, host string, expectOk string, expectErr string) {
10
+
host, err := GetHost(&http.Request{Host: host})
11
+
if expectErr != "" {
12
+
if err == nil || !strings.HasPrefix(err.Error(), expectErr) {
13
+
t.Errorf("%s: expect err %s, got err %s", host, expectErr, err)
14
+
}
15
+
}
16
+
if expectOk != "" {
17
+
if err != nil {
18
+
t.Errorf("%s: expect ok %s, got err %s", host, expectOk, err)
19
+
} else if host != expectOk {
20
+
t.Errorf("%s: expect ok %s, got ok %s", host, expectOk, host)
21
+
}
22
+
}
23
+
}
24
+
25
+
func TestHelloName(t *testing.T) {
26
+
config = &Config{Features: []string{}}
27
+
28
+
checkHost(t, "foo.bar", "foo.bar", "")
29
+
checkHost(t, "foo-baz.bar", "foo-baz.bar", "")
30
+
checkHost(t, "foo--baz.bar", "foo--baz.bar", "")
31
+
checkHost(t, "foo.bar.", "foo.bar", "")
32
+
checkHost(t, ".foo.bar", "", "reserved host name")
33
+
checkHost(t, "..foo.bar", "", "reserved host name")
34
+
35
+
checkHost(t, "ร.bar", "xn--zca.bar", "")
36
+
checkHost(t, "xn--zca.bar", "xn--zca.bar", "")
37
+
38
+
checkHost(t, "foo-.bar", "", "malformed host name")
39
+
checkHost(t, "-foo.bar", "", "malformed host name")
40
+
checkHost(t, "foo_.bar", "", "malformed host name")
41
+
checkHost(t, "_foo.bar", "", "malformed host name")
42
+
checkHost(t, "foo_baz.bar", "", "malformed host name")
43
+
checkHost(t, "foo__baz.bar", "", "malformed host name")
44
+
checkHost(t, "*.foo.bar", "", "malformed host name")
45
+
46
+
config = &Config{Features: []string{"relaxed-idna"}}
47
+
48
+
checkHost(t, "foo-.bar", "", "malformed host name")
49
+
checkHost(t, "-foo.bar", "", "malformed host name")
50
+
checkHost(t, "foo_.bar", "foo_.bar", "")
51
+
checkHost(t, "_foo.bar", "", "reserved host name")
52
+
checkHost(t, "foo_baz.bar", "foo_baz.bar", "")
53
+
checkHost(t, "foo__baz.bar", "foo__baz.bar", "")
54
+
checkHost(t, "*.foo.bar", "", "malformed host name")
55
+
}
+58
-14
src/redirects.go
+58
-14
src/redirects.go
···
13
13
14
14
const RedirectsFileName string = "_redirects"
15
15
16
-
func unparseRule(rule redirects.Rule) string {
16
+
// Converts our Protobuf representation to tj/go-redirects.
17
+
func exportRedirectRule(rule *RedirectRule) *redirects.Rule {
18
+
return &redirects.Rule{
19
+
From: rule.GetFrom(),
20
+
To: rule.GetTo(),
21
+
Status: int(rule.GetStatus()),
22
+
Force: rule.GetForce(),
23
+
}
24
+
}
25
+
26
+
func unparseRedirectRule(rule *redirects.Rule) string {
17
27
var statusPart string
18
28
if rule.Force {
19
29
statusPart = fmt.Sprintf("%d!", rule.Status)
···
49
59
return status >= 300 && status <= 399
50
60
}
51
61
52
-
func validateRedirectRule(rule redirects.Rule) error {
62
+
func validateRedirectRule(rule *redirects.Rule) error {
53
63
if len(rule.Params) > 0 {
54
64
return fmt.Errorf("rules with parameters are not supported")
55
65
}
···
103
113
}
104
114
105
115
for index, rule := range rules {
106
-
if err := validateRedirectRule(rule); err != nil {
116
+
if err := validateRedirectRule(&rule); err != nil {
107
117
AddProblem(manifest, RedirectsFileName,
108
-
"rule #%d %q: %s", index+1, unparseRule(rule), err)
118
+
"rule #%d %q: %s", index+1, unparseRedirectRule(&rule), err)
109
119
continue
110
120
}
111
121
manifest.Redirects = append(manifest.Redirects, &RedirectRule{
···
121
131
func CollectRedirectsFile(manifest *Manifest) string {
122
132
var rules []string
123
133
for _, rule := range manifest.GetRedirects() {
124
-
rules = append(rules, unparseRule(redirects.Rule{
125
-
From: rule.GetFrom(),
126
-
To: rule.GetTo(),
127
-
Status: int(rule.GetStatus()),
128
-
Force: rule.GetForce(),
129
-
})+"\n")
134
+
rules = append(rules, unparseRedirectRule(exportRedirectRule(rule))+"\n")
130
135
}
131
136
return strings.Join(rules, "")
132
137
}
···
147
152
148
153
const (
149
154
RedirectAny RedirectKind = iota
155
+
RedirectNormal
150
156
RedirectForce
151
157
)
152
158
153
159
func ApplyRedirectRules(
154
160
manifest *Manifest, fromURL *url.URL, kind RedirectKind,
155
161
) (
156
-
toURL *url.URL, status int,
162
+
rule *RedirectRule, toURL *url.URL, status int,
157
163
) {
158
164
fromSegments := pathSegments(fromURL.Path)
159
165
next:
160
-
for _, rule := range manifest.Redirects {
161
-
if kind == RedirectForce && !*rule.Force {
166
+
for _, rule = range manifest.Redirects {
167
+
switch {
168
+
case kind == RedirectNormal && *rule.Force:
169
+
continue
170
+
case kind == RedirectForce && !*rule.Force:
162
171
continue
163
172
}
164
173
// check if the rule matches fromURL
···
205
214
RawQuery: fromURL.RawQuery,
206
215
}
207
216
status = int(*rule.Status)
208
-
break
217
+
return
209
218
}
210
219
// no redirect found
220
+
rule = nil
211
221
return
212
222
}
223
+
224
+
func redirectHasSplat(rule *RedirectRule) bool {
225
+
ruleFromURL, _ := url.Parse(*rule.From) // pre-validated in `validateRedirectRule`
226
+
ruleFromSegments := pathSegments(ruleFromURL.Path)
227
+
return slices.Contains(ruleFromSegments, "*")
228
+
}
229
+
230
+
func LintRedirects(manifest *Manifest) {
231
+
for name, entry := range manifest.GetContents() {
232
+
nameURL, err := url.Parse("/" + name)
233
+
if err != nil {
234
+
continue
235
+
}
236
+
237
+
// Check if the entry URL would trigger a non-forced redirect if the entry didn't exist.
238
+
// If the redirect matches exactly one URL (i.e. has no splat) then it will never be
239
+
// triggered and an issue is reported; if the rule has a splat, it will always be possible
240
+
// to trigger it, as it matches an infinite number of URLs.
241
+
rule, _, _ := ApplyRedirectRules(manifest, nameURL, RedirectNormal)
242
+
if rule != nil && !redirectHasSplat(rule) {
243
+
entryDesc := "file"
244
+
if entry.GetType() == Type_Directory {
245
+
entryDesc = "directory"
246
+
}
247
+
AddProblem(manifest, name,
248
+
"%s shadows redirect %q; remove the %s or use a %d! forced redirect instead",
249
+
entryDesc,
250
+
unparseRedirectRule(exportRedirectRule(rule)),
251
+
entryDesc,
252
+
rule.GetStatus(),
253
+
)
254
+
}
255
+
}
256
+
}
+29
src/signal.go
+29
src/signal.go
···
1
+
// See https://pkg.go.dev/os/signal#hdr-Windows for a description of what this module
2
+
// will do on Windows (tl;dr nothing calls the reload handler, the interrupt handler works
3
+
// more or less how you'd expect).
4
+
5
+
package git_pages
6
+
7
+
import (
8
+
"os"
9
+
"os/signal"
10
+
"syscall"
11
+
)
12
+
13
+
// OnReload invokes handler each time the process receives SIGHUP.
// The watcher goroutine runs for the lifetime of the process.
func OnReload(handler func()) {
	reload := make(chan os.Signal, 1)
	signal.Notify(reload, syscall.SIGHUP)
	go func() {
		// The channel is never closed, so this loops forever.
		for range reload {
			handler()
		}
	}()
}
23
+
24
+
// WaitForInterrupt blocks until the process receives SIGINT or SIGTERM,
// then restores the default disposition for those signals.
func WaitForInterrupt() {
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	<-interrupt
}
-13
src/signal_other.go
-13
src/signal_other.go
-27
src/signal_posix.go
-27
src/signal_posix.go
···
1
-
//go:build unix
2
-
3
-
package git_pages
4
-
5
-
import (
6
-
"os"
7
-
"os/signal"
8
-
"syscall"
9
-
)
10
-
11
-
func OnReload(handler func()) {
12
-
sighup := make(chan os.Signal, 1)
13
-
signal.Notify(sighup, syscall.SIGHUP)
14
-
go func() {
15
-
for {
16
-
<-sighup
17
-
handler()
18
-
}
19
-
}()
20
-
}
21
-
22
-
func WaitForInterrupt() {
23
-
sigint := make(chan os.Signal, 1)
24
-
signal.Notify(sigint, syscall.SIGINT)
25
-
<-sigint
26
-
signal.Stop(sigint)
27
-
}
+9
-1
src/update.go
+9
-1
src/update.go
···
182
182
// `*Manifest` objects, which should never be mutated.
183
183
newManifest := &Manifest{}
184
184
proto.Merge(newManifest, oldManifest)
185
+
newManifest.RepoUrl = nil
186
+
newManifest.Branch = nil
187
+
newManifest.Commit = nil
185
188
if err := ApplyTarPatch(newManifest, reader, parents); err != nil {
186
189
return nil, err
187
190
} else {
···
226
229
}
227
230
228
231
func observeUpdateResult(result UpdateResult) {
229
-
if result.err != nil {
232
+
var unresolvedRefErr UnresolvedRefError
233
+
if errors.As(result.err, &unresolvedRefErr) {
234
+
// This error is an expected outcome of an incremental update's probe phase.
235
+
} else if errors.Is(result.err, ErrWriteConflict) {
236
+
// This error is an expected outcome of an incremental update losing a race.
237
+
} else if result.err != nil {
230
238
ObserveError(result.err)
231
239
}
232
240
}