// Forked from whitequark.org/git-pages, with modifications for tangled.
1package git_pages
2
3import (
4 "bytes"
5 "compress/gzip"
6 "context"
7 "encoding/json"
8 "errors"
9 "fmt"
10 "io"
11 "maps"
12 "net/http"
13 "net/url"
14 "os"
15 "path"
16 "slices"
17 "strconv"
18 "strings"
19 "time"
20
21 "github.com/klauspost/compress/zstd"
22 "github.com/pquerna/cachecontrol/cacheobject"
23 "github.com/prometheus/client_golang/prometheus"
24 "github.com/prometheus/client_golang/prometheus/promauto"
25)
26
// notFoundPage is the site-provided fallback page served when a path is missing.
const notFoundPage = "404.html"

var (
	// serveEncodingCount tracks how stored blob transforms map onto the
	// Content-Encoding negotiated with clients (including negotiation failures).
	serveEncodingCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "git_pages_serve_encoding_count",
		Help: "Count of blob transform vs negotiated encoding",
	}, []string{"transform", "negotiated"})

	// siteUpdatesCount counts every update attempt, labeled by the channel
	// that triggered it (e.g. "rest" or "webhook").
	siteUpdatesCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "git_pages_site_updates",
		Help: "Count of site updates in total",
	}, []string{"via"})
	// siteUpdateOkCount counts successful updates, labeled by outcome
	// (no-change, created, replaced, deleted).
	siteUpdateOkCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "git_pages_site_update_ok",
		Help: "Count of successful site updates",
	}, []string{"outcome"})
	// siteUpdateErrorCount counts failed updates, labeled by cause
	// (timeout vs other).
	siteUpdateErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "git_pages_site_update_error",
		Help: "Count of failed site updates",
	}, []string{"cause"})
)
48
49func reportSiteUpdate(via string, result *UpdateResult) {
50 siteUpdatesCount.With(prometheus.Labels{"via": via}).Inc()
51
52 switch result.outcome {
53 case UpdateError:
54 siteUpdateErrorCount.With(prometheus.Labels{"cause": "other"}).Inc()
55 case UpdateTimeout:
56 siteUpdateErrorCount.With(prometheus.Labels{"cause": "timeout"}).Inc()
57 case UpdateNoChange:
58 siteUpdateOkCount.With(prometheus.Labels{"outcome": "no-change"}).Inc()
59 case UpdateCreated:
60 siteUpdateOkCount.With(prometheus.Labels{"outcome": "created"}).Inc()
61 case UpdateReplaced:
62 siteUpdateOkCount.With(prometheus.Labels{"outcome": "replaced"}).Inc()
63 case UpdateDeleted:
64 siteUpdateOkCount.With(prometheus.Labels{"outcome": "deleted"}).Inc()
65 }
66}
67
// makeWebRoot derives the storage key ("web root") for a project: the
// lowercased host name joined to the project name with a slash.
func makeWebRoot(host string, projectName string) string {
	return strings.ToLower(host) + "/" + projectName
}
71
// writeRedirect emits an HTTP redirect with the given status code and
// Location target, plus a short plain-text body for non-browser clients.
func writeRedirect(w http.ResponseWriter, code int, path string) {
	hdr := w.Header()
	hdr.Set("Location", path)
	w.WriteHeader(code)
	io.WriteString(w, "see "+path+"\n")
}
77
// The `klauspost/compress/zstd` package recommends reusing a decompressor to avoid repeated
// allocations of internal buffers. Created with a nil reader since it is only
// used through DecodeAll (buffer-to-buffer decompression).
var zstdDecoder, _ = zstd.NewReader(nil)
81
82func getPage(w http.ResponseWriter, r *http.Request) error {
83 var err error
84 var sitePath string
85 var manifest *Manifest
86 var manifestMtime time.Time
87
88 cacheControl, err := cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control"))
89 if err != nil {
90 cacheControl = &cacheobject.RequestCacheDirectives{
91 MaxAge: -1,
92 MaxStale: -1,
93 MinFresh: -1,
94 }
95 }
96
97 bypassCache := cacheControl.NoCache || cacheControl.MaxAge == 0
98
99 host, err := GetHost(r)
100 if err != nil {
101 return err
102 }
103
104 type indexManifestResult struct {
105 manifest *Manifest
106 manifestMtime time.Time
107 err error
108 }
109 indexManifestCh := make(chan indexManifestResult, 1)
110 go func() {
111 manifest, mtime, err := backend.GetManifest(
112 r.Context(), makeWebRoot(host, ".index"),
113 GetManifestOptions{BypassCache: bypassCache},
114 )
115 indexManifestCh <- (indexManifestResult{manifest, mtime, err})
116 }()
117
118 err = nil
119 sitePath = strings.TrimPrefix(r.URL.Path, "/")
120 if projectName, projectPath, hasProjectSlash := strings.Cut(sitePath, "/"); projectName != "" {
121 var projectManifest *Manifest
122 var projectManifestMtime time.Time
123 projectManifest, projectManifestMtime, err = backend.GetManifest(
124 r.Context(), makeWebRoot(host, projectName),
125 GetManifestOptions{BypassCache: bypassCache},
126 )
127 if err == nil {
128 if !hasProjectSlash {
129 writeRedirect(w, http.StatusFound, r.URL.Path+"/")
130 return nil
131 }
132 sitePath, manifest, manifestMtime = projectPath, projectManifest, projectManifestMtime
133 }
134 }
135 if manifest == nil && (err == nil || errors.Is(err, ErrObjectNotFound)) {
136 result := <-indexManifestCh
137 manifest, manifestMtime, err = result.manifest, result.manifestMtime, result.err
138 if manifest == nil && errors.Is(err, ErrObjectNotFound) {
139 if fallback != nil {
140 logc.Printf(r.Context(), "fallback: %s via %s", host, config.Fallback.ProxyTo)
141 fallback.ServeHTTP(w, r)
142 return nil
143 } else {
144 w.WriteHeader(http.StatusNotFound)
145 fmt.Fprintf(w, "site not found\n")
146 return err
147 }
148 }
149 }
150 if err != nil {
151 ObserveError(err) // all storage errors must be reported
152 w.WriteHeader(http.StatusInternalServerError)
153 fmt.Fprintf(w, "internal server error (%s)\n", err)
154 return err
155 }
156
157 if r.Header.Get("Origin") != "" {
158 // allow JavaScript code to access responses (including errors) even across origins
159 w.Header().Set("Access-Control-Allow-Origin", "*")
160 }
161
162 if sitePath == ".git-pages" {
163 // metadata directory name shouldn't be served even if present in site manifest
164 w.WriteHeader(http.StatusNotFound)
165 fmt.Fprintf(w, "not found\n")
166 return nil
167 }
168 if metadataPath, found := strings.CutPrefix(sitePath, ".git-pages/"); found {
169 lastModified := manifestMtime.UTC().Format(http.TimeFormat)
170 switch {
171 case metadataPath == "health":
172 w.Header().Add("Last-Modified", lastModified)
173 w.WriteHeader(http.StatusOK)
174 fmt.Fprintf(w, "ok\n")
175 return nil
176
177 case metadataPath == "manifest.json":
178 // metadata requests require authorization to avoid making pushes from private
179 // repositories enumerable
180 _, err := AuthorizeMetadataRetrieval(r)
181 if err != nil {
182 return err
183 }
184
185 w.Header().Add("Content-Type", "application/json; charset=utf-8")
186 w.Header().Add("Last-Modified", lastModified)
187 w.WriteHeader(http.StatusOK)
188 w.Write([]byte(ManifestDebugJSON(manifest)))
189 return nil
190
191 case metadataPath == "archive.tar" && config.Feature("archive-site"):
192 // same as above
193 _, err := AuthorizeMetadataRetrieval(r)
194 if err != nil {
195 return err
196 }
197
198 // we only offer `/.git-pages/archive.tar` and not the `.tar.gz`/`.tar.zst` variants
199 // because HTTP can already request compression using the `Content-Encoding` mechanism
200 acceptedEncodings := parseHTTPEncodings(r.Header.Get("Accept-Encoding"))
201 negotiated := acceptedEncodings.Negotiate("zstd", "gzip", "identity")
202 if negotiated != "" {
203 w.Header().Set("Content-Encoding", negotiated)
204 }
205 w.Header().Add("Content-Type", "application/x-tar")
206 w.Header().Add("Last-Modified", lastModified)
207 w.Header().Add("Transfer-Encoding", "chunked")
208 w.WriteHeader(http.StatusOK)
209 var iow io.Writer
210 switch negotiated {
211 case "", "identity":
212 iow = w
213 case "gzip":
214 iow = gzip.NewWriter(w)
215 case "zstd":
216 iow, _ = zstd.NewWriter(w)
217 }
218 return CollectTar(r.Context(), iow, manifest, manifestMtime)
219
220 default:
221 w.WriteHeader(http.StatusNotFound)
222 fmt.Fprintf(w, "not found\n")
223 return nil
224 }
225 }
226
227 entryPath := sitePath
228 entry := (*Entry)(nil)
229 appliedRedirect := false
230 status := 200
231 reader := io.ReadSeeker(nil)
232 mtime := time.Time{}
233 for {
234 entryPath, _ = strings.CutSuffix(entryPath, "/")
235 entryPath, err = ExpandSymlinks(manifest, entryPath)
236 if err != nil {
237 w.WriteHeader(http.StatusInternalServerError)
238 fmt.Fprintln(w, err)
239 return err
240 }
241 entry = manifest.Contents[entryPath]
242 if !appliedRedirect {
243 redirectKind := RedirectAny
244 if entry != nil && entry.GetType() != Type_Invalid {
245 redirectKind = RedirectForce
246 }
247 originalURL := (&url.URL{Host: r.Host}).ResolveReference(r.URL)
248 redirectURL, redirectStatus := ApplyRedirectRules(manifest, originalURL, redirectKind)
249 if Is3xxHTTPStatus(redirectStatus) {
250 writeRedirect(w, redirectStatus, redirectURL.String())
251 return nil
252 } else if redirectURL != nil {
253 entryPath = strings.TrimPrefix(redirectURL.Path, "/")
254 status = int(redirectStatus)
255 // Apply user redirects at most once; if something ends in a loop, it should be
256 // the user agent, not the pages server.
257 appliedRedirect = true
258 continue
259 }
260 }
261 if entry == nil || entry.GetType() == Type_Invalid {
262 status = 404
263 if entryPath != notFoundPage {
264 entryPath = notFoundPage
265 continue
266 } else {
267 reader = bytes.NewReader([]byte("not found\n"))
268 break
269 }
270 } else if entry.GetType() == Type_InlineFile {
271 reader = bytes.NewReader(entry.Data)
272 } else if entry.GetType() == Type_ExternalFile {
273 etag := fmt.Sprintf(`"%s"`, entry.Data)
274 if r.Header.Get("If-None-Match") == etag {
275 w.WriteHeader(http.StatusNotModified)
276 return nil
277 } else {
278 reader, _, mtime, err = backend.GetBlob(r.Context(), string(entry.Data))
279 if err != nil {
280 ObserveError(err) // all storage errors must be reported
281 w.WriteHeader(http.StatusInternalServerError)
282 fmt.Fprintf(w, "internal server error: %s\n", err)
283 return err
284 }
285 w.Header().Set("ETag", etag)
286 }
287 } else if entry.GetType() == Type_Directory {
288 if strings.HasSuffix(r.URL.Path, "/") {
289 entryPath = path.Join(entryPath, "index.html")
290 continue
291 } else {
292 // redirect from `dir` to `dir/`, otherwise when `dir/index.html` is served,
293 // links in it will have the wrong base URL
294 newPath := r.URL.Path + "/"
295 writeRedirect(w, http.StatusFound, newPath)
296 return nil
297 }
298 } else if entry.GetType() == Type_Symlink {
299 return fmt.Errorf("unexpected symlink")
300 }
301 break
302 }
303 if closer, ok := reader.(io.Closer); ok {
304 defer closer.Close()
305 }
306
307 acceptedEncodings := parseHTTPEncodings(r.Header.Get("Accept-Encoding"))
308 negotiatedEncoding := true
309 switch entry.GetTransform() {
310 case Transform_Identity:
311 switch acceptedEncodings.Negotiate("identity") {
312 case "identity":
313 serveEncodingCount.
314 With(prometheus.Labels{"transform": "identity", "negotiated": "identity"}).
315 Inc()
316 default:
317 negotiatedEncoding = false
318 serveEncodingCount.
319 With(prometheus.Labels{"transform": "identity", "negotiated": "failure"}).
320 Inc()
321 }
322 case Transform_Zstd:
323 supported := []string{"zstd", "identity"}
324 if entry.ContentType == nil {
325 // If Content-Type is unset, `http.ServeContent` will try to sniff
326 // the file contents. That won't work if it's compressed.
327 supported = []string{"identity"}
328 }
329 switch acceptedEncodings.Negotiate(supported...) {
330 case "zstd":
331 // Set Content-Length ourselves since `http.ServeContent` only sets
332 // it if Content-Encoding is unset or if it's a range request.
333 w.Header().Set("Content-Length", strconv.FormatInt(entry.GetCompressedSize(), 10))
334 w.Header().Set("Content-Encoding", "zstd")
335 serveEncodingCount.
336 With(prometheus.Labels{"transform": "zstd", "negotiated": "zstd"}).
337 Inc()
338 case "identity":
339 compressedData, _ := io.ReadAll(reader)
340 decompressedData, err := zstdDecoder.DecodeAll(compressedData, []byte{})
341 if err != nil {
342 w.WriteHeader(http.StatusInternalServerError)
343 fmt.Fprintf(w, "internal server error: %s\n", err)
344 return err
345 }
346 reader = bytes.NewReader(decompressedData)
347 serveEncodingCount.
348 With(prometheus.Labels{"transform": "zstd", "negotiated": "identity"}).
349 Inc()
350 default:
351 negotiatedEncoding = false
352 serveEncodingCount.
353 With(prometheus.Labels{"transform": "zstd", "negotiated": "failure"}).
354 Inc()
355 }
356 default:
357 return fmt.Errorf("unexpected transform")
358 }
359 if !negotiatedEncoding {
360 w.WriteHeader(http.StatusNotAcceptable)
361 return fmt.Errorf("no supported content encodings (accept-encoding: %q)",
362 r.Header.Get("Accept-Encoding"))
363 }
364
365 if entry != nil && entry.ContentType != nil {
366 w.Header().Set("X-Content-Type-Options", "nosniff")
367 w.Header().Set("Content-Type", *entry.ContentType)
368 }
369
370 customHeaders, err := ApplyHeaderRules(manifest, &url.URL{Path: entryPath})
371 if err != nil {
372 // This is an "internal server error" from an HTTP point of view, but also
373 // either an issue with the site or a misconfiguration from our point of view.
374 // Since it's not a problem with the server we don't observe the error.
375 //
376 // Note that this behavior is different from a site upload with a malformed
377 // `_headers` file (where it is semantically ignored); this is because a broken
378 // upload is something the uploader can notice and fix, but a change in server
379 // configuration is something they are unaware of and won't be notified of.
380 w.WriteHeader(http.StatusInternalServerError)
381 fmt.Fprintf(w, "%s\n", err)
382 return err
383 } else {
384 // If the header has passed all of our stringent, deny-by-default checks, it means
385 // it's good enough to overwrite whatever was our builtin option (if any).
386 maps.Copy(w.Header(), customHeaders)
387 }
388
389 // decide on the HTTP status
390 if status != 200 {
391 w.WriteHeader(status)
392 if reader != nil {
393 io.Copy(w, reader)
394 }
395 } else {
396 // consider content fresh for 60 seconds (the same as the freshness interval of
397 // manifests in the S3 backend), and use stale content anyway as long as it's not
398 // older than a hour; while it is cheap to handle If-Modified-Since queries
399 // server-side, on the client `max-age=0, must-revalidate` causes every resource
400 // to block the page load every time
401 w.Header().Set("Cache-Control", "max-age=60, stale-while-revalidate=3600")
402 // see https://web.dev/articles/stale-while-revalidate for details
403
404 // http.ServeContent handles conditional requests and range requests
405 http.ServeContent(w, r, entryPath, mtime, reader)
406 }
407 return nil
408}
409
// checkDryRun reports whether the request is a "dry run". Dry-run requests
// (marked by a non-empty Dry-Run header) non-destructively check whether
// the request would have been authorized; when detected, a confirmation
// line is written to the response and the caller should stop processing.
func checkDryRun(w http.ResponseWriter, r *http.Request) bool {
	if r.Header.Get("Dry-Run") == "" {
		return false
	}
	fmt.Fprintln(w, "dry-run ok")
	return true
}
419
// putPage handles PUT requests, creating or updating the site for the
// requested host/project. Two input forms are accepted, chosen by the
// request media type:
//   - application/x-www-form-urlencoded: the body is a git repository URL;
//     the site is built from the given branch (default "pages").
//   - anything else: the body is a site archive of that media type.
//
// The HTTP status and an Update-Result header report the outcome; the body
// carries the deployed commit (or "(archive)") and any problem report.
func putPage(w http.ResponseWriter, r *http.Request) error {
	var result UpdateResult

	host, err := GetHost(r)
	if err != nil {
		return err
	}

	projectName, err := GetProjectName(r)
	if err != nil {
		return err
	}

	webRoot := makeWebRoot(host, projectName)

	// Bound the whole update by the configured timeout.
	updateCtx, cancel := context.WithTimeout(r.Context(), time.Duration(config.Limits.UpdateTimeout))
	defer cancel()

	contentType := getMediaType(r.Header.Get("Content-Type"))
	switch contentType {
	case "application/x-www-form-urlencoded":
		// Body is a repository URL: update from a git repository.
		auth, err := AuthorizeUpdateFromRepository(r)
		if err != nil {
			return err
		}

		// URLs have no length limit, but 64K seems enough for a repository URL
		requestBody, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 65536))
		if err != nil {
			return fmt.Errorf("body read: %w", err)
		}

		repoURL := string(requestBody)
		if err := AuthorizeRepository(repoURL, auth); err != nil {
			return err
		}

		// The deployed branch defaults to "pages" and may be overridden with
		// the Branch header, subject to authorization.
		branch := "pages"
		if customBranch := r.Header.Get("Branch"); customBranch != "" {
			branch = customBranch
		}
		if err := AuthorizeBranch(branch, auth); err != nil {
			return err
		}

		if checkDryRun(w, r) {
			return nil
		}

		result = UpdateFromRepository(updateCtx, webRoot, repoURL, branch)

	default:
		// Any other media type: update from an uploaded archive.
		_, err := AuthorizeUpdateFromArchive(r)
		if err != nil {
			return err
		}

		if checkDryRun(w, r) {
			return nil
		}

		// request body contains archive
		reader := http.MaxBytesReader(w, r.Body, int64(config.Limits.MaxSiteSize.Bytes()))
		result = UpdateFromArchive(updateCtx, webRoot, contentType, reader)
	}

	// Map the update outcome onto an HTTP status / Update-Result header.
	switch result.outcome {
	case UpdateError:
		if errors.Is(result.err, ErrManifestTooLarge) {
			w.WriteHeader(http.StatusRequestEntityTooLarge)
		} else if errors.Is(result.err, errArchiveFormat) {
			w.WriteHeader(http.StatusUnsupportedMediaType)
		} else if errors.Is(result.err, ErrArchiveTooLarge) {
			w.WriteHeader(http.StatusRequestEntityTooLarge)
		} else if errors.Is(result.err, ErrDomainFrozen) {
			w.WriteHeader(http.StatusForbidden)
		} else {
			w.WriteHeader(http.StatusServiceUnavailable)
		}
	case UpdateTimeout:
		w.WriteHeader(http.StatusGatewayTimeout)
	case UpdateNoChange:
		w.Header().Add("Update-Result", "no-change")
	case UpdateCreated:
		w.Header().Add("Update-Result", "created")
	case UpdateReplaced:
		w.Header().Add("Update-Result", "replaced")
	case UpdateDeleted:
		w.Header().Add("Update-Result", "deleted")
	}
	// Response body: deployed commit (or "(archive)") plus problem report on
	// success; the error message on failure.
	if result.manifest != nil {
		if result.manifest.Commit != nil {
			fmt.Fprintln(w, *result.manifest.Commit)
		} else {
			fmt.Fprintln(w, "(archive)")
		}
		for _, problem := range GetProblemReport(result.manifest) {
			fmt.Fprintln(w, problem)
		}
	} else if result.err != nil {
		fmt.Fprintln(w, result.err)
	} else {
		fmt.Fprintln(w, "internal error")
	}
	reportSiteUpdate("rest", &result)
	return nil
}
527
528func deletePage(w http.ResponseWriter, r *http.Request) error {
529 _, err := AuthorizeUpdateFromRepository(r)
530 if err != nil {
531 return err
532 }
533
534 host, err := GetHost(r)
535 if err != nil {
536 return err
537 }
538
539 projectName, err := GetProjectName(r)
540 if err != nil {
541 return err
542 }
543
544 if checkDryRun(w, r) {
545 return nil
546 }
547
548 err = backend.DeleteManifest(r.Context(), makeWebRoot(host, projectName))
549 if err != nil {
550 w.WriteHeader(http.StatusInternalServerError)
551 } else {
552 w.Header().Add("Update-Result", "deleted")
553 w.WriteHeader(http.StatusOK)
554 }
555 if err != nil {
556 fmt.Fprintln(w, err)
557 }
558 return err
559}
560
561func postPage(w http.ResponseWriter, r *http.Request) error {
562 // Start a timer for the request timeout immediately.
563 requestTimeout := 3 * time.Second
564 requestTimer := time.NewTimer(requestTimeout)
565
566 auth, err := AuthorizeUpdateFromRepository(r)
567 if err != nil {
568 return err
569 }
570
571 host, err := GetHost(r)
572 if err != nil {
573 return err
574 }
575
576 projectName, err := GetProjectName(r)
577 if err != nil {
578 return err
579 }
580
581 webRoot := makeWebRoot(host, projectName)
582
583 eventName := ""
584 for _, header := range []string{
585 "X-Forgejo-Event",
586 "X-GitHub-Event",
587 "X-Gitea-Event",
588 "X-Gogs-Event",
589 } {
590 eventName = r.Header.Get(header)
591 if eventName != "" {
592 break
593 }
594 }
595
596 if eventName == "" {
597 http.Error(w,
598 "expected a Forgejo, GitHub, Gitea, or Gogs webhook request", http.StatusBadRequest)
599 return fmt.Errorf("event expected")
600 }
601
602 if eventName != "push" {
603 http.Error(w, "only push events are allowed", http.StatusBadRequest)
604 return fmt.Errorf("invalid event")
605 }
606
607 if r.Header.Get("Content-Type") != "application/json" {
608 http.Error(w, "only JSON payload is allowed", http.StatusBadRequest)
609 return fmt.Errorf("invalid content type")
610 }
611
612 // Event payloads have no length limit, but events bigger than 16M seem excessive.
613 requestBody, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 16*1048576))
614 if err != nil {
615 return fmt.Errorf("body read: %w", err)
616 }
617
618 var event struct {
619 Ref string `json:"ref"`
620 Repository struct {
621 CloneURL string `json:"clone_url"`
622 } `json:"repository"`
623 }
624 err = json.NewDecoder(bytes.NewReader(requestBody)).Decode(&event)
625 if err != nil {
626 http.Error(w, fmt.Sprintf("invalid request body: %s", err), http.StatusBadRequest)
627 return err
628 }
629
630 if event.Ref != fmt.Sprintf("refs/heads/%s", auth.branch) {
631 code := http.StatusUnauthorized
632 if strings.Contains(r.Header.Get("User-Agent"), "GitHub-Hookshot") {
633 // GitHub has no way to restrict branches for a webhook, and responding with 401
634 // for every non-pages branch makes the "Recent Deliveries" tab look awful.
635 code = http.StatusOK
636 }
637 http.Error(w,
638 fmt.Sprintf("ref %s not in allowlist [refs/heads/%v]", event.Ref, auth.branch),
639 code)
640 return nil
641 }
642
643 repoURL := event.Repository.CloneURL
644 if err := AuthorizeRepository(repoURL, auth); err != nil {
645 return err
646 }
647
648 if checkDryRun(w, r) {
649 return nil
650 }
651
652 resultChan := make(chan UpdateResult)
653 go func(ctx context.Context) {
654 ctx, cancel := context.WithTimeout(ctx, time.Duration(config.Limits.UpdateTimeout))
655 defer cancel()
656
657 result := UpdateFromRepository(ctx, webRoot, repoURL, auth.branch)
658 resultChan <- result
659 reportSiteUpdate("webhook", &result)
660 }(context.Background())
661
662 var result UpdateResult
663 select {
664 case result = <-resultChan:
665 case <-requestTimer.C:
666 w.WriteHeader(http.StatusAccepted)
667 fmt.Fprintf(w, "updating (taking longer than %s)", requestTimeout)
668 return nil
669 }
670
671 switch result.outcome {
672 case UpdateError:
673 w.WriteHeader(http.StatusServiceUnavailable)
674 fmt.Fprintf(w, "update error: %s\n", result.err)
675 case UpdateTimeout:
676 w.WriteHeader(http.StatusGatewayTimeout)
677 fmt.Fprintln(w, "update timeout")
678 case UpdateNoChange:
679 fmt.Fprintln(w, "unchanged")
680 case UpdateCreated:
681 fmt.Fprintln(w, "created")
682 case UpdateReplaced:
683 fmt.Fprintln(w, "replaced")
684 case UpdateDeleted:
685 fmt.Fprintln(w, "deleted")
686 }
687 if result.manifest != nil {
688 report := GetProblemReport(result.manifest)
689 if len(report) > 0 {
690 fmt.Fprintln(w, "problems:")
691 }
692 for _, problem := range report {
693 fmt.Fprintf(w, "- %s\n", problem)
694 }
695 }
696 return nil
697}
698
699func ServePages(w http.ResponseWriter, r *http.Request) {
700 // We want upstream health checks to be done as closely to the normal flow as possible;
701 // any intentional deviation is an opportunity to miss an issue that will affect our
702 // visitors but not our health checks.
703 if r.Header.Get("Health-Check") == "" {
704 logc.Println(r.Context(), "pages:", r.Method, r.Host, r.URL, r.Header.Get("Content-Type"))
705 if region := os.Getenv("FLY_REGION"); region != "" {
706 machine_id := os.Getenv("FLY_MACHINE_ID")
707 w.Header().Add("Server", fmt.Sprintf("git-pages (fly.io; %s; %s)", region, machine_id))
708 ObserveData(r.Context(), "server.name", machine_id, "server.region", region)
709 } else if hostname, err := os.Hostname(); err == nil {
710 if region := os.Getenv("PAGES_REGION"); region != "" {
711 w.Header().Add("Server", fmt.Sprintf("git-pages (%s; %s)", region, hostname))
712 ObserveData(r.Context(), "server.name", hostname, "server.region", region)
713 } else {
714 w.Header().Add("Server", fmt.Sprintf("git-pages (%s)", hostname))
715 ObserveData(r.Context(), "server.name", hostname)
716 }
717 }
718 }
719 allowedMethods := []string{"OPTIONS", "HEAD", "GET", "PUT", "DELETE", "POST"}
720 if r.Method == "OPTIONS" || !slices.Contains(allowedMethods, r.Method) {
721 w.Header().Add("Allow", strings.Join(allowedMethods, ", "))
722 }
723 err := error(nil)
724 switch r.Method {
725 // REST API
726 case http.MethodOptions:
727 // no preflight options
728 case http.MethodHead, http.MethodGet:
729 err = getPage(w, r)
730 case http.MethodPut:
731 err = putPage(w, r)
732 case http.MethodDelete:
733 err = deletePage(w, r)
734 // webhook API
735 case http.MethodPost:
736 err = postPage(w, r)
737 default:
738 http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
739 err = fmt.Errorf("method %s not allowed", r.Method)
740 }
741 if err != nil {
742 var authErr AuthError
743 if errors.As(err, &authErr) {
744 http.Error(w, prettyErrMsg(err), authErr.code)
745 }
746 var tooLargeErr *http.MaxBytesError
747 if errors.As(err, &tooLargeErr) {
748 message := "request body too large"
749 http.Error(w, message, http.StatusRequestEntityTooLarge)
750 err = errors.New(message)
751 }
752 logc.Println(r.Context(), "pages err:", err)
753 }
754}