Forked from whitequark.org/git-pages, with modifications for Tangled.
1package git_pages
2
3import (
4 "bytes"
5 "compress/gzip"
6 "context"
7 "encoding/json"
8 "errors"
9 "fmt"
10 "io"
11 "log"
12 "maps"
13 "net/http"
14 "net/url"
15 "os"
16 "path"
17 "strconv"
18 "strings"
19 "time"
20
21 "github.com/klauspost/compress/zstd"
22 "github.com/pquerna/cachecontrol/cacheobject"
23 "github.com/prometheus/client_golang/prometheus"
24 "github.com/prometheus/client_golang/prometheus/promauto"
25)
26
// notFoundPage is the site-relative path of the custom 404 page; getPage
// retries a missing entry at this path before falling back to a plain body.
const notFoundPage = "404.html"
28
// Prometheus counters tracking site update attempts and their outcomes;
// they are incremented by reportSiteUpdate.
var (
	// Counts every update attempt, labeled by the API surface it arrived
	// through ("rest" or "webhook").
	siteUpdatesCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "git_pages_site_updates",
		Help: "Count of site updates in total",
	}, []string{"via"})
	// Counts successful updates, labeled by outcome
	// ("no-change", "created", "replaced", "deleted").
	siteUpdateOkCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "git_pages_site_update_ok",
		Help: "Count of successful site updates",
	}, []string{"outcome"})
	// Counts failed updates, labeled by cause ("timeout" or "other").
	siteUpdateErrorCount = promauto.NewCounterVec(prometheus.CounterOpts{
		Name: "git_pages_site_update_error",
		Help: "Count of failed site updates",
	}, []string{"cause"})
)
43
44func reportSiteUpdate(via string, result *UpdateResult) {
45 siteUpdatesCount.With(prometheus.Labels{"via": via}).Inc()
46
47 switch result.outcome {
48 case UpdateError:
49 siteUpdateErrorCount.With(prometheus.Labels{"cause": "other"}).Inc()
50 case UpdateTimeout:
51 siteUpdateErrorCount.With(prometheus.Labels{"cause": "timeout"}).Inc()
52 case UpdateNoChange:
53 siteUpdateOkCount.With(prometheus.Labels{"outcome": "no-change"}).Inc()
54 case UpdateCreated:
55 siteUpdateOkCount.With(prometheus.Labels{"outcome": "created"}).Inc()
56 case UpdateReplaced:
57 siteUpdateOkCount.With(prometheus.Labels{"outcome": "replaced"}).Inc()
58 case UpdateDeleted:
59 siteUpdateOkCount.With(prometheus.Labels{"outcome": "deleted"}).Inc()
60 }
61}
62
// makeWebRoot builds the storage key under which a site's manifest lives:
// the lowercased host name and the project name, joined by a slash.
func makeWebRoot(host string, projectName string) string {
	webRoot := fmt.Sprint(strings.ToLower(host), "/", projectName)
	return webRoot
}
66
// writeRedirect emits a redirect response: the given 3xx status code, a
// Location header pointing at path, and a one-line human-readable body.
func writeRedirect(w http.ResponseWriter, code int, path string) {
	header := w.Header()
	header.Set("Location", path)
	w.WriteHeader(code)
	fmt.Fprint(w, "see ", path, "\n")
}
72
// The `klauspost/compress/zstd` package recommends reusing a decompressor to avoid repeated
// allocations of internal buffers. The nil reader is fine because only the
// stateless DecodeAll API is used (see getPage).
var zstdDecoder, _ = zstd.NewReader(nil)
76
77func getPage(w http.ResponseWriter, r *http.Request) error {
78 var err error
79 var sitePath string
80 var manifest *Manifest
81 var manifestMtime time.Time
82
83 cacheControl, err := cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control"))
84 if err != nil {
85 cacheControl = &cacheobject.RequestCacheDirectives{
86 MaxAge: -1,
87 MaxStale: -1,
88 MinFresh: -1,
89 }
90 }
91
92 bypassCache := cacheControl.NoCache || cacheControl.MaxAge == 0
93
94 host, err := GetHost(r)
95 if err != nil {
96 return err
97 }
98
99 type indexManifestResult struct {
100 manifest *Manifest
101 manifestMtime time.Time
102 err error
103 }
104 indexManifestCh := make(chan indexManifestResult, 1)
105 go func() {
106 manifest, mtime, err := backend.GetManifest(
107 r.Context(), makeWebRoot(host, ".index"),
108 GetManifestOptions{BypassCache: bypassCache},
109 )
110 indexManifestCh <- (indexManifestResult{manifest, mtime, err})
111 }()
112
113 err = nil
114 sitePath = strings.TrimPrefix(r.URL.Path, "/")
115 if projectName, projectPath, hasProjectSlash := strings.Cut(sitePath, "/"); projectName != "" {
116 var projectManifest *Manifest
117 var projectManifestMtime time.Time
118 projectManifest, projectManifestMtime, err = backend.GetManifest(
119 r.Context(), makeWebRoot(host, projectName),
120 GetManifestOptions{BypassCache: bypassCache},
121 )
122 if err == nil {
123 if !hasProjectSlash {
124 writeRedirect(w, http.StatusFound, r.URL.Path+"/")
125 return nil
126 }
127 sitePath, manifest, manifestMtime = projectPath, projectManifest, projectManifestMtime
128 }
129 }
130 if manifest == nil && (err == nil || errors.Is(err, ErrObjectNotFound)) {
131 result := <-indexManifestCh
132 manifest, manifestMtime, err = result.manifest, result.manifestMtime, result.err
133 if manifest == nil && errors.Is(err, ErrObjectNotFound) {
134 if found, fallbackErr := HandleWildcardFallback(w, r); found {
135 return fallbackErr
136 } else {
137 w.WriteHeader(http.StatusNotFound)
138 fmt.Fprintf(w, "site not found\n")
139 return err
140 }
141 }
142 }
143 if err != nil {
144 ObserveError(err) // all storage errors must be reported
145 w.WriteHeader(http.StatusInternalServerError)
146 fmt.Fprintf(w, "internal server error (%s)\n", err)
147 return err
148 }
149
150 if r.Header.Get("Origin") != "" {
151 // allow JavaScript code to access responses (including errors) even across origins
152 w.Header().Set("Access-Control-Allow-Origin", "*")
153 }
154
155 if sitePath == ".git-pages" {
156 // metadata directory name shouldn't be served even if present in site manifest
157 w.WriteHeader(http.StatusNotFound)
158 fmt.Fprintf(w, "not found\n")
159 return nil
160 }
161 if metadataPath, found := strings.CutPrefix(sitePath, ".git-pages/"); found {
162 lastModified := manifestMtime.UTC().Format(http.TimeFormat)
163 switch {
164 case metadataPath == "health":
165 w.Header().Add("Last-Modified", lastModified)
166 w.WriteHeader(http.StatusOK)
167 fmt.Fprintf(w, "ok\n")
168 return nil
169
170 case metadataPath == "manifest.json":
171 // metadata requests require authorization to avoid making pushes from private
172 // repositories enumerable
173 _, err := AuthorizeMetadataRetrieval(r)
174 if err != nil {
175 return err
176 }
177
178 w.Header().Add("Content-Type", "application/json; charset=utf-8")
179 w.Header().Add("Last-Modified", lastModified)
180 w.WriteHeader(http.StatusOK)
181 w.Write([]byte(ManifestDebugJSON(manifest)))
182 return nil
183
184 case metadataPath == "archive.tar" && config.Feature("archive-site"):
185 // same as above
186 _, err := AuthorizeMetadataRetrieval(r)
187 if err != nil {
188 return err
189 }
190
191 // we only offer `/.git-pages/archive.tar` and not the `.tar.gz`/`.tar.zst` variants
192 // because HTTP can already request compression using the `Content-Encoding` mechanism
193 acceptedEncodings := parseHTTPEncodings(r.Header.Get("Accept-Encoding"))
194 negotiated := acceptedEncodings.Negotiate("zstd", "gzip", "identity")
195 if negotiated != "" {
196 w.Header().Set("Content-Encoding", negotiated)
197 }
198 w.Header().Add("Content-Type", "application/x-tar")
199 w.Header().Add("Last-Modified", lastModified)
200 w.Header().Add("Transfer-Encoding", "chunked")
201 w.WriteHeader(http.StatusOK)
202 var iow io.Writer
203 switch negotiated {
204 case "", "identity":
205 iow = w
206 case "gzip":
207 iow = gzip.NewWriter(w)
208 case "zstd":
209 iow, _ = zstd.NewWriter(w)
210 }
211 return CollectTar(r.Context(), iow, manifest, manifestMtime)
212
213 default:
214 w.WriteHeader(http.StatusNotFound)
215 fmt.Fprintf(w, "not found\n")
216 return nil
217 }
218 }
219
220 entryPath := sitePath
221 entry := (*Entry)(nil)
222 appliedRedirect := false
223 status := 200
224 reader := io.ReadSeeker(nil)
225 mtime := time.Time{}
226 for {
227 entryPath, _ = strings.CutSuffix(entryPath, "/")
228 entryPath, err = ExpandSymlinks(manifest, entryPath)
229 if err != nil {
230 w.WriteHeader(http.StatusInternalServerError)
231 fmt.Fprintln(w, err)
232 return err
233 }
234 entry = manifest.Contents[entryPath]
235 if !appliedRedirect {
236 redirectKind := RedirectAny
237 if entry != nil && entry.GetType() != Type_Invalid {
238 redirectKind = RedirectForce
239 }
240 originalURL := (&url.URL{Host: r.Host}).ResolveReference(r.URL)
241 redirectURL, redirectStatus := ApplyRedirectRules(manifest, originalURL, redirectKind)
242 if Is3xxHTTPStatus(redirectStatus) {
243 writeRedirect(w, redirectStatus, redirectURL.String())
244 return nil
245 } else if redirectURL != nil {
246 entryPath = strings.TrimPrefix(redirectURL.Path, "/")
247 status = int(redirectStatus)
248 // Apply user redirects at most once; if something ends in a loop, it should be
249 // the user agent, not the pages server.
250 appliedRedirect = true
251 continue
252 }
253 }
254 if entry == nil || entry.GetType() == Type_Invalid {
255 status = 404
256 if entryPath != notFoundPage {
257 entryPath = notFoundPage
258 continue
259 } else {
260 reader = bytes.NewReader([]byte("not found\n"))
261 break
262 }
263 } else if entry.GetType() == Type_InlineFile {
264 reader = bytes.NewReader(entry.Data)
265 } else if entry.GetType() == Type_ExternalFile {
266 etag := fmt.Sprintf(`"%s"`, entry.Data)
267 if r.Header.Get("If-None-Match") == etag {
268 w.WriteHeader(http.StatusNotModified)
269 return nil
270 } else {
271 reader, _, mtime, err = backend.GetBlob(r.Context(), string(entry.Data))
272 if err != nil {
273 ObserveError(err) // all storage errors must be reported
274 w.WriteHeader(http.StatusInternalServerError)
275 fmt.Fprintf(w, "internal server error: %s\n", err)
276 return err
277 }
278 w.Header().Set("ETag", etag)
279 }
280 } else if entry.GetType() == Type_Directory {
281 if strings.HasSuffix(r.URL.Path, "/") {
282 entryPath = path.Join(entryPath, "index.html")
283 continue
284 } else {
285 // redirect from `dir` to `dir/`, otherwise when `dir/index.html` is served,
286 // links in it will have the wrong base URL
287 newPath := r.URL.Path + "/"
288 writeRedirect(w, http.StatusFound, newPath)
289 return nil
290 }
291 } else if entry.GetType() == Type_Symlink {
292 return fmt.Errorf("unexpected symlink")
293 }
294 break
295 }
296 if closer, ok := reader.(io.Closer); ok {
297 defer closer.Close()
298 }
299
300 acceptedEncodings := parseHTTPEncodings(r.Header.Get("Accept-Encoding"))
301 negotiatedEncoding := true
302 switch entry.GetTransform() {
303 case Transform_None:
304 if acceptedEncodings.Negotiate("identity") != "identity" {
305 negotiatedEncoding = false
306 }
307 case Transform_Zstandard:
308 supported := []string{"zstd", "identity"}
309 if entry.ContentType == nil {
310 // If Content-Type is unset, `http.ServeContent` will try to sniff
311 // the file contents. That won't work if it's compressed.
312 supported = []string{"identity"}
313 }
314 switch acceptedEncodings.Negotiate(supported...) {
315 case "zstd":
316 // Set Content-Length ourselves since `http.ServeContent` only sets
317 // it if Content-Encoding is unset or if it's a range request.
318 w.Header().Set("Content-Length", strconv.FormatInt(*entry.Size, 10))
319 w.Header().Set("Content-Encoding", "zstd")
320 case "identity":
321 compressedData, _ := io.ReadAll(reader)
322 decompressedData, err := zstdDecoder.DecodeAll(compressedData, []byte{})
323 if err != nil {
324 w.WriteHeader(http.StatusInternalServerError)
325 fmt.Fprintf(w, "internal server error: %s\n", err)
326 return err
327 }
328 reader = bytes.NewReader(decompressedData)
329 default:
330 negotiatedEncoding = false
331 }
332 default:
333 return fmt.Errorf("unexpected transform")
334 }
335 if !negotiatedEncoding {
336 w.WriteHeader(http.StatusNotAcceptable)
337 return fmt.Errorf("no supported content encodings (accept-encoding: %q)",
338 r.Header.Get("Accept-Encoding"))
339 }
340
341 if entry != nil && entry.ContentType != nil {
342 w.Header().Set("X-Content-Type-Options", "nosniff")
343 w.Header().Set("Content-Type", *entry.ContentType)
344 }
345
346 customHeaders, err := ApplyHeaderRules(manifest, &url.URL{Path: entryPath})
347 if err != nil {
348 // This is an "internal server error" from an HTTP point of view, but also
349 // either an issue with the site or a misconfiguration from our point of view.
350 // Since it's not a problem with the server we don't observe the error.
351 //
352 // Note that this behavior is different from a site upload with a malformed
353 // `_headers` file (where it is semantically ignored); this is because a broken
354 // upload is something the uploader can notice and fix, but a change in server
355 // configuration is something they are unaware of and won't be notified of.
356 w.WriteHeader(http.StatusInternalServerError)
357 fmt.Fprintf(w, "%s\n", err)
358 return err
359 } else {
360 // If the header has passed all of our stringent, deny-by-default checks, it means
361 // it's good enough to overwrite whatever was our builtin option (if any).
362 maps.Copy(w.Header(), customHeaders)
363 }
364
365 // decide on the HTTP status
366 if status != 200 {
367 w.WriteHeader(status)
368 if reader != nil {
369 io.Copy(w, reader)
370 }
371 } else {
372 // consider content fresh for 60 seconds (the same as the freshness interval of
373 // manifests in the S3 backend), and use stale content anyway as long as it's not
374 // older than a hour; while it is cheap to handle If-Modified-Since queries
375 // server-side, on the client `max-age=0, must-revalidate` causes every resource
376 // to block the page load every time
377 w.Header().Set("Cache-Control", "max-age=60, stale-while-revalidate=3600")
378 // see https://web.dev/articles/stale-while-revalidate for details
379
380 // http.ServeContent handles conditional requests and range requests
381 http.ServeContent(w, r, entryPath, mtime, reader)
382 }
383 return nil
384}
385
386func putPage(w http.ResponseWriter, r *http.Request) error {
387 var result UpdateResult
388
389 host, err := GetHost(r)
390 if err != nil {
391 return err
392 }
393
394 projectName, err := GetProjectName(r)
395 if err != nil {
396 return err
397 }
398
399 webRoot := makeWebRoot(host, projectName)
400
401 updateCtx, cancel := context.WithTimeout(r.Context(), time.Duration(config.Limits.UpdateTimeout))
402 defer cancel()
403
404 contentType := getMediaType(r.Header.Get("Content-Type"))
405
406 if contentType == "application/x-www-form-urlencoded" {
407 auth, err := AuthorizeUpdateFromRepository(r)
408 if err != nil {
409 return err
410 }
411
412 // URLs have no length limit, but 64K seems enough for a repository URL
413 requestBody, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 65536))
414 if err != nil {
415 return fmt.Errorf("body read: %w", err)
416 }
417
418 repoURL := string(requestBody)
419 if err := AuthorizeRepository(repoURL, auth); err != nil {
420 return err
421 }
422
423 branch := "pages"
424 if customBranch := r.Header.Get("Branch"); customBranch != "" {
425 branch = customBranch
426 }
427 if err := AuthorizeBranch(branch, auth); err != nil {
428 return err
429 }
430
431 result = UpdateFromRepository(updateCtx, webRoot, repoURL, branch)
432 } else {
433 _, err := AuthorizeUpdateFromArchive(r)
434 if err != nil {
435 return err
436 }
437
438 // request body contains archive
439 reader := http.MaxBytesReader(w, r.Body, int64(config.Limits.MaxSiteSize.Bytes()))
440 result = UpdateFromArchive(updateCtx, webRoot, contentType, reader)
441 }
442
443 switch result.outcome {
444 case UpdateError:
445 if errors.Is(result.err, ErrManifestTooLarge) {
446 w.WriteHeader(http.StatusRequestEntityTooLarge)
447 } else if errors.Is(result.err, errArchiveFormat) {
448 w.WriteHeader(http.StatusUnsupportedMediaType)
449 } else if errors.Is(result.err, ErrArchiveTooLarge) {
450 w.WriteHeader(http.StatusRequestEntityTooLarge)
451 } else {
452 w.WriteHeader(http.StatusServiceUnavailable)
453 }
454 case UpdateTimeout:
455 w.WriteHeader(http.StatusGatewayTimeout)
456 case UpdateNoChange:
457 w.Header().Add("Update-Result", "no-change")
458 case UpdateCreated:
459 w.Header().Add("Update-Result", "created")
460 case UpdateReplaced:
461 w.Header().Add("Update-Result", "replaced")
462 case UpdateDeleted:
463 w.Header().Add("Update-Result", "deleted")
464 }
465 if result.manifest != nil {
466 if result.manifest.Commit != nil {
467 fmt.Fprintln(w, *result.manifest.Commit)
468 } else {
469 fmt.Fprintln(w, "(archive)")
470 }
471 for _, problem := range GetProblemReport(result.manifest) {
472 fmt.Fprintln(w, problem)
473 }
474 } else if result.err != nil {
475 fmt.Fprintln(w, result.err)
476 } else {
477 fmt.Fprintln(w, "internal error")
478 }
479 reportSiteUpdate("rest", &result)
480 return nil
481}
482
483func deletePage(w http.ResponseWriter, r *http.Request) error {
484 _, err := AuthorizeUpdateFromRepository(r)
485 if err != nil {
486 return err
487 }
488
489 host, err := GetHost(r)
490 if err != nil {
491 return err
492 }
493
494 projectName, err := GetProjectName(r)
495 if err != nil {
496 return err
497 }
498
499 err = backend.DeleteManifest(r.Context(), makeWebRoot(host, projectName))
500 if err != nil {
501 w.WriteHeader(http.StatusInternalServerError)
502 } else {
503 w.Header().Add("Update-Result", "deleted")
504 w.WriteHeader(http.StatusOK)
505 }
506 if err != nil {
507 fmt.Fprintln(w, err)
508 }
509 return err
510}
511
512func postPage(w http.ResponseWriter, r *http.Request) error {
513 // Start a timer for the request timeout immediately.
514 requestTimeout := 3 * time.Second
515 requestTimer := time.NewTimer(requestTimeout)
516
517 auth, err := AuthorizeUpdateFromRepository(r)
518 if err != nil {
519 return err
520 }
521
522 host, err := GetHost(r)
523 if err != nil {
524 return err
525 }
526
527 projectName, err := GetProjectName(r)
528 if err != nil {
529 return err
530 }
531
532 webRoot := makeWebRoot(host, projectName)
533
534 eventName := ""
535 for _, header := range []string{
536 "X-Forgejo-Event",
537 "X-GitHub-Event",
538 "X-Gitea-Event",
539 "X-Gogs-Event",
540 } {
541 eventName = r.Header.Get(header)
542 if eventName != "" {
543 break
544 }
545 }
546
547 if eventName == "" {
548 http.Error(w,
549 "expected a Forgejo, GitHub, Gitea, or Gogs webhook request", http.StatusBadRequest)
550 return fmt.Errorf("event expected")
551 }
552
553 if eventName != "push" {
554 http.Error(w, "only push events are allowed", http.StatusBadRequest)
555 return fmt.Errorf("invalid event")
556 }
557
558 if r.Header.Get("Content-Type") != "application/json" {
559 http.Error(w, "only JSON payload is allowed", http.StatusBadRequest)
560 return fmt.Errorf("invalid content type")
561 }
562
563 // Event payloads have no length limit, but events bigger than 16M seem excessive.
564 requestBody, err := io.ReadAll(http.MaxBytesReader(w, r.Body, 16*1048576))
565 if err != nil {
566 return fmt.Errorf("body read: %w", err)
567 }
568
569 var event struct {
570 Ref string `json:"ref"`
571 Repository struct {
572 CloneURL string `json:"clone_url"`
573 } `json:"repository"`
574 }
575 err = json.NewDecoder(bytes.NewReader(requestBody)).Decode(&event)
576 if err != nil {
577 http.Error(w, fmt.Sprintf("invalid request body: %s", err), http.StatusBadRequest)
578 return err
579 }
580
581 if event.Ref != fmt.Sprintf("refs/heads/%s", auth.branch) {
582 code := http.StatusUnauthorized
583 if strings.Contains(r.Header.Get("User-Agent"), "GitHub-Hookshot") {
584 // GitHub has no way to restrict branches for a webhook, and responding with 401
585 // for every non-pages branch makes the "Recent Deliveries" tab look awful.
586 code = http.StatusOK
587 }
588 http.Error(w,
589 fmt.Sprintf("ref %s not in allowlist [refs/heads/%v]", event.Ref, auth.branch),
590 code)
591 return nil
592 }
593
594 repoURL := event.Repository.CloneURL
595 if err := AuthorizeRepository(repoURL, auth); err != nil {
596 return err
597 }
598
599 resultChan := make(chan UpdateResult)
600 go func(ctx context.Context) {
601 ctx, cancel := context.WithTimeout(ctx, time.Duration(config.Limits.UpdateTimeout))
602 defer cancel()
603
604 result := UpdateFromRepository(ctx, webRoot, repoURL, auth.branch)
605 resultChan <- result
606 reportSiteUpdate("webhook", &result)
607 }(context.Background())
608
609 var result UpdateResult
610 select {
611 case result = <-resultChan:
612 case <-requestTimer.C:
613 w.WriteHeader(http.StatusAccepted)
614 fmt.Fprintf(w, "updating (taking longer than %s)", requestTimeout)
615 return nil
616 }
617
618 switch result.outcome {
619 case UpdateError:
620 w.WriteHeader(http.StatusServiceUnavailable)
621 fmt.Fprintf(w, "update error: %s\n", result.err)
622 case UpdateTimeout:
623 w.WriteHeader(http.StatusGatewayTimeout)
624 fmt.Fprintln(w, "update timeout")
625 case UpdateNoChange:
626 w.WriteHeader(http.StatusOK)
627 fmt.Fprintln(w, "unchanged")
628 case UpdateCreated:
629 w.WriteHeader(http.StatusOK)
630 fmt.Fprintln(w, "created")
631 case UpdateReplaced:
632 w.WriteHeader(http.StatusOK)
633 fmt.Fprintln(w, "replaced")
634 case UpdateDeleted:
635 w.WriteHeader(http.StatusOK)
636 fmt.Fprintln(w, "deleted")
637 }
638 if result.manifest != nil {
639 report := GetProblemReport(result.manifest)
640 if len(report) > 0 {
641 fmt.Fprintln(w, "problems:")
642 }
643 for _, problem := range report {
644 fmt.Fprintf(w, "- %s\n", problem)
645 }
646 }
647 return nil
648}
649
650func ServePages(w http.ResponseWriter, r *http.Request) {
651 // We want upstream health checks to be done as closely to the normal flow as possible;
652 // any intentional deviation is an opportunity to miss an issue that will affect our
653 // visitors but not our health checks.
654 if r.Header.Get("Health-Check") == "" {
655 log.Println("pages:", r.Method, r.Host, r.URL, r.Header.Get("Content-Type"))
656 if region := os.Getenv("FLY_REGION"); region != "" {
657 machine_id := os.Getenv("FLY_MACHINE_ID")
658 w.Header().Add("Server", fmt.Sprintf("git-pages (fly.io; %s; %s)", region, machine_id))
659 ObserveData(r.Context(), "server.name", machine_id, "server.region", region)
660 } else if hostname, err := os.Hostname(); err == nil {
661 if region := os.Getenv("PAGES_REGION"); region != "" {
662 w.Header().Add("Server", fmt.Sprintf("git-pages (%s; %s)", region, hostname))
663 ObserveData(r.Context(), "server.name", hostname, "server.region", region)
664 } else {
665 w.Header().Add("Server", fmt.Sprintf("git-pages (%s)", hostname))
666 ObserveData(r.Context(), "server.name", hostname)
667 }
668 }
669 }
670 err := error(nil)
671 switch r.Method {
672 // REST API
673 case http.MethodHead, http.MethodGet:
674 err = getPage(w, r)
675 case http.MethodPut:
676 err = putPage(w, r)
677 case http.MethodDelete:
678 err = deletePage(w, r)
679 // webhook API
680 case http.MethodPost:
681 err = postPage(w, r)
682 default:
683 w.Header().Add("Allow", "HEAD, GET, PUT, DELETE, POST")
684 http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
685 err = fmt.Errorf("method %s not allowed", r.Method)
686 }
687 if err != nil {
688 var authErr AuthError
689 if errors.As(err, &authErr) {
690 http.Error(w, prettyErrMsg(err), authErr.code)
691 }
692 var tooLargeErr *http.MaxBytesError
693 if errors.As(err, &tooLargeErr) {
694 message := "request body too large"
695 http.Error(w, message, http.StatusRequestEntityTooLarge)
696 err = errors.New(message)
697 }
698 log.Println("pages err:", err)
699 }
700}