[mirror] Scalable static site server for Git forges (like GitHub Pages)
10
fork

Configure Feed

Select the types of activity you want to include in your feed.

Limit the amount of data fetched from the git repository.

Like the archive-size limit, this is a supplementary check meant to
limit resource consumption before the final check done in
`StoreManifest()`.

+20 -5
+18 -5
src/fetch.go
··· 23 23 "google.golang.org/protobuf/proto" 24 24 ) 25 25 26 + var ErrRepositoryTooLarge = errors.New("repository too large") 27 + 26 28 func FetchRepository( 27 29 ctx context.Context, repoURL string, branch string, oldManifest *Manifest, 28 30 ) ( ··· 152 154 // This will only succeed if a `blob:none` filter isn't supported and we got a full 153 155 // clone despite asking for a partial clone. 154 156 for hash, manifestEntry := range blobsNeeded { 155 - if err := readGitBlob(repo, hash, manifestEntry); err == nil { 156 - dataBytesTransferred += manifestEntry.GetOriginalSize() 157 + if err := readGitBlob(repo, hash, manifestEntry, &dataBytesTransferred); err == nil { 157 158 delete(blobsNeeded, hash) 159 + } else if errors.Is(err, ErrRepositoryTooLarge) { 160 + return nil, err 158 161 } 159 162 } 160 163 ··· 193 196 194 197 // All remaining blobs should now be available. 195 198 for hash, manifestEntry := range blobsNeeded { 196 - if err := readGitBlob(repo, hash, manifestEntry); err != nil { 199 + if err := readGitBlob(repo, hash, manifestEntry, &dataBytesTransferred); err != nil { 197 200 return nil, err 198 201 } 199 - dataBytesTransferred += manifestEntry.GetOriginalSize() 200 202 delete(blobsNeeded, hash) 201 203 } 202 204 } ··· 210 212 return manifest, nil 211 213 } 212 214 213 - func readGitBlob(repo *git.Repository, hash plumbing.Hash, entry *Entry) error { 215 + func readGitBlob( 216 + repo *git.Repository, hash plumbing.Hash, entry *Entry, bytesTransferred *int64, 217 + ) error { 214 218 blob, err := repo.BlobObject(hash) 215 219 if err != nil { 216 220 return fmt.Errorf("git blob %s: %w", hash, err) ··· 239 243 entry.Transform = Transform_Identity.Enum() 240 244 entry.OriginalSize = proto.Int64(blob.Size) 241 245 entry.CompressedSize = proto.Int64(blob.Size) 246 + 247 + *bytesTransferred += blob.Size 248 + if uint64(*bytesTransferred) > config.Limits.MaxSiteSize.Bytes() { 249 + return fmt.Errorf("%w: fetch exceeds %s limit", 250 + ErrRepositoryTooLarge, 251 + config.Limits.MaxSiteSize.HR(), 252 + ) 253 + } 254 + 242 255 return nil 243 256 }
+2
src/pages.go
··· 607 607 w.WriteHeader(http.StatusUnsupportedMediaType) 608 608 } else if errors.Is(result.err, ErrArchiveTooLarge) { 609 609 w.WriteHeader(http.StatusRequestEntityTooLarge) 610 + } else if errors.Is(result.err, ErrRepositoryTooLarge) { 611 + w.WriteHeader(http.StatusUnprocessableEntity) 610 612 } else if errors.Is(result.err, ErrMalformedPatch) { 611 613 w.WriteHeader(http.StatusUnprocessableEntity) 612 614 } else if errors.Is(result.err, ErrPreconditionFailed) {