loading up the forgejo repo on tangled to test page performance
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

Enable more `revive` linter rules (#30608)

Notable additions:

- `redefines-builtin-id` forbids variable names that shadow Go builtins
- `empty-lines` removes unnecessary empty lines that `gofumpt` does not
remove for some reason
- `superfluous-else` eliminates more superfluous `else` branches

Rules are also sorted alphabetically and I cleaned up various parts of
`.golangci.yml`.

(cherry picked from commit 74f0c84fa4245a20ce6fb87dac1faf2aeeded2a2)

Conflicts:
.golangci.yml
apply the linter recommendations to Forgejo code as well

authored by

silverwind and committed by
Earl Warren
12b199c5 31b608a1

+130 -193
+36 -23
.golangci.yml
··· 1 1 linters: 2 + enable-all: false 3 + disable-all: true 4 + fast: false 2 5 enable: 3 6 - bidichk 4 - # - deadcode # deprecated - https://github.com/golangci/golangci-lint/issues/1841 5 7 - depguard 6 8 - dupl 7 9 - errcheck 8 10 - forbidigo 9 11 - gocritic 10 - # - gocyclo # The cyclomatic complexety of a lot of functions is too high, we should refactor those another time. 11 12 - gofmt 12 13 - gofumpt 13 14 - gosimple ··· 17 18 - nolintlint 18 19 - revive 19 20 - staticcheck 20 - # - structcheck # deprecated - https://github.com/golangci/golangci-lint/issues/1841 21 21 - stylecheck 22 22 - typecheck 23 23 - unconvert 24 24 - unused 25 - # - varcheck # deprecated - https://github.com/golangci/golangci-lint/issues/1841 26 25 - wastedassign 27 - enable-all: false 28 - disable-all: true 29 - fast: false 30 26 31 27 run: 32 28 timeout: 10m ··· 34 30 - node_modules 35 31 - public 36 32 - web_src 33 + 34 + output: 35 + sort-results: true 37 36 38 37 linters-settings: 39 38 stylecheck: ··· 51 50 errorCode: 1 52 51 warningCode: 1 53 52 rules: 53 + - name: atomic 54 + - name: bare-return 54 55 - name: blank-imports 56 + - name: constant-logical-expr 55 57 - name: context-as-argument 56 58 - name: context-keys-type 57 59 - name: dot-imports 60 + - name: duplicated-imports 61 + - name: empty-lines 62 + - name: error-naming 58 63 - name: error-return 59 64 - name: error-strings 60 - - name: error-naming 65 + - name: errorf 61 66 - name: exported 67 + - name: identical-branches 62 68 - name: if-return 63 69 - name: increment-decrement 64 - - name: var-naming 65 - - name: var-declaration 70 + - name: indent-error-flow 71 + - name: modifies-value-receiver 66 72 - name: package-comments 67 73 - name: range 68 74 - name: receiver-naming 75 + - name: redefines-builtin-id 76 + - name: string-of-int 77 + - name: superfluous-else 69 78 - name: time-naming 79 + - name: unconditional-recursion 70 80 - name: unexported-return 71 - - name: indent-error-flow 72 - - name: errorf 73 - - 
name: duplicated-imports 74 - - name: modifies-value-receiver 81 + - name: unreachable-code 82 + - name: var-declaration 83 + - name: var-naming 75 84 gofumpt: 76 85 extra-rules: true 77 86 depguard: ··· 96 105 issues: 97 106 max-issues-per-linter: 0 98 107 max-same-issues: 0 108 + exclude-dirs: [node_modules, public, web_src] 109 + exclude-case-sensitive: true 99 110 exclude-rules: 100 - # Exclude some linters from running on tests files. 111 + - path: models/db/sql_postgres_with_schema.go 112 + linters: 113 + - nolintlint 101 114 - path: _test\.go 102 115 linters: 103 116 - gocyclo ··· 115 128 - path: cmd 116 129 linters: 117 130 - forbidigo 118 - - linters: 131 + - text: "webhook" 132 + linters: 119 133 - dupl 120 - text: "webhook" 121 - - linters: 134 + - text: "`ID' should not be capitalized" 135 + linters: 122 136 - gocritic 123 - text: "`ID' should not be capitalized" 124 - - linters: 137 + - text: "swagger" 138 + linters: 125 139 - unused 126 140 - deadcode 127 - text: "swagger" 128 - - linters: 141 + - text: "argument x is overwritten before first use" 142 + linters: 129 143 - staticcheck 130 - text: "argument x is overwritten before first use" 131 144 - text: "commentFormatting: put a space between `//` and comment text" 132 145 linters: 133 146 - gocritic
+1 -1
cmd/hook.go
··· 482 482 fmt.Fprintf(os.Stderr, " %s\n", res.URL) 483 483 } 484 484 fmt.Fprintln(os.Stderr, "") 485 - os.Stderr.Sync() 485 + _ = os.Stderr.Sync() 486 486 } 487 487 } 488 488
-1
models/asymkey/gpg_key_object_verification.go
··· 94 94 Reason: "gpg.error.no_committer_account", 95 95 } 96 96 } 97 - 98 97 } 99 98 } 100 99
-1
models/db/engine.go
··· 236 236 // Need to map provided names to beans... 237 237 beanMap := make(map[string]any) 238 238 for _, bean := range tables { 239 - 240 239 beanMap[strings.ToLower(reflect.Indirect(reflect.ValueOf(bean)).Type().Name())] = bean 241 240 beanMap[strings.ToLower(x.TableName(bean))] = bean 242 241 beanMap[strings.ToLower(x.TableName(bean, true))] = bean
-2
models/issues/review.go
··· 345 345 return nil, err 346 346 } 347 347 } 348 - 349 348 } else if opts.ReviewerTeam != nil { 350 349 review.Type = ReviewTypeRequest 351 350 review.ReviewerTeamID = opts.ReviewerTeam.ID 352 - 353 351 } else { 354 352 return nil, fmt.Errorf("provide either reviewer or reviewer team") 355 353 }
-2
models/migrations/base/db.go
··· 214 214 return err 215 215 } 216 216 sequenceMap[sequence] = sequenceData 217 - 218 217 } 219 218 220 219 // CASCADE causes postgres to drop all the constraints on the old table ··· 279 278 return err 280 279 } 281 280 } 282 - 283 281 } 284 282 285 283 default:
-2
models/migrations/v1_11/v111.go
··· 263 263 for _, u := range units { 264 264 var found bool 265 265 for _, team := range teams { 266 - 267 266 var teamU []*TeamUnit 268 267 var unitEnabled bool 269 268 err = sess.Where("team_id = ?", team.ID).Find(&teamU) ··· 332 331 } 333 332 334 333 if !protectedBranch.EnableApprovalsWhitelist { 335 - 336 334 perm, err := getUserRepoPermission(sess, baseRepo, reviewer) 337 335 if err != nil { 338 336 return false, err
+2 -2
models/migrations/v1_20/v250.go
··· 104 104 105 105 // Convert to new metadata format 106 106 107 - new := &MetadataNew{ 107 + newMetadata := &MetadataNew{ 108 108 Type: old.Type, 109 109 IsTagged: old.IsTagged, 110 110 Platform: old.Platform, ··· 119 119 Manifests: manifests, 120 120 } 121 121 122 - metadataJSON, err := json.Marshal(new) 122 + metadataJSON, err := json.Marshal(newMetadata) 123 123 if err != nil { 124 124 return err 125 125 }
-1
models/migrations/v1_6/v71.go
··· 61 61 if _, err := sess.ID(tfa.ID).Cols("scratch_salt, scratch_hash").Update(tfa); err != nil { 62 62 return fmt.Errorf("couldn't add in scratch_hash and scratch_salt: %w", err) 63 63 } 64 - 65 64 } 66 65 } 67 66
-1
models/migrations/v1_9/v85.go
··· 81 81 if _, err := sess.ID(token.ID).Cols("token_hash, token_salt, token_last_eight, sha1").Update(token); err != nil { 82 82 return fmt.Errorf("couldn't add in sha1, token_hash, token_salt and token_last_eight: %w", err) 83 83 } 84 - 85 84 } 86 85 } 87 86
+1 -2
models/organization/team.go
··· 222 222 if err != nil { 223 223 if ignoreNonExistent { 224 224 continue 225 - } else { 226 - return nil, err 227 225 } 226 + return nil, err 228 227 } 229 228 ids = append(ids, u.ID) 230 229 }
-2
models/project/board.go
··· 110 110 var items []string 111 111 112 112 switch project.BoardType { 113 - 114 113 case BoardTypeBugTriage: 115 114 items = setting.Project.ProjectBoardBugTriageType 116 115 117 116 case BoardTypeBasicKanban: 118 117 items = setting.Project.ProjectBoardBasicKanbanType 119 - 120 118 case BoardTypeNone: 121 119 fallthrough 122 120 default:
-1
models/repo/user_repo.go
··· 135 135 // the owner of a private repo needs to be explicitly added. 136 136 cond = cond.Or(builder.Eq{"`user`.id": repo.Owner.ID}) 137 137 } 138 - 139 138 } else { 140 139 // This is a "public" repository: 141 140 // Any user that has read access, is a watcher or organization member can be requested to review
+1 -2
models/user/user.go
··· 1007 1007 if err != nil { 1008 1008 if ignoreNonExistent { 1009 1009 continue 1010 - } else { 1011 - return nil, err 1012 1010 } 1011 + return nil, err 1013 1012 } 1014 1013 ids = append(ids, u.ID) 1015 1014 }
+6 -6
modules/auth/password/password.go
··· 63 63 func setupComplexity(values []string) { 64 64 if len(values) != 1 || values[0] != "off" { 65 65 for _, val := range values { 66 - if complex, ok := charComplexities[val]; ok { 67 - validChars += complex.ValidChars 68 - requiredList = append(requiredList, complex) 66 + if complexity, ok := charComplexities[val]; ok { 67 + validChars += complexity.ValidChars 68 + requiredList = append(requiredList, complexity) 69 69 } 70 70 } 71 71 if len(requiredList) == 0 { 72 72 // No valid character classes found; use all classes as default 73 - for _, complex := range charComplexities { 74 - validChars += complex.ValidChars 75 - requiredList = append(requiredList, complex) 73 + for _, complexity := range charComplexities { 74 + validChars += complexity.ValidChars 75 + requiredList = append(requiredList, complexity) 76 76 } 77 77 } 78 78 }
+3 -3
modules/git/batch_reader.go
··· 307 307 308 308 // Deal with the binary hash 309 309 idx = 0 310 - len := objectFormat.FullLength() / 2 311 - for idx < len { 310 + length := objectFormat.FullLength() / 2 311 + for idx < length { 312 312 var read int 313 - read, err = rd.Read(shaBuf[idx:len]) 313 + read, err = rd.Read(shaBuf[idx:length]) 314 314 n += read 315 315 if err != nil { 316 316 return mode, fname, sha, n, err
+1 -2
modules/git/commit_reader.go
··· 49 49 if len(line) > 0 && line[0] == ' ' { 50 50 _, _ = signatureSB.Write(line[1:]) 51 51 continue 52 - } else { 53 - pgpsig = false 54 52 } 53 + pgpsig = false 55 54 } 56 55 57 56 if !message {
-1
modules/git/pipeline/lfs_nogogit.go
··· 213 213 errChan <- err 214 214 break 215 215 } 216 - 217 216 } 218 217 }() 219 218
+4 -4
modules/git/repo_commit.go
··· 251 251 return nil, err 252 252 } 253 253 254 - len := objectFormat.FullLength() 254 + length := objectFormat.FullLength() 255 255 commits := []*Commit{} 256 - shaline := make([]byte, len+1) 256 + shaline := make([]byte, length+1) 257 257 for { 258 258 n, err := io.ReadFull(stdoutReader, shaline) 259 - if err != nil || n < len { 259 + if err != nil || n < length { 260 260 if err == io.EOF { 261 261 err = nil 262 262 } 263 263 return commits, err 264 264 } 265 - objectID, err := NewIDFromString(string(shaline[0:len])) 265 + objectID, err := NewIDFromString(string(shaline[0:length])) 266 266 if err != nil { 267 267 return nil, err 268 268 }
-1
modules/git/submodule.go
··· 64 64 // ex: git@try.gitea.io:go-gitea/gitea 65 65 match := scpSyntax.FindAllStringSubmatch(refURI, -1) 66 66 if len(match) > 0 { 67 - 68 67 m := match[0] 69 68 refHostname := m[2] 70 69 pth := m[3]
-2
modules/indexer/code/bleve/bleve.go
··· 193 193 func (b *Indexer) Index(ctx context.Context, repo *repo_model.Repository, sha string, changes *internal.RepoChanges) error { 194 194 batch := inner_bleve.NewFlushingBatch(b.inner.Indexer, maxBatchSize) 195 195 if len(changes.Updates) > 0 { 196 - 197 196 // Now because of some insanity with git cat-file not immediately failing if not run in a valid git directory we need to run git rev-parse first! 198 197 if err := git.EnsureValidGitRepository(ctx, repo.RepoPath()); err != nil { 199 198 log.Error("Unable to open git repo: %s for %-v: %v", repo.RepoPath(), repo, err) ··· 337 336 if result, err = b.inner.Indexer.Search(facetRequest); err != nil { 338 337 return 0, nil, nil, err 339 338 } 340 - 341 339 } 342 340 languagesFacet := result.Facets["languages"] 343 341 for _, term := range languagesFacet.Terms.Terms() {
-1
modules/indexer/issues/elasticsearch/elasticsearch.go
··· 145 145 query := elastic.NewBoolQuery() 146 146 147 147 if options.Keyword != "" { 148 - 149 148 searchType := esMultiMatchTypePhrasePrefix 150 149 if options.IsFuzzyKeyword { 151 150 searchType = esMultiMatchTypeBestFields
-1
modules/log/event_format.go
··· 125 125 if mode.Colorize { 126 126 buf = append(buf, resetBytes...) 127 127 } 128 - 129 128 } 130 129 if flags&(Lshortfile|Llongfile) != 0 { 131 130 if mode.Colorize {
-2
modules/markup/markdown/markdown_test.go
··· 460 460 res, err := markdown.RenderString(&markup.RenderContext{Ctx: git.DefaultContext}, test.testcase) 461 461 assert.NoError(t, err, "Unexpected error in testcase: %q", test.testcase) 462 462 assert.Equal(t, template.HTML(test.expected), res, "Unexpected result in testcase %q", test.testcase) 463 - 464 463 } 465 464 466 465 negativeTests := []string{ ··· 555 554 res, err := markdown.RenderString(&markup.RenderContext{Ctx: git.DefaultContext}, test.testcase) 556 555 assert.NoError(t, err, "Unexpected error in testcase: %q", test.testcase) 557 556 assert.Equal(t, template.HTML(test.expected), res, "Unexpected result in testcase %q", test.testcase) 558 - 559 557 } 560 558 } 561 559
-1
modules/markup/orgmode/orgmode.go
··· 147 147 } 148 148 if len(link) > 0 && !markup.IsLinkStr(link) && 149 149 link[0] != '#' && !strings.HasPrefix(link, mailto) { 150 - 151 150 var base string 152 151 if r.Ctx.IsWiki { 153 152 base = r.Ctx.Links.WikiLink()
+16 -16
modules/packages/rubygems/marshal.go
··· 147 147 return e.w.WriteByte(byte(i - 5)) 148 148 } 149 149 150 - var len int 150 + var length int 151 151 if 122 < i && i <= 0xff { 152 - len = 1 152 + length = 1 153 153 } else if 0xff < i && i <= 0xffff { 154 - len = 2 154 + length = 2 155 155 } else if 0xffff < i && i <= 0xffffff { 156 - len = 3 156 + length = 3 157 157 } else if 0xffffff < i && i <= 0x3fffffff { 158 - len = 4 158 + length = 4 159 159 } else if -0x100 <= i && i < -123 { 160 - len = -1 160 + length = -1 161 161 } else if -0x10000 <= i && i < -0x100 { 162 - len = -2 162 + length = -2 163 163 } else if -0x1000000 <= i && i < -0x100000 { 164 - len = -3 164 + length = -3 165 165 } else if -0x40000000 <= i && i < -0x1000000 { 166 - len = -4 166 + length = -4 167 167 } else { 168 168 return ErrInvalidIntRange 169 169 } 170 170 171 - if err := e.w.WriteByte(byte(len)); err != nil { 171 + if err := e.w.WriteByte(byte(length)); err != nil { 172 172 return err 173 173 } 174 - if len < 0 { 175 - len = -len 174 + if length < 0 { 175 + length = -length 176 176 } 177 177 178 - for c := 0; c < len; c++ { 178 + for c := 0; c < length; c++ { 179 179 if err := e.w.WriteByte(byte(i >> uint(8*c) & 0xff)); err != nil { 180 180 return err 181 181 } ··· 244 244 return err 245 245 } 246 246 247 - len := arr.Len() 247 + length := arr.Len() 248 248 249 - if err := e.marshalIntInternal(int64(len)); err != nil { 249 + if err := e.marshalIntInternal(int64(length)); err != nil { 250 250 return err 251 251 } 252 252 253 - for i := 0; i < len; i++ { 253 + for i := 0; i < length; i++ { 254 254 if err := e.marshal(arr.Index(i).Interface()); err != nil { 255 255 return err 256 256 }
-1
modules/process/manager_stacktraces.go
··· 339 339 } 340 340 sort.Slice(processes, after(processes)) 341 341 if !flat { 342 - 343 342 var sortChildren func(process *Process) 344 343 345 344 sortChildren = func(process *Process) {
-1
modules/repository/temp.go
··· 32 32 if err != nil { 33 33 log.Error("Unable to create temporary directory: %s-*.git (%v)", prefix, err) 34 34 return "", fmt.Errorf("Failed to create dir %s-*.git: %w", prefix, err) 35 - 36 35 } 37 36 return basePath, nil 38 37 }
+1 -2
modules/setting/time.go
··· 19 19 DefaultUILocation, err = time.LoadLocation(zone) 20 20 if err != nil { 21 21 log.Fatal("Load time zone failed: %v", err) 22 - } else { 23 - log.Info("Default UI Location is %v", zone) 24 22 } 23 + log.Info("Default UI Location is %v", zone) 25 24 } 26 25 if DefaultUILocation == nil { 27 26 DefaultUILocation = time.Local
+2 -3
modules/templates/htmlrenderer.go
··· 138 138 if setting.IsProd { 139 139 // in prod mode, Forgejo must have correct templates to run 140 140 log.Fatal("Forgejo can't run with template errors: %s", msg) 141 - } else { 142 - // in dev mode, do not need to really exit, because the template errors could be fixed by developer soon and the templates get reloaded 143 - log.Error("There are template errors but Forgejo continues to run in dev mode: %s", msg) 144 141 } 142 + // in dev mode, do not need to really exit, because the template errors could be fixed by developer soon and the templates get reloaded 143 + log.Error("There are template errors but Forgejo continues to run in dev mode: %s", msg) 145 144 } 146 145 147 146 type templateErrorPrettier struct {
+1 -2
modules/templates/mailer.go
··· 84 84 if err = buildSubjectBodyTemplate(subjectTemplates, bodyTemplates, tmplName, content); err != nil { 85 85 if firstRun { 86 86 log.Fatal("Failed to parse mail template, err: %v", err) 87 - } else { 88 - log.Error("Failed to parse mail template, err: %v", err) 89 87 } 88 + log.Error("Failed to parse mail template, err: %v", err) 90 89 } 91 90 } 92 91 }
+3 -3
modules/util/util_test.go
··· 121 121 } 122 122 123 123 func Test_RandomInt(t *testing.T) { 124 - int, err := CryptoRandomInt(255) 125 - assert.True(t, int >= 0) 126 - assert.True(t, int <= 255) 124 + randInt, err := CryptoRandomInt(255) 125 + assert.True(t, randInt >= 0) 126 + assert.True(t, randInt <= 255) 127 127 assert.NoError(t, err) 128 128 } 129 129
-1
routers/api/actions/artifacts.go
··· 144 144 145 145 var task *actions.ActionTask 146 146 if err == nil { 147 - 148 147 task, err = actions.GetTaskByID(req.Context(), tID) 149 148 if err != nil { 150 149 log.Error("Error runner api getting task by ID: %v", err)
+2 -2
routers/api/packages/alpine/alpine.go
··· 144 144 return 145 145 } 146 146 147 - upload, close, err := ctx.UploadStream() 147 + upload, needToClose, err := ctx.UploadStream() 148 148 if err != nil { 149 149 apiError(ctx, http.StatusInternalServerError, err) 150 150 return 151 151 } 152 - if close { 152 + if needToClose { 153 153 defer upload.Close() 154 154 } 155 155
+2 -2
routers/api/packages/conan/conan.go
··· 310 310 return 311 311 } 312 312 313 - upload, close, err := ctx.UploadStream() 313 + upload, needToClose, err := ctx.UploadStream() 314 314 if err != nil { 315 315 apiError(ctx, http.StatusBadRequest, err) 316 316 return 317 317 } 318 - if close { 318 + if needToClose { 319 319 defer upload.Close() 320 320 } 321 321
+2 -2
routers/api/packages/conda/conda.go
··· 174 174 } 175 175 176 176 func UploadPackageFile(ctx *context.Context) { 177 - upload, close, err := ctx.UploadStream() 177 + upload, needToClose, err := ctx.UploadStream() 178 178 if err != nil { 179 179 apiError(ctx, http.StatusInternalServerError, err) 180 180 return 181 181 } 182 - if close { 182 + if needToClose { 183 183 defer upload.Close() 184 184 } 185 185
+3 -3
routers/api/packages/container/container.go
··· 385 385 } 386 386 return 387 387 } 388 - close := true 388 + doClose := true 389 389 defer func() { 390 - if close { 390 + if doClose { 391 391 uploader.Close() 392 392 } 393 393 }() ··· 427 427 apiError(ctx, http.StatusInternalServerError, err) 428 428 return 429 429 } 430 - close = false 430 + doClose = false 431 431 432 432 if err := container_service.RemoveBlobUploadByID(ctx, uploader.ID); err != nil { 433 433 apiError(ctx, http.StatusInternalServerError, err)
+2 -2
routers/api/packages/cran/cran.go
··· 151 151 } 152 152 153 153 func uploadPackageFile(ctx *context.Context, compositeKey string, properties map[string]string) { 154 - upload, close, err := ctx.UploadStream() 154 + upload, needToClose, err := ctx.UploadStream() 155 155 if err != nil { 156 156 apiError(ctx, http.StatusBadRequest, err) 157 157 return 158 158 } 159 - if close { 159 + if needToClose { 160 160 defer upload.Close() 161 161 } 162 162
+2 -2
routers/api/packages/debian/debian.go
··· 127 127 return 128 128 } 129 129 130 - upload, close, err := ctx.UploadStream() 130 + upload, needToClose, err := ctx.UploadStream() 131 131 if err != nil { 132 132 apiError(ctx, http.StatusInternalServerError, err) 133 133 return 134 134 } 135 - if close { 135 + if needToClose { 136 136 defer upload.Close() 137 137 } 138 138
+2 -2
routers/api/packages/generic/generic.go
··· 90 90 return 91 91 } 92 92 93 - upload, close, err := ctx.UploadStream() 93 + upload, needToClose, err := ctx.UploadStream() 94 94 if err != nil { 95 95 apiError(ctx, http.StatusInternalServerError, err) 96 96 return 97 97 } 98 - if close { 98 + if needToClose { 99 99 defer upload.Close() 100 100 } 101 101
+2 -2
routers/api/packages/goproxy/goproxy.go
··· 154 154 } 155 155 156 156 func UploadPackage(ctx *context.Context) { 157 - upload, close, err := ctx.UploadStream() 157 + upload, needToClose, err := ctx.UploadStream() 158 158 if err != nil { 159 159 apiError(ctx, http.StatusInternalServerError, err) 160 160 return 161 161 } 162 - if close { 162 + if needToClose { 163 163 defer upload.Close() 164 164 } 165 165
+2 -2
routers/api/packages/nuget/nuget.go
··· 594 594 func processUploadedFile(ctx *context.Context, expectedType nuget_module.PackageType) (*nuget_module.Package, *packages_module.HashedBuffer, []io.Closer) { 595 595 closables := make([]io.Closer, 0, 2) 596 596 597 - upload, close, err := ctx.UploadStream() 597 + upload, needToClose, err := ctx.UploadStream() 598 598 if err != nil { 599 599 apiError(ctx, http.StatusBadRequest, err) 600 600 return nil, nil, closables 601 601 } 602 602 603 - if close { 603 + if needToClose { 604 604 closables = append(closables, upload) 605 605 } 606 606
+2 -2
routers/api/packages/rpm/rpm.go
··· 117 117 } 118 118 119 119 func UploadPackageFile(ctx *context.Context) { 120 - upload, close, err := ctx.UploadStream() 120 + upload, needToClose, err := ctx.UploadStream() 121 121 if err != nil { 122 122 apiError(ctx, http.StatusInternalServerError, err) 123 123 return 124 124 } 125 - if close { 125 + if needToClose { 126 126 defer upload.Close() 127 127 } 128 128
+2 -2
routers/api/packages/rubygems/rubygems.go
··· 197 197 198 198 // UploadPackageFile adds a file to the package. If the package does not exist, it gets created. 199 199 func UploadPackageFile(ctx *context.Context) { 200 - upload, close, err := ctx.UploadStream() 200 + upload, needToClose, err := ctx.UploadStream() 201 201 if err != nil { 202 202 apiError(ctx, http.StatusBadRequest, err) 203 203 return 204 204 } 205 - if close { 205 + if needToClose { 206 206 defer upload.Close() 207 207 } 208 208
-1
routers/api/v1/repo/issue.go
··· 217 217 218 218 var includedAnyLabels []int64 219 219 { 220 - 221 220 labels := ctx.FormTrim("labels") 222 221 var includedLabelNames []string 223 222 if len(labels) > 0 {
-1
routers/api/v1/repo/mirror.go
··· 180 180 if err == nil { 181 181 responsePushMirrors = append(responsePushMirrors, m) 182 182 } 183 - 184 183 } 185 184 ctx.SetLinkHeader(len(responsePushMirrors), utils.GetListOptions(ctx).PageSize) 186 185 ctx.SetTotalCountHeader(count)
-2
routers/api/v1/repo/pull.go
··· 1058 1058 isSameRepo = true 1059 1059 headUser = ctx.Repo.Owner 1060 1060 headBranch = headInfos[0] 1061 - 1062 1061 } else if len(headInfos) == 2 { 1063 1062 headUser, err = user_model.GetUserByName(ctx, headInfos[0]) 1064 1063 if err != nil { ··· 1072 1071 headBranch = headInfos[1] 1073 1072 // The head repository can also point to the same repo 1074 1073 isSameRepo = ctx.Repo.Owner.ID == headUser.ID 1075 - 1076 1074 } else { 1077 1075 ctx.NotFound() 1078 1076 return nil, nil, nil, nil, "", ""
-1
routers/api/v1/repo/pull_review.go
··· 875 875 } 876 876 877 877 if ctx.Repo.Repository.Owner.IsOrganization() && len(opts.TeamReviewers) > 0 { 878 - 879 878 teamReviewers := make([]*organization.Team, 0, len(opts.TeamReviewers)) 880 879 for _, t := range opts.TeamReviewers { 881 880 var teamReviewer *organization.Team
-1
routers/api/v1/repo/repo.go
··· 1094 1094 1095 1095 // update MirrorInterval 1096 1096 if opts.MirrorInterval != nil { 1097 - 1098 1097 // MirrorInterval should be a duration 1099 1098 interval, err := time.ParseDuration(*opts.MirrorInterval) 1100 1099 if err != nil {
-1
routers/api/v1/repo/wiki.go
··· 478 478 func findWikiRepoCommit(ctx *context.APIContext) (*git.Repository, *git.Commit) { 479 479 wikiRepo, err := gitrepo.OpenWikiRepository(ctx, ctx.Repo.Repository) 480 480 if err != nil { 481 - 482 481 if git.IsErrNotExist(err) || err.Error() == "no such file or directory" { 483 482 ctx.NotFound(err) 484 483 } else {
-1
routers/private/hook_pre_receive.go
··· 259 259 UserMsg: fmt.Sprintf("branch %s is protected from force push", branchName), 260 260 }) 261 261 return 262 - 263 262 } 264 263 } 265 264
-1
routers/web/repo/actions/view.go
··· 689 689 writer := zip.NewWriter(ctx.Resp) 690 690 defer writer.Close() 691 691 for _, art := range artifacts { 692 - 693 692 f, err := storage.ActionsArtifacts.Open(art.StoragePath) 694 693 if err != nil { 695 694 ctx.Error(http.StatusInternalServerError, err.Error())
-4
routers/web/repo/issue.go
··· 927 927 } 928 928 } 929 929 } 930 - 931 930 } 932 931 933 932 if template.Ref != "" && !strings.HasPrefix(template.Ref, "refs/") { // Assume that the ref intended is always a branch - for tags users should use refs/tags/<ref> ··· 1680 1679 if comment.ProjectID > 0 && comment.Project == nil { 1681 1680 comment.Project = ghostProject 1682 1681 } 1683 - 1684 1682 } else if comment.Type == issues_model.CommentTypeAssignees || comment.Type == issues_model.CommentTypeReviewRequest { 1685 1683 if err = comment.LoadAssigneeUserAndTeam(ctx); err != nil { 1686 1684 ctx.ServerError("LoadAssigneeUserAndTeam", err) ··· 2605 2603 2606 2604 var includedAnyLabels []int64 2607 2605 { 2608 - 2609 2606 labels := ctx.FormTrim("labels") 2610 2607 var includedLabelNames []string 2611 2608 if len(labels) > 0 { ··· 2993 2990 if (ctx.Repo.CanWriteIssuesOrPulls(issue.IsPull) || (ctx.IsSigned && issue.IsPoster(ctx.Doer.ID))) && 2994 2991 (form.Status == "reopen" || form.Status == "close") && 2995 2992 !(issue.IsPull && issue.PullRequest.HasMerged) { 2996 - 2997 2993 // Duplication and conflict check should apply to reopen pull request. 2998 2994 var pr *issues_model.PullRequest 2999 2995
-3
routers/web/repo/pull.go
··· 670 670 } 671 671 672 672 if pb != nil && pb.EnableStatusCheck { 673 - 674 673 var missingRequiredChecks []string 675 674 for _, requiredContext := range pb.StatusCheckContexts { 676 675 contextFound := false ··· 873 872 874 873 // Validate the given commit sha to show (if any passed) 875 874 if willShowSpecifiedCommit || willShowSpecifiedCommitRange { 876 - 877 875 foundStartCommit := len(specifiedStartCommit) == 0 878 876 foundEndCommit := len(specifiedEndCommit) == 0 879 877 ··· 1185 1183 ctx.Flash.Error(flashError) 1186 1184 ctx.Redirect(issue.Link()) 1187 1185 return 1188 - 1189 1186 } 1190 1187 ctx.Flash.Error(err.Error()) 1191 1188 ctx.Redirect(issue.Link())
-1
routers/web/repo/pull_review.go
··· 302 302 303 303 updatedFiles := make(map[string]pull_model.ViewedState, len(data.Files)) 304 304 for file, viewed := range data.Files { 305 - 306 305 // Only unviewed and viewed are possible, has-changed can not be set from the outside 307 306 state := pull_model.Unviewed 308 307 if viewed {
-1
routers/web/repo/view.go
··· 352 352 // or of directory if not in root directory. 353 353 ctx.Data["LatestCommit"] = latestCommit 354 354 if latestCommit != nil { 355 - 356 355 verification := asymkey_model.ParseCommitWithSignature(ctx, latestCommit) 357 356 358 357 if err := asymkey_model.CalculateTrustStatus(verification, ctx.Repo.Repository.GetTrustModel(), func(user *user_model.User) (bool, error) {
-12
routers/web/webfinger.go
··· 102 102 default: 103 103 ctx.Error(http.StatusNotFound) 104 104 return 105 - 106 - } 107 - case 4: 108 - //nolint:gocritic 109 - if parts[3] == "teams" { 110 - ctx.Error(http.StatusNotFound) 111 - return 112 - 113 - } else { 114 - ctx.Error(http.StatusNotFound) 115 - return 116 105 } 117 106 118 107 default: 119 108 ctx.Error(http.StatusNotFound) 120 109 return 121 - 122 110 } 123 111 124 112 default:
+5 -3
services/actions/notifier_helper.go
··· 329 329 TriggerEvent: dwf.TriggerEvent.Name, 330 330 Status: actions_model.StatusWaiting, 331 331 } 332 - if need, err := ifNeedApproval(ctx, run, input.Repo, input.Doer); err != nil { 332 + 333 + need, err := ifNeedApproval(ctx, run, input.Repo, input.Doer) 334 + if err != nil { 333 335 log.Error("check if need approval for repo %d with user %d: %v", input.Repo.ID, input.Doer.ID, err) 334 336 continue 335 - } else { 336 - run.NeedApproval = need 337 337 } 338 + 339 + run.NeedApproval = need 338 340 339 341 if err := run.LoadAttributes(ctx); err != nil { 340 342 log.Error("LoadAttributes: %v", err)
-1
services/auth/source/ldap/source_sync.go
··· 159 159 !strings.EqualFold(usr.Email, su.Mail) || 160 160 usr.FullName != fullName || 161 161 !usr.IsActive { 162 - 163 162 log.Trace("SyncExternalUsers[%s]: Updating user %s", source.authSource.Name, usr.Name) 164 163 165 164 opts := &user_service.UpdateOptions{
-2
services/context/repo.go
··· 848 848 case RepoRefBranch: 849 849 ref := getRefNameFromPath(ctx, repo, path, repo.GitRepo.IsBranchExist) 850 850 if len(ref) == 0 { 851 - 852 851 // check if ref is HEAD 853 852 parts := strings.Split(path, "/") 854 853 if parts[0] == headRefName { ··· 991 990 return cancel 992 991 } 993 992 ctx.Repo.CommitID = ctx.Repo.Commit.ID.String() 994 - 995 993 } else if refType.RefTypeIncludesTags() && ctx.Repo.GitRepo.IsTagExist(refName) { 996 994 ctx.Repo.IsViewTag = true 997 995 ctx.Repo.TagName = refName
-1
services/doctor/packages_nuget.go
··· 51 51 logger.Info("Found %d versions for package %s", len(pvs), pkg.Name) 52 52 53 53 for _, pv := range pvs { 54 - 55 54 pfs, err := packages.GetFilesByVersionID(ctx, pv.ID) 56 55 if err != nil { 57 56 logger.Error("Failed to get files for package version %s %s: %v", pkg.Name, pv.Version, err)
+2 -2
services/gitdiff/gitdiff.go
··· 1040 1040 // diff --git a/b b/b b/b b/b b/b b/b 1041 1041 // 1042 1042 midpoint := (len(line) + len(cmdDiffHead) - 1) / 2 1043 - new, old := line[len(cmdDiffHead):midpoint], line[midpoint+1:] 1044 - if len(new) > 2 && len(old) > 2 && new[2:] == old[2:] { 1043 + newl, old := line[len(cmdDiffHead):midpoint], line[midpoint+1:] 1044 + if len(newl) > 2 && len(old) > 2 && newl[2:] == old[2:] { 1045 1045 curFile.OldName = old[2:] 1046 1046 curFile.Name = old[2:] 1047 1047 }
+4 -5
services/issue/commit.go
··· 117 117 var refIssue *issues_model.Issue 118 118 var err error 119 119 for _, ref := range references.FindAllIssueReferences(c.Message) { 120 - 121 120 // issue is from another repo 122 121 if len(ref.Owner) > 0 && len(ref.Name) > 0 { 123 122 refRepo, err = repo_model.GetRepositoryByOwnerAndName(ctx, ref.Owner, ref.Name) ··· 185 184 continue 186 185 } 187 186 } 188 - close := ref.Action == references.XRefActionCloses 189 - if close && len(ref.TimeLog) > 0 { 187 + isClosed := ref.Action == references.XRefActionCloses 188 + if isClosed && len(ref.TimeLog) > 0 { 190 189 if err := issueAddTime(ctx, refIssue, doer, c.Timestamp, ref.TimeLog); err != nil { 191 190 return err 192 191 } 193 192 } 194 - if close != refIssue.IsClosed { 193 + if isClosed != refIssue.IsClosed { 195 194 refIssue.Repo = refRepo 196 - if err := ChangeStatus(ctx, refIssue, doer, c.Sha1, close); err != nil { 195 + if err := ChangeStatus(ctx, refIssue, doer, c.Sha1, isClosed); err != nil { 197 196 return err 198 197 } 199 198 }
-1
services/migrations/gitea_downloader.go
··· 410 410 return nil, false, fmt.Errorf("error while listing issues: %w", err) 411 411 } 412 412 for _, issue := range issues { 413 - 414 413 labels := make([]*base.Label, 0, len(issue.Labels)) 415 414 for i := range issue.Labels { 416 415 labels = append(labels, g.convertGiteaLabel(issue.Labels[i]))
-1
services/migrations/gitlab.go
··· 421 421 return nil, false, fmt.Errorf("error while listing issues: %w", err) 422 422 } 423 423 for _, issue := range issues { 424 - 425 424 labels := make([]*base.Label, 0, len(issue.Labels)) 426 425 for _, l := range issue.Labels { 427 426 labels = append(labels, &base.Label{
+3 -4
services/mirror/mirror_pull.go
··· 523 523 theCommits.Commits = theCommits.Commits[:setting.UI.FeedMaxCommitNum] 524 524 } 525 525 526 - if newCommit, err := gitRepo.GetCommit(newCommitID); err != nil { 526 + newCommit, err := gitRepo.GetCommit(newCommitID) 527 + if err != nil { 527 528 log.Error("SyncMirrors [repo: %-v]: unable to get commit %s: %v", m.Repo, newCommitID, err) 528 529 continue 529 - } else { 530 - theCommits.HeadCommit = repo_module.CommitToPushCommit(newCommit) 531 530 } 532 531 532 + theCommits.HeadCommit = repo_module.CommitToPushCommit(newCommit) 533 533 theCommits.CompareURL = m.Repo.ComposeCompareURL(oldCommitID, newCommitID) 534 534 535 535 notify_service.SyncPushCommits(ctx, m.Repo.MustOwner(ctx), m.Repo, &repo_module.PushUpdateOptions{ ··· 557 557 log.Error("SyncMirrors [repo: %-v]: unable to update repository 'updated_unix': %v", m.Repo, err) 558 558 return false 559 559 } 560 - 561 560 } 562 561 563 562 log.Trace("SyncMirrors [repo: %-v]: Successfully updated", m.Repo)
+3 -3
services/pull/merge.go
··· 237 237 if err = ref.Issue.LoadRepo(ctx); err != nil { 238 238 return err 239 239 } 240 - close := ref.RefAction == references.XRefActionCloses 241 - if close != ref.Issue.IsClosed { 242 - if err = issue_service.ChangeStatus(ctx, ref.Issue, doer, pr.MergedCommitID, close); err != nil { 240 + isClosed := ref.RefAction == references.XRefActionCloses 241 + if isClosed != ref.Issue.IsClosed { 242 + if err = issue_service.ChangeStatus(ctx, ref.Issue, doer, pr.MergedCommitID, isClosed); err != nil { 243 243 // Allow ErrDependenciesLeft 244 244 if !issues_model.IsErrDependenciesLeft(err) { 245 245 return err
-1
services/pull/pull.go
··· 803 803 if err != nil { 804 804 log.Error("Unable to get commits between: %s %s Error: %v", pr.HeadBranch, pr.MergeBase, err) 805 805 return "" 806 - 807 806 } 808 807 if len(commits) == 0 { 809 808 break
-1
services/repository/adopt.go
··· 357 357 return err 358 358 } 359 359 repoNamesToCheck = repoNamesToCheck[:0] 360 - 361 360 } 362 361 return filepath.SkipDir 363 362 }); err != nil {
-1
services/repository/contributors_graph.go
··· 194 194 Stats: &commitStats, 195 195 } 196 196 extendedCommitStats = append(extendedCommitStats, res) 197 - 198 197 } 199 198 _ = stdoutReader.Close() 200 199 return nil
-2
services/repository/files/update.go
··· 208 208 return nil, fmt.Errorf("ConvertToSHA1: Invalid last commit ID: %w", err) 209 209 } 210 210 opts.LastCommitID = lastCommitID.String() 211 - 212 211 } 213 212 214 213 for _, file := range opts.Files { ··· 360 359 Path: file.Options.treePath, 361 360 } 362 361 } 363 - 364 362 } 365 363 } 366 364
-1
services/user/delete.go
··· 106 106 107 107 if purge || (setting.Service.UserDeleteWithCommentsMaxTime != 0 && 108 108 u.CreatedUnix.AsTime().Add(setting.Service.UserDeleteWithCommentsMaxTime).After(time.Now())) { 109 - 110 109 // Delete Comments 111 110 const batchSize = 50 112 111 for {
+3 -3
services/user/update_test.go
··· 94 94 assert.NoError(t, unittest.PrepareTestDatabase()) 95 95 96 96 user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 28}) 97 - copy := *user 97 + userCopy := *user 98 98 99 99 assert.NoError(t, UpdateAuth(db.DefaultContext, user, &UpdateAuthOptions{ 100 100 LoginName: optional.Some("new-login"), ··· 106 106 MustChangePassword: optional.Some(true), 107 107 })) 108 108 assert.True(t, user.MustChangePassword) 109 - assert.NotEqual(t, copy.Passwd, user.Passwd) 110 - assert.NotEqual(t, copy.Salt, user.Salt) 109 + assert.NotEqual(t, userCopy.Passwd, user.Passwd) 110 + assert.NotEqual(t, userCopy.Salt, user.Salt) 111 111 112 112 assert.NoError(t, UpdateAuth(db.DefaultContext, user, &UpdateAuthOptions{ 113 113 ProhibitLogin: optional.Some(true),
-2
services/webhook/discord.go
··· 304 304 305 305 func parseHookPullRequestEventType(event webhook_module.HookEventType) (string, error) { 306 306 switch event { 307 - 308 307 case webhook_module.HookEventPullRequestReviewApproved: 309 308 return "approved", nil 310 309 case webhook_module.HookEventPullRequestReviewRejected: 311 310 return "rejected", nil 312 311 case webhook_module.HookEventPullRequestReviewComment: 313 312 return "comment", nil 314 - 315 313 default: 316 314 return "", errors.New("unknown event type") 317 315 }
-1
services/webhook/matrix.go
··· 217 217 if i < len(p.Commits)-1 { 218 218 text += "<br>" 219 219 } 220 - 221 220 } 222 221 223 222 return m.newPayload(text, p.Commits...)
+4 -2
tests/e2e/e2e_test.go
··· 106 106 cmd := exec.Command(runArgs[0], runArgs...) 107 107 cmd.Env = os.Environ() 108 108 cmd.Env = append(cmd.Env, fmt.Sprintf("GITEA_URL=%s", setting.AppURL)) 109 + 109 110 var stdout, stderr bytes.Buffer 110 111 cmd.Stdout = &stdout 111 112 cmd.Stderr = &stderr 113 + 112 114 err := cmd.Run() 113 115 if err != nil { 114 116 // Currently colored output is conflicting. Using Printf until that is resolved. 115 117 fmt.Printf("%v", stdout.String()) 116 118 fmt.Printf("%v", stderr.String()) 117 119 log.Fatal("Playwright Failed: %s", err) 118 - } else { 119 - fmt.Printf("%v", stdout.String()) 120 120 } 121 + 122 + fmt.Printf("%v", stdout.String()) 121 123 }) 122 124 }) 123 125 }
+5 -5
tests/integration/api_notification_test.go
··· 111 111 112 112 MakeRequest(t, NewRequest(t, "GET", "/api/v1/notifications/new"), http.StatusUnauthorized) 113 113 114 - new := struct { 114 + newStruct := struct { 115 115 New int64 `json:"new"` 116 116 }{} 117 117 ··· 119 119 req = NewRequest(t, "GET", "/api/v1/notifications/new"). 120 120 AddTokenAuth(token) 121 121 resp = MakeRequest(t, req, http.StatusOK) 122 - DecodeJSON(t, resp, &new) 123 - assert.True(t, new.New > 0) 122 + DecodeJSON(t, resp, &newStruct) 123 + assert.True(t, newStruct.New > 0) 124 124 125 125 // -- mark notifications as read -- 126 126 req = NewRequest(t, "GET", "/api/v1/notifications?status-types=unread"). ··· 153 153 req = NewRequest(t, "GET", "/api/v1/notifications/new"). 154 154 AddTokenAuth(token) 155 155 resp = MakeRequest(t, req, http.StatusOK) 156 - DecodeJSON(t, resp, &new) 157 - assert.True(t, new.New == 0) 156 + DecodeJSON(t, resp, &newStruct) 157 + assert.True(t, newStruct.New == 0) 158 158 } 159 159 160 160 func TestAPINotificationPUT(t *testing.T) {
-1
tests/integration/api_packages_alpine_test.go
··· 480 480 req = NewRequest(t, "DELETE", fmt.Sprintf("%s/%s/%s/x86_64/%s-%s.apk", rootURL, branch, repository, pkg, packageVersion)). 481 481 AddBasicAuth(user.Name) 482 482 MakeRequest(t, req, http.StatusNoContent) 483 - 484 483 } 485 484 // Deleting the last file of an architecture should remove that index 486 485 req := NewRequest(t, "GET", fmt.Sprintf("%s/%s/%s/x86_64/APKINDEX.tar.gz", rootURL, branch, repository))
-1
tests/integration/pull_status_test.go
··· 71 71 72 72 // Update commit status, and check if icon is updated as well 73 73 for _, status := range statusList { 74 - 75 74 // Call API to add status for commit 76 75 t.Run("CreateStatus", doAPICreateCommitStatus(testCtx, commitID, api.CreateStatusOption{ 77 76 State: status,