nixpkgs mirror (for testing)
github.com/NixOS/nixpkgs
nix
module.exports = async ({ github, context, core, dry }) => {
  const path = require('node:path')
  const { DefaultArtifactClient } = require('@actions/artifact')
  const { readFile, writeFile } = require('node:fs/promises')
  const withRateLimit = require('./withRateLimit.js')
  const { classify } = require('../supportedBranches.js')
  const { handleMerge } = require('./merge.js')
  const { handleReviewers } = require('./reviewers.js')

  // Client used to download/upload workflow artifacts (maintainer maps,
  // eval comparisons and the pagination cursor).
  const artifactClient = new DefaultArtifactClient()

  // Detect if running in a fork (not NixOS/nixpkgs)
  const isFork = context.repo.owner !== 'NixOS'

  // Numeric ID of the owning organization; needed for the
  // /organizations/{orgId}/team/{id} REST route used further below.
  const orgId = (
    await github.rest.orgs.get({
      org: context.repo.owner,
    })
  ).data.id
21 async function downloadMaintainerMap(branch) {
22 let run
23
24 const commits = (
25 await github.rest.repos.listCommits({
26 ...context.repo,
27 sha: branch,
28 // We look at 10 commits to find a maintainer map, but this is an arbitrary number. The
29 // head commit might not have a map, if the queue was bypassed to merge it. This happens
30 // frequently on staging-esque branches. The branch with the highest chance of getting
31 // 10 consecutive bypassing commits is the stable staging-next branch. Luckily, this
32 // also means that the number of PRs open towards that branch is very low, so falling
33 // back to slightly imprecise maintainer data from master only has a marginal effect.
34 per_page: 10,
35 })
36 ).data
37
38 for (const commit of commits) {
39 const run = (
40 await github.rest.actions.listWorkflowRuns({
41 ...context.repo,
42 workflow_id: 'merge-group.yml',
43 status: 'success',
44 exclude_pull_requests: true,
45 per_page: 1,
46 head_sha: commit.sha,
47 })
48 ).data.workflow_runs[0]
49 if (!run) continue
50
51 const artifact = (
52 await github.rest.actions.listWorkflowRunArtifacts({
53 ...context.repo,
54 run_id: run.id,
55 name: 'maintainers',
56 })
57 ).data.artifacts[0]
58 if (!artifact || artifact.expired) continue
59
60 await artifactClient.downloadArtifact(artifact.id, {
61 findBy: {
62 repositoryName: context.repo.repo,
63 repositoryOwner: context.repo.owner,
64 token: core.getInput('github-token'),
65 },
66 path: path.resolve(path.join('branches', branch)),
67 expectedHash: artifact.digest,
68 })
69
70 return JSON.parse(
71 await readFile(
72 path.resolve(path.join('branches', branch, 'maintainers.json')),
73 'utf-8',
74 ),
75 )
76 }
77
78 // We get here when none of the 10 commits we looked at contained a maintainer map.
79 // For the master branch, we don't have any fallback options, so we error out.
80 // In forks without merge-group history, return empty map to allow testing.
81 if (branch === 'master') {
82 if (isFork) {
83 core.warning(
84 'No maintainer map found. Using empty map (expected in forks without merge-group history).',
85 )
86 return {}
87 }
88 throw new Error('No maintainer map found.')
89 }
90
91 // For other branches, we select a suitable fallback below.
92 const { stable, version } = classify(branch)
93
94 const release = `release-${version}`
95 if (stable && branch !== release) {
96 // Only fallback to the release branch from *other* stable branches.
97 // Explicitly avoids infinite recursion.
98 return await getMaintainerMap(release)
99 } else {
100 // Falling back to master as last resort.
101 // This can either be the case for unstable staging-esque or wip branches,
102 // or for the primary stable branch (release-XX.YY).
103 return await getMaintainerMap('master')
104 }
105 }
106
107 // Simple cache for maintainer maps to avoid downloading the same artifacts
108 // over and over again. Ultimately returns a promise, so the result must be
109 // awaited for.
110 const maintainerMaps = {}
111 function getMaintainerMap(branch) {
112 if (!maintainerMaps[branch]) {
113 maintainerMaps[branch] = downloadMaintainerMap(branch)
114 }
115 return maintainerMaps[branch]
116 }
117
118 // Caching the list of team members saves API requests when running the bot on the schedule and
119 // processing many PRs at once.
120 const members = {}
121 function getTeamMembers(team_slug) {
122 if (context.eventName === 'pull_request') {
123 // We have no chance of getting a token in the pull_request context with the right
124 // permissions to access the members endpoint below. Thus, we're pretending to have
125 // no members. This is OK; because this is only for the Test workflow, not for
126 // real use.
127 return []
128 }
129
130 // Forks don't have NixOS teams, return empty list
131 if (isFork) {
132 return []
133 }
134
135 if (!members[team_slug]) {
136 members[team_slug] = github.paginate(github.rest.teams.listMembersInOrg, {
137 org: context.repo.owner,
138 team_slug,
139 per_page: 100,
140 })
141 }
142
143 return members[team_slug]
144 }
145
146 // Caching users saves API requests when running the bot on the schedule and processing
147 // many PRs at once. It also helps to encapsulate the special logic we need, because
148 // actions/github doesn't support that endpoint fully, yet.
149 const users = {}
150 function getUser(id) {
151 if (!users[id]) {
152 users[id] = github
153 .request({
154 method: 'GET',
155 url: '/user/{id}',
156 id,
157 })
158 .then((resp) => resp.data)
159 .catch((e) => {
160 // User may have deleted their account
161 if (e.status === 404) return null
162 throw e
163 })
164 }
165
166 return users[id]
167 }
168
169 // Same for teams
170 const teams = {}
171 function getTeam(id) {
172 if (!teams[id]) {
173 teams[id] = github
174 .request({
175 method: 'GET',
176 url: '/organizations/{orgId}/team/{id}',
177 orgId,
178 id,
179 })
180 .then((resp) => resp.data)
181 .catch((e) => {
182 // Team may have been deleted
183 if (e.status === 404) return null
184 throw e
185 })
186 }
187
188 return teams[id]
189 }
190
191 async function handlePullRequest({ item, stats, events }) {
192 const log = (k, v) => core.info(`PR #${item.number} - ${k}: ${v}`)
193
194 const pull_number = item.number
195
196 // This API request is important for the merge-conflict label, because it triggers the
197 // creation of a new test merge commit. This is needed to actually determine the state of a PR.
198 const pull_request = (
199 await github.rest.pulls.get({
200 ...context.repo,
201 pull_number,
202 })
203 ).data
204
205 log('author', pull_request.user?.login)
206
207 const maintainers = await getMaintainerMap(pull_request.base.ref)
208
209 const merge_bot_eligible = await handleMerge({
210 github,
211 context,
212 core,
213 log,
214 dry,
215 pull_request,
216 events,
217 maintainers,
218 getTeamMembers,
219 getUser,
220 })
221
222 // Check for any human reviews other than the PR author, GitHub actions and other GitHub apps.
223 const reviews = (
224 await github.graphql(
225 `query($owner: String!, $repo: String!, $pr: Int!) {
226 repository(owner: $owner, name: $repo) {
227 pullRequest(number: $pr) {
228 # Unlikely that there's ever more than 100 reviews, so let's not bother,
229 # but once https://github.com/actions/github-script/issues/309 is resolved,
230 # it would be easy to enable pagination.
231 reviews(first: 100) {
232 nodes {
233 state
234 user: author {
235 # Only get users, no bots
236 ... on User {
237 login
238 # Set the id field in the resulting JSON to GraphQL's databaseId
239 # databaseId in GraphQL-land is the same as id in REST-land
240 id: databaseId
241 }
242 }
243 onBehalfOf(first: 100) {
244 nodes {
245 slug
246 }
247 }
248 }
249 }
250 }
251 }
252 }`,
253 {
254 owner: context.repo.owner,
255 repo: context.repo.repo,
256 pr: pull_number,
257 },
258 )
259 ).repository.pullRequest.reviews.nodes.filter(
260 (r) =>
261 // The `... on User` makes it such that .login only exists for users,
262 // but we still need to filter the others out.
263 // Accounts could be deleted as well, so don't count them.
264 r.user?.login &&
265 // Also exclude author reviews, can't request their review in any case
266 r.user.id !== pull_request.user?.id,
267 )
268
269 const approvals = new Set(
270 reviews
271 .filter((review) => review.state === 'APPROVED')
272 .map((review) => review.user?.id),
273 )
274
275 // After creation of a Pull Request, `merge_commit_sha` will be null initially:
276 // The very first merge commit will only be calculated after a little while.
277 // To avoid labeling the PR as conflicted before that, we wait a few minutes.
278 // This is intentionally less than the time that Eval takes, so that the label job
279 // running after Eval can indeed label the PR as conflicted if that is the case.
280 const merge_commit_sha_valid =
281 Date.now() - new Date(pull_request.created_at) > 3 * 60 * 1000
282
283 const prLabels = {
284 // We intentionally don't use the mergeable or mergeable_state attributes.
285 // Those have an intermediate state while the test merge commit is created.
286 // This doesn't work well for us, because we might have just triggered another
287 // test merge commit creation by request the pull request via API at the start
288 // of this function.
289 // The attribute merge_commit_sha keeps the old value of null or the hash *until*
290 // the new test merge commit has either successfully been created or failed so.
291 // This essentially means we are updating the merge conflict label in two steps:
292 // On the first pass of the day, we just fetch the pull request, which triggers
293 // the creation. At this stage, the label is likely not updated, yet.
294 // The second pass will then read the result from the first pass and set the label.
295 '2.status: merge conflict':
296 merge_commit_sha_valid && !pull_request.merge_commit_sha,
297 '2.status: merge-bot eligible': merge_bot_eligible,
298 '12.approvals: 1': approvals.size === 1,
299 '12.approvals: 2': approvals.size === 2,
300 '12.approvals: 3+': approvals.size >= 3,
301 '12.first-time contribution': [
302 'NONE',
303 'FIRST_TIMER',
304 'FIRST_TIME_CONTRIBUTOR',
305 ].includes(pull_request.author_association),
306 }
307
308 const { id: run_id, conclusion } =
309 (
310 await github.rest.actions.listWorkflowRuns({
311 ...context.repo,
312 workflow_id: 'pull-request-target.yml',
313 event: 'pull_request_target',
314 exclude_pull_requests: true,
315 head_sha: pull_request.head.sha,
316 })
317 ).data.workflow_runs[0] ??
318 // TODO: Remove this after 2026-02-01, at which point all pr.yml artifacts will have expired.
319 (
320 await github.rest.actions.listWorkflowRuns({
321 ...context.repo,
322 // In older PRs, we need pr.yml instead of pull-request-target.yml.
323 workflow_id: 'pr.yml',
324 event: 'pull_request_target',
325 exclude_pull_requests: true,
326 head_sha: pull_request.head.sha,
327 })
328 ).data.workflow_runs[0] ??
329 {}
330
331 // Newer PRs might not have run Eval to completion, yet.
332 // Older PRs might not have an eval.yml workflow, yet.
333 // In either case we continue without fetching an artifact on a best-effort basis.
334 log('Last eval run', run_id ?? '<n/a>')
335
336 if (conclusion === 'success') {
337 Object.assign(prLabels, {
338 // We only set this label if the latest eval run was successful, because if it was not, it
339 // *could* have requested reviewers. We will let the PR author fix CI first, before "escalating"
340 // this PR to "needs: reviewer".
341 // Since the first Eval run on a PR always sets rebuild labels, the same PR will be "recently
342 // updated" for the next scheduled run. Thus, this label will still be set within a few minutes
343 // after a PR is created, if required.
344 // Note that a "requested reviewer" disappears once they have given a review, so we check
345 // existing reviews, too.
346 '9.needs: reviewer':
347 !pull_request.draft &&
348 pull_request.requested_reviewers.length === 0 &&
349 reviews.length === 0,
350 })
351 }
352
353 const artifact =
354 run_id &&
355 (
356 await github.rest.actions.listWorkflowRunArtifacts({
357 ...context.repo,
358 run_id,
359 name: 'comparison',
360 })
361 ).data.artifacts[0]
362
363 // Instead of checking the boolean artifact.expired, we will give us a minute to
364 // actually download the artifact in the next step and avoid that race condition.
365 // Older PRs, where the workflow run was already eval.yml, but the artifact was not
366 // called "comparison", yet, will skip the download.
367 const expired =
368 !artifact ||
369 new Date(artifact?.expires_at ?? 0) < new Date(Date.now() + 60 * 1000)
370 log('Artifact expires at', artifact?.expires_at ?? '<n/a>')
371 if (!expired) {
372 stats.artifacts++
373
374 await artifactClient.downloadArtifact(artifact.id, {
375 findBy: {
376 repositoryName: context.repo.repo,
377 repositoryOwner: context.repo.owner,
378 token: core.getInput('github-token'),
379 },
380 path: path.resolve(pull_number.toString()),
381 expectedHash: artifact.digest,
382 })
383
384 const changedPaths = JSON.parse(
385 await readFile(`${pull_number}/changed-paths.json`, 'utf-8'),
386 )
387 const evalLabels = changedPaths.labels
388
389 // Fetch all PR commits to check their messages for package patterns
390 const prCommits = await github.paginate(github.rest.pulls.listCommits, {
391 ...context.repo,
392 pull_number,
393 per_page: 100,
394 })
395 const commitSubjects = prCommits.map(
396 (c) => c.commit.message.split('\n')[0],
397 )
398
399 // Label new package PRs: "packagename: init at X.Y.Z"
400 // Exclude NixOS module commits like "nixos/timekpr: init at 0.5.8"
401 const newPackagePattern = /^(?<!nixos\/)\S+: init at\b/
402 const hasNewPackages = changedPaths.attrdiff?.added?.length > 0
403 const commitsIndicateNewPackage = commitSubjects.some((msg) =>
404 newPackagePattern.test(msg),
405 )
406 evalLabels['8.has: package (new)'] =
407 hasNewPackages && commitsIndicateNewPackage
408
409 // Label package update PRs: "packagename: X.Y.Z -> A.B.C"
410 // Matches versions like: 1.2.3, 0-unstable-2024-01-15, 1.3rc1, alpha, unstable
411 // Exclude NixOS module commits like "nixos/ncps: types.str -> types.path"
412 const updatePackagePattern =
413 /^(?<!nixos\/)\S+: [\w.-]*\d[\w.-]* (->|→) [\w.-]*\d[\w.-]*$/
414 const commitsIndicateUpdate = commitSubjects.some((msg) =>
415 updatePackagePattern.test(msg),
416 )
417 evalLabels['8.has: package (update)'] = commitsIndicateUpdate
418
419 // TODO: Get "changed packages" information from list of changed by-name files
420 // in addition to just the Eval results, to make this work for these packages
421 // when Eval results have expired as well.
422 let packages
423 try {
424 packages = JSON.parse(
425 await readFile(`${pull_number}/packages.json`, 'utf-8'),
426 )
427 } catch (e) {
428 if (e.code !== 'ENOENT') throw e
429 // TODO: Remove this fallback code once all old artifacts without packages.json
430 // have expired. This should be the case in ~ February 2026.
431 packages = Array.from(
432 new Set(
433 Object.values(
434 JSON.parse(
435 await readFile(`${pull_number}/maintainers.json`, 'utf-8'),
436 ),
437 ).flat(1),
438 ),
439 )
440 }
441
442 Object.assign(prLabels, evalLabels, {
443 '11.by: package-maintainer':
444 Boolean(packages.length) &&
445 packages.every((pkg) =>
446 maintainers[pkg]?.includes(pull_request.user.id),
447 ),
448 '12.approved-by: package-maintainer': packages.some((pkg) =>
449 maintainers[pkg]?.some((m) => approvals.has(m)),
450 ),
451 })
452
453 if (!pull_request.draft) {
454 let owners = []
455 try {
456 // TODO: Create owner map similar to maintainer map.
457 owners = (await readFile(`${pull_number}/owners.txt`, 'utf-8')).split(
458 '\n',
459 )
460 } catch (e) {
461 // Older artifacts don't have the owners.txt, yet.
462 if (e.code !== 'ENOENT') throw e
463 }
464
465 let team_maintainers = []
466 try {
467 team_maintainers = Object.keys(
468 JSON.parse(await readFile(`${pull_number}/teams.json`, 'utf-8')),
469 ).map((id) => parseInt(id))
470 } catch (e) {
471 // Older artifacts don't have the teams.json, yet.
472 if (e.code !== 'ENOENT') throw e
473 }
474
475 // We set this label earlier already, but the current PR state can be very different
476 // after handleReviewers has requested reviews, so update it in this case to prevent
477 // this label from flip-flopping.
478 prLabels['9.needs: reviewer'] = await handleReviewers({
479 github,
480 context,
481 core,
482 log,
483 dry,
484 pull_request,
485 reviews,
486 // TODO: Use maintainer map instead of the artifact.
487 user_maintainers: Object.keys(
488 JSON.parse(
489 await readFile(`${pull_number}/maintainers.json`, 'utf-8'),
490 ),
491 ).map((id) => parseInt(id)),
492 team_maintainers,
493 owners,
494 getUser,
495 getTeam,
496 })
497 }
498 }
499
500 return prLabels
501 }
502
503 // Returns true if the issue was closed. In this case, the labeling does not need to
504 // continue for this issue. Returns false if no action was taken.
505 async function handleAutoClose(item) {
506 const issue_number = item.number
507
508 if (item.labels.some(({ name }) => name === '0.kind: packaging request')) {
509 const body = [
510 'Thank you for your interest in packaging new software in Nixpkgs. Unfortunately, to mitigate the unsustainable growth of unmaintained packages, **Nixpkgs is no longer accepting package requests** via Issues.',
511 '',
512 'As a [volunteer community][community], we are always open to new contributors. If you wish to see this package in Nixpkgs, **we encourage you to [contribute] it yourself**, via a Pull Request. Anyone can [become a package maintainer][maintainers]! You can find language-specific packaging information in the [Nixpkgs Manual][nixpkgs]. Should you need any help, please reach out to the community on [Matrix] or [Discourse].',
513 '',
514 '[community]: https://nixos.org/community',
515 '[contribute]: https://github.com/NixOS/nixpkgs/blob/master/pkgs/README.md#quick-start-to-adding-a-package',
516 '[maintainers]: https://github.com/NixOS/nixpkgs/blob/master/maintainers/README.md',
517 '[nixpkgs]: https://nixos.org/manual/nixpkgs/unstable/',
518 '[Matrix]: https://matrix.to/#/#dev:nixos.org',
519 '[Discourse]: https://discourse.nixos.org/c/dev/14',
520 ].join('\n')
521
522 core.info(`Issue #${item.number}: auto-closed`)
523
524 if (!dry) {
525 await github.rest.issues.createComment({
526 ...context.repo,
527 issue_number,
528 body,
529 })
530
531 await github.rest.issues.update({
532 ...context.repo,
533 issue_number,
534 state: 'closed',
535 state_reason: 'not_planned',
536 })
537 }
538
539 return true
540 }
541 return false
542 }
543
  // Processes a single issue or PR: computes staleness from timeline events,
  // delegates to handlePullRequest (PRs) or handleAutoClose (issues), and
  // finally reconciles the labels on the item. Any failure is rethrown
  // wrapped with the item number for easier debugging.
  async function handle({ item, stats }) {
    try {
      // log() returns `skip` so it can double as a guard:
      // `if (log(..., skip)) return`.
      const log = (k, v, skip) => {
        core.info(`#${item.number} - ${k}: ${v}${skip ? ' (skipped)' : ''}`)
        return skip
      }

      log('Last updated at', item.updated_at)
      log('URL', item.html_url)

      const issue_number = item.number

      const itemLabels = {}

      const events = await github.paginate(
        github.rest.issues.listEventsForTimeline,
        {
          ...context.repo,
          issue_number,
          per_page: 100,
        },
      )

      // Timestamp of the most recent "activity" event; falls back to the
      // item's creation date when no qualifying event exists.
      const latest_event_at = new Date(
        events
          .filter(({ event }) =>
            [
              // These events are hand-picked from:
              // https://docs.github.com/en/rest/using-the-rest-api/issue-event-types?apiVersion=2022-11-28
              // Each of those causes a PR/issue to *not* be considered as stale anymore.
              // Most of these use created_at.
              'assigned',
              'commented', // uses updated_at, because that could be > created_at
              'committed', // uses committer.date
              ...(item.labels.some(({ name }) => name === '5.scope: tracking')
                ? ['cross-referenced']
                : []),
              'head_ref_force_pushed',
              'milestoned',
              'pinned',
              'ready_for_review',
              'renamed',
              'reopened',
              'review_dismissed',
              'review_requested',
              'reviewed', // uses submitted_at
              'unlocked',
              'unmarked_as_duplicate',
            ].includes(event),
          )
          .map(
            ({ created_at, updated_at, committer, submitted_at }) =>
              new Date(
                updated_at ?? created_at ?? submitted_at ?? committer.date,
              ),
          )
          // Reverse sort by date value. The default sort() sorts by string representation, which is bad for dates.
          .sort((a, b) => b - a)
          .at(0) ?? item.created_at,
      )
      log('latest_event_at', latest_event_at.toISOString())

      // Items without qualifying activity in the last 180 days count as stale.
      const stale_at = new Date(new Date().setDate(new Date().getDate() - 180))
      const is_stale = latest_event_at < stale_at

      if (item.pull_request || context.payload.pull_request) {
        // No need to compute merge commits for stale PRs over and over again.
        // This increases the repo size on GitHub's side unnecessarily and wastes
        // a lot of API requests, too. Any relevant change will result in the
        // stale status to change and thus pick up the PR again for labeling.
        if (!is_stale) {
          stats.prs++
          Object.assign(
            itemLabels,
            await handlePullRequest({ item, stats, events }),
          )
        }
      } else {
        stats.issues++
        if (item.labels.some(({ name }) => name === '4.workflow: auto-close')) {
          // If this returns true, the issue was closed. In this case we return, to not
          // label the issue anymore. Most importantly this avoids unlabeling stale issues
          // which are closed via auto-close.
          if (await handleAutoClose(item)) return
        }
      }

      // Create a map (Label -> Boolean) of all currently set labels.
      // Each label is set to True and can be disabled later.
      const before = Object.fromEntries(
        (
          await github.paginate(github.rest.issues.listLabelsOnIssue, {
            ...context.repo,
            issue_number,
          })
        ).map(({ name }) => [name, true]),
      )

      // Security issues are never marked stale.
      Object.assign(itemLabels, {
        '2.status: stale': !before['1.severity: security'] && is_stale,
      })

      const after = Object.assign({}, before, itemLabels)

      // No need for an API request, if all labels are the same.
      const hasChanges = Object.keys(after).some(
        (name) => (before[name] ?? false) !== after[name],
      )
      if (log('Has label changes', hasChanges, !hasChanges)) return

      // Skipping labeling on a pull_request event, because we have no privileges.
      const labels = Object.entries(after)
        .filter(([, value]) => value)
        .map(([name]) => name)
      if (log('Set labels', labels, dry)) return

      await github.rest.issues.setLabels({
        ...context.repo,
        issue_number,
        labels,
      })
    } catch (cause) {
      throw new Error(`Labeling #${item.number} failed.`, { cause })
    }
  }
669
  // Controls level of parallelism. Applies to both the number of concurrent requests
  // as well as the number of concurrent workers going through the list of PRs.
  // We'll only boost concurrency when we're running many PRs in parallel on a schedule,
  // but not for single PRs. This avoids things going wild, when we accidentally make
  // too many API requests on treewides.
  const maxConcurrent = context.payload.pull_request ? 1 : 20

  await withRateLimit({ github, core, maxConcurrent }, async (stats) => {
    if (context.payload.pull_request) {
      // Single-PR mode: triggered by a pull_request event.
      await handle({ item: context.payload.pull_request, stats })
    } else {
      // Scheduled mode: process everything updated since the last successful
      // scheduled run, plus one cursor-paginated slice of all open items.
      const lastRun = (
        await github.rest.actions.listWorkflowRuns({
          ...context.repo,
          workflow_id: 'bot.yml',
          event: 'schedule',
          status: 'success',
          exclude_pull_requests: true,
          per_page: 1,
        })
      ).data.workflow_runs[0]

      const cutoff = new Date(
        Math.max(
          // Go back as far as the last successful run of this workflow to make sure
          // we are not leaving anyone behind on GHA failures.
          // Defaults to go back 1 hour on the first run.
          new Date(
            lastRun?.created_at ?? Date.now() - 1 * 60 * 60 * 1000,
          ).getTime(),
          // Go back max. 1 day to prevent hitting all API rate limits immediately,
          // when GH API returns a wrong workflow by accident.
          Date.now() - 24 * 60 * 60 * 1000,
        ),
      )
      core.info(`cutoff timestamp: ${cutoff.toISOString()}`)

      const updatedItems = await github.paginate(
        github.rest.search.issuesAndPullRequests,
        {
          q: [
            `repo:"${context.repo.owner}/${context.repo.repo}"`,
            'is:open',
            `updated:>=${cutoff.toISOString()}`,
          ].join(' AND '),
          per_page: 100,
          // TODO: Remove after 2025-11-04, when it becomes the default.
          advanced_search: true,
        },
      )

      let cursor

      // No workflow run available the first time.
      if (lastRun) {
        // The cursor to iterate through the full list of issues and pull requests
        // is passed between jobs as an artifact.
        const artifact = (
          await github.rest.actions.listWorkflowRunArtifacts({
            ...context.repo,
            run_id: lastRun.id,
            name: 'pagination-cursor',
          })
        ).data.artifacts[0]

        // If the artifact is not available, the next iteration starts at the beginning.
        if (artifact && !artifact.expired) {
          stats.artifacts++

          const { downloadPath } = await artifactClient.downloadArtifact(
            artifact.id,
            {
              findBy: {
                repositoryName: context.repo.repo,
                repositoryOwner: context.repo.owner,
                token: core.getInput('github-token'),
              },
              expectedHash: artifact.digest,
            },
          )

          cursor = await readFile(path.resolve(downloadPath, 'cursor'), 'utf-8')
        }
      }

      // From GitHub's API docs:
      // GitHub's REST API considers every pull request an issue, but not every issue is a pull request.
      // For this reason, "Issues" endpoints may return both issues and pull requests in the response.
      // You can identify pull requests by the pull_request key.
      const allItems = await github.rest.issues.listForRepo({
        ...context.repo,
        state: 'open',
        sort: 'created',
        direction: 'asc',
        per_page: 100,
        after: cursor,
      })

      // Regex taken and comment adjusted from:
      // https://github.com/octokit/plugin-paginate-rest.js/blob/8e5da25f975d2f31dda6b8b588d71f2c768a8df2/src/iterator.ts#L36-L41
      // `allItems.headers.link` format:
      // <https://api.github.com/repositories/4542716/issues?page=3&per_page=100&after=Y3Vyc29yOnYyOpLPAAABl8qNnYDOvnSJxA%3D%3D>; rel="next",
      // <https://api.github.com/repositories/4542716/issues?page=1&per_page=100&before=Y3Vyc29yOnYyOpLPAAABl8xFV9DOvoouJg%3D%3D>; rel="prev"
      // Sets `next` to undefined if "next" URL is not present or `link` header is not set.
      const next = ((allItems.headers.link ?? '').match(
        /<([^<>]+)>;\s*rel="next"/,
      ) ?? [])[1]
      if (next) {
        // Persist the cursor for the next scheduled run as an artifact.
        cursor = new URL(next).searchParams.get('after')
        const uploadPath = path.resolve('cursor')
        await writeFile(uploadPath, cursor, 'utf-8')
        if (dry) {
          core.info(`pagination-cursor: ${cursor} (upload skipped)`)
        } else {
          // No stats.artifacts++, because this does not allow passing a custom token.
          // Thus, the upload will not happen with the app token, but the default github.token.
          await artifactClient.uploadArtifact(
            'pagination-cursor',
            [uploadPath],
            path.resolve('.'),
            {
              retentionDays: 1,
            },
          )
        }
      }

      // Some items might be in both search results, so filtering out duplicates as well.
      const items = []
        .concat(updatedItems, allItems.data)
        .filter(
          (thisItem, idx, arr) =>
            idx ===
            arr.findIndex((firstItem) => firstItem.number === thisItem.number),
        )

      // Instead of handling all items in parallel we set up some workers to handle the queue
      // with more controlled parallelism. This avoids problems with `pull_request` fetched at
      // the beginning getting out of date towards the end, because it took the whole job 20
      // minutes or more to go through 100's of PRs.
      await Promise.all(
        Array.from({ length: maxConcurrent }, async () => {
          // Each worker pops items off the shared queue until it is empty.
          while (true) {
            const item = items.pop()
            if (!item) break
            try {
              await handle({ item, stats })
            } catch (e) {
              // handle() always wraps failures with { cause }, so e.cause is set.
              core.setFailed(`${e.message}\n${e.cause.stack}`)
            }
          }
        }),
      )
    }
  })
}