···17 NIXPKGS_CI_APP_PRIVATE_KEY:
18 required: true
19 workflow_dispatch:
20- inputs:
21- updatedWithin:
22- description: 'Updated within [hours]'
23- type: number
24- required: false
25- default: 0 # everything since last run
26
27 concurrency:
28 # This explicitly avoids using `run_id` for the concurrency key to make sure that only
29- # *one* non-PR run can run at a time.
30 group: labels-${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number }}
31- # PR- and manually-triggered runs will be cancelled, but scheduled runs will be queued.
32 cancel-in-progress: ${{ github.event_name != 'schedule' }}
33
34 # This is only used as a fallback when no app token is available.
···69
70 - name: Labels from API data and Eval results
71 uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
72- env:
73- UPDATED_WITHIN: ${{ inputs.updatedWithin }}
74 with:
75 github-token: ${{ steps.app-token.outputs.token || github.token }}
76 script: |
···101 github.hook.wrap('request', async (request, options) => {
102 // Requests to the /rate_limit endpoint do not count against the rate limit.
103 if (options.url == '/rate_limit') return request(options)
104 stats.requests++
105 if (['POST', 'PUT', 'PATCH', 'DELETE'].includes(options.method))
106 return writeLimits.schedule(request.bind(null, options))
···126 await updateReservoir()
127 // Update remaining requests every minute to account for other jobs running in parallel.
128 const reservoirUpdater = setInterval(updateReservoir, 60 * 1000)
129- process.on('uncaughtException', () => clearInterval(reservoirUpdater))
130
131- if (process.env.UPDATED_WITHIN && !/^\d+$/.test(process.env.UPDATED_WITHIN))
132- throw new Error('Please enter "updated within" as integer in hours.')
133
134- const cutoff = new Date(await (async () => {
135- // Always run for Pull Request triggers, no cutoff since there will be a single
136- // response only anyway. 0 is the Unix epoch, so always smaller.
137- if (context.payload.pull_request?.number) return 0
138-
139- // Manually triggered via UI when updatedWithin is set. Will fall through to the last
140- // option if the updatedWithin parameter is set to 0, which is the default.
141- const updatedWithin = Number.parseInt(process.env.UPDATED_WITHIN, 10)
142- if (updatedWithin) return new Date().getTime() - updatedWithin * 60 * 60 * 1000
143-
144- // Normally a scheduled run, but could be workflow_dispatch, see above. Go back as far
145- // as the last successful run of this workflow to make sure we are not leaving anyone
146- // behind on GHA failures.
147- // Defaults to go back 1 hour on the first run.
148- return (await github.rest.actions.listWorkflowRuns({
149- ...context.repo,
150- workflow_id: 'labels.yml',
151- event: 'schedule',
152- status: 'success',
153- exclude_pull_requests: true
154- })).data.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000
155- })())
156- core.info('cutoff timestamp: ' + cutoff.toISOString())
157-
158- // To simplify this action's logic we fetch the pull_request data again below, even if
159- // we are already in a pull_request event's context and would have the data readily
160- // available. We do this by filtering the list of pull requests with head and base
161- // branch - there can only be a single open Pull Request for any such combination.
162- const prEventCondition = !context.payload.pull_request ? undefined : {
163- // "label" is in the format of `user:branch` or `org:branch`
164- head: context.payload.pull_request.head.label,
165- base: context.payload.pull_request.base.ref
166- }
167
168- const prs = await github.paginate(
169- github.rest.pulls.list,
170- {
171- ...context.repo,
172- state: 'open',
173- sort: 'updated',
174- direction: 'desc',
175- ...prEventCondition
176- },
177- (response, done) => response.data.map(async (pull_request) => {
178- try {
179- const log = (k,v,skip) => {
180- core.info(`PR #${pull_request.number} - ${k}: ${v}` + (skip ? ' (skipped)' : ''))
181- return skip
182- }
183
184- if (log('Last updated at', pull_request.updated_at, new Date(pull_request.updated_at) < cutoff))
185- return done()
186- stats.prs++
187- log('URL', pull_request.html_url)
188
189- const run_id = (await github.rest.actions.listWorkflowRuns({
190 ...context.repo,
191- workflow_id: 'pr.yml',
192 event: 'pull_request_target',
193- // For PR events, the workflow run is still in progress with this job itself.
194- status: prEventCondition ? 'in_progress' : 'success',
195 exclude_pull_requests: true,
196 head_sha: pull_request.head.sha
197- })).data.workflow_runs[0]?.id ??
198- // TODO: Remove this after 2025-09-17, at which point all eval.yml artifacts will have expired.
199- (await github.rest.actions.listWorkflowRuns({
200- ...context.repo,
201- // In older PRs, we need eval.yml instead of pr.yml.
202- workflow_id: 'eval.yml',
203- event: 'pull_request_target',
204- status: 'success',
205- exclude_pull_requests: true,
206- head_sha: pull_request.head.sha
207- })).data.workflow_runs[0]?.id
208
209- // Newer PRs might not have run Eval to completion, yet. We can skip them, because this
210- // job will be run as part of that Eval run anyway.
211- if (log('Last eval run', run_id ?? '<pending>', !run_id))
212- return;
213
214- const artifact = (await github.rest.actions.listWorkflowRunArtifacts({
215- ...context.repo,
216- run_id,
217- name: 'comparison'
218- })).data.artifacts[0]
219
220- // Instead of checking the boolean artifact.expired, we give ourselves a minute to
221- // actually download the artifact in the next step and avoid that race condition.
222- // Older PRs, where the workflow run was already eval.yml, but the artifact was not
223- // called "comparison", yet, will be skipped as well.
224- const expired = new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000)
225- if (log('Artifact expires at', artifact?.expires_at ?? '<not found>', expired))
226- return;
227 stats.artifacts++
228
229 await artifactClient.downloadArtifact(artifact.id, {
···232 repositoryOwner: context.repo.owner,
233 token: core.getInput('github-token')
234 },
235- path: path.resolve(pull_request.number.toString()),
236 expectedHash: artifact.digest
237 })
238
239- // Create a map (Label -> Boolean) of all currently set labels.
240- // Each label is set to True and can be disabled later.
241- const before = Object.fromEntries(
242- (await github.paginate(github.rest.issues.listLabelsOnIssue, {
243- ...context.repo,
244- issue_number: pull_request.number
245- }))
246- .map(({ name }) => [name, true])
247- )
248
249- const approvals = new Set(
250- (await github.paginate(github.rest.pulls.listReviews, {
251 ...context.repo,
252- pull_number: pull_request.number
253- }))
254- .filter(review => review.state == 'APPROVED')
255- .map(review => review.user?.id)
256- )
257
258 const maintainers = new Set(Object.keys(
259- JSON.parse(await readFile(`${pull_request.number}/maintainers.json`, 'utf-8'))
260 ).map(m => Number.parseInt(m, 10)))
261
262- const evalLabels = JSON.parse(await readFile(`${pull_request.number}/changed-paths.json`, 'utf-8')).labels
263
264- // Manage the labels
265- const after = Object.assign(
266- {},
267- before,
268 // Ignore `evalLabels` if it's an array.
269 // This can happen for older eval runs, before we switched to objects.
270 // The old eval labels would have been set by the eval run,
···272 // TODO: Simplify once old eval results have expired (~2025-10)
273 (Array.isArray(evalLabels) ? undefined : evalLabels),
274 {
275- '12.approvals: 1': approvals.size == 1,
276- '12.approvals: 2': approvals.size == 2,
277- '12.approvals: 3+': approvals.size >= 3,
278 '12.approved-by: package-maintainer': Array.from(maintainers).some(m => approvals.has(m)),
279- '12.first-time contribution':
280- [ 'NONE', 'FIRST_TIMER', 'FIRST_TIME_CONTRIBUTOR' ].includes(pull_request.author_association),
281 }
282 )
283
284- // No need for an API request if all labels are the same.
285- const hasChanges = Object.keys(after).some(name => (before[name] ?? false) != after[name])
286- if (log('Has changes', hasChanges, !hasChanges))
287- return;
288
289- // Skipping labeling on a pull_request event, because we have no privileges.
290- const labels = Object.entries(after).filter(([,value]) => value).map(([name]) => name)
291- if (log('Set labels', labels, context.eventName == 'pull_request'))
292- return;
293
294- await github.rest.issues.setLabels({
295- ...context.repo,
296- issue_number: pull_request.number,
297- labels
298- })
299- } catch (cause) {
300- throw new Error(`Labeling PR #${pull_request.number} failed.`, { cause })
301- }
302- })
303- );
304
305- (await Promise.allSettled(prs.flat()))
306- .filter(({ status }) => status == 'rejected')
307- .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`))
308
309- core.notice(`Processed ${stats.prs} PRs, made ${stats.requests + stats.artifacts} API requests and downloaded ${stats.artifacts} artifacts.`)
310- clearInterval(reservoirUpdater)
311
312 - name: Log current API rate limits
313 env:
···17 NIXPKGS_CI_APP_PRIVATE_KEY:
18 required: true
19 workflow_dispatch:
20
21 concurrency:
22 # This explicitly avoids using `run_id` for the concurrency key to make sure that only
23+ # *one* scheduled run can run at a time.
24 group: labels-${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number }}
25+ # PR-triggered runs will be cancelled, but scheduled runs will be queued.
26 cancel-in-progress: ${{ github.event_name != 'schedule' }}
27
28 # This is only used as a fallback when no app token is available.
···63
64 - name: Labels from API data and Eval results
65 uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
66 with:
67 github-token: ${{ steps.app-token.outputs.token || github.token }}
68 script: |
···93 github.hook.wrap('request', async (request, options) => {
94 // Requests to the /rate_limit endpoint do not count against the rate limit.
95 if (options.url == '/rate_limit') return request(options)
96+ // Search requests are in a different resource group, which allows 30 requests / minute.
97+ // We do less than a handful each run, so not implementing throttling for now.
98+ if (options.url.startsWith('/search/')) return request(options)
99 stats.requests++
100 if (['POST', 'PUT', 'PATCH', 'DELETE'].includes(options.method))
101 return writeLimits.schedule(request.bind(null, options))
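
A minimal sketch of what the `writeLimits` limiter scheduled above could look like, assuming the `bottleneck` package (hypothetical configuration; the workflow's actual setup may differ):

    const Bottleneck = require('bottleneck')
    // Serialize content-generating requests (POST/PUT/PATCH/DELETE) and space
    // them out to stay within GitHub's secondary rate limits; plain reads and
    // the exempt endpoints above bypass this limiter entirely.
    const writeLimits = new Bottleneck({ maxConcurrent: 1, minTime: 1000 })
    // usage: await writeLimits.schedule(() => github.rest.issues.setLabels({ ... }))
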
···121 await updateReservoir()
122 // Update remaining requests every minute to account for other jobs running in parallel.
123 const reservoirUpdater = setInterval(updateReservoir, 60 * 1000)
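
A sketch of what `updateReservoir` might do, assuming a Bottleneck-style read limiter (the name `readLimits` is hypothetical). Polling every minute is cheap, because /rate_limit does not count against the quota:

    async function updateReservoir() {
      const { data } = await github.rest.rateLimit.get()
      // Halve the remaining budget to leave headroom for parallel jobs
      // sharing the same token.
      readLimits.updateSettings({ reservoir: Math.floor(data.resources.core.remaining / 2) })
    }
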
124
125+ async function handle(item) {
126+ try {
127+ const log = (k,v,skip) => {
128+ core.info(`#${item.number} - ${k}: ${v}` + (skip ? ' (skipped)' : ''))
129+ return skip
130+ }
131
132+ log('Last updated at', item.updated_at)
133+ stats.prs++
134+ log('URL', item.html_url)
135
136+ const pull_number = item.number
137+ const issue_number = item.number
138
139+ // This API request is important for the merge-conflict label, because it triggers the
140+ // creation of a new test merge commit. This is needed to actually determine the state of a PR.
141+ const pull_request = (await github.rest.pulls.get({
142+ ...context.repo,
143+ pull_number
144+ })).data
145
146+ const run_id = (await github.rest.actions.listWorkflowRuns({
147+ ...context.repo,
148+ workflow_id: 'pr.yml',
149+ event: 'pull_request_target',
150+ // In pull_request contexts the workflow is still running.
151+ status: context.payload.pull_request ? undefined : 'success',
152+ exclude_pull_requests: true,
153+ head_sha: pull_request.head.sha
154+ })).data.workflow_runs[0]?.id ??
155+ // TODO: Remove this after 2025-09-17, at which point all eval.yml artifacts will have expired.
156+ (await github.rest.actions.listWorkflowRuns({
157 ...context.repo,
158+ // In older PRs, we need eval.yml instead of pr.yml.
159+ workflow_id: 'eval.yml',
160 event: 'pull_request_target',
161+ status: 'success',
162 exclude_pull_requests: true,
163 head_sha: pull_request.head.sha
164+ })).data.workflow_runs[0]?.id
165
166+ // Newer PRs might not have run Eval to completion, yet.
167+ // Older PRs might not have an eval.yml workflow, yet.
168+ // In either case we continue without fetching an artifact on a best-effort basis.
169+ log('Last eval run', run_id ?? '<n/a>')
170
171+ const artifact = run_id && (await github.rest.actions.listWorkflowRunArtifacts({
172+ ...context.repo,
173+ run_id,
174+ name: 'comparison'
175+ })).data.artifacts[0]
176
177+ // Instead of checking the boolean artifact.expired, we give ourselves a minute to
178+ // actually download the artifact in the next step and avoid that race condition.
179+ // Older PRs, where the workflow run was already eval.yml, but the artifact was not
180+ // called "comparison", yet, will skip the download.
181+ const expired = !artifact || new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000)
182+ log('Artifact expires at', artifact?.expires_at ?? '<n/a>')
183+ if (!expired) {
184 stats.artifacts++
185
186 await artifactClient.downloadArtifact(artifact.id, {
···189 repositoryOwner: context.repo.owner,
190 token: core.getInput('github-token')
191 },
192+ path: path.resolve(pull_number.toString()),
193 expectedHash: artifact.digest
194 })
195+ }
196
197+ // Create a map (Label -> Boolean) of all currently set labels.
198+ // Each label is set to True and can be disabled later.
199+ const before = Object.fromEntries(
200+ (await github.paginate(github.rest.issues.listLabelsOnIssue, {
201+ ...context.repo,
202+ issue_number
203+ }))
204+ .map(({ name }) => [name, true])
205+ )
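
For illustration, this turns the paginated label list into a lookup map (label names hypothetical):

    const sample = [{ name: '6.topic: python' }, { name: '8.has: documentation' }]
    Object.fromEntries(sample.map(({ name }) => [name, true]))
    // -> { '6.topic: python': true, '8.has: documentation': true }
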
206+
207+ const approvals = new Set(
208+ (await github.paginate(github.rest.pulls.listReviews, {
209+ ...context.repo,
210+ pull_number
211+ }))
212+ .filter(review => review.state == 'APPROVED')
213+ .map(review => review.user?.id)
214+ )
215
216+ const latest_event_at = new Date(
217+ (await github.paginate(
218+ github.rest.issues.listEventsForTimeline,
219+ {
220 ...context.repo,
221+ issue_number,
222+ per_page: 100
223+ }
224+ ))
225+ .filter(({ event }) => [
226+ // These events are hand-picked from:
227+ // https://docs.github.com/en/rest/using-the-rest-api/issue-event-types?apiVersion=2022-11-28
228+ // Each of those causes a PR/issue to *not* be considered as stale anymore.
229+ // Most of these use created_at.
230+ 'assigned',
231+ 'commented', // uses updated_at, because that could be > created_at
232+ 'committed', // uses committer.date
233+ 'head_ref_force_pushed',
234+ 'milestoned',
235+ 'pinned',
236+ 'ready_for_review',
237+ 'renamed',
238+ 'reopened',
239+ 'review_dismissed',
240+ 'review_requested',
241+ 'reviewed', // uses submitted_at
242+ 'unlocked',
243+ 'unmarked_as_duplicate',
244+ ].includes(event))
245+ .map(({ created_at, updated_at, committer, submitted_at }) => new Date(updated_at ?? created_at ?? submitted_at ?? committer.date))
246+ // Reverse sort by date value. The default sort() sorts by string representation, which is bad for dates.
247+ .sort((a,b) => b-a)
248+ .at(0) ?? item.created_at
249+ )
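
The numeric comparator matters here: the default sort() compares string representations, which orders Date objects by their textual form rather than chronologically. A quick check with made-up dates:

    const dates = [new Date('2024-12-31'), new Date('2025-02-01')]
    dates.sort((a, b) => b - a)  // Date subtraction coerces to milliseconds
    // -> newest first, so .at(0) above picks the most recent qualifying event
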
250
251+ const stale_at = new Date(new Date().setDate(new Date().getDate() - 180))
252+
253+ // After creation of a Pull Request, `merge_commit_sha` will be null initially:
254+ // The very first merge commit will only be calculated after a little while.
255+ // To avoid labeling the PR as conflicted before that, we wait a few minutes.
256+ // This is intentionally less than the time that Eval takes, so that the label job
257+ // running after Eval can indeed label the PR as conflicted if that is the case.
258+ const merge_commit_sha_valid = new Date() - new Date(pull_request.created_at) > 3 * 60 * 1000
259+
260+ // Manage most of the labels, without eval results
261+ const after = Object.assign(
262+ {},
263+ before,
264+ {
265+ // We intentionally don't use the mergeable or mergeable_state attributes.
266+ // Those have an intermediate state while the test merge commit is created.
267+ // This doesn't work well for us, because we might have just triggered another
268+ // test merge commit creation by requesting the pull request via the API at the start
269+ // of this function.
270+ // The attribute merge_commit_sha keeps the old value of null or the hash *until*
271+ // the new test merge commit has either been created successfully or its creation has failed.
272+ // This essentially means we are updating the merge conflict label in two steps:
273+ // On the first pass of the day, we just fetch the pull request, which triggers
274+ // the creation. At this stage, the label is likely not updated, yet.
275+ // The second pass will then read the result from the first pass and set the label.
276+ '2.status: merge conflict': merge_commit_sha_valid && !pull_request.merge_commit_sha,
277+ '2.status: stale': !before['1.severity: security'] && latest_event_at < stale_at,
278+ '12.approvals: 1': approvals.size == 1,
279+ '12.approvals: 2': approvals.size == 2,
280+ '12.approvals: 3+': approvals.size >= 3,
281+ '12.first-time contribution':
282+ [ 'NONE', 'FIRST_TIMER', 'FIRST_TIME_CONTRIBUTOR' ].includes(pull_request.author_association),
283+ }
284+ )
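
Object.assign merges left to right, so the keys computed here override the snapshot in `before`, while labels set manually and unknown to this job survive untouched. With generic keys:

    Object.assign({}, { a: true, b: true }, { b: false, c: true })
    // -> { a: true, b: false, c: true }
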
285+
286+ // Manage labels based on eval results
287+ if (!expired) {
288 const maintainers = new Set(Object.keys(
289+ JSON.parse(await readFile(`${pull_number}/maintainers.json`, 'utf-8'))
290 ).map(m => Number.parseInt(m, 10)))
291
292+ const evalLabels = JSON.parse(await readFile(`${pull_number}/changed-paths.json`, 'utf-8')).labels
293
294+ Object.assign(
295+ after,
296 // Ignore `evalLabels` if it's an array.
297 // This can happen for older eval runs, before we switched to objects.
298 // The old eval labels would have been set by the eval run,
···300 // TODO: Simplify once old eval results have expired (~2025-10)
301 (Array.isArray(evalLabels) ? undefined : evalLabels),
302 {
303 '12.approved-by: package-maintainer': Array.from(maintainers).some(m => approvals.has(m)),
304 }
305 )
306+ }
307
308+ // No need for an API request if all labels are the same.
309+ const hasChanges = Object.keys(after).some(name => (before[name] ?? false) != after[name])
310+ if (log('Has changes', hasChanges, !hasChanges))
311+ return;
312
313+ // Skipping labeling on a pull_request event, because we have no privileges.
314+ const labels = Object.entries(after).filter(([,value]) => value).map(([name]) => name)
315+ if (log('Set labels', labels, context.eventName == 'pull_request'))
316+ return;
317
318+ await github.rest.issues.setLabels({
319+ ...context.repo,
320+ issue_number,
321+ labels
322+ })
323+ } catch (cause) {
324+ throw new Error(`Labeling #${item.number} failed.`, { cause })
325+ }
326+ }
327+
328+ try {
329+ if (context.payload.pull_request) {
330+ await handle(context.payload.pull_request)
331+ } else {
332+ const workflowData = (await github.rest.actions.listWorkflowRuns({
333+ ...context.repo,
334+ workflow_id: 'labels.yml',
335+ event: 'schedule',
336+ status: 'success',
337+ exclude_pull_requests: true,
338+ per_page: 1
339+ })).data
340
341+ // Go back as far as the last successful run of this workflow to make sure
342+ // we are not leaving anyone behind on GHA failures.
343+ // Defaults to go back 1 hour on the first run.
344+ const cutoff = new Date(workflowData.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000)
345+ core.info('cutoff timestamp: ' + cutoff.toISOString())
346
347+ const updatedItems = await github.paginate(
348+ github.rest.search.issuesAndPullRequests,
349+ {
350+ q: [
351+ `repo:"${process.env.GITHUB_REPOSITORY}"`,
352+ 'type:pr',
353+ 'is:open',
354+ `updated:>=${cutoff.toISOString()}`
355+ ].join(' AND '),
356+ // TODO: Remove in 2025-10, when it becomes the default.
357+ advanced_search: true
358+ }
359+ )
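
The joined array is a single search expression; with a hypothetical cutoff it reads:

    const q = [`repo:"NixOS/nixpkgs"`, 'type:pr', 'is:open', 'updated:>=2025-06-01T10:00:00.000Z'].join(' AND ')
    // -> 'repo:"NixOS/nixpkgs" AND type:pr AND is:open AND updated:>=2025-06-01T10:00:00.000Z'
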
360+
361+ // The search endpoint only allows fetching the first 1000 records, but the
362+ // pull request list endpoint does not support counting the total number
363+ // of results.
364+ // Thus, we use /search for counting and /pulls for reading the response.
365+ const { total_count: total_pulls } = (await github.rest.search.issuesAndPullRequests({
366+ q: [
367+ `repo:"${process.env.GITHUB_REPOSITORY}"`,
368+ 'type:pr',
369+ 'is:open'
370+ ].join(' AND '),
371+ sort: 'created',
372+ direction: 'asc',
373+ // TODO: Remove in 2025-10, when it becomes the default.
374+ advanced_search: true,
375+ per_page: 1
376+ })).data
377+ const { total_count: total_runs } = workflowData
378+
379+ const allPulls = (await github.rest.pulls.list({
380+ ...context.repo,
381+ state: 'open',
382+ sort: 'created',
383+ direction: 'asc',
384+ per_page: 100,
385+ // We iterate through pages of 100 items across scheduled runs. With currently ~7000 open PRs and
386+ // up to 6*24=144 scheduled runs per day, we hit every PR twice each day.
387+ // We might not hit every PR on one iteration, because the pages will shift slightly when
388+ // PRs are closed or merged. We assume this to be OK on the bigger scale, because a PR which was
389+ // missed once would have to move through a whole page to be missed again. This is very unlikely,
390+ // so it should certainly be hit on the next iteration.
391+ // TODO: Evaluate after a while whether the above still holds true, and potentially implement
392+ // an overlap between runs.
393+ page: total_runs % Math.ceil(total_pulls / 100)
394+ })).data
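
Worked through with the numbers from the comment above (illustrative only): roughly 7000 open PRs give Math.ceil(7000 / 100) = 70 pages, so consecutive scheduled runs cycle through all of them:

    const pages = Math.ceil(7000 / 100)              // 70
    ;[140, 141, 209, 210].map(runs => runs % pages)  // -> [0, 1, 69, 0]
    // 144 runs per day across 70 pages visits every page about twice a day
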
395+
396+ // Some items might appear in both result sets, so filter out duplicates as well.
397+ const items = [].concat(updatedItems, allPulls)
398+ .filter((thisItem, idx, arr) => idx == arr.findIndex(firstItem => firstItem.number == thisItem.number))
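
findIndex keeps only the first occurrence of each PR number, so a PR returned by both the search and the page listing is handled once:

    const sample = [{ number: 1 }, { number: 2 }, { number: 2 }]
    sample.filter((x, i, arr) => i == arr.findIndex(y => y.number == x.number))
    // -> [{ number: 1 }, { number: 2 }]
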
399+
400+ ;(await Promise.allSettled(items.map(handle)))
401+ .filter(({ status }) => status == 'rejected')
402+ .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`))
403+
404+ core.notice(`Processed ${stats.prs} PRs, made ${stats.requests + stats.artifacts} API requests and downloaded ${stats.artifacts} artifacts.`)
405+ }
406+ } finally {
407+ clearInterval(reservoirUpdater)
408+ }
409
410 - name: Log current API rate limits
411 env:
+1-1
CONTRIBUTING.md
···313
314 To streamline automated updates, leverage the nixpkgs-merge-bot by simply commenting `@NixOS/nixpkgs-merge-bot merge`. The bot will verify if the following conditions are met, refusing to merge otherwise:
315
316- - the PR author should be @r-ryantm;
317- the commenter that issued the command should be among the package maintainers;
318- the package should reside in `pkgs/by-name`.
319
···313
314 To streamline automated updates, leverage the nixpkgs-merge-bot by simply commenting `@NixOS/nixpkgs-merge-bot merge`. The bot will verify if the following conditions are met, refusing to merge otherwise:
315
316+ - the PR author should be @r-ryantm or a Nixpkgs committer;
317- the commenter that issued the command should be among the package maintainers;
318- the package should reside in `pkgs/by-name`.
319
+6
doc/release-notes/rl-2511.section.md
···33 - `podofo` has been updated from `0.9.8` to `1.0.0`. These releases are by nature very incompatible due to major API changes. The legacy versions can be found under `podofo_0_10` and `podofo_0_9`.
34 Changelog: https://github.com/podofo/podofo/blob/1.0.0/CHANGELOG.md, API-Migration-Guide: https://github.com/podofo/podofo/blob/1.0.0/API-MIGRATION.md.
35
36 ## Other Notable Changes {#sec-nixpkgs-release-25.11-notable-changes}
37
38 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
39
40 - Added `rewriteURL` attribute to the nixpkgs `config`, to allow for rewriting the URLs downloaded by `fetchurl`.
41
42 - New hardening flags, `strictflexarrays1` and `strictflexarrays3`, were made available, corresponding to the gcc/clang options `-fstrict-flex-arrays=1` and `-fstrict-flex-arrays=3` respectively.
43
···33 - `podofo` has been updated from `0.9.8` to `1.0.0`. These releases are by nature very incompatible due to major API changes. The legacy versions can be found under `podofo_0_10` and `podofo_0_9`.
34 Changelog: https://github.com/podofo/podofo/blob/1.0.0/CHANGELOG.md, API-Migration-Guide: https://github.com/podofo/podofo/blob/1.0.0/API-MIGRATION.md.
35
36+ - NetBox was updated to `>= 4.3.0`. Have a look at the breaking changes
37+ of the [4.3 release](https://github.com/netbox-community/netbox/releases/tag/v4.3.0),
38+ make the required changes to your database, if needed, then upgrade by setting `services.netbox.package = pkgs.netbox_4_3;` in your configuration.
39+
40 ## Other Notable Changes {#sec-nixpkgs-release-25.11-notable-changes}
41
42 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
43
44 - Added `rewriteURL` attribute to the nixpkgs `config`, to allow for rewriting the URLs downloaded by `fetchurl`.
45+
46+ - The systemd initrd will now respect `x-systemd.wants` and `x-systemd.requires` for reliably unlocking multi-disk bcachefs volumes.
47
48 - New hardening flags, `strictflexarrays1` and `strictflexarrays3`, were made available, corresponding to the gcc/clang options `-fstrict-flex-arrays=1` and `-fstrict-flex-arrays=3` respectively.
49
+1-1
maintainers/README.md
···178 for further information.
179
180 # nixpkgs-merge-bot
181-To streamline autoupdates, leverage the nixpkgs-merge-bot by commenting `@NixOS/nixpkgs-merge-bot merge` if the package resides in pkgs-by-name and the commenter is among the package maintainers. The bot ensures that all ofborg checks, except for darwin, are successfully completed before merging the pull request. Should the checks still be underway, the bot patiently waits for ofborg to finish before attempting the merge again.
182
183 # Guidelines for Committers
184
···178 for further information.
179
180 # nixpkgs-merge-bot
181+To streamline autoupdates, leverage the nixpkgs-merge-bot by commenting `@NixOS/nixpkgs-merge-bot merge` if the package resides in pkgs-by-name, the commenter is among the package maintainers, and the pull request author is @r-ryantm or a Nixpkgs committer. The bot ensures that all ofborg checks, except for darwin, are successfully completed before merging the pull request. Should the checks still be underway, the bot patiently waits for ofborg to finish before attempting the merge again.
182
183 # Guidelines for Committers
184
···759 enableFeatureFreezePing = true;
760 };
761
762 lumiguide = {
763 # Verify additions by approval of an already existing member of the team.
764 members = [
···759 enableFeatureFreezePing = true;
760 };
761762+ loongarch64 = {
763+ members = [
764+ aleksana
765+ Cryolitia
766+ darkyzhou
767+ dramforever
768+ wegank
769+ ];
770+ githubTeams = [ "loongarch64" ];
771+ scope = "Maintain LoongArch64 related packages and code";
772+ shortName = "LoongArch64";
773+ enableFeatureFreezePing = true;
774+ };
775+
776 lumiguide = {
777 # Verify additions by approval of an already existing member of the team.
778 members = [
···161 # Generated with `uuidgen`. Random but fixed to improve reproducibility.
162 default = "0867da16-f251-457d-a9e8-c31f9a3c220b";
163 description = ''
164- A UUID to use as a seed. You can set this to `null` to explicitly
165 randomize the partition UUIDs.
166 '';
167 };
168
···161 # Generated with `uuidgen`. Random but fixed to improve reproducibility.
162 default = "0867da16-f251-457d-a9e8-c31f9a3c220b";
163 description = ''
164+ A UUID to use as a seed. You can set this to `random` to explicitly
165 randomize the partition UUIDs.
166+ See {manpage}`systemd-repart(8)` for more information.
167 '';
168 };
169
···566 # exact version or even running a newer version.
567 ./patches/chromium-136-nodejs-assert-minimal-version-instead-of-exact-match.patch
568 ]
569- ++ lib.optionals (chromiumVersionAtLeast "137") [
570 (fetchpatch {
571 # Partial revert of upstream clang+llvm bump revert to fix the following error when building with LLVM < 21:
572 # clang++: error: unknown argument: '-fextend-variable-liveness=none'
573 # https://chromium-review.googlesource.com/c/chromium/src/+/6514242
0574 name = "chromium-137-llvm-19.patch";
575 url = "https://chromium.googlesource.com/chromium/src/+/ddf8f8a465be2779bd826db57f1299ccd2f3aa25^!?format=TEXT";
576 includes = [ "build/config/compiler/BUILD.gn" ];
···579 hash = "sha256-wAR8E4WKMvdkW8DzdKpyNpp4dynIsYAbnJ2MqE8V2o8=";
580 })
581 ]
582- ++ lib.optionals (chromiumVersionAtLeast "137") [
583 (fetchpatch {
584 # Backport "Fix build with system libpng" that fixes a typo in core/fxcodec/png/png_decoder.cpp that causes
585 # the build to fail at the final linking step.
586 # https://pdfium-review.googlesource.com/c/pdfium/+/132130
0587 name = "pdfium-Fix-build-with-system-libpng.patch";
588 url = "https://pdfium.googlesource.com/pdfium.git/+/83f11d630aa1cb6d5ceb292364412f7b0585a201^!?format=TEXT";
589 extraPrefix = "third_party/pdfium/";
···566 # exact version or even running a newer version.
567 ./patches/chromium-136-nodejs-assert-minimal-version-instead-of-exact-match.patch
568 ]
569+ ++ lib.optionals (versionRange "137" "138") [
570 (fetchpatch {
571 # Partial revert of upstream clang+llvm bump revert to fix the following error when building with LLVM < 21:
572 # clang++: error: unknown argument: '-fextend-variable-liveness=none'
573 # https://chromium-review.googlesource.com/c/chromium/src/+/6514242
574+ # Upstream relanded this in M138+ with <https://chromium-review.googlesource.com/c/chromium/src/+/6541127>.
575 name = "chromium-137-llvm-19.patch";
576 url = "https://chromium.googlesource.com/chromium/src/+/ddf8f8a465be2779bd826db57f1299ccd2f3aa25^!?format=TEXT";
577 includes = [ "build/config/compiler/BUILD.gn" ];
···580 hash = "sha256-wAR8E4WKMvdkW8DzdKpyNpp4dynIsYAbnJ2MqE8V2o8=";
581 })
582 ]
583+ ++ lib.optionals (versionRange "137" "138") [
584 (fetchpatch {
585 # Backport "Fix build with system libpng" that fixes a typo in core/fxcodec/png/png_decoder.cpp that causes
586 # the build to fail at the final linking step.
587 # https://pdfium-review.googlesource.com/c/pdfium/+/132130
588+ # Started shipping with M138+.
589 name = "pdfium-Fix-build-with-system-libpng.patch";
590 url = "https://pdfium.googlesource.com/pdfium.git/+/83f11d630aa1cb6d5ceb292364412f7b0585a201^!?format=TEXT";
591 extraPrefix = "third_party/pdfium/";
···8
9 stdenv.mkDerivation {
10 pname = "epson-inkjet-printer-escpr2";
11- version = "1.2.28";
12
13 src = fetchurl {
14 # To find the most recent version go to
15 # https://support.epson.net/linux/Printer/LSB_distribution_pages/en/escpr2.php
16 # and retrieve the download link for source package for arm CPU for the tar.gz (the x86 link targets to rpm source files)
17- url = "https://download3.ebz.epson.net/dsc/f/03/00/16/80/15/8bd63ccd14a1966e9c3658d374686c5bb104bb04/epson-inkjet-printer-escpr2-1.2.28-1.tar.gz";
18- hash = "sha256-lv8Hgo7JzT4igY8ek7EXdyFO34l735dpMC+gWkO5rvY=";
19 };
20
21 buildInputs = [ cups ];
···27 # Fixes "implicit declaration of function" errors
28 # source of patch: https://aur.archlinux.org/packages/epson-inkjet-printer-escpr2
29 (fetchurl {
30- url = "https://aur.archlinux.org/cgit/aur.git/plain/bug_x86_64.patch?h=epson-inkjet-printer-escpr2&id=575d1b959063044f233cca099caceec8e6d5c02f";
31- sha256 = "sha256-G6/3oj25FUT+xv9aJ7qP5PBZWLfy+V8MCHUYucDhtzM=";
32 })
33 ];
34
···8
9 stdenv.mkDerivation {
10 pname = "epson-inkjet-printer-escpr2";
11+ version = "1.2.34";
12
13 src = fetchurl {
14 # To find the most recent version go to
15 # https://support.epson.net/linux/Printer/LSB_distribution_pages/en/escpr2.php
16 # and retrieve the download link for source package for arm CPU for the tar.gz (the x86 link targets to rpm source files)
17+ url = "https://download3.ebz.epson.net/dsc/f/03/00/17/17/88/53f956e8d0a0dfc9cb7d0c35907183deb028a8b7/epson-inkjet-printer-escpr2-1.2.34-1.tar.gz";
18+ hash = "sha256-7EpK/EI9MHTX2z+JtMB2Urt/e893cwNX5DAGSbjDyj4=";
19 };
20
21 buildInputs = [ cups ];
···27 # Fixes "implicit declaration of function" errors
28 # source of patch: https://aur.archlinux.org/packages/epson-inkjet-printer-escpr2
29 (fetchurl {
30+ url = "https://aur.archlinux.org/cgit/aur.git/plain/bug_x86_64.patch?h=epson-inkjet-printer-escpr2&id=8fbca325d6d39fa3ffe001f90a432380bdeacc2f";
31+ sha256 = "sha256-V8ejK33qyHPX4x8EOgR+XWW44KR8DQwHx2w+O71gQwo=";
32 })
33 ];
34
+43
pkgs/by-name/gi/gitea-mcp-server/package.nix
···1+{
2+ lib,
3+ buildGoModule,
4+ fetchFromGitea,
5+}:
6+buildGoModule (finalAttrs: {
7+ pname = "gitea-mcp-server";
8+ version = "0.2.0";
9+
10+ src = fetchFromGitea {
11+ domain = "gitea.com";
12+ owner = "gitea";
13+ repo = "gitea-mcp";
14+ tag = "v${finalAttrs.version}";
15+ hash = "sha256-ZUnpE25XIYzSwdEilzXnhqGR676iBQcR2yiT3jhJApc=";
16+ };
17+
18+ vendorHash = "sha256-u9jIjrbDUhnaaeBET+pKQTKhaQLUeQvKOXSBfS0vMJM=";
19+
20+ subPackages = [ "." ];
21+
22+ doCheck = false; # no tests
23+
24+ postInstall = ''
25+ install -Dm644 README.md LICENSE -t $out/share/doc/gitea-mcp-server
26+ '';
27+
28+ meta = {
29+ description = "Gitea Model Context Protocol (MCP) Server";
30+ longDescription = ''
31+ The Gitea MCP Server is a Model Context Protocol (MCP) server that provides
32+ seamless integration with Gitea APIs, enabling advanced automation and
33+ interaction capabilities for developers and tools.
34+
35+ This server allows LLMs to interact with Gitea repositories, issues, pull
36+ requests, and other Gitea features through structured API interactions.
37+ '';
38+ homepage = "https://gitea.com/gitea/gitea-mcp";
39+ license = lib.licenses.mit;
40+ mainProgram = "gitea-mcp";
41+ maintainers = with lib.maintainers; [ connerohnesorge ];
42+ };
43+})
···16
17 buildInputs = [ libX11 ];
18
19 # Functions are declared after they are used in the file; this is an error since gcc-14.
20 # randnum.c:25:3: warning: implicit declaration of function 'srand' [-Wimplicit-function-declaration]
21 # randnum.c:33:7: warning: implicit declaration of function 'rand'; did you mean 'randnum'? [-Wimplicit-function-declaration]
22 # text.c:34:50: warning: implicit declaration of function 'strlen' [-Wimplicit-function-declaration]
23- env.NIX_CFLAGS_COMPILE = "-Wno-error=implicit-function-declaration";
24
25 installPhase = ''
26 runHook preInstall
···35 description = "3D vector-based clone of the atari game Missile Command";
36 mainProgram = "icbm3d";
37 license = lib.licenses.gpl2Plus;
38- platforms = lib.platforms.linux;
39 };
40})
···16
17 buildInputs = [ libX11 ];
18
19+ buildFlags = [ "CC=${stdenv.cc.targetPrefix}cc" ]; # fix darwin and cross-compiled builds
20+
21 # Functions are declared after they are used in the file; this is an error since gcc-14.
22 # randnum.c:25:3: warning: implicit declaration of function 'srand' [-Wimplicit-function-declaration]
23 # randnum.c:33:7: warning: implicit declaration of function 'rand'; did you mean 'randnum'? [-Wimplicit-function-declaration]
24 # text.c:34:50: warning: implicit declaration of function 'strlen' [-Wimplicit-function-declaration]
25+ postPatch = ''
26+ substituteInPlace randnum.c --replace-fail 'stdio.h' 'stdlib.h'
27+ sed -i '1i\
28+ #include <string.h>' text.c
29+
30+ # The Makefile tries to install icbm3d immediately after building it, and
31+ # ends up trying to copy it to /icbm3d. Normally this just gets an error
32+ # and moves on, but it's probably better to not try it in the first place.
33+ sed -i '/INSTALLROOT/d' makefile
34+ '';
35
36 installPhase = ''
37 runHook preInstall
···46 description = "3D vector-based clone of the atari game Missile Command";
47 mainProgram = "icbm3d";
48 license = lib.licenses.gpl2Plus;
49+ platforms = lib.platforms.unix;
50 };
51})
···95
96 # TODO: figure out why this is even necessary and why the missing dylib only crashes
97 # random instead of every test
98- preCheck = lib.optionalString stdenv.hostPlatform.isDarwin ''
99 mkdir -p $lib/lib
100 cp src/.libs/libgcrypt.20.dylib $lib/lib
101 '';
···95
96 # TODO: figure out why this is even necessary and why the missing dylib only crashes
97 # random instead of every test
98+ preCheck = lib.optionalString (stdenv.hostPlatform.isDarwin && !stdenv.hostPlatform.isStatic) ''
99 mkdir -p $lib/lib
100 cp src/.libs/libgcrypt.20.dylib $lib/lib
101 '';
···3821 };
3822
3823 # Not in aliases because it wouldn't get picked up by callPackage
3824- netbox = netbox_4_2;
3825
3826 netcap-nodpi = callPackage ../by-name/ne/netcap/package.nix {
3827 withDpi = false;
···3821 };
3822
3823 # Not in aliases because it wouldn't get picked up by callPackage
3824+ netbox = netbox_4_3;
3825
3826 netcap-nodpi = callPackage ../by-name/ne/netcap/package.nix {
3827 withDpi = false;