···
       NIXPKGS_CI_APP_PRIVATE_KEY:
         required: true
   workflow_dispatch:
-    inputs:
-      updatedWithin:
-        description: 'Updated within [hours]'
-        type: number
-        required: false
-        default: 0 # everything since last run
 
 concurrency:
   # This explicitly avoids using `run_id` for the concurrency key to make sure that only
-  # *one* non-PR run can run at a time.
+  # *one* scheduled run can run at a time.
   group: labels-${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number }}
-  # PR- and manually-triggered runs will be cancelled, but scheduled runs will be queued.
+  # PR-triggered runs will be cancelled, but scheduled runs will be queued.
   cancel-in-progress: ${{ github.event_name != 'schedule' }}
 
 # This is only used as a fallback, when running without the app.
···
 
       - name: Labels from API data and Eval results
         uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
-        env:
-          UPDATED_WITHIN: ${{ inputs.updatedWithin }}
         with:
           github-token: ${{ steps.app-token.outputs.token || github.token }}
           script: |
···
 github.hook.wrap('request', async (request, options) => {
   // Requests to the /rate_limit endpoint do not count against the rate limit.
   if (options.url == '/rate_limit') return request(options)
+  // Search requests are in a different resource group, which allows 30 requests / minute.
+  // We make fewer than a handful of those per run, so we skip throttling for now.
+  if (options.url.startsWith('/search/')) return request(options)
   stats.requests++
   if (['POST', 'PUT', 'PATCH', 'DELETE'].includes(options.method))
     return writeLimits.schedule(request.bind(null, options))
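Note: `writeLimits` and `updateReservoir` are defined outside this hunk. A minimal sketch of what such a write throttle could look like, assuming the `bottleneck` npm package (the workflow's actual setup may differ):

```js
// Sketch only - assumes the `bottleneck` package; not part of this diff.
const Bottleneck = require('bottleneck')

const writeLimits = new Bottleneck({
  maxConcurrent: 1, // one mutating request at a time
  minTime: 1000,    // at least 1s between writes
  reservoir: 100,   // burst budget, refreshed below
})

// Sync the local budget with the live rate limit; /rate_limit itself is free.
async function updateReservoir() {
  const { data } = await github.rest.rateLimit.get()
  writeLimits.updateSettings({ reservoir: data.resources.core.remaining })
}
```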
···
 await updateReservoir()
 // Update remaining requests every minute to account for other jobs running in parallel.
 const reservoirUpdater = setInterval(updateReservoir, 60 * 1000)
-process.on('uncaughtException', () => clearInterval(reservoirUpdater))
 
-if (process.env.UPDATED_WITHIN && !/^\d+$/.test(process.env.UPDATED_WITHIN))
-  throw new Error('Please enter "updated within" as integer in hours.')
+async function handle(item) {
+  try {
+    const log = (k,v,skip) => {
+      core.info(`#${item.number} - ${k}: ${v}` + (skip ? ' (skipped)' : ''))
+      return skip
+    }
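The helper returns its `skip` argument, so a single expression can both record a value and gate an early return, as the later hunks do. A usage sketch with hypothetical values:

```js
// Logs "#123 - Has changes: false (skipped)" and bails out of handle().
if (log('Has changes', false, true))
  return
```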
 
-const cutoff = new Date(await (async () => {
-  // Always run for Pull Request triggers, no cutoff since there will be a single
-  // response only anyway. 0 is the Unix epoch, so always smaller.
-  if (context.payload.pull_request?.number) return 0
-
-  // Manually triggered via UI when updatedWithin is set. Will fallthrough to the last
-  // option if the updatedWithin parameter is set to 0, which is the default.
-  const updatedWithin = Number.parseInt(process.env.UPDATED_WITHIN, 10)
-  if (updatedWithin) return new Date().getTime() - updatedWithin * 60 * 60 * 1000
-
-  // Normally a scheduled run, but could be workflow_dispatch, see above. Go back as far
-  // as the last successful run of this workflow to make sure we are not leaving anyone
-  // behind on GHA failures.
-  // Defaults to go back 1 hour on the first run.
-  return (await github.rest.actions.listWorkflowRuns({
-    ...context.repo,
-    workflow_id: 'labels.yml',
-    event: 'schedule',
-    status: 'success',
-    exclude_pull_requests: true
-  })).data.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000
-})())
-core.info('cutoff timestamp: ' + cutoff.toISOString())
-
-// To simplify this action's logic we fetch the pull_request data again below, even if
-// we are already in a pull_request event's context and would have the data readily
-// available. We do this by filtering the list of pull requests with head and base
-// branch - there can only be a single open Pull Request for any such combination.
-const prEventCondition = !context.payload.pull_request ? undefined : {
-  // "label" is in the format of `user:branch` or `org:branch`
-  head: context.payload.pull_request.head.label,
-  base: context.payload.pull_request.base.ref
-}
+    log('Last updated at', item.updated_at)
+    stats.prs++
+    log('URL', item.html_url)
 
-const prs = await github.paginate(
-  github.rest.pulls.list,
-  {
-    ...context.repo,
-    state: 'open',
-    sort: 'updated',
-    direction: 'desc',
-    ...prEventCondition
-  },
-  (response, done) => response.data.map(async (pull_request) => {
-    try {
-      const log = (k,v,skip) => {
-        core.info(`PR #${pull_request.number} - ${k}: ${v}` + (skip ? ' (skipped)' : ''))
-        return skip
-      }
+    const pull_number = item.number
+    const issue_number = item.number
 
-      if (log('Last updated at', pull_request.updated_at, new Date(pull_request.updated_at) < cutoff))
-        return done()
-      stats.prs++
-      log('URL', pull_request.html_url)
+    // This API request is important for the merge-conflict label, because it triggers the
+    // creation of a new test merge commit. This is needed to actually determine the state of a PR.
+    const pull_request = (await github.rest.pulls.get({
+      ...context.repo,
+      pull_number
+    })).data
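Per GitHub's REST documentation, fetching a pull request starts an asynchronous mergeability check; while it runs, the `mergeable` field is `null`. The workflow deliberately does not poll for the result (it reads `merge_commit_sha` across scheduled passes instead, see below), but the background computation this GET triggers is the one a poller would wait on. A sketch of that pattern, for illustration only:

```js
// Sketch only - polling mergeability; the workflow itself does NOT do this.
async function waitForMergeable(pull_number, tries = 5) {
  for (let i = 0; i < tries; i++) {
    const { data } = await github.rest.pulls.get({ ...context.repo, pull_number })
    if (data.mergeable !== null) return data.mergeable // computation finished
    await new Promise(resolve => setTimeout(resolve, 3000)) // still computing
  }
  return null // undecided after all tries
}
```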
 
-      const run_id = (await github.rest.actions.listWorkflowRuns({
+    const run_id = (await github.rest.actions.listWorkflowRuns({
+      ...context.repo,
+      workflow_id: 'pr.yml',
+      event: 'pull_request_target',
+      // In pull_request contexts the workflow is still running.
+      status: context.payload.pull_request ? undefined : 'success',
+      exclude_pull_requests: true,
+      head_sha: pull_request.head.sha
+    })).data.workflow_runs[0]?.id ??
+      // TODO: Remove this after 2025-09-17, at which point all eval.yml artifacts will have expired.
+      (await github.rest.actions.listWorkflowRuns({
         ...context.repo,
-        workflow_id: 'pr.yml',
+        // In older PRs, we need eval.yml instead of pr.yml.
+        workflow_id: 'eval.yml',
         event: 'pull_request_target',
-        // For PR events, the workflow run is still in progress with this job itself.
-        status: prEventCondition ? 'in_progress' : 'success',
+        status: 'success',
         exclude_pull_requests: true,
         head_sha: pull_request.head.sha
-      })).data.workflow_runs[0]?.id ??
-      // TODO: Remove this after 2025-09-17, at which point all eval.yml artifacts will have expired.
-      (await github.rest.actions.listWorkflowRuns({
-        ...context.repo,
-        // In older PRs, we need eval.yml instead of pr.yml.
-        workflow_id: 'eval.yml',
-        event: 'pull_request_target',
-        status: 'success',
-        exclude_pull_requests: true,
-        head_sha: pull_request.head.sha
-      })).data.workflow_runs[0]?.id
+      })).data.workflow_runs[0]?.id
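The two lookups differ only in `workflow_id` and `status`; if more fallbacks ever accumulate, the `??` chain could become a loop. A sketch of that refactoring (hypothetical helper, not part of the PR):

```js
// Hypothetical helper: return the first matching run id among candidates.
async function firstRunId(base, candidates) {
  for (const overrides of candidates) {
    const { data } = await github.rest.actions.listWorkflowRuns({ ...base, ...overrides })
    const id = data.workflow_runs[0]?.id
    if (id) return id
  }
}
```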
 
-      // Newer PRs might not have run Eval to completion, yet. We can skip them, because this
-      // job will be run as part of that Eval run anyway.
-      if (log('Last eval run', run_id ?? '<pending>', !run_id))
-        return;
+    // Newer PRs might not have run Eval to completion, yet.
+    // Older PRs might not have an eval.yml workflow, yet.
+    // In either case we continue without fetching an artifact, on a best-effort basis.
+    log('Last eval run', run_id ?? '<n/a>')
 
-      const artifact = (await github.rest.actions.listWorkflowRunArtifacts({
-        ...context.repo,
-        run_id,
-        name: 'comparison'
-      })).data.artifacts[0]
+    const artifact = run_id && (await github.rest.actions.listWorkflowRunArtifacts({
+      ...context.repo,
+      run_id,
+      name: 'comparison'
+    })).data.artifacts[0]
 
-      // Instead of checking the boolean artifact.expired, we will give us a minute to
-      // actually download the artifact in the next step and avoid that race condition.
-      // Older PRs, where the workflow run was already eval.yml, but the artifact was not
-      // called "comparison", yet, will be skipped as well.
-      const expired = new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000)
-      if (log('Artifact expires at', artifact?.expires_at ?? '<not found>', expired))
-        return;
+    // Instead of checking the boolean artifact.expired, we give ourselves a minute to
+    // actually download the artifact in the next step and avoid that race condition.
+    // Older PRs, where the workflow run was already eval.yml but the artifact was not
+    // yet called "comparison", will skip the download.
+    const expired = !artifact || new Date(artifact?.expires_at ?? 0) < new Date(new Date().getTime() + 60 * 1000)
+    log('Artifact expires at', artifact?.expires_at ?? '<n/a>')
+    if (!expired) {
       stats.artifacts++
 
       await artifactClient.downloadArtifact(artifact.id, {
···
          repositoryOwner: context.repo.owner,
          token: core.getInput('github-token')
        },
-      path: path.resolve(pull_request.number.toString()),
+      path: path.resolve(pull_number.toString()),
        expectedHash: artifact.digest
      })
+    }
 
-      // Create a map (Label -> Boolean) of all currently set labels.
-      // Each label is set to True and can be disabled later.
-      const before = Object.fromEntries(
-        (await github.paginate(github.rest.issues.listLabelsOnIssue, {
-          ...context.repo,
-          issue_number: pull_request.number
-        }))
-          .map(({ name }) => [name, true])
-      )
+    // Create a map (Label -> Boolean) of all currently set labels.
+    // Each label is set to True and can be disabled later.
+    const before = Object.fromEntries(
+      (await github.paginate(github.rest.issues.listLabelsOnIssue, {
+        ...context.repo,
+        issue_number
+      }))
+        .map(({ name }) => [name, true])
+    )
+
+    const approvals = new Set(
+      (await github.paginate(github.rest.pulls.listReviews, {
+        ...context.repo,
+        pull_number
+      }))
+        .filter(review => review.state == 'APPROVED')
+        .map(review => review.user?.id)
+    )
 
-      const approvals = new Set(
-        (await github.paginate(github.rest.pulls.listReviews, {
+    const latest_event_at = new Date(
+      (await github.paginate(
+        github.rest.issues.listEventsForTimeline,
+        {
           ...context.repo,
-          pull_number: pull_request.number
-        }))
-          .filter(review => review.state == 'APPROVED')
-          .map(review => review.user?.id)
-      )
+          issue_number,
+          per_page: 100
+        }
+      ))
+        .filter(({ event }) => [
+          // These events are hand-picked from:
+          // https://docs.github.com/en/rest/using-the-rest-api/issue-event-types?apiVersion=2022-11-28
+          // Each of those causes a PR/issue to *not* be considered as stale anymore.
+          // Most of these use created_at.
+          'assigned',
+          'commented', // uses updated_at, because that could be > created_at
+          'committed', // uses committer.date
+          'head_ref_force_pushed',
+          'milestoned',
+          'pinned',
+          'ready_for_review',
+          'renamed',
+          'reopened',
+          'review_dismissed',
+          'review_requested',
+          'reviewed', // uses submitted_at
+          'unlocked',
+          'unmarked_as_duplicate',
+        ].includes(event))
+        .map(({ created_at, updated_at, committer, submitted_at }) => new Date(updated_at ?? created_at ?? submitted_at ?? committer.date))
+        // Reverse sort by date value. The default sort() sorts by string representation, which is bad for dates.
+        .sort((a,b) => b-a)
+        .at(0) ?? item.created_at
+    )
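The explicit `(a,b) => b-a` comparator matters: without one, `sort()` compares elements by their string form, which is not chronological for dates. A quick illustration:

```js
// Default sort() compares strings like "Mon Jan 01 2024 ..." - not chronological.
const dates = [new Date('2023-12-31'), new Date('2024-01-01')]
dates.sort()                // "Mon Jan 01 2024" < "Sun Dec 31 2023" lexically - wrong order
dates.sort((a, b) => b - a) // numeric comparison of epoch millis - newest first
```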
 
+    const stale_at = new Date(new Date().setDate(new Date().getDate() - 180))
+
+    // After creation of a Pull Request, `merge_commit_sha` will be null initially:
+    // The very first merge commit will only be calculated after a little while.
+    // To avoid labeling the PR as conflicted before that, we wait a few minutes.
+    // This is intentionally less than the time that Eval takes, so that the label job
+    // running after Eval can indeed label the PR as conflicted if that is the case.
+    const merge_commit_sha_valid = new Date() - new Date(pull_request.created_at) > 3 * 60 * 1000
+
+    // Manage most of the labels, without eval results
+    const after = Object.assign(
+      {},
+      before,
+      {
+        // We intentionally don't use the mergeable or mergeable_state attributes.
+        // Those have an intermediate state while the test merge commit is created.
+        // This doesn't work well for us, because we might have just triggered another
+        // test merge commit creation by requesting the pull request via API at the start
+        // of this function.
+        // The attribute merge_commit_sha keeps the old value of null or the hash *until*
+        // the new test merge commit has either been created successfully or has failed.
+        // This essentially means we are updating the merge conflict label in two steps:
+        // On the first pass of the day, we just fetch the pull request, which triggers
+        // the creation. At this stage, the label is likely not updated, yet.
+        // The second pass will then read the result from the first pass and set the label.
+        '2.status: merge conflict': merge_commit_sha_valid && !pull_request.merge_commit_sha,
+        '2.status: stale': !before['1.severity: security'] && latest_event_at < stale_at,
+        '12.approvals: 1': approvals.size == 1,
+        '12.approvals: 2': approvals.size == 2,
+        '12.approvals: 3+': approvals.size >= 3,
+        '12.first-time contribution':
+          [ 'NONE', 'FIRST_TIMER', 'FIRST_TIME_CONTRIBUTOR' ].includes(pull_request.author_association),
+      }
+    )
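To make the two-pass merge-conflict detection concrete, a hypothetical timeline for a PR that develops a conflict (times illustrative only):

```js
// Pass 1 (e.g. 10:00): pulls.get() still returns the old merge_commit_sha and
//   triggers recomputation of the test merge commit in the background;
//   '2.status: merge conflict' stays false on this pass.
// Pass 2 (e.g. 10:10): the recomputation has failed because of the conflict,
//   so merge_commit_sha is now null while merge_commit_sha_valid is true;
//   '2.status: merge conflict' becomes true and the label is applied.
```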
+
+    // Manage labels based on eval results
+    if (!expired) {
       const maintainers = new Set(Object.keys(
-        JSON.parse(await readFile(`${pull_request.number}/maintainers.json`, 'utf-8'))
+        JSON.parse(await readFile(`${pull_number}/maintainers.json`, 'utf-8'))
       ).map(m => Number.parseInt(m, 10)))
 
-      const evalLabels = JSON.parse(await readFile(`${pull_request.number}/changed-paths.json`, 'utf-8')).labels
+      const evalLabels = JSON.parse(await readFile(`${pull_number}/changed-paths.json`, 'utf-8')).labels
 
-      // Manage the labels
-      const after = Object.assign(
-        {},
-        before,
+      Object.assign(
+        after,
         // Ignore `evalLabels` if it's an array.
         // This can happen for older eval runs, before we switched to objects.
         // The old eval labels would have been set by the eval run,
···
         // TODO: Simplify once old eval results have expired (~2025-10)
         (Array.isArray(evalLabels) ? undefined : evalLabels),
         {
-          '12.approvals: 1': approvals.size == 1,
-          '12.approvals: 2': approvals.size == 2,
-          '12.approvals: 3+': approvals.size >= 3,
           '12.approved-by: package-maintainer': Array.from(maintainers).some(m => approvals.has(m)),
-          '12.first-time contribution':
-            [ 'NONE', 'FIRST_TIMER', 'FIRST_TIME_CONTRIBUTOR' ].includes(pull_request.author_association),
         }
       )
+    }
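The `Array.isArray(evalLabels) ? undefined : evalLabels` guard works because `Object.assign` silently skips `null` and `undefined` sources; without it, an array would spread its indices as keys:

```js
Object.assign({ a: true }, undefined, { b: true }) // => { a: true, b: true }
Object.assign({}, ['x', 'y'])                      // => { '0': 'x', '1': 'y' } - why the guard exists
```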
 
-      // No need for an API request, if all labels are the same.
-      const hasChanges = Object.keys(after).some(name => (before[name] ?? false) != after[name])
-      if (log('Has changes', hasChanges, !hasChanges))
-        return;
+    // No need for an API request if all labels are the same.
+    const hasChanges = Object.keys(after).some(name => (before[name] ?? false) != after[name])
+    if (log('Has changes', hasChanges, !hasChanges))
+      return;
 
-      // Skipping labeling on a pull_request event, because we have no privileges.
-      const labels = Object.entries(after).filter(([,value]) => value).map(([name]) => name)
-      if (log('Set labels', labels, context.eventName == 'pull_request'))
-        return;
+    // Skipping labeling on a pull_request event, because we have no privileges.
+    const labels = Object.entries(after).filter(([,value]) => value).map(([name]) => name)
+    if (log('Set labels', labels, context.eventName == 'pull_request'))
+      return;
 
-      await github.rest.issues.setLabels({
-        ...context.repo,
-        issue_number: pull_request.number,
-        labels
-      })
-    } catch (cause) {
-      throw new Error(`Labeling PR #${pull_request.number} failed.`, { cause })
-    }
-  })
-);
+    await github.rest.issues.setLabels({
+      ...context.repo,
+      issue_number,
+      labels
+    })
+  } catch (cause) {
+    throw new Error(`Labeling #${item.number} failed.`, { cause })
+  }
+}
+
+try {
+  if (context.payload.pull_request) {
+    await handle(context.payload.pull_request)
+  } else {
+    const workflowData = (await github.rest.actions.listWorkflowRuns({
+      ...context.repo,
+      workflow_id: 'labels.yml',
+      event: 'schedule',
+      status: 'success',
+      exclude_pull_requests: true,
+      per_page: 1
+    })).data
 
-(await Promise.allSettled(prs.flat()))
-  .filter(({ status }) => status == 'rejected')
-  .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`))
+    // Go back as far as the last successful run of this workflow to make sure
+    // we are not leaving anyone behind on GHA failures.
+    // Defaults to going back 1 hour on the first run.
+    const cutoff = new Date(workflowData.workflow_runs[0]?.created_at ?? new Date().getTime() - 1 * 60 * 60 * 1000)
+    core.info('cutoff timestamp: ' + cutoff.toISOString())
 
-core.notice(`Processed ${stats.prs} PRs, made ${stats.requests + stats.artifacts} API requests and downloaded ${stats.artifacts} artifacts.`)
-clearInterval(reservoirUpdater)
+    const updatedItems = await github.paginate(
+      github.rest.search.issuesAndPullRequests,
+      {
+        q: [
+          `repo:"${process.env.GITHUB_REPOSITORY}"`,
+          'type:pr',
+          'is:open',
+          `updated:>=${cutoff.toISOString()}`
+        ].join(' AND '),
+        // TODO: Remove in 2025-10, when it becomes the default.
+        advanced_search: true
+      }
+    )
+
+    // The search endpoint only allows fetching the first 1000 records, but the
+    // pull request list endpoint does not support counting the total number
+    // of results.
+    // Thus, we use /search for counting and /pulls for reading the response.
+    const { total_count: total_pulls } = (await github.rest.search.issuesAndPullRequests({
+      q: [
+        `repo:"${process.env.GITHUB_REPOSITORY}"`,
+        'type:pr',
+        'is:open'
+      ].join(' AND '),
+      sort: 'created',
+      direction: 'asc',
+      // TODO: Remove in 2025-10, when it becomes the default.
+      advanced_search: true,
+      per_page: 1
+    })).data
+    const { total_count: total_runs } = workflowData
+
+    const allPulls = (await github.rest.pulls.list({
+      ...context.repo,
+      state: 'open',
+      sort: 'created',
+      direction: 'asc',
+      per_page: 100,
+      // We iterate through pages of 100 items across scheduled runs. With currently ~7000 open PRs and
+      // up to 6*24=144 scheduled runs per day, we hit every PR twice each day.
+      // We might not hit every PR on one iteration, because the pages will shift slightly when
+      // PRs are closed or merged. We assume this to be OK on the bigger scale, because a PR that was
+      // missed once would have to move through the whole page to be missed again. This is very unlikely,
+      // so it should certainly be hit on the next iteration.
+      // TODO: Evaluate after a while whether the above still holds true, and potentially implement
+      // an overlap between runs.
+      page: total_runs % Math.ceil(total_pulls / 100)
+    })).data
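A worked example of the page rotation, using the assumed numbers from the comment above (illustrative, not measurements):

```js
// Assuming ~7000 open PRs and a monotonically growing scheduled-run count:
const total_pulls = 7000
const pages = Math.ceil(total_pulls / 100) // -> 70 pages of 100 PRs each
// total_runs % pages cycles 0, 1, ..., 69, 0, ... as runs accumulate,
// so ~144 scheduled runs per day walk through all 70 pages about twice a day.
```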
+
+    // Some items might appear in both result sets, so filter out duplicates as well.
+    const items = [].concat(updatedItems, allPulls)
+      .filter((thisItem, idx, arr) => idx == arr.findIndex(firstItem => firstItem.number == thisItem.number))
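`findIndex` inside `filter` makes the de-duplication quadratic, which is fine at this scale (a few thousand items); a `Map` keyed by PR number would be the linear equivalent:

```js
// Equivalent O(n) de-duplication; first occurrence wins, as with findIndex.
const byNumber = new Map()
for (const item of [].concat(updatedItems, allPulls))
  if (!byNumber.has(item.number)) byNumber.set(item.number, item)
const items = [...byNumber.values()]
```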
+
+    ;(await Promise.allSettled(items.map(handle)))
+      .filter(({ status }) => status == 'rejected')
+      .map(({ reason }) => core.setFailed(`${reason.message}\n${reason.cause.stack}`))
+
+    core.notice(`Processed ${stats.prs} PRs, made ${stats.requests + stats.artifacts} API requests and downloaded ${stats.artifacts} artifacts.`)
+  }
+} finally {
+  clearInterval(reservoirUpdater)
+}
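Replacing the old `uncaughtException` hook with `try`/`finally` clears the interval on success and failure alike; otherwise the pending timer keeps the Node event loop alive until the job times out. An alternative with a similar effect would be an unreferenced timer:

```js
// A timer that does not keep the event loop alive (alternative, not used here):
setInterval(updateReservoir, 60 * 1000).unref()
```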
 
       - name: Log current API rate limits
         env:
CONTRIBUTING.md (+1 -1)
···
 
 To streamline automated updates, leverage the nixpkgs-merge-bot by simply commenting `@NixOS/nixpkgs-merge-bot merge`. The bot will verify if the following conditions are met, refusing to merge otherwise:
 
-- the PR author should be @r-ryantm;
+- the PR author should be @r-ryantm or a Nixpkgs committer;
 - the commenter that issued the command should be among the package maintainers;
 - the package should reside in `pkgs/by-name`.
 
doc/release-notes/rl-2511.section.md (+6)
···
 - `podofo` has been updated from `0.9.8` to `1.0.0`. These releases are by nature very incompatible due to major API changes. The legacy versions can be found under `podofo_0_10` and `podofo_0_9`.
   Changelog: https://github.com/podofo/podofo/blob/1.0.0/CHANGELOG.md, API-Migration-Guide: https://github.com/podofo/podofo/blob/1.0.0/API-MIGRATION.md.
 
+- NetBox was updated to `>= 4.3.0`. Have a look at the breaking changes
+  of the [4.3 release](https://github.com/netbox-community/netbox/releases/tag/v4.3.0),
+  make the required changes to your database, if needed, then upgrade by setting `services.netbox.package = pkgs.netbox_4_3;` in your configuration.
+
 ## Other Notable Changes {#sec-nixpkgs-release-25.11-notable-changes}
 
 <!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
 
 - Added `rewriteURL` attribute to the nixpkgs `config`, to allow for rewriting the URLs downloaded by `fetchurl`.
+
+- The systemd initrd will now respect `x-systemd.wants` and `x-systemd.requires` for reliably unlocking multi-disk bcachefs volumes.
 
 - New hardening flags, `strictflexarrays1` and `strictflexarrays3`, were made available, corresponding to the gcc/clang options `-fstrict-flex-arrays=1` and `-fstrict-flex-arrays=3` respectively.
 
maintainers/README.md (+1 -1)
···
 for further information.
 
 # nixpkgs-merge-bot
-To streamline autoupdates, leverage the nixpkgs-merge-bot by commenting `@NixOS/nixpkgs-merge-bot merge` if the package resides in pkgs-by-name and the commenter is among the package maintainers. The bot ensures that all ofborg checks, except for darwin, are successfully completed before merging the pull request. Should the checks still be underway, the bot patiently waits for ofborg to finish before attempting the merge again.
+To streamline autoupdates, leverage the nixpkgs-merge-bot by commenting `@NixOS/nixpkgs-merge-bot merge` if the package resides in pkgs-by-name, the commenter is among the package maintainers, and the pull request author is @r-ryantm or a Nixpkgs committer. The bot ensures that all ofborg checks, except for darwin, are successfully completed before merging the pull request. Should the checks still be underway, the bot patiently waits for ofborg to finish before attempting the merge again.
 
 # Guidelines for Committers
 
···
       # Generated with `uuidgen`. Random but fixed to improve reproducibility.
       default = "0867da16-f251-457d-a9e8-c31f9a3c220b";
       description = ''
-        A UUID to use as a seed. You can set this to `null` to explicitly
+        A UUID to use as a seed. You can set this to `random` to explicitly
         randomize the partition UUIDs.
+        See {manpage}`systemd-repart(8)` for more information.
       '';
     };
 
···
 
   buildInputs = [ libX11 ];
 
+  buildFlags = [ "CC=${stdenv.cc.targetPrefix}cc" ]; # fix darwin and cross-compiled builds
+
   # Functions are declared after they are used in the file; this is an error since gcc-14.
   # randnum.c:25:3: warning: implicit declaration of function 'srand' [-Wimplicit-function-declaration]
   # randnum.c:33:7: warning: implicit declaration of function 'rand'; did you mean 'randnum'? [-Wimplicit-function-declaration]
   # text.c:34:50: warning: implicit declaration of function 'strlen' [-Wimplicit-function-declaration]
-  env.NIX_CFLAGS_COMPILE = "-Wno-error=implicit-function-declaration";
+  postPatch = ''
+    substituteInPlace randnum.c --replace-fail 'stdio.h' 'stdlib.h'
+    sed -i '1i\
+    #include <string.h>' text.c
+
+    # The Makefile tries to install icbm3d immediately after building it, and
+    # ends up trying to copy it to /icbm3d. Normally this just gets an error
+    # and moves on, but it's probably better to not try it in the first place.
+    sed -i '/INSTALLROOT/d' makefile
+  '';
 
   installPhase = ''
     runHook preInstall
···
     description = "3D vector-based clone of the Atari game Missile Command";
     mainProgram = "icbm3d";
     license = lib.licenses.gpl2Plus;
-    platforms = lib.platforms.linux;
+    platforms = lib.platforms.unix;
   };
 })
···
 
   # TODO: figure out why this is even necessary and why the missing dylib only crashes
   # random instead of every test
-  preCheck = lib.optionalString stdenv.hostPlatform.isDarwin ''
+  preCheck = lib.optionalString (stdenv.hostPlatform.isDarwin && !stdenv.hostPlatform.isStatic) ''
     mkdir -p $lib/lib
     cp src/.libs/libgcrypt.20.dylib $lib/lib
   '';
···
   };
 
   # Not in aliases because it wouldn't get picked up by callPackage
-  netbox = netbox_4_2;
+  netbox = netbox_4_3;
 
   netcap-nodpi = callPackage ../by-name/ne/netcap/package.nix {
     withDpi = false;