···265265266266- The `cawbird` package is dropped from nixpkgs, as it got broken by the Twitter API closing down and has been abandoned upstream.
267267268268+- `hardware.nvidia` gained `datacenter` options for enabling NVIDIA Data Center drivers and configuration of NVLink/NVSwitch topologies through `nv-fabricmanager`.
269269+268270- Certificate generation via the `security.acme` module now limits the number of concurrently running certificate renewal and generation jobs, to avoid spiking resource usage when processing many certificates at once. The limit defaults to *5* and can be adjusted via `maxConcurrentRenewals`. Setting it to *0* disables the limits altogether.
269271270272- New `boot.bcache.enable` (default enabled) allows completely removing `bcache` mount support.
+314-208
nixos/modules/hardware/video/nvidia.nix
···44 pkgs,
55 ...
66}: let
77+ x11Enabled = config.services.xserver.enable
88+ && (lib.elem "nvidia" config.services.xserver.videoDrivers);
79 nvidia_x11 =
88- if (lib.elem "nvidia" config.services.xserver.videoDrivers)
1010+ if x11Enabled || cfg.datacenter.enable
911 then cfg.package
1012 else null;
1113···1820 primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
1921 busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
2022 ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
2323+ settingsFormat = pkgs.formats.keyValue {};
2124in {
2225 options = {
2326 hardware.nvidia = {
2727+ datacenter.enable = lib.mkEnableOption (lib.mdDoc ''
2828+ Data Center drivers for NVIDIA cards on a NVLink topology.
2929+ '');
3030+ datacenter.settings = lib.mkOption {
3131+ type = settingsFormat.type;
3232+ default = {
3333+ LOG_LEVEL=4;
3434+ LOG_FILE_NAME="/var/log/fabricmanager.log";
3535+ LOG_APPEND_TO_LOG=1;
3636+ LOG_FILE_MAX_SIZE=1024;
3737+ LOG_USE_SYSLOG=0;
3838+ DAEMONIZE=1;
3939+ BIND_INTERFACE_IP="127.0.0.1";
4040+ STARTING_TCP_PORT=16000;
4141+ FABRIC_MODE=0;
4242+ FABRIC_MODE_RESTART=0;
4343+ STATE_FILE_NAME="/var/tmp/fabricmanager.state";
4444+ FM_CMD_BIND_INTERFACE="127.0.0.1";
4545+ FM_CMD_PORT_NUMBER=6666;
4646+ FM_STAY_RESIDENT_ON_FAILURES=0;
4747+ ACCESS_LINK_FAILURE_MODE=0;
4848+ TRUNK_LINK_FAILURE_MODE=0;
4949+ NVSWITCH_FAILURE_MODE=0;
5050+ ABORT_CUDA_JOBS_ON_FM_EXIT=1;
5151+ TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
5252+ };
5353+ defaultText = lib.literalExpression ''
5454+ {
5555+ LOG_LEVEL=4;
5656+ LOG_FILE_NAME="/var/log/fabricmanager.log";
5757+ LOG_APPEND_TO_LOG=1;
5858+ LOG_FILE_MAX_SIZE=1024;
5959+ LOG_USE_SYSLOG=0;
6060+ DAEMONIZE=1;
6161+ BIND_INTERFACE_IP="127.0.0.1";
6262+ STARTING_TCP_PORT=16000;
6363+ FABRIC_MODE=0;
6464+ FABRIC_MODE_RESTART=0;
6565+ STATE_FILE_NAME="/var/tmp/fabricmanager.state";
6666+ FM_CMD_BIND_INTERFACE="127.0.0.1";
6767+ FM_CMD_PORT_NUMBER=6666;
6868+ FM_STAY_RESIDENT_ON_FAILURES=0;
6969+ ACCESS_LINK_FAILURE_MODE=0;
7070+ TRUNK_LINK_FAILURE_MODE=0;
7171+ NVSWITCH_FAILURE_MODE=0;
7272+ ABORT_CUDA_JOBS_ON_FM_EXIT=1;
7373+ TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
7474+ }
7575+ '';
7676+ description = lib.mdDoc ''
7777+ Additional configuration options for fabricmanager.
7878+ '';
7979+ };
8080+2481 powerManagement.enable = lib.mkEnableOption (lib.mdDoc ''
2582 experimental power management through systemd. For more information, see
2683 the NVIDIA docs, on Chapter 21. Configuring Power Management Support.
···167224 It also drastically increases the time the driver needs to clock down after load.
168225 '');
169226170170- package = lib.mkPackageOptionMD config.boot.kernelPackages.nvidiaPackages "nvidia_x11" {
171171- default = "stable";
227227+ package = lib.mkOption {
228228+ default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
229229+ defaultText = lib.literalExpression ''
230230+ config.boot.kernelPackages.nvidiaPackages."\$\{if cfg.datacenter.enable then "dc" else "stable"}"
231231+ '';
172232 example = lib.mdDoc "config.boot.kernelPackages.nvidiaPackages.legacy_470";
233233+ description = lib.mdDoc ''
234234+ The NVIDIA driver package to use.
235235+ '';
173236 };
174237175238 open = lib.mkEnableOption (lib.mdDoc ''
···188251 then pCfg.intelBusId
189252 else pCfg.amdgpuBusId;
190253 in
191191- lib.mkIf (nvidia_x11 != null) {
192192- assertions = [
254254+ lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
255255+ # Common
256256+ ({
257257+ assertions = [
258258+ {
259259+ assertion = !(x11Enabled && cfg.datacenter.enable);
260260+ message = "You cannot configure both X11 and Data Center drivers at the same time.";
261261+ }
262262+ ];
263263+ boot = {
264264+ blacklistedKernelModules = ["nouveau" "nvidiafb"];
265265+ kernelModules = [ "nvidia-uvm" ];
266266+ };
267267+ systemd.tmpfiles.rules =
268268+ lib.optional config.virtualisation.docker.enableNvidia
269269+ "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
270270+ services.udev.extraRules =
271271+ ''
272272+ # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
273273+ KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
274274+ KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) $${i}; done'"
275275+ KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
276276+ KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
277277+ KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"
278278+ '';
279279+ hardware.opengl = {
280280+ extraPackages = [
281281+ nvidia_x11.out
282282+ ];
283283+ extraPackages32 = [
284284+ nvidia_x11.lib32
285285+ ];
286286+ };
287287+ environment.systemPackages = [
288288+ nvidia_x11.bin
289289+ ];
290290+ })
291291+ # X11
292292+ (lib.mkIf x11Enabled {
293293+ assertions = [
193294 {
194295 assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
195296 message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
···248349 {
249350 assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
250351 message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
251251- }
252252- ];
352352+ }];
253353254254- # If Optimus/PRIME is enabled, we:
255255- # - Specify the configured NVIDIA GPU bus ID in the Device section for the
256256- # "nvidia" driver.
257257- # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
258258- # "nvidia" driver, in order to allow the X server to start without any outputs.
259259- # - Add a separate Device section for the Intel GPU, using the "modesetting"
260260- # driver and with the configured BusID.
261261- # - OR add a separate Device section for the AMD APU, using the "amdgpu"
262262- # driver and with the configures BusID.
263263- # - Reference that Device section from the ServerLayout section as an inactive
264264- # device.
265265- # - Configure the display manager to run specific `xrandr` commands which will
266266- # configure/enable displays connected to the Intel iGPU / AMD APU.
354354+ # If Optimus/PRIME is enabled, we:
355355+ # - Specify the configured NVIDIA GPU bus ID in the Device section for the
356356+ # "nvidia" driver.
357357+ # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
358358+ # "nvidia" driver, in order to allow the X server to start without any outputs.
359359+ # - Add a separate Device section for the Intel GPU, using the "modesetting"
360360+ # driver and with the configured BusID.
361361+ # - OR add a separate Device section for the AMD APU, using the "amdgpu"
362362+ # driver and with the configured BusID.
363363+ # - Reference that Device section from the ServerLayout section as an inactive
364364+ # device.
365365+ # - Configure the display manager to run specific `xrandr` commands which will
366366+ # configure/enable displays connected to the Intel iGPU / AMD APU.
267367268268- # reverse sync implies offloading
269269- hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
368368+ # reverse sync implies offloading
369369+ hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
270370271271- services.xserver.drivers =
272272- lib.optional primeEnabled {
273273- name = igpuDriver;
274274- display = offloadCfg.enable;
275275- modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
276276- deviceSection =
277277- ''
278278- BusID "${igpuBusId}"
279279- ''
280280- + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
281281- Option "AccelMethod" "none"
282282- '';
283283- }
284284- ++ lib.singleton {
285285- name = "nvidia";
286286- modules = [nvidia_x11.bin];
287287- display = !offloadCfg.enable;
288288- deviceSection =
289289- lib.optionalString primeEnabled
290290- ''
291291- BusID "${pCfg.nvidiaBusId}"
292292- ''
293293- + lib.optionalString pCfg.allowExternalGpu ''
294294- Option "AllowExternalGpus"
295295- '';
296296- screenSection =
297297- ''
298298- Option "RandRRotation" "on"
299299- ''
300300- + lib.optionalString syncCfg.enable ''
301301- Option "AllowEmptyInitialConfiguration"
302302- ''
303303- + lib.optionalString cfg.forceFullCompositionPipeline ''
304304- Option "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
305305- Option "AllowIndirectGLXProtocol" "off"
306306- Option "TripleBuffer" "on"
307307- '';
308308- };
371371+ services.xserver.drivers =
372372+ lib.optional primeEnabled {
373373+ name = igpuDriver;
374374+ display = offloadCfg.enable;
375375+ modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
376376+ deviceSection =
377377+ ''
378378+ BusID "${igpuBusId}"
379379+ ''
380380+ + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
381381+ Option "AccelMethod" "none"
382382+ '';
383383+ }
384384+ ++ lib.singleton {
385385+ name = "nvidia";
386386+ modules = [nvidia_x11.bin];
387387+ display = !offloadCfg.enable;
388388+ deviceSection =
389389+ lib.optionalString primeEnabled
390390+ ''
391391+ BusID "${pCfg.nvidiaBusId}"
392392+ ''
393393+ + lib.optionalString pCfg.allowExternalGpu ''
394394+ Option "AllowExternalGpus"
395395+ '';
396396+ screenSection =
397397+ ''
398398+ Option "RandRRotation" "on"
399399+ ''
400400+ + lib.optionalString syncCfg.enable ''
401401+ Option "AllowEmptyInitialConfiguration"
402402+ ''
403403+ + lib.optionalString cfg.forceFullCompositionPipeline ''
404404+ Option "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
405405+ Option "AllowIndirectGLXProtocol" "off"
406406+ Option "TripleBuffer" "on"
407407+ '';
408408+ };
309409310310- services.xserver.serverLayoutSection =
311311- lib.optionalString syncCfg.enable ''
312312- Inactive "Device-${igpuDriver}[0]"
313313- ''
314314- + lib.optionalString reverseSyncCfg.enable ''
315315- Inactive "Device-nvidia[0]"
316316- ''
317317- + lib.optionalString offloadCfg.enable ''
318318- Option "AllowNVIDIAGPUScreens"
319319- '';
410410+ services.xserver.serverLayoutSection =
411411+ lib.optionalString syncCfg.enable ''
412412+ Inactive "Device-${igpuDriver}[0]"
413413+ ''
414414+ + lib.optionalString reverseSyncCfg.enable ''
415415+ Inactive "Device-nvidia[0]"
416416+ ''
417417+ + lib.optionalString offloadCfg.enable ''
418418+ Option "AllowNVIDIAGPUScreens"
419419+ '';
320420321321- services.xserver.displayManager.setupCommands = let
322322- gpuProviderName =
323323- if igpuDriver == "amdgpu"
324324- then
325325- # find the name of the provider if amdgpu
326326- "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
327327- else igpuDriver;
328328- providerCmdParams =
329329- if syncCfg.enable
330330- then "\"${gpuProviderName}\" NVIDIA-0"
331331- else "NVIDIA-G0 \"${gpuProviderName}\"";
332332- in
333333- lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
334334- # Added by nvidia configuration module for Optimus/PRIME.
335335- ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
336336- ${lib.getExe pkgs.xorg.xrandr} --auto
337337- '';
421421+ services.xserver.displayManager.setupCommands = let
422422+ gpuProviderName =
423423+ if igpuDriver == "amdgpu"
424424+ then
425425+ # find the name of the provider if amdgpu
426426+ "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
427427+ else igpuDriver;
428428+ providerCmdParams =
429429+ if syncCfg.enable
430430+ then "\"${gpuProviderName}\" NVIDIA-0"
431431+ else "NVIDIA-G0 \"${gpuProviderName}\"";
432432+ in
433433+ lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
434434+ # Added by nvidia configuration module for Optimus/PRIME.
435435+ ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
436436+ ${lib.getExe pkgs.xorg.xrandr} --auto
437437+ '';
338438339339- environment.etc = {
340340- "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
439439+ environment.etc = {
440440+ "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
341441342342- # 'nvidia_x11' installs it's files to /run/opengl-driver/...
343343- "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
344344- };
442442+ # 'nvidia_x11' installs its files to /run/opengl-driver/...
443443+ "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
444444+ };
345445346346- hardware.opengl = {
347347- extraPackages = [
348348- nvidia_x11.out
349349- pkgs.nvidia-vaapi-driver
350350- ];
351351- extraPackages32 = [
352352- nvidia_x11.lib32
353353- pkgs.pkgsi686Linux.nvidia-vaapi-driver
354354- ];
355355- };
356356- environment.systemPackages =
357357- [nvidia_x11.bin]
358358- ++ lib.optional cfg.nvidiaSettings nvidia_x11.settings
359359- ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
360360- ++ lib.optional offloadCfg.enableOffloadCmd
361361- (pkgs.writeShellScriptBin "nvidia-offload" ''
362362- export __NV_PRIME_RENDER_OFFLOAD=1
363363- export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
364364- export __GLX_VENDOR_LIBRARY_NAME=nvidia
365365- export __VK_LAYER_NV_optimus=NVIDIA_only
366366- exec "$@"
367367- '');
446446+ hardware.opengl = {
447447+ extraPackages = [
448448+ pkgs.nvidia-vaapi-driver
449449+ ];
450450+ extraPackages32 = [
451451+ pkgs.pkgsi686Linux.nvidia-vaapi-driver
452452+ ];
453453+ };
454454+ environment.systemPackages =
455455+ lib.optional cfg.nvidiaSettings nvidia_x11.settings
456456+ ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
457457+ ++ lib.optional offloadCfg.enableOffloadCmd
458458+ (pkgs.writeShellScriptBin "nvidia-offload" ''
459459+ export __NV_PRIME_RENDER_OFFLOAD=1
460460+ export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
461461+ export __GLX_VENDOR_LIBRARY_NAME=nvidia
462462+ export __VK_LAYER_NV_optimus=NVIDIA_only
463463+ exec "$@"
464464+ '');
368465369369- systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
466466+ systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
370467371371- systemd.services = let
372372- nvidiaService = state: {
373373- description = "NVIDIA system ${state} actions";
374374- path = [pkgs.kbd];
375375- serviceConfig = {
376376- Type = "oneshot";
377377- ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
468468+ systemd.services = let
469469+ nvidiaService = state: {
470470+ description = "NVIDIA system ${state} actions";
471471+ path = [pkgs.kbd];
472472+ serviceConfig = {
473473+ Type = "oneshot";
474474+ ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
475475+ };
476476+ before = ["systemd-${state}.service"];
477477+ requiredBy = ["systemd-${state}.service"];
378478 };
379379- before = ["systemd-${state}.service"];
380380- requiredBy = ["systemd-${state}.service"];
381381- };
382382- in
383383- lib.mkMerge [
384384- (lib.mkIf cfg.powerManagement.enable {
385385- nvidia-suspend = nvidiaService "suspend";
386386- nvidia-hibernate = nvidiaService "hibernate";
387387- nvidia-resume =
388388- (nvidiaService "resume")
389389- // {
390390- before = [];
391391- after = ["systemd-suspend.service" "systemd-hibernate.service"];
392392- requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
479479+ in
480480+ lib.mkMerge [
481481+ (lib.mkIf cfg.powerManagement.enable {
482482+ nvidia-suspend = nvidiaService "suspend";
483483+ nvidia-hibernate = nvidiaService "hibernate";
484484+ nvidia-resume =
485485+ (nvidiaService "resume")
486486+ // {
487487+ before = [];
488488+ after = ["systemd-suspend.service" "systemd-hibernate.service"];
489489+ requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
490490+ };
491491+ })
492492+ (lib.mkIf cfg.nvidiaPersistenced {
493493+ "nvidia-persistenced" = {
494494+ description = "NVIDIA Persistence Daemon";
495495+ wantedBy = ["multi-user.target"];
496496+ serviceConfig = {
497497+ Type = "forking";
498498+ Restart = "always";
499499+ PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
500500+ ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
501501+ ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
502502+ };
393503 };
394394- })
395395- (lib.mkIf cfg.nvidiaPersistenced {
396396- "nvidia-persistenced" = {
397397- description = "NVIDIA Persistence Daemon";
398398- wantedBy = ["multi-user.target"];
399399- serviceConfig = {
400400- Type = "forking";
401401- Restart = "always";
402402- PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
403403- ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
404404- ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
504504+ })
505505+ (lib.mkIf cfg.dynamicBoost.enable {
506506+ "nvidia-powerd" = {
507507+ description = "nvidia-powerd service";
508508+ path = [
509509+ pkgs.util-linux # nvidia-powerd wants lscpu
510510+ ];
511511+ wantedBy = ["multi-user.target"];
512512+ serviceConfig = {
513513+ Type = "dbus";
514514+ BusName = "nvidia.powerd.server";
515515+ ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
516516+ };
405517 };
406406- };
407407- })
408408- (lib.mkIf cfg.dynamicBoost.enable {
409409- "nvidia-powerd" = {
410410- description = "nvidia-powerd service";
411411- path = [
412412- pkgs.util-linux # nvidia-powerd wants lscpu
413413- ];
414414- wantedBy = ["multi-user.target"];
415415- serviceConfig = {
416416- Type = "dbus";
417417- BusName = "nvidia.powerd.server";
418418- ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
419419- };
420420- };
421421- })
422422- ];
518518+ })
519519+ ];
520520+ services.acpid.enable = true;
423521424424- services.acpid.enable = true;
425425-426426- services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
427427-428428- hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
429429-430430- systemd.tmpfiles.rules =
431431- lib.optional config.virtualisation.docker.enableNvidia
432432- "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
433433- ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
434434- "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
435435-436436- boot = {
437437- blacklistedKernelModules = ["nouveau" "nvidiafb"];
522522+ services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
438523439439- extraModulePackages =
440440- if cfg.open
441441- then [nvidia_x11.open]
442442- else [nvidia_x11.bin];
524524+ hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
443525444444- # nvidia-uvm is required by CUDA applications.
445445- kernelModules =
446446- ["nvidia-uvm"]
447447- ++ lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
526526+ systemd.tmpfiles.rules =
527527+ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
528528+ "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
448529449449- # If requested enable modesetting via kernel parameter.
450450- kernelParams =
451451- lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
452452- ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
453453- ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
454454- ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
530530+ boot = {
531531+ extraModulePackages =
532532+ if cfg.open
533533+ then [nvidia_x11.open]
534534+ else [nvidia_x11.bin];
535535+ # nvidia-uvm is required by CUDA applications.
536536+ kernelModules =
537537+ lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
455538456456- # enable finegrained power management
457457- extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
458458- options nvidia "NVreg_DynamicPowerManagement=0x02"
459459- '';
460460- };
539539+ # If requested enable modesetting via kernel parameter.
540540+ kernelParams =
541541+ lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
542542+ ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
543543+ ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
544544+ ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
461545462462- services.udev.extraRules =
463463- ''
464464- # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
465465- KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
466466- KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) $${i}; done'"
467467- KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
468468- KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
469469- KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"
470470- ''
471471- + lib.optionalString cfg.powerManagement.finegrained (
546546+ # enable finegrained power management
547547+ extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
548548+ options nvidia "NVreg_DynamicPowerManagement=0x02"
549549+ '';
550550+ };
551551+ services.udev.extraRules =
552552+ lib.optionalString cfg.powerManagement.finegrained (
472553 lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
473554 # Remove NVIDIA USB xHCI Host Controller devices, if present
474555 ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
···489570 ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
490571 ''
491572 );
492492- };
573573+ })
574574+ # Data Center
575575+ (lib.mkIf (cfg.datacenter.enable) {
576576+ boot.extraModulePackages = [
577577+ nvidia_x11.bin
578578+ ];
579579+ systemd.services.nvidia-fabricmanager = {
580580+ enable = true;
581581+ description = "Start NVIDIA NVLink Management";
582582+ wantedBy = [ "multi-user.target" ];
583583+ unitConfig.After = [ "network-online.target" ];
584584+ unitConfig.Requires = [ "network-online.target" ];
585585+ serviceConfig = {
586586+ Type = "forking";
587587+ TimeoutStartSec = 240;
588588+ ExecStart = let
589589+ nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
590590+ in
591591+ nvidia_x11.fabricmanager + "/bin/nv-fabricmanager -c " + nv-fab-conf;
592592+ LimitCORE="infinity";
593593+ };
594594+ };
595595+ environment.systemPackages =
596596+ lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager;
597597+ })
598598+ ]);
493599}
···410410 nvidia_x11_legacy470 = nvidiaPackages.legacy_470;
411411 nvidia_x11_production = nvidiaPackages.production;
412412 nvidia_x11_vulkan_beta = nvidiaPackages.vulkan_beta;
413413+ nvidia_dc = nvidiaPackages.dc;
414414+ nvidia_dc_520 = nvidiaPackages.dc_520;
413415414416 # this is not a replacement for nvidia_x11*
415417 # only the opensource kernel driver exposed for hydra to build