···
   - The `cawbird` package is dropped from nixpkgs, as it was broken by the shutdown of the Twitter API and has been abandoned upstream.
+  - `hardware.nvidia` gained `datacenter` options for enabling the NVIDIA Data Center drivers and for configuring NVLink/NVSwitch topologies through `nv-fabricmanager`.
   - Certificate generation via the `security.acme` module now limits the number of concurrently running certificate renewal and generation jobs, to avoid spiking resource usage when processing many certificates at once. The limit defaults to *5* and can be adjusted via `maxConcurrentRenewals`. Setting it to *0* disables the limit altogether.
   - New `boot.bcache.enable` (enabled by default) allows completely removing `bcache` mount support.
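
For a quick sense of how these notes translate into configuration, here is a minimal sketch. It assumes `maxConcurrentRenewals` sits directly under `security.acme` as the note implies; the concrete values are illustrative only:

```nix
{
  # Cap concurrent ACME renewal/generation jobs; 0 would disable the limit.
  security.acme.maxConcurrentRenewals = 3;

  # bcache mount support is enabled by default; opt out entirely if unused.
  boot.bcache.enable = false;

  # Enable the new NVIDIA Data Center drivers (see the module diff below).
  hardware.nvidia.datacenter.enable = true;
}
```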
+314 -208
nixos/modules/hardware/video/nvidia.nix
···
   pkgs,
   ...
 }: let
   nvidia_x11 =
-    if (lib.elem "nvidia" config.services.xserver.videoDrivers)
     then cfg.package
     else null;
···
   primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
   busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
   ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
 in {
   options = {
     hardware.nvidia = {
       powerManagement.enable = lib.mkEnableOption (lib.mdDoc ''
         experimental power management through systemd. For more information, see
         the NVIDIA docs, Chapter 21: Configuring Power Management Support.
···
         It also drastically increases the time the driver needs to clock down after load.
       '');

-      package = lib.mkPackageOptionMD config.boot.kernelPackages.nvidiaPackages "nvidia_x11" {
-        default = "stable";
         example = lib.mdDoc "config.boot.kernelPackages.nvidiaPackages.legacy_470";
       };

       open = lib.mkEnableOption (lib.mdDoc ''
···
         then pCfg.intelBusId
         else pCfg.amdgpuBusId;
   in
-    lib.mkIf (nvidia_x11 != null) {
-      assertions = [
         {
           assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
           message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
···
         {
           assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
           message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
-        }
-      ];

-      # If Optimus/PRIME is enabled, we:
-      # - Specify the configured NVIDIA GPU bus ID in the Device section for the
-      #   "nvidia" driver.
-      # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
-      #   "nvidia" driver, in order to allow the X server to start without any outputs.
-      # - Add a separate Device section for the Intel GPU, using the "modesetting"
-      #   driver and with the configured BusID.
-      # - OR add a separate Device section for the AMD APU, using the "amdgpu"
-      #   driver and with the configured BusID.
-      # - Reference that Device section from the ServerLayout section as an inactive
-      #   device.
-      # - Configure the display manager to run specific `xrandr` commands which will
-      #   configure/enable displays connected to the Intel iGPU / AMD APU.

-      # reverse sync implies offloading
-      hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;

-      services.xserver.drivers =
-        lib.optional primeEnabled {
-          name = igpuDriver;
-          display = offloadCfg.enable;
-          modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
-          deviceSection =
-            ''
-              BusID "${igpuBusId}"
-            ''
-            + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
-              Option "AccelMethod" "none"
-            '';
-        }
-        ++ lib.singleton {
-          name = "nvidia";
-          modules = [nvidia_x11.bin];
-          display = !offloadCfg.enable;
-          deviceSection =
-            lib.optionalString primeEnabled
-            ''
-              BusID "${pCfg.nvidiaBusId}"
-            ''
-            + lib.optionalString pCfg.allowExternalGpu ''
-              Option "AllowExternalGpus"
-            '';
-          screenSection =
-            ''
-              Option "RandRRotation" "on"
-            ''
-            + lib.optionalString syncCfg.enable ''
-              Option "AllowEmptyInitialConfiguration"
-            ''
-            + lib.optionalString cfg.forceFullCompositionPipeline ''
-              Option "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
-              Option "AllowIndirectGLXProtocol" "off"
-              Option "TripleBuffer" "on"
-            '';
-        };

-      services.xserver.serverLayoutSection =
-        lib.optionalString syncCfg.enable ''
-          Inactive "Device-${igpuDriver}[0]"
-        ''
-        + lib.optionalString reverseSyncCfg.enable ''
-          Inactive "Device-nvidia[0]"
-        ''
-        + lib.optionalString offloadCfg.enable ''
-          Option "AllowNVIDIAGPUScreens"
-        '';

-      services.xserver.displayManager.setupCommands = let
-        gpuProviderName =
-          if igpuDriver == "amdgpu"
-          then
-            # find the name of the provider if amdgpu
-            "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
-          else igpuDriver;
-        providerCmdParams =
-          if syncCfg.enable
-          then "\"${gpuProviderName}\" NVIDIA-0"
-          else "NVIDIA-G0 \"${gpuProviderName}\"";
-      in
-        lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
-          # Added by nvidia configuration module for Optimus/PRIME.
-          ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
-          ${lib.getExe pkgs.xorg.xrandr} --auto
-        '';

-      environment.etc = {
-        "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};

-        # 'nvidia_x11' installs its files to /run/opengl-driver/...
-        "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
-      };

-      hardware.opengl = {
-        extraPackages = [
-          nvidia_x11.out
-          pkgs.nvidia-vaapi-driver
-        ];
-        extraPackages32 = [
-          nvidia_x11.lib32
-          pkgs.pkgsi686Linux.nvidia-vaapi-driver
-        ];
-      };
-      environment.systemPackages =
-        [nvidia_x11.bin]
-        ++ lib.optional cfg.nvidiaSettings nvidia_x11.settings
-        ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
-        ++ lib.optional offloadCfg.enableOffloadCmd
-        (pkgs.writeShellScriptBin "nvidia-offload" ''
-          export __NV_PRIME_RENDER_OFFLOAD=1
-          export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
-          export __GLX_VENDOR_LIBRARY_NAME=nvidia
-          export __VK_LAYER_NV_optimus=NVIDIA_only
-          exec "$@"
-        '');

-      systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;

-      systemd.services = let
-        nvidiaService = state: {
-          description = "NVIDIA system ${state} actions";
-          path = [pkgs.kbd];
-          serviceConfig = {
-            Type = "oneshot";
-            ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
           };
-          before = ["systemd-${state}.service"];
-          requiredBy = ["systemd-${state}.service"];
-        };
-      in
-        lib.mkMerge [
-          (lib.mkIf cfg.powerManagement.enable {
-            nvidia-suspend = nvidiaService "suspend";
-            nvidia-hibernate = nvidiaService "hibernate";
-            nvidia-resume =
-              (nvidiaService "resume")
-              // {
-                before = [];
-                after = ["systemd-suspend.service" "systemd-hibernate.service"];
-                requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
               };
-          })
-          (lib.mkIf cfg.nvidiaPersistenced {
-            "nvidia-persistenced" = {
-              description = "NVIDIA Persistence Daemon";
-              wantedBy = ["multi-user.target"];
-              serviceConfig = {
-                Type = "forking";
-                Restart = "always";
-                PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
-                ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
-                ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
               };
-            };
-          })
-          (lib.mkIf cfg.dynamicBoost.enable {
-            "nvidia-powerd" = {
-              description = "nvidia-powerd service";
-              path = [
-                pkgs.util-linux # nvidia-powerd wants lscpu
-              ];
-              wantedBy = ["multi-user.target"];
-              serviceConfig = {
-                Type = "dbus";
-                BusName = "nvidia.powerd.server";
-                ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
-              };
-            };
-          })
-        ];

-      services.acpid.enable = true;

-      services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;

-      hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;

-      systemd.tmpfiles.rules =
-        lib.optional config.virtualisation.docker.enableNvidia
-        "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin"
-        ++ lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
-        "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";

-      boot = {
-        blacklistedKernelModules = ["nouveau" "nvidiafb"];

-        extraModulePackages =
-          if cfg.open
-          then [nvidia_x11.open]
-          else [nvidia_x11.bin];

-        # nvidia-uvm is required by CUDA applications.
-        kernelModules =
-          ["nvidia-uvm"]
-          ++ lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];

-        # If requested enable modesetting via kernel parameter.
-        kernelParams =
-          lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
-          ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
-          ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
-          ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";

-        # enable finegrained power management
-        extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
-          options nvidia "NVreg_DynamicPowerManagement=0x02"
-        '';
-      };

-      services.udev.extraRules =
-        ''
-          # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
-          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) $${i}; done'"
-          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
-          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
-          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"
-        ''
-        + lib.optionalString cfg.powerManagement.finegrained (
           lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
             # Remove NVIDIA USB xHCI Host Controller devices, if present
             ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
···
             ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
           ''
         );
-    };
 }
···
   pkgs,
   ...
 }: let
+  x11Enabled = config.services.xserver.enable
+    && (lib.elem "nvidia" config.services.xserver.videoDrivers);
   nvidia_x11 =
+    if x11Enabled || cfg.datacenter.enable
     then cfg.package
     else null;
···
   primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
   busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
   ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
+  settingsFormat = pkgs.formats.keyValue {};
 in {
   options = {
     hardware.nvidia = {
+      datacenter.enable = lib.mkEnableOption (lib.mdDoc ''
+        Data Center drivers for NVIDIA cards on an NVLink topology.
+      '');
+      datacenter.settings = lib.mkOption {
+        type = settingsFormat.type;
+        default = {
+          LOG_LEVEL = 4;
+          LOG_FILE_NAME = "/var/log/fabricmanager.log";
+          LOG_APPEND_TO_LOG = 1;
+          LOG_FILE_MAX_SIZE = 1024;
+          LOG_USE_SYSLOG = 0;
+          DAEMONIZE = 1;
+          BIND_INTERFACE_IP = "127.0.0.1";
+          STARTING_TCP_PORT = 16000;
+          FABRIC_MODE = 0;
+          FABRIC_MODE_RESTART = 0;
+          STATE_FILE_NAME = "/var/tmp/fabricmanager.state";
+          FM_CMD_BIND_INTERFACE = "127.0.0.1";
+          FM_CMD_PORT_NUMBER = 6666;
+          FM_STAY_RESIDENT_ON_FAILURES = 0;
+          ACCESS_LINK_FAILURE_MODE = 0;
+          TRUNK_LINK_FAILURE_MODE = 0;
+          NVSWITCH_FAILURE_MODE = 0;
+          ABORT_CUDA_JOBS_ON_FM_EXIT = 1;
+          TOPOLOGY_FILE_PATH = nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+        };
+        defaultText = lib.literalExpression ''
+          {
+            LOG_LEVEL = 4;
+            LOG_FILE_NAME = "/var/log/fabricmanager.log";
+            LOG_APPEND_TO_LOG = 1;
+            LOG_FILE_MAX_SIZE = 1024;
+            LOG_USE_SYSLOG = 0;
+            DAEMONIZE = 1;
+            BIND_INTERFACE_IP = "127.0.0.1";
+            STARTING_TCP_PORT = 16000;
+            FABRIC_MODE = 0;
+            FABRIC_MODE_RESTART = 0;
+            STATE_FILE_NAME = "/var/tmp/fabricmanager.state";
+            FM_CMD_BIND_INTERFACE = "127.0.0.1";
+            FM_CMD_PORT_NUMBER = 6666;
+            FM_STAY_RESIDENT_ON_FAILURES = 0;
+            ACCESS_LINK_FAILURE_MODE = 0;
+            TRUNK_LINK_FAILURE_MODE = 0;
+            NVSWITCH_FAILURE_MODE = 0;
+            ABORT_CUDA_JOBS_ON_FM_EXIT = 1;
+            TOPOLOGY_FILE_PATH = nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+          }
+        '';
+        description = lib.mdDoc ''
+          Additional configuration options for fabricmanager.
+        '';
+      };
+
       powerManagement.enable = lib.mkEnableOption (lib.mdDoc ''
         experimental power management through systemd. For more information, see
         the NVIDIA docs, Chapter 21: Configuring Power Management Support.
···
         It also drastically increases the time the driver needs to clock down after load.
       '');

+      package = lib.mkOption {
+        default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
+        defaultText = lib.literalExpression ''
+          config.boot.kernelPackages.nvidiaPackages."''${if cfg.datacenter.enable then "dc" else "stable"}"
+        '';
         example = lib.mdDoc "config.boot.kernelPackages.nvidiaPackages.legacy_470";
+        description = lib.mdDoc ''
+          The NVIDIA driver package to use.
+        '';
       };

       open = lib.mkEnableOption (lib.mdDoc ''
···
         then pCfg.intelBusId
         else pCfg.amdgpuBusId;
   in
+    lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
+      # Common
+      ({
+        assertions = [
+          {
+            assertion = !(x11Enabled && cfg.datacenter.enable);
+            message = "You cannot configure both X11 and Data Center drivers at the same time.";
+          }
+        ];
+        boot = {
+          blacklistedKernelModules = ["nouveau" "nvidiafb"];
+          kernelModules = ["nvidia-uvm"];
+        };
+        systemd.tmpfiles.rules =
+          lib.optional config.virtualisation.docker.enableNvidia
+          "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
+        services.udev.extraRules =
+          ''
+            # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
+            KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 255'"
+            KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \ -f 4); do mknod -m 666 /dev/nvidia$${i} c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) $${i}; done'"
+            KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c $$(grep nvidia-frontend /proc/devices | cut -d \ -f 1) 254'"
+            KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 0'"
+            KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \ -f 1) 1'"
+          '';
+        hardware.opengl = {
+          extraPackages = [
+            nvidia_x11.out
+          ];
+          extraPackages32 = [
+            nvidia_x11.lib32
+          ];
+        };
+        environment.systemPackages = [
+          nvidia_x11.bin
+        ];
+      })
+      # X11
+      (lib.mkIf x11Enabled {
+        assertions = [
           {
             assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
             message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
···
           {
             assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
             message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
+          }];

+        # If Optimus/PRIME is enabled, we:
+        # - Specify the configured NVIDIA GPU bus ID in the Device section for the
+        #   "nvidia" driver.
+        # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
+        #   "nvidia" driver, in order to allow the X server to start without any outputs.
+        # - Add a separate Device section for the Intel GPU, using the "modesetting"
+        #   driver and with the configured BusID.
+        # - OR add a separate Device section for the AMD APU, using the "amdgpu"
+        #   driver and with the configured BusID.
+        # - Reference that Device section from the ServerLayout section as an inactive
+        #   device.
+        # - Configure the display manager to run specific `xrandr` commands which will
+        #   configure/enable displays connected to the Intel iGPU / AMD APU.

+        # reverse sync implies offloading
+        hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;

+        services.xserver.drivers =
+          lib.optional primeEnabled {
+            name = igpuDriver;
+            display = offloadCfg.enable;
+            modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
+            deviceSection =
+              ''
+                BusID "${igpuBusId}"
+              ''
+              + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
+                Option "AccelMethod" "none"
+              '';
+          }
+          ++ lib.singleton {
+            name = "nvidia";
+            modules = [nvidia_x11.bin];
+            display = !offloadCfg.enable;
+            deviceSection =
+              lib.optionalString primeEnabled
+              ''
+                BusID "${pCfg.nvidiaBusId}"
+              ''
+              + lib.optionalString pCfg.allowExternalGpu ''
+                Option "AllowExternalGpus"
+              '';
+            screenSection =
+              ''
+                Option "RandRRotation" "on"
+              ''
+              + lib.optionalString syncCfg.enable ''
+                Option "AllowEmptyInitialConfiguration"
+              ''
+              + lib.optionalString cfg.forceFullCompositionPipeline ''
+                Option "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
+                Option "AllowIndirectGLXProtocol" "off"
+                Option "TripleBuffer" "on"
+              '';
+          };

+        services.xserver.serverLayoutSection =
+          lib.optionalString syncCfg.enable ''
+            Inactive "Device-${igpuDriver}[0]"
+          ''
+          + lib.optionalString reverseSyncCfg.enable ''
+            Inactive "Device-nvidia[0]"
+          ''
+          + lib.optionalString offloadCfg.enable ''
+            Option "AllowNVIDIAGPUScreens"
+          '';

+        services.xserver.displayManager.setupCommands = let
+          gpuProviderName =
+            if igpuDriver == "amdgpu"
+            then
+              # find the name of the provider if amdgpu
+              "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
+            else igpuDriver;
+          providerCmdParams =
+            if syncCfg.enable
+            then "\"${gpuProviderName}\" NVIDIA-0"
+            else "NVIDIA-G0 \"${gpuProviderName}\"";
+        in
+          lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
+            # Added by nvidia configuration module for Optimus/PRIME.
+            ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
+            ${lib.getExe pkgs.xorg.xrandr} --auto
+          '';

+        environment.etc = {
+          "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};

+          # 'nvidia_x11' installs its files to /run/opengl-driver/...
+          "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
+        };

+        hardware.opengl = {
+          extraPackages = [
+            pkgs.nvidia-vaapi-driver
+          ];
+          extraPackages32 = [
+            pkgs.pkgsi686Linux.nvidia-vaapi-driver
+          ];
+        };
+        environment.systemPackages =
+          lib.optional cfg.nvidiaSettings nvidia_x11.settings
+          ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
+          ++ lib.optional offloadCfg.enableOffloadCmd
+          (pkgs.writeShellScriptBin "nvidia-offload" ''
+            export __NV_PRIME_RENDER_OFFLOAD=1
+            export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
+            export __GLX_VENDOR_LIBRARY_NAME=nvidia
+            export __VK_LAYER_NV_optimus=NVIDIA_only
+            exec "$@"
+          '');

+        systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;

+        systemd.services = let
+          nvidiaService = state: {
+            description = "NVIDIA system ${state} actions";
+            path = [pkgs.kbd];
+            serviceConfig = {
+              Type = "oneshot";
+              ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
+            };
+            before = ["systemd-${state}.service"];
+            requiredBy = ["systemd-${state}.service"];
           };
+        in
+          lib.mkMerge [
+            (lib.mkIf cfg.powerManagement.enable {
+              nvidia-suspend = nvidiaService "suspend";
+              nvidia-hibernate = nvidiaService "hibernate";
+              nvidia-resume =
+                (nvidiaService "resume")
+                // {
+                  before = [];
+                  after = ["systemd-suspend.service" "systemd-hibernate.service"];
+                  requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
+                };
+            })
+            (lib.mkIf cfg.nvidiaPersistenced {
+              "nvidia-persistenced" = {
+                description = "NVIDIA Persistence Daemon";
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "forking";
+                  Restart = "always";
+                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
+                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+                };
               };
+            })
+            (lib.mkIf cfg.dynamicBoost.enable {
+              "nvidia-powerd" = {
+                description = "nvidia-powerd service";
+                path = [
+                  pkgs.util-linux # nvidia-powerd wants lscpu
+                ];
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "dbus";
+                  BusName = "nvidia.powerd.server";
+                  ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
+                };
               };
+            })
+          ];
+        services.acpid.enable = true;

+        services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;

+        hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;

+        systemd.tmpfiles.rules =
+          lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+          "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";

+        boot = {
+          extraModulePackages =
+            if cfg.open
+            then [nvidia_x11.open]
+            else [nvidia_x11.bin];
+          # nvidia-uvm is required by CUDA applications.
+          kernelModules =
+            lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];

+          # If requested enable modesetting via kernel parameter.
+          kernelParams =
+            lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
+            ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
+            ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
+            ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";

+          # enable finegrained power management
+          extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
+            options nvidia "NVreg_DynamicPowerManagement=0x02"
+          '';
+        };
+        services.udev.extraRules =
+          lib.optionalString cfg.powerManagement.finegrained (
           lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
             # Remove NVIDIA USB xHCI Host Controller devices, if present
             ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
···
             ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
           ''
         );
+      })
+      # Data Center
+      (lib.mkIf cfg.datacenter.enable {
+        boot.extraModulePackages = [
+          nvidia_x11.bin
+        ];
+        systemd.services.nvidia-fabricmanager = {
+          enable = true;
+          description = "Start NVIDIA NVLink Management";
+          wantedBy = ["multi-user.target"];
+          unitConfig.After = ["network-online.target"];
+          unitConfig.Requires = ["network-online.target"];
+          serviceConfig = {
+            Type = "forking";
+            TimeoutStartSec = 240;
+            ExecStart = let
+              nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
+            in
+              nvidia_x11.fabricmanager + "/bin/nv-fabricmanager -c " + nv-fab-conf;
+            LimitCORE = "infinity";
+          };
+        };
+        environment.systemPackages =
+          lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager;
+      })
+    ]);
 }
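
Since `settingsFormat` is `pkgs.formats.keyValue {}`, the `datacenter.settings` attribute set is serialized to a flat `key=value` file and handed to `nv-fabricmanager -c`. A user override might look like the sketch below. Note one subtlety of the module system: the fabricmanager defaults live in the option's `default`, so defining your own `settings` replaces them wholesale rather than merging with them:

```nix
{
  hardware.nvidia.datacenter.enable = true;
  # Overriding settings discards the module defaults shown above,
  # so restate anything you still need alongside your changes.
  hardware.nvidia.datacenter.settings = {
    LOG_LEVEL = 2;
    DAEMONIZE = 1;
    BIND_INTERFACE_IP = "127.0.0.1";
    STARTING_TCP_PORT = 16000;
  };
}
```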
···
   nvidia_x11_legacy470 = nvidiaPackages.legacy_470;
   nvidia_x11_production = nvidiaPackages.production;
   nvidia_x11_vulkan_beta = nvidiaPackages.vulkan_beta;
+  nvidia_dc = nvidiaPackages.dc;
+  nvidia_dc_520 = nvidiaPackages.dc_520;

   # this is not a replacement for nvidia_x11*
   # only the opensource kernel driver exposed for hydra to build
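
With the new `nvidiaPackages.dc` and `nvidiaPackages.dc_520` attributes exposed, a Data Center setup can also pin a specific driver branch instead of relying on the `dc` default picked by the module's `package` option. A sketch:

```nix
{ config, ... }: {
  hardware.nvidia.datacenter.enable = true;
  # Pin the 520-series Data Center branch added in this change.
  hardware.nvidia.package = config.boot.kernelPackages.nvidiaPackages.dc_520;
}
```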