lol

Merge pull request #301188 from SomeoneSerge/cudaPackages/rename-cudatoolkit

cudaPackages.cudatoolkit: replace with symlinkJoin

Authored by Someone; committed by GitHub.

Commits: 446fee38, 91ff44d5

+107 -11
+6
nixos/doc/manual/release-notes/rl-2405.section.md
··· 315 315 316 316 - The `cudaPackages` package scope has been updated to `cudaPackages_12`. 317 317 318 + - The deprecated `cudaPackages.cudatoolkit` has been replaced with a 319 + symlink-based wrapper for the splayed redistributable CUDA packages. The 320 + wrapper only includes tools and libraries necessary to build common packages 321 + like e.g. tensorflow. The original runfile-based `cudatoolkit` is still 322 + available as `cudatoolkit-legacy-runfile`. 323 + 318 324 - The `halloy` package was updated past 2024.5 which introduced a breaking change by switching the config format from YAML to TOML. See https://github.com/squidowl/halloy/releases/tag/2024.5 for details. 319 325 320 326 - Ada packages (libraries and tools) have been moved into the `gnatPackages` scope. `gnatPackages` uses the default GNAT compiler, `gnat12Packages` and `gnat13Packages` use the respective matching compiler version.
+2 -2
pkgs/applications/science/math/caffe/default.nix
··· 22 22 }: 23 23 24 24 let 25 - inherit (cudaPackages) cudatoolkit nccl; 25 + inherit (cudaPackages) backendStdenv cudatoolkit nccl; 26 26 # The default for cudatoolkit 10.1 is CUDNN 8.0.5, the last version to support CUDA 10.1. 27 27 # However, this caffe does not build with CUDNN 8.x, so we use CUDNN 7.6.5 instead. 28 28 # Earlier versions of cudatoolkit use pre-8.x CUDNN, so we use the default. ··· 59 59 "-DBLAS=open" 60 60 ] ++ (if cudaSupport then [ 61 61 "-DCUDA_ARCH_NAME=All" 62 - "-DCUDA_HOST_COMPILER=${cudatoolkit.cc}/bin/cc" 62 + "-DCUDA_HOST_COMPILER=${backendStdenv.cc}/bin/cc" 63 63 ] else [ "-DCPU_ONLY=ON" ]) 64 64 ++ ["-DUSE_NCCL=${toggle ncclSupport}"] 65 65 ++ ["-DUSE_LEVELDB=${toggle leveldbSupport}"]
+2 -2
pkgs/applications/science/math/mxnet/default.nix
··· 5 5 }: 6 6 7 7 let 8 - inherit (cudaPackages) cudatoolkit cudaFlags cudnn; 8 + inherit (cudaPackages) backendStdenv cudatoolkit cudaFlags cudnn; 9 9 in 10 10 11 11 assert cudnnSupport -> cudaSupport; ··· 49 49 ++ (if cudaSupport then [ 50 50 "-DUSE_OLDCMAKECUDA=ON" # see https://github.com/apache/incubator-mxnet/issues/10743 51 51 "-DCUDA_ARCH_NAME=All" 52 - "-DCUDA_HOST_COMPILER=${cudatoolkit.cc}/bin/cc" 52 + "-DCUDA_HOST_COMPILER=${backendStdenv.cc}/bin/cc" 53 53 "-DMXNET_CUDA_ARCH=${builtins.concatStringsSep ";" cudaFlags.realArches}" 54 54 ] else [ "-DUSE_CUDA=OFF" ]) 55 55 ++ lib.optional (!cudnnSupport) "-DUSE_CUDNN=OFF";
+1 -1
pkgs/development/cuda-modules/cudatoolkit/default.nix
··· 411 411 }; 412 412 413 413 meta = with lib; { 414 - description = "A compiler for NVIDIA GPUs, math libraries, and tools"; 414 + description = "The deprecated runfile-based CUDAToolkit installation (a compiler for NVIDIA GPUs, math libraries, and tools)"; 415 415 homepage = "https://developer.nvidia.com/cuda-toolkit"; 416 416 platforms = [ "x86_64-linux" ]; 417 417 license = licenses.nvidiaCuda;
+86
pkgs/development/cuda-modules/cudatoolkit/redist-wrapper.nix
··· 1 + { 2 + lib, 3 + symlinkJoin, 4 + backendStdenv, 5 + cudaOlder, 6 + cudatoolkit-legacy-runfile, 7 + cudaVersion, 8 + cuda_cccl ? null, 9 + cuda_cudart ? null, 10 + cuda_cuobjdump ? null, 11 + cuda_cupti ? null, 12 + cuda_cuxxfilt ? null, 13 + cuda_gdb ? null, 14 + cuda_nvcc ? null, 15 + cuda_nvdisasm ? null, 16 + cuda_nvml_dev ? null, 17 + cuda_nvprune ? null, 18 + cuda_nvrtc ? null, 19 + cuda_nvtx ? null, 20 + cuda_profiler_api ? null, 21 + cuda_sanitizer_api ? null, 22 + libcublas ? null, 23 + libcufft ? null, 24 + libcurand ? null, 25 + libcusolver ? null, 26 + libcusparse ? null, 27 + libnpp ? null, 28 + }: 29 + 30 + let 31 + getAllOutputs = p: [ 32 + (lib.getBin p) 33 + (lib.getLib p) 34 + (lib.getDev p) 35 + ]; 36 + hostPackages = [ 37 + cuda_cuobjdump 38 + cuda_gdb 39 + cuda_nvcc 40 + cuda_nvdisasm 41 + cuda_nvprune 42 + ]; 43 + targetPackages = [ 44 + cuda_cccl 45 + cuda_cudart 46 + cuda_cupti 47 + cuda_cuxxfilt 48 + cuda_nvml_dev 49 + cuda_nvrtc 50 + cuda_nvtx 51 + cuda_profiler_api 52 + cuda_sanitizer_api 53 + libcublas 54 + libcufft 55 + libcurand 56 + libcusolver 57 + libcusparse 58 + libnpp 59 + ]; 60 + 61 + # This assumes we put `cudatoolkit` in `buildInputs` instead of `nativeBuildInputs`: 62 + allPackages = (map (p: p.__spliced.buildHost or p) hostPackages) ++ targetPackages; 63 + in 64 + 65 + if cudaOlder "11.4" then 66 + cudatoolkit-legacy-runfile 67 + else 68 + symlinkJoin rec { 69 + name = "cuda-merged-${cudaVersion}"; 70 + version = cudaVersion; 71 + 72 + paths = builtins.concatMap getAllOutputs allPackages; 73 + 74 + passthru = { 75 + cc = lib.warn "cudaPackages.cudatoolkit is deprecated, refer to the manual and use splayed packages instead" backendStdenv.cc; 76 + lib = symlinkJoin { 77 + inherit name; 78 + paths = map (p: lib.getLib p) allPackages; 79 + }; 80 + }; 81 + 82 + meta = with lib; { 83 + description = "A wrapper substituting the deprecated runfile-based CUDA installation"; 84 + license = licenses.nvidiaCuda; 85 + }; 86 + }
+1 -1
pkgs/development/libraries/lightgbm/default.nix
··· 77 77 ''; 78 78 79 79 cmakeFlags = lib.optionals doCheck [ "-DBUILD_CPP_TEST=ON" ] 80 - ++ lib.optionals cudaSupport [ "-DUSE_CUDA=1" "-DCMAKE_CXX_COMPILER=${cudaPackages.cudatoolkit.cc}/bin/cc" ] 80 + ++ lib.optionals cudaSupport [ "-DUSE_CUDA=1" "-DCMAKE_CXX_COMPILER=${cudaPackages.backendStdenv.cc}/bin/cc" ] 81 81 ++ lib.optionals openclSupport [ "-DUSE_GPU=ON" ] 82 82 ++ lib.optionals mpiSupport [ "-DUSE_MPI=ON" ] 83 83 ++ lib.optionals hdfsSupport [
+2 -2
pkgs/development/libraries/opencv/3.x.nix
··· 39 39 assert enablePython -> pythonPackages != null; 40 40 41 41 let 42 - inherit (cudaPackages) cudatoolkit; 42 + inherit (cudaPackages) backendStdenv cudatoolkit; 43 43 inherit (cudaPackages.cudaFlags) cudaCapabilities; 44 44 45 45 version = "3.4.18"; ··· 241 241 (opencvFlag "TBB" enableTbb) 242 242 ] ++ lib.optionals enableCuda [ 243 243 "-DCUDA_FAST_MATH=ON" 244 - "-DCUDA_HOST_COMPILER=${cudatoolkit.cc}/bin/cc" 244 + "-DCUDA_HOST_COMPILER=${backendStdenv.cc}/bin/cc" 245 245 "-DCUDA_NVCC_FLAGS=--expt-relaxed-constexpr" 246 246 "-DCUDA_ARCH_BIN=${lib.concatStringsSep ";" cudaCapabilities}" 247 247 "-DCUDA_ARCH_PTX=${lib.last cudaCapabilities}"
+2 -2
pkgs/development/libraries/xgboost/default.nix
··· 75 75 "-DUSE_CUDA=ON" 76 76 # Their CMakeLists.txt does not respect CUDA_HOST_COMPILER, instead using the CXX compiler. 77 77 # https://github.com/dmlc/xgboost/blob/ccf43d4ba0a94e2f0a3cc5a526197539ae46f410/CMakeLists.txt#L145 78 - "-DCMAKE_C_COMPILER=${cudaPackages.cudatoolkit.cc}/bin/gcc" 79 - "-DCMAKE_CXX_COMPILER=${cudaPackages.cudatoolkit.cc}/bin/g++" 78 + "-DCMAKE_C_COMPILER=${cudaPackages.backendStdenv.cc}/bin/gcc" 79 + "-DCMAKE_CXX_COMPILER=${cudaPackages.backendStdenv.cc}/bin/g++" 80 80 ] ++ lib.optionals 81 81 (cudaSupport 82 82 && lib.versionAtLeast cudaPackages.cudatoolkit.version "11.4.0")
+5 -1
pkgs/top-level/cuda-packages.nix
··· 69 69 backendStdenv = final.callPackage ../development/cuda-modules/backend-stdenv.nix { }; 70 70 71 71 # Loose packages 72 - cudatoolkit = final.callPackage ../development/cuda-modules/cudatoolkit { }; 72 + 73 + # TODO: Move to aliases.nix once all Nixpkgs has migrated to the splayed CUDA packages 74 + cudatoolkit = final.callPackage ../development/cuda-modules/cudatoolkit/redist-wrapper.nix { }; 75 + cudatoolkit-legacy-runfile = final.callPackage ../development/cuda-modules/cudatoolkit { }; 76 + 73 77 saxpy = final.callPackage ../development/cuda-modules/saxpy { }; 74 78 nccl = final.callPackage ../development/cuda-modules/nccl { }; 75 79 nccl-tests = final.callPackage ../development/cuda-modules/nccl-tests { };