nixpkgs mirror (for testing)
github.com/NixOS/nixpkgs
nix
# Package expression for the `cuda_nvcc` redistributable: the NVIDIA CUDA
# compiler driver (nvcc) plus the NVVM backend, which is bundled inside the
# NVCC redist before CUDA 13.0 and split out into `libnvvm` from 13.0 on.
{
  # NOTE(review): `_cuda` is accepted but never referenced in this expression;
  # presumably kept for call-signature consistency with sibling redist
  # packages — confirm before removing.
  _cuda,
  backendStdenv,
  buildRedist,
  setupCudaHook,
  cudaAtLeast,
  cudaOlder,
  cuda_cccl,
  lib,
  libnvvm,
  makeBinaryWrapper,
}:
buildRedist (finalAttrs: {
  redistName = "cuda";
  pname = "cuda_nvcc";

  # NOTE: We restrict cuda_nvcc to a single output to avoid breaking consumers which expect NVCC to be within a single
  # directory structure. This happens partly because NVCC is also home to NVVM.
  outputs = [
    "out"
  ];

  # The nvcc and cicc binaries contain hard-coded references to /usr
  allowFHSReferences = true;

  # makeBinaryWrapper provides `wrapProgramBinary`, used in postInstall below.
  nativeBuildInputs = [
    makeBinaryWrapper
  ];

  # Entries here will be in nativeBuildInputs when cuda_nvcc is in nativeBuildInputs
  propagatedBuildInputs = [ setupCudaHook ];

  # Patch the nvcc.profile.
  # Syntax:
  # - `=` for assignment,
  # - `?=` for conditional assignment,
  # - `+=` to "prepend",
  # - `=+` to "append".

  # Cf. https://web.archive.org/web/20220912081901/https://developer.download.nvidia.com/compute/DevZone/docs/html/C/doc/nvcc.pdf

  # We set all variables with the lowest priority (=+), but we do force
  # nvcc to use the fixed backend toolchain. Cf. comments in
  # backend-stdenv.nix

  # As an example, here's the nvcc.profile for CUDA 11.8-12.4 (yes, that is a leading newline):

  #
  # TOP = $(_HERE_)/..
  #
  # NVVMIR_LIBRARY_DIR = $(TOP)/$(_NVVM_BRANCH_)/libdevice
  #
  # LD_LIBRARY_PATH += $(TOP)/lib:
  # PATH += $(TOP)/$(_NVVM_BRANCH_)/bin:$(_HERE_):
  #
  # INCLUDES += "-I$(TOP)/$(_TARGET_DIR_)/include" $(_SPACE_)
  #
  # LIBRARIES =+ $(_SPACE_) "-L$(TOP)/$(_TARGET_DIR_)/lib$(_TARGET_SIZE_)/stubs" "-L$(TOP)/$(_TARGET_DIR_)/lib$(_TARGET_SIZE_)"
  #
  # CUDAFE_FLAGS +=
  # PTXAS_FLAGS +=

  # And here's the nvcc.profile for CUDA 12.5:

  #
  # TOP = $(_HERE_)/..
  #
  # CICC_PATH = $(TOP)/nvvm/bin
  # CICC_NEXT_PATH = $(TOP)/nvvm-next/bin
  # NVVMIR_LIBRARY_DIR = $(TOP)/nvvm/libdevice
  #
  # LD_LIBRARY_PATH += $(TOP)/lib:
  # PATH += $(CICC_PATH):$(_HERE_):
  #
  # INCLUDES += "-I$(TOP)/$(_TARGET_DIR_)/include" $(_SPACE_)
  #
  # LIBRARIES =+ $(_SPACE_) "-L$(TOP)/$(_TARGET_DIR_)/lib$(_TARGET_SIZE_)/stubs" "-L$(TOP)/$(_TARGET_DIR_)/lib$(_TARGET_SIZE_)"
  #
  # CUDAFE_FLAGS +=
  # PTXAS_FLAGS +=

  postInstall =
    let
      # TODO: Should we also patch the LIBRARIES line's use of $(TOP)/$(_TARGET_DIR_)?
      # The NVVM path as it appears verbatim in the pristine nvcc.profile:
      # CUDA < 12.5 spells it via the $(_NVVM_BRANCH_) variable, while 12.5+
      # uses a literal "nvvm" component (see the example profiles above).
      oldNvvmDir = lib.concatStringsSep "/" (
        [ "$(TOP)" ]
        ++ lib.optionals (cudaOlder "12.5") [ "$(_NVVM_BRANCH_)" ]
        ++ lib.optionals (cudaAtLeast "12.5") [ "nvvm" ]
      );
      # The `\$` escape keeps Nix from interpolating, so the shell script
      # receives a literal `${!outputBin:?}`: Bash indirect expansion of the
      # output path named by $outputBin, failing loudly if it is unset.
      newNvvmDir = "\${!outputBin:?}/nvvm";
    in
    # Guard: only emit the hook body when the package is actually available,
    # since the snippets below interpolate store paths (e.g. libnvvm).
    lib.optionalString finalAttrs.finalPackage.meta.available (
      # From CUDA 13.0, NVVM is available as a separate library and not bundled in the NVCC redist.
      lib.optionalString (cudaOlder "13.0") ''
        nixLog "moving $PWD/nvvm to ''${!outputBin:?} and renaming lib64 to lib"
        moveToOutput "nvvm" "''${!outputBin:?}"
        mv --verbose --no-clobber "${newNvvmDir}/lib64" "${newNvvmDir}/lib"
      ''
      # NVVM is unpacked and made top-level; we cannot make a symlink to it because build systems (like CMake)
      # may take the target and do relative path operations to it.
      + lib.optionalString (cudaAtLeast "13.0") ''
        nixLog "copying ${libnvvm} to ${newNvvmDir} and fixing permissions"
        cp -rv "${libnvvm}" "${newNvvmDir}"
        chmod -Rv u+w "${newNvvmDir}"
      ''
      # Unconditional patching to remove the use of $(_TARGET_SIZE_) since we don't use lib64 in Nixpkgs
      + ''
        nixLog 'removing $(_TARGET_SIZE_) from nvcc.profile'
        substituteInPlace "''${!outputBin:?}/bin/nvcc.profile" \
          --replace-fail \
            '$(_TARGET_SIZE_)' \
            ""
      ''
      # CUDA 13.0+ introduced
      #   SYSTEM_INCLUDES += "-isystem" "$(TOP)/$(_TARGET_DIR_)/include/cccl" $(_SPACE_)
      # so we need to make sure to patch the reference to cccl.
      # NOTE(review): this must run before the generic include-path patch
      # below, otherwise the broader '$(TOP)/$(_TARGET_DIR_)/include' match
      # would rewrite the cccl line first and this --replace-fail would abort.
      + lib.optionalString (cudaAtLeast "13.0") ''
        nixLog "patching nvcc.profile to include correct path to cccl"
        substituteInPlace "''${!outputBin:?}/bin/nvcc.profile" \
          --replace-fail \
            '$(TOP)/$(_TARGET_DIR_)/include/cccl' \
            "${lib.getOutput "include" cuda_cccl}/include"
      ''
      # Unconditional patching to switch to the correct include paths.
      # NOTE: _TARGET_DIR_ appears to be used for the target architecture, which is relevant for cross-compilation.
      + ''
        nixLog "patching nvcc.profile to use the correct include paths"
        substituteInPlace "''${!outputBin:?}/bin/nvcc.profile" \
          --replace-fail \
            '$(TOP)/$(_TARGET_DIR_)/include' \
            "''${!outputInclude:?}/include"
      ''
      # Fixup the nvcc.profile to use the correct paths for NVVM.
      # NOTE: In our replacement substitution, we use double quotes to allow for variable expansion.
      # NOTE: We use a trailing slash only on the NVVM directory replacement to prevent partial matches.
      + ''
        nixLog "patching nvcc.profile to use the correct NVVM paths"
        substituteInPlace "''${!outputBin:?}/bin/nvcc.profile" \
          --replace-fail \
            '${oldNvvmDir}/' \
            "${newNvvmDir}/"

        nixLog "adding ${newNvvmDir} to nvcc.profile"
        cat << EOF >> "''${!outputBin:?}/bin/nvcc.profile"

        # Expose the split-out nvvm
        LIBRARIES =+ \$(_SPACE_) "-L${newNvvmDir}/lib"
        INCLUDES =+ \$(_SPACE_) "-I${newNvvmDir}/include"
        EOF
      ''
      # Add the dependency on backendStdenv.cc to the nvcc.profile.
      # NOTE: NVCC explodes in horrifying fashion if GCC is not on PATH -- it fails even before
      # reading nvcc.profile!
      + ''
        nixLog "setting compiler-bindir to backendStdenv.cc in nvcc.profile"
        cat << EOF >> "''${!outputBin:?}/bin/nvcc.profile"
        # Fix a compatible backend compiler
        compiler-bindir = ${backendStdenv.cc}/bin
        EOF

        nixLog "wrapping nvcc to add backendStdenv.cc to its PATH"
        wrapProgramBinary \
          "''${!outputBin:?}/bin/nvcc" \
          --prefix PATH : ${lib.makeBinPath [ backendStdenv.cc ]}
      ''
    );

  # Marks the package broken (rather than failing mid-build) when the listed
  # assertion does not hold.
  brokenAssertions = [
    # TODO(@connorbaker): Build fails on x86 when using pkgsLLVM.
    # .../include/crt/host_defines.h:67:2:
    # error: "libc++ is not supported on x86 system"
    #
    # 67 | #error "libc++ is not supported on x86 system"
    #    |  ^
    #
    # 1 error generated.
    #
    # # --error 0x1 --
    {
      message = "cannot use libc++ on x86_64-linux";
      # Implication: on x86_64-linux the backend compiler must NOT be using
      # libc++; on every other platform the assertion holds vacuously.
      assertion = backendStdenv.hostNixSystem == "x86_64-linux" -> backendStdenv.cc.libcxx == null;
    }
  ];

  meta = {
    description = "CUDA compiler driver";
    homepage = "https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc";
    mainProgram = "nvcc";
  };
})