...
 , fetchurl
 , fontconfig
 , freetype
-, gcc
 , gdk-pixbuf
 , glib
 , glibc
...
 , perl
 , python3
 , requireFile
-, stdenv
 , unixODBC
 , xorg
 , zlib
 }:

-stdenv.mkDerivation rec {
   pname = "cudatoolkit";
   inherit version runPatches;
...

     # Fix builds with newer glibc version
     sed -i "1 i#define _BITS_FLOATN_H" "$out/include/host_defines.h"
-
-    # Ensure that cmake can find CUDA.
     mkdir -p $out/nix-support
-    echo "cmakeFlags+=' -DCUDA_TOOLKIT_ROOT_DIR=$out'" >> $out/nix-support/setup-hook
-
-    # Set the host compiler to be used by nvcc.
-    # FIXME: redist cuda_nvcc copy-pastes this code
-
-    # For CMake-based projects:
-    # https://cmake.org/cmake/help/latest/module/FindCUDA.html#input-variables
-    # https://cmake.org/cmake/help/latest/envvar/CUDAHOSTCXX.html
-    # https://cmake.org/cmake/help/latest/variable/CMAKE_CUDA_HOST_COMPILER.html
-
-    # For non-CMake projects:
-    # FIXME: results in "incompatible redefinition" warnings ...but we keep
-    # both this and cmake variables until we come up with a more general
-    # solution
-    # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#compiler-bindir-directory-ccbin
     cat <<EOF >> $out/nix-support/setup-hook
-
-    cmakeFlags+=' -DCUDA_HOST_COMPILER=${gcc}/bin'
-    cmakeFlags+=' -DCMAKE_CUDA_HOST_COMPILER=${gcc}/bin'
     if [ -z "\''${CUDAHOSTCXX-}" ]; then
-      export CUDAHOSTCXX=${gcc}/bin;
     fi
-
-    export NVCC_PREPEND_FLAGS+=' --compiler-bindir=${gcc}/bin'
     EOF
-

     # Move some libraries to the lib output so that programs that
     # depend on them don't pull in this entire monstrosity.
...
     # The path to libstdc++ and such
     #
-    # NB:
-    # 1. "gcc" (gcc-wrapper) here is what's exposed as cudaPackages.cudatoolkit.cc
-    # 2. "gcc.cc" is the unwrapped gcc
-    # 3. "gcc.cc.lib" is one of its outputs
-    "${gcc.cc.lib}/lib64"

     "$out/jre/lib/amd64/jli"
     "$out/lib64"
...
     popd
   '';
   passthru = {
-    cc = gcc;
     majorMinorVersion = lib.versions.majorMinor version;
     majorVersion = lib.versions.majorMinor version;
   };
...
 , fetchurl
 , fontconfig
 , freetype
+, gcc # :: String
 , gdk-pixbuf
 , glib
 , glibc
...
 , perl
 , python3
 , requireFile
+, backendStdenv # E.g. gcc11Stdenv, set in extension.nix
 , unixODBC
 , xorg
 , zlib
 }:

+backendStdenv.mkDerivation rec {
   pname = "cudatoolkit";
   inherit version runPatches;
...

     # Fix builds with newer glibc version
     sed -i "1 i#define _BITS_FLOATN_H" "$out/include/host_defines.h"
+  '' +
+  # Point NVCC at a compatible compiler
+  # FIXME: redist cuda_nvcc copy-pastes this code
+  # Refer to comments in the overrides for cuda_nvcc for explanation
+  # CUDA_TOOLKIT_ROOT_DIR is legacy,
+  # Cf. https://cmake.org/cmake/help/latest/module/FindCUDA.html#input-variables
+  ''
     mkdir -p $out/nix-support
     cat <<EOF >> $out/nix-support/setup-hook
+    cmakeFlags+=' -DCUDA_TOOLKIT_ROOT_DIR=$out'
+    cmakeFlags+=' -DCUDA_HOST_COMPILER=${backendStdenv.cc}/bin'
+    cmakeFlags+=' -DCMAKE_CUDA_HOST_COMPILER=${backendStdenv.cc}/bin'
     if [ -z "\''${CUDAHOSTCXX-}" ]; then
+      export CUDAHOSTCXX=${backendStdenv.cc}/bin;
     fi
+    export NVCC_PREPEND_FLAGS+=' --compiler-bindir=${backendStdenv.cc}/bin'
     EOF

     # Move some libraries to the lib output so that programs that
     # depend on them don't pull in this entire monstrosity.
...
     # The path to libstdc++ and such
     #
+    # `backendStdenv` is the cuda-compatible toolchain that we pick in
+    # extension.nix; we hand it to NVCC to use as a back-end, and we link
+    # cudatoolkit's binaries against its libstdc++
+    "${backendStdenv.cc.cc.lib}/lib64"

     "$out/jre/lib/amd64/jli"
     "$out/lib64"
...
     popd
   '';
   passthru = {
+    cc = backendStdenv.cc;
     majorMinorVersion = lib.versions.majorMinor version;
     majorVersion = lib.versions.majorMinor version;
   };
...
   # Version info for the classic cudatoolkit packages that contain everything that is in redist.
   cudatoolkitVersions = final.lib.importTOML ./versions.toml;

   ### Add classic cudatoolkit package
-  cudatoolkit = buildCudaToolkitPackage ((attrs: attrs // { gcc = prev.pkgs.${attrs.gcc}; }) cudatoolkitVersions.${final.cudaVersion});

   cudaFlags = final.callPackage ./flags.nix {};

-in {
-  inherit cudatoolkit cudaFlags;
 }
...
   # Version info for the classic cudatoolkit packages that contain everything that is in redist.
   cudatoolkitVersions = final.lib.importTOML ./versions.toml;

+  finalVersion = cudatoolkitVersions.${final.cudaVersion};
+
+  # Exposed as cudaPackages.backendStdenv.
+  # We don't call it just "stdenv" to avoid confusion: e.g. this toolchain doesn't contain nvcc.
+  # Instead, it's the back-end toolchain for nvcc to use.
+  # We also use this to link a compatible libstdc++ (backendStdenv.cc.cc.lib)
+  # Cf. https://github.com/NixOS/nixpkgs/pull/218265 for context
+  backendStdenv = prev.pkgs."${finalVersion.gcc}Stdenv";
+
   ### Add classic cudatoolkit package
+  cudatoolkit = buildCudaToolkitPackage (finalVersion // { inherit backendStdenv; });

   cudaFlags = final.callPackage ./flags.nix {};

+in
+{
+  inherit
+    backendStdenv
+    cudatoolkit
+    cudaFlags;
 }
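For reference, `finalVersion` above is simply the attribute set that `importTOML ./versions.toml` yields for the selected `final.cudaVersion`. The only field the new extension.nix relies on at this point is `gcc`, a string such as "gcc11" that gets suffixed with "Stdenv" to pick the back-end toolchain; the remaining fields are whatever `buildCudaToolkitPackage` consumes. A minimal sketch of that shape follows; every field name other than `gcc` is an illustrative assumption, not the real versions.toml schema.

# Hypothetical shape of the imported versions.toml, as a Nix value.
# Only the `gcc` field is actually read above ("gcc11" -> prev.pkgs.gcc11Stdenv);
# the other fields are placeholders for what buildCudaToolkitPackage expects.
{
  "11.8" = {
    version = "11.8.0";   # assumed field: the cudatoolkit release
    gcc = "gcc11";        # selects gcc11Stdenv as backendStdenv
    # url = "...";        # assumed field: source location
    # sha256 = "...";     # assumed field: source hash
  };
}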
...
 { lib
-, stdenv
 , fetchurl
 , autoPatchelfHook
 , autoAddOpenGLRunpathHook
...
 let
   arch = "linux-x86_64";
 in
-stdenv.mkDerivation {
   inherit pname;
   inherit (attrs) version;
...
     # autoPatchelfHook will search for a libstdc++ and we're giving it a
     # "compatible" libstdc++ from the same toolchain that NVCC uses.
     #
-    # E.g. it might happen that stdenv=gcc12Stdenv, but we build against cuda11
-    # that only "supports" gcc11. Linking against gcc12's libraries we might
-    # sometimes actually sometimes encounter dynamic linkage errors at runtime
     # NB: We don't actually know if this is the right thing to do
-    cudatoolkit.cc.cc.lib
   ];

   dontBuild = true;
...
     runHook postInstall
   '';

-  passthru.stdenv = stdenv;

   meta = {
     description = attrs.name;
...
 { lib
+, backendStdenv
 , fetchurl
 , autoPatchelfHook
 , autoAddOpenGLRunpathHook
...
 let
   arch = "linux-x86_64";
 in
+backendStdenv.mkDerivation {
   inherit pname;
   inherit (attrs) version;
...
     # autoPatchelfHook will search for a libstdc++ and we're giving it a
     # "compatible" libstdc++ from the same toolchain that NVCC uses.
     #
     # NB: We don't actually know if this is the right thing to do
+    backendStdenv.cc.cc.lib
   ];

   dontBuild = true;
...
     runHook postInstall
   '';

+  passthru.stdenv = backendStdenv;

   meta = {
     description = attrs.name;
...
 }:

 let
   inherit (cudaPackages) cudatoolkit cudnn nccl;
 in

...
 let
   withTensorboard = (pythonOlder "3.6") || tensorboardSupport;

   cudatoolkit_joined = symlinkJoin {
     name = "${cudatoolkit.name}-merged";
     paths = [
...
     ];
   };

   cudatoolkit_cc_joined = symlinkJoin {
-    name = "${cudatoolkit.cc.name}-merged";
     paths = [
-      cudatoolkit.cc
       binutils.bintools # for ar, dwp, nm, objcopy, objdump, strip
     ];
   };
...
   '';
   }) else _bazel-build;

-  _bazel-build = (buildBazelPackage.override (lib.optionalAttrs stdenv.isDarwin {
-    # clang 7 fails to emit a symbol for
-    # __ZN4llvm11SmallPtrSetIPKNS_10AllocaInstELj8EED1Ev in any of the
-    # translation units, so the build fails at link time
-    stdenv = llvmPackages_11.stdenv;
-  })) {
     name = "${pname}-${version}";
     bazel = bazel_5;
...
       flatbuffers-core
       giflib
       grpc
-      icu
       jsoncpp
       libjpeg_turbo
       libpng
       lmdb-core
-      pybind11
       snappy
       sqlite
     ] ++ lib.optionals cudaSupport [
...

     TF_NEED_CUDA = tfFeature cudaSupport;
     TF_CUDA_PATHS = lib.optionalString cudaSupport "${cudatoolkit_joined},${cudnn},${nccl}";
-    GCC_HOST_COMPILER_PREFIX = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin";
-    GCC_HOST_COMPILER_PATH = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin/gcc";
     TF_CUDA_COMPUTE_CAPABILITIES = lib.concatStringsSep "," cudaCapabilities;

     postPatch = ''
       # bazel 3.3 should work just as well as bazel 3.1
...
 }:

 let
+  originalStdenv = stdenv;
+in
+let
+  # Tensorflow looks at many toolchain-related variables which may diverge.
+  #
+  # Toolchain for cuda-enabled builds.
+  # We want to achieve two things:
+  # 1. NVCC should use a compatible back-end (e.g. gcc11 for cuda11)
+  # 2. Normal C++ files should be compiled with the same toolchain,
+  #    to avoid potential weird dynamic linkage errors at runtime.
+  #    This may not be necessary though
+  #
+  # Toolchain for Darwin:
+  # clang 7 fails to emit a symbol for
+  # __ZN4llvm11SmallPtrSetIPKNS_10AllocaInstELj8EED1Ev in any of the
+  # translation units, so the build fails at link time
+  stdenv =
+    if cudaSupport then cudaPackages.backendStdenv
+    else if originalStdenv.isDarwin then llvmPackages_11.stdenv
+    else originalStdenv;
   inherit (cudaPackages) cudatoolkit cudnn nccl;
 in

...
 let
   withTensorboard = (pythonOlder "3.6") || tensorboardSupport;

+  # FIXME: migrate to redist cudaPackages
   cudatoolkit_joined = symlinkJoin {
     name = "${cudatoolkit.name}-merged";
     paths = [
...
     ];
   };

+  # Tensorflow expects bintools at hard-coded paths, e.g. /usr/bin/ar
+  # The only way to overcome that is to set GCC_HOST_COMPILER_PREFIX,
+  # but that path must contain cc as well, so we merge them
   cudatoolkit_cc_joined = symlinkJoin {
+    name = "${stdenv.cc.name}-merged";
     paths = [
+      stdenv.cc
       binutils.bintools # for ar, dwp, nm, objcopy, objdump, strip
     ];
   };
...
   '';
   }) else _bazel-build;

+  _bazel-build = buildBazelPackage.override { inherit stdenv; } {
     name = "${pname}-${version}";
     bazel = bazel_5;
...
       flatbuffers-core
       giflib
       grpc
+      # Necessary to fix the "`GLIBCXX_3.4.30' not found" error
+      (icu.override { inherit stdenv; })
       jsoncpp
       libjpeg_turbo
       libpng
       lmdb-core
+      (pybind11.overridePythonAttrs (_: { inherit stdenv; }))
       snappy
       sqlite
     ] ++ lib.optionals cudaSupport [
...

     TF_NEED_CUDA = tfFeature cudaSupport;
     TF_CUDA_PATHS = lib.optionalString cudaSupport "${cudatoolkit_joined},${cudnn},${nccl}";
     TF_CUDA_COMPUTE_CAPABILITIES = lib.concatStringsSep "," cudaCapabilities;

+    # Needed even when we override stdenv: e.g. for ar
+    GCC_HOST_COMPILER_PREFIX = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin";
+    GCC_HOST_COMPILER_PATH = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin/cc";

     postPatch = ''
       # bazel 3.3 should work just as well as bazel 3.1