python3Packages.tensorflow: fix `GLIBCXX_3.4.30' not found

Make tensorflow (and a bunch of other things) use a CUDA-compatible
toolchain. Introduces cudaPackages.backendStdenv
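
For illustration, a minimal sketch of what consuming the new attribute
looks like (the package name here is hypothetical, not part of this
change):

    { cudaPackages }:

    # backendStdenv is an ordinary stdenv (e.g. gcc11Stdenv for CUDA 11),
    # chosen to match the gcc that nvcc supports, so code that must stay
    # ABI-compatible with nvcc's back-end can build with it directly:
    cudaPackages.backendStdenv.mkDerivation {
      pname = "example-cuda-consumer";
      version = "0.1";
      src = ./.;
    }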

+88 -69
+20 -34
pkgs/development/compilers/cudatoolkit/common.nix
···
 , fetchurl
 , fontconfig
 , freetype
-, gcc
+, gcc # :: String
 , gdk-pixbuf
 , glib
 , glibc
···
 , perl
 , python3
 , requireFile
-, stdenv
+, backendStdenv # E.g. gcc11Stdenv, set in extension.nix
 , unixODBC
 , xorg
 , zlib
 }:
 
-stdenv.mkDerivation rec {
+backendStdenv.mkDerivation rec {
   pname = "cudatoolkit";
   inherit version runPatches;
···
 
     # Fix builds with newer glibc version
     sed -i "1 i#define _BITS_FLOATN_H" "$out/include/host_defines.h"
-
-    # Ensure that cmake can find CUDA.
+  '' +
+  # Point NVCC at a compatible compiler
+  # FIXME: redist cuda_nvcc copy-pastes this code
+  # Refer to comments in the overrides for cuda_nvcc for explanation
+  # CUDA_TOOLKIT_ROOT_DIR is legacy,
+  # Cf. https://cmake.org/cmake/help/latest/module/FindCUDA.html#input-variables
+  ''
     mkdir -p $out/nix-support
-    echo "cmakeFlags+=' -DCUDA_TOOLKIT_ROOT_DIR=$out'" >> $out/nix-support/setup-hook
-
-    # Set the host compiler to be used by nvcc.
-    # FIXME: redist cuda_nvcc copy-pastes this code
-
-    # For CMake-based projects:
-    # https://cmake.org/cmake/help/latest/module/FindCUDA.html#input-variables
-    # https://cmake.org/cmake/help/latest/envvar/CUDAHOSTCXX.html
-    # https://cmake.org/cmake/help/latest/variable/CMAKE_CUDA_HOST_COMPILER.html
-
-    # For non-CMake projects:
-    # FIXME: results in "incompatible redefinition" warnings ...but we keep
-    # both this and cmake variables until we come up with a more general
-    # solution
-    # https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#compiler-bindir-directory-ccbin
-
     cat <<EOF >> $out/nix-support/setup-hook
-
-    cmakeFlags+=' -DCUDA_HOST_COMPILER=${gcc}/bin'
-    cmakeFlags+=' -DCMAKE_CUDA_HOST_COMPILER=${gcc}/bin'
+    cmakeFlags+=' -DCUDA_TOOLKIT_ROOT_DIR=$out'
+    cmakeFlags+=' -DCUDA_HOST_COMPILER=${backendStdenv.cc}/bin'
+    cmakeFlags+=' -DCMAKE_CUDA_HOST_COMPILER=${backendStdenv.cc}/bin'
     if [ -z "\''${CUDAHOSTCXX-}" ]; then
-    export CUDAHOSTCXX=${gcc}/bin;
+    export CUDAHOSTCXX=${backendStdenv.cc}/bin;
     fi
-
-    export NVCC_PREPEND_FLAGS+=' --compiler-bindir=${gcc}/bin'
+    export NVCC_PREPEND_FLAGS+=' --compiler-bindir=${backendStdenv.cc}/bin'
     EOF
-
 
     # Move some libraries to the lib output so that programs that
     # depend on them don't pull in this entire monstrosity.
···
 
         # The path to libstdc++ and such
         #
-        # NB:
-        # 1. "gcc" (gcc-wrapper) here is what's exposed as cudaPackages.cudatoolkit.cc
-        # 2. "gcc.cc" is the unwrapped gcc
-        # 3. "gcc.cc.lib" is one of its outputs
-        "${gcc.cc.lib}/lib64"
+        # `backendStdenv` is the cuda-compatible toolchain that we pick in
+        # extension.nix; we hand it to NVCC to use as a back-end, and we link
+        # cudatoolkit's binaries against its libstdc++
+        "${backendStdenv.cc.cc.lib}/lib64"
 
         "$out/jre/lib/amd64/jli"
         "$out/lib64"
···
     popd
   '';
   passthru = {
-    cc = gcc;
+    cc = backendStdenv.cc;
     majorMinorVersion = lib.versions.majorMinor version;
     majorVersion = lib.versions.majorMinor version;
   };
+16 -3
pkgs/development/compilers/cudatoolkit/extension.nix
···
   # Version info for the classic cudatoolkit packages that contain everything that is in redist.
   cudatoolkitVersions = final.lib.importTOML ./versions.toml;
 
+  finalVersion = cudatoolkitVersions.${final.cudaVersion};
+
+  # Exposed as cudaPackages.backendStdenv.
+  # We don't call it just "stdenv" to avoid confusion: e.g. this toolchain doesn't contain nvcc.
+  # Instead, it's the back-end toolchain for nvcc to use.
+  # We also use this to link a compatible libstdc++ (backendStdenv.cc.cc.lib)
+  # Cf. https://github.com/NixOS/nixpkgs/pull/218265 for context
+  backendStdenv = prev.pkgs."${finalVersion.gcc}Stdenv";
+
   ### Add classic cudatoolkit package
-  cudatoolkit = buildCudaToolkitPackage ((attrs: attrs // { gcc = prev.pkgs.${attrs.gcc}; }) cudatoolkitVersions.${final.cudaVersion});
+  cudatoolkit = buildCudaToolkitPackage (finalVersion // { inherit backendStdenv; });
 
   cudaFlags = final.callPackage ./flags.nix {};
 
-in {
-  inherit cudatoolkit cudaFlags;
+in
+{
+  inherit
+    backendStdenv
+    cudatoolkit
+    cudaFlags;
 }
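
To make the lookup concrete: versions.toml records, per CUDA release, the
name of a gcc package as a string (e.g. "gcc11", per the comments above),
and `prev.pkgs."${finalVersion.gcc}Stdenv"` turns that into the matching
stdenv. A sketch of what the selection reduces to, assuming such an entry
(illustration only, not part of the diff):

    let
      pkgs = import <nixpkgs> { };
      # versions.toml pins gcc = "gcc11"  =>  pkgs."gcc11Stdenv"
      backendStdenv = pkgs.gcc11Stdenv;
    in
    # gcc11's libstdc++ output, the library CUDA consumers get linked against:
    backendStdenv.cc.cc.lib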
+4 -7
pkgs/development/compilers/cudatoolkit/redist/build-cuda-redist-package.nix
 { lib
-, stdenv
+, backendStdenv
 , fetchurl
 , autoPatchelfHook
 , autoAddOpenGLRunpathHook
···
 let
   arch = "linux-x86_64";
 in
-stdenv.mkDerivation {
+backendStdenv.mkDerivation {
   inherit pname;
   inherit (attrs) version;
···
     # autoPatchelfHook will search for a libstdc++ and we're giving it a
     # "compatible" libstdc++ from the same toolchain that NVCC uses.
     #
-    # E.g. it might happen that stdenv=gcc12Stdenv, but we build against cuda11
-    # that only "supports" gcc11. Linking against gcc12's libraries we might
-    # sometimes encounter dynamic linkage errors at runtime
     # NB: We don't actually know if this is the right thing to do
-    cudatoolkit.cc.cc.lib
+    backendStdenv.cc.cc.lib
   ];
 
   dontBuild = true;
···
     runHook postInstall
   '';
 
-  passthru.stdenv = stdenv;
+  passthru.stdenv = backendStdenv;
 
   meta = {
     description = attrs.name;
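
The same pattern should apply to any package that ships prebuilt CUDA
binaries and relies on autoPatchelfHook; a hedged sketch (the name
`my-cuda-app` and its src are illustrative, nothing here is from this PR):

    { cudaPackages, autoPatchelfHook, autoAddOpenGLRunpathHook }:

    cudaPackages.backendStdenv.mkDerivation {
      pname = "my-cuda-app";
      version = "0.1";
      src = ./vendor; # prebuilt ELF binaries
      nativeBuildInputs = [ autoPatchelfHook autoAddOpenGLRunpathHook ];
      buildInputs = [
        # the libstdc++ from the same toolchain that nvcc compiles against;
        # autoPatchelfHook will point the vendored binaries at it
        cudaPackages.backendStdenv.cc.cc.lib
      ];
    }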
+1 -2
pkgs/development/compilers/cudatoolkit/redist/overrides.nix
···
 
   cuda_nvcc = prev.cuda_nvcc.overrideAttrs (oldAttrs:
     let
-      inherit (prev.cudatoolkit) cc;
+      inherit (prev.backendStdenv) cc;
     in
     {
       # Point NVCC at a compatible compiler
···
       postInstall = (oldAttrs.postInstall or "") + ''
         mkdir -p $out/nix-support
         cat <<EOF >> $out/nix-support/setup-hook
-        cmakeFlags+=' -DCUDA_TOOLKIT_ROOT_DIR=$out'
         cmakeFlags+=' -DCUDA_HOST_COMPILER=${cc}/bin'
         cmakeFlags+=' -DCMAKE_CUDA_HOST_COMPILER=${cc}/bin'
         if [ -z "\''${CUDAHOSTCXX-}" ]; then
+4 -4
pkgs/development/libraries/science/math/cudnn/generic.nix
 {
-  stdenv,
+  backendStdenv,
   lib,
   zlib,
   useCudatoolkitRunfile ? false,
   cudaVersion,
   cudaMajorVersion,
-  cudatoolkit, # if cuda>=11: only used for .cc
+  cudatoolkit, # For cuda < 11
   libcublas ? null, # cuda <11 doesn't ship redist packages
   autoPatchelfHook,
   autoAddOpenGLRunpathHook,
···
   maxCudaVersion,
 }:
 assert useCudatoolkitRunfile || (libcublas != null); let
-  inherit (cudatoolkit) cc;
+  inherit (backendStdenv) cc;
   inherit (lib) lists strings trivial versions;
 
   # majorMinorPatch :: String -> String
···
     then cudatoolkit
     else libcublas;
 in
-stdenv.mkDerivation {
+backendStdenv.mkDerivation {
   pname = "cudatoolkit-${cudaMajorVersion}-cudnn";
   version = versionTriple;
+5 -3
pkgs/development/libraries/science/math/tensorrt/generic.nix
 { lib
-, stdenv
+, backendStdenv
 , requireFile
 , autoPatchelfHook
 , autoAddOpenGLRunpathHook
···
 assert lib.assertMsg (lib.strings.versionAtLeast cudnn.version fileVersionCudnn)
   "This version of TensorRT requires at least cuDNN ${fileVersionCudnn} (current version is ${cudnn.version})";
 
-stdenv.mkDerivation rec {
+backendStdenv.mkDerivation rec {
   pname = "cudatoolkit-${cudatoolkit.majorVersion}-tensorrt";
   version = fullVersion;
   src = requireFile rec {
···
   # Used by autoPatchelfHook
   buildInputs = [
-    cudatoolkit.cc.cc.lib # libstdc++
+    backendStdenv.cc.cc.lib # libstdc++
     cudatoolkit
     cudnn
   ];
···
       "$out/lib/libnvinfer_plugin.so.${mostOfVersion}" \
       "$out/lib/libnvinfer_builder_resource.so.${mostOfVersion}"
   '';
+
+  passthru.stdenv = backendStdenv;
 
   meta = with lib; {
     # Check that the cudatoolkit version satisfies our min/max constraints (both
+34 -12
pkgs/development/python-modules/tensorflow/default.nix
···
 }:
 
 let
+  originalStdenv = stdenv;
+in
+let
+  # Tensorflow looks at many toolchain-related variables which may diverge.
+  #
+  # Toolchain for cuda-enabled builds.
+  # We want to achieve two things:
+  #   1. NVCC should use a compatible back-end (e.g. gcc11 for cuda11)
+  #   2. Normal C++ files should be compiled with the same toolchain,
+  #      to avoid potential weird dynamic linkage errors at runtime.
+  #      This may not be necessary though
+  #
+  # Toolchain for Darwin:
+  # clang 7 fails to emit a symbol for
+  # __ZN4llvm11SmallPtrSetIPKNS_10AllocaInstELj8EED1Ev in any of the
+  # translation units, so the build fails at link time
+  stdenv =
+    if cudaSupport then cudaPackages.backendStdenv
+    else if originalStdenv.isDarwin then llvmPackages_11.stdenv
+    else originalStdenv;
   inherit (cudaPackages) cudatoolkit cudnn nccl;
 in
···
 let
   withTensorboard = (pythonOlder "3.6") || tensorboardSupport;
 
+  # FIXME: migrate to redist cudaPackages
   cudatoolkit_joined = symlinkJoin {
     name = "${cudatoolkit.name}-merged";
     paths = [
···
     ];
   };
 
+  # Tensorflow expects bintools at hard-coded paths, e.g. /usr/bin/ar
+  # The only way to overcome that is to set GCC_HOST_COMPILER_PREFIX,
+  # but that path must contain cc as well, so we merge them
   cudatoolkit_cc_joined = symlinkJoin {
-    name = "${cudatoolkit.cc.name}-merged";
+    name = "${stdenv.cc.name}-merged";
     paths = [
-      cudatoolkit.cc
+      stdenv.cc
       binutils.bintools # for ar, dwp, nm, objcopy, objdump, strip
     ];
   };
···
   '';
   }) else _bazel-build;
 
-  _bazel-build = (buildBazelPackage.override (lib.optionalAttrs stdenv.isDarwin {
-    # clang 7 fails to emit a symbol for
-    # __ZN4llvm11SmallPtrSetIPKNS_10AllocaInstELj8EED1Ev in any of the
-    # translation units, so the build fails at link time
-    stdenv = llvmPackages_11.stdenv;
-  })) {
+  _bazel-build = buildBazelPackage.override { inherit stdenv; } {
     name = "${pname}-${version}";
     bazel = bazel_5;
···
     flatbuffers-core
     giflib
     grpc
-    icu
+    # Necessary to fix the "`GLIBCXX_3.4.30' not found" error
+    (icu.override { inherit stdenv; })
     jsoncpp
     libjpeg_turbo
     libpng
     lmdb-core
-    pybind11
+    (pybind11.overridePythonAttrs (_: { inherit stdenv; }))
     snappy
     sqlite
   ] ++ lib.optionals cudaSupport [
···
 
   TF_NEED_CUDA = tfFeature cudaSupport;
   TF_CUDA_PATHS = lib.optionalString cudaSupport "${cudatoolkit_joined},${cudnn},${nccl}";
-  GCC_HOST_COMPILER_PREFIX = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin";
-  GCC_HOST_COMPILER_PATH = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin/gcc";
   TF_CUDA_COMPUTE_CAPABILITIES = lib.concatStringsSep "," cudaCapabilities;
+
+  # Needed even when we override stdenv: e.g. for ar
+  GCC_HOST_COMPILER_PREFIX = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin";
+  GCC_HOST_COMPILER_PATH = lib.optionalString cudaSupport "${cudatoolkit_cc_joined}/bin/cc";
 
   postPatch = ''
     # bazel 3.3 should work just as well as bazel 3.1
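
A quick way to exercise the fix (a sketch, assuming a nixpkgs checkout
that contains this change and that `<nixpkgs>` points at it):

    # shell.nix: Python environment with the cuda-enabled tensorflow
    let
      pkgs = import <nixpkgs> {
        config.allowUnfree = true;
        config.cudaSupport = true;
      };
    in
    pkgs.mkShell {
      packages = [ (pkgs.python3.withPackages (ps: [ ps.tensorflow ])) ];
    }

Running python -c 'import tensorflow' inside that nix-shell exercises the
dynamic linkage against libstdc++ that used to fail with the
`GLIBCXX_3.4.30' not found error.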
+4 -4
pkgs/test/cuda/cuda-library-samples/generic.nix
-{ lib, stdenv, fetchFromGitHub
+{ lib, backendStdenv, fetchFromGitHub
 , cmake, addOpenGLRunpath
 , cudatoolkit
 , cutensor
···
 in
 
 {
-  cublas = stdenv.mkDerivation (commonAttrs // {
+  cublas = backendStdenv.mkDerivation (commonAttrs // {
     pname = "cuda-library-samples-cublas";
 
     src = "${src}/cuBLASLt";
   });
 
-  cusolver = stdenv.mkDerivation (commonAttrs // {
+  cusolver = backendStdenv.mkDerivation (commonAttrs // {
     pname = "cuda-library-samples-cusolver";
 
     src = "${src}/cuSOLVER";
···
     sourceRoot = "cuSOLVER/gesv";
   });
 
-  cutensor = stdenv.mkDerivation (commonAttrs // {
+  cutensor = backendStdenv.mkDerivation (commonAttrs // {
     pname = "cuda-library-samples-cutensor";
 
     src = "${src}/cuTENSOR";