{
  lib,
  stdenv,
  buildPythonPackage,
  fetchurl,

  # buildInputs
  llvmPackages,

  # build-system
  distutils,

  # dependencies
  ml-dtypes,
  absl-py,
  astunparse,
  flatbuffers,
  gast,
  google-pasta,
  grpcio,
  h5py,
  libclang,
  numpy,
  opt-einsum,
  packaging,
  protobuf,
  requests,
  six,
  tensorboard,
  termcolor,
  typing-extensions,
  wrapt,
  isPy3k,
  mock,

  config,
  cudaSupport ? config.cudaSupport,
  cudaPackages,
  zlib,
  python,
  addDriverRunpath,
}:

# We keep this binary build for three reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain
# - the source build doesn't work on NVIDIA Jetson platforms

# unsupported combination
assert !(stdenv.hostPlatform.isDarwin && cudaSupport);

let
  # Per-platform wheel URLs/hashes, keyed "<system>_<pyver>[_gpu|_jetson]",
  # plus "version" / "version_jetson" entries.
  packages = import ./binary-hashes.nix;
  inherit (cudaPackages) cudatoolkit cudnn;

  isCudaJetson = cudaSupport && cudaPackages.flags.isJetsonBuild;
in
buildPythonPackage rec {
  pname = "tensorflow" + lib.optionalString cudaSupport "-gpu";
  # Jetson wheels are versioned independently of the mainline wheels.
  version = packages."${"version" + lib.optionalString isCudaJetson "_jetson"}";
  format = "wheel";

  src =
    let
      # e.g. "3.11" -> "311", matching the key format used in binary-hashes.nix.
      pyVerNoDot = lib.strings.stringAsChars (x: lib.optionalString (x != ".") x) python.pythonVersion;
      platform = stdenv.system;
      cuda = lib.optionalString cudaSupport (if isCudaJetson then "_jetson" else "_gpu");
      key = "${platform}_${pyVerNoDot}${cuda}";
    in
    fetchurl (packages.${key} or (throw "tensorflow-bin: unsupported configuration: ${key}"));

  buildInputs = [ llvmPackages.openmp ];

  build-system = [
    distutils
  ];

  nativeBuildInputs =
    lib.optionals cudaSupport [ addDriverRunpath ]
    ++ lib.optionals isCudaJetson [ cudaPackages.autoAddCudaCompatRunpath ];

  dependencies = [
    absl-py
    astunparse
    flatbuffers
    gast
    google-pasta
    grpcio
    h5py
    libclang
    ml-dtypes
    numpy
    opt-einsum
    packaging
    protobuf
    requests
    six
    tensorboard
    termcolor
    typing-extensions
    wrapt
  ] ++ lib.optional (!isPy3k) mock;

  preConfigure = ''
    unset SOURCE_DATE_EPOCH

    # Make sure that dist and the wheel file are writable.
    chmod u+rwx -R ./dist

    pushd dist

    # Normalize zero-padded NVIDIA version suffixes in wheel filenames,
    # e.g. *nv24.07* -> *nv24.7*
    for f in tensorflow-*+nv*.whl; do
      mv "$f" "$(sed -E 's/(nv[0-9]+)\.0*([0-9]+)/\1.\2/' <<< "$f")"
    done

    popd
  '';

  postFixup =
    # When using the cpu-only wheel, the final package will be named `tensorflow_cpu`.
    # Then, in each package requiring `tensorflow`, our pythonRuntimeDepsCheck will fail with:
    # importlib.metadata.PackageNotFoundError: No package metadata was found for tensorflow
    # Hence, we manually rename the package to `tensorflow`.
    lib.optionalString ((builtins.match ".*tensorflow_cpu.*" src.url) != null) ''
      (
        cd $out/${python.sitePackages}

        dest="tensorflow-${version}.dist-info"

        mv tensorflow_cpu-${version}.dist-info "$dest"

        (
          cd "$dest"

          substituteInPlace METADATA \
            --replace-fail "tensorflow_cpu" "tensorflow"
          substituteInPlace RECORD \
            --replace-fail "tensorflow_cpu" "tensorflow"
        )
      )
    ''
    # Note that we need to run *after* the fixup phase because the
    # libraries are loaded at runtime. If we run in preFixup then
    # patchelf --shrink-rpath will remove the cuda libraries.
    + (
      let
        # rpaths we only need to add if CUDA is enabled.
        cudapaths = lib.optionals cudaSupport [
          cudatoolkit.out
          cudatoolkit.lib
          cudnn
        ];

        libpaths = [
          (lib.getLib stdenv.cc.cc)
          zlib
        ];

        rpath = lib.makeLibraryPath (libpaths ++ cudapaths);
      in
      lib.optionalString stdenv.hostPlatform.isLinux ''
        # This is an array containing all the directories in the tensorflow2
        # package that contain .so files.
        #
        # TODO: Create this list programmatically, and remove paths that aren't
        # actually needed.
        rrPathArr=(
          "$out/${python.sitePackages}/tensorflow/"
          "$out/${python.sitePackages}/tensorflow/core/kernels"
          "$out/${python.sitePackages}/tensorflow/compiler/mlir/stablehlo/"
          "$out/${python.sitePackages}/tensorflow/compiler/tf2tensorrt/"
          "$out/${python.sitePackages}/tensorflow/compiler/tf2xla/ops/"
          "$out/${python.sitePackages}/tensorflow/include/external/ml_dtypes/"
          "$out/${python.sitePackages}/tensorflow/lite/experimental/microfrontend/python/ops/"
          "$out/${python.sitePackages}/tensorflow/lite/python/analyzer_wrapper/"
          "$out/${python.sitePackages}/tensorflow/lite/python/interpreter_wrapper/"
          "$out/${python.sitePackages}/tensorflow/lite/python/metrics/"
          "$out/${python.sitePackages}/tensorflow/lite/python/optimize/"
          "$out/${python.sitePackages}/tensorflow/python/"
          "$out/${python.sitePackages}/tensorflow/python/autograph/impl/testing"
          "$out/${python.sitePackages}/tensorflow/python/client"
          "$out/${python.sitePackages}/tensorflow/python/data/experimental/service"
          "$out/${python.sitePackages}/tensorflow/python/framework"
          "$out/${python.sitePackages}/tensorflow/python/grappler"
          "$out/${python.sitePackages}/tensorflow/python/lib/core"
          "$out/${python.sitePackages}/tensorflow/python/lib/io"
          "$out/${python.sitePackages}/tensorflow/python/platform"
          "$out/${python.sitePackages}/tensorflow/python/profiler/internal"
          "$out/${python.sitePackages}/tensorflow/python/saved_model"
          "$out/${python.sitePackages}/tensorflow/python/util"
          "$out/${python.sitePackages}/tensorflow/tsl/python/lib/core"
          "$out/${python.sitePackages}/tensorflow.libs/"
          "${rpath}"
        )

        # Turn the bash array into a colon-separated list of RPATHs.
        rrPath=$(IFS=$':'; echo "''${rrPathArr[*]}")
        echo "about to run patchelf with the following rpath: $rrPath"

        # `read -r` so backslashes in paths are not interpreted as escapes.
        find "$out" -type f \( -name '*.so' -or -name '*.so.*' \) | while read -r lib; do
          echo "about to patchelf $lib..."
          chmod a+rx "$lib"
          patchelf --set-rpath "$rrPath" "$lib"
          ${lib.optionalString cudaSupport ''
            addDriverRunpath "$lib"
          ''}
        done
      ''
    );

  # Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
  # and the propagated input tensorboard, which causes environment collisions.
  # Another possibility would be to have tensorboard only in the buildInputs
  # See https://github.com/NixOS/nixpkgs/pull/44381 for more information.
  postInstall = ''
    rm "$out/bin/tensorboard"
  '';

  pythonImportsCheck = [
    "tensorflow"
    "tensorflow.python"
    "tensorflow.python.framework"
  ];

  meta = {
    description = "Computation using data flow graphs for scalable machine learning";
    homepage = "http://tensorflow.org";
    sourceProvenance = with lib.sourceTypes; [ binaryNativeCode ];
    license = lib.licenses.asl20;
    maintainers = with lib.maintainers; [
      jyp
      abbradar
    ];
    badPlatforms = [ "x86_64-darwin" ];
  };
}