nixpkgs mirror (for testing) github.com/NixOS/nixpkgs
nix
at python-updates 244 lines 7.5 kB view raw
{
  lib,
  stdenv,
  buildPythonPackage,
  fetchurl,

  # buildInputs
  llvmPackages,

  # build-system
  distutils,

  # dependencies
  ml-dtypes,
  absl-py,
  astunparse,
  flatbuffers,
  gast,
  google-pasta,
  grpcio,
  h5py,
  libclang,
  numpy,
  opt-einsum,
  packaging,
  protobuf,
  requests,
  six,
  tensorboard,
  termcolor,
  typing-extensions,
  wrapt,
  isPy3k,
  mock,

  config,
  cudaSupport ? config.cudaSupport,
  cudaPackages,
  zlib,
  python,
  addDriverRunpath,
}:

# We keep this binary build for three reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain
# - the source build doesn't work on NVIDIA Jetson platforms

let
  # Map of "<platform>_<pyver>[_gpu|_jetson]" keys to fetchurl arguments,
  # plus "version"/"version_jetson" entries.
  packages = import ./binary-hashes.nix;
  inherit (cudaPackages) cudatoolkit cudnn;

  isCudaJetson = cudaSupport && cudaPackages.flags.isJetsonBuild;
in
buildPythonPackage rec {
  pname = "tensorflow" + lib.optionalString cudaSupport "-gpu";
  # Jetson wheels may track a different upstream release than the regular ones.
  version = packages."${"version" + lib.optionalString isCudaJetson "_jetson"}";
  format = "wheel";

  src =
    let
      # e.g. "3.12" -> "312", matching the key scheme used in binary-hashes.nix.
      pyVerNoDot = lib.strings.stringAsChars (x: lib.optionalString (x != ".") x) python.pythonVersion;
      platform = stdenv.system;
      cuda = lib.optionalString cudaSupport (if isCudaJetson then "_jetson" else "_gpu");
      key = "${platform}_${pyVerNoDot}${cuda}";
    in
    fetchurl (packages.${key} or (throw "tensorflow-bin: unsupported configuration: ${key}"));

  buildInputs = [ llvmPackages.openmp ];

  build-system = [
    distutils
  ];

  nativeBuildInputs =
    lib.optionals cudaSupport [ addDriverRunpath ]
    ++ lib.optionals isCudaJetson [ cudaPackages.autoAddCudaCompatRunpath ];

  dependencies = [
    absl-py
    astunparse
    flatbuffers
    gast
    google-pasta
    grpcio
    h5py
    libclang
    ml-dtypes
    numpy
    opt-einsum
    packaging
    protobuf
    requests
    six
    tensorboard
    termcolor
    typing-extensions
    wrapt
  ]
  ++ lib.optional (!isPy3k) mock;

  pythonRemoveDeps = [
    "libclang"
    "keras"
  ];

  preConfigure = ''
    unset SOURCE_DATE_EPOCH

    # Make sure that dist and the wheel file are writable.
    chmod u+rwx -R ./dist

    pushd dist

    for f in tensorflow-*+nv*.whl; do
      # e.g. *nv24.07* -> *nv24.7*
      mv "$f" "$(sed -E 's/(nv[0-9]+)\.0*([0-9]+)/\1.\2/' <<< "$f")"
    done

    popd
  '';

  postFixup =
    # When using the cpu-only wheel, the final package will be named `tensorflow_cpu`.
    # Then, in each package requiring `tensorflow`, our pythonRuntimeDepsCheck will fail with:
    # importlib.metadata.PackageNotFoundError: No package metadata was found for tensorflow
    # Hence, we manually rename the package to `tensorflow`.
    lib.optionalString ((builtins.match ".*tensorflow_cpu.*" src.url) != null) ''
      (
        cd $out/${python.sitePackages}

        dest="tensorflow-${version}.dist-info"

        mv tensorflow_cpu-${version}.dist-info "$dest"

        (
          cd "$dest"

          substituteInPlace METADATA \
            --replace-fail "tensorflow_cpu" "tensorflow"
          substituteInPlace RECORD \
            --replace-fail "tensorflow_cpu" "tensorflow"
        )
      )
    ''
    # Note that we need to run *after* the fixup phase because the
    # libraries are loaded at runtime. If we run in preFixup then
    # patchelf --shrink-rpath will remove the cuda libraries.
    + (
      let
        # rpaths we only need to add if CUDA is enabled.
        cudapaths = lib.optionals cudaSupport [
          cudatoolkit.out
          cudatoolkit.lib
          cudnn
        ];

        libpaths = [
          (lib.getLib stdenv.cc.cc)
          zlib
        ];

        rpath = lib.makeLibraryPath (libpaths ++ cudapaths);
      in
      lib.optionalString stdenv.hostPlatform.isLinux ''
        # This is an array containing all the directories in the tensorflow2
        # package that contain .so files.
        #
        # TODO: Create this list programmatically, and remove paths that aren't
        # actually needed.
        rrPathArr=(
          "$out/${python.sitePackages}/tensorflow/"
          "$out/${python.sitePackages}/tensorflow/core/kernels"
          "$out/${python.sitePackages}/tensorflow/compiler/mlir/stablehlo/"
          "$out/${python.sitePackages}/tensorflow/compiler/tf2tensorrt/"
          "$out/${python.sitePackages}/tensorflow/compiler/tf2xla/ops/"
          "$out/${python.sitePackages}/tensorflow/include/external/ml_dtypes/"
          "$out/${python.sitePackages}/tensorflow/lite/experimental/microfrontend/python/ops/"
          "$out/${python.sitePackages}/tensorflow/lite/python/analyzer_wrapper/"
          "$out/${python.sitePackages}/tensorflow/lite/python/interpreter_wrapper/"
          "$out/${python.sitePackages}/tensorflow/lite/python/metrics/"
          "$out/${python.sitePackages}/tensorflow/lite/python/optimize/"
          "$out/${python.sitePackages}/tensorflow/python/"
          "$out/${python.sitePackages}/tensorflow/python/autograph/impl/testing"
          "$out/${python.sitePackages}/tensorflow/python/client"
          "$out/${python.sitePackages}/tensorflow/python/data/experimental/service"
          "$out/${python.sitePackages}/tensorflow/python/framework"
          "$out/${python.sitePackages}/tensorflow/python/grappler"
          "$out/${python.sitePackages}/tensorflow/python/lib/core"
          "$out/${python.sitePackages}/tensorflow/python/lib/io"
          "$out/${python.sitePackages}/tensorflow/python/platform"
          "$out/${python.sitePackages}/tensorflow/python/profiler/internal"
          "$out/${python.sitePackages}/tensorflow/python/saved_model"
          "$out/${python.sitePackages}/tensorflow/python/util"
          "$out/${python.sitePackages}/tensorflow/tsl/python/lib/core"
          "$out/${python.sitePackages}/tensorflow.libs/"
          "${rpath}"
        )

        # Turn the bash array into a colon-separated list of RPATHs.
        rrPath=$(IFS=$':'; echo "''${rrPathArr[*]}")
        echo "about to run patchelf with the following rpath: $rrPath"

        find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
          echo "about to patchelf $lib..."
          chmod a+rx "$lib"
          patchelf --set-rpath "$rrPath" "$lib"
          ${lib.optionalString cudaSupport ''
            addDriverRunpath "$lib"
          ''}
        done
      ''
    )
    # Symlink nvcc besides TensorFlow so that routines that require JIT can work
    # properly.
    + lib.optionalString cudaSupport ''
      ln -s ${cudaPackages.cuda_nvcc} "$out/${python.sitePackages}/tensorflow/cuda"
    '';

  # Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
  # and the propagated input tensorboard, which causes environment collisions.
  # Another possibility would be to have tensorboard only in the buildInputs
  # See https://github.com/NixOS/nixpkgs/pull/44381 for more information.
  postInstall = ''
    rm $out/bin/tensorboard
  '';

  pythonImportsCheck = [
    "tensorflow"
    "tensorflow.python"
    "tensorflow.python.framework"
  ];

  meta = {
    description = "Computation using data flow graphs for scalable machine learning";
    homepage = "https://tensorflow.org";
    sourceProvenance = with lib.sourceTypes; [ binaryNativeCode ];
    license = lib.licenses.asl20;
    maintainers = [ ];
    badPlatforms = [ "x86_64-darwin" ];
    # unsupported combination
    broken = stdenv.hostPlatform.isDarwin && cudaSupport;
  };
}