nixpkgs mirror (for testing) github.com/NixOS/nixpkgs
nix
at 22.05 197 lines 6.0 kB view raw
{ stdenv
, lib
, fetchurl
, buildPythonPackage
, isPy3k, pythonOlder, pythonAtLeast
, astor
, gast
, google-pasta
, wrapt
, numpy
, six
, termcolor
, protobuf
, absl-py
, grpcio
, mock
, scipy
, wheel
, opt-einsum
, backports_weakref
, tensorflow-estimator
, tensorboard
, cudaSupport ? false
, cudaPackages ? {}
, patchelfUnstable
, zlib
, python
, keras-applications
, keras-preprocessing
, addOpenGLRunpath
, astunparse
, flatbuffers
, h5py
, typing-extensions
}:

# We keep this binary build for two reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain

# unsupported combination
assert ! (stdenv.isDarwin && cudaSupport);

let
  # Map of wheel URLs/hashes keyed by platform, Python version and CPU/GPU
  # variant, plus the packaged version string.
  packages = import ./binary-hashes.nix;
  inherit (cudaPackages) cudatoolkit cudnn;
in buildPythonPackage {
  pname = "tensorflow" + lib.optionalString cudaSupport "-gpu";
  inherit (packages) version;
  format = "wheel";

  # See https://github.com/tensorflow/tensorflow/issues/55581#issuecomment-1101890383
  disabled = pythonAtLeast "3.10" && !cudaSupport;

  # Select the prebuilt wheel from binary-hashes.nix; keys look like
  # "linux_py_39_cpu" (platform, dotless Python version, cpu/gpu unit).
  src = let
    pyVerNoDot = lib.strings.stringAsChars (x: if x == "." then "" else x) python.pythonVersion;
    platform = if stdenv.isDarwin then "mac" else "linux";
    unit = if cudaSupport then "gpu" else "cpu";
    key = "${platform}_py_${pyVerNoDot}_${unit}";
  in fetchurl packages.${key};

  propagatedBuildInputs = [
    astunparse
    flatbuffers
    typing-extensions
    protobuf
    numpy
    scipy
    termcolor
    grpcio
    six
    astor
    absl-py
    gast
    opt-einsum
    google-pasta
    wrapt
    tensorflow-estimator
    tensorboard
    keras-applications
    keras-preprocessing
    h5py
  ] ++ lib.optional (!isPy3k) mock
    ++ lib.optionals (pythonOlder "3.4") [ backports_weakref ];

  # remove patchelfUnstable once patchelf 0.14 with https://github.com/NixOS/patchelf/pull/256 becomes the default
  # NOTE: `lib.optionals` (not `lib.optional`) is required here — the operand
  # is a list, and `lib.optional cond [ a b ]` would yield the nested list
  # [ [ a b ] ] instead of appending the two elements.
  nativeBuildInputs = [ wheel ] ++ lib.optionals cudaSupport [ addOpenGLRunpath patchelfUnstable ];

  # Unpack the wheel, patch its METADATA to match what nixpkgs actually
  # provides, and repack it before the wheel is installed.
  preConfigure = ''
    unset SOURCE_DATE_EPOCH

    # Make sure that dist and the wheel file are writable.
    chmod u+rwx -R ./dist

    pushd dist

    wheel unpack --dest unpacked ./*.whl
    rm ./*.whl
    (
      cd unpacked/tensorflow*
      # Adjust dependency requirements:
      # - Relax tensorflow-estimator version requirement that doesn't match what we have packaged
      # - The purpose of python3Packages.libclang is not clear at the moment and we don't have it packaged yet
      # - keras and tensorlow-io-gcs-filesystem will be considered as optional for now.
      sed -i *.dist-info/METADATA \
        -e "s/Requires-Dist: tf-estimator-nightly.*/Requires-Dist: tensorflow-estimator/" \
        -e "/Requires-Dist: libclang/d" \
        -e "/Requires-Dist: keras/d" \
        -e "/Requires-Dist: tensorflow-io-gcs-filesystem/d"
    )
    wheel pack ./unpacked/tensorflow*

    popd
  '';

  # Note that we need to run *after* the fixup phase because the
  # libraries are loaded at runtime. If we run in preFixup then
  # patchelf --shrink-rpath will remove the cuda libraries.
  postFixup =
    let
      # rpaths we only need to add if CUDA is enabled.
      cudapaths = lib.optionals cudaSupport [
        cudatoolkit.out
        cudatoolkit.lib
        cudnn
      ];

      libpaths = [
        stdenv.cc.cc.lib
        zlib
      ];

      rpath = lib.makeLibraryPath (libpaths ++ cudapaths);
    in
    lib.optionalString stdenv.isLinux ''
      # This is an array containing all the directories in the tensorflow2
      # package that contain .so files.
      #
      # TODO: Create this list programmatically, and remove paths that aren't
      # actually needed.
      rrPathArr=(
        "$out/${python.sitePackages}/tensorflow/"
        "$out/${python.sitePackages}/tensorflow/core/kernels"
        "$out/${python.sitePackages}/tensorflow/compiler/tf2tensorrt/"
        "$out/${python.sitePackages}/tensorflow/compiler/tf2xla/ops/"
        "$out/${python.sitePackages}/tensorflow/lite/experimental/microfrontend/python/ops/"
        "$out/${python.sitePackages}/tensorflow/lite/python/interpreter_wrapper/"
        "$out/${python.sitePackages}/tensorflow/lite/python/optimize/"
        "$out/${python.sitePackages}/tensorflow/python/"
        "$out/${python.sitePackages}/tensorflow/python/framework/"
        "$out/${python.sitePackages}/tensorflow/python/autograph/impl/testing"
        "$out/${python.sitePackages}/tensorflow/python/data/experimental/service"
        "$out/${python.sitePackages}/tensorflow/python/framework"
        "$out/${python.sitePackages}/tensorflow/python/profiler/internal"
        "${rpath}"
      )

      # The the bash array into a colon-separated list of RPATHs.
      rrPath=$(IFS=$':'; echo "''${rrPathArr[*]}")
      echo "about to run patchelf with the following rpath: $rrPath"

      find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
        echo "about to patchelf $lib..."
        chmod a+rx "$lib"
        patchelf --set-rpath "$rrPath" "$lib"
        ${lib.optionalString cudaSupport ''
          addOpenGLRunpath "$lib"
        ''}
      done
    '';

  # Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
  # and the propagated input tensorboard, which causes environment collisions.
  # Another possibility would be to have tensorboard only in the buildInputs
  # See https://github.com/NixOS/nixpkgs/pull/44381 for more information.
  postInstall = ''
    rm $out/bin/tensorboard
  '';

  pythonImportsCheck = [
    "tensorflow"
    "tensorflow.python"
    "tensorflow.python.framework"
  ];

  passthru = {
    inherit cudaPackages;
  };

  meta = with lib; {
    broken = stdenv.isDarwin;
    description = "Computation using data flow graphs for scalable machine learning";
    homepage = "http://tensorflow.org";
    license = licenses.asl20;
    maintainers = with maintainers; [ jyp abbradar cdepillabout ];
    platforms = [ "x86_64-linux" "x86_64-darwin" ];
  };
}