# Clone of https://github.com/NixOS/nixpkgs.git (to stress-test knotserver)
# Snapshot from nixpkgs release 21.05; original file: 196 lines, 5.8 kB.
{ stdenv
, lib
, fetchurl
, buildPythonPackage
, isPy3k, pythonOlder, pythonAtLeast, isPy38
, astor
, gast
, google-pasta
, wrapt
, numpy
, six
, termcolor
, protobuf
, absl-py
, grpcio
, mock
, scipy
, wheel
, opt-einsum
, backports_weakref
, tensorflow-estimator_2
, tensorflow-tensorboard_2
, cudaSupport ? false
, cudatoolkit ? null
, cudnn ? null
, nvidia_x11 ? null
, zlib
, python
, symlinkJoin
, keras-applications
, keras-preprocessing
, addOpenGLRunpath
}:

# We keep this binary build for two reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain

# CUDA support requires all three CUDA-related inputs to be provided.
assert cudaSupport -> cudatoolkit != null
                   && cudnn != null
                   && nvidia_x11 != null;

# unsupported combination
assert ! (stdenv.isDarwin && cudaSupport);

let
  # Maps "<platform>_py_<ver>_<unit>" keys to { url, sha256, version } sets.
  packages = import ./binary-hashes.nix;

  variant = if cudaSupport then "-gpu" else "";
  pname = "tensorflow${variant}";

in buildPythonPackage {
  inherit pname;
  inherit (packages) version;
  format = "wheel";

  # Upstream ships no 3.8+ wheels for this release.
  disabled = pythonAtLeast "3.8";

  # Select the prebuilt wheel matching platform, Python version and CPU/GPU
  # variant, e.g. "linux_py_37_gpu".
  src = let
    pyVerNoDot = lib.strings.stringAsChars (x: if x == "." then "" else x) python.pythonVersion;
    platform = if stdenv.isDarwin then "mac" else "linux";
    unit = if cudaSupport then "gpu" else "cpu";
    key = "${platform}_py_${pyVerNoDot}_${unit}";
  in fetchurl packages.${key};

  propagatedBuildInputs = [
    protobuf
    numpy
    scipy
    termcolor
    grpcio
    six
    astor
    absl-py
    gast
    opt-einsum
    google-pasta
    wrapt
    tensorflow-estimator_2
    tensorflow-tensorboard_2
    keras-applications
    keras-preprocessing
  ] ++ lib.optional (!isPy3k) mock
    ++ lib.optionals (pythonOlder "3.4") [ backports_weakref ];

  nativeBuildInputs = [ wheel ] ++ lib.optional cudaSupport addOpenGLRunpath;

  preConfigure = ''
    unset SOURCE_DATE_EPOCH

    # Make sure that dist and the wheel file are writable.
    chmod u+rwx -R ./dist

    pushd dist

    # Unpack the wheel file.
    wheel unpack --dest unpacked ./*.whl

    # Tensorflow wheels tightly constrain the versions of gast, tensorflow-estimator and scipy.
    # This code relaxes these requirements:
    substituteInPlace ./unpacked/tensorflow*/tensorflow_core/tools/pip_package/setup.py \
      --replace "tensorflow_estimator >= 2.1.0rc0, < 2.2.0" "tensorflow_estimator" \
      --replace "tensorboard >= 2.1.0, < 2.2.0" "tensorboard" \
      --replace "gast == 0.2.2" "gast" \
      --replace "scipy == 1.2.2" "scipy"

    substituteInPlace ./unpacked/tensorflow*/tensorflow*.dist-info/METADATA \
      --replace "gast (==0.2.2)" "gast" \
      --replace "tensorflow-estimator (<2.2.0,>=2.1.0rc0)" "tensorflow_estimator" \
      --replace "tensorboard (<2.2.0,>=2.1.0)" "tensorboard" \
      --replace "scipy (==1.4.1)" "scipy"

    # Pack the wheel file back up.
    wheel pack ./unpacked/tensorflow*

    popd
  '';

  # Note that we need to run *after* the fixup phase because the
  # libraries are loaded at runtime. If we run in preFixup then
  # patchelf --shrink-rpath will remove the cuda libraries.
  postFixup =
    let
      # rpaths we only need to add if CUDA is enabled.
      cudapaths = lib.optionals cudaSupport [
        cudatoolkit.out
        cudatoolkit.lib
        cudnn
        nvidia_x11
      ];

      libpaths = [
        stdenv.cc.cc.lib
        zlib
      ];

      rpath = lib.makeLibraryPath (libpaths ++ cudapaths);
    in
    lib.optionalString stdenv.isLinux ''
      # This is an array containing all the directories in the tensorflow2
      # package that contain .so files.
      #
      # TODO: Create this list programmatically, and remove paths that aren't
      # actually needed.
      rrPathArr=(
        "$out/${python.sitePackages}/tensorflow_core/"
        "$out/${python.sitePackages}/tensorflow_core/compiler/tf2tensorrt/"
        "$out/${python.sitePackages}/tensorflow_core/compiler/tf2xla/ops/"
        "$out/${python.sitePackages}/tensorflow_core/lite/experimental/microfrontend/python/ops/"
        "$out/${python.sitePackages}/tensorflow_core/lite/python/interpreter_wrapper/"
        "$out/${python.sitePackages}/tensorflow_core/lite/python/optimize/"
        "$out/${python.sitePackages}/tensorflow_core/python/"
        "$out/${python.sitePackages}/tensorflow_core/python/framework/"
        "${rpath}"
      )

      # Turn the bash array into a colon-separated list of RPATHs.
      rrPath=$(IFS=$':'; echo "''${rrPathArr[*]}")
      echo "about to run patchelf with the following rpath: $rrPath"

      find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
        echo "about to patchelf $lib..."
        chmod a+rx "$lib"
        patchelf --set-rpath "$rrPath" "$lib"
        ${lib.optionalString cudaSupport ''
          addOpenGLRunpath "$lib"
        ''}
      done
    '';

  # Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
  # and the propagated input tensorflow-tensorboard, which causes environment collisions.
  # Another possibility would be to have tensorboard only in the buildInputs
  # See https://github.com/NixOS/nixpkgs/pull/44381 for more information.
  postInstall = ''
    rm $out/bin/tensorboard
  '';

  pythonImportsCheck = [
    "tensorflow"
    "tensorflow.keras"
    "tensorflow.python"
    "tensorflow.python.framework"
  ];

  meta = with lib; {
    description = "Computation using data flow graphs for scalable machine learning";
    homepage = "https://www.tensorflow.org/";
    license = licenses.asl20;
    maintainers = with maintainers; [ jyp abbradar cdepillabout ];
    platforms = [ "x86_64-linux" "x86_64-darwin" ];
    # Python 2.7 build uses different string encoding.
    # See https://github.com/NixOS/nixpkgs/pull/37044#issuecomment-373452253
    broken = stdenv.isDarwin && !isPy3k;
  };
}