{ stdenv
, lib
, fetchurl
, buildPythonPackage
, isPy3k, pythonOlder, pythonAtLeast, astor
, gast
, google-pasta
, wrapt
, numpy
, six
, termcolor
, packaging
, protobuf
, absl-py
, grpcio
, mock
, scipy
, wheel
, jax
, opt-einsum
, backports_weakref
, tensorflow-estimator-bin
, tensorboard
, cudaSupport ? false
, cudaPackages ? {}
, zlib
, python
, keras-applications
, keras-preprocessing
, addOpenGLRunpath
, astunparse
, flatbuffers
, h5py
, typing-extensions
}:

# We keep this binary build for two reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain

# unsupported combination: upstream publishes no CUDA wheels for Darwin
assert ! (stdenv.isDarwin && cudaSupport);

let
  # Maps "<platform>_py_<ver>_<cpu|gpu>" keys to fetchurl url/hash attrsets,
  # and carries the upstream version string.
  packages = import ./binary-hashes.nix;
  # Lazy: these attributes are only forced when cudaSupport is true, so an
  # empty cudaPackages set is fine for the default CPU build.
  inherit (cudaPackages) cudatoolkit cudnn;
in buildPythonPackage {
  pname = "tensorflow" + lib.optionalString cudaSupport "-gpu";
  inherit (packages) version;
  format = "wheel";

  # Python 3.11 still unsupported
  disabled = pythonAtLeast "3.11";

  # Pick the upstream wheel matching the interpreter version, the OS and the
  # CPU/GPU variant; fail eval with the offending key if no such wheel is
  # listed in ./binary-hashes.nix instead of handing fetchurl an empty set.
  src = let
    pyVerNoDot = lib.replaceStrings [ "." ] [ "" ] python.pythonVersion;
    platform = if stdenv.isDarwin then "mac" else "linux";
    unit = if cudaSupport then "gpu" else "cpu";
    key = "${platform}_py_${pyVerNoDot}_${unit}";
  in fetchurl (packages.${key} or (throw "tensorflow-bin: no binary wheel available for ${key}"));

  propagatedBuildInputs = [
    astunparse
    flatbuffers
    typing-extensions
    packaging
    protobuf
    numpy
    scipy
    jax
    termcolor
    grpcio
    six
    astor
    absl-py
    gast
    opt-einsum
    google-pasta
    wrapt
    tensorflow-estimator-bin
    tensorboard
    keras-applications
    keras-preprocessing
    h5py
  ] ++ lib.optional (!isPy3k) mock
  ++ lib.optionals (pythonOlder "3.4") [ backports_weakref ];

  # `wheel` provides the `wheel unpack`/`wheel pack` CLI used in preConfigure.
  nativeBuildInputs = [ wheel ] ++ lib.optionals cudaSupport [ addOpenGLRunpath ];

  # Unpack the wheel, rewrite its METADATA so the Requires-Dist entries match
  # what nixpkgs actually packages, then repack it under its original name.
  preConfigure = ''
    unset SOURCE_DATE_EPOCH

    # Make sure that dist and the wheel file are writable.
    chmod u+rwx -R ./dist

    pushd dist

    orig_name="$(echo ./*.whl)"
    wheel unpack --dest unpacked ./*.whl
    rm ./*.whl
    (
      cd unpacked/tensorflow*
      # Adjust dependency requirements:
      # - Relax flatbuffers, gast, protobuf, tensorboard, and tensorflow-estimator version requirements that don't match what we have packaged
      # - The purpose of python3Packages.libclang is not clear at the moment and we don't have it packaged yet
      # - keras and tensorflow-io-gcs-filesystem will be considered as optional for now.
      # - numpy was pinned to fix some internal tests: https://github.com/tensorflow/tensorflow/issues/60216
      sed -i *.dist-info/METADATA \
        -e "/Requires-Dist: flatbuffers/d" \
        -e "/Requires-Dist: gast/d" \
        -e "/Requires-Dist: keras/d" \
        -e "/Requires-Dist: libclang/d" \
        -e "/Requires-Dist: protobuf/d" \
        -e "/Requires-Dist: tensorboard/d" \
        -e "/Requires-Dist: tensorflow-estimator/d" \
        -e "/Requires-Dist: tensorflow-io-gcs-filesystem/d" \
        -e "s/Requires-Dist: numpy (.*)/Requires-Dist: numpy/"
    )
    wheel pack ./unpacked/tensorflow*
    mv *.whl $orig_name # avoid changes to the _os_arch.whl suffix

    popd
  '';

  # Note that we need to run *after* the fixup phase because the
  # libraries are loaded at runtime. If we run in preFixup then
  # patchelf --shrink-rpath will remove the cuda libraries.
  postFixup =
    let
      # rpaths we only need to add if CUDA is enabled.
      cudapaths = lib.optionals cudaSupport [
        cudatoolkit.out
        cudatoolkit.lib
        cudnn
      ];

      libpaths = [
        stdenv.cc.cc.lib
        zlib
      ];

      rpath = lib.makeLibraryPath (libpaths ++ cudapaths);
    in
    lib.optionalString stdenv.isLinux ''
      # This is an array containing all the directories in the tensorflow2
      # package that contain .so files.
      #
      # TODO: Create this list programmatically, and remove paths that aren't
      # actually needed.
      rrPathArr=(
        "$out/${python.sitePackages}/tensorflow/"
        "$out/${python.sitePackages}/tensorflow/core/kernels"
        "$out/${python.sitePackages}/tensorflow/compiler/tf2tensorrt/"
        "$out/${python.sitePackages}/tensorflow/compiler/tf2xla/ops/"
        "$out/${python.sitePackages}/tensorflow/lite/experimental/microfrontend/python/ops/"
        "$out/${python.sitePackages}/tensorflow/lite/python/interpreter_wrapper/"
        "$out/${python.sitePackages}/tensorflow/lite/python/optimize/"
        "$out/${python.sitePackages}/tensorflow/python/"
        "$out/${python.sitePackages}/tensorflow/python/framework/"
        "$out/${python.sitePackages}/tensorflow/python/autograph/impl/testing"
        "$out/${python.sitePackages}/tensorflow/python/data/experimental/service"
        "$out/${python.sitePackages}/tensorflow/python/framework"
        "$out/${python.sitePackages}/tensorflow/python/profiler/internal"
        "${rpath}"
      )

      # Turn the bash array into a colon-separated list of RPATHs.
      rrPath=$(IFS=$':'; echo "''${rrPathArr[*]}")
      echo "about to run patchelf with the following rpath: $rrPath"

      find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
        echo "about to patchelf $lib..."
        chmod a+rx "$lib"
        patchelf --set-rpath "$rrPath" "$lib"
        ${lib.optionalString cudaSupport ''
          addOpenGLRunpath "$lib"
        ''}
      done
    '';

  # Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
  # and the propagated input tensorboard, which causes environment collisions.
  # Another possibility would be to have tensorboard only in the buildInputs
  # See https://github.com/NixOS/nixpkgs/pull/44381 for more information.
  postInstall = ''
    rm $out/bin/tensorboard
  '';

  pythonImportsCheck = [
    "tensorflow"
    "tensorflow.python"
    "tensorflow.python.framework"
  ];

  passthru = {
    # Expose the CUDA package set used (possibly empty) so dependents can
    # match against it.
    inherit cudaPackages;
  };

  meta = with lib; {
    description = "Computation using data flow graphs for scalable machine learning";
    homepage = "https://www.tensorflow.org/";
    sourceProvenance = with sourceTypes; [ binaryNativeCode ];
    license = licenses.asl20;
    maintainers = with maintainers; [ jyp abbradar ];
    platforms = [ "x86_64-linux" "x86_64-darwin" ];
  };
}