{ stdenv
, lib
, fetchurl
, buildPythonPackage
, isPy3k, pythonOlder, isPy38
, astor
, gast
, google-pasta
, wrapt
, numpy
, six
, termcolor
, protobuf
, absl-py
, grpcio
, mock
, scipy
, wheel
, opt-einsum
, backports_weakref
, tensorflow-estimator_2
, tensorflow-tensorboard_2
, cudaSupport ? false
, cudatoolkit ? null
, cudnn ? null
, nvidia_x11 ? null
, zlib
, python
, symlinkJoin
, keras-applications
, keras-preprocessing
, addOpenGLRunpath
}:

# We keep this binary build for two reasons:
# - the source build doesn't work on Darwin.
# - the source build is currently brittle and not easy to maintain.

assert cudaSupport -> cudatoolkit != null
  && cudnn != null
  && nvidia_x11 != null;

# unsupported combination
assert ! (stdenv.isDarwin && cudaSupport);

let
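  # binary-hashes.nix is expected to provide a `version` attribute plus one
  # fetchurl argument set per wheel, keyed as <platform>_py_<pyver>_<cpu|gpu>.
  # Roughly (illustrative shape only, not the real contents):
  #   {
  #     version = "...";
  #     linux_py_37_cpu = { url = "..."; sha256 = "..."; };
  #     ...
  #   }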
  packages = import ./binary-hashes.nix;

  variant = if cudaSupport then "-gpu" else "";
  pname = "tensorflow${variant}";

in buildPythonPackage {
  inherit pname;
  inherit (packages) version;
  format = "wheel";

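  # Python 3.8 is disabled, presumably because binary-hashes.nix carries no
  # py_38 wheels for this release.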
  disabled = isPy38;

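  # Pick the wheel from binary-hashes.nix by platform, Python version and
  # CPU/GPU variant; e.g. Python 3.7 on Linux without CUDA resolves to the
  # `linux_py_37_cpu` entry.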
  src = let
    pyVerNoDot = lib.strings.stringAsChars (x: if x == "." then "" else x) python.pythonVersion;
    platform = if stdenv.isDarwin then "mac" else "linux";
    unit = if cudaSupport then "gpu" else "cpu";
    key = "${platform}_py_${pyVerNoDot}_${unit}";
  in fetchurl packages.${key};

  propagatedBuildInputs = [
    protobuf
    numpy
    scipy
    termcolor
    grpcio
    six
    astor
    absl-py
    gast
    opt-einsum
    google-pasta
    wrapt
    tensorflow-estimator_2
    tensorflow-tensorboard_2
    keras-applications
    keras-preprocessing
  ] ++ lib.optional (!isPy3k) mock
    ++ lib.optionals (pythonOlder "3.4") [ backports_weakref ];

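  # `wheel` provides the `wheel unpack`/`wheel pack` commands used in
  # preConfigure; addOpenGLRunpath provides the shell function of the same
  # name called in postFixup when CUDA is enabled.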
  nativeBuildInputs = [ wheel ] ++ lib.optional cudaSupport addOpenGLRunpath;

  preConfigure = ''
    unset SOURCE_DATE_EPOCH

    # Make sure that dist and the wheel file are writable.
    chmod u+rwx -R ./dist

    pushd dist

    # Unpack the wheel file.
    wheel unpack --dest unpacked ./*.whl

    # TensorFlow has a hard dependency on gast==0.2.2, but we relax it to
    # gast==0.3.2.
    substituteInPlace ./unpacked/tensorflow*/tensorflow_core/tools/pip_package/setup.py --replace "gast == 0.2.2" "gast == 0.3.2"
    substituteInPlace ./unpacked/tensorflow*/tensorflow_*.dist-info/METADATA --replace "gast (==0.2.2)" "gast (==0.3.2)"

    # Pack the wheel file back up.
    wheel pack ./unpacked/tensorflow*

    popd
  '';

  # Note that this needs to run *after* the regular fixup phase: the
  # libraries are only loaded at runtime, so if we set the rpath in
  # preFixup, patchelf --shrink-rpath would strip the CUDA entries again.
  postFixup =
    let
      # rpaths we only need to add if CUDA is enabled.
      cudapaths = lib.optionals cudaSupport [
        cudatoolkit.out
        cudatoolkit.lib
        cudnn
        nvidia_x11
      ];

      libpaths = [
        stdenv.cc.cc.lib
        zlib
      ];

      rpath = stdenv.lib.makeLibraryPath (libpaths ++ cudapaths);
    in
    lib.optionalString stdenv.isLinux ''
      # This is an array containing all the directories in the tensorflow2
      # package that contain .so files.
      #
      # TODO: Create this list programmatically, and remove paths that aren't
      # actually needed.
      rrPathArr=(
        "$out/${python.sitePackages}/tensorflow_core/"
        "$out/${python.sitePackages}/tensorflow_core/compiler/tf2tensorrt/"
        "$out/${python.sitePackages}/tensorflow_core/compiler/tf2xla/ops/"
        "$out/${python.sitePackages}/tensorflow_core/lite/experimental/microfrontend/python/ops/"
        "$out/${python.sitePackages}/tensorflow_core/lite/python/interpreter_wrapper/"
        "$out/${python.sitePackages}/tensorflow_core/lite/python/optimize/"
        "$out/${python.sitePackages}/tensorflow_core/python/"
        "$out/${python.sitePackages}/tensorflow_core/python/framework/"
        "${rpath}"
      )

      # Turn the bash array into a colon-separated list of RPATHs.
      rrPath=$(IFS=$':'; echo "''${rrPathArr[*]}")
      echo "about to run patchelf with the following rpath: $rrPath"

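      # Make every shared object readable and executable and give it the
      # full rpath; with CUDA enabled, addOpenGLRunpath also adds the
      # system OpenGL driver path.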
      find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
        echo "about to patchelf $lib..."
        chmod a+rx "$lib"
        patchelf --set-rpath "$rrPath" "$lib"
        ${lib.optionalString cudaSupport ''
          addOpenGLRunpath "$lib"
        ''}
      done
    '';

  # Upstream has a pip hack that results in bin/tensorboard being in both tensorflow
  # and the propagated input tensorflow-tensorboard, which causes environment collisions.
  # Another possibility would be to have tensorboard only in the buildInputs.
  # See https://github.com/NixOS/nixpkgs/pull/44381 for more information.
  postInstall = ''
    rm $out/bin/tensorboard
  '';

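  # Basic smoke test: importing these modules loads the bundled native
  # libraries and therefore exercises the rpaths patched above.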
  pythonImportsCheck = [
    "tensorflow"
    "tensorflow.keras"
    "tensorflow.python"
    "tensorflow.python.framework"
  ];

  meta = with stdenv.lib; {
    description = "Computation using data flow graphs for scalable machine learning";
    homepage = "https://www.tensorflow.org/";
    license = licenses.asl20;
    maintainers = with maintainers; [ jyp abbradar cdepillabout ];
    platforms = [ "x86_64-linux" "x86_64-darwin" ];
    broken = true;
  };
}