{ stdenv, fetchurl, buildPythonPackage, pythonOlder,
  cudaSupport ? false, cudatoolkit ? null, cudnn ? null,
  fetchFromGitHub, lib, numpy, pyyaml, cffi, typing, cmake, hypothesis, numactl,
  linkFarm, symlinkJoin,
  utillinux, which }:

assert cudnn == null || cudatoolkit != null;
assert !cudaSupport || cudatoolkit != null;
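
# CUDA support is opt-in. A minimal sketch of a CUDA-enabled override
# (the cudatoolkit/cudnn attribute names below are illustrative and come
# from the caller's package set):
#   pytorch.override {
#     cudaSupport = true;
#     cudatoolkit = pkgs.cudatoolkit;
#     cudnn = pkgs.cudnn;
#   }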

let
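  # nixpkgs splits cudatoolkit into multiple outputs; join them back into
  # one tree so the build's CUDA detection sees a single prefix.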
  cudatoolkit_joined = symlinkJoin {
    name = "${cudatoolkit.name}-unsplit";
    paths = [ cudatoolkit.out cudatoolkit.lib ];
  };

  # Normally libcuda.so.1 is provided at runtime by nvidia-x11 via
  # LD_LIBRARY_PATH=/run/opengl-driver/lib. We only use the stub
  # libcuda.so from cudatoolkit for running tests, so that we don't have
  # to recompile pytorch on every update to nvidia-x11 or the kernel.
  cudaStub = linkFarm "cuda-stub" [{
    name = "libcuda.so.1";
    path = "${cudatoolkit}/lib/stubs/libcuda.so";
  }];
  cudaStubEnv = lib.optionalString cudaSupport
    "LD_LIBRARY_PATH=${cudaStub}\${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH} ";

in buildPythonPackage rec {
  version = "1.0.0";
  pname = "pytorch";

  src = fetchFromGitHub {
    owner = "pytorch";
    repo = "pytorch";
    rev = "v${version}";
    fetchSubmodules = true;
    sha256 = "076cpbig4sywn9vv674c0xdg832sdrd5pk1d0725pjkm436kpvlm";
  };
  patches = [
    # Skip two tests that are only meant to run on multi-GPU setups
    (fetchurl {
      url = "https://github.com/pytorch/pytorch/commit/bfa666eb0deebac21b03486e26642fd70d66e478.patch";
      sha256 = "1fgblcj02gjc0y62svwc5gnml879q3x2z7m69c9gax79dpr37s9i";
    })
  ];

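  # nvcc accepts only a matching host compiler, so point the build at the gcc
  # that nixpkgs ships alongside cudatoolkit instead of the default stdenv one.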
  preConfigure = lib.optionalString cudaSupport ''
    export CC=${cudatoolkit.cc}/bin/gcc CXX=${cudatoolkit.cc}/bin/g++
  '' + lib.optionalString (cudaSupport && cudnn != null) ''
    export CUDNN_INCLUDE_DIR=${cudnn}/include
  '';

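  # strip2 below drops the first two rpath entries (which appear to be
  # leftover build paths) and prepends $ORIGIN, so each caffe2 library
  # resolves its siblings relative to its own location instead.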
  preFixup = ''
    function join_by { local IFS="$1"; shift; echo "$*"; }
    function strip2 {
      IFS=':'
      read -ra RP <<< $(patchelf --print-rpath $1)
      IFS=' '
      RP_NEW=$(join_by : ''${RP[@]:2})
      patchelf --set-rpath \$ORIGIN:''${RP_NEW} "$1"
    }

    for f in $(find ''${out} -name 'libcaffe2*.so')
    do
      strip2 $f
    done
  '';

  # Override the (weirdly) wrong version that setup.py sets by default. See
  # https://github.com/NixOS/nixpkgs/pull/52437#issuecomment-449718038
  # https://github.com/pytorch/pytorch/blob/v1.0.0/setup.py#L267
  PYTORCH_BUILD_VERSION = version;
  PYTORCH_BUILD_NUMBER = 0;

  # Suppress a weird warning in mkl-dnn, part of ideep in pytorch
  # (upstream seems to have fixed this in the wrong place?)
  # https://github.com/intel/mkl-dnn/commit/8134d346cdb7fe1695a2aa55771071d455fae0bc
  NIX_CFLAGS_COMPILE = lib.optionals (numpy.blasImplementation == "mkl") [ "-Wno-error=array-bounds" ];

  nativeBuildInputs = [
    cmake
    utillinux
    which
  ];

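  # numpy.blas is the BLAS implementation numpy itself was built against;
  # linking torch against the same one keeps the two consistent.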
  buildInputs = [
    numpy.blas
  ] ++ lib.optionals cudaSupport [ cudatoolkit_joined cudnn ]
    ++ lib.optionals stdenv.isLinux [ numactl ];

  propagatedBuildInputs = [
    cffi
    numpy
    pyyaml
  ] ++ lib.optional (pythonOlder "3.5") typing;

  checkInputs = [ hypothesis ];
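  # cudaStubEnv (defined above) prepends the stub libcuda.so.1 to
  # LD_LIBRARY_PATH for the test run only, so tests of a CUDA build can load
  # without a real NVIDIA driver; it expands to nothing when cudaSupport is
  # false.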
  checkPhase = ''
    ${cudaStubEnv}python test/run_test.py --exclude dataloader sparse torch utils thd_distributed distributed cpp_extensions
  '';

  meta = {
    description = "Open source, prototype-to-production deep learning platform";
    homepage = "https://pytorch.org/";
    license = lib.licenses.bsd3;
    platforms = lib.platforms.linux;
    maintainers = with lib.maintainers; [ teh thoughtpolice ];
  };
}