{
  lib,
  stdenv,
  python,
  buildPythonPackage,
  pythonOlder,
  pythonAtLeast,
  fetchurl,

  # nativeBuildInputs
  addDriverRunpath,
  autoAddDriverRunpath,
  autoPatchelfHook,

  # buildInputs
  cudaPackages,

  # dependencies
  filelock,
  future,
  jinja2,
  networkx,
  numpy,
  pyyaml,
  requests,
  setuptools,
  sympy,
  typing-extensions,
  triton,

  callPackage,
}:

let
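  # Wheel tags use the Python version without the dot, e.g. "3.12" -> "312".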
  pyVerNoDot = builtins.replaceStrings [ "." ] [ "" ] python.pythonVersion;
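  # binary-hashes.nix is expected to evaluate to a function from the version to
  # an attrset of fetchurl arguments keyed on "${system}-${pyVerNoDot}".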
  srcs = import ./binary-hashes.nix version;
  unsupported = throw "Unsupported system";
  version = "2.5.1";
in
buildPythonPackage {
  inherit version;

  pname = "torch";
  # Don't forget to update torch to the same version.

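  # Install the upstream wheel as-is instead of building PyTorch from source.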
  format = "wheel";

  disabled = (pythonOlder "3.9") || (pythonAtLeast "3.13");

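  # Pick the wheel matching the current system and Python version; the `or`
  # falls back to the `throw` above when no hash exists for the combination.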
  src = fetchurl srcs."${stdenv.system}-${pyVerNoDot}" or unsupported;

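  # autoPatchelfHook rewrites the runpaths of the prebuilt ELF files, and
  # autoAddDriverRunpath points them at the driver libraries under
  # /run/opengl-driver/lib.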
  nativeBuildInputs = lib.optionals stdenv.hostPlatform.isLinux [
    addDriverRunpath
    autoAddDriverRunpath
    autoPatchelfHook
  ];

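  # CUDA libraries the wheel links against; autoPatchelfHook resolves them into
  # the runpaths of the bundled shared objects.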
  buildInputs = lib.optionals stdenv.hostPlatform.isLinux (
    with cudaPackages;
    [
      # $out/${sitePackages}/nvfuser/_C*.so wants libnvToolsExt.so.1 but torch/lib only ships
      # libnvToolsExt-$hash.so.1
      cuda_nvtx

      cuda_cudart
      cuda_cupti
      cuda_nvrtc
      cudnn
      libcublas
      libcufft
      libcurand
      libcusolver
      libcusparse
      nccl
    ]
  );

  autoPatchelfIgnoreMissingDeps = lib.optionals stdenv.hostPlatform.isLinux [
    # This is the hardware-dependent userspace driver that comes from the
    # nvidia_x11 package. It must be deployed at runtime in
    # /run/opengl-driver/lib or pointed at by the LD_LIBRARY_PATH variable,
    # rather than pinned in the runpath.
    "libcuda.so.1"
  ];

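  # Runtime Python dependencies of the wheel; triton is only added on
  # x86_64-linux below.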
  dependencies = [
    filelock
    future
    jinja2
    networkx
    numpy
    pyyaml
    requests
    setuptools
    sympy
    typing-extensions
  ] ++ lib.optionals (stdenv.hostPlatform.isLinux && stdenv.hostPlatform.isx86_64) [ triton ];

  postInstall = ''
    # Remove the executables shipped in the wheel, such as torchrun and the
    # ONNX conversion scripts.
    rm -rf $out/bin
  '';

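  # Let autoPatchelfHook also resolve the libraries that the wheel bundles in
  # torch/lib.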
  postFixup = lib.optionalString stdenv.hostPlatform.isLinux ''
    addAutoPatchelfSearchPath "$out/${python.sitePackages}/torch/lib"
  '';

  # See https://github.com/NixOS/nixpkgs/issues/296179
  #
  # This is a quick hack to add `libnvrtc` to the runpath so that torch can find
  # it when it is needed at runtime.
  extraRunpaths = lib.optionals stdenv.hostPlatform.isLinux [
    "${lib.getLib cudaPackages.cuda_nvrtc}/lib"
  ];
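  # postPhases run at the very end of the build, so this patching happens after
  # autoPatchelfHook has already set the runpaths during fixup.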
  postPhases = lib.optionals stdenv.hostPlatform.isLinux [ "postPatchelfPhase" ];
  postPatchelfPhase = ''
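    # Append each entry of $extraRunpaths to the runpath of every shared object
    # in the library output and in $out.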
    while IFS= read -r -d $'\0' elf ; do
      for extra in $extraRunpaths ; do
        echo patchelf "$elf" --add-rpath "$extra" >&2
        patchelf "$elf" --add-rpath "$extra"
      done
    done < <(
      find "''${!outputLib}" "$out" -type f -iname '*.so' -print0
    )
  '';

  # Do not strip the wheel's binaries; stripping them triggers
  # `ImportError: libtorch_cuda_cpp.so: ELF load command address/offset not properly aligned.`
  dontStrip = true;

  pythonImportsCheck = [ "torch" ];

  passthru.tests = callPackage ./tests.nix { };

  meta = {
    description = "PyTorch: Tensors and Dynamic neural networks in Python with strong GPU acceleration";
    homepage = "https://pytorch.org/";
    changelog = "https://github.com/pytorch/pytorch/releases/tag/v${version}";
    # Includes CUDA and Intel MKL, but redistribution of the binary is not restricted.
    # https://docs.nvidia.com/cuda/eula/index.html
    # https://www.intel.com/content/www/us/en/developer/articles/license/onemkl-license-faq.html
    # torch itself is licensed under BSD-3-Clause.
    # torch-bin used to vendor CUDA; it still links against CUDA and MKL.
    license = with lib.licenses; [
      bsd3
      issl
      unfreeRedistributable
    ];
    sourceProvenance = with lib.sourceTypes; [ binaryNativeCode ];
    platforms = [
      "aarch64-darwin"
      "aarch64-linux"
      "x86_64-linux"
    ];
    hydraPlatforms = [ ]; # output size 3.2G on 1.11.0
    maintainers = with lib.maintainers; [ junjihashimoto ];
  };
}