{
  lib,
  stdenv,
  buildPythonPackage,
  autoAddDriverRunpath,
  fetchurl,
  python,
  pythonAtLeast,
  pythonOlder,
  addOpenGLRunpath,
  cudaPackages,
  future,
  numpy,
  autoPatchelfHook,
  pyyaml,
  requests,
  setuptools,
  typing-extensions,
  sympy,
  jinja2,
  networkx,
  filelock,
  openai-triton,
}:

let
  pyVerNoDot = builtins.replaceStrings [ "." ] [ "" ] python.pythonVersion;
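  # pyVerNoDot turns e.g. "3.11" into "311". A sketch of the shape expected
  # from ./binary-hashes.nix below (the key, URL, and hash here are
  # placeholders, not the real values):
  #   version: {
  #     x86_64-linux-311 = {
  #       url = "https://download.pytorch.org/whl/...";
  #       hash = "sha256-...";
  #     };
  #   }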
  srcs = import ./binary-hashes.nix version;
  unsupported = throw "Unsupported system";
  version = "2.3.0";
in
buildPythonPackage {
  inherit version;

  pname = "torch";
  # Don't forget to update torch to the same version.

  format = "wheel";

  disabled = (pythonOlder "3.8") || (pythonAtLeast "3.13");

  src = fetchurl srcs."${stdenv.system}-${pyVerNoDot}" or unsupported;
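  # Attribute selection binds tighter than function application, so the line
  # above parses as fetchurl (srcs."..." or unsupported): on platform/Python
  # combinations without a wheel (keys look like "x86_64-linux-311") the throw
  # from the let block fires instead of a missing-attribute error.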

  nativeBuildInputs = lib.optionals stdenv.isLinux [
    addOpenGLRunpath
    autoPatchelfHook
    autoAddDriverRunpath
  ];
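  # autoPatchelfHook rewrites the RUNPATHs of the prebuilt ELF files to point
  # at the buildInputs below; addOpenGLRunpath/autoAddDriverRunpath also add
  # /run/opengl-driver/lib, where the NVIDIA userspace driver (libcuda.so.1)
  # lives at runtime on NixOS.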

  buildInputs = lib.optionals stdenv.isLinux (
    with cudaPackages;
    [
      # $out/${sitePackages}/nvfuser/_C*.so wants libnvToolsExt.so.1 but
      # torch/lib only ships libnvToolsExt-$hash.so.1
      cuda_nvtx

      cuda_cudart
      cuda_cupti
      cuda_nvrtc
      cudnn
      libcublas
      libcufft
      libcurand
      libcusolver
      libcusparse
      nccl
    ]
  );

  autoPatchelfIgnoreMissingDeps = lib.optionals stdenv.isLinux [
    # This is the hardware-dependent userspace driver that comes with the
    # nvidia_x11 package. It must be deployed at runtime in
    # /run/opengl-driver/lib or pointed at by the LD_LIBRARY_PATH variable,
    # rather than pinned in the runpath.
    "libcuda.so.1"
  ];

  propagatedBuildInputs = [
    future
    numpy
    pyyaml
    requests
    setuptools
    typing-extensions
    sympy
    jinja2
    networkx
    filelock
  ] ++ lib.optionals (stdenv.isLinux && stdenv.isx86_64) [ openai-triton ];
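  # openai-triton (used by torch.compile) is only pulled in on x86_64-linux,
  # matching the platforms for which binary triton builds are available.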

  postInstall = ''
    # Drop the scripts shipped in the wheel, such as the ONNX conversion
    # helpers.
    rm -rf $out/bin
  '';

  postFixup = lib.optionalString stdenv.isLinux ''
    addAutoPatchelfSearchPath "$out/${python.sitePackages}/torch/lib"
  '';
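  # addAutoPatchelfSearchPath is provided by autoPatchelfHook; the call above
  # lets the libraries bundled in torch/lib satisfy each other's DT_NEEDED
  # entries instead of being resolved from other packages.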

  # Do not strip the prebuilt libraries: stripping breaks them with
  # `ImportError: libtorch_cuda_cpp.so: ELF load command address/offset not properly aligned`.
  dontStrip = true;

  pythonImportsCheck = [ "torch" ];

  meta = with lib; {
    description = "PyTorch: Tensors and Dynamic neural networks in Python with strong GPU acceleration";
    homepage = "https://pytorch.org/";
    changelog = "https://github.com/pytorch/pytorch/releases/tag/v${version}";
    # Includes CUDA and Intel MKL, whose licenses do not restrict
    # redistribution of the binary:
    # https://docs.nvidia.com/cuda/eula/index.html
    # https://www.intel.com/content/www/us/en/developer/articles/license/onemkl-license-faq.html
    # torch itself is licensed BSD-3-Clause (bsd3 below).
    # torch-bin used to vendor CUDA. It still links against CUDA and MKL.
    license = with licenses; [
      bsd3
      issl
      unfreeRedistributable
    ];
    sourceProvenance = with sourceTypes; [ binaryNativeCode ];
    platforms = [
      "aarch64-darwin"
      "aarch64-linux"
      "x86_64-linux"
    ];
    hydraPlatforms = [ ]; # output size 3.2G on 1.11.0
    maintainers = with maintainers; [ junjihashimoto ];
  };
}
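# Minimal usage sketch, assuming this expression is exposed as `torch-bin` in
# `python3Packages` (the attribute name is an assumption, not defined here):
#   python3.withPackages (ps: [ ps.torch-bin ])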