nixpkgs mirror (for testing) github.com/NixOS/nixpkgs
nix
at python-updates 166 lines 4.2 kB view raw
# Binary build of PyTorch: fetches an upstream prebuilt wheel (URLs/hashes in
# ./binary-hashes.nix) instead of compiling from source, then patches ELF
# runpaths on Linux so the bundled libraries resolve against cudaPackages.
{
  lib,
  stdenv,
  python,
  buildPythonPackage,
  pythonOlder,
  pythonAtLeast,
  fetchurl,

  # nativeBuildInputs
  addDriverRunpath,
  autoAddDriverRunpath,
  autoPatchelfHook,

  # buildInputs
  cudaPackages,

  # dependencies
  cuda-bindings,
  filelock,
  jinja2,
  networkx,
  numpy,
  pyyaml,
  requests,
  setuptools,
  sympy,
  typing-extensions,
  triton,

  callPackage,
}:

let
  # Python version with the dot removed (e.g. "3.12" -> "312"); used below to
  # build the wheel-selection key.
  pyVerNoDot = builtins.replaceStrings [ "." ] [ "" ] python.pythonVersion;
  # binary-hashes.nix is a function of `version`; the resulting attrset is
  # keyed as "<system>-<pyVerNoDot>" (see the `src` attribute below).
  srcs = import ./binary-hashes.nix version;
  unsupported = throw "Unsupported system";
  version = "2.10.0";
in
buildPythonPackage {
  inherit version;

  pname = "torch";
  # Don't forget to update torch to the same version.
  # NOTE(review): presumably this means keeping the source-built torch
  # derivation in lockstep with this binary one — confirm against nixpkgs.

  format = "wheel";

  # Upstream wheels exist only for Python >= 3.10 and < 3.15.
  disabled = (pythonOlder "3.10") || (pythonAtLeast "3.15");

  # Pick the wheel matching this (system, python) pair; any other combination
  # aborts evaluation with "Unsupported system".
  src = fetchurl srcs."${stdenv.system}-${pyVerNoDot}" or unsupported;

  nativeBuildInputs = lib.optionals stdenv.hostPlatform.isLinux [
    addDriverRunpath
    autoAddDriverRunpath
    autoPatchelfHook
  ];

  buildInputs = lib.optionals stdenv.hostPlatform.isLinux (
    with cudaPackages;
    [
      # $out/${sitePackages}/nvfuser/_C*.so wants libnvToolsExt.so.1 but torch/lib only ships
      # libnvToolsExt-$hash.so.1
      cuda_nvtx

      cuda_cudart
      cuda_cupti
      cuda_nvrtc
      cudnn
      libcublas
      libcufft
      libcufile
      libcurand
      libcusolver
      libcusparse
      libcusparse_lt
      libnvshmem
      nccl
    ]
  );

  autoPatchelfIgnoreMissingDeps = lib.optionals stdenv.hostPlatform.isLinux [
    # This is the hardware-dependent userspace driver that comes from
    # nvidia_x11 package. It must be deployed at runtime in
    # /run/opengl-driver/lib or pointed at by LD_LIBRARY_PATH variable, rather
    # than pinned in runpath
    "libcuda.so.1"
  ];

  dependencies = [
    filelock
    jinja2
    networkx
    numpy
    pyyaml
    requests
    setuptools
    sympy
    typing-extensions
  ]
  # CUDA Python bindings are Linux-only here; triton additionally requires
  # x86_64-linux.
  ++ lib.optionals stdenv.hostPlatform.isLinux [
    cuda-bindings
  ]
  ++ lib.optionals (stdenv.hostPlatform.isLinux && stdenv.hostPlatform.isx86_64) [ triton ];

  # Drop the wheel's CLI entry points; only the Python module is shipped.
  postInstall = ''
    # ONNX conversion
    rm -rf $out/bin
  '';

  # Let autoPatchelfHook resolve the libraries the wheel bundles in torch/lib.
  postFixup = lib.optionalString stdenv.hostPlatform.isLinux ''
    addAutoPatchelfSearchPath "$out/${python.sitePackages}/torch/lib"
  '';

  # See https://github.com/NixOS/nixpkgs/issues/296179
  #
  # This is a quick hack to add `libnvrtc` to the runpath so that torch can find
  # it when it is needed at runtime.
  extraRunpaths = lib.optionals stdenv.hostPlatform.isLinux [
    "${lib.getLib cudaPackages.cuda_nvrtc}/lib"
  ];
  # Extra phase (run after patchelf fixups) that appends $extraRunpaths to the
  # runpath of every *.so in the lib output and $out.
  postPhases = lib.optionals stdenv.hostPlatform.isLinux [ "postPatchelfPhase" ];
  postPatchelfPhase = ''
    while IFS= read -r -d $'\0' elf ; do
      for extra in $extraRunpaths ; do
        echo patchelf "$elf" --add-rpath "$extra" >&2
        patchelf "$elf" --add-rpath "$extra"
      done
    done < <(
      find "''${!outputLib}" "$out" -type f -iname '*.so' -print0
    )
  '';

  # The wheel-binary is not stripped to avoid the error of `ImportError: libtorch_cuda_cpp.so: ELF load command address/offset not properly aligned.`.
  dontStrip = true;

  pythonImportsCheck = [ "torch" ];

  passthru.tests = callPackage ../tests { };

  meta = {
    description = "PyTorch: Tensors and Dynamic neural networks in Python with strong GPU acceleration";
    homepage = "https://pytorch.org/";
    changelog = "https://github.com/pytorch/pytorch/releases/tag/v${version}";
    # Includes CUDA and Intel MKL, but redistributions of the binary are not limited.
    # https://docs.nvidia.com/cuda/eula/index.html
    # https://www.intel.com/content/www/us/en/developer/articles/license/onemkl-license-faq.html
    # torch's license is BSD3.
    # torch-bin used to vendor CUDA. It still links against CUDA and MKL.
    license = with lib.licenses; [
      bsd3
      issl
      unfreeRedistributable
    ];
    sourceProvenance = with lib.sourceTypes; [ binaryNativeCode ];
    platforms = [
      "aarch64-darwin"
      "aarch64-linux"
      "x86_64-linux"
    ];
    hydraPlatforms = [ ]; # output size 3.2G on 1.11.0
    maintainers = with lib.maintainers; [
      GaetanLepage
      junjihashimoto
    ];
  };
}