nixpkgs mirror (for testing)
github.com/NixOS/nixpkgs
nix
{
  lib,
  # NOTE(review): `stdenv` is accepted but never referenced below — the build
  # overrides stdenv with cudaPackages.backendStdenv instead.  Candidate for
  # removal; kept here to avoid changing the call interface.
  stdenv,
  buildPythonPackage,
  fetchFromGitHub,
  # build-system (also: fastrlock is a runtime dependency, see `dependencies`)
  cython,
  fastrlock,
  # runtime dependency
  numpy,
  # test harness (tests are disabled in the sandbox, see `doCheck`)
  pytestCheckHook,
  mock,
  setuptools,
  # CUDA toolchain: component packages, cudnn, backendStdenv
  cudaPackages,
  addDriverRunpath,
  symlinkJoin,
}:
16
let
  inherit (cudaPackages) cudnn;

  # Map a package to itself when it is marked available for the current
  # platform, and to null otherwise.  lib.mapNullable passes null straight
  # through, so a missing package stays null.  `pkg.meta.available or true`
  # treats a package without that attribute as available.
  shouldUsePkg = lib.mapNullable (pkg: if pkg.meta.available or true then pkg else null);

  # Some packages are not available on all platforms; `or null` tolerates the
  # attribute being absent entirely, and shouldUsePkg nulls out unavailable
  # ones so they can be filtered below.
  cuda_nvprof = shouldUsePkg (cudaPackages.nvprof or null);
  libcutensor = shouldUsePkg (cudaPackages.libcutensor or null);
  nccl = shouldUsePkg (cudaPackages.nccl or null);

  # CUDA components CuPy builds against.  Note: the let-bound cuda_nvprof
  # above shadows the attribute from `with cudaPackages`, so an unavailable
  # nvprof shows up as null here and is removed by the filter.
  outpaths = lib.filter (drv: drv != null) (
    with cudaPackages;
    [
      cuda_cccl # <nv/target>
      cuda_cudart
      cuda_nvcc # <crt/host_defines.h>
      cuda_nvprof
      cuda_nvrtc
      cuda_nvtx
      cuda_profiler_api
      libcublas
      libcufft
      libcurand
      libcusolver
      libcusparse
      # NOTE: libcusparse_lt is too new for CuPy, so we must do without.
      # libcusparse_lt
    ]
  );

  # Join every component -- including each of its non-default outputs
  # (dev, lib, ...) -- into one tree that looks like a monolithic toolkit.
  cudatoolkit-joined = symlinkJoin {
    name = "cudatoolkit-joined-${cudaPackages.cudaMajorMinorVersion}";
    # Use the `map` primop directly: `lib.map` is not a documented part of
    # the nixpkgs lib API, while `map` is always in scope.
    paths = outpaths ++ lib.concatMap (drv: map (output: drv.${output}) drv.outputs) outpaths;
  };
in
# Build with the CUDA-compatible stdenv so the host compiler matches what the
# chosen CUDA toolchain supports.
buildPythonPackage.override { stdenv = cudaPackages.backendStdenv; } rec {
  pname = "cupy";
  version = "13.6.0";
  pyproject = true;

  src = fetchFromGitHub {
    owner = "cupy";
    repo = "cupy";
    tag = "v${version}";
    hash = "sha256-nU3VL0MSCN+mI5m7C5sKAjBSL6ybM6YAk5lJiIDY0ck=";
    # NOTE(review): submodules appear to be needed for vendored third-party
    # sources — confirm before dropping.
    fetchSubmodules = true;
  };

  env.LDFLAGS = toString [
    # Fake libcuda.so (the real one is deployed impurely)
    "-L${lib.getOutput "stubs" cudaPackages.cuda_cudart}/lib/stubs"
  ];

  # See https://docs.cupy.dev/en/v10.2.0/reference/environment.html. Setting both
  # CUPY_NUM_BUILD_JOBS and CUPY_NUM_NVCC_THREADS to NIX_BUILD_CORES results in
  # a small amount of thrashing but it turns out there are a large number of
  # very short builds and a few extremely long ones, so setting both ends up
  # working nicely in practice.
  preConfigure = ''
    export CUPY_NUM_BUILD_JOBS="$NIX_BUILD_CORES"
    export CUPY_NUM_NVCC_THREADS="$NIX_BUILD_CORES"
  '';

  build-system = [
    cython
    fastrlock
    setuptools
  ];

  # cudatoolkit-joined appears both here (build tools, e.g. nvcc) and in
  # buildInputs (link-time libraries).
  nativeBuildInputs = [
    addDriverRunpath
    cudatoolkit-joined
  ];

  # cudnn / libcutensor / nccl may be null on platforms where they are
  # unavailable (see the shouldUsePkg filtering above); null entries in
  # buildInputs are ignored by the builder.
  buildInputs = [
    cudatoolkit-joined
    cudnn
    libcutensor
    nccl
  ];

  # NVCC = "${lib.getExe cudaPackages.cuda_nvcc}"; # FIXME: splicing/buildPackages
  # CuPy's setup reads CUDA_PATH to locate the toolkit; point it at the
  # merged tree built in the let-block.
  CUDA_PATH = "${cudatoolkit-joined}";

  dependencies = [
    fastrlock
    numpy
  ];

  nativeCheckInputs = [
    pytestCheckHook
    mock
  ];

  # Won't work with the GPU, whose drivers won't be accessible from the build
  # sandbox
  doCheck = false;

  # Patch every shared object's runpath so it can find the impurely-deployed
  # driver libraries (libcuda.so) at runtime.
  postFixup = ''
    find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
      addDriverRunpath "$lib"
    done
  '';

  enableParallelBuilding = true;

  meta = {
    description = "NumPy-compatible matrix library accelerated by CUDA";
    homepage = "https://cupy.chainer.org/";
    changelog = "https://github.com/cupy/cupy/releases/tag/${src.tag}";
    license = lib.licenses.mit;
    platforms = [
      "aarch64-linux"
      "x86_64-linux"
    ];
    maintainers = [ ];
  };
}