{ lib
, stdenv
, pythonAtLeast
, pythonOlder
, fetchFromGitHub
, python
, buildPythonPackage
, setuptools
, numpy
, llvmlite
, libcxx
, importlib-metadata
, substituteAll
, runCommand

, config

# CUDA-only dependencies:
, addOpenGLRunpath ? null
, cudaPackages ? {}

# CUDA flags:
, cudaSupport ? config.cudaSupport
}:

let
  inherit (cudaPackages) cudatoolkit;
in buildPythonPackage rec {
  # Use an untagged version, with numpy 1.25 support, when it's released;
  # at that point also drop the versioneer patch in postPatch.
  version = "0.58.1";
  pname = "numba";
  pyproject = true;

  disabled = pythonOlder "3.8" || pythonAtLeast "3.12";

  src = fetchFromGitHub {
    owner = "numba";
    repo = "numba";
    rev = "refs/tags/${version}";
    # Upstream uses .gitattributes to inject information about the revision
    # hash and the refname into `numba/_version.py`, see:
    #
    # - https://git-scm.com/docs/gitattributes#_export_subst and
    # - https://github.com/numba/numba/blame/5ef7c86f76a6e8cc90e9486487294e0c34024797/numba/_version.py#L25-L31
    #
    # Hence this hash may change if GitHub or Git changes its behavior.
    # Hopefully that won't happen until the next release. We are fairly sure
    # that upstream relies on those strings being valid, which is why we don't
    # use `forceFetchGit = true;`. If in the future the hash changes too often,
    # we can always use forceFetchGit and inject the relevant strings
    # ourselves, using `sed` commands, in extraPostFetch.
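    #
    # For reference, the export-subst keywords being expanded look roughly
    # like this (a sketch based on the standard versioneer template, not
    # copied verbatim from this revision):
    #
    #   git_refnames = "$Format:%d$"  # expands to e.g. " (tag: 0.58.1)"
    #   git_full = "$Format:%H$"      # expands to the full commit hash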
    hash = "sha256-1Tj2GFoUwRRCWBFxhreF+0Mr+Tjyb7+X4peO+T0qGNs=";
  };
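
  # On Darwin, point the compiler at libcxx's headers explicitly; without this
  # include path the build of the C++ parts appears not to find the standard
  # library headers.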
  env.NIX_CFLAGS_COMPILE = lib.optionalString stdenv.isDarwin "-I${lib.getDev libcxx}/include/c++/v1";

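  # numpy appears here as well as in propagatedBuildInputs because setup.py
  # compiles the C extensions against the numpy headers at build time.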
  nativeBuildInputs = [
    numpy
  ] ++ lib.optionals cudaSupport [
    addOpenGLRunpath
  ];

  propagatedBuildInputs = [
    numpy
    llvmlite
    setuptools
  ] ++ lib.optionals (pythonOlder "3.9") [
    importlib-metadata
  ] ++ lib.optionals cudaSupport [
    cudatoolkit
    cudatoolkit.lib
  ];

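  # substituteAll fills the @cuda_toolkit_path@ / @cuda_toolkit_lib_path@
  # placeholders in cuda_path.patch with the store paths given below,
  # presumably so numba can locate the CUDA toolkit without relying on a
  # $CUDA_HOME environment variable.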
  patches = lib.optionals cudaSupport [
    (substituteAll {
      src = ./cuda_path.patch;
      cuda_toolkit_path = cudatoolkit;
      cuda_toolkit_lib_path = cudatoolkit.lib;
    })
  ];

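  # addOpenGLRunpath prepends /run/opengl-driver/lib to each library's rpath,
  # so the driver-provided libcuda.so can be found at runtime; the patchelf
  # call additionally pins the CUDA toolkit's own libraries.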
  postFixup = lib.optionalString cudaSupport ''
    find $out -type f \( -name '*.so' -or -name '*.so.*' \) | while read lib; do
      addOpenGLRunpath "$lib"
      patchelf --set-rpath "${cudatoolkit}/lib:${cudatoolkit.lib}/lib:$(patchelf --print-rpath "$lib")" "$lib"
    done
  '';

  # Run a smoke test in a temporary directory so that
  # a) Python picks up the installed library in $out instead of the build files,
  # b) we have somewhere to put $HOME so some caching tests work, and
  # c) it doesn't take 6 CPU hours for the full suite.
  checkPhase = ''
    runHook preCheck

    pushd $(mktemp -d)
    HOME=. ${python.interpreter} -m numba.runtests -m $NIX_BUILD_CORES numba.tests.test_usecases
    popd

    runHook postCheck
  '';

  pythonImportsCheck = [
    "numba"
  ];

  passthru.tests = {
    # CONTRIBUTOR NOTE: numba also contains CUDA tests, but these cannot run
    # inside this sandbox environment. If you have the appropriate hardware,
    # consider manually running commands similar to the ones below outside the
    # sandbox; CUDA support is detected automatically and the corresponding
    # tests are enabled.
    # Also, the full suite currently only completes on x86_64-linux.
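    #
    # A manual CUDA run might look like the following; the test module path
    # `numba.cuda.tests` is an assumption about where the CUDA tests live:
    #
    #   python -m numba.runtests numba.cuda.tests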
    fullSuite = runCommand "${pname}-test" {} ''
      pushd $(mktemp -d)
      # pip and python need to be in $PATH for the test suite to pass fully
      PATH=${python.withPackages (p: [ p.numba p.pip ])}/bin:$PATH
      HOME=$PWD python -m numba.runtests -m $NIX_BUILD_CORES
      popd
      touch $out # stop Nix from complaining that no output was generated and failing the build
    '';
  };

  meta = with lib; {
    description = "Compiling Python code using LLVM";
    homepage = "https://numba.pydata.org/";
    license = licenses.bsd2;
    mainProgram = "numba";
    maintainers = with maintainers; [ fridh ];
  };
}