nixpkgs mirror (for testing) github.com/NixOS/nixpkgs
nix
at python-updates 160 lines 3.9 kB view raw
{
  lib,
  stdenv,
  gcc13Stdenv,
  buildPythonPackage,
  fetchFromGitHub,
  fetchpatch,

  # nativeBuildInputs
  cmake,
  ninja,
  autoAddDriverRunpath,

  # build-system
  pathspec,
  pyproject-metadata,
  scikit-build-core,

  # dependencies
  diskcache,
  jinja2,
  numpy,
  typing-extensions,

  # tests
  scipy,
  huggingface-hub,

  # passthru
  gitUpdater,
  pytestCheckHook,
  llama-cpp-python,

  config,
  cudaSupport ? config.cudaSupport,
  cudaPackages ? { },

}:
let
  # NOTE(review): CUDA builds swap in gcc13Stdenv — presumably because nvcc
  # requires a GCC major version the default stdenv does not provide; confirm
  # against cudaPackages' supported host compilers.
  stdenvTarget = if cudaSupport then gcc13Stdenv else stdenv;
in
buildPythonPackage.override { stdenv = stdenvTarget; } rec {
  pname = "llama-cpp-python";
  version = "0.3.16";
  pyproject = true;

  src = fetchFromGitHub {
    owner = "abetlen";
    repo = "llama-cpp-python";
    tag = "v${version}";
    hash = "sha256-EUDtCv86J4bznsTqNsdgj1IYkAu83cf+RydFTUb2NEE=";
    # The llama.cpp sources are vendored as a git submodule.
    fetchSubmodules = true;
  };

  patches = [
    # Fix test failure on a machine with no metal devices (e.g. nix-community darwin builder)
    # https://github.com/ggml-org/llama.cpp/pull/15531
    (fetchpatch {
      url = "https://github.com/ggml-org/llama.cpp/pull/15531/commits/63a83ffefe4d478ebadff89300a0a3c5d660f56a.patch";
      # The upstream patch is against llama.cpp itself; re-root it onto the
      # vendored submodule path inside this repository.
      stripLen = 1;
      extraPrefix = "vendor/llama.cpp/";
      hash = "sha256-9LGnzviBgYYOOww8lhiLXf7xgd/EtxRXGQMredOO4qM=";
    })
  ];

  # scikit-build-core invokes cmake itself during the Python build, so the
  # generic cmake configure hook must not run; cmakeFlags are still honored.
  dontUseCmakeConfigure = true;
  cmakeFlags = [
    # Set GGML_NATIVE=off. Otherwise, cmake attempts to build with
    # -march=native* which is either a no-op (if cc-wrapper is able to ignore
    # it), or an attempt to build a non-reproducible binary.
    #
    # This issue was spotted when cmake rules appended feature modifiers to
    # -mcpu, breaking linux build as follows:
    #
    # cc1: error: unknown value ‘native+nodotprod+noi8mm+nosve’ for ‘-mcpu’
    (lib.cmakeBool "GGML_NATIVE" false)
    (lib.cmakeFeature "GGML_BUILD_NUMBER" "1")
  ]
  ++ lib.optionals cudaSupport [
    (lib.cmakeBool "GGML_CUDA" true)
    (lib.cmakeFeature "CUDAToolkit_ROOT" "${lib.getDev cudaPackages.cuda_nvcc}")
    (lib.cmakeFeature "CMAKE_CUDA_COMPILER" "${lib.getExe cudaPackages.cuda_nvcc}")
  ];

  enableParallelBuilding = true;

  nativeBuildInputs = [
    cmake
    ninja
  ]
  ++ lib.optionals cudaSupport [
    autoAddDriverRunpath
  ];

  build-system = [
    pathspec
    pyproject-metadata
    scikit-build-core
  ];

  buildInputs = lib.optionals cudaSupport (
    with cudaPackages;
    [
      cuda_cudart # cuda_runtime.h
      cuda_cccl # <thrust/*>
      libcublas # cublas_v2.h
    ]
  );

  dependencies = [
    diskcache
    jinja2
    numpy
    typing-extensions
  ];

  nativeCheckInputs = [
    pytestCheckHook
    scipy
    huggingface-hub
  ];

  disabledTests = [
    # tries to download model from huggingface-hub
    "test_real_model"
    "test_real_llama"
  ];

  pythonImportsCheck = lib.optionals (!cudaSupport) [
    # `libllama.so` is loaded at import time, and failing when cudaSupport is enabled as the cuda
    # driver is missing in the sandbox:
    # RuntimeError: Failed to load shared library '/nix/store/...-python3.13-llama-cpp-python-0.3.16/lib/python3.13/site-packages/llama_cpp/lib/libllama.so':
    # libcuda.so.1: cannot open shared object file: No such file or directory
    "llama_cpp"
  ];

  passthru = {
    updateScript = gitUpdater {
      rev-prefix = "v";
      # Restrict to plain numeric tags so pre-release/suffixed tags are skipped.
      allowedVersions = "^[.0-9]+$";
    };
    tests = lib.optionalAttrs stdenvTarget.hostPlatform.isLinux {
      withCuda = llama-cpp-python.override {
        cudaSupport = true;
      };
    };
  };

  meta = {
    description = "Python bindings for llama.cpp";
    homepage = "https://github.com/abetlen/llama-cpp-python";
    changelog = "https://github.com/abetlen/llama-cpp-python/blob/v${version}/CHANGELOG.md";
    license = lib.licenses.mit;
    maintainers = with lib.maintainers; [
      booxter
      kirillrdy
    ];
  };
}