llama-cpp: 5985 -> 6123

+22 -5
+15
pkgs/by-name/ll/llama-cpp/disable_bfloat16.patch
index b97e7bf9..9cdc62a4 100644
--- a/ggml/src/ggml-vulkan/CMakeLists.txt
+++ b/ggml/src/ggml-vulkan/CMakeLists.txt
@@ -70,11 +70,5 @@ if (Vulkan_FOUND)
         "GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT"
     )
 
-    test_shader_extension_support(
-        "GL_EXT_bfloat16"
-        "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_bfloat16_support.comp"
-        "GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT"
-    )
-
     target_link_libraries(ggml-vulkan PRIVATE Vulkan::Vulkan)
     target_include_directories(ggml-vulkan PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
+7 -5
pkgs/by-name/ll/llama-cpp/package.nix
--- a/pkgs/by-name/ll/llama-cpp/package.nix
+++ b/pkgs/by-name/ll/llama-cpp/package.nix
@@ -72,19 +72,21 @@
 in
 effectiveStdenv.mkDerivation (finalAttrs: {
   pname = "llama-cpp";
-  version = "5985";
+  version = "6123";
 
   src = fetchFromGitHub {
     owner = "ggml-org";
     repo = "llama.cpp";
     tag = "b${finalAttrs.version}";
-    hash = "sha256-OoV/p4Es/X/xQW7PpDLq5YLVYjieIE5+1itvtJECH54=";
+    hash = "sha256-4kqbKGPPOkOkHXA4IeLuj/0P5jpqtGlGuVKeUD4UhZY=";
     leaveDotGit = true;
     postFetch = ''
       git -C "$out" rev-parse --short HEAD > $out/COMMIT
       find "$out" -name .git -print0 | xargs -0 rm -rf
     '';
   };
+
+  patches = lib.optionals vulkanSupport [ ./disable_bfloat16.patch ];
 
   postPatch = ''
     # Workaround for local-ai package which overrides this package to an older llama-cpp
@@ -139,11 +141,11 @@
     ++ optionals cudaSupport [
       (cmakeFeature "CMAKE_CUDA_ARCHITECTURES" cudaPackages.flags.cmakeCudaArchitecturesString)
     ]
-    ++ optionals rocmSupport ([
+    ++ optionals rocmSupport [
       (cmakeFeature "CMAKE_HIP_COMPILER" "${rocmPackages.clr.hipClangPath}/clang++")
       # TODO: this should become `clr.gpuTargets` in the future.
       (cmakeFeature "CMAKE_HIP_ARCHITECTURES" rocmPackages.rocblas.amdgpu_targets)
-    ])
+    ]
     ++ optionals metalSupport [
       (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
       (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" true)
@@ -186,6 +188,6 @@
     ];
     platforms = platforms.unix;
     badPlatforms = optionals (cudaSupport || openclSupport) lib.platforms.darwin;
-    broken = (metalSupport && !effectiveStdenv.hostPlatform.isDarwin);
+    broken = metalSupport && !effectiveStdenv.hostPlatform.isDarwin;
   };
 })