pyrox.dev/nixpkgs
llama-cpp: 5985 -> 6123
Peter H. Hoeg · 6 months ago
3c3372cd · 85dbfc7a
+22 -5 · 2 changed files
pkgs/by-name/ll/llama-cpp/disable_bfloat16.patch (new file, +15)

index b97e7bf9..9cdc62a4 100644
--- a/ggml/src/ggml-vulkan/CMakeLists.txt
+++ b/ggml/src/ggml-vulkan/CMakeLists.txt
@@ -70,11 +70,5 @@ if (Vulkan_FOUND)
         "GGML_VULKAN_INTEGER_DOT_GLSLC_SUPPORT"
     )
 
-    test_shader_extension_support(
-        "GL_EXT_bfloat16"
-        "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_bfloat16_support.comp"
-        "GGML_VULKAN_BFLOAT16_GLSLC_SUPPORT"
-    )
-
     target_link_libraries(ggml-vulkan PRIVATE Vulkan::Vulkan)
     target_include_directories(ggml-vulkan PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
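The new patch drops ggml's GL_EXT_bfloat16 shader-extension probe from the Vulkan CMake setup, and package.nix below only applies it when the Vulkan backend is enabled. A minimal sketch of exercising that code path from a checkout of this tree, assuming the package's other arguments keep their defaults (the file name test-vulkan.nix and the use of `import ./. { }` are illustrative, not part of this commit):

# test-vulkan.nix -- hypothetical helper, not part of this commit.
# Builds llama-cpp from the local nixpkgs checkout with the Vulkan backend
# enabled, the only configuration that pulls in disable_bfloat16.patch.
let
  pkgs = import ./. { };   # this nixpkgs tree
in
pkgs.llama-cpp.override { vulkanSupport = true; }

Built with `nix-build test-vulkan.nix`, this should apply the patch in the patch phase and compile the Vulkan shaders without the bfloat16 variants.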
pkgs/by-name/ll/llama-cpp/package.nix (+7 -5)

@@ -72,19 +72,21 @@
 in
 effectiveStdenv.mkDerivation (finalAttrs: {
   pname = "llama-cpp";
-  version = "5985";
+  version = "6123";
 
   src = fetchFromGitHub {
     owner = "ggml-org";
     repo = "llama.cpp";
     tag = "b${finalAttrs.version}";
-    hash = "sha256-OoV/p4Es/X/xQW7PpDLq5YLVYjieIE5+1itvtJECH54=";
+    hash = "sha256-4kqbKGPPOkOkHXA4IeLuj/0P5jpqtGlGuVKeUD4UhZY=";
     leaveDotGit = true;
     postFetch = ''
       git -C "$out" rev-parse --short HEAD > $out/COMMIT
       find "$out" -name .git -print0 | xargs -0 rm -rf
     '';
   };
+
+  patches = lib.optionals vulkanSupport [ ./disable_bfloat16.patch ];
 
   postPatch = ''
     # Workaround for local-ai package which overrides this package to an older llama-cpp
@@ -139,11 +141,11 @@
     ++ optionals cudaSupport [
       (cmakeFeature "CMAKE_CUDA_ARCHITECTURES" cudaPackages.flags.cmakeCudaArchitecturesString)
     ]
-    ++ optionals rocmSupport ([
+    ++ optionals rocmSupport [
       (cmakeFeature "CMAKE_HIP_COMPILER" "${rocmPackages.clr.hipClangPath}/clang++")
       # TODO: this should become `clr.gpuTargets` in the future.
       (cmakeFeature "CMAKE_HIP_ARCHITECTURES" rocmPackages.rocblas.amdgpu_targets)
-    ])
+    ]
     ++ optionals metalSupport [
      (cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1")
      (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" true)
@@ -186,6 +188,6 @@
     ];
     platforms = platforms.unix;
     badPlatforms = optionals (cudaSupport || openclSupport) lib.platforms.darwin;
-    broken = (metalSupport && !effectiveStdenv.hostPlatform.isDarwin);
+    broken = metalSupport && !effectiveStdenv.hostPlatform.isDarwin;
   };
 })
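For a routine bump like this, only `version` and `hash` are edited by hand; `tag` follows `finalAttrs.version` automatically. One common way to obtain the new hash (a sketch, not necessarily how this commit was produced) is to set it to `lib.fakeHash`, rebuild, and copy the correct value out of the resulting hash-mismatch error:

  # Temporary state of the src block while refreshing the hash (sketch only).
  src = fetchFromGitHub {
    owner = "ggml-org";
    repo = "llama.cpp";
    tag = "b${finalAttrs.version}";
    hash = lib.fakeHash;   # placeholder; Nix prints the real sha256 when the first fetch fails
    leaveDotGit = true;
    postFetch = ''
      git -C "$out" rev-parse --short HEAD > $out/COMMIT
      find "$out" -name .git -print0 | xargs -0 rm -rf
    '';
  };

Because leaveDotGit and postFetch post-process the checkout, the recorded hash covers that processed output rather than a plain source tarball.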