nixpkgs mirror (for testing)
github.com/NixOS/nixpkgs
nix
{
  config,
  lib,
  rustPlatform,
  fetchFromGitHub,
  nix-update-script,
  stdenv,

  git,
  openssl,
  pkg-config,
  protobuf,
  cmake,

  llama-cpp,

  apple-sdk_15,
  autoAddDriverRunpath,
  versionCheckHook,

  cudaSupport ? config.cudaSupport,
  rocmSupport ? config.rocmSupport,
  metalSupport ? stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64,
  # one of [ null "cpu" "rocm" "cuda" "metal" ];
  acceleration ? null,
}:

let
  inherit (lib) optional optionals flatten;
  # References:
  # https://github.com/NixOS/nixpkgs/blob/master/pkgs/by-name/ll/llama-cpp/package.nix
  # https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/misc/ollama/default.nix

  pname = "tabby";
  version = "0.28.0";

  # All acceleration methods enabled through nixpkgs config or platform
  # detection, in priority order (first entry wins when several are set).
  availableAccelerations = flatten [
    (optional cudaSupport "cuda")
    (optional rocmSupport "rocm")
    (optional metalSupport "metal")
  ];

  # Pick a single acceleration method from the configured list, emitting an
  # eval-time warning when more than one is enabled; falls back to "cpu"
  # when none are configured.
  warnIfMultipleAccelerationMethods =
    configured:
    (
      let
        len = builtins.length configured;
        result = if len == 0 then "cpu" else (builtins.head configured);
      in
      lib.warnIf (len > 1) ''
        building tabby with multiple acceleration methods enabled is not
        supported; falling back to `${result}`
      '' result
    );

  # If the user did not override the acceleration attribute, then try to use one of
  # - nixpkgs.config.cudaSupport
  # - nixpkgs.config.rocmSupport
  # - metal if (stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64)
  # !! warn if multiple acceleration methods are enabled and default to the first one in the list
  featureDevice =
    if (builtins.isNull acceleration) then
      (warnIfMultipleAccelerationMethods availableAccelerations)
    else
      acceleration;

  # Returns whether the host is Linux; warns (at eval time) when the given
  # acceleration API is requested on a non-Linux host.
  warnIfNotLinux =
    api:
    (lib.warnIfNot stdenv.hostPlatform.isLinux
      "building tabby with `${api}` is only supported on linux; falling back to cpu"
      stdenv.hostPlatform.isLinux
    );
  # Returns whether the host is Darwin-aarch64; warns when the given
  # acceleration API is requested elsewhere.
  warnIfNotDarwinAarch64 =
    api:
    (lib.warnIfNot (stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64)
      "building tabby with `${api}` is only supported on Darwin-aarch64; falling back to cpu"
      (stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isAarch64)
    );

  # Aborts evaluation with a helpful message if featureDevice is not a
  # recognized value; evaluates to true otherwise.
  validAccel = lib.assertOneOf "tabby.featureDevice" featureDevice [
    "cpu"
    "rocm"
    "cuda"
    "metal"
  ];

  # TODO(ghthor): there is a bug here where featureDevice could be cuda, but enableCuda is false
  # This would result in a startup failure of the service module.
  enableRocm = validAccel && (featureDevice == "rocm") && (warnIfNotLinux "rocm");
  enableCuda = validAccel && (featureDevice == "cuda") && (warnIfNotLinux "cuda");
  enableMetal = validAccel && (featureDevice == "metal") && (warnIfNotDarwinAarch64 "metal");

  # We have to use override here because tabby doesn't actually tell llama-cpp
  # to use a specific device type as it is relying on llama-cpp only being
  # built to use one type of device.
  #
  # See: https://github.com/TabbyML/tabby/blob/v0.11.1/crates/llama-cpp-bindings/include/engine.h#L20
  #
  llamaccpPackage = llama-cpp.override {
    rocmSupport = enableRocm;
    cudaSupport = enableCuda;
    metalSupport = enableMetal;
  };

  # TODO(ghthor): some of this can be removed
  darwinBuildInputs = [
    llamaccpPackage
  ]
  ++ optionals stdenv.hostPlatform.isDarwin ([
    apple-sdk_15
  ]);

  cudaBuildInputs = [ llamaccpPackage ];
  rocmBuildInputs = [ llamaccpPackage ];

in
rustPlatform.buildRustPackage {
  inherit pname version;
  # Exposed so downstream consumers (e.g. the service module) can see which
  # acceleration method this build was configured with.
  inherit featureDevice;

  src = fetchFromGitHub {
    owner = "TabbyML";
    repo = "tabby";
    tag = "v${version}";
    hash = "sha256-cdY1/k7zZ4am6JP9ghnnJFHop/ZcnC/9alzd2MS8xqc=";
    fetchSubmodules = true;
  };

  cargoHash = "sha256-yEns0QAARmuV697/na08K8uwJWZihY3pMyCZcERDlFM=";

  # Don't need to build llama-cpp-server (included in default build)
  # We also don't add CUDA features here since we're using the overridden llama-cpp package
  cargoBuildFlags = [
    "--no-default-features"
    "--features"
    "ee"
    "--package"
    "tabby"
  ];

  nativeInstallCheckInputs = [
    versionCheckHook
  ];
  versionCheckProgramArg = "--version";
  doInstallCheck = true;

  nativeBuildInputs = [
    git
    pkg-config
    protobuf
    cmake
  ]
  ++ optionals enableCuda [
    autoAddDriverRunpath
  ];

  buildInputs = [
    openssl
  ]
  ++ optionals stdenv.hostPlatform.isDarwin darwinBuildInputs
  ++ optionals enableCuda cudaBuildInputs
  ++ optionals enableRocm rocmBuildInputs;

  postInstall = ''
    # NOTE: Project contains a subproject for building llama-server
    # But, we already have a derivation for this
    #
    # Link the accelerated (overridden) llama-cpp's server here; linking the
    # plain `llama-cpp` would ship a CPU-only llama-server even for
    # cuda/rocm/metal builds (see the override rationale above).
    ln -s ${lib.getExe' llamaccpPackage "llama-server"} $out/bin/llama-server
  '';

  env = {
    # Use nixpkgs' openssl instead of the vendored copy.
    OPENSSL_NO_VENDOR = 1;
  };

  # Fails with:
  # file cannot create directory: /var/empty/local/lib64/cmake/Llama
  doCheck = false;

  passthru.updateScript = nix-update-script {
    extraArgs = [
      "--version-regex"
      "^v([0-9.]+)$"
    ];
  };

  meta = {
    homepage = "https://github.com/TabbyML/tabby";
    changelog = "https://github.com/TabbyML/tabby/releases/tag/v${version}";
    description = "Self-hosted AI coding assistant";
    mainProgram = "tabby";
    license = lib.licenses.asl20;
    maintainers = [ lib.maintainers.ghthor ];
    broken = stdenv.hostPlatform.isDarwin && !stdenv.hostPlatform.isAarch64;
  };
}