lol

tabby: 0.24.0 -> 0.26.0 (#393336)

Co-authored-by: ghthor <ghthor@gmail.com>

Authored by nixpkgs-merge-bot[bot] and ghthor; committed by GitHub.

Commits: 1c3d6e52, c48ca1c5

+3 -3
+3 -3
pkgs/by-name/ta/tabby/package.nix
--- a/pkgs/by-name/ta/tabby/package.nix
+++ b/pkgs/by-name/ta/tabby/package.nix
@@ -32,7 +32,7 @@
   # https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/misc/ollama/default.nix

   pname = "tabby";
-  version = "0.24.0";
+  version = "0.26.0";

   availableAccelerations = flatten [
     (optional cudaSupport "cuda")
@@ -121,12 +121,12 @@
     owner = "TabbyML";
     repo = "tabby";
     tag = "v${version}";
-    hash = "sha256-poWUfPp/7w6dNjh6yoP5oTbaP4lL91hb1+zQG8tjUDE=";
+    hash = "sha256-OIt0UtknzPikGowfYWMufBXl0Ktt6zsZKqRMx63UqR4=";
     fetchSubmodules = true;
   };

   useFetchCargoVendor = true;
-  cargoHash = "sha256-CTn/b42FI+Y6qy3MKVESIbIlsXmIkZBlxUXnRtHWZcc=";
+  cargoHash = "sha256-wkd2EVCyWkUEo/gqNuX+P5wDeNmx0Jrd7UhhvIZwAFU=";

   # Don't need to build llama-cpp-server (included in default build)
   # We also don't add CUDA features here since we're using the overridden llama-cpp package