{
  lib,
  buildPythonPackage,
  fetchFromGitHub,
  fetchurl,

  # build-system
  poetry-core,

  # dependencies
  docx2txt,
  fastapi,
  injector,
  llama-index-core,
  llama-index-readers-file,
  python-multipart,
  pyyaml,
  transformers,
  uvicorn,
  watchdog,

  # optional-dependencies
  python,
  huggingface-hub,
  gradio,

  # tests
  nixosTests,
}:

buildPythonPackage rec {
  pname = "private-gpt";
  version = "0.6.2";
  pyproject = true;

  src = fetchFromGitHub {
    owner = "zylon-ai";
    repo = "private-gpt";
    rev = "refs/tags/v${version}";
    hash = "sha256-IYTysU3W/NrtBuLe3ZJkztVSK+gzjkGIg0qcBYzB3bs=";
  };

  build-system = [ poetry-core ];

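  # Relax the strict version constraints declared in upstream's pyproject.toml
  # so the package builds against the versions available in nixpkgs.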
  pythonRelaxDeps = [
    "cryptography"
    "fastapi"
    "llama-index-core"
    "llama-index-readers-file"
    "python-multipart"
  ];

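  # Every optional dependency group is folded into the runtime dependencies,
  # so all backends are available out of the box.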
  dependencies = [
    docx2txt
    fastapi
    injector
    llama-index-core
    llama-index-readers-file
    python-multipart
    pyyaml
    transformers
    uvicorn
    watchdog
  ] ++ lib.flatten (builtins.attrValues optional-dependencies);

  # This is needed for running the tests and the service in offline mode.
  # See the related issue at https://github.com/zylon-ai/private-gpt/issues/1870
  passthru.cl100k_base.tiktoken = fetchurl {
    url = "https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken";
    hash = "sha256-Ijkht27pm96ZW3/3OFE+7xAPtR0YyTWXoRO8/+hlsqc=";
  };

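  # Optional dependency groups, mirroring upstream's extras; all of them are
  # pulled into `dependencies` above.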
  optional-dependencies = with python.pkgs; {
    embeddings-huggingface = [
      huggingface-hub
      llama-index-embeddings-huggingface
    ];
    embeddings-ollama = [ llama-index-embeddings-ollama ];
    embeddings-openai = [ llama-index-embeddings-openai ];
    embeddings-sagemaker = [ boto3 ];
    llms-ollama = [ llama-index-llms-ollama ];
    llms-openai = [ llama-index-llms-openai ];
    llms-openai-like = [ llama-index-llms-openai-like ];
    llms-sagemaker = [ boto3 ];
    ui = [ gradio ];
    vector-stores-chroma = [ llama-index-vector-stores-chroma ];
    vector-stores-postgres = [ llama-index-vector-stores-postgres ];
    vector-stores-qdrant = [ llama-index-vector-stores-qdrant ];
  };

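  # Install the settings*.yaml profiles next to the Python module so they are
  # available at runtime.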
  postInstall = ''
    cp settings*.yaml $out/${python.sitePackages}/private_gpt/
  '';

  pythonImportsCheck = [ "private_gpt" ];

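  # Expose the NixOS VM test for this package.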
  passthru.tests = {
    inherit (nixosTests) private-gpt;
  };

  meta = {
    changelog = "https://github.com/zylon-ai/private-gpt/blob/${src.rev}/CHANGELOG.md";
    description = "Interact with your documents using the power of GPT, 100% privately, no data leaks";
    homepage = "https://github.com/zylon-ai/private-gpt";
    license = lib.licenses.asl20;
    mainProgram = "private-gpt";
    maintainers = with lib.maintainers; [ GaetanLepage ];
  };
}