{
  lib,
  buildPythonPackage,
  fetchFromGitHub,
  fetchurl,

  # build-system
  poetry-core,

  # dependencies
  cryptography,
  docx2txt,
  fastapi,
  injector,
  llama-index-core,
  llama-index-readers-file,
  python-multipart,
  pyyaml,
  transformers,
  watchdog,

  # optional-dependencies
  python,
  huggingface-hub,
  gradio,

  # tests
  nixosTests,
}:

buildPythonPackage rec {
  pname = "private-gpt";
  version = "0.6.2";
  pyproject = true;

  src = fetchFromGitHub {
    owner = "zylon-ai";
    repo = "private-gpt";
    tag = "v${version}";
    hash = "sha256-IYTysU3W/NrtBuLe3ZJkztVSK+gzjkGIg0qcBYzB3bs=";
  };

  build-system = [ poetry-core ];

  # Upstream pins these too tightly for the versions shipped in nixpkgs.
  pythonRelaxDeps = [
    "cryptography"
    "docx2txt"
    "fastapi"
    "llama-index-core"
    "llama-index-readers-file"
    "python-multipart"
    "watchdog"
  ];

  # All optional dependency groups are folded into the main dependency set.
  dependencies = [
    cryptography
    docx2txt
    fastapi
    injector
    llama-index-core
    llama-index-readers-file
    python-multipart
    pyyaml
    transformers
    watchdog
  ] ++ lib.flatten (builtins.attrValues optional-dependencies);

  optional-dependencies = with python.pkgs; {
    embeddings-huggingface = [
      huggingface-hub
      llama-index-embeddings-huggingface
    ];
    embeddings-ollama = [ llama-index-embeddings-ollama ];
    embeddings-openai = [ llama-index-embeddings-openai ];
    embeddings-sagemaker = [ boto3 ];
    llms-ollama = [ llama-index-llms-ollama ];
    llms-openai = [ llama-index-llms-openai ];
    llms-openai-like = [ llama-index-llms-openai-like ];
    llms-sagemaker = [ boto3 ];
    ui = [ gradio ];
    vector-stores-chroma = [ llama-index-vector-stores-chroma ];
    vector-stores-postgres = [ llama-index-vector-stores-postgres ];
    vector-stores-qdrant = [ llama-index-vector-stores-qdrant ];
  };

  # The settings files live next to the sources upstream; ship them inside
  # the installed package so the service can find them.
  postInstall = ''
    cp settings*.yaml $out/${python.sitePackages}/private_gpt/
  '';

  pythonImportsCheck = [ "private_gpt" ];

  passthru = {
    # Pre-fetched tokenizer data, needed for running the tests and the
    # service in offline mode.
    # See related issue at https://github.com/zylon-ai/private-gpt/issues/1870
    cl100k_base.tiktoken = fetchurl {
      url = "https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken";
      hash = "sha256-Ijkht27pm96ZW3/3OFE+7xAPtR0YyTWXoRO8/+hlsqc=";
    };

    tests = {
      inherit (nixosTests) private-gpt;
    };
  };

  meta = {
    changelog = "https://github.com/zylon-ai/private-gpt/blob/${src.rev}/CHANGELOG.md";
    description = "Interact with your documents using the power of GPT, 100% privately, no data leaks";
    homepage = "https://github.com/zylon-ai/private-gpt";
    license = lib.licenses.asl20;
    mainProgram = "private-gpt";
    maintainers = with lib.maintainers; [ GaetanLepage ];
  };
}