{
  lib,
  buildPythonPackage,
  python,
  fetchFromGitHub,
  poetry-core,
  fastapi,
  injector,
  llama-index-core,
  llama-index-readers-file,
  huggingface-hub,
  python-multipart,
  pyyaml,
  transformers,
  uvicorn,
  watchdog,
  gradio,
  fetchurl,
  fetchpatch,
}:

buildPythonPackage rec {
  pname = "private-gpt";
  version = "0.5.0";
  pyproject = true;

  src = fetchFromGitHub {
    owner = "zylon-ai";
    repo = "private-gpt";
    rev = "v${version}";
    hash = "sha256-bjydzJhOJjmbflcJbuMyNsmby7HtNPFW3MY2Tw12cHw=";
  };

  patches = [
    # Fix a vulnerability; to be removed on the next version bump.
    # See https://github.com/zylon-ai/private-gpt/pull/1890
    (fetchpatch {
      url = "https://github.com/zylon-ai/private-gpt/commit/86368c61760c9cee5d977131d23ad2a3e063cbe9.patch";
      hash = "sha256-4ysRUuNaHW4bmNzg4fn++89b430LP6AzYDoX2HplVH0=";
    })
  ];

  build-system = [ poetry-core ];

  # All extras are enabled unconditionally, so the optional dependency
  # groups below are folded into the runtime closure.
  dependencies = [
    fastapi
    injector
    llama-index-core
    llama-index-readers-file
    python-multipart
    pyyaml
    transformers
    uvicorn
    watchdog
  ] ++ lib.flatten (lib.attrValues optional-dependencies);

  # Upstream's Poetry extras. `buildPythonPackage` re-exposes this attribute
  # under `passthru.optional-dependencies`, so downstream references to the
  # old passthru path keep working. Packages not taken from the function
  # arguments above are resolved from `python.pkgs` via `with`.
  optional-dependencies = with python.pkgs; {
    embeddings-huggingface = [
      huggingface-hub
      llama-index-embeddings-huggingface
    ];
    embeddings-ollama = [ llama-index-embeddings-ollama ];
    embeddings-openai = [ llama-index-embeddings-openai ];
    embeddings-sagemaker = [ boto3 ];
    llms-ollama = [ llama-index-llms-ollama ];
    llms-openai = [ llama-index-llms-openai ];
    llms-openai-like = [ llama-index-llms-openai-like ];
    llms-sagemaker = [ boto3 ];
    ui = [ gradio ];
    vector-stores-chroma = [ llama-index-vector-stores-chroma ];
    vector-stores-postgres = [ llama-index-vector-stores-postgres ];
    vector-stores-qdrant = [ llama-index-vector-stores-qdrant ];
  };

  # Pre-fetched tokenizer data, needed for running the tests and the service
  # in offline mode.
  # See related issue at https://github.com/zylon-ai/private-gpt/issues/1870
  passthru.cl100k_base.tiktoken = fetchurl {
    url = "https://openaipublic.blob.core.windows.net/encodings/cl100k_base.tiktoken";
    hash = "sha256-Ijkht27pm96ZW3/3OFE+7xAPtR0YyTWXoRO8/+hlsqc=";
  };

  # The application loads its settings*.yaml files relative to the package
  # directory; ship them alongside the installed module.
  postInstall = ''
    cp settings*.yaml $out/${python.sitePackages}/private_gpt/
  '';

  pythonImportsCheck = [ "private_gpt" ];

  meta = {
    changelog = "https://github.com/zylon-ai/private-gpt/blob/${src.rev}/CHANGELOG.md";
    description = "Interact with your documents using the power of GPT, 100% privately, no data leaks";
    homepage = "https://github.com/zylon-ai/private-gpt";
    license = lib.licenses.asl20;
    mainProgram = "private-gpt";
    maintainers = with lib.maintainers; [ drupol ];
  };
}