{
  lib,
  buildPythonPackage,
  fetchPypi,
  google-generativeai,
  llama-index-core,
  poetry-core,
  pythonOlder,
}:

buildPythonPackage rec {
  pname = "llama-index-embeddings-gemini";
  version = "0.1.8";
  pyproject = true;

  disabled = pythonOlder "3.9";

  src = fetchPypi {
    pname = "llama_index_embeddings_gemini";
    inherit version;
    hash = "sha256-rQKyPqZnyVYH2h5TTVV53kYYldGURWARWjYBrBb4d5M=";
  };

  pythonRelaxDeps = [ "google-generativeai" ];

  build-system = [ poetry-core ];

  dependencies = [
    google-generativeai
    llama-index-core
  ];

  # Tests are only available in the mono repo
  doCheck = false;

  pythonImportsCheck = [ "llama_index.embeddings.gemini" ];

  meta = with lib; {
    description = "LlamaIndex Embeddings Integration for Gemini";
    homepage = "https://github.com/run-llama/llama_index/tree/main/llama-index-integrations/embeddings/llama-index-embeddings-gemini";
    license = licenses.mit;
    maintainers = with maintainers; [ fab ];
  };
}