# nixpkgs 24.05-pre — openllm (openllm-python), 3.6 kB
# Python package build for OpenLLM (the openllm-python subdirectory of the
# bentoml/OpenLLM monorepo). Source and version are shared with openllm-core.
{ lib
, buildPythonPackage
, hatch-fancy-pypi-readme
, hatch-vcs
, hatchling
, pytestCheckHook
, pythonOlder
, pythonRelaxDepsHook
, accelerate
, bentoml
, bitsandbytes
, build
, click
, ctranslate2
, datasets
, docker
, einops
, ghapi
, huggingface-hub
, hypothesis
, ipython
, jupyter
, jupytext
, nbformat
, notebook
, openai
, openllm-client
, openllm-core
, optimum
, peft
, pytest-mock
, pytest-randomly
, pytest-rerunfailures
, pytest-xdist
, safetensors
, scipy
, sentencepiece
, soundfile
, syrupy
, tabulate
, tiktoken
, transformers
, openai-triton
, xformers
}:

buildPythonPackage rec {
  # src/version come from openllm-core so all monorepo sub-packages stay in sync.
  inherit (openllm-core) src version;
  pname = "openllm";
  pyproject = true;

  disabled = pythonOlder "3.8";

  # The monorepo hosts several packages; build only the python client/server.
  sourceRoot = "source/openllm-python";

  nativeBuildInputs = [
    hatch-fancy-pypi-readme
    hatch-vcs
    hatchling
    pythonRelaxDepsHook
  ];

  pythonRemoveDeps = [
    # remove cuda-python as it has an unfree license
    "cuda-python"
  ];

  propagatedBuildInputs = [
    accelerate
    bentoml
    bitsandbytes
    build
    click
    einops
    ghapi
    openllm-client
    openllm-core
    optimum
    safetensors
    scipy
    sentencepiece
    tabulate
    transformers
  ] ++ bentoml.optional-dependencies.io
  ++ tabulate.optional-dependencies.widechars
  ++ transformers.optional-dependencies.tokenizers
  ++ transformers.optional-dependencies.torch;

  # Optional feature sets mirroring upstream's extras; entries commented out
  # are not (yet) packaged in nixpkgs.
  passthru.optional-dependencies = {
    agents = [
      # diffusers
      soundfile
      transformers
    ] ++ transformers.optional-dependencies.agents;
    awq = [
      # autoawq
    ];
    baichuan = [
      # cpm-kernels
    ];
    chatglm = [
      # cpm-kernels
    ];
    ctranslate = [
      ctranslate2
    ];
    falcon = [
      xformers
    ];
    fine-tune = [
      datasets
      huggingface-hub
      peft
      # trl
    ];
    ggml = [
      # ctransformers
    ];
    gptq = [
      # auto-gptq
    ]; # ++ autogptq.optional-dependencies.triton;
    grpc = [
      bentoml
    ] ++ bentoml.optional-dependencies.grpc;
    mpt = [
      openai-triton
    ];
    openai = [
      openai
      tiktoken
    ] ++ openai.optional-dependencies.datalib;
    playground = [
      ipython
      jupyter
      jupytext
      nbformat
      notebook
    ];
    starcoder = [
      bitsandbytes
    ];
    vllm = [
      # vllm
    ];
    # Aggregate extras: "full" is every feature set above, "all" aliases it.
    full = with passthru.optional-dependencies; (
      agents ++ awq ++ baichuan ++ chatglm ++ ctranslate ++ falcon ++ fine-tune
      ++ ggml ++ gptq ++ mpt ++ openai ++ playground ++ starcoder ++ vllm
    );
    all = passthru.optional-dependencies.full;
  };

  nativeCheckInputs = [
    docker
    hypothesis
    pytest-mock
    pytest-randomly
    pytest-rerunfailures
    pytest-xdist
    pytestCheckHook
    syrupy
  ];

  preCheck = ''
    export HOME=$TMPDIR
    # skip GPUs test on CI
    export GITHUB_ACTIONS=1
    # disable hypothesis' deadline
    export CI=1
  '';

  disabledTestPaths = [
    # require network access
    "tests/models"
  ];

  disabledTests = [
    # incompatible with recent TypedDict
    # https://github.com/bentoml/OpenLLM/blob/f3fd32d596253ae34c68e2e9655f19f40e05f666/openllm-python/tests/configuration_test.py#L18-L21
    "test_missing_default"
  ];

  pythonImportsCheck = [ "openllm" ];

  meta = with lib; {
    description = "Operating LLMs in production";
    homepage = "https://github.com/bentoml/OpenLLM/tree/main/openllm-python";
    changelog = "https://github.com/bentoml/OpenLLM/blob/${src.rev}/CHANGELOG.md";
    license = licenses.asl20;
    maintainers = with maintainers; [ happysalada natsukium ];
  };
}