Merge pull request #225507 from MayNiklas/update-whisper

openai-whisper: 20230124 -> 20230314

authored by

Martin Weinelt and committed by
GitHub
e4daee5b a32622d8

+36 -6
+33 -5
pkgs/development/python-modules/openai-whisper/default.nix
··· 2 2 , fetchFromGitHub 3 3 , buildPythonPackage 4 4 , substituteAll 5 + , cudaSupport ? false 5 6 6 7 # runtime 7 8 , ffmpeg ··· 9 10 # propagates 10 11 , numpy 11 12 , torch 13 + , torchWithCuda 12 14 , tqdm 13 15 , more-itertools 14 16 , transformers 15 17 , ffmpeg-python 18 + , numba 19 + , openai-triton 20 + , scipy 21 + , tiktoken 16 22 17 23 # tests 18 24 , pytestCheckHook ··· 20 26 21 27 buildPythonPackage rec { 22 28 pname = "whisper"; 23 - version = "20230124"; 29 + version = "20230314"; 24 30 format = "setuptools"; 25 31 26 32 src = fetchFromGitHub { 27 33 owner = "openai"; 28 34 repo = pname; 29 35 rev = "refs/tags/v${version}"; 30 - hash = "sha256-+3fs/EXK5NGlISuMTk7r2ZZ4tNFKbNFNkVS2LmHBvwk="; 36 + hash = "sha256-qQCELjRFeRCT1k1CBc3netRtFvt+an/EbkrgnmiX/mc="; 31 37 }; 32 38 33 39 patches = [ ··· 39 45 40 46 propagatedBuildInputs = [ 41 47 numpy 42 - torch 43 48 tqdm 44 49 more-itertools 45 50 transformers 46 51 ffmpeg-python 52 + numba 53 + scipy 54 + tiktoken 55 + ] ++ lib.optionals (!cudaSupport) [ 56 + torch 57 + ] ++ lib.optionals (cudaSupport) [ 58 + openai-triton 59 + torchWithCuda 47 60 ]; 48 61 62 + postPatch = '' 63 + substituteInPlace requirements.txt \ 64 + --replace "tiktoken==0.3.1" "tiktoken>=0.3.1" 65 + '' 66 + # openai-triton is only needed for CUDA support. 67 + # triton needs CUDA to be built. 
68 + # -> by making it optional, we can build whisper without unfree packages enabled 69 + + lib.optionalString (!cudaSupport) '' 70 + sed -i '/if sys.platform.startswith("linux") and platform.machine() == "x86_64":/{N;d}' setup.py 71 + ''; 72 + 49 73 preCheck = '' 50 74 export HOME=$TMPDIR 51 75 ''; ··· 56 80 57 81 disabledTests = [ 58 82 # requires network access to download models 83 + "test_tokenizer" 59 84 "test_transcribe" 85 + # requires NVIDIA drivers 86 + "test_dtw_cuda_equivalence" 87 + "test_median_filter_equivalence" 60 88 ]; 61 89 62 90 meta = with lib; { 91 + changelog = "https://github.com/openai/whisper/blob/v${version}/CHANGELOG.md"; 63 92 description = "General-purpose speech recognition model"; 64 93 homepage = "https://github.com/openai/whisper"; 65 94 license = licenses.mit; 66 - maintainers = with maintainers; [ hexa ]; 95 + maintainers = with maintainers; [ hexa MayNiklas ]; 67 96 }; 68 97 } 69 -
+3 -1
pkgs/top-level/python-packages.nix
··· 6815 6815 6816 6816 openai-triton = callPackage ../development/python-modules/openai-triton { llvmPackages = pkgs.llvmPackages_rocm; }; 6817 6817 6818 - openai-whisper = callPackage ../development/python-modules/openai-whisper { }; 6818 + openai-whisper = callPackage ../development/python-modules/openai-whisper { 6819 + cudaSupport = pkgs.config.cudaSupport or false; 6820 + }; 6819 6821 6820 6822 openant = callPackage ../development/python-modules/openant { }; 6821 6823