nixpkgs mirror (for testing) github.com/NixOS/nixpkgs
nix
at python-updates 202 lines 6.0 kB view raw
# Python package: huggingface/accelerate — multi-GPU/TPU/mixed-precision
# training helper for PyTorch. Fix in this revision: the Darwin
# `disabledTests` list contained a duplicated "test_init_trackers" entry;
# the redundant copy is removed.
{
  stdenv,
  lib,
  buildPythonPackage,
  fetchFromGitHub,
  pythonAtLeast,

  # buildInputs
  llvmPackages,

  # build-system
  setuptools,

  # dependencies
  huggingface-hub,
  numpy,
  packaging,
  psutil,
  pyyaml,
  safetensors,
  torch,

  # tests
  addBinToPathHook,
  evaluate,
  parameterized,
  pytestCheckHook,
  transformers,
  config,
  cudatoolkit,
  writableTmpDirAsHomeHook,
}:

buildPythonPackage (finalAttrs: {
  pname = "accelerate";
  version = "1.12.0";
  pyproject = true;

  src = fetchFromGitHub {
    owner = "huggingface";
    repo = "accelerate";
    tag = "v${finalAttrs.version}";
    hash = "sha256-PwwaQSLOm+8Hd3trM1P+jRhYyoWM3QxOe5XT99haEmg=";
  };

  buildInputs = [ llvmPackages.openmp ];

  build-system = [ setuptools ];

  dependencies = [
    huggingface-hub
    numpy
    packaging
    psutil
    pyyaml
    safetensors
    torch
  ];

  nativeCheckInputs = [
    addBinToPathHook
    evaluate
    parameterized
    pytestCheckHook
    transformers
    writableTmpDirAsHomeHook
  ];

  # Dynamo/Triton tests need ptxas when CUDA support is enabled.
  preCheck = lib.optionalString config.cudaSupport ''
    export TRITON_PTXAS_PATH="${lib.getExe' cudatoolkit "ptxas"}"
  '';

  enabledTestPaths = [ "tests" ];

  disabledTests =
    [
      # try to download data:
      "FeatureExamplesTests"
      "test_infer_auto_device_map_on_t0pp"

      # require socket communication
      "test_explicit_dtypes"
      "test_gated"
      "test_invalid_model_name"
      "test_invalid_model_name_transformers"
      "test_no_metadata"
      "test_no_split_modules"
      "test_remote_code"
      "test_transformers_model"
      "test_extract_model_keep_torch_compile"
      "test_extract_model_remove_torch_compile"
      "test_regions_are_compiled"

      # nondeterministic, tests GC behaviour by thresholding global ram usage
      "test_free_memory_dereferences_prepared_components"

      # set the environment variable, CC, which conflicts with standard environment
      "test_patch_environment_key_exists"
    ]
    ++ lib.optionals ((pythonAtLeast "3.13") || (torch.rocmSupport or false)) [
      # RuntimeError: Dynamo is not supported on Python 3.13+
      # OR torch.compile tests broken on torch 2.5 + rocm
      "test_can_unwrap_distributed_compiled_model_keep_torch_compile"
      "test_can_unwrap_distributed_compiled_model_remove_torch_compile"
      "test_convert_to_fp32"
      "test_send_to_device_compiles"
    ]
    ++ lib.optionals (stdenv.hostPlatform.isLinux && stdenv.hostPlatform.isAarch64) [
      # usual aarch64-linux RuntimeError: DataLoader worker (pid(s) <...>) exited unexpectedly
      "CheckpointTest"
      # TypeError: unsupported operand type(s) for /: 'NoneType' and 'int' (it seems cpuinfo doesn't work here)
      "test_mpi_multicpu_config_cmd"
    ]
    ++
      lib.optionals
        (stdenv.hostPlatform.isLinux && stdenv.hostPlatform.isAarch64 && (pythonAtLeast "3.14"))
        [
          # RuntimeError: There is no current event loop in thread 'MainThread'
          "test_accelerate_test"
        ]
    ++ lib.optionals (!config.cudaSupport) [
      # requires ptxas from cudatoolkit, which is unfree
      "test_dynamo_extract_model"
    ]
    ++ lib.optionals stdenv.hostPlatform.isDarwin [
      # RuntimeError: 'accelerate-launch /nix/store/a7vhm7b74a7bmxc35j26s9iy1zfaqjs...
      "test_accelerate_test"
      "test_init_trackers"
      "test_log"
      "test_log_with_tensor"

      # After enabling MPS in pytorch, these tests started failing
      "test_accelerated_optimizer_step_was_skipped"
      "test_auto_wrap_policy"
      "test_autocast_kwargs"
      "test_automatic_loading"
      "test_backward_prefetch"
      "test_can_resume_training"
      "test_can_resume_training_checkpoints_relative_path"
      "test_can_resume_training_with_folder"
      "test_can_unwrap_model_fp16"
      "test_checkpoint_deletion"
      "test_cpu_offload"
      "test_cpu_ram_efficient_loading"
      "test_grad_scaler_kwargs"
      "test_invalid_registration"
      "test_map_location"
      "test_mixed_precision"
      "test_mixed_precision_buffer_autocast_override"
      "test_project_dir"
      "test_project_dir_with_config"
      "test_sharding_strategy"
      "test_state_dict_type"
      "test_with_save_limit"
      "test_with_scheduler"

      # torch._inductor.exc.InductorError: TypeError: cannot determine truth value of Relational
      "test_regional_compilation_cold_start"
      "test_regional_compilation_inference_speedup"

      # Fails in nixpkgs-review due to a port conflict with simultaneous python builds
      "test_config_compatibility"

      # Fails with `sandbox=false` by mis-configuring the model it's using.
      # AttributeError: 'DistributedDataParallel' object has no attribute '_ignored_modules'. Did you mean: 'named_modules'?
      "test_ignored_modules_regex"

      # Illegal instruction (x86_64) / Trace/BPT Error 5 (aarch64)
      "test_can_pickle_dataloader"
    ]
    ++ lib.optionals (stdenv.hostPlatform.isDarwin && stdenv.hostPlatform.isx86_64) [
      # RuntimeError: torch_shm_manager: execl failed: Permission denied
      "CheckpointTest"
    ]
    ++
      lib.optionals
        (stdenv.hostPlatform.isLinux && stdenv.hostPlatform.isx86_64 && (pythonAtLeast "3.14"))
        [
          # https://github.com/huggingface/accelerate/issues/3899
          "test_accelerate_test"
          "test_cpu"
        ];

  disabledTestPaths = lib.optionals (!(stdenv.hostPlatform.isLinux && stdenv.hostPlatform.isx86_64)) [
    # numerous instances of torch.multiprocessing.spawn.ProcessRaisedException:
    "tests/test_cpu.py"
    "tests/test_grad_sync.py"
    "tests/test_metrics.py"
    "tests/test_scheduler.py"
  ];

  pythonImportsCheck = [ "accelerate" ];

  # Some tests bind local sockets; allow that inside the Darwin sandbox.
  __darwinAllowLocalNetworking = true;

  meta = {
    homepage = "https://huggingface.co/docs/accelerate";
    description = "Simple way to train and use PyTorch models with multi-GPU, TPU, mixed-precision";
    changelog = "https://github.com/huggingface/accelerate/releases/tag/${finalAttrs.src.tag}";
    license = lib.licenses.asl20;
    maintainers = with lib.maintainers; [ bcdarwin ];
    mainProgram = "accelerate";
  };
})