Merge pull request #271078 from SomeoneSerge/feat/torch-propagated-cuda

cudaPackages.setupCudaHook: propagate deps and the hook

authored by Connor Baker, committed by GitHub · commits c94fdf82, e816589e

+150 -90 across 11 changed files
pkgs/development/compilers/cudatoolkit/extension.nix · +3 -17

···
         ./hooks/mark-for-cudatoolkit-root-hook.sh)
       { });

-  # Normally propagated by cuda_nvcc or cudatoolkit through their depsHostHostPropagated
+  # Currently propagated by cuda_nvcc or cudatoolkit, rather than used directly
   setupCudaHook = (final.callPackage
     ({ makeSetupHook, backendStdenv }:
       makeSetupHook
         {
           name = "setup-cuda-hook";

+          substitutions.setupCudaHook = placeholder "out";
+
           # Point NVCC at a compatible compiler
           substitutions.ccRoot = "${backendStdenv.cc}";

           # Required in addition to ccRoot as otherwise bin/gcc is looked up
           # when building CMakeCUDACompilerId.cu
           substitutions.ccFullPath = "${backendStdenv.cc}/bin/${backendStdenv.cc.targetPrefix}c++";
-
-          # Required by cmake's enable_language(CUDA) to build a test program
-          # When implementing cross-compilation support: this is
-          # final.pkgs.targetPackages.cudaPackages.cuda_cudart
-          # Given the multiple-outputs each CUDA redist has, we can specify the exact components we
-          # need from the package. CMake requires:
-          # - the cuda_runtime.h header, which is in the dev output
-          # - the dynamic library, which is in the lib output
-          # - the static library, which is in the static output
-          substitutions.cudartFlags = let cudart = final.cuda_cudart; in
-            builtins.concatStringsSep " " (final.lib.optionals (final ? cuda_cudart) ([
-              "-I${final.lib.getDev cudart}/include"
-              "-L${final.lib.getLib cudart}/lib"
-            ] ++ final.lib.optionals (builtins.elem "static" cudart.outputs) [
-              "-L${cudart.static}/lib"
-            ]));
         }
         ./hooks/setup-cuda-hook.sh)
       { });
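The new substitutions.setupCudaHook = placeholder "out" line lets the generated script refer to the hook's own store path: makeSetupHook runs the script through substituteAll, and the resulting @setupCudaHook@ reference is consumed by propagateCudaLibraries further down. A minimal sketch of the same pattern, with a hypothetical my-hook (the name and the greeting are illustrative, not part of this PR):

  { makeSetupHook, writeText }:
  makeSetupHook
    {
      name = "my-hook";
      # placeholder "out" resolves, at build time, to this hook's own $out,
      # which is what allows a setup hook to re-propagate itself.
      substitutions.myHook = placeholder "out";
    }
    (writeText "my-hook.sh" ''
      # shellcheck shell=bash
      echo "Sourcing my-hook from @myHook@" >&2
    '')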
pkgs/development/compilers/cudatoolkit/hooks/mark-for-cudatoolkit-root-hook.sh · +7 -1

···
 # shellcheck shell=bash

+# Should we mimic cc-wrapper's "hygiene"?
+[[ -z ${strictDeps-} ]] || (( "$hostOffset" < 0 )) || return 0
+
+echo "Sourcing mark-for-cudatoolkit-root-hook" >&2
+
 markForCUDAToolkit_ROOT() {
     mkdir -p "${prefix}/nix-support"
-    touch "${prefix}/nix-support/include-in-cudatoolkit-root"
+    [[ -f "${prefix}/nix-support/include-in-cudatoolkit-root" ]] && return
+    echo "$pname-$output" > "${prefix}/nix-support/include-in-cudatoolkit-root"
 }

 fixupOutputHooks+=(markForCUDAToolkit_ROOT)
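The marker file is no longer an empty touch-ed file: it now records pname-output (e.g. cuda_cudart-lib), which setup-cuda-hook reads below to build its cudaOutputToPath map. Packages living outside cudaPackages never run this fixup hook and have to write the marker by hand, as the nvidia-optical-flow-sdk change further down does. A hedged sketch of doing the same to an arbitrary package (somePackage is hypothetical):

  somePackage.overrideAttrs (old: {
    postFixup = (old.postFixup or "") + ''
      mkdir -p "$out/nix-support"
      # mirrors the "$pname-$output" convention the hook uses above
      echo "$pname-out" > "$out/nix-support/include-in-cudatoolkit-root"
    '';
  })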
pkgs/development/compilers/cudatoolkit/hooks/nvcc-setup-hook.sh · -5 (file removed)

···
-# shellcheck shell=bash
-
-# CMake's enable_language(CUDA) runs a compiler test and it doesn't account for
-# CUDAToolkit_ROOT. We have to help it locate libcudart
-export NVCC_APPEND_FLAGS+=" -L@cudartLib@/lib -L@cudartStatic@/lib -I@cudartInclude@/include"
pkgs/development/compilers/cudatoolkit/hooks/setup-cuda-hook.sh · +84 -17

···
 # Only run the hook from nativeBuildInputs
 (( "$hostOffset" == -1 && "$targetOffset" == 0)) || return 0

-echo Sourcing setup-cuda-hook >&2
-
-extendCUDAToolkit_ROOT() {
-    if [[ -f "$1/nix-support/include-in-cudatoolkit-root" ]] ; then
-        addToSearchPathWithCustomDelimiter ";" CUDAToolkit_ROOT "$1"
-
-        if [[ -d "$1/include" ]] ; then
-            addToSearchPathWithCustomDelimiter ";" CUDAToolkit_INCLUDE_DIR "$1/include"
-        fi
-    fi
-}
-
-addEnvHooks "$targetOffset" extendCUDAToolkit_ROOT
+guard=Sourcing
+reason=
+
+[[ -n ${cudaSetupHookOnce-} ]] && guard=Skipping && reason=" because the hook has been propagated more than once"
+
+if (( "${NIX_DEBUG:-0}" >= 1 )) ; then
+    echo "$guard hostOffset=$hostOffset targetOffset=$targetOffset setupCudaHook$reason" >&2
+else
+    echo "$guard setup-cuda-hook$reason" >&2
+fi
+
+[[ "$guard" = Sourcing ]] || return 0
+
+declare -g cudaSetupHookOnce=1
+declare -Ag cudaHostPathsSeen=()
+declare -Ag cudaOutputToPath=()
+
+extendcudaHostPathsSeen() {
+    (( "${NIX_DEBUG:-0}" >= 1 )) && echo "extendcudaHostPathsSeen $1" >&2
+
+    local markerPath="$1/nix-support/include-in-cudatoolkit-root"
+    [[ ! -f "${markerPath}" ]] && return
+    [[ -v cudaHostPathsSeen[$1] ]] && return
+
+    cudaHostPathsSeen["$1"]=1
+
+    # E.g. cuda_cudart-lib
+    local cudaOutputName
+    read -r cudaOutputName < "$markerPath"
+
+    [[ -z "$cudaOutputName" ]] && return
+
+    local oldPath="${cudaOutputToPath[$cudaOutputName]-}"
+    [[ -n "$oldPath" ]] && echo "extendcudaHostPathsSeen: warning: overwriting $cudaOutputName from $oldPath to $1" >&2
+    cudaOutputToPath["$cudaOutputName"]="$1"
+}
+addEnvHooks "$targetOffset" extendcudaHostPathsSeen
+
+setupCUDAToolkit_ROOT() {
+    (( "${NIX_DEBUG:-0}" >= 1 )) && echo "setupCUDAToolkit_ROOT: cudaHostPathsSeen=${!cudaHostPathsSeen[*]}" >&2
+
+    for path in "${!cudaHostPathsSeen[@]}" ; do
+        addToSearchPathWithCustomDelimiter ";" CUDAToolkit_ROOT "$path"
+        if [[ -d "$path/include" ]] ; then
+            addToSearchPathWithCustomDelimiter ";" CUDAToolkit_INCLUDE_DIR "$path/include"
+        fi
+    done
+
+    export cmakeFlags+=" -DCUDAToolkit_INCLUDE_DIR=$CUDAToolkit_INCLUDE_DIR -DCUDAToolkit_ROOT=$CUDAToolkit_ROOT"
+}
+preConfigureHooks+=(setupCUDAToolkit_ROOT)

 setupCUDAToolkitCompilers() {
     echo Executing setupCUDAToolkitCompilers >&2
···
     # CMake's enable_language(CUDA) runs a compiler test and it doesn't account for
     # CUDAToolkit_ROOT. We have to help it locate libcudart
-    local cudartFlags="@cudartFlags@"
-    if [[ -z "${nvccDontPrependCudartFlags-}" ]] && [[ -n "${cudartFlags:-}" ]] ; then
-        export NVCC_APPEND_FLAGS+=" $cudartFlags"
+    if [[ -z "${nvccDontPrependCudartFlags-}" ]] ; then
+        if [[ ! -v cudaOutputToPath["cuda_cudart-out"] ]] ; then
+            echo "setupCUDAToolkitCompilers: missing cudaPackages.cuda_cudart. This may become an error in the future" >&2
+            # exit 1
+        fi
+        for pkg in "${!cudaOutputToPath[@]}" ; do
+            [[ ! "$pkg" = cuda_cudart* ]] && continue
+
+            local path="${cudaOutputToPath[$pkg]}"
+            if [[ -d "$path/include" ]] ; then
+                export NVCC_PREPEND_FLAGS+=" -I$path/include"
+            fi
+            if [[ -d "$path/lib" ]] ; then
+                export NVCC_PREPEND_FLAGS+=" -L$path/lib"
+            fi
+        done
     fi
 }
-
-setupCMakeCUDAToolkit_ROOT() {
-    export cmakeFlags+=" -DCUDAToolkit_INCLUDE_DIR=$CUDAToolkit_INCLUDE_DIR -DCUDAToolkit_ROOT=$CUDAToolkit_ROOT"
-}
-
-postHooks+=(setupCUDAToolkitCompilers)
-preConfigureHooks+=(setupCMakeCUDAToolkit_ROOT)
+preConfigureHooks+=(setupCUDAToolkitCompilers)
+
+propagateCudaLibraries() {
+    (( "${NIX_DEBUG:-0}" >= 1 )) && echo "propagateCudaLibraries: cudaPropagateToOutput=$cudaPropagateToOutput cudaHostPathsSeen=${!cudaHostPathsSeen[*]}" >&2
+
+    [[ -z "${cudaPropagateToOutput-}" ]] && return
+
+    mkdir -p "${!cudaPropagateToOutput}/nix-support"
+    # One'd expect this should be propagated-build-build-deps, but that doesn't seem to work
+    echo "@setupCudaHook@" >> "${!cudaPropagateToOutput}/nix-support/propagated-native-build-inputs"
+
+    local propagatedBuildInputs=( "${!cudaHostPathsSeen[@]}" )
+    for output in $(getAllOutputNames) ; do
+        if [[ ! "$output" = "$cudaPropagateToOutput" ]] ; then
+            propagatedBuildInputs+=( "${!output}" )
+        fi
+        break
+    done
+
+    # One'd expect this should be propagated-host-host-deps, but that doesn't seem to work
+    printWords "${propagatedBuildInputs[@]}" >> "${!cudaPropagateToOutput}/nix-support/propagated-build-inputs"
+}
+postFixupHooks+=(propagateCudaLibraries)
pkgs/development/libraries/cctag/default.nix · +1 -1

···
   buildInputs = [
     boost179
     eigen
-    opencv
+    opencv.cxxdev
   ];

   # Tests are broken on Darwin (linking issue)
pkgs/development/libraries/nvidia-optical-flow-sdk/default.nix · +5

···
     cp -R * $out/include
   '';

+  postFixup = ''
+    mkdir -p $out/nix-support
+    echo $pname >> "$out/nix-support/include-in-cudatoolkit-root"
+  '';
+
   meta = with lib; {
     description = "Nvidia optical flow headers for computing the relative motion of pixels between images";
     homepage = "https://developer.nvidia.com/opticalflow-sdk";
pkgs/development/libraries/opencv/4.x.nix · +25 -10

···
   outputs = [
     "out"
+    "cxxdev"
     "package_tests"
   ];
+  cudaPropagateToOutput = "cxxdev";

   postUnpack = lib.optionalString buildContrib ''
     cp --no-preserve=mode -r "${contribSrc}/modules" "$NIX_BUILD_TOP/source/opencv_contrib"
···
     bzip2 AVFoundation Cocoa VideoDecodeAcceleration CoreMedia MediaToolbox Accelerate
   ]
   ++ lib.optionals enableDocs [ doxygen graphviz-nox ]
-  ++ lib.optionals enableCuda (with cudaPackages; [
-    cuda_cudart
-    cuda_cccl # <thrust/*>
-    libnpp # npp.h
+  ++ lib.optionals enableCuda (with cudaPackages; [
+    cuda_cudart.lib
+    cuda_cudart.dev
+    cuda_cccl.dev # <thrust/*>
+    libnpp.dev # npp.h
+    libnpp.lib
+    libnpp.static
+    nvidia-optical-flow-sdk
   ] ++ lib.optionals enableCublas [
-    libcublas # cublas_v2.h
+    # May start using the default $out instead once
+    # https://github.com/NixOS/nixpkgs/issues/271792
+    # has been addressed
+    libcublas.static
+    libcublas.lib
+    libcublas.dev # cublas_v2.h
   ] ++ lib.optionals enableCudnn [
-    cudnn # cudnn.h
+    cudnn.dev # cudnn.h
+    cudnn.lib
+    cudnn.static
   ] ++ lib.optionals enableCufft [
-    libcufft # cufft.h
-  ]);
+    libcufft.dev # cufft.h
+    libcufft.lib
+    libcufft.static
+  ]);

-  propagatedBuildInputs = lib.optional enablePython pythonPackages.numpy
-    ++ lib.optionals enableCuda [ nvidia-optical-flow-sdk ];
+  propagatedBuildInputs = lib.optionals enablePython [ pythonPackages.numpy ];

   nativeBuildInputs = [ cmake pkg-config unzip ]
   ++ lib.optionals enablePython [
···
   postInstall = ''
     sed -i "s|{exec_prefix}/$out|{exec_prefix}|;s|{prefix}/$out|{prefix}|" \
       "$out/lib/pkgconfig/opencv4.pc"
+    mkdir $cxxdev
   ''
   # install python distribution information, so other packages can `import opencv`
   + lib.optionalString enablePython ''
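With cudaPropagateToOutput = "cxxdev", OpenCV's CUDA inputs (and the setup hook itself) become transitive for anything that lists opencv.cxxdev, which is what the cctag and openvino changes in this PR rely on. A sketch of such a consumer; everything except opencv.cxxdev is hypothetical:

  stdenv.mkDerivation {
    pname = "my-opencv-consumer"; # illustrative
    version = "0.1.0";
    src = ./.;
    nativeBuildInputs = [ cmake ];
    # No explicit cuda_cudart/libcublas/... list needed: those now ride along
    # via $cxxdev/nix-support/propagated-build-inputs.
    buildInputs = [ opencv.cxxdev ];
  }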
pkgs/development/libraries/openvino/default.nix · +2 -1

···
     "-DENABLE_CPPLINT:BOOL=OFF"
     "-DBUILD_TESTING:BOOL=OFF"
     "-DENABLE_SAMPLES:BOOL=OFF"
+    (lib.cmakeBool "CMAKE_VERBOSE_MAKEFILE" true)
   ];

   env.NIX_CFLAGS_COMPILE = lib.optionalString stdenv.isAarch64 "-Wno-narrowing";
···
   buildInputs = [
     libusb1
     libxml2
-    opencv
+    opencv.cxxdev
     protobuf
     pugixml
     tbb
pkgs/development/python-modules/torch/default.nix · +14 -3

···
     "out" # output standard python package
     "dev" # output libtorch headers
     "lib" # output libtorch libraries
+    "cxxdev" # propagated deps for the cmake consumers of torch
   ];
+  cudaPropagateToOutput = "cxxdev";

   src = fetchFromGitHub {
     owner = "pytorch";
···
     cuda_cccl.dev # <thrust/*>
     cuda_cudart.dev # cuda_runtime.h and libraries
     cuda_cudart.lib
+    cuda_cudart.static
     cuda_cupti.dev # For kineto
     cuda_cupti.lib # For kineto
     cuda_nvcc.dev # crt/host_config.h; even though we include this in nativeBuildInputs, it's needed here too
···
   ++ lib.optionals rocmSupport [ rocmPackages.llvm.openmp ]
   ++ lib.optionals (cudaSupport || rocmSupport) [ effectiveMagma ]
   ++ lib.optionals stdenv.isLinux [ numactl ]
-  ++ lib.optionals stdenv.isDarwin [ Accelerate CoreServices libobjc ];
+  ++ lib.optionals stdenv.isDarwin [ Accelerate CoreServices libobjc ]
+  ++ lib.optionals tritonSupport [ openai-triton ]
+  ++ lib.optionals MPISupport [ mpi ]
+  ++ lib.optionals rocmSupport [ rocmtoolkit_joined ];

   propagatedBuildInputs = [
     cffi
···
     # torch/csrc requires `pybind11` at runtime
     pybind11
+  ] ++ lib.optionals tritonSupport [ openai-triton ];
+
+  propagatedCxxBuildInputs = [
   ]
-  ++ lib.optionals tritonSupport [ openai-triton ]
   ++ lib.optionals MPISupport [ mpi ]
   ++ lib.optionals rocmSupport [ rocmtoolkit_joined ];
···
     --replace "/build/source/torch/include" "$dev/include"
   '';

-  postFixup = lib.optionalString stdenv.isDarwin ''
+  postFixup = ''
+    mkdir -p "$cxxdev/nix-support"
+    printWords "''${propagatedCxxBuildInputs[@]}" >> "$cxxdev/nix-support/propagated-build-inputs"
+  '' + lib.optionalString stdenv.isDarwin ''
     for f in $(ls $lib/lib/*.dylib); do
       install_name_tool -id $lib/lib/$(basename $f) $f || true
     done
pkgs/development/python-modules/torchaudio/default.nix · +1 -11

···
     ffmpeg-full
     pybind11
     sox
-  ] ++ lib.optionals cudaSupport [
-    cudaPackages.libcurand.dev
-    cudaPackages.libcurand.lib
-    cudaPackages.cuda_cudart # cuda_runtime.h and libraries
-    cudaPackages.cuda_cccl.dev # <thrust/*>
-    cudaPackages.cuda_nvtx.dev
-    cudaPackages.cuda_nvtx.lib # -llibNVToolsExt
-    cudaPackages.libcublas.dev
-    cudaPackages.libcublas.lib
-    cudaPackages.libcufft.dev
-    cudaPackages.libcufft.lib
+    torch.cxxdev
   ];

   propagatedBuildInputs = [
pkgs/development/python-modules/torchvision/default.nix · +8 -24

···
   inherit (torch) cudaCapabilities cudaPackages cudaSupport;
   inherit (cudaPackages) backendStdenv cudaVersion;

-  # NOTE: torchvision doesn't use cudnn; torch does!
-  # For this reason it is not included.
-  cuda-common-redist = with cudaPackages; [
-    cuda_cccl # <thrust/*>
-    libcublas # cublas_v2.h
-    libcusolver # cusolverDn.h
-    libcusparse # cusparse.h
-  ];
-
-  cuda-native-redist = symlinkJoin {
-    name = "cuda-native-redist-${cudaVersion}";
-    paths = with cudaPackages; [
-      cuda_cudart # cuda_runtime.h
-      cuda_nvcc
-    ] ++ cuda-common-redist;
-  };
-
-  cuda-redist = symlinkJoin {
-    name = "cuda-redist-${cudaVersion}";
-    paths = cuda-common-redist;
-  };
-
   pname = "torchvision";
   version = "0.16.1";
 in
···
     hash = "sha256-TsYBDtedTQ3+F3LM4JwzkGH2XOr0WSp1Au5YoR07rSA=";
   };

-  nativeBuildInputs = [ libpng ninja which ] ++ lib.optionals cudaSupport [ cuda-native-redist ];
+  nativeBuildInputs = [
+    libpng
+    ninja
+    which
+  ] ++ lib.optionals cudaSupport [
+    cudaPackages.cuda_nvcc
+  ];

-  buildInputs = [ libjpeg_turbo libpng ] ++ lib.optionals cudaSupport [ cuda-redist ];
+  buildInputs = [ libjpeg_turbo libpng torch.cxxdev ];

   propagatedBuildInputs = [ numpy pillow torch scipy ];