{
  lib,
  stdenv,
  fetchFromGitHub,
  rocmUpdateScript,
  cmake,
  rocm-cmake,
  clr,
  libxml2,
  libedit,
  rocm-comgr,
  rocm-device-libs,
  rocm-runtime,
  zstd,
  zlib,
  ncurses,
  python3Packages,
  # When true: builds the "-rock" variant — drops the native LLVM codegen
  # target and the "external" output (see `outputs`/`cmakeFlags` below).
  buildRockCompiler ? false,
  buildTests ? false, # `argument of type 'NoneType' is not iterable`
}:

# FIXME: rocmlir has an entire separate LLVM build in a subdirectory — this is silly.
# It seems to be forked from AMD's own LLVM.
# If possible, reusing the rocmPackages.llvm build would be better.
# Would have to confirm it is compatible with ROCm's tagged LLVM.
# Fairly likely it's not, given AMD's track record of forking their own software
# in incompatible ways in subdirs.
# Theoretically, we could have our MLIR have an output
# with the source and built objects so that we can just
# use it as the external LLVM repo for this.
let
  # Package-name suffix distinguishing the rock-compiler variant.
  suffix = if buildRockCompiler then "-rock" else "";
  # LLVM backend name for the build host; only x86_64 and aarch64 hosts are
  # supported — anything else aborts evaluation.
  llvmNativeTarget =
    if stdenv.hostPlatform.isx86_64 then
      "X86"
    else if stdenv.hostPlatform.isAarch64 then
      "AArch64"
    else
      throw "Unsupported ROCm LLVM platform";
in
stdenv.mkDerivation (finalAttrs: {
  pname = "rocmlir${suffix}";
  version = "7.1.1";

  # The extra "external" output exists only for the non-rock build.
  outputs = [ "out" ] ++ lib.optionals (!buildRockCompiler) [ "external" ];

  src = fetchFromGitHub {
    owner = "ROCm";
    repo = "rocMLIR";
    rev = "rocm-${finalAttrs.version}";
    hash = "sha256-A9vUvsEZrZlNEW4cscF66L48rJQ1zJYmIzwXQ2QzJ3s=";
  };

  nativeBuildInputs = [
    clr
    cmake
    rocm-cmake
    python3Packages.python
    python3Packages.tomli
  ];

  buildInputs = [
    libxml2
    libedit
    rocm-comgr
    rocm-runtime
    rocm-device-libs
  ];

  # NOTE(review): propagated, presumably so dependents linking against the
  # bundled LLVM also see these libraries — confirm against consumers.
  propagatedBuildInputs = [
    zstd
    zlib
    ncurses
  ];

  cmakeFlags = [
    # Always build the AMDGPU backend; add the host-native backend only for
    # the regular (non-rock) build.
    (lib.cmakeFeature "LLVM_TARGETS_TO_BUILD"
      "AMDGPU${lib.optionalString (!buildRockCompiler) ";${llvmNativeTarget}"}"
    )
    (lib.cmakeFeature "LLVM_USE_LINKER" "lld")
    # FORCE_ON makes CMake fail loudly if zstd/zlib are missing instead of
    # silently disabling compression support.
    (lib.cmakeFeature "LLVM_ENABLE_ZSTD" "FORCE_ON")
    (lib.cmakeFeature "LLVM_ENABLE_ZLIB" "FORCE_ON")
    (lib.cmakeBool "LLVM_ENABLE_LIBCXX" true)
    (lib.cmakeBool "LLVM_ENABLE_TERMINFO" true)
    (lib.cmakeFeature "ROCM_PATH" "${clr}")
    # Manually define CMAKE_INSTALL_