Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

kselftest/arm64: Add FPMR coverage to fp-ptrace

Add coverage for FPMR to fp-ptrace. FPMR can be available independently of
SVE and SME; if SME is supported then FPMR is cleared by entering and
exiting streaming mode. As with other registers we generate random values
to load into the register; we restrict these to bitfields which are always
defined. We also leave bitfields whose valid values are affected by the
set of supported FP8 formats as zero to reduce complexity, since it is
unlikely that specific bitfields will be affected by ptrace issues.

Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20241112-arm64-fp-ptrace-fpmr-v2-3-250b57c61254@kernel.org
[catalin.marinas@arm.com: use REG_FPMR instead of FPMR]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

authored by

Mark Brown and committed by
Catalin Marinas
7dbd26d0 7e9c5b00

+146 -7
+16 -7
tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
··· 71 71 tbz x7, #SVCR_SM_SHIFT, check_sve_in 72 72 73 73 // Load FFR if we have FA64 74 - mov x4, #0 75 - tbz x0, #HAVE_FA64_SHIFT, load_sve 76 - mov x4, #1 74 + ubfx x4, x0, #HAVE_FA64_SHIFT, #1 77 75 b load_sve 78 76 79 77 // SVE? 80 78 check_sve_in: 81 - tbz x0, #HAVE_SVE_SHIFT, wait_for_writes 79 + tbz x0, #HAVE_SVE_SHIFT, check_fpmr_in 82 80 mov x4, #1 83 81 84 82 load_sve: ··· 141 143 ldr p14, [x7, #14, MUL VL] 142 144 ldr p15, [x7, #15, MUL VL] 143 145 146 + // This has to come after we set PSTATE.SM 147 + check_fpmr_in: 148 + tbz x0, #HAVE_FPMR_SHIFT, wait_for_writes 149 + adrp x7, fpmr_in 150 + ldr x7, [x7, :lo12:fpmr_in] 151 + msr REG_FPMR, x7 152 + 144 153 wait_for_writes: 145 154 // Wait for the parent 146 155 brk #0 ··· 171 166 stp q28, q29, [x7, #16 * 28] 172 167 stp q30, q31, [x7, #16 * 30] 173 168 169 + tbz x0, #HAVE_FPMR_SHIFT, check_sme_out 170 + mrs x7, REG_FPMR 171 + adrp x6, fpmr_out 172 + str x7, [x6, :lo12:fpmr_out] 173 + 174 + check_sme_out: 174 175 tbz x0, #HAVE_SME_SHIFT, check_sve_out 175 176 176 177 rdsvl 11, 1 ··· 208 197 tbz x7, #SVCR_SM_SHIFT, check_sve_out 209 198 210 199 // Do we have FA64 and FFR? 211 - mov x4, #0 212 - tbz x0, #HAVE_FA64_SHIFT, read_sve 213 - mov x4, #1 200 + ubfx x4, x0, #HAVE_FA64_SHIFT, #1 214 201 b read_sve 215 202 216 203 // SVE?
+126
tools/testing/selftests/arm64/fp/fp-ptrace.c
··· 31 31 32 32 #include "fp-ptrace.h" 33 33 34 + #include <linux/bits.h> 35 + 36 + #define FPMR_LSCALE2_MASK GENMASK(37, 32) 37 + #define FPMR_NSCALE_MASK GENMASK(31, 24) 38 + #define FPMR_LSCALE_MASK GENMASK(22, 16) 39 + #define FPMR_OSC_MASK GENMASK(15, 15) 40 + #define FPMR_OSM_MASK GENMASK(14, 14) 41 + 34 42 /* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */ 35 43 #ifndef NT_ARM_SVE 36 44 #define NT_ARM_SVE 0x405 ··· 56 48 #define NT_ARM_ZT 0x40d 57 49 #endif 58 50 51 + #ifndef NT_ARM_FPMR 52 + #define NT_ARM_FPMR 0x40e 53 + #endif 54 + 59 55 #define ARCH_VQ_MAX 256 60 56 61 57 /* VL 128..2048 in powers of 2 */ 62 58 #define MAX_NUM_VLS 5 59 + 60 + /* 61 + * FPMR bits we can set without doing feature checks to see if values 62 + * are valid. 63 + */ 64 + #define FPMR_SAFE_BITS (FPMR_LSCALE2_MASK | FPMR_NSCALE_MASK | \ 65 + FPMR_LSCALE_MASK | FPMR_OSC_MASK | FPMR_OSM_MASK) 63 66 64 67 #define NUM_FPR 32 65 68 __uint128_t v_in[NUM_FPR]; ··· 96 77 char zt_in[ZT_SIG_REG_BYTES]; 97 78 char zt_expected[ZT_SIG_REG_BYTES]; 98 79 char zt_out[ZT_SIG_REG_BYTES]; 80 + 81 + uint64_t fpmr_in, fpmr_expected, fpmr_out; 99 82 100 83 uint64_t sve_vl_out; 101 84 uint64_t sme_vl_out; ··· 147 126 static bool fa64_supported(void) 148 127 { 149 128 return getauxval(AT_HWCAP2) & HWCAP2_SME_FA64; 129 + } 130 + 131 + static bool fpmr_supported(void) 132 + { 133 + return getauxval(AT_HWCAP2) & HWCAP2_FPMR; 150 134 } 151 135 152 136 static bool compare_buffer(const char *name, void *out, ··· 259 233 flags |= HAVE_SME2; 260 234 if (fa64_supported()) 261 235 flags |= HAVE_FA64; 236 + if (fpmr_supported()) 237 + flags |= HAVE_FPMR; 262 238 263 239 load_and_save(flags); 264 240 ··· 348 320 iov_child.iov_base = &zt_out; 349 321 iov_child.iov_len = sizeof(zt_out); 350 322 read_one_child_regs(child, "ZT", &iov_parent, &iov_child); 323 + } 324 + 325 + if (fpmr_supported()) { 326 + iov_parent.iov_base = &fpmr_out; 327 + iov_parent.iov_len = sizeof(fpmr_out); 328 + iov_child.iov_base 
= &fpmr_out; 329 + iov_child.iov_len = sizeof(fpmr_out); 330 + read_one_child_regs(child, "FPMR", &iov_parent, &iov_child); 351 331 } 352 332 } 353 333 ··· 631 595 return compare_buffer("initial ZT", buf, zt_in, ZT_SIG_REG_BYTES); 632 596 } 633 597 598 + static bool check_ptrace_values_fpmr(pid_t child, struct test_config *config) 599 + { 600 + uint64_t val; 601 + struct iovec iov; 602 + int ret; 603 + 604 + if (!fpmr_supported()) 605 + return true; 606 + 607 + iov.iov_base = &val; 608 + iov.iov_len = sizeof(val); 609 + ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_FPMR, &iov); 610 + if (ret != 0) { 611 + ksft_print_msg("Failed to read initial FPMR: %s (%d)\n", 612 + strerror(errno), errno); 613 + return false; 614 + } 615 + 616 + return compare_buffer("initial FPMR", &val, &fpmr_in, sizeof(val)); 617 + } 634 618 635 619 static bool check_ptrace_values(pid_t child, struct test_config *config) 636 620 { ··· 683 627 pass = false; 684 628 685 629 if (!check_ptrace_values_zt(child, config)) 630 + pass = false; 631 + 632 + if (!check_ptrace_values_fpmr(child, config)) 686 633 pass = false; 687 634 688 635 return pass; ··· 891 832 { 892 833 int vq = __sve_vq_from_vl(vl_in(config)); 893 834 int sme_vq = __sve_vq_from_vl(config->sme_vl_in); 835 + bool sm_change; 894 836 895 837 svcr_in = config->svcr_in; 896 838 svcr_expected = config->svcr_expected; 897 839 svcr_out = 0; 840 + 841 + if (sme_supported() && 842 + (svcr_in & SVCR_SM) != (svcr_expected & SVCR_SM)) 843 + sm_change = true; 844 + else 845 + sm_change = false; 898 846 899 847 fill_random(&v_in, sizeof(v_in)); 900 848 memcpy(v_expected, v_in, sizeof(v_in)); ··· 948 882 else 949 883 memset(zt_expected, 0, ZT_SIG_REG_BYTES); 950 884 memset(zt_out, 0, sizeof(zt_out)); 885 + } 886 + 887 + if (fpmr_supported()) { 888 + fill_random(&fpmr_in, sizeof(fpmr_in)); 889 + fpmr_in &= FPMR_SAFE_BITS; 890 + 891 + /* Entering or exiting streaming mode clears FPMR */ 892 + if (sm_change) 893 + fpmr_expected = 0; 894 + else 895 + 
fpmr_expected = fpmr_in; 896 + } else { 897 + fpmr_in = 0; 898 + fpmr_expected = 0; 899 + fpmr_out = 0; 951 900 } 952 901 } 953 902 ··· 1013 932 1014 933 if (!compare_buffer("saved ZT", zt_out, zt_expected, ZT_SIG_REG_BYTES)) 1015 934 pass = false; 935 + 936 + if (fpmr_out != fpmr_expected) { 937 + ksft_print_msg("Mismatch in saved FPMR: %lx != %lx\n", 938 + fpmr_out, fpmr_expected); 939 + pass = false; 940 + } 1016 941 1017 942 return pass; 1018 943 } ··· 1094 1007 ret = ptrace(PTRACE_SETREGSET, child, NT_PRFPREG, &iov); 1095 1008 if (ret == -1) 1096 1009 ksft_print_msg("FPSIMD set failed: (%s) %d\n", 1010 + strerror(errno), errno); 1011 + } 1012 + 1013 + static bool fpmr_write_supported(struct test_config *config) 1014 + { 1015 + if (!fpmr_supported()) 1016 + return false; 1017 + 1018 + if (!sve_sme_same(config)) 1019 + return false; 1020 + 1021 + return true; 1022 + } 1023 + 1024 + static void fpmr_write_expected(struct test_config *config) 1025 + { 1026 + fill_random(&fpmr_expected, sizeof(fpmr_expected)); 1027 + fpmr_expected &= FPMR_SAFE_BITS; 1028 + } 1029 + 1030 + static void fpmr_write(pid_t child, struct test_config *config) 1031 + { 1032 + struct iovec iov; 1033 + int ret; 1034 + 1035 + iov.iov_len = sizeof(fpmr_expected); 1036 + iov.iov_base = &fpmr_expected; 1037 + ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_FPMR, &iov); 1038 + if (ret != 0) 1039 + ksft_print_msg("Failed to write FPMR: %s (%d)\n", 1097 1040 strerror(errno), errno); 1098 1041 } 1099 1042 ··· 1383 1266 .set_expected_values = fpsimd_write_expected, 1384 1267 .modify_values = fpsimd_write, 1385 1268 }, 1269 + { 1270 + .name = "FPMR write", 1271 + .supported = fpmr_write_supported, 1272 + .set_expected_values = fpmr_write_expected, 1273 + .modify_values = fpmr_write, 1274 + }, 1386 1275 }; 1387 1276 1388 1277 static struct test_definition sve_test_defs[] = { ··· 1597 1474 1598 1475 if (fa64_supported()) 1599 1476 ksft_print_msg("FA64 supported\n"); 1477 + 1478 + if (fpmr_supported()) 1479 
+ ksft_print_msg("FPMR supported\n"); 1600 1480 1601 1481 ksft_set_plan(tests); 1602 1482
+2
tools/testing/selftests/arm64/fp/fp-ptrace.h
··· 14 14 #define HAVE_SME_SHIFT 1 15 15 #define HAVE_SME2_SHIFT 2 16 16 #define HAVE_FA64_SHIFT 3 17 + #define HAVE_FPMR_SHIFT 4 17 18 18 19 #define HAVE_SVE (1 << HAVE_SVE_SHIFT) 19 20 #define HAVE_SME (1 << HAVE_SME_SHIFT) 20 21 #define HAVE_SME2 (1 << HAVE_SME2_SHIFT) 21 22 #define HAVE_FA64 (1 << HAVE_FA64_SHIFT) 23 + #define HAVE_FPMR (1 << HAVE_FPMR_SHIFT) 22 24 23 25 #endif
+2
tools/testing/selftests/arm64/fp/sme-inst.h
··· 5 5 #ifndef SME_INST_H 6 6 #define SME_INST_H 7 7 8 + #define REG_FPMR S3_3_C4_C4_2 9 + 8 10 /* 9 11 * RDSVL X\nx, #\imm 10 12 */