Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/rseq: Implement parametrized mm_cid test

Adapt to the rseq.h API changes introduced by commits
"selftests/rseq: <arch>: Template memory ordering and percpu access mode".

Build a new param_test_mm_cid, param_test_mm_cid_benchmark, and
param_test_mm_cid_compare_twice executables to test the new "mm_cid"
rseq field.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221122203932.231377-20-mathieu.desnoyers@efficios.com

Authored by Mathieu Desnoyers and committed by Peter Zijlstra.
ee31fff0 cead7206

+122 -49
+3
tools/testing/selftests/rseq/.gitignore
··· 6 6 param_test 7 7 param_test_benchmark 8 8 param_test_compare_twice 9 + param_test_mm_cid 10 + param_test_mm_cid_benchmark 11 + param_test_mm_cid_compare_twice
+14 -1
tools/testing/selftests/rseq/Makefile
··· 13 13 OVERRIDE_TARGETS = 1 14 14 15 15 TEST_GEN_PROGS = basic_test basic_percpu_ops_test basic_percpu_ops_mm_cid_test param_test \ 16 - param_test_benchmark param_test_compare_twice 16 + param_test_benchmark param_test_compare_twice param_test_mm_cid \ 17 + param_test_mm_cid_benchmark param_test_mm_cid_compare_twice 17 18 18 19 TEST_GEN_PROGS_EXTENDED = librseq.so 19 20 ··· 40 39 $(OUTPUT)/param_test_compare_twice: param_test.c $(TEST_GEN_PROGS_EXTENDED) \ 41 40 rseq.h rseq-*.h 42 41 $(CC) $(CFLAGS) -DRSEQ_COMPARE_TWICE $< $(LDLIBS) -lrseq -o $@ 42 + 43 + $(OUTPUT)/param_test_mm_cid: param_test.c $(TEST_GEN_PROGS_EXTENDED) \ 44 + rseq.h rseq-*.h 45 + $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID $< $(LDLIBS) -lrseq -o $@ 46 + 47 + $(OUTPUT)/param_test_mm_cid_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \ 48 + rseq.h rseq-*.h 49 + $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID -DBENCHMARK $< $(LDLIBS) -lrseq -o $@ 50 + 51 + $(OUTPUT)/param_test_mm_cid_compare_twice: param_test.c $(TEST_GEN_PROGS_EXTENDED) \ 52 + rseq.h rseq-*.h 53 + $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID -DRSEQ_COMPARE_TWICE $< $(LDLIBS) -lrseq -o $@
+100 -48
tools/testing/selftests/rseq/param_test.c
··· 16 16 #include <signal.h> 17 17 #include <errno.h> 18 18 #include <stddef.h> 19 + #include <stdbool.h> 19 20 20 21 static inline pid_t rseq_gettid(void) 21 22 { ··· 37 36 38 37 static int opt_yield, opt_signal, opt_sleep, 39 38 opt_disable_rseq, opt_threads = 200, 40 - opt_disable_mod = 0, opt_test = 's', opt_mb = 0; 39 + opt_disable_mod = 0, opt_test = 's'; 41 40 42 41 static long long opt_reps = 5000; 43 42 ··· 265 264 266 265 #include "rseq.h" 267 266 267 + static enum rseq_mo opt_mo = RSEQ_MO_RELAXED; 268 + 269 + #ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV 270 + #define TEST_MEMBARRIER 271 + 272 + static int sys_membarrier(int cmd, int flags, int cpu_id) 273 + { 274 + return syscall(__NR_membarrier, cmd, flags, cpu_id); 275 + } 276 + #endif 277 + 278 + #ifdef BUILDOPT_RSEQ_PERCPU_MM_CID 279 + # define RSEQ_PERCPU RSEQ_PERCPU_MM_CID 280 + static 281 + int get_current_cpu_id(void) 282 + { 283 + return rseq_current_mm_cid(); 284 + } 285 + static 286 + bool rseq_validate_cpu_id(void) 287 + { 288 + return rseq_mm_cid_available(); 289 + } 290 + # ifdef TEST_MEMBARRIER 291 + /* 292 + * Membarrier does not currently support targeting a mm_cid, so 293 + * issue the barrier on all cpus. 
294 + */ 295 + static 296 + int rseq_membarrier_expedited(int cpu) 297 + { 298 + return sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, 299 + 0, 0); 300 + } 301 + # endif /* TEST_MEMBARRIER */ 302 + #else 303 + # define RSEQ_PERCPU RSEQ_PERCPU_CPU_ID 304 + static 305 + int get_current_cpu_id(void) 306 + { 307 + return rseq_cpu_start(); 308 + } 309 + static 310 + bool rseq_validate_cpu_id(void) 311 + { 312 + return rseq_current_cpu_raw() >= 0; 313 + } 314 + # ifdef TEST_MEMBARRIER 315 + static 316 + int rseq_membarrier_expedited(int cpu) 317 + { 318 + return sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, 319 + MEMBARRIER_CMD_FLAG_CPU, cpu); 320 + } 321 + # endif /* TEST_MEMBARRIER */ 322 + #endif 323 + 268 324 struct percpu_lock_entry { 269 325 intptr_t v; 270 326 } __attribute__((aligned(128))); ··· 409 351 for (;;) { 410 352 int ret; 411 353 412 - cpu = rseq_cpu_start(); 413 - ret = rseq_cmpeqv_storev(&lock->c[cpu].v, 354 + cpu = get_current_cpu_id(); 355 + ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU, 356 + &lock->c[cpu].v, 414 357 0, 1, cpu); 415 358 if (rseq_likely(!ret)) 416 359 break; ··· 528 469 do { 529 470 int cpu; 530 471 531 - cpu = rseq_cpu_start(); 532 - ret = rseq_addv(&data->c[cpu].count, 1, cpu); 472 + cpu = get_current_cpu_id(); 473 + ret = rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU, 474 + &data->c[cpu].count, 1, cpu); 533 475 } while (rseq_unlikely(ret)); 534 476 #ifndef BENCHMARK 535 477 if (i != 0 && !(i % (reps / 10))) ··· 599 539 intptr_t *targetptr, newval, expect; 600 540 int ret; 601 541 602 - cpu = rseq_cpu_start(); 542 + cpu = get_current_cpu_id(); 603 543 /* Load list->c[cpu].head with single-copy atomicity. 
*/ 604 544 expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head); 605 545 newval = (intptr_t)node; 606 546 targetptr = (intptr_t *)&list->c[cpu].head; 607 547 node->next = (struct percpu_list_node *)expect; 608 - ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu); 548 + ret = rseq_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU, 549 + targetptr, expect, newval, cpu); 609 550 if (rseq_likely(!ret)) 610 551 break; 611 552 /* Retry if comparison fails or rseq aborts. */ ··· 632 571 long offset; 633 572 int ret; 634 573 635 - cpu = rseq_cpu_start(); 574 + cpu = get_current_cpu_id(); 636 575 targetptr = (intptr_t *)&list->c[cpu].head; 637 576 expectnot = (intptr_t)NULL; 638 577 offset = offsetof(struct percpu_list_node, next); 639 578 load = (intptr_t *)&head; 640 - ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot, 641 - offset, load, cpu); 579 + ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED, RSEQ_PERCPU, 580 + targetptr, expectnot, 581 + offset, load, cpu); 642 582 if (rseq_likely(!ret)) { 643 583 node = head; 644 584 break; ··· 777 715 intptr_t offset; 778 716 int ret; 779 717 780 - cpu = rseq_cpu_start(); 718 + cpu = get_current_cpu_id(); 781 719 offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); 782 720 if (offset == buffer->c[cpu].buflen) 783 721 break; ··· 785 723 targetptr_spec = (intptr_t *)&buffer->c[cpu].array[offset]; 786 724 newval_final = offset + 1; 787 725 targetptr_final = &buffer->c[cpu].offset; 788 - if (opt_mb) 789 - ret = rseq_cmpeqv_trystorev_storev_release( 790 - targetptr_final, offset, targetptr_spec, 791 - newval_spec, newval_final, cpu); 792 - else 793 - ret = rseq_cmpeqv_trystorev_storev(targetptr_final, 794 - offset, targetptr_spec, newval_spec, 795 - newval_final, cpu); 726 + ret = rseq_cmpeqv_trystorev_storev(opt_mo, RSEQ_PERCPU, 727 + targetptr_final, offset, targetptr_spec, 728 + newval_spec, newval_final, cpu); 796 729 if (rseq_likely(!ret)) { 797 730 result = true; 798 731 break; ··· 810 753 intptr_t offset; 811 754 int ret; 812 
755 813 - cpu = rseq_cpu_start(); 756 + cpu = get_current_cpu_id(); 814 757 /* Load offset with single-copy atomicity. */ 815 758 offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); 816 759 if (offset == 0) { ··· 820 763 head = RSEQ_READ_ONCE(buffer->c[cpu].array[offset - 1]); 821 764 newval = offset - 1; 822 765 targetptr = (intptr_t *)&buffer->c[cpu].offset; 823 - ret = rseq_cmpeqv_cmpeqv_storev(targetptr, offset, 766 + ret = rseq_cmpeqv_cmpeqv_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU, 767 + targetptr, offset, 824 768 (intptr_t *)&buffer->c[cpu].array[offset - 1], 825 769 (intptr_t)head, newval, cpu); 826 770 if (rseq_likely(!ret)) ··· 978 920 size_t copylen; 979 921 int ret; 980 922 981 - cpu = rseq_cpu_start(); 923 + cpu = get_current_cpu_id(); 982 924 /* Load offset with single-copy atomicity. */ 983 925 offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); 984 926 if (offset == buffer->c[cpu].buflen) ··· 989 931 copylen = sizeof(item); 990 932 newval_final = offset + 1; 991 933 targetptr_final = &buffer->c[cpu].offset; 992 - if (opt_mb) 993 - ret = rseq_cmpeqv_trymemcpy_storev_release( 994 - targetptr_final, offset, 995 - destptr, srcptr, copylen, 996 - newval_final, cpu); 997 - else 998 - ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final, 999 - offset, destptr, srcptr, copylen, 1000 - newval_final, cpu); 934 + ret = rseq_cmpeqv_trymemcpy_storev( 935 + opt_mo, RSEQ_PERCPU, 936 + targetptr_final, offset, 937 + destptr, srcptr, copylen, 938 + newval_final, cpu); 1001 939 if (rseq_likely(!ret)) { 1002 940 result = true; 1003 941 break; ··· 1018 964 size_t copylen; 1019 965 int ret; 1020 966 1021 - cpu = rseq_cpu_start(); 967 + cpu = get_current_cpu_id(); 1022 968 /* Load offset with single-copy atomicity. 
*/ 1023 969 offset = RSEQ_READ_ONCE(buffer->c[cpu].offset); 1024 970 if (offset == 0) ··· 1029 975 copylen = sizeof(*item); 1030 976 newval_final = offset - 1; 1031 977 targetptr_final = &buffer->c[cpu].offset; 1032 - ret = rseq_cmpeqv_trymemcpy_storev(targetptr_final, 1033 - offset, destptr, srcptr, copylen, 978 + ret = rseq_cmpeqv_trymemcpy_storev(RSEQ_MO_RELAXED, RSEQ_PERCPU, 979 + targetptr_final, offset, destptr, srcptr, copylen, 1034 980 newval_final, cpu); 1035 981 if (rseq_likely(!ret)) { 1036 982 result = true; ··· 1205 1151 } 1206 1152 1207 1153 /* Test MEMBARRIER_CMD_PRIVATE_RESTART_RSEQ_ON_CPU membarrier command. */ 1208 - #ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV 1154 + #ifdef TEST_MEMBARRIER 1209 1155 struct test_membarrier_thread_args { 1210 1156 int stop; 1211 1157 intptr_t percpu_list_ptr; ··· 1232 1178 int ret; 1233 1179 1234 1180 do { 1235 - int cpu = rseq_cpu_start(); 1181 + int cpu = get_current_cpu_id(); 1236 1182 1237 - ret = rseq_offset_deref_addv(&args->percpu_list_ptr, 1183 + ret = rseq_offset_deref_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU, 1184 + &args->percpu_list_ptr, 1238 1185 sizeof(struct percpu_list_entry) * cpu, 1, cpu); 1239 1186 } while (rseq_unlikely(ret)); 1240 1187 } ··· 1270 1215 1271 1216 for (i = 0; i < CPU_SETSIZE; i++) 1272 1217 free(list->c[i].head); 1273 - } 1274 - 1275 - static int sys_membarrier(int cmd, int flags, int cpu_id) 1276 - { 1277 - return syscall(__NR_membarrier, cmd, flags, cpu_id); 1278 1218 } 1279 1219 1280 1220 /* ··· 1310 1260 1311 1261 /* Make list_b "active". */ 1312 1262 atomic_store(&args->percpu_list_ptr, (intptr_t)&list_b); 1313 - if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, 1314 - MEMBARRIER_CMD_FLAG_CPU, cpu_a) && 1263 + if (rseq_membarrier_expedited(cpu_a) && 1315 1264 errno != ENXIO /* missing CPU */) { 1316 1265 perror("sys_membarrier"); 1317 1266 abort(); ··· 1333 1284 1334 1285 /* Make list_a "active". 
*/ 1335 1286 atomic_store(&args->percpu_list_ptr, (intptr_t)&list_a); 1336 - if (sys_membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ, 1337 - MEMBARRIER_CMD_FLAG_CPU, cpu_b) && 1287 + if (rseq_membarrier_expedited(cpu_b) && 1338 1288 errno != ENXIO /* missing CPU*/) { 1339 1289 perror("sys_membarrier"); 1340 1290 abort(); ··· 1404 1356 abort(); 1405 1357 } 1406 1358 } 1407 - #else /* RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV */ 1359 + #else /* TEST_MEMBARRIER */ 1408 1360 void test_membarrier(void) 1409 1361 { 1410 1362 fprintf(stderr, "rseq_offset_deref_addv is not implemented on this architecture. " ··· 1561 1513 verbose = 1; 1562 1514 break; 1563 1515 case 'M': 1564 - opt_mb = 1; 1516 + opt_mo = RSEQ_MO_RELEASE; 1565 1517 break; 1566 1518 default: 1567 1519 show_usage(argc, argv); ··· 1581 1533 1582 1534 if (!opt_disable_rseq && rseq_register_current_thread()) 1583 1535 goto error; 1536 + if (!opt_disable_rseq && !rseq_validate_cpu_id()) { 1537 + fprintf(stderr, "Error: cpu id getter unavailable\n"); 1538 + goto error; 1539 + } 1584 1540 switch (opt_test) { 1585 1541 case 's': 1586 1542 printf_verbose("spinlock\n");
+5
tools/testing/selftests/rseq/run_param_test.sh
··· 42 42 ./param_test ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1 43 43 echo "Running compare-twice test ${TEST_NAME[$i]}" 44 44 ./param_test_compare_twice ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1 45 + 46 + echo "Running mm_cid test ${TEST_NAME[$i]}" 47 + ./param_test_mm_cid ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1 48 + echo "Running mm_cid compare-twice test ${TEST_NAME[$i]}" 49 + ./param_test_mm_cid_compare_twice ${TEST_LIST[$i]} -r ${REPS} -t ${NR_THREADS} ${@} ${EXTRA_ARGS} || exit 1 45 50 let "i++" 46 51 done 47 52 }