Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'linux-kselftest-next-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest

Pull Kselftest updates from Shuah Khan:
"A mix of fixes, enhancements, and new tests. The bulk of the changes
enhance and fix the rseq and resctrl tests.

In addition, user_events, dmabuf-heaps and perf_events are added to
default kselftest build and test coverage. A futex test fix, enhanced
prctl test coverage, and minor fixes are included in this update"

* tag 'linux-kselftest-next-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest: (32 commits)
selftests: cachestat: use proper syscall number macro
selftests: cachestat: properly link in librt
selftests/futex: Order calls to futex_lock_pi
selftests: Hook more tests into the build infrastructure
selftests/user_events: Reenable build
selftests/filesystems: Add six consecutive 'x' characters to mktemp
selftests/rseq: Use rseq_unqual_scalar_typeof in macros
selftests/rseq: Fix arm64 buggy load-acquire/store-release macros
selftests/rseq: Implement rseq_unqual_scalar_typeof
selftests/rseq: Fix CID_ID typo in Makefile
selftests:prctl: add set-process-name to .gitignore
selftests:prctl: Fix make clean override warning
selftests/resctrl: Remove test type checks from cat_val()
selftests/resctrl: Pass the real number of tests to show_cache_info()
selftests/resctrl: Move CAT/CMT test global vars to function they are used in
selftests/resctrl: Don't use variable argument list for ->setup()
selftests/resctrl: Don't pass test name to fill_buf
selftests/resctrl: Improve parameter consistency in fill_buf
selftests/resctrl: Remove unnecessary startptr global from fill_buf
selftests/resctrl: Remove "malloc_and_init_memory" param from run_fill_buf()
...

+332 -294
+4
tools/testing/selftests/Makefile
··· 12 12 TARGETS += cpufreq 13 13 TARGETS += cpu-hotplug 14 14 TARGETS += damon 15 + TARGETS += dmabuf-heaps 15 16 TARGETS += drivers/dma-buf 16 17 TARGETS += drivers/s390x/uvdevice 17 18 TARGETS += drivers/net/bonding ··· 58 57 TARGETS += net/openvswitch 59 58 TARGETS += netfilter 60 59 TARGETS += nsfs 60 + TARGETS += perf_events 61 61 TARGETS += pidfd 62 62 TARGETS += pid_namespace 63 63 TARGETS += powerpc ··· 91 89 TARGETS += tmpfs 92 90 TARGETS += tpm2 93 91 TARGETS += tty 92 + TARGETS += uevents 94 93 TARGETS += user 94 + TARGETS += user_events 95 95 TARGETS += vDSO 96 96 TARGETS += mm 97 97 TARGETS += x86
+1 -1
tools/testing/selftests/cachestat/Makefile
··· 3 3 4 4 CFLAGS += $(KHDR_INCLUDES) 5 5 CFLAGS += -Wall 6 - CFLAGS += -lrt 6 + LDLIBS += -lrt 7 7 8 8 include ../lib.mk
+3 -4
tools/testing/selftests/cachestat/test_cachestat.c
··· 23 23 "/dev/zero", "/dev/null", "/dev/urandom", 24 24 "/proc/version", "/proc" 25 25 }; 26 - static const int cachestat_nr = 451; 27 26 28 27 void print_cachestat(struct cachestat *cs) 29 28 { ··· 143 144 } 144 145 } 145 146 146 - syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0); 147 + syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0); 147 148 148 149 ksft_print_msg("Cachestat call returned %ld\n", syscall_ret); 149 150 ··· 171 172 ksft_print_msg("fsync fails.\n"); 172 173 ret = KSFT_FAIL; 173 174 } else { 174 - syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0); 175 + syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0); 175 176 176 177 ksft_print_msg("Cachestat call (after fsync) returned %ld\n", 177 178 syscall_ret); ··· 232 233 goto close_fd; 233 234 } 234 235 235 - syscall_ret = syscall(cachestat_nr, fd, &cs_range, &cs, 0); 236 + syscall_ret = syscall(__NR_cachestat, fd, &cs_range, &cs, 0); 236 237 237 238 if (syscall_ret) { 238 239 ksft_print_msg("Cachestat returned non-zero.\n");
+1 -1
tools/testing/selftests/filesystems/fat/run_fat_tests.sh
··· 12 12 set -o pipefail 13 13 14 14 BASE_DIR="$(dirname $0)" 15 - TMP_DIR="$(mktemp -d /tmp/fat_tests_tmp.XXXX)" 15 + TMP_DIR="$(mktemp -d /tmp/fat_tests_tmp.XXXXXX)" 16 16 IMG_PATH="${TMP_DIR}/fat.img" 17 17 MNT_PATH="${TMP_DIR}/mnt" 18 18
+7
tools/testing/selftests/futex/functional/futex_wait_timeout.c
··· 24 24 25 25 static long timeout_ns = 100000; /* 100us default timeout */ 26 26 static futex_t futex_pi; 27 + static pthread_barrier_t barrier; 27 28 28 29 void usage(char *prog) 29 30 { ··· 48 47 ret = futex_lock_pi(&futex_pi, NULL, 0, 0); 49 48 if (ret != 0) 50 49 error("futex_lock_pi failed\n", ret); 50 + 51 + pthread_barrier_wait(&barrier); 51 52 52 53 /* Blocks forever */ 53 54 ret = futex_wait(&lock, 0, NULL, 0); ··· 133 130 basename(argv[0])); 134 131 ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns); 135 132 133 + pthread_barrier_init(&barrier, NULL, 2); 136 134 pthread_create(&thread, NULL, get_pi_lock, NULL); 137 135 138 136 /* initialize relative timeout */ ··· 167 163 res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, 0); 168 164 test_timeout(res, &ret, "futex_wait_requeue_pi monotonic", ETIMEDOUT); 169 165 166 + /* Wait until the other thread calls futex_lock_pi() */ 167 + pthread_barrier_wait(&barrier); 168 + pthread_barrier_destroy(&barrier); 170 169 /* 171 170 * FUTEX_LOCK_PI with CLOCK_REALTIME 172 171 * Due to historical reasons, FUTEX_LOCK_PI supports only realtime
+1
tools/testing/selftests/prctl/.gitignore
··· 3 3 disable-tsc-on-off-stress-test 4 4 disable-tsc-test 5 5 set-anon-vma-name-test 6 + set-process-name
+1 -3
tools/testing/selftests/prctl/Makefile
··· 5 5 6 6 ifeq ($(ARCH),x86) 7 7 TEST_PROGS := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test \ 8 - disable-tsc-test set-anon-vma-name-test 8 + disable-tsc-test set-anon-vma-name-test set-process-name 9 9 all: $(TEST_PROGS) 10 10 11 11 include ../lib.mk 12 12 13 - clean: 14 - rm -fr $(TEST_PROGS) 15 13 endif 16 14 endif
+62
tools/testing/selftests/prctl/set-process-name.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * This test covers the PR_SET_NAME functionality of prctl calls 4 + */ 5 + 6 + #include <errno.h> 7 + #include <sys/prctl.h> 8 + #include <string.h> 9 + 10 + #include "../kselftest_harness.h" 11 + 12 + #define CHANGE_NAME "changename" 13 + #define EMPTY_NAME "" 14 + #define TASK_COMM_LEN 16 15 + 16 + int set_name(char *name) 17 + { 18 + int res; 19 + 20 + res = prctl(PR_SET_NAME, name, NULL, NULL, NULL); 21 + 22 + if (res < 0) 23 + return -errno; 24 + return res; 25 + } 26 + 27 + int check_is_name_correct(char *check_name) 28 + { 29 + char name[TASK_COMM_LEN]; 30 + int res; 31 + 32 + res = prctl(PR_GET_NAME, name, NULL, NULL, NULL); 33 + 34 + if (res < 0) 35 + return -errno; 36 + 37 + return !strcmp(name, check_name); 38 + } 39 + 40 + int check_null_pointer(char *check_name) 41 + { 42 + char *name = NULL; 43 + int res; 44 + 45 + res = prctl(PR_GET_NAME, name, NULL, NULL, NULL); 46 + 47 + return res; 48 + } 49 + 50 + TEST(rename_process) { 51 + 52 + EXPECT_GE(set_name(CHANGE_NAME), 0); 53 + EXPECT_TRUE(check_is_name_correct(CHANGE_NAME)); 54 + 55 + EXPECT_GE(set_name(EMPTY_NAME), 0); 56 + EXPECT_TRUE(check_is_name_correct(EMPTY_NAME)); 57 + 58 + EXPECT_GE(set_name(CHANGE_NAME), 0); 59 + EXPECT_LT(check_null_pointer(CHANGE_NAME), 0); 60 + } 61 + 62 + TEST_HARNESS_MAIN
+1 -1
tools/testing/selftests/resctrl/Makefile
··· 7 7 8 8 include ../lib.mk 9 9 10 - $(OUTPUT)/resctrl_tests: $(wildcard *.c) 10 + $(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
+31 -35
tools/testing/selftests/resctrl/cache.c
··· 87 87 static int get_llc_perf(unsigned long *llc_perf_miss) 88 88 { 89 89 __u64 total_misses; 90 + int ret; 90 91 91 92 /* Stop counters after one span to get miss rate */ 92 93 93 94 ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0); 94 95 95 - if (read(fd_lm, &rf_cqm, sizeof(struct read_format)) == -1) { 96 + ret = read(fd_lm, &rf_cqm, sizeof(struct read_format)); 97 + if (ret == -1) { 96 98 perror("Could not get llc misses through perf"); 97 - 98 99 return -1; 99 100 } 100 101 101 102 total_misses = rf_cqm.values[0].value; 102 - 103 - close(fd_lm); 104 - 105 103 *llc_perf_miss = total_misses; 106 104 107 105 return 0; ··· 210 212 */ 211 213 int cat_val(struct resctrl_val_param *param) 212 214 { 213 - int malloc_and_init_memory = 1, memflush = 1, operation = 0, ret = 0; 215 + int memflush = 1, operation = 0, ret = 0; 214 216 char *resctrl_val = param->resctrl_val; 215 217 pid_t bm_pid; 216 218 ··· 230 232 if (ret) 231 233 return ret; 232 234 233 - if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) 234 - initialize_llc_perf(); 235 + initialize_llc_perf(); 235 236 236 237 /* Test runs until the callback setup() tells the test to stop. 
*/ 237 238 while (1) { 238 - if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) { 239 - ret = param->setup(1, param); 240 - if (ret == END_OF_TESTS) { 241 - ret = 0; 242 - break; 243 - } 244 - if (ret < 0) 245 - break; 246 - ret = reset_enable_llc_perf(bm_pid, param->cpu_no); 247 - if (ret) 248 - break; 249 - 250 - if (run_fill_buf(param->span, malloc_and_init_memory, 251 - memflush, operation, resctrl_val)) { 252 - fprintf(stderr, "Error-running fill buffer\n"); 253 - ret = -1; 254 - break; 255 - } 256 - 257 - sleep(1); 258 - ret = measure_cache_vals(param, bm_pid); 259 - if (ret) 260 - break; 261 - } else { 239 + ret = param->setup(param); 240 + if (ret == END_OF_TESTS) { 241 + ret = 0; 262 242 break; 263 243 } 244 + if (ret < 0) 245 + break; 246 + ret = reset_enable_llc_perf(bm_pid, param->cpu_no); 247 + if (ret) 248 + break; 249 + 250 + if (run_fill_buf(param->span, memflush, operation, true)) { 251 + fprintf(stderr, "Error-running fill buffer\n"); 252 + ret = -1; 253 + goto pe_close; 254 + } 255 + 256 + sleep(1); 257 + ret = measure_cache_vals(param, bm_pid); 258 + if (ret) 259 + goto pe_close; 264 260 } 265 261 262 + return ret; 263 + 264 + pe_close: 265 + close(fd_lm); 266 266 return ret; 267 267 } 268 268 ··· 278 282 * Return: 0 on success. non-zero on failure. 
279 283 */ 280 284 int show_cache_info(unsigned long sum_llc_val, int no_of_bits, 281 - unsigned long cache_span, unsigned long max_diff, 285 + size_t cache_span, unsigned long max_diff, 282 286 unsigned long max_diff_percent, unsigned long num_of_runs, 283 287 bool platform, bool cmt) 284 288 { ··· 287 291 long avg_diff = 0; 288 292 int ret; 289 293 290 - avg_llc_val = sum_llc_val / (num_of_runs - 1); 294 + avg_llc_val = sum_llc_val / num_of_runs; 291 295 avg_diff = (long)abs(cache_span - avg_llc_val); 292 296 diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100; 293 297 ··· 300 304 ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent)); 301 305 ksft_print_msg("Number of bits: %d\n", no_of_bits); 302 306 ksft_print_msg("Average LLC val: %lu\n", avg_llc_val); 303 - ksft_print_msg("Cache span (%s): %lu\n", cmt ? "bytes" : "lines", 307 + ksft_print_msg("Cache span (%s): %zu\n", cmt ? "bytes" : "lines", 304 308 cache_span); 305 309 306 310 return ret;
+6 -22
tools/testing/selftests/resctrl/cat_test.c
··· 17 17 #define MAX_DIFF_PERCENT 4 18 18 #define MAX_DIFF 1000000 19 19 20 - static int count_of_bits; 21 - static char cbm_mask[256]; 22 - static unsigned long long_mask; 23 - static unsigned long cache_size; 24 - 25 20 /* 26 21 * Change schemata. Write schemata to specified 27 22 * con_mon grp, mon_grp in resctrl FS. 28 23 * Run 5 times in order to get average values. 29 24 */ 30 - static int cat_setup(int num, ...) 25 + static int cat_setup(struct resctrl_val_param *p) 31 26 { 32 - struct resctrl_val_param *p; 33 27 char schemata[64]; 34 - va_list param; 35 28 int ret = 0; 36 - 37 - va_start(param, num); 38 - p = va_arg(param, struct resctrl_val_param *); 39 - va_end(param); 40 29 41 30 /* Run NUM_OF_RUNS times */ 42 31 if (p->num_of_runs >= NUM_OF_RUNS) ··· 77 88 no_of_bits = count_bits(param->mask); 78 89 79 90 return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64, 80 - MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS, 91 + MAX_DIFF, MAX_DIFF_PERCENT, runs - 1, 81 92 get_vendor() == ARCH_INTEL, false); 82 93 } 83 94 ··· 91 102 { 92 103 unsigned long l_mask, l_mask_1; 93 104 int ret, pipefd[2], sibling_cpu_no; 105 + unsigned long cache_size = 0; 106 + unsigned long long_mask; 107 + char cbm_mask[256]; 108 + int count_of_bits; 94 109 char pipe_message; 95 - 96 - cache_size = 0; 97 - 98 - ret = remount_resctrlfs(true); 99 - if (ret) 100 - return ret; 101 110 102 111 /* Get default cbm mask for L3/L2 cache */ 103 112 ret = get_cbm_mask(cache_type, cbm_mask); ··· 131 144 struct resctrl_val_param param = { 132 145 .resctrl_val = CAT_STR, 133 146 .cpu_no = cpu_no, 134 - .mum_resctrlfs = false, 135 147 .setup = cat_setup, 136 148 }; 137 149 ··· 213 227 214 228 out: 215 229 cat_test_cleanup(); 216 - if (bm_pid) 217 - umount_resctrlfs(); 218 230 219 231 return ret; 220 232 }
+7 -22
tools/testing/selftests/resctrl/cmt_test.c
··· 16 16 #define MAX_DIFF 2000000 17 17 #define MAX_DIFF_PERCENT 15 18 18 19 - static int count_of_bits; 20 - static char cbm_mask[256]; 21 - static unsigned long long_mask; 22 - static unsigned long cache_size; 23 - 24 - static int cmt_setup(int num, ...) 19 + static int cmt_setup(struct resctrl_val_param *p) 25 20 { 26 - struct resctrl_val_param *p; 27 - va_list param; 28 - 29 - va_start(param, num); 30 - p = va_arg(param, struct resctrl_val_param *); 31 - va_end(param); 32 - 33 21 /* Run NUM_OF_RUNS times */ 34 22 if (p->num_of_runs >= NUM_OF_RUNS) 35 23 return END_OF_TESTS; ··· 59 71 fclose(fp); 60 72 61 73 return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span, 62 - MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS, 74 + MAX_DIFF, MAX_DIFF_PERCENT, runs - 1, 63 75 true, true); 64 76 } 65 77 ··· 70 82 71 83 int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd) 72 84 { 85 + unsigned long cache_size = 0; 86 + unsigned long long_mask; 87 + char cbm_mask[256]; 88 + int count_of_bits; 73 89 int ret; 74 - 75 - cache_size = 0; 76 - 77 - ret = remount_resctrlfs(true); 78 - if (ret) 79 - return ret; 80 90 81 91 if (!validate_resctrl_feature_request(CMT_STR)) 82 92 return -1; ··· 103 117 .ctrlgrp = "c1", 104 118 .mongrp = "m1", 105 119 .cpu_no = cpu_no, 106 - .mum_resctrlfs = false, 107 120 .filename = RESULT_FILE_NAME, 108 121 .mask = ~(long_mask << n) & long_mask, 109 122 .span = cache_size * n / count_of_bits, ··· 111 126 }; 112 127 113 128 if (strcmp(benchmark_cmd[0], "fill_buf") == 0) 114 - sprintf(benchmark_cmd[1], "%lu", param.span); 129 + sprintf(benchmark_cmd[1], "%zu", param.span); 115 130 116 131 remove(RESULT_FILE_NAME); 117 132
+31 -56
tools/testing/selftests/resctrl/fill_buf.c
··· 22 22 #define PAGE_SIZE (4 * 1024) 23 23 #define MB (1024 * 1024) 24 24 25 - static unsigned char *startptr; 26 - 27 25 static void sb(void) 28 26 { 29 27 #if defined(__i386) || defined(__x86_64) ··· 38 40 #endif 39 41 } 40 42 41 - static void mem_flush(void *p, size_t s) 43 + static void mem_flush(unsigned char *buf, size_t buf_size) 42 44 { 43 - char *cp = (char *)p; 45 + unsigned char *cp = buf; 44 46 size_t i = 0; 45 47 46 - s = s / CL_SIZE; /* mem size in cache llines */ 48 + buf_size = buf_size / CL_SIZE; /* mem size in cache lines */ 47 49 48 - for (i = 0; i < s; i++) 50 + for (i = 0; i < buf_size; i++) 49 51 cl_flush(&cp[i * CL_SIZE]); 50 52 51 53 sb(); 52 54 } 53 55 54 - static void *malloc_and_init_memory(size_t s) 56 + static void *malloc_and_init_memory(size_t buf_size) 55 57 { 56 58 void *p = NULL; 57 59 uint64_t *p64; 58 60 size_t s64; 59 61 int ret; 60 62 61 - ret = posix_memalign(&p, PAGE_SIZE, s); 63 + ret = posix_memalign(&p, PAGE_SIZE, buf_size); 62 64 if (ret < 0) 63 65 return NULL; 64 66 65 67 p64 = (uint64_t *)p; 66 - s64 = s / sizeof(uint64_t); 68 + s64 = buf_size / sizeof(uint64_t); 67 69 68 70 while (s64 > 0) { 69 71 *p64 = (uint64_t)rand(); ··· 74 76 return p; 75 77 } 76 78 77 - static int fill_one_span_read(unsigned char *start_ptr, unsigned char *end_ptr) 79 + static int fill_one_span_read(unsigned char *buf, size_t buf_size) 78 80 { 81 + unsigned char *end_ptr = buf + buf_size; 79 82 unsigned char sum, *p; 80 83 81 84 sum = 0; 82 - p = start_ptr; 85 + p = buf; 83 86 while (p < end_ptr) { 84 87 sum += *p; 85 88 p += (CL_SIZE / 2); ··· 89 90 return sum; 90 91 } 91 92 92 - static 93 - void fill_one_span_write(unsigned char *start_ptr, unsigned char *end_ptr) 93 + static void fill_one_span_write(unsigned char *buf, size_t buf_size) 94 94 { 95 + unsigned char *end_ptr = buf + buf_size; 95 96 unsigned char *p; 96 97 97 - p = start_ptr; 98 + p = buf; 98 99 while (p < end_ptr) { 99 100 *p = '1'; 100 101 p += (CL_SIZE / 2); 101 102 } 102 103 
} 103 104 104 - static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr, 105 - char *resctrl_val) 105 + static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once) 106 106 { 107 107 int ret = 0; 108 108 FILE *fp; 109 109 110 110 while (1) { 111 - ret = fill_one_span_read(start_ptr, end_ptr); 112 - if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) 111 + ret = fill_one_span_read(buf, buf_size); 112 + if (once) 113 113 break; 114 114 } 115 115 ··· 124 126 return 0; 125 127 } 126 128 127 - static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr, 128 - char *resctrl_val) 129 + static int fill_cache_write(unsigned char *buf, size_t buf_size, bool once) 129 130 { 130 131 while (1) { 131 - fill_one_span_write(start_ptr, end_ptr); 132 - if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) 132 + fill_one_span_write(buf, buf_size); 133 + if (once) 133 134 break; 134 135 } 135 136 136 137 return 0; 137 138 } 138 139 139 - static int 140 - fill_cache(unsigned long long buf_size, int malloc_and_init, int memflush, 141 - int op, char *resctrl_val) 140 + static int fill_cache(size_t buf_size, int memflush, int op, bool once) 142 141 { 143 - unsigned char *start_ptr, *end_ptr; 144 - unsigned long long i; 142 + unsigned char *buf; 145 143 int ret; 146 144 147 - if (malloc_and_init) 148 - start_ptr = malloc_and_init_memory(buf_size); 149 - else 150 - start_ptr = malloc(buf_size); 151 - 152 - if (!start_ptr) 145 + buf = malloc_and_init_memory(buf_size); 146 + if (!buf) 153 147 return -1; 154 - 155 - startptr = start_ptr; 156 - end_ptr = start_ptr + buf_size; 157 - 158 - /* 159 - * It's better to touch the memory once to avoid any compiler 160 - * optimizations 161 - */ 162 - if (!malloc_and_init) { 163 - for (i = 0; i < buf_size; i++) 164 - *start_ptr++ = (unsigned char)rand(); 165 - } 166 - 167 - start_ptr = startptr; 168 148 169 149 /* Flush the memory before using to avoid "cache hot pages" effect */ 170 150 if (memflush) 
171 - mem_flush(start_ptr, buf_size); 151 + mem_flush(buf, buf_size); 172 152 173 153 if (op == 0) 174 - ret = fill_cache_read(start_ptr, end_ptr, resctrl_val); 154 + ret = fill_cache_read(buf, buf_size, once); 175 155 else 176 - ret = fill_cache_write(start_ptr, end_ptr, resctrl_val); 156 + ret = fill_cache_write(buf, buf_size, once); 157 + 158 + free(buf); 177 159 178 160 if (ret) { 179 161 printf("\n Error in fill cache read/write...\n"); 180 162 return -1; 181 163 } 182 164 183 - free(startptr); 184 165 185 166 return 0; 186 167 } 187 168 188 - int run_fill_buf(unsigned long span, int malloc_and_init_memory, 189 - int memflush, int op, char *resctrl_val) 169 + int run_fill_buf(size_t span, int memflush, int op, bool once) 190 170 { 191 - unsigned long long cache_size = span; 171 + size_t cache_size = span; 192 172 int ret; 193 173 194 - ret = fill_cache(cache_size, malloc_and_init_memory, memflush, op, 195 - resctrl_val); 174 + ret = fill_cache(cache_size, memflush, op, once); 196 175 if (ret) { 197 176 printf("\n Error in fill cache\n"); 198 177 return -1;
+1 -8
tools/testing/selftests/resctrl/mba_test.c
··· 22 22 * con_mon grp, mon_grp in resctrl FS. 23 23 * For each allocation, run 5 times in order to get average values. 24 24 */ 25 - static int mba_setup(int num, ...) 25 + static int mba_setup(struct resctrl_val_param *p) 26 26 { 27 27 static int runs_per_allocation, allocation = 100; 28 - struct resctrl_val_param *p; 29 28 char allocation_str[64]; 30 - va_list param; 31 29 int ret; 32 - 33 - va_start(param, num); 34 - p = va_arg(param, struct resctrl_val_param *); 35 - va_end(param); 36 30 37 31 if (runs_per_allocation >= NUM_OF_RUNS) 38 32 runs_per_allocation = 0; ··· 148 154 .ctrlgrp = "c1", 149 155 .mongrp = "m1", 150 156 .cpu_no = cpu_no, 151 - .mum_resctrlfs = true, 152 157 .filename = RESULT_FILE_NAME, 153 158 .bw_report = bw_report, 154 159 .setup = mba_setup
+5 -12
tools/testing/selftests/resctrl/mbm_test.c
··· 15 15 #define NUM_OF_RUNS 5 16 16 17 17 static int 18 - show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span) 18 + show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, size_t span) 19 19 { 20 20 unsigned long avg_bw_imc = 0, avg_bw_resc = 0; 21 21 unsigned long sum_bw_imc = 0, sum_bw_resc = 0; ··· 40 40 ksft_print_msg("%s Check MBM diff within %d%%\n", 41 41 ret ? "Fail:" : "Pass:", MAX_DIFF_PERCENT); 42 42 ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per); 43 - ksft_print_msg("Span (MB): %d\n", span); 43 + ksft_print_msg("Span (MB): %zu\n", span / MB); 44 44 ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc); 45 45 ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc); 46 46 47 47 return ret; 48 48 } 49 49 50 - static int check_results(int span) 50 + static int check_results(size_t span) 51 51 { 52 52 unsigned long bw_imc[NUM_OF_RUNS], bw_resc[NUM_OF_RUNS]; 53 53 char temp[1024], *token_array[8]; ··· 86 86 return ret; 87 87 } 88 88 89 - static int mbm_setup(int num, ...) 89 + static int mbm_setup(struct resctrl_val_param *p) 90 90 { 91 - struct resctrl_val_param *p; 92 - va_list param; 93 91 int ret = 0; 94 - 95 - va_start(param, num); 96 - p = va_arg(param, struct resctrl_val_param *); 97 - va_end(param); 98 92 99 93 /* Run NUM_OF_RUNS times */ 100 94 if (p->num_of_runs >= NUM_OF_RUNS) ··· 109 115 remove(RESULT_FILE_NAME); 110 116 } 111 117 112 - int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd) 118 + int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd) 113 119 { 114 120 struct resctrl_val_param param = { 115 121 .resctrl_val = MBM_STR, ··· 117 123 .mongrp = "m1", 118 124 .span = span, 119 125 .cpu_no = cpu_no, 120 - .mum_resctrlfs = true, 121 126 .filename = RESULT_FILE_NAME, 122 127 .bw_report = bw_report, 123 128 .setup = mbm_setup
+7 -10
tools/testing/selftests/resctrl/resctrl.h
··· 3 3 #ifndef RESCTRL_H 4 4 #define RESCTRL_H 5 5 #include <stdio.h> 6 - #include <stdarg.h> 7 6 #include <math.h> 8 7 #include <errno.h> 9 8 #include <sched.h> ··· 42 43 do { \ 43 44 perror(err_msg); \ 44 45 kill(ppid, SIGKILL); \ 46 + umount_resctrlfs(); \ 45 47 exit(EXIT_FAILURE); \ 46 48 } while (0) 47 49 ··· 53 53 * @mongrp: Name of the monitor group (mon grp) 54 54 * @cpu_no: CPU number to which the benchmark would be binded 55 55 * @span: Memory bytes accessed in each benchmark iteration 56 - * @mum_resctrlfs: Should the resctrl FS be remounted? 57 56 * @filename: Name of file to which the o/p should be written 58 57 * @bw_report: Bandwidth report type (reads vs writes) 59 58 * @setup: Call back function to setup test environment ··· 62 63 char ctrlgrp[64]; 63 64 char mongrp[64]; 64 65 int cpu_no; 65 - unsigned long span; 66 - bool mum_resctrlfs; 66 + size_t span; 67 67 char filename[64]; 68 68 char *bw_report; 69 69 unsigned long mask; 70 70 int num_of_runs; 71 - int (*setup)(int num, ...); 71 + int (*setup)(struct resctrl_val_param *param); 72 72 }; 73 73 74 74 #define MBM_STR "mbm" ··· 82 84 int get_vendor(void); 83 85 bool check_resctrlfs_support(void); 84 86 int filter_dmesg(void); 85 - int remount_resctrlfs(bool mum_resctrlfs); 86 87 int get_resource_id(int cpu_no, int *resource_id); 88 + int mount_resctrlfs(void); 87 89 int umount_resctrlfs(void); 88 90 int validate_bw_report_request(char *bw_report); 89 91 bool validate_resctrl_feature_request(const char *resctrl_val); ··· 96 98 char *resctrl_val); 97 99 int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, 98 100 int group_fd, unsigned long flags); 99 - int run_fill_buf(unsigned long span, int malloc_and_init_memory, int memflush, 100 - int op, char *resctrl_va); 101 + int run_fill_buf(size_t span, int memflush, int op, bool once); 101 102 int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param); 102 - int mbm_bw_change(int span, int cpu_no, char *bw_report, char 
**benchmark_cmd); 103 + int mbm_bw_change(size_t span, int cpu_no, char *bw_report, char **benchmark_cmd); 103 104 void tests_cleanup(void); 104 105 void mbm_test_cleanup(void); 105 106 int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd); ··· 117 120 int get_core_sibling(int cpu_no); 118 121 int measure_cache_vals(struct resctrl_val_param *param, int bm_pid); 119 122 int show_cache_info(unsigned long sum_llc_val, int no_of_bits, 120 - unsigned long cache_span, unsigned long max_diff, 123 + size_t cache_span, unsigned long max_diff, 121 124 unsigned long max_diff_percent, unsigned long num_of_runs, 122 125 bool platform, bool cmt); 123 126
+60 -29
tools/testing/selftests/resctrl/resctrl_tests.c
··· 70 70 cat_test_cleanup(); 71 71 } 72 72 73 - static void run_mbm_test(bool has_ben, char **benchmark_cmd, int span, 73 + static void run_mbm_test(char **benchmark_cmd, size_t span, 74 74 int cpu_no, char *bw_report) 75 75 { 76 76 int res; 77 77 78 78 ksft_print_msg("Starting MBM BW change ...\n"); 79 79 80 - if (!validate_resctrl_feature_request(MBM_STR) || (get_vendor() != ARCH_INTEL)) { 81 - ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n"); 80 + res = mount_resctrlfs(); 81 + if (res) { 82 + ksft_exit_fail_msg("Failed to mount resctrl FS\n"); 82 83 return; 83 84 } 84 85 85 - if (!has_ben) 86 - sprintf(benchmark_cmd[5], "%s", MBA_STR); 86 + if (!validate_resctrl_feature_request(MBM_STR) || (get_vendor() != ARCH_INTEL)) { 87 + ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n"); 88 + goto umount; 89 + } 90 + 87 91 res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd); 88 92 ksft_test_result(!res, "MBM: bw change\n"); 89 93 if ((get_vendor() == ARCH_INTEL) && res) 90 94 ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. 
Check BIOS configuration.\n"); 95 + 96 + umount: 97 + umount_resctrlfs(); 91 98 } 92 99 93 - static void run_mba_test(bool has_ben, char **benchmark_cmd, int span, 94 - int cpu_no, char *bw_report) 100 + static void run_mba_test(char **benchmark_cmd, int cpu_no, char *bw_report) 95 101 { 96 102 int res; 97 103 98 104 ksft_print_msg("Starting MBA Schemata change ...\n"); 99 105 100 - if (!validate_resctrl_feature_request(MBA_STR) || (get_vendor() != ARCH_INTEL)) { 101 - ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n"); 106 + res = mount_resctrlfs(); 107 + if (res) { 108 + ksft_exit_fail_msg("Failed to mount resctrl FS\n"); 102 109 return; 103 110 } 104 111 105 - if (!has_ben) 106 - sprintf(benchmark_cmd[1], "%d", span); 112 + if (!validate_resctrl_feature_request(MBA_STR) || (get_vendor() != ARCH_INTEL)) { 113 + ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n"); 114 + goto umount; 115 + } 116 + 107 117 res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd); 108 118 ksft_test_result(!res, "MBA: schemata change\n"); 119 + 120 + umount: 121 + umount_resctrlfs(); 109 122 } 110 123 111 - static void run_cmt_test(bool has_ben, char **benchmark_cmd, int cpu_no) 124 + static void run_cmt_test(char **benchmark_cmd, int cpu_no) 112 125 { 113 126 int res; 114 127 115 128 ksft_print_msg("Starting CMT test ...\n"); 116 - if (!validate_resctrl_feature_request(CMT_STR)) { 117 - ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n"); 129 + 130 + res = mount_resctrlfs(); 131 + if (res) { 132 + ksft_exit_fail_msg("Failed to mount resctrl FS\n"); 118 133 return; 119 134 } 120 135 121 - if (!has_ben) 122 - sprintf(benchmark_cmd[5], "%s", CMT_STR); 136 + if (!validate_resctrl_feature_request(CMT_STR)) { 137 + ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n"); 138 + goto umount; 139 + } 140 + 123 141 res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd); 124 142 ksft_test_result(!res, "CMT: 
test\n"); 125 143 if ((get_vendor() == ARCH_INTEL) && res) 126 144 ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n"); 145 + 146 + umount: 147 + umount_resctrlfs(); 127 148 } 128 149 129 150 static void run_cat_test(int cpu_no, int no_of_bits) ··· 153 132 154 133 ksft_print_msg("Starting CAT test ...\n"); 155 134 135 + res = mount_resctrlfs(); 136 + if (res) { 137 + ksft_exit_fail_msg("Failed to mount resctrl FS\n"); 138 + return; 139 + } 140 + 156 141 if (!validate_resctrl_feature_request(CAT_STR)) { 157 142 ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n"); 158 - return; 143 + goto umount; 159 144 } 160 145 161 146 res = cat_perf_miss_val(cpu_no, no_of_bits, "L3"); 162 147 ksft_test_result(!res, "CAT: test\n"); 148 + 149 + umount: 150 + umount_resctrlfs(); 163 151 } 164 152 165 153 int main(int argc, char **argv) 166 154 { 167 155 bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true; 168 - int c, cpu_no = 1, span = 250, argc_new = argc, i, no_of_bits = 0; 169 156 char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64]; 170 157 char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE]; 158 + int c, cpu_no = 1, argc_new = argc, i, no_of_bits = 0; 171 159 int ben_ind, ben_count, tests = 0; 160 + size_t span = 250 * MB; 172 161 bool cat_test = true; 173 162 174 163 for (i = 0; i < argc; i++) { ··· 263 232 benchmark_cmd[ben_count] = NULL; 264 233 } else { 265 234 /* If no benchmark is given by "-b" argument, use fill_buf. 
*/ 266 - for (i = 0; i < 6; i++) 235 + for (i = 0; i < 5; i++) 267 236 benchmark_cmd[i] = benchmark_cmd_area[i]; 268 237 269 238 strcpy(benchmark_cmd[0], "fill_buf"); 270 - sprintf(benchmark_cmd[1], "%d", span); 239 + sprintf(benchmark_cmd[1], "%zu", span); 271 240 strcpy(benchmark_cmd[2], "1"); 272 - strcpy(benchmark_cmd[3], "1"); 273 - strcpy(benchmark_cmd[4], "0"); 274 - strcpy(benchmark_cmd[5], ""); 275 - benchmark_cmd[6] = NULL; 241 + strcpy(benchmark_cmd[3], "0"); 242 + strcpy(benchmark_cmd[4], "false"); 243 + benchmark_cmd[5] = NULL; 276 244 } 277 245 278 246 sprintf(bw_report, "reads"); ··· 280 250 if (!check_resctrlfs_support()) 281 251 return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n"); 282 252 253 + if (umount_resctrlfs()) 254 + return ksft_exit_skip("resctrl FS unmount failed.\n"); 255 + 283 256 filter_dmesg(); 284 257 285 258 ksft_set_plan(tests ? : 4); 286 259 287 260 if (mbm_test) 288 - run_mbm_test(has_ben, benchmark_cmd, span, cpu_no, bw_report); 261 + run_mbm_test(benchmark_cmd, span, cpu_no, bw_report); 289 262 290 263 if (mba_test) 291 - run_mba_test(has_ben, benchmark_cmd, span, cpu_no, bw_report); 264 + run_mba_test(benchmark_cmd, cpu_no, bw_report); 292 265 293 266 if (cmt_test) 294 - run_cmt_test(has_ben, benchmark_cmd, cpu_no); 267 + run_cmt_test(benchmark_cmd, cpu_no); 295 268 296 269 if (cat_test) 297 270 run_cat_test(cpu_no, no_of_bits); 298 - 299 - umount_resctrlfs(); 300 271 301 272 ksft_finished(); 302 273 }
+1 -6
tools/testing/selftests/resctrl/resctrl_val.c
··· 648 648 return ret; 649 649 } 650 650 651 - ret = remount_resctrlfs(param->mum_resctrlfs); 652 - if (ret) 653 - return ret; 654 - 655 651 /* 656 652 * If benchmark wasn't successfully started by child, then child should 657 653 * kill parent, so save parent's pid ··· 759 763 760 764 /* Test runs until the callback setup() tells the test to stop. */ 761 765 while (1) { 762 - ret = param->setup(1, param); 766 + ret = param->setup(param); 763 767 if (ret == END_OF_TESTS) { 764 768 ret = 0; 765 769 break; ··· 784 788 signal_handler_unregister(); 785 789 out: 786 790 kill(bm_pid, SIGKILL); 787 - umount_resctrlfs(); 788 791 789 792 return ret; 790 793 }
+32 -34
tools/testing/selftests/resctrl/resctrlfs.c
··· 48 48 } 49 49 50 50 /* 51 - * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl 52 - * @mum_resctrlfs: Should the resctrl FS be remounted? 51 + * mount_resctrlfs - Mount resctrl FS at /sys/fs/resctrl 53 52 * 54 - * If not mounted, mount it. 55 - * If mounted and mum_resctrlfs then remount resctrl FS. 56 - * If mounted and !mum_resctrlfs then noop 53 + * Mounts resctrl FS. Fails if resctrl FS is already mounted to avoid 54 + * pre-existing settings interfering with the test results. 57 55 * 58 56 * Return: 0 on success, non-zero on failure 59 57 */ 60 - int remount_resctrlfs(bool mum_resctrlfs) 58 + int mount_resctrlfs(void) 61 59 { 62 - char mountpoint[256]; 63 60 int ret; 64 61 65 - ret = find_resctrl_mount(mountpoint); 66 - if (ret) 67 - strcpy(mountpoint, RESCTRL_PATH); 68 - 69 - if (!ret && mum_resctrlfs && umount(mountpoint)) 70 - ksft_print_msg("Fail: unmounting \"%s\"\n", mountpoint); 71 - 72 - if (!ret && !mum_resctrlfs) 73 - return 0; 62 + ret = find_resctrl_mount(NULL); 63 + if (ret != -ENOENT) 64 + return -1; 74 65 75 66 ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH); 76 67 ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL); ··· 73 82 74 83 int umount_resctrlfs(void) 75 84 { 76 - if (find_resctrl_mount(NULL)) 77 - return 0; 85 + char mountpoint[256]; 86 + int ret; 78 87 79 - if (umount(RESCTRL_PATH)) { 88 + ret = find_resctrl_mount(mountpoint); 89 + if (ret == -ENOENT) 90 + return 0; 91 + if (ret) 92 + return ret; 93 + 94 + if (umount(mountpoint)) { 80 95 perror("# Unable to umount resctrl"); 81 96 82 97 return errno; ··· 302 305 */ 303 306 void run_benchmark(int signum, siginfo_t *info, void *ucontext) 304 307 { 305 - int operation, ret, malloc_and_init_memory, memflush; 306 - unsigned long span, buffer_span; 308 + int operation, ret, memflush; 307 309 char **benchmark_cmd; 308 - char resctrl_val[64]; 310 + size_t span; 311 + bool once; 309 312 FILE *fp; 310 313 311 314 benchmark_cmd = info->si_ptr; ··· 321 324 if 
(strcmp(benchmark_cmd[0], "fill_buf") == 0) { 322 325 /* Execute default fill_buf benchmark */ 323 326 span = strtoul(benchmark_cmd[1], NULL, 10); 324 - malloc_and_init_memory = atoi(benchmark_cmd[2]); 325 - memflush = atoi(benchmark_cmd[3]); 326 - operation = atoi(benchmark_cmd[4]); 327 - sprintf(resctrl_val, "%s", benchmark_cmd[5]); 328 - 329 - if (strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) 330 - buffer_span = span * MB; 327 + memflush = atoi(benchmark_cmd[2]); 328 + operation = atoi(benchmark_cmd[3]); 329 + if (!strcmp(benchmark_cmd[4], "true")) 330 + once = true; 331 + else if (!strcmp(benchmark_cmd[4], "false")) 332 + once = false; 331 333 else 332 - buffer_span = span; 334 + PARENT_EXIT("Invalid once parameter"); 333 335 334 - if (run_fill_buf(buffer_span, malloc_and_init_memory, memflush, 335 - operation, resctrl_val)) 336 + if (run_fill_buf(span, memflush, operation, once)) 336 337 fprintf(stderr, "Error in running fill buffer\n"); 337 338 } else { 338 339 /* Execute specified benchmark */ ··· 606 611 * validate_resctrl_feature_request - Check if requested feature is valid. 607 612 * @resctrl_val: Requested feature 608 613 * 609 - * Return: True if the feature is supported, else false 614 + * Return: True if the feature is supported, else false. False is also 615 + * returned if resctrl FS is not mounted. 610 616 */ 611 617 bool validate_resctrl_feature_request(const char *resctrl_val) 612 618 { ··· 615 619 bool found = false; 616 620 char *res; 617 621 FILE *inf; 622 + int ret; 618 623 619 624 if (!resctrl_val) 620 625 return false; 621 626 622 - if (remount_resctrlfs(false)) 627 + ret = find_resctrl_mount(NULL); 628 + if (ret) 623 629 return false; 624 630 625 631 if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+1 -1
tools/testing/selftests/rseq/Makefile
··· 33 33 $(CC) $(CFLAGS) $< $(LDLIBS) -lrseq -o $@ 34 34 35 35 $(OUTPUT)/basic_percpu_ops_mm_cid_test: basic_percpu_ops_test.c $(TEST_GEN_PROGS_EXTENDED) rseq.h rseq-*.h 36 - $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID_ID $< $(LDLIBS) -lrseq -o $@ 36 + $(CC) $(CFLAGS) -DBUILDOPT_RSEQ_PERCPU_MM_CID $< $(LDLIBS) -lrseq -o $@ 37 37 38 38 $(OUTPUT)/param_test_benchmark: param_test.c $(TEST_GEN_PROGS_EXTENDED) \ 39 39 rseq.h rseq-*.h
+26
tools/testing/selftests/rseq/compiler.h
··· 33 33 #define RSEQ_COMBINE_TOKENS(_tokena, _tokenb) \ 34 34 RSEQ__COMBINE_TOKENS(_tokena, _tokenb) 35 35 36 + #ifdef __cplusplus 37 + #define rseq_unqual_scalar_typeof(x) \ 38 + std::remove_cv<std::remove_reference<decltype(x)>::type>::type 39 + #else 40 + #define rseq_scalar_type_to_expr(type) \ 41 + unsigned type: (unsigned type)0, \ 42 + signed type: (signed type)0 43 + 44 + /* 45 + * Use C11 _Generic to express unqualified type from expression. This removes 46 + * volatile qualifier from expression type. 47 + */ 48 + #define rseq_unqual_scalar_typeof(x) \ 49 + __typeof__( \ 50 + _Generic((x), \ 51 + char: (char)0, \ 52 + rseq_scalar_type_to_expr(char), \ 53 + rseq_scalar_type_to_expr(short), \ 54 + rseq_scalar_type_to_expr(int), \ 55 + rseq_scalar_type_to_expr(long), \ 56 + rseq_scalar_type_to_expr(long long), \ 57 + default: (x) \ 58 + ) \ 59 + ) 60 + #endif 61 + 36 62 #endif /* RSEQ_COMPILER_H_ */
+2 -2
tools/testing/selftests/rseq/rseq-arm.h
··· 66 66 67 67 #define rseq_smp_load_acquire(p) \ 68 68 __extension__ ({ \ 69 - __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ 69 + rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \ 70 70 rseq_smp_mb(); \ 71 71 ____p1; \ 72 72 }) ··· 76 76 #define rseq_smp_store_release(p, v) \ 77 77 do { \ 78 78 rseq_smp_mb(); \ 79 - RSEQ_WRITE_ONCE(*p, v); \ 79 + RSEQ_WRITE_ONCE(*(p), v); \ 80 80 } while (0) 81 81 82 82 #define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
+30 -28
tools/testing/selftests/rseq/rseq-arm64.h
··· 27 27 28 28 #define rseq_smp_load_acquire(p) \ 29 29 __extension__ ({ \ 30 - __typeof(*p) ____p1; \ 31 - switch (sizeof(*p)) { \ 30 + union { rseq_unqual_scalar_typeof(*(p)) __val; char __c[sizeof(*(p))]; } __u; \ 31 + switch (sizeof(*(p))) { \ 32 32 case 1: \ 33 - asm volatile ("ldarb %w0, %1" \ 34 - : "=r" (*(__u8 *)p) \ 35 - : "Q" (*p) : "memory"); \ 33 + __asm__ __volatile__ ("ldarb %w0, %1" \ 34 + : "=r" (*(__u8 *)__u.__c) \ 35 + : "Q" (*(p)) : "memory"); \ 36 36 break; \ 37 37 case 2: \ 38 - asm volatile ("ldarh %w0, %1" \ 39 - : "=r" (*(__u16 *)p) \ 40 - : "Q" (*p) : "memory"); \ 38 + __asm__ __volatile__ ("ldarh %w0, %1" \ 39 + : "=r" (*(__u16 *)__u.__c) \ 40 + : "Q" (*(p)) : "memory"); \ 41 41 break; \ 42 42 case 4: \ 43 - asm volatile ("ldar %w0, %1" \ 44 - : "=r" (*(__u32 *)p) \ 45 - : "Q" (*p) : "memory"); \ 43 + __asm__ __volatile__ ("ldar %w0, %1" \ 44 + : "=r" (*(__u32 *)__u.__c) \ 45 + : "Q" (*(p)) : "memory"); \ 46 46 break; \ 47 47 case 8: \ 48 - asm volatile ("ldar %0, %1" \ 49 - : "=r" (*(__u64 *)p) \ 50 - : "Q" (*p) : "memory"); \ 48 + __asm__ __volatile__ ("ldar %0, %1" \ 49 + : "=r" (*(__u64 *)__u.__c) \ 50 + : "Q" (*(p)) : "memory"); \ 51 51 break; \ 52 52 } \ 53 - ____p1; \ 53 + (rseq_unqual_scalar_typeof(*(p)))__u.__val; \ 54 54 }) 55 55 56 56 #define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb() 57 57 58 58 #define rseq_smp_store_release(p, v) \ 59 59 do { \ 60 - switch (sizeof(*p)) { \ 60 + union { rseq_unqual_scalar_typeof(*(p)) __val; char __c[sizeof(*(p))]; } __u = \ 61 + { .__val = (rseq_unqual_scalar_typeof(*(p))) (v) }; \ 62 + switch (sizeof(*(p))) { \ 61 63 case 1: \ 62 - asm volatile ("stlrb %w1, %0" \ 63 - : "=Q" (*p) \ 64 - : "r" ((__u8)v) \ 64 + __asm__ __volatile__ ("stlrb %w1, %0" \ 65 + : "=Q" (*(p)) \ 66 + : "r" (*(__u8 *)__u.__c) \ 65 67 : "memory"); \ 66 68 break; \ 67 69 case 2: \ 68 - asm volatile ("stlrh %w1, %0" \ 69 - : "=Q" (*p) \ 70 - : "r" ((__u16)v) \ 70 + __asm__ __volatile__ ("stlrh %w1, %0" \ 71 + : 
"=Q" (*(p)) \ 72 + : "r" (*(__u16 *)__u.__c) \ 71 73 : "memory"); \ 72 74 break; \ 73 75 case 4: \ 74 - asm volatile ("stlr %w1, %0" \ 75 - : "=Q" (*p) \ 76 - : "r" ((__u32)v) \ 76 + __asm__ __volatile__ ("stlr %w1, %0" \ 77 + : "=Q" (*(p)) \ 78 + : "r" (*(__u32 *)__u.__c) \ 77 79 : "memory"); \ 78 80 break; \ 79 81 case 8: \ 80 - asm volatile ("stlr %1, %0" \ 81 - : "=Q" (*p) \ 82 - : "r" ((__u64)v) \ 82 + __asm__ __volatile__ ("stlr %1, %0" \ 83 + : "=Q" (*(p)) \ 84 + : "r" (*(__u64 *)__u.__c) \ 83 85 : "memory"); \ 84 86 break; \ 85 87 } \
+2 -2
tools/testing/selftests/rseq/rseq-mips.h
··· 45 45 46 46 #define rseq_smp_load_acquire(p) \ 47 47 __extension__ ({ \ 48 - __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ 48 + rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \ 49 49 rseq_smp_mb(); \ 50 50 ____p1; \ 51 51 }) ··· 55 55 #define rseq_smp_store_release(p, v) \ 56 56 do { \ 57 57 rseq_smp_mb(); \ 58 - RSEQ_WRITE_ONCE(*p, v); \ 58 + RSEQ_WRITE_ONCE(*(p), v); \ 59 59 } while (0) 60 60 61 61 #if _MIPS_SZLONG == 64
+2 -2
tools/testing/selftests/rseq/rseq-ppc.h
··· 23 23 24 24 #define rseq_smp_load_acquire(p) \ 25 25 __extension__ ({ \ 26 - __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ 26 + rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \ 27 27 rseq_smp_lwsync(); \ 28 28 ____p1; \ 29 29 }) ··· 33 33 #define rseq_smp_store_release(p, v) \ 34 34 do { \ 35 35 rseq_smp_lwsync(); \ 36 - RSEQ_WRITE_ONCE(*p, v); \ 36 + RSEQ_WRITE_ONCE(*(p), v); \ 37 37 } while (0) 38 38 39 39 /*
+3 -3
tools/testing/selftests/rseq/rseq-riscv.h
··· 36 36 37 37 #define rseq_smp_load_acquire(p) \ 38 38 __extension__ ({ \ 39 - __typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \ 40 - RISCV_FENCE(r, rw) \ 39 + rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \ 40 + RISCV_FENCE(r, rw); \ 41 41 ____p1; \ 42 42 }) 43 43 ··· 46 46 #define rseq_smp_store_release(p, v) \ 47 47 do { \ 48 48 RISCV_FENCE(rw, w); \ 49 - RSEQ_WRITE_ONCE(*(p), v); \ 49 + RSEQ_WRITE_ONCE(*(p), v); \ 50 50 } while (0) 51 51 52 52 #define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip, \
+2 -2
tools/testing/selftests/rseq/rseq-s390.h
··· 15 15 16 16 #define rseq_smp_load_acquire(p) \ 17 17 __extension__ ({ \ 18 - __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ 18 + rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \ 19 19 rseq_barrier(); \ 20 20 ____p1; \ 21 21 }) ··· 25 25 #define rseq_smp_store_release(p, v) \ 26 26 do { \ 27 27 rseq_barrier(); \ 28 - RSEQ_WRITE_ONCE(*p, v); \ 28 + RSEQ_WRITE_ONCE(*(p), v); \ 29 29 } while (0) 30 30 31 31 #ifdef __s390x__
+2 -2
tools/testing/selftests/rseq/rseq-x86.h
··· 42 42 43 43 #define rseq_smp_load_acquire(p) \ 44 44 __extension__ ({ \ 45 - __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \ 45 + rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p)); \ 46 46 rseq_barrier(); \ 47 47 ____p1; \ 48 48 }) ··· 52 52 #define rseq_smp_store_release(p, v) \ 53 53 do { \ 54 54 rseq_barrier(); \ 55 - RSEQ_WRITE_ONCE(*p, v); \ 55 + RSEQ_WRITE_ONCE(*(p), v); \ 56 56 } while (0) 57 57 58 58 #define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, \
-8
tools/testing/selftests/user_events/Makefile
··· 2 2 CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES) 3 3 LDLIBS += -lrt -lpthread -lm 4 4 5 - # Note: 6 - # This test depends on <linux/user_events.h> exported in uapi 7 - # The following commit removed user_events.h out of uapi: 8 - # commit 5cfff569cab8bf544bab62c911c5d6efd5af5e05 9 - # tracing: Move user_events.h temporarily out of include/uapi 10 - # This test will not compile until user_events.h is added 11 - # back to uapi. 12 - 13 5 TEST_GEN_PROGS = ftrace_test dyn_test perf_test abi_test 14 6 15 7 TEST_FILES := settings