Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'linux_kselftest-next-6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest

Pull kselftest update from Shuah Khan:
"timer test:
- remove duplicate defines
- fixes to improve error reporting

rtc test:
- check rtc alarm status in alarm test

resctrl test:
- add array overrun checks during iMC config parsing and when
reading strings
- fixes and reorganizing code"

* tag 'linux_kselftest-next-6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest: (23 commits)
selftests/resctrl: Replace magic constants used as array size
selftests/resctrl: Keep results from first test run
selftests/resctrl: Do not compare performance counters and resctrl at low bandwidth
selftests/resctrl: Use cache size to determine "fill_buf" buffer size
selftests/resctrl: Ensure measurements skip initialization of default benchmark
selftests/resctrl: Make benchmark parameter passing robust
selftests/resctrl: Remove unused measurement code
selftests/resctrl: Only support measured read operation
selftests/resctrl: Remove "once" parameter required to be false
selftests/resctrl: Make wraparound handling obvious
selftests/resctrl: Protect against array overflow when reading strings
selftests/resctrl: Protect against array overrun during iMC config parsing
selftests/resctrl: Fix memory overflow due to unhandled wraparound
selftests/resctrl: Print accurate buffer size as part of MBM results
selftests/resctrl: Make functions only used in same file static
selftests: Add a test mangling with uc_sigmask
selftests: Rename sigaltstack to generic signal
selftest: rtc: Add to check rtc alarm status for alarm related test
selftests:timers: remove local CLOCKID defines
selftests: timers: Remove unneeded semicolon
...

+703 -571
+9
Documentation/dev-tools/kselftest.rst
··· 31 31 userspace may wish to use the `Test Harness`_. Tests that need to be 32 32 run in kernel space may wish to use a `Test Module`_. 33 33 34 + Documentation on the tests 35 + ========================== 36 + 37 + For documentation on the kselftests themselves, see: 38 + 39 + .. toctree:: 40 + 41 + testing-devices 42 + 34 43 Running the selftests (hotplug tests are run in limited mode) 35 44 ============================================================= 36 45
+47
Documentation/dev-tools/testing-devices.rst
··· 1 + .. SPDX-License-Identifier: GPL-2.0 2 + .. Copyright (c) 2024 Collabora Ltd 3 + 4 + ============================= 5 + Device testing with kselftest 6 + ============================= 7 + 8 + 9 + There are a few different kselftests available for testing devices generically, 10 + with some overlap in coverage and different requirements. This document aims to 11 + give an overview of each one. 12 + 13 + Note: Paths in this document are relative to the kselftest folder 14 + (``tools/testing/selftests``). 15 + 16 + Device oriented kselftests: 17 + 18 + * Devicetree (``dt``) 19 + 20 + * **Coverage**: Probe status for devices described in Devicetree 21 + * **Requirements**: None 22 + 23 + * Error logs (``devices/error_logs``) 24 + 25 + * **Coverage**: Error (or more critical) log messages presence coming from any 26 + device 27 + * **Requirements**: None 28 + 29 + * Discoverable bus (``devices/probe``) 30 + 31 + * **Coverage**: Presence and probe status of USB or PCI devices that have been 32 + described in the reference file 33 + * **Requirements**: Manually describe the devices that should be tested in a 34 + YAML reference file (see ``devices/probe/boards/google,spherion.yaml`` for 35 + an example) 36 + 37 + * Exist (``devices/exist``) 38 + 39 + * **Coverage**: Presence of all devices 40 + * **Requirements**: Generate the reference (see ``devices/exist/README.rst`` 41 + for details) on a known-good kernel 42 + 43 + Therefore, the suggestion is to enable the error log and devicetree tests on all 44 + (DT-based) platforms, since they don't have any requirements. Then to greatly 45 + improve coverage, generate the reference for each platform and enable the exist 46 + test. The discoverable bus test can be used to verify the probe status of 47 + specific USB or PCI devices, but is probably not worth it for most cases.
+1 -1
tools/testing/selftests/Makefile
··· 91 91 TARGETS += sched_ext 92 92 TARGETS += seccomp 93 93 TARGETS += sgx 94 - TARGETS += sigaltstack 94 + TARGETS += signal 95 95 TARGETS += size 96 96 TARGETS += sparc64 97 97 TARGETS += splice
+14 -23
tools/testing/selftests/resctrl/cmt_test.c
··· 99 99 } 100 100 101 101 /* Field 3 is llc occ resc value */ 102 - if (runs > 0) 103 - sum_llc_occu_resc += strtoul(token_array[3], NULL, 0); 102 + sum_llc_occu_resc += strtoul(token_array[3], NULL, 0); 104 103 runs++; 105 104 } 106 105 fclose(fp); 107 106 108 107 return show_results_info(sum_llc_occu_resc, no_of_bits, span, 109 - MAX_DIFF, MAX_DIFF_PERCENT, runs - 1, true); 108 + MAX_DIFF, MAX_DIFF_PERCENT, runs, true); 110 109 } 111 110 112 111 static void cmt_test_cleanup(void) ··· 115 116 116 117 static int cmt_run_test(const struct resctrl_test *test, const struct user_params *uparams) 117 118 { 118 - const char * const *cmd = uparams->benchmark_cmd; 119 - const char *new_cmd[BENCHMARK_ARGS]; 119 + struct fill_buf_param fill_buf = {}; 120 120 unsigned long cache_total_size = 0; 121 121 int n = uparams->bits ? : 5; 122 122 unsigned long long_mask; 123 - char *span_str = NULL; 124 123 int count_of_bits; 125 124 size_t span; 126 - int ret, i; 125 + int ret; 127 126 128 127 ret = get_full_cbm("L3", &long_mask); 129 128 if (ret) ··· 152 155 153 156 span = cache_portion_size(cache_total_size, param.mask, long_mask); 154 157 155 - if (strcmp(cmd[0], "fill_buf") == 0) { 156 - /* Duplicate the command to be able to replace span in it */ 157 - for (i = 0; uparams->benchmark_cmd[i]; i++) 158 - new_cmd[i] = uparams->benchmark_cmd[i]; 159 - new_cmd[i] = NULL; 160 - 161 - ret = asprintf(&span_str, "%zu", span); 162 - if (ret < 0) 163 - return -1; 164 - new_cmd[1] = span_str; 165 - cmd = new_cmd; 158 + if (uparams->fill_buf) { 159 + fill_buf.buf_size = span; 160 + fill_buf.memflush = uparams->fill_buf->memflush; 161 + param.fill_buf = &fill_buf; 162 + } else if (!uparams->benchmark_cmd[0]) { 163 + fill_buf.buf_size = span; 164 + fill_buf.memflush = true; 165 + param.fill_buf = &fill_buf; 166 166 } 167 167 168 168 remove(RESULT_FILE_NAME); 169 169 170 - ret = resctrl_val(test, uparams, cmd, &param); 170 + ret = resctrl_val(test, uparams, &param); 171 171 if (ret) 172 - 
goto out; 172 + return ret; 173 173 174 174 ret = check_results(&param, span, n); 175 175 if (ret && (get_vendor() == ARCH_INTEL)) 176 176 ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n"); 177 - 178 - out: 179 - free(span_str); 180 177 181 178 return ret; 182 179 }
+10 -35
tools/testing/selftests/resctrl/fill_buf.c
··· 88 88 return sum; 89 89 } 90 90 91 - static void fill_one_span_write(unsigned char *buf, size_t buf_size) 92 - { 93 - unsigned char *end_ptr = buf + buf_size; 94 - unsigned char *p; 95 - 96 - p = buf; 97 - while (p < end_ptr) { 98 - *p = '1'; 99 - p += (CL_SIZE / 2); 100 - } 101 - } 102 - 103 91 void fill_cache_read(unsigned char *buf, size_t buf_size, bool once) 104 92 { 105 93 int ret = 0; ··· 102 114 *value_sink = ret; 103 115 } 104 116 105 - static void fill_cache_write(unsigned char *buf, size_t buf_size, bool once) 106 - { 107 - while (1) { 108 - fill_one_span_write(buf, buf_size); 109 - if (once) 110 - break; 111 - } 112 - } 113 - 114 - unsigned char *alloc_buffer(size_t buf_size, int memflush) 117 + unsigned char *alloc_buffer(size_t buf_size, bool memflush) 115 118 { 116 119 void *buf = NULL; 117 120 uint64_t *p64; 118 - size_t s64; 121 + ssize_t s64; 119 122 int ret; 120 123 121 124 ret = posix_memalign(&buf, PAGE_SIZE, buf_size); ··· 130 151 return buf; 131 152 } 132 153 133 - int run_fill_buf(size_t buf_size, int memflush, int op, bool once) 154 + ssize_t get_fill_buf_size(int cpu_no, const char *cache_type) 134 155 { 135 - unsigned char *buf; 156 + unsigned long cache_total_size = 0; 157 + int ret; 136 158 137 - buf = alloc_buffer(buf_size, memflush); 138 - if (!buf) 139 - return -1; 159 + ret = get_cache_size(cpu_no, cache_type, &cache_total_size); 160 + if (ret) 161 + return ret; 140 162 141 - if (op == 0) 142 - fill_cache_read(buf, buf_size, once); 143 - else 144 - fill_cache_write(buf, buf_size, once); 145 - free(buf); 146 - 147 - return 0; 163 + return cache_total_size * 2 > MINIMUM_SPAN ? 164 + cache_total_size * 2 : MINIMUM_SPAN; 148 165 }
+38 -16
tools/testing/selftests/resctrl/mba_test.c
··· 21 21 { 22 22 int ret; 23 23 24 - ret = initialize_mem_bw_imc(); 24 + ret = initialize_read_mem_bw_imc(); 25 25 if (ret) 26 26 return ret; 27 27 ··· 39 39 const struct user_params *uparams, 40 40 struct resctrl_val_param *p) 41 41 { 42 - static int runs_per_allocation, allocation = 100; 42 + static unsigned int allocation = ALLOCATION_MIN; 43 + static int runs_per_allocation; 43 44 char allocation_str[64]; 44 45 int ret; 45 46 ··· 51 50 if (runs_per_allocation++ != 0) 52 51 return 0; 53 52 54 - if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX) 53 + if (allocation > ALLOCATION_MAX) 55 54 return END_OF_TESTS; 56 55 57 56 sprintf(allocation_str, "%d", allocation); ··· 60 59 if (ret < 0) 61 60 return ret; 62 61 63 - allocation -= ALLOCATION_STEP; 62 + allocation += ALLOCATION_STEP; 64 63 65 64 return 0; 66 65 } ··· 68 67 static int mba_measure(const struct user_params *uparams, 69 68 struct resctrl_val_param *param, pid_t bm_pid) 70 69 { 71 - return measure_mem_bw(uparams, param, bm_pid, "reads"); 70 + return measure_read_mem_bw(uparams, param, bm_pid); 72 71 } 73 72 74 73 static bool show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc) 75 74 { 76 - int allocation, runs; 75 + unsigned int allocation; 77 76 bool ret = false; 77 + int runs; 78 78 79 79 ksft_print_msg("Results are displayed in (MB)\n"); 80 80 /* Memory bandwidth from 100% down to 10% */ ··· 86 84 int avg_diff_per; 87 85 float avg_diff; 88 86 89 - /* 90 - * The first run is discarded due to inaccurate value from 91 - * phase transition. 
92 - */ 93 - for (runs = NUM_OF_RUNS * allocation + 1; 87 + for (runs = NUM_OF_RUNS * allocation; 94 88 runs < NUM_OF_RUNS * allocation + NUM_OF_RUNS ; runs++) { 95 89 sum_bw_imc += bw_imc[runs]; 96 90 sum_bw_resc += bw_resc[runs]; 97 91 } 98 92 99 - avg_bw_imc = sum_bw_imc / (NUM_OF_RUNS - 1); 100 - avg_bw_resc = sum_bw_resc / (NUM_OF_RUNS - 1); 93 + avg_bw_imc = sum_bw_imc / NUM_OF_RUNS; 94 + avg_bw_resc = sum_bw_resc / NUM_OF_RUNS; 95 + if (avg_bw_imc < THROTTLE_THRESHOLD || avg_bw_resc < THROTTLE_THRESHOLD) { 96 + ksft_print_msg("Bandwidth below threshold (%d MiB). Dropping results from MBA schemata %u.\n", 97 + THROTTLE_THRESHOLD, 98 + ALLOCATION_MIN + ALLOCATION_STEP * allocation); 99 + continue; 100 + } 101 + 101 102 avg_diff = (float)labs(avg_bw_resc - avg_bw_imc) / avg_bw_imc; 102 103 avg_diff_per = (int)(avg_diff * 100); 103 104 ··· 108 103 avg_diff_per > MAX_DIFF_PERCENT ? 109 104 "Fail:" : "Pass:", 110 105 MAX_DIFF_PERCENT, 111 - ALLOCATION_MAX - ALLOCATION_STEP * allocation); 106 + ALLOCATION_MIN + ALLOCATION_STEP * allocation); 112 107 113 108 ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per); 114 109 ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc); ··· 127 122 128 123 static int check_results(void) 129 124 { 125 + unsigned long bw_resc[NUM_OF_RUNS * ALLOCATION_MAX / ALLOCATION_STEP]; 126 + unsigned long bw_imc[NUM_OF_RUNS * ALLOCATION_MAX / ALLOCATION_STEP]; 130 127 char *token_array[8], output[] = RESULT_FILE_NAME, temp[512]; 131 - unsigned long bw_imc[1024], bw_resc[1024]; 132 128 int runs; 133 129 FILE *fp; 134 130 ··· 176 170 .setup = mba_setup, 177 171 .measure = mba_measure, 178 172 }; 173 + struct fill_buf_param fill_buf = {}; 179 174 int ret; 180 175 181 176 remove(RESULT_FILE_NAME); 182 177 183 - ret = resctrl_val(test, uparams, uparams->benchmark_cmd, &param); 178 + if (uparams->fill_buf) { 179 + fill_buf.buf_size = uparams->fill_buf->buf_size; 180 + fill_buf.memflush = uparams->fill_buf->memflush; 181 + param.fill_buf = &fill_buf; 182 + 
} else if (!uparams->benchmark_cmd[0]) { 183 + ssize_t buf_size; 184 + 185 + buf_size = get_fill_buf_size(uparams->cpu, "L3"); 186 + if (buf_size < 0) 187 + return buf_size; 188 + fill_buf.buf_size = buf_size; 189 + fill_buf.memflush = true; 190 + param.fill_buf = &fill_buf; 191 + } 192 + 193 + ret = resctrl_val(test, uparams, &param); 184 194 if (ret) 185 195 return ret; 186 196
+25 -12
tools/testing/selftests/resctrl/mbm_test.c
··· 22 22 int runs, ret, avg_diff_per; 23 23 float avg_diff = 0; 24 24 25 - /* 26 - * Discard the first value which is inaccurate due to monitoring setup 27 - * transition phase. 28 - */ 29 - for (runs = 1; runs < NUM_OF_RUNS ; runs++) { 25 + for (runs = 0; runs < NUM_OF_RUNS; runs++) { 30 26 sum_bw_imc += bw_imc[runs]; 31 27 sum_bw_resc += bw_resc[runs]; 32 28 } 33 29 34 - avg_bw_imc = sum_bw_imc / 4; 35 - avg_bw_resc = sum_bw_resc / 4; 30 + avg_bw_imc = sum_bw_imc / NUM_OF_RUNS; 31 + avg_bw_resc = sum_bw_resc / NUM_OF_RUNS; 36 32 avg_diff = (float)labs(avg_bw_resc - avg_bw_imc) / avg_bw_imc; 37 33 avg_diff_per = (int)(avg_diff * 100); 38 34 ··· 36 40 ksft_print_msg("%s Check MBM diff within %d%%\n", 37 41 ret ? "Fail:" : "Pass:", MAX_DIFF_PERCENT); 38 42 ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per); 39 - ksft_print_msg("Span (MB): %zu\n", span / MB); 43 + if (span) 44 + ksft_print_msg("Span (MB): %zu\n", span / MB); 40 45 ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc); 41 46 ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc); 42 47 ··· 87 90 { 88 91 int ret; 89 92 90 - ret = initialize_mem_bw_imc(); 93 + ret = initialize_read_mem_bw_imc(); 91 94 if (ret) 92 95 return ret; 93 96 ··· 118 121 static int mbm_measure(const struct user_params *uparams, 119 122 struct resctrl_val_param *param, pid_t bm_pid) 120 123 { 121 - return measure_mem_bw(uparams, param, bm_pid, "reads"); 124 + return measure_read_mem_bw(uparams, param, bm_pid); 122 125 } 123 126 124 127 static void mbm_test_cleanup(void) ··· 135 138 .setup = mbm_setup, 136 139 .measure = mbm_measure, 137 140 }; 141 + struct fill_buf_param fill_buf = {}; 138 142 int ret; 139 143 140 144 remove(RESULT_FILE_NAME); 141 145 142 - ret = resctrl_val(test, uparams, uparams->benchmark_cmd, &param); 146 + if (uparams->fill_buf) { 147 + fill_buf.buf_size = uparams->fill_buf->buf_size; 148 + fill_buf.memflush = uparams->fill_buf->memflush; 149 + param.fill_buf = &fill_buf; 150 + } else if (!uparams->benchmark_cmd[0]) 
{ 151 + ssize_t buf_size; 152 + 153 + buf_size = get_fill_buf_size(uparams->cpu, "L3"); 154 + if (buf_size < 0) 155 + return buf_size; 156 + fill_buf.buf_size = buf_size; 157 + fill_buf.memflush = true; 158 + param.fill_buf = &fill_buf; 159 + } 160 + 161 + ret = resctrl_val(test, uparams, &param); 143 162 if (ret) 144 163 return ret; 145 164 146 - ret = check_results(DEFAULT_SPAN); 165 + ret = check_results(param.fill_buf ? param.fill_buf->buf_size : 0); 147 166 if (ret && (get_vendor() == ARCH_INTEL)) 148 167 ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n"); 149 168
+57 -22
tools/testing/selftests/resctrl/resctrl.h
··· 41 41 42 42 #define BENCHMARK_ARGS 64 43 43 44 - #define DEFAULT_SPAN (250 * MB) 44 + #define MINIMUM_SPAN (250 * MB) 45 + 46 + /* 47 + * Memory bandwidth (in MiB) below which the bandwidth comparisons 48 + * between iMC and resctrl are considered unreliable. For example RAS 49 + * features or memory performance features that generate memory traffic 50 + * may drive accesses that are counted differently by performance counters 51 + * and MBM respectively, for instance generating "overhead" traffic which 52 + * is not counted against any specific RMID. 53 + */ 54 + #define THROTTLE_THRESHOLD 750 55 + 56 + /* 57 + * fill_buf_param: "fill_buf" benchmark parameters 58 + * @buf_size: Size (in bytes) of buffer used in benchmark. 59 + * "fill_buf" allocates and initializes buffer of 60 + * @buf_size. User can change value via command line. 61 + * @memflush: If false the buffer will not be flushed after 62 + * allocation and initialization, otherwise the 63 + * buffer will be flushed. User can change value via 64 + * command line (via integers with 0 interpreted as 65 + * false and anything else as true). 66 + */ 67 + struct fill_buf_param { 68 + size_t buf_size; 69 + bool memflush; 70 + }; 45 71 46 72 /* 47 73 * user_params: User supplied parameters 48 74 * @cpu: CPU number to which the benchmark will be bound to 49 75 * @bits: Number of bits used for cache allocation size 50 76 * @benchmark_cmd: Benchmark command to run during (some of the) tests 77 + * @fill_buf: Pointer to user provided parameters for "fill_buf", 78 + * NULL if user did not provide parameters and test 79 + * specific defaults should be used. 
51 80 */ 52 81 struct user_params { 53 82 int cpu; 54 83 int bits; 55 84 const char *benchmark_cmd[BENCHMARK_ARGS]; 85 + const struct fill_buf_param *fill_buf; 56 86 }; 57 87 58 88 /* ··· 117 87 * @init: Callback function to initialize test environment 118 88 * @setup: Callback function to setup per test run environment 119 89 * @measure: Callback that performs the measurement (a single test) 90 + * @fill_buf: Parameters for default "fill_buf" benchmark. 91 + * Initialized with user provided parameters, possibly 92 + * adapted to be relevant to the test. If user does 93 + * not provide parameters for "fill_buf" nor a 94 + * replacement benchmark then initialized with defaults 95 + * appropriate for test. NULL if user provided 96 + * benchmark. 120 97 */ 121 98 struct resctrl_val_param { 122 - const char *ctrlgrp; 123 - const char *mongrp; 124 - char filename[64]; 125 - unsigned long mask; 126 - int num_of_runs; 127 - int (*init)(const struct resctrl_val_param *param, 128 - int domain_id); 129 - int (*setup)(const struct resctrl_test *test, 130 - const struct user_params *uparams, 131 - struct resctrl_val_param *param); 132 - int (*measure)(const struct user_params *uparams, 133 - struct resctrl_val_param *param, 134 - pid_t bm_pid); 99 + const char *ctrlgrp; 100 + const char *mongrp; 101 + char filename[64]; 102 + unsigned long mask; 103 + int num_of_runs; 104 + int (*init)(const struct resctrl_val_param *param, 105 + int domain_id); 106 + int (*setup)(const struct resctrl_test *test, 107 + const struct user_params *uparams, 108 + struct resctrl_val_param *param); 109 + int (*measure)(const struct user_params *uparams, 110 + struct resctrl_val_param *param, 111 + pid_t bm_pid); 112 + struct fill_buf_param *fill_buf; 135 113 }; 136 114 137 115 struct perf_event_read { ··· 164 126 int get_domain_id(const char *resource, int cpu_no, int *domain_id); 165 127 int mount_resctrlfs(void); 166 128 int umount_resctrlfs(void); 167 - const char *get_bw_report_type(const char 
*bw_report); 168 129 bool resctrl_resource_exists(const char *resource); 169 130 bool resctrl_mon_feature_exists(const char *resource, const char *feature); 170 131 bool resource_info_file_exists(const char *resource, const char *file); ··· 176 139 int write_bm_pid_to_resctrl(pid_t bm_pid, const char *ctrlgrp, const char *mongrp); 177 140 int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, 178 141 int group_fd, unsigned long flags); 179 - unsigned char *alloc_buffer(size_t buf_size, int memflush); 142 + unsigned char *alloc_buffer(size_t buf_size, bool memflush); 180 143 void mem_flush(unsigned char *buf, size_t buf_size); 181 144 void fill_cache_read(unsigned char *buf, size_t buf_size, bool once); 182 - int run_fill_buf(size_t buf_size, int memflush, int op, bool once); 183 - int initialize_mem_bw_imc(void); 184 - int measure_mem_bw(const struct user_params *uparams, 185 - struct resctrl_val_param *param, pid_t bm_pid, 186 - const char *bw_report); 145 + ssize_t get_fill_buf_size(int cpu_no, const char *cache_type); 146 + int initialize_read_mem_bw_imc(void); 147 + int measure_read_mem_bw(const struct user_params *uparams, 148 + struct resctrl_val_param *param, pid_t bm_pid); 187 149 void initialize_mem_bw_resctrl(const struct resctrl_val_param *param, 188 150 int domain_id); 189 151 int resctrl_val(const struct resctrl_test *test, 190 152 const struct user_params *uparams, 191 - const char * const *benchmark_cmd, 192 153 struct resctrl_val_param *param); 193 154 unsigned long create_bit_mask(unsigned int start, unsigned int len); 194 155 unsigned int count_contiguous_bits(unsigned long val, unsigned int *start);
+79 -16
tools/testing/selftests/resctrl/resctrl_tests.c
··· 148 148 test_cleanup(test); 149 149 } 150 150 151 + /* 152 + * Allocate and initialize a struct fill_buf_param with user provided 153 + * (via "-b fill_buf <fill_buf parameters>") parameters. 154 + * 155 + * Use defaults (that may not be appropriate for all tests) for any 156 + * fill_buf parameters omitted by the user. 157 + * 158 + * Historically it may have been possible for user space to provide 159 + * additional parameters, "operation" ("read" vs "write") in 160 + * benchmark_cmd[3] and "once" (run "once" or until terminated) in 161 + * benchmark_cmd[4]. Changing these parameters have never been 162 + * supported with the default of "read" operation and running until 163 + * terminated built into the tests. Any unsupported values for 164 + * (original) "fill_buf" parameters are treated as failure. 165 + * 166 + * Return: On failure, forcibly exits the test on any parsing failure, 167 + * returns NULL if no parsing needed (user did not actually provide 168 + * "-b fill_buf"). 169 + * On success, returns pointer to newly allocated and fully 170 + * initialized struct fill_buf_param that caller must free. 
171 + */ 172 + static struct fill_buf_param *alloc_fill_buf_param(struct user_params *uparams) 173 + { 174 + struct fill_buf_param *fill_param = NULL; 175 + char *endptr = NULL; 176 + 177 + if (!uparams->benchmark_cmd[0] || strcmp(uparams->benchmark_cmd[0], "fill_buf")) 178 + return NULL; 179 + 180 + fill_param = malloc(sizeof(*fill_param)); 181 + if (!fill_param) 182 + ksft_exit_skip("Unable to allocate memory for fill_buf parameters.\n"); 183 + 184 + if (uparams->benchmark_cmd[1] && *uparams->benchmark_cmd[1] != '\0') { 185 + errno = 0; 186 + fill_param->buf_size = strtoul(uparams->benchmark_cmd[1], &endptr, 10); 187 + if (errno || *endptr != '\0') { 188 + free(fill_param); 189 + ksft_exit_skip("Unable to parse benchmark buffer size.\n"); 190 + } 191 + } else { 192 + fill_param->buf_size = MINIMUM_SPAN; 193 + } 194 + 195 + if (uparams->benchmark_cmd[2] && *uparams->benchmark_cmd[2] != '\0') { 196 + errno = 0; 197 + fill_param->memflush = strtol(uparams->benchmark_cmd[2], &endptr, 10) != 0; 198 + if (errno || *endptr != '\0') { 199 + free(fill_param); 200 + ksft_exit_skip("Unable to parse benchmark memflush parameter.\n"); 201 + } 202 + } else { 203 + fill_param->memflush = true; 204 + } 205 + 206 + if (uparams->benchmark_cmd[3] && *uparams->benchmark_cmd[3] != '\0') { 207 + if (strcmp(uparams->benchmark_cmd[3], "0")) { 208 + free(fill_param); 209 + ksft_exit_skip("Only read operations supported.\n"); 210 + } 211 + } 212 + 213 + if (uparams->benchmark_cmd[4] && *uparams->benchmark_cmd[4] != '\0') { 214 + if (strcmp(uparams->benchmark_cmd[4], "false")) { 215 + free(fill_param); 216 + ksft_exit_skip("fill_buf is required to run until termination.\n"); 217 + } 218 + } 219 + 220 + return fill_param; 221 + } 222 + 151 223 static void init_user_params(struct user_params *uparams) 152 224 { 153 225 memset(uparams, 0, sizeof(*uparams)); ··· 230 158 231 159 int main(int argc, char **argv) 232 160 { 161 + struct fill_buf_param *fill_param = NULL; 233 162 int tests = 
ARRAY_SIZE(resctrl_tests); 234 163 bool test_param_seen = false; 235 164 struct user_params uparams; 236 - char *span_str = NULL; 237 - int ret, c, i; 165 + int c, i; 238 166 239 167 init_user_params(&uparams); 240 168 ··· 311 239 } 312 240 last_arg: 313 241 242 + fill_param = alloc_fill_buf_param(&uparams); 243 + if (fill_param) 244 + uparams.fill_buf = fill_param; 245 + 314 246 ksft_print_header(); 315 247 316 248 /* ··· 333 257 334 258 filter_dmesg(); 335 259 336 - if (!uparams.benchmark_cmd[0]) { 337 - /* If no benchmark is given by "-b" argument, use fill_buf. */ 338 - uparams.benchmark_cmd[0] = "fill_buf"; 339 - ret = asprintf(&span_str, "%u", DEFAULT_SPAN); 340 - if (ret < 0) 341 - ksft_exit_fail_msg("Out of memory!\n"); 342 - uparams.benchmark_cmd[1] = span_str; 343 - uparams.benchmark_cmd[2] = "1"; 344 - uparams.benchmark_cmd[3] = "0"; 345 - uparams.benchmark_cmd[4] = "false"; 346 - uparams.benchmark_cmd[5] = NULL; 347 - } 348 - 349 260 ksft_set_plan(tests); 350 261 351 262 for (i = 0; i < ARRAY_SIZE(resctrl_tests); i++) 352 263 run_single_test(resctrl_tests[i], &uparams); 353 264 354 - free(span_str); 265 + free(fill_param); 355 266 ksft_finished(); 356 267 }
+132 -319
tools/testing/selftests/resctrl/resctrl_val.c
··· 12 12 13 13 #define UNCORE_IMC "uncore_imc" 14 14 #define READ_FILE_NAME "events/cas_count_read" 15 - #define WRITE_FILE_NAME "events/cas_count_write" 16 15 #define DYN_PMU_PATH "/sys/bus/event_source/devices" 17 16 #define SCALE 0.00006103515625 18 17 #define MAX_IMCS 20 19 18 #define MAX_TOKENS 5 20 - #define READ 0 21 - #define WRITE 1 22 19 23 20 #define CON_MBM_LOCAL_BYTES_PATH \ 24 21 "%s/%s/mon_data/mon_L3_%02d/mbm_local_bytes" ··· 38 41 39 42 static char mbm_total_path[1024]; 40 43 static int imcs; 41 - static struct imc_counter_config imc_counters_config[MAX_IMCS][2]; 44 + static struct imc_counter_config imc_counters_config[MAX_IMCS]; 42 45 static const struct resctrl_test *current_test; 43 46 44 - void membw_initialize_perf_event_attr(int i, int j) 47 + static void read_mem_bw_initialize_perf_event_attr(int i) 45 48 { 46 - memset(&imc_counters_config[i][j].pe, 0, 49 + memset(&imc_counters_config[i].pe, 0, 47 50 sizeof(struct perf_event_attr)); 48 - imc_counters_config[i][j].pe.type = imc_counters_config[i][j].type; 49 - imc_counters_config[i][j].pe.size = sizeof(struct perf_event_attr); 50 - imc_counters_config[i][j].pe.disabled = 1; 51 - imc_counters_config[i][j].pe.inherit = 1; 52 - imc_counters_config[i][j].pe.exclude_guest = 0; 53 - imc_counters_config[i][j].pe.config = 54 - imc_counters_config[i][j].umask << 8 | 55 - imc_counters_config[i][j].event; 56 - imc_counters_config[i][j].pe.sample_type = PERF_SAMPLE_IDENTIFIER; 57 - imc_counters_config[i][j].pe.read_format = 51 + imc_counters_config[i].pe.type = imc_counters_config[i].type; 52 + imc_counters_config[i].pe.size = sizeof(struct perf_event_attr); 53 + imc_counters_config[i].pe.disabled = 1; 54 + imc_counters_config[i].pe.inherit = 1; 55 + imc_counters_config[i].pe.exclude_guest = 0; 56 + imc_counters_config[i].pe.config = 57 + imc_counters_config[i].umask << 8 | 58 + imc_counters_config[i].event; 59 + imc_counters_config[i].pe.sample_type = PERF_SAMPLE_IDENTIFIER; 60 + 
imc_counters_config[i].pe.read_format = 58 61 PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; 59 62 } 60 63 61 - void membw_ioctl_perf_event_ioc_reset_enable(int i, int j) 64 + static void read_mem_bw_ioctl_perf_event_ioc_reset_enable(int i) 62 65 { 63 - ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_RESET, 0); 64 - ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_ENABLE, 0); 66 + ioctl(imc_counters_config[i].fd, PERF_EVENT_IOC_RESET, 0); 67 + ioctl(imc_counters_config[i].fd, PERF_EVENT_IOC_ENABLE, 0); 65 68 } 66 69 67 - void membw_ioctl_perf_event_ioc_disable(int i, int j) 70 + static void read_mem_bw_ioctl_perf_event_ioc_disable(int i) 68 71 { 69 - ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_DISABLE, 0); 72 + ioctl(imc_counters_config[i].fd, PERF_EVENT_IOC_DISABLE, 0); 70 73 } 71 74 72 75 /* 73 - * get_event_and_umask: Parse config into event and umask 76 + * get_read_event_and_umask: Parse config into event and umask 74 77 * @cas_count_cfg: Config 75 78 * @count: iMC number 76 - * @op: Operation (read/write) 77 79 */ 78 - void get_event_and_umask(char *cas_count_cfg, int count, bool op) 80 + static void get_read_event_and_umask(char *cas_count_cfg, int count) 79 81 { 80 82 char *token[MAX_TOKENS]; 81 83 int i = 0; 82 84 83 - strcat(cas_count_cfg, ","); 84 85 token[0] = strtok(cas_count_cfg, "=,"); 85 86 86 87 for (i = 1; i < MAX_TOKENS; i++) 87 88 token[i] = strtok(NULL, "=,"); 88 89 89 - for (i = 0; i < MAX_TOKENS; i++) { 90 + for (i = 0; i < MAX_TOKENS - 1; i++) { 90 91 if (!token[i]) 91 92 break; 92 - if (strcmp(token[i], "event") == 0) { 93 - if (op == READ) 94 - imc_counters_config[count][READ].event = 95 - strtol(token[i + 1], NULL, 16); 96 - else 97 - imc_counters_config[count][WRITE].event = 98 - strtol(token[i + 1], NULL, 16); 99 - } 100 - if (strcmp(token[i], "umask") == 0) { 101 - if (op == READ) 102 - imc_counters_config[count][READ].umask = 103 - strtol(token[i + 1], NULL, 16); 104 - else 105 - 
imc_counters_config[count][WRITE].umask = 106 - strtol(token[i + 1], NULL, 16); 107 - } 93 + if (strcmp(token[i], "event") == 0) 94 + imc_counters_config[count].event = strtol(token[i + 1], NULL, 16); 95 + if (strcmp(token[i], "umask") == 0) 96 + imc_counters_config[count].umask = strtol(token[i + 1], NULL, 16); 108 97 } 109 98 } 110 99 111 - static int open_perf_event(int i, int cpu_no, int j) 100 + static int open_perf_read_event(int i, int cpu_no) 112 101 { 113 - imc_counters_config[i][j].fd = 114 - perf_event_open(&imc_counters_config[i][j].pe, -1, cpu_no, -1, 102 + imc_counters_config[i].fd = 103 + perf_event_open(&imc_counters_config[i].pe, -1, cpu_no, -1, 115 104 PERF_FLAG_FD_CLOEXEC); 116 105 117 - if (imc_counters_config[i][j].fd == -1) { 106 + if (imc_counters_config[i].fd == -1) { 118 107 fprintf(stderr, "Error opening leader %llx\n", 119 - imc_counters_config[i][j].pe.config); 108 + imc_counters_config[i].pe.config); 120 109 121 110 return -1; 122 111 } ··· 110 127 return 0; 111 128 } 112 129 113 - /* Get type and config (read and write) of an iMC counter */ 130 + /* Get type and config of an iMC counter's read event. 
*/ 114 131 static int read_from_imc_dir(char *imc_dir, int count) 115 132 { 116 133 char cas_count_cfg[1024], imc_counter_cfg[1024], imc_counter_type[1024]; ··· 124 141 125 142 return -1; 126 143 } 127 - if (fscanf(fp, "%u", &imc_counters_config[count][READ].type) <= 0) { 144 + if (fscanf(fp, "%u", &imc_counters_config[count].type) <= 0) { 128 145 ksft_perror("Could not get iMC type"); 129 146 fclose(fp); 130 147 131 148 return -1; 132 149 } 133 150 fclose(fp); 134 - 135 - imc_counters_config[count][WRITE].type = 136 - imc_counters_config[count][READ].type; 137 151 138 152 /* Get read config */ 139 153 sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME); ··· 140 160 141 161 return -1; 142 162 } 143 - if (fscanf(fp, "%s", cas_count_cfg) <= 0) { 163 + if (fscanf(fp, "%1023s", cas_count_cfg) <= 0) { 144 164 ksft_perror("Could not get iMC cas count read"); 145 165 fclose(fp); 146 166 ··· 148 168 } 149 169 fclose(fp); 150 170 151 - get_event_and_umask(cas_count_cfg, count, READ); 152 - 153 - /* Get write config */ 154 - sprintf(imc_counter_cfg, "%s%s", imc_dir, WRITE_FILE_NAME); 155 - fp = fopen(imc_counter_cfg, "r"); 156 - if (!fp) { 157 - ksft_perror("Failed to open iMC config file"); 158 - 159 - return -1; 160 - } 161 - if (fscanf(fp, "%s", cas_count_cfg) <= 0) { 162 - ksft_perror("Could not get iMC cas count write"); 163 - fclose(fp); 164 - 165 - return -1; 166 - } 167 - fclose(fp); 168 - 169 - get_event_and_umask(cas_count_cfg, count, WRITE); 171 + get_read_event_and_umask(cas_count_cfg, count); 170 172 171 173 return 0; 172 174 } 173 175 174 176 /* 175 177 * A system can have 'n' number of iMC (Integrated Memory Controller) 176 - * counters, get that 'n'. For each iMC counter get it's type and config. 177 - * Also, each counter has two configs, one for read and the other for write. 178 - * A config again has two parts, event and umask. 178 + * counters, get that 'n'. 
Discover the properties of the available 179 + * counters in support of needed performance measurement via perf. 180 + * For each iMC counter get it's type and config. Also obtain each 181 + * counter's event and umask for the memory read events that will be 182 + * measured. 183 + * 179 184 * Enumerate all these details into an array of structures. 180 185 * 181 186 * Return: >= 0 on success. < 0 on failure. ··· 221 256 return count; 222 257 } 223 258 224 - int initialize_mem_bw_imc(void) 259 + int initialize_read_mem_bw_imc(void) 225 260 { 226 - int imc, j; 261 + int imc; 227 262 228 263 imcs = num_of_imcs(); 229 264 if (imcs <= 0) 230 265 return imcs; 231 266 232 267 /* Initialize perf_event_attr structures for all iMC's */ 233 - for (imc = 0; imc < imcs; imc++) { 234 - for (j = 0; j < 2; j++) 235 - membw_initialize_perf_event_attr(imc, j); 236 - } 268 + for (imc = 0; imc < imcs; imc++) 269 + read_mem_bw_initialize_perf_event_attr(imc); 237 270 238 271 return 0; 239 272 } 240 273 241 - static void perf_close_imc_mem_bw(void) 274 + static void perf_close_imc_read_mem_bw(void) 242 275 { 243 276 int mc; 244 277 245 278 for (mc = 0; mc < imcs; mc++) { 246 - if (imc_counters_config[mc][READ].fd != -1) 247 - close(imc_counters_config[mc][READ].fd); 248 - if (imc_counters_config[mc][WRITE].fd != -1) 249 - close(imc_counters_config[mc][WRITE].fd); 279 + if (imc_counters_config[mc].fd != -1) 280 + close(imc_counters_config[mc].fd); 250 281 } 251 282 } 252 283 253 284 /* 254 - * perf_open_imc_mem_bw - Open perf fds for IMCs 285 + * perf_open_imc_read_mem_bw - Open perf fds for IMCs 255 286 * @cpu_no: CPU number that the benchmark PID is bound to 256 287 * 257 288 * Return: = 0 on success. < 0 on failure. 
258 289 */ 259 - static int perf_open_imc_mem_bw(int cpu_no) 290 + static int perf_open_imc_read_mem_bw(int cpu_no) 260 291 { 261 292 int imc, ret; 262 293 263 - for (imc = 0; imc < imcs; imc++) { 264 - imc_counters_config[imc][READ].fd = -1; 265 - imc_counters_config[imc][WRITE].fd = -1; 266 - } 294 + for (imc = 0; imc < imcs; imc++) 295 + imc_counters_config[imc].fd = -1; 267 296 268 297 for (imc = 0; imc < imcs; imc++) { 269 - ret = open_perf_event(imc, cpu_no, READ); 270 - if (ret) 271 - goto close_fds; 272 - ret = open_perf_event(imc, cpu_no, WRITE); 298 + ret = open_perf_read_event(imc, cpu_no); 273 299 if (ret) 274 300 goto close_fds; 275 301 } ··· 268 312 return 0; 269 313 270 314 close_fds: 271 - perf_close_imc_mem_bw(); 315 + perf_close_imc_read_mem_bw(); 272 316 return -1; 273 317 } 274 318 275 319 /* 276 - * do_mem_bw_test - Perform memory bandwidth test 320 + * do_imc_read_mem_bw_test - Perform memory bandwidth test 277 321 * 278 322 * Runs memory bandwidth test over one second period. Also, handles starting 279 323 * and stopping of the IMC perf counters around the test. 280 324 */ 281 - static void do_imc_mem_bw_test(void) 325 + static void do_imc_read_mem_bw_test(void) 282 326 { 283 327 int imc; 284 328 285 - for (imc = 0; imc < imcs; imc++) { 286 - membw_ioctl_perf_event_ioc_reset_enable(imc, READ); 287 - membw_ioctl_perf_event_ioc_reset_enable(imc, WRITE); 288 - } 329 + for (imc = 0; imc < imcs; imc++) 330 + read_mem_bw_ioctl_perf_event_ioc_reset_enable(imc); 289 331 290 332 sleep(1); 291 333 292 - /* Stop counters after a second to get results (both read and write) */ 293 - for (imc = 0; imc < imcs; imc++) { 294 - membw_ioctl_perf_event_ioc_disable(imc, READ); 295 - membw_ioctl_perf_event_ioc_disable(imc, WRITE); 296 - } 334 + /* Stop counters after a second to get results. 
*/ 335 + for (imc = 0; imc < imcs; imc++) 336 + read_mem_bw_ioctl_perf_event_ioc_disable(imc); 297 337 } 298 338 299 339 /* 300 - * get_mem_bw_imc - Memory bandwidth as reported by iMC counters 301 - * @bw_report: Bandwidth report type (reads, writes) 340 + * get_read_mem_bw_imc - Memory read bandwidth as reported by iMC counters 302 341 * 303 - * Memory bandwidth utilized by a process on a socket can be calculated 304 - * using iMC counters. Perf events are used to read these counters. 342 + * Memory read bandwidth utilized by a process on a socket can be calculated 343 + * using iMC counters' read events. Perf events are used to read these 344 + * counters. 305 345 * 306 346 * Return: = 0 on success. < 0 on failure. 307 347 */ 308 - static int get_mem_bw_imc(const char *bw_report, float *bw_imc) 348 + static int get_read_mem_bw_imc(float *bw_imc) 309 349 { 310 - float reads, writes, of_mul_read, of_mul_write; 350 + float reads = 0, of_mul_read = 1; 311 351 int imc; 312 352 313 - /* Start all iMC counters to log values (both read and write) */ 314 - reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1; 315 - 316 353 /* 317 - * Get results which are stored in struct type imc_counter_config 354 + * Log read event values from all iMC counters into 355 + * struct imc_counter_config. 318 356 * Take overflow into consideration before calculating total bandwidth. 
319 357 */ 320 358 for (imc = 0; imc < imcs; imc++) { 321 359 struct imc_counter_config *r = 322 - &imc_counters_config[imc][READ]; 323 - struct imc_counter_config *w = 324 - &imc_counters_config[imc][WRITE]; 360 + &imc_counters_config[imc]; 325 361 326 362 if (read(r->fd, &r->return_value, 327 363 sizeof(struct membw_read_format)) == -1) { 328 364 ksft_perror("Couldn't get read bandwidth through iMC"); 329 - return -1; 330 - } 331 - 332 - if (read(w->fd, &w->return_value, 333 - sizeof(struct membw_read_format)) == -1) { 334 - ksft_perror("Couldn't get write bandwidth through iMC"); 335 365 return -1; 336 366 } 337 367 ··· 328 386 of_mul_read = (float)r_time_enabled / 329 387 (float)r_time_running; 330 388 331 - __u64 w_time_enabled = w->return_value.time_enabled; 332 - __u64 w_time_running = w->return_value.time_running; 333 - 334 - if (w_time_enabled != w_time_running) 335 - of_mul_write = (float)w_time_enabled / 336 - (float)w_time_running; 337 389 reads += r->return_value.value * of_mul_read * SCALE; 338 - writes += w->return_value.value * of_mul_write * SCALE; 339 390 } 340 391 341 - if (strcmp(bw_report, "reads") == 0) { 342 - *bw_imc = reads; 343 - return 0; 344 - } 345 - 346 - if (strcmp(bw_report, "writes") == 0) { 347 - *bw_imc = writes; 348 - return 0; 349 - } 350 - 351 - *bw_imc = reads + writes; 392 + *bw_imc = reads; 352 393 return 0; 353 394 } 354 395 ··· 373 448 return 0; 374 449 } 375 450 376 - static pid_t bm_pid, ppid; 451 + static pid_t bm_pid; 377 452 378 453 void ctrlc_handler(int signum, siginfo_t *info, void *ptr) 379 454 { ··· 431 506 } 432 507 } 433 508 434 - static void parent_exit(pid_t ppid) 435 - { 436 - kill(ppid, SIGKILL); 437 - umount_resctrlfs(); 438 - exit(EXIT_FAILURE); 439 - } 440 - 441 509 /* 442 510 * print_results_bw: the memory bandwidth results are stored in a file 443 511 * @filename: file that stores the results ··· 470 552 } 471 553 472 554 /* 473 - * measure_mem_bw - Measures memory bandwidth numbers while benchmark 
runs 555 + * measure_read_mem_bw - Measures read memory bandwidth numbers while benchmark runs 474 556 * @uparams: User supplied parameters 475 557 * @param: Parameters passed to resctrl_val() 476 558 * @bm_pid: PID that runs the benchmark 477 - * @bw_report: Bandwidth report type (reads, writes) 478 559 * 479 560 * Measure memory bandwidth from resctrl and from another source which is 480 561 * perf imc value or could be something else if perf imc event is not 481 562 * available. Compare the two values to validate resctrl value. It takes 482 563 * 1 sec to measure the data. 564 + * resctrl does not distinguish between read and write operations so 565 + * its data includes all memory operations. 483 566 */ 484 - int measure_mem_bw(const struct user_params *uparams, 485 - struct resctrl_val_param *param, pid_t bm_pid, 486 - const char *bw_report) 567 + int measure_read_mem_bw(const struct user_params *uparams, 568 + struct resctrl_val_param *param, pid_t bm_pid) 487 569 { 488 570 unsigned long bw_resc, bw_resc_start, bw_resc_end; 489 571 FILE *mem_bw_fp; 490 572 float bw_imc; 491 573 int ret; 492 574 493 - bw_report = get_bw_report_type(bw_report); 494 - if (!bw_report) 495 - return -1; 496 - 497 575 mem_bw_fp = open_mem_bw_resctrl(mbm_total_path); 498 576 if (!mem_bw_fp) 499 577 return -1; 500 578 501 - ret = perf_open_imc_mem_bw(uparams->cpu); 579 + ret = perf_open_imc_read_mem_bw(uparams->cpu); 502 580 if (ret < 0) 503 581 goto close_fp; 504 582 ··· 504 590 505 591 rewind(mem_bw_fp); 506 592 507 - do_imc_mem_bw_test(); 593 + do_imc_read_mem_bw_test(); 508 594 509 595 ret = get_mem_bw_resctrl(mem_bw_fp, &bw_resc_end); 510 596 if (ret < 0) 511 597 goto close_imc; 512 598 513 - ret = get_mem_bw_imc(bw_report, &bw_imc); 599 + ret = get_read_mem_bw_imc(&bw_imc); 514 600 if (ret < 0) 515 601 goto close_imc; 516 602 517 - perf_close_imc_mem_bw(); 603 + perf_close_imc_read_mem_bw(); 518 604 fclose(mem_bw_fp); 519 605 520 606 bw_resc = (bw_resc_end - bw_resc_start) / MB; 
··· 522 608 return print_results_bw(param->filename, bm_pid, bw_imc, bw_resc); 523 609 524 610 close_imc: 525 - perf_close_imc_mem_bw(); 611 + perf_close_imc_read_mem_bw(); 526 612 close_fp: 527 613 fclose(mem_bw_fp); 528 614 return ret; 529 - } 530 - 531 - /* 532 - * run_benchmark - Run a specified benchmark or fill_buf (default benchmark) 533 - * in specified signal. Direct benchmark stdio to /dev/null. 534 - * @signum: signal number 535 - * @info: signal info 536 - * @ucontext: user context in signal handling 537 - */ 538 - static void run_benchmark(int signum, siginfo_t *info, void *ucontext) 539 - { 540 - int operation, ret, memflush; 541 - char **benchmark_cmd; 542 - size_t span; 543 - bool once; 544 - FILE *fp; 545 - 546 - benchmark_cmd = info->si_ptr; 547 - 548 - /* 549 - * Direct stdio of child to /dev/null, so that only parent writes to 550 - * stdio (console) 551 - */ 552 - fp = freopen("/dev/null", "w", stdout); 553 - if (!fp) { 554 - ksft_perror("Unable to direct benchmark status to /dev/null"); 555 - parent_exit(ppid); 556 - } 557 - 558 - if (strcmp(benchmark_cmd[0], "fill_buf") == 0) { 559 - /* Execute default fill_buf benchmark */ 560 - span = strtoul(benchmark_cmd[1], NULL, 10); 561 - memflush = atoi(benchmark_cmd[2]); 562 - operation = atoi(benchmark_cmd[3]); 563 - if (!strcmp(benchmark_cmd[4], "true")) { 564 - once = true; 565 - } else if (!strcmp(benchmark_cmd[4], "false")) { 566 - once = false; 567 - } else { 568 - ksft_print_msg("Invalid once parameter\n"); 569 - parent_exit(ppid); 570 - } 571 - 572 - if (run_fill_buf(span, memflush, operation, once)) 573 - fprintf(stderr, "Error in running fill buffer\n"); 574 - } else { 575 - /* Execute specified benchmark */ 576 - ret = execvp(benchmark_cmd[0], benchmark_cmd); 577 - if (ret) 578 - ksft_perror("execvp"); 579 - } 580 - 581 - fclose(stdout); 582 - ksft_print_msg("Unable to run specified benchmark\n"); 583 - parent_exit(ppid); 584 615 } 585 616 586 617 /* ··· 533 674 * the benchmark 534 675 * 
@test: test information structure 535 676 * @uparams: user supplied parameters 536 - * @benchmark_cmd: benchmark command and its arguments 537 677 * @param: parameters passed to resctrl_val() 538 678 * 539 679 * Return: 0 when the test was run, < 0 on error. 540 680 */ 541 681 int resctrl_val(const struct resctrl_test *test, 542 682 const struct user_params *uparams, 543 - const char * const *benchmark_cmd, 544 683 struct resctrl_val_param *param) 545 684 { 546 - struct sigaction sigact; 547 - int ret = 0, pipefd[2]; 548 - char pipe_message = 0; 549 - union sigval value; 685 + unsigned char *buf = NULL; 686 + cpu_set_t old_affinity; 550 687 int domain_id; 688 + int ret = 0; 689 + pid_t ppid; 551 690 552 691 if (strcmp(param->filename, "") == 0) 553 692 sprintf(param->filename, "stdio"); ··· 556 699 return ret; 557 700 } 558 701 559 - /* 560 - * If benchmark wasn't successfully started by child, then child should 561 - * kill parent, so save parent's pid 562 - */ 563 702 ppid = getpid(); 564 703 565 - if (pipe(pipefd)) { 566 - ksft_perror("Unable to create pipe"); 567 - 568 - return -1; 569 - } 570 - 571 - /* 572 - * Fork to start benchmark, save child's pid so that it can be killed 573 - * when needed 574 - */ 575 - fflush(stdout); 576 - bm_pid = fork(); 577 - if (bm_pid == -1) { 578 - ksft_perror("Unable to fork"); 579 - 580 - return -1; 581 - } 582 - 583 - if (bm_pid == 0) { 584 - /* 585 - * Mask all signals except SIGUSR1, parent uses SIGUSR1 to 586 - * start benchmark 587 - */ 588 - sigfillset(&sigact.sa_mask); 589 - sigdelset(&sigact.sa_mask, SIGUSR1); 590 - 591 - sigact.sa_sigaction = run_benchmark; 592 - sigact.sa_flags = SA_SIGINFO; 593 - 594 - /* Register for "SIGUSR1" signal from parent */ 595 - if (sigaction(SIGUSR1, &sigact, NULL)) { 596 - ksft_perror("Can't register child for signal"); 597 - parent_exit(ppid); 598 - } 599 - 600 - /* Tell parent that child is ready */ 601 - close(pipefd[0]); 602 - pipe_message = 1; 603 - if (write(pipefd[1], 
&pipe_message, sizeof(pipe_message)) < 604 - sizeof(pipe_message)) { 605 - ksft_perror("Failed signaling parent process"); 606 - close(pipefd[1]); 607 - return -1; 608 - } 609 - close(pipefd[1]); 610 - 611 - /* Suspend child until delivery of "SIGUSR1" from parent */ 612 - sigsuspend(&sigact.sa_mask); 613 - 614 - ksft_perror("Child is done"); 615 - parent_exit(ppid); 616 - } 617 - 618 - ksft_print_msg("Benchmark PID: %d\n", (int)bm_pid); 619 - 620 - /* 621 - * The cast removes constness but nothing mutates benchmark_cmd within 622 - * the context of this process. At the receiving process, it becomes 623 - * argv, which is mutable, on exec() but that's after fork() so it 624 - * doesn't matter for the process running the tests. 625 - */ 626 - value.sival_ptr = (void *)benchmark_cmd; 627 - 628 - /* Taskset benchmark to specified cpu */ 629 - ret = taskset_benchmark(bm_pid, uparams->cpu, NULL); 704 + /* Taskset test to specified CPU. */ 705 + ret = taskset_benchmark(ppid, uparams->cpu, &old_affinity); 630 706 if (ret) 631 - goto out; 707 + return ret; 632 708 633 - /* Write benchmark to specified control&monitoring grp in resctrl FS */ 634 - ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp); 709 + /* Write test to specified control & monitoring group in resctrl FS. */ 710 + ret = write_bm_pid_to_resctrl(ppid, param->ctrlgrp, param->mongrp); 635 711 if (ret) 636 - goto out; 712 + goto reset_affinity; 637 713 638 714 if (param->init) { 639 715 ret = param->init(param, domain_id); 640 716 if (ret) 641 - goto out; 717 + goto reset_affinity; 642 718 } 643 719 644 - /* Parent waits for child to be ready. */ 645 - close(pipefd[1]); 646 - while (pipe_message != 1) { 647 - if (read(pipefd[0], &pipe_message, sizeof(pipe_message)) < 648 - sizeof(pipe_message)) { 649 - ksft_perror("Failed reading message from child process"); 650 - close(pipefd[0]); 651 - goto out; 720 + /* 721 + * If not running user provided benchmark, run the default 722 + * "fill_buf". 
First phase of "fill_buf" is to prepare the 723 + * buffer that the benchmark will operate on. No measurements 724 + * are needed during this phase and prepared memory will be 725 + * passed to next part of benchmark via copy-on-write thus 726 + * no impact on the benchmark that relies on reading from 727 + * memory only. 728 + */ 729 + if (param->fill_buf) { 730 + buf = alloc_buffer(param->fill_buf->buf_size, 731 + param->fill_buf->memflush); 732 + if (!buf) { 733 + ret = -ENOMEM; 734 + goto reset_affinity; 652 735 } 653 736 } 654 - close(pipefd[0]); 655 737 656 - /* Signal child to start benchmark */ 657 - if (sigqueue(bm_pid, SIGUSR1, value) == -1) { 658 - ksft_perror("sigqueue SIGUSR1 to child"); 659 - ret = -1; 660 - goto out; 738 + fflush(stdout); 739 + bm_pid = fork(); 740 + if (bm_pid == -1) { 741 + ret = -errno; 742 + ksft_perror("Unable to fork"); 743 + goto free_buf; 661 744 } 662 745 663 - /* Give benchmark enough time to fully run */ 746 + /* 747 + * What needs to be measured runs in separate process until 748 + * terminated. 749 + */ 750 + if (bm_pid == 0) { 751 + if (param->fill_buf) 752 + fill_cache_read(buf, param->fill_buf->buf_size, false); 753 + else if (uparams->benchmark_cmd[0]) 754 + execvp(uparams->benchmark_cmd[0], (char **)uparams->benchmark_cmd); 755 + exit(EXIT_SUCCESS); 756 + } 757 + 758 + ksft_print_msg("Benchmark PID: %d\n", (int)bm_pid); 759 + 760 + /* Give benchmark enough time to fully run. */ 664 761 sleep(1); 665 762 666 763 /* Test runs until the callback setup() tells the test to stop. */ ··· 632 821 break; 633 822 } 634 823 635 - out: 636 824 kill(bm_pid, SIGKILL); 637 - 825 + free_buf: 826 + free(buf); 827 + reset_affinity: 828 + taskset_restore(ppid, &old_affinity); 638 829 return ret; 639 830 }
+1 -18
tools/testing/selftests/resctrl/resctrlfs.c
··· 182 182 183 183 return -1; 184 184 } 185 - if (fscanf(fp, "%s", cache_str) <= 0) { 185 + if (fscanf(fp, "%63s", cache_str) <= 0) { 186 186 ksft_perror("Could not get cache_size"); 187 187 fclose(fp); 188 188 ··· 829 829 waitpid(pid, NULL, 0); 830 830 831 831 return 0; 832 - } 833 - 834 - const char *get_bw_report_type(const char *bw_report) 835 - { 836 - if (strcmp(bw_report, "reads") == 0) 837 - return bw_report; 838 - if (strcmp(bw_report, "writes") == 0) 839 - return bw_report; 840 - if (strcmp(bw_report, "nt-writes") == 0) { 841 - return "writes"; 842 - } 843 - if (strcmp(bw_report, "total") == 0) 844 - return bw_report; 845 - 846 - fprintf(stderr, "Requested iMC bandwidth report type unavailable\n"); 847 - 848 - return NULL; 849 832 } 850 833 851 834 int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
+1 -1
tools/testing/selftests/rtc/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - CFLAGS += -O3 -Wl,-no-as-needed -Wall 2 + CFLAGS += -O3 -Wl,-no-as-needed -Wall -I$(top_srcdir)/usr/include 3 3 LDLIBS += -lrt -lpthread -lm 4 4 5 5 TEST_GEN_PROGS = rtctest
+64
tools/testing/selftests/rtc/rtctest.c
··· 25 25 26 26 static char *rtc_file = "/dev/rtc0"; 27 27 28 + enum rtc_alarm_state { 29 + RTC_ALARM_UNKNOWN, 30 + RTC_ALARM_ENABLED, 31 + RTC_ALARM_DISABLED, 32 + }; 33 + 28 34 FIXTURE(rtc) { 29 35 int fd; 30 36 }; ··· 86 80 req.tv_sec = rem.tv_sec; 87 81 req.tv_nsec = rem.tv_nsec; 88 82 } 83 + } 84 + 85 + static enum rtc_alarm_state get_rtc_alarm_state(int fd) 86 + { 87 + struct rtc_param param = { 0 }; 88 + int rc; 89 + 90 + /* Validate kernel reflects unsupported RTC alarm state */ 91 + param.param = RTC_PARAM_FEATURES; 92 + param.index = 0; 93 + rc = ioctl(fd, RTC_PARAM_GET, &param); 94 + if (rc < 0) 95 + return RTC_ALARM_UNKNOWN; 96 + 97 + if ((param.uvalue & _BITUL(RTC_FEATURE_ALARM)) == 0) 98 + return RTC_ALARM_DISABLED; 99 + 100 + return RTC_ALARM_ENABLED; 89 101 } 90 102 91 103 TEST_F_TIMEOUT(rtc, date_read_loop, READ_LOOP_DURATION_SEC + 2) { ··· 221 197 fd_set readfds; 222 198 time_t secs, new; 223 199 int rc; 200 + enum rtc_alarm_state alarm_state = RTC_ALARM_UNKNOWN; 224 201 225 202 if (self->fd == -1 && errno == ENOENT) 226 203 SKIP(return, "Skipping test since %s does not exist", rtc_file); 227 204 ASSERT_NE(-1, self->fd); 205 + 206 + alarm_state = get_rtc_alarm_state(self->fd); 207 + if (alarm_state == RTC_ALARM_DISABLED) 208 + SKIP(return, "Skipping test since alarms are not supported."); 228 209 229 210 rc = ioctl(self->fd, RTC_RD_TIME, &tm); 230 211 ASSERT_NE(-1, rc); ··· 239 210 240 211 rc = ioctl(self->fd, RTC_ALM_SET, &tm); 241 212 if (rc == -1) { 213 + /* 214 + * Report error if rtc alarm was enabled. Fallback to check ioctl 215 + * error number if rtc alarm state is unknown. 
216 + */ 217 + ASSERT_EQ(RTC_ALARM_UNKNOWN, alarm_state); 242 218 ASSERT_EQ(EINVAL, errno); 243 219 TH_LOG("skip alarms are not supported."); 244 220 return; ··· 289 255 fd_set readfds; 290 256 time_t secs, new; 291 257 int rc; 258 + enum rtc_alarm_state alarm_state = RTC_ALARM_UNKNOWN; 292 259 293 260 if (self->fd == -1 && errno == ENOENT) 294 261 SKIP(return, "Skipping test since %s does not exist", rtc_file); 295 262 ASSERT_NE(-1, self->fd); 263 + 264 + alarm_state = get_rtc_alarm_state(self->fd); 265 + if (alarm_state == RTC_ALARM_DISABLED) 266 + SKIP(return, "Skipping test since alarms are not supported."); 296 267 297 268 rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time); 298 269 ASSERT_NE(-1, rc); ··· 309 270 310 271 rc = ioctl(self->fd, RTC_WKALM_SET, &alarm); 311 272 if (rc == -1) { 273 + /* 274 + * Report error if rtc alarm was enabled. Fallback to check ioctl 275 + * error number if rtc alarm state is unknown. 276 + */ 277 + ASSERT_EQ(RTC_ALARM_UNKNOWN, alarm_state); 312 278 ASSERT_EQ(EINVAL, errno); 313 279 TH_LOG("skip alarms are not supported."); 314 280 return; ··· 351 307 fd_set readfds; 352 308 time_t secs, new; 353 309 int rc; 310 + enum rtc_alarm_state alarm_state = RTC_ALARM_UNKNOWN; 354 311 355 312 if (self->fd == -1 && errno == ENOENT) 356 313 SKIP(return, "Skipping test since %s does not exist", rtc_file); 357 314 ASSERT_NE(-1, self->fd); 315 + 316 + alarm_state = get_rtc_alarm_state(self->fd); 317 + if (alarm_state == RTC_ALARM_DISABLED) 318 + SKIP(return, "Skipping test since alarms are not supported."); 358 319 359 320 rc = ioctl(self->fd, RTC_RD_TIME, &tm); 360 321 ASSERT_NE(-1, rc); ··· 369 320 370 321 rc = ioctl(self->fd, RTC_ALM_SET, &tm); 371 322 if (rc == -1) { 323 + /* 324 + * Report error if rtc alarm was enabled. Fallback to check ioctl 325 + * error number if rtc alarm state is unknown. 
326 + */ 327 + ASSERT_EQ(RTC_ALARM_UNKNOWN, alarm_state); 372 328 ASSERT_EQ(EINVAL, errno); 373 329 TH_LOG("skip alarms are not supported."); 374 330 return; ··· 419 365 fd_set readfds; 420 366 time_t secs, new; 421 367 int rc; 368 + enum rtc_alarm_state alarm_state = RTC_ALARM_UNKNOWN; 422 369 423 370 if (self->fd == -1 && errno == ENOENT) 424 371 SKIP(return, "Skipping test since %s does not exist", rtc_file); 425 372 ASSERT_NE(-1, self->fd); 373 + 374 + alarm_state = get_rtc_alarm_state(self->fd); 375 + if (alarm_state == RTC_ALARM_DISABLED) 376 + SKIP(return, "Skipping test since alarms are not supported."); 426 377 427 378 rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time); 428 379 ASSERT_NE(-1, rc); ··· 439 380 440 381 rc = ioctl(self->fd, RTC_WKALM_SET, &alarm); 441 382 if (rc == -1) { 383 + /* 384 + * Report error if rtc alarm was enabled. Fallback to check ioctl 385 + * error number if rtc alarm state is unknown. 386 + */ 387 + ASSERT_EQ(RTC_ALARM_UNKNOWN, alarm_state); 442 388 ASSERT_EQ(EINVAL, errno); 443 389 TH_LOG("skip alarms are not supported."); 444 390 return;
+1
tools/testing/selftests/sigaltstack/.gitignore tools/testing/selftests/signal/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 + mangle_uc_sigmask 2 3 sas
+2 -1
tools/testing/selftests/sigaltstack/Makefile tools/testing/selftests/signal/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 CFLAGS = -Wall 3 - TEST_GEN_PROGS = sas 3 + TEST_GEN_PROGS = mangle_uc_sigmask 4 + TEST_GEN_PROGS += sas 4 5 5 6 include ../lib.mk 6 7
tools/testing/selftests/sigaltstack/current_stack_pointer.h tools/testing/selftests/signal/current_stack_pointer.h
tools/testing/selftests/sigaltstack/sas.c tools/testing/selftests/signal/sas.c
+184
tools/testing/selftests/signal/mangle_uc_sigmask.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright (C) 2024 ARM Ltd. 4 + * 5 + * Author: Dev Jain <dev.jain@arm.com> 6 + * 7 + * Test describing a clear distinction between signal states - delivered and 8 + * blocked, and their relation with ucontext. 9 + * 10 + * A process can request blocking of a signal by masking it into its set of 11 + * blocked signals; such a signal, when sent to the process by the kernel, 12 + * will get blocked by the process and it may later unblock it and take an 13 + * action. At that point, the signal will be delivered. 14 + * 15 + * We test the following functionalities of the kernel: 16 + * 17 + * ucontext_t describes the interrupted context of the thread; this implies 18 + * that, in case of registering a handler and catching the corresponding 19 + * signal, that state is before what was jumping into the handler. 20 + * 21 + * The thread's mask of blocked signals can be permanently changed, i.e, not 22 + * just during the execution of the handler, by mangling with uc_sigmask 23 + * from inside the handler. 24 + * 25 + * Assume that we block the set of signals, S1, by sigaction(), and say, the 26 + * signal for which the handler was installed, is S2. When S2 is sent to the 27 + * program, it will be considered "delivered", since we will act on the 28 + * signal and jump to the handler. Any instances of S1 or S2 raised, while the 29 + * program is executing inside the handler, will be blocked; they will be 30 + * delivered immediately upon termination of the handler. 31 + * 32 + * For standard signals (also see real-time signals in the man page), multiple 33 + * blocked instances of the same signal are not queued; such a signal will 34 + * be delivered just once. 
35 + */ 36 + 37 + #include <stdio.h> 38 + #include <stdlib.h> 39 + #include <signal.h> 40 + #include <ucontext.h> 41 + 42 + #include "../kselftest.h" 43 + 44 + void handler_verify_ucontext(int signo, siginfo_t *info, void *uc) 45 + { 46 + int ret; 47 + 48 + /* Kernel dumps ucontext with USR2 blocked */ 49 + ret = sigismember(&(((ucontext_t *)uc)->uc_sigmask), SIGUSR2); 50 + ksft_test_result(ret == 1, "USR2 blocked in ucontext\n"); 51 + 52 + /* 53 + * USR2 is blocked; can be delivered neither here, nor after 54 + * exit from handler 55 + */ 56 + if (raise(SIGUSR2)) 57 + ksft_exit_fail_perror("raise"); 58 + } 59 + 60 + void handler_segv(int signo, siginfo_t *info, void *uc) 61 + { 62 + /* 63 + * Three cases possible: 64 + * 1. Program already terminated due to segmentation fault. 65 + * 2. SEGV was blocked even after returning from handler_usr. 66 + * 3. SEGV was delivered on returning from handler_usr. 67 + * The last option must happen. 68 + */ 69 + ksft_test_result_pass("SEGV delivered\n"); 70 + } 71 + 72 + static int cnt; 73 + 74 + void handler_usr(int signo, siginfo_t *info, void *uc) 75 + { 76 + int ret; 77 + 78 + /* 79 + * Break out of infinite recursion caused by raise(SIGUSR1) invoked 80 + * from inside the handler 81 + */ 82 + ++cnt; 83 + if (cnt > 1) 84 + return; 85 + 86 + /* SEGV blocked during handler execution, delivered on return */ 87 + if (raise(SIGSEGV)) 88 + ksft_exit_fail_perror("raise"); 89 + 90 + ksft_print_msg("SEGV bypassed successfully\n"); 91 + 92 + /* 93 + * Signal responsible for handler invocation is blocked by default; 94 + * delivered on return, leading to recursion 95 + */ 96 + if (raise(SIGUSR1)) 97 + ksft_exit_fail_perror("raise"); 98 + 99 + ksft_test_result(cnt == 1, 100 + "USR1 is blocked, cannot invoke handler right now\n"); 101 + 102 + /* Raise USR1 again; only one instance must be delivered upon exit */ 103 + if (raise(SIGUSR1)) 104 + ksft_exit_fail_perror("raise"); 105 + 106 + /* SEGV has been blocked in sa_mask, but ucontext 
is empty */ 107 + ret = sigismember(&(((ucontext_t *)uc)->uc_sigmask), SIGSEGV); 108 + ksft_test_result(ret == 0, "SEGV not blocked in ucontext\n"); 109 + 110 + /* USR1 has been blocked, but ucontext is empty */ 111 + ret = sigismember(&(((ucontext_t *)uc)->uc_sigmask), SIGUSR1); 112 + ksft_test_result(ret == 0, "USR1 not blocked in ucontext\n"); 113 + 114 + /* 115 + * Mangle ucontext; this will be copied back into &current->blocked 116 + * on return from the handler. 117 + */ 118 + if (sigaddset(&((ucontext_t *)uc)->uc_sigmask, SIGUSR2)) 119 + ksft_exit_fail_perror("sigaddset"); 120 + } 121 + 122 + int main(int argc, char *argv[]) 123 + { 124 + struct sigaction act, act2; 125 + sigset_t set, oldset; 126 + 127 + ksft_print_header(); 128 + ksft_set_plan(7); 129 + 130 + act.sa_flags = SA_SIGINFO; 131 + act.sa_sigaction = &handler_usr; 132 + 133 + /* Add SEGV to blocked mask */ 134 + if (sigemptyset(&act.sa_mask) || sigaddset(&act.sa_mask, SIGSEGV) 135 + || (sigismember(&act.sa_mask, SIGSEGV) != 1)) 136 + ksft_exit_fail_msg("Cannot add SEGV to blocked mask\n"); 137 + 138 + if (sigaction(SIGUSR1, &act, NULL)) 139 + ksft_exit_fail_perror("Cannot install handler"); 140 + 141 + act2.sa_flags = SA_SIGINFO; 142 + act2.sa_sigaction = &handler_segv; 143 + 144 + if (sigaction(SIGSEGV, &act2, NULL)) 145 + ksft_exit_fail_perror("Cannot install handler"); 146 + 147 + /* Invoke handler */ 148 + if (raise(SIGUSR1)) 149 + ksft_exit_fail_perror("raise"); 150 + 151 + /* USR1 must not be queued */ 152 + ksft_test_result(cnt == 2, "handler invoked only twice\n"); 153 + 154 + /* Mangled ucontext implies USR2 is blocked for current thread */ 155 + if (raise(SIGUSR2)) 156 + ksft_exit_fail_perror("raise"); 157 + 158 + ksft_print_msg("USR2 bypassed successfully\n"); 159 + 160 + act.sa_sigaction = &handler_verify_ucontext; 161 + if (sigaction(SIGUSR1, &act, NULL)) 162 + ksft_exit_fail_perror("Cannot install handler"); 163 + 164 + if (raise(SIGUSR1)) 165 + ksft_exit_fail_perror("raise"); 166 + 
167 + /* 168 + * Raising USR2 in handler_verify_ucontext is redundant since it 169 + * is blocked 170 + */ 171 + ksft_print_msg("USR2 still blocked on return from handler\n"); 172 + 173 + /* Confirm USR2 blockage by sigprocmask() too */ 174 + if (sigemptyset(&set)) 175 + ksft_exit_fail_perror("sigemptyset"); 176 + 177 + if (sigprocmask(SIG_BLOCK, &set, &oldset)) 178 + ksft_exit_fail_perror("sigprocmask"); 179 + 180 + ksft_test_result(sigismember(&oldset, SIGUSR2) == 1, 181 + "USR2 present in &current->blocked\n"); 182 + 183 + ksft_finished(); 184 + }
+1 -1
tools/testing/selftests/timers/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 - CFLAGS += -O3 -Wl,-no-as-needed -Wall 2 + CFLAGS += -O3 -Wl,-no-as-needed -Wall -I $(top_srcdir) 3 3 LDLIBS += -lrt -lpthread -lm 4 4 5 5 # these are all "safe" tests that don't modify
+1 -5
tools/testing/selftests/timers/adjtick.c
··· 22 22 #include <sys/time.h> 23 23 #include <sys/timex.h> 24 24 #include <time.h> 25 + #include <include/vdso/time64.h> 25 26 26 27 #include "../kselftest.h" 27 - 28 - #define CLOCK_MONOTONIC_RAW 4 29 - 30 - #define NSEC_PER_SEC 1000000000LL 31 - #define USEC_PER_SEC 1000000 32 28 33 29 #define MILLION 1000000 34 30
+4 -18
tools/testing/selftests/timers/alarmtimer-suspend.c
··· 28 28 #include <signal.h> 29 29 #include <stdlib.h> 30 30 #include <pthread.h> 31 + #include <include/vdso/time64.h> 32 + #include <errno.h> 31 33 #include "../kselftest.h" 32 34 33 - #define CLOCK_REALTIME 0 34 - #define CLOCK_MONOTONIC 1 35 - #define CLOCK_PROCESS_CPUTIME_ID 2 36 - #define CLOCK_THREAD_CPUTIME_ID 3 37 - #define CLOCK_MONOTONIC_RAW 4 38 - #define CLOCK_REALTIME_COARSE 5 39 - #define CLOCK_MONOTONIC_COARSE 6 40 - #define CLOCK_BOOTTIME 7 41 - #define CLOCK_REALTIME_ALARM 8 42 - #define CLOCK_BOOTTIME_ALARM 9 43 - #define CLOCK_HWSPECIFIC 10 44 - #define CLOCK_TAI 11 45 - #define NR_CLOCKIDS 12 46 - 47 - 48 - #define NSEC_PER_SEC 1000000000ULL 49 35 #define UNREASONABLE_LAT (NSEC_PER_SEC * 5) /* hopefully we resume in 5 secs */ 50 36 51 37 #define SUSPEND_SECS 15 ··· 128 142 129 143 alarmcount = 0; 130 144 if (timer_create(alarm_clock_id, &se, &tm1) == -1) { 131 - printf("timer_create failed, %s unsupported?\n", 132 - clockstring(alarm_clock_id)); 145 + printf("timer_create failed, %s unsupported?: %s\n", 146 + clockstring(alarm_clock_id), strerror(errno)); 133 147 break; 134 148 } 135 149
+5 -16
tools/testing/selftests/timers/inconsistency-check.c
··· 28 28 #include <sys/timex.h> 29 29 #include <string.h> 30 30 #include <signal.h> 31 + #include <include/vdso/time64.h> 31 32 #include "../kselftest.h" 32 33 33 - #define CALLS_PER_LOOP 64 34 - #define NSEC_PER_SEC 1000000000ULL 35 - 36 - #define CLOCK_REALTIME 0 37 - #define CLOCK_MONOTONIC 1 38 - #define CLOCK_PROCESS_CPUTIME_ID 2 39 - #define CLOCK_THREAD_CPUTIME_ID 3 40 - #define CLOCK_MONOTONIC_RAW 4 41 - #define CLOCK_REALTIME_COARSE 5 42 - #define CLOCK_MONOTONIC_COARSE 6 43 - #define CLOCK_BOOTTIME 7 44 - #define CLOCK_REALTIME_ALARM 8 45 - #define CLOCK_BOOTTIME_ALARM 9 34 + /* CLOCK_HWSPECIFIC == CLOCK_SGI_CYCLE (Deprecated) */ 46 35 #define CLOCK_HWSPECIFIC 10 47 - #define CLOCK_TAI 11 48 - #define NR_CLOCKIDS 12 36 + 37 + #define CALLS_PER_LOOP 64 49 38 50 39 char *clockstring(int clockid) 51 40 { ··· 141 152 { 142 153 int clockid, opt; 143 154 int userclock = CLOCK_REALTIME; 144 - int maxclocks = NR_CLOCKIDS; 155 + int maxclocks = CLOCK_TAI + 1; 145 156 int runtime = 10; 146 157 struct timespec ts; 147 158
+1 -1
tools/testing/selftests/timers/leap-a-day.c
··· 48 48 #include <string.h> 49 49 #include <signal.h> 50 50 #include <unistd.h> 51 + #include <include/vdso/time64.h> 51 52 #include "../kselftest.h" 52 53 53 - #define NSEC_PER_SEC 1000000000ULL 54 54 #define CLOCK_TAI 11 55 55 56 56 time_t next_leap;
+1 -1
tools/testing/selftests/timers/mqueue-lat.c
··· 29 29 #include <signal.h> 30 30 #include <errno.h> 31 31 #include <mqueue.h> 32 + #include <include/vdso/time64.h> 32 33 #include "../kselftest.h" 33 34 34 - #define NSEC_PER_SEC 1000000000ULL 35 35 36 36 #define TARGET_TIMEOUT 100000000 /* 100ms in nanoseconds */ 37 37 #define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
+5 -16
tools/testing/selftests/timers/nanosleep.c
··· 27 27 #include <sys/timex.h> 28 28 #include <string.h> 29 29 #include <signal.h> 30 + #include <include/vdso/time64.h> 30 31 #include "../kselftest.h" 31 32 32 - #define NSEC_PER_SEC 1000000000ULL 33 - 34 - #define CLOCK_REALTIME 0 35 - #define CLOCK_MONOTONIC 1 36 - #define CLOCK_PROCESS_CPUTIME_ID 2 37 - #define CLOCK_THREAD_CPUTIME_ID 3 38 - #define CLOCK_MONOTONIC_RAW 4 39 - #define CLOCK_REALTIME_COARSE 5 40 - #define CLOCK_MONOTONIC_COARSE 6 41 - #define CLOCK_BOOTTIME 7 42 - #define CLOCK_REALTIME_ALARM 8 43 - #define CLOCK_BOOTTIME_ALARM 9 33 + /* CLOCK_HWSPECIFIC == CLOCK_SGI_CYCLE (Deprecated) */ 44 34 #define CLOCK_HWSPECIFIC 10 45 - #define CLOCK_TAI 11 46 - #define NR_CLOCKIDS 12 47 35 48 36 #define UNSUPPORTED 0xf00f 49 37 ··· 120 132 { 121 133 long long length; 122 134 int clockid, ret; 135 + int max_clocks = CLOCK_TAI + 1; 123 136 124 137 ksft_print_header(); 125 - ksft_set_plan(NR_CLOCKIDS); 138 + ksft_set_plan(max_clocks); 126 139 127 - for (clockid = CLOCK_REALTIME; clockid < NR_CLOCKIDS; clockid++) { 140 + for (clockid = CLOCK_REALTIME; clockid < max_clocks; clockid++) { 128 141 129 142 /* Skip cputime clockids since nanosleep won't increment cputime */ 130 143 if (clockid == CLOCK_PROCESS_CPUTIME_ID ||
+5 -17
tools/testing/selftests/timers/nsleep-lat.c
··· 24 24 #include <sys/timex.h> 25 25 #include <string.h> 26 26 #include <signal.h> 27 + #include <include/vdso/time64.h> 27 28 #include "../kselftest.h" 28 - 29 - #define NSEC_PER_SEC 1000000000ULL 30 29 31 30 #define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */ 32 31 33 - 34 - #define CLOCK_REALTIME 0 35 - #define CLOCK_MONOTONIC 1 36 - #define CLOCK_PROCESS_CPUTIME_ID 2 37 - #define CLOCK_THREAD_CPUTIME_ID 3 38 - #define CLOCK_MONOTONIC_RAW 4 39 - #define CLOCK_REALTIME_COARSE 5 40 - #define CLOCK_MONOTONIC_COARSE 6 41 - #define CLOCK_BOOTTIME 7 42 - #define CLOCK_REALTIME_ALARM 8 43 - #define CLOCK_BOOTTIME_ALARM 9 32 + /* CLOCK_HWSPECIFIC == CLOCK_SGI_CYCLE (Deprecated) */ 44 33 #define CLOCK_HWSPECIFIC 10 45 - #define CLOCK_TAI 11 46 - #define NR_CLOCKIDS 12 47 34 48 35 #define UNSUPPORTED 0xf00f 49 36 ··· 132 145 { 133 146 long long length; 134 147 int clockid, ret; 148 + int max_clocks = CLOCK_TAI + 1; 135 149 136 150 ksft_print_header(); 137 - ksft_set_plan(NR_CLOCKIDS - CLOCK_REALTIME - SKIPPED_CLOCK_COUNT); 151 + ksft_set_plan(max_clocks - CLOCK_REALTIME - SKIPPED_CLOCK_COUNT); 138 152 139 - for (clockid = CLOCK_REALTIME; clockid < NR_CLOCKIDS; clockid++) { 153 + for (clockid = CLOCK_REALTIME; clockid < max_clocks; clockid++) { 140 154 141 155 /* Skip cputime clockids since nanosleep won't increment cputime */ 142 156 if (clockid == CLOCK_PROCESS_CPUTIME_ID ||
+7 -8
tools/testing/selftests/timers/posix_timers.c
··· 15 15 #include <string.h> 16 16 #include <unistd.h> 17 17 #include <time.h> 18 + #include <include/vdso/time64.h> 18 19 #include <pthread.h> 19 20 20 21 #include "../kselftest.h" 21 22 22 23 #define DELAY 2 23 - #define USECS_PER_SEC 1000000 24 - #define NSECS_PER_SEC 1000000000 25 24 26 25 static void __fatal_error(const char *test, const char *name, const char *what) 27 26 { ··· 85 86 long long diff; 86 87 87 88 diff = end.tv_usec - start.tv_usec; 88 - diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC; 89 + diff += (end.tv_sec - start.tv_sec) * USEC_PER_SEC; 89 90 90 - if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) { 91 + if (llabs(diff - DELAY * USEC_PER_SEC) > USEC_PER_SEC / 2) { 91 92 printf("Diff too high: %lld..", diff); 92 93 return -1; 93 94 } ··· 447 448 { 448 449 int64_t diff; 449 450 450 - diff = NSECS_PER_SEC * (int64_t)((int) t1.tv_sec - (int) t2.tv_sec); 451 + diff = NSEC_PER_SEC * (int64_t)((int) t1.tv_sec - (int) t2.tv_sec); 451 452 diff += ((int) t1.tv_nsec - (int) t2.tv_nsec); 452 453 return diff; 453 454 } ··· 478 479 do { 479 480 if (clock_gettime(which, &now)) 480 481 fatal_error(name, "clock_gettime()"); 481 - } while (calcdiff_ns(now, start) < NSECS_PER_SEC); 482 + } while (calcdiff_ns(now, start) < NSEC_PER_SEC); 482 483 483 484 if (timer_gettime(timerid, &its)) 484 485 fatal_error(name, "timer_gettime()"); ··· 535 536 wraps++; 536 537 prev = its; 537 538 538 - } while (calcdiff_ns(now, start) < NSECS_PER_SEC); 539 + } while (calcdiff_ns(now, start) < NSEC_PER_SEC); 539 540 540 541 if (timer_delete(timerid)) 541 542 fatal_error(name, "timer_delete()"); ··· 586 587 do { 587 588 if (clock_gettime(which, &now)) 588 589 fatal_error(name, "clock_gettime()"); 589 - } while (calcdiff_ns(now, start) < NSECS_PER_SEC); 590 + } while (calcdiff_ns(now, start) < NSEC_PER_SEC); 590 591 591 592 /* Unblock it, which should deliver a signal */ 592 593 if (sigprocmask(SIG_UNBLOCK, &set, NULL))
+1 -3
tools/testing/selftests/timers/raw_skew.c
··· 25 25 #include <sys/time.h> 26 26 #include <sys/timex.h> 27 27 #include <time.h> 28 + #include <include/vdso/time64.h> 28 29 #include "../kselftest.h" 29 - 30 - #define CLOCK_MONOTONIC_RAW 4 31 - #define NSEC_PER_SEC 1000000000LL 32 30 33 31 #define shift_right(x, s) ({ \ 34 32 __typeof__(x) __x = (x); \
+1 -2
tools/testing/selftests/timers/set-2038.c
··· 27 27 #include <unistd.h> 28 28 #include <time.h> 29 29 #include <sys/time.h> 30 + #include <include/vdso/time64.h> 30 31 #include "../kselftest.h" 31 - 32 - #define NSEC_PER_SEC 1000000000LL 33 32 34 33 #define KTIME_MAX ((long long)~((unsigned long long)1 << 63)) 35 34 #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+5 -16
tools/testing/selftests/timers/set-timer-lat.c
··· 28 28 #include <signal.h> 29 29 #include <stdlib.h> 30 30 #include <pthread.h> 31 + #include <include/vdso/time64.h> 31 32 #include "../kselftest.h" 32 33 33 - #define CLOCK_REALTIME 0 34 - #define CLOCK_MONOTONIC 1 35 - #define CLOCK_PROCESS_CPUTIME_ID 2 36 - #define CLOCK_THREAD_CPUTIME_ID 3 37 - #define CLOCK_MONOTONIC_RAW 4 38 - #define CLOCK_REALTIME_COARSE 5 39 - #define CLOCK_MONOTONIC_COARSE 6 40 - #define CLOCK_BOOTTIME 7 41 - #define CLOCK_REALTIME_ALARM 8 42 - #define CLOCK_BOOTTIME_ALARM 9 34 + /* CLOCK_HWSPECIFIC == CLOCK_SGI_CYCLE (Deprecated) */ 43 35 #define CLOCK_HWSPECIFIC 10 44 - #define CLOCK_TAI 11 45 - #define NR_CLOCKIDS 12 46 36 47 - 48 - #define NSEC_PER_SEC 1000000000ULL 49 37 #define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */ 50 38 51 39 #define TIMER_SECS 1 ··· 68 80 return "CLOCK_BOOTTIME_ALARM"; 69 81 case CLOCK_TAI: 70 82 return "CLOCK_TAI"; 71 - }; 83 + } 72 84 return "UNKNOWN_CLOCKID"; 73 85 } 74 86 ··· 242 254 struct sigaction act; 243 255 int signum = SIGRTMAX; 244 256 int ret = 0; 257 + int max_clocks = CLOCK_TAI + 1; 245 258 246 259 /* Set up signal handler: */ 247 260 sigfillset(&act.sa_mask); ··· 251 262 sigaction(signum, &act, NULL); 252 263 253 264 printf("Setting timers for every %i seconds\n", TIMER_SECS); 254 - for (clock_id = 0; clock_id < NR_CLOCKIDS; clock_id++) { 265 + for (clock_id = 0; clock_id < max_clocks; clock_id++) { 255 266 256 267 if ((clock_id == CLOCK_PROCESS_CPUTIME_ID) || 257 268 (clock_id == CLOCK_THREAD_CPUTIME_ID) ||
+1 -3
tools/testing/selftests/timers/valid-adjtimex.c
··· 29 29 #include <string.h> 30 30 #include <signal.h> 31 31 #include <unistd.h> 32 + #include <include/vdso/time64.h> 32 33 #include "../kselftest.h" 33 - 34 - #define NSEC_PER_SEC 1000000000LL 35 - #define USEC_PER_SEC 1000000LL 36 34 37 35 #define ADJ_SETOFFSET 0x0100 38 36