Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/resctrl: Remove unused measurement code

The MBM and MBA resctrl selftests run a benchmark during which
they take measurements of read memory bandwidth via perf.
Code exists to support measurements of write memory bandwidth,
but there is no path through which this code can execute.

While code exists for write memory bandwidth measurement,
there has not yet been a use case for it. Remove this unused code.
Rename the relevant functions to include "read" so that it is clear
that they relate only to memory bandwidth reads. While renaming
the functions, also add consistency by changing the "membw"
instances to the more prevalent "mem_bw".

Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>

authored by

Reinette Chatre and committed by
Shuah Khan
76f8f009 13842417

+85 -182
+2 -2
tools/testing/selftests/resctrl/mba_test.c
··· 21 21 { 22 22 int ret; 23 23 24 - ret = initialize_mem_bw_imc(); 24 + ret = initialize_read_mem_bw_imc(); 25 25 if (ret) 26 26 return ret; 27 27 ··· 68 68 static int mba_measure(const struct user_params *uparams, 69 69 struct resctrl_val_param *param, pid_t bm_pid) 70 70 { 71 - return measure_mem_bw(uparams, param, bm_pid, "reads"); 71 + return measure_read_mem_bw(uparams, param, bm_pid); 72 72 } 73 73 74 74 static bool show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
+2 -2
tools/testing/selftests/resctrl/mbm_test.c
··· 91 91 { 92 92 int ret; 93 93 94 - ret = initialize_mem_bw_imc(); 94 + ret = initialize_read_mem_bw_imc(); 95 95 if (ret) 96 96 return ret; 97 97 ··· 122 122 static int mbm_measure(const struct user_params *uparams, 123 123 struct resctrl_val_param *param, pid_t bm_pid) 124 124 { 125 - return measure_mem_bw(uparams, param, bm_pid, "reads"); 125 + return measure_read_mem_bw(uparams, param, bm_pid); 126 126 } 127 127 128 128 static void mbm_test_cleanup(void)
+3 -5
tools/testing/selftests/resctrl/resctrl.h
··· 126 126 int get_domain_id(const char *resource, int cpu_no, int *domain_id); 127 127 int mount_resctrlfs(void); 128 128 int umount_resctrlfs(void); 129 - const char *get_bw_report_type(const char *bw_report); 130 129 bool resctrl_resource_exists(const char *resource); 131 130 bool resctrl_mon_feature_exists(const char *resource, const char *feature); 132 131 bool resource_info_file_exists(const char *resource, const char *file); ··· 142 143 void mem_flush(unsigned char *buf, size_t buf_size); 143 144 void fill_cache_read(unsigned char *buf, size_t buf_size, bool once); 144 145 int run_fill_buf(size_t buf_size, int memflush); 145 - int initialize_mem_bw_imc(void); 146 - int measure_mem_bw(const struct user_params *uparams, 147 - struct resctrl_val_param *param, pid_t bm_pid, 148 - const char *bw_report); 146 + int initialize_read_mem_bw_imc(void); 147 + int measure_read_mem_bw(const struct user_params *uparams, 148 + struct resctrl_val_param *param, pid_t bm_pid); 149 149 void initialize_mem_bw_resctrl(const struct resctrl_val_param *param, 150 150 int domain_id); 151 151 int resctrl_val(const struct resctrl_test *test,
+78 -156
tools/testing/selftests/resctrl/resctrl_val.c
··· 12 12 13 13 #define UNCORE_IMC "uncore_imc" 14 14 #define READ_FILE_NAME "events/cas_count_read" 15 - #define WRITE_FILE_NAME "events/cas_count_write" 16 15 #define DYN_PMU_PATH "/sys/bus/event_source/devices" 17 16 #define SCALE 0.00006103515625 18 17 #define MAX_IMCS 20 19 18 #define MAX_TOKENS 5 20 - #define READ 0 21 - #define WRITE 1 22 19 23 20 #define CON_MBM_LOCAL_BYTES_PATH \ 24 21 "%s/%s/mon_data/mon_L3_%02d/mbm_local_bytes" ··· 38 41 39 42 static char mbm_total_path[1024]; 40 43 static int imcs; 41 - static struct imc_counter_config imc_counters_config[MAX_IMCS][2]; 44 + static struct imc_counter_config imc_counters_config[MAX_IMCS]; 42 45 static const struct resctrl_test *current_test; 43 46 44 - static void membw_initialize_perf_event_attr(int i, int j) 47 + static void read_mem_bw_initialize_perf_event_attr(int i) 45 48 { 46 - memset(&imc_counters_config[i][j].pe, 0, 49 + memset(&imc_counters_config[i].pe, 0, 47 50 sizeof(struct perf_event_attr)); 48 - imc_counters_config[i][j].pe.type = imc_counters_config[i][j].type; 49 - imc_counters_config[i][j].pe.size = sizeof(struct perf_event_attr); 50 - imc_counters_config[i][j].pe.disabled = 1; 51 - imc_counters_config[i][j].pe.inherit = 1; 52 - imc_counters_config[i][j].pe.exclude_guest = 0; 53 - imc_counters_config[i][j].pe.config = 54 - imc_counters_config[i][j].umask << 8 | 55 - imc_counters_config[i][j].event; 56 - imc_counters_config[i][j].pe.sample_type = PERF_SAMPLE_IDENTIFIER; 57 - imc_counters_config[i][j].pe.read_format = 51 + imc_counters_config[i].pe.type = imc_counters_config[i].type; 52 + imc_counters_config[i].pe.size = sizeof(struct perf_event_attr); 53 + imc_counters_config[i].pe.disabled = 1; 54 + imc_counters_config[i].pe.inherit = 1; 55 + imc_counters_config[i].pe.exclude_guest = 0; 56 + imc_counters_config[i].pe.config = 57 + imc_counters_config[i].umask << 8 | 58 + imc_counters_config[i].event; 59 + imc_counters_config[i].pe.sample_type = PERF_SAMPLE_IDENTIFIER; 60 + 
imc_counters_config[i].pe.read_format = 58 61 PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; 59 62 } 60 63 61 - static void membw_ioctl_perf_event_ioc_reset_enable(int i, int j) 64 + static void read_mem_bw_ioctl_perf_event_ioc_reset_enable(int i) 62 65 { 63 - ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_RESET, 0); 64 - ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_ENABLE, 0); 66 + ioctl(imc_counters_config[i].fd, PERF_EVENT_IOC_RESET, 0); 67 + ioctl(imc_counters_config[i].fd, PERF_EVENT_IOC_ENABLE, 0); 65 68 } 66 69 67 - static void membw_ioctl_perf_event_ioc_disable(int i, int j) 70 + static void read_mem_bw_ioctl_perf_event_ioc_disable(int i) 68 71 { 69 - ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_DISABLE, 0); 72 + ioctl(imc_counters_config[i].fd, PERF_EVENT_IOC_DISABLE, 0); 70 73 } 71 74 72 75 /* 73 - * get_event_and_umask: Parse config into event and umask 76 + * get_read_event_and_umask: Parse config into event and umask 74 77 * @cas_count_cfg: Config 75 78 * @count: iMC number 76 - * @op: Operation (read/write) 77 79 */ 78 - static void get_event_and_umask(char *cas_count_cfg, int count, bool op) 80 + static void get_read_event_and_umask(char *cas_count_cfg, int count) 79 81 { 80 82 char *token[MAX_TOKENS]; 81 83 int i = 0; ··· 87 91 for (i = 0; i < MAX_TOKENS - 1; i++) { 88 92 if (!token[i]) 89 93 break; 90 - if (strcmp(token[i], "event") == 0) { 91 - if (op == READ) 92 - imc_counters_config[count][READ].event = 93 - strtol(token[i + 1], NULL, 16); 94 - else 95 - imc_counters_config[count][WRITE].event = 96 - strtol(token[i + 1], NULL, 16); 97 - } 98 - if (strcmp(token[i], "umask") == 0) { 99 - if (op == READ) 100 - imc_counters_config[count][READ].umask = 101 - strtol(token[i + 1], NULL, 16); 102 - else 103 - imc_counters_config[count][WRITE].umask = 104 - strtol(token[i + 1], NULL, 16); 105 - } 94 + if (strcmp(token[i], "event") == 0) 95 + imc_counters_config[count].event = strtol(token[i + 1], NULL, 16); 96 + if 
(strcmp(token[i], "umask") == 0) 97 + imc_counters_config[count].umask = strtol(token[i + 1], NULL, 16); 106 98 } 107 99 } 108 100 109 - static int open_perf_event(int i, int cpu_no, int j) 101 + static int open_perf_read_event(int i, int cpu_no) 110 102 { 111 - imc_counters_config[i][j].fd = 112 - perf_event_open(&imc_counters_config[i][j].pe, -1, cpu_no, -1, 103 + imc_counters_config[i].fd = 104 + perf_event_open(&imc_counters_config[i].pe, -1, cpu_no, -1, 113 105 PERF_FLAG_FD_CLOEXEC); 114 106 115 - if (imc_counters_config[i][j].fd == -1) { 107 + if (imc_counters_config[i].fd == -1) { 116 108 fprintf(stderr, "Error opening leader %llx\n", 117 - imc_counters_config[i][j].pe.config); 109 + imc_counters_config[i].pe.config); 118 110 119 111 return -1; 120 112 } ··· 110 126 return 0; 111 127 } 112 128 113 - /* Get type and config (read and write) of an iMC counter */ 129 + /* Get type and config of an iMC counter's read event. */ 114 130 static int read_from_imc_dir(char *imc_dir, int count) 115 131 { 116 132 char cas_count_cfg[1024], imc_counter_cfg[1024], imc_counter_type[1024]; ··· 124 140 125 141 return -1; 126 142 } 127 - if (fscanf(fp, "%u", &imc_counters_config[count][READ].type) <= 0) { 143 + if (fscanf(fp, "%u", &imc_counters_config[count].type) <= 0) { 128 144 ksft_perror("Could not get iMC type"); 129 145 fclose(fp); 130 146 131 147 return -1; 132 148 } 133 149 fclose(fp); 134 - 135 - imc_counters_config[count][WRITE].type = 136 - imc_counters_config[count][READ].type; 137 150 138 151 /* Get read config */ 139 152 sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME); ··· 148 167 } 149 168 fclose(fp); 150 169 151 - get_event_and_umask(cas_count_cfg, count, READ); 152 - 153 - /* Get write config */ 154 - sprintf(imc_counter_cfg, "%s%s", imc_dir, WRITE_FILE_NAME); 155 - fp = fopen(imc_counter_cfg, "r"); 156 - if (!fp) { 157 - ksft_perror("Failed to open iMC config file"); 158 - 159 - return -1; 160 - } 161 - if (fscanf(fp, "%1023s", cas_count_cfg) <= 0) 
{ 162 - ksft_perror("Could not get iMC cas count write"); 163 - fclose(fp); 164 - 165 - return -1; 166 - } 167 - fclose(fp); 168 - 169 - get_event_and_umask(cas_count_cfg, count, WRITE); 170 + get_read_event_and_umask(cas_count_cfg, count); 170 171 171 172 return 0; 172 173 } 173 174 174 175 /* 175 176 * A system can have 'n' number of iMC (Integrated Memory Controller) 176 - * counters, get that 'n'. For each iMC counter get it's type and config. 177 - * Also, each counter has two configs, one for read and the other for write. 178 - * A config again has two parts, event and umask. 177 + * counters, get that 'n'. Discover the properties of the available 178 + * counters in support of needed performance measurement via perf. 179 + * For each iMC counter get its type and config. Also obtain each 180 + * counter's event and umask for the memory read events that will be 181 + * measured. 182 + * 179 183 * Enumerate all these details into an array of structures. 180 184 * 181 185 * Return: >= 0 on success. < 0 on failure. 
··· 221 255 return count; 222 256 } 223 257 224 - int initialize_mem_bw_imc(void) 258 + int initialize_read_mem_bw_imc(void) 225 259 { 226 - int imc, j; 260 + int imc; 227 261 228 262 imcs = num_of_imcs(); 229 263 if (imcs <= 0) 230 264 return imcs; 231 265 232 266 /* Initialize perf_event_attr structures for all iMC's */ 233 - for (imc = 0; imc < imcs; imc++) { 234 - for (j = 0; j < 2; j++) 235 - membw_initialize_perf_event_attr(imc, j); 236 - } 267 + for (imc = 0; imc < imcs; imc++) 268 + read_mem_bw_initialize_perf_event_attr(imc); 237 269 238 270 return 0; 239 271 } 240 272 241 - static void perf_close_imc_mem_bw(void) 273 + static void perf_close_imc_read_mem_bw(void) 242 274 { 243 275 int mc; 244 276 245 277 for (mc = 0; mc < imcs; mc++) { 246 - if (imc_counters_config[mc][READ].fd != -1) 247 - close(imc_counters_config[mc][READ].fd); 248 - if (imc_counters_config[mc][WRITE].fd != -1) 249 - close(imc_counters_config[mc][WRITE].fd); 278 + if (imc_counters_config[mc].fd != -1) 279 + close(imc_counters_config[mc].fd); 250 280 } 251 281 } 252 282 253 283 /* 254 - * perf_open_imc_mem_bw - Open perf fds for IMCs 284 + * perf_open_imc_read_mem_bw - Open perf fds for IMCs 255 285 * @cpu_no: CPU number that the benchmark PID is bound to 256 286 * 257 287 * Return: = 0 on success. < 0 on failure. 
258 288 */ 259 - static int perf_open_imc_mem_bw(int cpu_no) 289 + static int perf_open_imc_read_mem_bw(int cpu_no) 260 290 { 261 291 int imc, ret; 262 292 263 - for (imc = 0; imc < imcs; imc++) { 264 - imc_counters_config[imc][READ].fd = -1; 265 - imc_counters_config[imc][WRITE].fd = -1; 266 - } 293 + for (imc = 0; imc < imcs; imc++) 294 + imc_counters_config[imc].fd = -1; 267 295 268 296 for (imc = 0; imc < imcs; imc++) { 269 - ret = open_perf_event(imc, cpu_no, READ); 270 - if (ret) 271 - goto close_fds; 272 - ret = open_perf_event(imc, cpu_no, WRITE); 297 + ret = open_perf_read_event(imc, cpu_no); 273 298 if (ret) 274 299 goto close_fds; 275 300 } ··· 268 311 return 0; 269 312 270 313 close_fds: 271 - perf_close_imc_mem_bw(); 314 + perf_close_imc_read_mem_bw(); 272 315 return -1; 273 316 } 274 317 275 318 /* 276 - * do_mem_bw_test - Perform memory bandwidth test 319 + * do_imc_read_mem_bw_test - Perform memory bandwidth test 277 320 * 278 321 * Runs memory bandwidth test over one second period. Also, handles starting 279 322 * and stopping of the IMC perf counters around the test. 280 323 */ 281 - static void do_imc_mem_bw_test(void) 324 + static void do_imc_read_mem_bw_test(void) 282 325 { 283 326 int imc; 284 327 285 - for (imc = 0; imc < imcs; imc++) { 286 - membw_ioctl_perf_event_ioc_reset_enable(imc, READ); 287 - membw_ioctl_perf_event_ioc_reset_enable(imc, WRITE); 288 - } 328 + for (imc = 0; imc < imcs; imc++) 329 + read_mem_bw_ioctl_perf_event_ioc_reset_enable(imc); 289 330 290 331 sleep(1); 291 332 292 - /* Stop counters after a second to get results (both read and write) */ 293 - for (imc = 0; imc < imcs; imc++) { 294 - membw_ioctl_perf_event_ioc_disable(imc, READ); 295 - membw_ioctl_perf_event_ioc_disable(imc, WRITE); 296 - } 333 + /* Stop counters after a second to get results. 
*/ 334 + for (imc = 0; imc < imcs; imc++) 335 + read_mem_bw_ioctl_perf_event_ioc_disable(imc); 297 336 } 298 337 299 338 /* 300 - * get_mem_bw_imc - Memory bandwidth as reported by iMC counters 301 - * @bw_report: Bandwidth report type (reads, writes) 339 + * get_read_mem_bw_imc - Memory read bandwidth as reported by iMC counters 302 340 * 303 - * Memory bandwidth utilized by a process on a socket can be calculated 304 - * using iMC counters. Perf events are used to read these counters. 341 + * Memory read bandwidth utilized by a process on a socket can be calculated 342 + * using iMC counters' read events. Perf events are used to read these 343 + * counters. 305 344 * 306 345 * Return: = 0 on success. < 0 on failure. 307 346 */ 308 - static int get_mem_bw_imc(const char *bw_report, float *bw_imc) 347 + static int get_read_mem_bw_imc(float *bw_imc) 309 348 { 310 - float reads, writes, of_mul_read, of_mul_write; 349 + float reads = 0, of_mul_read = 1; 311 350 int imc; 312 351 313 - /* Start all iMC counters to log values (both read and write) */ 314 - reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1; 315 - 316 352 /* 317 - * Get results which are stored in struct type imc_counter_config 353 + * Log read event values from all iMC counters into 354 + * struct imc_counter_config. 318 355 * Take overflow into consideration before calculating total bandwidth. 
319 356 */ 320 357 for (imc = 0; imc < imcs; imc++) { 321 358 struct imc_counter_config *r = 322 - &imc_counters_config[imc][READ]; 323 - struct imc_counter_config *w = 324 - &imc_counters_config[imc][WRITE]; 359 + &imc_counters_config[imc]; 325 360 326 361 if (read(r->fd, &r->return_value, 327 362 sizeof(struct membw_read_format)) == -1) { 328 363 ksft_perror("Couldn't get read bandwidth through iMC"); 329 - return -1; 330 - } 331 - 332 - if (read(w->fd, &w->return_value, 333 - sizeof(struct membw_read_format)) == -1) { 334 - ksft_perror("Couldn't get write bandwidth through iMC"); 335 364 return -1; 336 365 } 337 366 ··· 328 385 of_mul_read = (float)r_time_enabled / 329 386 (float)r_time_running; 330 387 331 - __u64 w_time_enabled = w->return_value.time_enabled; 332 - __u64 w_time_running = w->return_value.time_running; 333 - 334 - if (w_time_enabled != w_time_running) 335 - of_mul_write = (float)w_time_enabled / 336 - (float)w_time_running; 337 388 reads += r->return_value.value * of_mul_read * SCALE; 338 - writes += w->return_value.value * of_mul_write * SCALE; 339 389 } 340 390 341 - if (strcmp(bw_report, "reads") == 0) { 342 - *bw_imc = reads; 343 - return 0; 344 - } 345 - 346 - if (strcmp(bw_report, "writes") == 0) { 347 - *bw_imc = writes; 348 - return 0; 349 - } 350 - 351 - *bw_imc = reads + writes; 391 + *bw_imc = reads; 352 392 return 0; 353 393 } 354 394 ··· 477 551 } 478 552 479 553 /* 480 - * measure_mem_bw - Measures memory bandwidth numbers while benchmark runs 554 + * measure_read_mem_bw - Measures read memory bandwidth numbers while benchmark runs 481 555 * @uparams: User supplied parameters 482 556 * @param: Parameters passed to resctrl_val() 483 557 * @bm_pid: PID that runs the benchmark 484 - * @bw_report: Bandwidth report type (reads, writes) 485 558 * 486 559 * Measure memory bandwidth from resctrl and from another source which is 487 560 * perf imc value or could be something else if perf imc event is not 488 561 * available. 
Compare the two values to validate resctrl value. It takes 489 562 * 1 sec to measure the data. 563 + * resctrl does not distinguish between read and write operations so 564 + * its data includes all memory operations. 490 565 */ 491 - int measure_mem_bw(const struct user_params *uparams, 492 - struct resctrl_val_param *param, pid_t bm_pid, 493 - const char *bw_report) 566 + int measure_read_mem_bw(const struct user_params *uparams, 567 + struct resctrl_val_param *param, pid_t bm_pid) 494 568 { 495 569 unsigned long bw_resc, bw_resc_start, bw_resc_end; 496 570 FILE *mem_bw_fp; 497 571 float bw_imc; 498 572 int ret; 499 573 500 - bw_report = get_bw_report_type(bw_report); 501 - if (!bw_report) 502 - return -1; 503 - 504 574 mem_bw_fp = open_mem_bw_resctrl(mbm_total_path); 505 575 if (!mem_bw_fp) 506 576 return -1; 507 577 508 - ret = perf_open_imc_mem_bw(uparams->cpu); 578 + ret = perf_open_imc_read_mem_bw(uparams->cpu); 509 579 if (ret < 0) 510 580 goto close_fp; 511 581 ··· 511 589 512 590 rewind(mem_bw_fp); 513 591 514 - do_imc_mem_bw_test(); 592 + do_imc_read_mem_bw_test(); 515 593 516 594 ret = get_mem_bw_resctrl(mem_bw_fp, &bw_resc_end); 517 595 if (ret < 0) 518 596 goto close_imc; 519 597 520 - ret = get_mem_bw_imc(bw_report, &bw_imc); 598 + ret = get_read_mem_bw_imc(&bw_imc); 521 599 if (ret < 0) 522 600 goto close_imc; 523 601 524 - perf_close_imc_mem_bw(); 602 + perf_close_imc_read_mem_bw(); 525 603 fclose(mem_bw_fp); 526 604 527 605 bw_resc = (bw_resc_end - bw_resc_start) / MB; ··· 529 607 return print_results_bw(param->filename, bm_pid, bw_imc, bw_resc); 530 608 531 609 close_imc: 532 - perf_close_imc_mem_bw(); 610 + perf_close_imc_read_mem_bw(); 533 611 close_fp: 534 612 fclose(mem_bw_fp); 535 613 return ret;
-17
tools/testing/selftests/resctrl/resctrlfs.c
··· 831 831 return 0; 832 832 } 833 833 834 - const char *get_bw_report_type(const char *bw_report) 835 - { 836 - if (strcmp(bw_report, "reads") == 0) 837 - return bw_report; 838 - if (strcmp(bw_report, "writes") == 0) 839 - return bw_report; 840 - if (strcmp(bw_report, "nt-writes") == 0) { 841 - return "writes"; 842 - } 843 - if (strcmp(bw_report, "total") == 0) 844 - return bw_report; 845 - 846 - fprintf(stderr, "Requested iMC bandwidth report type unavailable\n"); 847 - 848 - return NULL; 849 - } 850 - 851 834 int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, 852 835 int group_fd, unsigned long flags) 853 836 {