Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf record: Adapt affinity to machines with #CPUs > 1K

Use struct mmap_cpu_mask type for the tool's thread and mmap data
buffers to overcome current 1024 CPUs mask size limitation of cpu_set_t
type.

Currently glibc's cpu_set_t type has an internal mask size limit of 1024
CPUs.

Moving to the 'struct mmap_cpu_mask' type allows overcoming that limit.

The tools bitmap API is used to manipulate objects of 'struct mmap_cpu_mask'
type.

Committer notes:

To print the 'nbits' struct member we must use %zd, since it is a
size_t, this fixes the build in some toolchains/arches.

Reported-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/96d7e2ff-ce8b-c1e0-d52c-aa59ea96f0ea@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

Authored by Alexey Budankov; committed by Arnaldo Carvalho de Melo.
8384a260 9c080c02

+45 -13
+22 -6
tools/perf/builtin-record.c
···   62   62	#include <linux/string.h>
      63   63	#include <linux/time64.h>
      64   64	#include <linux/zalloc.h>
           65 +	#include <linux/bitmap.h>
      65   66	
      66   67	struct switch_output {
      67   68		bool enabled;
···   94   93		bool timestamp_boundary;
      95   94		struct switch_output switch_output;
      96   95		unsigned long long samples;
      97       -	cpu_set_t affinity_mask;
           96 +	struct mmap_cpu_mask affinity_mask;
      98   97		unsigned long output_max_size; /* = 0: unlimited */
      99   98	};
···  962  961	static void record__adjust_affinity(struct record *rec, struct mmap *map)
     963  962	{
     964  963		if (rec->opts.affinity != PERF_AFFINITY_SYS &&
     965       -	    !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
     966       -		CPU_ZERO(&rec->affinity_mask);
     967       -		CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
     968       -		sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
          964 +	    !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
          965 +			  rec->affinity_mask.nbits)) {
          966 +		bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
          967 +		bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
          968 +			  map->affinity_mask.bits, rec->affinity_mask.nbits);
          969 +		sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
          970 +				  (cpu_set_t *)rec->affinity_mask.bits);
          971 +		if (verbose == 2)
          972 +			mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread");
     969  973		}
     970  974	}
··· 2439 2433	# undef REASON
    2440 2434	#endif
    2441 2435	
    2442       -	CPU_ZERO(&rec->affinity_mask);
    2443 2436		rec->opts.affinity = PERF_AFFINITY_SYS;
    2444 2437	
    2445 2438		rec->evlist = evlist__new();
··· 2503 2498		symbol_conf.allow_aliases = true;
    2504 2499	
    2505 2500		symbol__init(NULL);
         2501 +
         2502 +	if (rec->opts.affinity != PERF_AFFINITY_SYS) {
         2503 +		rec->affinity_mask.nbits = cpu__max_cpu();
         2504 +		rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
         2505 +		if (!rec->affinity_mask.bits) {
         2506 +			pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
         2507 +			return -ENOMEM;
         2508 +		}
         2509 +		pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits);
         2510 +	}
    2506 2511	
    2507 2512		err = record__auxtrace_init(rec);
    2508 2513		if (err)
··· 2628 2613	
    2629 2614		err = __cmd_record(&record, argc, argv);
    2630 2615	out:
         2616 +	bitmap_free(rec->affinity_mask.bits);
    2631 2617		evlist__delete(rec->evlist);
    2632 2618		symbol__exit();
    2633 2619		auxtrace_record__free(rec->itr);
+22 -6
tools/perf/util/mmap.c
···  219  219	
     220  220	void mmap__munmap(struct mmap *map)
     221  221	{
          222 +	bitmap_free(map->affinity_mask.bits);
          223 +
     222  224		perf_mmap__aio_munmap(map);
     223  225		if (map->data != NULL) {
     224  226			munmap(map->data, mmap__mmap_len(map));
···  229  227		auxtrace_mmap__munmap(&map->auxtrace_mmap);
     230  228	}
     231  229	
     232       -static void build_node_mask(int node, cpu_set_t *mask)
          230 +static void build_node_mask(int node, struct mmap_cpu_mask *mask)
     233  231	{
     234  232		int c, cpu, nr_cpus;
     235  233		const struct perf_cpu_map *cpu_map = NULL;
···  242  240		for (c = 0; c < nr_cpus; c++) {
     243  241			cpu = cpu_map->map[c]; /* map c index to online cpu index */
     244  242			if (cpu__get_node(cpu) == node)
     245       -			CPU_SET(cpu, mask);
          243 +			set_bit(cpu, mask->bits);
     246  244		}
     247  245	}
     248  246	
     249       -static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
          247 +static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
     250  248	{
     251       -	CPU_ZERO(&map->affinity_mask);
          249 +	map->affinity_mask.nbits = cpu__max_cpu();
          250 +	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
          251 +	if (!map->affinity_mask.bits)
          252 +		return -1;
          253 +
     252  254		if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
     253  255			build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
     254  256		else if (mp->affinity == PERF_AFFINITY_CPU)
     255       -		CPU_SET(map->core.cpu, &map->affinity_mask);
          257 +		set_bit(map->core.cpu, map->affinity_mask.bits);
          258 +
          259 +	return 0;
     256  260	}
     257  261	
     258  262	int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
···  269  261			return -1;
     270  262		}
     271  263	
     272       -	perf_mmap__setup_affinity_mask(map, mp);
          264 +	if (mp->affinity != PERF_AFFINITY_SYS &&
          265 +	    perf_mmap__setup_affinity_mask(map, mp)) {
          266 +		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
          267 +			  errno);
          268 +		return -1;
          269 +	}
          270 +
          271 +	if (verbose == 2)
          272 +		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
     273  273	
     274  274		map->core.flush = mp->flush;
     275  275	
+1 -1
tools/perf/util/mmap.h
···   40   40			int nr_cblocks;
      41   41		} aio;
      42   42	#endif
      43       -	cpu_set_t affinity_mask;
           43 +	struct mmap_cpu_mask affinity_mask;
      44   44		void *data;
      45   45		int comp_level;
      46   46	};