// SPDX-License-Identifier: GPL-2.0-only
#include <assert.h>
#include <errno.h>
#include <perf/cpumap.h>
#include <stdint.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include "internal.h"
#include <api/fs/fs.h>

#define MAX_NR_CPUS 4096

void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

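/*
 * Allocate a map able to hold nr_cpus entries in its flexible array.
 * Empty maps are encoded as NULL, so a zero-length request returns NULL.
 * The new map starts with a reference count of one.
 */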
struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus;
	struct perf_cpu_map *result;

	if (nr_cpus == 0)
		return NULL;

	cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}
	return result;
}

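/*
 * A single-entry map holding the sentinel value -1, libperf's encoding
 * for "any CPU" (as in the cpu argument of perf_event_open()).
 */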
struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}

static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

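/*
 * Reference counting: __get() bumps the count and returns a (possibly
 * check-wrapped) handle to the same map; __put() drops the count and
 * frees the map once it reaches zero.
 */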
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}

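/*
 * Fallback when sysfs is unavailable: size the map with
 * sysconf(_SC_NPROCESSORS_ONLN) and assume CPUs are numbered 0..n-1,
 * which can misdescribe systems whose online CPUs are sparsely numbered.
 */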
static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus, nr_cpus_conf;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
	if (nr_cpus != nr_cpus_conf) {
		pr_warning("Number of online CPUs (%d) differs from the number configured (%d); the CPU map will only cover the first %d CPUs.",
			   nr_cpus, nr_cpus_conf, nr_cpus);
	}

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

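/*
 * Preferred path: parse the kernel's "devices/system/cpu/online" file,
 * whose list syntax correctly describes sparse and hotplugged CPU
 * numbering.
 */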
static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
	struct perf_cpu_map *cpus = NULL;
	char *buf = NULL;
	size_t buf_len;

	if (sysfs__read_str("devices/system/cpu/online", &buf, &buf_len) >= 0) {
		cpus = perf_cpu_map__new(buf);
		free(buf);
	}
	return cpus;
}

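/*
 * Build a map of the online CPUs, trying sysfs first and falling back to
 * sysconf() when sysfs cannot be read.
 */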
struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
	struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();

	if (cpus)
		return cpus;

	return cpu_map__new_sysconf();
}

static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

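/*
 * Build a map from a scratch array that may be unsorted and contain
 * duplicates: copy it in, qsort() it, then compact equal neighbours so
 * the result is sorted and unique, shrinking nr to the deduplicated
 * count.
 */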
static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove dups */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}

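/*
 * Parse a user-supplied CPU list such as "0-2,4,7-8" into a sorted,
 * de-duplicated map. A NULL list yields the online CPUs and an empty
 * string yields the "any CPU" map; ranges are inclusive, duplicate CPUs
 * are rejected, and CPU numbers must be below INT16_MAX.
 */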
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return perf_cpu_map__new_online_cpus();

	/*
	 * Must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPU
	 * (e.g., because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT16_MAX
		    || (*p != '\0' && *p != ',' && *p != '-' && *p != '\n'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT16_MAX || (*p != '\0' && *p != ',' && *p != '\n'))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
			  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int16_t)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += max(end_cpu - start_cpu + 1, 16UL);
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int16_t)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0) {
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	} else if (*cpu_list != '\0') {
		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
			   cpu_list);
		cpus = perf_cpu_map__new_online_cpus();
	} else {
		cpus = perf_cpu_map__new_any_cpu();
	}
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

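/* A single-entry map holding 'cpu'; the value is not validated. */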
struct perf_cpu_map *perf_cpu_map__new_int(int cpu)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = cpu;

	return cpus;
}

static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

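/* The CPU at 'idx', or -1 if 'idx' is out of range or the map is NULL. */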
struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

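/*
 * Number of entries in the map. A NULL map reports 1 rather than 0, so
 * that the for-each iterators still visit a single "any CPU" (-1) entry
 * in that case.
 */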
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}

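/*
 * The next two predicates differ subtly: the first is true if the map is
 * NULL *or* its first entry is the "any CPU" value; the second
 * additionally requires that "any CPU" be the map's only entry.
 */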
bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

bool perf_cpu_map__is_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	if (!map)
		return true;

	return __perf_cpu_map__nr(map) == 1 && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

bool perf_cpu_map__is_empty(const struct perf_cpu_map *map)
{
	return map == NULL;
}

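/*
 * Look up 'cpu' by binary search; maps are kept sorted by
 * cpu_map__trim_new(). Returns the index of 'cpu', or -1 if it is not
 * present.
 */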
int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}

bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

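/* True only for a non-NULL map whose first entry is the "any CPU" value. */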
bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

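/* The smallest CPU in the map, ignoring any "any CPU" (-1) entry. */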
struct perf_cpu perf_cpu_map__min(const struct perf_cpu_map *map)
{
	struct perf_cpu cpu, result = {
		.cpu = -1
	};
	int idx;

	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, map) {
		result = cpu;
		break;
	}
	return result;
}

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (!map)
		return result;

	// The CPUs are always sorted and nr is always > 0, as a zero-length
	// map is encoded as NULL.
	return __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1);
}

/** Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;
	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}

/*
 * Merge two cpumaps.
 *
 * If 'other' is a subset of '*orig', '*orig' is kept with no reference
 * count change (similar to "realloc").
 *
 * If '*orig' is a subset of 'other', '*orig' reuses 'other' with its
 * reference count increased.
 *
 * Otherwise, '*orig' gets freed and replaced with a new map.
 */
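/*
 * Illustrative example (not from the source): merging a "2-3" map into a
 * "0-2" map hits the third case, since neither map contains the other,
 * and leaves '*orig' pointing at a newly allocated "0-3" map.
 */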
int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(*orig, other))
		return 0;
	if (perf_cpu_map__is_subset(other, *orig)) {
		perf_cpu_map__put(*orig);
		*orig = perf_cpu_map__get(other);
		return 0;
	}

	tmp_len = __perf_cpu_map__nr(*orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return -ENOMEM;

	/* Standard merge algorithm from Wikipedia. */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(*orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(*orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(*orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(*orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(*orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(*orig);
	*orig = merged;
	return 0;
}

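/*
 * Intersect two maps: a first two-pointer pass counts the common CPUs so
 * the result can be allocated exactly, then an identical second pass
 * fills it in. Disjoint maps yield NULL, the encoding of an empty map.
 */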
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else { /* CPUs match. */
			i++;
			j++;
			k++;
		}
	}
	if (k == 0) /* Maps are completely disjoint. */
		return NULL;

	merged = perf_cpu_map__alloc(k);
	if (!merged)
		return NULL;
	/* Entries are added to merged in sorted order, so no need to sort again. */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			RC_CHK_ACCESS(merged)->map[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	return merged;
}