Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf tools: Use dedicated non-atomic clear/set bit helpers

Use the dedicated non-atomic helpers for {clear,set}_bit() and their
test variants, i.e. the double-underscore versions. Despite being
defined in atomic.h, and despite the kernel versions being atomic in the
kernel, tools' {clear,set}_bit() helpers aren't actually atomic. Move
to the double-underscore versions so that the versions that are expected
to be atomic (for kernel developers) can be made atomic without
affecting users that don't want atomic operations.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: James Morse <james.morse@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Oliver Upton <oliver.upton@linux.dev>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Yury Norov <yury.norov@gmail.com>
Cc: Alexandru Elisei <alexandru.elisei@arm.com>
Cc: kvm@vger.kernel.org
Cc: kvmarm@lists.cs.columbia.edu
Cc: kvmarm@lists.linux.dev
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lore.kernel.org/lkml/20221119013450.2643007-6-seanjc@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Sean Christopherson and committed by
Arnaldo Carvalho de Melo
49bd97c2 0c3852ad

+27 -27
+1 -1
tools/perf/bench/find-bit-bench.c
··· 70 70 bitmap_zero(to_test, num_bits); 71 71 skip = num_bits / set_bits; 72 72 for (i = 0; i < num_bits; i += skip) 73 - set_bit(i, to_test); 73 + __set_bit(i, to_test); 74 74 75 75 for (i = 0; i < outer_iterations; i++) { 76 76 old = accumulator;
+3 -3
tools/perf/builtin-c2c.c
··· 230 230 "WARNING: no sample cpu value")) 231 231 return; 232 232 233 - set_bit(sample->cpu, c2c_he->cpuset); 233 + __set_bit(sample->cpu, c2c_he->cpuset); 234 234 } 235 235 236 236 static void c2c_he__set_node(struct c2c_hist_entry *c2c_he, ··· 247 247 if (WARN_ONCE(node < 0, "WARNING: failed to find node\n")) 248 248 return; 249 249 250 - set_bit(node, c2c_he->nodeset); 250 + __set_bit(node, c2c_he->nodeset); 251 251 252 252 if (c2c_he->paddr != sample->phys_addr) { 253 253 c2c_he->paddr_cnt++; ··· 2318 2318 continue; 2319 2319 2320 2320 perf_cpu_map__for_each_cpu(cpu, idx, map) { 2321 - set_bit(cpu.cpu, set); 2321 + __set_bit(cpu.cpu, set); 2322 2322 2323 2323 if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug")) 2324 2324 return -EINVAL;
+3 -3
tools/perf/builtin-kwork.c
··· 222 222 list_add_tail(&page->list, &kwork->atom_page_list); 223 223 224 224 found_atom: 225 - set_bit(i, page->bitmap); 225 + __set_bit(i, page->bitmap); 226 226 atom->time = sample->time; 227 227 atom->prev = NULL; 228 228 atom->page_addr = page; ··· 235 235 if (atom->prev != NULL) 236 236 atom_free(atom->prev); 237 237 238 - clear_bit(atom->bit_inpage, 239 - ((struct kwork_atom_page *)atom->page_addr)->bitmap); 238 + __clear_bit(atom->bit_inpage, 239 + ((struct kwork_atom_page *)atom->page_addr)->bitmap); 240 240 } 241 241 242 242 static void atom_del(struct kwork_atom *atom)
+3 -3
tools/perf/builtin-record.c
··· 3555 3555 /* Return ENODEV is input cpu is greater than max cpu */ 3556 3556 if ((unsigned long)cpu.cpu > mask->nbits) 3557 3557 return -ENODEV; 3558 - set_bit(cpu.cpu, mask->bits); 3558 + __set_bit(cpu.cpu, mask->bits); 3559 3559 } 3560 3560 3561 3561 return 0; ··· 3627 3627 pr_debug("nr_threads: %d\n", rec->nr_threads); 3628 3628 3629 3629 for (t = 0; t < rec->nr_threads; t++) { 3630 - set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits); 3631 - set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits); 3630 + __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits); 3631 + __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits); 3632 3632 if (verbose) { 3633 3633 pr_debug("thread_masks[%d]: ", t); 3634 3634 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
+1 -1
tools/perf/builtin-sched.c
··· 1573 1573 1574 1574 if (sched->map.comp) { 1575 1575 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); 1576 - if (!test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) { 1576 + if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) { 1577 1577 sched->map.comp_cpus[cpus_nr++] = this_cpu; 1578 1578 new_cpu = true; 1579 1579 }
+1 -1
tools/perf/tests/bitmap.c
··· 18 18 19 19 if (map && bm) { 20 20 for (i = 0; i < perf_cpu_map__nr(map); i++) 21 - set_bit(perf_cpu_map__cpu(map, i).cpu, bm); 21 + __set_bit(perf_cpu_map__cpu(map, i).cpu, bm); 22 22 } 23 23 24 24 if (map)
+1 -1
tools/perf/tests/mem2node.c
··· 33 33 int i; 34 34 35 35 perf_cpu_map__for_each_cpu(cpu, i, map) 36 - set_bit(cpu.cpu, bm); 36 + __set_bit(cpu.cpu, bm); 37 37 } 38 38 39 39 if (map)
+2 -2
tools/perf/util/affinity.c
··· 58 58 return; 59 59 60 60 a->changed = true; 61 - set_bit(cpu, a->sched_cpus); 61 + __set_bit(cpu, a->sched_cpus); 62 62 /* 63 63 * We ignore errors because affinity is just an optimization. 64 64 * This could happen for example with isolated CPUs or cpusets. 65 65 * In this case the IPIs inside the kernel's perf API still work. 66 66 */ 67 67 sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus); 68 - clear_bit(cpu, a->sched_cpus); 68 + __clear_bit(cpu, a->sched_cpus); 69 69 } 70 70 71 71 static void __affinity__cleanup(struct affinity *a)
+4 -4
tools/perf/util/header.c
··· 79 79 80 80 void perf_header__set_feat(struct perf_header *header, int feat) 81 81 { 82 - set_bit(feat, header->adds_features); 82 + __set_bit(feat, header->adds_features); 83 83 } 84 84 85 85 void perf_header__clear_feat(struct perf_header *header, int feat) 86 86 { 87 - clear_bit(feat, header->adds_features); 87 + __clear_bit(feat, header->adds_features); 88 88 } 89 89 90 90 bool perf_header__has_feat(const struct perf_header *header, int feat) ··· 1358 1358 rewinddir(dir); 1359 1359 1360 1360 for_each_memory(phys, dir) { 1361 - set_bit(phys, n->set); 1361 + __set_bit(phys, n->set); 1362 1362 } 1363 1363 1364 1364 closedir(dir); ··· 3952 3952 3953 3953 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 3954 3954 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 3955 - set_bit(HEADER_BUILD_ID, header->adds_features); 3955 + __set_bit(HEADER_BUILD_ID, header->adds_features); 3956 3956 } 3957 3957 } 3958 3958
+3 -3
tools/perf/util/mmap.c
··· 111 111 pr_err("Failed to allocate node mask for mbind: error %m\n"); 112 112 return -1; 113 113 } 114 - set_bit(node_index, node_mask); 114 + __set_bit(node_index, node_mask); 115 115 if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) { 116 116 pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n", 117 117 data, data + mmap_len, node_index); ··· 256 256 for (idx = 0; idx < nr_cpus; idx++) { 257 257 cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */ 258 258 if (cpu__get_node(cpu) == node) 259 - set_bit(cpu.cpu, mask->bits); 259 + __set_bit(cpu.cpu, mask->bits); 260 260 } 261 261 } 262 262 ··· 270 270 if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) 271 271 build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask); 272 272 else if (mp->affinity == PERF_AFFINITY_CPU) 273 - set_bit(map->core.cpu.cpu, map->affinity_mask.bits); 273 + __set_bit(map->core.cpu.cpu, map->affinity_mask.bits); 274 274 275 275 return 0; 276 276 }
+1 -1
tools/perf/util/pmu.c
··· 1533 1533 1534 1534 memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS)); 1535 1535 for (b = from; b <= to; b++) 1536 - set_bit(b, bits); 1536 + __set_bit(b, bits); 1537 1537 } 1538 1538 1539 1539 void perf_pmu__del_formats(struct list_head *formats)
+1 -1
tools/perf/util/scripting-engines/trace-event-perl.c
··· 365 365 366 366 sprintf(handler, "%s::%s", event->system, event->name); 367 367 368 - if (!test_and_set_bit(event->id, events_defined)) 368 + if (!__test_and_set_bit(event->id, events_defined)) 369 369 define_event_symbols(event, handler, event->print_fmt.args); 370 370 371 371 s = nsecs / NSEC_PER_SEC;
+1 -1
tools/perf/util/scripting-engines/trace-event-python.c
··· 934 934 935 935 sprintf(handler_name, "%s__%s", event->system, event->name); 936 936 937 - if (!test_and_set_bit(event->id, events_defined)) 937 + if (!__test_and_set_bit(event->id, events_defined)) 938 938 define_event_symbols(event, handler_name, event->print_fmt.args); 939 939 940 940 handler = get_handler(handler_name);
+1 -1
tools/perf/util/session.c
··· 2749 2749 goto out_delete_map; 2750 2750 } 2751 2751 2752 - set_bit(cpu.cpu, cpu_bitmap); 2752 + __set_bit(cpu.cpu, cpu_bitmap); 2753 2753 } 2754 2754 2755 2755 err = 0;
+1 -1
tools/perf/util/svghelper.c
··· 741 741 break; 742 742 } 743 743 744 - set_bit(c.cpu, cpumask_bits(b)); 744 + __set_bit(c.cpu, cpumask_bits(b)); 745 745 } 746 746 747 747 perf_cpu_map__put(m);