Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf tools: Use list_del_init() more thoroughly

To allow destructors to check if they're operating on an object that is
still in a list, and to avoid following use-after-free list entries into
still-valid entries, or even into other entries that were themselves
already removed from a list.

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-deh17ub44atyox3j90e6rksu@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+38 -38
+1 -1
tools/perf/builtin-ftrace.c
··· 431 431 struct filter_entry *pos, *tmp; 432 432 433 433 list_for_each_entry_safe(pos, tmp, head, list) { 434 - list_del(&pos->list); 434 + list_del_init(&pos->list); 435 435 free(pos); 436 436 } 437 437 }
+4 -4
tools/perf/builtin-lock.c
··· 454 454 /* broken lock sequence, discard it */ 455 455 ls->discard = 1; 456 456 bad_hist[BROKEN_ACQUIRE]++; 457 - list_del(&seq->list); 457 + list_del_init(&seq->list); 458 458 free(seq); 459 459 goto end; 460 460 default: ··· 515 515 /* broken lock sequence, discard it */ 516 516 ls->discard = 1; 517 517 bad_hist[BROKEN_ACQUIRED]++; 518 - list_del(&seq->list); 518 + list_del_init(&seq->list); 519 519 free(seq); 520 520 goto end; 521 521 default: ··· 570 570 /* broken lock sequence, discard it */ 571 571 ls->discard = 1; 572 572 bad_hist[BROKEN_CONTENDED]++; 573 - list_del(&seq->list); 573 + list_del_init(&seq->list); 574 574 free(seq); 575 575 goto end; 576 576 default: ··· 639 639 640 640 ls->nr_release++; 641 641 free_seq: 642 - list_del(&seq->list); 642 + list_del_init(&seq->list); 643 643 free(seq); 644 644 end: 645 645 return 0;
+1 -1
tools/perf/pmu-events/jevents.c
··· 407 407 408 408 list_for_each_entry_safe(es, next, &arch_std_events, list) { 409 409 FOR_ALL_EVENT_STRUCT_FIELDS(FREE_EVENT_FIELD); 410 - list_del(&es->list); 410 + list_del_init(&es->list); 411 411 free(es); 412 412 } 413 413 }
+1 -1
tools/perf/tests/switch-tracking.c
··· 238 238 239 239 while (!list_empty(events)) { 240 240 node = list_entry(events->next, struct event_node, list); 241 - list_del(&node->list); 241 + list_del_init(&node->list); 242 242 free(node); 243 243 } 244 244 }
+1 -1
tools/perf/ui/gtk/annotate.c
··· 152 152 gtk_container_add(GTK_CONTAINER(window), view); 153 153 154 154 list_for_each_entry_safe(pos, n, &notes->src->source, al.node) { 155 - list_del(&pos->al.node); 155 + list_del_init(&pos->al.node); 156 156 disasm_line__free(pos); 157 157 } 158 158
+2 -2
tools/perf/util/annotate.c
··· 1586 1586 return; 1587 1587 } 1588 1588 1589 - list_del(&dl->al.node); 1589 + list_del_init(&dl->al.node); 1590 1590 disasm_line__free(dl); 1591 1591 } 1592 1592 } ··· 2463 2463 struct annotation_line *al, *n; 2464 2464 2465 2465 list_for_each_entry_safe(al, n, &as->source, node) { 2466 - list_del(&al->node); 2466 + list_del_init(&al->node); 2467 2467 disasm_line__free(disasm_line(al)); 2468 2468 } 2469 2469 }
+2 -2
tools/perf/util/auxtrace.c
··· 408 408 409 409 buffer = list_entry(queues->queue_array[i].head.next, 410 410 struct auxtrace_buffer, list); 411 - list_del(&buffer->list); 411 + list_del_init(&buffer->list); 412 412 auxtrace_buffer__free(buffer); 413 413 } 414 414 } ··· 612 612 struct auxtrace_index *auxtrace_index, *n; 613 613 614 614 list_for_each_entry_safe(auxtrace_index, n, head, list) { 615 - list_del(&auxtrace_index->list); 615 + list_del_init(&auxtrace_index->list); 616 616 free(auxtrace_index); 617 617 } 618 618 }
+1 -1
tools/perf/util/bpf-loader.c
··· 829 829 bpf_map_op__delete(struct bpf_map_op *op) 830 830 { 831 831 if (!list_empty(&op->list)) 832 - list_del(&op->list); 832 + list_del_init(&op->list); 833 833 if (op->key_type == BPF_MAP_KEY_RANGES) 834 834 parse_events__clear_array(&op->k.array); 835 835 free(op);
+1 -1
tools/perf/util/call-path.c
··· 40 40 struct call_path_block *pos, *n; 41 41 42 42 list_for_each_entry_safe(pos, n, &cpr->blocks, node) { 43 - list_del(&pos->node); 43 + list_del_init(&pos->node); 44 44 free(pos); 45 45 } 46 46 free(cpr);
+5 -5
tools/perf/util/callchain.c
··· 636 636 struct callchain_list *call, *tmp; 637 637 638 638 list_for_each_entry_safe(call, tmp, &new->val, list) { 639 - list_del(&call->list); 639 + list_del_init(&call->list); 640 640 map__zput(call->ms.map); 641 641 free(call); 642 642 } ··· 1002 1002 callchain_cursor_append(cursor, list->ip, 1003 1003 list->ms.map, list->ms.sym, 1004 1004 false, NULL, 0, 0, 0, list->srcline); 1005 - list_del(&list->list); 1005 + list_del_init(&list->list); 1006 1006 map__zput(list->ms.map); 1007 1007 free(list); 1008 1008 } ··· 1453 1453 struct rb_node *n; 1454 1454 1455 1455 list_for_each_entry_safe(list, tmp, &node->parent_val, list) { 1456 - list_del(&list->list); 1456 + list_del_init(&list->list); 1457 1457 map__zput(list->ms.map); 1458 1458 free(list); 1459 1459 } 1460 1460 1461 1461 list_for_each_entry_safe(list, tmp, &node->val, list) { 1462 - list_del(&list->list); 1462 + list_del_init(&list->list); 1463 1463 map__zput(list->ms.map); 1464 1464 free(list); 1465 1465 } ··· 1544 1544 1545 1545 out: 1546 1546 list_for_each_entry_safe(chain, new, &head, list) { 1547 - list_del(&chain->list); 1547 + list_del_init(&chain->list); 1548 1548 map__zput(chain->ms.map); 1549 1549 free(chain); 1550 1550 }
+2 -2
tools/perf/util/db-export.c
··· 34 34 de = list_entry(dbe->deferred.next, struct deferred_export, 35 35 node); 36 36 err = dbe->export_comm(dbe, de->comm); 37 - list_del(&de->node); 37 + list_del_init(&de->node); 38 38 free(de); 39 39 if (err) 40 40 return err; ··· 50 50 while (!list_empty(&dbe->deferred)) { 51 51 de = list_entry(dbe->deferred.next, struct deferred_export, 52 52 node); 53 - list_del(&de->node); 53 + list_del_init(&de->node); 54 54 free(de); 55 55 } 56 56 }
+1 -1
tools/perf/util/dso.c
··· 434 434 435 435 static void dso__list_del(struct dso *dso) 436 436 { 437 - list_del(&dso->data.open_entry); 437 + list_del_init(&dso->data.open_entry); 438 438 WARN_ONCE(dso__data_open_cnt <= 0, 439 439 "DSO data fd counter out of bounds."); 440 440 dso__data_open_cnt--;
+1 -1
tools/perf/util/evsel.c
··· 1298 1298 struct perf_evsel_config_term *term, *h; 1299 1299 1300 1300 list_for_each_entry_safe(term, h, &evsel->config_terms, list) { 1301 - list_del(&term->list); 1301 + list_del_init(&term->list); 1302 1302 free(term); 1303 1303 } 1304 1304 }
+2 -2
tools/perf/util/hist.c
··· 2741 2741 2742 2742 list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) { 2743 2743 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) { 2744 - list_del(&fmt->list); 2744 + list_del_init(&fmt->list); 2745 2745 free(fmt); 2746 2746 } 2747 - list_del(&node->list); 2747 + list_del_init(&node->list); 2748 2748 free(node); 2749 2749 } 2750 2750 }
+3 -3
tools/perf/util/ordered-events.c
··· 138 138 139 139 if (!list_empty(cache)) { 140 140 new = list_entry(cache->next, struct ordered_event, list); 141 - list_del(&new->list); 141 + list_del_init(&new->list); 142 142 } else if (oe->buffer) { 143 143 new = &oe->buffer->event[oe->buffer_idx]; 144 144 if (++oe->buffer_idx == MAX_SAMPLE_BUFFER) ··· 394 394 * yet, we need to free only allocated ones ... 395 395 */ 396 396 if (oe->buffer) { 397 - list_del(&oe->buffer->list); 397 + list_del_init(&oe->buffer->list); 398 398 ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); 399 399 } 400 400 401 401 /* ... and continue with the rest */ 402 402 list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) { 403 - list_del(&buffer->list); 403 + list_del_init(&buffer->list); 404 404 ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe); 405 405 } 406 406 }
+1 -1
tools/perf/util/parse-events.c
··· 652 652 pr_debug("Failed to add BPF event %s:%s\n", 653 653 group, event); 654 654 list_for_each_entry_safe(evsel, tmp, &new_evsels, node) { 655 - list_del(&evsel->node); 655 + list_del_init(&evsel->node); 656 656 perf_evsel__delete(evsel); 657 657 } 658 658 return err;
+1 -1
tools/perf/util/pmu.c
··· 1245 1245 info->metric_expr = alias->metric_expr; 1246 1246 info->metric_name = alias->metric_name; 1247 1247 1248 - list_del(&term->list); 1248 + list_del_init(&term->list); 1249 1249 free(term); 1250 1250 } 1251 1251
+1 -1
tools/perf/util/probe-event.c
··· 2333 2333 while (!list_empty(blacklist)) { 2334 2334 node = list_first_entry(blacklist, 2335 2335 struct kprobe_blacklist_node, list); 2336 - list_del(&node->list); 2336 + list_del_init(&node->list); 2337 2337 zfree(&node->symbol); 2338 2338 free(node); 2339 2339 }
+1 -1
tools/perf/util/s390-cpumsf.c
··· 756 756 */ 757 757 if (err) { 758 758 sfq->buffer = NULL; 759 - list_del(&buffer->list); 759 + list_del_init(&buffer->list); 760 760 auxtrace_buffer__free(buffer); 761 761 if (err > 0) /* Buffer done, no error */ 762 762 err = 0;
+1 -1
tools/perf/util/srccode.c
··· 83 83 84 84 static void free_srcfile(struct srcfile *sf) 85 85 { 86 - list_del(&sf->nd); 86 + list_del_init(&sf->nd); 87 87 hlist_del(&sf->hash_nd); 88 88 map_total_sz -= sf->maplen; 89 89 munmap(sf->map, sf->maplen);
+3 -3
tools/perf/util/symbol-elf.c
··· 1478 1478 struct phdr_data *p, *tmp; 1479 1479 1480 1480 list_for_each_entry_safe(p, tmp, &kci->phdrs, node) { 1481 - list_del(&p->node); 1481 + list_del_init(&p->node); 1482 1482 free(p); 1483 1483 } 1484 1484 } ··· 1501 1501 struct sym_data *s, *tmp; 1502 1502 1503 1503 list_for_each_entry_safe(s, tmp, &kci->syms, node) { 1504 - list_del(&s->node); 1504 + list_del_init(&s->node); 1505 1505 free(s); 1506 1506 } 1507 1507 } ··· 2252 2252 int nr_free = 0; 2253 2253 2254 2254 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) { 2255 - list_del(&pos->note_list); 2255 + list_del_init(&pos->note_list); 2256 2256 zfree(&pos->name); 2257 2257 zfree(&pos->provider); 2258 2258 free(pos);
+2 -2
tools/perf/util/thread.c
··· 93 93 down_write(&thread->namespaces_lock); 94 94 list_for_each_entry_safe(namespaces, tmp_namespaces, 95 95 &thread->namespaces_list, list) { 96 - list_del(&namespaces->list); 96 + list_del_init(&namespaces->list); 97 97 namespaces__free(namespaces); 98 98 } 99 99 up_write(&thread->namespaces_lock); 100 100 101 101 down_write(&thread->comm_lock); 102 102 list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) { 103 - list_del(&comm->list); 103 + list_del_init(&comm->list); 104 104 comm__free(comm); 105 105 } 106 106 up_write(&thread->comm_lock);