Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

perf bpf: Stop using deprecated bpf_object__next() API

Libbpf has deprecated the ability to keep track of object list inside
libbpf, it now requires applications to track usage of multiple bpf objects
directly. Remove usage of bpf_object__next() API and hoist the tracking
logic to perf.

Signed-off-by: Christy Lee <christylee@fb.com>
Acked-by: Song Liu <songliubraving@fb.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: bpf@vger.kernel.org
Cc: kernel-team@fb.com
Link: https://lore.kernel.org/bpf/20220212073054.1052880-3-andrii@kernel.org
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

authored by

Christy Lee and committed by
Arnaldo Carvalho de Melo
e8eaadf4 710f6c38

+79 -19
+79 -19
tools/perf/util/bpf-loader.c
··· 49 49 int *type_mapping; 50 50 }; 51 51 52 + struct bpf_perf_object { 53 + struct list_head list; 54 + struct bpf_object *obj; 55 + }; 56 + 57 + static LIST_HEAD(bpf_objects_list); 58 + 59 + static struct bpf_perf_object * 60 + bpf_perf_object__next(struct bpf_perf_object *prev) 61 + { 62 + struct bpf_perf_object *next; 63 + 64 + if (!prev) 65 + next = list_first_entry(&bpf_objects_list, 66 + struct bpf_perf_object, 67 + list); 68 + else 69 + next = list_next_entry(prev, list); 70 + 71 + /* Empty list is noticed here so don't need checking on entry. */ 72 + if (&next->list == &bpf_objects_list) 73 + return NULL; 74 + 75 + return next; 76 + } 77 + 78 + #define bpf_perf_object__for_each(perf_obj, tmp) \ 79 + for ((perf_obj) = bpf_perf_object__next(NULL), \ 80 + (tmp) = bpf_perf_object__next(perf_obj); \ 81 + (perf_obj) != NULL; \ 82 + (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp)) 83 + 52 84 static bool libbpf_initialized; 85 + 86 + static int bpf_perf_object__add(struct bpf_object *obj) 87 + { 88 + struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj)); 89 + 90 + if (perf_obj) { 91 + INIT_LIST_HEAD(&perf_obj->list); 92 + perf_obj->obj = obj; 93 + list_add_tail(&perf_obj->list, &bpf_objects_list); 94 + } 95 + return perf_obj ? 
0 : -ENOMEM; 96 + } 53 97 54 98 struct bpf_object * 55 99 bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name) ··· 111 67 return ERR_PTR(-EINVAL); 112 68 } 113 69 70 + if (bpf_perf_object__add(obj)) { 71 + bpf_object__close(obj); 72 + return ERR_PTR(-ENOMEM); 73 + } 74 + 114 75 return obj; 76 + } 77 + 78 + static void bpf_perf_object__close(struct bpf_perf_object *perf_obj) 79 + { 80 + list_del(&perf_obj->list); 81 + bpf_object__close(perf_obj->obj); 82 + free(perf_obj); 115 83 } 116 84 117 85 struct bpf_object *bpf__prepare_load(const char *filename, bool source) ··· 156 100 llvm__dump_obj(filename, obj_buf, obj_buf_sz); 157 101 158 102 free(obj_buf); 159 - } else 103 + } else { 160 104 obj = bpf_object__open(filename); 105 + } 161 106 162 107 if (IS_ERR_OR_NULL(obj)) { 163 108 pr_debug("bpf: failed to load %s\n", filename); 164 109 return obj; 110 + } 111 + 112 + if (bpf_perf_object__add(obj)) { 113 + bpf_object__close(obj); 114 + return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE); 165 115 } 166 116 167 117 return obj; ··· 175 113 176 114 void bpf__clear(void) 177 115 { 178 - struct bpf_object *obj, *tmp; 116 + struct bpf_perf_object *perf_obj, *tmp; 179 117 180 - bpf_object__for_each_safe(obj, tmp) { 181 - bpf__unprobe(obj); 182 - bpf_object__close(obj); 118 + bpf_perf_object__for_each(perf_obj, tmp) { 119 + bpf__unprobe(perf_obj->obj); 120 + bpf_perf_object__close(perf_obj); 183 121 } 184 122 } 185 123 ··· 1563 1501 1564 1502 int bpf__apply_obj_config(void) 1565 1503 { 1566 - struct bpf_object *obj, *tmp; 1504 + struct bpf_perf_object *perf_obj, *tmp; 1567 1505 int err; 1568 1506 1569 - bpf_object__for_each_safe(obj, tmp) { 1570 - err = apply_obj_config_object(obj); 1507 + bpf_perf_object__for_each(perf_obj, tmp) { 1508 + err = apply_obj_config_object(perf_obj->obj); 1571 1509 if (err) 1572 1510 return err; 1573 1511 } ··· 1575 1513 return 0; 1576 1514 } 1577 1515 1578 - #define bpf__for_each_map(pos, obj, objtmp) \ 1579 - 
bpf_object__for_each_safe(obj, objtmp) \ 1580 - bpf_object__for_each_map(pos, obj) 1516 + #define bpf__perf_for_each_map(map, pobj, tmp) \ 1517 + bpf_perf_object__for_each(pobj, tmp) \ 1518 + bpf_object__for_each_map(map, pobj->obj) 1581 1519 1582 - #define bpf__for_each_map_named(pos, obj, objtmp, name) \ 1583 - bpf__for_each_map(pos, obj, objtmp) \ 1584 - if (bpf_map__name(pos) && \ 1585 - (strcmp(name, \ 1586 - bpf_map__name(pos)) == 0)) 1520 + #define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name) \ 1521 + bpf__perf_for_each_map(map, pobj, pobjtmp) \ 1522 + if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0)) 1587 1523 1588 1524 struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name) 1589 1525 { 1590 1526 struct bpf_map_priv *tmpl_priv = NULL; 1591 - struct bpf_object *obj, *tmp; 1527 + struct bpf_perf_object *perf_obj, *tmp; 1592 1528 struct evsel *evsel = NULL; 1593 1529 struct bpf_map *map; 1594 1530 int err; 1595 1531 bool need_init = false; 1596 1532 1597 - bpf__for_each_map_named(map, obj, tmp, name) { 1533 + bpf__perf_for_each_map_named(map, perf_obj, tmp, name) { 1598 1534 struct bpf_map_priv *priv = bpf_map__priv(map); 1599 1535 1600 1536 if (IS_ERR(priv)) ··· 1628 1568 evsel = evlist__last(evlist); 1629 1569 } 1630 1570 1631 - bpf__for_each_map_named(map, obj, tmp, name) { 1571 + bpf__perf_for_each_map_named(map, perf_obj, tmp, name) { 1632 1572 struct bpf_map_priv *priv = bpf_map__priv(map); 1633 1573 1634 1574 if (IS_ERR(priv))