Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tools/bpftool: Show info for processes holding BPF map/prog/link/btf FDs

Add bpf_iter-based way to find all the processes that hold open FDs against
BPF object (map, prog, link, btf). bpftool always attempts to discover this,
but will silently give up if kernel doesn't yet support bpf_iter BPF programs.
Process name and PID are emitted for each process (task group).

Sample output for each of 4 BPF objects:

$ sudo ./bpftool prog show
2694: cgroup_device tag 8c42dee26e8cd4c2 gpl
loaded_at 2020-06-16T15:34:32-0700 uid 0
xlated 648B jited 409B memlock 4096B
pids systemd(1)
2907: cgroup_skb name egress tag 9ad187367cf2b9e8 gpl
loaded_at 2020-06-16T18:06:54-0700 uid 0
xlated 48B jited 59B memlock 4096B map_ids 2436
btf_id 1202
pids test_progs(2238417), test_progs(2238445)

$ sudo ./bpftool map show
2436: array name test_cgr.bss flags 0x400
key 4B value 8B max_entries 1 memlock 8192B
btf_id 1202
pids test_progs(2238417), test_progs(2238445)
2445: array name pid_iter.rodata flags 0x480
key 4B value 4B max_entries 1 memlock 8192B
btf_id 1214 frozen
pids bpftool(2239612)

$ sudo ./bpftool link show
61: cgroup prog 2908
cgroup_id 375301 attach_type egress
pids test_progs(2238417), test_progs(2238445)
62: cgroup prog 2908
cgroup_id 375344 attach_type egress
pids test_progs(2238417), test_progs(2238445)

$ sudo ./bpftool btf show
1202: size 1527B prog_ids 2908,2907 map_ids 2436
pids test_progs(2238417), test_progs(2238445)
1242: size 34684B
pids bpftool(2258892)

Signed-off-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Quentin Monnet <quentin@isovalent.com>
Link: https://lore.kernel.org/bpf/20200619231703.738941-9-andriin@fb.com

authored by

Andrii Nakryiko and committed by
Alexei Starovoitov
d53dee3f bd9bedf8

+378
+2
tools/bpf/bpftool/Makefile
··· 150 150 151 151 $(OUTPUT)prog.o: $(OUTPUT)profiler.skel.h 152 152 153 + $(OUTPUT)pids.o: $(OUTPUT)pid_iter.skel.h 154 + 153 155 endif 154 156 endif 155 157
+6
tools/bpf/bpftool/btf.c
··· 809 809 printf("%s%u", n++ == 0 ? " map_ids " : ",", 810 810 obj->obj_id); 811 811 } 812 + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); 812 813 813 814 printf("\n"); 814 815 } ··· 842 841 jsonw_uint(json_wtr, obj->obj_id); 843 842 } 844 843 jsonw_end_array(json_wtr); /* map_ids */ 844 + 845 + emit_obj_refs_json(&refs_table, info->id, json_wtr); /* pids */ 846 + 845 847 jsonw_end_object(json_wtr); /* btf object */ 846 848 } 847 849 ··· 897 893 close(fd); 898 894 return err; 899 895 } 896 + build_obj_refs_table(&refs_table, BPF_OBJ_BTF); 900 897 901 898 if (fd >= 0) { 902 899 err = show_btf(fd, &btf_prog_table, &btf_map_table); ··· 944 939 exit_free: 945 940 delete_btf_table(&btf_prog_table); 946 941 delete_btf_table(&btf_map_table); 942 + delete_obj_refs_table(&refs_table); 947 943 948 944 return err; 949 945 }
+7
tools/bpf/bpftool/link.c
··· 143 143 } 144 144 jsonw_end_array(json_wtr); 145 145 } 146 + 147 + emit_obj_refs_json(&refs_table, info->id, json_wtr); 148 + 146 149 jsonw_end_object(json_wtr); 147 150 148 151 return 0; ··· 215 212 printf("\n\tpinned %s", obj->path); 216 213 } 217 214 } 215 + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); 218 216 219 217 printf("\n"); 220 218 ··· 261 257 262 258 if (show_pinned) 263 259 build_pinned_obj_table(&link_table, BPF_OBJ_LINK); 260 + build_obj_refs_table(&refs_table, BPF_OBJ_LINK); 264 261 265 262 if (argc == 2) { 266 263 fd = link_parse_fd(&argc, &argv); ··· 300 295 } 301 296 if (json_output) 302 297 jsonw_end_array(json_wtr); 298 + 299 + delete_obj_refs_table(&refs_table); 303 300 304 301 return errno == ENOENT ? 0 : -1; 305 302 }
+1
tools/bpf/bpftool/main.c
··· 31 31 struct pinned_obj_table prog_table; 32 32 struct pinned_obj_table map_table; 33 33 struct pinned_obj_table link_table; 34 + struct obj_refs_table refs_table; 34 35 35 36 static void __noreturn clean_and_exit(int i) 36 37 {
+27
tools/bpf/bpftool/main.h
··· 127 127 extern const char * const map_type_name[]; 128 128 extern const size_t map_type_name_size; 129 129 130 + /* keep in sync with the definition in skeleton/pid_iter.bpf.c */ 130 131 enum bpf_obj_type { 131 132 BPF_OBJ_UNKNOWN, 132 133 BPF_OBJ_PROG, 133 134 BPF_OBJ_MAP, 134 135 BPF_OBJ_LINK, 136 + BPF_OBJ_BTF, 135 137 }; 136 138 137 139 extern const char *bin_name; ··· 141 139 extern json_writer_t *json_wtr; 142 140 extern bool json_output; 143 141 extern bool show_pinned; 142 + extern bool show_pids; 144 143 extern bool block_mount; 145 144 extern bool verifier_logs; 146 145 extern bool relaxed_maps; 147 146 extern struct pinned_obj_table prog_table; 148 147 extern struct pinned_obj_table map_table; 149 148 extern struct pinned_obj_table link_table; 149 + extern struct obj_refs_table refs_table; 150 150 151 151 void __printf(1, 2) p_err(const char *fmt, ...); 152 152 void __printf(1, 2) p_info(const char *fmt, ...); ··· 172 168 struct hlist_node hash; 173 169 }; 174 170 171 + struct obj_refs_table { 172 + DECLARE_HASHTABLE(table, 16); 173 + }; 174 + 175 + struct obj_ref { 176 + int pid; 177 + char comm[16]; 178 + }; 179 + 180 + struct obj_refs { 181 + struct hlist_node node; 182 + __u32 id; 183 + int ref_cnt; 184 + struct obj_ref *refs; 185 + }; 186 + 175 187 struct btf; 176 188 struct bpf_line_info; 177 189 178 190 int build_pinned_obj_table(struct pinned_obj_table *table, 179 191 enum bpf_obj_type type); 180 192 void delete_pinned_obj_table(struct pinned_obj_table *tab); 193 + __weak int build_obj_refs_table(struct obj_refs_table *table, 194 + enum bpf_obj_type type); 195 + __weak void delete_obj_refs_table(struct obj_refs_table *table); 196 + __weak void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, 197 + json_writer_t *json_wtr); 198 + __weak void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, 199 + const char *prefix); 181 200 void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode); 182 201 void print_dev_json(__u32 
ifindex, __u64 ns_dev, __u64 ns_inode); 183 202
+7
tools/bpf/bpftool/map.c
··· 509 509 jsonw_end_array(json_wtr); 510 510 } 511 511 512 + emit_obj_refs_json(&refs_table, info->id, json_wtr); 513 + 512 514 jsonw_end_object(json_wtr); 513 515 514 516 return 0; ··· 598 596 if (frozen) 599 597 printf("%sfrozen", info->btf_id ? " " : ""); 600 598 599 + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); 600 + 601 601 printf("\n"); 602 602 return 0; 603 603 } ··· 658 654 659 655 if (show_pinned) 660 656 build_pinned_obj_table(&map_table, BPF_OBJ_MAP); 657 + build_obj_refs_table(&refs_table, BPF_OBJ_MAP); 661 658 662 659 if (argc == 2) 663 660 return do_show_subset(argc, argv); ··· 701 696 } 702 697 if (json_output) 703 698 jsonw_end_array(json_wtr); 699 + 700 + delete_obj_refs_table(&refs_table); 704 701 705 702 return errno == ENOENT ? 0 : -1; 706 703 }
+229
tools/bpf/bpftool/pids.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* Copyright (C) 2020 Facebook */ 3 + #include <errno.h> 4 + #include <stdbool.h> 5 + #include <stdio.h> 6 + #include <stdlib.h> 7 + #include <string.h> 8 + #include <unistd.h> 9 + #include <bpf/bpf.h> 10 + 11 + #include "main.h" 12 + #include "skeleton/pid_iter.h" 13 + 14 + #ifdef BPFTOOL_WITHOUT_SKELETONS 15 + 16 + int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) 17 + { 18 + p_err("bpftool built without PID iterator support"); 19 + return -ENOTSUP; 20 + } 21 + void delete_obj_refs_table(struct obj_refs_table *table) {} 22 + 23 + #else /* BPFTOOL_WITHOUT_SKELETONS */ 24 + 25 + #include "pid_iter.skel.h" 26 + 27 + static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e) 28 + { 29 + struct obj_refs *refs; 30 + struct obj_ref *ref; 31 + void *tmp; 32 + int i; 33 + 34 + hash_for_each_possible(table->table, refs, node, e->id) { 35 + if (refs->id != e->id) 36 + continue; 37 + 38 + for (i = 0; i < refs->ref_cnt; i++) { 39 + if (refs->refs[i].pid == e->pid) 40 + return; 41 + } 42 + 43 + tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref)); 44 + if (!tmp) { 45 + p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...", 46 + e->id, e->pid, e->comm); 47 + return; 48 + } 49 + refs->refs = tmp; 50 + ref = &refs->refs[refs->ref_cnt]; 51 + ref->pid = e->pid; 52 + memcpy(ref->comm, e->comm, sizeof(ref->comm)); 53 + refs->ref_cnt++; 54 + 55 + return; 56 + } 57 + 58 + /* new ref */ 59 + refs = calloc(1, sizeof(*refs)); 60 + if (!refs) { 61 + p_err("failed to alloc memory for ID %u, PID %d, COMM %s...", 62 + e->id, e->pid, e->comm); 63 + return; 64 + } 65 + 66 + refs->id = e->id; 67 + refs->refs = malloc(sizeof(*refs->refs)); 68 + if (!refs->refs) { 69 + free(refs); 70 + p_err("failed to alloc memory for ID %u, PID %d, COMM %s...", 71 + e->id, e->pid, e->comm); 72 + return; 73 + } 74 + ref = &refs->refs[0]; 75 + ref->pid = e->pid; 76 + 
memcpy(ref->comm, e->comm, sizeof(ref->comm)); 77 + refs->ref_cnt = 1; 78 + hash_add(table->table, &refs->node, e->id); 79 + } 80 + 81 + static int __printf(2, 0) 82 + libbpf_print_none(__maybe_unused enum libbpf_print_level level, 83 + __maybe_unused const char *format, 84 + __maybe_unused va_list args) 85 + { 86 + return 0; 87 + } 88 + 89 + int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) 90 + { 91 + char buf[4096]; 92 + struct pid_iter_bpf *skel; 93 + struct pid_iter_entry *e; 94 + int err, ret, fd = -1, i; 95 + libbpf_print_fn_t default_print; 96 + 97 + hash_init(table->table); 98 + set_max_rlimit(); 99 + 100 + skel = pid_iter_bpf__open(); 101 + if (!skel) { 102 + p_err("failed to open PID iterator skeleton"); 103 + return -1; 104 + } 105 + 106 + skel->rodata->obj_type = type; 107 + 108 + /* we don't want output polluted with libbpf errors if bpf_iter is not 109 + * supported 110 + */ 111 + default_print = libbpf_set_print(libbpf_print_none); 112 + err = pid_iter_bpf__load(skel); 113 + libbpf_set_print(default_print); 114 + if (err) { 115 + /* too bad, kernel doesn't support BPF iterators yet */ 116 + err = 0; 117 + goto out; 118 + } 119 + err = pid_iter_bpf__attach(skel); 120 + if (err) { 121 + /* if we loaded above successfully, attach has to succeed */ 122 + p_err("failed to attach PID iterator: %d", err); 123 + goto out; 124 + } 125 + 126 + fd = bpf_iter_create(bpf_link__fd(skel->links.iter)); 127 + if (fd < 0) { 128 + err = -errno; 129 + p_err("failed to create PID iterator session: %d", err); 130 + goto out; 131 + } 132 + 133 + while (true) { 134 + ret = read(fd, buf, sizeof(buf)); 135 + if (ret < 0) { 136 + err = -errno; 137 + p_err("failed to read PID iterator output: %d", err); 138 + goto out; 139 + } 140 + if (ret == 0) 141 + break; 142 + if (ret % sizeof(*e)) { 143 + err = -EINVAL; 144 + p_err("invalid PID iterator output format"); 145 + goto out; 146 + } 147 + ret /= sizeof(*e); 148 + 149 + e = (void *)buf; 150 + for (i 
= 0; i < ret; i++, e++) { 151 + add_ref(table, e); 152 + } 153 + } 154 + err = 0; 155 + out: 156 + if (fd >= 0) 157 + close(fd); 158 + pid_iter_bpf__destroy(skel); 159 + return err; 160 + } 161 + 162 + void delete_obj_refs_table(struct obj_refs_table *table) 163 + { 164 + struct obj_refs *refs; 165 + struct hlist_node *tmp; 166 + unsigned int bkt; 167 + 168 + hash_for_each_safe(table->table, bkt, tmp, refs, node) { 169 + hash_del(&refs->node); 170 + free(refs->refs); 171 + free(refs); 172 + } 173 + } 174 + 175 + void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_wtr) 176 + { 177 + struct obj_refs *refs; 178 + struct obj_ref *ref; 179 + int i; 180 + 181 + if (hash_empty(table->table)) 182 + return; 183 + 184 + hash_for_each_possible(table->table, refs, node, id) { 185 + if (refs->id != id) 186 + continue; 187 + if (refs->ref_cnt == 0) 188 + break; 189 + 190 + jsonw_name(json_wtr, "pids"); 191 + jsonw_start_array(json_wtr); 192 + for (i = 0; i < refs->ref_cnt; i++) { 193 + ref = &refs->refs[i]; 194 + jsonw_start_object(json_wtr); 195 + jsonw_int_field(json_wtr, "pid", ref->pid); 196 + jsonw_string_field(json_wtr, "comm", ref->comm); 197 + jsonw_end_object(json_wtr); 198 + } 199 + jsonw_end_array(json_wtr); 200 + break; 201 + } 202 + } 203 + 204 + void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) 205 + { 206 + struct obj_refs *refs; 207 + struct obj_ref *ref; 208 + int i; 209 + 210 + if (hash_empty(table->table)) 211 + return; 212 + 213 + hash_for_each_possible(table->table, refs, node, id) { 214 + if (refs->id != id) 215 + continue; 216 + if (refs->ref_cnt == 0) 217 + break; 218 + 219 + printf("%s", prefix); 220 + for (i = 0; i < refs->ref_cnt; i++) { 221 + ref = &refs->refs[i]; 222 + printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid); 223 + } 224 + break; 225 + } 226 + } 227 + 228 + 229 + #endif
+7
tools/bpf/bpftool/prog.c
··· 190 190 jsonw_end_array(json_wtr); 191 191 } 192 192 193 + emit_obj_refs_json(&refs_table, info->id, json_wtr); 194 + 193 195 jsonw_end_object(json_wtr); 194 196 } 195 197 ··· 258 256 if (info->btf_id) 259 257 printf("\n\tbtf_id %d", info->btf_id); 260 258 259 + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); 260 + 261 261 printf("\n"); 262 262 } 263 263 ··· 325 321 326 322 if (show_pinned) 327 323 build_pinned_obj_table(&prog_table, BPF_OBJ_PROG); 324 + build_obj_refs_table(&refs_table, BPF_OBJ_PROG); 328 325 329 326 if (argc == 2) 330 327 return do_show_subset(argc, argv); ··· 366 361 367 362 if (json_output) 368 363 jsonw_end_array(json_wtr); 364 + 365 + delete_obj_refs_table(&refs_table); 369 366 370 367 return err; 371 368 }
+80
tools/bpf/bpftool/skeleton/pid_iter.bpf.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* Copyright (c) 2020 Facebook */ 3 + #include <vmlinux.h> 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_core_read.h> 6 + #include <bpf/bpf_tracing.h> 7 + #include "pid_iter.h" 8 + 9 + /* keep in sync with the definition in main.h */ 10 + enum bpf_obj_type { 11 + BPF_OBJ_UNKNOWN, 12 + BPF_OBJ_PROG, 13 + BPF_OBJ_MAP, 14 + BPF_OBJ_LINK, 15 + BPF_OBJ_BTF, 16 + }; 17 + 18 + extern const void bpf_link_fops __ksym; 19 + extern const void bpf_map_fops __ksym; 20 + extern const void bpf_prog_fops __ksym; 21 + extern const void btf_fops __ksym; 22 + 23 + const volatile enum bpf_obj_type obj_type = BPF_OBJ_UNKNOWN; 24 + 25 + static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type) 26 + { 27 + switch (type) { 28 + case BPF_OBJ_PROG: 29 + return BPF_CORE_READ((struct bpf_prog *)ent, aux, id); 30 + case BPF_OBJ_MAP: 31 + return BPF_CORE_READ((struct bpf_map *)ent, id); 32 + case BPF_OBJ_BTF: 33 + return BPF_CORE_READ((struct btf *)ent, id); 34 + case BPF_OBJ_LINK: 35 + return BPF_CORE_READ((struct bpf_link *)ent, id); 36 + default: 37 + return 0; 38 + } 39 + } 40 + 41 + SEC("iter/task_file") 42 + int iter(struct bpf_iter__task_file *ctx) 43 + { 44 + struct file *file = ctx->file; 45 + struct task_struct *task = ctx->task; 46 + struct pid_iter_entry e; 47 + const void *fops; 48 + 49 + if (!file || !task) 50 + return 0; 51 + 52 + switch (obj_type) { 53 + case BPF_OBJ_PROG: 54 + fops = &bpf_prog_fops; 55 + break; 56 + case BPF_OBJ_MAP: 57 + fops = &bpf_map_fops; 58 + break; 59 + case BPF_OBJ_BTF: 60 + fops = &btf_fops; 61 + break; 62 + case BPF_OBJ_LINK: 63 + fops = &bpf_link_fops; 64 + break; 65 + default: 66 + return 0; 67 + } 68 + 69 + if (file->f_op != fops) 70 + return 0; 71 + 72 + e.pid = task->tgid; 73 + e.id = get_obj_id(file->private_data, obj_type); 74 + bpf_probe_read(&e.comm, sizeof(e.comm), task->group_leader->comm); 75 + bpf_seq_write(ctx->meta->seq, &e, sizeof(e)); 76 + 
77 + return 0; 78 + } 79 + 80 + char LICENSE[] SEC("license") = "Dual BSD/GPL";
+12
tools/bpf/bpftool/skeleton/pid_iter.h
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2020 Facebook */
#ifndef __PID_ITER_H
#define __PID_ITER_H

/* One record emitted by the pid_iter BPF iterator program for each
 * process holding an FD to a BPF object; shared between the BPF side
 * (pid_iter.bpf.c) and user space (pids.c), so the layout must stay in
 * sync on both sides.
 */
struct pid_iter_entry {
	__u32 id;	/* kernel-assigned BPF object ID (prog/map/link/btf) */
	int pid;	/* tgid of the process holding the FD */
	char comm[16];	/* group leader's comm; 16 matches kernel TASK_COMM_LEN */
};

#endif