Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Do not walk twice the hash map on free

If someone stores both a timer and a workqueue in a hash map, then on
free we would walk the map twice.

Add a check in htab_free_malloced_timers_or_wq and free the timers and
workqueues if they are present.

Fixes: 246331e3f1ea ("bpf: allow struct bpf_wq to be embedded in arraymaps and hashmaps")
Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/bpf/20240430-bpf-next-v3-2-27afe7f3b17c@kernel.org

Authored by Benjamin Tissoires and committed by Daniel Borkmann
a891711d b98a5c68

+13 -36
+13 -36
kernel/bpf/hashtab.c
··· 221 221 return !htab_is_percpu(htab) && !htab_is_lru(htab); 222 222 } 223 223 224 - static void htab_free_prealloced_timers(struct bpf_htab *htab) 224 + static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab) 225 225 { 226 226 u32 num_entries = htab->map.max_entries; 227 227 int i; 228 228 229 - if (!btf_record_has_field(htab->map.record, BPF_TIMER)) 230 - return; 231 229 if (htab_has_extra_elems(htab)) 232 230 num_entries += num_possible_cpus(); 233 231 ··· 233 235 struct htab_elem *elem; 234 236 235 237 elem = get_htab_elem(htab, i); 236 - bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); 237 - cond_resched(); 238 - } 239 - } 240 - 241 - static void htab_free_prealloced_wq(struct bpf_htab *htab) 242 - { 243 - u32 num_entries = htab->map.max_entries; 244 - int i; 245 - 246 - if (!btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) 247 - return; 248 - if (htab_has_extra_elems(htab)) 249 - num_entries += num_possible_cpus(); 250 - 251 - for (i = 0; i < num_entries; i++) { 252 - struct htab_elem *elem; 253 - 254 - elem = get_htab_elem(htab, i); 255 - bpf_obj_free_workqueue(htab->map.record, 256 - elem->key + round_up(htab->map.key_size, 8)); 238 + if (btf_record_has_field(htab->map.record, BPF_TIMER)) 239 + bpf_obj_free_timer(htab->map.record, 240 + elem->key + round_up(htab->map.key_size, 8)); 241 + if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) 242 + bpf_obj_free_workqueue(htab->map.record, 243 + elem->key + round_up(htab->map.key_size, 8)); 257 244 cond_resched(); 258 245 } 259 246 } ··· 1498 1515 migrate_enable(); 1499 1516 } 1500 1517 1501 - static void htab_free_malloced_timers_or_wq(struct bpf_htab *htab, bool is_timer) 1518 + static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab) 1502 1519 { 1503 1520 int i; 1504 1521 ··· 1510 1527 1511 1528 hlist_nulls_for_each_entry(l, n, head, hash_node) { 1512 1529 /* We only free timer on uref dropping to zero */ 1513 - if (is_timer) 1530 + if (btf_record_has_field(htab->map.record, BPF_TIMER)) 1514 1531 bpf_obj_free_timer(htab->map.record, 1515 1532 l->key + round_up(htab->map.key_size, 8)); 1516 - else 1533 + if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) 1517 1534 bpf_obj_free_workqueue(htab->map.record, 1518 1535 l->key + round_up(htab->map.key_size, 8)); 1519 1536 } ··· 1527 1544 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1528 1545 1529 1546 /* We only free timer and workqueue on uref dropping to zero */ 1530 - if (btf_record_has_field(htab->map.record, BPF_TIMER)) { 1547 + if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE)) { 1531 1548 if (!htab_is_prealloc(htab)) 1532 - htab_free_malloced_timers_or_wq(htab, true); 1549 + htab_free_malloced_timers_and_wq(htab); 1533 1550 else 1534 - htab_free_prealloced_timers(htab); 1535 - } 1536 - if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) { 1537 - if (!htab_is_prealloc(htab)) 1538 - htab_free_malloced_timers_or_wq(htab, false); 1539 - else 1540 - htab_free_prealloced_wq(htab); 1551 + htab_free_prealloced_timers_and_wq(htab); 1541 1552 } 1542 1553 } 1543 1554