Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: Allow compiler to inline most of bpf_local_storage_lookup()

In various performance profiles of kernels with BPF programs attached,
bpf_local_storage_lookup() appears as a significant portion of CPU
cycles spent. To enable the compiler to generate more optimal code, turn
bpf_local_storage_lookup() into a static inline function, where only the
cache insertion code path is outlined.

Notably, outlining cache insertion helps avoid bloating callers by
duplicating setting up calls to raw_spin_{lock,unlock}_irqsave() (on
architectures which do not inline spin_lock/unlock, such as x86), which
would cause the compiler to produce worse code by deciding to outline
otherwise inlinable functions. The call overhead is neutral, because we
make 2 calls either way: either calling raw_spin_lock_irqsave() and
raw_spin_unlock_irqrestore(); or call __bpf_local_storage_insert_cache(),
which calls raw_spin_lock_irqsave(), followed by a tail-call to
raw_spin_unlock_irqrestore() where the compiler can perform TCO and (in
optimized uninstrumented builds) turns it into a plain jump. The call to
__bpf_local_storage_insert_cache() can be elided entirely if
cacheit_lockit is a false constant expression.

Based on results from './benchs/run_bench_local_storage.sh' (21 trials,
reboot between each trial; x86 defconfig + BPF, clang 16) this produces
improvements in throughput and latency in the majority of cases, with an
average (geomean) improvement of 8%:

+---- Hashmap Control --------------------
|
| + num keys: 10
| : <before> | <after>
| +-+ hashmap (control) sequential get +----------------------+----------------------
| +- hits throughput | 14.789 M ops/s | 14.745 M ops/s ( ~ )
| +- hits latency | 67.679 ns/op | 67.879 ns/op ( ~ )
| +- important_hits throughput | 14.789 M ops/s | 14.745 M ops/s ( ~ )
|
| + num keys: 1000
| : <before> | <after>
| +-+ hashmap (control) sequential get +----------------------+----------------------
| +- hits throughput | 12.233 M ops/s | 12.170 M ops/s ( ~ )
| +- hits latency | 81.754 ns/op | 82.185 ns/op ( ~ )
| +- important_hits throughput | 12.233 M ops/s | 12.170 M ops/s ( ~ )
|
| + num keys: 10000
| : <before> | <after>
| +-+ hashmap (control) sequential get +----------------------+----------------------
| +- hits throughput | 7.220 M ops/s | 7.204 M ops/s ( ~ )
| +- hits latency | 138.522 ns/op | 138.842 ns/op ( ~ )
| +- important_hits throughput | 7.220 M ops/s | 7.204 M ops/s ( ~ )
|
| + num keys: 100000
| : <before> | <after>
| +-+ hashmap (control) sequential get +----------------------+----------------------
| +- hits throughput | 5.061 M ops/s | 5.165 M ops/s (+2.1%)
| +- hits latency | 198.483 ns/op | 194.270 ns/op (-2.1%)
| +- important_hits throughput | 5.061 M ops/s | 5.165 M ops/s (+2.1%)
|
| + num keys: 4194304
| : <before> | <after>
| +-+ hashmap (control) sequential get +----------------------+----------------------
| +- hits throughput | 2.864 M ops/s | 2.882 M ops/s ( ~ )
| +- hits latency | 365.220 ns/op | 361.418 ns/op (-1.0%)
| +- important_hits throughput | 2.864 M ops/s | 2.882 M ops/s ( ~ )
|
+---- Local Storage ----------------------
|
| + num_maps: 1
| : <before> | <after>
| +-+ local_storage cache sequential get +----------------------+----------------------
| +- hits throughput | 33.005 M ops/s | 39.068 M ops/s (+18.4%)
| +- hits latency | 30.300 ns/op | 25.598 ns/op (-15.5%)
| +- important_hits throughput | 33.005 M ops/s | 39.068 M ops/s (+18.4%)
| :
| : <before> | <after>
| +-+ local_storage cache interleaved get +----------------------+----------------------
| +- hits throughput | 37.151 M ops/s | 44.926 M ops/s (+20.9%)
| +- hits latency | 26.919 ns/op | 22.259 ns/op (-17.3%)
| +- important_hits throughput | 37.151 M ops/s | 44.926 M ops/s (+20.9%)
|
| + num_maps: 10
| : <before> | <after>
| +-+ local_storage cache sequential get +----------------------+----------------------
| +- hits throughput | 32.288 M ops/s | 38.099 M ops/s (+18.0%)
| +- hits latency | 30.972 ns/op | 26.248 ns/op (-15.3%)
| +- important_hits throughput | 3.229 M ops/s | 3.810 M ops/s (+18.0%)
| :
| : <before> | <after>
| +-+ local_storage cache interleaved get +----------------------+----------------------
| +- hits throughput | 34.473 M ops/s | 41.145 M ops/s (+19.4%)
| +- hits latency | 29.010 ns/op | 24.307 ns/op (-16.2%)
| +- important_hits throughput | 12.312 M ops/s | 14.695 M ops/s (+19.4%)
|
| + num_maps: 16
| : <before> | <after>
| +-+ local_storage cache sequential get +----------------------+----------------------
| +- hits throughput | 32.524 M ops/s | 38.341 M ops/s (+17.9%)
| +- hits latency | 30.748 ns/op | 26.083 ns/op (-15.2%)
| +- important_hits throughput | 2.033 M ops/s | 2.396 M ops/s (+17.9%)
| :
| : <before> | <after>
| +-+ local_storage cache interleaved get +----------------------+----------------------
| +- hits throughput | 34.575 M ops/s | 41.338 M ops/s (+19.6%)
| +- hits latency | 28.925 ns/op | 24.193 ns/op (-16.4%)
| +- important_hits throughput | 11.001 M ops/s | 13.153 M ops/s (+19.6%)
|
| + num_maps: 17
| : <before> | <after>
| +-+ local_storage cache sequential get +----------------------+----------------------
| +- hits throughput | 28.861 M ops/s | 32.756 M ops/s (+13.5%)
| +- hits latency | 34.649 ns/op | 30.530 ns/op (-11.9%)
| +- important_hits throughput | 1.700 M ops/s | 1.929 M ops/s (+13.5%)
| :
| : <before> | <after>
| +-+ local_storage cache interleaved get +----------------------+----------------------
| +- hits throughput | 31.529 M ops/s | 36.110 M ops/s (+14.5%)
| +- hits latency | 31.719 ns/op | 27.697 ns/op (-12.7%)
| +- important_hits throughput | 9.598 M ops/s | 10.993 M ops/s (+14.5%)
|
| + num_maps: 24
| : <before> | <after>
| +-+ local_storage cache sequential get +----------------------+----------------------
| +- hits throughput | 18.602 M ops/s | 19.937 M ops/s (+7.2%)
| +- hits latency | 53.767 ns/op | 50.166 ns/op (-6.7%)
| +- important_hits throughput | 0.776 M ops/s | 0.831 M ops/s (+7.2%)
| :
| : <before> | <after>
| +-+ local_storage cache interleaved get +----------------------+----------------------
| +- hits throughput | 21.718 M ops/s | 23.332 M ops/s (+7.4%)
| +- hits latency | 46.047 ns/op | 42.865 ns/op (-6.9%)
| +- important_hits throughput | 6.110 M ops/s | 6.564 M ops/s (+7.4%)
|
| + num_maps: 32
| : <before> | <after>
| +-+ local_storage cache sequential get +----------------------+----------------------
| +- hits throughput | 14.118 M ops/s | 14.626 M ops/s (+3.6%)
| +- hits latency | 70.856 ns/op | 68.381 ns/op (-3.5%)
| +- important_hits throughput | 0.442 M ops/s | 0.458 M ops/s (+3.6%)
| :
| : <before> | <after>
| +-+ local_storage cache interleaved get +----------------------+----------------------
| +- hits throughput | 17.111 M ops/s | 17.906 M ops/s (+4.6%)
| +- hits latency | 58.451 ns/op | 55.865 ns/op (-4.4%)
| +- important_hits throughput | 4.776 M ops/s | 4.998 M ops/s (+4.6%)
|
| + num_maps: 100
| : <before> | <after>
| +-+ local_storage cache sequential get +----------------------+----------------------
| +- hits throughput | 5.281 M ops/s | 5.528 M ops/s (+4.7%)
| +- hits latency | 192.398 ns/op | 183.059 ns/op (-4.9%)
| +- important_hits throughput | 0.053 M ops/s | 0.055 M ops/s (+4.9%)
| :
| : <before> | <after>
| +-+ local_storage cache interleaved get +----------------------+----------------------
| +- hits throughput | 6.265 M ops/s | 6.498 M ops/s (+3.7%)
| +- hits latency | 161.436 ns/op | 152.877 ns/op (-5.3%)
| +- important_hits throughput | 1.636 M ops/s | 1.697 M ops/s (+3.7%)
|
| + num_maps: 1000
| : <before> | <after>
| +-+ local_storage cache sequential get +----------------------+----------------------
| +- hits throughput | 0.355 M ops/s | 0.354 M ops/s ( ~ )
| +- hits latency | 2826.538 ns/op | 2827.139 ns/op ( ~ )
| +- important_hits throughput | 0.000 M ops/s | 0.000 M ops/s ( ~ )
| :
| : <before> | <after>
| +-+ local_storage cache interleaved get +----------------------+----------------------
| +- hits throughput | 0.404 M ops/s | 0.403 M ops/s ( ~ )
| +- hits latency | 2481.190 ns/op | 2487.555 ns/op ( ~ )
| +- important_hits throughput | 0.102 M ops/s | 0.101 M ops/s ( ~ )

The on_lookup test in {cgrp,task}_ls_recursion.c is removed
because bpf_local_storage_lookup() is no longer traceable,
and adding a tracepoint would make the compiler generate worse
code: https://lore.kernel.org/bpf/ZcJmok64Xqv6l4ZS@elver.google.com/

Signed-off-by: Marco Elver <elver@google.com>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20240207122626.3508658-1-elver@google.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>

authored by

Marco Elver and committed by
Martin KaFai Lau
68bc61c2 a7170d81

+41 -90
+28 -2
include/linux/bpf_local_storage.h
··· 129 129 struct bpf_local_storage_cache *cache, 130 130 bool bpf_ma); 131 131 132 - struct bpf_local_storage_data * 132 + void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage, 133 + struct bpf_local_storage_map *smap, 134 + struct bpf_local_storage_elem *selem); 135 + /* If cacheit_lockit is false, this lookup function is lockless */ 136 + static inline struct bpf_local_storage_data * 133 137 bpf_local_storage_lookup(struct bpf_local_storage *local_storage, 134 138 struct bpf_local_storage_map *smap, 135 - bool cacheit_lockit); 139 + bool cacheit_lockit) 140 + { 141 + struct bpf_local_storage_data *sdata; 142 + struct bpf_local_storage_elem *selem; 143 + 144 + /* Fast path (cache hit) */ 145 + sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx], 146 + bpf_rcu_lock_held()); 147 + if (sdata && rcu_access_pointer(sdata->smap) == smap) 148 + return sdata; 149 + 150 + /* Slow path (cache miss) */ 151 + hlist_for_each_entry_rcu(selem, &local_storage->list, snode, 152 + rcu_read_lock_trace_held()) 153 + if (rcu_access_pointer(SDATA(selem)->smap) == smap) 154 + break; 155 + 156 + if (!selem) 157 + return NULL; 158 + if (cacheit_lockit) 159 + __bpf_local_storage_insert_cache(local_storage, smap, selem); 160 + return SDATA(selem); 161 + } 136 162 137 163 void bpf_local_storage_destroy(struct bpf_local_storage *local_storage); 138 164
+13 -39
kernel/bpf/bpf_local_storage.c
··· 414 414 bpf_selem_unlink_storage(selem, reuse_now); 415 415 } 416 416 417 - /* If cacheit_lockit is false, this lookup function is lockless */ 418 - struct bpf_local_storage_data * 419 - bpf_local_storage_lookup(struct bpf_local_storage *local_storage, 420 - struct bpf_local_storage_map *smap, 421 - bool cacheit_lockit) 417 + void __bpf_local_storage_insert_cache(struct bpf_local_storage *local_storage, 418 + struct bpf_local_storage_map *smap, 419 + struct bpf_local_storage_elem *selem) 422 420 { 423 - struct bpf_local_storage_data *sdata; 424 - struct bpf_local_storage_elem *selem; 421 + unsigned long flags; 425 422 426 - /* Fast path (cache hit) */ 427 - sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx], 428 - bpf_rcu_lock_held()); 429 - if (sdata && rcu_access_pointer(sdata->smap) == smap) 430 - return sdata; 431 - 432 - /* Slow path (cache miss) */ 433 - hlist_for_each_entry_rcu(selem, &local_storage->list, snode, 434 - rcu_read_lock_trace_held()) 435 - if (rcu_access_pointer(SDATA(selem)->smap) == smap) 436 - break; 437 - 438 - if (!selem) 439 - return NULL; 440 - 441 - sdata = SDATA(selem); 442 - if (cacheit_lockit) { 443 - unsigned long flags; 444 - 445 - /* spinlock is needed to avoid racing with the 446 - * parallel delete. Otherwise, publishing an already 447 - * deleted sdata to the cache will become a use-after-free 448 - * problem in the next bpf_local_storage_lookup(). 449 - */ 450 - raw_spin_lock_irqsave(&local_storage->lock, flags); 451 - if (selem_linked_to_storage(selem)) 452 - rcu_assign_pointer(local_storage->cache[smap->cache_idx], 453 - sdata); 454 - raw_spin_unlock_irqrestore(&local_storage->lock, flags); 455 - } 456 - 457 - return sdata; 423 + /* spinlock is needed to avoid racing with the 424 + * parallel delete. Otherwise, publishing an already 425 + * deleted sdata to the cache will become a use-after-free 426 + * problem in the next bpf_local_storage_lookup(). 
427 + */ 428 + raw_spin_lock_irqsave(&local_storage->lock, flags); 429 + if (selem_linked_to_storage(selem)) 430 + rcu_assign_pointer(local_storage->cache[smap->cache_idx], SDATA(selem)); 431 + raw_spin_unlock_irqrestore(&local_storage->lock, flags); 458 432 } 459 433 460 434 static int check_flags(const struct bpf_local_storage_data *old_sdata,
-6
tools/testing/selftests/bpf/prog_tests/task_local_storage.c
··· 117 117 ASSERT_OK(err, "lookup map_b"); 118 118 ASSERT_EQ(value, 100, "map_b value"); 119 119 120 - prog_fd = bpf_program__fd(skel->progs.on_lookup); 121 - memset(&info, 0, sizeof(info)); 122 - err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len); 123 - ASSERT_OK(err, "get prog info"); 124 - ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion"); 125 - 126 120 prog_fd = bpf_program__fd(skel->progs.on_update); 127 121 memset(&info, 0, sizeof(info)); 128 122 err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
-26
tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c
··· 27 27 struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym; 28 28 void bpf_cgroup_release(struct cgroup *cgrp) __ksym; 29 29 30 - static void __on_lookup(struct cgroup *cgrp) 31 - { 32 - bpf_cgrp_storage_delete(&map_a, cgrp); 33 - bpf_cgrp_storage_delete(&map_b, cgrp); 34 - } 35 - 36 - SEC("fentry/bpf_local_storage_lookup") 37 - int BPF_PROG(on_lookup) 38 - { 39 - struct task_struct *task = bpf_get_current_task_btf(); 40 - struct cgroup *cgrp; 41 - 42 - if (is_cgroup1) { 43 - cgrp = bpf_task_get_cgroup1(task, target_hid); 44 - if (!cgrp) 45 - return 0; 46 - 47 - __on_lookup(cgrp); 48 - bpf_cgroup_release(cgrp); 49 - return 0; 50 - } 51 - 52 - __on_lookup(task->cgroups->dfl_cgrp); 53 - return 0; 54 - } 55 - 56 30 static void __on_update(struct cgroup *cgrp) 57 31 { 58 32 long *ptr;
-17
tools/testing/selftests/bpf/progs/task_ls_recursion.c
··· 27 27 __type(value, long); 28 28 } map_b SEC(".maps"); 29 29 30 - SEC("fentry/bpf_local_storage_lookup") 31 - int BPF_PROG(on_lookup) 32 - { 33 - struct task_struct *task = bpf_get_current_task_btf(); 34 - 35 - if (!test_pid || task->pid != test_pid) 36 - return 0; 37 - 38 - /* The bpf_task_storage_delete will call 39 - * bpf_local_storage_lookup. The prog->active will 40 - * stop the recursion. 41 - */ 42 - bpf_task_storage_delete(&map_a, task); 43 - bpf_task_storage_delete(&map_b, task); 44 - return 0; 45 - } 46 - 47 30 SEC("fentry/bpf_local_storage_update") 48 31 int BPF_PROG(on_update) 49 32 {