Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: rename bpf_compute_data_end into bpf_compute_data_pointers

Just rename it to bpf_compute_data_pointers(), as we'll add one
more pointer here to recompute.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Daniel Borkmann, committed by David S. Miller
6aaae2b6 3bd3b9ed

+21 -18
+6 -3
include/linux/filter.h
··· 496 496 void *data_hard_start; 497 497 }; 498 498 499 - /* compute the linear packet data range [data, data_end) which 500 - * will be accessed by cls_bpf, act_bpf and lwt programs 499 + /* Compute the linear packet data range [data, data_end) which 500 + * will be accessed by various program types (cls_bpf, act_bpf, 501 + * lwt, ...). Subsystems allowing direct data access must (!) 502 + * ensure that cb[] area can be written to when BPF program is 503 + * invoked (otherwise cb[] save/restore is necessary). 501 504 */ 502 - static inline void bpf_compute_data_end(struct sk_buff *skb) 505 + static inline void bpf_compute_data_pointers(struct sk_buff *skb) 503 506 { 504 507 struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; 505 508
+2 -2
kernel/bpf/sockmap.c
··· 102 102 103 103 skb_orphan(skb); 104 104 skb->sk = psock->sock; 105 - bpf_compute_data_end(skb); 105 + bpf_compute_data_pointers(skb); 106 106 rc = (*prog->bpf_func)(skb, prog->insnsi); 107 107 skb->sk = NULL; 108 108 ··· 369 369 * any socket yet. 370 370 */ 371 371 skb->sk = psock->sock; 372 - bpf_compute_data_end(skb); 372 + bpf_compute_data_pointers(skb); 373 373 rc = (*prog->bpf_func)(skb, prog->insnsi); 374 374 skb->sk = NULL; 375 375 rcu_read_unlock();
+1 -1
net/bpf/test_run.c
··· 133 133 if (is_l2) 134 134 __skb_push(skb, ETH_HLEN); 135 135 if (is_direct_pkt_access) 136 - bpf_compute_data_end(skb); 136 + bpf_compute_data_pointers(skb); 137 137 retval = bpf_test_run(prog, skb, repeat, &duration); 138 138 if (!is_l2) 139 139 __skb_push(skb, ETH_HLEN);
+7 -7
net/core/filter.c
··· 1402 1402 { 1403 1403 int err = __bpf_try_make_writable(skb, write_len); 1404 1404 1405 - bpf_compute_data_end(skb); 1405 + bpf_compute_data_pointers(skb); 1406 1406 return err; 1407 1407 } 1408 1408 ··· 1962 1962 ret = skb_vlan_push(skb, vlan_proto, vlan_tci); 1963 1963 bpf_pull_mac_rcsum(skb); 1964 1964 1965 - bpf_compute_data_end(skb); 1965 + bpf_compute_data_pointers(skb); 1966 1966 return ret; 1967 1967 } 1968 1968 ··· 1984 1984 ret = skb_vlan_pop(skb); 1985 1985 bpf_pull_mac_rcsum(skb); 1986 1986 1987 - bpf_compute_data_end(skb); 1987 + bpf_compute_data_pointers(skb); 1988 1988 return ret; 1989 1989 } 1990 1990 ··· 2178 2178 * need to be verified first. 2179 2179 */ 2180 2180 ret = bpf_skb_proto_xlat(skb, proto); 2181 - bpf_compute_data_end(skb); 2181 + bpf_compute_data_pointers(skb); 2182 2182 return ret; 2183 2183 } 2184 2184 ··· 2303 2303 ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) : 2304 2304 bpf_skb_net_grow(skb, len_diff_abs); 2305 2305 2306 - bpf_compute_data_end(skb); 2306 + bpf_compute_data_pointers(skb); 2307 2307 return ret; 2308 2308 } 2309 2309 ··· 2394 2394 skb_gso_reset(skb); 2395 2395 } 2396 2396 2397 - bpf_compute_data_end(skb); 2397 + bpf_compute_data_pointers(skb); 2398 2398 return ret; 2399 2399 } 2400 2400 ··· 2434 2434 skb_reset_mac_header(skb); 2435 2435 } 2436 2436 2437 - bpf_compute_data_end(skb); 2437 + bpf_compute_data_pointers(skb); 2438 2438 return 0; 2439 2439 } 2440 2440
+1 -1
net/core/lwt_bpf.c
··· 51 51 */ 52 52 preempt_disable(); 53 53 rcu_read_lock(); 54 - bpf_compute_data_end(skb); 54 + bpf_compute_data_pointers(skb); 55 55 ret = bpf_prog_run_save_cb(lwt->prog, skb); 56 56 rcu_read_unlock(); 57 57
+2 -2
net/sched/act_bpf.c
··· 49 49 filter = rcu_dereference(prog->filter); 50 50 if (at_ingress) { 51 51 __skb_push(skb, skb->mac_len); 52 - bpf_compute_data_end(skb); 52 + bpf_compute_data_pointers(skb); 53 53 filter_res = BPF_PROG_RUN(filter, skb); 54 54 __skb_pull(skb, skb->mac_len); 55 55 } else { 56 - bpf_compute_data_end(skb); 56 + bpf_compute_data_pointers(skb); 57 57 filter_res = BPF_PROG_RUN(filter, skb); 58 58 } 59 59 rcu_read_unlock();
+2 -2
net/sched/cls_bpf.c
··· 99 99 } else if (at_ingress) { 100 100 /* It is safe to push/pull even if skb_shared() */ 101 101 __skb_push(skb, skb->mac_len); 102 - bpf_compute_data_end(skb); 102 + bpf_compute_data_pointers(skb); 103 103 filter_res = BPF_PROG_RUN(prog->filter, skb); 104 104 __skb_pull(skb, skb->mac_len); 105 105 } else { 106 - bpf_compute_data_end(skb); 106 + bpf_compute_data_pointers(skb); 107 107 filter_res = BPF_PROG_RUN(prog->filter, skb); 108 108 } 109 109