Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

xdp: Add xdp_do_redirect_frame() for pre-computed xdp_frames

Add an xdp_do_redirect_frame() variant which supports pre-computed
xdp_frame structures. This will be used in bpf_prog_run() to avoid having
to write to the xdp_frame structure when the XDP program doesn't modify the
frame boundaries.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20220103150812.87914-6-toke@redhat.com

Authored by Toke Høiland-Jørgensen; committed by Alexei Starovoitov.
Abbreviated hashes as shown on the page (parent and commit, order not labeled): 1372d34c, d53ad5d8

Diffstat: 58 insertions(+), 11 deletions(-) across 2 files.

include/linux/filter.h (+4):
···
 int xdp_do_redirect(struct net_device *dev,
 		    struct xdp_buff *xdp,
 		    struct bpf_prog *prog);
+int xdp_do_redirect_frame(struct net_device *dev,
+			  struct xdp_buff *xdp,
+			  struct xdp_frame *xdpf,
+			  struct bpf_prog *prog);
 void xdp_do_flush(void);
 
 /* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
net/core/filter.c (+54, −11):
···
 }
 EXPORT_SYMBOL_GPL(xdp_master_redirect);
 
-int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
-		    struct bpf_prog *xdp_prog)
+static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
+					struct net_device *dev,
+					struct xdp_buff *xdp,
+					struct bpf_prog *xdp_prog)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
 	enum bpf_map_type map_type = ri->map_type;
 	void *fwd = ri->tgt_value;
 	u32 map_id = ri->map_id;
-	struct xdp_frame *xdpf;
+	int err;
+
+	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+	ri->map_type = BPF_MAP_TYPE_UNSPEC;
+
+	err = __xsk_map_redirect(fwd, xdp);
+	if (unlikely(err))
+		goto err;
+
+	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
+	return 0;
+err:
+	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
+	return err;
+}
+
+static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+						   struct net_device *dev,
+						   struct xdp_frame *xdpf,
+						   struct bpf_prog *xdp_prog)
+{
+	enum bpf_map_type map_type = ri->map_type;
+	void *fwd = ri->tgt_value;
+	u32 map_id = ri->map_id;
 	struct bpf_map *map;
 	int err;
 
 	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
 	ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
-	if (map_type == BPF_MAP_TYPE_XSKMAP) {
-		err = __xsk_map_redirect(fwd, xdp);
-		goto out;
-	}
-
-	xdpf = xdp_convert_buff_to_frame(xdp);
 	if (unlikely(!xdpf)) {
 		err = -EOVERFLOW;
 		goto err;
···
 		err = -EBADRQC;
 	}
 
-out:
 	if (unlikely(err))
 		goto err;
···
 	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd,
 				    map_type, map_id, ri->tgt_index, err);
 	return err;
 }
+
+int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+		    struct bpf_prog *xdp_prog)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	enum bpf_map_type map_type = ri->map_type;
+
+	if (map_type == BPF_MAP_TYPE_XSKMAP)
+		return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+
+	return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
+				       xdp_prog);
+}
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
+
+int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
+			  struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	enum bpf_map_type map_type = ri->map_type;
+
+	if (map_type == BPF_MAP_TYPE_XSKMAP)
+		return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+
+	return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog);
+}
+EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
 
 static int xdp_do_generic_redirect_map(struct net_device *dev,
 				       struct sk_buff *skb,