Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xsk: Move driver interface to xdp_sock_drv.h

Move the AF_XDP zero-copy driver interface to its own include file
called xdp_sock_drv.h. This, hopefully, will make it clearer for
NIC driver implementers to know which functions to use for zero-copy
support.

v4->v5: Fix -Wmissing-prototypes by including the header file. (Jakub)

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200520192103.355233-4-bjorn.topel@gmail.com

authored by

Magnus Karlsson and committed by
Alexei Starovoitov
a71506a4 d20a1676

+238 -218
+1 -1
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 11 11 #include "i40e_diag.h" 12 12 #include "i40e_xsk.h" 13 13 #include <net/udp_tunnel.h> 14 - #include <net/xdp_sock.h> 14 + #include <net/xdp_sock_drv.h> 15 15 /* All i40e tracepoints are defined by the include below, which 16 16 * must be included exactly once across the whole kernel with 17 17 * CREATE_TRACE_POINTS defined
+1 -1
drivers/net/ethernet/intel/i40e/i40e_xsk.c
··· 2 2 /* Copyright(c) 2018 Intel Corporation. */ 3 3 4 4 #include <linux/bpf_trace.h> 5 - #include <net/xdp_sock.h> 5 + #include <net/xdp_sock_drv.h> 6 6 #include <net/xdp.h> 7 7 8 8 #include "i40e.h"
+1 -1
drivers/net/ethernet/intel/ice/ice_xsk.c
··· 2 2 /* Copyright (c) 2019, Intel Corporation. */ 3 3 4 4 #include <linux/bpf_trace.h> 5 - #include <net/xdp_sock.h> 5 + #include <net/xdp_sock_drv.h> 6 6 #include <net/xdp.h> 7 7 #include "ice.h" 8 8 #include "ice_base.h"
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
··· 2 2 /* Copyright(c) 2018 Intel Corporation. */ 3 3 4 4 #include <linux/bpf_trace.h> 5 - #include <net/xdp_sock.h> 5 + #include <net/xdp_sock_drv.h> 6 6 #include <net/xdp.h> 7 7 8 8 #include "ixgbe.h"
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
··· 31 31 */ 32 32 33 33 #include <linux/bpf_trace.h> 34 - #include <net/xdp_sock.h> 34 + #include <net/xdp_sock_drv.h> 35 35 #include "en/xdp.h" 36 36 #include "en/params.h" 37 37
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
··· 5 5 #define __MLX5_EN_XSK_RX_H__ 6 6 7 7 #include "en.h" 8 - #include <net/xdp_sock.h> 8 + #include <net/xdp_sock_drv.h> 9 9 10 10 /* RX data path */ 11 11
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
··· 5 5 #define __MLX5_EN_XSK_TX_H__ 6 6 7 7 #include "en.h" 8 - #include <net/xdp_sock.h> 8 + #include <net/xdp_sock_drv.h> 9 9 10 10 /* TX data path */ 11 11
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 2 /* Copyright (c) 2019 Mellanox Technologies. */ 3 3 4 - #include <net/xdp_sock.h> 4 + #include <net/xdp_sock_drv.h> 5 5 #include "umem.h" 6 6 #include "setup.h" 7 7 #include "en/params.h"
+8 -206
include/net/xdp_sock.h
··· 15 15 16 16 struct net_device; 17 17 struct xsk_queue; 18 + struct xdp_buff; 18 19 19 20 /* Masks for xdp_umem_page flags. 20 21 * The low 12-bits of the addr will be 0 since this is the page address, so we ··· 102 101 spinlock_t map_list_lock; 103 102 }; 104 103 105 - struct xdp_buff; 106 104 #ifdef CONFIG_XDP_SOCKETS 107 - int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); 108 - /* Used from netdev driver */ 109 - bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt); 110 - bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr); 111 - void xsk_umem_release_addr(struct xdp_umem *umem); 112 - void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); 113 - bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc); 114 - void xsk_umem_consume_tx_done(struct xdp_umem *umem); 115 - struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries); 116 - struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem, 117 - struct xdp_umem_fq_reuse *newq); 118 - void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq); 119 - struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id); 120 - void xsk_set_rx_need_wakeup(struct xdp_umem *umem); 121 - void xsk_set_tx_need_wakeup(struct xdp_umem *umem); 122 - void xsk_clear_rx_need_wakeup(struct xdp_umem *umem); 123 - void xsk_clear_tx_need_wakeup(struct xdp_umem *umem); 124 - bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem); 125 105 106 + int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); 126 107 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp); 127 108 void __xsk_map_flush(void); 128 109 ··· 136 153 return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr); 137 154 } 138 155 139 - static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) 140 - { 141 - unsigned long page_addr; 142 - 143 - addr = xsk_umem_add_offset_to_addr(addr); 144 - page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr; 145 - 146 - return 
(char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK); 147 - } 148 - 149 - static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) 150 - { 151 - addr = xsk_umem_add_offset_to_addr(addr); 152 - 153 - return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK); 154 - } 155 - 156 - /* Reuse-queue aware version of FILL queue helpers */ 157 - static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt) 158 - { 159 - struct xdp_umem_fq_reuse *rq = umem->fq_reuse; 160 - 161 - if (rq->length >= cnt) 162 - return true; 163 - 164 - return xsk_umem_has_addrs(umem, cnt - rq->length); 165 - } 166 - 167 - static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) 168 - { 169 - struct xdp_umem_fq_reuse *rq = umem->fq_reuse; 170 - 171 - if (!rq->length) 172 - return xsk_umem_peek_addr(umem, addr); 173 - 174 - *addr = rq->handles[rq->length - 1]; 175 - return addr; 176 - } 177 - 178 - static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem) 179 - { 180 - struct xdp_umem_fq_reuse *rq = umem->fq_reuse; 181 - 182 - if (!rq->length) 183 - xsk_umem_release_addr(umem); 184 - else 185 - rq->length--; 186 - } 187 - 188 - static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) 189 - { 190 - struct xdp_umem_fq_reuse *rq = umem->fq_reuse; 191 - 192 - rq->handles[rq->length++] = addr; 193 - } 194 - 195 - /* Handle the offset appropriately depending on aligned or unaligned mode. 196 - * For unaligned mode, we store the offset in the upper 16-bits of the address. 197 - * For aligned mode, we simply add the offset to the address. 
198 - */ 199 - static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address, 200 - u64 offset) 201 - { 202 - if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) 203 - return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); 204 - else 205 - return address + offset; 206 - } 207 - 208 - static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem) 209 - { 210 - return umem->chunk_size_nohr; 211 - } 212 - 213 156 #else 157 + 214 158 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) 215 159 { 216 160 return -ENOTSUPP; 217 161 } 218 162 219 - static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt) 163 + static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp) 220 164 { 221 - return false; 165 + return -EOPNOTSUPP; 222 166 } 223 167 224 - static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) 225 - { 226 - return NULL; 227 - } 228 - 229 - static inline void xsk_umem_release_addr(struct xdp_umem *umem) 168 + static inline void __xsk_map_flush(void) 230 169 { 231 170 } 232 171 233 - static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) 234 - { 235 - } 236 - 237 - static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, 238 - struct xdp_desc *desc) 239 - { 240 - return false; 241 - } 242 - 243 - static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem) 244 - { 245 - } 246 - 247 - static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries) 248 - { 249 - return NULL; 250 - } 251 - 252 - static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap( 253 - struct xdp_umem *umem, 254 - struct xdp_umem_fq_reuse *newq) 255 - { 256 - return NULL; 257 - } 258 - static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq) 259 - { 260 - } 261 - 262 - static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, 263 - u16 queue_id) 172 + static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, 173 + u32 key) 
264 174 { 265 175 return NULL; 266 176 } ··· 173 297 return 0; 174 298 } 175 299 176 - static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) 177 - { 178 - return NULL; 179 - } 180 - 181 - static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) 182 - { 183 - return 0; 184 - } 185 - 186 - static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt) 187 - { 188 - return false; 189 - } 190 - 191 - static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) 192 - { 193 - return NULL; 194 - } 195 - 196 - static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem) 197 - { 198 - } 199 - 200 - static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) 201 - { 202 - } 203 - 204 - static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem) 205 - { 206 - } 207 - 208 - static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem) 209 - { 210 - } 211 - 212 - static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) 213 - { 214 - } 215 - 216 - static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) 217 - { 218 - } 219 - 220 - static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) 221 - { 222 - return false; 223 - } 224 - 225 - static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle, 226 - u64 offset) 227 - { 228 - return 0; 229 - } 230 - 231 - static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem) 232 - { 233 - return 0; 234 - } 235 - 236 - static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp) 237 - { 238 - return -EOPNOTSUPP; 239 - } 240 - 241 - static inline void __xsk_map_flush(void) 242 - { 243 - } 244 - 245 - static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, 246 - u32 key) 247 - { 248 - return NULL; 249 - } 250 300 #endif /* CONFIG_XDP_SOCKETS */ 251 301 252 302 #endif /* _LINUX_XDP_SOCK_H */
+217
include/net/xdp_sock_drv.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Interface for implementing AF_XDP zero-copy support in drivers. 3 + * Copyright(c) 2020 Intel Corporation. 4 + */ 5 + 6 + #ifndef _LINUX_XDP_SOCK_DRV_H 7 + #define _LINUX_XDP_SOCK_DRV_H 8 + 9 + #include <net/xdp_sock.h> 10 + 11 + #ifdef CONFIG_XDP_SOCKETS 12 + 13 + bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt); 14 + bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr); 15 + void xsk_umem_release_addr(struct xdp_umem *umem); 16 + void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); 17 + bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc); 18 + void xsk_umem_consume_tx_done(struct xdp_umem *umem); 19 + struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries); 20 + struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem, 21 + struct xdp_umem_fq_reuse *newq); 22 + void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq); 23 + struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id); 24 + void xsk_set_rx_need_wakeup(struct xdp_umem *umem); 25 + void xsk_set_tx_need_wakeup(struct xdp_umem *umem); 26 + void xsk_clear_rx_need_wakeup(struct xdp_umem *umem); 27 + void xsk_clear_tx_need_wakeup(struct xdp_umem *umem); 28 + bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem); 29 + 30 + static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) 31 + { 32 + unsigned long page_addr; 33 + 34 + addr = xsk_umem_add_offset_to_addr(addr); 35 + page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr; 36 + 37 + return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK); 38 + } 39 + 40 + static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) 41 + { 42 + addr = xsk_umem_add_offset_to_addr(addr); 43 + 44 + return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK); 45 + } 46 + 47 + /* Reuse-queue aware version of FILL queue helpers */ 48 + static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt) 49 + { 
50 + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; 51 + 52 + if (rq->length >= cnt) 53 + return true; 54 + 55 + return xsk_umem_has_addrs(umem, cnt - rq->length); 56 + } 57 + 58 + static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) 59 + { 60 + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; 61 + 62 + if (!rq->length) 63 + return xsk_umem_peek_addr(umem, addr); 64 + 65 + *addr = rq->handles[rq->length - 1]; 66 + return addr; 67 + } 68 + 69 + static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem) 70 + { 71 + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; 72 + 73 + if (!rq->length) 74 + xsk_umem_release_addr(umem); 75 + else 76 + rq->length--; 77 + } 78 + 79 + static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) 80 + { 81 + struct xdp_umem_fq_reuse *rq = umem->fq_reuse; 82 + 83 + rq->handles[rq->length++] = addr; 84 + } 85 + 86 + /* Handle the offset appropriately depending on aligned or unaligned mode. 87 + * For unaligned mode, we store the offset in the upper 16-bits of the address. 88 + * For aligned mode, we simply add the offset to the address. 
89 + */ 90 + static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address, 91 + u64 offset) 92 + { 93 + if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) 94 + return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); 95 + else 96 + return address + offset; 97 + } 98 + 99 + static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem) 100 + { 101 + return umem->chunk_size_nohr; 102 + } 103 + 104 + #else 105 + 106 + static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt) 107 + { 108 + return false; 109 + } 110 + 111 + static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) 112 + { 113 + return NULL; 114 + } 115 + 116 + static inline void xsk_umem_release_addr(struct xdp_umem *umem) 117 + { 118 + } 119 + 120 + static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) 121 + { 122 + } 123 + 124 + static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, 125 + struct xdp_desc *desc) 126 + { 127 + return false; 128 + } 129 + 130 + static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem) 131 + { 132 + } 133 + 134 + static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries) 135 + { 136 + return NULL; 137 + } 138 + 139 + static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap( 140 + struct xdp_umem *umem, struct xdp_umem_fq_reuse *newq) 141 + { 142 + return NULL; 143 + } 144 + 145 + static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq) 146 + { 147 + } 148 + 149 + static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, 150 + u16 queue_id) 151 + { 152 + return NULL; 153 + } 154 + 155 + static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) 156 + { 157 + return NULL; 158 + } 159 + 160 + static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) 161 + { 162 + return 0; 163 + } 164 + 165 + static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt) 166 + { 167 + return false; 168 + } 169 + 170 + static inline 
u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) 171 + { 172 + return NULL; 173 + } 174 + 175 + static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem) 176 + { 177 + } 178 + 179 + static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) 180 + { 181 + } 182 + 183 + static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem) 184 + { 185 + } 186 + 187 + static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem) 188 + { 189 + } 190 + 191 + static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) 192 + { 193 + } 194 + 195 + static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) 196 + { 197 + } 198 + 199 + static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) 200 + { 201 + return false; 202 + } 203 + 204 + static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle, 205 + u64 offset) 206 + { 207 + return 0; 208 + } 209 + 210 + static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem) 211 + { 212 + return 0; 213 + } 214 + 215 + #endif /* CONFIG_XDP_SOCKETS */ 216 + 217 + #endif /* _LINUX_XDP_SOCK_DRV_H */
+1 -1
net/ethtool/channels.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 3 - #include <net/xdp_sock.h> 3 + #include <net/xdp_sock_drv.h> 4 4 5 5 #include "netlink.h" 6 6 #include "common.h"
+1 -1
net/ethtool/ioctl.c
··· 24 24 #include <linux/sched/signal.h> 25 25 #include <linux/net.h> 26 26 #include <net/devlink.h> 27 - #include <net/xdp_sock.h> 27 + #include <net/xdp_sock_drv.h> 28 28 #include <net/flow_offload.h> 29 29 #include <linux/ethtool_netlink.h> 30 30 #include <generated/utsrelease.h>
+1 -1
net/xdp/xdp_umem.h
··· 6 6 #ifndef XDP_UMEM_H_ 7 7 #define XDP_UMEM_H_ 8 8 9 - #include <net/xdp_sock.h> 9 + #include <net/xdp_sock_drv.h> 10 10 11 11 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, 12 12 u16 queue_id, u16 flags);
+1 -1
net/xdp/xsk.c
··· 22 22 #include <linux/net.h> 23 23 #include <linux/netdevice.h> 24 24 #include <linux/rculist.h> 25 - #include <net/xdp_sock.h> 25 + #include <net/xdp_sock_drv.h> 26 26 #include <net/xdp.h> 27 27 28 28 #include "xsk_queue.h"
+1
net/xdp/xsk_queue.c
··· 6 6 #include <linux/log2.h> 7 7 #include <linux/slab.h> 8 8 #include <linux/overflow.h> 9 + #include <net/xdp_sock_drv.h> 9 10 10 11 #include "xsk_queue.h" 11 12