/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_cb_desc;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

#define XSK_PRIV_MAX 24

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	u8 cb[XSK_PRIV_MAX];
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	struct list_head list_node;
} __aligned_largest;

#define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
#define XSK_TX_COMPL_FITS(t) BUILD_BUG_ON(sizeof(struct xsk_tx_metadata_compl) > sizeof(t))

struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
};

struct xsk_buff_pool {
	/* Members used only in the control path come first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	/* Protects generic receive in shared and non-shared umem mode. */
	spinlock_t rx_lock;
	struct list_head free_list;
	struct list_head xskb_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members, placed as close to free_heads at the end as
	 * possible.
	 */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of
	 * dma_pages even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	struct xdp_desc *tx_descs;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u32 xdp_zc_max_segs;
	u8 tx_metadata_len; /* inherited from umem */
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool unaligned;
	bool tx_sw_csum;
	void *addrs;
	/* Mutual exclusion of the completion ring in SKB mode. Protects
	 * against the NAPI TX thread and the sendmsg error paths in the
	 * SKB destructor callback.
	 */
	spinlock_t cq_prod_lock;
	/* Mutual exclusion of the completion ring in SKB mode. Protects
	 * sockets that share a single cq because the same netdev and
	 * queue id are shared.
	 */
	spinlock_t cq_cached_prod_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
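
/* A worked example, not taken from the kernel sources: dma_pages[] entries
 * are page aligned, so their low 12 bits are zero and bit 0 is free to carry
 * the "next page is contiguous" flag. A hypothetical entry of
 * (0x100000 | XSK_NEXT_PG_CONTIG_MASK) would mean the page mapped at DMA
 * address 0x100000 is immediately followed in DMA address space by the page
 * at 0x101000, so an unaligned frame may safely straddle that page boundary.
 * Consumers such as xp_init_xskb_dma() below mask the flag back out with
 * ~XSK_NEXT_PG_CONTIG_MASK before using the address.
 */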

/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
			  (addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
void xp_fill_cb(struct xsk_buff_pool *pool, struct xsk_cb_desc *desc);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);

struct xdp_desc_ctx {
	dma_addr_t dma;
	struct xsk_tx_metadata *meta;
};

struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr);

static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_for_cpu(xskb->pool->dev, xskb->dma,
				xskb->pool->frame_len,
				DMA_BIDIRECTIONAL);
}

static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	dma_sync_single_for_device(pool->dev, dma, size, DMA_BIDIRECTIONAL);
}
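
/* A minimal RX usage sketch for a hypothetical zero-copy driver; the helpers
 * my_post_rx_desc() and my_process_frame() are illustrative and not part of
 * this header:
 *
 *	struct xdp_buff *xdp = xp_alloc(pool);
 *	struct xdp_buff_xsk *xskb;
 *
 *	if (!xdp)
 *		return;				// fill ring empty, retry later
 *	xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 *	my_post_rx_desc(xp_get_dma(xskb));	// hand the frame to the HW
 *	...					// after the HW completion:
 *	xp_dma_sync_for_cpu(xskb);		// make the frame CPU-visible
 *	my_process_frame(xdp);
 */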

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	return pool->dma_pages &&
	       !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
}

static inline bool xp_mb_desc(const struct xdp_desc *desc)
{
	return desc->options & XDP_PKT_CONTD;
}

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb,
				struct xsk_buff_pool *pool)
{
	u64 orig_addr = xskb->xdp.data - pool->addrs;
	u64 offset;

	if (!pool->unaligned)
		return orig_addr;

	offset = xskb->xdp.data - xskb->xdp.data_hard_start;
	offset += pool->headroom;
	orig_addr -= offset;
	return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool)
{
	return pool->tx_metadata_len > 0;
}

#endif /* XSK_BUFF_POOL_H_ */