
xsk: Remove MEM_TYPE_ZERO_COPY and corresponding code

There are no users of MEM_TYPE_ZERO_COPY. Remove all corresponding
code, including the "handle" member of struct xdp_buff.
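
For reference, the only zero-copy memory type that remains after this change is MEM_TYPE_XSK_BUFF_POOL. Below is a minimal sketch of how an AF_XDP-capable driver registers that memory model for an RX queue; the function name is illustrative and not part of this patch. Note that, per the net/core/xdp.c hunk, a NULL allocator is now rejected only for MEM_TYPE_PAGE_POOL.

    #include <net/xdp.h>

    /* Sketch only: register the XSK buff pool memory model for one RX queue.
     * MEM_TYPE_XSK_BUFF_POOL takes no allocator argument.
     */
    static int example_reg_xsk_mem_model(struct xdp_rxq_info *rxq)
    {
        return xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
    }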

rfc->v1: Fixed spelling in commit message. (Björn)

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200520192103.355233-13-bjorn.topel@gmail.com

Authored by Björn Töpel, committed by Alexei Starovoitov (0807892e, 39d6443c)

11 files changed, 15 insertions(+), 510 deletions(-)
drivers/net/hyperv/netvsc_bpf.c | -1
···
     xdp->data_end = xdp->data + len;
     xdp->rxq = &nvchan->xdp_rxq;
     xdp->frame_sz = PAGE_SIZE;
-    xdp->handle = 0;

     memcpy(xdp->data, data, len);

include/net/xdp.h | +1 -8
···
     MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
     MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */
     MEM_TYPE_PAGE_POOL,
-    MEM_TYPE_ZERO_COPY,
     MEM_TYPE_XSK_BUFF_POOL,
     MEM_TYPE_MAX,
 };
···
 struct page_pool;

-struct zero_copy_allocator {
-    void (*free)(struct zero_copy_allocator *zca, unsigned long handle);
-};
-
 struct xdp_rxq_info {
     struct net_device *dev;
     u32 queue_index;
···
     void *data_end;
     void *data_meta;
     void *data_hard_start;
-    unsigned long handle;
     struct xdp_rxq_info *rxq;
     u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
 };
···
     int metasize;
     int headroom;

-    if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY ||
-        xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+    if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
         return xdp_convert_zc_to_xdp_frame(xdp);

     /* Assure headroom is available for storing info */

include/net/xdp_sock.h | -45
···
 struct xsk_queue;
 struct xdp_buff;

-struct xdp_umem_page {
-    void *addr;
-    dma_addr_t dma;
-};
-
-struct xdp_umem_fq_reuse {
-    u32 nentries;
-    u32 length;
-    u64 handles[];
-};
-
 struct xdp_umem {
     struct xsk_queue *fq;
     struct xsk_queue *cq;
     struct xsk_buff_pool *pool;
-    struct xdp_umem_page *pages;
-    u64 chunk_mask;
     u64 size;
     u32 headroom;
-    u32 chunk_size_nohr;
     u32 chunk_size;
     struct user_struct *user;
     refcount_t users;
···
     u8 flags;
     int id;
     struct net_device *dev;
-    struct xdp_umem_fq_reuse *fq_reuse;
     bool zc;
     spinlock_t xsk_tx_list_lock;
     struct list_head xsk_tx_list;
···
     return xs;
 }

-static inline u64 xsk_umem_extract_addr(u64 addr)
-{
-    return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
-}
-
-static inline u64 xsk_umem_extract_offset(u64 addr)
-{
-    return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
-}
-
-static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
-{
-    return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
-}
-
 #else

 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
···
                                                  u32 key)
 {
     return NULL;
-}
-
-static inline u64 xsk_umem_extract_addr(u64 addr)
-{
-    return 0;
-}
-
-static inline u64 xsk_umem_extract_offset(u64 addr)
-{
-    return 0;
-}
-
-static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
-{
-    return 0;
 }

 #endif /* CONFIG_XDP_SOCKETS */

include/net/xdp_sock_drv.h | -149
···

 #ifdef CONFIG_XDP_SOCKETS

-bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
-bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
-void xsk_umem_release_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
 bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
-struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
-                                          struct xdp_umem_fq_reuse *newq);
-void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
 struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
 void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
 void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
 void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
 void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
 bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
-
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
-    unsigned long page_addr;
-
-    addr = xsk_umem_add_offset_to_addr(addr);
-    page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;
-
-    return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
-    addr = xsk_umem_add_offset_to_addr(addr);
-
-    return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
-}
-
-/* Reuse-queue aware version of FILL queue helpers */
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
-    struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-    if (rq->length >= cnt)
-        return true;
-
-    return xsk_umem_has_addrs(umem, cnt - rq->length);
-}
-
-static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
-    struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-    if (!rq->length)
-        return xsk_umem_peek_addr(umem, addr);
-
-    *addr = rq->handles[rq->length - 1];
-    return addr;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
-    struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-    if (!rq->length)
-        xsk_umem_release_addr(umem);
-    else
-        rq->length--;
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
-    struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-    rq->handles[rq->length++] = addr;
-}
-
-/* Handle the offset appropriately depending on aligned or unaligned mode.
- * For unaligned mode, we store the offset in the upper 16-bits of the address.
- * For aligned mode, we simply add the offset to the address.
- */
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
-                                         u64 offset)
-{
-    if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-        return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
-    else
-        return address + offset;
-}
-
-static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
-{
-    return umem->chunk_size_nohr;
-}

 static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
 {
···

 #else

-static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
-{
-    return false;
-}
-
-static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
-{
-    return NULL;
-}
-
-static inline void xsk_umem_release_addr(struct xdp_umem *umem)
-{
-}
-
 static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
 {
 }
···
 {
 }

-static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
-{
-    return NULL;
-}
-
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
-    struct xdp_umem *umem, struct xdp_umem_fq_reuse *newq)
-{
-    return NULL;
-}
-
-static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
-{
-}
-
 static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
                                                      u16 queue_id)
 {
     return NULL;
-}
-
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
-    return NULL;
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
-    return 0;
-}
-
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
-    return false;
-}
-
-static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
-    return NULL;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
 }
···
 static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
 {
     return false;
-}
-
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
-                                         u64 offset)
-{
-    return 0;
-}
-
-static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
-{
-    return 0;
 }

 static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)

include/trace/events/xdp.h | -1
···
     FN(PAGE_SHARED) \
     FN(PAGE_ORDER0) \
     FN(PAGE_POOL) \
-    FN(ZERO_COPY) \
     FN(XSK_BUFF_POOL)

 #define __MEM_TYPE_TP_FN(x) \

net/core/xdp.c | +5 -37
···
     mutex_unlock(&mem_id_lock);
 }

-static void mem_id_disconnect(int id)
-{
-    struct xdp_mem_allocator *xa;
-
-    mutex_lock(&mem_id_lock);
-
-    xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
-    if (!xa) {
-        mutex_unlock(&mem_id_lock);
-        WARN(1, "Request remove non-existing id(%d), driver bug?", id);
-        return;
-    }
-
-    trace_mem_disconnect(xa);
-
-    if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
-        call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
-
-    mutex_unlock(&mem_id_lock);
-}
-
 void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
 {
     struct xdp_mem_allocator *xa;
···

     if (id == 0)
         return;
-
-    if (xdp_rxq->mem.type == MEM_TYPE_ZERO_COPY)
-        return mem_id_disconnect(id);

     if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
         rcu_read_lock();
···
     xdp_rxq->mem.type = type;

     if (!allocator) {
-        if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
+        if (type == MEM_TYPE_PAGE_POOL)
             return -EINVAL; /* Setup time check page_pool req */
         return 0;
     }
···
  * of xdp_frames/pages in those cases.
  */
 static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
-                         unsigned long handle, struct xdp_buff *xdp)
+                         struct xdp_buff *xdp)
 {
     struct xdp_mem_allocator *xa;
     struct page *page;
···
         page = virt_to_page(data); /* Assumes order0 page*/
         put_page(page);
         break;
-    case MEM_TYPE_ZERO_COPY:
-        /* NB! Only valid from an xdp_buff! */
-        rcu_read_lock();
-        /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
-        xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
-        xa->zc_alloc->free(xa->zc_alloc, handle);
-        rcu_read_unlock();
-        break;
     case MEM_TYPE_XSK_BUFF_POOL:
         /* NB! Only valid from an xdp_buff! */
         xsk_buff_free(xdp);
···

 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-    __xdp_return(xdpf->data, &xdpf->mem, false, 0, NULL);
+    __xdp_return(xdpf->data, &xdpf->mem, false, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);

 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
 {
-    __xdp_return(xdpf->data, &xdpf->mem, true, 0, NULL);
+    __xdp_return(xdpf->data, &xdpf->mem, true, NULL);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

 void xdp_return_buff(struct xdp_buff *xdp)
 {
-    __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle, xdp);
+    __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);

net/xdp/xdp_umem.c | +1 -55
···
     umem->zc = false;
 }

-static void xdp_umem_unmap_pages(struct xdp_umem *umem)
-{
-    unsigned int i;
-
-    for (i = 0; i < umem->npgs; i++)
-        if (PageHighMem(umem->pgs[i]))
-            vunmap(umem->pages[i].addr);
-}
-
-static int xdp_umem_map_pages(struct xdp_umem *umem)
-{
-    unsigned int i;
-    void *addr;
-
-    for (i = 0; i < umem->npgs; i++) {
-        if (PageHighMem(umem->pgs[i]))
-            addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL);
-        else
-            addr = page_address(umem->pgs[i]);
-
-        if (!addr) {
-            xdp_umem_unmap_pages(umem);
-            return -ENOMEM;
-        }
-
-        umem->pages[i].addr = addr;
-    }
-
-    return 0;
-}
-
 static void xdp_umem_unpin_pages(struct xdp_umem *umem)
 {
     unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
···
         umem->cq = NULL;
     }

-    xsk_reuseq_destroy(umem);
     xp_destroy(umem->pool);
-    xdp_umem_unmap_pages(umem);
     xdp_umem_unpin_pages(umem);
-
-    kvfree(umem->pages);
-    umem->pages = NULL;

     xdp_umem_unaccount_pages(umem);
     kfree(umem);
···
     if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
         return -EINVAL;

-    umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
-                                        : ~((u64)chunk_size - 1);
     umem->size = size;
     umem->headroom = headroom;
-    umem->chunk_size_nohr = chunk_size - headroom;
     umem->chunk_size = chunk_size;
     umem->npgs = size / PAGE_SIZE;
     umem->pgs = NULL;
···
     if (err)
         goto out_account;

-    umem->pages = kvcalloc(umem->npgs, sizeof(*umem->pages),
-                           GFP_KERNEL_ACCOUNT);
-    if (!umem->pages) {
-        err = -ENOMEM;
-        goto out_pin;
-    }
-
-    err = xdp_umem_map_pages(umem);
-    if (err)
-        goto out_pages;
-
     umem->pool = xp_create(umem->pgs, umem->npgs, chunks, chunk_size,
                            headroom, size, unaligned_chunks);
     if (!umem->pool) {
         err = -ENOMEM;
-        goto out_unmap;
+        goto out_pin;
     }
     return 0;

-out_unmap:
-    xdp_umem_unmap_pages(umem);
-out_pages:
-    kvfree(umem->pages);
 out_pin:
     xdp_umem_unpin_pages(umem);
 out_account:

net/xdp/xsk.c | +1 -47
···
            READ_ONCE(xs->umem->fq);
 }

-bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
-{
-    return xskq_cons_has_entries(umem->fq, cnt);
-}
-EXPORT_SYMBOL(xsk_umem_has_addrs);
-
-bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
-{
-    return xskq_cons_peek_addr(umem->fq, addr, umem);
-}
-EXPORT_SYMBOL(xsk_umem_peek_addr);
-
-void xsk_umem_release_addr(struct xdp_umem *umem)
-{
-    xskq_cons_release(umem->fq);
-}
-EXPORT_SYMBOL(xsk_umem_release_addr);
-
 void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
 {
     if (umem->need_wakeup & XDP_WAKEUP_RX)
···

     len = xdp->data_end - xdp->data;

-    return xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY ||
-           xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
+    return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
         __xsk_rcv_zc(xs, xdp, len) :
         __xsk_rcv(xs, xdp, len, explicit_free);
···
     return sock;
 }

-/* Check if umem pages are contiguous.
- * If zero-copy mode, use the DMA address to do the page contiguity check
- * For all other modes we use addr (kernel virtual address)
- * Store the result in the low bits of addr.
- */
-static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
-{
-    struct xdp_umem_page *pgs = umem->pages;
-    int i, is_contig;
-
-    for (i = 0; i < umem->npgs - 1; i++) {
-        is_contig = (flags & XDP_ZEROCOPY) ?
-            (pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
-            (pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
-        pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
-    }
-}
-
 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 {
     struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
···
             goto out_unlock;
     } else {
         /* This xsk has its own umem. */
-        xskq_set_umem(xs->umem->fq, xs->umem->size,
-                      xs->umem->chunk_mask);
-        xskq_set_umem(xs->umem->cq, xs->umem->size,
-                      xs->umem->chunk_mask);
-
         err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
         if (err)
             goto out_unlock;
-
-        xsk_check_page_contiguity(xs->umem, flags);
     }

     xs->dev = dev;
     xs->zc = xs->umem->zc;
     xs->queue_id = qid;
-    xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
-    xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
     xdp_add_sk_umem(xs->umem, xs);

 out_unlock:

net/xdp/xsk_buff_pool.c | +7
···

 #include "xsk_queue.h"

+/* Masks for xdp_umem_page flags.
+ * The low 12-bits of the addr will be 0 since this is the page address, so we
+ * can use them for flags.
+ */
+#define XSK_NEXT_PG_CONTIG_SHIFT 0
+#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
+
 struct xsk_buff_pool {
     struct xsk_queue *fq;
     struct list_head free_list;

net/xdp/xsk_queue.c | -62
···

 #include "xsk_queue.h"

-void xskq_set_umem(struct xsk_queue *q, u64 umem_size, u64 chunk_mask)
-{
-    if (!q)
-        return;
-
-    q->umem_size = umem_size;
-    q->chunk_mask = chunk_mask;
-}
-
 static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
 {
     struct xdp_umem_ring *umem_ring;
···

     page_frag_free(q->ring);
     kfree(q);
-}
-
-struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
-{
-    struct xdp_umem_fq_reuse *newq;
-
-    /* Check for overflow */
-    if (nentries > (u32)roundup_pow_of_two(nentries))
-        return NULL;
-    nentries = roundup_pow_of_two(nentries);
-
-    newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
-    if (!newq)
-        return NULL;
-    memset(newq, 0, offsetof(typeof(*newq), handles));
-
-    newq->nentries = nentries;
-    return newq;
-}
-EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
-
-struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
-                                          struct xdp_umem_fq_reuse *newq)
-{
-    struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;
-
-    if (!oldq) {
-        umem->fq_reuse = newq;
-        return NULL;
-    }
-
-    if (newq->nentries < oldq->length)
-        return newq;
-
-    memcpy(newq->handles, oldq->handles,
-           array_size(oldq->length, sizeof(u64)));
-    newq->length = oldq->length;
-
-    umem->fq_reuse = newq;
-    return oldq;
-}
-EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
-
-void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
-{
-    kvfree(rq);
-}
-EXPORT_SYMBOL_GPL(xsk_reuseq_free);
-
-void xsk_reuseq_destroy(struct xdp_umem *umem)
-{
-    xsk_reuseq_free(umem->fq_reuse);
-    umem->fq_reuse = NULL;
 }

net/xdp/xsk_queue.h | -105
···
 };

 struct xsk_queue {
-    u64 chunk_mask;
-    u64 umem_size;
     u32 ring_mask;
     u32 nentries;
     u32 cached_prod;
···
  */

 /* Functions that read and validate content from consumer rings. */
-
-static inline bool xskq_cons_crosses_non_contig_pg(struct xdp_umem *umem,
-                                                   u64 addr,
-                                                   u64 length)
-{
-    bool cross_pg = (addr & (PAGE_SIZE - 1)) + length > PAGE_SIZE;
-    bool next_pg_contig =
-        (unsigned long)umem->pages[(addr >> PAGE_SHIFT)].addr &
-        XSK_NEXT_PG_CONTIG_MASK;
-
-    return cross_pg && !next_pg_contig;
-}
-
-static inline bool xskq_cons_is_valid_unaligned(struct xsk_queue *q,
-                                                u64 addr,
-                                                u64 length,
-                                                struct xdp_umem *umem)
-{
-    u64 base_addr = xsk_umem_extract_addr(addr);
-
-    addr = xsk_umem_add_offset_to_addr(addr);
-    if (base_addr >= q->umem_size || addr >= q->umem_size ||
-        xskq_cons_crosses_non_contig_pg(umem, addr, length)) {
-        q->invalid_descs++;
-        return false;
-    }
-
-    return true;
-}
-
-static inline bool xskq_cons_is_valid_addr(struct xsk_queue *q, u64 addr)
-{
-    if (addr >= q->umem_size) {
-        q->invalid_descs++;
-        return false;
-    }
-
-    return true;
-}
-
-static inline bool xskq_cons_read_addr(struct xsk_queue *q, u64 *addr,
-                                       struct xdp_umem *umem)
-{
-    struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
-
-    while (q->cached_cons != q->cached_prod) {
-        u32 idx = q->cached_cons & q->ring_mask;
-
-        *addr = ring->desc[idx] & q->chunk_mask;
-
-        if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
-            if (xskq_cons_is_valid_unaligned(q, *addr,
-                                             umem->chunk_size_nohr,
-                                             umem))
-                return true;
-            goto out;
-        }
-
-        if (xskq_cons_is_valid_addr(q, *addr))
-            return true;
-
-out:
-        q->cached_cons++;
-    }
-
-    return false;
-}
-
-static inline bool xskq_cons_read_addr_aligned(struct xsk_queue *q, u64 *addr)
-{
-    struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
-
-    while (q->cached_cons != q->cached_prod) {
-        u32 idx = q->cached_cons & q->ring_mask;
-
-        *addr = ring->desc[idx];
-        if (xskq_cons_is_valid_addr(q, *addr))
-            return true;
-
-        q->cached_cons++;
-    }
-
-    return false;
-}

 static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
 {
···
     entries = q->cached_prod - q->cached_cons;

     return entries >= cnt;
-}
-
-static inline bool xskq_cons_peek_addr(struct xsk_queue *q, u64 *addr,
-                                       struct xdp_umem *umem)
-{
-    if (q->cached_prod == q->cached_cons)
-        xskq_cons_get_entries(q);
-    return xskq_cons_read_addr(q, addr, umem);
-}
-
-static inline bool xskq_cons_peek_addr_aligned(struct xsk_queue *q, u64 *addr)
-{
-    if (q->cached_prod == q->cached_cons)
-        xskq_cons_get_entries(q);
-    return xskq_cons_read_addr_aligned(q, addr);
 }

 static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
···
     return q ? q->invalid_descs : 0;
 }

-void xskq_set_umem(struct xsk_queue *q, u64 umem_size, u64 chunk_mask);
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
-
-/* Executed by the core when the entire UMEM gets freed */
-void xsk_reuseq_destroy(struct xdp_umem *umem);

 #endif /* _LINUX_XSK_QUEUE_H */