Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

net/core/xdp.c at v5.4-rc7 (536 lines, 13 kB)
// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allocator has indicated it is safe to remove before this is called */
	if (xa->mem.type == MEM_TYPE_PAGE_POOL)
		page_pool_free(xa->page_pool);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	/* Poison memory */
	xa->mem.id = 0xFFFF;
	xa->mem.type = 0xF0F0;
	xa->allocator = (void *)0xDEAD9001;

	kfree(xa);
}

static bool __mem_id_disconnect(int id, bool force)
{
	struct xdp_mem_allocator *xa;
	bool safe_to_remove = true;

	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
	if (!xa) {
		mutex_unlock(&mem_id_lock);
		WARN(1, "Request remove non-existing id(%d), driver bug?", id);
		return true;
	}
	xa->disconnect_cnt++;

	/* Detects in-flight packet-pages for page_pool */
	if (xa->mem.type == MEM_TYPE_PAGE_POOL)
		safe_to_remove = page_pool_request_shutdown(xa->page_pool);

	trace_mem_disconnect(xa, safe_to_remove, force);

	if ((safe_to_remove || force) &&
	    !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
	return (safe_to_remove|force);
}

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (30 * HZ)
#define DEFER_MAX_RETRIES 120

static void mem_id_disconnect_defer_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct xdp_mem_allocator *xa = container_of(dwq, typeof(*xa), defer_wq);
	bool force = false;

	if (xa->disconnect_cnt > DEFER_MAX_RETRIES)
		force = true;

	if (__mem_id_disconnect(xa->mem.id, force))
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, xa->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)xa->defer_start) / HZ;

		pr_warn("%s() stalled mem.id=%u shutdown %d attempts %d sec\n",
			__func__, xa->mem.id, xa->disconnect_cnt, sec);
		xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
}

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (xdp_rxq->mem.type != MEM_TYPE_PAGE_POOL &&
	    xdp_rxq->mem.type != MEM_TYPE_ZERO_COPY) {
		return;
	}

	if (id == 0)
		return;

	if (__mem_id_disconnect(id, false))
		return;

	/* Could not disconnect, defer new disconnect attempt to later */
	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
	if (!xa) {
		mutex_unlock(&mem_id_lock);
		return;
	}
	xa->defer_start = jiffies;
	xa->defer_warn = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&xa->defer_wq, mem_id_disconnect_defer_retry);
	mutex_unlock(&mem_id_lock);
	schedule_delayed_work(&xa->defer_wq, DEFER_TIME);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

/* This unregister operation will also cleanup and destroy the
 * allocator. The page_pool_free() operation is first called when it's
 * safe to remove, possibly deferred to a workqueue.
 */
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_get(xdp_alloc->page_pool);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (likely(xa)) {
			napi_direct &= !xdp_return_frame_no_direct();
			page_pool_put_page(xa->page_pool, page, napi_direct);
		} else {
			/* Hopefully the stack trace shows who to blame for
			 * the late return */
			WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id);
			trace_mem_return_failed(mem, page);
			put_page(page);
		}
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order-0 page */
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf)
{
	bpf->prog_id = info->prog ? info->prog->aux->id : 0;
	bpf->prog_flags = info->prog ? info->flags : 0;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_attachment_query);

bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf)
{
	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
		NL_SET_ERR_MSG(bpf->extack,
			       "program loaded with different flags");
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xdp_return_buff(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
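
For context, this is roughly how a driver drives the registration API above during RX queue setup: create the allocator, register the queue, then attach the memory model. The following is a minimal sketch, not a real driver: struct my_rxq, my_rxq_setup() and the page_pool parameter values are illustrative assumptions, while the xdp_rxq_info_* calls are the functions defined in this file and page_pool_create()/page_pool_free() come from net/page_pool.h at this kernel version.

/* Hypothetical driver RX-queue state; only the fields used here. */
struct my_rxq {
	struct xdp_rxq_info xdp_rxq;
	struct page_pool *page_pool;
	u32 queue_index;
};

static int my_rxq_setup(struct my_rxq *rxq, struct net_device *netdev)
{
	struct page_pool_params pp_params = {
		.order = 0,		/* order-0 pages */
		.pool_size = 256,	/* illustrative size */
		.nid = NUMA_NO_NODE,
		.dev = netdev->dev.parent,
		.dma_dir = DMA_BIDIRECTIONAL,
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool))
		return PTR_ERR(rxq->page_pool);

	/* Must come first: xdp_rxq_info_reg_mem_model() rejects a queue
	 * that is not in REG_STATE_REGISTERED. */
	err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, rxq->queue_index);
	if (err)
		goto err_free_pool;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, rxq->page_pool);
	if (err)
		goto err_unreg;

	return 0;

err_unreg:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pool:
	page_pool_free(rxq->page_pool); /* pool never connected to a mem.id */
	return err;
}

Teardown is the mirror image: a single xdp_rxq_info_unreg() call also disconnects the memory model, and the deferred-disconnect machinery above frees the page_pool once all in-flight pages have been returned.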
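
The napi_direct distinction inside __xdp_return() surfaces as two driver-visible entry points. A hedged sketch of when each applies; my_tx_complete() and its in_napi flag are illustrative, not kernel symbols:

/* Return a transmitted frame to its memory allocator. From the NAPI
 * poll context that owns the page_pool, the _rx_napi variant can
 * recycle the page directly into the pool's fast cache; from any
 * other context the plain variant must be used. */
static void my_tx_complete(struct xdp_frame *xdpf, bool in_napi)
{
	if (in_napi)
		xdp_return_frame_rx_napi(xdpf);	/* napi_direct = true */
	else
		xdp_return_frame(xdpf);		/* napi_direct = false */
}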
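
The xdp_attachment_* helpers above are shared boilerplate for drivers' ndo_bpf handlers. A sketch of the intended call pattern, assuming a hypothetical my_priv with a struct xdp_attachment_info member and a hypothetical my_hw_setup_prog() hardware hook:

static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct my_priv *priv = netdev_priv(dev);	/* hypothetical */

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		/* Reject attach if flags conflict with the loaded program */
		if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
			return -EBUSY;
		my_hw_setup_prog(priv, bpf->prog);	/* hypothetical hook */
		/* Record bpf->prog/flags, release any previous program */
		xdp_attachment_setup(&priv->xdp, bpf);
		return 0;
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&priv->xdp, bpf);
	default:
		return -EINVAL;
	}
}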