Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'net-6.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Including fixes from bpf, netfilter and WiFi.

Jakub is doing a lot of work to include the self-tests in our CI, as a
result a significant number of self-test related fixes are flowing in
(and will likely continue in the next few weeks).

Current release - regressions:

- bpf: fix a kernel crash for the riscv 64 JIT

- bnxt_en: fix memory leak in bnxt_hwrm_get_rings()

- revert "net: macsec: use skb_ensure_writable_head_tail to expand
the skb"

Previous releases - regressions:

- core: fix removing a namespace with conflicting altnames

- tc/flower: fix chain template offload memory leak

- tcp:
- make sure init the accept_queue's spinlocks once
- fix autocork on CPUs with weak memory model

- udp: fix busy polling

- mlx5e:
- fix out-of-bound read in port timestamping
- fix peer flow lists corruption

- iwlwifi: fix a memory corruption

Previous releases - always broken:

- netfilter:
- nft_chain_filter: handle NETDEV_UNREGISTER for inet/ingress
basechain
- nft_limit: reject configurations that cause integer overflow

- bpf: fix bpf_xdp_adjust_tail() with XSK zero-copy mbuf, avoiding a
NULL pointer dereference upon shrinking

- llc: make llc_ui_sendmsg() more robust against bonding changes

- smc: fix illegal rmb_desc access in SMC-D connection dump

- dpll: fix pin dump crash for rebound module

- bnxt_en: fix possible crash after creating sw mqprio TCs

- hv_netvsc: calculate correct ring size when PAGE_SIZE is not 4kB

Misc:

- several self-test fixes for better integration with the netdev CI

- added several missing modules descriptions"

* tag 'net-6.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (88 commits)
tsnep: Fix XDP_RING_NEED_WAKEUP for empty fill ring
tsnep: Remove FCS for XDP data path
net: fec: fix the unhandled context fault from smmu
selftests: bonding: do not test arp/ns target with mode balance-alb/tlb
fjes: fix memleaks in fjes_hw_setup
i40e: update xdp_rxq_info::frag_size for ZC enabled Rx queue
i40e: set xdp_rxq_info::frag_size
xdp: reflect tail increase for MEM_TYPE_XSK_BUFF_POOL
ice: update xdp_rxq_info::frag_size for ZC enabled Rx queue
intel: xsk: initialize skb_frag_t::bv_offset in ZC drivers
ice: remove redundant xdp_rxq_info registration
i40e: handle multi-buffer packets that are shrunk by xdp prog
ice: work on pre-XDP prog frag count
xsk: fix usage of multi-buffer BPF helpers for ZC XDP
xsk: make xsk_buff_pool responsible for clearing xdp_buff::flags
xsk: recycle buffer in case Rx queue was full
net: fill in MODULE_DESCRIPTION()s for rvu_mbox
net: fill in MODULE_DESCRIPTION()s for litex
net: fill in MODULE_DESCRIPTION()s for fsl_pq_mdio
net: fill in MODULE_DESCRIPTION()s for fec
...

+928 -341
+3 -2
arch/riscv/net/bpf_jit_comp64.c
··· 795 795 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; 796 796 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; 797 797 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; 798 + bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT; 798 799 void *orig_call = func_addr; 799 800 bool save_ret; 800 801 u32 insn; ··· 879 878 880 879 stack_size = round_up(stack_size, 16); 881 880 882 - if (func_addr) { 881 + if (!is_struct_ops) { 883 882 /* For the trampoline called from function entry, 884 883 * the frame of traced function and the frame of 885 884 * trampoline need to be considered. ··· 999 998 1000 999 emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx); 1001 1000 1002 - if (func_addr) { 1001 + if (!is_struct_ops) { 1003 1002 /* trampoline called from function entry */ 1004 1003 emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx); 1005 1004 emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
+57 -11
drivers/dpll/dpll_core.c
··· 29 29 WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED)) 30 30 #define ASSERT_DPLL_NOT_REGISTERED(d) \ 31 31 WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED)) 32 - #define ASSERT_PIN_REGISTERED(p) \ 33 - WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED)) 34 32 35 33 struct dpll_device_registration { 36 34 struct list_head list; ··· 423 425 } 424 426 EXPORT_SYMBOL_GPL(dpll_device_unregister); 425 427 428 + static void dpll_pin_prop_free(struct dpll_pin_properties *prop) 429 + { 430 + kfree(prop->package_label); 431 + kfree(prop->panel_label); 432 + kfree(prop->board_label); 433 + kfree(prop->freq_supported); 434 + } 435 + 436 + static int dpll_pin_prop_dup(const struct dpll_pin_properties *src, 437 + struct dpll_pin_properties *dst) 438 + { 439 + memcpy(dst, src, sizeof(*dst)); 440 + if (src->freq_supported && src->freq_supported_num) { 441 + size_t freq_size = src->freq_supported_num * 442 + sizeof(*src->freq_supported); 443 + dst->freq_supported = kmemdup(src->freq_supported, 444 + freq_size, GFP_KERNEL); 445 + if (!src->freq_supported) 446 + return -ENOMEM; 447 + } 448 + if (src->board_label) { 449 + dst->board_label = kstrdup(src->board_label, GFP_KERNEL); 450 + if (!dst->board_label) 451 + goto err_board_label; 452 + } 453 + if (src->panel_label) { 454 + dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL); 455 + if (!dst->panel_label) 456 + goto err_panel_label; 457 + } 458 + if (src->package_label) { 459 + dst->package_label = kstrdup(src->package_label, GFP_KERNEL); 460 + if (!dst->package_label) 461 + goto err_package_label; 462 + } 463 + 464 + return 0; 465 + 466 + err_package_label: 467 + kfree(dst->panel_label); 468 + err_panel_label: 469 + kfree(dst->board_label); 470 + err_board_label: 471 + kfree(dst->freq_supported); 472 + return -ENOMEM; 473 + } 474 + 426 475 static struct dpll_pin * 427 476 dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module, 428 477 const struct 
dpll_pin_properties *prop) ··· 486 441 if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX || 487 442 prop->type > DPLL_PIN_TYPE_MAX)) { 488 443 ret = -EINVAL; 489 - goto err; 444 + goto err_pin_prop; 490 445 } 491 - pin->prop = prop; 446 + ret = dpll_pin_prop_dup(prop, &pin->prop); 447 + if (ret) 448 + goto err_pin_prop; 492 449 refcount_set(&pin->refcount, 1); 493 450 xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC); 494 451 xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC); 495 452 ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b, 496 453 &dpll_pin_xa_id, GFP_KERNEL); 497 454 if (ret) 498 - goto err; 455 + goto err_xa_alloc; 499 456 return pin; 500 - err: 457 + err_xa_alloc: 501 458 xa_destroy(&pin->dpll_refs); 502 459 xa_destroy(&pin->parent_refs); 460 + dpll_pin_prop_free(&pin->prop); 461 + err_pin_prop: 503 462 kfree(pin); 504 463 return ERR_PTR(ret); 505 464 } ··· 563 514 xa_destroy(&pin->dpll_refs); 564 515 xa_destroy(&pin->parent_refs); 565 516 xa_erase(&dpll_pin_xa, pin->id); 517 + dpll_pin_prop_free(&pin->prop); 566 518 kfree(pin); 567 519 } 568 520 mutex_unlock(&dpll_lock); ··· 613 563 if (WARN_ON(!ops) || 614 564 WARN_ON(!ops->state_on_dpll_get) || 615 565 WARN_ON(!ops->direction_get)) 616 - return -EINVAL; 617 - if (ASSERT_DPLL_REGISTERED(dpll)) 618 566 return -EINVAL; 619 567 620 568 mutex_lock(&dpll_lock); ··· 684 636 unsigned long i, stop; 685 637 int ret; 686 638 687 - if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX)) 639 + if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX)) 688 640 return -EINVAL; 689 641 690 642 if (WARN_ON(!ops) || 691 643 WARN_ON(!ops->state_on_pin_get) || 692 644 WARN_ON(!ops->direction_get)) 693 - return -EINVAL; 694 - if (ASSERT_PIN_REGISTERED(parent)) 695 645 return -EINVAL; 696 646 697 647 mutex_lock(&dpll_lock);
+2 -2
drivers/dpll/dpll_core.h
··· 44 44 * @module: module of creator 45 45 * @dpll_refs: hold referencees to dplls pin was registered with 46 46 * @parent_refs: hold references to parent pins pin was registered with 47 - * @prop: pointer to pin properties given by registerer 47 + * @prop: pin properties copied from the registerer 48 48 * @rclk_dev_name: holds name of device when pin can recover clock from it 49 49 * @refcount: refcount 50 50 **/ ··· 55 55 struct module *module; 56 56 struct xarray dpll_refs; 57 57 struct xarray parent_refs; 58 - const struct dpll_pin_properties *prop; 58 + struct dpll_pin_properties prop; 59 59 refcount_t refcount; 60 60 }; 61 61
+41 -16
drivers/dpll/dpll_netlink.c
··· 303 303 if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq, 304 304 DPLL_A_PIN_PAD)) 305 305 return -EMSGSIZE; 306 - for (fs = 0; fs < pin->prop->freq_supported_num; fs++) { 306 + for (fs = 0; fs < pin->prop.freq_supported_num; fs++) { 307 307 nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED); 308 308 if (!nest) 309 309 return -EMSGSIZE; 310 - freq = pin->prop->freq_supported[fs].min; 310 + freq = pin->prop.freq_supported[fs].min; 311 311 if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq), 312 312 &freq, DPLL_A_PIN_PAD)) { 313 313 nla_nest_cancel(msg, nest); 314 314 return -EMSGSIZE; 315 315 } 316 - freq = pin->prop->freq_supported[fs].max; 316 + freq = pin->prop.freq_supported[fs].max; 317 317 if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq), 318 318 &freq, DPLL_A_PIN_PAD)) { 319 319 nla_nest_cancel(msg, nest); ··· 329 329 { 330 330 int fs; 331 331 332 - for (fs = 0; fs < pin->prop->freq_supported_num; fs++) 333 - if (freq >= pin->prop->freq_supported[fs].min && 334 - freq <= pin->prop->freq_supported[fs].max) 332 + for (fs = 0; fs < pin->prop.freq_supported_num; fs++) 333 + if (freq >= pin->prop.freq_supported[fs].min && 334 + freq <= pin->prop.freq_supported[fs].max) 335 335 return true; 336 336 return false; 337 337 } ··· 421 421 dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin, 422 422 struct netlink_ext_ack *extack) 423 423 { 424 - const struct dpll_pin_properties *prop = pin->prop; 424 + const struct dpll_pin_properties *prop = &pin->prop; 425 425 struct dpll_pin_ref *ref; 426 426 int ret; 427 427 ··· 553 553 return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll); 554 554 } 555 555 556 + static bool dpll_pin_available(struct dpll_pin *pin) 557 + { 558 + struct dpll_pin_ref *par_ref; 559 + unsigned long i; 560 + 561 + if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)) 562 + return false; 563 + xa_for_each(&pin->parent_refs, i, par_ref) 564 + if (xa_get_mark(&dpll_pin_xa, 
par_ref->pin->id, 565 + DPLL_REGISTERED)) 566 + return true; 567 + xa_for_each(&pin->dpll_refs, i, par_ref) 568 + if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id, 569 + DPLL_REGISTERED)) 570 + return true; 571 + return false; 572 + } 573 + 556 574 /** 557 575 * dpll_device_change_ntf - notify that the dpll device has been changed 558 576 * @dpll: registered dpll pointer ··· 597 579 int ret = -ENOMEM; 598 580 void *hdr; 599 581 600 - if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))) 582 + if (!dpll_pin_available(pin)) 601 583 return -ENODEV; 602 584 603 585 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); ··· 735 717 int ret; 736 718 737 719 if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE & 738 - pin->prop->capabilities)) { 720 + pin->prop.capabilities)) { 739 721 NL_SET_ERR_MSG(extack, "state changing is not allowed"); 740 722 return -EOPNOTSUPP; 741 723 } ··· 771 753 int ret; 772 754 773 755 if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE & 774 - pin->prop->capabilities)) { 756 + pin->prop.capabilities)) { 775 757 NL_SET_ERR_MSG(extack, "state changing is not allowed"); 776 758 return -EOPNOTSUPP; 777 759 } ··· 798 780 int ret; 799 781 800 782 if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE & 801 - pin->prop->capabilities)) { 783 + pin->prop.capabilities)) { 802 784 NL_SET_ERR_MSG(extack, "prio changing is not allowed"); 803 785 return -EOPNOTSUPP; 804 786 } ··· 826 808 int ret; 827 809 828 810 if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE & 829 - pin->prop->capabilities)) { 811 + pin->prop.capabilities)) { 830 812 NL_SET_ERR_MSG(extack, "direction changing is not allowed"); 831 813 return -EOPNOTSUPP; 832 814 } ··· 856 838 int ret; 857 839 858 840 phase_adj = nla_get_s32(phase_adj_attr); 859 - if (phase_adj > pin->prop->phase_range.max || 860 - phase_adj < pin->prop->phase_range.min) { 841 + if (phase_adj > pin->prop.phase_range.max || 842 + phase_adj < pin->prop.phase_range.min) { 861 843 NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr, 862 844 "phase 
adjust value not supported"); 863 845 return -EINVAL; ··· 1041 1023 unsigned long i; 1042 1024 1043 1025 xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) { 1044 - prop = pin->prop; 1026 + prop = &pin->prop; 1045 1027 cid_match = clock_id ? pin->clock_id == clock_id : true; 1046 1028 mod_match = mod_name_attr && module_name(pin->module) ? 1047 1029 !nla_strcmp(mod_name_attr, ··· 1148 1130 } 1149 1131 pin = dpll_pin_find_from_nlattr(info); 1150 1132 if (!IS_ERR(pin)) { 1133 + if (!dpll_pin_available(pin)) { 1134 + nlmsg_free(msg); 1135 + return -ENODEV; 1136 + } 1151 1137 ret = dpll_msg_add_pin_handle(msg, pin); 1152 1138 if (ret) { 1153 1139 nlmsg_free(msg); ··· 1201 1179 1202 1180 xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED, 1203 1181 ctx->idx) { 1182 + if (!dpll_pin_available(pin)) 1183 + continue; 1204 1184 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, 1205 1185 cb->nlh->nlmsg_seq, 1206 1186 &dpll_nl_family, NLM_F_MULTI, ··· 1465 1441 } 1466 1442 info->user_ptr[0] = xa_load(&dpll_pin_xa, 1467 1443 nla_get_u32(info->attrs[DPLL_A_PIN_ID])); 1468 - if (!info->user_ptr[0]) { 1444 + if (!info->user_ptr[0] || 1445 + !dpll_pin_available(info->user_ptr[0])) { 1469 1446 NL_SET_ERR_MSG(info->extack, "pin not found"); 1470 1447 ret = -ENODEV; 1471 1448 goto unlock_dev;
+1
drivers/net/ethernet/8390/8390.c
··· 100 100 module_init(ns8390_module_init); 101 101 module_exit(ns8390_module_exit); 102 102 #endif /* MODULE */ 103 + MODULE_DESCRIPTION("National Semiconductor 8390 core driver"); 103 104 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/8390p.c
··· 102 102 103 103 module_init(NS8390p_init_module); 104 104 module_exit(NS8390p_cleanup_module); 105 + MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver"); 105 106 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/apne.c
··· 610 610 return 1; 611 611 } 612 612 613 + MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver"); 613 614 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/hydra.c
··· 270 270 module_init(hydra_init_module); 271 271 module_exit(hydra_cleanup_module); 272 272 273 + MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver"); 273 274 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/stnic.c
··· 296 296 297 297 module_init(stnic_probe); 298 298 module_exit(stnic_cleanup); 299 + MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver"); 299 300 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/8390/zorro8390.c
··· 443 443 module_init(zorro8390_init_module); 444 444 module_exit(zorro8390_cleanup_module); 445 445 446 + MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver"); 446 447 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/broadcom/bcm4908_enet.c
··· 793 793 }; 794 794 module_platform_driver(bcm4908_enet_driver); 795 795 796 + MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver"); 796 797 MODULE_LICENSE("GPL v2"); 797 798 MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
+1
drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
··· 260 260 EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister); 261 261 262 262 MODULE_AUTHOR("Rafał Miłecki"); 263 + MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers"); 263 264 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/broadcom/bgmac-bcma.c
··· 362 362 module_exit(bgmac_exit) 363 363 364 364 MODULE_AUTHOR("Rafał Miłecki"); 365 + MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver"); 365 366 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/broadcom/bgmac-platform.c
··· 298 298 }; 299 299 300 300 module_platform_driver(bgmac_enet_driver); 301 + MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver"); 301 302 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/broadcom/bgmac.c
··· 1626 1626 EXPORT_SYMBOL_GPL(bgmac_enet_resume); 1627 1627 1628 1628 MODULE_AUTHOR("Rafał Miłecki"); 1629 + MODULE_DESCRIPTION("Broadcom iProc GBit driver"); 1629 1630 MODULE_LICENSE("GPL");
+35 -14
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3817 3817 { 3818 3818 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); 3819 3819 int i, j, rc, ulp_base_vec, ulp_msix; 3820 - int tcs = netdev_get_num_tc(bp->dev); 3820 + int tcs = bp->num_tc; 3821 3821 3822 3822 if (!tcs) 3823 3823 tcs = 1; ··· 5935 5935 5936 5936 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) 5937 5937 { 5938 - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 5939 - return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); 5938 + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 5939 + if (!rx_rings) 5940 + return 0; 5941 + return bnxt_calc_nr_ring_pages(rx_rings - 1, 5942 + BNXT_RSS_TABLE_ENTRIES_P5); 5943 + } 5940 5944 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) 5941 5945 return 2; 5942 5946 return 1; ··· 6930 6926 if (cp < (rx + tx)) { 6931 6927 rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false); 6932 6928 if (rc) 6933 - return rc; 6929 + goto get_rings_exit; 6934 6930 if (bp->flags & BNXT_FLAG_AGG_RINGS) 6935 6931 rx <<= 1; 6936 6932 hw_resc->resv_rx_rings = rx; ··· 6942 6938 hw_resc->resv_cp_rings = cp; 6943 6939 hw_resc->resv_stat_ctxs = stats; 6944 6940 } 6941 + get_rings_exit: 6945 6942 hwrm_req_drop(bp, req); 6946 - return 0; 6943 + return rc; 6947 6944 } 6948 6945 6949 6946 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) ··· 7005 7000 7006 7001 req->num_rx_rings = cpu_to_le16(rx_rings); 7007 7002 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 7003 + u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); 7004 + 7008 7005 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 7009 7006 req->num_msix = cpu_to_le16(cp_rings); 7010 - req->num_rsscos_ctxs = 7011 - cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 7007 + req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); 7012 7008 } else { 7013 7009 req->num_cmpl_rings = cpu_to_le16(cp_rings); 7014 7010 req->num_hw_ring_grps = cpu_to_le16(ring_grps); ··· 7056 7050 req->num_tx_rings = cpu_to_le16(tx_rings); 7057 7051 req->num_rx_rings = cpu_to_le16(rx_rings); 7058 7052 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { 
7053 + u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); 7054 + 7059 7055 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); 7060 - req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64)); 7056 + req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); 7061 7057 } else { 7062 7058 req->num_cmpl_rings = cpu_to_le16(cp_rings); 7063 7059 req->num_hw_ring_grps = cpu_to_le16(ring_grps); ··· 9946 9938 9947 9939 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) 9948 9940 { 9949 - int tcs = netdev_get_num_tc(bp->dev); 9941 + int tcs = bp->num_tc; 9950 9942 9951 9943 if (!tcs) 9952 9944 tcs = 1; ··· 9955 9947 9956 9948 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) 9957 9949 { 9958 - int tcs = netdev_get_num_tc(bp->dev); 9950 + int tcs = bp->num_tc; 9959 9951 9960 9952 return (tx_cp - bp->tx_nr_rings_xdp) * tcs + 9961 9953 bp->tx_nr_rings_xdp; ··· 9985 9977 struct net_device *dev = bp->dev; 9986 9978 int tcs, i; 9987 9979 9988 - tcs = netdev_get_num_tc(dev); 9980 + tcs = bp->num_tc; 9989 9981 if (tcs) { 9990 9982 int i, off, count; 9991 9983 ··· 10017 10009 { 10018 10010 const int len = sizeof(bp->irq_tbl[0].name); 10019 10011 10020 - if (netdev_get_num_tc(bp->dev)) 10012 + if (bp->num_tc) { 10021 10013 netdev_reset_tc(bp->dev); 10014 + bp->num_tc = 0; 10015 + } 10022 10016 10023 10017 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx", 10024 10018 0); ··· 10246 10236 10247 10237 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) 10248 10238 { 10249 - int tcs = netdev_get_num_tc(bp->dev); 10250 10239 bool irq_cleared = false; 10240 + int tcs = bp->num_tc; 10251 10241 int rc; 10252 10242 10253 10243 if (!bnxt_need_reserve_rings(bp)) ··· 10273 10263 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { 10274 10264 netdev_err(bp->dev, "tx ring reservation failure\n"); 10275 10265 netdev_reset_tc(bp->dev); 10266 + bp->num_tc = 0; 10276 10267 if (bp->tx_nr_rings_xdp) 10277 10268 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; 10278 10269 else ··· 11575 11564 
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); 11576 11565 goto half_open_err; 11577 11566 } 11567 + bnxt_init_napi(bp); 11578 11568 set_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11579 11569 rc = bnxt_init_nic(bp, true); 11580 11570 if (rc) { 11581 11571 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); 11572 + bnxt_del_napi(bp); 11582 11573 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); 11583 11574 goto half_open_err; 11584 11575 } ··· 11599 11586 void bnxt_half_close_nic(struct bnxt *bp) 11600 11587 { 11601 11588 bnxt_hwrm_resource_free(bp, false, true); 11589 + bnxt_del_napi(bp); 11602 11590 bnxt_free_skbs(bp); 11603 11591 bnxt_free_mem(bp, true); 11604 11592 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); ··· 13246 13232 13247 13233 bp->fw_cap = 0; 13248 13234 rc = bnxt_hwrm_ver_get(bp); 13235 + /* FW may be unresponsive after FLR. FLR must complete within 100 msec 13236 + * so wait before continuing with recovery. 13237 + */ 13238 + if (rc) 13239 + msleep(100); 13249 13240 bnxt_try_map_fw_health_reg(bp); 13250 13241 if (rc) { 13251 13242 rc = bnxt_try_recover_fw(bp); ··· 13803 13784 return -EINVAL; 13804 13785 } 13805 13786 13806 - if (netdev_get_num_tc(dev) == tc) 13787 + if (bp->num_tc == tc) 13807 13788 return 0; 13808 13789 13809 13790 if (bp->flags & BNXT_FLAG_SHARED_RINGS) ··· 13821 13802 if (tc) { 13822 13803 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; 13823 13804 netdev_set_num_tc(dev, tc); 13805 + bp->num_tc = tc; 13824 13806 } else { 13825 13807 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; 13826 13808 netdev_reset_tc(dev); 13809 + bp->num_tc = 0; 13827 13810 } 13828 13811 bp->tx_nr_rings += bp->tx_nr_rings_xdp; 13829 13812 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
+1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 2225 2225 u8 tc_to_qidx[BNXT_MAX_QUEUE]; 2226 2226 u8 q_ids[BNXT_MAX_QUEUE]; 2227 2227 u8 max_q; 2228 + u8 num_tc; 2228 2229 2229 2230 unsigned int current_interval; 2230 2231 #define BNXT_TIMER_INTERVAL HZ
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
··· 228 228 } 229 229 } 230 230 if (bp->ieee_ets) { 231 - int tc = netdev_get_num_tc(bp->dev); 231 + int tc = bp->num_tc; 232 232 233 233 if (!tc) 234 234 tc = 1;
+4 -3
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 884 884 if (max_tx_sch_inputs) 885 885 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); 886 886 887 - tcs = netdev_get_num_tc(dev); 887 + tcs = bp->num_tc; 888 888 tx_grps = max(tcs, 1); 889 889 if (bp->tx_nr_rings_xdp) 890 890 tx_grps++; ··· 944 944 if (channel->combined_count) 945 945 sh = true; 946 946 947 - tcs = netdev_get_num_tc(dev); 947 + tcs = bp->num_tc; 948 948 949 949 req_tx_rings = sh ? channel->combined_count : channel->tx_count; 950 950 req_rx_rings = sh ? channel->combined_count : channel->rx_count; ··· 1574 1574 struct bnxt *bp = netdev_priv(dev); 1575 1575 1576 1576 if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) 1577 - return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5); 1577 + return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * 1578 + BNXT_RSS_TABLE_ENTRIES_P5; 1578 1579 return HW_HASH_INDEX_SIZE; 1579 1580 } 1580 1581
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
··· 407 407 if (prog) 408 408 tx_xdp = bp->rx_nr_rings; 409 409 410 - tc = netdev_get_num_tc(dev); 410 + tc = bp->num_tc; 411 411 if (!tc) 412 412 tc = 1; 413 413 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+1
drivers/net/ethernet/cavium/liquidio/lio_core.c
··· 27 27 #include "octeon_network.h" 28 28 29 29 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); 30 + MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core"); 30 31 MODULE_LICENSE("GPL"); 31 32 32 33 /* OOM task polling interval */
+1
drivers/net/ethernet/cirrus/ep93xx_eth.c
··· 868 868 869 869 module_platform_driver(ep93xx_eth_driver); 870 870 871 + MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver"); 871 872 MODULE_LICENSE("GPL"); 872 873 MODULE_ALIAS("platform:ep93xx-eth");
+15 -2
drivers/net/ethernet/engleder/tsnep_main.c
··· 1485 1485 1486 1486 xdp_prepare_buff(&xdp, page_address(entry->page), 1487 1487 XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE, 1488 - length, false); 1488 + length - ETH_FCS_LEN, false); 1489 1489 1490 1490 consume = tsnep_xdp_run_prog(rx, prog, &xdp, 1491 1491 &xdp_status, tx_nq, tx); ··· 1568 1568 prefetch(entry->xdp->data); 1569 1569 length = __le32_to_cpu(entry->desc_wb->properties) & 1570 1570 TSNEP_DESC_LENGTH_MASK; 1571 - xsk_buff_set_size(entry->xdp, length); 1571 + xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN); 1572 1572 xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool); 1573 1573 1574 1574 /* RX metadata with timestamps is in front of actual data, ··· 1761 1761 1762 1762 allocated--; 1763 1763 } 1764 + } 1765 + 1766 + /* set need wakeup flag immediately if ring is not filled completely, 1767 + * first polling would be too late as need wakeup signalisation would 1768 + * be delayed for an indefinite time 1769 + */ 1770 + if (xsk_uses_need_wakeup(rx->xsk_pool)) { 1771 + int desc_available = tsnep_rx_desc_available(rx); 1772 + 1773 + if (desc_available) 1774 + xsk_set_rx_need_wakeup(rx->xsk_pool); 1775 + else 1776 + xsk_clear_rx_need_wakeup(rx->xsk_pool); 1764 1777 } 1765 1778 } 1766 1779
+1
drivers/net/ethernet/ezchip/nps_enet.c
··· 661 661 module_platform_driver(nps_enet_driver); 662 662 663 663 MODULE_AUTHOR("EZchip Semiconductor"); 664 + MODULE_DESCRIPTION("EZchip NPS Ethernet driver"); 664 665 MODULE_LICENSE("GPL v2");
+1
drivers/net/ethernet/freescale/enetc/enetc.c
··· 3216 3216 } 3217 3217 EXPORT_SYMBOL_GPL(enetc_pci_remove); 3218 3218 3219 + MODULE_DESCRIPTION("NXP ENETC Ethernet driver"); 3219 3220 MODULE_LICENSE("Dual BSD/GPL");
+3
drivers/net/ethernet/freescale/fec_main.c
··· 2036 2036 2037 2037 /* if any of the above changed restart the FEC */ 2038 2038 if (status_change) { 2039 + netif_stop_queue(ndev); 2039 2040 napi_disable(&fep->napi); 2040 2041 netif_tx_lock_bh(ndev); 2041 2042 fec_restart(ndev); ··· 2046 2045 } 2047 2046 } else { 2048 2047 if (fep->link) { 2048 + netif_stop_queue(ndev); 2049 2049 napi_disable(&fep->napi); 2050 2050 netif_tx_lock_bh(ndev); 2051 2051 fec_stop(ndev); ··· 4771 4769 4772 4770 module_platform_driver(fec_driver); 4773 4771 4772 + MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver"); 4774 4773 MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/freescale/fsl_pq_mdio.c
··· 531 531 532 532 module_platform_driver(fsl_pq_mdio_driver); 533 533 534 + MODULE_DESCRIPTION("Freescale PQ MDIO helpers"); 534 535 MODULE_LICENSE("GPL");
+31 -16
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 3588 3588 struct i40e_hmc_obj_rxq rx_ctx; 3589 3589 int err = 0; 3590 3590 bool ok; 3591 - int ret; 3592 3591 3593 3592 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); 3594 3593 3595 3594 /* clear the context structure first */ 3596 3595 memset(&rx_ctx, 0, sizeof(rx_ctx)); 3597 3596 3598 - if (ring->vsi->type == I40E_VSI_MAIN) 3599 - xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 3597 + ring->rx_buf_len = vsi->rx_buf_len; 3598 + 3599 + /* XDP RX-queue info only needed for RX rings exposed to XDP */ 3600 + if (ring->vsi->type != I40E_VSI_MAIN) 3601 + goto skip; 3602 + 3603 + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { 3604 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 3605 + ring->queue_index, 3606 + ring->q_vector->napi.napi_id, 3607 + ring->rx_buf_len); 3608 + if (err) 3609 + return err; 3610 + } 3600 3611 3601 3612 ring->xsk_pool = i40e_xsk_pool(ring); 3602 3613 if (ring->xsk_pool) { 3603 - ring->rx_buf_len = 3604 - xsk_pool_get_rx_frame_size(ring->xsk_pool); 3605 - ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 3614 + xdp_rxq_info_unreg(&ring->xdp_rxq); 3615 + ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); 3616 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 3617 + ring->queue_index, 3618 + ring->q_vector->napi.napi_id, 3619 + ring->rx_buf_len); 3620 + if (err) 3621 + return err; 3622 + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 3606 3623 MEM_TYPE_XSK_BUFF_POOL, 3607 3624 NULL); 3608 - if (ret) 3609 - return ret; 3625 + if (err) 3626 + return err; 3610 3627 dev_info(&vsi->back->pdev->dev, 3611 3628 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", 3612 3629 ring->queue_index); 3613 3630 3614 3631 } else { 3615 - ring->rx_buf_len = vsi->rx_buf_len; 3616 - if (ring->vsi->type == I40E_VSI_MAIN) { 3617 - ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 3618 - MEM_TYPE_PAGE_SHARED, 3619 - NULL); 3620 - if (ret) 3621 - return ret; 3622 - } 3632 + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 3633 + 
MEM_TYPE_PAGE_SHARED, 3634 + NULL); 3635 + if (err) 3636 + return err; 3623 3637 } 3624 3638 3639 + skip: 3625 3640 xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq); 3626 3641 3627 3642 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+23 -26
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 1548 1548 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) 1549 1549 { 1550 1550 struct device *dev = rx_ring->dev; 1551 - int err; 1552 1551 1553 1552 u64_stats_init(&rx_ring->syncp); 1554 1553 ··· 1567 1568 rx_ring->next_to_clean = 0; 1568 1569 rx_ring->next_to_process = 0; 1569 1570 rx_ring->next_to_use = 0; 1570 - 1571 - /* XDP RX-queue info only needed for RX rings exposed to XDP */ 1572 - if (rx_ring->vsi->type == I40E_VSI_MAIN) { 1573 - err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 1574 - rx_ring->queue_index, rx_ring->q_vector->napi.napi_id); 1575 - if (err < 0) 1576 - return err; 1577 - } 1578 1571 1579 1572 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; 1580 1573 ··· 2078 2087 static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res, 2079 2088 struct xdp_buff *xdp) 2080 2089 { 2081 - u32 next = rx_ring->next_to_clean; 2090 + u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; 2091 + u32 next = rx_ring->next_to_clean, i = 0; 2082 2092 struct i40e_rx_buffer *rx_buffer; 2083 2093 2084 2094 xdp->flags = 0; ··· 2092 2100 if (!rx_buffer->page) 2093 2101 continue; 2094 2102 2095 - if (xdp_res == I40E_XDP_CONSUMED) 2096 - rx_buffer->pagecnt_bias++; 2097 - else 2103 + if (xdp_res != I40E_XDP_CONSUMED) 2098 2104 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); 2105 + else if (i++ <= nr_frags) 2106 + rx_buffer->pagecnt_bias++; 2099 2107 2100 2108 /* EOP buffer will be put in i40e_clean_rx_irq() */ 2101 2109 if (next == rx_ring->next_to_process) ··· 2109 2117 * i40e_construct_skb - Allocate skb and populate it 2110 2118 * @rx_ring: rx descriptor ring to transact packets on 2111 2119 * @xdp: xdp_buff pointing to the data 2112 - * @nr_frags: number of buffers for the packet 2113 2120 * 2114 2121 * This function allocates an skb. It then populates it with the page 2115 2122 * data from the current receive descriptor, taking care to set up the 2116 2123 * skb correctly. 
2117 2124 */ 2118 2125 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, 2119 - struct xdp_buff *xdp, 2120 - u32 nr_frags) 2126 + struct xdp_buff *xdp) 2121 2127 { 2122 2128 unsigned int size = xdp->data_end - xdp->data; 2123 2129 struct i40e_rx_buffer *rx_buffer; 2130 + struct skb_shared_info *sinfo; 2124 2131 unsigned int headlen; 2125 2132 struct sk_buff *skb; 2133 + u32 nr_frags = 0; 2126 2134 2127 2135 /* prefetch first cache line of first page */ 2128 2136 net_prefetch(xdp->data); ··· 2160 2168 memcpy(__skb_put(skb, headlen), xdp->data, 2161 2169 ALIGN(headlen, sizeof(long))); 2162 2170 2171 + if (unlikely(xdp_buff_has_frags(xdp))) { 2172 + sinfo = xdp_get_shared_info_from_buff(xdp); 2173 + nr_frags = sinfo->nr_frags; 2174 + } 2163 2175 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); 2164 2176 /* update all of the pointers */ 2165 2177 size -= headlen; ··· 2183 2187 } 2184 2188 2185 2189 if (unlikely(xdp_buff_has_frags(xdp))) { 2186 - struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb); 2190 + struct skb_shared_info *skinfo = skb_shinfo(skb); 2187 2191 2188 - sinfo = xdp_get_shared_info_from_buff(xdp); 2189 2192 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], 2190 2193 sizeof(skb_frag_t) * nr_frags); 2191 2194 ··· 2207 2212 * i40e_build_skb - Build skb around an existing buffer 2208 2213 * @rx_ring: Rx descriptor ring to transact packets on 2209 2214 * @xdp: xdp_buff pointing to the data 2210 - * @nr_frags: number of buffers for the packet 2211 2215 * 2212 2216 * This function builds an skb around an existing Rx buffer, taking care 2213 2217 * to set up the skb correctly and avoid any memcpy overhead. 
2214 2218 */ 2215 2219 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, 2216 - struct xdp_buff *xdp, 2217 - u32 nr_frags) 2220 + struct xdp_buff *xdp) 2218 2221 { 2219 2222 unsigned int metasize = xdp->data - xdp->data_meta; 2223 + struct skb_shared_info *sinfo; 2220 2224 struct sk_buff *skb; 2225 + u32 nr_frags; 2221 2226 2222 2227 /* Prefetch first cache line of first page. If xdp->data_meta 2223 2228 * is unused, this points exactly as xdp->data, otherwise we ··· 2225 2230 * data, and then actual data. 2226 2231 */ 2227 2232 net_prefetch(xdp->data_meta); 2233 + 2234 + if (unlikely(xdp_buff_has_frags(xdp))) { 2235 + sinfo = xdp_get_shared_info_from_buff(xdp); 2236 + nr_frags = sinfo->nr_frags; 2237 + } 2228 2238 2229 2239 /* build an skb around the page buffer */ 2230 2240 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); ··· 2243 2243 skb_metadata_set(skb, metasize); 2244 2244 2245 2245 if (unlikely(xdp_buff_has_frags(xdp))) { 2246 - struct skb_shared_info *sinfo; 2247 - 2248 - sinfo = xdp_get_shared_info_from_buff(xdp); 2249 2246 xdp_update_skb_shared_info(skb, nr_frags, 2250 2247 sinfo->xdp_frags_size, 2251 2248 nr_frags * xdp->frame_sz, ··· 2586 2589 total_rx_bytes += size; 2587 2590 } else { 2588 2591 if (ring_uses_build_skb(rx_ring)) 2589 - skb = i40e_build_skb(rx_ring, xdp, nfrags); 2592 + skb = i40e_build_skb(rx_ring, xdp); 2590 2593 else 2591 - skb = i40e_construct_skb(rx_ring, xdp, nfrags); 2594 + skb = i40e_construct_skb(rx_ring, xdp); 2592 2595 2593 2596 /* drop if we failed to retrieve a buffer */ 2594 2597 if (!skb) {
+2 -2
drivers/net/ethernet/intel/i40e/i40e_xsk.c
··· 414 414 } 415 415 416 416 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, 417 - virt_to_page(xdp->data_hard_start), 0, size); 417 + virt_to_page(xdp->data_hard_start), 418 + XDP_PACKET_HEADROOM, size); 418 419 sinfo->xdp_frags_size += size; 419 420 xsk_buff_add_frag(xdp); 420 421 ··· 499 498 xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog); 500 499 i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets, 501 500 &rx_bytes, xdp_res, &failure); 502 - first->flags = 0; 503 501 next_to_clean = next_to_process; 504 502 if (failure) 505 503 break;
+23 -14
drivers/net/ethernet/intel/ice/ice_base.c
··· 547 547 ring->rx_buf_len = ring->vsi->rx_buf_len; 548 548 549 549 if (ring->vsi->type == ICE_VSI_PF) { 550 - if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) 551 - /* coverity[check_return] */ 552 - __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 553 - ring->q_index, 554 - ring->q_vector->napi.napi_id, 555 - ring->vsi->rx_buf_len); 550 + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { 551 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 552 + ring->q_index, 553 + ring->q_vector->napi.napi_id, 554 + ring->rx_buf_len); 555 + if (err) 556 + return err; 557 + } 556 558 557 559 ring->xsk_pool = ice_xsk_pool(ring); 558 560 if (ring->xsk_pool) { 559 - xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); 561 + xdp_rxq_info_unreg(&ring->xdp_rxq); 560 562 561 563 ring->rx_buf_len = 562 564 xsk_pool_get_rx_frame_size(ring->xsk_pool); 565 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 566 + ring->q_index, 567 + ring->q_vector->napi.napi_id, 568 + ring->rx_buf_len); 569 + if (err) 570 + return err; 563 571 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 564 572 MEM_TYPE_XSK_BUFF_POOL, 565 573 NULL); ··· 579 571 dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n", 580 572 ring->q_index); 581 573 } else { 582 - if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) 583 - /* coverity[check_return] */ 584 - __xdp_rxq_info_reg(&ring->xdp_rxq, 585 - ring->netdev, 586 - ring->q_index, 587 - ring->q_vector->napi.napi_id, 588 - ring->vsi->rx_buf_len); 574 + if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { 575 + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, 576 + ring->q_index, 577 + ring->q_vector->napi.napi_id, 578 + ring->rx_buf_len); 579 + if (err) 580 + return err; 581 + } 589 582 590 583 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, 591 584 MEM_TYPE_PAGE_SHARED,
+9 -10
drivers/net/ethernet/intel/ice/ice_txrx.c
··· 513 513 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) 514 514 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); 515 515 516 - if (rx_ring->vsi->type == ICE_VSI_PF && 517 - !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) 518 - if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, 519 - rx_ring->q_index, rx_ring->q_vector->napi.napi_id)) 520 - goto err; 521 516 return 0; 522 517 523 518 err: ··· 598 603 ret = ICE_XDP_CONSUMED; 599 604 } 600 605 exit: 601 - rx_buf->act = ret; 602 - if (unlikely(xdp_buff_has_frags(xdp))) 603 - ice_set_rx_bufs_act(xdp, rx_ring, ret); 606 + ice_set_rx_bufs_act(xdp, rx_ring, ret); 604 607 } 605 608 606 609 /** ··· 886 893 } 887 894 888 895 if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { 889 - if (unlikely(xdp_buff_has_frags(xdp))) 890 - ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); 896 + ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); 891 897 return -ENOMEM; 892 898 } 893 899 894 900 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, 895 901 rx_buf->page_offset, size); 896 902 sinfo->xdp_frags_size += size; 903 + /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail() 904 + * can pop off frags but driver has to handle it on its own 905 + */ 906 + rx_ring->nr_frags = sinfo->nr_frags; 897 907 898 908 if (page_is_pfmemalloc(rx_buf->page)) 899 909 xdp_buff_set_frag_pfmemalloc(xdp); ··· 1247 1251 1248 1252 xdp->data = NULL; 1249 1253 rx_ring->first_desc = ntc; 1254 + rx_ring->nr_frags = 0; 1250 1255 continue; 1251 1256 construct_skb: 1252 1257 if (likely(ice_ring_uses_build_skb(rx_ring))) ··· 1263 1266 ICE_XDP_CONSUMED); 1264 1267 xdp->data = NULL; 1265 1268 rx_ring->first_desc = ntc; 1269 + rx_ring->nr_frags = 0; 1266 1270 break; 1267 1271 } 1268 1272 xdp->data = NULL; 1269 1273 rx_ring->first_desc = ntc; 1274 + rx_ring->nr_frags = 0; 1270 1275 1271 1276 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); 1272 1277 if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+1
drivers/net/ethernet/intel/ice/ice_txrx.h
··· 358 358 struct ice_tx_ring *xdp_ring; 359 359 struct ice_rx_ring *next; /* pointer to next ring in q_vector */ 360 360 struct xsk_buff_pool *xsk_pool; 361 + u32 nr_frags; 361 362 dma_addr_t dma; /* physical address of ring */ 362 363 u16 rx_buf_len; 363 364 u8 dcb_tc; /* Traffic class of ring */
+22 -9
drivers/net/ethernet/intel/ice/ice_txrx_lib.h
··· 12 12 * act: action to store onto Rx buffers related to XDP buffer parts 13 13 * 14 14 * Set action that should be taken before putting Rx buffer from first frag 15 - * to one before last. Last one is handled by caller of this function as it 16 - * is the EOP frag that is currently being processed. This function is 17 - * supposed to be called only when XDP buffer contains frags. 15 + * to the last. 18 16 */ 19 17 static inline void 20 18 ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring, 21 19 const unsigned int act) 22 20 { 23 - const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); 24 - u32 first = rx_ring->first_desc; 25 - u32 nr_frags = sinfo->nr_frags; 21 + u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; 22 + u32 nr_frags = rx_ring->nr_frags + 1; 23 + u32 idx = rx_ring->first_desc; 26 24 u32 cnt = rx_ring->count; 27 25 struct ice_rx_buf *buf; 28 26 29 27 for (int i = 0; i < nr_frags; i++) { 30 - buf = &rx_ring->rx_buf[first]; 28 + buf = &rx_ring->rx_buf[idx]; 31 29 buf->act = act; 32 30 33 - if (++first == cnt) 34 - first = 0; 31 + if (++idx == cnt) 32 + idx = 0; 33 + } 34 + 35 + /* adjust pagecnt_bias on frags freed by XDP prog */ 36 + if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) { 37 + u32 delta = rx_ring->nr_frags - sinfo_frags; 38 + 39 + while (delta) { 40 + if (idx == 0) 41 + idx = cnt - 1; 42 + else 43 + idx--; 44 + buf = &rx_ring->rx_buf[idx]; 45 + buf->pagecnt_bias--; 46 + delta--; 47 + } 35 48 } 36 49 } 37 50
+2 -2
drivers/net/ethernet/intel/ice/ice_xsk.c
··· 825 825 } 826 826 827 827 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, 828 - virt_to_page(xdp->data_hard_start), 0, size); 828 + virt_to_page(xdp->data_hard_start), 829 + XDP_PACKET_HEADROOM, size); 829 830 sinfo->xdp_frags_size += size; 830 831 xsk_buff_add_frag(xdp); 831 832 ··· 896 895 897 896 if (!first) { 898 897 first = xdp; 899 - xdp_buff_clear_frags_flag(first); 900 898 } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) { 901 899 break; 902 900 }
+2
drivers/net/ethernet/intel/idpf/idpf_lib.c
··· 783 783 /* setup watchdog timeout value to be 5 second */ 784 784 netdev->watchdog_timeo = 5 * HZ; 785 785 786 + netdev->dev_port = idx; 787 + 786 788 /* configure default MTU size */ 787 789 netdev->min_mtu = ETH_MIN_MTU; 788 790 netdev->max_mtu = vport->max_mtu;
+1
drivers/net/ethernet/litex/litex_liteeth.c
··· 318 318 module_platform_driver(liteeth_driver); 319 319 320 320 MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>"); 321 + MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver"); 321 322 MODULE_LICENSE("GPL");
+26 -1
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 614 614 mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val); 615 615 } 616 616 617 + /* Cleanup pool before actual initialization in the OS */ 618 + static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id) 619 + { 620 + unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu()); 621 + u32 val; 622 + int i; 623 + 624 + /* Drain the BM from all possible residues left by firmware */ 625 + for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++) 626 + mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id)); 627 + 628 + put_cpu(); 629 + 630 + /* Stop the BM pool */ 631 + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id)); 632 + val |= MVPP2_BM_STOP_MASK; 633 + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val); 634 + } 635 + 617 636 static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) 618 637 { 619 638 enum dma_data_direction dma_dir = DMA_FROM_DEVICE; 620 639 int i, err, poolnum = MVPP2_BM_POOLS_NUM; 621 640 struct mvpp2_port *port; 641 + 642 + if (priv->percpu_pools) 643 + poolnum = mvpp2_get_nrxqs(priv) * 2; 644 + 645 + /* Clean up the pool state in case it contains stale state */ 646 + for (i = 0; i < poolnum; i++) 647 + mvpp2_bm_pool_cleanup(priv, i); 622 648 623 649 if (priv->percpu_pools) { 624 650 for (i = 0; i < priv->port_count; i++) { ··· 655 629 } 656 630 } 657 631 658 - poolnum = mvpp2_get_nrxqs(priv) * 2; 659 632 for (i = 0; i < poolnum; i++) { 660 633 /* the pool in use */ 661 634 int pn = i / (poolnum / 2);
+1
drivers/net/ethernet/marvell/octeontx2/af/mbox.c
··· 413 413 EXPORT_SYMBOL(otx2_mbox_id2name); 414 414 415 415 MODULE_AUTHOR("Marvell."); 416 + MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers"); 416 417 MODULE_LICENSE("GPL v2");
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 1923 1923 { 1924 1924 const char *namep = mlx5_command_str(opcode); 1925 1925 struct mlx5_cmd_stats *stats; 1926 + unsigned long flags; 1926 1927 1927 1928 if (!err || !(strcmp(namep, "unknown command opcode"))) 1928 1929 return; ··· 1931 1930 stats = xa_load(&dev->cmd.stats, opcode); 1932 1931 if (!stats) 1933 1932 return; 1934 - spin_lock_irq(&stats->lock); 1933 + spin_lock_irqsave(&stats->lock, flags); 1935 1934 stats->failed++; 1936 1935 if (err < 0) 1937 1936 stats->last_failed_errno = -err; ··· 1940 1939 stats->last_failed_mbox_status = status; 1941 1940 stats->last_failed_syndrome = syndrome; 1942 1941 } 1943 - spin_unlock_irq(&stats->lock); 1942 + spin_unlock_irqrestore(&stats->lock, flags); 1944 1943 } 1945 1944 1946 1945 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 1124 1124 extern const struct ethtool_ops mlx5e_ethtool_ops; 1125 1125 1126 1126 int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey); 1127 - int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); 1127 + int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises); 1128 1128 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); 1129 1129 int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, 1130 1130 bool enable_mc_lb);
+1
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
··· 436 436 in = kvzalloc(inlen, GFP_KERNEL); 437 437 if (!in || !ft->g) { 438 438 kfree(ft->g); 439 + ft->g = NULL; 439 440 kvfree(in); 440 441 return -ENOMEM; 441 442 }
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 1064 1064 void *wq = MLX5_ADDR_OF(sqc, sqc, wq); 1065 1065 bool allow_swp; 1066 1066 1067 - allow_swp = 1068 - mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev); 1067 + allow_swp = mlx5_geneve_tx_allowed(mdev) || 1068 + (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO); 1069 1069 mlx5e_build_sq_param_common(mdev, param); 1070 1070 MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); 1071 1071 MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
··· 213 213 mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp); 214 214 out: 215 215 napi_consume_skb(skb, budget); 216 - md_buff[*md_buff_sz++] = metadata_id; 216 + md_buff[(*md_buff_sz)++] = metadata_id; 217 217 if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) && 218 218 !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) 219 219 queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
+8 -2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
··· 336 336 /* iv len */ 337 337 aes_gcm->icv_len = x->aead->alg_icv_len; 338 338 339 + attrs->dir = x->xso.dir; 340 + 339 341 /* esn */ 340 342 if (x->props.flags & XFRM_STATE_ESN) { 341 343 attrs->replay_esn.trigger = true; 342 344 attrs->replay_esn.esn = sa_entry->esn_state.esn; 343 345 attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb; 344 346 attrs->replay_esn.overlap = sa_entry->esn_state.overlap; 347 + if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) 348 + goto skip_replay_window; 349 + 345 350 switch (x->replay_esn->replay_window) { 346 351 case 32: 347 352 attrs->replay_esn.replay_window = ··· 370 365 } 371 366 } 372 367 373 - attrs->dir = x->xso.dir; 368 + skip_replay_window: 374 369 /* spi */ 375 370 attrs->spi = be32_to_cpu(x->id.spi); 376 371 ··· 506 501 return -EINVAL; 507 502 } 508 503 509 - if (x->replay_esn && x->replay_esn->replay_window != 32 && 504 + if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN && 505 + x->replay_esn->replay_window != 32 && 510 506 x->replay_esn->replay_window != 64 && 511 507 x->replay_esn->replay_window != 128 && 512 508 x->replay_esn->replay_window != 256) {
+15 -11
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
··· 254 254 255 255 ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS, 256 256 sizeof(*ft->g), GFP_KERNEL); 257 - in = kvzalloc(inlen, GFP_KERNEL); 258 - if (!in || !ft->g) { 259 - kfree(ft->g); 260 - kvfree(in); 257 + if (!ft->g) 261 258 return -ENOMEM; 259 + 260 + in = kvzalloc(inlen, GFP_KERNEL); 261 + if (!in) { 262 + err = -ENOMEM; 263 + goto err_free_g; 262 264 } 263 265 264 266 mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); ··· 280 278 break; 281 279 default: 282 280 err = -EINVAL; 283 - goto out; 281 + goto err_free_in; 284 282 } 285 283 286 284 switch (type) { ··· 302 300 break; 303 301 default: 304 302 err = -EINVAL; 305 - goto out; 303 + goto err_free_in; 306 304 } 307 305 308 306 MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); ··· 311 309 MLX5_SET_CFG(in, end_flow_index, ix - 1); 312 310 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 313 311 if (IS_ERR(ft->g[ft->num_groups])) 314 - goto err; 312 + goto err_clean_group; 315 313 ft->num_groups++; 316 314 317 315 memset(in, 0, inlen); ··· 320 318 MLX5_SET_CFG(in, end_flow_index, ix - 1); 321 319 ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); 322 320 if (IS_ERR(ft->g[ft->num_groups])) 323 - goto err; 321 + goto err_clean_group; 324 322 ft->num_groups++; 325 323 326 324 kvfree(in); 327 325 return 0; 328 326 329 - err: 327 + err_clean_group: 330 328 err = PTR_ERR(ft->g[ft->num_groups]); 331 329 ft->g[ft->num_groups] = NULL; 332 - out: 330 + err_free_in: 333 331 kvfree(in); 334 - 332 + err_free_g: 333 + kfree(ft->g); 334 + ft->g = NULL; 335 335 return err; 336 336 } 337 337
+13 -8
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
··· 95 95 { 96 96 int tc, i; 97 97 98 - for (i = 0; i < MLX5_MAX_PORTS; i++) 98 + for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) 99 99 for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) 100 100 mlx5e_destroy_tis(mdev, tisn[i][tc]); 101 101 } ··· 110 110 int tc, i; 111 111 int err; 112 112 113 - for (i = 0; i < MLX5_MAX_PORTS; i++) { 113 + for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) { 114 114 for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) { 115 115 u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; 116 116 void *tisc; ··· 140 140 return err; 141 141 } 142 142 143 - int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) 143 + int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises) 144 144 { 145 145 struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; 146 146 int err; ··· 169 169 goto err_destroy_mkey; 170 170 } 171 171 172 - err = mlx5e_create_tises(mdev, res->tisn); 173 - if (err) { 174 - mlx5_core_err(mdev, "alloc tises failed, %d\n", err); 175 - goto err_destroy_bfreg; 172 + if (create_tises) { 173 + err = mlx5e_create_tises(mdev, res->tisn); 174 + if (err) { 175 + mlx5_core_err(mdev, "alloc tises failed, %d\n", err); 176 + goto err_destroy_bfreg; 177 + } 178 + res->tisn_valid = true; 176 179 } 180 + 177 181 INIT_LIST_HEAD(&res->td.tirs_list); 178 182 mutex_init(&res->td.list_lock); 179 183 ··· 207 203 208 204 mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv); 209 205 mdev->mlx5e_res.dek_priv = NULL; 210 - mlx5e_destroy_tises(mdev, res->tisn); 206 + if (res->tisn_valid) 207 + mlx5e_destroy_tises(mdev, res->tisn); 211 208 mlx5_free_bfreg(mdev, &res->bfreg); 212 209 mlx5_core_destroy_mkey(mdev, res->mkey); 213 210 mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 5992 5992 if (netif_device_present(netdev)) 5993 5993 return 0; 5994 5994 5995 - err = mlx5e_create_mdev_resources(mdev); 5995 + err = mlx5e_create_mdev_resources(mdev, true); 5996 5996 if (err) 5997 5997 return err; 5998 5998
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 761 761 762 762 err = mlx5e_rss_params_indir_init(&indir, mdev, 763 763 mlx5e_rqt_size(mdev, hp->num_channels), 764 - mlx5e_rqt_size(mdev, priv->max_nch)); 764 + mlx5e_rqt_size(mdev, hp->num_channels)); 765 765 if (err) 766 766 return err; 767 767 ··· 2014 2014 list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) { 2015 2015 if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev)) 2016 2016 continue; 2017 + 2018 + list_del(&peer_flow->peer_flows); 2017 2019 if (refcount_dec_and_test(&peer_flow->refcnt)) { 2018 2020 mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow); 2019 - list_del(&peer_flow->peer_flows); 2020 2021 kfree(peer_flow); 2021 2022 } 2022 2023 }
+3
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
··· 83 83 i++; 84 84 } 85 85 86 + rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN; 86 87 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 87 88 dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16); 88 89 ether_addr_copy(dmac_v, entry->key.addr); ··· 588 587 if (!rule_spec) 589 588 return ERR_PTR(-ENOMEM); 590 589 590 + rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN; 591 591 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; 592 592 593 593 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; ··· 664 662 dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; 665 663 dest.vport.vhca_id = port->esw_owner_vhca_id; 666 664 } 665 + rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN; 667 666 handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1); 668 667 669 668 kvfree(rule_spec);
+2
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
··· 566 566 fte->flow_context.flow_tag); 567 567 MLX5_SET(flow_context, in_flow_context, flow_source, 568 568 fte->flow_context.flow_source); 569 + MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en, 570 + !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN)); 569 571 570 572 MLX5_SET(flow_context, in_flow_context, extended_destination, 571 573 extended_dest);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 783 783 } 784 784 785 785 /* This should only be called once per mdev */ 786 - err = mlx5e_create_mdev_resources(mdev); 786 + err = mlx5e_create_mdev_resources(mdev, false); 787 787 if (err) 788 788 goto destroy_ht; 789 789 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
··· 98 98 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, 99 99 (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); 100 100 101 - MLX5_SET(cqc, cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE); 101 + MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); 102 102 MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); 103 103 MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); 104 104 MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+12 -5
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
··· 788 788 switch (action_type) { 789 789 case DR_ACTION_TYP_DROP: 790 790 attr.final_icm_addr = nic_dmn->drop_icm_addr; 791 + attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48; 791 792 break; 792 793 case DR_ACTION_TYP_FT: 793 794 dest_action = action; ··· 874 873 action->sampler->tx_icm_addr; 875 874 break; 876 875 case DR_ACTION_TYP_VPORT: 877 - attr.hit_gvmi = action->vport->caps->vhca_gvmi; 878 - dest_action = action; 879 - attr.final_icm_addr = rx_rule ? 880 - action->vport->caps->icm_address_rx : 881 - action->vport->caps->icm_address_tx; 876 + if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) { 877 + /* can't go to uplink on RX rule - dropping instead */ 878 + attr.final_icm_addr = nic_dmn->drop_icm_addr; 879 + attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48; 880 + } else { 881 + attr.hit_gvmi = action->vport->caps->vhca_gvmi; 882 + dest_action = action; 883 + attr.final_icm_addr = rx_rule ? 884 + action->vport->caps->icm_address_rx : 885 + action->vport->caps->icm_address_tx; 886 + } 882 887 break; 883 888 case DR_ACTION_TYP_POP_VLAN: 884 889 if (!rx_rule && !(dmn->ste_ctx->actions_caps &
+21
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 440 440 } 441 441 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid); 442 442 443 + int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group) 444 + { 445 + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); 446 + u32 *out; 447 + int err; 448 + 449 + out = kvzalloc(outlen, GFP_KERNEL); 450 + if (!out) 451 + return -ENOMEM; 452 + 453 + err = mlx5_query_nic_vport_context(mdev, 0, out); 454 + if (err) 455 + goto out; 456 + 457 + *sd_group = MLX5_GET(query_nic_vport_context_out, out, 458 + nic_vport_context.sd_group); 459 + out: 460 + kvfree(out); 461 + return err; 462 + } 463 + 443 464 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) 444 465 { 445 466 u32 *out;
+3
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 7542 7542 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", 7543 7543 ERR_PTR(ret)); 7544 7544 7545 + /* Wait a bit for the reset to take effect */ 7546 + udelay(10); 7547 + 7545 7548 /* Init MAC and get the capabilities */ 7546 7549 ret = stmmac_hw_init(priv); 7547 7550 if (ret)
+30 -7
drivers/net/fjes/fjes_hw.c
··· 221 221 222 222 mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid); 223 223 hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL); 224 - if (!(hw->hw_info.req_buf)) 225 - return -ENOMEM; 224 + if (!(hw->hw_info.req_buf)) { 225 + result = -ENOMEM; 226 + goto free_ep_info; 227 + } 226 228 227 229 hw->hw_info.req_buf_size = mem_size; 228 230 229 231 mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid); 230 232 hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL); 231 - if (!(hw->hw_info.res_buf)) 232 - return -ENOMEM; 233 + if (!(hw->hw_info.res_buf)) { 234 + result = -ENOMEM; 235 + goto free_req_buf; 236 + } 233 237 234 238 hw->hw_info.res_buf_size = mem_size; 235 239 236 240 result = fjes_hw_alloc_shared_status_region(hw); 237 241 if (result) 238 - return result; 242 + goto free_res_buf; 239 243 240 244 hw->hw_info.buffer_share_bit = 0; 241 245 hw->hw_info.buffer_unshare_reserve_bit = 0; ··· 250 246 251 247 result = fjes_hw_alloc_epbuf(&buf_pair->tx); 252 248 if (result) 253 - return result; 249 + goto free_epbuf; 254 250 255 251 result = fjes_hw_alloc_epbuf(&buf_pair->rx); 256 252 if (result) 257 - return result; 253 + goto free_epbuf; 258 254 259 255 spin_lock_irqsave(&hw->rx_status_lock, flags); 260 256 fjes_hw_setup_epbuf(&buf_pair->tx, mac, ··· 277 273 fjes_hw_init_command_registers(hw, &param); 278 274 279 275 return 0; 276 + 277 + free_epbuf: 278 + for (epidx = 0; epidx < hw->max_epid ; epidx++) { 279 + if (epidx == hw->my_epid) 280 + continue; 281 + fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx); 282 + fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx); 283 + } 284 + fjes_hw_free_shared_status_region(hw); 285 + free_res_buf: 286 + kfree(hw->hw_info.res_buf); 287 + hw->hw_info.res_buf = NULL; 288 + free_req_buf: 289 + kfree(hw->hw_info.req_buf); 290 + hw->hw_info.req_buf = NULL; 291 + free_ep_info: 292 + kfree(hw->ep_shm_info); 293 + hw->ep_shm_info = NULL; 294 + return result; 280 295 } 281 296 282 297 static void fjes_hw_cleanup(struct fjes_hw *hw)
+2 -2
drivers/net/hyperv/netvsc_drv.c
··· 44 44 45 45 static unsigned int ring_size __ro_after_init = 128; 46 46 module_param(ring_size, uint, 0444); 47 - MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); 47 + MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)"); 48 48 unsigned int netvsc_ring_bytes __ro_after_init; 49 49 50 50 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | ··· 2807 2807 pr_info("Increased ring_size to %u (min allowed)\n", 2808 2808 ring_size); 2809 2809 } 2810 - netvsc_ring_bytes = ring_size * PAGE_SIZE; 2810 + netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096); 2811 2811 2812 2812 register_netdevice_notifier(&netvsc_netdev_notifier); 2813 2813
+20 -5
drivers/net/macsec.c
··· 607 607 return ERR_PTR(-EINVAL); 608 608 } 609 609 610 - ret = skb_ensure_writable_head_tail(skb, dev); 611 - if (unlikely(ret < 0)) { 612 - macsec_txsa_put(tx_sa); 613 - kfree_skb(skb); 614 - return ERR_PTR(ret); 610 + if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM || 611 + skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) { 612 + struct sk_buff *nskb = skb_copy_expand(skb, 613 + MACSEC_NEEDED_HEADROOM, 614 + MACSEC_NEEDED_TAILROOM, 615 + GFP_ATOMIC); 616 + if (likely(nskb)) { 617 + consume_skb(skb); 618 + skb = nskb; 619 + } else { 620 + macsec_txsa_put(tx_sa); 621 + kfree_skb(skb); 622 + return ERR_PTR(-ENOMEM); 623 + } 624 + } else { 625 + skb = skb_unshare(skb, GFP_ATOMIC); 626 + if (!skb) { 627 + macsec_txsa_put(tx_sa); 628 + return ERR_PTR(-ENOMEM); 629 + } 615 630 } 616 631 617 632 unprotected_len = skb->len;
+11
drivers/net/phy/micrel.c
··· 120 120 */ 121 121 #define LAN8814_1PPM_FORMAT 17179 122 122 123 + #define PTP_RX_VERSION 0x0248 124 + #define PTP_TX_VERSION 0x0288 125 + #define PTP_MAX_VERSION(x) (((x) & GENMASK(7, 0)) << 8) 126 + #define PTP_MIN_VERSION(x) ((x) & GENMASK(7, 0)) 127 + 123 128 #define PTP_RX_MOD 0x024F 124 129 #define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3) 125 130 #define PTP_RX_TIMESTAMP_EN 0x024D ··· 3154 3149 lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_L2_ADDR_EN, 0); 3155 3150 lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0); 3156 3151 lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0); 3152 + 3153 + /* Disable checking for minorVersionPTP field */ 3154 + lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION, 3155 + PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0)); 3156 + lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION, 3157 + PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0)); 3157 3158 3158 3159 skb_queue_head_init(&ptp_priv->tx_queue); 3159 3160 skb_queue_head_init(&ptp_priv->rx_queue);
+8 -2
drivers/net/tun.c
··· 1630 1630 switch (act) { 1631 1631 case XDP_REDIRECT: 1632 1632 err = xdp_do_redirect(tun->dev, xdp, xdp_prog); 1633 - if (err) 1633 + if (err) { 1634 + dev_core_stats_rx_dropped_inc(tun->dev); 1634 1635 return err; 1636 + } 1637 + dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); 1635 1638 break; 1636 1639 case XDP_TX: 1637 1640 err = tun_xdp_tx(tun->dev, xdp); 1638 - if (err < 0) 1641 + if (err < 0) { 1642 + dev_core_stats_rx_dropped_inc(tun->dev); 1639 1643 return err; 1644 + } 1645 + dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); 1640 1646 break; 1641 1647 case XDP_PASS: 1642 1648 break;
-4
drivers/net/wireless/ath/ath11k/core.h
··· 368 368 struct ieee80211_chanctx_conf chanctx; 369 369 struct ath11k_arp_ns_offload arp_ns_offload; 370 370 struct ath11k_rekey_data rekey_data; 371 - 372 - #ifdef CONFIG_ATH11K_DEBUGFS 373 - struct dentry *debugfs_twt; 374 - #endif /* CONFIG_ATH11K_DEBUGFS */ 375 371 }; 376 372 377 373 struct ath11k_vif_iter {
+10 -15
drivers/net/wireless/ath/ath11k/debugfs.c
··· 1894 1894 .open = simple_open 1895 1895 }; 1896 1896 1897 - void ath11k_debugfs_add_interface(struct ath11k_vif *arvif) 1897 + void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw, 1898 + struct ieee80211_vif *vif) 1898 1899 { 1900 + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); 1899 1901 struct ath11k_base *ab = arvif->ar->ab; 1902 + struct dentry *debugfs_twt; 1900 1903 1901 1904 if (arvif->vif->type != NL80211_IFTYPE_AP && 1902 1905 !(arvif->vif->type == NL80211_IFTYPE_STATION && 1903 1906 test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map))) 1904 1907 return; 1905 1908 1906 - arvif->debugfs_twt = debugfs_create_dir("twt", 1907 - arvif->vif->debugfs_dir); 1908 - debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt, 1909 + debugfs_twt = debugfs_create_dir("twt", 1910 + arvif->vif->debugfs_dir); 1911 + debugfs_create_file("add_dialog", 0200, debugfs_twt, 1909 1912 arvif, &ath11k_fops_twt_add_dialog); 1910 1913 1911 - debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt, 1914 + debugfs_create_file("del_dialog", 0200, debugfs_twt, 1912 1915 arvif, &ath11k_fops_twt_del_dialog); 1913 1916 1914 - debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt, 1917 + debugfs_create_file("pause_dialog", 0200, debugfs_twt, 1915 1918 arvif, &ath11k_fops_twt_pause_dialog); 1916 1919 1917 - debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt, 1920 + debugfs_create_file("resume_dialog", 0200, debugfs_twt, 1918 1921 arvif, &ath11k_fops_twt_resume_dialog); 1919 1922 } 1920 1923 1921 - void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif) 1922 - { 1923 - if (!arvif->debugfs_twt) 1924 - return; 1925 - 1926 - debugfs_remove_recursive(arvif->debugfs_twt); 1927 - arvif->debugfs_twt = NULL; 1928 - }
+2 -10
drivers/net/wireless/ath/ath11k/debugfs.h
··· 307 307 return ar->debug.rx_filter; 308 308 } 309 309 310 - void ath11k_debugfs_add_interface(struct ath11k_vif *arvif); 311 - void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif); 310 + void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw, 311 + struct ieee80211_vif *vif); 312 312 void ath11k_debugfs_add_dbring_entry(struct ath11k *ar, 313 313 enum wmi_direct_buffer_module id, 314 314 enum ath11k_dbg_dbr_event event, ··· 385 385 u32 pdev_id, u32 vdev_id, u32 stats_id) 386 386 { 387 387 return 0; 388 - } 389 - 390 - static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif) 391 - { 392 - } 393 - 394 - static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif) 395 - { 396 388 } 397 389 398 390 static inline void
+1 -11
drivers/net/wireless/ath/ath11k/mac.c
··· 6756 6756 goto err; 6757 6757 } 6758 6758 6759 - /* In the case of hardware recovery, debugfs files are 6760 - * not deleted since ieee80211_ops.remove_interface() is 6761 - * not invoked. In such cases, try to delete the files. 6762 - * These will be re-created later. 6763 - */ 6764 - ath11k_debugfs_remove_interface(arvif); 6765 - 6766 6759 memset(arvif, 0, sizeof(*arvif)); 6767 6760 6768 6761 arvif->ar = ar; ··· 6932 6939 6933 6940 ath11k_dp_vdev_tx_attach(ar, arvif); 6934 6941 6935 - ath11k_debugfs_add_interface(arvif); 6936 - 6937 6942 if (vif->type != NL80211_IFTYPE_MONITOR && 6938 6943 test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) { 6939 6944 ret = ath11k_mac_monitor_vdev_create(ar); ··· 7046 7055 7047 7056 /* Recalc txpower for remaining vdev */ 7048 7057 ath11k_mac_txpower_recalc(ar); 7049 - 7050 - ath11k_debugfs_remove_interface(arvif); 7051 7058 7052 7059 /* TODO: recal traffic pause state based on the available vdevs */ 7053 7060 ··· 9142 9153 #endif 9143 9154 9144 9155 #ifdef CONFIG_ATH11K_DEBUGFS 9156 + .vif_add_debugfs = ath11k_debugfs_op_vif_add, 9145 9157 .sta_add_debugfs = ath11k_debugfs_sta_op_add, 9146 9158 #endif 9147 9159
+2 -2
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2018-2023 Intel Corporation 3 + * Copyright (C) 2018-2024 Intel Corporation 4 4 */ 5 5 #include <linux/firmware.h> 6 6 #include "iwl-drv.h" ··· 1096 1096 node_trig = (void *)node_tlv->data; 1097 1097 } 1098 1098 1099 - memcpy(node_trig->data + offset, trig->data, trig_data_len); 1099 + memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len); 1100 1100 node_tlv->length = cpu_to_le32(size); 1101 1101 1102 1102 if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
+1 -1
drivers/net/wireless/intersil/p54/fwio.c
··· 125 125 "FW rev %s - Softmac protocol %x.%x\n", 126 126 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); 127 127 snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version), 128 - "%s - %x.%x", fw_version, 128 + "%.19s - %x.%x", fw_version, 129 129 priv->fw_var >> 8, priv->fw_var & 0xff); 130 130 } 131 131
+1
include/linux/mlx5/driver.h
··· 681 681 struct mlx5_sq_bfreg bfreg; 682 682 #define MLX5_MAX_NUM_TC 8 683 683 u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]; 684 + bool tisn_valid; 684 685 } hw_objs; 685 686 struct net_device *uplink_netdev; 686 687 struct mutex uplink_netdev_lock;
+1
include/linux/mlx5/fs.h
··· 132 132 133 133 enum { 134 134 FLOW_CONTEXT_HAS_TAG = BIT(0), 135 + FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1), 135 136 }; 136 137 137 138 struct mlx5_flow_context {
+8 -4
include/linux/mlx5/mlx5_ifc.h
··· 3576 3576 u8 action[0x10]; 3577 3577 3578 3578 u8 extended_destination[0x1]; 3579 - u8 reserved_at_81[0x1]; 3579 + u8 uplink_hairpin_en[0x1]; 3580 3580 u8 flow_source[0x2]; 3581 3581 u8 encrypt_decrypt_type[0x4]; 3582 3582 u8 destination_list_size[0x18]; ··· 4036 4036 u8 affiliation_criteria[0x4]; 4037 4037 u8 affiliated_vhca_id[0x10]; 4038 4038 4039 - u8 reserved_at_60[0xd0]; 4039 + u8 reserved_at_60[0xa0]; 4040 4040 4041 + u8 reserved_at_100[0x1]; 4042 + u8 sd_group[0x3]; 4043 + u8 reserved_at_104[0x1c]; 4044 + 4045 + u8 reserved_at_120[0x10]; 4041 4046 u8 mtu[0x10]; 4042 4047 4043 4048 u8 system_image_guid[0x40]; ··· 10127 10122 u8 reserved_at_20[0x20]; 10128 10123 10129 10124 u8 local_port[0x8]; 10130 - u8 reserved_at_28[0x15]; 10131 - u8 sd_group[0x3]; 10125 + u8 reserved_at_28[0x18]; 10132 10126 10133 10127 u8 reserved_at_60[0x20]; 10134 10128 };
+1
include/linux/mlx5/vport.h
··· 72 72 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); 73 73 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 74 74 u64 *system_image_guid); 75 + int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group); 75 76 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); 76 77 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, 77 78 u16 vport, u64 node_guid);
-6
include/linux/skmsg.h
··· 505 505 return !!psock->saved_data_ready; 506 506 } 507 507 508 - static inline bool sk_is_udp(const struct sock *sk) 509 - { 510 - return sk->sk_type == SOCK_DGRAM && 511 - sk->sk_protocol == IPPROTO_UDP; 512 - } 513 - 514 508 #if IS_ENABLED(CONFIG_NET_SOCK_MSG) 515 509 516 510 #define BPF_F_STRPARSER (1UL << 1)
+8
include/net/inet_connection_sock.h
··· 357 357 return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops; 358 358 } 359 359 360 + static inline void inet_init_csk_locks(struct sock *sk) 361 + { 362 + struct inet_connection_sock *icsk = inet_csk(sk); 363 + 364 + spin_lock_init(&icsk->icsk_accept_queue.rskq_lock); 365 + spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock); 366 + } 367 + 360 368 #endif /* _INET_CONNECTION_SOCK_H */
-5
include/net/inet_sock.h
··· 307 307 #define inet_assign_bit(nr, sk, val) \ 308 308 assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val) 309 309 310 - static inline bool sk_is_inet(struct sock *sk) 311 - { 312 - return sk->sk_family == AF_INET || sk->sk_family == AF_INET6; 313 - } 314 - 315 310 /** 316 311 * sk_to_full_sk - Access to a full socket 317 312 * @sk: pointer to a socket
+2 -4
include/net/llc_pdu.h
··· 262 262 */ 263 263 static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) 264 264 { 265 - if (skb->protocol == htons(ETH_P_802_2)) 266 - memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); 265 + memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); 267 266 } 268 267 269 268 /** ··· 274 275 */ 275 276 static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da) 276 277 { 277 - if (skb->protocol == htons(ETH_P_802_2)) 278 - memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); 278 + memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); 279 279 } 280 280 281 281 /**
+39 -10
include/net/netfilter/nf_tables.h
··· 205 205 * @nla: netlink attributes 206 206 * @portid: netlink portID of the original message 207 207 * @seq: netlink sequence number 208 + * @flags: modifiers to new request 208 209 * @family: protocol family 209 210 * @level: depth of the chains 210 211 * @report: notify via unicast netlink message ··· 283 282 * 284 283 * @key: element key 285 284 * @key_end: closing element key 285 + * @data: element data 286 286 * @priv: element private data and extensions 287 287 */ 288 288 struct nft_set_elem { ··· 327 325 * @dtype: data type 328 326 * @dlen: data length 329 327 * @objtype: object type 330 - * @flags: flags 331 328 * @size: number of set elements 332 329 * @policy: set policy 333 330 * @gc_int: garbage collector interval 331 + * @timeout: element timeout 334 332 * @field_len: length of each field in concatenation, bytes 335 333 * @field_count: number of concatenated fields in element 336 334 * @expr: set must support for expressions ··· 353 351 /** 354 352 * enum nft_set_class - performance class 355 353 * 356 - * @NFT_LOOKUP_O_1: constant, O(1) 357 - * @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N) 358 - * @NFT_LOOKUP_O_N: linear, O(N) 354 + * @NFT_SET_CLASS_O_1: constant, O(1) 355 + * @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N) 356 + * @NFT_SET_CLASS_O_N: linear, O(N) 359 357 */ 360 358 enum nft_set_class { 361 359 NFT_SET_CLASS_O_1, ··· 424 422 * @remove: remove element from set 425 423 * @walk: iterate over all set elements 426 424 * @get: get set elements 425 + * @commit: commit set elements 426 + * @abort: abort set elements 427 427 * @privsize: function to return size of set private data 428 + * @estimate: estimate the required memory size and the lookup complexity class 428 429 * @init: initialize private data of new set instance 429 430 * @destroy: destroy private data of set instance 431 + * @gc_init: initialize garbage collection 430 432 * @elemsize: element private size 431 433 * 432 434 * Operations lookup, update and delete have simpler 
interfaces, are faster ··· 546 540 * @policy: set parameterization (see enum nft_set_policies) 547 541 * @udlen: user data length 548 542 * @udata: user data 549 - * @expr: stateful expression 543 + * @pending_update: list of pending update set element 550 544 * @ops: set ops 551 545 * @flags: set flags 552 546 * @dead: set will be freed, never cleared 553 547 * @genmask: generation mask 554 548 * @klen: key length 555 549 * @dlen: data length 550 + * @num_exprs: numbers of exprs 551 + * @exprs: stateful expression 552 + * @catchall_list: list of catch-all set element 556 553 * @data: private set data 557 554 */ 558 555 struct nft_set { ··· 701 692 * 702 693 * @len: length of extension area 703 694 * @offset: offsets of individual extension types 695 + * @ext_len: length of the expected extension(used to sanity check) 704 696 */ 705 697 struct nft_set_ext_tmpl { 706 698 u16 len; ··· 850 840 * @select_ops: function to select nft_expr_ops 851 841 * @release_ops: release nft_expr_ops 852 842 * @ops: default ops, used when no select_ops functions is present 843 + * @inner_ops: inner ops, used for inner packet operation 853 844 * @list: used internally 854 845 * @name: Identifier 855 846 * @owner: module reference ··· 892 881 * struct nft_expr_ops - nf_tables expression operations 893 882 * 894 883 * @eval: Expression evaluation function 884 + * @clone: Expression clone function 895 885 * @size: full expression size, including private data size 896 886 * @init: initialization function 897 887 * @activate: activate expression in the next generation 898 888 * @deactivate: deactivate expression in next generation 899 889 * @destroy: destruction function, called after synchronize_rcu 890 + * @destroy_clone: destruction clone function 900 891 * @dump: function to dump parameters 901 - * @type: expression type 902 892 * @validate: validate expression, called during loop detection 893 + * @reduce: reduce expression 894 + * @gc: garbage collection expression 895 + * @offload: 
hardware offload expression 896 + * @offload_action: function to report true/false to allocate one slot or not in the flow 897 + * offload array 898 + * @offload_stats: function to synchronize hardware stats via updating the counter expression 899 + * @type: expression type 903 900 * @data: extra data to attach to this expression operation 904 901 */ 905 902 struct nft_expr_ops { ··· 1060 1041 /** 1061 1042 * struct nft_chain - nf_tables chain 1062 1043 * 1044 + * @blob_gen_0: rule blob pointer to the current generation 1045 + * @blob_gen_1: rule blob pointer to the future generation 1063 1046 * @rules: list of rules in the chain 1064 1047 * @list: used internally 1065 1048 * @rhlhead: used internally 1066 1049 * @table: table that this chain belongs to 1067 1050 * @handle: chain handle 1068 1051 * @use: number of jump references to this chain 1069 - * @flags: bitmask of enum nft_chain_flags 1052 + * @flags: bitmask of enum NFTA_CHAIN_FLAGS 1053 + * @bound: bind or not 1054 + * @genmask: generation mask 1070 1055 * @name: name of the chain 1056 + * @udlen: user data length 1057 + * @udata: user data in the chain 1058 + * @blob_next: rule blob pointer to the next in the chain 1071 1059 */ 1072 1060 struct nft_chain { 1073 1061 struct nft_rule_blob __rcu *blob_gen_0; ··· 1172 1146 * @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family) 1173 1147 * @type: chain type 1174 1148 * @policy: default policy 1149 + * @flags: indicate the base chain disabled or not 1175 1150 * @stats: per-cpu chain stats 1176 1151 * @chain: the chain 1177 1152 * @flow_block: flow block (for hardware offload) ··· 1301 1274 * struct nft_object - nf_tables stateful object 1302 1275 * 1303 1276 * @list: table stateful object list node 1304 - * @key: keys that identify this object 1305 1277 * @rhlhead: nft_objname_ht node 1278 + * @key: keys that identify this object 1306 1279 * @genmask: generation mask 1307 1280 * @use: number of references to this stateful object 1308 1281 * @handle: 
unique object handle 1282 + * @udlen: length of user data 1283 + * @udata: user data 1309 1284 * @ops: object operations 1310 1285 * @data: object data, layout depends on type 1311 1286 */ ··· 1373 1344 * @destroy: release existing stateful object 1374 1345 * @dump: netlink dump stateful object 1375 1346 * @update: update stateful object 1347 + * @type: pointer to object type 1376 1348 */ 1377 1349 struct nft_object_ops { 1378 1350 void (*eval)(struct nft_object *obj, ··· 1409 1379 * @genmask: generation mask 1410 1380 * @use: number of references to this flow table 1411 1381 * @handle: unique object handle 1412 - * @dev_name: array of device names 1382 + * @hook_list: hook list for hooks per net_device in flowtables 1413 1383 * @data: rhashtable and garbage collector 1414 - * @ops: array of hooks 1415 1384 */ 1416 1385 struct nft_flowtable { 1417 1386 struct list_head list;
+4
include/net/sch_generic.h
··· 375 375 struct nlattr **tca, 376 376 struct netlink_ext_ack *extack); 377 377 void (*tmplt_destroy)(void *tmplt_priv); 378 + void (*tmplt_reoffload)(struct tcf_chain *chain, 379 + bool add, 380 + flow_setup_cb_t *cb, 381 + void *cb_priv); 378 382 struct tcf_exts * (*get_exts)(const struct tcf_proto *tp, 379 383 u32 handle); 380 384
+17 -1
include/net/sock.h
··· 2765 2765 &skb_shinfo(skb)->tskey); 2766 2766 } 2767 2767 2768 + static inline bool sk_is_inet(const struct sock *sk) 2769 + { 2770 + int family = READ_ONCE(sk->sk_family); 2771 + 2772 + return family == AF_INET || family == AF_INET6; 2773 + } 2774 + 2768 2775 static inline bool sk_is_tcp(const struct sock *sk) 2769 2776 { 2770 - return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP; 2777 + return sk_is_inet(sk) && 2778 + sk->sk_type == SOCK_STREAM && 2779 + sk->sk_protocol == IPPROTO_TCP; 2780 + } 2781 + 2782 + static inline bool sk_is_udp(const struct sock *sk) 2783 + { 2784 + return sk_is_inet(sk) && 2785 + sk->sk_type == SOCK_DGRAM && 2786 + sk->sk_protocol == IPPROTO_UDP; 2771 2787 } 2772 2788 2773 2789 static inline bool sk_is_stream_unix(const struct sock *sk)
+27
include/net/xdp_sock_drv.h
··· 159 159 return ret; 160 160 } 161 161 162 + static inline void xsk_buff_del_tail(struct xdp_buff *tail) 163 + { 164 + struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp); 165 + 166 + list_del(&xskb->xskb_list_node); 167 + } 168 + 169 + static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) 170 + { 171 + struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp); 172 + struct xdp_buff_xsk *frag; 173 + 174 + frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk, 175 + xskb_list_node); 176 + return &frag->xdp; 177 + } 178 + 162 179 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) 163 180 { 164 181 xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; 165 182 xdp->data_meta = xdp->data; 166 183 xdp->data_end = xdp->data + size; 184 + xdp->flags = 0; 167 185 } 168 186 169 187 static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool, ··· 364 346 } 365 347 366 348 static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) 349 + { 350 + return NULL; 351 + } 352 + 353 + static inline void xsk_buff_del_tail(struct xdp_buff *tail) 354 + { 355 + } 356 + 357 + static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) 367 358 { 368 359 return NULL; 369 360 }
+4
net/8021q/vlan_netlink.c
··· 118 118 } 119 119 if (data[IFLA_VLAN_INGRESS_QOS]) { 120 120 nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) { 121 + if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING) 122 + continue; 121 123 m = nla_data(attr); 122 124 vlan_dev_set_ingress_priority(dev, m->to, m->from); 123 125 } 124 126 } 125 127 if (data[IFLA_VLAN_EGRESS_QOS]) { 126 128 nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) { 129 + if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING) 130 + continue; 127 131 m = nla_data(attr); 128 132 err = vlan_dev_set_egress_priority(dev, m->from, m->to); 129 133 if (err)
+9
net/core/dev.c
··· 11551 11551 11552 11552 static void __net_exit default_device_exit_net(struct net *net) 11553 11553 { 11554 + struct netdev_name_node *name_node, *tmp; 11554 11555 struct net_device *dev, *aux; 11555 11556 /* 11556 11557 * Push all migratable network devices back to the ··· 11574 11573 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 11575 11574 if (netdev_name_in_use(&init_net, fb_name)) 11576 11575 snprintf(fb_name, IFNAMSIZ, "dev%%d"); 11576 + 11577 + netdev_for_each_altname_safe(dev, name_node, tmp) 11578 + if (netdev_name_in_use(&init_net, name_node->name)) { 11579 + netdev_name_node_del(name_node); 11580 + synchronize_rcu(); 11581 + __netdev_name_node_alt_destroy(name_node); 11582 + } 11583 + 11577 11584 err = dev_change_net_namespace(dev, &init_net, fb_name); 11578 11585 if (err) { 11579 11586 pr_emerg("%s: failed to move %s to init_net: %d\n",
+3
net/core/dev.h
··· 63 63 64 64 #define netdev_for_each_altname(dev, namenode) \ 65 65 list_for_each_entry((namenode), &(dev)->name_node->list, list) 66 + #define netdev_for_each_altname_safe(dev, namenode, next) \ 67 + list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \ 68 + list) 66 69 67 70 int netdev_name_node_alt_create(struct net_device *dev, const char *name); 68 71 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+38 -6
net/core/filter.c
··· 83 83 #include <net/netfilter/nf_conntrack_bpf.h> 84 84 #include <net/netkit.h> 85 85 #include <linux/un.h> 86 + #include <net/xdp_sock_drv.h> 86 87 87 88 #include "dev.h" 88 89 ··· 4093 4092 memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset); 4094 4093 skb_frag_size_add(frag, offset); 4095 4094 sinfo->xdp_frags_size += offset; 4095 + if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) 4096 + xsk_buff_get_tail(xdp)->data_end += offset; 4096 4097 4097 4098 return 0; 4099 + } 4100 + 4101 + static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink, 4102 + struct xdp_mem_info *mem_info, bool release) 4103 + { 4104 + struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp); 4105 + 4106 + if (release) { 4107 + xsk_buff_del_tail(zc_frag); 4108 + __xdp_return(NULL, mem_info, false, zc_frag); 4109 + } else { 4110 + zc_frag->data_end -= shrink; 4111 + } 4112 + } 4113 + 4114 + static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag, 4115 + int shrink) 4116 + { 4117 + struct xdp_mem_info *mem_info = &xdp->rxq->mem; 4118 + bool release = skb_frag_size(frag) == shrink; 4119 + 4120 + if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) { 4121 + bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release); 4122 + goto out; 4123 + } 4124 + 4125 + if (release) { 4126 + struct page *page = skb_frag_page(frag); 4127 + 4128 + __xdp_return(page_address(page), mem_info, false, NULL); 4129 + } 4130 + 4131 + out: 4132 + return release; 4098 4133 } 4099 4134 4100 4135 static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset) ··· 4147 4110 4148 4111 len_free += shrink; 4149 4112 offset -= shrink; 4150 - 4151 - if (skb_frag_size(frag) == shrink) { 4152 - struct page *page = skb_frag_page(frag); 4153 - 4154 - __xdp_return(page_address(page), &xdp->rxq->mem, 4155 - false, NULL); 4113 + if (bpf_xdp_shrink_data(xdp, frag, shrink)) { 4156 4114 n_frags_free++; 4157 4115 } else { 4158 4116 skb_frag_size_sub(frag, shrink);
-3
net/core/request_sock.c
··· 33 33 34 34 void reqsk_queue_alloc(struct request_sock_queue *queue) 35 35 { 36 - spin_lock_init(&queue->rskq_lock); 37 - 38 - spin_lock_init(&queue->fastopenq.lock); 39 36 queue->fastopenq.rskq_rst_head = NULL; 40 37 queue->fastopenq.rskq_rst_tail = NULL; 41 38 queue->fastopenq.qlen = 0;
+9 -2
net/core/sock.c
··· 107 107 #include <linux/interrupt.h> 108 108 #include <linux/poll.h> 109 109 #include <linux/tcp.h> 110 + #include <linux/udp.h> 110 111 #include <linux/init.h> 111 112 #include <linux/highmem.h> 112 113 #include <linux/user_namespace.h> ··· 4145 4144 { 4146 4145 struct sock *sk = p; 4147 4146 4148 - return !skb_queue_empty_lockless(&sk->sk_receive_queue) || 4149 - sk_busy_loop_timeout(sk, start_time); 4147 + if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) 4148 + return true; 4149 + 4150 + if (sk_is_udp(sk) && 4151 + !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) 4152 + return true; 4153 + 4154 + return sk_busy_loop_timeout(sk, start_time); 4150 4155 } 4151 4156 EXPORT_SYMBOL(sk_busy_loop_end); 4152 4157 #endif /* CONFIG_NET_RX_BUSY_POLL */
+3
net/ipv4/af_inet.c
··· 330 330 if (INET_PROTOSW_REUSE & answer_flags) 331 331 sk->sk_reuse = SK_CAN_REUSE; 332 332 333 + if (INET_PROTOSW_ICSK & answer_flags) 334 + inet_init_csk_locks(sk); 335 + 333 336 inet = inet_sk(sk); 334 337 inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags); 335 338
+4
net/ipv4/inet_connection_sock.c
··· 727 727 } 728 728 if (req) 729 729 reqsk_put(req); 730 + 731 + if (newsk) 732 + inet_init_csk_locks(newsk); 733 + 730 734 return newsk; 731 735 out_err: 732 736 newsk = NULL;
+1
net/ipv4/tcp.c
··· 722 722 if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) { 723 723 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); 724 724 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 725 + smp_mb__after_atomic(); 725 726 } 726 727 /* It is possible TX completion already happened 727 728 * before we set TSQ_THROTTLED.
+3
net/ipv6/af_inet6.c
··· 199 199 if (INET_PROTOSW_REUSE & answer_flags) 200 200 sk->sk_reuse = SK_CAN_REUSE; 201 201 202 + if (INET_PROTOSW_ICSK & answer_flags) 203 + inet_init_csk_locks(sk); 204 + 202 205 inet = inet_sk(sk); 203 206 inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags); 204 207
+16 -8
net/llc/af_llc.c
··· 928 928 */ 929 929 static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) 930 930 { 931 + DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name); 931 932 struct sock *sk = sock->sk; 932 933 struct llc_sock *llc = llc_sk(sk); 933 - DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name); 934 934 int flags = msg->msg_flags; 935 935 int noblock = flags & MSG_DONTWAIT; 936 + int rc = -EINVAL, copied = 0, hdrlen, hh_len; 936 937 struct sk_buff *skb = NULL; 938 + struct net_device *dev; 937 939 size_t size = 0; 938 - int rc = -EINVAL, copied = 0, hdrlen; 939 940 940 941 dprintk("%s: sending from %02X to %02X\n", __func__, 941 942 llc->laddr.lsap, llc->daddr.lsap); ··· 956 955 if (rc) 957 956 goto out; 958 957 } 959 - hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr); 958 + dev = llc->dev; 959 + hh_len = LL_RESERVED_SPACE(dev); 960 + hdrlen = llc_ui_header_len(sk, addr); 960 961 size = hdrlen + len; 961 - if (size > llc->dev->mtu) 962 - size = llc->dev->mtu; 962 + size = min_t(size_t, size, READ_ONCE(dev->mtu)); 963 963 copied = size - hdrlen; 964 964 rc = -EINVAL; 965 965 if (copied < 0) 966 966 goto out; 967 967 release_sock(sk); 968 - skb = sock_alloc_send_skb(sk, size, noblock, &rc); 968 + skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc); 969 969 lock_sock(sk); 970 970 if (!skb) 971 971 goto out; 972 - skb->dev = llc->dev; 972 + if (sock_flag(sk, SOCK_ZAPPED) || 973 + llc->dev != dev || 974 + hdrlen != llc_ui_header_len(sk, addr) || 975 + hh_len != LL_RESERVED_SPACE(dev) || 976 + size > READ_ONCE(dev->mtu)) 977 + goto out; 978 + skb->dev = dev; 973 979 skb->protocol = llc_proto_type(addr->sllc_arphrd); 974 - skb_reserve(skb, hdrlen); 980 + skb_reserve(skb, hh_len + hdrlen); 975 981 rc = memcpy_from_msg(skb_put(skb, copied), msg, copied); 976 982 if (rc) 977 983 goto out;
-7
net/llc/llc_core.c
··· 135 135 .func = llc_rcv, 136 136 }; 137 137 138 - static struct packet_type llc_tr_packet_type __read_mostly = { 139 - .type = cpu_to_be16(ETH_P_TR_802_2), 140 - .func = llc_rcv, 141 - }; 142 - 143 138 static int __init llc_init(void) 144 139 { 145 140 dev_add_pack(&llc_packet_type); 146 - dev_add_pack(&llc_tr_packet_type); 147 141 return 0; 148 142 } 149 143 150 144 static void __exit llc_exit(void) 151 145 { 152 146 dev_remove_pack(&llc_packet_type); 153 - dev_remove_pack(&llc_tr_packet_type); 154 147 } 155 148 156 149 module_init(llc_init);
-1
net/mac80211/Kconfig
··· 62 62 depends on KUNIT 63 63 depends on MAC80211 64 64 default KUNIT_ALL_TESTS 65 - depends on !KERNEL_6_2 66 65 help 67 66 Enable this option to test mac80211 internals with kunit. 68 67
+6 -1
net/mac80211/sta_info.c
··· 404 404 int i; 405 405 406 406 for (i = 0; i < ARRAY_SIZE(sta->link); i++) { 407 - if (!(sta->sta.valid_links & BIT(i))) 407 + struct link_sta_info *link_sta; 408 + 409 + link_sta = rcu_access_pointer(sta->link[i]); 410 + if (!link_sta) 408 411 continue; 409 412 410 413 sta_remove_link(sta, i, false); ··· 912 909 913 910 if (ieee80211_vif_is_mesh(&sdata->vif)) 914 911 mesh_accept_plinks_update(sdata); 912 + 913 + ieee80211_check_fast_xmit(sta); 915 914 916 915 return 0; 917 916 out_remove:
+1 -1
net/mac80211/tx.c
··· 3048 3048 sdata->vif.type == NL80211_IFTYPE_STATION) 3049 3049 goto out; 3050 3050 3051 - if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 3051 + if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded) 3052 3052 goto out; 3053 3053 3054 3054 if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+10 -10
net/netfilter/nf_tables_api.c
··· 24 24 #include <net/sock.h> 25 25 26 26 #define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-")) 27 + #define NFT_SET_MAX_ANONLEN 16 27 28 28 29 unsigned int nf_tables_net_id __read_mostly; 29 30 ··· 4412 4411 p = strchr(name, '%'); 4413 4412 if (p != NULL) { 4414 4413 if (p[1] != 'd' || strchr(p + 2, '%')) 4414 + return -EINVAL; 4415 + 4416 + if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN) 4415 4417 return -EINVAL; 4416 4418 4417 4419 inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL); ··· 10992 10988 data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE])); 10993 10989 10994 10990 switch (data->verdict.code) { 10995 - default: 10996 - switch (data->verdict.code & NF_VERDICT_MASK) { 10997 - case NF_ACCEPT: 10998 - case NF_DROP: 10999 - case NF_QUEUE: 11000 - break; 11001 - default: 11002 - return -EINVAL; 11003 - } 11004 - fallthrough; 10991 + case NF_ACCEPT: 10992 + case NF_DROP: 10993 + case NF_QUEUE: 10994 + break; 11005 10995 case NFT_CONTINUE: 11006 10996 case NFT_BREAK: 11007 10997 case NFT_RETURN: ··· 11030 11032 11031 11033 data->verdict.chain = chain; 11032 11034 break; 11035 + default: 11036 + return -EINVAL; 11033 11037 } 11034 11038 11035 11039 desc->len = sizeof(data->verdict);
+9 -2
net/netfilter/nft_chain_filter.c
··· 357 357 unsigned long event, void *ptr) 358 358 { 359 359 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 360 + struct nft_base_chain *basechain; 360 361 struct nftables_pernet *nft_net; 361 - struct nft_table *table; 362 362 struct nft_chain *chain, *nr; 363 + struct nft_table *table; 363 364 struct nft_ctx ctx = { 364 365 .net = dev_net(dev), 365 366 }; ··· 372 371 nft_net = nft_pernet(ctx.net); 373 372 mutex_lock(&nft_net->commit_mutex); 374 373 list_for_each_entry(table, &nft_net->tables, list) { 375 - if (table->family != NFPROTO_NETDEV) 374 + if (table->family != NFPROTO_NETDEV && 375 + table->family != NFPROTO_INET) 376 376 continue; 377 377 378 378 ctx.family = table->family; 379 379 ctx.table = table; 380 380 list_for_each_entry_safe(chain, nr, &table->chains, list) { 381 381 if (!nft_is_base_chain(chain)) 382 + continue; 383 + 384 + basechain = nft_base_chain(chain); 385 + if (table->family == NFPROTO_INET && 386 + basechain->ops.hooknum != NF_INET_INGRESS) 382 387 continue; 383 388 384 389 ctx.chain = chain;
+12
net/netfilter/nft_compat.c
··· 350 350 unsigned int hook_mask = 0; 351 351 int ret; 352 352 353 + if (ctx->family != NFPROTO_IPV4 && 354 + ctx->family != NFPROTO_IPV6 && 355 + ctx->family != NFPROTO_BRIDGE && 356 + ctx->family != NFPROTO_ARP) 357 + return -EOPNOTSUPP; 358 + 353 359 if (nft_is_base_chain(ctx->chain)) { 354 360 const struct nft_base_chain *basechain = 355 361 nft_base_chain(ctx->chain); ··· 600 594 struct xt_match *match = expr->ops->data; 601 595 unsigned int hook_mask = 0; 602 596 int ret; 597 + 598 + if (ctx->family != NFPROTO_IPV4 && 599 + ctx->family != NFPROTO_IPV6 && 600 + ctx->family != NFPROTO_BRIDGE && 601 + ctx->family != NFPROTO_ARP) 602 + return -EOPNOTSUPP; 603 603 604 604 if (nft_is_base_chain(ctx->chain)) { 605 605 const struct nft_base_chain *basechain =
+5
net/netfilter/nft_flow_offload.c
··· 384 384 { 385 385 unsigned int hook_mask = (1 << NF_INET_FORWARD); 386 386 387 + if (ctx->family != NFPROTO_IPV4 && 388 + ctx->family != NFPROTO_IPV6 && 389 + ctx->family != NFPROTO_INET) 390 + return -EOPNOTSUPP; 391 + 387 392 return nft_chain_validate_hooks(ctx->chain, hook_mask); 388 393 } 389 394
+16 -7
net/netfilter/nft_limit.c
··· 58 58 static int nft_limit_init(struct nft_limit_priv *priv, 59 59 const struct nlattr * const tb[], bool pkts) 60 60 { 61 + u64 unit, tokens, rate_with_burst; 61 62 bool invert = false; 62 - u64 unit, tokens; 63 63 64 64 if (tb[NFTA_LIMIT_RATE] == NULL || 65 65 tb[NFTA_LIMIT_UNIT] == NULL) 66 66 return -EINVAL; 67 67 68 68 priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE])); 69 + if (priv->rate == 0) 70 + return -EINVAL; 71 + 69 72 unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT])); 70 - priv->nsecs = unit * NSEC_PER_SEC; 71 - if (priv->rate == 0 || priv->nsecs < unit) 73 + if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs)) 72 74 return -EOVERFLOW; 73 75 74 76 if (tb[NFTA_LIMIT_BURST]) ··· 79 77 if (pkts && priv->burst == 0) 80 78 priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT; 81 79 82 - if (priv->rate + priv->burst < priv->rate) 80 + if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst)) 83 81 return -EOVERFLOW; 84 82 85 83 if (pkts) { 86 - tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst; 84 + u64 tmp = div64_u64(priv->nsecs, priv->rate); 85 + 86 + if (check_mul_overflow(tmp, priv->burst, &tokens)) 87 + return -EOVERFLOW; 87 88 } else { 89 + u64 tmp; 90 + 88 91 /* The token bucket size limits the number of tokens can be 89 92 * accumulated. tokens_max specifies the bucket size. 90 93 * tokens_max = unit * (rate + burst) / rate. 91 94 */ 92 - tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst), 93 - priv->rate); 95 + if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp)) 96 + return -EOVERFLOW; 97 + 98 + tokens = div64_u64(tmp, priv->rate); 94 99 } 95 100 96 101 if (tb[NFTA_LIMIT_FLAGS]) {
+5
net/netfilter/nft_nat.c
··· 143 143 struct nft_nat *priv = nft_expr_priv(expr); 144 144 int err; 145 145 146 + if (ctx->family != NFPROTO_IPV4 && 147 + ctx->family != NFPROTO_IPV6 && 148 + ctx->family != NFPROTO_INET) 149 + return -EOPNOTSUPP; 150 + 146 151 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); 147 152 if (err < 0) 148 153 return err;
+5
net/netfilter/nft_rt.c
··· 166 166 const struct nft_rt *priv = nft_expr_priv(expr); 167 167 unsigned int hooks; 168 168 169 + if (ctx->family != NFPROTO_IPV4 && 170 + ctx->family != NFPROTO_IPV6 && 171 + ctx->family != NFPROTO_INET) 172 + return -EOPNOTSUPP; 173 + 169 174 switch (priv->key) { 170 175 case NFT_RT_NEXTHOP4: 171 176 case NFT_RT_NEXTHOP6:
+5
net/netfilter/nft_socket.c
··· 242 242 const struct nft_expr *expr, 243 243 const struct nft_data **data) 244 244 { 245 + if (ctx->family != NFPROTO_IPV4 && 246 + ctx->family != NFPROTO_IPV6 && 247 + ctx->family != NFPROTO_INET) 248 + return -EOPNOTSUPP; 249 + 245 250 return nft_chain_validate_hooks(ctx->chain, 246 251 (1 << NF_INET_PRE_ROUTING) | 247 252 (1 << NF_INET_LOCAL_IN) |
+5 -2
net/netfilter/nft_synproxy.c
··· 186 186 break; 187 187 #endif 188 188 case NFPROTO_INET: 189 - case NFPROTO_BRIDGE: 190 189 err = nf_synproxy_ipv4_init(snet, ctx->net); 191 190 if (err) 192 191 goto nf_ct_failure; ··· 218 219 break; 219 220 #endif 220 221 case NFPROTO_INET: 221 - case NFPROTO_BRIDGE: 222 222 nf_synproxy_ipv4_fini(snet, ctx->net); 223 223 nf_synproxy_ipv6_fini(snet, ctx->net); 224 224 break; ··· 251 253 const struct nft_expr *expr, 252 254 const struct nft_data **data) 253 255 { 256 + if (ctx->family != NFPROTO_IPV4 && 257 + ctx->family != NFPROTO_IPV6 && 258 + ctx->family != NFPROTO_INET) 259 + return -EOPNOTSUPP; 260 + 254 261 return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) | 255 262 (1 << NF_INET_FORWARD)); 256 263 }
+5
net/netfilter/nft_tproxy.c
··· 316 316 const struct nft_expr *expr, 317 317 const struct nft_data **data) 318 318 { 319 + if (ctx->family != NFPROTO_IPV4 && 320 + ctx->family != NFPROTO_IPV6 && 321 + ctx->family != NFPROTO_INET) 322 + return -EOPNOTSUPP; 323 + 319 324 return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING); 320 325 } 321 326
+5
net/netfilter/nft_xfrm.c
··· 235 235 const struct nft_xfrm *priv = nft_expr_priv(expr); 236 236 unsigned int hooks; 237 237 238 + if (ctx->family != NFPROTO_IPV4 && 239 + ctx->family != NFPROTO_IPV6 && 240 + ctx->family != NFPROTO_INET) 241 + return -EOPNOTSUPP; 242 + 238 243 switch (priv->dir) { 239 244 case XFRM_POLICY_IN: 240 245 hooks = (1 << NF_INET_FORWARD) |
+1 -1
net/netlink/af_netlink.c
··· 374 374 if (is_vmalloc_addr(skb->head)) { 375 375 if (!skb->cloned || 376 376 !atomic_dec_return(&(skb_shinfo(skb)->dataref))) 377 - vfree(skb->head); 377 + vfree_atomic(skb->head); 378 378 379 379 skb->head = NULL; 380 380 }
+1 -1
net/rds/af_rds.c
··· 419 419 420 420 rs->rs_rx_traces = trace.rx_traces; 421 421 for (i = 0; i < rs->rs_rx_traces; i++) { 422 - if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) { 422 + if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) { 423 423 rs->rs_rx_traces = 0; 424 424 return -EFAULT; 425 425 }
+8 -1
net/sched/cls_api.c
··· 1560 1560 chain_prev = chain, 1561 1561 chain = __tcf_get_next_chain(block, chain), 1562 1562 tcf_chain_put(chain_prev)) { 1563 + if (chain->tmplt_ops && add) 1564 + chain->tmplt_ops->tmplt_reoffload(chain, true, cb, 1565 + cb_priv); 1563 1566 for (tp = __tcf_get_next_proto(chain, NULL); tp; 1564 1567 tp_prev = tp, 1565 1568 tp = __tcf_get_next_proto(chain, tp), ··· 1578 1575 goto err_playback_remove; 1579 1576 } 1580 1577 } 1578 + if (chain->tmplt_ops && !add) 1579 + chain->tmplt_ops->tmplt_reoffload(chain, false, cb, 1580 + cb_priv); 1581 1581 } 1582 1582 1583 1583 return 0; ··· 3006 3000 ops = tcf_proto_lookup_ops(name, true, extack); 3007 3001 if (IS_ERR(ops)) 3008 3002 return PTR_ERR(ops); 3009 - if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { 3003 + if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump || 3004 + !ops->tmplt_reoffload) { 3010 3005 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); 3011 3006 module_put(ops->owner); 3012 3007 return -EOPNOTSUPP;
+23
net/sched/cls_flower.c
··· 2721 2721 kfree(tmplt); 2722 2722 } 2723 2723 2724 + static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add, 2725 + flow_setup_cb_t *cb, void *cb_priv) 2726 + { 2727 + struct fl_flow_tmplt *tmplt = chain->tmplt_priv; 2728 + struct flow_cls_offload cls_flower = {}; 2729 + 2730 + cls_flower.rule = flow_rule_alloc(0); 2731 + if (!cls_flower.rule) 2732 + return; 2733 + 2734 + cls_flower.common.chain_index = chain->index; 2735 + cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE : 2736 + FLOW_CLS_TMPLT_DESTROY; 2737 + cls_flower.cookie = (unsigned long) tmplt; 2738 + cls_flower.rule->match.dissector = &tmplt->dissector; 2739 + cls_flower.rule->match.mask = &tmplt->mask; 2740 + cls_flower.rule->match.key = &tmplt->dummy_key; 2741 + 2742 + cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv); 2743 + kfree(cls_flower.rule); 2744 + } 2745 + 2724 2746 static int fl_dump_key_val(struct sk_buff *skb, 2725 2747 void *val, int val_type, 2726 2748 void *mask, int mask_type, int len) ··· 3650 3628 .bind_class = fl_bind_class, 3651 3629 .tmplt_create = fl_tmplt_create, 3652 3630 .tmplt_destroy = fl_tmplt_destroy, 3631 + .tmplt_reoffload = fl_tmplt_reoffload, 3653 3632 .tmplt_dump = fl_tmplt_dump, 3654 3633 .get_exts = fl_get_exts, 3655 3634 .owner = THIS_MODULE,
+1 -1
net/smc/smc_diag.c
··· 164 164 } 165 165 if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd && 166 166 (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) && 167 - !list_empty(&smc->conn.lgr->list)) { 167 + !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) { 168 168 struct smc_connection *conn = &smc->conn; 169 169 struct smcd_diag_dmbinfo dinfo; 170 170 struct smcd_dev *smcd = conn->lgr->smcd;
-1
net/wireless/Kconfig
··· 206 206 depends on KUNIT 207 207 depends on CFG80211 208 208 default KUNIT_ALL_TESTS 209 - depends on !KERNEL_6_2 210 209 help 211 210 Enable this option to test cfg80211 functions with kunit. 212 211
+1
net/wireless/nl80211.c
··· 4020 4020 } 4021 4021 wiphy_unlock(&rdev->wiphy); 4022 4022 4023 + if_start = 0; 4023 4024 wp_idx++; 4024 4025 } 4025 4026 out:
+8 -4
net/xdp/xsk.c
··· 167 167 contd = XDP_PKT_CONTD; 168 168 169 169 err = __xsk_rcv_zc(xs, xskb, len, contd); 170 - if (err || likely(!frags)) 171 - goto out; 170 + if (err) 171 + goto err; 172 + if (likely(!frags)) 173 + return 0; 172 174 173 175 xskb_list = &xskb->pool->xskb_list; 174 176 list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) { ··· 179 177 len = pos->xdp.data_end - pos->xdp.data; 180 178 err = __xsk_rcv_zc(xs, pos, len, contd); 181 179 if (err) 182 - return err; 180 + goto err; 183 181 list_del(&pos->xskb_list_node); 184 182 } 185 183 186 - out: 184 + return 0; 185 + err: 186 + xsk_buff_free(xdp); 187 187 return err; 188 188 } 189 189
+1
net/xdp/xsk_buff_pool.c
··· 555 555 556 556 xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM; 557 557 xskb->xdp.data_meta = xskb->xdp.data; 558 + xskb->xdp.flags = 0; 558 559 559 560 if (pool->dma_need_sync) { 560 561 dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
+4 -4
tools/testing/selftests/drivers/net/bonding/bond_options.sh
··· 162 162 local mode=$1 163 163 164 164 for primary_reselect in 0 1 2; do 165 - prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect" 165 + prio_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect" 166 166 log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect" 167 167 done 168 168 } ··· 178 178 fi 179 179 180 180 for primary_reselect in 0 1 2; do 181 - prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect" 181 + prio_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect" 182 182 log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect" 183 183 done 184 184 } ··· 194 194 195 195 for mode in $modes; do 196 196 prio_miimon $mode 197 - prio_arp $mode 198 - prio_ns $mode 199 197 done 198 + prio_arp "active-backup" 199 + prio_ns "active-backup" 200 200 } 201 201 202 202 arp_validate_test()
+1 -1
tools/testing/selftests/drivers/net/bonding/settings
··· 1 - timeout=120 1 + timeout=1200
+9
tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
··· 270 270 echo 1 > $NSIM_DEV_SYS/new_port 271 271 fi 272 272 NSIM_NETDEV=`get_netdev_name old_netdevs` 273 + ifconfig $NSIM_NETDEV up 273 274 274 275 msg="new NIC device created" 275 276 exp0=( 0 0 0 0 ) ··· 432 431 fi 433 432 434 433 echo $port > $NSIM_DEV_SYS/new_port 434 + NSIM_NETDEV=`get_netdev_name old_netdevs` 435 435 ifconfig $NSIM_NETDEV up 436 436 437 437 overflow_table0 "overflow NIC table" ··· 490 488 fi 491 489 492 490 echo $port > $NSIM_DEV_SYS/new_port 491 + NSIM_NETDEV=`get_netdev_name old_netdevs` 493 492 ifconfig $NSIM_NETDEV up 494 493 495 494 overflow_table0 "overflow NIC table" ··· 547 544 fi 548 545 549 546 echo $port > $NSIM_DEV_SYS/new_port 547 + NSIM_NETDEV=`get_netdev_name old_netdevs` 550 548 ifconfig $NSIM_NETDEV up 551 549 552 550 overflow_table0 "destroy NIC" ··· 577 573 fi 578 574 579 575 echo $port > $NSIM_DEV_SYS/new_port 576 + NSIM_NETDEV=`get_netdev_name old_netdevs` 580 577 ifconfig $NSIM_NETDEV up 581 578 582 579 msg="create VxLANs v6" ··· 638 633 fi 639 634 640 635 echo $port > $NSIM_DEV_SYS/new_port 636 + NSIM_NETDEV=`get_netdev_name old_netdevs` 641 637 ifconfig $NSIM_NETDEV up 642 638 643 639 echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error ··· 694 688 fi 695 689 696 690 echo $port > $NSIM_DEV_SYS/new_port 691 + NSIM_NETDEV=`get_netdev_name old_netdevs` 697 692 ifconfig $NSIM_NETDEV up 698 693 699 694 msg="create VxLANs v6" ··· 754 747 fi 755 748 756 749 echo $port > $NSIM_DEV_SYS/new_port 750 + NSIM_NETDEV=`get_netdev_name old_netdevs` 757 751 ifconfig $NSIM_NETDEV up 758 752 759 753 msg="create VxLANs v6" ··· 885 877 886 878 echo 2 > $NSIM_DEV_SYS/del_port 887 879 echo 2 > $NSIM_DEV_SYS/new_port 880 + NSIM_NETDEV=`get_netdev_name old_netdevs` 888 881 check_tables 889 882 890 883 msg="replace VxLAN in overflow table"
+28
tools/testing/selftests/net/config
··· 1 1 CONFIG_USER_NS=y 2 2 CONFIG_NET_NS=y 3 + CONFIG_BONDING=m 3 4 CONFIG_BPF_SYSCALL=y 4 5 CONFIG_TEST_BPF=m 5 6 CONFIG_NUMA=y ··· 15 14 CONFIG_NET_IPVTI=y 16 15 CONFIG_IPV6_VTI=y 17 16 CONFIG_DUMMY=y 17 + CONFIG_BRIDGE_VLAN_FILTERING=y 18 18 CONFIG_BRIDGE=y 19 + CONFIG_CRYPTO_CHACHA20POLY1305=m 19 20 CONFIG_VLAN_8021Q=y 20 21 CONFIG_IFB=y 22 + CONFIG_INET_DIAG=y 23 + CONFIG_IP_GRE=m 21 24 CONFIG_NETFILTER=y 22 25 CONFIG_NETFILTER_ADVANCED=y 23 26 CONFIG_NF_CONNTRACK=m ··· 30 25 CONFIG_IP_NF_IPTABLES=m 31 26 CONFIG_IP6_NF_NAT=m 32 27 CONFIG_IP_NF_NAT=m 28 + CONFIG_IPV6_GRE=m 29 + CONFIG_IPV6_SEG6_LWTUNNEL=y 30 + CONFIG_L2TP_ETH=m 31 + CONFIG_L2TP_IP=m 32 + CONFIG_L2TP=m 33 + CONFIG_L2TP_V3=y 34 + CONFIG_MACSEC=m 35 + CONFIG_MACVLAN=y 36 + CONFIG_MACVTAP=y 37 + CONFIG_MPLS=y 38 + CONFIG_MPTCP=y 33 39 CONFIG_NF_TABLES=m 34 40 CONFIG_NF_TABLES_IPV6=y 35 41 CONFIG_NF_TABLES_IPV4=y 36 42 CONFIG_NFT_NAT=m 43 + CONFIG_NET_ACT_GACT=m 44 + CONFIG_NET_CLS_BASIC=m 45 + CONFIG_NET_CLS_U32=m 46 + CONFIG_NET_IPGRE_DEMUX=m 47 + CONFIG_NET_IPGRE=m 48 + CONFIG_NET_SCH_FQ_CODEL=m 49 + CONFIG_NET_SCH_HTB=m 37 50 CONFIG_NET_SCH_FQ=m 38 51 CONFIG_NET_SCH_ETF=m 39 52 CONFIG_NET_SCH_NETEM=y 53 + CONFIG_PSAMPLE=m 54 + CONFIG_TCP_MD5SIG=y 40 55 CONFIG_TEST_BLACKHOLE_DEV=m 41 56 CONFIG_KALLSYMS=y 57 + CONFIG_TLS=m 42 58 CONFIG_TRACEPOINTS=y 43 59 CONFIG_NET_DROP_MONITOR=m 44 60 CONFIG_NETDEVSIM=m ··· 74 48 CONFIG_IPV6_IOAM6_LWTUNNEL=y 75 49 CONFIG_CRYPTO_SM4_GENERIC=y 76 50 CONFIG_AMT=m 51 + CONFIG_TUN=y 77 52 CONFIG_VXLAN=m 78 53 CONFIG_IP_SCTP=m 79 54 CONFIG_NETFILTER_XT_MATCH_POLICY=m 80 55 CONFIG_CRYPTO_ARIA=y 56 + CONFIG_XFRM_INTERFACE=m
+5 -1
tools/testing/selftests/net/rps_default_mask.sh
··· 1 - #!/bin/sh 1 + #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 4 readonly ksft_skip=4 ··· 33 33 34 34 rps_mask=$($cmd /sys/class/net/$dev_name/queues/rx-0/rps_cpus) 35 35 printf "%-60s" "$msg" 36 + 37 + # In case there is more than 32 CPUs we need to remove commas from masks 38 + rps_mask=${rps_mask//,} 39 + expected_rps_mask=${expected_rps_mask//,} 36 40 if [ $rps_mask -eq $expected_rps_mask ]; then 37 41 echo "[ ok ]" 38 42 else
+50 -18
tools/testing/selftests/net/so_incoming_cpu.c
··· 3 3 #define _GNU_SOURCE 4 4 #include <sched.h> 5 5 6 + #include <fcntl.h> 7 + 6 8 #include <netinet/in.h> 7 9 #include <sys/socket.h> 8 10 #include <sys/sysinfo.h> 9 11 10 12 #include "../kselftest_harness.h" 11 13 12 - #define CLIENT_PER_SERVER 32 /* More sockets, more reliable */ 13 - #define NR_SERVER self->nproc 14 - #define NR_CLIENT (CLIENT_PER_SERVER * NR_SERVER) 15 - 16 14 FIXTURE(so_incoming_cpu) 17 15 { 18 - int nproc; 19 16 int *servers; 20 17 union { 21 18 struct sockaddr addr; ··· 53 56 .when_to_set = AFTER_ALL_LISTEN, 54 57 }; 55 58 59 + static void write_sysctl(struct __test_metadata *_metadata, 60 + char *filename, char *string) 61 + { 62 + int fd, len, ret; 63 + 64 + fd = open(filename, O_WRONLY); 65 + ASSERT_NE(fd, -1); 66 + 67 + len = strlen(string); 68 + ret = write(fd, string, len); 69 + ASSERT_EQ(ret, len); 70 + } 71 + 72 + static void setup_netns(struct __test_metadata *_metadata) 73 + { 74 + ASSERT_EQ(unshare(CLONE_NEWNET), 0); 75 + ASSERT_EQ(system("ip link set lo up"), 0); 76 + 77 + write_sysctl(_metadata, "/proc/sys/net/ipv4/ip_local_port_range", "10000 60001"); 78 + write_sysctl(_metadata, "/proc/sys/net/ipv4/tcp_tw_reuse", "0"); 79 + } 80 + 81 + #define NR_PORT (60001 - 10000 - 1) 82 + #define NR_CLIENT_PER_SERVER_DEFAULT 32 83 + static int nr_client_per_server, nr_server, nr_client; 84 + 56 85 FIXTURE_SETUP(so_incoming_cpu) 57 86 { 58 - self->nproc = get_nprocs(); 59 - ASSERT_LE(2, self->nproc); 87 + setup_netns(_metadata); 60 88 61 - self->servers = malloc(sizeof(int) * NR_SERVER); 89 + nr_server = get_nprocs(); 90 + ASSERT_LE(2, nr_server); 91 + 92 + if (NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT) 93 + nr_client_per_server = NR_CLIENT_PER_SERVER_DEFAULT; 94 + else 95 + nr_client_per_server = NR_PORT / nr_server; 96 + 97 + nr_client = nr_client_per_server * nr_server; 98 + 99 + self->servers = malloc(sizeof(int) * nr_server); 62 100 ASSERT_NE(self->servers, NULL); 63 101 64 102 self->in_addr.sin_family = AF_INET; ··· 106 
74 { 107 75 int i; 108 76 109 - for (i = 0; i < NR_SERVER; i++) 77 + for (i = 0; i < nr_server; i++) 110 78 close(self->servers[i]); 111 79 112 80 free(self->servers); ··· 142 110 if (variant->when_to_set == BEFORE_LISTEN) 143 111 set_so_incoming_cpu(_metadata, fd, cpu); 144 112 145 - /* We don't use CLIENT_PER_SERVER here not to block 113 + /* We don't use nr_client_per_server here not to block 146 114 * this test at connect() if SO_INCOMING_CPU is broken. 147 115 */ 148 - ret = listen(fd, NR_CLIENT); 116 + ret = listen(fd, nr_client); 149 117 ASSERT_EQ(ret, 0); 150 118 151 119 if (variant->when_to_set == AFTER_LISTEN) ··· 160 128 { 161 129 int i, ret; 162 130 163 - for (i = 0; i < NR_SERVER; i++) { 131 + for (i = 0; i < nr_server; i++) { 164 132 self->servers[i] = create_server(_metadata, self, variant, i); 165 133 166 134 if (i == 0) { ··· 170 138 } 171 139 172 140 if (variant->when_to_set == AFTER_ALL_LISTEN) { 173 - for (i = 0; i < NR_SERVER; i++) 141 + for (i = 0; i < nr_server; i++) 174 142 set_so_incoming_cpu(_metadata, self->servers[i], i); 175 143 } 176 144 } ··· 181 149 cpu_set_t cpu_set; 182 150 int i, j, fd, ret; 183 151 184 - for (i = 0; i < NR_SERVER; i++) { 152 + for (i = 0; i < nr_server; i++) { 185 153 CPU_ZERO(&cpu_set); 186 154 187 155 CPU_SET(i, &cpu_set); ··· 194 162 ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set); 195 163 ASSERT_EQ(ret, 0); 196 164 197 - for (j = 0; j < CLIENT_PER_SERVER; j++) { 165 + for (j = 0; j < nr_client_per_server; j++) { 198 166 fd = socket(AF_INET, SOCK_STREAM, 0); 199 167 ASSERT_NE(fd, -1); 200 168 ··· 212 180 int i, j, fd, cpu, ret, total = 0; 213 181 socklen_t len = sizeof(int); 214 182 215 - for (i = 0; i < NR_SERVER; i++) { 216 - for (j = 0; j < CLIENT_PER_SERVER; j++) { 183 + for (i = 0; i < nr_server; i++) { 184 + for (j = 0; j < nr_client_per_server; j++) { 217 185 /* If we see -EAGAIN here, SO_INCOMING_CPU is broken */ 218 186 fd = accept(self->servers[i], &self->addr, &self->addrlen); 219 187 
ASSERT_NE(fd, -1); ··· 227 195 } 228 196 } 229 197 230 - ASSERT_EQ(total, NR_CLIENT); 198 + ASSERT_EQ(total, nr_client); 231 199 TH_LOG("SO_INCOMING_CPU is very likely to be " 232 200 "working correctly with %d sockets.", total); 233 201 }