
Merge tag 'net-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
"Including fixes from netfilter.

Current release - regressions:

- sched: act_pedit: free pedit keys on bail from offset check

Current release - new code bugs:

- pds_core:
- Kconfig fixes (DEBUGFS and AUXILIARY_BUS)
- fix mutex double unlock in error path

Previous releases - regressions:

- sched: cls_api: remove block_cb from driver_list before freeing

- nf_tables: fix ct untracked match breakage

- eth: mtk_eth_soc: drop generic vlan rx offload

- sched: flower: fix error handler on replace

Previous releases - always broken:

- tcp: fix skb_copy_ubufs() vs BIG TCP

- ipv6: fix skb hash for some RST packets

- af_packet: don't send zero-byte data in packet_sendmsg_spkt()

- rxrpc: timeout handling fixes after moving client call connection
to the I/O thread

- ixgbe: fix panic during XDP_TX with > 64 CPUs

- igc: RMW the SRRCTL register to prevent losing timestamp config

- dsa: mt7530: fix corrupt frames using TRGMII on 40 MHz XTAL MT7621

- r8152:
- fix flow control issue of RTL8156A
- fix the poor throughput for 2.5G devices
- move setting r8153b_rx_agg_chg_indicate() to fix coalescing
- enable autosuspend

- ncsi: clear Tx enable mode when handling a Config required AEN

- octeontx2-pf: macsec: fixes for CN10KB ASIC rev

Misc:

- 9p: remove INET dependency"

* tag 'net-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (69 commits)
net: bcmgenet: Remove phy_stop() from bcmgenet_netif_stop()
pds_core: fix mutex double unlock in error path
net/sched: flower: fix error handler on replace
Revert "net/sched: flower: Fix wrong handle assignment during filter change"
net/sched: flower: fix filter idr initialization
net: fec: correct the counting of XDP sent frames
bonding: add xdp_features support
net: enetc: check the index of the SFI rather than the handle
sfc: Add back mailing list
virtio_net: suppress cpu stall when free_unused_bufs
ice: block LAN in case of VF to VF offload
net: dsa: mt7530: fix network connectivity with multiple CPU ports
net: dsa: mt7530: fix corrupt frames using trgmii on 40 MHz XTAL MT7621
9p: Remove INET dependency
netfilter: nf_tables: fix ct untracked match breakage
af_packet: Don't send zero-byte data in packet_sendmsg_spkt().
igc: read before write to SRRCTL register
pds_core: add AUXILIARY_BUS and NET_DEVLINK to Kconfig
pds_core: remove CONFIG_DEBUG_FS from makefile
ionic: catch failure from devlink_alloc
...

+734 -406
+1
MAINTAINERS
···
 M: Edward Cree <ecree.xilinx@gmail.com>
 M: Martin Habets <habetsm.xilinx@gmail.com>
 L: netdev@vger.kernel.org
+L: linux-net-drivers@amd.com
 S: Supported
 F: Documentation/networking/devlink/sfc.rst
 F: drivers/net/ethernet/sfc/
+2 -13
drivers/isdn/mISDN/dsp_cmx.c
···
 /*#define CMX_DELAY_DEBUG * gives rx-buffer delay overview */
 /*#define CMX_TX_DEBUG * massive read/write on tx-buffer with content */

-static inline int
-count_list_member(struct list_head *head)
-{
-    int cnt = 0;
-    struct list_head *m;
-
-    list_for_each(m, head)
-        cnt++;
-    return cnt;
-}
-
 /*
  * debug cmx memory structure
  */
···
     mustmix = 0;
     members = 0;
     if (conf) {
-        members = count_list_member(&conf->mlist);
+        members = list_count_nodes(&conf->mlist);
 #ifdef CMX_CONF_DEBUG
         if (conf->software && members > 1)
 #else
···
     /* loop all members that require conference mixing */
     list_for_each_entry(conf, &conf_ilist, list) {
         /* count members and check hardware */
-        members = count_list_member(&conf->mlist);
+        members = list_count_nodes(&conf->mlist);
 #ifdef CMX_CONF_DEBUG
         if (conf->software && members > 1) {
 #else
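The mISDN cleanup above drops a hand-rolled list counter in favor of list_count_nodes(), the generic helper in <linux/list.h> that walks a list and returns its length. A minimal userspace sketch of the same refactor, using a simplified list type rather than the kernel's, shows why the helper is a drop-in replacement:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
    new->prev = head->prev;
    new->next = head;
    head->prev->next = new;
    head->prev = new;
}

/* open-coded walk, as in the old count_list_member() */
static size_t count_by_hand(const struct list_head *head)
{
    const struct list_head *m;
    size_t cnt = 0;

    for (m = head->next; m != head; m = m->next)
        cnt++;
    return cnt;
}

int main(void)
{
    struct list_head head = LIST_HEAD_INIT(head);
    struct list_head a, b, c;

    list_add_tail(&a, &head);
    list_add_tail(&b, &head);
    list_add_tail(&c, &head);

    /* the in-kernel code now calls list_count_nodes(&head) instead */
    printf("%zu members\n", count_by_hand(&head)); /* prints 3 */
    return 0;
}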
+29
drivers/net/bonding/bond_main.c
···
         bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 }

+void bond_xdp_set_features(struct net_device *bond_dev)
+{
+    struct bonding *bond = netdev_priv(bond_dev);
+    xdp_features_t val = NETDEV_XDP_ACT_MASK;
+    struct list_head *iter;
+    struct slave *slave;
+
+    ASSERT_RTNL();
+
+    if (!bond_xdp_check(bond)) {
+        xdp_clear_features_flag(bond_dev);
+        return;
+    }
+
+    bond_for_each_slave(bond, slave, iter)
+        val &= slave->dev->xdp_features;
+
+    xdp_set_features_flag(bond_dev, val);
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                  struct netlink_ext_ack *extack)
···
         bpf_prog_inc(bond->xdp_prog);
     }

+    bond_xdp_set_features(bond_dev);
+
     slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
                bond_is_active_slave(new_slave) ? "an active" : "a backup",
                new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
···
     if (!netif_is_bond_master(slave_dev))
         slave_dev->priv_flags &= ~IFF_BONDING;

+    bond_xdp_set_features(bond_dev);
     kobject_put(&slave->kobj);

     return 0;
···
     case NETDEV_RESEND_IGMP:
         /* Propagate to master device */
         call_netdevice_notifiers(event, slave->bond->dev);
+        break;
+    case NETDEV_XDP_FEAT_CHANGE:
+        bond_xdp_set_features(bond_dev);
         break;
     default:
         break;
···
     if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
         bond_dev->features |= BOND_XFRM_FEATURES;
 #endif /* CONFIG_XFRM_OFFLOAD */
+
+    if (bond_xdp_check(bond))
+        bond_dev->xdp_features = NETDEV_XDP_ACT_MASK;
 }

 /* Destroy a bonding device.
+2
drivers/net/bonding/bond_options.c
···
         netdev_update_features(bond->dev);
     }

+    bond_xdp_set_features(bond->dev);
+
     return 0;
 }
+9 -5
drivers/net/dsa/mt7530.c
···
     else
         ssc_delta = 0x87;
     if (priv->id == ID_MT7621) {
-        /* PLL frequency: 150MHz: 1.2GBit */
+        /* PLL frequency: 125MHz: 1.0GBit */
         if (xtal == HWTRAP_XTAL_40MHZ)
-            ncpo1 = 0x0780;
+            ncpo1 = 0x0640;
         if (xtal == HWTRAP_XTAL_25MHZ)
             ncpo1 = 0x0a00;
     } else { /* PLL frequency: 250MHz: 2.0Gbit */
···
     mt7530_write(priv, MT7530_PVC_P(port),
                  PORT_SPEC_TAG);

-    /* Disable flooding by default */
-    mt7530_rmw(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK,
-               BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port)));
+    /* Enable flooding on the CPU port */
+    mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
+               UNU_FFP(BIT(port)));

     /* Set CPU port number */
     if (priv->id == ID_MT7621)
···

     /* Enable and reset MIB counters */
     mt7530_mib_reset(ds);
+
+    /* Disable flooding on all ports */
+    mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
+                 UNU_FFP_MASK);

     for (i = 0; i < MT7530_NUM_PORTS; i++) {
         /* Disable forwarding by default on all ports */
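The new ncpo1 value is arithmetically consistent with the surrounding constants if the TRGMII PLL frequency is ncpo1 * f_xtal / 512 (a relation inferred from these constants, not quoted from a datasheet): the old 0x0780 (1920) gave 1920 * 40 MHz / 512 = 150 MHz, while the new 0x0640 (1600) gives 1600 * 40 MHz / 512 = 125 MHz, matching both the updated comment and the existing 25 MHz entry (0x0a00 = 2560, and 2560 * 25 MHz / 512 = 125 MHz).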
+1
drivers/net/dsa/mv88e6xxx/chip.c
···
     .set_cpu_port = mv88e6095_g1_set_cpu_port,
     .set_egress_port = mv88e6095_g1_set_egress_port,
     .watchdog_ops = &mv88e6390_watchdog_ops,
+    .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
     .reset = mv88e6352_g1_reset,
     .vtu_getnext = mv88e6185_g1_vtu_getnext,
     .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+2
drivers/net/ethernet/amd/Kconfig
···
 config PDS_CORE
     tristate "AMD/Pensando Data Systems Core Device Support"
     depends on 64BIT && PCI
+    select AUXILIARY_BUS
+    select NET_DEVLINK
     help
       This enables the support for the AMD/Pensando Core device family of
       adapters. More specific information on this driver can be
+1 -2
drivers/net/ethernet/amd/pds_core/Makefile
···
     dev.o \
     adminq.o \
     core.o \
+    debugfs.o \
     fw.o
-
-pds_core-$(CONFIG_DEBUG_FS) += debugfs.o
+13 -8
drivers/net/ethernet/amd/pds_core/main.c
···
     set_bit(PDSC_S_FW_DEAD, &pdsc->state);

     err = pdsc_setup(pdsc, PDSC_SETUP_INIT);
-    if (err)
+    if (err) {
+        mutex_unlock(&pdsc->config_lock);
         goto err_out_unmap_bars;
+    }
+
     err = pdsc_start(pdsc);
-    if (err)
+    if (err) {
+        mutex_unlock(&pdsc->config_lock);
         goto err_out_teardown;
+    }

     mutex_unlock(&pdsc->config_lock);
···
     err = devl_params_register(dl, pdsc_dl_params,
                                ARRAY_SIZE(pdsc_dl_params));
     if (err) {
+        devl_unlock(dl);
         dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
                  ERR_PTR(err));
-        goto err_out_unlock_dl;
+        goto err_out_stop;
     }

     hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
     if (IS_ERR(hr)) {
+        devl_unlock(dl);
         dev_warn(pdsc->dev, "Failed to create fw reporter: %pe\n", hr);
         err = PTR_ERR(hr);
         goto err_out_unreg_params;
···
     return 0;

 err_out_unreg_params:
-    devl_params_unregister(dl, pdsc_dl_params,
-                           ARRAY_SIZE(pdsc_dl_params));
-err_out_unlock_dl:
-    devl_unlock(dl);
+    devlink_params_unregister(dl, pdsc_dl_params,
+                              ARRAY_SIZE(pdsc_dl_params));
+err_out_stop:
     pdsc_stop(pdsc);
 err_out_teardown:
     pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
 err_out_unmap_bars:
-    mutex_unlock(&pdsc->config_lock);
     del_timer_sync(&pdsc->wdtimer);
     if (pdsc->wq)
         destroy_workqueue(pdsc->wq);
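The pds_core fix works by releasing config_lock at each early exit instead of in the shared unwind path, because later error paths jump into that path after the lock has already been dropped. A minimal pthreads sketch of the pattern (setup_step()/start_step() are hypothetical stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static int setup_step(void) { return -1; } /* pretend setup fails */
static int start_step(void) { return 0; }

static int probe(void)
{
    int err;

    pthread_mutex_lock(&cfg_lock);

    err = setup_step();
    if (err) {
        /* unlock here: the shared unwind label below must not touch
         * the lock, or a later goto would unlock it a second time */
        pthread_mutex_unlock(&cfg_lock);
        goto err_out;
    }

    err = start_step();
    if (err) {
        pthread_mutex_unlock(&cfg_lock);
        goto err_out;
    }

    pthread_mutex_unlock(&cfg_lock);
    /* failures after this point also reach err_out, lock already free */
    return 0;

err_out:
    /* cleanup shared by locked-phase and unlocked-phase errors */
    return err;
}

int main(void)
{
    printf("probe: %d\n", probe());
    return 0;
}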
+2
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
···
     }
 }

+#ifdef CONFIG_PM
 static int aq_suspend_common(struct device *dev)
 {
     struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
···
     .restore = aq_pm_resume_restore,
     .thaw = aq_pm_thaw,
 };
+#endif

 static struct pci_driver aq_pci_ops = {
     .name = AQ_CFG_DRV_NAME,
+2 -2
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
···
 static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
                                 struct statistics_s *stats)
 {
-    struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+    struct hw_atl2_priv *priv = self->priv;
     struct aq_stats_s *cs = &self->curr_stats;
     struct aq_stats_s curr_stats = *cs;
     bool corrupted_stats = false;
···
 static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
                                 struct statistics_s *stats)
 {
-    struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+    struct hw_atl2_priv *priv = self->priv;
     struct aq_stats_s *cs = &self->curr_stats;
     struct aq_stats_s curr_stats = *cs;
     bool corrupted_stats = false;
-1
drivers/net/ethernet/broadcom/genet/bcmgenet.c
···
     /* Disable MAC transmit. TX DMA disabled must be done before this */
     umac_enable_set(priv, CMD_TX_EN, false);

-    phy_stop(dev->phydev);
     bcmgenet_disable_rx_napi(priv);
     bcmgenet_intr_disable(priv);
+1 -1
drivers/net/ethernet/freescale/enetc/enetc_qos.c
···
     int index;

     index = enetc_get_free_index(priv);
-    if (sfi->handle < 0) {
+    if (index < 0) {
         NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
         err = -ENOSPC;
         goto free_fmi;
+9 -4
drivers/net/ethernet/freescale/fec_main.c
···
     entries_free = fec_enet_get_free_txdesc_num(txq);
     if (entries_free < MAX_SKB_FRAGS + 1) {
         netdev_err(fep->netdev, "NOT enough BD for SG!\n");
-        return NETDEV_TX_OK;
+        xdp_return_frame(frame);
+        return NETDEV_TX_BUSY;
     }

     /* Fill in a Tx ring entry */
···
     struct fec_enet_private *fep = netdev_priv(dev);
     struct fec_enet_priv_tx_q *txq;
     int cpu = smp_processor_id();
+    unsigned int sent_frames = 0;
     struct netdev_queue *nq;
     unsigned int queue;
     int i;
···
     __netif_tx_lock(nq, cpu);

-    for (i = 0; i < num_frames; i++)
-        fec_enet_txq_xmit_frame(fep, txq, frames[i]);
+    for (i = 0; i < num_frames; i++) {
+        if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) != 0)
+            break;
+        sent_frames++;
+    }

     /* Make sure the update to bdp and tx_skbuff are performed. */
     wmb();
···
     __netif_tx_unlock(nq);

-    return num_frames;
+    return sent_frames;
 }

 static const struct net_device_ops fec_netdev_ops = {
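The fec change brings the driver in line with the .ndo_xdp_xmit contract: the return value is the number of frames actually consumed, and the caller handles the rest, so claiming all of num_frames after a partial send corrupts the accounting. A userspace sketch of the fixed loop shape (xmit_one() is a hypothetical stand-in for fec_enet_txq_xmit_frame()):

#include <stdio.h>

/* pretend the Tx ring runs out of descriptors after 3 frames */
static int xmit_one(int frame)
{
    return frame >= 3 ? -1 : 0;
}

/* stop at the first failure and report only the frames actually
 * queued, so the caller can free or retry the remainder */
static int xdp_xmit(int num_frames)
{
    int sent_frames = 0;
    int i;

    for (i = 0; i < num_frames; i++) {
        if (xmit_one(i) != 0)
            break;
        sent_frames++;
    }
    return sent_frames;
}

int main(void)
{
    printf("sent %d of 5\n", xdp_xmit(5)); /* sent 3 of 5 */
    return 0;
}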
+2 -1
drivers/net/ethernet/intel/ice/ice_tc_lib.c
···
      * results into order of switch rule evaluation.
      */
     rule_info.priority = 7;
+    rule_info.flags_info.act_valid = true;

     if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
         rule_info.sw_act.flag |= ICE_FLTR_RX;
         rule_info.sw_act.src = hw->pf_id;
         rule_info.rx = true;
+        rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
     } else {
         rule_info.sw_act.flag |= ICE_FLTR_TX;
         rule_info.sw_act.src = vsi->idx;
         rule_info.rx = false;
         rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
-        rule_info.flags_info.act_valid = true;
     }

     /* specify the cookie as filter_rule_id */
+8 -3
drivers/net/ethernet/intel/igc/igc_base.h
···
 #define IGC_RXDCTL_SWFLUSH    0x04000000 /* Receive Software Flush */

 /* SRRCTL bit definitions */
-#define IGC_SRRCTL_BSIZEPKT_SHIFT     10 /* Shift _right_ */
-#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2  /* Shift _left_ */
-#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IGC_SRRCTL_BSIZEPKT_MASK  GENMASK(6, 0)
+#define IGC_SRRCTL_BSIZEPKT(x)    FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \
+                                  (x) / 1024) /* in 1 KB resolution */
+#define IGC_SRRCTL_BSIZEHDR_MASK  GENMASK(13, 8)
+#define IGC_SRRCTL_BSIZEHDR(x)    FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \
+                                  (x) / 64) /* in 64 bytes resolution */
+#define IGC_SRRCTL_DESCTYPE_MASK  GENMASK(27, 25)
+#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1)

 #endif /* _IGC_BASE_H */
+5 -2
drivers/net/ethernet/intel/igc/igc_main.c
···
     else
         buf_size = IGC_RXBUFFER_2048;

-    srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
-    srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+    srrctl = rd32(IGC_SRRCTL(reg_idx));
+    srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
+                IGC_SRRCTL_DESCTYPE_MASK);
+    srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
+    srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
     srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

     wr32(IGC_SRRCTL(reg_idx), srrctl);
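The igc fix turns a blind SRRCTL write into a read-modify-write so that fields the function does not own, such as the timestamp configuration, survive a buffer-size update. A userspace sketch of the mask-based RMW idiom, with local stand-ins for the kernel's GENMASK()/FIELD_PREP() and hypothetical field positions:

#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers */
#define GENMASK(h, l)          (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)  (((val) << __builtin_ctz(mask)) & (mask))

#define BSIZEPKT_MASK  GENMASK(6, 0)    /* packet buffer size, 1 KB units */
#define BSIZEHDR_MASK  GENMASK(13, 8)   /* header buffer size, 64 B units */
#define DESCTYPE_MASK  GENMASK(27, 25)  /* descriptor type */

int main(void)
{
    uint32_t srrctl = 0x80000000; /* pretend bit 31 holds timestamp cfg */

    /* read-modify-write: clear only the fields we own, keep the rest */
    srrctl &= ~(BSIZEPKT_MASK | BSIZEHDR_MASK | DESCTYPE_MASK);
    srrctl |= FIELD_PREP(BSIZEPKT_MASK, 2048 / 1024);
    srrctl |= FIELD_PREP(BSIZEHDR_MASK, 256 / 64);
    srrctl |= FIELD_PREP(DESCTYPE_MASK, 1);

    printf("srrctl = 0x%08x\n", (unsigned)srrctl); /* bit 31 still set */
    return 0;
}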
-3
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
···
     adapter->q_vector[v_idx] = NULL;
     __netif_napi_del(&q_vector->napi);

-    if (static_key_enabled(&ixgbe_xdp_locking_key))
-        static_branch_dec(&ixgbe_xdp_locking_key);
-
     /*
      * after a call to __netif_napi_del() napi may still be used and
      * ixgbe_get_stats64() might access the rings on this vector,
+4 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
     set_bit(0, adapter->fwd_bitmask);
     set_bit(__IXGBE_DOWN, &adapter->state);

+    /* enable locking for XDP_TX if we have more CPUs than queues */
+    if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+        static_branch_enable(&ixgbe_xdp_locking_key);
+
     return 0;
 }
···
      */
     if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
         return -ENOMEM;
-    else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
-        static_branch_inc(&ixgbe_xdp_locking_key);

     old_prog = xchg(&adapter->xdp_prog, prog);
     need_reset = (!!prog != !!old_prog);
+8
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
···
     linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
     linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
     linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
+
+    if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
+        dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
+                linfo->lmac_type_id, cgx->cgx_id, lmac_id);
+        strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
+        return;
+    }
+
     lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
     strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
 }
+4 -1
drivers/net/ethernet/marvell/octeontx2/af/mbox.c
···
  */
 int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
                            struct pci_dev *pdev, void *reg_base,
-                           int direction, int ndevs)
+                           int direction, int ndevs, unsigned long *pf_bmap)
 {
     struct otx2_mbox_dev *mdev;
     int devid, err;
···
     mbox->hwbase = hwbase[0];

     for (devid = 0; devid < ndevs; devid++) {
+        if (!test_bit(devid, pf_bmap))
+            continue;
+
         mdev = &mbox->dev[devid];
         mdev->mbase = hwbase[devid];
         mdev->hwbase = hwbase[devid];
+13 -6
drivers/net/ethernet/marvell/octeontx2/af/mbox.h
···
 int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
                    struct pci_dev *pdev, void __force *reg_base,
                    int direction, int ndevs);
+
 int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
                            struct pci_dev *pdev, void __force *reg_base,
-                           int direction, int ndevs);
+                           int direction, int ndevs, unsigned long *bmap);
 void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
 int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
 int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
···
 M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats,            \
                 npc_mcam_get_stats_req,                        \
                 npc_mcam_get_stats_rsp)                        \
-M(NPC_GET_SECRET_KEY, 0x6013, npc_get_secret_key,              \
-                npc_get_secret_key_req,                        \
-                npc_get_secret_key_rsp)                        \
+M(NPC_GET_FIELD_HASH_INFO, 0x6013, npc_get_field_hash_info,    \
+                npc_get_field_hash_info_req,                   \
+                npc_get_field_hash_info_rsp)                   \
 M(NPC_GET_FIELD_STATUS, 0x6014, npc_get_field_status,          \
                 npc_get_field_status_req,                      \
                 npc_get_field_status_rsp)                      \
···
     u8 stat_ena; /* enabled */
 };

-struct npc_get_secret_key_req {
+struct npc_get_field_hash_info_req {
     struct mbox_msghdr hdr;
     u8 intf;
 };

-struct npc_get_secret_key_rsp {
+struct npc_get_field_hash_info_rsp {
     struct mbox_msghdr hdr;
     u64 secret_key[3];
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+    /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+    u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+    /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+    u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
 };

 enum ptp_op {
+54 -56
drivers/net/ethernet/marvell/octeontx2/af/mcs.c
···
     for (reg_id = 0; reg_id < 4; reg_id++) {
         reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
         mcs_reg_write(mcs, reg, data[reg_id]);
+    }
+    for (reg_id = 0; reg_id < 4; reg_id++) {
         reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
         mcs_reg_write(mcs, reg, mask[reg_id]);
     }
···
     for (reg_id = 0; reg_id < 4; reg_id++) {
         reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
         mcs_reg_write(mcs, reg, data[reg_id]);
+    }
+    for (reg_id = 0; reg_id < 4; reg_id++) {
         reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
         mcs_reg_write(mcs, reg, mask[reg_id]);
     }
···

     /* Flow entry */
     flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+    __set_bit(flow_id, mcs->rx.flow_ids.bmap);
+    __set_bit(flow_id, mcs->tx.flow_ids.bmap);
+
     for (reg_id = 0; reg_id < 4; reg_id++) {
         reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
         mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
···
     }
     /* secy */
     secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+    __set_bit(secy_id, mcs->rx.secy.bmap);
+    __set_bit(secy_id, mcs->tx.secy.bmap);

     /* Set validate frames to NULL and enable control port */
     plcy = 0x7ull;
···
     /* Enable Flowid entry */
     mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
     mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+
     return 0;
 }
···
     mcs_add_intr_wq_entry(mcs, &event);
 }

-static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+                                 enum mcs_direction dir)
 {
-    struct mcs_intr_event event = { 0 };
-    int i;
+    u64 val, reg;
+    int lmac;

-    if (!(intr & MCS_BBE_INT_MASK))
+    if (!(intr & 0x6ULL))
         return;

-    event.mcs_id = mcs->mcs_id;
-    event.pcifunc = mcs->pf_map[0];
+    if (intr & BIT_ULL(1))
+        reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
+                                MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
+    else
+        reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
+                                MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
+    val = mcs_reg_read(mcs, reg);

-    for (i = 0; i < MCS_MAX_BBE_INT; i++) {
-        if (!(intr & BIT_ULL(i)))
+    /* policy/data over flow occurred */
+    for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+        if (!(val & BIT_ULL(lmac)))
             continue;
-
-        /* Lower nibble denotes data fifo overflow interrupts and
-         * upper nibble indicates policy fifo overflow interrupts.
-         */
-        if (intr & 0xFULL)
-            event.intr_mask = (dir == MCS_RX) ?
-                              MCS_BBE_RX_DFIFO_OVERFLOW_INT :
-                              MCS_BBE_TX_DFIFO_OVERFLOW_INT;
-        else
-            event.intr_mask = (dir == MCS_RX) ?
-                              MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
-                              MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
-
-        /* Notify the lmac_id info which ran into BBE fatal error */
-        event.lmac_id = i & 0x3ULL;
-        mcs_add_intr_wq_entry(mcs, &event);
+        dev_warn(mcs->dev, "BEE:Policy or data overflow occurred on lmac:%d\n", lmac);
     }
 }

-static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+                                 enum mcs_direction dir)
 {
-    struct mcs_intr_event event = { 0 };
-    int i;
+    int lmac;

-    if (!(intr & MCS_PAB_INT_MASK))
+    if (!(intr & 0xFFFFFULL))
         return;

-    event.mcs_id = mcs->mcs_id;
-    event.pcifunc = mcs->pf_map[0];
-
-    for (i = 0; i < MCS_MAX_PAB_INT; i++) {
-        if (!(intr & BIT_ULL(i)))
-            continue;
-
-        event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
-                          MCS_PAB_TX_CHAN_OVERFLOW_INT;
-
-        /* Notify the lmac_id info which ran into PAB fatal error */
-        event.lmac_id = i;
-        mcs_add_intr_wq_entry(mcs, &event);
+    for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+        if (intr & BIT_ULL(lmac))
+            dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
     }
 }
···
     struct mcs *mcs = (struct mcs *)mcs_irq;
     u64 intr, cpm_intr, bbe_intr, pab_intr;

-    /* Disable and clear the interrupt */
+    /* Disable the interrupt */
     mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
-    mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));

     /* Check which block has interrupt*/
     intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
···
     /* BBE RX */
     if (intr & MCS_BBE_RX_INT_ENA) {
         bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
-        mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+        mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);

         /* Clear the interrupt */
         mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
···
     /* BBE TX */
     if (intr & MCS_BBE_TX_INT_ENA) {
         bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
-        mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+        mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);

         /* Clear the interrupt */
         mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
···
     /* PAB RX */
     if (intr & MCS_PAB_RX_INT_ENA) {
         pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
-        mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+        mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);

         /* Clear the interrupt */
         mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
···
     /* PAB TX */
     if (intr & MCS_PAB_TX_INT_ENA) {
         pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
-        mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+        mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);

         /* Clear the interrupt */
         mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
         mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
     }

-    /* Enable the interrupt */
+    /* Clear and enable the interrupt */
+    mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
     mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));

     return IRQ_HANDLED;
···
         return ret;
     }

-    ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
+    ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
                       mcs_ip_intr_handler, 0, "MCS_IP", mcs);
     if (ret) {
         dev_err(mcs->dev, "MCS IP irq registration failed\n");
···
     mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
     mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);

-    mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
-    mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
+    mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
+    mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);

-    mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
-    mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
+    mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+    mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);

     mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
     if (!mcs->tx_sa_active) {
···
         return ret;

 free_irq:
-    free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
+    free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
 exit:
     pci_free_irq_vectors(mcs->pdev);
     mcs->num_vec = 0;
···
 void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
 {
     u64 reg;
+    int id = lmac_id * 2;

-    reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
+    reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
+    mcs_reg_write(mcs, reg, (u64)mode);
+    reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
     mcs_reg_write(mcs, reg, (u64)mode);
 }
···
     hw->lmac_cnt = 20;      /* lmacs/ports per mcs block */
     hw->mcs_x2p_intf = 5;   /* x2p clabration intf */
     hw->mcs_blks = 1;       /* MCS blocks */
+    hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
 }

 static struct mcs_ops cn10kb_mcs_ops = {
···
     .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
     .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
     .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
+    .mcs_bbe_intr_handler = cn10kb_mcs_bbe_intr_handler,
+    .mcs_pab_intr_handler = cn10kb_mcs_pab_intr_handler,
 };

 static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
···

     /* Set MCS to external bypass */
     mcs_set_external_bypass(mcs, true);
-    free_irq(pci_irq_vector(pdev, MCS_INT_VEC_IP), mcs);
+    free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
     pci_free_irq_vectors(pdev);
     pci_release_regions(pdev);
     pci_disable_device(pdev);
+12 -14
drivers/net/ethernet/marvell/octeontx2/af/mcs.h
···
 /* Reserved resources for default bypass entry */
 #define MCS_RSRC_RSVD_CNT       1

-/* MCS Interrupt Vector Enumeration */
-enum mcs_int_vec_e {
-    MCS_INT_VEC_MIL_RX_GBL   = 0x0,
-    MCS_INT_VEC_MIL_RX_LMACX = 0x1,
-    MCS_INT_VEC_MIL_TX_LMACX = 0x5,
-    MCS_INT_VEC_HIL_RX_GBL   = 0x9,
-    MCS_INT_VEC_HIL_RX_LMACX = 0xa,
-    MCS_INT_VEC_HIL_TX_GBL   = 0xe,
-    MCS_INT_VEC_HIL_TX_LMACX = 0xf,
-    MCS_INT_VEC_IP           = 0x13,
-    MCS_INT_VEC_CNT          = 0x14,
-};
+/* MCS Interrupt Vector */
+#define MCS_CNF10KB_INT_VEC_IP  0x13
+#define MCS_CN10KB_INT_VEC_IP   0x53

 #define MCS_MAX_BBE_INT         8ULL
 #define MCS_BBE_INT_MASK        0xFFULL

-#define MCS_MAX_PAB_INT         4ULL
-#define MCS_PAB_INT_MASK        0xFULL
+#define MCS_MAX_PAB_INT         8ULL
+#define MCS_PAB_INT_MASK        0xFULL

 #define MCS_BBE_RX_INT_ENA      BIT_ULL(0)
 #define MCS_BBE_TX_INT_ENA      BIT_ULL(1)
···
     u8 lmac_cnt;
     u8 mcs_blks;
     unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
+    u16 ip_vec;
 };

 struct mcs {
···
     void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
     void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
     void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+    void (*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+    void (*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
 };

 extern struct pci_driver mcs_driver;
···
 void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
 void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
 void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);

 /* CNF10K-B APIs */
 struct mcs_ops *cnf10kb_get_mac_ops(void);
···
 void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
 void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
 void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);

 /* Stats APIs */
 void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+63
drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
···
     .mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
     .mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
     .mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
+    .mcs_bbe_intr_handler = cnf10kb_mcs_bbe_intr_handler,
+    .mcs_pab_intr_handler = cnf10kb_mcs_pab_intr_handler,
 };

 struct mcs_ops *cnf10kb_get_mac_ops(void)
···
     hw->lmac_cnt = 4;       /* lmacs/ports per mcs block */
     hw->mcs_x2p_intf = 1;   /* x2p clabration intf */
     hw->mcs_blks = 7;       /* MCS blocks */
+    hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
 }

 void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
···
         event.sa_id = val & 0x7F;

         event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+        mcs_add_intr_wq_entry(mcs, &event);
+    }
+}
+
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+                                  enum mcs_direction dir)
+{
+    struct mcs_intr_event event = { 0 };
+    int i;
+
+    if (!(intr & MCS_BBE_INT_MASK))
+        return;
+
+    event.mcs_id = mcs->mcs_id;
+    event.pcifunc = mcs->pf_map[0];
+
+    for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+        if (!(intr & BIT_ULL(i)))
+            continue;
+
+        /* Lower nibble denotes data fifo overflow interrupts and
+         * upper nibble indicates policy fifo overflow interrupts.
+         */
+        if (intr & 0xFULL)
+            event.intr_mask = (dir == MCS_RX) ?
+                              MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+                              MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+        else
+            event.intr_mask = (dir == MCS_RX) ?
+                              MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+                              MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+        /* Notify the lmac_id info which ran into BBE fatal error */
+        event.lmac_id = i & 0x3ULL;
+        mcs_add_intr_wq_entry(mcs, &event);
+    }
+}
+
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+                                  enum mcs_direction dir)
+{
+    struct mcs_intr_event event = { 0 };
+    int i;
+
+    if (!(intr & MCS_PAB_INT_MASK))
+        return;
+
+    event.mcs_id = mcs->mcs_id;
+    event.pcifunc = mcs->pf_map[0];
+
+    for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+        if (!(intr & BIT_ULL(i)))
+            continue;
+
+        event.intr_mask = (dir == MCS_RX) ?
+                          MCS_PAB_RX_CHAN_OVERFLOW_INT :
+                          MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+        /* Notify the lmac_id info which ran into PAB fatal error */
+        event.lmac_id = i;
         mcs_add_intr_wq_entry(mcs, &event);
     }
 }
+5 -1
drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
···
 #define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a)                  (0x46f8ull + (a) * 0x8ull)
 #define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a)    (0x788ull + (a) * 0x8ull)
 #define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a)                (0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_PORT_CFGX(a)                  (0x3b98ull + (a) * 0x8ull)
 #define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
     u64 offset; \
 \
···
 #define MCSX_BBE_RX_SLAVE_CAL_ENTRY                     0x180ull
 #define MCSX_BBE_RX_SLAVE_CAL_LEN                       0x188ull
 #define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a)             (0x290ull + (a) * 0x40ull)
-
+#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0              0xe20
+#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0              0x1298
+#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0             0xe40
+#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0             0x12b8
 #define MCSX_BBE_RX_SLAVE_BBE_INT ({    \
     u64 offset;    \
 \
+37
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
···
 #include "mcs.h"
 #include "rvu.h"
+#include "mcs_reg.h"
 #include "lmac_common.h"

 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
···
 MBOX_UP_MCS_MESSAGES
 #undef M
+
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
+{
+    struct mcs *mcs;
+    u64 cfg;
+    u8 port;
+
+    if (!rvu->mcs_blk_cnt)
+        return;
+
+    /* When ptp is enabled, RPM appends 8B header for all
+     * RX packets. MCS PEX need to configure to skip 8B
+     * during packet parsing.
+     */
+
+    /* CNF10K-B */
+    if (rvu->mcs_blk_cnt > 1) {
+        mcs = mcs_get_pdata(rpm_id);
+        cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+        if (ena)
+            cfg |= BIT_ULL(lmac_id);
+        else
+            cfg &= ~BIT_ULL(lmac_id);
+        mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
+        return;
+    }
+    /* CN10KB */
+    mcs = mcs_get_pdata(0);
+    port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
+    cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
+    if (ena)
+        cfg |= BIT_ULL(0);
+    else
+        cfg &= ~BIT_ULL(0);
+    mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
+}

 int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
                                        struct mcs_set_lmac_mode *req,
+40 -9
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
···
 }

 static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
-                                int num, int type)
+                                int num, int type, unsigned long *pf_bmap)
 {
     struct rvu_hwinfo *hw = rvu->hw;
     int region;
···
      */
     if (type == TYPE_AFVF) {
         for (region = 0; region < num; region++) {
+            if (!test_bit(region, pf_bmap))
+                continue;
+
             if (hw->cap.per_pf_mbox_regs) {
                 bar4 = rvu_read64(rvu, BLKADDR_RVUM,
                                   RVU_AF_PFX_BAR4_ADDR(0)) +
···
      * RVU_AF_PF_BAR4_ADDR register.
      */
     for (region = 0; region < num; region++) {
+        if (!test_bit(region, pf_bmap))
+            continue;
+
         if (hw->cap.per_pf_mbox_regs) {
             bar4 = rvu_read64(rvu, BLKADDR_RVUM,
                               RVU_AF_PFX_BAR4_ADDR(region));
···
     int err = -EINVAL, i, dir, dir_up;
     void __iomem *reg_base;
     struct rvu_work *mwork;
+    unsigned long *pf_bmap;
     void **mbox_regions;
     const char *name;
+    u64 cfg;
+
+    pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
+    if (!pf_bmap)
+        return -ENOMEM;
+
+    /* RVU VFs */
+    if (type == TYPE_AFVF)
+        bitmap_set(pf_bmap, 0, num);
+
+    if (type == TYPE_AFPF) {
+        /* Mark enabled PFs in bitmap */
+        for (i = 0; i < num; i++) {
+            cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
+            if (cfg & BIT_ULL(20))
+                set_bit(i, pf_bmap);
+        }
+    }

     mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
-    if (!mbox_regions)
-        return -ENOMEM;
+    if (!mbox_regions) {
+        err = -ENOMEM;
+        goto free_bitmap;
+    }

     switch (type) {
     case TYPE_AFPF:
···
         dir = MBOX_DIR_AFPF;
         dir_up = MBOX_DIR_AFPF_UP;
         reg_base = rvu->afreg_base;
-        err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
+        err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
         if (err)
             goto free_regions;
         break;
···
         dir = MBOX_DIR_PFVF;
         dir_up = MBOX_DIR_PFVF_UP;
         reg_base = rvu->pfreg_base;
-        err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
+        err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
         if (err)
             goto free_regions;
         break;
···
     }

     err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
-                                 reg_base, dir, num);
+                                 reg_base, dir, num, pf_bmap);
     if (err)
         goto exit;

     err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
-                                 reg_base, dir_up, num);
+                                 reg_base, dir_up, num, pf_bmap);
     if (err)
         goto exit;

     for (i = 0; i < num; i++) {
+        if (!test_bit(i, pf_bmap))
+            continue;
+
         mwork = &mw->mbox_wrk[i];
         mwork->rvu = rvu;
         INIT_WORK(&mwork->work, mbox_handler);
···
         mwork->rvu = rvu;
         INIT_WORK(&mwork->work, mbox_up_handler);
     }
-    kfree(mbox_regions);
-    return 0;
+    goto free_regions;

 exit:
     destroy_workqueue(mw->mbox_wq);
···
         iounmap((void __iomem *)mbox_regions[num]);
 free_regions:
     kfree(mbox_regions);
+free_bitmap:
+    bitmap_free(pf_bmap);
     return err;
 }
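The rvu mailbox fix computes a bitmap of enabled PFs once and then gates every region mapping and workqueue init on it. A minimal userspace sketch of the build-once, test-everywhere bitmap pattern (pf_enabled() is a hypothetical stand-in for the RVU_PRIV_PFX_CFG() enable bit):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PF 16

static bool test_bit(int nr, const uint32_t *bmap) { return *bmap & (1u << nr); }
static void set_bit(int nr, uint32_t *bmap)        { *bmap |= 1u << nr; }

/* pretend only even-numbered PFs are enabled */
static bool pf_enabled(int i) { return (i & 1) == 0; }

int main(void)
{
    uint32_t pf_bmap = 0;
    int i;

    /* mark enabled PFs once... */
    for (i = 0; i < MAX_PF; i++)
        if (pf_enabled(i))
            set_bit(i, &pf_bmap);

    /* ...then every later loop skips the holes instead of mapping
     * mailbox regions or queueing work for absent functions */
    for (i = 0; i < MAX_PF; i++) {
        if (!test_bit(i, &pf_bmap))
            continue;
        printf("init mbox for pf%d\n", i);
    }
    return 0;
}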
+1
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
···
 /* CN10K MCS */
 int rvu_mcs_init(struct rvu *rvu);
 int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
 void rvu_mcs_exit(struct rvu *rvu);

 #endif /* RVU_H */
+2
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
···
     /* This flag is required to clean up CGX conf if app gets killed */
     pfvf->hw_rx_tstamp_en = enable;

+    /* Inform MCS about 8B RX header */
+    rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
     return 0;
 }
+8 -5
drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
···
                             u64 iova, u64 *lmt_addr)
 {
     u64 pa, val, pf;
-    int err;
+    int err = 0;

     if (!iova) {
         dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
         return -EINVAL;
     }

+    mutex_lock(&rvu->rsrc_lock);
     rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
     pf = rvu_get_pf(pcifunc) & 0x1F;
     val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
···
     err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
     if (err) {
         dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
-        return err;
+        goto exit;
     }
     val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
     if (val & ~0x1ULL) {
         dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
-        return -EIO;
+        err = -EIO;
+        goto exit;
     }
     /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
      * PA[11:0] = IOVA[11:0]
···
     pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
     pa &= GENMASK_ULL(39, 0);
     *lmt_addr = (pa << 12) | (iova & 0xFFF);
-
-    return 0;
+exit:
+    mutex_unlock(&rvu->rsrc_lock);
+    return err;
 }

 static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+3 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
···
                stats.octet_validated_cnt);
     seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
                stats.pkt_port_disabled_cnt);
-    seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
-    seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
+    seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
+    seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
+               stats.pkt_nosa_cnt);
     seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
                stats.pkt_nosaerror_cnt);
     seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+17 -9
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
···
 #include "rvu_npc_fs.h"
 #include "rvu_npc_hash.h"

-#define NPC_BYTESM      GENMASK_ULL(19, 16)
-#define NPC_HDR_OFFSET  GENMASK_ULL(15, 8)
-#define NPC_KEY_OFFSET  GENMASK_ULL(5, 0)
-#define NPC_LDATA_EN    BIT_ULL(7)
-
 static const char * const npc_flow_names[] = {
     [NPC_DMAC] = "dmac",
     [NPC_SMAC] = "smac",
···
 static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
                            u8 lt, u64 cfg, u8 intf)
 {
+    struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
     struct npc_mcam *mcam = &rvu->hw->mcam;
     u8 hdr, key, nr_bytes, bit_offset;
     u8 la_ltype, la_start;
···
     NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
     NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
     NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1);
-    NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
-    NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+    if (rvu->hw->cap.npc_hash_extract) {
+        if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0])
+            NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4);
+        else
+            NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+
+        if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1])
+            NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4);
+        else
+            NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+    } else {
+        NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+        NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+    }
+
     NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
     NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
     NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
···
      */
     masked_cfg = cfg & NPC_EXACT_NIBBLE;
     bitnr = NPC_EXACT_NIBBLE_START;
-    for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg,
-                          NPC_EXACT_NIBBLE_START) {
+    for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) {
         npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
         key_nibble++;
     }
+4
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
···
 #define __RVU_NPC_FS_H

 #define IPV6_WORDS     4
+#define NPC_BYTESM     GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN   BIT_ULL(7)

 void npc_update_entry(struct rvu *rvu, enum key_fields type,
                       struct mcam_entry *entry, u64 val_lo,
+70 -53
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
···
     return hash_out;
 }

-u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
-                        u64 *secret_key, u8 intf, u8 hash_idx)
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+                        u8 intf, u8 hash_idx)
 {
     u64 hash_key[3];
     u64 data_padded[2];
     u32 field_hash;

-    hash_key[0] = secret_key[1] << 31;
-    hash_key[0] |= secret_key[2];
-    hash_key[1] = secret_key[1] >> 33;
-    hash_key[1] |= secret_key[0] << 31;
-    hash_key[2] = secret_key[0] >> 33;
+    hash_key[0] = rsp.secret_key[1] << 31;
+    hash_key[0] |= rsp.secret_key[2];
+    hash_key[1] = rsp.secret_key[1] >> 33;
+    hash_key[1] |= rsp.secret_key[0] << 31;
+    hash_key[2] = rsp.secret_key[0] >> 33;

-    data_padded[0] = mkex_hash->hash_mask[intf][hash_idx][0] & ldata[0];
-    data_padded[1] = mkex_hash->hash_mask[intf][hash_idx][1] & ldata[1];
+    data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
+    data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
     field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);

-    field_hash &= mkex_hash->hash_ctrl[intf][hash_idx] >> 32;
-    field_hash |= mkex_hash->hash_ctrl[intf][hash_idx];
+    field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
+    field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
     return field_hash;
 }

-static u64 npc_update_use_hash(int lt, int ld)
+static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
+                               u8 intf, int lid, int lt, int ld)
 {
-    u64 cfg = 0;
+    u8 hdr, key;
+    u64 cfg;

-    switch (lt) {
-    case NPC_LT_LC_IP6:
-        /* Update use_hash(bit-20) and bytesm1 (bit-16:19)
-         * in KEX_LD_CFG
-         */
-        cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
-                                  ld ? 0x8 : 0x18,
-                                  0x1, 0x0, 0x10);
-        break;
-    }
+    cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
+    hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+    key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+
+    /* Update use_hash(bit-20) to 'true' and
+     * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
+     */
+    cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+                              hdr, 0x1, 0x0, key);

     return cfg;
 }
···
     for (lt = 0; lt < NPC_MAX_LT; lt++) {
         for (ld = 0; ld < NPC_MAX_LD; ld++) {
             if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
-                u64 cfg = npc_update_use_hash(lt, ld);
+                u64 cfg;

-                hash_cnt++;
                 if (hash_cnt == NPC_MAX_HASH)
                     return;

+                cfg = npc_update_use_hash(rvu, blkaddr,
+                                          intf, lid, lt, ld);
                 /* Set updated KEX configuration */
                 SET_KEX_LD(intf, lid, lt, ld, cfg);
                 /* Set HASH configuration */
···
                                      mkex_hash->hash_mask[intf][ld][1]);
                 SET_KEX_LD_HASH_CTRL(intf, ld,
                                      mkex_hash->hash_ctrl[intf][ld]);
+
+                hash_cnt++;
             }
         }
     }
···
     for (lt = 0; lt < NPC_MAX_LT; lt++) {
         for (ld = 0; ld < NPC_MAX_LD; ld++)
             if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
-                u64 cfg = npc_update_use_hash(lt, ld);
+                u64 cfg;

-                hash_cnt++;
                 if (hash_cnt == NPC_MAX_HASH)
                     return;

+                cfg = npc_update_use_hash(rvu, blkaddr,
+                                          intf, lid, lt, ld);
                 /* Set updated KEX configuration */
                 SET_KEX_LD(intf, lid, lt, ld, cfg);
                 /* Set HASH configuration */
···
                 SET_KEX_LD_HASH_CTRL(intf, ld,
                                      mkex_hash->hash_ctrl[intf][ld]);
                 hash_cnt++;
-                if (hash_cnt == NPC_MAX_HASH)
-                    return;
             }
         }
     }
···
                           struct flow_msg *omask)
 {
     struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
-    struct npc_get_secret_key_req req;
-    struct npc_get_secret_key_rsp rsp;
+    struct npc_get_field_hash_info_req req;
+    struct npc_get_field_hash_info_rsp rsp;
     u64 ldata[2], cfg;
     u32 field_hash;
     u8 hash_idx;
···
     }

     req.intf = intf;
-    rvu_mbox_handler_npc_get_secret_key(rvu, &req, &rsp);
+    rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);

     for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
         cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
···
          * is hashed to 32 bit value.
          */
         case NPC_LT_LC_IP6:
-            if (features & BIT_ULL(NPC_SIP_IPV6)) {
+            /* ld[0] == hash_idx[0] == Source IPv6
+             * ld[1] == hash_idx[1] == Destination IPv6
+             */
+            if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
                 u32 src_ip[IPV6_WORDS];

                 be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
-                ldata[0] = (u64)src_ip[0] << 32 | src_ip[1];
-                ldata[1] = (u64)src_ip[2] << 32 | src_ip[3];
+                ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
+                ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
                 field_hash = npc_field_hash_calc(ldata,
-                                                 mkex_hash,
-                                                 rsp.secret_key,
+                                                 rsp,
                                                  intf,
                                                  hash_idx);
                 npc_update_entry(rvu, NPC_SIP_IPV6, entry,
-                                 field_hash, 0, 32, 0, intf);
+                                 field_hash, 0,
+                                 GENMASK(31, 0), 0, intf);
                 memcpy(&opkt->ip6src, &pkt->ip6src,
                        sizeof(pkt->ip6src));
                 memcpy(&omask->ip6src, &mask->ip6src,
                        sizeof(mask->ip6src));
-                break;
-            }
-
-            if (features & BIT_ULL(NPC_DIP_IPV6)) {
+            } else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
                 u32 dst_ip[IPV6_WORDS];

                 be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
-                ldata[0] = (u64)dst_ip[0] << 32 | dst_ip[1];
-                ldata[1] = (u64)dst_ip[2] << 32 | dst_ip[3];
+                ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
+                ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
                 field_hash = npc_field_hash_calc(ldata,
-                                                 mkex_hash,
-                                                 rsp.secret_key,
+                                                 rsp,
                                                  intf,
                                                  hash_idx);
                 npc_update_entry(rvu, NPC_DIP_IPV6, entry,
-                                 field_hash, 0, 32, 0, intf);
+                                 field_hash, 0,
+                                 GENMASK(31, 0), 0, intf);
                 memcpy(&opkt->ip6dst, &pkt->ip6dst,
                        sizeof(pkt->ip6dst));
                 memcpy(&omask->ip6dst, &mask->ip6dst,
                        sizeof(mask->ip6dst));
             }
+
             break;
         }
     }
···
     }
 }

-int rvu_mbox_handler_npc_get_secret_key(struct rvu *rvu,
-                                        struct npc_get_secret_key_req *req,
-                                        struct npc_get_secret_key_rsp *rsp)
+int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
+                                             struct npc_get_field_hash_info_req *req,
+                                             struct npc_get_field_hash_info_rsp *rsp)
 {
     u64 *secret_key = rsp->secret_key;
     u8 intf = req->intf;
-    int blkaddr;
+    int i, j, blkaddr;

     blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
     if (blkaddr < 0) {
···
     secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
     secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
     secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+
+    for (i = 0; i < NPC_MAX_HASH; i++) {
+        for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
+            rsp->hash_mask[NIX_INTF_RX][i][j] =
+                GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
+            rsp->hash_mask[NIX_INTF_TX][i][j] =
+                GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
+        }
+    }
+
+    for (i = 0; i < NPC_MAX_INTF; i++)
+        for (j = 0; j < NPC_MAX_HASH; j++)
+            rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);

     return 0;
 }
···
     rvu->hw->table = table;

     /* Read table size, ways and depth */
-    table->mem_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
     table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
-    table->cam_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+    table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+    table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);

     dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
             __func__, table->mem_table.ways, table->cam_table.depth);
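rvu_npc_toeplitz_hash() above folds the masked 128-bit IPv6 field down to 32 bits using the NPC secret key. The register plumbing is hardware-specific, but the underlying construction is the standard Toeplitz hash familiar from RSS; a generic userspace sketch (not the driver's exact implementation, with an arbitrary sample key):

#include <stdint.h>
#include <stdio.h>

/* Generic Toeplitz hash: for every input bit that is set, XOR in the
 * 32-bit key window aligned with that bit position. Requires at least
 * len + 8 key bytes with this sliding-window formulation. */
static uint32_t toeplitz_hash(const uint8_t *data, int len,
                              const uint8_t *key)
{
    uint64_t window = 0;
    uint32_t hash = 0;
    int i, b, kidx = 8;

    /* prime the sliding 64-bit window with the first 8 key bytes */
    for (i = 0; i < 8; i++)
        window = (window << 8) | key[i];

    for (i = 0; i < len; i++) {
        for (b = 7; b >= 0; b--) {
            if (data[i] & (1 << b))
                hash ^= (uint32_t)(window >> 32);
            window <<= 1;
        }
        window |= key[kidx++]; /* slide the next key byte in */
    }
    return hash;
}

int main(void)
{
    /* hypothetical 16-byte field (an IPv6 address) and a sample key */
    uint8_t data[16] = { 0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
                         0, 0, 0, 0, 0, 0, 0, 0x01 };
    uint8_t key[24]  = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
                         0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
                         0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4 };

    printf("hash = 0x%08x\n", (unsigned)toeplitz_hash(data, sizeof(data), key));
    return 0;
}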
+8 -2
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
···
     rvu_write64(rvu, blkaddr, \
                 NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)

+#define GET_KEX_LD_HASH_CTRL(intf, ld) \
+    rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld))
+
+#define GET_KEX_LD_HASH_MASK(intf, ld, mask_idx) \
+    rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx))
+
 #define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
     rvu_write64(rvu, blkaddr, \
                 NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
···
                           struct flow_msg *omask);
 void npc_config_secret_key(struct rvu *rvu, int blkaddr);
 void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
-u32 npc_field_hash_calc(u64 *ldata, struct npc_mcam_kex_hash *mkex_hash,
-                        u64 *secret_key, u8 intf, u8 hash_idx);
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+                        u8 intf, u8 hash_idx);

 static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
     .lid_lt_ld_hash_en = {
+35 -13
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
··· 9 9 #include <net/macsec.h> 10 10 #include "otx2_common.h" 11 11 12 + #define MCS_TCAM0_MAC_DA_MASK GENMASK_ULL(47, 0) 12 13 #define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48) 13 14 #define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0) 14 15 #define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32) ··· 150 149 enum mcs_rsrc_type type, u16 hw_rsrc_id, 151 150 bool all) 152 151 { 152 + struct mcs_clear_stats *clear_req; 153 153 struct mbox *mbox = &pfvf->mbox; 154 154 struct mcs_free_rsrc_req *req; 155 155 156 156 mutex_lock(&mbox->lock); 157 + 158 + clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox); 159 + if (!clear_req) 160 + goto fail; 161 + 162 + clear_req->id = hw_rsrc_id; 163 + clear_req->type = type; 164 + clear_req->dir = dir; 157 165 158 166 req = otx2_mbox_alloc_msg_mcs_free_resources(mbox); 159 167 if (!req) ··· 247 237 struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id) 248 238 { 249 239 struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc; 240 + struct macsec_secy *secy = rxsc->sw_secy; 250 241 struct mcs_flowid_entry_write_req *req; 251 242 struct mbox *mbox = &pfvf->mbox; 243 + u64 mac_da; 252 244 int ret; 253 245 254 246 mutex_lock(&mbox->lock); ··· 261 249 goto fail; 262 250 } 263 251 252 + mac_da = ether_addr_to_u64(secy->netdev->dev_addr); 253 + 254 + req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da); 255 + req->mask[0] = ~0ULL; 256 + req->mask[0] = ~MCS_TCAM0_MAC_DA_MASK; 257 + 264 258 req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC); 265 259 req->mask[1] = ~0ULL; 266 260 req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK; 267 261 268 - req->mask[0] = ~0ULL; 269 262 req->mask[2] = ~0ULL; 270 263 req->mask[3] = ~0ULL; 271 264 ··· 1014 997 1015 998 /* Check if sync is really needed */ 1016 999 if (secy->validate_frames == txsc->last_validate_frames && 1017 - secy->protect_frames == txsc->last_protect_frames) 1000 + secy->replay_protect == txsc->last_replay_protect) 1018 1001 return; 1019 1002 1020 1003 cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true); ··· 1036 1019 rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt; 1037 1020 rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt; 1038 1021 1039 - if (txsc->last_protect_frames) 1022 + if (txsc->last_replay_protect) 1040 1023 rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt; 1041 1024 else 1042 1025 rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt; 1043 1026 1044 - if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK) 1027 + if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED) 1045 1028 rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt; 1046 1029 else 1047 1030 rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt; 1048 1031 } 1049 1032 1050 1033 txsc->last_validate_frames = secy->validate_frames; 1051 - txsc->last_protect_frames = secy->protect_frames; 1034 + txsc->last_replay_protect = secy->replay_protect; 1052 1035 } 1053 1036 1054 1037 static int cn10k_mdo_open(struct macsec_context *ctx) ··· 1117 1100 txsc->sw_secy = secy; 1118 1101 txsc->encoding_sa = secy->tx_sc.encoding_sa; 1119 1102 txsc->last_validate_frames = secy->validate_frames; 1120 - txsc->last_protect_frames = secy->protect_frames; 1103 + txsc->last_replay_protect = secy->replay_protect; 1121 1104 1122 1105 list_add(&txsc->entry, &cfg->txsc_list); 1123 1106 ··· 1134 1117 struct macsec_secy *secy = ctx->secy; 1135 1118 struct macsec_tx_sa *sw_tx_sa; 1136 1119 struct cn10k_mcs_txsc *txsc; 1120 + bool active; 1137 1121 u8 sa_num; 1138 1122 int err; 1139 1123 ··· 1142 1124 if (!txsc) 1143 1125 return -ENOENT; 1144 1126 1145 - txsc->encoding_sa = secy->tx_sc.encoding_sa; 1146 - 1147 - sa_num = txsc->encoding_sa; 1148 - sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]); 1127 + /* Encoding SA got changed */ 1128 + if (txsc->encoding_sa != secy->tx_sc.encoding_sa) { 1129 + txsc->encoding_sa = secy->tx_sc.encoding_sa; 1130 + sa_num = txsc->encoding_sa; 1131 + sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]); 1132 + active = sw_tx_sa ? sw_tx_sa->active : false; 1133 + cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active); 1134 + } 1149 1135 1150 1136 if (netif_running(secy->netdev)) { 1151 1137 cn10k_mcs_sync_stats(pfvf, secy, txsc); 1152 1138 1153 - err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num); 1139 + err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0); 1154 1140 if (err) 1155 1141 return err; 1156 1142 } ··· 1543 1521 rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt; 1544 1522 rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt; 1545 1523 1546 - if (secy->protect_frames) 1524 + if (secy->replay_protect) 1547 1525 rxsc->stats.InPktsLate += rsp.pkt_late_cnt; 1548 1526 else 1549 1527 rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt; 1550 1528 1551 - if (secy->validate_frames == MACSEC_VALIDATE_CHECK) 1529 + if (secy->validate_frames == MACSEC_VALIDATE_DISABLED) 1552 1530 rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt; 1553 1531 else 1554 1532 rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
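The TCAM programming above packs the interface MAC address into bits 47:0 of data[0] and then clears exactly those bits in mask[0], making the destination address a required match while the remaining bits stay don't-care, the same pattern the code already uses for the MACsec ethertype in data[1]/mask[1]. A minimal userspace sketch of that arithmetic, with GENMASK_ULL()/FIELD_PREP() re-modelled here via plain shifts and a GCC/clang builtin (not the kernel implementations), and an invented MAC value:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's GENMASK_ULL()/FIELD_PREP();
 * simplified for illustration only. */
#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define MCS_TCAM0_MAC_DA_MASK GENMASK_ULL(47, 0)

int main(void)
{
        uint64_t mac_da = 0x001122334455ULL;    /* invented DA, packed as a u64 */

        /* data[] carries the value to match; set mask bits appear to mean
         * "don't care", so clearing the DA bits makes them mandatory. */
        uint64_t data = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
        uint64_t mask = ~MCS_TCAM0_MAC_DA_MASK;

        printf("data[0]=%#018llx mask[0]=%#018llx\n",
               (unsigned long long)data, (unsigned long long)mask);
        return 0;
}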
+3 -3
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 335 335 #define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */ 336 336 #define OTX2_VF_VLAN_RX_INDEX 0 337 337 #define OTX2_VF_VLAN_TX_INDEX 1 338 - u16 max_flows; 339 - u8 dmacflt_max_flows; 340 338 u32 *bmap_to_dmacindex; 341 339 unsigned long *dmacflt_bmap; 342 340 struct list_head flow_list; 341 + u32 dmacflt_max_flows; 342 + u16 max_flows; 343 343 }; 344 344 345 345 struct otx2_tc_info { ··· 389 389 struct cn10k_txsc_stats stats; 390 390 struct list_head entry; 391 391 enum macsec_validation_type last_validate_frames; 392 - bool last_protect_frames; 392 + bool last_replay_protect; 393 393 u16 hw_secy_id_tx; 394 394 u16 hw_secy_id_rx; 395 395 u16 hw_flow_id;
+11 -3
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 1835 1835 otx2_dmacflt_reinstall_flows(pf); 1836 1836 1837 1837 err = otx2_rxtx_enable(pf, true); 1838 - if (err) 1838 + /* If a mbox communication error happens at this point then interface 1839 + * will end up in a state such that it is in down state but hardware 1840 + * mcam entries are enabled to receive the packets. Hence disable the 1841 + * packet I/O. 1842 + */ 1843 + if (err == -EIO) 1844 + goto err_disable_rxtx; 1845 + else if (err) 1839 1846 goto err_tx_stop_queues; 1840 1847 1841 1848 otx2_do_set_rx_mode(pf); 1842 1849 1843 1850 return 0; 1844 1851 1852 + err_disable_rxtx: 1853 + otx2_rxtx_enable(pf, false); 1845 1854 err_tx_stop_queues: 1846 1855 netif_tx_stop_all_queues(netdev); 1847 1856 netif_carrier_off(netdev); ··· 3082 3073 otx2_config_pause_frm(pf); 3083 3074 } 3084 3075 3085 - cn10k_mcs_free(pf); 3086 - 3087 3076 #ifdef CONFIG_DCB 3088 3077 /* Disable PFC config */ 3089 3078 if (pf->pfc_en) { ··· 3095 3088 3096 3089 otx2_unregister_dl(pf); 3097 3090 unregister_netdev(netdev); 3091 + cn10k_mcs_free(pf); 3098 3092 otx2_sriov_disable(pf->pdev); 3099 3093 otx2_sriov_vfcfg_cleanup(pf); 3100 3094 if (pf->otx2_wq)
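One note on the comparison above: kernel helpers report failure with negative errno values, so the mbox-error branch can only ever fire as err == -EIO; a comparison against plain positive EIO would never match. A toy userspace model of that convention (fake_rxtx_enable() and its mbox_ok parameter are invented purely for illustration):

#include <errno.h>
#include <stdio.h>

/* Toy model of a kernel-style helper: returns 0 on success or a
 * NEGATIVE errno on failure. */
static int fake_rxtx_enable(int mbox_ok)
{
        return mbox_ok ? 0 : -EIO;
}

int main(void)
{
        int err = fake_rxtx_enable(0);

        if (err == -EIO)                /* plain EIO would never match */
                printf("mbox I/O error: disable packet I/O\n");
        else if (err)
                printf("other error: stop the queues\n");
        else
                printf("interface up\n");
        return 0;
}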
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
··· 544 544 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { 545 545 if (ntohs(flow_spec->etype) == ETH_P_IP) { 546 546 flow_spec->ip_flag = IPV4_FLAG_MORE; 547 - flow_mask->ip_flag = 0xff; 547 + flow_mask->ip_flag = IPV4_FLAG_MORE; 548 548 req->features |= BIT_ULL(NPC_IPFRAG_IPV4); 549 549 } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) { 550 550 flow_spec->next_header = IPPROTO_FRAGMENT;
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
··· 621 621 622 622 err = otx2vf_realloc_msix_vectors(vf); 623 623 if (err) 624 - goto err_mbox_destroy; 624 + goto err_detach_rsrc; 625 625 626 626 err = otx2_set_real_num_queues(netdev, qcount, qcount); 627 627 if (err)
+40 -66
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 1918 1918 1919 1919 while (done < budget) { 1920 1920 unsigned int pktlen, *rxdcsum; 1921 - bool has_hwaccel_tag = false; 1922 1921 struct net_device *netdev; 1923 - u16 vlan_proto, vlan_tci; 1924 1922 dma_addr_t dma_addr; 1925 1923 u32 hash, reason; 1926 1924 int mac = 0; ··· 2053 2055 skb_checksum_none_assert(skb); 2054 2056 skb->protocol = eth_type_trans(skb, netdev); 2055 2057 2056 - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { 2057 - if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { 2058 - if (trxd.rxd3 & RX_DMA_VTAG_V2) { 2059 - vlan_proto = RX_DMA_VPID(trxd.rxd4); 2060 - vlan_tci = RX_DMA_VID(trxd.rxd4); 2061 - has_hwaccel_tag = true; 2062 - } 2063 - } else if (trxd.rxd2 & RX_DMA_VTAG) { 2064 - vlan_proto = RX_DMA_VPID(trxd.rxd3); 2065 - vlan_tci = RX_DMA_VID(trxd.rxd3); 2066 - has_hwaccel_tag = true; 2067 - } 2068 - } 2069 - 2070 2058 /* When using VLAN untagging in combination with DSA, the 2071 2059 * hardware treats the MTK special tag as a VLAN and untags it. 2072 2060 */ 2073 - if (has_hwaccel_tag && netdev_uses_dsa(netdev)) { 2074 - unsigned int port = vlan_proto & GENMASK(2, 0); 2061 + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) && 2062 + (trxd.rxd2 & RX_DMA_VTAG) && netdev_uses_dsa(netdev)) { 2063 + unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0); 2075 2064 2076 2065 if (port < ARRAY_SIZE(eth->dsa_meta) && 2077 2066 eth->dsa_meta[port]) 2078 2067 skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst); 2079 - } else if (has_hwaccel_tag) { 2080 - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci); 2081 2068 } 2082 2069 2083 2070 if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) ··· 2890 2907 2891 2908 static int mtk_set_features(struct net_device *dev, netdev_features_t features) 2892 2909 { 2893 - struct mtk_mac *mac = netdev_priv(dev); 2894 - struct mtk_eth *eth = mac->hw; 2895 2910 netdev_features_t diff = dev->features ^ features; 2896 - int i; 2897 2911 2898 2912 if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO)) 2899 2913 mtk_hwlro_netdev_disable(dev); 2900 - 2901 - /* Set RX VLAN offloading */ 2902 - if (!(diff & NETIF_F_HW_VLAN_CTAG_RX)) 2903 - return 0; 2904 - 2905 - mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX), 2906 - MTK_CDMP_EG_CTRL); 2907 - 2908 - /* sync features with other MAC */ 2909 - for (i = 0; i < MTK_MAC_COUNT; i++) { 2910 - if (!eth->netdev[i] || eth->netdev[i] == dev) 2911 - continue; 2912 - eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX; 2913 - eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX; 2914 - } 2915 2914 2916 2915 return 0; 2917 2916 } ··· 3212 3247 struct mtk_eth *eth = mac->hw; 3213 3248 int i, err; 3214 3249 3215 - if (mtk_uses_dsa(dev) && !eth->prog) { 3216 - for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { 3217 - struct metadata_dst *md_dst = eth->dsa_meta[i]; 3218 - 3219 - if (md_dst) 3220 - continue; 3221 - 3222 - md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, 3223 - GFP_KERNEL); 3224 - if (!md_dst) 3225 - return -ENOMEM; 3226 - 3227 - md_dst->u.port_info.port_id = i; 3228 - eth->dsa_meta[i] = md_dst; 3229 - } 3230 - } else { 3231 - /* Hardware special tag parsing needs to be disabled if at least 3232 - * one MAC does not use DSA. 3233 - */ 3234 - u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); 3235 - val &= ~MTK_CDMP_STAG_EN; 3236 - mtk_w32(eth, val, MTK_CDMP_IG_CTRL); 3237 - } 3238 - 3239 3250 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); 3240 3251 if (err) { 3241 3252 netdev_err(dev, "%s: could not attach PHY: %d\n", __func__, ··· 3249 3308 3250 3309 phylink_start(mac->phylink); 3251 3310 netif_tx_start_all_queues(dev); 3311 + 3312 + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) 3313 + return 0; 3314 + 3315 + if (mtk_uses_dsa(dev) && !eth->prog) { 3316 + for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { 3317 + struct metadata_dst *md_dst = eth->dsa_meta[i]; 3318 + 3319 + if (md_dst) 3320 + continue; 3321 + 3322 + md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, 3323 + GFP_KERNEL); 3324 + if (!md_dst) 3325 + return -ENOMEM; 3326 + 3327 + md_dst->u.port_info.port_id = i; 3328 + eth->dsa_meta[i] = md_dst; 3329 + } 3330 + } else { 3331 + /* Hardware special tag parsing needs to be disabled if at least 3332 + * one MAC does not use DSA. 3333 + */ 3334 + u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); 3335 + 3336 + val &= ~MTK_CDMP_STAG_EN; 3337 + mtk_w32(eth, val, MTK_CDMP_IG_CTRL); 3338 + 3339 + val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); 3340 + val &= ~MTK_CDMQ_STAG_EN; 3341 + mtk_w32(eth, val, MTK_CDMQ_IG_CTRL); 3342 + 3343 + mtk_w32(eth, 0, MTK_CDMP_EG_CTRL); 3344 + } 3252 3345 3253 3346 return 0; 3254 3347 } ··· 3768 3793 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { 3769 3794 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); 3770 3795 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); 3771 - } 3772 3796 3773 - /* Enable RX VLan Offloading */ 3774 - mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); 3797 + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); 3798 + } 3775 3799 3776 3800 /* set interrupt delays based on current Net DIM sample */ 3777 3801 mtk_dim_rx(&eth->rx_dim.work); ··· 4427 4453 eth->netdev[id]->hw_features |= NETIF_F_LRO; 4428 4454 4429 4455 eth->netdev[id]->vlan_features = eth->soc->hw_features & 4430 - ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); 4456 + ~NETIF_F_HW_VLAN_CTAG_TX; 4431 4457 eth->netdev[id]->features |= eth->soc->hw_features; 4432 4458 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; 4433 4459
-1
drivers/net/ethernet/mediatek/mtk_eth_soc.h
··· 48 48 #define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \ 49 49 NETIF_F_RXCSUM | \ 50 50 NETIF_F_HW_VLAN_CTAG_TX | \ 51 - NETIF_F_HW_VLAN_CTAG_RX | \ 52 51 NETIF_F_SG | NETIF_F_TSO | \ 53 52 NETIF_F_TSO6 | \ 54 53 NETIF_F_IPV6_CSUM |\
+2
drivers/net/ethernet/pensando/ionic/ionic_devlink.c
··· 61 61 struct devlink *dl; 62 62 63 63 dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic), dev); 64 + if (!dl) 65 + return NULL; 64 66 65 67 return devlink_priv(dl); 66 68 }
+1 -1
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
··· 794 794 info->data = lif->nxqs; 795 795 break; 796 796 default: 797 - netdev_err(netdev, "Command parameter %d is not supported\n", 797 + netdev_dbg(netdev, "Command parameter %d is not supported\n", 798 798 info->cmd); 799 799 err = -EOPNOTSUPP; 800 800 }
+7 -4
drivers/net/ethernet/sfc/mcdi_port_common.c
··· 972 972 973 973 /* A QSFP+ NIC may actually have an SFP+ module attached. 974 974 * The ID is page 0, byte 0. 975 + * QSFP28 is of type SFF_8636, however, this is treated 976 + * the same by ethtool, so we can also treat them the same. 975 977 */ 976 978 switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) { 977 - case 0x3: 979 + case 0x3: /* SFP */ 978 980 return MC_CMD_MEDIA_SFP_PLUS; 979 - case 0xc: 980 - case 0xd: 981 + case 0xc: /* QSFP */ 982 + case 0xd: /* QSFP+ */ 983 + case 0x11: /* QSFP28 */ 981 984 return MC_CMD_MEDIA_QSFP_PLUS; 982 985 default: 983 986 return 0; ··· 1078 1075 1079 1076 case MC_CMD_MEDIA_QSFP_PLUS: 1080 1077 modinfo->type = ETH_MODULE_SFF_8436; 1081 - modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 1078 + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; 1082 1079 break; 1083 1080 1084 1081 default:
+59 -26
drivers/net/usb/r8152.c
··· 199 199 #define OCP_EEE_AR 0xa41a 200 200 #define OCP_EEE_DATA 0xa41c 201 201 #define OCP_PHY_STATUS 0xa420 202 + #define OCP_INTR_EN 0xa424 202 203 #define OCP_NCTL_CFG 0xa42c 203 204 #define OCP_POWER_CFG 0xa430 204 205 #define OCP_EEE_CFG 0xa432 ··· 620 619 #define PHY_STAT_EXT_INIT 2 621 620 #define PHY_STAT_LAN_ON 3 622 621 #define PHY_STAT_PWRDN 5 622 + 623 + /* OCP_INTR_EN */ 624 + #define INTR_SPEED_FORCE BIT(3) 623 625 624 626 /* OCP_NCTL_CFG */ 625 627 #define PGA_RETURN_EN BIT(1) ··· 3027 3023 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data); 3028 3024 3029 3025 switch (tp->version) { 3030 - case RTL_VER_08: 3031 - case RTL_VER_09: 3032 - case RTL_VER_14: 3033 - r8153b_rx_agg_chg_indicate(tp); 3026 + case RTL_VER_01: 3027 + case RTL_VER_02: 3028 + case RTL_VER_03: 3029 + case RTL_VER_04: 3030 + case RTL_VER_05: 3031 + case RTL_VER_06: 3032 + case RTL_VER_07: 3034 3033 break; 3035 3034 default: 3036 + r8153b_rx_agg_chg_indicate(tp); 3035 3037 break; 3036 3038 } 3037 3039 ··· 3090 3082 640 / 8); 3091 3083 ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EXTRA_AGGR_TMR, 3092 3084 ocp_data); 3093 - r8153b_rx_agg_chg_indicate(tp); 3094 3085 break; 3095 3086 3096 3087 default: ··· 3123 3116 case RTL_VER_15: 3124 3117 ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, 3125 3118 ocp_data / 8); 3126 - r8153b_rx_agg_chg_indicate(tp); 3127 3119 break; 3128 3120 default: 3129 3121 WARN_ON_ONCE(1); ··· 5992 5986 r8153_aldps_en(tp, true); 5993 5987 } 5994 5988 5989 + static u32 fc_pause_on_auto(struct r8152 *tp) 5990 + { 5991 + return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024); 5992 + } 5993 + 5994 + static u32 fc_pause_off_auto(struct r8152 *tp) 5995 + { 5996 + return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024); 5997 + } 5998 + 5999 + static void r8156_fc_parameter(struct r8152 *tp) 6000 + { 6001 + u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp); 6002 + u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp); 6003 + 6004 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16); 6005 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16); 6006 + } 6007 + 5995 6008 static int rtl8156_enable(struct r8152 *tp) 5996 6009 { 5997 6010 u32 ocp_data; ··· 6019 5994 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 6020 5995 return -ENODEV; 6021 5996 5997 + r8156_fc_parameter(tp); 6022 5998 set_tx_qlen(tp); 6023 5999 rtl_set_eee_plus(tp); 6024 6000 r8153_set_rx_early_timeout(tp); ··· 6051 6025 ocp_write_word(tp, MCU_TYPE_USB, USB_L1_CTRL, ocp_data); 6052 6026 } 6053 6027 6028 + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); 6029 + ocp_data &= ~FC_PATCH_TASK; 6030 + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); 6031 + usleep_range(1000, 2000); 6032 + ocp_data |= FC_PATCH_TASK; 6033 + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); 6034 + 6054 6035 return rtl_enable(tp); 6036 + } 6037 + 6038 + static void rtl8156_disable(struct r8152 *tp) 6039 + { 6040 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, 0); 6041 + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, 0); 6042 + 6043 + rtl8153_disable(tp); 6055 6044 } 6056 6045 6057 6046 static int rtl8156b_enable(struct r8152 *tp) ··· 6468 6427 6469 6428 r8153_aldps_en(tp, true); 6470 6429 r8153b_u1u2en(tp, true); 6471 - } 6472 - 6473 - static inline u32 fc_pause_on_auto(struct r8152 *tp) 6474 - { 6475 - return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 6 * 1024); 6476 - } 6477 - 6478 - static inline u32 fc_pause_off_auto(struct r8152 *tp) 6479 - { 6480 - return (ALIGN(mtu_to_size(tp->netdev->mtu), 1024) + 14 * 1024); 6481 - } 6482 - 6483 - static void r8156_fc_parameter(struct r8152 *tp) 6484 - { 6485 - u32 pause_on = tp->fc_pause_on ? tp->fc_pause_on : fc_pause_on_auto(tp); 6486 - u32 pause_off = tp->fc_pause_off ? tp->fc_pause_off : fc_pause_off_auto(tp); 6487 - 6488 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_FULL, pause_on / 16); 6489 - ocp_write_word(tp, MCU_TYPE_PLA, PLA_RX_FIFO_EMPTY, pause_off / 16); 6490 6430 } 6491 6431 6492 6432 static void rtl8156_change_mtu(struct r8152 *tp) ··· 7560 7538 ((swap_a & 0x1f) << 8) | 7561 7539 ((swap_a >> 8) & 0x1f)); 7562 7540 } 7541 + 7542 + /* Notify the MAC when the speed is changed to force mode. */ 7543 + data = ocp_reg_read(tp, OCP_INTR_EN); 7544 + data |= INTR_SPEED_FORCE; 7545 + ocp_reg_write(tp, OCP_INTR_EN, data); 7563 7546 break; 7564 7547 default: 7565 7548 break; ··· 7959 7932 default: 7960 7933 break; 7961 7934 } 7935 + 7936 + /* Notify the MAC when the speed is changed to force mode. */ 7937 + data = ocp_reg_read(tp, OCP_INTR_EN); 7938 + data |= INTR_SPEED_FORCE; 7939 + ocp_reg_write(tp, OCP_INTR_EN, data); 7962 7940 7963 7941 if (rtl_phy_patch_request(tp, true, true)) 7964 7942 return; ··· 9372 9340 case RTL_VER_10: 9373 9341 ops->init = r8156_init; 9374 9342 ops->enable = rtl8156_enable; 9375 - ops->disable = rtl8153_disable; 9343 + ops->disable = rtl8156_disable; 9376 9344 ops->up = rtl8156_up; 9377 9345 ops->down = rtl8156_down; 9378 9346 ops->unload = rtl8153_unload; ··· 9910 9878 .probe = rtl8152_cfgselector_probe, 9911 9879 .id_table = rtl8152_table, 9912 9880 .generic_subclass = 1, 9881 + .supports_autosuspend = 1, 9913 9882 }; 9914 9883 9915 9884 static int __init rtl8152_driver_init(void)
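For the r8156_fc_parameter() logic above: flow control asserts once the RX FIFO holds roughly one 1024-byte-aligned frame plus 6 KiB and deasserts again at plus 14 KiB, and both thresholds are written to PLA_RX_FIFO_FULL/PLA_RX_FIFO_EMPTY in 16-byte units. A small userspace sketch of the arithmetic (ALIGN() mirrors the kernel macro; mtu_to_size() is approximated here as MTU + VLAN Ethernet header + FCS, which is an assumption):

#include <stdint.h>
#include <stdio.h>

/* Round x up to a power-of-two boundary a, like the kernel's ALIGN(). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

/* Approximation of the driver's mtu_to_size(): MTU + 18 (VLAN Ethernet
 * header) + 4 (FCS); the exact accounting is an assumption. */
static uint32_t mtu_to_size(uint32_t mtu)
{
        return mtu + 18 + 4;
}

int main(void)
{
        uint32_t mtu = 1500;    /* example MTU */
        uint32_t pause_on  = ALIGN(mtu_to_size(mtu), 1024) +  6 * 1024;
        uint32_t pause_off = ALIGN(mtu_to_size(mtu), 1024) + 14 * 1024;

        /* The FIFO registers take the thresholds in 16-byte units. */
        printf("pause_on=%u bytes (reg %u), pause_off=%u bytes (reg %u)\n",
               pause_on, pause_on / 16, pause_off, pause_off / 16);
        return 0;
}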
+2
drivers/net/virtio_net.c
··· 3560 3560 struct virtqueue *vq = vi->sq[i].vq; 3561 3561 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) 3562 3562 virtnet_sq_free_unused_buf(vq, buf); 3563 + cond_resched(); 3563 3564 } 3564 3565 3565 3566 for (i = 0; i < vi->max_queue_pairs; i++) { 3566 3567 struct virtqueue *vq = vi->rq[i].vq; 3567 3568 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) 3568 3569 virtnet_rq_free_unused_buf(vq, buf); 3570 + cond_resched(); 3569 3571 } 3570 3572 } 3571 3573
+1 -1
fs/9p/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 config 9P_FS 3 3 tristate "Plan 9 Resource Sharing Support (9P2000)" 4 - depends on INET && NET_9P 4 + depends on NET_9P 5 5 select NETFS_SUPPORT 6 6 help 7 7 If you say Y here, you will get experimental support for
-1
fs/9p/vfs_addr.c
··· 12 12 #include <linux/file.h> 13 13 #include <linux/stat.h> 14 14 #include <linux/string.h> 15 - #include <linux/inet.h> 16 15 #include <linux/pagemap.h> 17 16 #include <linux/sched.h> 18 17 #include <linux/swap.h>
-1
fs/9p/vfs_dentry.c
··· 13 13 #include <linux/pagemap.h> 14 14 #include <linux/stat.h> 15 15 #include <linux/string.h> 16 - #include <linux/inet.h> 17 16 #include <linux/namei.h> 18 17 #include <linux/sched.h> 19 18 #include <linux/slab.h>
-1
fs/9p/vfs_dir.c
··· 13 13 #include <linux/stat.h> 14 14 #include <linux/string.h> 15 15 #include <linux/sched.h> 16 - #include <linux/inet.h> 17 16 #include <linux/slab.h> 18 17 #include <linux/uio.h> 19 18 #include <linux/fscache.h>
-1
fs/9p/vfs_file.c
··· 14 14 #include <linux/file.h> 15 15 #include <linux/stat.h> 16 16 #include <linux/string.h> 17 - #include <linux/inet.h> 18 17 #include <linux/list.h> 19 18 #include <linux/pagemap.h> 20 19 #include <linux/utsname.h>
-1
fs/9p/vfs_inode.c
··· 15 15 #include <linux/pagemap.h> 16 16 #include <linux/stat.h> 17 17 #include <linux/string.h> 18 - #include <linux/inet.h> 19 18 #include <linux/namei.h> 20 19 #include <linux/sched.h> 21 20 #include <linux/slab.h>
-1
fs/9p/vfs_inode_dotl.c
··· 13 13 #include <linux/pagemap.h> 14 14 #include <linux/stat.h> 15 15 #include <linux/string.h> 16 - #include <linux/inet.h> 17 16 #include <linux/namei.h> 18 17 #include <linux/sched.h> 19 18 #include <linux/slab.h>
-1
fs/9p/vfs_super.c
··· 12 12 #include <linux/file.h> 13 13 #include <linux/stat.h> 14 14 #include <linux/string.h> 15 - #include <linux/inet.h> 16 15 #include <linux/pagemap.h> 17 16 #include <linux/mount.h> 18 17 #include <linux/sched.h>
+2 -2
fs/afs/afs.h
··· 19 19 #define AFSPATHMAX 1024 /* Maximum length of a pathname plus NUL */ 20 20 #define AFSOPAQUEMAX 1024 /* Maximum length of an opaque field */ 21 21 22 - #define AFS_VL_MAX_LIFESPAN (120 * HZ) 23 - #define AFS_PROBE_MAX_LIFESPAN (30 * HZ) 22 + #define AFS_VL_MAX_LIFESPAN 120 23 + #define AFS_PROBE_MAX_LIFESPAN 30 24 24 25 25 typedef u64 afs_volid_t; 26 26 typedef u64 afs_vnodeid_t;
+1 -1
fs/afs/internal.h
··· 128 128 spinlock_t state_lock; 129 129 int error; /* error code */ 130 130 u32 abort_code; /* Remote abort ID or 0 */ 131 - unsigned int max_lifespan; /* Maximum lifespan to set if not 0 */ 131 + unsigned int max_lifespan; /* Maximum lifespan in secs to set if not 0 */ 132 132 unsigned request_size; /* size of request data */ 133 133 unsigned reply_max; /* maximum size of reply */ 134 134 unsigned count2; /* count used in unmarshalling */
+3 -5
fs/afs/rxrpc.c
··· 335 335 /* create a call */ 336 336 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key, 337 337 (unsigned long)call, 338 - tx_total_len, gfp, 338 + tx_total_len, 339 + call->max_lifespan, 340 + gfp, 339 341 (call->async ? 340 342 afs_wake_up_async_call : 341 343 afs_wake_up_call_waiter), ··· 352 350 } 353 351 354 352 call->rxcall = rxcall; 355 - 356 - if (call->max_lifespan) 357 - rxrpc_kernel_set_max_life(call->net->socket, rxcall, 358 - call->max_lifespan); 359 353 call->issue_time = ktime_get_real(); 360 354 361 355 /* send the request */
+11 -10
include/net/af_rxrpc.h
··· 40 40 void rxrpc_kernel_new_call_notification(struct socket *, 41 41 rxrpc_notify_new_call_t, 42 42 rxrpc_discard_new_call_t); 43 - struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, 44 - struct sockaddr_rxrpc *, 45 - struct key *, 46 - unsigned long, 47 - s64, 48 - gfp_t, 49 - rxrpc_notify_rx_t, 50 - bool, 51 - enum rxrpc_interruptibility, 52 - unsigned int); 43 + struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, 44 + struct sockaddr_rxrpc *srx, 45 + struct key *key, 46 + unsigned long user_call_ID, 47 + s64 tx_total_len, 48 + u32 hard_timeout, 49 + gfp_t gfp, 50 + rxrpc_notify_rx_t notify_rx, 51 + bool upgrade, 52 + enum rxrpc_interruptibility interruptibility, 53 + unsigned int debug_id); 53 54 int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, 54 55 struct msghdr *, size_t, 55 56 rxrpc_notify_end_tx_t);
+1
include/net/bonding.h
··· 659 659 void bond_prepare_sysfs_group(struct bonding *bond); 660 660 int bond_sysfs_slave_add(struct slave *slave); 661 661 void bond_sysfs_slave_del(struct slave *slave); 662 + void bond_xdp_set_features(struct net_device *bond_dev); 662 663 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, 663 664 struct netlink_ext_ack *extack); 664 665 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
+1
include/net/netfilter/nf_tables.h
··· 619 619 }; 620 620 621 621 enum nft_trans_phase; 622 + void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set); 622 623 void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, 623 624 struct nft_set_binding *binding, 624 625 enum nft_trans_phase phase);
+2
net/9p/Kconfig
··· 17 17 18 18 config NET_9P_FD 19 19 default NET_9P 20 + imply INET 21 + imply UNIX 20 22 tristate "9P FD Transport" 21 23 help 22 24 This builds support for transports over TCP, Unix sockets and
+14 -6
net/core/skbuff.c
··· 1758 1758 { 1759 1759 int num_frags = skb_shinfo(skb)->nr_frags; 1760 1760 struct page *page, *head = NULL; 1761 - int i, new_frags; 1761 + int i, order, psize, new_frags; 1762 1762 u32 d_off; 1763 1763 1764 1764 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) ··· 1767 1767 if (!num_frags) 1768 1768 goto release; 1769 1769 1770 - new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; 1770 + /* We might have to allocate high order pages, so compute what minimum 1771 + * page order is needed. 1772 + */ 1773 + order = 0; 1774 + while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb)) 1775 + order++; 1776 + psize = (PAGE_SIZE << order); 1777 + 1778 + new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order); 1771 1779 for (i = 0; i < new_frags; i++) { 1772 - page = alloc_page(gfp_mask); 1780 + page = alloc_pages(gfp_mask | __GFP_COMP, order); 1773 1781 if (!page) { 1774 1782 while (head) { 1775 1783 struct page *next = (struct page *)page_private(head); ··· 1804 1796 vaddr = kmap_atomic(p); 1805 1797 1806 1798 while (done < p_len) { 1807 - if (d_off == PAGE_SIZE) { 1799 + if (d_off == psize) { 1808 1800 d_off = 0; 1809 1801 page = (struct page *)page_private(page); 1810 1802 } 1811 - copy = min_t(u32, PAGE_SIZE - d_off, p_len - done); 1803 + copy = min_t(u32, psize - d_off, p_len - done); 1812 1804 memcpy(page_address(page) + d_off, 1813 1805 vaddr + p_off + done, copy); 1814 1806 done += copy; ··· 1824 1816 1825 1817 /* skb frags point to kernel buffers */ 1826 1818 for (i = 0; i < new_frags - 1; i++) { 1827 - __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); 1819 + __skb_fill_page_desc(skb, i, head, 0, psize); 1828 1820 head = (struct page *)page_private(head); 1829 1821 } 1830 1822 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
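The sizing loop above picks the smallest page order at which MAX_SKB_FRAGS pages can still cover the skb's paged data, so a BIG TCP skb keeps fitting within the frag limit after the copy. A standalone model of that computation (PAGE_SHIFT and MAX_SKB_FRAGS are set to common values and the payload length is invented; all are configuration dependent in reality):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define MAX_SKB_FRAGS   17

int main(void)
{
        unsigned long pagelen = 185000; /* invented BIG TCP payload size */
        int order = 0;

        /* Smallest order such that MAX_SKB_FRAGS pages cover the data. */
        while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < pagelen)
                order++;

        unsigned long psize = PAGE_SIZE << order;
        int new_frags = (int)((pagelen + psize - 1) >> (PAGE_SHIFT + order));

        printf("order=%d psize=%lu new_frags=%d\n", order, psize, new_frags);
        return 0;
}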
+1 -1
net/ethtool/ioctl.c
··· 574 574 static int ethtool_set_link_ksettings(struct net_device *dev, 575 575 void __user *useraddr) 576 576 { 577 + struct ethtool_link_ksettings link_ksettings = {}; 577 578 int err; 578 - struct ethtool_link_ksettings link_ksettings; 579 579 580 580 ASSERT_RTNL(); 581 581
+5 -3
net/ipv6/sit.c
··· 1095 1095 1096 1096 static void ipip6_tunnel_bind_dev(struct net_device *dev) 1097 1097 { 1098 + struct ip_tunnel *tunnel = netdev_priv(dev); 1099 + int t_hlen = tunnel->hlen + sizeof(struct iphdr); 1098 1100 struct net_device *tdev = NULL; 1099 - struct ip_tunnel *tunnel; 1101 + int hlen = LL_MAX_HEADER; 1100 1102 const struct iphdr *iph; 1101 1103 struct flowi4 fl4; 1102 1104 1103 - tunnel = netdev_priv(dev); 1104 1105 iph = &tunnel->parms.iph; 1105 1106 1106 1107 if (iph->daddr) { ··· 1124 1123 tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link); 1125 1124 1126 1125 if (tdev && !netif_is_l3_master(tdev)) { 1127 - int t_hlen = tunnel->hlen + sizeof(struct iphdr); 1128 1126 int mtu; 1129 1127 1130 1128 mtu = tdev->mtu - t_hlen; 1131 1129 if (mtu < IPV6_MIN_MTU) 1132 1130 mtu = IPV6_MIN_MTU; 1133 1131 WRITE_ONCE(dev->mtu, mtu); 1132 + hlen = tdev->hard_header_len + tdev->needed_headroom; 1134 1133 } 1134 + dev->needed_headroom = t_hlen + hlen; 1135 1135 } 1136 1136 1137 1137 static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
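The headroom update above reserves space for the tunnel's outer IPv4 header plus whatever the underlying device itself needs, falling back to LL_MAX_HEADER while no lower device is resolved. A toy model of that sizing with invented numbers (the real LL_MAX_HEADER value is configuration dependent):

#include <stdio.h>

#define LL_MAX_HEADER   128     /* illustrative fallback value */
#define IPV4_HDR_LEN    20      /* sizeof(struct iphdr) */

int main(void)
{
        int tunnel_extra = 0;                   /* tunnel->hlen */
        int t_hlen = tunnel_extra + IPV4_HDR_LEN;
        int have_lower_dev = 1;                 /* route resolved? */
        int lower_hard_header = 14, lower_needed = 16;
        int hlen = LL_MAX_HEADER;               /* conservative fallback */

        if (have_lower_dev)
                hlen = lower_hard_header + lower_needed;

        printf("needed_headroom = %d\n", t_hlen + hlen);
        return 0;
}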
+1 -1
net/ipv6/tcp_ipv6.c
··· 1065 1065 if (np->repflow) 1066 1066 label = ip6_flowlabel(ipv6h); 1067 1067 priority = sk->sk_priority; 1068 - txhash = sk->sk_hash; 1068 + txhash = sk->sk_txhash; 1069 1069 } 1070 1070 if (sk->sk_state == TCP_TIME_WAIT) { 1071 1071 label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
+1
net/ncsi/ncsi-aen.c
··· 165 165 nc->state = NCSI_CHANNEL_INACTIVE; 166 166 list_add_tail_rcu(&nc->link, &ndp->channel_queue); 167 167 spin_unlock_irqrestore(&ndp->lock, flags); 168 + nc->modes[NCSI_MODE_TX_ENABLE].enable = 0; 168 169 169 170 return ncsi_process_next_channel(ndp); 170 171 }
+29 -12
net/netfilter/nf_tables_api.c
··· 2075 2075 2076 2076 if (!basechain) { 2077 2077 if (!ha[NFTA_HOOK_HOOKNUM] || 2078 - !ha[NFTA_HOOK_PRIORITY]) 2079 - return -EINVAL; 2078 + !ha[NFTA_HOOK_PRIORITY]) { 2079 + NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]); 2080 + return -ENOENT; 2081 + } 2080 2082 2081 2083 hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM])); 2082 2084 hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY])); ··· 5127 5125 } 5128 5126 } 5129 5127 5128 + void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set) 5129 + { 5130 + if (nft_set_is_anonymous(set)) 5131 + nft_clear(ctx->net, set); 5132 + 5133 + set->use++; 5134 + } 5135 + EXPORT_SYMBOL_GPL(nf_tables_activate_set); 5136 + 5130 5137 void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, 5131 5138 struct nft_set_binding *binding, 5132 5139 enum nft_trans_phase phase) 5133 5140 { 5134 5141 switch (phase) { 5135 5142 case NFT_TRANS_PREPARE: 5143 + if (nft_set_is_anonymous(set)) 5144 + nft_deactivate_next(ctx->net, set); 5145 + 5136 5146 set->use--; 5137 5147 return; 5138 5148 case NFT_TRANS_ABORT: ··· 7707 7693 }; 7708 7694 7709 7695 static int nft_flowtable_parse_hook(const struct nft_ctx *ctx, 7710 - const struct nlattr *attr, 7696 + const struct nlattr * const nla[], 7711 7697 struct nft_flowtable_hook *flowtable_hook, 7712 7698 struct nft_flowtable *flowtable, 7713 7699 struct netlink_ext_ack *extack, bool add) ··· 7719 7705 7720 7706 INIT_LIST_HEAD(&flowtable_hook->list); 7721 7707 7722 - err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX, attr, 7708 + err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX, 7709 + nla[NFTA_FLOWTABLE_HOOK], 7723 7710 nft_flowtable_hook_policy, NULL); 7724 7711 if (err < 0) 7725 7712 return err; 7726 7713 7727 7714 if (add) { 7728 7715 if (!tb[NFTA_FLOWTABLE_HOOK_NUM] || 7729 - !tb[NFTA_FLOWTABLE_HOOK_PRIORITY]) 7730 - return -EINVAL; 7716 + !tb[NFTA_FLOWTABLE_HOOK_PRIORITY]) { 7717 + NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]); 7718 + return -ENOENT; 7719 + } 7731 7720 7732 7721 hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM])); 7733 7722 if (hooknum != NF_NETDEV_INGRESS) ··· 7915 7898 u32 flags; 7916 7899 int err; 7917 7900 7918 - err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK], 7919 - &flowtable_hook, flowtable, extack, false); 7901 + err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable, 7902 + extack, false); 7920 7903 if (err < 0) 7921 7904 return err; 7922 7905 ··· 8061 8044 if (err < 0) 8062 8045 goto err3; 8063 8046 8064 - err = nft_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK], 8065 - &flowtable_hook, flowtable, extack, true); 8047 + err = nft_flowtable_parse_hook(&ctx, nla, &flowtable_hook, flowtable, 8048 + extack, true); 8066 8049 if (err < 0) 8067 8050 goto err4; 8068 8051 ··· 8124 8107 struct nft_trans *trans; 8125 8108 int err; 8126 8109 8127 - err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK], 8128 - &flowtable_hook, flowtable, extack, false); 8110 + err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable, 8111 + extack, false); 8129 8112 if (err < 0) 8130 8113 return err; 8131 8114
+10 -4
net/netfilter/nft_ct_fast.c
··· 15 15 unsigned int state; 16 16 17 17 ct = nf_ct_get(pkt->skb, &ctinfo); 18 - if (!ct) { 19 - regs->verdict.code = NFT_BREAK; 20 - return; 21 - } 22 18 23 19 switch (priv->key) { 24 20 case NFT_CT_STATE: ··· 26 30 state = NF_CT_STATE_INVALID_BIT; 27 31 *dest = state; 28 32 return; 33 + default: 34 + break; 35 + } 36 + 37 + if (!ct) { 38 + regs->verdict.code = NFT_BREAK; 39 + return; 40 + } 41 + 42 + switch (priv->key) { 29 43 case NFT_CT_DIRECTION: 30 44 nft_reg_store8(dest, CTINFO2DIR(ctinfo)); 31 45 return;
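The reordering above matters because the STATE key is defined even when a packet has no conntrack entry (untracked or invalid), so the NULL check may only guard the keys that actually dereference the entry. A toy model of that control flow, with all names and values invented:

#include <stdio.h>
#include <stddef.h>

enum key { KEY_STATE, KEY_DIRECTION };

static int eval(enum key k, const void *ct, unsigned int *dest)
{
        switch (k) {
        case KEY_STATE:
                /* STATE is meaningful even without a conntrack entry. */
                *dest = ct ? 1 /* tracked */ : 2 /* untracked/invalid */;
                return 0;
        default:
                break;
        }

        if (!ct)
                return -1;      /* remaining keys dereference the entry */

        switch (k) {
        case KEY_DIRECTION:
                *dest = 0;
                return 0;
        default:
                return -1;
        }
}

int main(void)
{
        unsigned int v = 0;
        int r = eval(KEY_STATE, NULL, &v);

        printf("STATE without ct: %d (v=%u)\n", r, v);
        printf("DIRECTION without ct: %d\n", eval(KEY_DIRECTION, NULL, &v));
        return 0;
}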
+1 -1
net/netfilter/nft_dynset.c
··· 342 342 { 343 343 struct nft_dynset *priv = nft_expr_priv(expr); 344 344 345 - priv->set->use++; 345 + nf_tables_activate_set(ctx, priv->set); 346 346 } 347 347 348 348 static void nft_dynset_destroy(const struct nft_ctx *ctx,
+1 -1
net/netfilter/nft_lookup.c
··· 167 167 { 168 168 struct nft_lookup *priv = nft_expr_priv(expr); 169 169 170 - priv->set->use++; 170 + nf_tables_activate_set(ctx, priv->set); 171 171 } 172 172 173 173 static void nft_lookup_destroy(const struct nft_ctx *ctx,
+1 -1
net/netfilter/nft_objref.c
··· 185 185 { 186 186 struct nft_objref_map *priv = nft_expr_priv(expr); 187 187 188 - priv->set->use++; 188 + nf_tables_activate_set(ctx, priv->set); 189 189 } 190 190 191 191 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
+1 -1
net/packet/af_packet.c
··· 2033 2033 goto retry; 2034 2034 } 2035 2035 2036 - if (!dev_validate_header(dev, skb->data, len)) { 2036 + if (!dev_validate_header(dev, skb->data, len) || !skb->len) { 2037 2037 err = -EINVAL; 2038 2038 goto out_unlock; 2039 2039 }
+3
net/rxrpc/af_rxrpc.c
··· 265 265 * @key: The security context to use (defaults to socket setting) 266 266 * @user_call_ID: The ID to use 267 267 * @tx_total_len: Total length of data to transmit during the call (or -1) 268 + * @hard_timeout: The maximum lifespan of the call in sec 268 269 * @gfp: The allocation constraints 269 270 * @notify_rx: Where to send notifications instead of socket queue 270 271 * @upgrade: Request service upgrade for call ··· 284 283 struct key *key, 285 284 unsigned long user_call_ID, 286 285 s64 tx_total_len, 286 + u32 hard_timeout, 287 287 gfp_t gfp, 288 288 rxrpc_notify_rx_t notify_rx, 289 289 bool upgrade, ··· 315 313 p.tx_total_len = tx_total_len; 316 314 p.interruptibility = interruptibility; 317 315 p.kernel = true; 316 + p.timeouts.hard = hard_timeout; 318 317 319 318 memset(&cp, 0, sizeof(cp)); 320 319 cp.local = rx->local;
+1
net/rxrpc/ar-internal.h
··· 616 616 unsigned long expect_term_by; /* When we expect call termination by */ 617 617 u32 next_rx_timo; /* Timeout for next Rx packet (jif) */ 618 618 u32 next_req_timo; /* Timeout for next Rx request packet (jif) */ 619 + u32 hard_timo; /* Maximum lifetime or 0 (jif) */ 619 620 struct timer_list timer; /* Combined event timer */ 620 621 struct work_struct destroyer; /* In-process-context destroyer */ 621 622 rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
+8 -1
net/rxrpc/call_object.c
··· 224 224 if (cp->exclusive) 225 225 __set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags); 226 226 227 + if (p->timeouts.normal) 228 + call->next_rx_timo = max(msecs_to_jiffies(p->timeouts.normal), 1UL); 229 + if (p->timeouts.idle) 230 + call->next_req_timo = max(msecs_to_jiffies(p->timeouts.idle), 1UL); 231 + if (p->timeouts.hard) 232 + call->hard_timo = p->timeouts.hard * HZ; 233 + 227 234 ret = rxrpc_init_client_call_security(call); 228 235 if (ret < 0) { 229 236 rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret); ··· 262 255 call->keepalive_at = j; 263 256 call->expect_rx_by = j; 264 257 call->expect_req_by = j; 265 - call->expect_term_by = j; 258 + call->expect_term_by = j + call->hard_timo; 266 259 call->timer.expires = now; 267 260 }
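The conversions above are purely about units: the new hard timeout arrives in seconds (hence the * HZ), while the older normal/idle timeouts arrive in milliseconds and go through msecs_to_jiffies() with a one-jiffy floor. A userspace sketch of why mixing the scales silently shrinks a timeout (the HZ value and the simplified msecs_to_jiffies() are illustrative, not the kernel's):

#include <stdio.h>

#define HZ 100  /* example tick rate */

/* Simplified model of the kernel's msecs_to_jiffies() (rounds up). */
static unsigned long msecs_to_jiffies(unsigned int ms)
{
        return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
        unsigned int hard_sec = 30;     /* hard timeout, in SECONDS */
        unsigned int normal_ms = 2500;  /* normal timeout, in MILLISECONDS */

        printf("hard: %u s -> %u jiffies\n", hard_sec, hard_sec * HZ);
        printf("normal: %u ms -> %lu jiffies\n", normal_ms,
               msecs_to_jiffies(normal_ms));
        /* Mixing the scales is the bug class being fixed: treating 30 s
         * as 30 ms would yield msecs_to_jiffies(30) = 3 jiffies. */
        return 0;
}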
+11 -11
net/rxrpc/sendmsg.c
··· 50 50 _enter("%d", call->debug_id); 51 51 52 52 if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) 53 - return call->error; 53 + goto no_wait; 54 54 55 55 add_wait_queue_exclusive(&call->waitq, &myself); 56 56 57 57 for (;;) { 58 - ret = call->error; 59 - if (ret < 0) 60 - break; 61 - 62 58 switch (call->interruptibility) { 63 59 case RXRPC_INTERRUPTIBLE: 64 60 case RXRPC_PREINTERRUPTIBLE: ··· 65 69 set_current_state(TASK_UNINTERRUPTIBLE); 66 70 break; 67 71 } 68 - if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) { 69 - ret = call->error; 72 + 73 + if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) 70 74 break; 71 - } 72 75 if ((call->interruptibility == RXRPC_INTERRUPTIBLE || 73 76 call->interruptibility == RXRPC_PREINTERRUPTIBLE) && 74 77 signal_pending(current)) { ··· 80 85 remove_wait_queue(&call->waitq, &myself); 81 86 __set_current_state(TASK_RUNNING); 82 87 88 + no_wait: 83 89 if (ret == 0 && rxrpc_call_is_complete(call)) 84 90 ret = call->error; 85 91 ··· 651 655 if (IS_ERR(call)) 652 656 return PTR_ERR(call); 653 657 /* ... and we have the call lock. */ 658 + p.call.nr_timeouts = 0; 654 659 ret = 0; 655 660 if (rxrpc_call_is_complete(call)) 656 661 goto out_put_unlock; 657 662 } else { 658 663 switch (rxrpc_call_state(call)) { 659 - case RXRPC_CALL_UNINITIALISED: 660 664 case RXRPC_CALL_CLIENT_AWAIT_CONN: 661 - case RXRPC_CALL_SERVER_PREALLOC: 662 665 case RXRPC_CALL_SERVER_SECURING: 666 + if (p.command == RXRPC_CMD_SEND_ABORT) 667 + break; 668 + fallthrough; 669 + case RXRPC_CALL_UNINITIALISED: 670 + case RXRPC_CALL_SERVER_PREALLOC: 663 671 rxrpc_put_call(call, rxrpc_call_put_sendmsg); 664 672 ret = -EBUSY; 665 673 goto error_release_sock; ··· 703 703 fallthrough; 704 704 case 1: 705 705 if (p.call.timeouts.hard > 0) { 706 - j = msecs_to_jiffies(p.call.timeouts.hard); 706 + j = p.call.timeouts.hard * HZ; 707 707 now = jiffies; 708 708 j += now; 709 709 WRITE_ONCE(call->expect_term_by, j);
+1 -1
net/sched/act_mirred.c
··· 264 264 goto out; 265 265 } 266 266 267 - if (unlikely(!(dev->flags & IFF_UP))) { 267 + if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) { 268 268 net_notice_ratelimited("tc mirred to Houston: device %s is down\n", 269 269 dev->name); 270 270 goto out;
+3 -1
net/sched/act_pedit.c
··· 258 258 if (!offmask && cur % 4) { 259 259 NL_SET_ERR_MSG_MOD(extack, "Offsets must be on 32bit boundaries"); 260 260 ret = -EINVAL; 261 - goto put_chain; 261 + goto out_free_keys; 262 262 } 263 263 264 264 /* sanitize the shift value for any later use */ ··· 291 291 292 292 return ret; 293 293 294 + out_free_keys: 295 + kfree(nparms->tcfp_keys); 294 296 put_chain: 295 297 if (goto_ch) 296 298 tcf_chain_put_by_act(goto_ch);
+1
net/sched/cls_api.c
··· 1589 1589 1590 1590 err_unroll: 1591 1591 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { 1592 + list_del(&block_cb->driver_list); 1592 1593 if (i-- > 0) { 1593 1594 list_del(&block_cb->list); 1594 1595 tcf_block_playback_offloads(block, block_cb->cb,
+5 -4
net/sched/cls_flower.c
··· 2210 2210 spin_lock(&tp->lock); 2211 2211 if (!handle) { 2212 2212 handle = 1; 2213 - err = idr_alloc_u32(&head->handle_idr, fnew, &handle, 2213 + err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2214 2214 INT_MAX, GFP_ATOMIC); 2215 2215 } else { 2216 - err = idr_alloc_u32(&head->handle_idr, fnew, &handle, 2216 + err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2217 2217 handle, GFP_ATOMIC); 2218 2218 2219 2219 /* Filter with specified handle was concurrently ··· 2339 2339 errout_mask: 2340 2340 fl_mask_put(head, fnew->mask); 2341 2341 errout_idr: 2342 - idr_remove(&head->handle_idr, fnew->handle); 2342 + if (!fold) 2343 + idr_remove(&head->handle_idr, fnew->handle); 2343 2344 __fl_put(fnew); 2344 2345 errout_tb: 2345 2346 kfree(tb); ··· 2379 2378 rcu_read_lock(); 2380 2379 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { 2381 2380 /* don't return filters that are being deleted */ 2382 - if (!refcount_inc_not_zero(&f->refcnt)) 2381 + if (!f || !refcount_inc_not_zero(&f->refcnt)) 2383 2382 continue; 2384 2383 rcu_read_unlock(); 2385 2384
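The idr change above reserves the handle with a NULL pointer first and publishes the filter only once it is fully initialised, which is why the walk now has to skip NULL entries. A toy reserve-then-publish model (the fixed-size table stands in for the kernel IDR; every name here is invented):

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

#define NSLOTS 8
static bool allocated[NSLOTS];
static void *object[NSLOTS];

static int alloc_handle(void)
{
        for (int h = 1; h < NSLOTS; h++) {      /* handle 0 stays unused */
                if (!allocated[h]) {
                        allocated[h] = true;
                        object[h] = NULL;       /* reserved, not yet visible */
                        return h;
                }
        }
        return -1;
}

int main(void)
{
        int h = alloc_handle();
        int filter = 42;

        /* A concurrent walker must skip reserved-but-empty slots, just
         * like the "if (!f || ...)" check in the walk above. */
        for (int i = 1; i < NSLOTS; i++)
                if (allocated[i] && !object[i])
                        printf("slot %d reserved, skip\n", i);

        object[h] = &filter;                    /* publish when ready */
        printf("slot %d published\n", h);
        return 0;
}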
+5 -5
tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh
··· 292 292 ip netns exec ${hsname} sysctl -wq net.ipv6.conf.all.accept_dad=0 293 293 ip netns exec ${hsname} sysctl -wq net.ipv6.conf.default.accept_dad=0 294 294 295 + # disable the rp_filter otherwise the kernel gets confused about how 296 + # to route decap ipv4 packets. 297 + ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0 298 + ip netns exec ${rtname} sysctl -wq net.ipv4.conf.default.rp_filter=0 299 + 295 300 ip -netns ${hsname} link add veth0 type veth peer name ${rtveth} 296 301 ip -netns ${hsname} link set ${rtveth} netns ${rtname} 297 302 ip -netns ${hsname} addr add ${IPv6_HS_NETWORK}::${hs}/64 dev veth0 nodad ··· 320 315 321 316 ip netns exec ${rtname} sysctl -wq net.ipv6.conf.${rtveth}.proxy_ndp=1 322 317 ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1 323 - 324 - # disable the rp_filter otherwise the kernel gets confused about how 325 - # to route decap ipv4 packets. 326 - ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0 327 - ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.rp_filter=0 328 318 329 319 ip netns exec ${rtname} sh -c "echo 1 > /proc/sys/net/vrf/strict_mode" 330 320 }
+5 -2
tools/testing/selftests/netfilter/Makefile
··· 8 8 ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \ 9 9 conntrack_vrf.sh nft_synproxy.sh rpath.sh 10 10 11 - CFLAGS += $(shell pkg-config --cflags libmnl 2>/dev/null || echo "-I/usr/include/libmnl") 12 - LDLIBS = -lmnl 11 + HOSTPKG_CONFIG := pkg-config 12 + 13 + CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null) 14 + LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl) 15 + 13 16 TEST_GEN_FILES = nf-queue connect_close 14 17 15 18 include ../lib.mk