Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Handle v4/v6 mixed sockets properly in soreuseport, from Craig
Gallek.

2) Bug fixes for the new macsec facility (missing kmalloc NULL checks,
missing locking around netdev list traversal, etc.) from Sabrina
Dubroca.

3) Fix handling of host routes on ifdown in ipv6, from David Ahern.

4) Fix double-fdput in bpf verifier. From Jann Horn.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (31 commits)
bpf: fix double-fdput in replace_map_fd_with_map_ptr()
net: ipv6: Delete host routes on an ifdown
Revert "ipv6: Revert optional address flusing on ifdown."
net/mlx4_en: fix spurious timestamping callbacks
net: dummy: remove note about being Y by default
cxgbi: fix uninitialized flowi6
ipv6: Revert optional address flusing on ifdown.
ipv4/fib: don't warn when primary address is missing if in_dev is dead
net/mlx5: Add pci shutdown callback
net/mlx5_core: Remove static from local variable
net/mlx5e: Use vport MTU rather than physical port MTU
net/mlx5e: Fix minimum MTU
net/mlx5e: Device's mtu field is u16 and not int
net/mlx5_core: Add ConnectX-5 to list of supported devices
net/mlx5e: Fix MLX5E_100BASE_T define
net/mlx5_core: Fix soft lockup in steering error flow
qlcnic: Update version to 5.3.64
net: stmmac: socfpga: Remove re-registration of reset controller
macsec: fix netlink attribute validation
macsec: add missing macsec prefix in uapi
...

+351 -218
+9
MAINTAINERS
··· 11071 11071 F: drivers/clk/ti/ 11072 11072 F: include/linux/clk/ti.h 11073 11073 11074 + TI ETHERNET SWITCH DRIVER (CPSW) 11075 + M: Mugunthan V N <mugunthanvnm@ti.com> 11076 + R: Grygorii Strashko <grygorii.strashko@ti.com> 11077 + L: linux-omap@vger.kernel.org 11078 + L: netdev@vger.kernel.org 11079 + S: Maintained 11080 + F: drivers/net/ethernet/ti/cpsw* 11081 + F: drivers/net/ethernet/ti/davinci* 11082 + 11074 11083 TI FLASH MEDIA INTERFACE DRIVER 11075 11084 M: Alex Dubov <oakad@yahoo.com> 11076 11085 S: Maintained
+2 -2
drivers/infiniband/hw/mlx5/main.c
··· 671 671 struct mlx5_ib_dev *dev = to_mdev(ibdev); 672 672 struct mlx5_core_dev *mdev = dev->mdev; 673 673 struct mlx5_hca_vport_context *rep; 674 - int max_mtu; 675 - int oper_mtu; 674 + u16 max_mtu; 675 + u16 oper_mtu; 676 676 int err; 677 677 u8 ib_link_width_oper; 678 678 u8 vl_hw_cap;
+2 -3
drivers/net/Kconfig
··· 62 62 this device is consigned into oblivion) with a configurable IP 63 63 address. It is most commonly used in order to make your currently 64 64 inactive SLIP address seem like a real address for local programs. 65 - If you use SLIP or PPP, you might want to say Y here. Since this 66 - thing often comes in handy, the default is Y. It won't enlarge your 67 - kernel either. What a deal. Read about it in the Network 65 + If you use SLIP or PPP, you might want to say Y here. It won't 66 + enlarge your kernel. What a deal. Read about it in the Network 68 67 Administrator's Guide, available from 69 68 <http://www.tldp.org/docs.html#guide>. 70 69
+4 -2
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 405 405 u32 packets = 0; 406 406 u32 bytes = 0; 407 407 int factor = priv->cqe_factor; 408 - u64 timestamp = 0; 409 408 int done = 0; 410 409 int budget = priv->tx_work_limit; 411 410 u32 last_nr_txbb; ··· 444 445 new_index = be16_to_cpu(cqe->wqe_index) & size_mask; 445 446 446 447 do { 448 + u64 timestamp = 0; 449 + 447 450 txbbs_skipped += last_nr_txbb; 448 451 ring_index = (ring_index + last_nr_txbb) & size_mask; 449 - if (ring->tx_info[ring_index].ts_requested) 452 + 453 + if (unlikely(ring->tx_info[ring_index].ts_requested)) 450 454 timestamp = mlx4_en_get_cqe_ts(cqe); 451 455 452 456 /* free next descriptor */
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 609 609 MLX5E_100GBASE_KR4 = 22, 610 610 MLX5E_100GBASE_LR4 = 23, 611 611 MLX5E_100BASE_TX = 24, 612 - MLX5E_100BASE_T = 25, 612 + MLX5E_1000BASE_T = 25, 613 613 MLX5E_10GBASE_T = 26, 614 614 MLX5E_25GBASE_CR = 27, 615 615 MLX5E_25GBASE_KR = 28,
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 138 138 [MLX5E_100BASE_TX] = { 139 139 .speed = 100, 140 140 }, 141 - [MLX5E_100BASE_T] = { 142 - .supported = SUPPORTED_100baseT_Full, 143 - .advertised = ADVERTISED_100baseT_Full, 144 - .speed = 100, 141 + [MLX5E_1000BASE_T] = { 142 + .supported = SUPPORTED_1000baseT_Full, 143 + .advertised = ADVERTISED_1000baseT_Full, 144 + .speed = 1000, 145 145 }, 146 146 [MLX5E_10GBASE_T] = { 147 147 .supported = SUPPORTED_10000baseT_Full,
+57 -15
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 1404 1404 return 0; 1405 1405 } 1406 1406 1407 - static int mlx5e_set_dev_port_mtu(struct net_device *netdev) 1407 + static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) 1408 1408 { 1409 - struct mlx5e_priv *priv = netdev_priv(netdev); 1410 1409 struct mlx5_core_dev *mdev = priv->mdev; 1411 - int hw_mtu; 1410 + u16 hw_mtu = MLX5E_SW2HW_MTU(mtu); 1412 1411 int err; 1413 1412 1414 - err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); 1413 + err = mlx5_set_port_mtu(mdev, hw_mtu, 1); 1415 1414 if (err) 1416 1415 return err; 1417 1416 1418 - mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); 1417 + /* Update vport context MTU */ 1418 + mlx5_modify_nic_vport_mtu(mdev, hw_mtu); 1419 + return 0; 1420 + } 1419 1421 1420 - if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) 1421 - netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", 1422 - __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); 1422 + static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) 1423 + { 1424 + struct mlx5_core_dev *mdev = priv->mdev; 1425 + u16 hw_mtu = 0; 1426 + int err; 1423 1427 1424 - netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); 1428 + err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu); 1429 + if (err || !hw_mtu) /* fallback to port oper mtu */ 1430 + mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); 1431 + 1432 + *mtu = MLX5E_HW2SW_MTU(hw_mtu); 1433 + } 1434 + 1435 + static int mlx5e_set_dev_port_mtu(struct net_device *netdev) 1436 + { 1437 + struct mlx5e_priv *priv = netdev_priv(netdev); 1438 + u16 mtu; 1439 + int err; 1440 + 1441 + err = mlx5e_set_mtu(priv, netdev->mtu); 1442 + if (err) 1443 + return err; 1444 + 1445 + mlx5e_query_mtu(priv, &mtu); 1446 + if (mtu != netdev->mtu) 1447 + netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", 1448 + __func__, mtu, netdev->mtu); 1449 + 1450 + netdev->mtu = mtu; 1425 1451 return 0; 1426 1452 } 1427 1453 ··· 2025 1999 return err; 2026 2000 } 2027 2001 2002 + #define MXL5_HW_MIN_MTU 64 2003 + #define MXL5E_MIN_MTU 
(MXL5_HW_MIN_MTU + ETH_FCS_LEN) 2004 + 2028 2005 static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) 2029 2006 { 2030 2007 struct mlx5e_priv *priv = netdev_priv(netdev); 2031 2008 struct mlx5_core_dev *mdev = priv->mdev; 2032 2009 bool was_opened; 2033 - int max_mtu; 2010 + u16 max_mtu; 2011 + u16 min_mtu; 2034 2012 int err = 0; 2035 2013 2036 2014 mlx5_query_port_max_mtu(mdev, &max_mtu, 1); 2037 2015 2038 2016 max_mtu = MLX5E_HW2SW_MTU(max_mtu); 2017 + min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU); 2039 2018 2040 - if (new_mtu > max_mtu) { 2019 + if (new_mtu > max_mtu || new_mtu < min_mtu) { 2041 2020 netdev_err(netdev, 2042 - "%s: Bad MTU (%d) > (%d) Max\n", 2043 - __func__, new_mtu, max_mtu); 2021 + "%s: Bad MTU (%d), valid range is: [%d..%d]\n", 2022 + __func__, new_mtu, min_mtu, max_mtu); 2044 2023 return -EINVAL; 2045 2024 } 2046 2025 ··· 2633 2602 schedule_work(&priv->set_rx_mode_work); 2634 2603 mlx5e_disable_async_events(priv); 2635 2604 flush_scheduled_work(); 2636 - unregister_netdev(netdev); 2605 + if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { 2606 + netif_device_detach(netdev); 2607 + mutex_lock(&priv->state_lock); 2608 + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 2609 + mlx5e_close_locked(netdev); 2610 + mutex_unlock(&priv->state_lock); 2611 + } else { 2612 + unregister_netdev(netdev); 2613 + } 2614 + 2637 2615 mlx5e_tc_cleanup(priv); 2638 2616 mlx5e_vxlan_cleanup(priv); 2639 2617 mlx5e_destroy_flow_tables(priv); ··· 2655 2615 mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); 2656 2616 mlx5_core_dealloc_pd(priv->mdev, priv->pdn); 2657 2617 mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); 2658 - free_netdev(netdev); 2618 + 2619 + if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) 2620 + free_netdev(netdev); 2659 2621 } 2660 2622 2661 2623 static void *mlx5e_get_netdev(void *vpriv)
+18 -30
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1065 1065 return rule; 1066 1066 } 1067 1067 1068 - static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft, 1069 - u8 match_criteria_enable, 1070 - u32 *match_criteria, 1071 - u32 *match_value, 1072 - u8 action, 1073 - u32 flow_tag, 1074 - struct mlx5_flow_destination *dest) 1075 - { 1076 - struct mlx5_flow_rule *rule; 1077 - struct mlx5_flow_group *g; 1078 - 1079 - g = create_autogroup(ft, match_criteria_enable, match_criteria); 1080 - if (IS_ERR(g)) 1081 - return (void *)g; 1082 - 1083 - rule = add_rule_fg(g, match_value, 1084 - action, flow_tag, dest); 1085 - if (IS_ERR(rule)) { 1086 - /* Remove assumes refcount > 0 and autogroup creates a group 1087 - * with a refcount = 0. 1088 - */ 1089 - tree_get_node(&g->node); 1090 - tree_remove_node(&g->node); 1091 - } 1092 - return rule; 1093 - } 1094 - 1095 1068 static struct mlx5_flow_rule * 1096 1069 _mlx5_add_flow_rule(struct mlx5_flow_table *ft, 1097 1070 u8 match_criteria_enable, ··· 1092 1119 goto unlock; 1093 1120 } 1094 1121 1095 - rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, 1096 - match_value, action, flow_tag, dest); 1122 + g = create_autogroup(ft, match_criteria_enable, match_criteria); 1123 + if (IS_ERR(g)) { 1124 + rule = (void *)g; 1125 + goto unlock; 1126 + } 1127 + 1128 + rule = add_rule_fg(g, match_value, 1129 + action, flow_tag, dest); 1130 + if (IS_ERR(rule)) { 1131 + /* Remove assumes refcount > 0 and autogroup creates a group 1132 + * with a refcount = 0. 1133 + */ 1134 + unlock_ref_node(&ft->node); 1135 + tree_get_node(&g->node); 1136 + tree_remove_node(&g->node); 1137 + return rule; 1138 + } 1097 1139 unlock: 1098 1140 unlock_ref_node(&ft->node); 1099 1141 return rule; ··· 1276 1288 { 1277 1289 struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; 1278 1290 int prio; 1279 - static struct fs_prio *fs_prio; 1291 + struct fs_prio *fs_prio; 1280 1292 struct mlx5_flow_namespace *ns; 1281 1293 1282 1294 if (!root_ns)
+21 -4
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 966 966 int err; 967 967 968 968 mutex_lock(&dev->intf_state_mutex); 969 - if (dev->interface_state == MLX5_INTERFACE_STATE_UP) { 969 + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { 970 970 dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", 971 971 __func__); 972 972 goto out; ··· 1133 1133 if (err) 1134 1134 pr_info("failed request module on %s\n", MLX5_IB_MOD); 1135 1135 1136 - dev->interface_state = MLX5_INTERFACE_STATE_UP; 1136 + clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); 1137 + set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1137 1138 out: 1138 1139 mutex_unlock(&dev->intf_state_mutex); 1139 1140 ··· 1208 1207 } 1209 1208 1210 1209 mutex_lock(&dev->intf_state_mutex); 1211 - if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { 1210 + if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { 1212 1211 dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", 1213 1212 __func__); 1214 1213 goto out; ··· 1242 1241 mlx5_cmd_cleanup(dev); 1243 1242 1244 1243 out: 1245 - dev->interface_state = MLX5_INTERFACE_STATE_DOWN; 1244 + clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); 1245 + set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); 1246 1246 mutex_unlock(&dev->intf_state_mutex); 1247 1247 return err; 1248 1248 } ··· 1454 1452 .resume = mlx5_pci_resume 1455 1453 }; 1456 1454 1455 + static void shutdown(struct pci_dev *pdev) 1456 + { 1457 + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1458 + struct mlx5_priv *priv = &dev->priv; 1459 + 1460 + dev_info(&pdev->dev, "Shutdown was called\n"); 1461 + /* Notify mlx5 clients that the kernel is being shut down */ 1462 + set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state); 1463 + mlx5_unload_one(dev, priv); 1464 + mlx5_pci_disable_device(dev); 1465 + } 1466 + 1457 1467 static const struct pci_device_id mlx5_core_pci_table[] = { 1458 1468 { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ 1459 1469 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB 
VF */ ··· 1473 1459 { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ 1474 1460 { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ 1475 1461 { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ 1462 + { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */ 1463 + { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ 1476 1464 { 0, } 1477 1465 }; 1478 1466 ··· 1485 1469 .id_table = mlx5_core_pci_table, 1486 1470 .probe = init_one, 1487 1471 .remove = remove_one, 1472 + .shutdown = shutdown, 1488 1473 .err_handler = &mlx5_err_handler, 1489 1474 .sriov_configure = mlx5_core_sriov_configure, 1490 1475 };
+5 -5
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 247 247 } 248 248 EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); 249 249 250 - static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, 251 - int *max_mtu, int *oper_mtu, u8 port) 250 + static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, 251 + u16 *max_mtu, u16 *oper_mtu, u8 port) 252 252 { 253 253 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 254 254 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; ··· 268 268 *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); 269 269 } 270 270 271 - int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) 271 + int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port) 272 272 { 273 273 u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; 274 274 u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; ··· 283 283 } 284 284 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); 285 285 286 - void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, 286 + void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, 287 287 u8 port) 288 288 { 289 289 mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); 290 290 } 291 291 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); 292 292 293 - void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, 293 + void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, 294 294 u8 port) 295 295 { 296 296 mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
+40
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 196 196 } 197 197 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); 198 198 199 + int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu) 200 + { 201 + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); 202 + u32 *out; 203 + int err; 204 + 205 + out = mlx5_vzalloc(outlen); 206 + if (!out) 207 + return -ENOMEM; 208 + 209 + err = mlx5_query_nic_vport_context(mdev, 0, out, outlen); 210 + if (!err) 211 + *mtu = MLX5_GET(query_nic_vport_context_out, out, 212 + nic_vport_context.mtu); 213 + 214 + kvfree(out); 215 + return err; 216 + } 217 + EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu); 218 + 219 + int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu) 220 + { 221 + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); 222 + void *in; 223 + int err; 224 + 225 + in = mlx5_vzalloc(inlen); 226 + if (!in) 227 + return -ENOMEM; 228 + 229 + MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1); 230 + MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu); 231 + 232 + err = mlx5_modify_nic_vport_context(mdev, in, inlen); 233 + 234 + kvfree(in); 235 + return err; 236 + } 237 + EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu); 238 + 199 239 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, 200 240 u32 vport, 201 241 enum mlx5_list_type list_type,
+2 -2
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
··· 37 37 38 38 #define _QLCNIC_LINUX_MAJOR 5 39 39 #define _QLCNIC_LINUX_MINOR 3 40 - #define _QLCNIC_LINUX_SUBVERSION 63 41 - #define QLCNIC_LINUX_VERSIONID "5.3.63" 40 + #define _QLCNIC_LINUX_SUBVERSION 64 41 + #define QLCNIC_LINUX_VERSIONID "5.3.64" 42 42 #define QLCNIC_DRV_IDC_VER 0x01 43 43 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44 44 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
+16 -34
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
··· 49 49 u32 reg_shift; 50 50 struct device *dev; 51 51 struct regmap *sys_mgr_base_addr; 52 - struct reset_control *stmmac_rst; 53 52 void __iomem *splitter_base; 54 53 bool f2h_ptp_ref_clk; 55 54 }; ··· 90 91 int ret; 91 92 struct device_node *np_splitter; 92 93 struct resource res_splitter; 93 - 94 - dwmac->stmmac_rst = devm_reset_control_get(dev, 95 - STMMAC_RESOURCE_NAME); 96 - if (IS_ERR(dwmac->stmmac_rst)) { 97 - dev_info(dev, "Could not get reset control!\n"); 98 - if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER) 99 - return -EPROBE_DEFER; 100 - dwmac->stmmac_rst = NULL; 101 - } 102 94 103 95 dwmac->interface = of_get_phy_mode(np); 104 96 ··· 184 194 return 0; 185 195 } 186 196 187 - static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv) 188 - { 189 - struct socfpga_dwmac *dwmac = priv; 190 - 191 - /* On socfpga platform exit, assert and hold reset to the 192 - * enet controller - the default state after a hard reset. 193 - */ 194 - if (dwmac->stmmac_rst) 195 - reset_control_assert(dwmac->stmmac_rst); 196 - } 197 - 198 197 static int socfpga_dwmac_init(struct platform_device *pdev, void *priv) 199 198 { 200 - struct socfpga_dwmac *dwmac = priv; 199 + struct socfpga_dwmac *dwmac = priv; 201 200 struct net_device *ndev = platform_get_drvdata(pdev); 202 201 struct stmmac_priv *stpriv = NULL; 203 202 int ret = 0; 204 203 205 - if (ndev) 206 - stpriv = netdev_priv(ndev); 204 + if (!ndev) 205 + return -EINVAL; 206 + 207 + stpriv = netdev_priv(ndev); 208 + if (!stpriv) 209 + return -EINVAL; 207 210 208 211 /* Assert reset to the enet controller before changing the phy mode */ 209 - if (dwmac->stmmac_rst) 210 - reset_control_assert(dwmac->stmmac_rst); 212 + if (stpriv->stmmac_rst) 213 + reset_control_assert(stpriv->stmmac_rst); 211 214 212 215 /* Setup the phy mode in the system manager registers according to 213 216 * devicetree configuration ··· 210 227 /* Deassert reset for the phy configuration to be sampled by 211 228 * the enet controller, 
and operation to start in requested mode 212 229 */ 213 - if (dwmac->stmmac_rst) 214 - reset_control_deassert(dwmac->stmmac_rst); 230 + if (stpriv->stmmac_rst) 231 + reset_control_deassert(stpriv->stmmac_rst); 215 232 216 233 /* Before the enet controller is suspended, the phy is suspended. 217 234 * This causes the phy clock to be gated. The enet controller is ··· 228 245 * control register 0, and can be modified by the phy driver 229 246 * framework. 230 247 */ 231 - if (stpriv && stpriv->phydev) 248 + if (stpriv->phydev) 232 249 phy_resume(stpriv->phydev); 233 250 234 251 return ret; ··· 268 285 269 286 plat_dat->bsp_priv = dwmac; 270 287 plat_dat->init = socfpga_dwmac_init; 271 - plat_dat->exit = socfpga_dwmac_exit; 272 288 plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; 273 289 274 - ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv); 275 - if (ret) 276 - return ret; 290 + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 291 + if (!ret) 292 + ret = socfpga_dwmac_init(pdev, dwmac); 277 293 278 - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 294 + return ret; 279 295 } 280 296 281 297 static const struct of_device_id socfpga_dwmac_match[] = {
+42 -23
drivers/net/macsec.c
··· 880 880 macsec_skb_cb(skb)->valid = false; 881 881 skb = skb_share_check(skb, GFP_ATOMIC); 882 882 if (!skb) 883 - return NULL; 883 + return ERR_PTR(-ENOMEM); 884 884 885 885 req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC); 886 886 if (!req) { 887 887 kfree_skb(skb); 888 - return NULL; 888 + return ERR_PTR(-ENOMEM); 889 889 } 890 890 891 891 hdr = (struct macsec_eth_header *)skb->data; ··· 905 905 skb = skb_unshare(skb, GFP_ATOMIC); 906 906 if (!skb) { 907 907 aead_request_free(req); 908 - return NULL; 908 + return ERR_PTR(-ENOMEM); 909 909 } 910 910 } else { 911 911 /* integrity only: all headers + data authenticated */ ··· 921 921 dev_hold(dev); 922 922 ret = crypto_aead_decrypt(req); 923 923 if (ret == -EINPROGRESS) { 924 - return NULL; 924 + return ERR_PTR(ret); 925 925 } else if (ret != 0) { 926 926 /* decryption/authentication failed 927 927 * 10.6 if validateFrames is disabled, deliver anyway 928 928 */ 929 929 if (ret != -EBADMSG) { 930 930 kfree_skb(skb); 931 - skb = NULL; 931 + skb = ERR_PTR(ret); 932 932 } 933 933 } else { 934 934 macsec_skb_cb(skb)->valid = true; ··· 1146 1146 secy->validate_frames != MACSEC_VALIDATE_DISABLED) 1147 1147 skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); 1148 1148 1149 - if (!skb) { 1150 - macsec_rxsa_put(rx_sa); 1149 + if (IS_ERR(skb)) { 1150 + /* the decrypt callback needs the reference */ 1151 + if (PTR_ERR(skb) != -EINPROGRESS) 1152 + macsec_rxsa_put(rx_sa); 1151 1153 rcu_read_unlock(); 1152 1154 *pskb = NULL; 1153 1155 return RX_HANDLER_CONSUMED; ··· 1163 1161 macsec_extra_len(macsec_skb_cb(skb)->has_sci)); 1164 1162 macsec_reset_skb(skb, secy->netdev); 1165 1163 1166 - macsec_rxsa_put(rx_sa); 1164 + if (rx_sa) 1165 + macsec_rxsa_put(rx_sa); 1167 1166 count_rx(dev, skb->len); 1168 1167 1169 1168 rcu_read_unlock(); ··· 1625 1622 } 1626 1623 1627 1624 rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); 1628 - if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len, 1629 - secy->icv_len)) { 1625 + if 
(!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1626 + secy->key_len, secy->icv_len)) { 1627 + kfree(rx_sa); 1630 1628 rtnl_unlock(); 1631 1629 return -ENOMEM; 1632 1630 } ··· 1772 1768 tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); 1773 1769 if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), 1774 1770 secy->key_len, secy->icv_len)) { 1771 + kfree(tx_sa); 1775 1772 rtnl_unlock(); 1776 1773 return -ENOMEM; 1777 1774 } ··· 2232 2227 return 1; 2233 2228 2234 2229 if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || 2235 - nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 2230 + nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, 2231 + MACSEC_DEFAULT_CIPHER_ID) || 2236 2232 nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || 2237 2233 nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || 2238 2234 nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || ··· 2274 2268 if (!hdr) 2275 2269 return -EMSGSIZE; 2276 2270 2277 - rtnl_lock(); 2271 + genl_dump_check_consistent(cb, hdr, &macsec_fam); 2278 2272 2279 2273 if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) 2280 2274 goto nla_put_failure; ··· 2435 2429 2436 2430 nla_nest_end(skb, rxsc_list); 2437 2431 2438 - rtnl_unlock(); 2439 - 2440 2432 genlmsg_end(skb, hdr); 2441 2433 2442 2434 return 0; 2443 2435 2444 2436 nla_put_failure: 2445 - rtnl_unlock(); 2446 2437 genlmsg_cancel(skb, hdr); 2447 2438 return -EMSGSIZE; 2448 2439 } 2440 + 2441 + static int macsec_generation = 1; /* protected by RTNL */ 2449 2442 2450 2443 static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) 2451 2444 { ··· 2455 2450 dev_idx = cb->args[0]; 2456 2451 2457 2452 d = 0; 2453 + rtnl_lock(); 2454 + 2455 + cb->seq = macsec_generation; 2456 + 2458 2457 for_each_netdev(net, dev) { 2459 2458 struct macsec_secy *secy; 2460 2459 ··· 2476 2467 } 2477 2468 2478 2469 done: 2470 + rtnl_unlock(); 2479 2471 cb->args[0] = d; 2480 2472 return skb->len; 
2481 2473 } ··· 2930 2920 struct net_device *real_dev = macsec->real_dev; 2931 2921 struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); 2932 2922 2923 + macsec_generation++; 2924 + 2933 2925 unregister_netdevice_queue(dev, head); 2934 2926 list_del_rcu(&macsec->secys); 2935 - if (list_empty(&rxd->secys)) 2927 + if (list_empty(&rxd->secys)) { 2936 2928 netdev_rx_handler_unregister(real_dev); 2929 + kfree(rxd); 2930 + } 2937 2931 2938 2932 macsec_del_dev(macsec); 2939 2933 } ··· 2959 2945 2960 2946 err = netdev_rx_handler_register(real_dev, macsec_handle_frame, 2961 2947 rxd); 2962 - if (err < 0) 2948 + if (err < 0) { 2949 + kfree(rxd); 2963 2950 return err; 2951 + } 2964 2952 } 2965 2953 2966 2954 list_add_tail_rcu(&macsec->secys, &rxd->secys); ··· 3082 3066 if (err < 0) 3083 3067 goto del_dev; 3084 3068 3069 + macsec_generation++; 3070 + 3085 3071 dev_hold(real_dev); 3086 3072 3087 3073 return 0; ··· 3097 3079 3098 3080 static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) 3099 3081 { 3100 - u64 csid = DEFAULT_CIPHER_ID; 3082 + u64 csid = MACSEC_DEFAULT_CIPHER_ID; 3101 3083 u8 icv_len = DEFAULT_ICV_LEN; 3102 3084 int flag; 3103 3085 bool es, scb, sci; ··· 3112 3094 icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); 3113 3095 3114 3096 switch (csid) { 3115 - case DEFAULT_CIPHER_ID: 3116 - case DEFAULT_CIPHER_ALT: 3097 + case MACSEC_DEFAULT_CIPHER_ID: 3098 + case MACSEC_DEFAULT_CIPHER_ALT: 3117 3099 if (icv_len < MACSEC_MIN_ICV_LEN || 3118 3100 icv_len > MACSEC_MAX_ICV_LEN) 3119 3101 return -EINVAL; ··· 3147 3129 nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) 3148 3130 return -EINVAL; 3149 3131 3150 - if ((data[IFLA_MACSEC_PROTECT] && 3151 - nla_get_u8(data[IFLA_MACSEC_PROTECT])) && 3132 + if ((data[IFLA_MACSEC_REPLAY_PROTECT] && 3133 + nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) && 3152 3134 !data[IFLA_MACSEC_WINDOW]) 3153 3135 return -EINVAL; 3154 3136 ··· 3186 3168 3187 3169 if (nla_put_sci(skb, IFLA_MACSEC_SCI, 
secy->sci) || 3188 3170 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || 3189 - nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) || 3171 + nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, 3172 + MACSEC_DEFAULT_CIPHER_ID) || 3190 3173 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || 3191 3174 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || 3192 3175 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
+1
drivers/scsi/cxgbi/libcxgbi.c
··· 688 688 { 689 689 struct flowi6 fl; 690 690 691 + memset(&fl, 0, sizeof(fl)); 691 692 if (saddr) 692 693 memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); 693 694 if (daddr)
+4 -3
include/linux/mlx5/driver.h
··· 519 519 }; 520 520 521 521 enum mlx5_interface_state { 522 - MLX5_INTERFACE_STATE_DOWN, 523 - MLX5_INTERFACE_STATE_UP, 522 + MLX5_INTERFACE_STATE_DOWN = BIT(0), 523 + MLX5_INTERFACE_STATE_UP = BIT(1), 524 + MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2), 524 525 }; 525 526 526 527 enum mlx5_pci_status { ··· 545 544 enum mlx5_device_state state; 546 545 /* sync interface state */ 547 546 struct mutex intf_state_mutex; 548 - enum mlx5_interface_state interface_state; 547 + unsigned long intf_state; 549 548 void (*event) (struct mlx5_core_dev *dev, 550 549 enum mlx5_dev_event event, 551 550 unsigned long param);
+3 -3
include/linux/mlx5/port.h
··· 54 54 int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, 55 55 enum mlx5_port_status *status); 56 56 57 - int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); 58 - void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); 59 - void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, 57 + int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); 58 + void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); 59 + void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, 60 60 u8 port); 61 61 62 62 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+2
include/linux/mlx5/vport.h
··· 45 45 u16 vport, u8 *addr); 46 46 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, 47 47 u16 vport, u8 *addr); 48 + int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); 49 + int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); 48 50 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, 49 51 u64 *system_image_guid); 50 52 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+4
include/net/switchdev.h
··· 54 54 struct net_device *orig_dev; 55 55 enum switchdev_attr_id id; 56 56 u32 flags; 57 + void *complete_priv; 58 + void (*complete)(struct net_device *dev, int err, void *priv); 57 59 union { 58 60 struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ 59 61 u8 stp_state; /* PORT_STP_STATE */ ··· 77 75 struct net_device *orig_dev; 78 76 enum switchdev_obj_id id; 79 77 u32 flags; 78 + void *complete_priv; 79 + void (*complete)(struct net_device *dev, int err, void *priv); 80 80 }; 81 81 82 82 /* SWITCHDEV_OBJ_ID_PORT_VLAN */
+2 -2
include/uapi/linux/if_macsec.h
··· 19 19 20 20 #define MACSEC_MAX_KEY_LEN 128 21 21 22 - #define DEFAULT_CIPHER_ID 0x0080020001000001ULL 23 - #define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL 22 + #define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL 23 + #define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL 24 24 25 25 #define MACSEC_MIN_ICV_LEN 8 26 26 #define MACSEC_MAX_ICV_LEN 32
-1
kernel/bpf/verifier.c
··· 2030 2030 if (IS_ERR(map)) { 2031 2031 verbose("fd %d is not pointing to valid bpf_map\n", 2032 2032 insn->imm); 2033 - fdput(f); 2034 2033 return PTR_ERR(map); 2035 2034 } 2036 2035
+79 -45
net/bridge/br_mdb.c
··· 61 61 e->flags |= MDB_FLAGS_OFFLOAD; 62 62 } 63 63 64 + static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip) 65 + { 66 + memset(ip, 0, sizeof(struct br_ip)); 67 + ip->vid = entry->vid; 68 + ip->proto = entry->addr.proto; 69 + if (ip->proto == htons(ETH_P_IP)) 70 + ip->u.ip4 = entry->addr.u.ip4; 71 + #if IS_ENABLED(CONFIG_IPV6) 72 + else 73 + ip->u.ip6 = entry->addr.u.ip6; 74 + #endif 75 + } 76 + 64 77 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb, 65 78 struct net_device *dev) 66 79 { ··· 256 243 + nla_total_size(sizeof(struct br_mdb_entry)); 257 244 } 258 245 259 - static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry, 260 - int type, struct net_bridge_port_group *pg) 246 + struct br_mdb_complete_info { 247 + struct net_bridge_port *port; 248 + struct br_ip ip; 249 + }; 250 + 251 + static void br_mdb_complete(struct net_device *dev, int err, void *priv) 261 252 { 253 + struct br_mdb_complete_info *data = priv; 254 + struct net_bridge_port_group __rcu **pp; 255 + struct net_bridge_port_group *p; 256 + struct net_bridge_mdb_htable *mdb; 257 + struct net_bridge_mdb_entry *mp; 258 + struct net_bridge_port *port = data->port; 259 + struct net_bridge *br = port->br; 260 + 261 + if (err) 262 + goto err; 263 + 264 + spin_lock_bh(&br->multicast_lock); 265 + mdb = mlock_dereference(br->mdb, br); 266 + mp = br_mdb_ip_get(mdb, &data->ip); 267 + if (!mp) 268 + goto out; 269 + for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; 270 + pp = &p->next) { 271 + if (p->port != port) 272 + continue; 273 + p->flags |= MDB_PG_FLAGS_OFFLOAD; 274 + } 275 + out: 276 + spin_unlock_bh(&br->multicast_lock); 277 + err: 278 + kfree(priv); 279 + } 280 + 281 + static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p, 282 + struct br_mdb_entry *entry, int type) 283 + { 284 + struct br_mdb_complete_info *complete_info; 262 285 struct switchdev_obj_port_mdb mdb = { 263 286 .obj = { 
264 287 .id = SWITCHDEV_OBJ_ID_PORT_MDB, ··· 317 268 318 269 mdb.obj.orig_dev = port_dev; 319 270 if (port_dev && type == RTM_NEWMDB) { 320 - err = switchdev_port_obj_add(port_dev, &mdb.obj); 321 - if (!err && pg) 322 - pg->flags |= MDB_PG_FLAGS_OFFLOAD; 271 + complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC); 272 + if (complete_info) { 273 + complete_info->port = p; 274 + __mdb_entry_to_br_ip(entry, &complete_info->ip); 275 + mdb.obj.complete_priv = complete_info; 276 + mdb.obj.complete = br_mdb_complete; 277 + switchdev_port_obj_add(port_dev, &mdb.obj); 278 + } 323 279 } else if (port_dev && type == RTM_DELMDB) { 324 280 switchdev_port_obj_del(port_dev, &mdb.obj); 325 281 } ··· 345 291 rtnl_set_sk_err(net, RTNLGRP_MDB, err); 346 292 } 347 293 348 - void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, 349 - int type) 294 + void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, 295 + struct br_ip *group, int type, u8 flags) 350 296 { 351 297 struct br_mdb_entry entry; 352 298 353 299 memset(&entry, 0, sizeof(entry)); 354 - entry.ifindex = pg->port->dev->ifindex; 355 - entry.addr.proto = pg->addr.proto; 356 - entry.addr.u.ip4 = pg->addr.u.ip4; 300 + entry.ifindex = port->dev->ifindex; 301 + entry.addr.proto = group->proto; 302 + entry.addr.u.ip4 = group->u.ip4; 357 303 #if IS_ENABLED(CONFIG_IPV6) 358 - entry.addr.u.ip6 = pg->addr.u.ip6; 304 + entry.addr.u.ip6 = group->u.ip6; 359 305 #endif 360 - entry.vid = pg->addr.vid; 361 - __mdb_entry_fill_flags(&entry, pg->flags); 362 - __br_mdb_notify(dev, &entry, type, pg); 306 + entry.vid = group->vid; 307 + __mdb_entry_fill_flags(&entry, flags); 308 + __br_mdb_notify(dev, port, &entry, type); 363 309 } 364 310 365 311 static int nlmsg_populate_rtr_fill(struct sk_buff *skb, ··· 504 450 } 505 451 506 452 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, 507 - struct br_ip *group, unsigned char state, 508 - struct net_bridge_port_group **pg) 453 
+ struct br_ip *group, unsigned char state) 509 454 { 510 455 struct net_bridge_mdb_entry *mp; 511 456 struct net_bridge_port_group *p; ··· 535 482 if (unlikely(!p)) 536 483 return -ENOMEM; 537 484 rcu_assign_pointer(*pp, p); 538 - *pg = p; 539 485 if (state == MDB_TEMPORARY) 540 486 mod_timer(&p->timer, now + br->multicast_membership_interval); 541 487 ··· 542 490 } 543 491 544 492 static int __br_mdb_add(struct net *net, struct net_bridge *br, 545 - struct br_mdb_entry *entry, 546 - struct net_bridge_port_group **pg) 493 + struct br_mdb_entry *entry) 547 494 { 548 495 struct br_ip ip; 549 496 struct net_device *dev; ··· 560 509 if (!p || p->br != br || p->state == BR_STATE_DISABLED) 561 510 return -EINVAL; 562 511 563 - memset(&ip, 0, sizeof(ip)); 564 - ip.vid = entry->vid; 565 - ip.proto = entry->addr.proto; 566 - if (ip.proto == htons(ETH_P_IP)) 567 - ip.u.ip4 = entry->addr.u.ip4; 568 - #if IS_ENABLED(CONFIG_IPV6) 569 - else 570 - ip.u.ip6 = entry->addr.u.ip6; 571 - #endif 512 + __mdb_entry_to_br_ip(entry, &ip); 572 513 573 514 spin_lock_bh(&br->multicast_lock); 574 - ret = br_mdb_add_group(br, p, &ip, entry->state, pg); 515 + ret = br_mdb_add_group(br, p, &ip, entry->state); 575 516 spin_unlock_bh(&br->multicast_lock); 576 517 return ret; 577 518 } ··· 571 528 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh) 572 529 { 573 530 struct net *net = sock_net(skb->sk); 574 - struct net_bridge_port_group *pg; 575 531 struct net_bridge_vlan_group *vg; 576 532 struct net_device *dev, *pdev; 577 533 struct br_mdb_entry *entry; ··· 600 558 if (br_vlan_enabled(br) && vg && entry->vid == 0) { 601 559 list_for_each_entry(v, &vg->vlan_list, vlist) { 602 560 entry->vid = v->vid; 603 - err = __br_mdb_add(net, br, entry, &pg); 561 + err = __br_mdb_add(net, br, entry); 604 562 if (err) 605 563 break; 606 - __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); 564 + __br_mdb_notify(dev, p, entry, RTM_NEWMDB); 607 565 } 608 566 } else { 609 - err = __br_mdb_add(net, br, 
entry, &pg); 567 + err = __br_mdb_add(net, br, entry); 610 568 if (!err) 611 - __br_mdb_notify(dev, entry, RTM_NEWMDB, pg); 569 + __br_mdb_notify(dev, p, entry, RTM_NEWMDB); 612 570 } 613 571 614 572 return err; ··· 626 584 if (!netif_running(br->dev) || br->multicast_disabled) 627 585 return -EINVAL; 628 586 629 - memset(&ip, 0, sizeof(ip)); 630 - ip.vid = entry->vid; 631 - ip.proto = entry->addr.proto; 632 - if (ip.proto == htons(ETH_P_IP)) 633 - ip.u.ip4 = entry->addr.u.ip4; 634 - #if IS_ENABLED(CONFIG_IPV6) 635 - else 636 - ip.u.ip6 = entry->addr.u.ip6; 637 - #endif 587 + __mdb_entry_to_br_ip(entry, &ip); 638 588 639 589 spin_lock_bh(&br->multicast_lock); 640 590 mdb = mlock_dereference(br->mdb, br); ··· 696 662 entry->vid = v->vid; 697 663 err = __br_mdb_del(br, entry); 698 664 if (!err) 699 - __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); 665 + __br_mdb_notify(dev, p, entry, RTM_DELMDB); 700 666 } 701 667 } else { 702 668 err = __br_mdb_del(br, entry); 703 669 if (!err) 704 - __br_mdb_notify(dev, entry, RTM_DELMDB, NULL); 670 + __br_mdb_notify(dev, p, entry, RTM_DELMDB); 705 671 } 706 672 707 673 return err;
+5 -3
net/bridge/br_multicast.c
··· 283 283 rcu_assign_pointer(*pp, p->next); 284 284 hlist_del_init(&p->mglist); 285 285 del_timer(&p->timer); 286 - br_mdb_notify(br->dev, p, RTM_DELMDB); 286 + br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, 287 + p->flags); 287 288 call_rcu_bh(&p->rcu, br_multicast_free_pg); 288 289 289 290 if (!mp->ports && !mp->mglist && ··· 706 705 if (unlikely(!p)) 707 706 goto err; 708 707 rcu_assign_pointer(*pp, p); 709 - br_mdb_notify(br->dev, p, RTM_NEWMDB); 708 + br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0); 710 709 711 710 found: 712 711 mod_timer(&p->timer, now + br->multicast_membership_interval); ··· 1462 1461 hlist_del_init(&p->mglist); 1463 1462 del_timer(&p->timer); 1464 1463 call_rcu_bh(&p->rcu, br_multicast_free_pg); 1465 - br_mdb_notify(br->dev, p, RTM_DELMDB); 1464 + br_mdb_notify(br->dev, port, group, RTM_DELMDB, 1465 + p->flags); 1466 1466 1467 1467 if (!mp->ports && !mp->mglist && 1468 1468 netif_running(br->dev))
+2 -2
net/bridge/br_private.h
··· 560 560 unsigned char flags); 561 561 void br_mdb_init(void); 562 562 void br_mdb_uninit(void); 563 - void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg, 564 - int type); 563 + void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port, 564 + struct br_ip *group, int type, u8 flags); 565 565 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port, 566 566 int type); 567 567
+5 -1
net/ipv4/fib_frontend.c
··· 904 904 if (ifa->ifa_flags & IFA_F_SECONDARY) { 905 905 prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); 906 906 if (!prim) { 907 - pr_warn("%s: bug: prim == NULL\n", __func__); 907 + /* if the device has been deleted, we don't perform 908 + * address promotion 909 + */ 910 + if (!in_dev->dead) 911 + pr_warn("%s: bug: prim == NULL\n", __func__); 908 912 return; 909 913 } 910 914 if (iprim && iprim != prim) {
+15 -33
net/ipv6/addrconf.c
··· 3176 3176 } 3177 3177 #endif 3178 3178 3179 - #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) 3180 - /* If the host route is cached on the addr struct make sure it is associated 3181 - * with the proper table. e.g., enslavement can change and if so the cached 3182 - * host route needs to move to the new table. 3183 - */ 3184 - static void l3mdev_check_host_rt(struct inet6_dev *idev, 3185 - struct inet6_ifaddr *ifp) 3186 - { 3187 - if (ifp->rt) { 3188 - u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL; 3189 - 3190 - if (tb_id != ifp->rt->rt6i_table->tb6_id) { 3191 - ip6_del_rt(ifp->rt); 3192 - ifp->rt = NULL; 3193 - } 3194 - } 3195 - } 3196 - #else 3197 - static void l3mdev_check_host_rt(struct inet6_dev *idev, 3198 - struct inet6_ifaddr *ifp) 3199 - { 3200 - } 3201 - #endif 3202 - 3203 3179 static int fixup_permanent_addr(struct inet6_dev *idev, 3204 3180 struct inet6_ifaddr *ifp) 3205 3181 { 3206 - l3mdev_check_host_rt(idev, ifp); 3207 - 3208 3182 if (!ifp->rt) { 3209 3183 struct rt6_info *rt; 3210 3184 ··· 3278 3304 break; 3279 3305 3280 3306 if (event == NETDEV_UP) { 3307 + /* restore routes for permanent addresses */ 3308 + addrconf_permanent_addr(dev); 3309 + 3281 3310 if (!addrconf_qdisc_ok(dev)) { 3282 3311 /* device is not ready yet. 
*/ 3283 3312 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n", ··· 3313 3336 3314 3337 run_pending = 1; 3315 3338 } 3316 - 3317 - /* restore routes for permanent addresses */ 3318 - addrconf_permanent_addr(dev); 3319 3339 3320 3340 switch (dev->type) { 3321 3341 #if IS_ENABLED(CONFIG_IPV6_SIT) ··· 3530 3556 3531 3557 INIT_LIST_HEAD(&del_list); 3532 3558 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { 3559 + struct rt6_info *rt = NULL; 3560 + 3533 3561 addrconf_del_dad_work(ifa); 3534 3562 3535 3563 write_unlock_bh(&idev->lock); ··· 3544 3568 ifa->state = 0; 3545 3569 if (!(ifa->flags & IFA_F_NODAD)) 3546 3570 ifa->flags |= IFA_F_TENTATIVE; 3571 + 3572 + rt = ifa->rt; 3573 + ifa->rt = NULL; 3547 3574 } else { 3548 3575 state = ifa->state; 3549 3576 ifa->state = INET6_IFADDR_STATE_DEAD; ··· 3556 3577 } 3557 3578 3558 3579 spin_unlock_bh(&ifa->lock); 3580 + 3581 + if (rt) 3582 + ip6_del_rt(rt); 3559 3583 3560 3584 if (state != INET6_IFADDR_STATE_DEAD) { 3561 3585 __ipv6_ifa_notify(RTM_DELADDR, ifa); ··· 5325 5343 if (rt) 5326 5344 ip6_del_rt(rt); 5327 5345 } 5328 - dst_hold(&ifp->rt->dst); 5329 - 5330 - ip6_del_rt(ifp->rt); 5331 - 5346 + if (ifp->rt) { 5347 + dst_hold(&ifp->rt->dst); 5348 + ip6_del_rt(ifp->rt); 5349 + } 5332 5350 rt_genid_bump_ipv6(net); 5333 5351 break; 5334 5352 }
+6
net/switchdev/switchdev.c
··· 305 305 if (err && err != -EOPNOTSUPP) 306 306 netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n", 307 307 err, attr->id); 308 + if (attr->complete) 309 + attr->complete(dev, err, attr->complete_priv); 308 310 } 309 311 310 312 static int switchdev_port_attr_set_defer(struct net_device *dev, ··· 436 434 if (err && err != -EOPNOTSUPP) 437 435 netdev_err(dev, "failed (err=%d) to add object (id=%d)\n", 438 436 err, obj->id); 437 + if (obj->complete) 438 + obj->complete(dev, err, obj->complete_priv); 439 439 } 440 440 441 441 static int switchdev_port_obj_add_defer(struct net_device *dev, ··· 506 502 if (err && err != -EOPNOTSUPP) 507 503 netdev_err(dev, "failed (err=%d) to del object (id=%d)\n", 508 504 err, obj->id); 505 + if (obj->complete) 506 + obj->complete(dev, err, obj->complete_priv); 509 507 } 510 508 511 509 static int switchdev_port_obj_del_defer(struct net_device *dev,