Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

1) Unbalanced locking in mwifiex_process_country_ie, from Brian Norris.

2) Fix thermal zone registration in iwlwifi, from Andrei
Otcheretianski.

3) Fix double free_irq in sgi ioc3 eth, from Thomas Bogendoerfer.

4) Use after free in mptcp, from Florian Westphal.

5) Use after free in wireguard's root_remove_peer_lists, from Eric
Dumazet.

6) Properly access packet headers in bonding alb code, from Eric
Dumazet.

7) Fix data race in skb_queue_len(), from Qian Cai (see the sketch after
this list).

8) Fix regression in r8169 on some chips, from Heiner Kallweit.

9) Fix XDP program ref counting in hv_netvsc, from Haiyang Zhang.

10) Certain kinds of set link netlink operations can cause a NULL deref
in the ipv6 addrconf code. Fix from Eric Dumazet.

11) Don't cancel uninitialized work item in drop monitor, from Ido
Schimmel.
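
Editorial note on item 7: the race is between lockless readers of an
sk_buff queue length and writers that update it under the queue lock.
Below is a minimal sketch of the READ_ONCE()/WRITE_ONCE() pattern such a
fix relies on; the helper names are made up for illustration and this is
not the actual patch, it only assumes the usual <linux/skbuff.h>
definitions.

    #include <linux/skbuff.h>

    /* Illustrative only: a reader that does not hold list->lock must use
     * READ_ONCE() so the compiler cannot tear or refetch the load.
     */
    static inline __u32 queue_len_lockless(const struct sk_buff_head *list)
    {
            return READ_ONCE(list->qlen);
    }

    /* ...and the locked writer publishes its update with WRITE_ONCE(), so
     * the pair no longer constitutes a data race under KCSAN.
     */
    static inline void queue_dec_len(struct sk_buff_head *list)
    {
            /* caller holds list->lock */
            WRITE_ONCE(list->qlen, list->qlen - 1);
    }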

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (84 commits)
net: thunderx: use proper interface type for RGMII
mt76: mt7615: fix max_nss in mt7615_eeprom_parse_hw_cap
bpf: Improve bucket_log calculation logic
selftests/bpf: Test freeing sockmap/sockhash with a socket in it
bpf, sockhash: Synchronize_rcu before free'ing map
bpf, sockmap: Don't sleep while holding RCU lock on tear-down
bpftool: Don't crash on missing xlated program instructions
bpf, sockmap: Check update requirements after locking
drop_monitor: Do not cancel uninitialized work item
mlxsw: spectrum_dpipe: Add missing error path
mlxsw: core: Add validation of hardware device types for MGPIR register
mlxsw: spectrum_router: Clear offload indication from IPv6 nexthops on abort
selftests: mlxsw: Add test cases for local table route replacement
mlxsw: spectrum_router: Prevent incorrect replacement of local table routes
net: dsa: microchip: enable module autoprobe
ipv6/addrconf: fix potential NULL deref in inet6_set_link_af()
dpaa_eth: support all modes with rate adapting PHYs
net: stmmac: update pci platform data to use phy_interface
net: stmmac: xgmac: fix missing IFF_MULTICAST check in dwxgmac2_set_filter
net: stmmac: fix missing IFF_MULTICAST check in dwmac4_set_filter
...

+783 -328
+32 -12
drivers/net/bonding/bond_alb.c
··· 1383 bool do_tx_balance = true; 1384 u32 hash_index = 0; 1385 const u8 *hash_start = NULL; 1386 - struct ipv6hdr *ip6hdr; 1387 1388 skb_reset_mac_header(skb); 1389 eth_data = eth_hdr(skb); 1390 1391 switch (ntohs(skb->protocol)) { 1392 case ETH_P_IP: { 1393 - const struct iphdr *iph = ip_hdr(skb); 1394 1395 if (is_broadcast_ether_addr(eth_data->h_dest) || 1396 - iph->daddr == ip_bcast || 1397 - iph->protocol == IPPROTO_IGMP) { 1398 do_tx_balance = false; 1399 break; 1400 } 1401 hash_start = (char *)&(iph->daddr); 1402 hash_size = sizeof(iph->daddr); 1403 - } 1404 break; 1405 - case ETH_P_IPV6: 1406 /* IPv6 doesn't really use broadcast mac address, but leave 1407 * that here just in case. 1408 */ ··· 1424 break; 1425 } 1426 1427 - /* Additianally, DAD probes should not be tx-balanced as that 1428 * will lead to false positives for duplicate addresses and 1429 * prevent address configuration from working. 1430 */ ··· 1438 break; 1439 } 1440 1441 - hash_start = (char *)&(ipv6_hdr(skb)->daddr); 1442 - hash_size = sizeof(ipv6_hdr(skb)->daddr); 1443 break; 1444 - case ETH_P_IPX: 1445 - if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) { 1446 /* something is wrong with this packet */ 1447 do_tx_balance = false; 1448 break; 1449 } 1450 1451 - if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) { 1452 /* The only protocol worth balancing in 1453 * this family since it has an "ARP" like 1454 * mechanism ··· 1466 break; 1467 } 1468 1469 hash_start = (char *)eth_data->h_dest; 1470 hash_size = ETH_ALEN; 1471 break; 1472 case ETH_P_ARP: 1473 do_tx_balance = false; 1474 if (bond_info->rlb_enabled)
··· 1383 bool do_tx_balance = true; 1384 u32 hash_index = 0; 1385 const u8 *hash_start = NULL; 1386 1387 skb_reset_mac_header(skb); 1388 eth_data = eth_hdr(skb); 1389 1390 switch (ntohs(skb->protocol)) { 1391 case ETH_P_IP: { 1392 + const struct iphdr *iph; 1393 1394 if (is_broadcast_ether_addr(eth_data->h_dest) || 1395 + !pskb_network_may_pull(skb, sizeof(*iph))) { 1396 + do_tx_balance = false; 1397 + break; 1398 + } 1399 + iph = ip_hdr(skb); 1400 + if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) { 1401 do_tx_balance = false; 1402 break; 1403 } 1404 hash_start = (char *)&(iph->daddr); 1405 hash_size = sizeof(iph->daddr); 1406 break; 1407 + } 1408 + case ETH_P_IPV6: { 1409 + const struct ipv6hdr *ip6hdr; 1410 + 1411 /* IPv6 doesn't really use broadcast mac address, but leave 1412 * that here just in case. 1413 */ ··· 1419 break; 1420 } 1421 1422 + if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) { 1423 + do_tx_balance = false; 1424 + break; 1425 + } 1426 + /* Additionally, DAD probes should not be tx-balanced as that 1427 * will lead to false positives for duplicate addresses and 1428 * prevent address configuration from working. 1429 */ ··· 1429 break; 1430 } 1431 1432 + hash_start = (char *)&ip6hdr->daddr; 1433 + hash_size = sizeof(ip6hdr->daddr); 1434 break; 1435 + } 1436 + case ETH_P_IPX: { 1437 + const struct ipxhdr *ipxhdr; 1438 + 1439 + if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) { 1440 + do_tx_balance = false; 1441 + break; 1442 + } 1443 + ipxhdr = (struct ipxhdr *)skb_network_header(skb); 1444 + 1445 + if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) { 1446 /* something is wrong with this packet */ 1447 do_tx_balance = false; 1448 break; 1449 } 1450 1451 + if (ipxhdr->ipx_type != IPX_TYPE_NCP) { 1452 /* The only protocol worth balancing in 1453 * this family since it has an "ARP" like 1454 * mechanism ··· 1448 break; 1449 } 1450 1451 + eth_data = eth_hdr(skb); 1452 hash_start = (char *)eth_data->h_dest; 1453 hash_size = ETH_ALEN; 1454 break; 1455 + } 1456 case ETH_P_ARP: 1457 do_tx_balance = false; 1458 if (bond_info->rlb_enabled)
+1 -1
drivers/net/dsa/b53/b53_common.c
···
         b53_do_vlan_op(dev, VTA_CMD_CLEAR);
     }
 
-    b53_enable_vlan(dev, false, ds->vlan_filtering);
+    b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);
 
     b53_for_each_port(dev, i)
         b53_write16(dev, B53_VLAN_PAGE,
+3 -1
drivers/net/dsa/bcm_sf2.c
···
 
     /* Force link status for IMP port */
     reg = core_readl(priv, offset);
-    reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
+    reg |= (MII_SW_OR | LINK_STS);
+    if (priv->type == BCM7278_DEVICE_ID)
+        reg |= GMII_SPEED_UP_2G;
     core_writel(priv, reg, offset);
 
     /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+6
drivers/net/dsa/microchip/ksz9477_spi.c
···
 
 module_spi_driver(ksz9477_spi_driver);
 
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
 MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
 MODULE_LICENSE("GPL");
+3
drivers/net/ethernet/broadcom/bcmsysport.c
···
 
     umac_reset(priv);
 
+    /* Disable the UniMAC RX/TX */
+    umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
     /* We may have been suspended and never received a WOL event that
      * would turn off MPD detection, take care of that now
      */
+8 -6
drivers/net/ethernet/cadence/macb_main.c
··· 73 /* Max length of transmit frame must be a multiple of 8 bytes */ 74 #define MACB_TX_LEN_ALIGN 8 75 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) 76 - #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) 77 78 #define GEM_MTU_MIN_SIZE ETH_MIN_MTU 79 #define MACB_NETIF_LSO NETIF_F_TSO ··· 1795 1796 /* Validate LSO compatibility */ 1797 1798 - /* there is only one buffer */ 1799 - if (!skb_is_nonlinear(skb)) 1800 return features; 1801 1802 /* length of header */ 1803 hdrlen = skb_transport_offset(skb); 1804 - if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1805 - hdrlen += tcp_hdrlen(skb); 1806 1807 - /* For LSO: 1808 * When software supplies two or more payload buffers all payload buffers 1809 * apart from the last must be a multiple of 8 bytes in size. 1810 */
··· 73 /* Max length of transmit frame must be a multiple of 8 bytes */ 74 #define MACB_TX_LEN_ALIGN 8 75 #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) 76 + /* Limit maximum TX length as per Cadence TSO errata. This is to avoid a 77 + * false amba_error in TX path from the DMA assuming there is not enough 78 + * space in the SRAM (16KB) even when there is. 79 + */ 80 + #define GEM_MAX_TX_LEN (unsigned int)(0x3FC0) 81 82 #define GEM_MTU_MIN_SIZE ETH_MIN_MTU 83 #define MACB_NETIF_LSO NETIF_F_TSO ··· 1791 1792 /* Validate LSO compatibility */ 1793 1794 + /* there is only one buffer or protocol is not UDP */ 1795 + if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP)) 1796 return features; 1797 1798 /* length of header */ 1799 hdrlen = skb_transport_offset(skb); 1800 1801 + /* For UFO only: 1802 * When software supplies two or more payload buffers all payload buffers 1803 * apart from the last must be a multiple of 8 bytes in size. 1804 */
+1 -1
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
···
     if (lmac_type == BGX_MODE_QSGMII)
         return PHY_INTERFACE_MODE_QSGMII;
     if (lmac_type == BGX_MODE_RGMII)
-        return PHY_INTERFACE_MODE_RGMII;
+        return PHY_INTERFACE_MODE_RGMII_RXID;
 
     return PHY_INTERFACE_MODE_SGMII;
 }
+7
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
···
                atomic_read(&adap->chcr_stats.fallback));
     seq_printf(seq, "IPSec PDU: %10u\n",
                atomic_read(&adap->chcr_stats.ipsec_cnt));
+    seq_printf(seq, "TLS PDU Tx: %10u\n",
+               atomic_read(&adap->chcr_stats.tls_pdu_tx));
+    seq_printf(seq, "TLS PDU Rx: %10u\n",
+               atomic_read(&adap->chcr_stats.tls_pdu_rx));
+    seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
+               atomic_read(&adap->chcr_stats.tls_key));
+
     return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(chcr_stats);
+4 -1
drivers/net/ethernet/dec/tulip/de2104x.c
···
         if (status & DescOwn)
             break;
 
-        len = ((status >> 16) & 0x7ff) - 4;
+        /* the length is actually a 15 bit value here according
+         * to Table 4-1 in the DE2104x spec so mask is 0x7fff
+         */
+        len = ((status >> 16) & 0x7fff) - 4;
         mapping = de->rx_skb[rx_tail].mapping;
 
         if (unlikely(drop)) {
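
(Editorial worked example, values arbitrary: for a frame-length field of
2100, the old 11-bit mask gives ((2100 & 0x7ff) - 4) = 48, a bogus length,
while the 15-bit mask gives ((2100 & 0x7fff) - 4) = 2096 as expected.)
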
+11 -3
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 2453 mac_dev->adjust_link(mac_dev); 2454 } 2455 2456 static int dpaa_phy_init(struct net_device *net_dev) 2457 { 2458 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; ··· 2474 return -ENODEV; 2475 } 2476 2477 - /* Remove any features not supported by the controller */ 2478 - ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support); 2479 - linkmode_and(phy_dev->supported, phy_dev->supported, mask); 2480 2481 phy_support_asym_pause(phy_dev); 2482
··· 2453 mac_dev->adjust_link(mac_dev); 2454 } 2455 2456 + /* The Aquantia PHYs are capable of performing rate adaptation */ 2457 + #define PHY_VEND_AQUANTIA 0x03a1b400 2458 + 2459 static int dpaa_phy_init(struct net_device *net_dev) 2460 { 2461 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; ··· 2471 return -ENODEV; 2472 } 2473 2474 + /* Unless the PHY is capable of rate adaptation */ 2475 + if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII || 2476 + ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) { 2477 + /* remove any features not supported by the controller */ 2478 + ethtool_convert_legacy_u32_to_link_mode(mask, 2479 + mac_dev->if_support); 2480 + linkmode_and(phy_dev->supported, phy_dev->supported, mask); 2481 + } 2482 2483 phy_support_asym_pause(phy_dev); 2484
+1 -1
drivers/net/ethernet/intel/i40e/i40e_xsk.c
···
     struct i40e_ring *ring;
 
     if (test_bit(__I40E_CONFIG_BUSY, pf->state))
-        return -ENETDOWN;
+        return -EAGAIN;
 
     if (test_bit(__I40E_VSI_DOWN, vsi->state))
         return -ENETDOWN;
+22 -9
drivers/net/ethernet/marvell/mvneta.c
··· 401 struct u64_stats_sync syncp; 402 u64 rx_packets; 403 u64 rx_bytes; 404 u64 tx_packets; 405 u64 tx_bytes; 406 }; ··· 740 struct mvneta_pcpu_stats *cpu_stats; 741 u64 rx_packets; 742 u64 rx_bytes; 743 u64 tx_packets; 744 u64 tx_bytes; 745 ··· 750 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 751 rx_packets = cpu_stats->rx_packets; 752 rx_bytes = cpu_stats->rx_bytes; 753 tx_packets = cpu_stats->tx_packets; 754 tx_bytes = cpu_stats->tx_bytes; 755 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 756 757 stats->rx_packets += rx_packets; 758 stats->rx_bytes += rx_bytes; 759 stats->tx_packets += tx_packets; 760 stats->tx_bytes += tx_bytes; 761 } 762 - 763 - stats->rx_errors = dev->stats.rx_errors; 764 - stats->rx_dropped = dev->stats.rx_dropped; 765 766 stats->tx_dropped = dev->stats.tx_dropped; 767 } ··· 1741 static void mvneta_rx_error(struct mvneta_port *pp, 1742 struct mvneta_rx_desc *rx_desc) 1743 { 1744 u32 status = rx_desc->status; 1745 1746 switch (status & MVNETA_RXD_ERR_CODE_MASK) { 1747 case MVNETA_RXD_ERR_CRC: ··· 2190 2191 rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE); 2192 if (unlikely(!rxq->skb)) { 2193 - netdev_err(dev, 2194 - "Can't allocate skb on queue %d\n", 2195 - rxq->id); 2196 - dev->stats.rx_dropped++; 2197 rxq->skb_alloc_err++; 2198 return -ENOMEM; 2199 } 2200 page_pool_release_page(rxq->page_pool, page); ··· 2285 /* Check errors only for FIRST descriptor */ 2286 if (rx_status & MVNETA_RXD_ERR_SUMMARY) { 2287 mvneta_rx_error(pp, rx_desc); 2288 - dev->stats.rx_errors++; 2289 /* leave the descriptor untouched */ 2290 continue; 2291 } ··· 2386 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2387 rx_desc->buf_phys_addr); 2388 err_drop_frame: 2389 - dev->stats.rx_errors++; 2390 mvneta_rx_error(pp, rx_desc); 2391 /* leave the descriptor untouched */ 2392 continue;
··· 401 struct u64_stats_sync syncp; 402 u64 rx_packets; 403 u64 rx_bytes; 404 + u64 rx_dropped; 405 + u64 rx_errors; 406 u64 tx_packets; 407 u64 tx_bytes; 408 }; ··· 738 struct mvneta_pcpu_stats *cpu_stats; 739 u64 rx_packets; 740 u64 rx_bytes; 741 + u64 rx_dropped; 742 + u64 rx_errors; 743 u64 tx_packets; 744 u64 tx_bytes; 745 ··· 746 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); 747 rx_packets = cpu_stats->rx_packets; 748 rx_bytes = cpu_stats->rx_bytes; 749 + rx_dropped = cpu_stats->rx_dropped; 750 + rx_errors = cpu_stats->rx_errors; 751 tx_packets = cpu_stats->tx_packets; 752 tx_bytes = cpu_stats->tx_bytes; 753 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); 754 755 stats->rx_packets += rx_packets; 756 stats->rx_bytes += rx_bytes; 757 + stats->rx_dropped += rx_dropped; 758 + stats->rx_errors += rx_errors; 759 stats->tx_packets += tx_packets; 760 stats->tx_bytes += tx_bytes; 761 } 762 763 stats->tx_dropped = dev->stats.tx_dropped; 764 } ··· 1736 static void mvneta_rx_error(struct mvneta_port *pp, 1737 struct mvneta_rx_desc *rx_desc) 1738 { 1739 + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 1740 u32 status = rx_desc->status; 1741 + 1742 + /* update per-cpu counter */ 1743 + u64_stats_update_begin(&stats->syncp); 1744 + stats->rx_errors++; 1745 + u64_stats_update_end(&stats->syncp); 1746 1747 switch (status & MVNETA_RXD_ERR_CODE_MASK) { 1748 case MVNETA_RXD_ERR_CRC: ··· 2179 2180 rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE); 2181 if (unlikely(!rxq->skb)) { 2182 + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); 2183 + 2184 + netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id); 2185 rxq->skb_alloc_err++; 2186 + 2187 + u64_stats_update_begin(&stats->syncp); 2188 + stats->rx_dropped++; 2189 + u64_stats_update_end(&stats->syncp); 2190 + 2191 return -ENOMEM; 2192 } 2193 page_pool_release_page(rxq->page_pool, page); ··· 2270 /* Check errors only for FIRST descriptor */ 2271 if (rx_status & MVNETA_RXD_ERR_SUMMARY) { 2272 mvneta_rx_error(pp, rx_desc); 2273 /* leave the descriptor untouched */ 2274 continue; 2275 } ··· 2372 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, 2373 rx_desc->buf_phys_addr); 2374 err_drop_frame: 2375 mvneta_rx_error(pp, rx_desc); 2376 /* leave the descriptor untouched */ 2377 continue;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
···
 
 static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
 {
-    if (!MLX5_CAP_GEN(mdev, tls))
+    if (!MLX5_CAP_GEN(mdev, tls_tx))
         return false;
 
     if (!MLX5_CAP_GEN(mdev, log_max_dek))
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
···
     int datalen;
     u32 skb_seq;
 
-    if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
+    if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
         skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
         goto out;
     }
+9 -7
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 613 614 wqe_counter = be16_to_cpu(cqe->wqe_counter); 615 616 - if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { 617 - netdev_WARN_ONCE(cq->channel->netdev, 618 - "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); 619 - if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) 620 - queue_work(cq->channel->priv->wq, &sq->recover_work); 621 - break; 622 - } 623 do { 624 struct mlx5e_sq_wqe_info *wi; 625 u16 ci; ··· 621 622 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); 623 wi = &sq->db.ico_wqe[ci]; 624 625 if (likely(wi->opcode == MLX5_OPCODE_UMR)) { 626 sqcc += MLX5E_UMR_WQEBBS;
··· 613 614 wqe_counter = be16_to_cpu(cqe->wqe_counter); 615 616 do { 617 struct mlx5e_sq_wqe_info *wi; 618 u16 ci; ··· 628 629 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); 630 wi = &sq->db.ico_wqe[ci]; 631 + 632 + if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { 633 + netdev_WARN_ONCE(cq->channel->netdev, 634 + "Bad OP in ICOSQ CQE: 0x%x\n", 635 + get_cqe_opcode(cqe)); 636 + if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) 637 + queue_work(cq->channel->priv->wq, &sq->recover_work); 638 + break; 639 + } 640 641 if (likely(wi->opcode == MLX5_OPCODE_UMR)) { 642 sqcc += MLX5E_UMR_WQEBBS;
+14 -19
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 451 452 i = 0; 453 do { 454 u16 wqe_counter; 455 bool last_wqe; 456 457 mlx5_cqwq_pop(&cq->wq); 458 459 wqe_counter = be16_to_cpu(cqe->wqe_counter); 460 461 - if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) { 462 - if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, 463 - &sq->state)) { 464 - struct mlx5e_tx_wqe_info *wi; 465 - u16 ci; 466 - 467 - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); 468 - wi = &sq->db.wqe_info[ci]; 469 - mlx5e_dump_error_cqe(sq, 470 - (struct mlx5_err_cqe *)cqe); 471 - mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); 472 - queue_work(cq->channel->priv->wq, 473 - &sq->recover_work); 474 - } 475 - stats->cqe_err++; 476 - } 477 - 478 do { 479 - struct mlx5e_tx_wqe_info *wi; 480 struct sk_buff *skb; 481 - u16 ci; 482 int j; 483 484 last_wqe = (sqcc == wqe_counter); ··· 498 sqcc += wi->num_wqebbs; 499 napi_consume_skb(skb, napi_budget); 500 } while (!last_wqe); 501 502 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); 503
··· 451 452 i = 0; 453 do { 454 + struct mlx5e_tx_wqe_info *wi; 455 u16 wqe_counter; 456 bool last_wqe; 457 + u16 ci; 458 459 mlx5_cqwq_pop(&cq->wq); 460 461 wqe_counter = be16_to_cpu(cqe->wqe_counter); 462 463 do { 464 struct sk_buff *skb; 465 int j; 466 467 last_wqe = (sqcc == wqe_counter); ··· 515 sqcc += wi->num_wqebbs; 516 napi_consume_skb(skb, napi_budget); 517 } while (!last_wqe); 518 + 519 + if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) { 520 + if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, 521 + &sq->state)) { 522 + mlx5e_dump_error_cqe(sq, 523 + (struct mlx5_err_cqe *)cqe); 524 + mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); 525 + queue_work(cq->channel->priv->wq, 526 + &sq->recover_work); 527 + } 528 + stats->cqe_err++; 529 + } 530 531 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); 532
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
··· 850 mutex_lock(&fpga_xfrm->lock); 851 if (!--fpga_xfrm->num_rules) { 852 mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); 853 fpga_xfrm->sa_ctx = NULL; 854 } 855 mutex_unlock(&fpga_xfrm->lock); ··· 1479 if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) 1480 return 0; 1481 1482 - if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { 1483 mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); 1484 return -EOPNOTSUPP; 1485 }
··· 850 mutex_lock(&fpga_xfrm->lock); 851 if (!--fpga_xfrm->num_rules) { 852 mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx); 853 + kfree(fpga_xfrm->sa_ctx); 854 fpga_xfrm->sa_ctx = NULL; 855 } 856 mutex_unlock(&fpga_xfrm->lock); ··· 1478 if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) 1479 return 0; 1480 1481 + if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) { 1482 mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n"); 1483 return -EOPNOTSUPP; 1484 }
+8 -7
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1582 struct match_list first; 1583 }; 1584 1585 - static void free_match_list(struct match_list_head *head) 1586 { 1587 if (!list_empty(&head->list)) { 1588 struct match_list *iter, *match_tmp; 1589 1590 list_del(&head->first.list); 1591 - tree_put_node(&head->first.g->node, false); 1592 list_for_each_entry_safe(iter, match_tmp, &head->list, 1593 list) { 1594 - tree_put_node(&iter->g->node, false); 1595 list_del(&iter->list); 1596 kfree(iter); 1597 } ··· 1600 1601 static int build_match_list(struct match_list_head *match_head, 1602 struct mlx5_flow_table *ft, 1603 - const struct mlx5_flow_spec *spec) 1604 { 1605 struct rhlist_head *tmp, *list; 1606 struct mlx5_flow_group *g; ··· 1626 1627 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); 1628 if (!curr_match) { 1629 - free_match_list(match_head); 1630 err = -ENOMEM; 1631 goto out; 1632 } ··· 1806 version = atomic_read(&ft->node.version); 1807 1808 /* Collect all fgs which has a matching match_criteria */ 1809 - err = build_match_list(&match_head, ft, spec); 1810 if (err) { 1811 if (take_write) 1812 up_write_ref_node(&ft->node, false); ··· 1820 1821 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest, 1822 dest_num, version); 1823 - free_match_list(&match_head); 1824 if (!IS_ERR(rule) || 1825 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { 1826 if (take_write)
··· 1582 struct match_list first; 1583 }; 1584 1585 + static void free_match_list(struct match_list_head *head, bool ft_locked) 1586 { 1587 if (!list_empty(&head->list)) { 1588 struct match_list *iter, *match_tmp; 1589 1590 list_del(&head->first.list); 1591 + tree_put_node(&head->first.g->node, ft_locked); 1592 list_for_each_entry_safe(iter, match_tmp, &head->list, 1593 list) { 1594 + tree_put_node(&iter->g->node, ft_locked); 1595 list_del(&iter->list); 1596 kfree(iter); 1597 } ··· 1600 1601 static int build_match_list(struct match_list_head *match_head, 1602 struct mlx5_flow_table *ft, 1603 + const struct mlx5_flow_spec *spec, 1604 + bool ft_locked) 1605 { 1606 struct rhlist_head *tmp, *list; 1607 struct mlx5_flow_group *g; ··· 1625 1626 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); 1627 if (!curr_match) { 1628 + free_match_list(match_head, ft_locked); 1629 err = -ENOMEM; 1630 goto out; 1631 } ··· 1805 version = atomic_read(&ft->node.version); 1806 1807 /* Collect all fgs which has a matching match_criteria */ 1808 + err = build_match_list(&match_head, ft, spec, take_write); 1809 if (err) { 1810 if (take_write) 1811 up_write_ref_node(&ft->node, false); ··· 1819 1820 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest, 1821 dest_num, version); 1822 + free_match_list(&match_head, take_write); 1823 if (!IS_ERR(rule) || 1824 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { 1825 if (take_write)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fw.c
···
         return err;
     }
 
-    if (MLX5_CAP_GEN(dev, tls)) {
+    if (MLX5_CAP_GEN(dev, tls_tx)) {
         err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
         if (err)
             return err;
+4 -2
drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
··· 573 574 static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) 575 { 576 int index, max_index, sensor_index; 577 char mgpir_pl[MLXSW_REG_MGPIR_LEN]; 578 char mtmp_pl[MLXSW_REG_MTMP_LEN]; ··· 585 if (err) 586 return err; 587 588 - mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL); 589 - if (!gbox_num) 590 return 0; 591 592 index = mlxsw_hwmon->module_sensor_max;
··· 573 574 static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon) 575 { 576 + enum mlxsw_reg_mgpir_device_type device_type; 577 int index, max_index, sensor_index; 578 char mgpir_pl[MLXSW_REG_MGPIR_LEN]; 579 char mtmp_pl[MLXSW_REG_MTMP_LEN]; ··· 584 if (err) 585 return err; 586 587 + mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL); 588 + if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE || 589 + !gbox_num) 590 return 0; 591 592 index = mlxsw_hwmon->module_sensor_max;
+6 -2
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
··· 895 mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, 896 struct mlxsw_thermal *thermal) 897 { 898 struct mlxsw_thermal_module *gearbox_tz; 899 char mgpir_pl[MLXSW_REG_MGPIR_LEN]; 900 int i; 901 int err; 902 ··· 910 if (err) 911 return err; 912 913 - mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL, 914 NULL); 915 - if (!thermal->tz_gearbox_num) 916 return 0; 917 918 thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num, 919 sizeof(*thermal->tz_gearbox_arr), 920 GFP_KERNEL);
··· 895 mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core, 896 struct mlxsw_thermal *thermal) 897 { 898 + enum mlxsw_reg_mgpir_device_type device_type; 899 struct mlxsw_thermal_module *gearbox_tz; 900 char mgpir_pl[MLXSW_REG_MGPIR_LEN]; 901 + u8 gbox_num; 902 int i; 903 int err; 904 ··· 908 if (err) 909 return err; 910 911 + mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, 912 NULL); 913 + if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE || 914 + !gbox_num) 915 return 0; 916 917 + thermal->tz_gearbox_num = gbox_num; 918 thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num, 919 sizeof(*thermal->tz_gearbox_arr), 920 GFP_KERNEL);
+2 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
···
 start_again:
     err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
     if (err)
-        return err;
+        goto err_ctx_prepare;
     j = 0;
     for (; i < rif_count; i++) {
         struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
···
     return 0;
 err_entry_append:
 err_entry_get:
+err_ctx_prepare:
     rtnl_unlock();
     devlink_dpipe_entry_clear(&entry);
     return err;
+54 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 4844 fib_node->fib_entry = NULL; 4845 } 4846 4847 static int 4848 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp, 4849 const struct fib_entry_notifier_info *fen_info) ··· 4887 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n"); 4888 err = PTR_ERR(fib4_entry); 4889 goto err_fib4_entry_create; 4890 } 4891 4892 replaced = fib_node->fib_entry; ··· 4931 return; 4932 4933 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); 4934 - if (WARN_ON(!fib4_entry)) 4935 return; 4936 fib_node = fib4_entry->common.fib_node; 4937 ··· 4993 4994 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) 4995 { 4996 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt); 4997 kfree(mlxsw_sp_rt6); 4998 } ··· 5434 return NULL; 5435 } 5436 5437 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp, 5438 struct fib6_info **rt_arr, 5439 unsigned int nrt6) ··· 5487 if (IS_ERR(fib6_entry)) { 5488 err = PTR_ERR(fib6_entry); 5489 goto err_fib6_entry_create; 5490 } 5491 5492 replaced = fib_node->fib_entry;
··· 4844 fib_node->fib_entry = NULL; 4845 } 4846 4847 + static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry) 4848 + { 4849 + struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; 4850 + struct mlxsw_sp_fib4_entry *fib4_replaced; 4851 + 4852 + if (!fib_node->fib_entry) 4853 + return true; 4854 + 4855 + fib4_replaced = container_of(fib_node->fib_entry, 4856 + struct mlxsw_sp_fib4_entry, common); 4857 + if (fib4_entry->tb_id == RT_TABLE_MAIN && 4858 + fib4_replaced->tb_id == RT_TABLE_LOCAL) 4859 + return false; 4860 + 4861 + return true; 4862 + } 4863 + 4864 static int 4865 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp, 4866 const struct fib_entry_notifier_info *fen_info) ··· 4870 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n"); 4871 err = PTR_ERR(fib4_entry); 4872 goto err_fib4_entry_create; 4873 + } 4874 + 4875 + if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) { 4876 + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); 4877 + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 4878 + return 0; 4879 } 4880 4881 replaced = fib_node->fib_entry; ··· 4908 return; 4909 4910 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); 4911 + if (!fib4_entry) 4912 return; 4913 fib_node = fib4_entry->common.fib_node; 4914 ··· 4970 4971 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) 4972 { 4973 + struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh; 4974 + 4975 + fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD; 4976 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt); 4977 kfree(mlxsw_sp_rt6); 4978 } ··· 5408 return NULL; 5409 } 5410 5411 + static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry) 5412 + { 5413 + struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; 5414 + struct mlxsw_sp_fib6_entry *fib6_replaced; 5415 + struct fib6_info *rt, *rt_replaced; 5416 + 5417 + if (!fib_node->fib_entry) 5418 + return true; 5419 + 5420 + fib6_replaced = container_of(fib_node->fib_entry, 5421 + struct mlxsw_sp_fib6_entry, 5422 + common); 5423 + rt = mlxsw_sp_fib6_entry_rt(fib6_entry); 5424 + rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced); 5425 + if (rt->fib6_table->tb6_id == RT_TABLE_MAIN && 5426 + rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL) 5427 + return false; 5428 + 5429 + return true; 5430 + } 5431 + 5432 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp, 5433 struct fib6_info **rt_arr, 5434 unsigned int nrt6) ··· 5440 if (IS_ERR(fib6_entry)) { 5441 err = PTR_ERR(fib6_entry); 5442 goto err_fib6_entry_create; 5443 + } 5444 + 5445 + if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) { 5446 + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); 5447 + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); 5448 + return 0; 5449 } 5450 5451 replaced = fib_node->fib_entry;
+2 -2
drivers/net/ethernet/qlogic/qed/qed_ptp.c
···
 /* Add/subtract the Adjustment_Value when making a Drift adjustment */
 #define QED_DRIFT_CNTR_DIRECTION_SHIFT  31
 #define QED_TIMESTAMP_MASK              BIT(16)
-/* Param mask for Hardware to detect/timestamp the unicast PTP packets */
-#define QED_PTP_UCAST_PARAM_MASK        0xF
+/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
+#define QED_PTP_UCAST_PARAM_MASK        0x70F
 
 static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
 {
+6
drivers/net/ethernet/realtek/r8169_main.c
···
     switch (tp->mac_version) {
     case RTL_GIGA_MAC_VER_12:
     case RTL_GIGA_MAC_VER_17:
+        pcie_set_readrq(tp->pci_dev, 512);
         r8168b_1_hw_jumbo_enable(tp);
         break;
     case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+        pcie_set_readrq(tp->pci_dev, 512);
         r8168c_hw_jumbo_enable(tp);
         break;
     case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
         r8168dp_hw_jumbo_enable(tp);
         break;
     case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
+        pcie_set_readrq(tp->pci_dev, 512);
         r8168e_hw_jumbo_enable(tp);
         break;
     default:
···
         break;
     }
     rtl_lock_config_regs(tp);
+
+    if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+        pcie_set_readrq(tp->pci_dev, 4096);
 }
 
 static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
-1
drivers/net/ethernet/sgi/ioc3-eth.c
···
     netif_stop_queue(dev);
 
     ioc3_stop(ip);
-    free_irq(dev->irq, dev);
 
     ioc3_free_rx_bufs(ip);
     ioc3_clean_tx_ring(ip);
+1
drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
···
         dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
         if (dll_lock & SDC4_STATUS_DLL_LOCK)
             break;
+        retry--;
     } while (retry > 0);
     if (!retry)
         dev_err(&ethqos->pdev->dev,
+5 -4
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
··· 420 value |= GMAC_PACKET_FILTER_PM; 421 /* Set all the bits of the HASH tab */ 422 memset(mc_filter, 0xff, sizeof(mc_filter)); 423 - } else if (!netdev_mc_empty(dev)) { 424 struct netdev_hw_addr *ha; 425 426 /* Hash filter for multicast */ ··· 736 __le16 perfect_match, bool is_double) 737 { 738 void __iomem *ioaddr = hw->pcsr; 739 740 writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE); 741 742 if (hash) { 743 - u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV; 744 if (is_double) { 745 value |= GMAC_VLAN_EDVLP; 746 value |= GMAC_VLAN_ESVL; ··· 762 763 writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG); 764 } else { 765 - u32 value = readl(ioaddr + GMAC_VLAN_TAG); 766 - 767 value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV); 768 value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL); 769 value &= ~GMAC_VLAN_DOVLTC;
··· 420 value |= GMAC_PACKET_FILTER_PM; 421 /* Set all the bits of the HASH tab */ 422 memset(mc_filter, 0xff, sizeof(mc_filter)); 423 + } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { 424 struct netdev_hw_addr *ha; 425 426 /* Hash filter for multicast */ ··· 736 __le16 perfect_match, bool is_double) 737 { 738 void __iomem *ioaddr = hw->pcsr; 739 + u32 value; 740 741 writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE); 742 743 + value = readl(ioaddr + GMAC_VLAN_TAG); 744 + 745 if (hash) { 746 + value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV; 747 if (is_double) { 748 value |= GMAC_VLAN_EDVLP; 749 value |= GMAC_VLAN_ESVL; ··· 759 760 writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG); 761 } else { 762 value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV); 763 value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL); 764 value &= ~GMAC_VLAN_DOVLTC;
+7 -3
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
··· 458 459 for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++) 460 writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i)); 461 - } else if (!netdev_mc_empty(dev)) { 462 struct netdev_hw_addr *ha; 463 464 value |= XGMAC_FILTER_HMC; ··· 569 570 writel(value, ioaddr + XGMAC_PACKET_FILTER); 571 572 - value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV; 573 if (is_double) { 574 value |= XGMAC_VLAN_EDVLP; 575 value |= XGMAC_VLAN_ESVL; ··· 586 587 writel(value, ioaddr + XGMAC_PACKET_FILTER); 588 589 - value = XGMAC_VLAN_ETV; 590 if (is_double) { 591 value |= XGMAC_VLAN_EDVLP; 592 value |= XGMAC_VLAN_ESVL;
··· 458 459 for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++) 460 writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i)); 461 + } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) { 462 struct netdev_hw_addr *ha; 463 464 value |= XGMAC_FILTER_HMC; ··· 569 570 writel(value, ioaddr + XGMAC_PACKET_FILTER); 571 572 + value = readl(ioaddr + XGMAC_VLAN_TAG); 573 + 574 + value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV; 575 if (is_double) { 576 value |= XGMAC_VLAN_EDVLP; 577 value |= XGMAC_VLAN_ESVL; ··· 584 585 writel(value, ioaddr + XGMAC_PACKET_FILTER); 586 587 + value = readl(ioaddr + XGMAC_VLAN_TAG); 588 + 589 + value |= XGMAC_VLAN_ETV; 590 if (is_double) { 591 value |= XGMAC_VLAN_EDVLP; 592 value |= XGMAC_VLAN_ESVL;
+8 -6
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
··· 95 96 plat->bus_id = 1; 97 plat->phy_addr = 0; 98 - plat->interface = PHY_INTERFACE_MODE_GMII; 99 100 plat->dma_cfg->pbl = 32; 101 plat->dma_cfg->pblx8 = true; ··· 217 { 218 plat->bus_id = 1; 219 plat->phy_addr = 0; 220 - plat->interface = PHY_INTERFACE_MODE_SGMII; 221 return ehl_common_data(pdev, plat); 222 } 223 ··· 231 { 232 plat->bus_id = 1; 233 plat->phy_addr = 0; 234 - plat->interface = PHY_INTERFACE_MODE_RGMII; 235 return ehl_common_data(pdev, plat); 236 } 237 ··· 260 { 261 plat->bus_id = 1; 262 plat->phy_addr = 0; 263 - plat->interface = PHY_INTERFACE_MODE_SGMII; 264 return tgl_common_data(pdev, plat); 265 } 266 ··· 360 361 plat->bus_id = pci_dev_id(pdev); 362 plat->phy_addr = ret; 363 - plat->interface = PHY_INTERFACE_MODE_RMII; 364 365 plat->dma_cfg->pbl = 16; 366 plat->dma_cfg->pblx8 = true; ··· 417 418 plat->bus_id = 1; 419 plat->phy_addr = -1; 420 - plat->interface = PHY_INTERFACE_MODE_GMII; 421 422 plat->dma_cfg->pbl = 32; 423 plat->dma_cfg->pblx8 = true;
··· 95 96 plat->bus_id = 1; 97 plat->phy_addr = 0; 98 + plat->phy_interface = PHY_INTERFACE_MODE_GMII; 99 100 plat->dma_cfg->pbl = 32; 101 plat->dma_cfg->pblx8 = true; ··· 217 { 218 plat->bus_id = 1; 219 plat->phy_addr = 0; 220 + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; 221 + 222 return ehl_common_data(pdev, plat); 223 } 224 ··· 230 { 231 plat->bus_id = 1; 232 plat->phy_addr = 0; 233 + plat->phy_interface = PHY_INTERFACE_MODE_RGMII; 234 + 235 return ehl_common_data(pdev, plat); 236 } 237 ··· 258 { 259 plat->bus_id = 1; 260 plat->phy_addr = 0; 261 + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; 262 return tgl_common_data(pdev, plat); 263 } 264 ··· 358 359 plat->bus_id = pci_dev_id(pdev); 360 plat->phy_addr = ret; 361 + plat->phy_interface = PHY_INTERFACE_MODE_RMII; 362 363 plat->dma_cfg->pbl = 16; 364 plat->dma_cfg->pblx8 = true; ··· 415 416 plat->bus_id = 1; 417 plat->phy_addr = -1; 418 + plat->phy_interface = PHY_INTERFACE_MODE_GMII; 419 420 plat->dma_cfg->pbl = 32; 421 plat->dma_cfg->pblx8 = true;
+11 -2
drivers/net/hyperv/netvsc_bpf.c
···
     }
 
     if (prog)
-        bpf_prog_add(prog, nvdev->num_chn);
+        bpf_prog_add(prog, nvdev->num_chn - 1);
 
     for (i = 0; i < nvdev->num_chn; i++)
         rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
···
 {
     struct netdev_bpf xdp;
     bpf_op_t ndo_bpf;
+    int ret;
 
     ASSERT_RTNL();
 
···
 
     memset(&xdp, 0, sizeof(xdp));
 
+    if (prog)
+        bpf_prog_inc(prog);
+
     xdp.command = XDP_SETUP_PROG;
     xdp.prog = prog;
 
-    return ndo_bpf(vf_netdev, &xdp);
+    ret = ndo_bpf(vf_netdev, &xdp);
+
+    if (ret && prog)
+        bpf_prog_put(prog);
+
+    return ret;
 }
 
 static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
+4 -1
drivers/net/hyperv/netvsc_drv.c
···
 
     prog = dev_info->bprog;
     if (prog) {
+        bpf_prog_inc(prog);
         ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
-        if (ret)
+        if (ret) {
+            bpf_prog_put(prog);
             goto err1;
+        }
     }
 
     /* In any case device is now ready */
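
Editorial note on the two hyperv hunks above: both follow the same
reference-ownership rule, namely that ndo_bpf(XDP_SETUP_PROG) consumes one
bpf_prog reference on success, so the caller takes its own reference first
and drops it only on failure. A compact sketch of that rule, with an
illustrative function name rather than the driver's actual code:

    #include <linux/bpf.h>
    #include <linux/netdevice.h>

    static int xdp_attach_sketch(struct net_device *dev, struct bpf_prog *prog,
                                 bpf_op_t ndo_bpf)
    {
            struct netdev_bpf xdp = {
                    .command = XDP_SETUP_PROG,
                    .prog    = prog,
            };
            int ret;

            if (prog)
                    bpf_prog_inc(prog);     /* reference owned by this call */

            ret = ndo_bpf(dev, &xdp);
            if (ret && prog)
                    bpf_prog_put(prog);     /* target did not take ownership */

            return ret;
    }
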
+1 -3
drivers/net/netdevsim/dev.c
···
 int nsim_dev_init(void)
 {
     nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
-    if (IS_ERR(nsim_dev_ddir))
-        return PTR_ERR(nsim_dev_ddir);
-    return 0;
+    return PTR_ERR_OR_ZERO(nsim_dev_ddir);
 }
 
 void nsim_dev_exit(void)
+1
drivers/net/wireguard/allowedips.c
···
     } else {
         node = kzalloc(sizeof(*node), GFP_KERNEL);
         if (unlikely(!node)) {
+            list_del(&newnode->peer_list);
             kfree(newnode);
             return -ENOMEM;
         }
+2 -4
drivers/net/wireguard/netlink.c
···
                               private_key);
         list_for_each_entry_safe(peer, temp, &wg->peer_list,
                                  peer_list) {
-            if (wg_noise_precompute_static_static(peer))
-                wg_noise_expire_current_peer_keypairs(peer);
-            else
-                wg_peer_remove(peer);
+            BUG_ON(!wg_noise_precompute_static_static(peer));
+            wg_noise_expire_current_peer_keypairs(peer);
         }
         wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
         up_write(&wg->static_identity.lock);
+7 -3
drivers/net/wireguard/noise.c
···
 /* Must hold peer->handshake.static_identity->lock */
 bool wg_noise_precompute_static_static(struct wg_peer *peer)
 {
-    bool ret = true;
+    bool ret;
 
     down_write(&peer->handshake.lock);
-    if (peer->handshake.static_identity->has_identity)
+    if (peer->handshake.static_identity->has_identity) {
         ret = curve25519(
             peer->handshake.precomputed_static_static,
             peer->handshake.static_identity->static_private,
             peer->handshake.remote_static);
-    else
+    } else {
+        u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };
+
+        ret = curve25519(empty, empty, peer->handshake.remote_static);
         memset(peer->handshake.precomputed_static_static, 0,
                NOISE_PUBLIC_KEY_LEN);
+    }
     up_write(&peer->handshake.lock);
     return ret;
 }
+41 -13
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 1897 ieee80211_resume_disconnect(vif); 1898 } 1899 1900 - static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm, 1901 - struct ieee80211_vif *vif) 1902 { 1903 - u32 base = mvm->trans->dbg.lmac_error_event_table[0]; 1904 struct error_table_start { 1905 /* cf. struct iwl_error_event_table */ 1906 u32 valid; 1907 - u32 error_id; 1908 } err_info; 1909 1910 - iwl_trans_read_mem_bytes(mvm->trans, base, 1911 - &err_info, sizeof(err_info)); 1912 1913 - if (err_info.valid && 1914 - err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) { 1915 - struct cfg80211_wowlan_wakeup wakeup = { 1916 - .rfkill_release = true, 1917 - }; 1918 - ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL); 1919 } 1920 - return err_info.valid; 1921 } 1922 1923 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
··· 1897 ieee80211_resume_disconnect(vif); 1898 } 1899 1900 + static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id) 1901 { 1902 struct error_table_start { 1903 /* cf. struct iwl_error_event_table */ 1904 u32 valid; 1905 + __le32 err_id; 1906 } err_info; 1907 1908 + if (!base) 1909 + return false; 1910 1911 + iwl_trans_read_mem_bytes(trans, base, 1912 + &err_info, sizeof(err_info)); 1913 + if (err_info.valid && err_id) 1914 + *err_id = le32_to_cpu(err_info.err_id); 1915 + 1916 + return !!err_info.valid; 1917 + } 1918 + 1919 + static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, 1920 + struct ieee80211_vif *vif) 1921 + { 1922 + u32 err_id; 1923 + 1924 + /* check for lmac1 error */ 1925 + if (iwl_mvm_rt_status(mvm->trans, 1926 + mvm->trans->dbg.lmac_error_event_table[0], 1927 + &err_id)) { 1928 + if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { 1929 + struct cfg80211_wowlan_wakeup wakeup = { 1930 + .rfkill_release = true, 1931 + }; 1932 + ieee80211_report_wowlan_wakeup(vif, &wakeup, 1933 + GFP_KERNEL); 1934 + } 1935 + return true; 1936 } 1937 + 1938 + /* check if we have lmac2 set and check for error */ 1939 + if (iwl_mvm_rt_status(mvm->trans, 1940 + mvm->trans->dbg.lmac_error_event_table[1], NULL)) 1941 + return true; 1942 + 1943 + /* check for umac error */ 1944 + if (iwl_mvm_rt_status(mvm->trans, 1945 + mvm->trans->dbg.umac_error_event_table, NULL)) 1946 + return true; 1947 + 1948 + return false; 1949 } 1950 1951 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+4 -1
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
··· 8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 9 * Copyright (C) 2018 Intel Corporation 10 * Copyright (C) 2019 Intel Corporation 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as ··· 31 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 32 * Copyright (C) 2018 Intel Corporation 33 * Copyright (C) 2019 Intel Corporation 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without ··· 530 if (req != mvm->ftm_initiator.req) 531 return; 532 533 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD, 534 LOCATION_GROUP, 0), 535 0, sizeof(cmd), &cmd)) ··· 645 lockdep_assert_held(&mvm->mutex); 646 647 if (!mvm->ftm_initiator.req) { 648 - IWL_ERR(mvm, "Got FTM response but have no request?\n"); 649 return; 650 } 651
··· 8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 9 * Copyright (C) 2018 Intel Corporation 10 * Copyright (C) 2019 Intel Corporation 11 + * Copyright (C) 2020 Intel Corporation 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of version 2 of the GNU General Public License as ··· 30 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 31 * Copyright (C) 2018 Intel Corporation 32 * Copyright (C) 2019 Intel Corporation 33 + * Copyright (C) 2020 Intel Corporation 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without ··· 528 if (req != mvm->ftm_initiator.req) 529 return; 530 531 + iwl_mvm_ftm_reset(mvm); 532 + 533 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD, 534 LOCATION_GROUP, 0), 535 0, sizeof(cmd), &cmd)) ··· 641 lockdep_assert_held(&mvm->mutex); 642 643 if (!mvm->ftm_initiator.req) { 644 return; 645 } 646
+4 -6
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 5 * 6 * GPL LICENSE SUMMARY 7 * 8 - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11 - * Copyright(c) 2018 - 2019 Intel Corporation 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of version 2 of the GNU General Public License as ··· 27 * 28 * BSD LICENSE 29 * 30 - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 33 - * Copyright(c) 2018 - 2019 Intel Corporation 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without ··· 2035 rcu_read_lock(); 2036 2037 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); 2038 - if (IS_ERR(sta)) { 2039 rcu_read_unlock(); 2040 WARN(1, "Can't find STA to configure HE\n"); 2041 return; ··· 3291 if (fw_has_capa(&mvm->fw->ucode_capa, 3292 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) 3293 iwl_mvm_schedule_session_protection(mvm, vif, 900, 3294 - min_duration); 3295 else 3296 iwl_mvm_protect_session(mvm, vif, duration, 3297 min_duration, 500, false);
··· 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as ··· 28 * 29 * BSD LICENSE 30 * 31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 33 + * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without ··· 2037 rcu_read_lock(); 2038 2039 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); 2040 + if (IS_ERR_OR_NULL(sta)) { 2041 rcu_read_unlock(); 2042 WARN(1, "Can't find STA to configure HE\n"); 2043 return; ··· 3293 if (fw_has_capa(&mvm->fw->ucode_capa, 3294 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) 3295 iwl_mvm_schedule_session_protection(mvm, vif, 900, 3296 + min_duration, false); 3297 else 3298 iwl_mvm_protect_session(mvm, vif, duration, 3299 min_duration, 500, false);
+7 -3
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 3320 igtk_cmd.sta_id = cpu_to_le32(sta_id); 3321 3322 if (remove_key) { 3323 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); 3324 } else { 3325 struct ieee80211_key_seq seq; ··· 3578 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 3579 keyconf->keyidx, sta_id); 3580 3581 - if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3582 - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3583 - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) 3584 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); 3585 3586 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
··· 3320 igtk_cmd.sta_id = cpu_to_le32(sta_id); 3321 3322 if (remove_key) { 3323 + /* This is a valid situation for IGTK */ 3324 + if (sta_id == IWL_MVM_INVALID_STA) 3325 + return 0; 3326 + 3327 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); 3328 } else { 3329 struct ieee80211_key_seq seq; ··· 3574 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 3575 keyconf->keyidx, sta_id); 3576 3577 + if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3578 + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3579 + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3580 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); 3581 3582 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
+8 -2
drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
··· 205 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 206 u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; 207 208 - mutex_lock(&mvm->mutex); 209 /* Protect the session to hear the TDLS setup response on the channel */ 210 - iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); 211 mutex_unlock(&mvm->mutex); 212 } 213
··· 205 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 206 u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; 207 208 /* Protect the session to hear the TDLS setup response on the channel */ 209 + mutex_lock(&mvm->mutex); 210 + if (fw_has_capa(&mvm->fw->ucode_capa, 211 + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) 212 + iwl_mvm_schedule_session_protection(mvm, vif, duration, 213 + duration, true); 214 + else 215 + iwl_mvm_protect_session(mvm, vif, duration, 216 + duration, 100, true); 217 mutex_unlock(&mvm->mutex); 218 } 219
+60 -11
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
··· 1056 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 1057 } 1058 1059 void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, 1060 struct ieee80211_vif *vif, 1061 - u32 duration, u32 min_duration) 1062 { 1063 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1064 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 1065 - 1066 struct iwl_mvm_session_prot_cmd cmd = { 1067 .id_and_color = 1068 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, ··· 1100 .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), 1101 .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), 1102 }; 1103 - int ret; 1104 1105 lockdep_assert_held(&mvm->mutex); 1106 ··· 1120 IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n", 1121 le32_to_cpu(cmd.duration_tu)); 1122 1123 - ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD, 1124 - MAC_CONF_GROUP, 0), 1125 - 0, sizeof(cmd), &cmd); 1126 - if (ret) { 1127 IWL_ERR(mvm, 1128 - "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret); 1129 - spin_lock_bh(&mvm->time_event_lock); 1130 - iwl_mvm_te_clear_data(mvm, te_data); 1131 - spin_unlock_bh(&mvm->time_event_lock); 1132 } 1133 }
··· 1056 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 1057 } 1058 1059 + static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait, 1060 + struct iwl_rx_packet *pkt, void *data) 1061 + { 1062 + struct iwl_mvm *mvm = 1063 + container_of(notif_wait, struct iwl_mvm, notif_wait); 1064 + struct iwl_mvm_session_prot_notif *resp; 1065 + int resp_len = iwl_rx_packet_payload_len(pkt); 1066 + 1067 + if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF || 1068 + pkt->hdr.group_id != MAC_CONF_GROUP)) 1069 + return true; 1070 + 1071 + if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 1072 + IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n"); 1073 + return true; 1074 + } 1075 + 1076 + resp = (void *)pkt->data; 1077 + 1078 + if (!resp->status) 1079 + IWL_ERR(mvm, 1080 + "TIME_EVENT_NOTIFICATION received but not executed\n"); 1081 + 1082 + return true; 1083 + } 1084 + 1085 void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, 1086 struct ieee80211_vif *vif, 1087 + u32 duration, u32 min_duration, 1088 + bool wait_for_notif) 1089 { 1090 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1091 struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; 1092 + const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF, 1093 + MAC_CONF_GROUP, 0) }; 1094 + struct iwl_notification_wait wait_notif; 1095 struct iwl_mvm_session_prot_cmd cmd = { 1096 .id_and_color = 1097 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, ··· 1071 .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC), 1072 .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)), 1073 }; 1074 1075 lockdep_assert_held(&mvm->mutex); 1076 ··· 1092 IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n", 1093 le32_to_cpu(cmd.duration_tu)); 1094 1095 + if (!wait_for_notif) { 1096 + if (iwl_mvm_send_cmd_pdu(mvm, 1097 + iwl_cmd_id(SESSION_PROTECTION_CMD, 1098 + MAC_CONF_GROUP, 0), 1099 + 0, sizeof(cmd), &cmd)) { 1100 + IWL_ERR(mvm, 1101 + "Couldn't send the SESSION_PROTECTION_CMD\n"); 1102 + spin_lock_bh(&mvm->time_event_lock); 1103 + iwl_mvm_te_clear_data(mvm, te_data); 1104 + spin_unlock_bh(&mvm->time_event_lock); 1105 + } 1106 + 1107 + return; 1108 + } 1109 + 1110 + iwl_init_notification_wait(&mvm->notif_wait, &wait_notif, 1111 + notif, ARRAY_SIZE(notif), 1112 + iwl_mvm_session_prot_notif, NULL); 1113 + 1114 + if (iwl_mvm_send_cmd_pdu(mvm, 1115 + iwl_cmd_id(SESSION_PROTECTION_CMD, 1116 + MAC_CONF_GROUP, 0), 1117 + 0, sizeof(cmd), &cmd)) { 1118 IWL_ERR(mvm, 1119 + "Couldn't send the SESSION_PROTECTION_CMD\n"); 1120 + iwl_remove_notification(&mvm->notif_wait, &wait_notif); 1121 + } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif, 1122 + TU_TO_JIFFIES(100))) { 1123 + IWL_ERR(mvm, 1124 + "Failed to protect session until session protection\n"); 1125 } 1126 }
+3 -1
drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
··· 250 * @mvm: the mvm component 251 * @vif: the virtual interface for which the protection issued 252 * @duration: the duration of the protection 253 */ 254 void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, 255 struct ieee80211_vif *vif, 256 - u32 duration, u32 min_duration); 257 258 /** 259 * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
··· 250 * @mvm: the mvm component 251 * @vif: the virtual interface for which the protection issued 252 * @duration: the duration of the protection 253 + * @wait_for_notif: if true, will block until the start of the protection 254 */ 255 void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm, 256 struct ieee80211_vif *vif, 257 + u32 duration, u32 min_duration, 258 + bool wait_for_notif); 259 260 /** 261 * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
+6 -4
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
··· 8 * Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 10 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 11 - * Copyright(c) 2019 Intel Corporation 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of version 2 of the GNU General Public License as ··· 31 * Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved. 32 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 33 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 34 - * Copyright(c) 2019 Intel Corporation 35 * All rights reserved. 36 * 37 * Redistribution and use in source and binary forms, with or without ··· 234 .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), 235 }; 236 struct iwl_ext_dts_measurement_cmd extcmd = { 237 - .control_mode = cpu_to_le32(DTS_AUTOMATIC), 238 }; 239 u32 cmdid; 240 ··· 734 static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) 735 { 736 int i; 737 - char name[] = "iwlwifi"; 738 739 if (!iwl_mvm_is_tt_in_fw(mvm)) { 740 mvm->tz_device.tzone = NULL; ··· 745 746 BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); 747 748 mvm->tz_device.tzone = thermal_zone_device_register(name, 749 IWL_MAX_DTS_TRIPS, 750 IWL_WRITABLE_TRIPS_MSK,
··· 8 * Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 10 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 11 + * Copyright(c) 2019 - 2020 Intel Corporation 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of version 2 of the GNU General Public License as ··· 31 * Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved. 32 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 33 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 34 + * Copyright(c) 2019 - 2020 Intel Corporation 35 * All rights reserved. 36 * 37 * Redistribution and use in source and binary forms, with or without ··· 234 .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP), 235 }; 236 struct iwl_ext_dts_measurement_cmd extcmd = { 237 + .control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE), 238 }; 239 u32 cmdid; 240 ··· 734 static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) 735 { 736 int i; 737 + char name[16]; 738 + static atomic_t counter = ATOMIC_INIT(0); 739 740 if (!iwl_mvm_is_tt_in_fw(mvm)) { 741 mvm->tz_device.tzone = NULL; ··· 744 745 BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); 746 747 + sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF); 748 mvm->tz_device.tzone = thermal_zone_device_register(name, 749 IWL_MAX_DTS_TRIPS, 750 IWL_WRITABLE_TRIPS_MSK,
+2
drivers/net/wireless/marvell/libertas/cfg.c
··· 1785 rates_max = rates_eid[1]; 1786 if (rates_max > MAX_RATES) { 1787 lbs_deb_join("invalid rates"); 1788 goto out; 1789 } 1790 rates = cmd.bss.rates;
··· 1785 rates_max = rates_eid[1]; 1786 if (rates_max > MAX_RATES) { 1787 lbs_deb_join("invalid rates"); 1788 + rcu_read_unlock(); 1789 + ret = -EINVAL; 1790 goto out; 1791 } 1792 rates = cmd.bss.rates;
+7
drivers/net/wireless/marvell/mwifiex/scan.c
··· 2884 vs_param_set->header.len = 2885 cpu_to_le16((((u16) priv->vs_ie[id].ie[1]) 2886 & 0x00FF) + 2); 2887 memcpy(vs_param_set->ie, priv->vs_ie[id].ie, 2888 le16_to_cpu(vs_param_set->header.len)); 2889 *buffer += le16_to_cpu(vs_param_set->header.len) +
··· 2884 vs_param_set->header.len = 2885 cpu_to_le16((((u16) priv->vs_ie[id].ie[1]) 2886 & 0x00FF) + 2); 2887 + if (le16_to_cpu(vs_param_set->header.len) > 2888 + MWIFIEX_MAX_VSIE_LEN) { 2889 + mwifiex_dbg(priv->adapter, ERROR, 2890 + "Invalid param length!\n"); 2891 + break; 2892 + } 2893 + 2894 memcpy(vs_param_set->ie, priv->vs_ie[id].ie, 2895 le16_to_cpu(vs_param_set->header.len)); 2896 *buffer += le16_to_cpu(vs_param_set->header.len) +
+1
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
··· 232 233 if (country_ie_len > 234 (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) { 235 mwifiex_dbg(priv->adapter, ERROR, 236 "11D: country_ie_len overflow!, deauth AP\n"); 237 return -EINVAL;
··· 232 233 if (country_ie_len > 234 (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) { 235 + rcu_read_unlock(); 236 mwifiex_dbg(priv->adapter, ERROR, 237 "11D: country_ie_len overflow!, deauth AP\n"); 238 return -EINVAL;
+4
drivers/net/wireless/marvell/mwifiex/wmm.c
··· 970 "WMM Parameter Set Count: %d\n", 971 wmm_param_ie->qos_info_bitmap & mask); 972 973 memcpy((u8 *) &priv->curr_bss_params.bss_descriptor. 974 wmm_ie, wmm_param_ie, 975 wmm_param_ie->vend_hdr.len + 2);
··· 970 "WMM Parameter Set Count: %d\n", 971 wmm_param_ie->qos_info_bitmap & mask); 972 973 + if (wmm_param_ie->vend_hdr.len + 2 > 974 + sizeof(struct ieee_types_wmm_parameter)) 975 + break; 976 + 977 memcpy((u8 *) &priv->curr_bss_params.bss_descriptor. 978 wmm_ie, wmm_param_ie, 979 wmm_param_ie->vend_hdr.len + 2);
+2 -1
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
··· 92 93 static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev) 94 { 95 - u8 val, *eeprom = dev->mt76.eeprom.data; 96 u8 tx_mask, rx_mask, max_nss; 97 98 val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL, 99 eeprom[MT_EE_WIFI_CONF]);
··· 92 93 static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev) 94 { 95 + u8 *eeprom = dev->mt76.eeprom.data; 96 u8 tx_mask, rx_mask, max_nss; 97 + u32 val; 98 99 val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL, 100 eeprom[MT_EE_WIFI_CONF]);
+11 -12
drivers/net/wireless/realtek/rtw88/wow.c
··· 281 rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE); 282 } 283 284 - static bool rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable) 285 { 286 - bool ret; 287 - 288 /* wait 100ms for wow firmware to finish work */ 289 msleep(100); 290 291 if (wow_enable) { 292 - if (!rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON)) 293 - ret = 0; 294 } else { 295 - if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) == 0 && 296 - rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE) == 0) 297 - ret = 0; 298 } 299 300 - if (ret) 301 - rtw_err(rtwdev, "failed to check wow status %s\n", 302 - wow_enable ? "enabled" : "disabled"); 303 304 - return ret; 305 } 306 307 static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,
··· 281 rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE); 282 } 283 284 + static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable) 285 { 286 /* wait 100ms for wow firmware to finish work */ 287 msleep(100); 288 289 if (wow_enable) { 290 + if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON)) 291 + goto wow_fail; 292 } else { 293 + if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) || 294 + rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE)) 295 + goto wow_fail; 296 } 297 298 + return 0; 299 300 + wow_fail: 301 + rtw_err(rtwdev, "failed to check wow status %s\n", 302 + wow_enable ? "enabled" : "disabled"); 303 + return -EBUSY; 304 } 305 306 static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,
+5 -2
include/linux/bpf.h
··· 728 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) 729 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) 730 const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); 731 - void bpf_struct_ops_init(struct btf *btf); 732 bool bpf_struct_ops_get(const void *kdata); 733 void bpf_struct_ops_put(const void *kdata); 734 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, ··· 752 { 753 return NULL; 754 } 755 - static inline void bpf_struct_ops_init(struct btf *btf) { } 756 static inline bool bpf_try_module_get(const void *data, struct module *owner) 757 { 758 return try_module_get(owner);
··· 728 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) 729 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) 730 const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); 731 + void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); 732 bool bpf_struct_ops_get(const void *kdata); 733 void bpf_struct_ops_put(const void *kdata); 734 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, ··· 752 { 753 return NULL; 754 } 755 + static inline void bpf_struct_ops_init(struct btf *btf, 756 + struct bpf_verifier_log *log) 757 + { 758 + } 759 static inline bool bpf_try_module_get(const void *data, struct module *owner) 760 { 761 return try_module_get(owner);
+4 -3
include/linux/mlx5/mlx5_ifc.h
··· 1448 1449 u8 reserved_at_440[0x20]; 1450 1451 - u8 tls[0x1]; 1452 - u8 reserved_at_461[0x2]; 1453 u8 log_max_uctx[0x5]; 1454 u8 reserved_at_468[0x3]; 1455 u8 log_max_umem[0x5]; 1456 u8 max_num_eqs[0x10]; 1457 1458 - u8 reserved_at_480[0x3]; 1459 u8 log_max_l2_table[0x5]; 1460 u8 reserved_at_488[0x8]; 1461 u8 log_uar_page_sz[0x10];
··· 1448 1449 u8 reserved_at_440[0x20]; 1450 1451 + u8 reserved_at_460[0x3]; 1452 u8 log_max_uctx[0x5]; 1453 u8 reserved_at_468[0x3]; 1454 u8 log_max_umem[0x5]; 1455 u8 max_num_eqs[0x10]; 1456 1457 + u8 reserved_at_480[0x1]; 1458 + u8 tls_tx[0x1]; 1459 + u8 reserved_at_482[0x1]; 1460 u8 log_max_l2_table[0x5]; 1461 u8 reserved_at_488[0x8]; 1462 u8 log_uar_page_sz[0x10];
+1 -1
include/linux/platform_data/b53.h
··· 19 #ifndef __B53_H 20 #define __B53_H 21 22 - #include <linux/kernel.h> 23 #include <linux/platform_data/dsa.h> 24 25 struct b53_platform_data {
··· 19 #ifndef __B53_H 20 #define __B53_H 21 22 + #include <linux/types.h> 23 #include <linux/platform_data/dsa.h> 24 25 struct b53_platform_data {
+1 -1
include/linux/platform_data/microchip-ksz.h
··· 19 #ifndef __MICROCHIP_KSZ_H 20 #define __MICROCHIP_KSZ_H 21 22 - #include <linux/kernel.h> 23 24 struct ksz_platform_data { 25 u32 chip_id;
··· 19 #ifndef __MICROCHIP_KSZ_H 20 #define __MICROCHIP_KSZ_H 21 22 + #include <linux/types.h> 23 24 struct ksz_platform_data { 25 u32 chip_id;
+13 -1
include/linux/skbuff.h
··· 1822 } 1823 1824 /** 1825 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head 1826 * @list: queue to initialize 1827 * ··· 2038 { 2039 struct sk_buff *next, *prev; 2040 2041 - list->qlen--; 2042 next = skb->next; 2043 prev = skb->prev; 2044 skb->next = skb->prev = NULL;
··· 1822 } 1823 1824 /** 1825 + * skb_queue_len_lockless - get queue length 1826 + * @list_: list to measure 1827 + * 1828 + * Return the length of an &sk_buff queue. 1829 + * This variant can be used in lockless contexts. 1830 + */ 1831 + static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_) 1832 + { 1833 + return READ_ONCE(list_->qlen); 1834 + } 1835 + 1836 + /** 1837 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head 1838 * @list: queue to initialize 1839 * ··· 2026 { 2027 struct sk_buff *next, *prev; 2028 2029 + WRITE_ONCE(list->qlen, list->qlen - 1); 2030 next = skb->next; 2031 prev = skb->prev; 2032 skb->next = skb->prev = NULL;
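The two hunks above work as a pair: __skb_unlink() now updates qlen with WRITE_ONCE() so that skb_queue_len_lockless() readers, which use READ_ONCE(), can sample the length without owning the queue spinlock; callers that do hold the lock keep using plain skb_queue_len(). A minimal illustrative reader, mirroring the af_unix change further below (the helper name here is hypothetical, not part of this series):

	/* Illustrative only: check a receive-queue backlog without taking
	 * the queue lock. The READ_ONCE() inside skb_queue_len_lockless()
	 * pairs with the WRITE_ONCE() update in __skb_unlink() above.
	 */
	static inline bool example_recvq_full_lockless(const struct sock *sk)
	{
		return skb_queue_len_lockless(&sk->sk_receive_queue) >
		       READ_ONCE(sk->sk_max_ack_backlog);
	}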
-5
include/net/ipx.h
··· 47 /* From af_ipx.c */ 48 extern int sysctl_ipx_pprop_broadcasting; 49 50 - static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb) 51 - { 52 - return (struct ipxhdr *)skb_transport_header(skb); 53 - } 54 - 55 struct ipx_interface { 56 /* IPX address */ 57 __be32 if_netnum;
··· 47 /* From af_ipx.c */ 48 extern int sysctl_ipx_pprop_broadcasting; 49 50 struct ipx_interface { 51 /* IPX address */ 52 __be32 if_netnum;
+2 -3
kernel/bpf/bpf_struct_ops.c
··· 96 97 static const struct btf_type *module_type; 98 99 - void bpf_struct_ops_init(struct btf *btf) 100 { 101 s32 type_id, value_id, module_id; 102 const struct btf_member *member; 103 struct bpf_struct_ops *st_ops; 104 - struct bpf_verifier_log log = {}; 105 const struct btf_type *t; 106 char value_name[128]; 107 const char *mname; ··· 171 member->type, 172 NULL); 173 if (func_proto && 174 - btf_distill_func_proto(&log, btf, 175 func_proto, mname, 176 &st_ops->func_models[j])) { 177 pr_warn("Error in parsing func ptr %s in struct %s\n",
··· 96 97 static const struct btf_type *module_type; 98 99 + void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) 100 { 101 s32 type_id, value_id, module_id; 102 const struct btf_member *member; 103 struct bpf_struct_ops *st_ops; 104 const struct btf_type *t; 105 char value_name[128]; 106 const char *mname; ··· 172 member->type, 173 NULL); 174 if (func_proto && 175 + btf_distill_func_proto(log, btf, 176 func_proto, mname, 177 &st_ops->func_models[j])) { 178 pr_warn("Error in parsing func ptr %s in struct %s\n",
+4 -6
kernel/bpf/btf.c
··· 3643 goto errout; 3644 } 3645 3646 - bpf_struct_ops_init(btf); 3647 3648 btf_verifier_env_free(env); 3649 refcount_set(&btf->refcnt, 1); ··· 3931 3932 if (btf_type_is_ptr(mtype)) { 3933 const struct btf_type *stype; 3934 3935 if (msize != size || off != moff) { 3936 bpf_log(log, ··· 3940 return -EACCES; 3941 } 3942 3943 - stype = btf_type_by_id(btf_vmlinux, mtype->type); 3944 - /* skip modifiers */ 3945 - while (btf_type_is_modifier(stype)) 3946 - stype = btf_type_by_id(btf_vmlinux, stype->type); 3947 if (btf_type_is_struct(stype)) { 3948 - *next_btf_id = mtype->type; 3949 return PTR_TO_BTF_ID; 3950 } 3951 }
··· 3643 goto errout; 3644 } 3645 3646 + bpf_struct_ops_init(btf, log); 3647 3648 btf_verifier_env_free(env); 3649 refcount_set(&btf->refcnt, 1); ··· 3931 3932 if (btf_type_is_ptr(mtype)) { 3933 const struct btf_type *stype; 3934 + u32 id; 3935 3936 if (msize != size || off != moff) { 3937 bpf_log(log, ··· 3939 return -EACCES; 3940 } 3941 3942 + stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id); 3943 if (btf_type_is_struct(stype)) { 3944 + *next_btf_id = id; 3945 return PTR_TO_BTF_ID; 3946 } 3947 }
+3 -2
net/core/bpf_sk_storage.c
··· 643 return ERR_PTR(-ENOMEM); 644 bpf_map_init_from_attr(&smap->map, attr); 645 646 /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */ 647 - smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus()))); 648 - nbuckets = 1U << smap->bucket_log; 649 cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap); 650 651 ret = bpf_map_charge_init(&smap->map.memory, cost);
··· 643 return ERR_PTR(-ENOMEM); 644 bpf_map_init_from_attr(&smap->map, attr); 645 646 + nbuckets = roundup_pow_of_two(num_possible_cpus()); 647 /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */ 648 + nbuckets = max_t(u32, 2, nbuckets); 649 + smap->bucket_log = ilog2(nbuckets); 650 cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap); 651 652 ret = bpf_map_charge_init(&smap->map.memory, cost);
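To make the reworked bucket sizing concrete, a short worked example (the CPU counts are hypothetical):

	/* Hypothetical inputs for the calculation above:
	 *
	 *   num_possible_cpus() = 1: roundup_pow_of_two(1) = 1,
	 *                            max_t(u32, 2, 1) = 2 -> bucket_log = ilog2(2) = 1
	 *   num_possible_cpus() = 6: roundup_pow_of_two(6) = 8,
	 *                            max_t(u32, 2, 8) = 8 -> bucket_log = ilog2(8) = 3
	 *
	 * nbuckets is therefore always a power of two and never less than 2,
	 * which keeps select_bucket() well defined.
	 */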
+6
net/core/devlink.c
··· 3986 goto out_unlock; 3987 } 3988 3989 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3990 &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, 3991 DEVLINK_CMD_REGION_READ);
··· 3986 goto out_unlock; 3987 } 3988 3989 + /* return 0 if there is no further data to read */ 3990 + if (start_offset >= region->size) { 3991 + err = 0; 3992 + goto out_unlock; 3993 + } 3994 + 3995 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 3996 &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI, 3997 DEVLINK_CMD_REGION_READ);
+3 -1
net/core/drop_monitor.c
··· 1000 { 1001 int cpu; 1002 1003 - if (!monitor_hw) 1004 NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled"); 1005 1006 monitor_hw = false; 1007
··· 1000 { 1001 int cpu; 1002 1003 + if (!monitor_hw) { 1004 NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled"); 1005 + return; 1006 + } 1007 1008 monitor_hw = false; 1009
+18 -10
net/core/sock_map.c
··· 234 int i; 235 236 synchronize_rcu(); 237 - rcu_read_lock(); 238 raw_spin_lock_bh(&stab->lock); 239 for (i = 0; i < stab->map.max_entries; i++) { 240 struct sock **psk = &stab->sks[i]; ··· 242 sk = xchg(psk, NULL); 243 if (sk) { 244 lock_sock(sk); 245 sock_map_unref(sk, psk); 246 release_sock(sk); 247 } 248 } 249 raw_spin_unlock_bh(&stab->lock); 250 - rcu_read_unlock(); 251 252 synchronize_rcu(); 253 254 bpf_map_area_free(stab->sks); ··· 417 ret = -EINVAL; 418 goto out; 419 } 420 - if (!sock_map_sk_is_suitable(sk) || 421 - sk->sk_state != TCP_ESTABLISHED) { 422 ret = -EOPNOTSUPP; 423 goto out; 424 } 425 426 sock_map_sk_acquire(sk); 427 - ret = sock_map_update_common(map, idx, sk, flags); 428 sock_map_sk_release(sk); 429 out: 430 fput(sock->file); ··· 742 ret = -EINVAL; 743 goto out; 744 } 745 - if (!sock_map_sk_is_suitable(sk) || 746 - sk->sk_state != TCP_ESTABLISHED) { 747 ret = -EOPNOTSUPP; 748 goto out; 749 } 750 751 sock_map_sk_acquire(sk); 752 - ret = sock_hash_update_common(map, key, sk, flags); 753 sock_map_sk_release(sk); 754 out: 755 fput(sock->file); ··· 864 int i; 865 866 synchronize_rcu(); 867 - rcu_read_lock(); 868 for (i = 0; i < htab->buckets_num; i++) { 869 bucket = sock_hash_select_bucket(htab, i); 870 raw_spin_lock_bh(&bucket->lock); 871 hlist_for_each_entry_safe(elem, node, &bucket->head, node) { 872 hlist_del_rcu(&elem->node); 873 lock_sock(elem->sk); 874 sock_map_unref(elem->sk, elem); 875 release_sock(elem->sk); 876 } 877 raw_spin_unlock_bh(&bucket->lock); 878 } 879 - rcu_read_unlock(); 880 881 bpf_map_area_free(htab->buckets); 882 kfree(htab);
··· 234 int i; 235 236 synchronize_rcu(); 237 raw_spin_lock_bh(&stab->lock); 238 for (i = 0; i < stab->map.max_entries; i++) { 239 struct sock **psk = &stab->sks[i]; ··· 243 sk = xchg(psk, NULL); 244 if (sk) { 245 lock_sock(sk); 246 + rcu_read_lock(); 247 sock_map_unref(sk, psk); 248 + rcu_read_unlock(); 249 release_sock(sk); 250 } 251 } 252 raw_spin_unlock_bh(&stab->lock); 253 254 + /* wait for psock readers accessing its map link */ 255 synchronize_rcu(); 256 257 bpf_map_area_free(stab->sks); ··· 416 ret = -EINVAL; 417 goto out; 418 } 419 + if (!sock_map_sk_is_suitable(sk)) { 420 ret = -EOPNOTSUPP; 421 goto out; 422 } 423 424 sock_map_sk_acquire(sk); 425 + if (sk->sk_state != TCP_ESTABLISHED) 426 + ret = -EOPNOTSUPP; 427 + else 428 + ret = sock_map_update_common(map, idx, sk, flags); 429 sock_map_sk_release(sk); 430 out: 431 fput(sock->file); ··· 739 ret = -EINVAL; 740 goto out; 741 } 742 + if (!sock_map_sk_is_suitable(sk)) { 743 ret = -EOPNOTSUPP; 744 goto out; 745 } 746 747 sock_map_sk_acquire(sk); 748 + if (sk->sk_state != TCP_ESTABLISHED) 749 + ret = -EOPNOTSUPP; 750 + else 751 + ret = sock_hash_update_common(map, key, sk, flags); 752 sock_map_sk_release(sk); 753 out: 754 fput(sock->file); ··· 859 int i; 860 861 synchronize_rcu(); 862 for (i = 0; i < htab->buckets_num; i++) { 863 bucket = sock_hash_select_bucket(htab, i); 864 raw_spin_lock_bh(&bucket->lock); 865 hlist_for_each_entry_safe(elem, node, &bucket->head, node) { 866 hlist_del_rcu(&elem->node); 867 lock_sock(elem->sk); 868 + rcu_read_lock(); 869 sock_map_unref(elem->sk, elem); 870 + rcu_read_unlock(); 871 release_sock(elem->sk); 872 } 873 raw_spin_unlock_bh(&bucket->lock); 874 } 875 + 876 + /* wait for psock readers accessing its map link */ 877 + synchronize_rcu(); 878 879 bpf_map_area_free(htab->buckets); 880 kfree(htab);
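The reordering above follows the usual RCU constraint that read-side critical sections must not sleep: lock_sock() can sleep, so it is taken first and rcu_read_lock() is held only around the non-sleeping unref, with a final synchronize_rcu() before the backing memory is freed. A stripped-down sketch of the pattern (names are illustrative, not lifted from this series):

	/* Illustrative teardown ordering only. */
	lock_sock(sk);			/* may sleep - must stay outside RCU */
	rcu_read_lock();		/* protects the psock/map link access */
	sock_map_unref(sk, link);	/* non-sleeping work under RCU */
	rcu_read_unlock();
	release_sock(sk);

	/* Wait for any lingering RCU readers of the map links before
	 * freeing the buckets/entries themselves.
	 */
	synchronize_rcu();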
+3
net/ipv6/addrconf.c
··· 5718 struct nlattr *tb[IFLA_INET6_MAX + 1]; 5719 int err; 5720 5721 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0) 5722 BUG(); 5723
··· 5718 struct nlattr *tb[IFLA_INET6_MAX + 1]; 5719 int err; 5720 5721 + if (!idev) 5722 + return -EAFNOSUPPORT; 5723 + 5724 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0) 5725 BUG(); 5726
+36 -70
net/mptcp/protocol.c
··· 24 25 #define MPTCP_SAME_STATE TCP_MAX_STATES 26 27 - static void __mptcp_close(struct sock *sk, long timeout); 28 - 29 - static const struct proto_ops *tcp_proto_ops(struct sock *sk) 30 - { 31 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 32 - if (sk->sk_family == AF_INET6) 33 - return &inet6_stream_ops; 34 #endif 35 - return &inet_stream_ops; 36 - } 37 - 38 - /* MP_CAPABLE handshake failed, convert msk to plain tcp, replacing 39 - * socket->sk and stream ops and destroying msk 40 - * return the msk socket, as we can't access msk anymore after this function 41 - * completes 42 - * Called with msk lock held, releases such lock before returning 43 - */ 44 - static struct socket *__mptcp_fallback_to_tcp(struct mptcp_sock *msk, 45 - struct sock *ssk) 46 - { 47 - struct mptcp_subflow_context *subflow; 48 - struct socket *sock; 49 - struct sock *sk; 50 - 51 - sk = (struct sock *)msk; 52 - sock = sk->sk_socket; 53 - subflow = mptcp_subflow_ctx(ssk); 54 - 55 - /* detach the msk socket */ 56 - list_del_init(&subflow->node); 57 - sock_orphan(sk); 58 - sock->sk = NULL; 59 - 60 - /* socket is now TCP */ 61 - lock_sock(ssk); 62 - sock_graft(ssk, sock); 63 - if (subflow->conn) { 64 - /* We can't release the ULP data on a live socket, 65 - * restore the tcp callback 66 - */ 67 - mptcp_subflow_tcp_fallback(ssk, subflow); 68 - sock_put(subflow->conn); 69 - subflow->conn = NULL; 70 - } 71 - release_sock(ssk); 72 - sock->ops = tcp_proto_ops(ssk); 73 - 74 - /* destroy the left-over msk sock */ 75 - __mptcp_close(sk, 0); 76 - return sock; 77 - } 78 79 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not 80 * completed yet or has failed, return the subflow socket. ··· 48 return msk->first && !sk_is_mptcp(msk->first); 49 } 50 51 - /* if the mp_capable handshake has failed, it fallbacks msk to plain TCP, 52 - * releases the socket lock and returns a reference to the now TCP socket. 
53 - * Otherwise returns NULL 54 - */ 55 static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk) 56 { 57 sock_owned_by_me((const struct sock *)msk); ··· 56 return NULL; 57 58 if (msk->subflow) { 59 - /* the first subflow is an active connection, discart the 60 - * paired socket 61 - */ 62 - msk->subflow->sk = NULL; 63 - sock_release(msk->subflow); 64 - msk->subflow = NULL; 65 } 66 67 - return __mptcp_fallback_to_tcp(msk, msk->first); 68 } 69 70 static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk) ··· 587 } 588 589 /* Called with msk lock held, releases such lock before returning */ 590 - static void __mptcp_close(struct sock *sk, long timeout) 591 { 592 struct mptcp_subflow_context *subflow, *tmp; 593 struct mptcp_sock *msk = mptcp_sk(sk); 594 LIST_HEAD(conn_list); 595 596 mptcp_token_destroy(msk->token); 597 inet_sk_state_store(sk, TCP_CLOSE); ··· 609 } 610 611 sk_common_release(sk); 612 - } 613 - 614 - static void mptcp_close(struct sock *sk, long timeout) 615 - { 616 - lock_sock(sk); 617 - __mptcp_close(sk, timeout); 618 } 619 620 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) ··· 632 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; 633 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; 634 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; 635 } 636 637 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, ··· 688 lock_sock(sk); 689 690 local_bh_disable(); 691 - new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC); 692 if (!new_mptcp_sock) { 693 *err = -ENOBUFS; 694 local_bh_enable(); ··· 1237 strcpy(mptcp_v6_prot.name, "MPTCPv6"); 1238 mptcp_v6_prot.slab = NULL; 1239 mptcp_v6_prot.destroy = mptcp_v6_destroy; 1240 - mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) + 1241 - sizeof(struct ipv6_pinfo); 1242 1243 err = proto_register(&mptcp_v6_prot, 1); 1244 if (err)
··· 24 25 #define MPTCP_SAME_STATE TCP_MAX_STATES 26 27 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 28 + struct mptcp6_sock { 29 + struct mptcp_sock msk; 30 + struct ipv6_pinfo np; 31 + }; 32 #endif 33 34 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not 35 * completed yet or has failed, return the subflow socket. ··· 93 return msk->first && !sk_is_mptcp(msk->first); 94 } 95 96 static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk) 97 { 98 sock_owned_by_me((const struct sock *)msk); ··· 105 return NULL; 106 107 if (msk->subflow) { 108 + release_sock((struct sock *)msk); 109 + return msk->subflow; 110 } 111 112 + return NULL; 113 } 114 115 static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk) ··· 640 } 641 642 /* Called with msk lock held, releases such lock before returning */ 643 + static void mptcp_close(struct sock *sk, long timeout) 644 { 645 struct mptcp_subflow_context *subflow, *tmp; 646 struct mptcp_sock *msk = mptcp_sk(sk); 647 LIST_HEAD(conn_list); 648 + 649 + lock_sock(sk); 650 651 mptcp_token_destroy(msk->token); 652 inet_sk_state_store(sk, TCP_CLOSE); ··· 660 } 661 662 sk_common_release(sk); 663 } 664 665 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) ··· 689 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; 690 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; 691 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; 692 + } 693 + 694 + #if IS_ENABLED(CONFIG_MPTCP_IPV6) 695 + static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) 696 + { 697 + unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo); 698 + 699 + return (struct ipv6_pinfo *)(((u8 *)sk) + offset); 700 + } 701 + #endif 702 + 703 + struct sock *mptcp_sk_clone_lock(const struct sock *sk) 704 + { 705 + struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); 706 + 707 + if (!nsk) 708 + return NULL; 709 + 710 + #if IS_ENABLED(CONFIG_MPTCP_IPV6) 711 + if (nsk->sk_family == AF_INET6) 712 + inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); 713 + #endif 714 + 715 + return nsk; 716 } 717 718 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, ··· 721 lock_sock(sk); 722 723 local_bh_disable(); 724 + new_mptcp_sock = mptcp_sk_clone_lock(sk); 725 if (!new_mptcp_sock) { 726 *err = -ENOBUFS; 727 local_bh_enable(); ··· 1270 strcpy(mptcp_v6_prot.name, "MPTCPv6"); 1271 mptcp_v6_prot.slab = NULL; 1272 mptcp_v6_prot.destroy = mptcp_v6_destroy; 1273 + mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); 1274 1275 err = proto_register(&mptcp_v6_prot, 1); 1276 if (err)
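The IPv6 part of the change above appears to follow the common kernel layout in which struct ipv6_pinfo sits at the tail of the protocol socket object and inet_sk(sk)->pinet6 points into that same object; sk_clone_lock() copies the parent's pointer verbatim, so mptcp_sk_clone_lock() presumably re-derives pinet6 from the clone itself. A schematic of the offset computation (illustrative comment only):

	/* Layout assumed by mptcp_inet6_sk() above:
	 *
	 *   struct mptcp6_sock {
	 *           struct mptcp_sock msk;   <-- sk points here
	 *           struct ipv6_pinfo np;    <-- found at
	 *   };                                   sizeof(*mptcp6_sock) - sizeof(np)
	 *
	 * i.e. the "pinfo lives at the end of the socket object" convention
	 * that generic IPv6 socket code relies on.
	 */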
+19 -3
net/rxrpc/call_object.c
··· 562 } 563 564 /* 565 - * Final call destruction under RCU. 566 */ 567 - static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) 568 { 569 - struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); 570 struct rxrpc_net *rxnet = call->rxnet; 571 572 rxrpc_put_connection(call->conn); ··· 576 kmem_cache_free(rxrpc_call_jar, call); 577 if (atomic_dec_and_test(&rxnet->nr_calls)) 578 wake_up_var(&rxnet->nr_calls); 579 } 580 581 /*
··· 562 } 563 564 /* 565 + * Final call destruction - but must be done in process context. 566 */ 567 + static void rxrpc_destroy_call(struct work_struct *work) 568 { 569 + struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); 570 struct rxrpc_net *rxnet = call->rxnet; 571 572 rxrpc_put_connection(call->conn); ··· 576 kmem_cache_free(rxrpc_call_jar, call); 577 if (atomic_dec_and_test(&rxnet->nr_calls)) 578 wake_up_var(&rxnet->nr_calls); 579 + } 580 + 581 + /* 582 + * Final call destruction under RCU. 583 + */ 584 + static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) 585 + { 586 + struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); 587 + 588 + if (in_softirq()) { 589 + INIT_WORK(&call->processor, rxrpc_destroy_call); 590 + if (!rxrpc_queue_work(&call->processor)) 591 + BUG(); 592 + } else { 593 + rxrpc_destroy_call(&call->processor); 594 + } 595 } 596 597 /*
+1 -2
net/rxrpc/conn_object.c
··· 171 172 _enter("%d,%x", conn->debug_id, call->cid); 173 174 - set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 175 - 176 if (rcu_access_pointer(chan->call) == call) { 177 /* Save the result of the call so that we can repeat it if necessary 178 * through the channel, whilst disposing of the actual call record. ··· 223 __rxrpc_disconnect_call(conn, call); 224 spin_unlock(&conn->channel_lock); 225 226 conn->idle_timestamp = jiffies; 227 } 228
··· 171 172 _enter("%d,%x", conn->debug_id, call->cid); 173 174 if (rcu_access_pointer(chan->call) == call) { 175 /* Save the result of the call so that we can repeat it if necessary 176 * through the channel, whilst disposing of the actual call record. ··· 225 __rxrpc_disconnect_call(conn, call); 226 spin_unlock(&conn->channel_lock); 227 228 + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 229 conn->idle_timestamp = jiffies; 230 } 231
+1 -2
net/sched/cls_tcindex.c
··· 365 366 err = tcindex_filter_result_init(&new_filter_result, net); 367 if (err < 0) 368 - goto errout1; 369 if (old_r) 370 cr = r->res; 371 ··· 484 tcindex_free_perfect_hash(cp); 485 else if (balloc == 2) 486 kfree(cp->h); 487 - errout1: 488 tcf_exts_destroy(&new_filter_result.exts); 489 errout: 490 kfree(cp);
··· 365 366 err = tcindex_filter_result_init(&new_filter_result, net); 367 if (err < 0) 368 + goto errout_alloc; 369 if (old_r) 370 cr = r->res; 371 ··· 484 tcindex_free_perfect_hash(cp); 485 else if (balloc == 2) 486 kfree(cp->h); 487 tcf_exts_destroy(&new_filter_result.exts); 488 errout: 489 kfree(cp);
+1 -1
net/sched/sch_fq_pie.c
··· 349 while (sch->q.qlen > sch->limit) { 350 struct sk_buff *skb = fq_pie_qdisc_dequeue(sch); 351 352 - kfree_skb(skb); 353 len_dropped += qdisc_pkt_len(skb); 354 num_dropped += 1; 355 } 356 qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped); 357
··· 349 while (sch->q.qlen > sch->limit) { 350 struct sk_buff *skb = fq_pie_qdisc_dequeue(sch); 351 352 len_dropped += qdisc_pkt_len(skb); 353 num_dropped += 1; 354 + rtnl_kfree_skbs(skb, skb); 355 } 356 qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped); 357
+57 -35
net/sched/sch_taprio.c
··· 31 32 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) 33 #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD) 34 35 struct sched_entry { 36 struct list_head list; ··· 767 [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, 768 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 }, 769 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, 770 }; 771 772 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, ··· 1369 return 0; 1370 } 1371 1372 static int taprio_change(struct Qdisc *sch, struct nlattr *opt, 1373 struct netlink_ext_ack *extack) 1374 { ··· 1404 struct taprio_sched *q = qdisc_priv(sch); 1405 struct net_device *dev = qdisc_dev(sch); 1406 struct tc_mqprio_qopt *mqprio = NULL; 1407 - u32 taprio_flags = 0; 1408 unsigned long flags; 1409 ktime_t start; 1410 int i, err; ··· 1416 if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) 1417 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); 1418 1419 - if (tb[TCA_TAPRIO_ATTR_FLAGS]) { 1420 - taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]); 1421 1422 - if (q->flags != 0 && q->flags != taprio_flags) { 1423 - NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported"); 1424 - return -EOPNOTSUPP; 1425 - } else if (!taprio_flags_valid(taprio_flags)) { 1426 - NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid"); 1427 - return -EINVAL; 1428 - } 1429 1430 - q->flags = taprio_flags; 1431 - } 1432 - 1433 - err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags); 1434 if (err < 0) 1435 return err; 1436 ··· 1465 1466 taprio_set_picos_per_byte(dev, q); 1467 1468 - if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) 1469 err = taprio_enable_offload(dev, mqprio, q, new_admin, extack); 1470 else 1471 err = taprio_disable_offload(dev, q, extack); ··· 1498 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); 1499 } 1500 1501 - if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) && 1502 - !FULL_OFFLOAD_IS_ENABLED(taprio_flags) && 1503 !hrtimer_active(&q->advance_timer)) { 1504 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); 1505 q->advance_timer.function = advance_sched; 1506 } 1507 1508 - if (mqprio) { 1509 - netdev_set_num_tc(dev, mqprio->num_tc); 1510 - for (i = 0; i < mqprio->num_tc; i++) 1511 - netdev_set_tc_queue(dev, i, 1512 - mqprio->count[i], 1513 - mqprio->offset[i]); 1514 - 1515 - /* Always use supplied priority mappings */ 1516 - for (i = 0; i <= TC_BITMASK; i++) 1517 - netdev_set_prio_tc_map(dev, i, 1518 - mqprio->prio_tc_map[i]); 1519 - } 1520 - 1521 - if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) { 1522 q->dequeue = taprio_dequeue_offload; 1523 q->peek = taprio_peek_offload; 1524 } else { ··· 1522 goto unlock; 1523 } 1524 1525 - if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) { 1526 - setup_txtime(q, new_admin, start); 1527 1528 if (!oper) { 1529 rcu_assign_pointer(q->oper_sched, new_admin); 1530 err = 0; ··· 1549 1550 spin_unlock_irqrestore(&q->current_entry_lock, flags); 1551 1552 - if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) 1553 taprio_offload_config_changed(q); 1554 } 1555 ··· 1588 } 1589 q->qdiscs = NULL; 1590 1591 - netdev_set_num_tc(dev, 0); 1592 1593 if (q->oper_sched) 1594 call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb); ··· 1618 * and get the valid one on taprio_change(). 1619 */ 1620 q->clockid = -1; 1621 1622 spin_lock(&taprio_list_lock); 1623 list_add(&q->taprio_list, &taprio_list);
··· 31 32 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) 33 #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD) 34 + #define TAPRIO_FLAGS_INVALID U32_MAX 35 36 struct sched_entry { 37 struct list_head list; ··· 766 [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, 767 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 }, 768 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, 769 + [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 }, 770 }; 771 772 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, ··· 1367 return 0; 1368 } 1369 1370 + /* The semantics of the 'flags' argument in relation to 'change()' 1371 + * requests, are interpreted following two rules (which are applied in 1372 + * this order): (1) an omitted 'flags' argument is interpreted as 1373 + * zero; (2) the 'flags' of a "running" taprio instance cannot be 1374 + * changed. 1375 + */ 1376 + static int taprio_new_flags(const struct nlattr *attr, u32 old, 1377 + struct netlink_ext_ack *extack) 1378 + { 1379 + u32 new = 0; 1380 + 1381 + if (attr) 1382 + new = nla_get_u32(attr); 1383 + 1384 + if (old != TAPRIO_FLAGS_INVALID && old != new) { 1385 + NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported"); 1386 + return -EOPNOTSUPP; 1387 + } 1388 + 1389 + if (!taprio_flags_valid(new)) { 1390 + NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid"); 1391 + return -EINVAL; 1392 + } 1393 + 1394 + return new; 1395 + } 1396 + 1397 static int taprio_change(struct Qdisc *sch, struct nlattr *opt, 1398 struct netlink_ext_ack *extack) 1399 { ··· 1375 struct taprio_sched *q = qdisc_priv(sch); 1376 struct net_device *dev = qdisc_dev(sch); 1377 struct tc_mqprio_qopt *mqprio = NULL; 1378 unsigned long flags; 1379 ktime_t start; 1380 int i, err; ··· 1388 if (tb[TCA_TAPRIO_ATTR_PRIOMAP]) 1389 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]); 1390 1391 + err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS], 1392 + q->flags, extack); 1393 + if (err < 0) 1394 + return err; 1395 1396 + q->flags = err; 1397 1398 + err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags); 1399 if (err < 0) 1400 return err; 1401 ··· 1444 1445 taprio_set_picos_per_byte(dev, q); 1446 1447 + if (mqprio) { 1448 + netdev_set_num_tc(dev, mqprio->num_tc); 1449 + for (i = 0; i < mqprio->num_tc; i++) 1450 + netdev_set_tc_queue(dev, i, 1451 + mqprio->count[i], 1452 + mqprio->offset[i]); 1453 + 1454 + /* Always use supplied priority mappings */ 1455 + for (i = 0; i <= TC_BITMASK; i++) 1456 + netdev_set_prio_tc_map(dev, i, 1457 + mqprio->prio_tc_map[i]); 1458 + } 1459 + 1460 + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) 1461 err = taprio_enable_offload(dev, mqprio, q, new_admin, extack); 1462 else 1463 err = taprio_disable_offload(dev, q, extack); ··· 1464 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); 1465 } 1466 1467 + if (!TXTIME_ASSIST_IS_ENABLED(q->flags) && 1468 + !FULL_OFFLOAD_IS_ENABLED(q->flags) && 1469 !hrtimer_active(&q->advance_timer)) { 1470 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); 1471 q->advance_timer.function = advance_sched; 1472 } 1473 1474 + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { 1475 q->dequeue = taprio_dequeue_offload; 1476 q->peek = taprio_peek_offload; 1477 } else { ··· 1501 goto unlock; 1502 } 1503 1504 + setup_txtime(q, new_admin, start); 1505 1506 + if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { 1507 if (!oper) { 1508 rcu_assign_pointer(q->oper_sched, new_admin); 1509 err = 0; ··· 
1528 1529 spin_unlock_irqrestore(&q->current_entry_lock, flags); 1530 1531 + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) 1532 taprio_offload_config_changed(q); 1533 } 1534 ··· 1567 } 1568 q->qdiscs = NULL; 1569 1570 + netdev_reset_tc(dev); 1571 1572 if (q->oper_sched) 1573 call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb); ··· 1597 * and get the valid one on taprio_change(). 1598 */ 1599 q->clockid = -1; 1600 + q->flags = TAPRIO_FLAGS_INVALID; 1601 1602 spin_lock(&taprio_list_lock); 1603 list_add(&q->taprio_list, &taprio_list);
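A few worked cases for the flags handling introduced above (values and orderings are illustrative, derived only from the hunk shown):

	/* Illustrative outcomes of taprio_new_flags(attr, old, extack):
	 *
	 *   fresh qdisc (old == TAPRIO_FLAGS_INVALID), attr omitted         -> returns 0
	 *   fresh qdisc, attr = TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD           -> returns that flag
	 *   running qdisc with old = 0, attr = FULL_OFFLOAD (new != old)    -> -EOPNOTSUPP
	 *   running qdisc with old = FULL_OFFLOAD, attr omitted (new = 0)   -> -EOPNOTSUPP
	 *   fresh qdisc, attr with a bit outside the known flags            -> -EINVAL
	 */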
+9 -2
net/unix/af_unix.c
··· 189 return unix_peer(osk) == NULL || unix_our_peer(sk, osk); 190 } 191 192 - static inline int unix_recvq_full(struct sock const *sk) 193 { 194 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; 195 } 196 197 struct sock *unix_peer_get(struct sock *s) ··· 1764 * - unix_peer(sk) == sk by time of get but disconnected before lock 1765 */ 1766 if (other != sk && 1767 - unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { 1768 if (timeo) { 1769 timeo = unix_wait_for_peer(other, timeo); 1770
··· 189 return unix_peer(osk) == NULL || unix_our_peer(sk, osk); 190 } 191 192 + static inline int unix_recvq_full(const struct sock *sk) 193 { 194 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; 195 + } 196 + 197 + static inline int unix_recvq_full_lockless(const struct sock *sk) 198 + { 199 + return skb_queue_len_lockless(&sk->sk_receive_queue) > 200 + READ_ONCE(sk->sk_max_ack_backlog); 201 } 202 203 struct sock *unix_peer_get(struct sock *s) ··· 1758 * - unix_peer(sk) == sk by time of get but disconnected before lock 1759 */ 1760 if (other != sk && 1761 + unlikely(unix_peer(other) != sk && 1762 + unix_recvq_full_lockless(other))) { 1763 if (timeo) { 1764 timeo = unix_wait_for_peer(other, timeo); 1765
+2 -2
samples/bpf/xdpsock_user.c
··· 83 static u32 opt_umem_flags; 84 static int opt_unaligned_chunks; 85 static int opt_mmap_flags; 86 - static u32 opt_xdp_bind_flags; 87 static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 88 static int opt_timeout = 1000; 89 static bool opt_need_wakeup = true; ··· 788 int ret; 789 790 ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); 791 - if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY) 792 return; 793 exit_with_error(errno); 794 }
··· 83 static u32 opt_umem_flags; 84 static int opt_unaligned_chunks; 85 static int opt_mmap_flags; 86 static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 87 static int opt_timeout = 1000; 88 static bool opt_need_wakeup = true; ··· 789 int ret; 790 791 ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); 792 + if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || 793 + errno == EBUSY || errno == ENETDOWN) 794 return; 795 exit_with_error(errno); 796 }
+1 -1
tools/bpf/bpftool/feature.c
··· 580 res = bpf_probe_large_insn_limit(ifindex); 581 print_bool_feature("have_large_insn_limit", 582 "Large program size limit", 583 - "HAVE_LARGE_INSN_LIMIT", 584 res, define_prefix); 585 } 586
··· 580 res = bpf_probe_large_insn_limit(ifindex); 581 print_bool_feature("have_large_insn_limit", 582 "Large program size limit", 583 + "LARGE_INSN_LIMIT", 584 res, define_prefix); 585 } 586
+1 -1
tools/bpf/bpftool/prog.c
··· 536 buf = (unsigned char *)(info->jited_prog_insns); 537 member_len = info->jited_prog_len; 538 } else { /* DUMP_XLATED */ 539 - if (info->xlated_prog_len == 0) { 540 p_err("error retrieving insn dump: kernel.kptr_restrict set?"); 541 return -1; 542 }
··· 536 buf = (unsigned char *)(info->jited_prog_insns); 537 member_len = info->jited_prog_len; 538 } else { /* DUMP_XLATED */ 539 + if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) { 540 p_err("error retrieving insn dump: kernel.kptr_restrict set?"); 541 return -1; 542 }
+2 -2
tools/bpf/runqslower/Makefile
··· 41 42 $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) 43 $(call msg,BINARY,$@) 44 - $(Q)$(CC) $(CFLAGS) -lelf -lz $^ -o $@ 45 46 $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ 47 $(OUTPUT)/runqslower.bpf.o ··· 75 fi 76 $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@ 77 78 - $(BPFOBJ): | $(OUTPUT) 79 $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \ 80 OUTPUT=$(abspath $(dir $@))/ $(abspath $@) 81
··· 41 42 $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) 43 $(call msg,BINARY,$@) 44 + $(Q)$(CC) $(CFLAGS) $^ -lelf -lz -o $@ 45 46 $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ 47 $(OUTPUT)/runqslower.bpf.o ··· 75 fi 76 $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@ 77 78 + $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT) 79 $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \ 80 OUTPUT=$(abspath $(dir $@))/ $(abspath $@) 81
+74
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
···
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) 2020 Cloudflare 3 + 4 + #include "test_progs.h" 5 + 6 + static int connected_socket_v4(void) 7 + { 8 + struct sockaddr_in addr = { 9 + .sin_family = AF_INET, 10 + .sin_port = htons(80), 11 + .sin_addr = { inet_addr("127.0.0.1") }, 12 + }; 13 + socklen_t len = sizeof(addr); 14 + int s, repair, err; 15 + 16 + s = socket(AF_INET, SOCK_STREAM, 0); 17 + if (CHECK_FAIL(s == -1)) 18 + goto error; 19 + 20 + repair = TCP_REPAIR_ON; 21 + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); 22 + if (CHECK_FAIL(err)) 23 + goto error; 24 + 25 + err = connect(s, (struct sockaddr *)&addr, len); 26 + if (CHECK_FAIL(err)) 27 + goto error; 28 + 29 + repair = TCP_REPAIR_OFF_NO_WP; 30 + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); 31 + if (CHECK_FAIL(err)) 32 + goto error; 33 + 34 + return s; 35 + error: 36 + perror(__func__); 37 + close(s); 38 + return -1; 39 + } 40 + 41 + /* Create a map, populate it with one socket, and free the map. */ 42 + static void test_sockmap_create_update_free(enum bpf_map_type map_type) 43 + { 44 + const int zero = 0; 45 + int s, map, err; 46 + 47 + s = connected_socket_v4(); 48 + if (CHECK_FAIL(s == -1)) 49 + return; 50 + 51 + map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0); 52 + if (CHECK_FAIL(map == -1)) { 53 + perror("bpf_create_map"); 54 + goto out; 55 + } 56 + 57 + err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST); 58 + if (CHECK_FAIL(err)) { 59 + perror("bpf_map_update"); 60 + goto out; 61 + } 62 + 63 + out: 64 + close(map); 65 + close(s); 66 + } 67 + 68 + void test_sockmap_basic(void) 69 + { 70 + if (test__start_subtest("sockmap create_update_free")) 71 + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP); 72 + if (test__start_subtest("sockhash create_update_free")) 73 + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH); 74 + }
+1 -1
tools/testing/selftests/bpf/prog_tests/trampoline_count.c
··· 46 const char *fentry_name = "fentry/__set_task_comm"; 47 const char *fexit_name = "fexit/__set_task_comm"; 48 const char *object = "test_trampoline_count.o"; 49 - struct inst inst[MAX_TRAMP_PROGS] = { 0 }; 50 int err, i = 0, duration = 0; 51 struct bpf_object *obj; 52 struct bpf_link *link;
··· 46 const char *fentry_name = "fentry/__set_task_comm"; 47 const char *fexit_name = "fexit/__set_task_comm"; 48 const char *object = "test_trampoline_count.o"; 49 + struct inst inst[MAX_TRAMP_PROGS] = {}; 50 int err, i = 0, duration = 0; 51 struct bpf_object *obj; 52 struct bpf_link *link;
+76
tools/testing/selftests/drivers/net/mlxsw/fib.sh
··· 14 ipv4_plen 15 ipv4_replay 16 ipv4_flush 17 ipv6_add 18 ipv6_metric 19 ipv6_append_single ··· 27 ipv6_delete_multipath 28 ipv6_replay_single 29 ipv6_replay_multipath 30 " 31 NUM_NETIFS=0 32 source $lib_dir/lib.sh ··· 91 fib_ipv4_flush_test "testns1" 92 } 93 94 ipv6_add() 95 { 96 fib_ipv6_add_test "testns1" ··· 186 ipv6_replay_multipath() 187 { 188 fib_ipv6_replay_multipath_test "testns1" "$DEVLINK_DEV" 189 } 190 191 setup_prepare()
··· 14 ipv4_plen 15 ipv4_replay 16 ipv4_flush 17 + ipv4_local_replace 18 ipv6_add 19 ipv6_metric 20 ipv6_append_single ··· 26 ipv6_delete_multipath 27 ipv6_replay_single 28 ipv6_replay_multipath 29 + ipv6_local_replace 30 " 31 NUM_NETIFS=0 32 source $lib_dir/lib.sh ··· 89 fib_ipv4_flush_test "testns1" 90 } 91 92 + ipv4_local_replace() 93 + { 94 + local ns="testns1" 95 + 96 + RET=0 97 + 98 + ip -n $ns link add name dummy1 type dummy 99 + ip -n $ns link set dev dummy1 up 100 + 101 + ip -n $ns route add table local 192.0.2.1/32 dev dummy1 102 + fib4_trap_check $ns "table local 192.0.2.1/32 dev dummy1" false 103 + check_err $? "Local table route not in hardware when should" 104 + 105 + ip -n $ns route add table main 192.0.2.1/32 dev dummy1 106 + fib4_trap_check $ns "table main 192.0.2.1/32 dev dummy1" true 107 + check_err $? "Main table route in hardware when should not" 108 + 109 + fib4_trap_check $ns "table local 192.0.2.1/32 dev dummy1" false 110 + check_err $? "Local table route was replaced when should not" 111 + 112 + # Test that local routes can replace routes in main table. 113 + ip -n $ns route add table main 192.0.2.2/32 dev dummy1 114 + fib4_trap_check $ns "table main 192.0.2.2/32 dev dummy1" false 115 + check_err $? "Main table route not in hardware when should" 116 + 117 + ip -n $ns route add table local 192.0.2.2/32 dev dummy1 118 + fib4_trap_check $ns "table local 192.0.2.2/32 dev dummy1" false 119 + check_err $? "Local table route did not replace route in main table when should" 120 + 121 + fib4_trap_check $ns "table main 192.0.2.2/32 dev dummy1" true 122 + check_err $? "Main table route was not replaced when should" 123 + 124 + log_test "IPv4 local table route replacement" 125 + 126 + ip -n $ns link del dev dummy1 127 + } 128 + 129 ipv6_add() 130 { 131 fib_ipv6_add_test "testns1" ··· 147 ipv6_replay_multipath() 148 { 149 fib_ipv6_replay_multipath_test "testns1" "$DEVLINK_DEV" 150 + } 151 + 152 + ipv6_local_replace() 153 + { 154 + local ns="testns1" 155 + 156 + RET=0 157 + 158 + ip -n $ns link add name dummy1 type dummy 159 + ip -n $ns link set dev dummy1 up 160 + 161 + ip -n $ns route add table local 2001:db8:1::1/128 dev dummy1 162 + fib6_trap_check $ns "table local 2001:db8:1::1/128 dev dummy1" false 163 + check_err $? "Local table route not in hardware when should" 164 + 165 + ip -n $ns route add table main 2001:db8:1::1/128 dev dummy1 166 + fib6_trap_check $ns "table main 2001:db8:1::1/128 dev dummy1" true 167 + check_err $? "Main table route in hardware when should not" 168 + 169 + fib6_trap_check $ns "table local 2001:db8:1::1/128 dev dummy1" false 170 + check_err $? "Local table route was replaced when should not" 171 + 172 + # Test that local routes can replace routes in main table. 173 + ip -n $ns route add table main 2001:db8:1::2/128 dev dummy1 174 + fib6_trap_check $ns "table main 2001:db8:1::2/128 dev dummy1" false 175 + check_err $? "Main table route not in hardware when should" 176 + 177 + ip -n $ns route add table local 2001:db8:1::2/128 dev dummy1 178 + fib6_trap_check $ns "table local 2001:db8:1::2/128 dev dummy1" false 179 + check_err $? "Local route route did not replace route in main table when should" 180 + 181 + fib6_trap_check $ns "table main 2001:db8:1::2/128 dev dummy1" true 182 + check_err $? "Main table route was not replaced when should" 183 + 184 + log_test "IPv6 local table route replacement" 185 + 186 + ip -n $ns link del dev dummy1 187 } 188 189 setup_prepare()
+9
tools/testing/selftests/net/mptcp/mptcp_connect.c
··· 634 cfg_host, a, cfg_port, b); 635 } 636 637 int main_loop_s(int listensock) 638 { 639 struct sockaddr_storage ss; ··· 665 salen = sizeof(ss); 666 remotesock = accept(listensock, (struct sockaddr *)&ss, &salen); 667 if (remotesock >= 0) { 668 check_sockaddr(pf, &ss, salen); 669 check_getpeername(remotesock, &ss, salen); 670
··· 634 cfg_host, a, cfg_port, b); 635 } 636 637 + static void maybe_close(int fd) 638 + { 639 + unsigned int r = rand(); 640 + 641 + if (r & 1) 642 + close(fd); 643 + } 644 + 645 int main_loop_s(int listensock) 646 { 647 struct sockaddr_storage ss; ··· 657 salen = sizeof(ss); 658 remotesock = accept(listensock, (struct sockaddr *)&ss, &salen); 659 if (remotesock >= 0) { 660 + maybe_close(listensock); 661 check_sockaddr(pf, &ss, salen); 662 check_getpeername(remotesock, &ss, salen); 663
+14 -9
tools/testing/selftests/wireguard/netns.sh
··· 38 ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } 39 ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } 40 sleep() { read -t "$1" -N 1 || true; } 41 - waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } 42 - waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } 43 - waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } 44 waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } 45 46 cleanup() { ··· 118 119 # TCP over IPv4 120 n2 iperf3 -s -1 -B 192.168.241.2 & 121 - waitiperf $netns2 122 n1 iperf3 -Z -t 3 -c 192.168.241.2 123 124 # TCP over IPv6 125 n1 iperf3 -s -1 -B fd00::1 & 126 - waitiperf $netns1 127 n2 iperf3 -Z -t 3 -c fd00::1 128 129 # UDP over IPv4 130 n1 iperf3 -s -1 -B 192.168.241.1 & 131 - waitiperf $netns1 132 n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1 133 134 # UDP over IPv6 135 n2 iperf3 -s -1 -B fd00::2 & 136 - waitiperf $netns2 137 n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 138 } 139 ··· 206 n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24 207 exec 4< <(n1 ncat -l -u -p 1111) 208 ncat_pid=$! 209 - waitncatudp $netns1 210 n2 ncat -u 192.168.241.1 1111 <<<"X" 211 read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]] 212 kill $ncat_pid ··· 215 n2 wg set wg0 listen-port 9997 216 exec 4< <(n1 ncat -l -u -p 1111) 217 ncat_pid=$! 218 - waitncatudp $netns1 219 n2 ncat -u 192.168.241.1 1111 <<<"X" 220 ! read -r -N 1 -t 1 out <&4 || false 221 kill $ncat_pid ··· 515 n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 516 n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 517 n0 wg set wg0 peer "$pub2" allowed-ips ::/0 518 ip0 link del wg0 519 520 declare -A objects
··· 38 ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } 39 ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } 40 sleep() { read -t "$1" -N 1 || true; } 41 + waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } 42 + waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } 43 waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } 44 45 cleanup() { ··· 119 120 # TCP over IPv4 121 n2 iperf3 -s -1 -B 192.168.241.2 & 122 + waitiperf $netns2 $! 123 n1 iperf3 -Z -t 3 -c 192.168.241.2 124 125 # TCP over IPv6 126 n1 iperf3 -s -1 -B fd00::1 & 127 + waitiperf $netns1 $! 128 n2 iperf3 -Z -t 3 -c fd00::1 129 130 # UDP over IPv4 131 n1 iperf3 -s -1 -B 192.168.241.1 & 132 + waitiperf $netns1 $! 133 n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1 134 135 # UDP over IPv6 136 n2 iperf3 -s -1 -B fd00::2 & 137 + waitiperf $netns2 $! 138 n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 139 } 140 ··· 207 n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24 208 exec 4< <(n1 ncat -l -u -p 1111) 209 ncat_pid=$! 210 + waitncatudp $netns1 $ncat_pid 211 n2 ncat -u 192.168.241.1 1111 <<<"X" 212 read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]] 213 kill $ncat_pid ··· 216 n2 wg set wg0 listen-port 9997 217 exec 4< <(n1 ncat -l -u -p 1111) 218 ncat_pid=$! 219 + waitncatudp $netns1 $ncat_pid 220 n2 ncat -u 192.168.241.1 1111 <<<"X" 221 ! read -r -N 1 -t 1 out <&4 || false 222 kill $ncat_pid ··· 516 n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 517 n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 518 n0 wg set wg0 peer "$pub2" allowed-ips ::/0 519 + n0 wg set wg0 peer "$pub2" remove 520 + low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= ) 521 + n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer } 522 + [[ -z $(n0 wg show wg0 peers) ]] 523 + n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer } 524 + [[ -z $(n0 wg show wg0 peers) ]] 525 ip0 link del wg0 526 527 declare -A objects
-1
tools/testing/selftests/wireguard/qemu/debug.config
··· 1 CONFIG_LOCALVERSION="-debug" 2 - CONFIG_ENABLE_WARN_DEPRECATED=y 3 CONFIG_ENABLE_MUST_CHECK=y 4 CONFIG_FRAME_POINTER=y 5 CONFIG_STACK_VALIDATION=y
··· 1 CONFIG_LOCALVERSION="-debug" 2 CONFIG_ENABLE_MUST_CHECK=y 3 CONFIG_FRAME_POINTER=y 4 CONFIG_STACK_VALIDATION=y