Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

1) Unbalanced locking in mwifiex_process_country_ie, from Brian Norris.

2) Fix thermal zone registration in iwlwifi, from Andrei
Otcheretianski.

3) Fix double free_irq in sgi ioc3 eth, from Thomas Bogendoerfer.

4) Use after free in mptcp, from Florian Westphal.

5) Use after free in wireguard's root_remove_peer_lists, from Eric
Dumazet.

6) Properly access packet headers in bonding alb code, from Eric
Dumazet.

7) Fix data race in skb_queue_len(), from Qian Cai.

8) Fix regression in r8169 on some chips, from Heiner Kallweit.

9) Fix XDP program ref counting in hv_netvsc, from Haiyang Zhang.

10) Certain kinds of set link netlink operations can cause a NULL deref
in the ipv6 addrconf code. Fix from Eric Dumazet.

11) Don't cancel uninitialized work queue in drop monitor, from Ido
Schimmel.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (84 commits)
net: thunderx: use proper interface type for RGMII
mt76: mt7615: fix max_nss in mt7615_eeprom_parse_hw_cap
bpf: Improve bucket_log calculation logic
selftests/bpf: Test freeing sockmap/sockhash with a socket in it
bpf, sockhash: Synchronize_rcu before free'ing map
bpf, sockmap: Don't sleep while holding RCU lock on tear-down
bpftool: Don't crash on missing xlated program instructions
bpf, sockmap: Check update requirements after locking
drop_monitor: Do not cancel uninitialized work item
mlxsw: spectrum_dpipe: Add missing error path
mlxsw: core: Add validation of hardware device types for MGPIR register
mlxsw: spectrum_router: Clear offload indication from IPv6 nexthops on abort
selftests: mlxsw: Add test cases for local table route replacement
mlxsw: spectrum_router: Prevent incorrect replacement of local table routes
net: dsa: microchip: enable module autoprobe
ipv6/addrconf: fix potential NULL deref in inet6_set_link_af()
dpaa_eth: support all modes with rate adapting PHYs
net: stmmac: update pci platform data to use phy_interface
net: stmmac: xgmac: fix missing IFF_MULTICAST check in dwxgmac2_set_filter
net: stmmac: fix missing IFF_MULTICAST check in dwmac4_set_filter
...

+783 -328
+32 -12
drivers/net/bonding/bond_alb.c
···
  bool do_tx_balance = true;
  u32 hash_index = 0;
  const u8 *hash_start = NULL;
- struct ipv6hdr *ip6hdr;

  skb_reset_mac_header(skb);
  eth_data = eth_hdr(skb);

  switch (ntohs(skb->protocol)) {
  case ETH_P_IP: {
-     const struct iphdr *iph = ip_hdr(skb);
+     const struct iphdr *iph;

      if (is_broadcast_ether_addr(eth_data->h_dest) ||
-         iph->daddr == ip_bcast ||
-         iph->protocol == IPPROTO_IGMP) {
+         !pskb_network_may_pull(skb, sizeof(*iph))) {
+         do_tx_balance = false;
+         break;
+     }
+     iph = ip_hdr(skb);
+     if (iph->daddr == ip_bcast || iph->protocol == IPPROTO_IGMP) {
          do_tx_balance = false;
          break;
      }
      hash_start = (char *)&(iph->daddr);
      hash_size = sizeof(iph->daddr);
- }
      break;
- case ETH_P_IPV6:
+ }
+ case ETH_P_IPV6: {
+     const struct ipv6hdr *ip6hdr;
+
      /* IPv6 doesn't really use broadcast mac address, but leave
       * that here just in case.
       */
···
      break;
  }

- /* Additianally, DAD probes should not be tx-balanced as that
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+     do_tx_balance = false;
+     break;
+ }
+ /* Additionally, DAD probes should not be tx-balanced as that
   * will lead to false positives for duplicate addresses and
   * prevent address configuration from working.
   */
···
      break;
  }

- hash_start = (char *)&(ipv6_hdr(skb)->daddr);
- hash_size = sizeof(ipv6_hdr(skb)->daddr);
+ hash_start = (char *)&ip6hdr->daddr;
+ hash_size = sizeof(ip6hdr->daddr);
  break;
- case ETH_P_IPX:
-     if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
+ }
+ case ETH_P_IPX: {
+     const struct ipxhdr *ipxhdr;
+
+     if (pskb_network_may_pull(skb, sizeof(*ipxhdr))) {
+         do_tx_balance = false;
+         break;
+     }
+     ipxhdr = (struct ipxhdr *)skb_network_header(skb);
+
+     if (ipxhdr->ipx_checksum != IPX_NO_CHECKSUM) {
          /* something is wrong with this packet */
          do_tx_balance = false;
          break;
      }

-     if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
+     if (ipxhdr->ipx_type != IPX_TYPE_NCP) {
          /* The only protocol worth balancing in
           * this family since it has an "ARP" like
           * mechanism
···
      break;
  }

+ eth_data = eth_hdr(skb);
  hash_start = (char *)eth_data->h_dest;
  hash_size = ETH_ALEN;
  break;
+ }
  case ETH_P_ARP:
      do_tx_balance = false;
      if (bond_info->rlb_enabled
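For illustration, the bug class this change closes is "cast a header struct out of a buffer before checking that the bytes are actually there" (the kernel-side check being pskb_network_may_pull()). A minimal user-space sketch of the same pattern; fake_iphdr and parse_daddr are made-up names, not kernel APIs:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_iphdr {
    uint8_t  version_ihl;
    uint8_t  tos;
    uint16_t tot_len;
    uint32_t daddr;
};

/* Returns 0 on success, -1 if the buffer is too short for the header. */
static int parse_daddr(const uint8_t *buf, size_t len, uint32_t *daddr)
{
    struct fake_iphdr hdr;

    if (len < sizeof(hdr))  /* the check the old code skipped */
        return -1;
    memcpy(&hdr, buf, sizeof(hdr)); /* also avoids unaligned access */
    *daddr = hdr.daddr;
    return 0;
}

int main(void)
{
    uint8_t short_pkt[4] = { 0x45, 0, 0, 20 };
    uint32_t daddr;

    if (parse_daddr(short_pkt, sizeof(short_pkt), &daddr) < 0)
        puts("truncated packet rejected instead of read out of bounds");
    return 0;
}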
+1 -1
drivers/net/dsa/b53/b53_common.c
···
      b53_do_vlan_op(dev, VTA_CMD_CLEAR);
  }

- b53_enable_vlan(dev, false, ds->vlan_filtering);
+ b53_enable_vlan(dev, dev->vlan_enabled, ds->vlan_filtering);

  b53_for_each_port(dev, i)
      b53_write16(dev, B53_VLAN_PAGE,
+3 -1
drivers/net/dsa/bcm_sf2.c
···

  /* Force link status for IMP port */
  reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS | GMII_SPEED_UP_2G);
+ reg |= (MII_SW_OR | LINK_STS);
+ if (priv->type == BCM7278_DEVICE_ID)
+     reg |= GMII_SPEED_UP_2G;
  core_writel(priv, reg, offset);

  /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+6
drivers/net/dsa/microchip/ksz9477_spi.c
···

  module_spi_driver(ksz9477_spi_driver);

+ MODULE_ALIAS("spi:ksz9477");
+ MODULE_ALIAS("spi:ksz9897");
+ MODULE_ALIAS("spi:ksz9893");
+ MODULE_ALIAS("spi:ksz9563");
+ MODULE_ALIAS("spi:ksz8563");
+ MODULE_ALIAS("spi:ksz9567");
  MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
  MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
  MODULE_LICENSE("GPL");
+3
drivers/net/ethernet/broadcom/bcmsysport.c
···

  umac_reset(priv);

+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
+
  /* We may have been suspended and never received a WOL event that
   * would turn off MPD detection, take care of that now
   */
+8 -6
drivers/net/ethernet/cadence/macb_main.c
···
  /* Max length of transmit frame must be a multiple of 8 bytes */
  #define MACB_TX_LEN_ALIGN 8
  #define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
- #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
+ /* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
+  * false amba_error in TX path from the DMA assuming there is not enough
+  * space in the SRAM (16KB) even when there is.
+  */
+ #define GEM_MAX_TX_LEN (unsigned int)(0x3FC0)

  #define GEM_MTU_MIN_SIZE ETH_MIN_MTU
  #define MACB_NETIF_LSO NETIF_F_TSO
···

  /* Validate LSO compatibility */

- /* there is only one buffer */
- if (!skb_is_nonlinear(skb))
+ /* there is only one buffer or protocol is not UDP */
+ if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
      return features;

  /* length of header */
  hdrlen = skb_transport_offset(skb);
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
-     hdrlen += tcp_hdrlen(skb);

- /* For LSO:
+ /* For UFO only:
   * When software supplies two or more payload buffers all payload buffers
   * apart from the last must be a multiple of 8 bytes in size.
   */
+1 -1
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
···
  if (lmac_type == BGX_MODE_QSGMII)
      return PHY_INTERFACE_MODE_QSGMII;
  if (lmac_type == BGX_MODE_RGMII)
-     return PHY_INTERFACE_MODE_RGMII;
+     return PHY_INTERFACE_MODE_RGMII_RXID;

  return PHY_INTERFACE_MODE_SGMII;
  }
+7
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
···
             atomic_read(&adap->chcr_stats.fallback));
  seq_printf(seq, "IPSec PDU: %10u\n",
             atomic_read(&adap->chcr_stats.ipsec_cnt));
+ seq_printf(seq, "TLS PDU Tx: %10u\n",
+            atomic_read(&adap->chcr_stats.tls_pdu_tx));
+ seq_printf(seq, "TLS PDU Rx: %10u\n",
+            atomic_read(&adap->chcr_stats.tls_pdu_rx));
+ seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
+            atomic_read(&adap->chcr_stats.tls_key));
+
  return 0;
  }
  DEFINE_SHOW_ATTRIBUTE(chcr_stats);
+4 -1
drivers/net/ethernet/dec/tulip/de2104x.c
···
  if (status & DescOwn)
      break;

- len = ((status >> 16) & 0x7ff) - 4;
+ /* the length is actually a 15 bit value here according
+  * to Table 4-1 in the DE2104x spec so mask is 0x7fff
+  */
+ len = ((status >> 16) & 0x7fff) - 4;
  mapping = de->rx_skb[rx_tail].mapping;

  if (unlikely(drop)) {
+11 -3
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
···
      mac_dev->adjust_link(mac_dev);
  }

+ /* The Aquantia PHYs are capable of performing rate adaptation */
+ #define PHY_VEND_AQUANTIA	0x03a1b400
+
  static int dpaa_phy_init(struct net_device *net_dev)
  {
      __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
···
      return -ENODEV;
  }

- /* Remove any features not supported by the controller */
- ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
- linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+ /* Unless the PHY is capable of rate adaptation */
+ if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
+     ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
+     /* remove any features not supported by the controller */
+     ethtool_convert_legacy_u32_to_link_mode(mask,
+                                             mac_dev->if_support);
+     linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+ }

  phy_support_asym_pause(phy_dev);
+1 -1
drivers/net/ethernet/intel/i40e/i40e_xsk.c
···
  struct i40e_ring *ring;

  if (test_bit(__I40E_CONFIG_BUSY, pf->state))
-     return -ENETDOWN;
+     return -EAGAIN;

  if (test_bit(__I40E_VSI_DOWN, vsi->state))
      return -ENETDOWN;
+22 -9
drivers/net/ethernet/marvell/mvneta.c
···
  struct u64_stats_sync syncp;
  u64 rx_packets;
  u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
  u64 tx_packets;
  u64 tx_bytes;
  };
···
  struct mvneta_pcpu_stats *cpu_stats;
  u64 rx_packets;
  u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_errors;
  u64 tx_packets;
  u64 tx_bytes;

···
      start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
      rx_packets = cpu_stats->rx_packets;
      rx_bytes = cpu_stats->rx_bytes;
+     rx_dropped = cpu_stats->rx_dropped;
+     rx_errors = cpu_stats->rx_errors;
      tx_packets = cpu_stats->tx_packets;
      tx_bytes = cpu_stats->tx_bytes;
  } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

  stats->rx_packets += rx_packets;
  stats->rx_bytes += rx_bytes;
+ stats->rx_dropped += rx_dropped;
+ stats->rx_errors += rx_errors;
  stats->tx_packets += tx_packets;
  stats->tx_bytes += tx_bytes;
  }
-
- stats->rx_errors = dev->stats.rx_errors;
- stats->rx_dropped = dev->stats.rx_dropped;

  stats->tx_dropped = dev->stats.tx_dropped;
  }
···
  static void mvneta_rx_error(struct mvneta_port *pp,
                              struct mvneta_rx_desc *rx_desc)
  {
+ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
  u32 status = rx_desc->status;
+
+ /* update per-cpu counter */
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_errors++;
+ u64_stats_update_end(&stats->syncp);

  switch (status & MVNETA_RXD_ERR_CODE_MASK) {
  case MVNETA_RXD_ERR_CRC:
···

  rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
  if (unlikely(!rxq->skb)) {
-     netdev_err(dev,
-                "Can't allocate skb on queue %d\n",
-                rxq->id);
-     dev->stats.rx_dropped++;
+     struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+     netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
      rxq->skb_alloc_err++;
+
+     u64_stats_update_begin(&stats->syncp);
+     stats->rx_dropped++;
+     u64_stats_update_end(&stats->syncp);
+
      return -ENOMEM;
  }
  page_pool_release_page(rxq->page_pool, page);
···
  /* Check errors only for FIRST descriptor */
  if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
      mvneta_rx_error(pp, rx_desc);
-     dev->stats.rx_errors++;
      /* leave the descriptor untouched */
      continue;
  }
···
  mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
                        rx_desc->buf_phys_addr);
  err_drop_frame:
- dev->stats.rx_errors++;
  mvneta_rx_error(pp, rx_desc);
  /* leave the descriptor untouched */
  continue;
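The per-cpu counters above rely on the u64_stats_sync sequence-counter pattern: the writer bumps a sequence around its updates and a lockless reader retries until it observes an even, unchanged sequence. A user-space sketch of the idea only (the kernel helpers are u64_stats_update_begin()/end() and u64_stats_fetch_begin_irq()/retry_irq(); all names below are stand-ins):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct pcpu_stats {
    atomic_uint seq;
    uint64_t rx_errors;
};

static void stats_inc_rx_errors(struct pcpu_stats *s)  /* writer side */
{
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_acq_rel); /* odd: update in progress */
    s->rx_errors++;
    atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* even: stable */
}

static uint64_t stats_read_rx_errors(struct pcpu_stats *s)      /* reader side */
{
    unsigned int start;
    uint64_t v;

    do {
        start = atomic_load_explicit(&s->seq, memory_order_acquire);
        v = s->rx_errors;
    } while ((start & 1) ||
             start != atomic_load_explicit(&s->seq, memory_order_acquire));
    return v;
}

int main(void)
{
    struct pcpu_stats s = { 0 };

    stats_inc_rx_errors(&s);
    printf("rx_errors=%llu\n", (unsigned long long)stats_read_rx_errors(&s));
    return 0;
}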
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
···

  static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
  {
- if (!MLX5_CAP_GEN(mdev, tls))
+ if (!MLX5_CAP_GEN(mdev, tls_tx))
      return false;

  if (!MLX5_CAP_GEN(mdev, log_max_dek))
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
···
  int datalen;
  u32 skb_seq;

- if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
+ if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
      skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
      goto out;
  }
+9 -7
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
···

  wqe_counter = be16_to_cpu(cqe->wqe_counter);

- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
-     netdev_WARN_ONCE(cq->channel->netdev,
-                      "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
-     if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-         queue_work(cq->channel->priv->wq, &sq->recover_work);
-     break;
- }
  do {
      struct mlx5e_sq_wqe_info *wi;
      u16 ci;
···

      ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
      wi = &sq->db.ico_wqe[ci];
+
+     if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+         netdev_WARN_ONCE(cq->channel->netdev,
+                          "Bad OP in ICOSQ CQE: 0x%x\n",
+                          get_cqe_opcode(cqe));
+         if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+             queue_work(cq->channel->priv->wq, &sq->recover_work);
+         break;
+     }

      if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
          sqcc += MLX5E_UMR_WQEBBS;
+14 -19
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
···

  i = 0;
  do {
+     struct mlx5e_tx_wqe_info *wi;
      u16 wqe_counter;
      bool last_wqe;
+     u16 ci;

      mlx5_cqwq_pop(&cq->wq);

      wqe_counter = be16_to_cpu(cqe->wqe_counter);

-     if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
-         if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
-                               &sq->state)) {
-             struct mlx5e_tx_wqe_info *wi;
-             u16 ci;
-
-             ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
-             wi = &sq->db.wqe_info[ci];
-             mlx5e_dump_error_cqe(sq,
-                                  (struct mlx5_err_cqe *)cqe);
-             mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
-             queue_work(cq->channel->priv->wq,
-                        &sq->recover_work);
-         }
-         stats->cqe_err++;
-     }
-
      do {
-         struct mlx5e_tx_wqe_info *wi;
          struct sk_buff *skb;
-         u16 ci;
          int j;

          last_wqe = (sqcc == wqe_counter);
···
          sqcc += wi->num_wqebbs;
          napi_consume_skb(skb, napi_budget);
      } while (!last_wqe);
+
+     if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
+         if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
+                               &sq->state)) {
+             mlx5e_dump_error_cqe(sq,
+                                  (struct mlx5_err_cqe *)cqe);
+             mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+             queue_work(cq->channel->priv->wq,
+                        &sq->recover_work);
+         }
+         stats->cqe_err++;
+     }

  } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
···
  mutex_lock(&fpga_xfrm->lock);
  if (!--fpga_xfrm->num_rules) {
      mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
+     kfree(fpga_xfrm->sa_ctx);
      fpga_xfrm->sa_ctx = NULL;
  }
  mutex_unlock(&fpga_xfrm->lock);
···
  if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
      return 0;

- if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+ if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
      mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
      return -EOPNOTSUPP;
  }
+8 -7
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
···
  struct match_list first;
  };

- static void free_match_list(struct match_list_head *head)
+ static void free_match_list(struct match_list_head *head, bool ft_locked)
  {
  if (!list_empty(&head->list)) {
      struct match_list *iter, *match_tmp;

      list_del(&head->first.list);
-     tree_put_node(&head->first.g->node, false);
+     tree_put_node(&head->first.g->node, ft_locked);
      list_for_each_entry_safe(iter, match_tmp, &head->list,
                               list) {
-         tree_put_node(&iter->g->node, false);
+         tree_put_node(&iter->g->node, ft_locked);
          list_del(&iter->list);
          kfree(iter);
      }
···

  static int build_match_list(struct match_list_head *match_head,
                              struct mlx5_flow_table *ft,
-                             const struct mlx5_flow_spec *spec)
+                             const struct mlx5_flow_spec *spec,
+                             bool ft_locked)
  {
  struct rhlist_head *tmp, *list;
  struct mlx5_flow_group *g;
···

  curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
  if (!curr_match) {
-     free_match_list(match_head);
+     free_match_list(match_head, ft_locked);
      err = -ENOMEM;
      goto out;
  }
···
  version = atomic_read(&ft->node.version);

  /* Collect all fgs which has a matching match_criteria */
- err = build_match_list(&match_head, ft, spec);
+ err = build_match_list(&match_head, ft, spec, take_write);
  if (err) {
      if (take_write)
          up_write_ref_node(&ft->node, false);
···

  rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
                                dest_num, version);
- free_match_list(&match_head);
+ free_match_list(&match_head, take_write);
  if (!IS_ERR(rule) ||
      (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
      if (take_write)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fw.c
···
      return err;
  }

- if (MLX5_CAP_GEN(dev, tls)) {
+ if (MLX5_CAP_GEN(dev, tls_tx)) {
      err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
      if (err)
          return err;
+4 -2
drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
···

  static int mlxsw_hwmon_gearbox_init(struct mlxsw_hwmon *mlxsw_hwmon)
  {
+ enum mlxsw_reg_mgpir_device_type device_type;
  int index, max_index, sensor_index;
  char mgpir_pl[MLXSW_REG_MGPIR_LEN];
  char mtmp_pl[MLXSW_REG_MTMP_LEN];
···
  if (err)
      return err;

- mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, NULL, NULL, NULL);
- if (!gbox_num)
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL, NULL);
+ if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+     !gbox_num)
      return 0;

  index = mlxsw_hwmon->module_sensor_max;
+6 -2
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
···
  mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
                               struct mlxsw_thermal *thermal)
  {
+ enum mlxsw_reg_mgpir_device_type device_type;
  struct mlxsw_thermal_module *gearbox_tz;
  char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ u8 gbox_num;
  int i;
  int err;
···
  if (err)
      return err;

- mlxsw_reg_mgpir_unpack(mgpir_pl, &thermal->tz_gearbox_num, NULL, NULL,
+ mlxsw_reg_mgpir_unpack(mgpir_pl, &gbox_num, &device_type, NULL,
                         NULL);
- if (!thermal->tz_gearbox_num)
+ if (device_type != MLXSW_REG_MGPIR_DEVICE_TYPE_GEARBOX_DIE ||
+     !gbox_num)
      return 0;

+ thermal->tz_gearbox_num = gbox_num;
  thermal->tz_gearbox_arr = kcalloc(thermal->tz_gearbox_num,
                                    sizeof(*thermal->tz_gearbox_arr),
                                    GFP_KERNEL);
+2 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
···
  start_again:
  err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
  if (err)
-     return err;
+     goto err_ctx_prepare;
  j = 0;
  for (; i < rif_count; i++) {
      struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
···
  return 0;
  err_entry_append:
  err_entry_get:
+ err_ctx_prepare:
  rtnl_unlock();
  devlink_dpipe_entry_clear(&entry);
  return err;
+54 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
···
  fib_node->fib_entry = NULL;
  }

+ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
+ {
+     struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
+     struct mlxsw_sp_fib4_entry *fib4_replaced;
+
+     if (!fib_node->fib_entry)
+         return true;
+
+     fib4_replaced = container_of(fib_node->fib_entry,
+                                  struct mlxsw_sp_fib4_entry, common);
+     if (fib4_entry->tb_id == RT_TABLE_MAIN &&
+         fib4_replaced->tb_id == RT_TABLE_LOCAL)
+         return false;
+
+     return true;
+ }
+
  static int
  mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
                               const struct fib_entry_notifier_info *fen_info)
···
      dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
      err = PTR_ERR(fib4_entry);
      goto err_fib4_entry_create;
+ }
+
+ if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
+     mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
+     mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+     return 0;
  }

  replaced = fib_node->fib_entry;
···
      return;

  fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
- if (WARN_ON(!fib4_entry))
+ if (!fib4_entry)
      return;
  fib_node = fib4_entry->common.fib_node;

···

  static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
  {
+ struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
+
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
  mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
  kfree(mlxsw_sp_rt6);
  }
···
      return NULL;
  }

+ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
+ {
+     struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+     struct mlxsw_sp_fib6_entry *fib6_replaced;
+     struct fib6_info *rt, *rt_replaced;
+
+     if (!fib_node->fib_entry)
+         return true;
+
+     fib6_replaced = container_of(fib_node->fib_entry,
+                                  struct mlxsw_sp_fib6_entry,
+                                  common);
+     rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
+     rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
+     if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
+         rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
+         return false;
+
+     return true;
+ }
+
  static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
                                          struct fib6_info **rt_arr,
                                          unsigned int nrt6)
···
  if (IS_ERR(fib6_entry)) {
      err = PTR_ERR(fib6_entry);
      goto err_fib6_entry_create;
+ }
+
+ if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
+     mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
+     mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+     return 0;
  }

  replaced = fib_node->fib_entry;
+2 -2
drivers/net/ethernet/qlogic/qed/qed_ptp.c
···
  /* Add/subtract the Adjustment_Value when making a Drift adjustment */
  #define QED_DRIFT_CNTR_DIRECTION_SHIFT		31
  #define QED_TIMESTAMP_MASK			BIT(16)
- /* Param mask for Hardware to detect/timestamp the unicast PTP packets */
- #define QED_PTP_UCAST_PARAM_MASK	0xF
+ /* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
+ #define QED_PTP_UCAST_PARAM_MASK	0x70F

  static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
  {
+6
drivers/net/ethernet/realtek/r8169_main.c
···
  switch (tp->mac_version) {
  case RTL_GIGA_MAC_VER_12:
  case RTL_GIGA_MAC_VER_17:
+     pcie_set_readrq(tp->pci_dev, 512);
      r8168b_1_hw_jumbo_enable(tp);
      break;
  case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
+     pcie_set_readrq(tp->pci_dev, 512);
      r8168c_hw_jumbo_enable(tp);
      break;
  case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
      r8168dp_hw_jumbo_enable(tp);
      break;
  case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
+     pcie_set_readrq(tp->pci_dev, 512);
      r8168e_hw_jumbo_enable(tp);
      break;
  default:
···
      break;
  }
  rtl_lock_config_regs(tp);
+
+ if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+     pcie_set_readrq(tp->pci_dev, 4096);
  }

  static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
-1
drivers/net/ethernet/sgi/ioc3-eth.c
···
  netif_stop_queue(dev);

  ioc3_stop(ip);
- free_irq(dev->irq, dev);

  ioc3_free_rx_bufs(ip);
  ioc3_clean_tx_ring(ip);
+1
drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
···
      dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
      if (dll_lock & SDC4_STATUS_DLL_LOCK)
          break;
+     retry--;
  } while (retry > 0);
  if (!retry)
      dev_err(&ethqos->pdev->dev,
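The bug here is the classic poll loop whose counter is never decremented, so "while (retry > 0)" spins forever when the condition never becomes true. A self-contained sketch of the corrected shape; read_status() and DLL_LOCK are illustrative stand-ins for the driver's register read:

#include <stdio.h>

#define DLL_LOCK 0x1

static unsigned int read_status(void) { return 0; } /* worst case: never locks */

int main(void)
{
    int retry = 1000;
    unsigned int status;

    do {
        status = read_status();
        if (status & DLL_LOCK)
            break;
        retry--;    /* without this line the loop never exits */
    } while (retry > 0);

    if (!retry)
        puts("timed out waiting for DLL lock");
    return 0;
}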
+5 -4
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
···
      value |= GMAC_PACKET_FILTER_PM;
      /* Set all the bits of the HASH tab */
      memset(mc_filter, 0xff, sizeof(mc_filter));
- } else if (!netdev_mc_empty(dev)) {
+ } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
      struct netdev_hw_addr *ha;

      /* Hash filter for multicast */
···
                  __le16 perfect_match, bool is_double)
  {
  void __iomem *ioaddr = hw->pcsr;
+ u32 value;

  writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

+ value = readl(ioaddr + GMAC_VLAN_TAG);
+
  if (hash) {
-     u32 value = GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
+     value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
      if (is_double) {
          value |= GMAC_VLAN_EDVLP;
          value |= GMAC_VLAN_ESVL;
···

      writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
  } else {
-     u32 value = readl(ioaddr + GMAC_VLAN_TAG);
-
      value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
      value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
      value &= ~GMAC_VLAN_DOVLTC;
+7 -3
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
···

      for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
          writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
- } else if (!netdev_mc_empty(dev)) {
+ } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
      struct netdev_hw_addr *ha;

      value |= XGMAC_FILTER_HMC;
···

  writel(value, ioaddr + XGMAC_PACKET_FILTER);

- value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
  if (is_double) {
      value |= XGMAC_VLAN_EDVLP;
      value |= XGMAC_VLAN_ESVL;
···

  writel(value, ioaddr + XGMAC_PACKET_FILTER);

- value = XGMAC_VLAN_ETV;
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value |= XGMAC_VLAN_ETV;
  if (is_double) {
      value |= XGMAC_VLAN_EDVLP;
      value |= XGMAC_VLAN_ESVL;
+8 -6
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
···

  plat->bus_id = 1;
  plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;

  plat->dma_cfg->pbl = 32;
  plat->dma_cfg->pblx8 = true;
···
  {
  plat->bus_id = 1;
  plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_SGMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
  return ehl_common_data(pdev, plat);
  }
···
  {
  plat->bus_id = 1;
  plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_RGMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
+
  return ehl_common_data(pdev, plat);
  }
···
  {
  plat->bus_id = 1;
  plat->phy_addr = 0;
- plat->interface = PHY_INTERFACE_MODE_SGMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
  return tgl_common_data(pdev, plat);
  }
···

  plat->bus_id = pci_dev_id(pdev);
  plat->phy_addr = ret;
- plat->interface = PHY_INTERFACE_MODE_RMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_RMII;

  plat->dma_cfg->pbl = 16;
  plat->dma_cfg->pblx8 = true;
···

  plat->bus_id = 1;
  plat->phy_addr = -1;
- plat->interface = PHY_INTERFACE_MODE_GMII;
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;

  plat->dma_cfg->pbl = 32;
  plat->dma_cfg->pblx8 = true;
+11 -2
drivers/net/hyperv/netvsc_bpf.c
···
  }

  if (prog)
-     bpf_prog_add(prog, nvdev->num_chn);
+     bpf_prog_add(prog, nvdev->num_chn - 1);

  for (i = 0; i < nvdev->num_chn; i++)
      rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);
···
  {
  struct netdev_bpf xdp;
  bpf_op_t ndo_bpf;
+ int ret;

  ASSERT_RTNL();

···

  memset(&xdp, 0, sizeof(xdp));

+ if (prog)
+     bpf_prog_inc(prog);
+
  xdp.command = XDP_SETUP_PROG;
  xdp.prog = prog;

- return ndo_bpf(vf_netdev, &xdp);
+ ret = ndo_bpf(vf_netdev, &xdp);
+
+ if (ret && prog)
+     bpf_prog_put(prog);
+
+ return ret;
  }

  static u32 netvsc_xdp_query(struct netvsc_device *nvdev)
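The first hunk is pure reference accounting: the caller already holds one reference on the program, and the attach path stores it in num_chn channel slots, so it must add only num_chn - 1 more. A toy user-space model of that invariant (a plain int stands in for the real refcount; the kernel-side helpers are bpf_prog_add()/bpf_prog_put()):

#include <assert.h>
#include <stdio.h>

struct obj { int refcnt; };

static void obj_add(struct obj *o, int n) { o->refcnt += n; }
static void obj_put(struct obj *o) { o->refcnt--; }

static void attach(struct obj *o, struct obj *slots[], int n)
{
    /* caller's reference covers slots[0]; add n - 1 for the rest */
    obj_add(o, n - 1);
    for (int i = 0; i < n; i++)
        slots[i] = o;
}

int main(void)
{
    struct obj prog = { .refcnt = 1 };  /* reference held by the caller */
    struct obj *chan[4];

    attach(&prog, chan, 4);
    assert(prog.refcnt == 4);           /* one per channel, none leaked */

    for (int i = 0; i < 4; i++)
        obj_put(chan[i]);
    assert(prog.refcnt == 0);
    printf("balanced: refcnt=%d\n", prog.refcnt);
    return 0;
}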
+4 -1
drivers/net/hyperv/netvsc_drv.c
···

  prog = dev_info->bprog;
  if (prog) {
+     bpf_prog_inc(prog);
      ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
-     if (ret)
+     if (ret) {
+         bpf_prog_put(prog);
          goto err1;
+     }
  }

  /* In any case device is now ready */
+1 -3
drivers/net/netdevsim/dev.c
···
  int nsim_dev_init(void)
  {
  nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
- if (IS_ERR(nsim_dev_ddir))
-     return PTR_ERR(nsim_dev_ddir);
- return 0;
+ return PTR_ERR_OR_ZERO(nsim_dev_ddir);
  }

  void nsim_dev_exit(void)
+1
drivers/net/wireguard/allowedips.c
···
  } else {
      node = kzalloc(sizeof(*node), GFP_KERNEL);
      if (unlikely(!node)) {
+         list_del(&newnode->peer_list);
          kfree(newnode);
          return -ENOMEM;
      }
+2 -4
drivers/net/wireguard/netlink.c
···
             private_key);
  list_for_each_entry_safe(peer, temp, &wg->peer_list,
                           peer_list) {
-     if (wg_noise_precompute_static_static(peer))
-         wg_noise_expire_current_peer_keypairs(peer);
-     else
-         wg_peer_remove(peer);
+     BUG_ON(!wg_noise_precompute_static_static(peer));
+     wg_noise_expire_current_peer_keypairs(peer);
  }
  wg_cookie_checker_precompute_device_keys(&wg->cookie_checker);
  up_write(&wg->static_identity.lock);
+7 -3
drivers/net/wireguard/noise.c
···
  /* Must hold peer->handshake.static_identity->lock */
  bool wg_noise_precompute_static_static(struct wg_peer *peer)
  {
- bool ret = true;
+ bool ret;

  down_write(&peer->handshake.lock);
- if (peer->handshake.static_identity->has_identity)
+ if (peer->handshake.static_identity->has_identity) {
      ret = curve25519(
          peer->handshake.precomputed_static_static,
          peer->handshake.static_identity->static_private,
          peer->handshake.remote_static);
- else
+ } else {
+     u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 };
+
+     ret = curve25519(empty, empty, peer->handshake.remote_static);
      memset(peer->handshake.precomputed_static_static, 0,
             NOISE_PUBLIC_KEY_LEN);
+ }
  up_write(&peer->handshake.lock);
  return ret;
  }
+41 -13
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
···
      ieee80211_resume_disconnect(vif);
  }

- static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
-                                    struct ieee80211_vif *vif)
+ static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
  {
- u32 base = mvm->trans->dbg.lmac_error_event_table[0];
  struct error_table_start {
      /* cf. struct iwl_error_event_table */
      u32 valid;
-     u32 error_id;
+     __le32 err_id;
  } err_info;

- iwl_trans_read_mem_bytes(mvm->trans, base,
-                          &err_info, sizeof(err_info));
+ if (!base)
+     return false;

- if (err_info.valid &&
-     err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
-     struct cfg80211_wowlan_wakeup wakeup = {
-         .rfkill_release = true,
-     };
-     ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
+ iwl_trans_read_mem_bytes(trans, base,
+                          &err_info, sizeof(err_info));
+ if (err_info.valid && err_id)
+     *err_id = le32_to_cpu(err_info.err_id);
+
+ return !!err_info.valid;
+ }
+
+ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif)
+ {
+ u32 err_id;
+
+ /* check for lmac1 error */
+ if (iwl_mvm_rt_status(mvm->trans,
+                       mvm->trans->dbg.lmac_error_event_table[0],
+                       &err_id)) {
+     if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+         struct cfg80211_wowlan_wakeup wakeup = {
+             .rfkill_release = true,
+         };
+         ieee80211_report_wowlan_wakeup(vif, &wakeup,
+                                        GFP_KERNEL);
+     }
+     return true;
  }
- return err_info.valid;
+
+ /* check if we have lmac2 set and check for error */
+ if (iwl_mvm_rt_status(mvm->trans,
+                       mvm->trans->dbg.lmac_error_event_table[1], NULL))
+     return true;
+
+ /* check for umac error */
+ if (iwl_mvm_rt_status(mvm->trans,
+                       mvm->trans->dbg.umac_error_event_table, NULL))
+     return true;
+
+ return false;
  }

  static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+4 -1
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
···
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * Copyright (C) 2018 Intel Corporation
  * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
···
  * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
  * Copyright (C) 2018 Intel Corporation
  * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
···
  if (req != mvm->ftm_initiator.req)
      return;

+ iwl_mvm_ftm_reset(mvm);
+
  if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD,
                                           LOCATION_GROUP, 0),
                           0, sizeof(cmd), &cmd))
···
  lockdep_assert_held(&mvm->mutex);

  if (!mvm->ftm_initiator.req) {
-     IWL_ERR(mvm, "Got FTM response but have no request?\n");
      return;
  }
+4 -6
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
···
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
···
  *
  * BSD LICENSE
  *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
···
  rcu_read_lock();

  sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
- if (IS_ERR(sta)) {
+ if (IS_ERR_OR_NULL(sta)) {
      rcu_read_unlock();
      WARN(1, "Can't find STA to configure HE\n");
      return;
···
  if (fw_has_capa(&mvm->fw->ucode_capa,
                  IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
      iwl_mvm_schedule_session_protection(mvm, vif, 900,
-                                         min_duration);
+                                         min_duration, false);
  else
      iwl_mvm_protect_session(mvm, vif, duration,
                              min_duration, 500, false);
+7 -3
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
···
  igtk_cmd.sta_id = cpu_to_le32(sta_id);

  if (remove_key) {
+     /* This is a valid situation for IGTK */
+     if (sta_id == IWL_MVM_INVALID_STA)
+         return 0;
+
      igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
  } else {
      struct ieee80211_key_seq seq;
···
  IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
                keyconf->keyidx, sta_id);

- if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-                 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-                 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
+ if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+     keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+     keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
      return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

  if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
+8 -2
drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
···
  struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
  u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;

- mutex_lock(&mvm->mutex);
  /* Protect the session to hear the TDLS setup response on the channel */
- iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
+ mutex_lock(&mvm->mutex);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+                 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+     iwl_mvm_schedule_session_protection(mvm, vif, duration,
+                                         duration, true);
+ else
+     iwl_mvm_protect_session(mvm, vif, duration,
+                             duration, 100, true);
  mutex_unlock(&mvm->mutex);
  }
+60 -11
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
···
  return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
  }

+ static bool iwl_mvm_session_prot_notif(struct iwl_notif_wait_data *notif_wait,
+                                        struct iwl_rx_packet *pkt, void *data)
+ {
+     struct iwl_mvm *mvm =
+         container_of(notif_wait, struct iwl_mvm, notif_wait);
+     struct iwl_mvm_session_prot_notif *resp;
+     int resp_len = iwl_rx_packet_payload_len(pkt);
+
+     if (WARN_ON(pkt->hdr.cmd != SESSION_PROTECTION_NOTIF ||
+                 pkt->hdr.group_id != MAC_CONF_GROUP))
+         return true;
+
+     if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+         IWL_ERR(mvm, "Invalid SESSION_PROTECTION_NOTIF response\n");
+         return true;
+     }
+
+     resp = (void *)pkt->data;
+
+     if (!resp->status)
+         IWL_ERR(mvm,
+                 "TIME_EVENT_NOTIFICATION received but not executed\n");
+
+     return true;
+ }
+
  void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
                                           struct ieee80211_vif *vif,
-                                          u32 duration, u32 min_duration)
+                                          u32 duration, u32 min_duration,
+                                          bool wait_for_notif)
  {
  struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
  struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-
+ const u16 notif[] = { iwl_cmd_id(SESSION_PROTECTION_NOTIF,
+                                  MAC_CONF_GROUP, 0) };
+ struct iwl_notification_wait wait_notif;
  struct iwl_mvm_session_prot_cmd cmd = {
      .id_and_color =
          cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
···
      .conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
      .duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
  };
- int ret;

  lockdep_assert_held(&mvm->mutex);

···
  IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
               le32_to_cpu(cmd.duration_tu));

- ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
-                                            MAC_CONF_GROUP, 0),
-                            0, sizeof(cmd), &cmd);
- if (ret) {
+ if (!wait_for_notif) {
+     if (iwl_mvm_send_cmd_pdu(mvm,
+                              iwl_cmd_id(SESSION_PROTECTION_CMD,
+                                         MAC_CONF_GROUP, 0),
+                              0, sizeof(cmd), &cmd)) {
+         IWL_ERR(mvm,
+                 "Couldn't send the SESSION_PROTECTION_CMD\n");
+         spin_lock_bh(&mvm->time_event_lock);
+         iwl_mvm_te_clear_data(mvm, te_data);
+         spin_unlock_bh(&mvm->time_event_lock);
+     }
+
+     return;
+ }
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_notif,
+                            notif, ARRAY_SIZE(notif),
+                            iwl_mvm_session_prot_notif, NULL);
+
+ if (iwl_mvm_send_cmd_pdu(mvm,
+                          iwl_cmd_id(SESSION_PROTECTION_CMD,
+                                     MAC_CONF_GROUP, 0),
+                          0, sizeof(cmd), &cmd)) {
      IWL_ERR(mvm,
-             "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
-     spin_lock_bh(&mvm->time_event_lock);
-     iwl_mvm_te_clear_data(mvm, te_data);
-     spin_unlock_bh(&mvm->time_event_lock);
+             "Couldn't send the SESSION_PROTECTION_CMD\n");
+     iwl_remove_notification(&mvm->notif_wait, &wait_notif);
+ } else if (iwl_wait_notification(&mvm->notif_wait, &wait_notif,
+                                  TU_TO_JIFFIES(100))) {
+     IWL_ERR(mvm,
+             "Failed to protect session until session protection\n");
  }
  }
+3 -1
drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
···
  * @mvm: the mvm component
  * @vif: the virtual interface for which the protection issued
  * @duration: the duration of the protection
+ * @wait_for_notif: if true, will block until the start of the protection
  */
  void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
                                           struct ieee80211_vif *vif,
-                                          u32 duration, u32 min_duration);
+                                          u32 duration, u32 min_duration,
+                                          bool wait_for_notif);

  /**
  * iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
+6 -4
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
···
  * Copyright(c) 2013 - 2014, 2019 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
···
  * Copyright(c) 2012 - 2014, 2019 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
···
      .flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
  };
  struct iwl_ext_dts_measurement_cmd extcmd = {
-     .control_mode = cpu_to_le32(DTS_AUTOMATIC),
+     .control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE),
  };
  u32 cmdid;

···
  static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
  {
  int i;
- char name[] = "iwlwifi";
+ char name[16];
+ static atomic_t counter = ATOMIC_INIT(0);

  if (!iwl_mvm_is_tt_in_fw(mvm)) {
      mvm->tz_device.tzone = NULL;
···

  BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);

+ sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
  mvm->tz_device.tzone = thermal_zone_device_register(name,
                                                      IWL_MAX_DTS_TRIPS,
                                                      IWL_WRITABLE_TRIPS_MSK,
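The last hunk fixes the thermal zone registration mentioned in item 2 of the pull message: with a fixed "iwlwifi" name, a second NIC's zone registration collides with the first. Folding an atomically incremented counter into the name makes each zone unique. A user-space sketch of the same scheme (C11 atomics standing in for the kernel's atomic_inc_return()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint counter;

static void make_tz_name(char name[16])
{
    /* the & 0xFF mirrors the driver's truncation of the counter */
    snprintf(name, 16, "iwlwifi_%u",
             (atomic_fetch_add(&counter, 1) + 1) & 0xFF);
}

int main(void)
{
    char a[16], b[16];

    make_tz_name(a);
    make_tz_name(b);
    printf("%s %s\n", a, b);    /* iwlwifi_1 iwlwifi_2 */
    return 0;
}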
+2
drivers/net/wireless/marvell/libertas/cfg.c
···
  rates_max = rates_eid[1];
  if (rates_max > MAX_RATES) {
      lbs_deb_join("invalid rates");
+     rcu_read_unlock();
+     ret = -EINVAL;
      goto out;
  }
  rates = cmd.bss.rates;
+7
drivers/net/wireless/marvell/mwifiex/scan.c
···
  vs_param_set->header.len =
      cpu_to_le16((((u16) priv->vs_ie[id].ie[1])
                  & 0x00FF) + 2);
+ if (le16_to_cpu(vs_param_set->header.len) >
+     MWIFIEX_MAX_VSIE_LEN) {
+     mwifiex_dbg(priv->adapter, ERROR,
+                 "Invalid param length!\n");
+     break;
+ }
+
  memcpy(vs_param_set->ie, priv->vs_ie[id].ie,
         le16_to_cpu(vs_param_set->header.len));
  *buffer += le16_to_cpu(vs_param_set->header.len) +
+1
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
···

  if (country_ie_len >
      (IEEE80211_COUNTRY_STRING_LEN + MWIFIEX_MAX_TRIPLET_802_11D)) {
+     rcu_read_unlock();
      mwifiex_dbg(priv->adapter, ERROR,
                  "11D: country_ie_len overflow!, deauth AP\n");
      return -EINVAL;
+4
drivers/net/wireless/marvell/mwifiex/wmm.c
···
          "WMM Parameter Set Count: %d\n",
          wmm_param_ie->qos_info_bitmap & mask);

+ if (wmm_param_ie->vend_hdr.len + 2 >
+     sizeof(struct ieee_types_wmm_parameter))
+     break;
+
  memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
         wmm_ie, wmm_param_ie,
         wmm_param_ie->vend_hdr.len + 2);
+2 -1
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
···

  static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
  {
- u8 val, *eeprom = dev->mt76.eeprom.data;
+ u8 *eeprom = dev->mt76.eeprom.data;
  u8 tx_mask, rx_mask, max_nss;
+ u32 val;

  val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
                  eeprom[MT_EE_WIFI_CONF]);
+11 -12
drivers/net/wireless/realtek/rtw88/wow.c
···
  rtw_write32_clr(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE);
  }

- static bool rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
+ static int rtw_wow_check_fw_status(struct rtw_dev *rtwdev, bool wow_enable)
  {
- bool ret;
-
  /* wait 100ms for wow firmware to finish work */
  msleep(100);

  if (wow_enable) {
-     if (!rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
-         ret = 0;
+     if (rtw_read8(rtwdev, REG_WOWLAN_WAKE_REASON))
+         goto wow_fail;
  } else {
-     if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) == 0 &&
-         rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE) == 0)
-         ret = 0;
+     if (rtw_read32_mask(rtwdev, REG_FE1IMR, BIT_FS_RXDONE) ||
+         rtw_read32_mask(rtwdev, REG_RXPKT_NUM, BIT_RW_RELEASE))
+         goto wow_fail;
  }

- if (ret)
-     rtw_err(rtwdev, "failed to check wow status %s\n",
-             wow_enable ? "enabled" : "disabled");
+ return 0;

- return ret;
+ wow_fail:
+ rtw_err(rtwdev, "failed to check wow status %s\n",
+         wow_enable ? "enabled" : "disabled");
+ return -EBUSY;
  }

  static void rtw_wow_fw_security_type_iter(struct ieee80211_hw *hw,
+5 -2
include/linux/bpf.h
···
  #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
  #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
  const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
- void bpf_struct_ops_init(struct btf *btf);
+ void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
  bool bpf_struct_ops_get(const void *kdata);
  void bpf_struct_ops_put(const void *kdata);
  int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
···
  {
      return NULL;
  }
- static inline void bpf_struct_ops_init(struct btf *btf) { }
+ static inline void bpf_struct_ops_init(struct btf *btf,
+                                        struct bpf_verifier_log *log)
+ {
+ }
  static inline bool bpf_try_module_get(const void *data, struct module *owner)
  {
      return try_module_get(owner);
+4 -3
include/linux/mlx5/mlx5_ifc.h
···

  u8 reserved_at_440[0x20];

- u8 tls[0x1];
- u8 reserved_at_461[0x2];
+ u8 reserved_at_460[0x3];
  u8 log_max_uctx[0x5];
  u8 reserved_at_468[0x3];
  u8 log_max_umem[0x5];
  u8 max_num_eqs[0x10];

- u8 reserved_at_480[0x3];
+ u8 reserved_at_480[0x1];
+ u8 tls_tx[0x1];
+ u8 reserved_at_482[0x1];
  u8 log_max_l2_table[0x5];
  u8 reserved_at_488[0x8];
  u8 log_uar_page_sz[0x10];
+1 -1
include/linux/platform_data/b53.h
···
  #ifndef __B53_H
  #define __B53_H

- #include <linux/kernel.h>
+ #include <linux/types.h>
  #include <linux/platform_data/dsa.h>

  struct b53_platform_data {
+1 -1
include/linux/platform_data/microchip-ksz.h
···
  #ifndef __MICROCHIP_KSZ_H
  #define __MICROCHIP_KSZ_H

- #include <linux/kernel.h>
+ #include <linux/types.h>

  struct ksz_platform_data {
      u32 chip_id;
+13 -1
include/linux/skbuff.h
···
  }

  /**
+ * skb_queue_len_lockless - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ * This variant can be used in lockless contexts.
+ */
+ static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
+ {
+     return READ_ONCE(list_->qlen);
+ }
+
+ /**
  * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
  * @list: queue to initialize
  *
···
  {
  struct sk_buff *next, *prev;

- list->qlen--;
+ WRITE_ONCE(list->qlen, list->qlen - 1);
  next = skb->next;
  prev = skb->prev;
  skb->next = skb->prev = NULL;
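This is the fix for item 7 of the pull message: the writer updates qlen with WRITE_ONCE() under the queue lock, and lockless readers use skb_queue_len_lockless(), so the compiler can neither tear nor re-fuse the racing accesses. A user-space analogy using C11 relaxed atomics in place of READ_ONCE()/WRITE_ONCE(); this mirrors the intent, not the kernel implementation:

#include <stdatomic.h>
#include <stdio.h>

struct queue {
    atomic_uint qlen;   /* kernel: __u32 qlen + READ_ONCE/WRITE_ONCE */
};

static void queue_unlink_one(struct queue *q)   /* writer, under its lock */
{
    atomic_store_explicit(&q->qlen,
                          atomic_load_explicit(&q->qlen, memory_order_relaxed) - 1,
                          memory_order_relaxed);
}

static unsigned int queue_len_lockless(struct queue *q) /* any context */
{
    return atomic_load_explicit(&q->qlen, memory_order_relaxed);
}

int main(void)
{
    struct queue q = { .qlen = 3 };

    queue_unlink_one(&q);
    printf("qlen=%u\n", queue_len_lockless(&q));    /* 2 */
    return 0;
}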
-5
include/net/ipx.h
···
  /* From af_ipx.c */
  extern int sysctl_ipx_pprop_broadcasting;

- static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
- {
-     return (struct ipxhdr *)skb_transport_header(skb);
- }
-
  struct ipx_interface {
      /* IPX address */
      __be32 if_netnum;
+2 -3
kernel/bpf/bpf_struct_ops.c
···

  static const struct btf_type *module_type;

- void bpf_struct_ops_init(struct btf *btf)
+ void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
  {
  s32 type_id, value_id, module_id;
  const struct btf_member *member;
  struct bpf_struct_ops *st_ops;
- struct bpf_verifier_log log = {};
  const struct btf_type *t;
  char value_name[128];
  const char *mname;
···
                                   member->type,
                                   NULL);
  if (func_proto &&
-     btf_distill_func_proto(&log, btf,
+     btf_distill_func_proto(log, btf,
                             func_proto, mname,
                             &st_ops->func_models[j])) {
      pr_warn("Error in parsing func ptr %s in struct %s\n",
+4 -6
kernel/bpf/btf.c
···
      goto errout;
  }

- bpf_struct_ops_init(btf);
+ bpf_struct_ops_init(btf, log);

  btf_verifier_env_free(env);
  refcount_set(&btf->refcnt, 1);
···

  if (btf_type_is_ptr(mtype)) {
      const struct btf_type *stype;
+     u32 id;

      if (msize != size || off != moff) {
          bpf_log(log,
···
          return -EACCES;
      }

-     stype = btf_type_by_id(btf_vmlinux, mtype->type);
-     /* skip modifiers */
-     while (btf_type_is_modifier(stype))
-         stype = btf_type_by_id(btf_vmlinux, stype->type);
+     stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id);
      if (btf_type_is_struct(stype)) {
-         *next_btf_id = mtype->type;
+         *next_btf_id = id;
          return PTR_TO_BTF_ID;
      }
  }
+3 -2
net/core/bpf_sk_storage.c
···
      return ERR_PTR(-ENOMEM);
  bpf_map_init_from_attr(&smap->map, attr);

+ nbuckets = roundup_pow_of_two(num_possible_cpus());
  /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
- smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
- nbuckets = 1U << smap->bucket_log;
+ nbuckets = max_t(u32, 2, nbuckets);
+ smap->bucket_log = ilog2(nbuckets);
  cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);

  ret = bpf_map_charge_init(&smap->map.memory, cost);
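Per the commit title this "improves the calculation logic"; the old and new expressions produce the same table size, which a quick user-space check suggests (ilog2() and roundup_pow_of_two() are modelled here with GCC/Clang builtins):

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v) { return 31 - __builtin_clz(v); }

static unsigned int roundup_pow_of_two_u32(unsigned int v)
{
    return v == 1 ? 1 : 1u << (32 - __builtin_clz(v - 1));
}

int main(void)
{
    for (unsigned int cpus = 1; cpus <= 64; cpus++) {
        /* old: bucket_log = max(1, ilog2(roundup_pow_of_two(cpus))) */
        unsigned int old_log = ilog2_u32(roundup_pow_of_two_u32(cpus));
        if (old_log < 1)
            old_log = 1;

        /* new: nbuckets = max(2, roundup_pow_of_two(cpus)) */
        unsigned int nbuckets = roundup_pow_of_two_u32(cpus);
        if (nbuckets < 2)
            nbuckets = 2;

        if (old_log != ilog2_u32(nbuckets))
            printf("mismatch at %u cpus\n", cpus);
    }
    puts("old and new formulas agree for 1..64 CPUs");
    return 0;
}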
+6
net/core/devlink.c
···
      goto out_unlock;
  }

+ /* return 0 if there is no further data to read */
+ if (start_offset >= region->size) {
+     err = 0;
+     goto out_unlock;
+ }
+
  hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                    &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
                    DEVLINK_CMD_REGION_READ);
+3 -1
net/core/drop_monitor.c
···
  {
  int cpu;

- if (!monitor_hw)
+ if (!monitor_hw) {
      NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
+     return;
+ }

  monitor_hw = false;
+18 -10
net/core/sock_map.c
···
  int i;

  synchronize_rcu();
- rcu_read_lock();
  raw_spin_lock_bh(&stab->lock);
  for (i = 0; i < stab->map.max_entries; i++) {
      struct sock **psk = &stab->sks[i];
···
      sk = xchg(psk, NULL);
      if (sk) {
          lock_sock(sk);
+         rcu_read_lock();
          sock_map_unref(sk, psk);
+         rcu_read_unlock();
          release_sock(sk);
      }
  }
  raw_spin_unlock_bh(&stab->lock);
- rcu_read_unlock();

+ /* wait for psock readers accessing its map link */
  synchronize_rcu();

  bpf_map_area_free(stab->sks);
···
      ret = -EINVAL;
      goto out;
  }
- if (!sock_map_sk_is_suitable(sk) ||
-     sk->sk_state != TCP_ESTABLISHED) {
+ if (!sock_map_sk_is_suitable(sk)) {
      ret = -EOPNOTSUPP;
      goto out;
  }

  sock_map_sk_acquire(sk);
- ret = sock_map_update_common(map, idx, sk, flags);
+ if (sk->sk_state != TCP_ESTABLISHED)
+     ret = -EOPNOTSUPP;
+ else
+     ret = sock_map_update_common(map, idx, sk, flags);
  sock_map_sk_release(sk);
  out:
  fput(sock->file);
···
      ret = -EINVAL;
      goto out;
  }
- if (!sock_map_sk_is_suitable(sk) ||
-     sk->sk_state != TCP_ESTABLISHED) {
+ if (!sock_map_sk_is_suitable(sk)) {
      ret = -EOPNOTSUPP;
      goto out;
  }

  sock_map_sk_acquire(sk);
- ret = sock_hash_update_common(map, key, sk, flags);
+ if (sk->sk_state != TCP_ESTABLISHED)
+     ret = -EOPNOTSUPP;
+ else
+     ret = sock_hash_update_common(map, key, sk, flags);
  sock_map_sk_release(sk);
  out:
  fput(sock->file);
···
  int i;

  synchronize_rcu();
- rcu_read_lock();
  for (i = 0; i < htab->buckets_num; i++) {
      bucket = sock_hash_select_bucket(htab, i);
      raw_spin_lock_bh(&bucket->lock);
      hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
          hlist_del_rcu(&elem->node);
          lock_sock(elem->sk);
+         rcu_read_lock();
          sock_map_unref(elem->sk, elem);
+         rcu_read_unlock();
          release_sock(elem->sk);
      }
      raw_spin_unlock_bh(&bucket->lock);
  }
- rcu_read_unlock();
+
+ /* wait for psock readers accessing its map link */
+ synchronize_rcu();

  bpf_map_area_free(htab->buckets);
  kfree(htab);
+3
net/ipv6/addrconf.c
··· 5718 5718 struct nlattr *tb[IFLA_INET6_MAX + 1]; 5719 5719 int err; 5720 5720 5721 + if (!idev) 5722 + return -EAFNOSUPPORT; 5723 + 5721 5724 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0) 5722 5725 BUG(); 5723 5726
+36 -70
net/mptcp/protocol.c
··· 24 24 25 25 #define MPTCP_SAME_STATE TCP_MAX_STATES 26 26 27 - static void __mptcp_close(struct sock *sk, long timeout); 28 - 29 - static const struct proto_ops *tcp_proto_ops(struct sock *sk) 30 - { 31 27 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 32 - if (sk->sk_family == AF_INET6) 33 - return &inet6_stream_ops; 28 + struct mptcp6_sock { 29 + struct mptcp_sock msk; 30 + struct ipv6_pinfo np; 31 + }; 34 32 #endif 35 - return &inet_stream_ops; 36 - } 37 - 38 - /* MP_CAPABLE handshake failed, convert msk to plain tcp, replacing 39 - * socket->sk and stream ops and destroying msk 40 - * return the msk socket, as we can't access msk anymore after this function 41 - * completes 42 - * Called with msk lock held, releases such lock before returning 43 - */ 44 - static struct socket *__mptcp_fallback_to_tcp(struct mptcp_sock *msk, 45 - struct sock *ssk) 46 - { 47 - struct mptcp_subflow_context *subflow; 48 - struct socket *sock; 49 - struct sock *sk; 50 - 51 - sk = (struct sock *)msk; 52 - sock = sk->sk_socket; 53 - subflow = mptcp_subflow_ctx(ssk); 54 - 55 - /* detach the msk socket */ 56 - list_del_init(&subflow->node); 57 - sock_orphan(sk); 58 - sock->sk = NULL; 59 - 60 - /* socket is now TCP */ 61 - lock_sock(ssk); 62 - sock_graft(ssk, sock); 63 - if (subflow->conn) { 64 - /* We can't release the ULP data on a live socket, 65 - * restore the tcp callback 66 - */ 67 - mptcp_subflow_tcp_fallback(ssk, subflow); 68 - sock_put(subflow->conn); 69 - subflow->conn = NULL; 70 - } 71 - release_sock(ssk); 72 - sock->ops = tcp_proto_ops(ssk); 73 - 74 - /* destroy the left-over msk sock */ 75 - __mptcp_close(sk, 0); 76 - return sock; 77 - } 78 33 79 34 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not 80 35 * completed yet or has failed, return the subflow socket. ··· 48 93 return msk->first && !sk_is_mptcp(msk->first); 49 94 } 50 95 51 - /* if the mp_capable handshake has failed, it fallbacks msk to plain TCP, 52 - * releases the socket lock and returns a reference to the now TCP socket. 
53 - * Otherwise returns NULL 54 - */ 55 96 static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk) 56 97 { 57 98 sock_owned_by_me((const struct sock *)msk); ··· 56 105 return NULL; 57 106 58 107 if (msk->subflow) { 59 - /* the first subflow is an active connection, discart the 60 - * paired socket 61 - */ 62 - msk->subflow->sk = NULL; 63 - sock_release(msk->subflow); 64 - msk->subflow = NULL; 108 + release_sock((struct sock *)msk); 109 + return msk->subflow; 65 110 } 66 111 67 - return __mptcp_fallback_to_tcp(msk, msk->first); 112 + return NULL; 68 113 } 69 114 70 115 static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk) ··· 587 640 } 588 641 589 642 /* Called with msk lock held, releases such lock before returning */ 590 - static void __mptcp_close(struct sock *sk, long timeout) 643 + static void mptcp_close(struct sock *sk, long timeout) 591 644 { 592 645 struct mptcp_subflow_context *subflow, *tmp; 593 646 struct mptcp_sock *msk = mptcp_sk(sk); 594 647 LIST_HEAD(conn_list); 648 + 649 + lock_sock(sk); 595 650 596 651 mptcp_token_destroy(msk->token); 597 652 inet_sk_state_store(sk, TCP_CLOSE); ··· 609 660 } 610 661 611 662 sk_common_release(sk); 612 - } 613 - 614 - static void mptcp_close(struct sock *sk, long timeout) 615 - { 616 - lock_sock(sk); 617 - __mptcp_close(sk, timeout); 618 663 } 619 664 620 665 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) ··· 632 689 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; 633 690 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; 634 691 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; 692 + } 693 + 694 + #if IS_ENABLED(CONFIG_MPTCP_IPV6) 695 + static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) 696 + { 697 + unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo); 698 + 699 + return (struct ipv6_pinfo *)(((u8 *)sk) + offset); 700 + } 701 + #endif 702 + 703 + struct sock *mptcp_sk_clone_lock(const struct sock *sk) 704 + { 705 + struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); 706 + 707 + if (!nsk) 708 + return NULL; 709 + 710 + #if IS_ENABLED(CONFIG_MPTCP_IPV6) 711 + if (nsk->sk_family == AF_INET6) 712 + inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); 713 + #endif 714 + 715 + return nsk; 635 716 } 636 717 637 718 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, ··· 688 721 lock_sock(sk); 689 722 690 723 local_bh_disable(); 691 - new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC); 724 + new_mptcp_sock = mptcp_sk_clone_lock(sk); 692 725 if (!new_mptcp_sock) { 693 726 *err = -ENOBUFS; 694 727 local_bh_enable(); ··· 1237 1270 strcpy(mptcp_v6_prot.name, "MPTCPv6"); 1238 1271 mptcp_v6_prot.slab = NULL; 1239 1272 mptcp_v6_prot.destroy = mptcp_v6_destroy; 1240 - mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) + 1241 - sizeof(struct ipv6_pinfo); 1273 + mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); 1242 1274 1243 1275 err = proto_register(&mptcp_v6_prot, 1); 1244 1276 if (err)
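The mptcp6_sock trick works because the ipv6_pinfo area is the trailing member of a single allocation, letting mptcp_inet6_sk() recover it from the base pointer at a fixed offset. A runnable sketch of the layout trick; the struct names below are invented stand-ins, not kernel types:

#include <stdio.h>
#include <stdlib.h>

struct base_sock { char pad[128]; };	/* stand-in for struct mptcp_sock */
struct v6_extra  { int flow_label; };	/* stand-in for struct ipv6_pinfo */

struct combined {			/* stand-in for struct mptcp6_sock */
	struct base_sock sk;		/* must come first, like .msk */
	struct v6_extra  np;		/* trailing area, like .np */
};

/* mirrors mptcp_inet6_sk(): derive the trailing area from the base pointer */
static struct v6_extra *extra_of(struct base_sock *sk)
{
	size_t offset = sizeof(struct combined) - sizeof(struct v6_extra);

	return (struct v6_extra *)((char *)sk + offset);
}

int main(void)
{
	struct combined *c = calloc(1, sizeof(*c));

	extra_of(&c->sk)->flow_label = 7;
	printf("%d\n", c->np.flow_label);	/* prints 7: same storage */
	free(c);
	return 0;
}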
+19 -3
net/rxrpc/call_object.c
··· 562 562 } 563 563 564 564 /* 565 - * Final call destruction under RCU. 565 + * Final call destruction - but must be done in process context. 566 566 */ 567 - static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) 567 + static void rxrpc_destroy_call(struct work_struct *work) 568 568 { 569 - struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); 569 + struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); 570 570 struct rxrpc_net *rxnet = call->rxnet; 571 571 572 572 rxrpc_put_connection(call->conn); ··· 576 576 kmem_cache_free(rxrpc_call_jar, call); 577 577 if (atomic_dec_and_test(&rxnet->nr_calls)) 578 578 wake_up_var(&rxnet->nr_calls); 579 + } 580 + 581 + /* 582 + * Final call destruction under RCU. 583 + */ 584 + static void rxrpc_rcu_destroy_call(struct rcu_head *rcu) 585 + { 586 + struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu); 587 + 588 + if (in_softirq()) { 589 + INIT_WORK(&call->processor, rxrpc_destroy_call); 590 + if (!rxrpc_queue_work(&call->processor)) 591 + BUG(); 592 + } else { 593 + rxrpc_destroy_call(&call->processor); 594 + } 579 595 } 580 596 581 597 /*
+1 -2
net/rxrpc/conn_object.c
··· 171 171 172 172 _enter("%d,%x", conn->debug_id, call->cid); 173 173 174 - set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 175 - 176 174 if (rcu_access_pointer(chan->call) == call) { 177 175 /* Save the result of the call so that we can repeat it if necessary 178 176 * through the channel, whilst disposing of the actual call record. ··· 223 225 __rxrpc_disconnect_call(conn, call); 224 226 spin_unlock(&conn->channel_lock); 225 227 228 + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 226 229 conn->idle_timestamp = jiffies; 227 230 } 228 231
+1 -2
net/sched/cls_tcindex.c
··· 365 365 366 366 err = tcindex_filter_result_init(&new_filter_result, net); 367 367 if (err < 0) 368 - goto errout1; 368 + goto errout_alloc; 369 369 if (old_r) 370 370 cr = r->res; 371 371 ··· 484 484 tcindex_free_perfect_hash(cp); 485 485 else if (balloc == 2) 486 486 kfree(cp->h); 487 - errout1: 488 487 tcf_exts_destroy(&new_filter_result.exts); 489 488 errout: 490 489 kfree(cp);
+1 -1
net/sched/sch_fq_pie.c
··· 349 349 while (sch->q.qlen > sch->limit) { 350 350 struct sk_buff *skb = fq_pie_qdisc_dequeue(sch); 351 351 352 - kfree_skb(skb); 353 352 len_dropped += qdisc_pkt_len(skb); 354 353 num_dropped += 1; 354 + rtnl_kfree_skbs(skb, skb); 355 355 } 356 356 qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped); 357 357
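The fq_pie change is a use-after-free fix: the old loop freed the skb and then read qdisc_pkt_len(skb) from it (rtnl_kfree_skbs() additionally defers the actual free until the RTNL lock is dropped). The ordering rule in miniature, as a runnable sketch:

#include <stdio.h>
#include <stdlib.h>

struct pkt { unsigned int len; };	/* stand-in for an skb */

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));
	unsigned int dropped_len;

	p->len = 1500;

	/* the buggy order was: free(p); dropped_len = p->len; -- a read
	 * from freed memory. Read everything needed first, then free. */
	dropped_len = p->len;
	free(p);

	printf("accounted %u dropped bytes\n", dropped_len);
	return 0;
}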
+57 -35
net/sched/sch_taprio.c
··· 31 31 
 32 32 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
 33 33 #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
 34 + #define TAPRIO_FLAGS_INVALID U32_MAX
 34 35 
 35 36 struct sched_entry {
 36 37 struct list_head list;
··· 767 766 [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 },
 768 767 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
 769 768 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
 769 + [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
 770 770 
 771 771 
 772 772 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
··· 1369 1367 return 0;
 1370 1368 }
 1371 1369 
 1370 + /* The semantics of the 'flags' argument in relation to 'change()'
 1371 + * requests are interpreted following two rules (which are applied in
 1372 + * this order): (1) an omitted 'flags' argument is interpreted as
 1373 + * zero; (2) the 'flags' of a "running" taprio instance cannot be
 1374 + * changed.
 1375 + */
 1376 + static int taprio_new_flags(const struct nlattr *attr, u32 old,
 1377 + struct netlink_ext_ack *extack)
 1378 + {
 1379 + u32 new = 0;
 1380 + 
 1381 + if (attr)
 1382 + new = nla_get_u32(attr);
 1383 + 
 1384 + if (old != TAPRIO_FLAGS_INVALID && old != new) {
 1385 + NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
 1386 + return -EOPNOTSUPP;
 1387 + }
 1388 + 
 1389 + if (!taprio_flags_valid(new)) {
 1390 + NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
 1391 + return -EINVAL;
 1392 + }
 1393 + 
 1394 + return new;
 1395 + }
 1396 + 
 1372 1397 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 1373 1398 struct netlink_ext_ack *extack)
 1374 1399 {
··· 1404 1375 struct taprio_sched *q = qdisc_priv(sch);
 1405 1376 struct net_device *dev = qdisc_dev(sch);
 1406 1377 struct tc_mqprio_qopt *mqprio = NULL;
 1407 - u32 taprio_flags = 0;
 1408 1378 unsigned long flags;
 1409 1379 ktime_t start;
 1410 1380 int i, err;
··· 1416 1388 if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
 1417 1389 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
 1418 1390 
 1419 - if (tb[TCA_TAPRIO_ATTR_FLAGS]) {
 1420 - taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]);
 1391 + err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
 1392 + q->flags, extack);
 1393 + if (err < 0)
 1394 + return err;
 1421 1395 
 1422 - if (q->flags != 0 && q->flags != taprio_flags) {
 1423 - NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
 1424 - return -EOPNOTSUPP;
 1425 - } else if (!taprio_flags_valid(taprio_flags)) {
 1426 - NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
 1427 - return -EINVAL;
 1428 - }
 1396 + q->flags = err;
 1429 1397 
 1430 - q->flags = taprio_flags;
 1431 - }
 1432 - 
 1433 - err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags);
 1398 + err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
 1434 1399 if (err < 0)
 1435 1400 return err;
 1436 1401 
··· 1465 1444 
 1466 1445 taprio_set_picos_per_byte(dev, q);
 1467 1446 
 1468 - if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
 1447 + if (mqprio) {
 1448 + netdev_set_num_tc(dev, mqprio->num_tc);
 1449 + for (i = 0; i < mqprio->num_tc; i++)
 1450 + netdev_set_tc_queue(dev, i,
 1451 + mqprio->count[i],
 1452 + mqprio->offset[i]);
 1453 + 
 1454 + /* Always use supplied priority mappings */
 1455 + for (i = 0; i <= TC_BITMASK; i++)
 1456 + netdev_set_prio_tc_map(dev, i,
 1457 + mqprio->prio_tc_map[i]);
 1458 + }
 1459 + 
 1460 + if (FULL_OFFLOAD_IS_ENABLED(q->flags))
 1469 1461 err = taprio_enable_offload(dev, 
mqprio, q, new_admin, extack); 1470 1462 else 1471 1463 err = taprio_disable_offload(dev, q, extack); ··· 1498 1464 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); 1499 1465 } 1500 1466 1501 - if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) && 1502 - !FULL_OFFLOAD_IS_ENABLED(taprio_flags) && 1467 + if (!TXTIME_ASSIST_IS_ENABLED(q->flags) && 1468 + !FULL_OFFLOAD_IS_ENABLED(q->flags) && 1503 1469 !hrtimer_active(&q->advance_timer)) { 1504 1470 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); 1505 1471 q->advance_timer.function = advance_sched; 1506 1472 } 1507 1473 1508 - if (mqprio) { 1509 - netdev_set_num_tc(dev, mqprio->num_tc); 1510 - for (i = 0; i < mqprio->num_tc; i++) 1511 - netdev_set_tc_queue(dev, i, 1512 - mqprio->count[i], 1513 - mqprio->offset[i]); 1514 - 1515 - /* Always use supplied priority mappings */ 1516 - for (i = 0; i <= TC_BITMASK; i++) 1517 - netdev_set_prio_tc_map(dev, i, 1518 - mqprio->prio_tc_map[i]); 1519 - } 1520 - 1521 - if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) { 1474 + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { 1522 1475 q->dequeue = taprio_dequeue_offload; 1523 1476 q->peek = taprio_peek_offload; 1524 1477 } else { ··· 1522 1501 goto unlock; 1523 1502 } 1524 1503 1525 - if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) { 1526 - setup_txtime(q, new_admin, start); 1504 + setup_txtime(q, new_admin, start); 1527 1505 1506 + if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { 1528 1507 if (!oper) { 1529 1508 rcu_assign_pointer(q->oper_sched, new_admin); 1530 1509 err = 0; ··· 1549 1528 1550 1529 spin_unlock_irqrestore(&q->current_entry_lock, flags); 1551 1530 1552 - if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) 1531 + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) 1553 1532 taprio_offload_config_changed(q); 1554 1533 } 1555 1534 ··· 1588 1567 } 1589 1568 q->qdiscs = NULL; 1590 1569 1591 - netdev_set_num_tc(dev, 0); 1570 + netdev_reset_tc(dev); 1592 1571 1593 1572 if (q->oper_sched) 1594 1573 call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb); ··· 1618 1597 * and get the valid one on taprio_change(). 1619 1598 */ 1620 1599 q->clockid = -1; 1600 + q->flags = TAPRIO_FLAGS_INVALID; 1621 1601 1622 1602 spin_lock(&taprio_list_lock); 1623 1603 list_add(&q->taprio_list, &taprio_list);
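taprio_new_flags() folds the two rules from its comment into one helper, using TAPRIO_FLAGS_INVALID (U32_MAX) as a "never configured" sentinel, so a first change() may set any valid flags and later ones may only repeat them. A runnable sketch of just that decision table; these are plain-C stand-ins for the nlattr plumbing, and the taprio_flags_valid() check is omitted:

#include <stdio.h>
#include <stdint.h>

#define FLAGS_INVALID UINT32_MAX	/* like TAPRIO_FLAGS_INVALID */

/* attr_present/attr_val stand in for the optional netlink attribute */
static int new_flags(int attr_present, uint32_t attr_val, uint32_t old)
{
	uint32_t new = attr_present ? attr_val : 0;	/* rule (1) */

	if (old != FLAGS_INVALID && old != new)		/* rule (2) */
		return -1;	/* the kernel returns -EOPNOTSUPP here */
	return (int)new;
}

int main(void)
{
	printf("%d\n", new_flags(0, 0, FLAGS_INVALID));	/* fresh qdisc: 0 */
	printf("%d\n", new_flags(1, 2, 2));		/* same flags again: ok */
	printf("%d\n", new_flags(0, 0, 2));		/* omitted != running 2: error */
	return 0;
}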
+9 -2
net/unix/af_unix.c
··· 189 189 return unix_peer(osk) == NULL || unix_our_peer(sk, osk); 190 190 } 191 191 192 - static inline int unix_recvq_full(struct sock const *sk) 192 + static inline int unix_recvq_full(const struct sock *sk) 193 193 { 194 194 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; 195 + } 196 + 197 + static inline int unix_recvq_full_lockless(const struct sock *sk) 198 + { 199 + return skb_queue_len_lockless(&sk->sk_receive_queue) > 200 + READ_ONCE(sk->sk_max_ack_backlog); 195 201 } 196 202 197 203 struct sock *unix_peer_get(struct sock *s) ··· 1764 1758 * - unix_peer(sk) == sk by time of get but disconnected before lock 1765 1759 */ 1766 1760 if (other != sk && 1767 - unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { 1761 + unlikely(unix_peer(other) != sk && 1762 + unix_recvq_full_lockless(other))) { 1768 1763 if (timeo) { 1769 1764 timeo = unix_wait_for_peer(other, timeo); 1770 1765
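skb_queue_len_lockless() and READ_ONCE() annotate loads that deliberately race with writers; they behave roughly like relaxed atomic loads, preventing the compiler from tearing or refetching the values. Approximately the same idea in userspace C11 atomics (illustrative only, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int qlen;		/* like sk_receive_queue.qlen */
static _Atomic unsigned int max_backlog;	/* like sk_max_ack_backlog */

/* counterpart of unix_recvq_full_lockless(): both loads are annotated */
static int recvq_full_lockless(void)
{
	return atomic_load_explicit(&qlen, memory_order_relaxed) >
	       atomic_load_explicit(&max_backlog, memory_order_relaxed);
}

int main(void)
{
	atomic_store_explicit(&max_backlog, 10, memory_order_relaxed);
	atomic_store_explicit(&qlen, 11, memory_order_relaxed);
	printf("full=%d\n", recvq_full_lockless());	/* prints full=1 */
	return 0;
}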
+2 -2
samples/bpf/xdpsock_user.c
··· 83 83 static u32 opt_umem_flags; 84 84 static int opt_unaligned_chunks; 85 85 static int opt_mmap_flags; 86 - static u32 opt_xdp_bind_flags; 87 86 static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 88 87 static int opt_timeout = 1000; 89 88 static bool opt_need_wakeup = true; ··· 788 789 int ret; 789 790 790 791 ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0); 791 - if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY) 792 + if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || 793 + errno == EBUSY || errno == ENETDOWN) 792 794 return; 793 795 exit_with_error(errno); 794 796 }
+1 -1
tools/bpf/bpftool/feature.c
··· 580 580 res = bpf_probe_large_insn_limit(ifindex); 581 581 print_bool_feature("have_large_insn_limit", 582 582 "Large program size limit", 583 - "HAVE_LARGE_INSN_LIMIT", 583 + "LARGE_INSN_LIMIT", 584 584 res, define_prefix); 585 585 } 586 586
+1 -1
tools/bpf/bpftool/prog.c
··· 536 536 buf = (unsigned char *)(info->jited_prog_insns); 537 537 member_len = info->jited_prog_len; 538 538 } else { /* DUMP_XLATED */ 539 - if (info->xlated_prog_len == 0) { 539 + if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) { 540 540 p_err("error retrieving insn dump: kernel.kptr_restrict set?"); 541 541 return -1; 542 542 }
+2 -2
tools/bpf/runqslower/Makefile
··· 41 41 42 42 $(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ) 43 43 $(call msg,BINARY,$@) 44 - $(Q)$(CC) $(CFLAGS) -lelf -lz $^ -o $@ 44 + $(Q)$(CC) $(CFLAGS) $^ -lelf -lz -o $@ 45 45 46 46 $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \ 47 47 $(OUTPUT)/runqslower.bpf.o ··· 75 75 fi 76 76 $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@ 77 77 78 - $(BPFOBJ): | $(OUTPUT) 78 + $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT) 79 79 $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \ 80 80 OUTPUT=$(abspath $(dir $@))/ $(abspath $@) 81 81
+74
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (c) 2020 Cloudflare 3 + 4 + #include "test_progs.h" 5 + 6 + static int connected_socket_v4(void) 7 + { 8 + struct sockaddr_in addr = { 9 + .sin_family = AF_INET, 10 + .sin_port = htons(80), 11 + .sin_addr = { inet_addr("127.0.0.1") }, 12 + }; 13 + socklen_t len = sizeof(addr); 14 + int s, repair, err; 15 + 16 + s = socket(AF_INET, SOCK_STREAM, 0); 17 + if (CHECK_FAIL(s == -1)) 18 + goto error; 19 + 20 + repair = TCP_REPAIR_ON; 21 + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); 22 + if (CHECK_FAIL(err)) 23 + goto error; 24 + 25 + err = connect(s, (struct sockaddr *)&addr, len); 26 + if (CHECK_FAIL(err)) 27 + goto error; 28 + 29 + repair = TCP_REPAIR_OFF_NO_WP; 30 + err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair)); 31 + if (CHECK_FAIL(err)) 32 + goto error; 33 + 34 + return s; 35 + error: 36 + perror(__func__); 37 + close(s); 38 + return -1; 39 + } 40 + 41 + /* Create a map, populate it with one socket, and free the map. */ 42 + static void test_sockmap_create_update_free(enum bpf_map_type map_type) 43 + { 44 + const int zero = 0; 45 + int s, map, err; 46 + 47 + s = connected_socket_v4(); 48 + if (CHECK_FAIL(s == -1)) 49 + return; 50 + 51 + map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0); 52 + if (CHECK_FAIL(map == -1)) { 53 + perror("bpf_create_map"); 54 + goto out; 55 + } 56 + 57 + err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST); 58 + if (CHECK_FAIL(err)) { 59 + perror("bpf_map_update"); 60 + goto out; 61 + } 62 + 63 + out: 64 + close(map); 65 + close(s); 66 + } 67 + 68 + void test_sockmap_basic(void) 69 + { 70 + if (test__start_subtest("sockmap create_update_free")) 71 + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP); 72 + if (test__start_subtest("sockhash create_update_free")) 73 + test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH); 74 + }
+1 -1
tools/testing/selftests/bpf/prog_tests/trampoline_count.c
··· 46 46 const char *fentry_name = "fentry/__set_task_comm"; 47 47 const char *fexit_name = "fexit/__set_task_comm"; 48 48 const char *object = "test_trampoline_count.o"; 49 - struct inst inst[MAX_TRAMP_PROGS] = { 0 }; 49 + struct inst inst[MAX_TRAMP_PROGS] = {}; 50 50 int err, i = 0, duration = 0; 51 51 struct bpf_object *obj; 52 52 struct bpf_link *link;
+76
tools/testing/selftests/drivers/net/mlxsw/fib.sh
··· 14 14 ipv4_plen
 15 15 ipv4_replay
 16 16 ipv4_flush
 17 + ipv4_local_replace
 17 18 ipv6_add
 18 19 ipv6_metric
 19 20 ipv6_append_single
··· 27 26 ipv6_delete_multipath
 28 27 ipv6_replay_single
 29 28 ipv6_replay_multipath
 29 + ipv6_local_replace
 30 30 "
 31 31 NUM_NETIFS=0
 32 32 source $lib_dir/lib.sh
··· 91 89 fib_ipv4_flush_test "testns1"
 92 90 }
 93 91 
 92 + ipv4_local_replace()
 93 + {
 94 + local ns="testns1"
 95 + 
 96 + RET=0
 97 + 
 98 + ip -n $ns link add name dummy1 type dummy
 99 + ip -n $ns link set dev dummy1 up
 100 + 
 101 + ip -n $ns route add table local 192.0.2.1/32 dev dummy1
 102 + fib4_trap_check $ns "table local 192.0.2.1/32 dev dummy1" false
 103 + check_err $? "Local table route not in hardware when should"
 104 + 
 105 + ip -n $ns route add table main 192.0.2.1/32 dev dummy1
 106 + fib4_trap_check $ns "table main 192.0.2.1/32 dev dummy1" true
 107 + check_err $? "Main table route in hardware when should not"
 108 + 
 109 + fib4_trap_check $ns "table local 192.0.2.1/32 dev dummy1" false
 110 + check_err $? "Local table route was replaced when should not"
 111 + 
 112 + # Test that local routes can replace routes in main table.
 113 + ip -n $ns route add table main 192.0.2.2/32 dev dummy1
 114 + fib4_trap_check $ns "table main 192.0.2.2/32 dev dummy1" false
 115 + check_err $? "Main table route not in hardware when should"
 116 + 
 117 + ip -n $ns route add table local 192.0.2.2/32 dev dummy1
 118 + fib4_trap_check $ns "table local 192.0.2.2/32 dev dummy1" false
 119 + check_err $? "Local table route did not replace route in main table when should"
 120 + 
 121 + fib4_trap_check $ns "table main 192.0.2.2/32 dev dummy1" true
 122 + check_err $? "Main table route was not replaced when should"
 123 + 
 124 + log_test "IPv4 local table route replacement"
 125 + 
 126 + ip -n $ns link del dev dummy1
 127 + }
 128 + 
 94 129 ipv6_add()
 95 130 {
 96 131 fib_ipv6_add_test "testns1"
··· 186 147 ipv6_replay_multipath()
 187 148 {
 188 149 fib_ipv6_replay_multipath_test "testns1" "$DEVLINK_DEV"
 150 + }
 151 + 
 152 + ipv6_local_replace()
 153 + {
 154 + local ns="testns1"
 155 + 
 156 + RET=0
 157 + 
 158 + ip -n $ns link add name dummy1 type dummy
 159 + ip -n $ns link set dev dummy1 up
 160 + 
 161 + ip -n $ns route add table local 2001:db8:1::1/128 dev dummy1
 162 + fib6_trap_check $ns "table local 2001:db8:1::1/128 dev dummy1" false
 163 + check_err $? "Local table route not in hardware when should"
 164 + 
 165 + ip -n $ns route add table main 2001:db8:1::1/128 dev dummy1
 166 + fib6_trap_check $ns "table main 2001:db8:1::1/128 dev dummy1" true
 167 + check_err $? "Main table route in hardware when should not"
 168 + 
 169 + fib6_trap_check $ns "table local 2001:db8:1::1/128 dev dummy1" false
 170 + check_err $? "Local table route was replaced when should not"
 171 + 
 172 + # Test that local routes can replace routes in main table.
 173 + ip -n $ns route add table main 2001:db8:1::2/128 dev dummy1
 174 + fib6_trap_check $ns "table main 2001:db8:1::2/128 dev dummy1" false
 175 + check_err $? "Main table route not in hardware when should"
 176 + 
 177 + ip -n $ns route add table local 2001:db8:1::2/128 dev dummy1
 178 + fib6_trap_check $ns "table local 2001:db8:1::2/128 dev dummy1" false
 179 + check_err $? "Local table route did not replace route in main table when should"
 180 + 
 181 + fib6_trap_check $ns "table main 2001:db8:1::2/128 dev dummy1" true
 182 + check_err $? "Main table route was not replaced when should"
 183 + 
 184 + log_test "IPv6 local table route replacement"
 185 + 
 186 + ip -n $ns link del dev dummy1
 189 187 }
 190 188 
 191 189 setup_prepare()
+9
tools/testing/selftests/net/mptcp/mptcp_connect.c
··· 634 634 cfg_host, a, cfg_port, b); 635 635 } 636 636 637 + static void maybe_close(int fd) 638 + { 639 + unsigned int r = rand(); 640 + 641 + if (r & 1) 642 + close(fd); 643 + } 644 + 637 645 int main_loop_s(int listensock) 638 646 { 639 647 struct sockaddr_storage ss; ··· 665 657 salen = sizeof(ss); 666 658 remotesock = accept(listensock, (struct sockaddr *)&ss, &salen); 667 659 if (remotesock >= 0) { 660 + maybe_close(listensock); 668 661 check_sockaddr(pf, &ss, salen); 669 662 check_getpeername(remotesock, &ss, salen); 670 663
+14 -9
tools/testing/selftests/wireguard/netns.sh
··· 38 38 ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } 39 39 ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } 40 40 sleep() { read -t "$1" -N 1 || true; } 41 - waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } 42 - waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } 43 - waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } 41 + waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } 42 + waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } 44 43 waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } 45 44 46 45 cleanup() { ··· 118 119 119 120 # TCP over IPv4 120 121 n2 iperf3 -s -1 -B 192.168.241.2 & 121 - waitiperf $netns2 122 + waitiperf $netns2 $! 122 123 n1 iperf3 -Z -t 3 -c 192.168.241.2 123 124 124 125 # TCP over IPv6 125 126 n1 iperf3 -s -1 -B fd00::1 & 126 - waitiperf $netns1 127 + waitiperf $netns1 $! 127 128 n2 iperf3 -Z -t 3 -c fd00::1 128 129 129 130 # UDP over IPv4 130 131 n1 iperf3 -s -1 -B 192.168.241.1 & 131 - waitiperf $netns1 132 + waitiperf $netns1 $! 132 133 n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1 133 134 134 135 # UDP over IPv6 135 136 n2 iperf3 -s -1 -B fd00::2 & 136 - waitiperf $netns2 137 + waitiperf $netns2 $! 137 138 n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 138 139 } 139 140 ··· 206 207 n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24 207 208 exec 4< <(n1 ncat -l -u -p 1111) 208 209 ncat_pid=$! 209 - waitncatudp $netns1 210 + waitncatudp $netns1 $ncat_pid 210 211 n2 ncat -u 192.168.241.1 1111 <<<"X" 211 212 read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]] 212 213 kill $ncat_pid ··· 215 216 n2 wg set wg0 listen-port 9997 216 217 exec 4< <(n1 ncat -l -u -p 1111) 217 218 ncat_pid=$! 218 - waitncatudp $netns1 219 + waitncatudp $netns1 $ncat_pid 219 220 n2 ncat -u 192.168.241.1 1111 <<<"X" 220 221 ! read -r -N 1 -t 1 out <&4 || false 221 222 kill $ncat_pid ··· 515 516 n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 516 517 n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 517 518 n0 wg set wg0 peer "$pub2" allowed-ips ::/0 519 + n0 wg set wg0 peer "$pub2" remove 520 + low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= ) 521 + n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer } 522 + [[ -z $(n0 wg show wg0 peers) ]] 523 + n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer } 524 + [[ -z $(n0 wg show wg0 peers) ]] 518 525 ip0 link del wg0 519 526 520 527 declare -A objects
-1
tools/testing/selftests/wireguard/qemu/debug.config
··· 1 1 CONFIG_LOCALVERSION="-debug" 2 - CONFIG_ENABLE_WARN_DEPRECATED=y 3 2 CONFIG_ENABLE_MUST_CHECK=y 4 3 CONFIG_FRAME_POINTER=y 5 4 CONFIG_STACK_VALIDATION=y