Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'net-6.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Including fixes from bluetooth and wireless.

Current release - new code bugs:

- wifi: nl80211: disable multi-link reconfiguration

Previous releases - regressions:

- gso: fix ownership in __udp_gso_segment

- wifi: iwlwifi:
- fix A-MSDU TSO preparation
- free pages allocated when failing to build A-MSDU

- ipv6: fix dst ref loop in ila lwtunnel

- mptcp: fix 'scheduling while atomic' in
mptcp_pm_nl_append_new_local_addr

- bluetooth: add check for mgmt_alloc_skb() in
mgmt_device_connected()

- ethtool: allow NULL nlattrs when getting a phy_device

- eth: be2net: fix sleeping while atomic bugs in
be_ndo_bridge_getlink

Previous releases - always broken:

- core: support TCP GSO case for a few missing flags

- wifi: mac80211:
- fix vendor-specific inheritance
- cleanup sta TXQs on flush

- llc: do not use skb_get() before dev_queue_xmit()

- eth: ipa: enable checksum for IPA_ENDPOINT_AP_MODEM_{RX,TX}
for v4.7"

* tag 'net-6.14-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (41 commits)
net: ipv6: fix missing dst ref drop in ila lwtunnel
net: ipv6: fix dst ref loop in ila lwtunnel
mctp i3c: handle NULL header address
net: dsa: mt7530: Fix traffic flooding for MMIO devices
net-timestamp: support TCP GSO case for a few missing flags
vlan: enforce underlying device type
mptcp: fix 'scheduling while atomic' in mptcp_pm_nl_append_new_local_addr
net: ethtool: netlink: Allow NULL nlattrs when getting a phy_device
ppp: Fix KMSAN uninit-value warning with bpf
net: ipa: Enable checksum for IPA_ENDPOINT_AP_MODEM_{RX,TX} for v4.7
net: ipa: Fix QSB data for v4.7
net: ipa: Fix v4.7 resource group names
net: hns3: make sure ptp clock is unregister and freed if hclge_ptp_get_cycle returns an error
wifi: nl80211: disable multi-link reconfiguration
net: dsa: rtl8366rb: don't prompt users for LED control
be2net: fix sleeping while atomic bugs in be_ndo_bridge_getlink
llc: do not use skb_get() before dev_queue_xmit()
wifi: cfg80211: regulatory: improve invalid hints checking
caif_virtio: fix wrong pointer check in cfv_probe()
net: gso: fix ownership in __udp_gso_segment
...

+499 -316
-1
.mailmap
··· 88 88 Antonio Quartulli <antonio@mandelbit.com> <antonio.quartulli@open-mesh.com> 89 89 Antonio Quartulli <antonio@mandelbit.com> <ordex@autistici.org> 90 90 Antonio Quartulli <antonio@mandelbit.com> <ordex@ritirata.org> 91 - Antonio Quartulli <antonio@mandelbit.com> <antonio@openvpn.net> 92 91 Antonio Quartulli <antonio@mandelbit.com> <a@unstable.cc> 93 92 Anup Patel <anup@brainfault.org> <anup.patel@wdc.com> 94 93 Archit Taneja <archit@ti.com>
+1
drivers/bluetooth/btusb.c
··· 3644 3644 } 3645 3645 3646 3646 static const struct file_operations force_poll_sync_fops = { 3647 + .owner = THIS_MODULE, 3647 3648 .open = simple_open, 3648 3649 .read = force_poll_sync_read, 3649 3650 .write = force_poll_sync_write,
+1 -1
drivers/net/caif/caif_virtio.c
··· 745 745 746 746 if (cfv->vr_rx) 747 747 vdev->vringh_config->del_vrhs(cfv->vdev); 748 - if (cfv->vdev) 748 + if (cfv->vq_tx) 749 749 vdev->config->del_vqs(cfv->vdev); 750 750 free_netdev(netdev); 751 751 return err;
+2 -6
drivers/net/dsa/mt7530.c
··· 2591 2591 if (ret < 0) 2592 2592 return ret; 2593 2593 2594 - return 0; 2594 + /* Setup VLAN ID 0 for VLAN-unaware bridges */ 2595 + return mt7530_setup_vlan0(priv); 2595 2596 } 2596 2597 2597 2598 static int ··· 2685 2684 } 2686 2685 2687 2686 ret = mt7531_setup_common(ds); 2688 - if (ret) 2689 - return ret; 2690 - 2691 - /* Setup VLAN ID 0 for VLAN-unaware bridges */ 2692 - ret = mt7530_setup_vlan0(priv); 2693 2687 if (ret) 2694 2688 return ret; 2695 2689
+1 -1
drivers/net/dsa/realtek/Kconfig
··· 44 44 Select to enable support for Realtek RTL8366RB. 45 45 46 46 config NET_DSA_REALTEK_RTL8366RB_LEDS 47 - bool "Support RTL8366RB LED control" 47 + bool 48 48 depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB) 49 49 depends on NET_DSA_REALTEK_RTL8366RB 50 50 default NET_DSA_REALTEK_RTL8366RB
+1 -1
drivers/net/ethernet/emulex/benet/be.h
··· 562 562 struct be_dma_mem mbox_mem_alloced; 563 563 564 564 struct be_mcc_obj mcc_obj; 565 - struct mutex mcc_lock; /* For serializing mcc cmds to BE card */ 565 + spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ 566 566 spinlock_t mcc_cq_lock; 567 567 568 568 u16 cfg_num_rx_irqs; /* configured via set-channels */
+98 -99
drivers/net/ethernet/emulex/benet/be_cmds.c
··· 575 575 /* Wait till no more pending mcc requests are present */ 576 576 static int be_mcc_wait_compl(struct be_adapter *adapter) 577 577 { 578 - #define mcc_timeout 12000 /* 12s timeout */ 578 + #define mcc_timeout 120000 /* 12s timeout */ 579 579 int i, status = 0; 580 580 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 581 581 ··· 589 589 590 590 if (atomic_read(&mcc_obj->q.used) == 0) 591 591 break; 592 - usleep_range(500, 1000); 592 + udelay(100); 593 593 } 594 594 if (i == mcc_timeout) { 595 595 dev_err(&adapter->pdev->dev, "FW not responding\n"); ··· 866 866 static int be_cmd_lock(struct be_adapter *adapter) 867 867 { 868 868 if (use_mcc(adapter)) { 869 - mutex_lock(&adapter->mcc_lock); 869 + spin_lock_bh(&adapter->mcc_lock); 870 870 return 0; 871 871 } else { 872 872 return mutex_lock_interruptible(&adapter->mbox_lock); ··· 877 877 static void be_cmd_unlock(struct be_adapter *adapter) 878 878 { 879 879 if (use_mcc(adapter)) 880 - return mutex_unlock(&adapter->mcc_lock); 880 + return spin_unlock_bh(&adapter->mcc_lock); 881 881 else 882 882 return mutex_unlock(&adapter->mbox_lock); 883 883 } ··· 1047 1047 struct be_cmd_req_mac_query *req; 1048 1048 int status; 1049 1049 1050 - mutex_lock(&adapter->mcc_lock); 1050 + spin_lock_bh(&adapter->mcc_lock); 1051 1051 1052 1052 wrb = wrb_from_mccq(adapter); 1053 1053 if (!wrb) { ··· 1076 1076 } 1077 1077 1078 1078 err: 1079 - mutex_unlock(&adapter->mcc_lock); 1079 + spin_unlock_bh(&adapter->mcc_lock); 1080 1080 return status; 1081 1081 } 1082 1082 ··· 1088 1088 struct be_cmd_req_pmac_add *req; 1089 1089 int status; 1090 1090 1091 - mutex_lock(&adapter->mcc_lock); 1091 + spin_lock_bh(&adapter->mcc_lock); 1092 1092 1093 1093 wrb = wrb_from_mccq(adapter); 1094 1094 if (!wrb) { ··· 1113 1113 } 1114 1114 1115 1115 err: 1116 - mutex_unlock(&adapter->mcc_lock); 1116 + spin_unlock_bh(&adapter->mcc_lock); 1117 1117 1118 1118 if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) 1119 1119 status = -EPERM; ··· 1131 1131 
if (pmac_id == -1) 1132 1132 return 0; 1133 1133 1134 - mutex_lock(&adapter->mcc_lock); 1134 + spin_lock_bh(&adapter->mcc_lock); 1135 1135 1136 1136 wrb = wrb_from_mccq(adapter); 1137 1137 if (!wrb) { ··· 1151 1151 status = be_mcc_notify_wait(adapter); 1152 1152 1153 1153 err: 1154 - mutex_unlock(&adapter->mcc_lock); 1154 + spin_unlock_bh(&adapter->mcc_lock); 1155 1155 return status; 1156 1156 } 1157 1157 ··· 1414 1414 struct be_dma_mem *q_mem = &rxq->dma_mem; 1415 1415 int status; 1416 1416 1417 - mutex_lock(&adapter->mcc_lock); 1417 + spin_lock_bh(&adapter->mcc_lock); 1418 1418 1419 1419 wrb = wrb_from_mccq(adapter); 1420 1420 if (!wrb) { ··· 1444 1444 } 1445 1445 1446 1446 err: 1447 - mutex_unlock(&adapter->mcc_lock); 1447 + spin_unlock_bh(&adapter->mcc_lock); 1448 1448 return status; 1449 1449 } 1450 1450 ··· 1508 1508 struct be_cmd_req_q_destroy *req; 1509 1509 int status; 1510 1510 1511 - mutex_lock(&adapter->mcc_lock); 1511 + spin_lock_bh(&adapter->mcc_lock); 1512 1512 1513 1513 wrb = wrb_from_mccq(adapter); 1514 1514 if (!wrb) { ··· 1525 1525 q->created = false; 1526 1526 1527 1527 err: 1528 - mutex_unlock(&adapter->mcc_lock); 1528 + spin_unlock_bh(&adapter->mcc_lock); 1529 1529 return status; 1530 1530 } 1531 1531 ··· 1593 1593 struct be_cmd_req_hdr *hdr; 1594 1594 int status = 0; 1595 1595 1596 - mutex_lock(&adapter->mcc_lock); 1596 + spin_lock_bh(&adapter->mcc_lock); 1597 1597 1598 1598 wrb = wrb_from_mccq(adapter); 1599 1599 if (!wrb) { ··· 1621 1621 adapter->stats_cmd_sent = true; 1622 1622 1623 1623 err: 1624 - mutex_unlock(&adapter->mcc_lock); 1624 + spin_unlock_bh(&adapter->mcc_lock); 1625 1625 return status; 1626 1626 } 1627 1627 ··· 1637 1637 CMD_SUBSYSTEM_ETH)) 1638 1638 return -EPERM; 1639 1639 1640 - mutex_lock(&adapter->mcc_lock); 1640 + spin_lock_bh(&adapter->mcc_lock); 1641 1641 1642 1642 wrb = wrb_from_mccq(adapter); 1643 1643 if (!wrb) { ··· 1660 1660 adapter->stats_cmd_sent = true; 1661 1661 1662 1662 err: 1663 - 
mutex_unlock(&adapter->mcc_lock); 1663 + spin_unlock_bh(&adapter->mcc_lock); 1664 1664 return status; 1665 1665 } 1666 1666 ··· 1697 1697 struct be_cmd_req_link_status *req; 1698 1698 int status; 1699 1699 1700 - mutex_lock(&adapter->mcc_lock); 1700 + spin_lock_bh(&adapter->mcc_lock); 1701 1701 1702 1702 if (link_status) 1703 1703 *link_status = LINK_DOWN; ··· 1736 1736 } 1737 1737 1738 1738 err: 1739 - mutex_unlock(&adapter->mcc_lock); 1739 + spin_unlock_bh(&adapter->mcc_lock); 1740 1740 return status; 1741 1741 } 1742 1742 ··· 1747 1747 struct be_cmd_req_get_cntl_addnl_attribs *req; 1748 1748 int status = 0; 1749 1749 1750 - mutex_lock(&adapter->mcc_lock); 1750 + spin_lock_bh(&adapter->mcc_lock); 1751 1751 1752 1752 wrb = wrb_from_mccq(adapter); 1753 1753 if (!wrb) { ··· 1762 1762 1763 1763 status = be_mcc_notify(adapter); 1764 1764 err: 1765 - mutex_unlock(&adapter->mcc_lock); 1765 + spin_unlock_bh(&adapter->mcc_lock); 1766 1766 return status; 1767 1767 } 1768 1768 ··· 1811 1811 if (!get_fat_cmd.va) 1812 1812 return -ENOMEM; 1813 1813 1814 - mutex_lock(&adapter->mcc_lock); 1814 + spin_lock_bh(&adapter->mcc_lock); 1815 1815 1816 1816 while (total_size) { 1817 1817 buf_size = min(total_size, (u32)60 * 1024); ··· 1849 1849 log_offset += buf_size; 1850 1850 } 1851 1851 err: 1852 + spin_unlock_bh(&adapter->mcc_lock); 1852 1853 dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, 1853 1854 get_fat_cmd.va, get_fat_cmd.dma); 1854 - mutex_unlock(&adapter->mcc_lock); 1855 1855 return status; 1856 1856 } 1857 1857 ··· 1862 1862 struct be_cmd_req_get_fw_version *req; 1863 1863 int status; 1864 1864 1865 - mutex_lock(&adapter->mcc_lock); 1865 + spin_lock_bh(&adapter->mcc_lock); 1866 1866 1867 1867 wrb = wrb_from_mccq(adapter); 1868 1868 if (!wrb) { ··· 1885 1885 sizeof(adapter->fw_on_flash)); 1886 1886 } 1887 1887 err: 1888 - mutex_unlock(&adapter->mcc_lock); 1888 + spin_unlock_bh(&adapter->mcc_lock); 1889 1889 return status; 1890 1890 } 1891 1891 ··· 1899 1899 struct 
be_cmd_req_modify_eq_delay *req; 1900 1900 int status = 0, i; 1901 1901 1902 - mutex_lock(&adapter->mcc_lock); 1902 + spin_lock_bh(&adapter->mcc_lock); 1903 1903 1904 1904 wrb = wrb_from_mccq(adapter); 1905 1905 if (!wrb) { ··· 1922 1922 1923 1923 status = be_mcc_notify(adapter); 1924 1924 err: 1925 - mutex_unlock(&adapter->mcc_lock); 1925 + spin_unlock_bh(&adapter->mcc_lock); 1926 1926 return status; 1927 1927 } 1928 1928 ··· 1949 1949 struct be_cmd_req_vlan_config *req; 1950 1950 int status; 1951 1951 1952 - mutex_lock(&adapter->mcc_lock); 1952 + spin_lock_bh(&adapter->mcc_lock); 1953 1953 1954 1954 wrb = wrb_from_mccq(adapter); 1955 1955 if (!wrb) { ··· 1971 1971 1972 1972 status = be_mcc_notify_wait(adapter); 1973 1973 err: 1974 - mutex_unlock(&adapter->mcc_lock); 1974 + spin_unlock_bh(&adapter->mcc_lock); 1975 1975 return status; 1976 1976 } 1977 1977 ··· 1982 1982 struct be_cmd_req_rx_filter *req = mem->va; 1983 1983 int status; 1984 1984 1985 - mutex_lock(&adapter->mcc_lock); 1985 + spin_lock_bh(&adapter->mcc_lock); 1986 1986 1987 1987 wrb = wrb_from_mccq(adapter); 1988 1988 if (!wrb) { ··· 2015 2015 2016 2016 status = be_mcc_notify_wait(adapter); 2017 2017 err: 2018 - mutex_unlock(&adapter->mcc_lock); 2018 + spin_unlock_bh(&adapter->mcc_lock); 2019 2019 return status; 2020 2020 } 2021 2021 ··· 2046 2046 CMD_SUBSYSTEM_COMMON)) 2047 2047 return -EPERM; 2048 2048 2049 - mutex_lock(&adapter->mcc_lock); 2049 + spin_lock_bh(&adapter->mcc_lock); 2050 2050 2051 2051 wrb = wrb_from_mccq(adapter); 2052 2052 if (!wrb) { ··· 2066 2066 status = be_mcc_notify_wait(adapter); 2067 2067 2068 2068 err: 2069 - mutex_unlock(&adapter->mcc_lock); 2069 + spin_unlock_bh(&adapter->mcc_lock); 2070 2070 2071 2071 if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED) 2072 2072 return -EOPNOTSUPP; ··· 2085 2085 CMD_SUBSYSTEM_COMMON)) 2086 2086 return -EPERM; 2087 2087 2088 - mutex_lock(&adapter->mcc_lock); 2088 + spin_lock_bh(&adapter->mcc_lock); 2089 2089 2090 2090 wrb = 
wrb_from_mccq(adapter); 2091 2091 if (!wrb) { ··· 2108 2108 } 2109 2109 2110 2110 err: 2111 - mutex_unlock(&adapter->mcc_lock); 2111 + spin_unlock_bh(&adapter->mcc_lock); 2112 2112 return status; 2113 2113 } 2114 2114 ··· 2189 2189 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) 2190 2190 return 0; 2191 2191 2192 - mutex_lock(&adapter->mcc_lock); 2192 + spin_lock_bh(&adapter->mcc_lock); 2193 2193 2194 2194 wrb = wrb_from_mccq(adapter); 2195 2195 if (!wrb) { ··· 2214 2214 2215 2215 status = be_mcc_notify_wait(adapter); 2216 2216 err: 2217 - mutex_unlock(&adapter->mcc_lock); 2217 + spin_unlock_bh(&adapter->mcc_lock); 2218 2218 return status; 2219 2219 } 2220 2220 ··· 2226 2226 struct be_cmd_req_enable_disable_beacon *req; 2227 2227 int status; 2228 2228 2229 - mutex_lock(&adapter->mcc_lock); 2229 + spin_lock_bh(&adapter->mcc_lock); 2230 2230 2231 2231 wrb = wrb_from_mccq(adapter); 2232 2232 if (!wrb) { ··· 2247 2247 status = be_mcc_notify_wait(adapter); 2248 2248 2249 2249 err: 2250 - mutex_unlock(&adapter->mcc_lock); 2250 + spin_unlock_bh(&adapter->mcc_lock); 2251 2251 return status; 2252 2252 } 2253 2253 ··· 2258 2258 struct be_cmd_req_get_beacon_state *req; 2259 2259 int status; 2260 2260 2261 - mutex_lock(&adapter->mcc_lock); 2261 + spin_lock_bh(&adapter->mcc_lock); 2262 2262 2263 2263 wrb = wrb_from_mccq(adapter); 2264 2264 if (!wrb) { ··· 2282 2282 } 2283 2283 2284 2284 err: 2285 - mutex_unlock(&adapter->mcc_lock); 2285 + spin_unlock_bh(&adapter->mcc_lock); 2286 2286 return status; 2287 2287 } 2288 2288 ··· 2306 2306 return -ENOMEM; 2307 2307 } 2308 2308 2309 - mutex_lock(&adapter->mcc_lock); 2309 + spin_lock_bh(&adapter->mcc_lock); 2310 2310 2311 2311 wrb = wrb_from_mccq(adapter); 2312 2312 if (!wrb) { ··· 2328 2328 memcpy(data, resp->page_data + off, len); 2329 2329 } 2330 2330 err: 2331 - mutex_unlock(&adapter->mcc_lock); 2331 + spin_unlock_bh(&adapter->mcc_lock); 2332 2332 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 2333 2333 
return status; 2334 2334 } ··· 2345 2345 void *ctxt = NULL; 2346 2346 int status; 2347 2347 2348 - mutex_lock(&adapter->mcc_lock); 2348 + spin_lock_bh(&adapter->mcc_lock); 2349 2349 adapter->flash_status = 0; 2350 2350 2351 2351 wrb = wrb_from_mccq(adapter); ··· 2387 2387 if (status) 2388 2388 goto err_unlock; 2389 2389 2390 - mutex_unlock(&adapter->mcc_lock); 2390 + spin_unlock_bh(&adapter->mcc_lock); 2391 2391 2392 2392 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2393 2393 msecs_to_jiffies(60000))) ··· 2406 2406 return status; 2407 2407 2408 2408 err_unlock: 2409 - mutex_unlock(&adapter->mcc_lock); 2409 + spin_unlock_bh(&adapter->mcc_lock); 2410 2410 return status; 2411 2411 } 2412 2412 ··· 2460 2460 struct be_mcc_wrb *wrb; 2461 2461 int status; 2462 2462 2463 - mutex_lock(&adapter->mcc_lock); 2463 + spin_lock_bh(&adapter->mcc_lock); 2464 2464 2465 2465 wrb = wrb_from_mccq(adapter); 2466 2466 if (!wrb) { ··· 2478 2478 2479 2479 status = be_mcc_notify_wait(adapter); 2480 2480 err: 2481 - mutex_unlock(&adapter->mcc_lock); 2481 + spin_unlock_bh(&adapter->mcc_lock); 2482 2482 return status; 2483 2483 } 2484 2484 ··· 2491 2491 struct lancer_cmd_resp_read_object *resp; 2492 2492 int status; 2493 2493 2494 - mutex_lock(&adapter->mcc_lock); 2494 + spin_lock_bh(&adapter->mcc_lock); 2495 2495 2496 2496 wrb = wrb_from_mccq(adapter); 2497 2497 if (!wrb) { ··· 2525 2525 } 2526 2526 2527 2527 err_unlock: 2528 - mutex_unlock(&adapter->mcc_lock); 2528 + spin_unlock_bh(&adapter->mcc_lock); 2529 2529 return status; 2530 2530 } 2531 2531 ··· 2537 2537 struct be_cmd_write_flashrom *req; 2538 2538 int status; 2539 2539 2540 - mutex_lock(&adapter->mcc_lock); 2540 + spin_lock_bh(&adapter->mcc_lock); 2541 2541 adapter->flash_status = 0; 2542 2542 2543 2543 wrb = wrb_from_mccq(adapter); ··· 2562 2562 if (status) 2563 2563 goto err_unlock; 2564 2564 2565 - mutex_unlock(&adapter->mcc_lock); 2565 + spin_unlock_bh(&adapter->mcc_lock); 2566 2566 2567 2567 if 
(!wait_for_completion_timeout(&adapter->et_cmd_compl, 2568 2568 msecs_to_jiffies(40000))) ··· 2573 2573 return status; 2574 2574 2575 2575 err_unlock: 2576 - mutex_unlock(&adapter->mcc_lock); 2576 + spin_unlock_bh(&adapter->mcc_lock); 2577 2577 return status; 2578 2578 } 2579 2579 ··· 2584 2584 struct be_mcc_wrb *wrb; 2585 2585 int status; 2586 2586 2587 - mutex_lock(&adapter->mcc_lock); 2587 + spin_lock_bh(&adapter->mcc_lock); 2588 2588 2589 2589 wrb = wrb_from_mccq(adapter); 2590 2590 if (!wrb) { ··· 2611 2611 memcpy(flashed_crc, req->crc, 4); 2612 2612 2613 2613 err: 2614 - mutex_unlock(&adapter->mcc_lock); 2614 + spin_unlock_bh(&adapter->mcc_lock); 2615 2615 return status; 2616 2616 } 2617 2617 ··· 3217 3217 struct be_cmd_req_acpi_wol_magic_config *req; 3218 3218 int status; 3219 3219 3220 - mutex_lock(&adapter->mcc_lock); 3220 + spin_lock_bh(&adapter->mcc_lock); 3221 3221 3222 3222 wrb = wrb_from_mccq(adapter); 3223 3223 if (!wrb) { ··· 3234 3234 status = be_mcc_notify_wait(adapter); 3235 3235 3236 3236 err: 3237 - mutex_unlock(&adapter->mcc_lock); 3237 + spin_unlock_bh(&adapter->mcc_lock); 3238 3238 return status; 3239 3239 } 3240 3240 ··· 3249 3249 CMD_SUBSYSTEM_LOWLEVEL)) 3250 3250 return -EPERM; 3251 3251 3252 - mutex_lock(&adapter->mcc_lock); 3252 + spin_lock_bh(&adapter->mcc_lock); 3253 3253 3254 3254 wrb = wrb_from_mccq(adapter); 3255 3255 if (!wrb) { ··· 3272 3272 if (status) 3273 3273 goto err_unlock; 3274 3274 3275 - mutex_unlock(&adapter->mcc_lock); 3275 + spin_unlock_bh(&adapter->mcc_lock); 3276 3276 3277 3277 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 3278 3278 msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) ··· 3281 3281 return status; 3282 3282 3283 3283 err_unlock: 3284 - mutex_unlock(&adapter->mcc_lock); 3284 + spin_unlock_bh(&adapter->mcc_lock); 3285 3285 return status; 3286 3286 } 3287 3287 ··· 3298 3298 CMD_SUBSYSTEM_LOWLEVEL)) 3299 3299 return -EPERM; 3300 3300 3301 - mutex_lock(&adapter->mcc_lock); 3301 + 
spin_lock_bh(&adapter->mcc_lock); 3302 3302 3303 3303 wrb = wrb_from_mccq(adapter); 3304 3304 if (!wrb) { ··· 3324 3324 if (status) 3325 3325 goto err; 3326 3326 3327 - mutex_unlock(&adapter->mcc_lock); 3327 + spin_unlock_bh(&adapter->mcc_lock); 3328 3328 3329 3329 wait_for_completion(&adapter->et_cmd_compl); 3330 3330 resp = embedded_payload(wrb); ··· 3332 3332 3333 3333 return status; 3334 3334 err: 3335 - mutex_unlock(&adapter->mcc_lock); 3335 + spin_unlock_bh(&adapter->mcc_lock); 3336 3336 return status; 3337 3337 } 3338 3338 ··· 3348 3348 CMD_SUBSYSTEM_LOWLEVEL)) 3349 3349 return -EPERM; 3350 3350 3351 - mutex_lock(&adapter->mcc_lock); 3351 + spin_lock_bh(&adapter->mcc_lock); 3352 3352 3353 3353 wrb = wrb_from_mccq(adapter); 3354 3354 if (!wrb) { ··· 3382 3382 } 3383 3383 3384 3384 err: 3385 - mutex_unlock(&adapter->mcc_lock); 3385 + spin_unlock_bh(&adapter->mcc_lock); 3386 3386 return status; 3387 3387 } 3388 3388 ··· 3393 3393 struct be_cmd_req_seeprom_read *req; 3394 3394 int status; 3395 3395 3396 - mutex_lock(&adapter->mcc_lock); 3396 + spin_lock_bh(&adapter->mcc_lock); 3397 3397 3398 3398 wrb = wrb_from_mccq(adapter); 3399 3399 if (!wrb) { ··· 3409 3409 status = be_mcc_notify_wait(adapter); 3410 3410 3411 3411 err: 3412 - mutex_unlock(&adapter->mcc_lock); 3412 + spin_unlock_bh(&adapter->mcc_lock); 3413 3413 return status; 3414 3414 } 3415 3415 ··· 3424 3424 CMD_SUBSYSTEM_COMMON)) 3425 3425 return -EPERM; 3426 3426 3427 - mutex_lock(&adapter->mcc_lock); 3427 + spin_lock_bh(&adapter->mcc_lock); 3428 3428 3429 3429 wrb = wrb_from_mccq(adapter); 3430 3430 if (!wrb) { ··· 3469 3469 } 3470 3470 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 3471 3471 err: 3472 - mutex_unlock(&adapter->mcc_lock); 3472 + spin_unlock_bh(&adapter->mcc_lock); 3473 3473 return status; 3474 3474 } 3475 3475 ··· 3479 3479 struct be_cmd_req_set_qos *req; 3480 3480 int status; 3481 3481 3482 - mutex_lock(&adapter->mcc_lock); 3482 + spin_lock_bh(&adapter->mcc_lock); 
3483 3483 3484 3484 wrb = wrb_from_mccq(adapter); 3485 3485 if (!wrb) { ··· 3499 3499 status = be_mcc_notify_wait(adapter); 3500 3500 3501 3501 err: 3502 - mutex_unlock(&adapter->mcc_lock); 3502 + spin_unlock_bh(&adapter->mcc_lock); 3503 3503 return status; 3504 3504 } 3505 3505 ··· 3611 3611 struct be_cmd_req_get_fn_privileges *req; 3612 3612 int status; 3613 3613 3614 - mutex_lock(&adapter->mcc_lock); 3614 + spin_lock_bh(&adapter->mcc_lock); 3615 3615 3616 3616 wrb = wrb_from_mccq(adapter); 3617 3617 if (!wrb) { ··· 3643 3643 } 3644 3644 3645 3645 err: 3646 - mutex_unlock(&adapter->mcc_lock); 3646 + spin_unlock_bh(&adapter->mcc_lock); 3647 3647 return status; 3648 3648 } 3649 3649 ··· 3655 3655 struct be_cmd_req_set_fn_privileges *req; 3656 3656 int status; 3657 3657 3658 - mutex_lock(&adapter->mcc_lock); 3658 + spin_lock_bh(&adapter->mcc_lock); 3659 3659 3660 3660 wrb = wrb_from_mccq(adapter); 3661 3661 if (!wrb) { ··· 3675 3675 3676 3676 status = be_mcc_notify_wait(adapter); 3677 3677 err: 3678 - mutex_unlock(&adapter->mcc_lock); 3678 + spin_unlock_bh(&adapter->mcc_lock); 3679 3679 return status; 3680 3680 } 3681 3681 ··· 3707 3707 return -ENOMEM; 3708 3708 } 3709 3709 3710 - mutex_lock(&adapter->mcc_lock); 3710 + spin_lock_bh(&adapter->mcc_lock); 3711 3711 3712 3712 wrb = wrb_from_mccq(adapter); 3713 3713 if (!wrb) { ··· 3771 3771 } 3772 3772 3773 3773 out: 3774 - mutex_unlock(&adapter->mcc_lock); 3774 + spin_unlock_bh(&adapter->mcc_lock); 3775 3775 dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, 3776 3776 get_mac_list_cmd.va, get_mac_list_cmd.dma); 3777 3777 return status; ··· 3831 3831 if (!cmd.va) 3832 3832 return -ENOMEM; 3833 3833 3834 - mutex_lock(&adapter->mcc_lock); 3834 + spin_lock_bh(&adapter->mcc_lock); 3835 3835 3836 3836 wrb = wrb_from_mccq(adapter); 3837 3837 if (!wrb) { ··· 3853 3853 3854 3854 err: 3855 3855 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 3856 - mutex_unlock(&adapter->mcc_lock); 3856 + 
spin_unlock_bh(&adapter->mcc_lock); 3857 3857 return status; 3858 3858 } 3859 3859 ··· 3889 3889 CMD_SUBSYSTEM_COMMON)) 3890 3890 return -EPERM; 3891 3891 3892 - mutex_lock(&adapter->mcc_lock); 3892 + spin_lock_bh(&adapter->mcc_lock); 3893 3893 3894 3894 wrb = wrb_from_mccq(adapter); 3895 3895 if (!wrb) { ··· 3930 3930 status = be_mcc_notify_wait(adapter); 3931 3931 3932 3932 err: 3933 - mutex_unlock(&adapter->mcc_lock); 3933 + spin_unlock_bh(&adapter->mcc_lock); 3934 3934 return status; 3935 3935 } 3936 3936 ··· 3944 3944 int status; 3945 3945 u16 vid; 3946 3946 3947 - mutex_lock(&adapter->mcc_lock); 3947 + spin_lock_bh(&adapter->mcc_lock); 3948 3948 3949 3949 wrb = wrb_from_mccq(adapter); 3950 3950 if (!wrb) { ··· 3991 3991 } 3992 3992 3993 3993 err: 3994 - mutex_unlock(&adapter->mcc_lock); 3994 + spin_unlock_bh(&adapter->mcc_lock); 3995 3995 return status; 3996 3996 } 3997 3997 ··· 4190 4190 struct be_cmd_req_set_ext_fat_caps *req; 4191 4191 int status; 4192 4192 4193 - mutex_lock(&adapter->mcc_lock); 4193 + spin_lock_bh(&adapter->mcc_lock); 4194 4194 4195 4195 wrb = wrb_from_mccq(adapter); 4196 4196 if (!wrb) { ··· 4206 4206 4207 4207 status = be_mcc_notify_wait(adapter); 4208 4208 err: 4209 - mutex_unlock(&adapter->mcc_lock); 4209 + spin_unlock_bh(&adapter->mcc_lock); 4210 4210 return status; 4211 4211 } 4212 4212 ··· 4684 4684 if (iface == 0xFFFFFFFF) 4685 4685 return -1; 4686 4686 4687 - mutex_lock(&adapter->mcc_lock); 4687 + spin_lock_bh(&adapter->mcc_lock); 4688 4688 4689 4689 wrb = wrb_from_mccq(adapter); 4690 4690 if (!wrb) { ··· 4701 4701 4702 4702 status = be_mcc_notify_wait(adapter); 4703 4703 err: 4704 - mutex_unlock(&adapter->mcc_lock); 4704 + spin_unlock_bh(&adapter->mcc_lock); 4705 4705 return status; 4706 4706 } 4707 4707 ··· 4735 4735 struct be_cmd_resp_get_iface_list *resp; 4736 4736 int status; 4737 4737 4738 - mutex_lock(&adapter->mcc_lock); 4738 + spin_lock_bh(&adapter->mcc_lock); 4739 4739 4740 4740 wrb = wrb_from_mccq(adapter); 4741 4741 
if (!wrb) { ··· 4756 4756 } 4757 4757 4758 4758 err: 4759 - mutex_unlock(&adapter->mcc_lock); 4759 + spin_unlock_bh(&adapter->mcc_lock); 4760 4760 return status; 4761 4761 } 4762 4762 ··· 4850 4850 if (BEx_chip(adapter)) 4851 4851 return 0; 4852 4852 4853 - mutex_lock(&adapter->mcc_lock); 4853 + spin_lock_bh(&adapter->mcc_lock); 4854 4854 4855 4855 wrb = wrb_from_mccq(adapter); 4856 4856 if (!wrb) { ··· 4868 4868 req->enable = 1; 4869 4869 status = be_mcc_notify_wait(adapter); 4870 4870 err: 4871 - mutex_unlock(&adapter->mcc_lock); 4871 + spin_unlock_bh(&adapter->mcc_lock); 4872 4872 return status; 4873 4873 } 4874 4874 ··· 4941 4941 u32 link_config = 0; 4942 4942 int status; 4943 4943 4944 - mutex_lock(&adapter->mcc_lock); 4944 + spin_lock_bh(&adapter->mcc_lock); 4945 4945 4946 4946 wrb = wrb_from_mccq(adapter); 4947 4947 if (!wrb) { ··· 4969 4969 4970 4970 status = be_mcc_notify_wait(adapter); 4971 4971 err: 4972 - mutex_unlock(&adapter->mcc_lock); 4972 + spin_unlock_bh(&adapter->mcc_lock); 4973 4973 return status; 4974 4974 } 4975 4975 ··· 5000 5000 struct be_mcc_wrb *wrb; 5001 5001 int status; 5002 5002 5003 - if (mutex_lock_interruptible(&adapter->mcc_lock)) 5004 - return -1; 5003 + spin_lock_bh(&adapter->mcc_lock); 5005 5004 5006 5005 wrb = wrb_from_mccq(adapter); 5007 5006 if (!wrb) { ··· 5038 5039 dev_info(&adapter->pdev->dev, 5039 5040 "Adapter does not support HW error recovery\n"); 5040 5041 5041 - mutex_unlock(&adapter->mcc_lock); 5042 + spin_unlock_bh(&adapter->mcc_lock); 5042 5043 return status; 5043 5044 } 5044 5045 ··· 5052 5053 struct be_cmd_resp_hdr *resp; 5053 5054 int status; 5054 5055 5055 - mutex_lock(&adapter->mcc_lock); 5056 + spin_lock_bh(&adapter->mcc_lock); 5056 5057 5057 5058 wrb = wrb_from_mccq(adapter); 5058 5059 if (!wrb) { ··· 5075 5076 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); 5076 5077 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); 5077 5078 err: 5078 - 
mutex_unlock(&adapter->mcc_lock); 5079 + spin_unlock_bh(&adapter->mcc_lock); 5079 5080 return status; 5080 5081 } 5081 5082 EXPORT_SYMBOL(be_roce_mcc_cmd);
+1 -1
drivers/net/ethernet/emulex/benet/be_main.c
··· 5667 5667 } 5668 5668 5669 5669 mutex_init(&adapter->mbox_lock); 5670 - mutex_init(&adapter->mcc_lock); 5671 5670 mutex_init(&adapter->rx_filter_lock); 5671 + spin_lock_init(&adapter->mcc_lock); 5672 5672 spin_lock_init(&adapter->mcc_cq_lock); 5673 5673 init_completion(&adapter->et_cmd_compl); 5674 5674
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
··· 483 483 484 484 ret = hclge_ptp_get_cycle(hdev); 485 485 if (ret) 486 - return ret; 486 + goto out; 487 487 } 488 488 489 489 ret = hclge_ptp_int_en(hdev, true);
+4 -2
drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
··· 11 11 #include "dwmac_dma.h" 12 12 #include "dwmac1000.h" 13 13 14 + #define DRIVER_NAME "dwmac-loongson-pci" 15 + 14 16 /* Normal Loongson Tx Summary */ 15 17 #define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000 16 18 /* Normal Loongson Rx Summary */ ··· 570 568 for (i = 0; i < PCI_STD_NUM_BARS; i++) { 571 569 if (pci_resource_len(pdev, i) == 0) 572 570 continue; 573 - ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); 571 + ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME); 574 572 if (ret) 575 573 goto err_disable_device; 576 574 break; ··· 689 687 MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); 690 688 691 689 static struct pci_driver loongson_dwmac_driver = { 692 - .name = "dwmac-loongson-pci", 690 + .name = DRIVER_NAME, 693 691 .id_table = loongson_dwmac_id_table, 694 692 .probe = loongson_dwmac_probe, 695 693 .remove = loongson_dwmac_remove,
+9 -9
drivers/net/ipa/data/ipa_data-v4.7.c
··· 28 28 enum ipa_rsrc_group_id { 29 29 /* Source resource group identifiers */ 30 30 IPA_RSRC_GROUP_SRC_UL_DL = 0, 31 - IPA_RSRC_GROUP_SRC_UC_RX_Q, 32 31 IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ 33 32 34 33 /* Destination resource group identifiers */ 35 - IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, 36 - IPA_RSRC_GROUP_DST_UNUSED_1, 34 + IPA_RSRC_GROUP_DST_UL_DL = 0, 37 35 IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ 38 36 }; 39 37 40 38 /* QSB configuration data for an SoC having IPA v4.7 */ 41 39 static const struct ipa_qsb_data ipa_qsb_data[] = { 42 40 [IPA_QSB_MASTER_DDR] = { 43 - .max_writes = 8, 44 - .max_reads = 0, /* no limit (hardware max) */ 41 + .max_writes = 12, 42 + .max_reads = 13, 45 43 .max_reads_beats = 120, 46 44 }, 47 45 }; ··· 79 81 }, 80 82 .endpoint = { 81 83 .config = { 82 - .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, 84 + .resource_group = IPA_RSRC_GROUP_DST_UL_DL, 83 85 .aggregation = true, 84 86 .status_enable = true, 85 87 .rx = { ··· 104 106 .filter_support = true, 105 107 .config = { 106 108 .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, 109 + .checksum = true, 107 110 .qmap = true, 108 111 .status_enable = true, 109 112 .tx = { ··· 127 128 }, 128 129 .endpoint = { 129 130 .config = { 130 - .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, 131 + .resource_group = IPA_RSRC_GROUP_DST_UL_DL, 132 + .checksum = true, 131 133 .qmap = true, 132 134 .aggregation = true, 133 135 .rx = { ··· 197 197 /* Destination resource configuration data for an SoC having IPA v4.7 */ 198 198 static const struct ipa_resource ipa_resource_dst[] = { 199 199 [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { 200 - .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { 200 + .limits[IPA_RSRC_GROUP_DST_UL_DL] = { 201 201 .min = 7, .max = 7, 202 202 }, 203 203 }, 204 204 [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { 205 - .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { 205 + .limits[IPA_RSRC_GROUP_DST_UL_DL] = { 206 206 .min = 2, .max = 2, 207 207 }, 208 208 },
+3
drivers/net/mctp/mctp-i3c.c
··· 507 507 { 508 508 struct mctp_i3c_internal_hdr *ihdr; 509 509 510 + if (!daddr || !saddr) 511 + return -EINVAL; 512 + 510 513 skb_push(skb, sizeof(struct mctp_i3c_internal_hdr)); 511 514 skb_reset_mac_header(skb); 512 515 ihdr = (void *)skb_mac_header(skb);
+19 -9
drivers/net/ppp/ppp_generic.c
··· 72 72 #define PPP_PROTO_LEN 2 73 73 #define PPP_LCP_HDRLEN 4 74 74 75 + /* The filter instructions generated by libpcap are constructed 76 + * assuming a four-byte PPP header on each packet, where the last 77 + * 2 bytes are the protocol field defined in the RFC and the first 78 + * byte of the first 2 bytes indicates the direction. 79 + * The second byte is currently unused, but we still need to initialize 80 + * it to prevent crafted BPF programs from reading them which would 81 + * cause reading of uninitialized data. 82 + */ 83 + #define PPP_FILTER_OUTBOUND_TAG 0x0100 84 + #define PPP_FILTER_INBOUND_TAG 0x0000 85 + 75 86 /* 76 87 * An instance of /dev/ppp can be associated with either a ppp 77 88 * interface unit or a ppp channel. In both cases, file->private_data ··· 1773 1762 1774 1763 if (proto < 0x8000) { 1775 1764 #ifdef CONFIG_PPP_FILTER 1776 - /* check if we should pass this packet */ 1777 - /* the filter instructions are constructed assuming 1778 - a four-byte PPP header on each packet */ 1779 - *(u8 *)skb_push(skb, 2) = 1; 1765 + /* check if the packet passes the pass and active filters. 1766 + * See comment for PPP_FILTER_OUTBOUND_TAG above. 1767 + */ 1768 + *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG); 1780 1769 if (ppp->pass_filter && 1781 1770 bpf_prog_run(ppp->pass_filter, skb) == 0) { 1782 1771 if (ppp->debug & 1) ··· 2493 2482 /* network protocol frame - give it to the kernel */ 2494 2483 2495 2484 #ifdef CONFIG_PPP_FILTER 2496 - /* check if the packet passes the pass and active filters */ 2497 - /* the filter instructions are constructed assuming 2498 - a four-byte PPP header on each packet */ 2499 2485 if (ppp->pass_filter || ppp->active_filter) { 2500 2486 if (skb_unclone(skb, GFP_ATOMIC)) 2501 2487 goto err; 2502 - 2503 - *(u8 *)skb_push(skb, 2) = 0; 2488 + /* Check if the packet passes the pass and active filters. 2489 + * See comment for PPP_FILTER_INBOUND_TAG above. 
2490 + */ 2491 + *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG); 2504 2492 if (ppp->pass_filter && 2505 2493 bpf_prog_run(ppp->pass_filter, skb) == 0) { 2506 2494 if (ppp->debug & 1)
+13 -7
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
··· 1172 1172 struct brcmf_bus *bus_if; 1173 1173 struct brcmf_sdio_dev *sdiodev; 1174 1174 mmc_pm_flag_t sdio_flags; 1175 + bool cap_power_off; 1175 1176 int ret = 0; 1176 1177 1177 1178 func = container_of(dev, struct sdio_func, dev); ··· 1180 1179 if (func->num != 1) 1181 1180 return 0; 1182 1181 1182 + cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); 1183 1183 1184 1184 bus_if = dev_get_drvdata(dev); 1185 1185 sdiodev = bus_if->bus_priv.sdio; 1186 1186 1187 - if (sdiodev->wowl_enabled) { 1187 + if (sdiodev->wowl_enabled || !cap_power_off) { 1188 1188 brcmf_sdiod_freezer_on(sdiodev); 1189 1189 brcmf_sdio_wd_timer(sdiodev->bus, 0); 1190 1190 1191 1191 sdio_flags = MMC_PM_KEEP_POWER; 1192 - if (sdiodev->settings->bus.sdio.oob_irq_supported) 1193 - enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); 1194 - else 1195 - sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; 1192 + 1193 + if (sdiodev->wowl_enabled) { 1194 + if (sdiodev->settings->bus.sdio.oob_irq_supported) 1195 + enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); 1196 + else 1197 + sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; 1198 + } 1196 1199 1197 1200 if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags)) 1198 1201 brcmf_err("Failed to set pm_flags %x\n", sdio_flags); ··· 1218 1213 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 1219 1214 struct sdio_func *func = container_of(dev, struct sdio_func, dev); 1220 1215 int ret = 0; 1216 + bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); 1221 1217 1222 1218 brcmf_dbg(SDIO, "Enter: F%d\n", func->num); 1223 1219 if (func->num != 2) 1224 1220 return 0; 1225 1221 1226 - if (!sdiodev->wowl_enabled) { 1222 + if (!sdiodev->wowl_enabled && cap_power_off) { 1227 1223 /* bus was powered off and device removed, probe again */ 1228 1224 ret = brcmf_sdiod_probe(sdiodev); 1229 1225 if (ret) 1230 1226 brcmf_err("Failed to probe device on resume\n"); 1231 1227 } else { 1232 - if (sdiodev->settings->bus.sdio.oob_irq_supported) 1228 + 
if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported) 1233 1229 disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); 1234 1230 1235 1231 brcmf_sdiod_freezer_off(sdiodev);
+58 -28
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
··· 558 558 } 559 559 560 560 /* 561 - * alloc_sgtable - allocates scallerlist table in the given size, 562 - * fills it with pages and returns it 561 + * alloc_sgtable - allocates (chained) scatterlist in the given size, 562 + * fills it with pages and returns it 563 563 * @size: the size (in bytes) of the table 564 - */ 565 - static struct scatterlist *alloc_sgtable(int size) 564 + */ 565 + static struct scatterlist *alloc_sgtable(ssize_t size) 566 566 { 567 - int alloc_size, nents, i; 568 - struct page *new_page; 569 - struct scatterlist *iter; 570 - struct scatterlist *table; 567 + struct scatterlist *result = NULL, *prev; 568 + int nents, i, n_prev; 571 569 572 570 nents = DIV_ROUND_UP(size, PAGE_SIZE); 573 - table = kcalloc(nents, sizeof(*table), GFP_KERNEL); 574 - if (!table) 575 - return NULL; 576 - sg_init_table(table, nents); 577 - iter = table; 578 - for_each_sg(table, iter, sg_nents(table), i) { 579 - new_page = alloc_page(GFP_KERNEL); 580 - if (!new_page) { 581 - /* release all previous allocated pages in the table */ 582 - iter = table; 583 - for_each_sg(table, iter, sg_nents(table), i) { 584 - new_page = sg_page(iter); 585 - if (new_page) 586 - __free_page(new_page); 587 - } 588 - kfree(table); 571 + 572 + #define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result)) 573 + /* 574 + * We need an additional entry for table chaining, 575 + * this ensures the loop can finish i.e. we can 576 + * fit at least two entries per page (obviously, 577 + * many more really fit.) 
578 + */ 579 + BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2); 580 + 581 + while (nents > 0) { 582 + struct scatterlist *new, *iter; 583 + int n_fill, n_alloc; 584 + 585 + if (nents <= N_ENTRIES_PER_PAGE) { 586 + /* last needed table */ 587 + n_fill = nents; 588 + n_alloc = nents; 589 + nents = 0; 590 + } else { 591 + /* fill a page with entries */ 592 + n_alloc = N_ENTRIES_PER_PAGE; 593 + /* reserve one for chaining */ 594 + n_fill = n_alloc - 1; 595 + nents -= n_fill; 596 + } 597 + 598 + new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL); 599 + if (!new) { 600 + if (result) 601 + _devcd_free_sgtable(result); 589 602 return NULL; 590 603 } 591 - alloc_size = min_t(int, size, PAGE_SIZE); 592 - size -= PAGE_SIZE; 593 - sg_set_page(iter, new_page, alloc_size, 0); 604 + sg_init_table(new, n_alloc); 605 + 606 + if (!result) 607 + result = new; 608 + else 609 + sg_chain(prev, n_prev, new); 610 + prev = new; 611 + n_prev = n_alloc; 612 + 613 + for_each_sg(new, iter, n_fill, i) { 614 + struct page *new_page = alloc_page(GFP_KERNEL); 615 + 616 + if (!new_page) { 617 + _devcd_free_sgtable(result); 618 + return NULL; 619 + } 620 + 621 + sg_set_page(iter, new_page, PAGE_SIZE, 0); 622 + } 594 623 } 595 - return table; 624 + 625 + return result; 596 626 } 597 627 598 628 static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
+3
drivers/net/wireless/intel/iwlwifi/fw/dump.c
··· 540 540 } err_info = {}; 541 541 int ret; 542 542 543 + if (err_id) 544 + *err_id = 0; 545 + 543 546 if (!base) 544 547 return false; 545 548
+1 -1
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
··· 1181 1181 1182 1182 if (tlv_len != sizeof(*fseq_ver)) 1183 1183 goto invalid_tlv_len; 1184 - IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n", 1184 + IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n", 1185 1185 fseq_ver->version); 1186 1186 } 1187 1187 break;
+51 -26
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 3092 3092 ieee80211_resume_disconnect(vif); 3093 3093 } 3094 3094 3095 - static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, 3096 - struct ieee80211_vif *vif) 3095 + enum rt_status { 3096 + FW_ALIVE, 3097 + FW_NEEDS_RESET, 3098 + FW_ERROR, 3099 + }; 3100 + 3101 + static enum rt_status iwl_mvm_check_rt_status(struct iwl_mvm *mvm, 3102 + struct ieee80211_vif *vif) 3097 3103 { 3098 3104 u32 err_id; 3099 3105 ··· 3107 3101 if (iwl_fwrt_read_err_table(mvm->trans, 3108 3102 mvm->trans->dbg.lmac_error_event_table[0], 3109 3103 &err_id)) { 3110 - if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) { 3111 - struct cfg80211_wowlan_wakeup wakeup = { 3112 - .rfkill_release = true, 3113 - }; 3114 - ieee80211_report_wowlan_wakeup(vif, &wakeup, 3115 - GFP_KERNEL); 3104 + if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { 3105 + IWL_WARN(mvm, "Rfkill was toggled during suspend\n"); 3106 + if (vif) { 3107 + struct cfg80211_wowlan_wakeup wakeup = { 3108 + .rfkill_release = true, 3109 + }; 3110 + 3111 + ieee80211_report_wowlan_wakeup(vif, &wakeup, 3112 + GFP_KERNEL); 3113 + } 3114 + 3115 + return FW_NEEDS_RESET; 3116 3116 } 3117 - return true; 3117 + return FW_ERROR; 3118 3118 } 3119 3119 3120 3120 /* check if we have lmac2 set and check for error */ 3121 3121 if (iwl_fwrt_read_err_table(mvm->trans, 3122 3122 mvm->trans->dbg.lmac_error_event_table[1], 3123 3123 NULL)) 3124 - return true; 3124 + return FW_ERROR; 3125 3125 3126 3126 /* check for umac error */ 3127 3127 if (iwl_fwrt_read_err_table(mvm->trans, 3128 3128 mvm->trans->dbg.umac_error_event_table, 3129 3129 NULL)) 3130 - return true; 3130 + return FW_ERROR; 3131 3131 3132 - return false; 3132 + return FW_ALIVE; 3133 3133 } 3134 3134 3135 3135 /* ··· 3504 3492 bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, 3505 3493 IWL_UCODE_TLV_CAPA_D0I3_END_FIRST); 3506 3494 bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm); 3495 + enum rt_status rt_status; 3507 3496 bool keep = false; 3508 3497 3509 3498 
mutex_lock(&mvm->mutex); ··· 3528 3515 3529 3516 iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); 3530 3517 3531 - if (iwl_mvm_check_rt_status(mvm, vif)) { 3532 - IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n"); 3518 + rt_status = iwl_mvm_check_rt_status(mvm, vif); 3519 + if (rt_status != FW_ALIVE) { 3533 3520 set_bit(STATUS_FW_ERROR, &mvm->trans->status); 3534 - iwl_mvm_dump_nic_error_log(mvm); 3535 - iwl_dbg_tlv_time_point(&mvm->fwrt, 3536 - IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); 3537 - iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, 3538 - false, 0); 3521 + if (rt_status == FW_ERROR) { 3522 + IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n"); 3523 + iwl_mvm_dump_nic_error_log(mvm); 3524 + iwl_dbg_tlv_time_point(&mvm->fwrt, 3525 + IWL_FW_INI_TIME_POINT_FW_ASSERT, 3526 + NULL); 3527 + iwl_fw_dbg_collect_desc(&mvm->fwrt, 3528 + &iwl_dump_desc_assert, 3529 + false, 0); 3530 + } 3539 3531 ret = 1; 3540 3532 goto err; 3541 3533 } ··· 3697 3679 .notif_expected = 3698 3680 IWL_D3_NOTIF_D3_END_NOTIF, 3699 3681 }; 3682 + enum rt_status rt_status; 3700 3683 int ret; 3701 3684 3702 3685 lockdep_assert_held(&mvm->mutex); ··· 3707 3688 mvm->last_reset_or_resume_time_jiffies = jiffies; 3708 3689 iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); 3709 3690 3710 - if (iwl_mvm_check_rt_status(mvm, NULL)) { 3711 - IWL_ERR(mvm, "FW Error occurred during suspend. 
Restarting.\n"); 3691 + rt_status = iwl_mvm_check_rt_status(mvm, NULL); 3692 + if (rt_status != FW_ALIVE) { 3712 3693 set_bit(STATUS_FW_ERROR, &mvm->trans->status); 3713 - iwl_mvm_dump_nic_error_log(mvm); 3714 - iwl_dbg_tlv_time_point(&mvm->fwrt, 3715 - IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); 3716 - iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, 3717 - false, 0); 3694 + if (rt_status == FW_ERROR) { 3695 + IWL_ERR(mvm, 3696 + "iwl_mvm_check_rt_status failed, device is gone during suspend\n"); 3697 + iwl_mvm_dump_nic_error_log(mvm); 3698 + iwl_dbg_tlv_time_point(&mvm->fwrt, 3699 + IWL_FW_INI_TIME_POINT_FW_ASSERT, 3700 + NULL); 3701 + iwl_fw_dbg_collect_desc(&mvm->fwrt, 3702 + &iwl_dump_desc_assert, 3703 + false, 0); 3704 + } 3718 3705 mvm->trans->state = IWL_TRANS_NO_FW; 3719 3706 ret = -ENODEV; 3720 3707
+7
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
··· 1479 1479 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) 1480 1480 return -EOPNOTSUPP; 1481 1481 1482 + /* 1483 + * If the firmware is not running, silently succeed since there is 1484 + * no data to clear. 1485 + */ 1486 + if (!iwl_mvm_firmware_running(mvm)) 1487 + return count; 1488 + 1482 1489 mutex_lock(&mvm->mutex); 1483 1490 iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt); 1484 1491 mutex_unlock(&mvm->mutex);
+4 -4
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
··· 995 995 */ 996 996 u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); 997 997 u32 rate_n_flags = phy_data->rate_n_flags; 998 - u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1; 998 + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; 999 999 u8 offs = 0; 1000 1000 1001 1001 rx_status->bw = RATE_INFO_BW_HE_RU; ··· 1050 1050 1051 1051 if (he_mu) 1052 1052 he_mu->flags2 |= 1053 - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, 1053 + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, 1054 1054 rate_n_flags), 1055 1055 IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); 1056 - else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1) 1056 + else if (he_type == RATE_MCS_HE_TYPE_TRIG) 1057 1057 he->data6 |= 1058 1058 cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | 1059 - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, 1059 + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, 1060 1060 rate_n_flags), 1061 1061 IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); 1062 1062 }
+2
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
··· 1030 1030 /* End TE, notify mac80211 */ 1031 1031 mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; 1032 1032 mvmvif->time_event_data.link_id = -1; 1033 + /* set the bit so the ROC cleanup will actually clean up */ 1034 + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); 1033 1035 iwl_mvm_roc_finished(mvm); 1034 1036 ieee80211_remain_on_channel_expired(mvm->hw); 1035 1037 } else if (le32_to_cpu(notif->start)) {
+3 -2
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ 2 2 /* 3 - * Copyright (C) 2003-2015, 2018-2024 Intel Corporation 3 + * Copyright (C) 2003-2015, 2018-2025 Intel Corporation 4 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 6 */ ··· 646 646 unsigned int len); 647 647 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, 648 648 struct iwl_cmd_meta *cmd_meta, 649 - u8 **hdr, unsigned int hdr_room); 649 + u8 **hdr, unsigned int hdr_room, 650 + unsigned int offset); 650 651 651 652 void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, 652 653 struct iwl_cmd_meta *cmd_meta);
+4 -2
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 3 * Copyright (C) 2017 Intel Deutschland GmbH 4 - * Copyright (C) 2018-2020, 2023-2024 Intel Corporation 4 + * Copyright (C) 2018-2020, 2023-2025 Intel Corporation 5 5 */ 6 6 #include <net/tso.h> 7 7 #include <linux/tcp.h> ··· 188 188 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); 189 189 190 190 /* Our device supports 9 segments at most, it will fit in 1 page */ 191 - sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); 191 + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, 192 + snap_ip_tcp_hdrlen + hdr_len); 192 193 if (!sgt) 193 194 return -ENOMEM; 194 195 ··· 348 347 return tfd; 349 348 350 349 out_err: 350 + iwl_pcie_free_tso_pages(trans, skb, out_meta); 351 351 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); 352 352 return NULL; 353 353 }
+12 -8
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation 3 + * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation 4 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 6 */ ··· 1855 1855 * @cmd_meta: command meta to store the scatter list information for unmapping 1856 1856 * @hdr: output argument for TSO headers 1857 1857 * @hdr_room: requested length for TSO headers 1858 + * @offset: offset into the data from which mapping should start 1858 1859 * 1859 1860 * Allocate space for a scatter gather list and TSO headers and map the SKB 1860 1861 * using the scatter gather list. The SKB is unmapped again when the page is ··· 1865 1864 */ 1866 1865 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, 1867 1866 struct iwl_cmd_meta *cmd_meta, 1868 - u8 **hdr, unsigned int hdr_room) 1867 + u8 **hdr, unsigned int hdr_room, 1868 + unsigned int offset) 1869 1869 { 1870 1870 struct sg_table *sgt; 1871 + unsigned int n_segments; 1871 1872 1872 1873 if (WARN_ON_ONCE(skb_has_frag_list(skb))) 1873 1874 return NULL; 1874 1875 1876 + n_segments = DIV_ROUND_UP(skb->len - offset, skb_shinfo(skb)->gso_size); 1875 1877 *hdr = iwl_pcie_get_page_hdr(trans, 1876 1878 hdr_room + __alignof__(struct sg_table) + 1877 1879 sizeof(struct sg_table) + 1878 - (skb_shinfo(skb)->nr_frags + 1) * 1879 - sizeof(struct scatterlist), 1880 + n_segments * sizeof(struct scatterlist), 1880 1881 skb); 1881 1882 if (!*hdr) 1882 1883 return NULL; ··· 1886 1883 sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table)); 1887 1884 sgt->sgl = (void *)(sgt + 1); 1888 1885 1889 - sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1); 1886 + sg_init_table(sgt->sgl, n_segments); 1890 1887 1891 1888 /* Only map the data, not the header (it is copied to the TSO page) */ 1892 - sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 
skb_headlen(skb), 1893 - skb->data_len); 1889 + sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, 1890 + skb->len - offset); 1894 1891 if (WARN_ON_ONCE(sgt->orig_nents <= 0)) 1895 1892 return NULL; 1896 1893 ··· 1942 1939 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; 1943 1940 1944 1941 /* Our device supports 9 segments at most, it will fit in 1 page */ 1945 - sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); 1942 + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, 1943 + snap_ip_tcp_hdrlen + hdr_len + iv_len); 1946 1944 if (!sgt) 1947 1945 return -ENOMEM; 1948 1946
+2 -1
net/8021q/vlan.c
··· 131 131 { 132 132 const char *name = real_dev->name; 133 133 134 - if (real_dev->features & NETIF_F_VLAN_CHALLENGED) { 134 + if (real_dev->features & NETIF_F_VLAN_CHALLENGED || 135 + real_dev->type != ARPHRD_ETHER) { 135 136 pr_info("VLANs not supported on %s\n", name); 136 137 NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device"); 137 138 return -EOPNOTSUPP;
+5
net/bluetooth/mgmt.c
··· 9660 9660 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) + 9661 9661 eir_precalc_len(sizeof(conn->dev_class))); 9662 9662 9663 + if (!skb) 9664 + return; 9665 + 9663 9666 ev = skb_put(skb, sizeof(*ev)); 9664 9667 bacpy(&ev->addr.bdaddr, &conn->dst); 9665 9668 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); ··· 10416 10413 10417 10414 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 10418 10415 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0)); 10416 + if (!skb) 10417 + return; 10419 10418 10420 10419 ev = skb_put(skb, sizeof(*ev)); 10421 10420 bacpy(&ev->addr.bdaddr, bdaddr);
+4 -4
net/ethtool/cabletest.c
··· 72 72 dev = req_info.dev; 73 73 74 74 rtnl_lock(); 75 - phydev = ethnl_req_get_phydev(&req_info, 76 - tb[ETHTOOL_A_CABLE_TEST_HEADER], 75 + phydev = ethnl_req_get_phydev(&req_info, tb, 76 + ETHTOOL_A_CABLE_TEST_HEADER, 77 77 info->extack); 78 78 if (IS_ERR_OR_NULL(phydev)) { 79 79 ret = -EOPNOTSUPP; ··· 339 339 goto out_dev_put; 340 340 341 341 rtnl_lock(); 342 - phydev = ethnl_req_get_phydev(&req_info, 343 - tb[ETHTOOL_A_CABLE_TEST_TDR_HEADER], 342 + phydev = ethnl_req_get_phydev(&req_info, tb, 343 + ETHTOOL_A_CABLE_TEST_TDR_HEADER, 344 344 info->extack); 345 345 if (IS_ERR_OR_NULL(phydev)) { 346 346 ret = -EOPNOTSUPP;
+1 -1
net/ethtool/linkstate.c
··· 103 103 struct phy_device *phydev; 104 104 int ret; 105 105 106 - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_LINKSTATE_HEADER], 106 + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_LINKSTATE_HEADER, 107 107 info->extack); 108 108 if (IS_ERR(phydev)) { 109 109 ret = PTR_ERR(phydev);
+3 -3
net/ethtool/netlink.c
··· 211 211 } 212 212 213 213 struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, 214 - const struct nlattr *header, 214 + struct nlattr **tb, unsigned int header, 215 215 struct netlink_ext_ack *extack) 216 216 { 217 217 struct phy_device *phydev; ··· 225 225 return req_info->dev->phydev; 226 226 227 227 phydev = phy_link_topo_get_phy(req_info->dev, req_info->phy_index); 228 - if (!phydev) { 229 - NL_SET_ERR_MSG_ATTR(extack, header, 228 + if (!phydev && tb) { 229 + NL_SET_ERR_MSG_ATTR(extack, tb[header], 230 230 "no phy matching phyindex"); 231 231 return ERR_PTR(-ENODEV); 232 232 }
+3 -2
net/ethtool/netlink.h
··· 275 275 * ethnl_req_get_phydev() - Gets the phy_device targeted by this request, 276 276 * if any. Must be called under rntl_lock(). 277 277 * @req_info: The ethnl request to get the phy from. 278 - * @header: The netlink header, used for error reporting. 278 + * @tb: The netlink attributes array, for error reporting. 279 + * @header: The netlink header index, used for error reporting. 279 280 * @extack: The netlink extended ACK, for error reporting. 280 281 * 281 282 * The caller must hold RTNL, until it's done interacting with the returned ··· 290 289 * is returned. 291 290 */ 292 291 struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, 293 - const struct nlattr *header, 292 + struct nlattr **tb, unsigned int header, 294 293 struct netlink_ext_ack *extack); 295 294 296 295 /**
+1 -1
net/ethtool/phy.c
··· 125 125 struct phy_req_info *req_info = PHY_REQINFO(req_base); 126 126 struct phy_device *phydev; 127 127 128 - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER], 128 + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PHY_HEADER, 129 129 extack); 130 130 if (!phydev) 131 131 return 0;
+3 -3
net/ethtool/plca.c
··· 62 62 struct phy_device *phydev; 63 63 int ret; 64 64 65 - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER], 65 + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER, 66 66 info->extack); 67 67 // check that the PHY device is available and connected 68 68 if (IS_ERR_OR_NULL(phydev)) { ··· 152 152 bool mod = false; 153 153 int ret; 154 154 155 - phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PLCA_HEADER], 155 + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PLCA_HEADER, 156 156 info->extack); 157 157 // check that the PHY device is available and connected 158 158 if (IS_ERR_OR_NULL(phydev)) ··· 211 211 struct phy_device *phydev; 212 212 int ret; 213 213 214 - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER], 214 + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER, 215 215 info->extack); 216 216 // check that the PHY device is available and connected 217 217 if (IS_ERR_OR_NULL(phydev)) {
+2 -2
net/ethtool/pse-pd.c
··· 64 64 if (ret < 0) 65 65 return ret; 66 66 67 - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PSE_HEADER], 67 + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PSE_HEADER, 68 68 info->extack); 69 69 if (IS_ERR(phydev)) 70 70 return -ENODEV; ··· 261 261 struct phy_device *phydev; 262 262 int ret; 263 263 264 - phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PSE_HEADER], 264 + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PSE_HEADER, 265 265 info->extack); 266 266 ret = ethnl_set_pse_validate(phydev, info); 267 267 if (ret)
+1 -1
net/ethtool/stats.c
··· 138 138 struct phy_device *phydev; 139 139 int ret; 140 140 141 - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_STATS_HEADER], 141 + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_STATS_HEADER, 142 142 info->extack); 143 143 if (IS_ERR(phydev)) 144 144 return PTR_ERR(phydev);
+1 -1
net/ethtool/strset.c
··· 309 309 return 0; 310 310 } 311 311 312 - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_HEADER_FLAGS], 312 + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_HEADER_FLAGS, 313 313 info->extack); 314 314 315 315 /* phydev can be NULL, check for errors only */
+7 -4
net/ipv4/tcp_offload.c
··· 13 13 #include <net/tcp.h> 14 14 #include <net/protocol.h> 15 15 16 - static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, 16 + static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb, 17 17 unsigned int seq, unsigned int mss) 18 18 { 19 + u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP; 20 + u32 ts_seq = skb_shinfo(gso_skb)->tskey; 21 + 19 22 while (skb) { 20 23 if (before(ts_seq, seq + mss)) { 21 - skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP; 24 + skb_shinfo(skb)->tx_flags |= flags; 22 25 skb_shinfo(skb)->tskey = ts_seq; 23 26 return; 24 27 } ··· 196 193 th = tcp_hdr(skb); 197 194 seq = ntohl(th->seq); 198 195 199 - if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP)) 200 - tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); 196 + if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP)) 197 + tcp_gso_tstamp(segs, gso_skb, seq, mss); 201 198 202 199 newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta)); 203 200
+6 -2
net/ipv4/udp_offload.c
··· 321 321 322 322 /* clear destructor to avoid skb_segment assigning it to tail */ 323 323 copy_dtor = gso_skb->destructor == sock_wfree; 324 - if (copy_dtor) 324 + if (copy_dtor) { 325 325 gso_skb->destructor = NULL; 326 + gso_skb->sk = NULL; 327 + } 326 328 327 329 segs = skb_segment(gso_skb, features); 328 330 if (IS_ERR_OR_NULL(segs)) { 329 - if (copy_dtor) 331 + if (copy_dtor) { 330 332 gso_skb->destructor = sock_wfree; 333 + gso_skb->sk = sk; 334 + } 331 335 return segs; 332 336 } 333 337
+3 -1
net/ipv6/ila/ila_lwt.c
··· 88 88 goto drop; 89 89 } 90 90 91 - if (ilwt->connected) { 91 + /* cache only if we don't create a dst reference loop */ 92 + if (ilwt->connected && orig_dst->lwtstate != dst->lwtstate) { 92 93 local_bh_disable(); 93 94 dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr); 94 95 local_bh_enable(); 95 96 } 96 97 } 97 98 99 + skb_dst_drop(skb); 98 100 skb_dst_set(skb, dst); 99 101 return dst_output(net, sk, skb); 100 102
+27 -22
net/llc/llc_s_ac.c
··· 24 24 #include <net/llc_s_ac.h> 25 25 #include <net/llc_s_ev.h> 26 26 #include <net/llc_sap.h> 27 - 27 + #include <net/sock.h> 28 28 29 29 /** 30 30 * llc_sap_action_unitdata_ind - forward UI PDU to network layer ··· 40 40 return 0; 41 41 } 42 42 43 + static int llc_prepare_and_xmit(struct sk_buff *skb) 44 + { 45 + struct llc_sap_state_ev *ev = llc_sap_ev(skb); 46 + struct sk_buff *nskb; 47 + int rc; 48 + 49 + rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); 50 + if (rc) 51 + return rc; 52 + 53 + nskb = skb_clone(skb, GFP_ATOMIC); 54 + if (!nskb) 55 + return -ENOMEM; 56 + 57 + if (skb->sk) 58 + skb_set_owner_w(nskb, skb->sk); 59 + 60 + return dev_queue_xmit(nskb); 61 + } 62 + 43 63 /** 44 64 * llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer 45 65 * @sap: SAP ··· 72 52 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) 73 53 { 74 54 struct llc_sap_state_ev *ev = llc_sap_ev(skb); 75 - int rc; 76 55 77 56 llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, 78 57 ev->daddr.lsap, LLC_PDU_CMD); 79 58 llc_pdu_init_as_ui_cmd(skb); 80 - rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); 81 - if (likely(!rc)) { 82 - skb_get(skb); 83 - rc = dev_queue_xmit(skb); 84 - } 85 - return rc; 59 + 60 + return llc_prepare_and_xmit(skb); 86 61 } 87 62 88 63 /** ··· 92 77 int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) 93 78 { 94 79 struct llc_sap_state_ev *ev = llc_sap_ev(skb); 95 - int rc; 96 80 97 81 llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap, 98 82 ev->daddr.lsap, LLC_PDU_CMD); 99 83 llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); 100 - rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); 101 - if (likely(!rc)) { 102 - skb_get(skb); 103 - rc = dev_queue_xmit(skb); 104 - } 105 - return rc; 84 + 85 + return llc_prepare_and_xmit(skb); 106 86 } 107 87 108 88 /** ··· 143 133 int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb) 144 134 { 145 135 struct 
llc_sap_state_ev *ev = llc_sap_ev(skb); 146 - int rc; 147 136 148 137 llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, 149 138 ev->daddr.lsap, LLC_PDU_CMD); 150 139 llc_pdu_init_as_test_cmd(skb); 151 - rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); 152 - if (likely(!rc)) { 153 - skb_get(skb); 154 - rc = dev_queue_xmit(skb); 155 - } 156 - return rc; 140 + 141 + return llc_prepare_and_xmit(skb); 157 142 } 158 143 159 144 int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
+8 -2
net/mac80211/driver-ops.c
··· 116 116 117 117 sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; 118 118 119 - /* Remove driver debugfs entries */ 120 - ieee80211_debugfs_recreate_netdev(sdata, sdata->vif.valid_links); 119 + /* 120 + * Remove driver debugfs entries. 121 + * The virtual monitor interface doesn't get a debugfs 122 + * entry, so it's exempt here. 123 + */ 124 + if (sdata != rcu_access_pointer(local->monitor_sdata)) 125 + ieee80211_debugfs_recreate_netdev(sdata, 126 + sdata->vif.valid_links); 121 127 122 128 trace_drv_remove_interface(local, sdata); 123 129 local->ops->remove_interface(&local->hw, &sdata->vif);
+6 -5
net/mac80211/iface.c
··· 1206 1206 return; 1207 1207 } 1208 1208 1209 - RCU_INIT_POINTER(local->monitor_sdata, NULL); 1210 - mutex_unlock(&local->iflist_mtx); 1211 - 1212 - synchronize_net(); 1213 - 1209 + clear_bit(SDATA_STATE_RUNNING, &sdata->state); 1214 1210 ieee80211_link_release_channel(&sdata->deflink); 1215 1211 1216 1212 if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) 1217 1213 drv_remove_interface(local, sdata); 1214 + 1215 + RCU_INIT_POINTER(local->monitor_sdata, NULL); 1216 + mutex_unlock(&local->iflist_mtx); 1217 + 1218 + synchronize_net(); 1218 1219 1219 1220 kfree(sdata); 1220 1221 }
+1
net/mac80211/mlme.c
··· 4959 4959 parse_params.start = bss_ies->data; 4960 4960 parse_params.len = bss_ies->len; 4961 4961 parse_params.bss = cbss; 4962 + parse_params.link_id = -1; 4962 4963 bss_elems = ieee802_11_parse_elems_full(&parse_params); 4963 4964 if (!bss_elems) { 4964 4965 ret = false;
+90 -45
net/mac80211/parse.c
··· 47 47 /* The EPCS Multi-Link element in the original elements */ 48 48 const struct element *ml_epcs_elem; 49 49 50 + bool multi_link_inner; 51 + bool skip_vendor; 52 + 50 53 /* 51 54 * scratch buffer that can be used for various element parsing related 52 55 * tasks, e.g., element de-fragmentation etc. ··· 155 152 switch (le16_get_bits(mle->control, 156 153 IEEE80211_ML_CONTROL_TYPE)) { 157 154 case IEEE80211_ML_CONTROL_TYPE_BASIC: 158 - if (elems_parse->ml_basic_elem) { 155 + if (elems_parse->multi_link_inner) { 159 156 elems->parse_error |= 160 157 IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC; 161 158 break; 162 159 } 163 - elems_parse->ml_basic_elem = elem; 164 160 break; 165 161 case IEEE80211_ML_CONTROL_TYPE_RECONF: 166 162 elems_parse->ml_reconf_elem = elem; ··· 401 399 IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; 402 400 break; 403 401 case WLAN_EID_VENDOR_SPECIFIC: 402 + if (elems_parse->skip_vendor) 403 + break; 404 + 404 405 if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && 405 406 pos[2] == 0xf2) { 406 407 /* Microsoft OUI (00:50:F2) */ ··· 871 866 } 872 867 } 873 868 874 - static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse, 875 - struct ieee80211_elems_parse_params *params) 869 + static const struct element * 870 + ieee80211_prep_mle_link_parse(struct ieee80211_elems_parse *elems_parse, 871 + struct ieee80211_elems_parse_params *params, 872 + struct ieee80211_elems_parse_params *sub) 876 873 { 877 874 struct ieee802_11_elems *elems = &elems_parse->elems; 878 875 struct ieee80211_mle_per_sta_profile *prof; 879 - struct ieee80211_elems_parse_params sub = { 880 - .mode = params->mode, 881 - .action = params->action, 882 - .from_ap = params->from_ap, 883 - .link_id = -1, 884 - }; 885 - ssize_t ml_len = elems->ml_basic_len; 886 - const struct element *non_inherit = NULL; 876 + const struct element *tmp; 877 + ssize_t ml_len; 887 878 const u8 *end; 879 + 880 + if (params->mode < IEEE80211_CONN_MODE_EHT) 881 + return NULL; 882 + 883 + 
for_each_element_extid(tmp, WLAN_EID_EXT_EHT_MULTI_LINK, 884 + elems->ie_start, elems->total_len) { 885 + const struct ieee80211_multi_link_elem *mle = 886 + (void *)tmp->data + 1; 887 + 888 + if (!ieee80211_mle_size_ok(tmp->data + 1, tmp->datalen - 1)) 889 + continue; 890 + 891 + if (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE) != 892 + IEEE80211_ML_CONTROL_TYPE_BASIC) 893 + continue; 894 + 895 + elems_parse->ml_basic_elem = tmp; 896 + break; 897 + } 888 898 889 899 ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem, 890 900 elems->ie_start, ··· 911 891 WLAN_EID_FRAGMENT); 912 892 913 893 if (ml_len < 0) 914 - return; 894 + return NULL; 915 895 916 896 elems->ml_basic = (const void *)elems_parse->scratch_pos; 917 897 elems->ml_basic_len = ml_len; 918 898 elems_parse->scratch_pos += ml_len; 919 899 920 900 if (params->link_id == -1) 921 - return; 901 + return NULL; 922 902 923 903 ieee80211_mle_get_sta_prof(elems_parse, params->link_id); 924 904 prof = elems->prof; 925 905 926 906 if (!prof) 927 - return; 907 + return NULL; 928 908 929 909 /* check if we have the 4 bytes for the fixed part in assoc response */ 930 910 if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) { 931 911 elems->prof = NULL; 932 912 elems->sta_prof_len = 0; 933 - return; 913 + return NULL; 934 914 } 935 915 936 916 /* ··· 939 919 * the -1 is because the 'sta_info_len' is accounted to as part of the 940 920 * per-STA profile, but not part of the 'u8 variable[]' portion. 
941 921 */ 942 - sub.start = prof->variable + prof->sta_info_len - 1 + 4; 922 + sub->start = prof->variable + prof->sta_info_len - 1 + 4; 943 923 end = (const u8 *)prof + elems->sta_prof_len; 944 - sub.len = end - sub.start; 924 + sub->len = end - sub->start; 945 925 946 - non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, 947 - sub.start, sub.len); 948 - _ieee802_11_parse_elems_full(&sub, elems_parse, non_inherit); 926 + sub->mode = params->mode; 927 + sub->action = params->action; 928 + sub->from_ap = params->from_ap; 929 + sub->link_id = -1; 930 + 931 + return cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, 932 + sub->start, sub->len); 949 933 } 950 934 951 935 static void ··· 997 973 struct ieee802_11_elems * 998 974 ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) 999 975 { 976 + struct ieee80211_elems_parse_params sub = {}; 1000 977 struct ieee80211_elems_parse *elems_parse; 1001 - struct ieee802_11_elems *elems; 1002 978 const struct element *non_inherit = NULL; 1003 - u8 *nontransmitted_profile; 1004 - int nontransmitted_profile_len = 0; 979 + struct ieee802_11_elems *elems; 1005 980 size_t scratch_len = 3 * params->len; 981 + bool multi_link_inner = false; 1006 982 1007 983 BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0); 984 + 985 + /* cannot parse for both a specific link and non-transmitted BSS */ 986 + if (WARN_ON(params->link_id >= 0 && params->bss)) 987 + return NULL; 1008 988 1009 989 elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len), 1010 990 GFP_ATOMIC); ··· 1026 998 ieee80211_clear_tpe(&elems->tpe); 1027 999 ieee80211_clear_tpe(&elems->csa_tpe); 1028 1000 1029 - nontransmitted_profile = elems_parse->scratch_pos; 1030 - nontransmitted_profile_len = 1031 - ieee802_11_find_bssid_profile(params->start, params->len, 1032 - elems, params->bss, 1033 - nontransmitted_profile); 1034 - elems_parse->scratch_pos += nontransmitted_profile_len; 1035 - non_inherit = 
cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, 1036 - nontransmitted_profile, 1037 - nontransmitted_profile_len); 1001 + /* 1002 + * If we're looking for a non-transmitted BSS then we cannot at 1003 + * the same time be looking for a second link as the two can only 1004 + * appear in the same frame carrying info for different BSSes. 1005 + * 1006 + * In any case, we only look for one at a time, as encoded by 1007 + * the WARN_ON above. 1008 + */ 1009 + if (params->bss) { 1010 + int nontx_len = 1011 + ieee802_11_find_bssid_profile(params->start, 1012 + params->len, 1013 + elems, params->bss, 1014 + elems_parse->scratch_pos); 1015 + sub.start = elems_parse->scratch_pos; 1016 + sub.mode = params->mode; 1017 + sub.len = nontx_len; 1018 + sub.action = params->action; 1019 + sub.link_id = params->link_id; 1038 1020 1021 + /* consume the space used for non-transmitted profile */ 1022 + elems_parse->scratch_pos += nontx_len; 1023 + 1024 + non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, 1025 + sub.start, nontx_len); 1026 + } else { 1027 + /* must always parse to get elems_parse->ml_basic_elem */ 1028 + non_inherit = ieee80211_prep_mle_link_parse(elems_parse, params, 1029 + &sub); 1030 + multi_link_inner = true; 1031 + } 1032 + 1033 + elems_parse->skip_vendor = 1034 + cfg80211_find_elem(WLAN_EID_VENDOR_SPECIFIC, 1035 + sub.start, sub.len); 1039 1036 elems->crc = _ieee802_11_parse_elems_full(params, elems_parse, 1040 1037 non_inherit); 1041 1038 1042 - /* Override with nontransmitted profile, if found */ 1043 - if (nontransmitted_profile_len) { 1044 - struct ieee80211_elems_parse_params sub = { 1045 - .mode = params->mode, 1046 - .start = nontransmitted_profile, 1047 - .len = nontransmitted_profile_len, 1048 - .action = params->action, 1049 - .link_id = params->link_id, 1050 - }; 1051 - 1039 + /* Override with nontransmitted/per-STA profile if found */ 1040 + if (sub.len) { 1041 + elems_parse->multi_link_inner = multi_link_inner; 1042 + 
elems_parse->skip_vendor = false; 1052 1043 _ieee802_11_parse_elems_full(&sub, elems_parse, NULL); 1053 1044 } 1054 - 1055 - ieee80211_mle_parse_link(elems_parse, params); 1056 1045 1057 1046 ieee80211_mle_defrag_reconf(elems_parse); 1058 1047
+3 -2
net/mac80211/util.c
··· 687 687 struct ieee80211_sub_if_data *sdata, 688 688 unsigned int queues, bool drop) 689 689 { 690 - if (!local->ops->flush) 690 + if (!local->ops->flush && !drop) 691 691 return; 692 692 693 693 /* ··· 714 714 } 715 715 } 716 716 717 - drv_flush(local, sdata, queues, drop); 717 + if (local->ops->flush) 718 + drv_flush(local, sdata, queues, drop); 718 719 719 720 ieee80211_wake_queues_by_reason(&local->hw, queues, 720 721 IEEE80211_QUEUE_STOP_REASON_FLUSH,
+15 -3
net/mptcp/pm_netlink.c
··· 977 977 978 978 static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, 979 979 struct mptcp_pm_addr_entry *entry, 980 - bool needs_id) 980 + bool needs_id, bool replace) 981 981 { 982 982 struct mptcp_pm_addr_entry *cur, *del_entry = NULL; 983 983 unsigned int addr_max; ··· 1016 1016 } 1017 1017 if (entry->addr.id) 1018 1018 goto out; 1019 + 1020 + /* allow callers that only need to look up the local 1021 + * addr's id to skip replacement. This allows them to 1022 + * avoid calling synchronize_rcu in the packet recv 1023 + * path. 1024 + */ 1025 + if (!replace) { 1026 + kfree(entry); 1027 + ret = cur->addr.id; 1028 + goto out; 1029 + } 1019 1030 1020 1031 pernet->addrs--; 1021 1032 entry->addr.id = cur->addr.id; ··· 1176 1165 entry->ifindex = 0; 1177 1166 entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; 1178 1167 entry->lsk = NULL; 1179 - ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true); 1168 + ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false); 1180 1169 if (ret < 0) 1181 1170 kfree(entry); 1182 1171 ··· 1444 1433 } 1445 1434 } 1446 1435 ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, 1447 - !mptcp_pm_has_addr_attr_id(attr, info)); 1436 + !mptcp_pm_has_addr_attr_id(attr, info), 1437 + true); 1448 1438 if (ret < 0) { 1449 1439 GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret); 1450 1440 goto out_free;
+6 -1
net/wireless/nl80211.c
··· 4220 4220 if (flags[flag]) 4221 4221 *mntrflags |= (1<<flag); 4222 4222 4223 + /* cooked monitor mode is incompatible with other modes */ 4224 + if (*mntrflags & MONITOR_FLAG_COOK_FRAMES && 4225 + *mntrflags != MONITOR_FLAG_COOK_FRAMES) 4226 + return -EOPNOTSUPP; 4227 + 4223 4228 *mntrflags |= MONITOR_FLAG_CHANGED; 4224 4229 4225 4230 return 0; ··· 16534 16529 goto out; 16535 16530 } 16536 16531 16537 - err = cfg80211_assoc_ml_reconf(rdev, dev, links, rem_links); 16532 + err = -EOPNOTSUPP; 16538 16533 16539 16534 out: 16540 16535 for (link_id = 0; link_id < ARRAY_SIZE(links); link_id++)
+2 -1
net/wireless/reg.c
··· 407 407 { 408 408 if (!alpha2) 409 409 return false; 410 - return isalpha(alpha2[0]) && isalpha(alpha2[1]); 410 + return isascii(alpha2[0]) && isalpha(alpha2[0]) && 411 + isascii(alpha2[1]) && isalpha(alpha2[1]); 411 412 } 412 413 413 414 static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y)