Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Trivial merge conflict in the CAN subsystem caused by a file rename.

Conflicts:
drivers/net/can/m_can/tcan4x5x-core.c

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+1305 -479
+4 -2
Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
··· 19 19 properties: 20 20 compatible: 21 21 enum: 22 - - nxp,pf8x00 22 + - nxp,pf8100 23 + - nxp,pf8121a 24 + - nxp,pf8200 23 25 24 26 reg: 25 27 maxItems: 1 ··· 120 118 #size-cells = <0>; 121 119 122 120 pmic@8 { 123 - compatible = "nxp,pf8x00"; 121 + compatible = "nxp,pf8100"; 124 122 reg = <0x08>; 125 123 126 124 regulators {
+1
Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
··· 44 44 Definition: Must be one of below: 45 45 "qcom,pm8005-rpmh-regulators" 46 46 "qcom,pm8009-rpmh-regulators" 47 + "qcom,pm8009-1-rpmh-regulators" 47 48 "qcom,pm8150-rpmh-regulators" 48 49 "qcom,pm8150l-rpmh-regulators" 49 50 "qcom,pm8350-rpmh-regulators"
+36 -26
Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
··· 164 164 165 165 NPA Reporters 166 166 ------------- 167 - The NPA reporters are responsible for reporting and recovering the following group of errors 167 + The NPA reporters are responsible for reporting and recovering the following group of errors: 168 + 168 169 1. GENERAL events 170 + 169 171 - Error due to operation of unmapped PF. 170 172 - Error due to disabled alloc/free for other HW blocks (NIX, SSO, TIM, DPI and AURA). 173 + 171 174 2. ERROR events 175 + 172 176 - Fault due to NPA_AQ_INST_S read or NPA_AQ_RES_S write. 173 177 - AQ Doorbell Error. 178 + 174 179 3. RAS events 180 + 175 181 - RAS Error Reporting for NPA_AQ_INST_S/NPA_AQ_RES_S. 182 + 176 183 4. RVU events 184 + 177 185 - Error due to unmapped slot. 178 186 179 - Sample Output 180 - ------------- 181 - ~# devlink health 182 - pci/0002:01:00.0: 183 - reporter hw_npa_intr 184 - state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true 185 - reporter hw_npa_gen 186 - state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true 187 - reporter hw_npa_err 188 - state healthy error 2871 recover 2871 last_dump_date 2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true 189 - reporter hw_npa_ras 190 - state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true 187 + Sample Output:: 188 + 189 + ~# devlink health 190 + pci/0002:01:00.0: 191 + reporter hw_npa_intr 192 + state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true 193 + reporter hw_npa_gen 194 + state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true 195 + reporter hw_npa_err 196 + state healthy error 2871 recover 2871 last_dump_date 
2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true 197 + reporter hw_npa_ras 198 + state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true 191 199 192 200 Each reporter dumps the 201 + 193 202 - Error Type 194 203 - Error Register value 195 204 - Reason in words 196 205 197 - For eg: 198 - ~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_gen 199 - NPA_AF_GENERAL: 200 - NPA General Interrupt Reg : 1 201 - NIX0: free disabled RX 202 - ~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_intr 203 - NPA_AF_RVU: 204 - NPA RVU Interrupt Reg : 1 205 - Unmap Slot Error 206 - ~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_err 207 - NPA_AF_ERR: 208 - NPA Error Interrupt Reg : 4096 209 - AQ Doorbell Error 206 + For example:: 207 + 208 + ~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_gen 209 + NPA_AF_GENERAL: 210 + NPA General Interrupt Reg : 1 211 + NIX0: free disabled RX 212 + ~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_intr 213 + NPA_AF_RVU: 214 + NPA RVU Interrupt Reg : 1 215 + Unmap Slot Error 216 + ~# devlink health dump show pci/0002:01:00.0 reporter hw_npa_err 217 + NPA_AF_ERR: 218 + NPA Error Interrupt Reg : 4096 219 + AQ Doorbell Error
+2 -2
Documentation/networking/netdevices.rst
··· 64 64 Context: process 65 65 66 66 ndo_get_stats: 67 - Synchronization: dev_base_lock rwlock. 68 - Context: nominally process, but don't sleep inside an rwlock 67 + Synchronization: rtnl_lock() semaphore, dev_base_lock rwlock, or RCU. 68 + Context: atomic (can't sleep under rwlock or RCU) 69 69 70 70 ndo_start_xmit: 71 71 Synchronization: __netif_tx_lock spinlock.
+1 -1
MAINTAINERS
··· 10848 10848 10849 10849 MCAN MMIO DEVICE DRIVER 10850 10850 M: Dan Murphy <dmurphy@ti.com> 10851 - M: Sriram Dash <sriram.dash@samsung.com> 10851 + M: Pankaj Sharma <pankj.sharma@samsung.com> 10852 10852 L: linux-can@vger.kernel.org 10853 10853 S: Maintained 10854 10854 F: Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
+1
arch/arm/crypto/chacha-glue.c
··· 60 60 chacha_block_xor_neon(state, d, s, nrounds); 61 61 if (d != dst) 62 62 memcpy(dst, buf, bytes); 63 + state[12]++; 63 64 } 64 65 } 65 66
+2 -1
crypto/ecdh.c
··· 39 39 struct ecdh params; 40 40 unsigned int ndigits; 41 41 42 - if (crypto_ecdh_decode_key(buf, len, &params) < 0) 42 + if (crypto_ecdh_decode_key(buf, len, &params) < 0 || 43 + params.key_size > sizeof(ctx->private_key)) 43 44 return -EINVAL; 44 45 45 46 ndigits = ecdh_supported_curve(params.curve_id);
+7 -2
drivers/base/regmap/regmap-debugfs.c
··· 582 582 devname = dev_name(map->dev); 583 583 584 584 if (name) { 585 - map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", 585 + if (!map->debugfs_name) { 586 + map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", 586 587 devname, name); 588 + if (!map->debugfs_name) 589 + return; 590 + } 587 591 name = map->debugfs_name; 588 592 } else { 589 593 name = devname; ··· 595 591 596 592 if (!strcmp(name, "dummy")) { 597 593 kfree(map->debugfs_name); 598 - 599 594 map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d", 600 595 dummy_index); 596 + if (!map->debugfs_name) 597 + return; 601 598 name = map->debugfs_name; 602 599 dummy_index++; 603 600 }
+1
drivers/isdn/mISDN/Kconfig
··· 13 13 config MISDN_DSP 14 14 tristate "Digital Audio Processing of transparent data" 15 15 depends on MISDN 16 + select BITREVERSE 16 17 help 17 18 Enable support for digital audio processing capability. 18 19
+14 -8
drivers/net/bareudp.c
··· 645 645 return 0; 646 646 } 647 647 648 + static void bareudp_dellink(struct net_device *dev, struct list_head *head) 649 + { 650 + struct bareudp_dev *bareudp = netdev_priv(dev); 651 + 652 + list_del(&bareudp->next); 653 + unregister_netdevice_queue(dev, head); 654 + } 655 + 648 656 static int bareudp_newlink(struct net *net, struct net_device *dev, 649 657 struct nlattr *tb[], struct nlattr *data[], 650 658 struct netlink_ext_ack *extack) 651 659 { 652 660 struct bareudp_conf conf; 661 + LIST_HEAD(list_kill); 653 662 int err; 654 663 655 664 err = bareudp2info(data, &conf, extack); ··· 671 662 672 663 err = bareudp_link_config(dev, tb); 673 664 if (err) 674 - return err; 665 + goto err_unconfig; 675 666 676 667 return 0; 677 - } 678 668 679 - static void bareudp_dellink(struct net_device *dev, struct list_head *head) 680 - { 681 - struct bareudp_dev *bareudp = netdev_priv(dev); 682 - 683 - list_del(&bareudp->next); 684 - unregister_netdevice_queue(dev, head); 669 + err_unconfig: 670 + bareudp_dellink(dev, &list_kill); 671 + unregister_netdevice_many(&list_kill); 672 + return err; 685 673 } 686 674 687 675 static size_t bareudp_get_size(const struct net_device *dev)
+1
drivers/net/can/Kconfig
··· 123 123 config CAN_KVASER_PCIEFD 124 124 depends on PCI 125 125 tristate "Kvaser PCIe FD cards" 126 + select CRC32 126 127 help 127 128 This is a driver for the Kvaser PCI Express CAN FD family. 128 129
-2
drivers/net/can/m_can/m_can.c
··· 1852 1852 void m_can_class_unregister(struct m_can_classdev *cdev) 1853 1853 { 1854 1854 unregister_candev(cdev->net); 1855 - 1856 - m_can_clk_stop(cdev); 1857 1855 } 1858 1856 EXPORT_SYMBOL_GPL(m_can_class_unregister); 1859 1857
-26
drivers/net/can/m_can/tcan4x5x-core.c
··· 108 108 109 109 } 110 110 111 - static struct can_bittiming_const tcan4x5x_bittiming_const = { 112 - .name = KBUILD_MODNAME, 113 - .tseg1_min = 2, 114 - .tseg1_max = 31, 115 - .tseg2_min = 2, 116 - .tseg2_max = 16, 117 - .sjw_max = 16, 118 - .brp_min = 1, 119 - .brp_max = 32, 120 - .brp_inc = 1, 121 - }; 122 - 123 - static struct can_bittiming_const tcan4x5x_data_bittiming_const = { 124 - .name = KBUILD_MODNAME, 125 - .tseg1_min = 1, 126 - .tseg1_max = 32, 127 - .tseg2_min = 1, 128 - .tseg2_max = 16, 129 - .sjw_max = 16, 130 - .brp_min = 1, 131 - .brp_max = 32, 132 - .brp_inc = 1, 133 - }; 134 - 135 111 static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) 136 112 { 137 113 int wake_state = 0; ··· 349 373 mcan_class->dev = &spi->dev; 350 374 mcan_class->ops = &tcan4x5x_ops; 351 375 mcan_class->is_peripheral = true; 352 - mcan_class->bit_timing = &tcan4x5x_bittiming_const; 353 - mcan_class->data_timing = &tcan4x5x_data_bittiming_const; 354 376 mcan_class->net->irq = spi->irq; 355 377 356 378 spi_set_drvdata(spi, priv);
+2 -2
drivers/net/can/rcar/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 config CAN_RCAR 3 - tristate "Renesas R-Car CAN controller" 3 + tristate "Renesas R-Car and RZ/G CAN controller" 4 4 depends on ARCH_RENESAS || ARM 5 5 help 6 6 Say Y here if you want to use CAN controller found on Renesas R-Car 7 - SoCs. 7 + or RZ/G SoCs. 8 8 9 9 To compile this driver as a module, choose M here: the module will 10 10 be called rcar_can.
+8 -9
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
··· 1368 1368 struct mcp251xfd_tx_ring *tx_ring = priv->tx; 1369 1369 struct spi_transfer *last_xfer; 1370 1370 1371 - tx_ring->tail += len; 1372 - 1373 1371 /* Increment the TEF FIFO tail pointer 'len' times in 1374 1372 * a single SPI message. 1375 - */ 1376 - 1377 - /* Note: 1373 + * 1374 + * Note: 1378 1375 * 1379 1376 * "cs_change == 1" on the last transfer results in an 1380 1377 * active chip select after the complete SPI ··· 1387 1390 last_xfer->cs_change = 1; 1388 1391 if (err) 1389 1392 return err; 1393 + 1394 + tx_ring->tail += len; 1390 1395 1391 1396 err = mcp251xfd_check_tef_tail(priv); 1392 1397 if (err) ··· 1552 1553 1553 1554 /* Increment the RX FIFO tail pointer 'len' times in a 1554 1555 * single SPI message. 1555 - */ 1556 - ring->tail += len; 1557 - 1558 - /* Note: 1556 + * 1557 + * Note: 1559 1558 * 1560 1559 * "cs_change == 1" on the last transfer results in an 1561 1560 * active chip select after the complete SPI ··· 1569 1572 last_xfer->cs_change = 1; 1570 1573 if (err) 1571 1574 return err; 1575 + 1576 + ring->tail += len; 1572 1577 } 1573 1578 1574 1579 return 0;
+1
drivers/net/dsa/hirschmann/Kconfig
··· 4 4 depends on HAS_IOMEM 5 5 depends on NET_DSA 6 6 depends on PTP_1588_CLOCK 7 + depends on LEDS_CLASS 7 8 select NET_DSA_TAG_HELLCREEK 8 9 help 9 10 This driver adds support for Hirschmann Hellcreek TSN switches.
+4 -3
drivers/net/dsa/lantiq_gswip.c
··· 1436 1436 phylink_set(mask, Pause); 1437 1437 phylink_set(mask, Asym_Pause); 1438 1438 1439 - /* With the exclusion of MII and Reverse MII, we support Gigabit, 1440 - * including Half duplex 1439 + /* With the exclusion of MII, Reverse MII and Reduced MII, we 1440 + * support Gigabit, including Half duplex 1441 1441 */ 1442 1442 if (state->interface != PHY_INTERFACE_MODE_MII && 1443 - state->interface != PHY_INTERFACE_MODE_REVMII) { 1443 + state->interface != PHY_INTERFACE_MODE_REVMII && 1444 + state->interface != PHY_INTERFACE_MODE_RMII) { 1444 1445 phylink_set(mask, 1000baseT_Full); 1445 1446 phylink_set(mask, 1000baseT_Half); 1446 1447 }
+24 -47
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
··· 621 621 622 622 while (!skb_queue_empty(&listen_ctx->synq)) { 623 623 struct chtls_sock *csk = 624 - container_of((struct synq *)__skb_dequeue 624 + container_of((struct synq *)skb_peek 625 625 (&listen_ctx->synq), struct chtls_sock, synq); 626 626 struct sock *child = csk->sk; 627 627 ··· 1109 1109 const struct cpl_pass_accept_req *req, 1110 1110 struct chtls_dev *cdev) 1111 1111 { 1112 + struct adapter *adap = pci_get_drvdata(cdev->pdev); 1112 1113 struct neighbour *n = NULL; 1113 1114 struct inet_sock *newinet; 1114 1115 const struct iphdr *iph; ··· 1119 1118 struct dst_entry *dst; 1120 1119 struct tcp_sock *tp; 1121 1120 struct sock *newsk; 1121 + bool found = false; 1122 1122 u16 port_id; 1123 1123 int rxq_idx; 1124 - int step; 1124 + int step, i; 1125 1125 1126 1126 iph = (const struct iphdr *)network_hdr; 1127 1127 newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb); ··· 1154 1152 n = dst_neigh_lookup(dst, &ip6h->saddr); 1155 1153 #endif 1156 1154 } 1157 - if (!n) 1155 + if (!n || !n->dev) 1158 1156 goto free_sk; 1159 1157 1160 1158 ndev = n->dev; ··· 1162 1160 goto free_dst; 1163 1161 if (is_vlan_dev(ndev)) 1164 1162 ndev = vlan_dev_real_dev(ndev); 1163 + 1164 + for_each_port(adap, i) 1165 + if (cdev->ports[i] == ndev) 1166 + found = true; 1167 + 1168 + if (!found) 1169 + goto free_dst; 1165 1170 1166 1171 port_id = cxgb4_port_idx(ndev); 1167 1172 ··· 1247 1238 free_csk: 1248 1239 chtls_sock_release(&csk->kref); 1249 1240 free_dst: 1241 + neigh_release(n); 1250 1242 dst_release(dst); 1251 1243 free_sk: 1252 1244 inet_csk_prepare_forced_close(newsk); ··· 1397 1387 1398 1388 newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev); 1399 1389 if (!newsk) 1400 - goto free_oreq; 1390 + goto reject; 1401 1391 1402 1392 if (chtls_get_module(newsk)) 1403 1393 goto reject; ··· 1413 1403 kfree_skb(skb); 1414 1404 return; 1415 1405 1416 - free_oreq: 1417 - chtls_reqsk_free(oreq); 1418 1406 reject: 1419 1407 mk_tid_release(reply_skb, 0, tid); 1420 1408 
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); ··· 1597 1589 sk_wake_async(sk, 0, POLL_OUT); 1598 1590 1599 1591 data = lookup_stid(cdev->tids, stid); 1592 + if (!data) { 1593 + /* listening server close */ 1594 + kfree_skb(skb); 1595 + goto unlock; 1596 + } 1600 1597 lsk = ((struct listen_ctx *)data)->lsk; 1601 1598 1602 1599 bh_lock_sock(lsk); ··· 2010 1997 spin_unlock_bh(&cdev->deferq.lock); 2011 1998 } 2012 1999 2013 - static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, 2014 - struct chtls_dev *cdev, int status, int queue) 2015 - { 2016 - struct cpl_abort_req_rss *req = cplhdr(skb); 2017 - struct sk_buff *reply_skb; 2018 - struct chtls_sock *csk; 2019 - 2020 - csk = rcu_dereference_sk_user_data(sk); 2021 - 2022 - reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl), 2023 - GFP_KERNEL); 2024 - 2025 - if (!reply_skb) { 2026 - req->status = (queue << 1); 2027 - t4_defer_reply(skb, cdev, send_defer_abort_rpl); 2028 - return; 2029 - } 2030 - 2031 - set_abort_rpl_wr(reply_skb, GET_TID(req), status); 2032 - kfree_skb(skb); 2033 - 2034 - set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue); 2035 - if (csk_conn_inline(csk)) { 2036 - struct l2t_entry *e = csk->l2t_entry; 2037 - 2038 - if (e && sk->sk_state != TCP_SYN_RECV) { 2039 - cxgb4_l2t_send(csk->egress_dev, reply_skb, e); 2040 - return; 2041 - } 2042 - } 2043 - cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); 2044 - } 2045 - 2046 2000 static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb, 2047 2001 struct chtls_dev *cdev, 2048 2002 int status, int queue) ··· 2058 2078 queue = csk->txq_idx; 2059 2079 2060 2080 skb->sk = NULL; 2081 + chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, 2082 + CPL_ABORT_NO_RST, queue); 2061 2083 do_abort_syn_rcv(child, lsk); 2062 - send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, 2063 - CPL_ABORT_NO_RST, queue); 2064 2084 } 2065 2085 2066 2086 static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) ··· 2090 2110 if (!sock_owned_by_user(psk)) { 
2091 2111 int queue = csk->txq_idx; 2092 2112 2113 + chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); 2093 2114 do_abort_syn_rcv(sk, psk); 2094 - send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); 2095 2115 } else { 2096 2116 skb->sk = sk; 2097 2117 BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv; ··· 2109 2129 int queue = csk->txq_idx; 2110 2130 2111 2131 if (is_neg_adv(req->status)) { 2112 - if (sk->sk_state == TCP_SYN_RECV) 2113 - chtls_set_tcb_tflag(sk, 0, 0); 2114 - 2115 2132 kfree_skb(skb); 2116 2133 return; 2117 2134 } ··· 2135 2158 if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb)) 2136 2159 return; 2137 2160 2138 - chtls_release_resources(sk); 2139 - chtls_conn_done(sk); 2140 2161 } 2141 2162 2142 2163 chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev, 2143 2164 rst_status, queue); 2165 + chtls_release_resources(sk); 2166 + chtls_conn_done(sk); 2144 2167 } 2145 2168 2146 2169 static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
+1
drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
··· 223 223 }; 224 224 225 225 module_platform_driver(fs_enet_bb_mdio_driver); 226 + MODULE_LICENSE("GPL");
+1
drivers/net/ethernet/freescale/fs_enet/mii-fec.c
··· 224 224 }; 225 225 226 226 module_platform_driver(fs_enet_fec_mdio_driver); 227 + MODULE_LICENSE("GPL");
+2 -2
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
··· 169 169 #define hclge_mbx_ring_ptr_move_crq(crq) \ 170 170 (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) 171 171 #define hclge_mbx_tail_ptr_move_arq(arq) \ 172 - (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) 172 + (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) 173 173 #define hclge_mbx_head_ptr_move_arq(arq) \ 174 - (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) 174 + (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) 175 175 #endif
+6 -3
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 752 752 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; 753 753 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; 754 754 755 - if (hdev->hw.mac.phydev) { 755 + if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && 756 + hdev->hw.mac.phydev->drv->set_loopback) { 756 757 count += 1; 757 758 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; 758 759 } ··· 4538 4537 req->ipv4_sctp_en = tuple_sets; 4539 4538 break; 4540 4539 case SCTP_V6_FLOW: 4541 - if ((nfc->data & RXH_L4_B_0_1) || 4542 - (nfc->data & RXH_L4_B_2_3)) 4540 + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 4541 + (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) 4543 4542 return -EINVAL; 4544 4543 4545 4544 req->ipv6_sctp_en = tuple_sets; ··· 4731 4730 vport[i].rss_tuple_sets.ipv6_udp_en = 4732 4731 HCLGE_RSS_INPUT_TUPLE_OTHER; 4733 4732 vport[i].rss_tuple_sets.ipv6_sctp_en = 4733 + hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 4734 + HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT : 4734 4735 HCLGE_RSS_INPUT_TUPLE_SCTP; 4735 4736 vport[i].rss_tuple_sets.ipv6_fragment_en = 4736 4737 HCLGE_RSS_INPUT_TUPLE_OTHER;
+2
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
··· 107 107 #define HCLGE_D_IP_BIT BIT(2) 108 108 #define HCLGE_S_IP_BIT BIT(3) 109 109 #define HCLGE_V_TAG_BIT BIT(4) 110 + #define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \ 111 + (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT) 110 112 111 113 #define HCLGE_RSS_TC_SIZE_0 1 112 114 #define HCLGE_RSS_TC_SIZE_1 2
+6 -3
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 917 917 req->ipv4_sctp_en = tuple_sets; 918 918 break; 919 919 case SCTP_V6_FLOW: 920 - if ((nfc->data & RXH_L4_B_0_1) || 921 - (nfc->data & RXH_L4_B_2_3)) 920 + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && 921 + (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3))) 922 922 return -EINVAL; 923 923 924 924 req->ipv6_sctp_en = tuple_sets; ··· 2502 2502 tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2503 2503 tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2504 2504 tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2505 - tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2505 + tuple_sets->ipv6_sctp_en = 2506 + hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ? 2507 + HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT : 2508 + HCLGEVF_RSS_INPUT_TUPLE_SCTP; 2506 2509 tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER; 2507 2510 } 2508 2511
+2
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
··· 122 122 #define HCLGEVF_D_IP_BIT BIT(2) 123 123 #define HCLGEVF_S_IP_BIT BIT(3) 124 124 #define HCLGEVF_V_TAG_BIT BIT(4) 125 + #define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \ 126 + (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT) 125 127 126 128 #define HCLGEVF_STATS_TIMER_INTERVAL 36U 127 129
+1 -1
drivers/net/ethernet/marvell/mvneta.c
··· 4432 4432 struct bpf_prog *old_prog; 4433 4433 4434 4434 if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) { 4435 - NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP"); 4435 + NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP"); 4436 4436 return -EOPNOTSUPP; 4437 4437 } 4438 4438
+11 -3
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
··· 871 871 if (!lmac) 872 872 return -ENOMEM; 873 873 lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); 874 - if (!lmac->name) 875 - return -ENOMEM; 874 + if (!lmac->name) { 875 + err = -ENOMEM; 876 + goto err_lmac_free; 877 + } 876 878 sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); 877 879 lmac->lmac_id = i; 878 880 lmac->cgx = cgx; ··· 885 883 CGX_LMAC_FWI + i * 9), 886 884 cgx_fwi_event_handler, 0, lmac->name, lmac); 887 885 if (err) 888 - return err; 886 + goto err_irq; 889 887 890 888 /* Enable interrupt */ 891 889 cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, ··· 897 895 } 898 896 899 897 return cgx_lmac_verify_fwi_version(cgx); 898 + 899 + err_irq: 900 + kfree(lmac->name); 901 + err_lmac_free: 902 + kfree(lmac); 903 + return err; 900 904 } 901 905 902 906 static int cgx_lmac_exit(struct cgx *cgx)
+5
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
··· 626 626 if (!reg_c0) 627 627 return true; 628 628 629 + /* If reg_c0 is not equal to the default flow tag then skb->mark 630 + * is not supported and must be reset back to 0. 631 + */ 632 + skb->mark = 0; 633 + 629 634 priv = netdev_priv(skb->dev); 630 635 esw = priv->mdev->priv.eswitch; 631 636
+49 -28
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
··· 118 118 u16 zone; 119 119 }; 120 120 121 - struct mlx5_ct_shared_counter { 121 + struct mlx5_ct_counter { 122 122 struct mlx5_fc *counter; 123 123 refcount_t refcount; 124 + bool is_shared; 124 125 }; 125 126 126 127 struct mlx5_ct_entry { 127 128 struct rhash_head node; 128 129 struct rhash_head tuple_node; 129 130 struct rhash_head tuple_nat_node; 130 - struct mlx5_ct_shared_counter *shared_counter; 131 + struct mlx5_ct_counter *counter; 131 132 unsigned long cookie; 132 133 unsigned long restore_cookie; 133 134 struct mlx5_ct_tuple tuple; ··· 395 394 } 396 395 397 396 static void 398 - mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry) 397 + mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry) 399 398 { 400 - if (!refcount_dec_and_test(&entry->shared_counter->refcount)) 399 + if (entry->counter->is_shared && 400 + !refcount_dec_and_test(&entry->counter->refcount)) 401 401 return; 402 402 403 - mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter); 404 - kfree(entry->shared_counter); 403 + mlx5_fc_destroy(ct_priv->dev, entry->counter->counter); 404 + kfree(entry->counter); 405 405 } 406 406 407 407 static void ··· 701 699 attr->dest_ft = ct_priv->post_ct; 702 700 attr->ft = nat ? 
ct_priv->ct_nat : ct_priv->ct; 703 701 attr->outer_match_level = MLX5_MATCH_L4; 704 - attr->counter = entry->shared_counter->counter; 702 + attr->counter = entry->counter->counter; 705 703 attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT; 706 704 707 705 mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule); ··· 734 732 return err; 735 733 } 736 734 737 - static struct mlx5_ct_shared_counter * 735 + static struct mlx5_ct_counter * 736 + mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv) 737 + { 738 + struct mlx5_ct_counter *counter; 739 + int ret; 740 + 741 + counter = kzalloc(sizeof(*counter), GFP_KERNEL); 742 + if (!counter) 743 + return ERR_PTR(-ENOMEM); 744 + 745 + counter->is_shared = false; 746 + counter->counter = mlx5_fc_create(ct_priv->dev, true); 747 + if (IS_ERR(counter->counter)) { 748 + ct_dbg("Failed to create counter for ct entry"); 749 + ret = PTR_ERR(counter->counter); 750 + kfree(counter); 751 + return ERR_PTR(ret); 752 + } 753 + 754 + return counter; 755 + } 756 + 757 + static struct mlx5_ct_counter * 738 758 mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv, 739 759 struct mlx5_ct_entry *entry) 740 760 { 741 761 struct mlx5_ct_tuple rev_tuple = entry->tuple; 742 - struct mlx5_ct_shared_counter *shared_counter; 743 - struct mlx5_core_dev *dev = ct_priv->dev; 762 + struct mlx5_ct_counter *shared_counter; 744 763 struct mlx5_ct_entry *rev_entry; 745 764 __be16 tmp_port; 746 765 int ret; ··· 790 767 rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple, 791 768 tuples_ht_params); 792 769 if (rev_entry) { 793 - if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) { 770 + if (refcount_inc_not_zero(&rev_entry->counter->refcount)) { 794 771 mutex_unlock(&ct_priv->shared_counter_lock); 795 - return rev_entry->shared_counter; 772 + return rev_entry->counter; 796 773 } 797 774 } 798 775 mutex_unlock(&ct_priv->shared_counter_lock); 799 776 800 - shared_counter = kzalloc(sizeof(*shared_counter), 
GFP_KERNEL); 801 - if (!shared_counter) 802 - return ERR_PTR(-ENOMEM); 803 - 804 - shared_counter->counter = mlx5_fc_create(dev, true); 805 - if (IS_ERR(shared_counter->counter)) { 806 - ct_dbg("Failed to create counter for ct entry"); 807 - ret = PTR_ERR(shared_counter->counter); 808 - kfree(shared_counter); 777 + shared_counter = mlx5_tc_ct_counter_create(ct_priv); 778 + if (IS_ERR(shared_counter)) { 779 + ret = PTR_ERR(shared_counter); 809 780 return ERR_PTR(ret); 810 781 } 811 782 783 + shared_counter->is_shared = true; 812 784 refcount_set(&shared_counter->refcount, 1); 813 785 return shared_counter; 814 786 } ··· 816 798 { 817 799 int err; 818 800 819 - entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry); 820 - if (IS_ERR(entry->shared_counter)) { 821 - err = PTR_ERR(entry->shared_counter); 822 - ct_dbg("Failed to create counter for ct entry"); 801 + if (nf_ct_acct_enabled(dev_net(ct_priv->netdev))) 802 + entry->counter = mlx5_tc_ct_counter_create(ct_priv); 803 + else 804 + entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry); 805 + 806 + if (IS_ERR(entry->counter)) { 807 + err = PTR_ERR(entry->counter); 823 808 return err; 824 809 } 825 810 ··· 841 820 err_nat: 842 821 mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); 843 822 err_orig: 844 - mlx5_tc_ct_shared_counter_put(ct_priv, entry); 823 + mlx5_tc_ct_counter_put(ct_priv, entry); 845 824 return err; 846 825 } 847 826 ··· 939 918 rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node, 940 919 tuples_ht_params); 941 920 mutex_unlock(&ct_priv->shared_counter_lock); 942 - mlx5_tc_ct_shared_counter_put(ct_priv, entry); 921 + mlx5_tc_ct_counter_put(ct_priv, entry); 943 922 944 923 } 945 924 ··· 977 956 if (!entry) 978 957 return -ENOENT; 979 958 980 - mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse); 959 + mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse); 981 960 flow_stats_update(&f->stats, bytes, packets, 0, 
lastuse, 982 961 FLOW_ACTION_HW_STATS_DELAYED); 983 962
+9
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
··· 371 371 u8 tun_l4_proto; 372 372 }; 373 373 374 + static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg) 375 + { 376 + /* SWP offsets are in 2-bytes words */ 377 + eseg->swp_outer_l3_offset += VLAN_HLEN / 2; 378 + eseg->swp_outer_l4_offset += VLAN_HLEN / 2; 379 + eseg->swp_inner_l3_offset += VLAN_HLEN / 2; 380 + eseg->swp_inner_l4_offset += VLAN_HLEN / 2; 381 + } 382 + 374 383 static inline void 375 384 mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, 376 385 struct mlx5e_swp_spec *swp_spec)
+5 -3
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
··· 51 51 } 52 52 53 53 static inline void 54 - mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) 54 + mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs) 55 55 { 56 56 struct mlx5e_swp_spec swp_spec = {}; 57 57 unsigned int offset = 0; ··· 85 85 } 86 86 87 87 mlx5e_set_eseg_swp(skb, eseg, &swp_spec); 88 + if (skb_vlan_tag_present(skb) && ihs) 89 + mlx5e_eseg_swp_offsets_add_vlan(eseg); 88 90 } 89 91 90 92 #else ··· 165 163 166 164 static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv, 167 165 struct sk_buff *skb, 168 - struct mlx5_wqe_eth_seg *eseg) 166 + struct mlx5_wqe_eth_seg *eseg, u16 ihs) 169 167 { 170 168 #ifdef CONFIG_MLX5_EN_IPSEC 171 169 if (xfrm_offload(skb)) ··· 174 172 175 173 #if IS_ENABLED(CONFIG_GENEVE) 176 174 if (skb->encapsulation) 177 - mlx5e_tx_tunnel_accel(skb, eseg); 175 + mlx5e_tx_tunnel_accel(skb, eseg, ihs); 178 176 #endif 179 177 180 178 return true;
+18 -6
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1010 1010 return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings); 1011 1011 } 1012 1012 1013 + static int mlx5e_speed_validate(struct net_device *netdev, bool ext, 1014 + const unsigned long link_modes, u8 autoneg) 1015 + { 1016 + /* Extended link-mode has no speed limitations. */ 1017 + if (ext) 1018 + return 0; 1019 + 1020 + if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && 1021 + autoneg != AUTONEG_ENABLE) { 1022 + netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n", 1023 + __func__); 1024 + return -EINVAL; 1025 + } 1026 + return 0; 1027 + } 1028 + 1013 1029 static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) 1014 1030 { 1015 1031 u32 i, ptys_modes = 0; ··· 1119 1103 link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) : 1120 1104 mlx5e_port_speed2linkmodes(mdev, speed, !ext); 1121 1105 1122 - if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && 1123 - autoneg != AUTONEG_ENABLE) { 1124 - netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n", 1125 - __func__); 1126 - err = -EINVAL; 1106 + err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg); 1107 + if (err) 1127 1108 goto out; 1128 - } 1129 1109 1130 1110 link_modes = link_modes & eproto.cap; 1131 1111 if (!link_modes) {
+3
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
··· 942 942 in = kvzalloc(inlen, GFP_KERNEL); 943 943 if (!in) { 944 944 kfree(ft->g); 945 + ft->g = NULL; 945 946 return -ENOMEM; 946 947 } 947 948 ··· 1088 1087 in = kvzalloc(inlen, GFP_KERNEL); 1089 1088 if (!in) { 1090 1089 kfree(ft->g); 1090 + ft->g = NULL; 1091 1091 return -ENOMEM; 1092 1092 } 1093 1093 ··· 1392 1390 ft->g[ft->num_groups] = NULL; 1393 1391 mlx5e_destroy_groups(ft); 1394 1392 kvfree(in); 1393 + kfree(ft->g); 1395 1394 1396 1395 return err; 1397 1396 }
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 3161 3161 3162 3162 mlx5_set_port_admin_status(mdev, state); 3163 3163 3164 - if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY) 3164 + if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS || 3165 + !MLX5_CAP_GEN(mdev, uplink_follow)) 3165 3166 return; 3166 3167 3167 3168 if (state == MLX5_PORT_UP)
+5 -4
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 682 682 683 683 static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq, 684 684 struct sk_buff *skb, struct mlx5e_accel_tx_state *accel, 685 - struct mlx5_wqe_eth_seg *eseg) 685 + struct mlx5_wqe_eth_seg *eseg, u16 ihs) 686 686 { 687 - if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg))) 687 + if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs))) 688 688 return false; 689 689 690 690 mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg); ··· 714 714 if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) { 715 715 struct mlx5_wqe_eth_seg eseg = {}; 716 716 717 - if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg))) 717 + if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, 718 + attr.ihs))) 718 719 return NETDEV_TX_OK; 719 720 720 721 mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more()); ··· 732 731 /* May update the WQE, but may not post other WQEs. */ 733 732 mlx5e_accel_tx_finish(sq, wqe, &accel, 734 733 (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl)); 735 - if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth))) 734 + if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs))) 736 735 return NETDEV_TX_OK; 737 736 738 737 mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
+12 -13
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
··· 95 95 return 0; 96 96 } 97 97 98 - if (!IS_ERR_OR_NULL(vport->egress.acl)) 99 - return 0; 98 + if (!vport->egress.acl) { 99 + vport->egress.acl = esw_acl_table_create(esw, vport->vport, 100 + MLX5_FLOW_NAMESPACE_ESW_EGRESS, 101 + table_size); 102 + if (IS_ERR(vport->egress.acl)) { 103 + err = PTR_ERR(vport->egress.acl); 104 + vport->egress.acl = NULL; 105 + goto out; 106 + } 100 107 101 - vport->egress.acl = esw_acl_table_create(esw, vport->vport, 102 - MLX5_FLOW_NAMESPACE_ESW_EGRESS, 103 - table_size); 104 - if (IS_ERR(vport->egress.acl)) { 105 - err = PTR_ERR(vport->egress.acl); 106 - vport->egress.acl = NULL; 107 - goto out; 108 + err = esw_acl_egress_lgcy_groups_create(esw, vport); 109 + if (err) 110 + goto out; 108 111 } 109 - 110 - err = esw_acl_egress_lgcy_groups_create(esw, vport); 111 - if (err) 112 - goto out; 113 112 114 113 esw_debug(esw->dev, 115 114 "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+5 -6
drivers/net/ethernet/mellanox/mlx5/core/lag.c
··· 564 564 struct mlx5_core_dev *tmp_dev; 565 565 int i, err; 566 566 567 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 567 + if (!MLX5_CAP_GEN(dev, vport_group_manager) || 568 + !MLX5_CAP_GEN(dev, lag_master) || 569 + MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS) 568 570 return; 569 571 570 572 tmp_dev = mlx5_get_next_phys_dev(dev); ··· 584 582 if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0) 585 583 return; 586 584 587 - for (i = 0; i < MLX5_MAX_PORTS; i++) { 588 - tmp_dev = ldev->pf[i].dev; 589 - if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) || 590 - MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS) 585 + for (i = 0; i < MLX5_MAX_PORTS; i++) 586 + if (!ldev->pf[i].dev) 591 587 break; 592 - } 593 588 594 589 if (i >= MLX5_MAX_PORTS) 595 590 ldev->flags |= MLX5_LAG_FLAG_READY;
+5 -2
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1368 1368 MLX5_COREDEV_VF : MLX5_COREDEV_PF; 1369 1369 1370 1370 dev->priv.adev_idx = mlx5_adev_idx_alloc(); 1371 - if (dev->priv.adev_idx < 0) 1372 - return dev->priv.adev_idx; 1371 + if (dev->priv.adev_idx < 0) { 1372 + err = dev->priv.adev_idx; 1373 + goto adev_init_err; 1374 + } 1373 1375 1374 1376 err = mlx5_mdev_init(dev, prof_sel); 1375 1377 if (err) ··· 1405 1403 mlx5_mdev_uninit(dev); 1406 1404 mdev_init_err: 1407 1405 mlx5_adev_idx_free(dev->priv.adev_idx); 1406 + adev_init_err: 1408 1407 mlx5_devlink_free(devlink); 1409 1408 1410 1409 return err;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
··· 116 116 static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev) 117 117 { 118 118 mlx5_core_roce_gid_set(dev, 0, 0, 0, 119 - NULL, NULL, false, 0, 0); 119 + NULL, NULL, false, 0, 1); 120 120 } 121 121 122 122 static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
+10 -2
drivers/net/ethernet/natsemi/macsonic.c
··· 506 506 507 507 err = register_netdev(dev); 508 508 if (err) 509 - goto out; 509 + goto undo_probe; 510 510 511 511 return 0; 512 512 513 + undo_probe: 514 + dma_free_coherent(lp->device, 515 + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), 516 + lp->descriptors, lp->descriptors_laddr); 513 517 out: 514 518 free_netdev(dev); 515 519 ··· 588 584 589 585 err = register_netdev(ndev); 590 586 if (err) 591 - goto out; 587 + goto undo_probe; 592 588 593 589 nubus_set_drvdata(board, ndev); 594 590 595 591 return 0; 596 592 593 + undo_probe: 594 + dma_free_coherent(lp->device, 595 + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), 596 + lp->descriptors, lp->descriptors_laddr); 597 597 out: 598 598 free_netdev(ndev); 599 599 return err;
+5 -2
drivers/net/ethernet/natsemi/xtsonic.c
··· 229 229 sonic_msg_init(dev); 230 230 231 231 if ((err = register_netdev(dev))) 232 - goto out1; 232 + goto undo_probe1; 233 233 234 234 return 0; 235 235 236 - out1: 236 + undo_probe1: 237 + dma_free_coherent(lp->device, 238 + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), 239 + lp->descriptors, lp->descriptors_laddr); 237 240 release_region(dev->base_addr, SONIC_MEM_SIZE); 238 241 out: 239 242 free_netdev(dev);
+1
drivers/net/ethernet/qlogic/Kconfig
··· 78 78 depends on PCI 79 79 select ZLIB_INFLATE 80 80 select CRC8 81 + select CRC32 81 82 select NET_DEVLINK 82 83 help 83 84 This enables the support for Marvell FastLinQ adapters family.
+82 -47
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
··· 64 64 * @variant: reference to the current board variant 65 65 * @regmap: regmap for using the syscon 66 66 * @internal_phy_powered: Does the internal PHY is enabled 67 + * @use_internal_phy: Is the internal PHY selected for use 67 68 * @mux_handle: Internal pointer used by mdio-mux lib 68 69 */ 69 70 struct sunxi_priv_data { ··· 75 74 const struct emac_variant *variant; 76 75 struct regmap_field *regmap_field; 77 76 bool internal_phy_powered; 77 + bool use_internal_phy; 78 78 void *mux_handle; 79 79 }; 80 80 ··· 541 539 .dma_interrupt = sun8i_dwmac_dma_interrupt, 542 540 }; 543 541 542 + static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv); 543 + 544 544 static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) 545 545 { 546 + struct net_device *ndev = platform_get_drvdata(pdev); 546 547 struct sunxi_priv_data *gmac = priv; 547 548 int ret; 548 549 ··· 559 554 560 555 ret = clk_prepare_enable(gmac->tx_clk); 561 556 if (ret) { 562 - if (gmac->regulator) 563 - regulator_disable(gmac->regulator); 564 557 dev_err(&pdev->dev, "Could not enable AHB clock\n"); 565 - return ret; 558 + goto err_disable_regulator; 559 + } 560 + 561 + if (gmac->use_internal_phy) { 562 + ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev)); 563 + if (ret) 564 + goto err_disable_clk; 566 565 } 567 566 568 567 return 0; 568 + 569 + err_disable_clk: 570 + clk_disable_unprepare(gmac->tx_clk); 571 + err_disable_regulator: 572 + if (gmac->regulator) 573 + regulator_disable(gmac->regulator); 574 + 575 + return ret; 569 576 } 570 577 571 578 static void sun8i_dwmac_core_init(struct mac_device_info *hw, ··· 848 831 struct sunxi_priv_data *gmac = priv->plat->bsp_priv; 849 832 u32 reg, val; 850 833 int ret = 0; 851 - bool need_power_ephy = false; 852 834 853 835 if (current_child ^ desired_child) { 854 836 regmap_field_read(gmac->regmap_field, &reg); ··· 855 839 case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID: 856 840 dev_info(priv->device, "Switch mux to internal PHY"); 857 841 
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT; 858 - 859 - need_power_ephy = true; 842 + gmac->use_internal_phy = true; 860 843 break; 861 844 case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID: 862 845 dev_info(priv->device, "Switch mux to external PHY"); 863 846 val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN; 864 - need_power_ephy = false; 847 + gmac->use_internal_phy = false; 865 848 break; 866 849 default: 867 850 dev_err(priv->device, "Invalid child ID %x\n", ··· 868 853 return -EINVAL; 869 854 } 870 855 regmap_field_write(gmac->regmap_field, val); 871 - if (need_power_ephy) { 856 + if (gmac->use_internal_phy) { 872 857 ret = sun8i_dwmac_power_internal_phy(priv); 873 858 if (ret) 874 859 return ret; ··· 898 883 return ret; 899 884 } 900 885 901 - static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) 886 + static int sun8i_dwmac_set_syscon(struct device *dev, 887 + struct plat_stmmacenet_data *plat) 902 888 { 903 - struct sunxi_priv_data *gmac = priv->plat->bsp_priv; 904 - struct device_node *node = priv->device->of_node; 889 + struct sunxi_priv_data *gmac = plat->bsp_priv; 890 + struct device_node *node = dev->of_node; 905 891 int ret; 906 892 u32 reg, val; 907 893 908 894 ret = regmap_field_read(gmac->regmap_field, &val); 909 895 if (ret) { 910 - dev_err(priv->device, "Fail to read from regmap field.\n"); 896 + dev_err(dev, "Fail to read from regmap field.\n"); 911 897 return ret; 912 898 } 913 899 914 900 reg = gmac->variant->default_syscon_value; 915 901 if (reg != val) 916 - dev_warn(priv->device, 902 + dev_warn(dev, 917 903 "Current syscon value is not the default %x (expect %x)\n", 918 904 val, reg); 919 905 ··· 927 911 /* Force EPHY xtal frequency to 24MHz. 
*/ 928 912 reg |= H3_EPHY_CLK_SEL; 929 913 930 - ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node); 914 + ret = of_mdio_parse_addr(dev, plat->phy_node); 931 915 if (ret < 0) { 932 - dev_err(priv->device, "Could not parse MDIO addr\n"); 916 + dev_err(dev, "Could not parse MDIO addr\n"); 933 917 return ret; 934 918 } 935 919 /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY ··· 945 929 946 930 if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) { 947 931 if (val % 100) { 948 - dev_err(priv->device, "tx-delay must be a multiple of 100\n"); 932 + dev_err(dev, "tx-delay must be a multiple of 100\n"); 949 933 return -EINVAL; 950 934 } 951 935 val /= 100; 952 - dev_dbg(priv->device, "set tx-delay to %x\n", val); 936 + dev_dbg(dev, "set tx-delay to %x\n", val); 953 937 if (val <= gmac->variant->tx_delay_max) { 954 938 reg &= ~(gmac->variant->tx_delay_max << 955 939 SYSCON_ETXDC_SHIFT); 956 940 reg |= (val << SYSCON_ETXDC_SHIFT); 957 941 } else { 958 - dev_err(priv->device, "Invalid TX clock delay: %d\n", 942 + dev_err(dev, "Invalid TX clock delay: %d\n", 959 943 val); 960 944 return -EINVAL; 961 945 } ··· 963 947 964 948 if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) { 965 949 if (val % 100) { 966 - dev_err(priv->device, "rx-delay must be a multiple of 100\n"); 950 + dev_err(dev, "rx-delay must be a multiple of 100\n"); 967 951 return -EINVAL; 968 952 } 969 953 val /= 100; 970 - dev_dbg(priv->device, "set rx-delay to %x\n", val); 954 + dev_dbg(dev, "set rx-delay to %x\n", val); 971 955 if (val <= gmac->variant->rx_delay_max) { 972 956 reg &= ~(gmac->variant->rx_delay_max << 973 957 SYSCON_ERXDC_SHIFT); 974 958 reg |= (val << SYSCON_ERXDC_SHIFT); 975 959 } else { 976 - dev_err(priv->device, "Invalid RX clock delay: %d\n", 960 + dev_err(dev, "Invalid RX clock delay: %d\n", 977 961 val); 978 962 return -EINVAL; 979 963 } ··· 984 968 if (gmac->variant->support_rmii) 985 969 reg &= ~SYSCON_RMII_EN; 986 970 987 - switch 
(priv->plat->interface) { 971 + switch (plat->interface) { 988 972 case PHY_INTERFACE_MODE_MII: 989 973 /* default */ 990 974 break; ··· 998 982 reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII; 999 983 break; 1000 984 default: 1001 - dev_err(priv->device, "Unsupported interface mode: %s", 1002 - phy_modes(priv->plat->interface)); 985 + dev_err(dev, "Unsupported interface mode: %s", 986 + phy_modes(plat->interface)); 1003 987 return -EINVAL; 1004 988 } 1005 989 ··· 1020 1004 struct sunxi_priv_data *gmac = priv; 1021 1005 1022 1006 if (gmac->variant->soc_has_internal_phy) { 1023 - /* sun8i_dwmac_exit could be called with mdiomux uninit */ 1024 - if (gmac->mux_handle) 1025 - mdio_mux_uninit(gmac->mux_handle); 1026 1007 if (gmac->internal_phy_powered) 1027 1008 sun8i_dwmac_unpower_internal_phy(gmac); 1028 1009 } 1029 - 1030 - sun8i_dwmac_unset_syscon(gmac); 1031 - 1032 - reset_control_put(gmac->rst_ephy); 1033 1010 1034 1011 clk_disable_unprepare(gmac->tx_clk); 1035 1012 ··· 1058 1049 { 1059 1050 struct mac_device_info *mac; 1060 1051 struct stmmac_priv *priv = ppriv; 1061 - int ret; 1062 1052 1063 1053 mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); 1064 1054 if (!mac) 1065 - return NULL; 1066 - 1067 - ret = sun8i_dwmac_set_syscon(priv); 1068 - if (ret) 1069 1055 return NULL; 1070 1056 1071 1057 mac->pcsr = priv->ioaddr; ··· 1138 1134 if (ret) 1139 1135 return ret; 1140 1136 1141 - plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); 1142 - if (IS_ERR(plat_dat)) 1143 - return PTR_ERR(plat_dat); 1144 - 1145 1137 gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); 1146 1138 if (!gmac) 1147 1139 return -ENOMEM; ··· 1201 1201 ret = of_get_phy_mode(dev->of_node, &interface); 1202 1202 if (ret) 1203 1203 return -EINVAL; 1204 - plat_dat->interface = interface; 1204 + 1205 + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); 1206 + if (IS_ERR(plat_dat)) 1207 + return PTR_ERR(plat_dat); 1205 1208 1206 1209 /* platform data specifying hardware features and 
callbacks. 1207 1210 * hardware features were copied from Allwinner drivers. 1208 1211 */ 1212 + plat_dat->interface = interface; 1209 1213 plat_dat->rx_coe = STMMAC_RX_COE_TYPE2; 1210 1214 plat_dat->tx_coe = 1; 1211 1215 plat_dat->has_sun8i = true; ··· 1218 1214 plat_dat->exit = sun8i_dwmac_exit; 1219 1215 plat_dat->setup = sun8i_dwmac_setup; 1220 1216 1217 + ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat); 1218 + if (ret) 1219 + goto dwmac_deconfig; 1220 + 1221 1221 ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv); 1222 1222 if (ret) 1223 - return ret; 1223 + goto dwmac_syscon; 1224 1224 1225 1225 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); 1226 1226 if (ret) ··· 1238 1230 if (gmac->variant->soc_has_internal_phy) { 1239 1231 ret = get_ephy_nodes(priv); 1240 1232 if (ret) 1241 - goto dwmac_exit; 1233 + goto dwmac_remove; 1242 1234 ret = sun8i_dwmac_register_mdio_mux(priv); 1243 1235 if (ret) { 1244 1236 dev_err(&pdev->dev, "Failed to register mux\n"); ··· 1247 1239 } else { 1248 1240 ret = sun8i_dwmac_reset(priv); 1249 1241 if (ret) 1250 - goto dwmac_exit; 1242 + goto dwmac_remove; 1251 1243 } 1252 1244 1253 1245 return ret; 1254 1246 dwmac_mux: 1255 - sun8i_dwmac_unset_syscon(gmac); 1247 + reset_control_put(gmac->rst_ephy); 1248 + clk_put(gmac->ephy_clk); 1249 + dwmac_remove: 1250 + stmmac_dvr_remove(&pdev->dev); 1256 1251 dwmac_exit: 1252 + sun8i_dwmac_exit(pdev, gmac); 1253 + dwmac_syscon: 1254 + sun8i_dwmac_unset_syscon(gmac); 1255 + dwmac_deconfig: 1256 + stmmac_remove_config_dt(pdev, plat_dat); 1257 + 1258 + return ret; 1259 + } 1260 + 1261 + static int sun8i_dwmac_remove(struct platform_device *pdev) 1262 + { 1263 + struct net_device *ndev = platform_get_drvdata(pdev); 1264 + struct stmmac_priv *priv = netdev_priv(ndev); 1265 + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; 1266 + 1267 + if (gmac->variant->soc_has_internal_phy) { 1268 + mdio_mux_uninit(gmac->mux_handle); 1269 + sun8i_dwmac_unpower_internal_phy(gmac); 1270 + 
reset_control_put(gmac->rst_ephy); 1271 + clk_put(gmac->ephy_clk); 1272 + } 1273 + 1257 1274 stmmac_pltfr_remove(pdev); 1258 - return ret; 1275 + sun8i_dwmac_unset_syscon(gmac); 1276 + 1277 + return 0; 1259 1278 } 1260 1279 1261 1280 static const struct of_device_id sun8i_dwmac_match[] = { ··· 1304 1269 1305 1270 static struct platform_driver sun8i_dwmac_driver = { 1306 1271 .probe = sun8i_dwmac_probe, 1307 - .remove = stmmac_pltfr_remove, 1272 + .remove = sun8i_dwmac_remove, 1308 1273 .driver = { 1309 1274 .name = "dwmac-sun8i", 1310 1275 .pm = &stmmac_pltfr_pm_ops,
+6 -2
drivers/net/usb/cdc_ncm.c
··· 1199 1199 * accordingly. Otherwise, we should check here. 1200 1200 */ 1201 1201 if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) 1202 - delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); 1202 + delayed_ndp_size = ctx->max_ndp_size + 1203 + max_t(u32, 1204 + ctx->tx_ndp_modulus, 1205 + ctx->tx_modulus + ctx->tx_remainder) - 1; 1203 1206 else 1204 1207 delayed_ndp_size = 0; 1205 1208 ··· 1413 1410 if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && 1414 1411 skb_out->len > ctx->min_tx_pkt) { 1415 1412 padding_count = ctx->tx_curr_size - skb_out->len; 1416 - skb_put_zero(skb_out, padding_count); 1413 + if (!WARN_ON(padding_count > ctx->tx_curr_size)) 1414 + skb_put_zero(skb_out, padding_count); 1417 1415 } else if (skb_out->len < ctx->tx_curr_size && 1418 1416 (skb_out->len % dev->maxpacket) == 0) { 1419 1417 skb_put_u8(skb_out, 0); /* force short packet */
+1
drivers/net/wan/Kconfig
··· 282 282 tristate "Slic Maxim ds26522 card support" 283 283 depends on SPI 284 284 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST 285 + select BITREVERSE 285 286 help 286 287 This module initializes and configures the slic maxim card 287 288 in T1 or E1 mode.
+1
drivers/net/wireless/ath/wil6210/Kconfig
··· 2 2 config WIL6210 3 3 tristate "Wilocity 60g WiFi card wil6210 support" 4 4 select WANT_DEV_COREDUMP 5 + select CRC32 5 6 depends on CFG80211 6 7 depends on PCI 7 8 default n
+2
drivers/ptp/Kconfig
··· 64 64 depends on NETWORK_PHY_TIMESTAMPING 65 65 depends on PHYLIB 66 66 depends on PTP_1588_CLOCK 67 + select CRC32 67 68 help 68 69 Supports the DP83640 PHYTER with IEEE 1588 features. 69 70 ··· 79 78 config PTP_1588_CLOCK_INES 80 79 tristate "ZHAW InES PTP time stamping IP core" 81 80 depends on NETWORK_PHY_TIMESTAMPING 81 + depends on HAS_IOMEM 82 82 depends on PHYLIB 83 83 depends on PTP_1588_CLOCK 84 84 help
+1
drivers/regulator/Kconfig
··· 881 881 config REGULATOR_QCOM_RPMH 882 882 tristate "Qualcomm Technologies, Inc. RPMh regulator driver" 883 883 depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST) 884 + depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST) 884 885 help 885 886 This driver supports control of PMIC regulators via the RPMh hardware 886 887 block found on Qualcomm Technologies Inc. SoCs. RPMh regulator
+57
drivers/regulator/bd718x7-regulator.c
··· 15 15 #include <linux/regulator/of_regulator.h> 16 16 #include <linux/slab.h> 17 17 18 + /* Typical regulator startup times as per data sheet in uS */ 19 + #define BD71847_BUCK1_STARTUP_TIME 144 20 + #define BD71847_BUCK2_STARTUP_TIME 162 21 + #define BD71847_BUCK3_STARTUP_TIME 162 22 + #define BD71847_BUCK4_STARTUP_TIME 240 23 + #define BD71847_BUCK5_STARTUP_TIME 270 24 + #define BD71847_BUCK6_STARTUP_TIME 200 25 + #define BD71847_LDO1_STARTUP_TIME 440 26 + #define BD71847_LDO2_STARTUP_TIME 370 27 + #define BD71847_LDO3_STARTUP_TIME 310 28 + #define BD71847_LDO4_STARTUP_TIME 400 29 + #define BD71847_LDO5_STARTUP_TIME 530 30 + #define BD71847_LDO6_STARTUP_TIME 400 31 + 32 + #define BD71837_BUCK1_STARTUP_TIME 160 33 + #define BD71837_BUCK2_STARTUP_TIME 180 34 + #define BD71837_BUCK3_STARTUP_TIME 180 35 + #define BD71837_BUCK4_STARTUP_TIME 180 36 + #define BD71837_BUCK5_STARTUP_TIME 160 37 + #define BD71837_BUCK6_STARTUP_TIME 240 38 + #define BD71837_BUCK7_STARTUP_TIME 220 39 + #define BD71837_BUCK8_STARTUP_TIME 200 40 + #define BD71837_LDO1_STARTUP_TIME 440 41 + #define BD71837_LDO2_STARTUP_TIME 370 42 + #define BD71837_LDO3_STARTUP_TIME 310 43 + #define BD71837_LDO4_STARTUP_TIME 400 44 + #define BD71837_LDO5_STARTUP_TIME 310 45 + #define BD71837_LDO6_STARTUP_TIME 400 46 + #define BD71837_LDO7_STARTUP_TIME 530 47 + 18 48 /* 19 49 * BD718(37/47/50) have two "enable control modes". ON/OFF can either be 20 50 * controlled by software - or by PMIC internal HW state machine. 
Whether ··· 643 613 .vsel_mask = DVS_BUCK_RUN_MASK, 644 614 .enable_reg = BD718XX_REG_BUCK1_CTRL, 645 615 .enable_mask = BD718XX_BUCK_EN, 616 + .enable_time = BD71847_BUCK1_STARTUP_TIME, 646 617 .owner = THIS_MODULE, 647 618 .of_parse_cb = buck_set_hw_dvs_levels, 648 619 }, ··· 677 646 .vsel_mask = DVS_BUCK_RUN_MASK, 678 647 .enable_reg = BD718XX_REG_BUCK2_CTRL, 679 648 .enable_mask = BD718XX_BUCK_EN, 649 + .enable_time = BD71847_BUCK2_STARTUP_TIME, 680 650 .owner = THIS_MODULE, 681 651 .of_parse_cb = buck_set_hw_dvs_levels, 682 652 }, ··· 712 680 .linear_range_selectors = bd71847_buck3_volt_range_sel, 713 681 .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL, 714 682 .enable_mask = BD718XX_BUCK_EN, 683 + .enable_time = BD71847_BUCK3_STARTUP_TIME, 715 684 .owner = THIS_MODULE, 716 685 }, 717 686 .init = { ··· 739 706 .vsel_range_mask = BD71847_BUCK4_RANGE_MASK, 740 707 .linear_range_selectors = bd71847_buck4_volt_range_sel, 741 708 .enable_mask = BD718XX_BUCK_EN, 709 + .enable_time = BD71847_BUCK4_STARTUP_TIME, 742 710 .owner = THIS_MODULE, 743 711 }, 744 712 .init = { ··· 761 727 .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK, 762 728 .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL, 763 729 .enable_mask = BD718XX_BUCK_EN, 730 + .enable_time = BD71847_BUCK5_STARTUP_TIME, 764 731 .owner = THIS_MODULE, 765 732 }, 766 733 .init = { ··· 785 750 .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK, 786 751 .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL, 787 752 .enable_mask = BD718XX_BUCK_EN, 753 + .enable_time = BD71847_BUCK6_STARTUP_TIME, 788 754 .owner = THIS_MODULE, 789 755 }, 790 756 .init = { ··· 811 775 .linear_range_selectors = bd718xx_ldo1_volt_range_sel, 812 776 .enable_reg = BD718XX_REG_LDO1_VOLT, 813 777 .enable_mask = BD718XX_LDO_EN, 778 + .enable_time = BD71847_LDO1_STARTUP_TIME, 814 779 .owner = THIS_MODULE, 815 780 }, 816 781 .init = { ··· 833 796 .n_voltages = ARRAY_SIZE(ldo_2_volts), 834 797 .enable_reg = BD718XX_REG_LDO2_VOLT, 835 798 .enable_mask = BD718XX_LDO_EN, 799 + 
.enable_time = BD71847_LDO2_STARTUP_TIME, 836 800 .owner = THIS_MODULE, 837 801 }, 838 802 .init = { ··· 856 818 .vsel_mask = BD718XX_LDO3_MASK, 857 819 .enable_reg = BD718XX_REG_LDO3_VOLT, 858 820 .enable_mask = BD718XX_LDO_EN, 821 + .enable_time = BD71847_LDO3_STARTUP_TIME, 859 822 .owner = THIS_MODULE, 860 823 }, 861 824 .init = { ··· 879 840 .vsel_mask = BD718XX_LDO4_MASK, 880 841 .enable_reg = BD718XX_REG_LDO4_VOLT, 881 842 .enable_mask = BD718XX_LDO_EN, 843 + .enable_time = BD71847_LDO4_STARTUP_TIME, 882 844 .owner = THIS_MODULE, 883 845 }, 884 846 .init = { ··· 905 865 .linear_range_selectors = bd71847_ldo5_volt_range_sel, 906 866 .enable_reg = BD718XX_REG_LDO5_VOLT, 907 867 .enable_mask = BD718XX_LDO_EN, 868 + .enable_time = BD71847_LDO5_STARTUP_TIME, 908 869 .owner = THIS_MODULE, 909 870 }, 910 871 .init = { ··· 930 889 .vsel_mask = BD718XX_LDO6_MASK, 931 890 .enable_reg = BD718XX_REG_LDO6_VOLT, 932 891 .enable_mask = BD718XX_LDO_EN, 892 + .enable_time = BD71847_LDO6_STARTUP_TIME, 933 893 .owner = THIS_MODULE, 934 894 }, 935 895 .init = { ··· 984 942 .vsel_mask = DVS_BUCK_RUN_MASK, 985 943 .enable_reg = BD718XX_REG_BUCK1_CTRL, 986 944 .enable_mask = BD718XX_BUCK_EN, 945 + .enable_time = BD71837_BUCK1_STARTUP_TIME, 987 946 .owner = THIS_MODULE, 988 947 .of_parse_cb = buck_set_hw_dvs_levels, 989 948 }, ··· 1018 975 .vsel_mask = DVS_BUCK_RUN_MASK, 1019 976 .enable_reg = BD718XX_REG_BUCK2_CTRL, 1020 977 .enable_mask = BD718XX_BUCK_EN, 978 + .enable_time = BD71837_BUCK2_STARTUP_TIME, 1021 979 .owner = THIS_MODULE, 1022 980 .of_parse_cb = buck_set_hw_dvs_levels, 1023 981 }, ··· 1049 1005 .vsel_mask = DVS_BUCK_RUN_MASK, 1050 1006 .enable_reg = BD71837_REG_BUCK3_CTRL, 1051 1007 .enable_mask = BD718XX_BUCK_EN, 1008 + .enable_time = BD71837_BUCK3_STARTUP_TIME, 1052 1009 .owner = THIS_MODULE, 1053 1010 .of_parse_cb = buck_set_hw_dvs_levels, 1054 1011 }, ··· 1078 1033 .vsel_mask = DVS_BUCK_RUN_MASK, 1079 1034 .enable_reg = BD71837_REG_BUCK4_CTRL, 1080 1035 
.enable_mask = BD718XX_BUCK_EN, 1036 + .enable_time = BD71837_BUCK4_STARTUP_TIME, 1081 1037 .owner = THIS_MODULE, 1082 1038 .of_parse_cb = buck_set_hw_dvs_levels, 1083 1039 }, ··· 1111 1065 .linear_range_selectors = bd71837_buck5_volt_range_sel, 1112 1066 .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL, 1113 1067 .enable_mask = BD718XX_BUCK_EN, 1068 + .enable_time = BD71837_BUCK5_STARTUP_TIME, 1114 1069 .owner = THIS_MODULE, 1115 1070 }, 1116 1071 .init = { ··· 1135 1088 .vsel_mask = BD71837_BUCK6_MASK, 1136 1089 .enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL, 1137 1090 .enable_mask = BD718XX_BUCK_EN, 1091 + .enable_time = BD71837_BUCK6_STARTUP_TIME, 1138 1092 .owner = THIS_MODULE, 1139 1093 }, 1140 1094 .init = { ··· 1157 1109 .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK, 1158 1110 .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL, 1159 1111 .enable_mask = BD718XX_BUCK_EN, 1112 + .enable_time = BD71837_BUCK7_STARTUP_TIME, 1160 1113 .owner = THIS_MODULE, 1161 1114 }, 1162 1115 .init = { ··· 1181 1132 .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK, 1182 1133 .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL, 1183 1134 .enable_mask = BD718XX_BUCK_EN, 1135 + .enable_time = BD71837_BUCK8_STARTUP_TIME, 1184 1136 .owner = THIS_MODULE, 1185 1137 }, 1186 1138 .init = { ··· 1207 1157 .linear_range_selectors = bd718xx_ldo1_volt_range_sel, 1208 1158 .enable_reg = BD718XX_REG_LDO1_VOLT, 1209 1159 .enable_mask = BD718XX_LDO_EN, 1160 + .enable_time = BD71837_LDO1_STARTUP_TIME, 1210 1161 .owner = THIS_MODULE, 1211 1162 }, 1212 1163 .init = { ··· 1229 1178 .n_voltages = ARRAY_SIZE(ldo_2_volts), 1230 1179 .enable_reg = BD718XX_REG_LDO2_VOLT, 1231 1180 .enable_mask = BD718XX_LDO_EN, 1181 + .enable_time = BD71837_LDO2_STARTUP_TIME, 1232 1182 .owner = THIS_MODULE, 1233 1183 }, 1234 1184 .init = { ··· 1252 1200 .vsel_mask = BD718XX_LDO3_MASK, 1253 1201 .enable_reg = BD718XX_REG_LDO3_VOLT, 1254 1202 .enable_mask = BD718XX_LDO_EN, 1203 + .enable_time = BD71837_LDO3_STARTUP_TIME, 1255 1204 .owner = 
THIS_MODULE, 1256 1205 }, 1257 1206 .init = { ··· 1275 1222 .vsel_mask = BD718XX_LDO4_MASK, 1276 1223 .enable_reg = BD718XX_REG_LDO4_VOLT, 1277 1224 .enable_mask = BD718XX_LDO_EN, 1225 + .enable_time = BD71837_LDO4_STARTUP_TIME, 1278 1226 .owner = THIS_MODULE, 1279 1227 }, 1280 1228 .init = { ··· 1300 1246 .vsel_mask = BD71837_LDO5_MASK, 1301 1247 .enable_reg = BD718XX_REG_LDO5_VOLT, 1302 1248 .enable_mask = BD718XX_LDO_EN, 1249 + .enable_time = BD71837_LDO5_STARTUP_TIME, 1303 1250 .owner = THIS_MODULE, 1304 1251 }, 1305 1252 .init = { ··· 1327 1272 .vsel_mask = BD718XX_LDO6_MASK, 1328 1273 .enable_reg = BD718XX_REG_LDO6_VOLT, 1329 1274 .enable_mask = BD718XX_LDO_EN, 1275 + .enable_time = BD71837_LDO6_STARTUP_TIME, 1330 1276 .owner = THIS_MODULE, 1331 1277 }, 1332 1278 .init = { ··· 1352 1296 .vsel_mask = BD71837_LDO7_MASK, 1353 1297 .enable_reg = BD71837_REG_LDO7_VOLT, 1354 1298 .enable_mask = BD718XX_LDO_EN, 1299 + .enable_time = BD71837_LDO7_STARTUP_TIME, 1355 1300 .owner = THIS_MODULE, 1356 1301 }, 1357 1302 .init = {
+6 -2
drivers/regulator/pf8x00-regulator.c
··· 469 469 } 470 470 471 471 static const struct of_device_id pf8x00_dt_ids[] = { 472 - { .compatible = "nxp,pf8x00",}, 472 + { .compatible = "nxp,pf8100",}, 473 + { .compatible = "nxp,pf8121a",}, 474 + { .compatible = "nxp,pf8200",}, 473 475 { } 474 476 }; 475 477 MODULE_DEVICE_TABLE(of, pf8x00_dt_ids); 476 478 477 479 static const struct i2c_device_id pf8x00_i2c_id[] = { 478 - { "pf8x00", 0 }, 480 + { "pf8100", 0 }, 481 + { "pf8121a", 0 }, 482 + { "pf8200", 0 }, 479 483 {}, 480 484 }; 481 485 MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
+1 -1
drivers/regulator/qcom-rpmh-regulator.c
··· 726 726 static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = { 727 727 .regulator_type = VRM, 728 728 .ops = &rpmh_regulator_vrm_ops, 729 - .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600), 729 + .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000), 730 730 .n_voltages = 5, 731 731 .pmic_mode_map = pmic_mode_map_pmic5_smps, 732 732 .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
+2 -1
drivers/s390/net/qeth_core.h
··· 1079 1079 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, 1080 1080 int clear_start_mask); 1081 1081 int qeth_threads_running(struct qeth_card *, unsigned long); 1082 - int qeth_set_offline(struct qeth_card *card, bool resetting); 1082 + int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, 1083 + bool resetting); 1083 1084 1084 1085 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, 1085 1086 int (*reply_cb)
+25 -13
drivers/s390/net/qeth_core_main.c
··· 5507 5507 return rc; 5508 5508 } 5509 5509 5510 - static int qeth_set_online(struct qeth_card *card) 5510 + static int qeth_set_online(struct qeth_card *card, 5511 + const struct qeth_discipline *disc) 5511 5512 { 5512 5513 bool carrier_ok; 5513 5514 int rc; 5514 5515 5515 - mutex_lock(&card->discipline_mutex); 5516 5516 mutex_lock(&card->conf_mutex); 5517 5517 QETH_CARD_TEXT(card, 2, "setonlin"); 5518 5518 ··· 5529 5529 /* no need for locking / error handling at this early stage: */ 5530 5530 qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card)); 5531 5531 5532 - rc = card->discipline->set_online(card, carrier_ok); 5532 + rc = disc->set_online(card, carrier_ok); 5533 5533 if (rc) 5534 5534 goto err_online; 5535 5535 ··· 5537 5537 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5538 5538 5539 5539 mutex_unlock(&card->conf_mutex); 5540 - mutex_unlock(&card->discipline_mutex); 5541 5540 return 0; 5542 5541 5543 5542 err_online: ··· 5551 5552 qdio_free(CARD_DDEV(card)); 5552 5553 5553 5554 mutex_unlock(&card->conf_mutex); 5554 - mutex_unlock(&card->discipline_mutex); 5555 5555 return rc; 5556 5556 } 5557 5557 5558 - int qeth_set_offline(struct qeth_card *card, bool resetting) 5558 + int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, 5559 + bool resetting) 5559 5560 { 5560 5561 int rc, rc2, rc3; 5561 5562 5562 - mutex_lock(&card->discipline_mutex); 5563 5563 mutex_lock(&card->conf_mutex); 5564 5564 QETH_CARD_TEXT(card, 3, "setoffl"); 5565 5565 ··· 5579 5581 5580 5582 cancel_work_sync(&card->rx_mode_work); 5581 5583 5582 - card->discipline->set_offline(card); 5584 + disc->set_offline(card); 5583 5585 5584 5586 qeth_qdio_clear_card(card, 0); 5585 5587 qeth_drain_output_queues(card); ··· 5600 5602 kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); 5601 5603 5602 5604 mutex_unlock(&card->conf_mutex); 5603 - mutex_unlock(&card->discipline_mutex); 5604 5605 return 0; 5605 5606 } 5606 5607 EXPORT_SYMBOL_GPL(qeth_set_offline); 
5607 5608 5608 5609 static int qeth_do_reset(void *data) 5609 5610 { 5611 + const struct qeth_discipline *disc; 5610 5612 struct qeth_card *card = data; 5611 5613 int rc; 5614 + 5615 + /* Lock-free, other users will block until we are done. */ 5616 + disc = card->discipline; 5612 5617 5613 5618 QETH_CARD_TEXT(card, 2, "recover1"); 5614 5619 if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) ··· 5620 5619 dev_warn(&card->gdev->dev, 5621 5620 "A recovery process has been started for the device\n"); 5622 5621 5623 - qeth_set_offline(card, true); 5624 - rc = qeth_set_online(card); 5622 + qeth_set_offline(card, disc, true); 5623 + rc = qeth_set_online(card, disc); 5625 5624 if (!rc) { 5626 5625 dev_info(&card->gdev->dev, 5627 5626 "Device successfully recovered!\n"); ··· 6585 6584 break; 6586 6585 default: 6587 6586 card->info.layer_enforced = true; 6587 + /* It's so early that we don't need the discipline_mutex yet. */ 6588 6588 rc = qeth_core_load_discipline(card, enforced_disc); 6589 6589 if (rc) 6590 6590 goto err_load; ··· 6618 6616 6619 6617 QETH_CARD_TEXT(card, 2, "removedv"); 6620 6618 6619 + mutex_lock(&card->discipline_mutex); 6621 6620 if (card->discipline) { 6622 6621 card->discipline->remove(gdev); 6623 6622 qeth_core_free_discipline(card); 6624 6623 } 6624 + mutex_unlock(&card->discipline_mutex); 6625 6625 6626 6626 qeth_free_qdio_queues(card); 6627 6627 ··· 6638 6634 int rc = 0; 6639 6635 enum qeth_discipline_id def_discipline; 6640 6636 6637 + mutex_lock(&card->discipline_mutex); 6641 6638 if (!card->discipline) { 6642 6639 def_discipline = IS_IQD(card) ? 
QETH_DISCIPLINE_LAYER3 : 6643 6640 QETH_DISCIPLINE_LAYER2; ··· 6652 6647 } 6653 6648 } 6654 6649 6655 - rc = qeth_set_online(card); 6650 + rc = qeth_set_online(card, card->discipline); 6651 + 6656 6652 err: 6653 + mutex_unlock(&card->discipline_mutex); 6657 6654 return rc; 6658 6655 } 6659 6656 6660 6657 static int qeth_core_set_offline(struct ccwgroup_device *gdev) 6661 6658 { 6662 6659 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 6660 + int rc; 6663 6661 6664 - return qeth_set_offline(card, false); 6662 + mutex_lock(&card->discipline_mutex); 6663 + rc = qeth_set_offline(card, card->discipline, false); 6664 + mutex_unlock(&card->discipline_mutex); 6665 + 6666 + return rc; 6665 6667 } 6666 6668 6667 6669 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
+1 -1
drivers/s390/net/qeth_l2_main.c
··· 2208 2208 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 2209 2209 2210 2210 if (gdev->state == CCWGROUP_ONLINE) 2211 - qeth_set_offline(card, false); 2211 + qeth_set_offline(card, card->discipline, false); 2212 2212 2213 2213 cancel_work_sync(&card->close_dev_work); 2214 2214 if (card->dev->reg_state == NETREG_REGISTERED)
+2 -2
drivers/s390/net/qeth_l3_main.c
··· 1813 1813 struct net_device *dev, 1814 1814 netdev_features_t features) 1815 1815 { 1816 - if (qeth_get_ip_version(skb) != 4) 1816 + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) 1817 1817 features &= ~NETIF_F_HW_VLAN_CTAG_TX; 1818 1818 return qeth_features_check(skb, dev, features); 1819 1819 } ··· 1971 1971 wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); 1972 1972 1973 1973 if (cgdev->state == CCWGROUP_ONLINE) 1974 - qeth_set_offline(card, false); 1974 + qeth_set_offline(card, card->discipline, false); 1975 1975 1976 1976 cancel_work_sync(&card->close_dev_work); 1977 1977 if (card->dev->reg_state == NETREG_REGISTERED)
+17 -15
drivers/spi/spi-altera.c
··· 189 189 190 190 /* send the first byte */ 191 191 altera_spi_tx_word(hw); 192 - } else { 193 - while (hw->count < hw->len) { 194 - altera_spi_tx_word(hw); 195 192 196 - for (;;) { 197 - altr_spi_readl(hw, ALTERA_SPI_STATUS, &val); 198 - if (val & ALTERA_SPI_STATUS_RRDY_MSK) 199 - break; 200 - 201 - cpu_relax(); 202 - } 203 - 204 - altera_spi_rx_word(hw); 205 - } 206 - spi_finalize_current_transfer(master); 193 + return 1; 207 194 } 208 195 209 - return t->len; 196 + while (hw->count < hw->len) { 197 + altera_spi_tx_word(hw); 198 + 199 + for (;;) { 200 + altr_spi_readl(hw, ALTERA_SPI_STATUS, &val); 201 + if (val & ALTERA_SPI_STATUS_RRDY_MSK) 202 + break; 203 + 204 + cpu_relax(); 205 + } 206 + 207 + altera_spi_rx_word(hw); 208 + } 209 + spi_finalize_current_transfer(master); 210 + 211 + return 0; 210 212 } 211 213 212 214 static irqreturn_t altera_spi_irq(int irq, void *dev)
+80 -4
drivers/spi/spi-geni-qcom.c
··· 83 83 spinlock_t lock; 84 84 int irq; 85 85 bool cs_flag; 86 + bool abort_failed; 86 87 }; 87 88 88 89 static int get_spi_clk_cfg(unsigned int speed_hz, ··· 142 141 spin_unlock_irq(&mas->lock); 143 142 144 143 time_left = wait_for_completion_timeout(&mas->abort_done, HZ); 145 - if (!time_left) 144 + if (!time_left) { 146 145 dev_err(mas->dev, "Failed to cancel/abort m_cmd\n"); 146 + 147 + /* 148 + * No need for a lock since SPI core has a lock and we never 149 + * access this from an interrupt. 150 + */ 151 + mas->abort_failed = true; 152 + } 153 + } 154 + 155 + static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas) 156 + { 157 + struct geni_se *se = &mas->se; 158 + u32 m_irq, m_irq_en; 159 + 160 + if (!mas->abort_failed) 161 + return false; 162 + 163 + /* 164 + * The only known case where a transfer times out and then a cancel 165 + * times out then an abort times out is if something is blocking our 166 + * interrupt handler from running. Avoid starting any new transfers 167 + * until that sorts itself out. 168 + */ 169 + spin_lock_irq(&mas->lock); 170 + m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS); 171 + m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN); 172 + spin_unlock_irq(&mas->lock); 173 + 174 + if (m_irq & m_irq_en) { 175 + dev_err(mas->dev, "Interrupts pending after abort: %#010x\n", 176 + m_irq & m_irq_en); 177 + return true; 178 + } 179 + 180 + /* 181 + * If we're here the problem resolved itself so no need to check more 182 + * on future transfers. 
183 + */ 184 + mas->abort_failed = false; 185 + 186 + return false; 147 187 } 148 188 149 189 static void spi_geni_set_cs(struct spi_device *slv, bool set_flag) ··· 200 158 if (set_flag == mas->cs_flag) 201 159 return; 202 160 203 - mas->cs_flag = set_flag; 204 - 205 161 pm_runtime_get_sync(mas->dev); 162 + 163 + if (spi_geni_is_abort_still_pending(mas)) { 164 + dev_err(mas->dev, "Can't set chip select\n"); 165 + goto exit; 166 + } 167 + 206 168 spin_lock_irq(&mas->lock); 169 + if (mas->cur_xfer) { 170 + dev_err(mas->dev, "Can't set CS when prev xfer running\n"); 171 + spin_unlock_irq(&mas->lock); 172 + goto exit; 173 + } 174 + 175 + mas->cs_flag = set_flag; 207 176 reinit_completion(&mas->cs_done); 208 177 if (set_flag) 209 178 geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0); ··· 223 170 spin_unlock_irq(&mas->lock); 224 171 225 172 time_left = wait_for_completion_timeout(&mas->cs_done, HZ); 226 - if (!time_left) 173 + if (!time_left) { 174 + dev_warn(mas->dev, "Timeout setting chip select\n"); 227 175 handle_fifo_timeout(spi, NULL); 176 + } 228 177 178 + exit: 229 179 pm_runtime_put(mas->dev); 230 180 } 231 181 ··· 336 280 int ret; 337 281 struct spi_geni_master *mas = spi_master_get_devdata(spi); 338 282 283 + if (spi_geni_is_abort_still_pending(mas)) 284 + return -EBUSY; 285 + 339 286 ret = setup_fifo_params(spi_msg->spi, spi); 340 287 if (ret) 341 288 dev_err(mas->dev, "Couldn't select mode %d\n", ret); ··· 413 354 unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas); 414 355 unsigned int i = 0; 415 356 357 + /* Stop the watermark IRQ if nothing to send */ 358 + if (!mas->cur_xfer) { 359 + writel(0, se->base + SE_GENI_TX_WATERMARK_REG); 360 + return false; 361 + } 362 + 416 363 max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word; 417 364 if (mas->tx_rem_bytes < max_bytes) 418 365 max_bytes = mas->tx_rem_bytes; ··· 461 396 if (rx_last_byte_valid && rx_last_byte_valid < 4) 462 397 rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid; 463 398 
} 399 + 400 + /* Clear out the FIFO and bail if nowhere to put it */ 401 + if (!mas->cur_xfer) { 402 + for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++) 403 + readl(se->base + SE_GENI_RX_FIFOn); 404 + return; 405 + } 406 + 464 407 if (mas->rx_rem_bytes < rx_bytes) 465 408 rx_bytes = mas->rx_rem_bytes; 466 409 ··· 567 494 struct spi_transfer *xfer) 568 495 { 569 496 struct spi_geni_master *mas = spi_master_get_devdata(spi); 497 + 498 + if (spi_geni_is_abort_still_pending(mas)) 499 + return -EBUSY; 570 500 571 501 /* Terminate and return success for 0 byte length transfer */ 572 502 if (!xfer->len)
+2 -2
drivers/spi/spi-stm32.c
··· 493 493 494 494 /* align packet size with data registers access */ 495 495 if (spi->cur_bpw > 8) 496 - fthlv -= (fthlv % 2); /* multiple of 2 */ 496 + fthlv += (fthlv % 2) ? 1 : 0; 497 497 else 498 - fthlv -= (fthlv % 4); /* multiple of 4 */ 498 + fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0; 499 499 500 500 if (!fthlv) 501 501 fthlv = 1;
+8 -3
drivers/spi/spi.c
··· 1108 1108 { 1109 1109 struct spi_statistics *statm = &ctlr->statistics; 1110 1110 struct spi_statistics *stats = &msg->spi->statistics; 1111 + u32 speed_hz = xfer->speed_hz; 1111 1112 unsigned long long ms; 1112 1113 1113 1114 if (spi_controller_is_slave(ctlr)) { ··· 1117 1116 return -EINTR; 1118 1117 } 1119 1118 } else { 1119 + if (!speed_hz) 1120 + speed_hz = 100000; 1121 + 1120 1122 ms = 8LL * 1000LL * xfer->len; 1121 - do_div(ms, xfer->speed_hz); 1123 + do_div(ms, speed_hz); 1122 1124 ms += ms + 200; /* some tolerance */ 1123 1125 1124 1126 if (ms > UINT_MAX) ··· 3382 3378 if (status) 3383 3379 return status; 3384 3380 3385 - if (!spi->max_speed_hz || 3386 - spi->max_speed_hz > spi->controller->max_speed_hz) 3381 + if (spi->controller->max_speed_hz && 3382 + (!spi->max_speed_hz || 3383 + spi->max_speed_hz > spi->controller->max_speed_hz)) 3387 3384 spi->max_speed_hz = spi->controller->max_speed_hz; 3388 3385 3389 3386 mutex_lock(&spi->controller->io_mutex);
+14
drivers/tty/Kconfig
··· 401 401 help 402 402 FDC channel number to use for KGDB. 403 403 404 + config NULL_TTY 405 + tristate "NULL TTY driver" 406 + help 407 + Say Y here if you want a NULL TTY which simply discards messages. 408 + 409 + This is useful to allow userspace applications which expect a console 410 + device to work without modifications even when no console is 411 + available or desired. 412 + 413 + In order to use this driver, you should redirect the console to this 414 + TTY, or boot the kernel with console=ttynull. 415 + 416 + If unsure, say N. 417 + 404 418 config TRACE_ROUTER 405 419 tristate "Trace data router for MIPI P1149.7 cJTAG standard" 406 420 depends on TRACE_SINK
+2 -1
drivers/tty/Makefile
··· 2 2 obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \ 3 3 tty_buffer.o tty_port.o tty_mutex.o \ 4 4 tty_ldsem.o tty_baudrate.o tty_jobctrl.o \ 5 - n_null.o ttynull.o 5 + n_null.o 6 6 obj-$(CONFIG_LEGACY_PTYS) += pty.o 7 7 obj-$(CONFIG_UNIX98_PTYS) += pty.o 8 8 obj-$(CONFIG_AUDIT) += tty_audit.o ··· 25 25 obj-$(CONFIG_MOXA_INTELLIO) += moxa.o 26 26 obj-$(CONFIG_MOXA_SMARTIO) += mxser.o 27 27 obj-$(CONFIG_NOZOMI) += nozomi.o 28 + obj-$(CONFIG_NULL_TTY) += ttynull.o 28 29 obj-$(CONFIG_ROCKETPORT) += rocket.o 29 30 obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o 30 31 obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
-18
drivers/tty/ttynull.c
··· 2 2 /* 3 3 * Copyright (C) 2019 Axis Communications AB 4 4 * 5 - * The console is useful for userspace applications which expect a console 6 - * device to work without modifications even when no console is available 7 - * or desired. 8 - * 9 - * In order to use this driver, you should redirect the console to this 10 - * TTY, or boot the kernel with console=ttynull. 11 - * 12 5 * Based on ttyprintk.c: 13 6 * Copyright (C) 2010 Samo Pogacnik 14 7 */ ··· 58 65 .name = "ttynull", 59 66 .device = ttynull_device, 60 67 }; 61 - 62 - void __init register_ttynull_console(void) 63 - { 64 - if (!ttynull_driver) 65 - return; 66 - 67 - if (add_preferred_console(ttynull_console.name, 0, NULL)) 68 - return; 69 - 70 - register_console(&ttynull_console); 71 - } 72 68 73 69 static int __init ttynull_init(void) 74 70 {
+65 -3
drivers/vhost/vsock.c
··· 30 30 #define VHOST_VSOCK_PKT_WEIGHT 256 31 31 32 32 enum { 33 - VHOST_VSOCK_FEATURES = VHOST_FEATURES, 33 + VHOST_VSOCK_FEATURES = VHOST_FEATURES | 34 + (1ULL << VIRTIO_F_ACCESS_PLATFORM) 35 + }; 36 + 37 + enum { 38 + VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) 34 39 }; 35 40 36 41 /* Used to track all the vhost_vsock instances on the system. */ ··· 97 92 mutex_lock(&vq->mutex); 98 93 99 94 if (!vhost_vq_get_backend(vq)) 95 + goto out; 96 + 97 + if (!vq_meta_prefetch(vq)) 100 98 goto out; 101 99 102 100 /* Avoid further vmexits, we're already processing the virtqueue */ ··· 457 449 if (!vhost_vq_get_backend(vq)) 458 450 goto out; 459 451 452 + if (!vq_meta_prefetch(vq)) 453 + goto out; 454 + 460 455 vhost_disable_notify(&vsock->dev, vq); 461 456 do { 462 457 u32 len; ··· 777 766 mutex_lock(&vsock->dev.mutex); 778 767 if ((features & (1 << VHOST_F_LOG_ALL)) && 779 768 !vhost_log_access_ok(&vsock->dev)) { 780 - mutex_unlock(&vsock->dev.mutex); 781 - return -EFAULT; 769 + goto err; 770 + } 771 + 772 + if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) { 773 + if (vhost_init_device_iotlb(&vsock->dev, true)) 774 + goto err; 782 775 } 783 776 784 777 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { ··· 793 778 } 794 779 mutex_unlock(&vsock->dev.mutex); 795 780 return 0; 781 + 782 + err: 783 + mutex_unlock(&vsock->dev.mutex); 784 + return -EFAULT; 796 785 } 797 786 798 787 static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl, ··· 830 811 if (copy_from_user(&features, argp, sizeof(features))) 831 812 return -EFAULT; 832 813 return vhost_vsock_set_features(vsock, features); 814 + case VHOST_GET_BACKEND_FEATURES: 815 + features = VHOST_VSOCK_BACKEND_FEATURES; 816 + if (copy_to_user(argp, &features, sizeof(features))) 817 + return -EFAULT; 818 + return 0; 819 + case VHOST_SET_BACKEND_FEATURES: 820 + if (copy_from_user(&features, argp, sizeof(features))) 821 + return -EFAULT; 822 + if (features & ~VHOST_VSOCK_BACKEND_FEATURES) 823 
+ return -EOPNOTSUPP; 824 + vhost_set_backend_features(&vsock->dev, features); 825 + return 0; 833 826 default: 834 827 mutex_lock(&vsock->dev.mutex); 835 828 r = vhost_dev_ioctl(&vsock->dev, ioctl, argp); ··· 854 823 } 855 824 } 856 825 826 + static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) 827 + { 828 + struct file *file = iocb->ki_filp; 829 + struct vhost_vsock *vsock = file->private_data; 830 + struct vhost_dev *dev = &vsock->dev; 831 + int noblock = file->f_flags & O_NONBLOCK; 832 + 833 + return vhost_chr_read_iter(dev, to, noblock); 834 + } 835 + 836 + static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb, 837 + struct iov_iter *from) 838 + { 839 + struct file *file = iocb->ki_filp; 840 + struct vhost_vsock *vsock = file->private_data; 841 + struct vhost_dev *dev = &vsock->dev; 842 + 843 + return vhost_chr_write_iter(dev, from); 844 + } 845 + 846 + static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait) 847 + { 848 + struct vhost_vsock *vsock = file->private_data; 849 + struct vhost_dev *dev = &vsock->dev; 850 + 851 + return vhost_chr_poll(file, dev, wait); 852 + } 853 + 857 854 static const struct file_operations vhost_vsock_fops = { 858 855 .owner = THIS_MODULE, 859 856 .open = vhost_vsock_dev_open, ··· 889 830 .llseek = noop_llseek, 890 831 .unlocked_ioctl = vhost_vsock_dev_ioctl, 891 832 .compat_ioctl = compat_ptr_ioctl, 833 + .read_iter = vhost_vsock_chr_read_iter, 834 + .write_iter = vhost_vsock_chr_write_iter, 835 + .poll = vhost_vsock_chr_poll, 892 836 }; 893 837 894 838 static struct miscdevice vhost_vsock_misc = {
+9
fs/btrfs/btrfs_inode.h
··· 42 42 * to an inode. 43 43 */ 44 44 BTRFS_INODE_NO_XATTRS, 45 + /* 46 + * Set when we are in a context where we need to start a transaction and 47 + * have dirty pages with the respective file range locked. This is to 48 + * ensure that when reserving space for the transaction, if we are low 49 + * on available space and need to flush delalloc, we will not flush 50 + * delalloc for this inode, because that could result in a deadlock (on 51 + * the file range, inode's io_tree). 52 + */ 53 + BTRFS_INODE_NO_DELALLOC_FLUSH, 45 54 }; 46 55 47 56 /* in memory btrfs inode */
+22 -2
fs/btrfs/ctree.c
··· 2555 2555 * @p: Holds all btree nodes along the search path 2556 2556 * @root: The root node of the tree 2557 2557 * @key: The key we are looking for 2558 - * @ins_len: Indicates purpose of search, for inserts it is 1, for 2559 - * deletions it's -1. 0 for plain searches 2558 + * @ins_len: Indicates purpose of search: 2559 + * >0 for inserts it's size of item inserted (*) 2560 + * <0 for deletions 2561 + * 0 for plain searches, not modifying the tree 2562 + * 2563 + * (*) If size of item inserted doesn't include 2564 + * sizeof(struct btrfs_item), then p->search_for_extension must 2565 + * be set. 2560 2566 * @cow: boolean should CoW operations be performed. Must always be 1 2561 2567 * when modifying the tree. 2562 2568 * ··· 2723 2717 2724 2718 if (level == 0) { 2725 2719 p->slots[level] = slot; 2720 + /* 2721 + * Item key already exists. In this case, if we are 2722 + * allowed to insert the item (for example, in dir_item 2723 + * case, item key collision is allowed), it will be 2724 + * merged with the original item. Only the item size 2725 + * grows, no new btrfs item will be added. If 2726 + * search_for_extension is not set, ins_len already 2727 + * accounts the size btrfs_item, deduct it here so leaf 2728 + * space check will be correct. 2729 + */ 2730 + if (ret == 0 && ins_len > 0 && !p->search_for_extension) { 2731 + ASSERT(ins_len >= sizeof(struct btrfs_item)); 2732 + ins_len -= sizeof(struct btrfs_item); 2733 + } 2726 2734 if (ins_len > 0 && 2727 2735 btrfs_leaf_free_space(b) < ins_len) { 2728 2736 if (write_lock_level < 1) {
+27 -2
fs/btrfs/ctree.h
··· 131 131 * defrag 132 132 */ 133 133 BTRFS_FS_STATE_REMOUNTING, 134 + /* Filesystem in RO mode */ 135 + BTRFS_FS_STATE_RO, 134 136 /* Track if a transaction abort has been reported on this filesystem */ 135 137 BTRFS_FS_STATE_TRANS_ABORTED, 136 138 /* ··· 369 367 unsigned int search_commit_root:1; 370 368 unsigned int need_commit_sem:1; 371 369 unsigned int skip_release_on_error:1; 370 + /* 371 + * Indicate that new item (btrfs_search_slot) is extending already 372 + * existing item and ins_len contains only the data size and not item 373 + * header (ie. sizeof(struct btrfs_item) is not included). 374 + */ 375 + unsigned int search_for_extension:1; 372 376 }; 373 377 #define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \ 374 378 sizeof(struct btrfs_item)) ··· 2893 2885 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do 2894 2886 * anything except sleeping. This function is used to check the status of 2895 2887 * the fs. 2888 + * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount, 2889 + * since setting and checking for SB_RDONLY in the superblock's flags is not 2890 + * atomic. 
2896 2891 */ 2897 2892 static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info) 2898 2893 { 2899 - return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info); 2894 + return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) || 2895 + btrfs_fs_closing(fs_info); 2896 + } 2897 + 2898 + static inline void btrfs_set_sb_rdonly(struct super_block *sb) 2899 + { 2900 + sb->s_flags |= SB_RDONLY; 2901 + set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state); 2902 + } 2903 + 2904 + static inline void btrfs_clear_sb_rdonly(struct super_block *sb) 2905 + { 2906 + sb->s_flags &= ~SB_RDONLY; 2907 + clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state); 2900 2908 } 2901 2909 2902 2910 /* tree mod log functions from ctree.c */ ··· 3097 3073 u32 min_type); 3098 3074 3099 3075 int btrfs_start_delalloc_snapshot(struct btrfs_root *root); 3100 - int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr); 3076 + int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr, 3077 + bool in_reclaim_context); 3101 3078 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, 3102 3079 unsigned int extra_bits, 3103 3080 struct extent_state **cached_state);
+1 -1
fs/btrfs/dev-replace.c
··· 715 715 * flush all outstanding I/O and inode extent mappings before the 716 716 * copy operation is declared as being finished 717 717 */ 718 - ret = btrfs_start_delalloc_roots(fs_info, U64_MAX); 718 + ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false); 719 719 if (ret) { 720 720 mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); 721 721 return ret;
+36 -34
fs/btrfs/discard.c
··· 199 199 static struct btrfs_block_group *peek_discard_list( 200 200 struct btrfs_discard_ctl *discard_ctl, 201 201 enum btrfs_discard_state *discard_state, 202 - int *discard_index) 202 + int *discard_index, u64 now) 203 203 { 204 204 struct btrfs_block_group *block_group; 205 - const u64 now = ktime_get_ns(); 206 205 207 206 spin_lock(&discard_ctl->lock); 208 207 again: 209 208 block_group = find_next_block_group(discard_ctl, now); 210 209 211 - if (block_group && now > block_group->discard_eligible_time) { 210 + if (block_group && now >= block_group->discard_eligible_time) { 212 211 if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED && 213 212 block_group->used != 0) { 214 213 if (btrfs_is_block_group_data_only(block_group)) ··· 221 222 block_group->discard_state = BTRFS_DISCARD_EXTENTS; 222 223 } 223 224 discard_ctl->block_group = block_group; 225 + } 226 + if (block_group) { 224 227 *discard_state = block_group->discard_state; 225 228 *discard_index = block_group->discard_index; 226 - } else { 227 - block_group = NULL; 228 229 } 229 - 230 230 spin_unlock(&discard_ctl->lock); 231 231 232 232 return block_group; ··· 328 330 btrfs_discard_schedule_work(discard_ctl, false); 329 331 } 330 332 331 - /** 332 - * btrfs_discard_schedule_work - responsible for scheduling the discard work 333 - * @discard_ctl: discard control 334 - * @override: override the current timer 335 - * 336 - * Discards are issued by a delayed workqueue item. @override is used to 337 - * update the current delay as the baseline delay interval is reevaluated on 338 - * transaction commit. This is also maxed with any other rate limit. 
339 - */ 340 - void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, 341 - bool override) 333 + static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, 334 + u64 now, bool override) 342 335 { 343 336 struct btrfs_block_group *block_group; 344 - const u64 now = ktime_get_ns(); 345 - 346 - spin_lock(&discard_ctl->lock); 347 337 348 338 if (!btrfs_run_discard_work(discard_ctl)) 349 - goto out; 350 - 339 + return; 351 340 if (!override && delayed_work_pending(&discard_ctl->work)) 352 - goto out; 341 + return; 353 342 354 343 block_group = find_next_block_group(discard_ctl, now); 355 344 if (block_group) { ··· 378 393 mod_delayed_work(discard_ctl->discard_workers, 379 394 &discard_ctl->work, nsecs_to_jiffies(delay)); 380 395 } 381 - out: 396 + } 397 + 398 + /* 399 + * btrfs_discard_schedule_work - responsible for scheduling the discard work 400 + * @discard_ctl: discard control 401 + * @override: override the current timer 402 + * 403 + * Discards are issued by a delayed workqueue item. @override is used to 404 + * update the current delay as the baseline delay interval is reevaluated on 405 + * transaction commit. This is also maxed with any other rate limit. 
406 + */ 407 + void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl, 408 + bool override) 409 + { 410 + const u64 now = ktime_get_ns(); 411 + 412 + spin_lock(&discard_ctl->lock); 413 + __btrfs_discard_schedule_work(discard_ctl, now, override); 382 414 spin_unlock(&discard_ctl->lock); 383 415 } 384 416 ··· 440 438 int discard_index = 0; 441 439 u64 trimmed = 0; 442 440 u64 minlen = 0; 441 + u64 now = ktime_get_ns(); 443 442 444 443 discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work); 445 444 446 445 block_group = peek_discard_list(discard_ctl, &discard_state, 447 - &discard_index); 446 + &discard_index, now); 448 447 if (!block_group || !btrfs_run_discard_work(discard_ctl)) 449 448 return; 449 + if (now < block_group->discard_eligible_time) { 450 + btrfs_discard_schedule_work(discard_ctl, false); 451 + return; 452 + } 450 453 451 454 /* Perform discarding */ 452 455 minlen = discard_minlen[discard_index]; ··· 481 474 discard_ctl->discard_extent_bytes += trimmed; 482 475 } 483 476 484 - /* 485 - * Updated without locks as this is inside the workfn and nothing else 486 - * is reading the values 487 - */ 488 - discard_ctl->prev_discard = trimmed; 489 - discard_ctl->prev_discard_time = ktime_get_ns(); 490 - 491 477 /* Determine next steps for a block_group */ 492 478 if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) { 493 479 if (discard_state == BTRFS_DISCARD_BITMAPS) { ··· 496 496 } 497 497 } 498 498 499 + now = ktime_get_ns(); 499 500 spin_lock(&discard_ctl->lock); 501 + discard_ctl->prev_discard = trimmed; 502 + discard_ctl->prev_discard_time = now; 500 503 discard_ctl->block_group = NULL; 504 + __btrfs_discard_schedule_work(discard_ctl, now, false); 501 505 spin_unlock(&discard_ctl->lock); 502 - 503 - btrfs_discard_schedule_work(discard_ctl, false); 504 506 } 505 507 506 508 /**
+8 -5
fs/btrfs/disk-io.c
··· 1729 1729 */ 1730 1730 btrfs_delete_unused_bgs(fs_info); 1731 1731 sleep: 1732 - clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); 1732 + clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); 1733 1733 if (kthread_should_park()) 1734 1734 kthread_parkme(); 1735 1735 if (kthread_should_stop()) ··· 2830 2830 return -ENOMEM; 2831 2831 btrfs_init_delayed_root(fs_info->delayed_root); 2832 2832 2833 + if (sb_rdonly(sb)) 2834 + set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state); 2835 + 2833 2836 return btrfs_alloc_stripe_hash_table(fs_info); 2834 2837 } 2835 2838 ··· 2972 2969 } 2973 2970 } 2974 2971 2972 + ret = btrfs_find_orphan_roots(fs_info); 2975 2973 out: 2976 2974 return ret; 2977 2975 } ··· 3386 3382 goto fail_qgroup; 3387 3383 } 3388 3384 } 3389 - 3390 - ret = btrfs_find_orphan_roots(fs_info); 3391 - if (ret) 3392 - goto fail_qgroup; 3393 3385 3394 3386 fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true); 3395 3387 if (IS_ERR(fs_info->fs_root)) { ··· 4180 4180 */ 4181 4181 invalidate_inode_pages2(fs_info->btree_inode->i_mapping); 4182 4182 btrfs_stop_all_workers(fs_info); 4183 + 4184 + /* We shouldn't have any transaction open at this point */ 4185 + ASSERT(list_empty(&fs_info->trans_list)); 4183 4186 4184 4187 clear_bit(BTRFS_FS_OPEN, &fs_info->flags); 4185 4188 free_root_pointers(fs_info, true);
+2
fs/btrfs/extent-tree.c
··· 844 844 want = extent_ref_type(parent, owner); 845 845 if (insert) { 846 846 extra_size = btrfs_extent_inline_ref_size(want); 847 + path->search_for_extension = 1; 847 848 path->keep_locks = 1; 848 849 } else 849 850 extra_size = -1; ··· 997 996 out: 998 997 if (insert) { 999 998 path->keep_locks = 0; 999 + path->search_for_extension = 0; 1000 1000 btrfs_unlock_up_safe(path, 1); 1001 1001 } 1002 1002 return err;
+2
fs/btrfs/file-item.c
··· 1016 1016 } 1017 1017 1018 1018 btrfs_release_path(path); 1019 + path->search_for_extension = 1; 1019 1020 ret = btrfs_search_slot(trans, root, &file_key, path, 1020 1021 csum_size, 1); 1022 + path->search_for_extension = 0; 1021 1023 if (ret < 0) 1022 1024 goto out; 1023 1025
+11 -4
fs/btrfs/inode.c
··· 9390 9390 * some fairly slow code that needs optimization. This walks the list 9391 9391 * of all the inodes with pending delalloc and forces them to disk. 9392 9392 */ 9393 - static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot) 9393 + static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot, 9394 + bool in_reclaim_context) 9394 9395 { 9395 9396 struct btrfs_inode *binode; 9396 9397 struct inode *inode; ··· 9412 9411 9413 9412 list_move_tail(&binode->delalloc_inodes, 9414 9413 &root->delalloc_inodes); 9414 + 9415 + if (in_reclaim_context && 9416 + test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) 9417 + continue; 9418 + 9415 9419 inode = igrab(&binode->vfs_inode); 9416 9420 if (!inode) { 9417 9421 cond_resched_lock(&root->delalloc_lock); ··· 9470 9464 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 9471 9465 return -EROFS; 9472 9466 9473 - return start_delalloc_inodes(root, &nr, true); 9467 + return start_delalloc_inodes(root, &nr, true, false); 9474 9468 } 9475 9469 9476 - int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr) 9470 + int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr, 9471 + bool in_reclaim_context) 9477 9472 { 9478 9473 struct btrfs_root *root; 9479 9474 struct list_head splice; ··· 9497 9490 &fs_info->delalloc_roots); 9498 9491 spin_unlock(&fs_info->delalloc_root_lock); 9499 9492 9500 - ret = start_delalloc_inodes(root, &nr, false); 9493 + ret = start_delalloc_inodes(root, &nr, false, in_reclaim_context); 9501 9494 btrfs_put_root(root); 9502 9495 if (ret < 0) 9503 9496 goto out;
+1 -1
fs/btrfs/ioctl.c
··· 4951 4951 case BTRFS_IOC_SYNC: { 4952 4952 int ret; 4953 4953 4954 - ret = btrfs_start_delalloc_roots(fs_info, U64_MAX); 4954 + ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false); 4955 4955 if (ret) 4956 4956 return ret; 4957 4957 ret = btrfs_sync_fs(inode->i_sb, 1);
+30 -13
fs/btrfs/qgroup.c
··· 3190 3190 return ret; 3191 3191 } 3192 3192 3193 + static bool rescan_should_stop(struct btrfs_fs_info *fs_info) 3194 + { 3195 + return btrfs_fs_closing(fs_info) || 3196 + test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); 3197 + } 3198 + 3193 3199 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) 3194 3200 { 3195 3201 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, ··· 3204 3198 struct btrfs_trans_handle *trans = NULL; 3205 3199 int err = -ENOMEM; 3206 3200 int ret = 0; 3201 + bool stopped = false; 3207 3202 3208 3203 path = btrfs_alloc_path(); 3209 3204 if (!path) ··· 3217 3210 path->skip_locking = 1; 3218 3211 3219 3212 err = 0; 3220 - while (!err && !btrfs_fs_closing(fs_info)) { 3213 + while (!err && !(stopped = rescan_should_stop(fs_info))) { 3221 3214 trans = btrfs_start_transaction(fs_info->fs_root, 0); 3222 3215 if (IS_ERR(trans)) { 3223 3216 err = PTR_ERR(trans); ··· 3260 3253 } 3261 3254 3262 3255 mutex_lock(&fs_info->qgroup_rescan_lock); 3263 - if (!btrfs_fs_closing(fs_info)) 3256 + if (!stopped) 3264 3257 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; 3265 3258 if (trans) { 3266 3259 ret = update_qgroup_status_item(trans); ··· 3279 3272 3280 3273 btrfs_end_transaction(trans); 3281 3274 3282 - if (btrfs_fs_closing(fs_info)) { 3275 + if (stopped) { 3283 3276 btrfs_info(fs_info, "qgroup scan paused"); 3284 3277 } else if (err >= 0) { 3285 3278 btrfs_info(fs_info, "qgroup scan completed%s", ··· 3538 3531 bool can_commit = true; 3539 3532 3540 3533 /* 3541 - * We don't want to run flush again and again, so if there is a running 3542 - * one, we won't try to start a new flush, but exit directly. 
3543 - */ 3544 - if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) { 3545 - wait_event(root->qgroup_flush_wait, 3546 - !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)); 3547 - return 0; 3548 - } 3549 - 3550 - /* 3551 3534 * If current process holds a transaction, we shouldn't flush, as we 3552 3535 * assume all space reservation happens before a transaction handle is 3553 3536 * held. ··· 3550 3553 if (current->journal_info && 3551 3554 current->journal_info != BTRFS_SEND_TRANS_STUB) 3552 3555 can_commit = false; 3556 + 3557 + /* 3558 + * We don't want to run flush again and again, so if there is a running 3559 + * one, we won't try to start a new flush, but exit directly. 3560 + */ 3561 + if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) { 3562 + /* 3563 + * We are already holding a transaction, thus we can block other 3564 + * threads from flushing. So exit right now. This increases 3565 + * the chance of EDQUOT for heavy load and near limit cases. 3566 + * But we can argue that if we're already near limit, EDQUOT is 3567 + * unavoidable anyway. 3568 + */ 3569 + if (!can_commit) 3570 + return 0; 3571 + 3572 + wait_event(root->qgroup_flush_wait, 3573 + !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)); 3574 + return 0; 3575 + } 3553 3576 3554 3577 ret = btrfs_start_delalloc_snapshot(root); 3555 3578 if (ret < 0)
+15
fs/btrfs/reflink.c
··· 89 89 if (ret) 90 90 goto out_unlock; 91 91 92 + /* 93 + * After dirtying the page our caller will need to start a transaction, 94 + * and if we are low on metadata free space, that can cause flushing of 95 + * delalloc for all inodes in order to get metadata space released. 96 + * However we are holding the range locked for the whole duration of 97 + * the clone/dedupe operation, so we may deadlock if that happens and no 98 + * other task releases enough space. So mark this inode as not being 99 + * possible to flush to avoid such deadlock. We will clear that flag 100 + * when we finish cloning all extents, since a transaction is started 101 + * after finding each extent to clone. 102 + */ 103 + set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags); 104 + 92 105 if (comp_type == BTRFS_COMPRESS_NONE) { 93 106 char *map; 94 107 ··· 562 549 out: 563 550 btrfs_free_path(path); 564 551 kvfree(buf); 552 + clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags); 553 + 565 554 return ret; 566 555 } 567 556
+31 -18
fs/btrfs/send.c
··· 236 236 * after this directory is moved, we can try to rmdir the ino rmdir_ino. 237 237 */ 238 238 u64 rmdir_ino; 239 + u64 rmdir_gen; 239 240 bool orphanized; 240 241 }; 241 242 ··· 317 316 static struct waiting_dir_move * 318 317 get_waiting_dir_move(struct send_ctx *sctx, u64 ino); 319 318 320 - static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino); 319 + static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen); 321 320 322 321 static int need_send_hole(struct send_ctx *sctx) 323 322 { ··· 2300 2299 2301 2300 fs_path_reset(name); 2302 2301 2303 - if (is_waiting_for_rm(sctx, ino)) { 2302 + if (is_waiting_for_rm(sctx, ino, gen)) { 2304 2303 ret = gen_unique_name(sctx, ino, gen, name); 2305 2304 if (ret < 0) 2306 2305 goto out; ··· 2859 2858 return ret; 2860 2859 } 2861 2860 2862 - static struct orphan_dir_info * 2863 - add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) 2861 + static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx, 2862 + u64 dir_ino, u64 dir_gen) 2864 2863 { 2865 2864 struct rb_node **p = &sctx->orphan_dirs.rb_node; 2866 2865 struct rb_node *parent = NULL; ··· 2869 2868 while (*p) { 2870 2869 parent = *p; 2871 2870 entry = rb_entry(parent, struct orphan_dir_info, node); 2872 - if (dir_ino < entry->ino) { 2871 + if (dir_ino < entry->ino) 2873 2872 p = &(*p)->rb_left; 2874 - } else if (dir_ino > entry->ino) { 2873 + else if (dir_ino > entry->ino) 2875 2874 p = &(*p)->rb_right; 2876 - } else { 2875 + else if (dir_gen < entry->gen) 2876 + p = &(*p)->rb_left; 2877 + else if (dir_gen > entry->gen) 2878 + p = &(*p)->rb_right; 2879 + else 2877 2880 return entry; 2878 - } 2879 2881 } 2880 2882 2881 2883 odi = kmalloc(sizeof(*odi), GFP_KERNEL); 2882 2884 if (!odi) 2883 2885 return ERR_PTR(-ENOMEM); 2884 2886 odi->ino = dir_ino; 2885 - odi->gen = 0; 2887 + odi->gen = dir_gen; 2886 2888 odi->last_dir_index_offset = 0; 2887 2889 2888 2890 rb_link_node(&odi->node, parent, p); ··· 2893 2889 return odi; 2894 
2890 } 2895 2891 2896 - static struct orphan_dir_info * 2897 - get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) 2892 + static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx, 2893 + u64 dir_ino, u64 gen) 2898 2894 { 2899 2895 struct rb_node *n = sctx->orphan_dirs.rb_node; 2900 2896 struct orphan_dir_info *entry; ··· 2905 2901 n = n->rb_left; 2906 2902 else if (dir_ino > entry->ino) 2907 2903 n = n->rb_right; 2904 + else if (gen < entry->gen) 2905 + n = n->rb_left; 2906 + else if (gen > entry->gen) 2907 + n = n->rb_right; 2908 2908 else 2909 2909 return entry; 2910 2910 } 2911 2911 return NULL; 2912 2912 } 2913 2913 2914 - static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino) 2914 + static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen) 2915 2915 { 2916 - struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino); 2916 + struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen); 2917 2917 2918 2918 return odi != NULL; 2919 2919 } ··· 2962 2954 key.type = BTRFS_DIR_INDEX_KEY; 2963 2955 key.offset = 0; 2964 2956 2965 - odi = get_orphan_dir_info(sctx, dir); 2957 + odi = get_orphan_dir_info(sctx, dir, dir_gen); 2966 2958 if (odi) 2967 2959 key.offset = odi->last_dir_index_offset; 2968 2960 ··· 2993 2985 2994 2986 dm = get_waiting_dir_move(sctx, loc.objectid); 2995 2987 if (dm) { 2996 - odi = add_orphan_dir_info(sctx, dir); 2988 + odi = add_orphan_dir_info(sctx, dir, dir_gen); 2997 2989 if (IS_ERR(odi)) { 2998 2990 ret = PTR_ERR(odi); 2999 2991 goto out; ··· 3001 2993 odi->gen = dir_gen; 3002 2994 odi->last_dir_index_offset = found_key.offset; 3003 2995 dm->rmdir_ino = dir; 2996 + dm->rmdir_gen = dir_gen; 3004 2997 ret = 0; 3005 2998 goto out; 3006 2999 } 3007 3000 3008 3001 if (loc.objectid > send_progress) { 3009 - odi = add_orphan_dir_info(sctx, dir); 3002 + odi = add_orphan_dir_info(sctx, dir, dir_gen); 3010 3003 if (IS_ERR(odi)) { 3011 3004 ret = PTR_ERR(odi); 3012 3005 goto out; ··· 3047 3038 
return -ENOMEM; 3048 3039 dm->ino = ino; 3049 3040 dm->rmdir_ino = 0; 3041 + dm->rmdir_gen = 0; 3050 3042 dm->orphanized = orphanized; 3051 3043 3052 3044 while (*p) { ··· 3193 3183 while (ino != BTRFS_FIRST_FREE_OBJECTID) { 3194 3184 fs_path_reset(name); 3195 3185 3196 - if (is_waiting_for_rm(sctx, ino)) 3186 + if (is_waiting_for_rm(sctx, ino, gen)) 3197 3187 break; 3198 3188 if (is_waiting_for_move(sctx, ino)) { 3199 3189 if (*ancestor_ino == 0) ··· 3233 3223 u64 parent_ino, parent_gen; 3234 3224 struct waiting_dir_move *dm = NULL; 3235 3225 u64 rmdir_ino = 0; 3226 + u64 rmdir_gen; 3236 3227 u64 ancestor; 3237 3228 bool is_orphan; 3238 3229 int ret; ··· 3248 3237 dm = get_waiting_dir_move(sctx, pm->ino); 3249 3238 ASSERT(dm); 3250 3239 rmdir_ino = dm->rmdir_ino; 3240 + rmdir_gen = dm->rmdir_gen; 3251 3241 is_orphan = dm->orphanized; 3252 3242 free_waiting_dir_move(sctx, dm); 3253 3243 ··· 3285 3273 dm = get_waiting_dir_move(sctx, pm->ino); 3286 3274 ASSERT(dm); 3287 3275 dm->rmdir_ino = rmdir_ino; 3276 + dm->rmdir_gen = rmdir_gen; 3288 3277 } 3289 3278 goto out; 3290 3279 } ··· 3304 3291 struct orphan_dir_info *odi; 3305 3292 u64 gen; 3306 3293 3307 - odi = get_orphan_dir_info(sctx, rmdir_ino); 3294 + odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen); 3308 3295 if (!odi) { 3309 3296 /* already deleted */ 3310 3297 goto finish;
+1 -1
fs/btrfs/space-info.c
··· 532 532 533 533 loops = 0; 534 534 while ((delalloc_bytes || dio_bytes) && loops < 3) { 535 - btrfs_start_delalloc_roots(fs_info, items); 535 + btrfs_start_delalloc_roots(fs_info, items, true); 536 536 537 537 loops++; 538 538 if (wait_ordered && !trans) {
+37 -3
fs/btrfs/super.c
··· 175 175 btrfs_discard_stop(fs_info); 176 176 177 177 /* btrfs handle error by forcing the filesystem readonly */ 178 - sb->s_flags |= SB_RDONLY; 178 + btrfs_set_sb_rdonly(sb); 179 179 btrfs_info(fs_info, "forced readonly"); 180 180 /* 181 181 * Note that a running device replace operation is not canceled here ··· 1953 1953 /* avoid complains from lockdep et al. */ 1954 1954 up(&fs_info->uuid_tree_rescan_sem); 1955 1955 1956 - sb->s_flags |= SB_RDONLY; 1956 + btrfs_set_sb_rdonly(sb); 1957 1957 1958 1958 /* 1959 1959 * Setting SB_RDONLY will put the cleaner thread to ··· 1964 1964 */ 1965 1965 btrfs_delete_unused_bgs(fs_info); 1966 1966 1967 + /* 1968 + * The cleaner task could be already running before we set the 1969 + * flag BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock). 1970 + * We must make sure that after we finish the remount, i.e. after 1971 + * we call btrfs_commit_super(), the cleaner can no longer start 1972 + * a transaction - either because it was dropping a dead root, 1973 + * running delayed iputs or deleting an unused block group (the 1974 + * cleaner picked a block group from the list of unused block 1975 + * groups before we were able to in the previous call to 1976 + * btrfs_delete_unused_bgs()). 1977 + */ 1978 + wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING, 1979 + TASK_UNINTERRUPTIBLE); 1980 + 1981 + /* 1982 + * We've set the superblock to RO mode, so we might have made 1983 + * the cleaner task sleep without running all pending delayed 1984 + * iputs. Go through all the delayed iputs here, so that if an 1985 + * unmount happens without remounting RW we don't end up at 1986 + * finishing close_ctree() with a non-empty list of delayed 1987 + * iputs. 1988 + */ 1989 + btrfs_run_delayed_iputs(fs_info); 1990 + 1967 1991 btrfs_dev_replace_suspend_for_unmount(fs_info); 1968 1992 btrfs_scrub_cancel(fs_info); 1969 1993 btrfs_pause_balance(fs_info); 1994 + 1995 + /* 1996 + * Pause the qgroup rescan worker if it is running. 
We don't want 1997 + * it to be still running after we are in RO mode, as after that, 1998 + * by the time we unmount, it might have left a transaction open, 1999 + * so we would leak the transaction and/or crash. 2000 + */ 2001 + btrfs_qgroup_wait_for_completion(fs_info, false); 1970 2002 1971 2003 ret = btrfs_commit_super(fs_info); 1972 2004 if (ret) ··· 2038 2006 if (ret) 2039 2007 goto restore; 2040 2008 2041 - sb->s_flags &= ~SB_RDONLY; 2009 + btrfs_clear_sb_rdonly(sb); 2042 2010 2043 2011 set_bit(BTRFS_FS_OPEN, &fs_info->flags); 2044 2012 } ··· 2060 2028 /* We've hit an error - don't reset SB_RDONLY */ 2061 2029 if (sb_rdonly(sb)) 2062 2030 old_flags |= SB_RDONLY; 2031 + if (!(old_flags & SB_RDONLY)) 2032 + clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state); 2063 2033 sb->s_flags = old_flags; 2064 2034 fs_info->mount_opt = old_opts; 2065 2035 fs_info->compress_type = old_compress_type;
+8 -2
fs/btrfs/tests/btrfs-tests.c
··· 55 55 struct inode *inode; 56 56 57 57 inode = new_inode(test_mnt->mnt_sb); 58 - if (inode) 59 - inode_init_owner(inode, NULL, S_IFREG); 58 + if (!inode) 59 + return NULL; 60 + 61 + inode->i_mode = S_IFREG; 62 + BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 63 + BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; 64 + BTRFS_I(inode)->location.offset = 0; 65 + inode_init_owner(inode, NULL, S_IFREG); 60 66 61 67 return inode; 62 68 }
-9
fs/btrfs/tests/inode-tests.c
··· 232 232 return ret; 233 233 } 234 234 235 - inode->i_mode = S_IFREG; 236 - BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 237 - BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; 238 - BTRFS_I(inode)->location.offset = 0; 239 - 240 235 fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); 241 236 if (!fs_info) { 242 237 test_std_err(TEST_ALLOC_FS_INFO); ··· 829 834 test_std_err(TEST_ALLOC_INODE); 830 835 return ret; 831 836 } 832 - 833 - BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; 834 - BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; 835 - BTRFS_I(inode)->location.offset = 0; 836 837 837 838 fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); 838 839 if (!fs_info) {
+2 -2
fs/btrfs/volumes.c
··· 2592 2592 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); 2593 2593 2594 2594 if (seeding_dev) { 2595 - sb->s_flags &= ~SB_RDONLY; 2595 + btrfs_clear_sb_rdonly(sb); 2596 2596 ret = btrfs_prepare_sprout(fs_info); 2597 2597 if (ret) { 2598 2598 btrfs_abort_transaction(trans, ret); ··· 2728 2728 mutex_unlock(&fs_info->fs_devices->device_list_mutex); 2729 2729 error_trans: 2730 2730 if (seeding_dev) 2731 - sb->s_flags |= SB_RDONLY; 2731 + btrfs_set_sb_rdonly(sb); 2732 2732 if (trans) 2733 2733 btrfs_end_transaction(trans); 2734 2734 error_free_zone:
+11 -3
fs/select.c
··· 1011 1011 fdcount = do_poll(head, &table, end_time); 1012 1012 poll_freewait(&table); 1013 1013 1014 + if (!user_write_access_begin(ufds, nfds * sizeof(*ufds))) 1015 + goto out_fds; 1016 + 1014 1017 for (walk = head; walk; walk = walk->next) { 1015 1018 struct pollfd *fds = walk->entries; 1016 1019 int j; 1017 1020 1018 - for (j = 0; j < walk->len; j++, ufds++) 1019 - if (__put_user(fds[j].revents, &ufds->revents)) 1020 - goto out_fds; 1021 + for (j = walk->len; j; fds++, ufds++, j--) 1022 + unsafe_put_user(fds->revents, &ufds->revents, Efault); 1021 1023 } 1024 + user_write_access_end(); 1022 1025 1023 1026 err = fdcount; 1024 1027 out_fds: ··· 1033 1030 } 1034 1031 1035 1032 return err; 1033 + 1034 + Efault: 1035 + user_write_access_end(); 1036 + err = -EFAULT; 1037 + goto out_fds; 1036 1038 } 1037 1039 1038 1040 static long do_restart_poll(struct restart_block *restart_block)
-3
include/linux/console.h
··· 186 186 extern int braille_unregister_console(struct console *); 187 187 #ifdef CONFIG_TTY 188 188 extern void console_sysfs_notify(void); 189 - extern void register_ttynull_console(void); 190 189 #else 191 190 static inline void console_sysfs_notify(void) 192 - { } 193 - static inline void register_ttynull_console(void) 194 191 { } 195 192 #endif 196 193 extern bool console_suspend_enabled;
+2 -1
include/linux/mlx5/mlx5_ifc.h
··· 1280 1280 u8 ece_support[0x1]; 1281 1281 u8 reserved_at_a4[0x7]; 1282 1282 u8 log_max_srq[0x5]; 1283 - u8 reserved_at_b0[0x2]; 1283 + u8 reserved_at_b0[0x1]; 1284 + u8 uplink_follow[0x1]; 1284 1285 u8 ts_cqe_to_dest_cqn[0x1]; 1285 1286 u8 reserved_at_b3[0xd]; 1286 1287
+3 -2
include/uapi/linux/if_link.h
··· 75 75 * 76 76 * @rx_dropped: Number of packets received but not processed, 77 77 * e.g. due to lack of resources or unsupported protocol. 78 - * For hardware interfaces this counter should not include packets 79 - * dropped by the device which are counted separately in 78 + * For hardware interfaces this counter may include packets discarded 79 + * due to L2 address filtering but should not include packets dropped 80 + * by the device due to buffer exhaustion which are counted separately in 80 81 * @rx_missed_errors (since procfs folds those two counters together). 81 82 * 82 83 * @tx_dropped: Number of packets dropped on their way to transmission,
+2 -8
init/main.c
··· 1480 1480 struct file *file = filp_open("/dev/console", O_RDWR, 0); 1481 1481 1482 1482 if (IS_ERR(file)) { 1483 - pr_err("Warning: unable to open an initial console. Fallback to ttynull.\n"); 1484 - register_ttynull_console(); 1485 - 1486 - file = filp_open("/dev/console", O_RDWR, 0); 1487 - if (IS_ERR(file)) { 1488 - pr_err("Warning: Failed to add ttynull console. No stdin, stdout, and stderr for the init process!\n"); 1489 - return; 1490 - } 1483 + pr_err("Warning: unable to open an initial console.\n"); 1484 + return; 1491 1485 } 1492 1486 init_dup(file); 1493 1487 init_dup(file);
+1
kernel/bpf/task_iter.c
··· 159 159 } 160 160 161 161 /* set info->task and info->tid */ 162 + info->task = curr_task; 162 163 if (curr_tid == info->tid) { 163 164 curr_fd = info->fd; 164 165 } else {
+2 -1
net/8021q/vlan.c
··· 284 284 return 0; 285 285 286 286 out_free_newdev: 287 - if (new_dev->reg_state == NETREG_UNINITIALIZED) 287 + if (new_dev->reg_state == NETREG_UNINITIALIZED || 288 + new_dev->reg_state == NETREG_UNREGISTERED) 288 289 free_netdev(new_dev); 289 290 return err; 290 291 }
+1 -1
net/ipv4/ip_output.c
··· 302 302 if (skb_is_gso(skb)) 303 303 return ip_finish_output_gso(net, sk, skb, mtu); 304 304 305 - if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) 305 + if (skb->len > mtu || IPCB(skb)->frag_max_size) 306 306 return ip_fragment(net, sk, skb, mtu, ip_finish_output2); 307 307 308 308 return ip_finish_output2(net, sk, skb);
+5 -6
net/ipv4/ip_tunnel.c
··· 759 759 goto tx_error; 760 760 } 761 761 762 - if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph, 763 - 0, 0, false)) { 762 + df = tnl_params->frag_off; 763 + if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) 764 + df |= (inner_iph->frag_off & htons(IP_DF)); 765 + 766 + if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) { 764 767 ip_rt_put(rt); 765 768 goto tx_error; 766 769 } ··· 790 787 else 791 788 ttl = ip4_dst_hoplimit(&rt->dst); 792 789 } 793 - 794 - df = tnl_params->frag_off; 795 - if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) 796 - df |= (inner_iph->frag_off&htons(IP_DF)); 797 790 798 791 max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) 799 792 + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
+4 -2
net/ipv4/nexthop.c
··· 627 627 for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) { 628 628 if (!tb[i]) 629 629 continue; 630 - if (tb[NHA_FDB]) 630 + if (i == NHA_FDB) 631 631 continue; 632 632 NL_SET_ERR_MSG(extack, 633 633 "No other attributes can be set in nexthop groups"); ··· 1459 1459 return nh; 1460 1460 1461 1461 out_no_nh: 1462 - for (; i >= 0; --i) 1462 + for (i--; i >= 0; --i) { 1463 + list_del(&nhg->nh_entries[i].nh_list); 1463 1464 nexthop_put(nhg->nh_entries[i].nh); 1465 + } 1464 1466 1465 1467 kfree(nhg->spare); 1466 1468 kfree(nhg);
+2 -3
net/ipv6/ip6_fib.c
··· 1025 1025 { 1026 1026 struct fib6_table *table = rt->fib6_table; 1027 1027 1028 + /* Flush all cached dst in exception table */ 1029 + rt6_flush_exceptions(rt); 1028 1030 fib6_drop_pcpu_from(rt, table); 1029 1031 1030 1032 if (rt->nh && !list_empty(&rt->nh_list)) ··· 1928 1926 rt->fib6_node = NULL; 1929 1927 net->ipv6.rt6_stats->fib_rt_entries--; 1930 1928 net->ipv6.rt6_stats->fib_discarded_routes++; 1931 - 1932 - /* Flush all cached dst in exception table */ 1933 - rt6_flush_exceptions(rt); 1934 1929 1935 1930 /* Reset round-robin state, if necessary */ 1936 1931 if (rcu_access_pointer(fn->rr_ptr) == rt)
+4 -3
net/qrtr/ns.c
··· 755 755 queue_work(qrtr_ns.workqueue, &qrtr_ns.work); 756 756 } 757 757 758 - void qrtr_ns_init(void) 758 + int qrtr_ns_init(void) 759 759 { 760 760 struct sockaddr_qrtr sq; 761 761 int ret; ··· 766 766 ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM, 767 767 PF_QIPCRTR, &qrtr_ns.sock); 768 768 if (ret < 0) 769 - return; 769 + return ret; 770 770 771 771 ret = kernel_getsockname(qrtr_ns.sock, (struct sockaddr *)&sq); 772 772 if (ret < 0) { ··· 797 797 if (ret < 0) 798 798 goto err_wq; 799 799 800 - return; 800 + return 0; 801 801 802 802 err_wq: 803 803 destroy_workqueue(qrtr_ns.workqueue); 804 804 err_sock: 805 805 sock_release(qrtr_ns.sock); 806 + return ret; 806 807 } 807 808 EXPORT_SYMBOL_GPL(qrtr_ns_init); 808 809
+11 -5
net/qrtr/qrtr.c
··· 1287 1287 return rc; 1288 1288 1289 1289 rc = sock_register(&qrtr_family); 1290 - if (rc) { 1291 - proto_unregister(&qrtr_proto); 1292 - return rc; 1293 - } 1290 + if (rc) 1291 + goto err_proto; 1294 1292 1295 - qrtr_ns_init(); 1293 + rc = qrtr_ns_init(); 1294 + if (rc) 1295 + goto err_sock; 1296 1296 1297 + return 0; 1298 + 1299 + err_sock: 1300 + sock_unregister(qrtr_family.family); 1301 + err_proto: 1302 + proto_unregister(&qrtr_proto); 1297 1303 return rc; 1298 1304 } 1299 1305 postcore_initcall(qrtr_proto_init);
+1 -1
net/qrtr/qrtr.h
··· 29 29 30 30 int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len); 31 31 32 - void qrtr_ns_init(void); 32 + int qrtr_ns_init(void); 33 33 34 34 void qrtr_ns_remove(void); 35 35
+1
net/wireless/Kconfig
··· 21 21 tristate "cfg80211 - wireless configuration API" 22 22 depends on RFKILL || !RFKILL 23 23 select FW_LOADER 24 + select CRC32 24 25 # may need to update this when certificates are changed and are 25 26 # using a different algorithm, though right now they shouldn't 26 27 # (this is here rather than below to allow it to be a module)
+2 -2
scripts/gcc-plugins/Makefile
··· 22 22 GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin) 23 23 24 24 plugin_cxxflags = -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \ 25 - -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++98 \ 25 + -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \ 26 26 -fno-rtti -fno-exceptions -fasynchronous-unwind-tables \ 27 - -ggdb -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat \ 27 + -ggdb -Wno-narrowing -Wno-unused-variable \ 28 28 -Wno-format-diag 29 29 30 30 plugin_ldflags = -shared
-2
sound/pci/hda/hda_intel.c
··· 2220 2220 SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0), 2221 2221 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ 2222 2222 SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), 2223 - /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */ 2224 - SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0), 2225 2223 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ 2226 2224 SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0), 2227 2225 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+1
sound/pci/hda/patch_conexant.c
··· 1070 1070 static const struct hda_device_id snd_hda_id_conexant[] = { 1071 1071 HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto), 1072 1072 HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), 1073 + HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto), 1073 1074 HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), 1074 1075 HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), 1075 1076 HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
+1 -1
sound/pci/hda/patch_hdmi.c
··· 1733 1733 per_pin->silent_stream = false; 1734 1734 1735 1735 unlock_out: 1736 - mutex_unlock(&spec->pcm_lock); 1736 + mutex_unlock(&per_pin->lock); 1737 1737 } 1738 1738 1739 1739 /* update ELD and jack state via audio component */
+11 -1
sound/pci/hda/patch_realtek.c
··· 6289 6289 ALC221_FIXUP_HP_FRONT_MIC, 6290 6290 ALC292_FIXUP_TPT460, 6291 6291 ALC298_FIXUP_SPK_VOLUME, 6292 + ALC298_FIXUP_LENOVO_SPK_VOLUME, 6292 6293 ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER, 6293 6294 ALC269_FIXUP_ATIV_BOOK_8, 6294 6295 ALC221_FIXUP_HP_MIC_NO_PRESENCE, ··· 7120 7119 .chained = true, 7121 7120 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, 7122 7121 }, 7122 + [ALC298_FIXUP_LENOVO_SPK_VOLUME] = { 7123 + .type = HDA_FIXUP_FUNC, 7124 + .v.func = alc298_fixup_speaker_volume, 7125 + }, 7123 7126 [ALC295_FIXUP_DISABLE_DAC3] = { 7124 7127 .type = HDA_FIXUP_FUNC, 7125 7128 .v.func = alc295_fixup_disable_dac3, ··· 7890 7885 SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE), 7891 7886 SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), 7892 7887 SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC), 7893 - SND_PCI_QUIRK(0x1028, 0x0a58, "Dell Precision 3650 Tower", ALC255_FIXUP_DELL_HEADSET_MIC), 7888 + SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC), 7894 7889 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 7895 7890 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 7896 7891 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), ··· 7964 7959 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), 7965 7960 SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), 7966 7961 SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), 7962 + SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED), 7967 7963 SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), 7968 7964 SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), 7969 7965 SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED), 7970 7966 SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), 7971 7967 SND_PCI_QUIRK(0x103c, 
0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), 7968 + SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED), 7972 7969 SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED), 7973 7970 SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED), 7974 7971 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), ··· 8028 8021 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), 8029 8022 SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE), 8030 8023 SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), 8024 + SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), 8025 + SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK), 8031 8026 SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE), 8032 8027 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), 8033 8028 SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), ··· 8135 8126 SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), 8136 8127 SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), 8137 8128 SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), 8129 + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), 8138 8130 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 8139 8131 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), 8140 8132 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+13
sound/pci/hda/patch_via.c
··· 1002 1002 enum { 1003 1003 VIA_FIXUP_INTMIC_BOOST, 1004 1004 VIA_FIXUP_ASUS_G75, 1005 + VIA_FIXUP_POWER_SAVE, 1005 1006 }; 1006 1007 1007 1008 static void via_fixup_intmic_boost(struct hda_codec *codec, ··· 1010 1009 { 1011 1010 if (action == HDA_FIXUP_ACT_PRE_PROBE) 1012 1011 override_mic_boost(codec, 0x30, 0, 2, 40); 1012 + } 1013 + 1014 + static void via_fixup_power_save(struct hda_codec *codec, 1015 + const struct hda_fixup *fix, int action) 1016 + { 1017 + if (action == HDA_FIXUP_ACT_PRE_PROBE) 1018 + codec->power_save_node = 0; 1013 1019 } 1014 1020 1015 1021 static const struct hda_fixup via_fixups[] = { ··· 1033 1025 { } 1034 1026 } 1035 1027 }, 1028 + [VIA_FIXUP_POWER_SAVE] = { 1029 + .type = HDA_FIXUP_FUNC, 1030 + .v.func = via_fixup_power_save, 1031 + }, 1036 1032 }; 1037 1033 1038 1034 static const struct snd_pci_quirk vt2002p_fixups[] = { 1039 1035 SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75), 1040 1036 SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST), 1037 + SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE), 1041 1038 {} 1042 1039 }; 1043 1040
+4
sound/usb/implicit.c
··· 74 74 75 75 /* No quirk for playback but with capture quirk (see below) */ 76 76 IMPLICIT_FB_SKIP_DEV(0x0582, 0x0130), /* BOSS BR-80 */ 77 + IMPLICIT_FB_SKIP_DEV(0x0582, 0x0171), /* BOSS RC-505 */ 77 78 IMPLICIT_FB_SKIP_DEV(0x0582, 0x0189), /* BOSS GT-100v2 */ 78 79 IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d6), /* BOSS GT-1 */ 79 80 IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d8), /* BOSS Katana */ 80 81 IMPLICIT_FB_SKIP_DEV(0x0582, 0x01e5), /* BOSS GT-001 */ 82 + IMPLICIT_FB_SKIP_DEV(0x0582, 0x0203), /* BOSS AD-10 */ 81 83 82 84 {} /* terminator */ 83 85 }; ··· 87 85 /* Implicit feedback quirk table for capture: only FIXED type */ 88 86 static const struct snd_usb_implicit_fb_match capture_implicit_fb_quirks[] = { 89 87 IMPLICIT_FB_FIXED_DEV(0x0582, 0x0130, 0x0d, 0x01), /* BOSS BR-80 */ 88 + IMPLICIT_FB_FIXED_DEV(0x0582, 0x0171, 0x0d, 0x01), /* BOSS RC-505 */ 90 89 IMPLICIT_FB_FIXED_DEV(0x0582, 0x0189, 0x0d, 0x01), /* BOSS GT-100v2 */ 91 90 IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d6, 0x0d, 0x01), /* BOSS GT-1 */ 92 91 IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d8, 0x0d, 0x01), /* BOSS Katana */ 93 92 IMPLICIT_FB_FIXED_DEV(0x0582, 0x01e5, 0x0d, 0x01), /* BOSS GT-001 */ 93 + IMPLICIT_FB_FIXED_DEV(0x0582, 0x0203, 0x0d, 0x01), /* BOSS AD-10 */ 94 94 95 95 {} /* terminator */ 96 96 };
+4
sound/usb/midi.c
··· 1889 1889 ms_ep = find_usb_ms_endpoint_descriptor(hostep); 1890 1890 if (!ms_ep) 1891 1891 continue; 1892 + if (ms_ep->bNumEmbMIDIJack > 0x10) 1893 + continue; 1892 1894 if (usb_endpoint_dir_out(ep)) { 1893 1895 if (endpoints[epidx].out_ep) { 1894 1896 if (++epidx >= MIDI_MAX_ENDPOINTS) { ··· 2143 2141 cs_desc[1] == USB_DT_CS_INTERFACE && 2144 2142 cs_desc[2] == 0xf1 && 2145 2143 cs_desc[3] == 0x02) { 2144 + if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10) 2145 + continue; 2146 2146 endpoint->in_cables = (1 << cs_desc[4]) - 1; 2147 2147 endpoint->out_cables = (1 << cs_desc[5]) - 1; 2148 2148 return snd_usbmidi_detect_endpoints(umidi, endpoint, 1);
-1
tools/bpf/bpftool/net.c
··· 11 11 #include <bpf/bpf.h> 12 12 #include <bpf/libbpf.h> 13 13 #include <net/if.h> 14 - #include <linux/if.h> 15 14 #include <linux/rtnetlink.h> 16 15 #include <linux/socket.h> 17 16 #include <linux/tc_act/tc_bpf.h>
+12 -5
tools/bpf/resolve_btfids/main.c
··· 139 139 #define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__) 140 140 #define pr_err(fmt, ...) \ 141 141 eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__) 142 + #define pr_info(fmt, ...) \ 143 + eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__) 142 144 143 145 static bool is_btf_id(const char *name) 144 146 { ··· 474 472 int nr_funcs = obj->nr_funcs; 475 473 int err, type_id; 476 474 struct btf *btf; 477 - __u32 nr; 475 + __u32 nr_types; 478 476 479 477 btf = btf__parse(obj->btf ?: obj->path, NULL); 480 478 err = libbpf_get_error(btf); ··· 485 483 } 486 484 487 485 err = -1; 488 - nr = btf__get_nr_types(btf); 486 + nr_types = btf__get_nr_types(btf); 489 487 490 488 /* 491 489 * Iterate all the BTF types and search for collected symbol IDs. 492 490 */ 493 - for (type_id = 1; type_id <= nr; type_id++) { 491 + for (type_id = 1; type_id <= nr_types; type_id++) { 494 492 const struct btf_type *type; 495 493 struct rb_root *root; 496 494 struct btf_id *id; ··· 528 526 529 527 id = btf_id__find(root, str); 530 528 if (id) { 531 - id->id = type_id; 532 - (*nr)--; 529 + if (id->id) { 530 + pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n", 531 + str, id->id, type_id, id->id); 532 + } else { 533 + id->id = type_id; 534 + (*nr)--; 535 + } 533 536 } 534 537 } 535 538
+1 -1
tools/testing/selftests/bpf/progs/bprm_opts.c
··· 4 4 * Copyright 2020 Google LLC. 5 5 */ 6 6 7 - #include "vmlinux.h" 7 + #include <linux/bpf.h> 8 8 #include <errno.h> 9 9 #include <bpf/bpf_helpers.h> 10 10 #include <bpf/bpf_tracing.h>
+1 -1
tools/testing/selftests/net/fib_nexthops.sh
··· 869 869 pid3=$! 870 870 ip netns exec me ping -f 2001:db8:101::2 >/dev/null 2>&1 & 871 871 pid4=$! 872 - ip netns exec me mausezahn veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 & 872 + ip netns exec me mausezahn -6 veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 & 873 873 pid5=$! 874 874 875 875 sleep 300
+69 -2
tools/testing/selftests/net/pmtu.sh
··· 162 162 # - list_flush_ipv6_exception 163 163 # Using the same topology as in pmtu_ipv6, create exceptions, and check 164 164 # they are shown when listing exception caches, gone after flushing them 165 - 165 + # 166 + # - pmtu_ipv4_route_change 167 + # Use the same topology as in pmtu_ipv4, but issue a route replacement 168 + # command and delete the corresponding device afterward. This tests for 169 + # proper cleanup of the PMTU exceptions by the route replacement path. 170 + # Device unregistration should complete successfully 171 + # 172 + # - pmtu_ipv6_route_change 173 + # Same as above but with IPv6 166 174 167 175 # Kselftest framework requirement - SKIP code is 4. 168 176 ksft_skip=4 ··· 232 224 cleanup_ipv4_exception ipv4: cleanup of cached exceptions 1 233 225 cleanup_ipv6_exception ipv6: cleanup of cached exceptions 1 234 226 list_flush_ipv4_exception ipv4: list and flush cached exceptions 1 235 - list_flush_ipv6_exception ipv6: list and flush cached exceptions 1" 227 + list_flush_ipv6_exception ipv6: list and flush cached exceptions 1 228 + pmtu_ipv4_route_change ipv4: PMTU exception w/route replace 1 229 + pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1" 236 230 237 231 NS_A="ns-A" 238 232 NS_B="ns-B" ··· 1790 1780 fi 1791 1781 1792 1782 return ${fail} 1783 + } 1784 + 1785 + test_pmtu_ipvX_route_change() { 1786 + family=${1} 1787 + 1788 + setup namespaces routing || return 2 1789 + trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \ 1790 + "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \ 1791 + "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \ 1792 + "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2 1793 + 1794 + if [ ${family} -eq 4 ]; then 1795 + ping=ping 1796 + dst1="${prefix4}.${b_r1}.1" 1797 + dst2="${prefix4}.${b_r2}.1" 1798 + gw="${prefix4}.${a_r1}.2" 1799 + else 1800 + ping=${ping6} 1801 + dst1="${prefix6}:${b_r1}::1" 1802 + dst2="${prefix6}:${b_r2}::1" 1803 + gw="${prefix6}:${a_r1}::2" 1804 + fi 1805 + 1806 + # Set up initial MTU values 1807 + mtu 
"${ns_a}" veth_A-R1 2000 1808 + mtu "${ns_r1}" veth_R1-A 2000 1809 + mtu "${ns_r1}" veth_R1-B 1400 1810 + mtu "${ns_b}" veth_B-R1 1400 1811 + 1812 + mtu "${ns_a}" veth_A-R2 2000 1813 + mtu "${ns_r2}" veth_R2-A 2000 1814 + mtu "${ns_r2}" veth_R2-B 1500 1815 + mtu "${ns_b}" veth_B-R2 1500 1816 + 1817 + # Create route exceptions 1818 + run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} 1819 + run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} 1820 + 1821 + # Check that exceptions have been created with the correct PMTU 1822 + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})" 1823 + check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1 1824 + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" 1825 + check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1 1826 + 1827 + # Replace the route from A to R1 1828 + run_cmd ${ns_a} ip route change default via ${gw} 1829 + 1830 + # Delete the device in A 1831 + run_cmd ${ns_a} ip link del "veth_A-R1" 1832 + } 1833 + 1834 + test_pmtu_ipv4_route_change() { 1835 + test_pmtu_ipvX_route_change 4 1836 + } 1837 + 1838 + test_pmtu_ipv6_route_change() { 1839 + test_pmtu_ipvX_route_change 6 1793 1840 } 1794 1841 1795 1842 usage() {
+34
tools/testing/selftests/net/udpgro.sh
··· 5 5 6 6 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)" 7 7 8 + # set global exit status, but never reset nonzero one. 9 + check_err() 10 + { 11 + if [ $ret -eq 0 ]; then 12 + ret=$1 13 + fi 14 + } 15 + 8 16 cleanup() { 9 17 local -r jobs="$(jobs -p)" 10 18 local -r ns="$(ip netns list|grep $PEER_NS)" ··· 52 44 # Hack: let bg programs complete the startup 53 45 sleep 0.1 54 46 ./udpgso_bench_tx ${tx_args} 47 + ret=$? 55 48 wait $(jobs -p) 49 + return $ret 56 50 } 57 51 58 52 run_test() { ··· 97 87 98 88 sleep 0.1 99 89 ./udpgso_bench_tx ${tx_args} 90 + ret=$? 100 91 kill -INT $pid 101 92 wait $(jobs -p) 93 + return $ret 102 94 } 103 95 104 96 run_one_2sock() { ··· 122 110 sleep 0.1 123 111 # first UDP GSO socket should be closed at this point 124 112 ./udpgso_bench_tx ${tx_args} 113 + ret=$? 125 114 wait $(jobs -p) 115 + return $ret 126 116 } 127 117 128 118 run_nat_test() { ··· 145 131 local -r core_args="-l 4" 146 132 local -r ipv4_args="${core_args} -4 -D 192.168.1.1" 147 133 local -r ipv6_args="${core_args} -6 -D 2001:db8::1" 134 + ret=0 148 135 149 136 echo "ipv4" 150 137 run_test "no GRO" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400" 138 + check_err $? 151 139 152 140 # explicitly check we are not receiving UDP_SEGMENT cmsg (-S -1) 153 141 # when GRO does not take place 154 142 run_test "no GRO chk cmsg" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400 -S -1" 143 + check_err $? 155 144 156 145 # the GSO packets are aggregated because: 157 146 # * veth schedule napi after each xmit 158 147 # * segmentation happens in BH context, veth napi poll is delayed after 159 148 # the transmission of the last segment 160 149 run_test "GRO" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720" 150 + check_err $? 161 151 run_test "GRO chk cmsg" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472" 152 + check_err $? 162 153 run_test "GRO with custom segment size" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720" 154 + check_err $? 
163 155 run_test "GRO with custom segment size cmsg" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720 -S 500" 156 + check_err $? 164 157 165 158 run_nat_test "bad GRO lookup" "${ipv4_args} -M 1 -s 14720 -S 0" "-n 10 -l 1472" 159 + check_err $? 166 160 run_2sock_test "multiple GRO socks" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472" 161 + check_err $? 167 162 168 163 echo "ipv6" 169 164 run_test "no GRO" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400" 165 + check_err $? 170 166 run_test "no GRO chk cmsg" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400 -S -1" 167 + check_err $? 171 168 run_test "GRO" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520" 169 + check_err $? 172 170 run_test "GRO chk cmsg" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520 -S 1452" 171 + check_err $? 173 172 run_test "GRO with custom segment size" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520" 173 + check_err $? 174 174 run_test "GRO with custom segment size cmsg" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520 -S 500" 175 + check_err $? 175 176 176 177 run_nat_test "bad GRO lookup" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 10 -l 1452" 178 + check_err $? 177 179 run_2sock_test "multiple GRO socks" "${ipv6_args} -M 1 -s 14520 -S 0 " "-n 1 -l 14520 -S 1452" 180 + check_err $? 181 + return $ret 178 182 } 179 183 180 184 if [ ! -f ../bpf/xdp_dummy.o ]; then ··· 212 180 shift 213 181 run_one_2sock $@ 214 182 fi 183 + 184 + exit $?
+2 -1
tools/testing/selftests/netfilter/Makefile
··· 4 4 TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \ 5 5 conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \ 6 6 nft_concat_range.sh nft_conntrack_helper.sh \ 7 - nft_queue.sh nft_meta.sh 7 + nft_queue.sh nft_meta.sh \ 8 + ipip-conntrack-mtu.sh 8 9 9 10 LDLIBS = -lmnl 10 11 TEST_GEN_FILES = nf-queue
+206
tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh
··· 1 + #!/bin/bash
2 + # SPDX-License-Identifier: GPL-2.0
3 + 
4 + # Kselftest framework requirement - SKIP code is 4.
5 + ksft_skip=4
6 + 
7 + # Conntrack needs to reassemble fragments in order to have complete
8 + # packets for rule matching. Reassembly can lead to packet loss.
9 + 
10 + # Consider the following setup:
11 + # +--------+ +---------+ +--------+
12 + # |Router A|-------|Wanrouter|-------|Router B|
13 + # | |.IPIP..| |..IPIP.| |
14 + # +--------+ +---------+ +--------+
15 + # / mtu 1400 \
16 + # / \
17 + #+--------+ +--------+
18 + #|Client A| |Client B|
19 + #| | | |
20 + #+--------+ +--------+
21 + 
22 + # Router A and Router B use IPIP tunnel interfaces to tunnel traffic
23 + # between Client A and Client B over WAN. Wanrouter has MTU 1400 set
24 + # on its interfaces.
25 + 
26 + rnd=$(mktemp -u XXXXXXXX)
27 + rx=$(mktemp)
28 + 
29 + r_a="ns-ra-$rnd"
30 + r_b="ns-rb-$rnd"
31 + r_w="ns-rw-$rnd"
32 + c_a="ns-ca-$rnd"
33 + c_b="ns-cb-$rnd"
34 + 
35 + checktool (){
36 + if ! $1 > /dev/null 2>&1; then
37 + echo "SKIP: Could not $2"
38 + exit $ksft_skip
39 + fi
40 + }
41 + 
42 + checktool "iptables --version" "run test without iptables"
43 + checktool "ip -Version" "run test without ip tool"
44 + checktool "which nc" "run test without nc (netcat)"
45 + checktool "ip netns add ${r_a}" "create net namespace"
46 + 
47 + for n in ${r_b} ${r_w} ${c_a} ${c_b};do
48 + ip netns add ${n}
49 + done
50 + 
51 + cleanup() {
52 + for n in ${r_a} ${r_b} ${r_w} ${c_a} ${c_b};do
53 + ip netns del ${n}
54 + done
55 + rm -f ${rx}
56 + }
57 + 
58 + trap cleanup EXIT
59 + 
60 + test_path() {
61 + msg="$1"
62 + 
63 + ip netns exec ${c_b} nc -n -w 3 -q 3 -u -l -p 5000 > ${rx} < /dev/null &
64 + 
65 + sleep 1
66 + for i in 1 2 3; do
67 + head -c1400 /dev/zero | tr "\000" "a" | ip netns exec ${c_a} nc -n -w 1 -u 192.168.20.2 5000
68 + done
69 + 
70 + wait
71 + 
72 + bytes=$(wc -c < ${rx})
73 + 
74 + if [ $bytes -eq 1400 ];then
75 + echo "OK: PMTU $msg connection tracking"
76 + else
77 + echo "FAIL: PMTU $msg connection tracking: got $bytes, expected 1400"
78 + exit 1
79 + fi
80 + }
81 + 
82 + # Detailed setup for Router A
83 + # ---------------------------
84 + # Interfaces:
85 + # eth0: 10.2.2.1/24
86 + # eth1: 192.168.10.1/24
87 + # ipip0: No IP address, local 10.2.2.1 remote 10.4.4.1
88 + # Routes:
89 + # 192.168.20.0/24 dev ipip0 (192.168.20.0/24 is subnet of Client B)
90 + # 10.4.4.1 via 10.2.2.254 (Router B via Wanrouter)
91 + # No iptables rules at all.
92 + 
93 + ip link add veth0 netns ${r_a} type veth peer name veth0 netns ${r_w}
94 + ip link add veth1 netns ${r_a} type veth peer name veth0 netns ${c_a}
95 + 
96 + l_addr="10.2.2.1"
97 + r_addr="10.4.4.1"
98 + ip netns exec ${r_a} ip link add ipip0 type ipip local ${l_addr} remote ${r_addr} mode ipip || exit $ksft_skip
99 + 
100 + for dev in lo veth0 veth1 ipip0; do
101 + ip -net ${r_a} link set $dev up
102 + done
103 + 
104 + ip -net ${r_a} addr add 10.2.2.1/24 dev veth0
105 + ip -net ${r_a} addr add 192.168.10.1/24 dev veth1
106 + 
107 + ip -net ${r_a} route add 192.168.20.0/24 dev ipip0
108 + ip -net ${r_a} route add 10.4.4.0/24 via 10.2.2.254
109 + 
110 + ip netns exec ${r_a} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
111 + 
112 + # Detailed setup for Router B
113 + # ---------------------------
114 + # Interfaces:
115 + # eth0: 10.4.4.1/24
116 + # eth1: 192.168.20.1/24
117 + # ipip0: No IP address, local 10.4.4.1 remote 10.2.2.1
118 + # Routes:
119 + # 192.168.10.0/24 dev ipip0 (192.168.10.0/24 is subnet of Client A)
120 + # 10.2.2.1 via 10.4.4.254 (Router A via Wanrouter)
121 + # No iptables rules at all.
122 + 
123 + ip link add veth0 netns ${r_b} type veth peer name veth1 netns ${r_w}
124 + ip link add veth1 netns ${r_b} type veth peer name veth0 netns ${c_b}
125 + 
126 + l_addr="10.4.4.1"
127 + r_addr="10.2.2.1"
128 + 
129 + ip netns exec ${r_b} ip link add ipip0 type ipip local ${l_addr} remote ${r_addr} mode ipip || exit $ksft_skip
130 + 
131 + for dev in lo veth0 veth1 ipip0; do
132 + ip -net ${r_b} link set $dev up
133 + done
134 + 
135 + ip -net ${r_b} addr add 10.4.4.1/24 dev veth0
136 + ip -net ${r_b} addr add 192.168.20.1/24 dev veth1
137 + 
138 + ip -net ${r_b} route add 192.168.10.0/24 dev ipip0
139 + ip -net ${r_b} route add 10.2.2.0/24 via 10.4.4.254
140 + ip netns exec ${r_b} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
141 + 
142 + # Client A
143 + ip -net ${c_a} addr add 192.168.10.2/24 dev veth0
144 + ip -net ${c_a} link set dev lo up
145 + ip -net ${c_a} link set dev veth0 up
146 + ip -net ${c_a} route add default via 192.168.10.1
147 + 
148 + # Client B
149 + ip -net ${c_b} addr add 192.168.20.2/24 dev veth0
150 + ip -net ${c_b} link set dev veth0 up
151 + ip -net ${c_b} link set dev lo up
152 + ip -net ${c_b} route add default via 192.168.20.1
153 + 
154 + # Wan
155 + ip -net ${r_w} addr add 10.2.2.254/24 dev veth0
156 + ip -net ${r_w} addr add 10.4.4.254/24 dev veth1
157 + 
158 + ip -net ${r_w} link set dev lo up
159 + ip -net ${r_w} link set dev veth0 up mtu 1400
160 + ip -net ${r_w} link set dev veth1 up mtu 1400
161 + 
162 + ip -net ${r_a} link set dev veth0 mtu 1400
163 + ip -net ${r_b} link set dev veth0 mtu 1400
164 + 
165 + ip netns exec ${r_w} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
166 + 
167 + # Path MTU discovery
168 + # ------------------
169 + # Running tracepath from Client A to Client B shows PMTU discovery is working
170 + # as expected:
171 + #
172 + # clienta:~# tracepath 192.168.20.2
173 + # 1?: [LOCALHOST] pmtu 1500
174 + # 1: 192.168.10.1 0.867ms
175 + # 1: 192.168.10.1 0.302ms
176 + # 2: 192.168.10.1 0.312ms pmtu 1480
177 + # 2: no reply
178 + # 3: 192.168.10.1 0.510ms pmtu 1380
179 + # 3: 192.168.20.2 2.320ms reached
180 + # Resume: pmtu 1380 hops 3 back 3
181 + 
182 + # ip netns exec ${c_a} traceroute --mtu 192.168.20.2
183 + 
184 + # Router A has learned PMTU (1400) to Router B from Wanrouter.
185 + # Client A has learned PMTU (1400 - IPIP overhead = 1380) to Client B
186 + # from Router A.
187 + 
188 + #Send large UDP packet
189 + #---------------------
190 + #Now we send a 1400 bytes UDP packet from Client A to Client B:
191 + 
192 + # clienta:~# head -c1400 /dev/zero | tr "\000" "a" | nc -u 192.168.20.2 5000
193 + test_path "without"
194 + 
195 + # The IPv4 stack on Client A already knows the PMTU to Client B, so the
196 + # UDP packet is sent as two fragments (1380 + 20). Router A forwards the
197 + # fragments between eth1 and ipip0. The fragments fit into the tunnel and
198 + # reach their destination.
199 + 
200 + #When sending the large UDP packet again, Router A now reassembles the
201 + #fragments before routing the packet over ipip0. The resulting IPIP
202 + #packet is too big (1400) for the tunnel PMTU (1380) to Router B, it is
203 + #dropped on Router A before sending.
204 + 
205 + ip netns exec ${r_a} iptables -A FORWARD -m conntrack --ctstate NEW
206 + test_path "with"