Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

include/net/inet_sock.h
f866fbc842de ("ipv4: fix data-races around inet->inet_id")
c274af224269 ("inet: introduce inet->inet_flags")
https://lore.kernel.org/all/679ddff6-db6e-4ff6-b177-574e90d0103d@tessares.net/

Adjacent changes:

drivers/net/bonding/bond_alb.c
e74216b8def3 ("bonding: fix macvlan over alb bond support")
f11e5bd159b0 ("bonding: support balance-alb with openvswitch")

drivers/net/ethernet/broadcom/bgmac.c
d6499f0b7c7c ("net: bgmac: Return PTR_ERR() for fixed_phy_register()")
23a14488ea58 ("net: bgmac: Fix return value check for fixed_phy_register()")

drivers/net/ethernet/broadcom/genet/bcmmii.c
32bbe64a1386 ("net: bcmgenet: Fix return value check for fixed_phy_register()")
acf50d1adbf4 ("net: bcmgenet: Return PTR_ERR() for fixed_phy_register()")

net/sctp/socket.c
f866fbc842de ("ipv4: fix data-races around inet->inet_id")
b09bde5c3554 ("inet: move inet->mc_loop to inet->inet_frags")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+1686 -921
+10 -10
Documentation/ABI/testing/sysfs-class-led-trigger-netdev
··· 13 13 Specifies the duration of the LED blink in milliseconds. 14 14 Defaults to 50 ms. 15 15 16 - With hw_control ON, the interval value MUST be set to the 16 + When offloaded is true, the interval value MUST be set to the 17 17 default value and cannot be changed. 18 18 Trying to set any value in this specific mode will return 19 19 an EINVAL error. ··· 44 44 If set to 1, the LED will blink for the milliseconds specified 45 45 in interval to signal transmission. 46 46 47 - With hw_control ON, the blink interval is controlled by hardware 48 - and won't reflect the value set in interval. 47 + When offloaded is true, the blink interval is controlled by 48 + hardware and won't reflect the value set in interval. 49 49 50 50 What: /sys/class/leds/<led>/rx 51 51 Date: Dec 2017 ··· 59 59 If set to 1, the LED will blink for the milliseconds specified 60 60 in interval to signal reception. 61 61 62 - With hw_control ON, the blink interval is controlled by hardware 63 - and won't reflect the value set in interval. 62 + When offloaded is true, the blink interval is controlled by 63 + hardware and won't reflect the value set in interval. 64 64 65 - What: /sys/class/leds/<led>/hw_control 65 + What: /sys/class/leds/<led>/offloaded 66 66 Date: Jun 2023 67 67 KernelVersion: 6.5 68 68 Contact: linux-leds@vger.kernel.org 69 69 Description: 70 - Communicate whether the LED trigger modes are driven by hardware 71 - or software fallback is used. 70 + Communicate whether the LED trigger modes are offloaded to 71 + hardware or whether software fallback is used. 72 72 73 73 If 0, the LED is using software fallback to blink. 74 74 75 - If 1, the LED is using hardware control to blink and signal the 76 - requested modes. 75 + If 1, the LED blinking in requested mode is offloaded to 76 + hardware. 77 77 78 78 What: /sys/class/leds/<led>/link_10 79 79 Date: Jun 2023
+2 -2
Documentation/admin-guide/hw-vuln/srso.rst
··· 141 141 To ensure the safety of this mitigation, the kernel must ensure that the 142 142 safe return sequence is itself free from attacker interference. In Zen3 143 143 and Zen4, this is accomplished by creating a BTB alias between the 144 - untraining function srso_untrain_ret_alias() and the safe return 145 - function srso_safe_ret_alias() which results in evicting a potentially 144 + untraining function srso_alias_untrain_ret() and the safe return 145 + function srso_alias_safe_ret() which results in evicting a potentially 146 146 poisoned BTB entry and using that safe one for all function returns. 147 147 148 148 In older Zen1 and Zen2, this is accomplished using a reinterpretation
+1 -1
Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml
··· 87 87 emac0_mdc, emac0_mdio, emac0_ptp_aux, emac0_ptp_pps, emac1_mcg0, 88 88 emac1_mcg1, emac1_mcg2, emac1_mcg3, emac1_mdc, emac1_mdio, 89 89 emac1_ptp_aux, emac1_ptp_pps, gcc_gp1, gcc_gp2, gcc_gp3, 90 - gcc_gp4, gcc_gp5, hs0_mi2s, hs1_mi2s, hs2_mi2s, ibi_i3c, 90 + gcc_gp4, gcc_gp5, gpio, hs0_mi2s, hs1_mi2s, hs2_mi2s, ibi_i3c, 91 91 jitter_bist, mdp0_vsync0, mdp0_vsync1, mdp0_vsync2, mdp0_vsync3, 92 92 mdp0_vsync4, mdp0_vsync5, mdp0_vsync6, mdp0_vsync7, mdp0_vsync8, 93 93 mdp1_vsync0, mdp1_vsync1, mdp1_vsync2, mdp1_vsync3, mdp1_vsync4,
+1 -1
Documentation/i2c/writing-clients.rst
··· 46 46 }, 47 47 48 48 .id_table = foo_idtable, 49 - .probe_new = foo_probe, 49 + .probe = foo_probe, 50 50 .remove = foo_remove, 51 51 /* if device autodetection is needed: */ 52 52 .class = I2C_CLASS_SOMETHING,
+13 -13
MAINTAINERS
··· 8831 8831 S: Maintained 8832 8832 F: drivers/gpio/gpio-regmap.c 8833 8833 F: include/linux/gpio/regmap.h 8834 + K: (devm_)?gpio_regmap_(un)?register 8834 8835 8835 8836 GPIO SUBSYSTEM 8836 8837 M: Linus Walleij <linus.walleij@linaro.org> ··· 14822 14821 F: net/netfilter/xt_SECMARK.c 14823 14822 F: net/netlabel/ 14824 14823 14824 + NETWORKING [MACSEC] 14825 + M: Sabrina Dubroca <sd@queasysnail.net> 14826 + L: netdev@vger.kernel.org 14827 + S: Maintained 14828 + F: drivers/net/macsec.c 14829 + F: include/net/macsec.h 14830 + F: include/uapi/linux/if_macsec.h 14831 + K: macsec 14832 + K: \bmdo_ 14833 + 14825 14834 NETWORKING [MPTCP] 14826 14835 M: Matthieu Baerts <matthieu.baerts@tessares.net> 14827 14836 M: Mat Martineau <martineau@kernel.org> ··· 19260 19249 F: drivers/tty/serdev/ 19261 19250 F: include/linux/serdev.h 19262 19251 19263 - SERIAL DRIVERS 19264 - M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 19265 - L: linux-serial@vger.kernel.org 19266 - S: Maintained 19267 - F: Documentation/devicetree/bindings/serial/ 19268 - F: drivers/tty/serial/ 19269 - 19270 19252 SERIAL IR RECEIVER 19271 19253 M: Sean Young <sean@mess.org> 19272 19254 L: linux-media@vger.kernel.org ··· 21664 21660 T: git git://github.com/srcres258/linux-doc.git doc-zh-tw 21665 21661 F: Documentation/translations/zh_TW/ 21666 21662 21667 - TTY LAYER 21663 + TTY LAYER AND SERIAL DRIVERS 21668 21664 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 21669 21665 M: Jiri Slaby <jirislaby@kernel.org> 21670 21666 L: linux-kernel@vger.kernel.org 21671 21667 L: linux-serial@vger.kernel.org 21672 21668 S: Supported 21673 21669 T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git 21670 + F: Documentation/devicetree/bindings/serial/ 21674 21671 F: Documentation/driver-api/serial/ 21675 21672 F: drivers/tty/ 21676 - F: drivers/tty/serial/serial_base.h 21677 - F: drivers/tty/serial/serial_base_bus.c 21678 - F: drivers/tty/serial/serial_core.c 21679 - F: drivers/tty/serial/serial_ctrl.c 21680 - F: drivers/tty/serial/serial_port.c 21681 21673 F: include/linux/selection.h 21682 21674 F: include/linux/serial.h 21683 21675 F: include/linux/serial_core.h
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 5 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc6 5 + EXTRAVERSION = -rc7 6 6 NAME = Hurr durr I'ma ninja sloth 7 7 8 8 # *DOCUMENTATION*
+1 -1
arch/arm/boot/dts/arm/integratorap.dts
··· 158 158 valid-mask = <0x003fffff>; 159 159 }; 160 160 161 - pci: pciv3@62000000 { 161 + pci: pci@62000000 { 162 162 compatible = "arm,integrator-ap-pci", "v3,v360epc-pci"; 163 163 device_type = "pci"; 164 164 #interrupt-cells = <1>;
+1 -1
arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-mira.dtsi
··· 182 182 pinctrl-0 = <&pinctrl_rtc_int>; 183 183 reg = <0x68>; 184 184 interrupt-parent = <&gpio7>; 185 - interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; 185 + interrupts = <8 IRQ_TYPE_LEVEL_LOW>; 186 186 status = "disabled"; 187 187 }; 188 188 };
+8 -6
arch/arm/boot/dts/nxp/imx/imx6sx.dtsi
··· 863 863 reg = <0>; 864 864 865 865 ldb_from_lcdif1: endpoint { 866 - remote-endpoint = <&lcdif1_to_ldb>; 867 866 }; 868 867 }; 869 868 ··· 1009 1010 <&clks IMX6SX_CLK_USDHC1>; 1010 1011 clock-names = "ipg", "ahb", "per"; 1011 1012 bus-width = <4>; 1013 + fsl,tuning-start-tap = <20>; 1014 + fsl,tuning-step= <2>; 1012 1015 status = "disabled"; 1013 1016 }; 1014 1017 ··· 1023 1022 <&clks IMX6SX_CLK_USDHC2>; 1024 1023 clock-names = "ipg", "ahb", "per"; 1025 1024 bus-width = <4>; 1025 + fsl,tuning-start-tap = <20>; 1026 + fsl,tuning-step= <2>; 1026 1027 status = "disabled"; 1027 1028 }; 1028 1029 ··· 1037 1034 <&clks IMX6SX_CLK_USDHC3>; 1038 1035 clock-names = "ipg", "ahb", "per"; 1039 1036 bus-width = <4>; 1037 + fsl,tuning-start-tap = <20>; 1038 + fsl,tuning-step= <2>; 1040 1039 status = "disabled"; 1041 1040 }; 1042 1041 ··· 1314 1309 power-domains = <&pd_disp>; 1315 1310 status = "disabled"; 1316 1311 1317 - ports { 1318 - port { 1319 - lcdif1_to_ldb: endpoint { 1320 - remote-endpoint = <&ldb_from_lcdif1>; 1321 - }; 1312 + port { 1313 + lcdif1_to_ldb: endpoint { 1322 1314 }; 1323 1315 }; 1324 1316 };
+6
arch/arm/boot/dts/nxp/imx/imx7s.dtsi
··· 1184 1184 <&clks IMX7D_USDHC1_ROOT_CLK>; 1185 1185 clock-names = "ipg", "ahb", "per"; 1186 1186 bus-width = <4>; 1187 + fsl,tuning-step = <2>; 1188 + fsl,tuning-start-tap = <20>; 1187 1189 status = "disabled"; 1188 1190 }; 1189 1191 ··· 1198 1196 <&clks IMX7D_USDHC2_ROOT_CLK>; 1199 1197 clock-names = "ipg", "ahb", "per"; 1200 1198 bus-width = <4>; 1199 + fsl,tuning-step = <2>; 1200 + fsl,tuning-start-tap = <20>; 1201 1201 status = "disabled"; 1202 1202 }; 1203 1203 ··· 1212 1208 <&clks IMX7D_USDHC3_ROOT_CLK>; 1213 1209 clock-names = "ipg", "ahb", "per"; 1214 1210 bus-width = <4>; 1211 + fsl,tuning-step = <2>; 1212 + fsl,tuning-start-tap = <20>; 1215 1213 status = "disabled"; 1216 1214 }; 1217 1215
+9
arch/arm/boot/dts/ti/omap/am335x-bone-common.dtsi
··· 145 145 /* MDIO */ 146 146 AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLUP | SLEWCTRL_FAST, MUX_MODE0) 147 147 AM33XX_PADCONF(AM335X_PIN_MDC, PIN_OUTPUT_PULLUP, MUX_MODE0) 148 + /* Added to support GPIO controlled PHY reset */ 149 + AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_OUTPUT_PULLUP, MUX_MODE7) 148 150 >; 149 151 }; 150 152 ··· 155 153 /* MDIO reset value */ 156 154 AM33XX_PADCONF(AM335X_PIN_MDIO, PIN_INPUT_PULLDOWN, MUX_MODE7) 157 155 AM33XX_PADCONF(AM335X_PIN_MDC, PIN_INPUT_PULLDOWN, MUX_MODE7) 156 + /* Added to support GPIO controlled PHY reset */ 157 + AM33XX_PADCONF(AM335X_PIN_UART0_CTSN, PIN_INPUT_PULLDOWN, MUX_MODE7) 158 158 >; 159 159 }; 160 160 ··· 219 215 baseboard_eeprom: baseboard_eeprom@50 { 220 216 compatible = "atmel,24c256"; 221 217 reg = <0x50>; 218 + vcc-supply = <&ldo4_reg>; 222 219 223 220 #address-cells = <1>; 224 221 #size-cells = <1>; ··· 382 377 383 378 ethphy0: ethernet-phy@0 { 384 379 reg = <0>; 380 + /* Support GPIO reset on revision C3 boards */ 381 + reset-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; 382 + reset-assert-us = <300>; 383 + reset-deassert-us = <6500>; 385 384 }; 386 385 }; 387 386
+1 -1
arch/arm/mach-zynq/pm.c
··· 8 8 */ 9 9 10 10 #include <linux/io.h> 11 + #include <linux/of.h> 11 12 #include <linux/of_address.h> 12 - #include <linux/of_device.h> 13 13 #include "common.h" 14 14 15 15 /* register offsets */
+3 -4
arch/arm64/boot/dts/freescale/imx8mm.dtsi
··· 1221 1221 compatible = "fsl,imx8mm-mipi-csi2"; 1222 1222 reg = <0x32e30000 0x1000>; 1223 1223 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; 1224 - assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>, 1225 - <&clk IMX8MM_CLK_CSI1_PHY_REF>; 1226 - assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>, 1227 - <&clk IMX8MM_SYS_PLL2_1000M>; 1224 + assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>; 1225 + assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>; 1226 + 1228 1227 clock-frequency = <333000000>; 1229 1228 clocks = <&clk IMX8MM_CLK_DISP_APB_ROOT>, 1230 1229 <&clk IMX8MM_CLK_CSI1_ROOT>,
+2 -4
arch/arm64/boot/dts/freescale/imx8mn.dtsi
··· 1175 1175 compatible = "fsl,imx8mm-mipi-csi2"; 1176 1176 reg = <0x32e30000 0x1000>; 1177 1177 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; 1178 - assigned-clocks = <&clk IMX8MN_CLK_CAMERA_PIXEL>, 1179 - <&clk IMX8MN_CLK_CSI1_PHY_REF>; 1180 - assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_1000M>, 1181 - <&clk IMX8MN_SYS_PLL2_1000M>; 1178 + assigned-clocks = <&clk IMX8MN_CLK_CAMERA_PIXEL>; 1179 + assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_1000M>; 1182 1180 assigned-clock-rates = <333000000>; 1183 1181 clock-frequency = <333000000>; 1184 1182 clocks = <&clk IMX8MN_CLK_DISP_APB_ROOT>,
+1 -1
arch/arm64/boot/dts/freescale/imx93.dtsi
··· 340 340 341 341 anatop: anatop@44480000 { 342 342 compatible = "fsl,imx93-anatop", "syscon"; 343 - reg = <0x44480000 0x10000>; 343 + reg = <0x44480000 0x2000>; 344 344 }; 345 345 346 346 adc1: adc@44530000 {
+1 -1
arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
··· 121 121 }; 122 122 }; 123 123 124 - pm8150l-thermal { 124 + pm8150l-pcb-thermal { 125 125 polling-delay-passive = <0>; 126 126 polling-delay = <0>; 127 127 thermal-sensors = <&pm8150l_adc_tm 1>;
+2 -2
arch/arm64/boot/dts/qcom/sa8775p-ride.dts
··· 153 153 154 154 vreg_l4c: ldo4 { 155 155 regulator-name = "vreg_l4c"; 156 - regulator-min-microvolt = <1100000>; 157 - regulator-max-microvolt = <1300000>; 156 + regulator-min-microvolt = <1200000>; 157 + regulator-max-microvolt = <1200000>; 158 158 regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>; 159 159 /* 160 160 * FIXME: This should have regulator-allow-set-load but
+2 -2
arch/arm64/boot/dts/qcom/sc7180.dtsi
··· 3120 3120 reg = <0 0x0ae94400 0 0x200>, 3121 3121 <0 0x0ae94600 0 0x280>, 3122 3122 <0 0x0ae94a00 0 0x1e0>; 3123 - reg-names = "dsi0_phy", 3124 - "dsi0_phy_lane", 3123 + reg-names = "dsi_phy", 3124 + "dsi_phy_lane", 3125 3125 "dsi_pll"; 3126 3126 3127 3127 #clock-cells = <1>;
+1 -1
arch/arm64/boot/dts/qcom/sc8180x.dtsi
··· 3561 3561 }; 3562 3562 3563 3563 osm_l3: interconnect@18321000 { 3564 - compatible = "qcom,sc8180x-osm-l3"; 3564 + compatible = "qcom,sc8180x-osm-l3", "qcom,osm-l3"; 3565 3565 reg = <0 0x18321000 0 0x1400>; 3566 3566 3567 3567 clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+9 -9
arch/arm64/boot/dts/qcom/sm8150.dtsi
··· 56 56 qcom,freq-domain = <&cpufreq_hw 0>; 57 57 operating-points-v2 = <&cpu0_opp_table>; 58 58 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 59 - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; 59 + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; 60 60 power-domains = <&CPU_PD0>; 61 61 power-domain-names = "psci"; 62 62 #cooling-cells = <2>; ··· 85 85 qcom,freq-domain = <&cpufreq_hw 0>; 86 86 operating-points-v2 = <&cpu0_opp_table>; 87 87 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 88 - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; 88 + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; 89 89 power-domains = <&CPU_PD1>; 90 90 power-domain-names = "psci"; 91 91 #cooling-cells = <2>; ··· 109 109 qcom,freq-domain = <&cpufreq_hw 0>; 110 110 operating-points-v2 = <&cpu0_opp_table>; 111 111 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 112 - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; 112 + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; 113 113 power-domains = <&CPU_PD2>; 114 114 power-domain-names = "psci"; 115 115 #cooling-cells = <2>; ··· 133 133 qcom,freq-domain = <&cpufreq_hw 0>; 134 134 operating-points-v2 = <&cpu0_opp_table>; 135 135 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 136 - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; 136 + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; 137 137 power-domains = <&CPU_PD3>; 138 138 power-domain-names = "psci"; 139 139 #cooling-cells = <2>; ··· 157 157 qcom,freq-domain = <&cpufreq_hw 1>; 158 158 operating-points-v2 = <&cpu4_opp_table>; 159 159 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 160 - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; 160 + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; 161 161 power-domains = <&CPU_PD4>; 162 162 power-domain-names = "psci"; 163 163 #cooling-cells = <2>; ··· 181 181 qcom,freq-domain = <&cpufreq_hw 1>; 182 182 operating-points-v2 = <&cpu4_opp_table>; 183 183 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 184 - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; 184 + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; 185 185 power-domains = <&CPU_PD5>; 186 186 power-domain-names = "psci"; 187 187 #cooling-cells = <2>; ··· 205 205 qcom,freq-domain = <&cpufreq_hw 1>; 206 206 operating-points-v2 = <&cpu4_opp_table>; 207 207 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 208 - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; 208 + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; 209 209 power-domains = <&CPU_PD6>; 210 210 power-domain-names = "psci"; 211 211 #cooling-cells = <2>; ··· 229 229 qcom,freq-domain = <&cpufreq_hw 2>; 230 230 operating-points-v2 = <&cpu7_opp_table>; 231 231 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 232 - <&osm_l3 MASTER_OSM_L3_APPS 0 &osm_l3 SLAVE_OSM_L3 0>; 232 + <&osm_l3 MASTER_OSM_L3_APPS &osm_l3 SLAVE_OSM_L3>; 233 233 power-domains = <&CPU_PD7>; 234 234 power-domain-names = "psci"; 235 235 #cooling-cells = <2>; ··· 4342 4342 clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>; 4343 4343 clock-names = "xo", "alternate"; 4344 4344 4345 - #interconnect-cells = <2>; 4345 + #interconnect-cells = <1>; 4346 4346 4347 4347 cpufreq_hw: cpufreq@18323000 {
+9 -9
arch/arm64/boot/dts/qcom/sm8250.dtsi
··· 107 107 qcom,freq-domain = <&cpufreq_hw 0>; 108 108 operating-points-v2 = <&cpu0_opp_table>; 109 109 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 110 - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; 110 + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; 111 111 #cooling-cells = <2>; 112 112 L2_0: l2-cache { 113 113 compatible = "cache"; ··· 138 138 qcom,freq-domain = <&cpufreq_hw 0>; 139 139 operating-points-v2 = <&cpu0_opp_table>; 140 140 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 141 - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; 141 + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; 142 142 #cooling-cells = <2>; 143 143 L2_100: l2-cache { 144 144 compatible = "cache"; ··· 163 163 qcom,freq-domain = <&cpufreq_hw 0>; 164 164 operating-points-v2 = <&cpu0_opp_table>; 165 165 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 166 - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; 166 + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; 167 167 #cooling-cells = <2>; 168 168 L2_200: l2-cache { 169 169 compatible = "cache"; ··· 188 188 qcom,freq-domain = <&cpufreq_hw 0>; 189 189 operating-points-v2 = <&cpu0_opp_table>; 190 190 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 191 - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; 191 + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; 192 192 #cooling-cells = <2>; 193 193 L2_300: l2-cache { 194 194 compatible = "cache"; ··· 213 213 qcom,freq-domain = <&cpufreq_hw 1>; 214 214 operating-points-v2 = <&cpu4_opp_table>; 215 215 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 216 - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; 216 + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; 217 217 #cooling-cells = <2>; 218 218 L2_400: l2-cache { 219 219 compatible = "cache"; ··· 238 238 qcom,freq-domain = <&cpufreq_hw 1>; 239 239 operating-points-v2 = <&cpu4_opp_table>; 240 240 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 241 - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; 241 + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; 242 242 #cooling-cells = <2>; 243 243 L2_500: l2-cache { 244 244 compatible = "cache"; ··· 263 263 qcom,freq-domain = <&cpufreq_hw 1>; 264 264 operating-points-v2 = <&cpu4_opp_table>; 265 265 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 266 - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; 266 + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; 267 267 #cooling-cells = <2>; 268 268 L2_600: l2-cache { 269 269 compatible = "cache"; ··· 288 288 qcom,freq-domain = <&cpufreq_hw 2>; 289 289 operating-points-v2 = <&cpu7_opp_table>; 290 290 interconnects = <&gem_noc MASTER_AMPSS_M0 0 &mc_virt SLAVE_EBI_CH0 0>, 291 - <&epss_l3 MASTER_OSM_L3_APPS 0 &epss_l3 SLAVE_OSM_L3 0>; 291 + <&epss_l3 MASTER_OSM_L3_APPS &epss_l3 SLAVE_OSM_L3>; 292 292 #cooling-cells = <2>; 293 293 L2_700: l2-cache { 294 294 compatible = "cache"; ··· 5679 5679 clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>; 5680 5680 clock-names = "xo", "alternate"; 5681 5681 5682 - #interconnect-cells = <2>; 5682 + #interconnect-cells = <1>; 5683 5683 5684 5684 cpufreq_hw: cpufreq@18591000 {
+4
arch/arm64/boot/dts/qcom/sm8350.dtsi
··· 1744 1744 qcom,controlled-remotely; 1745 1745 iommus = <&apps_smmu 0x594 0x0011>, 1746 1746 <&apps_smmu 0x596 0x0011>; 1747 + /* FIXME: Probing BAM DMA causes some abort and system hang */ 1748 + status = "fail"; 1747 1749 }; 1748 1750 1749 1751 crypto: crypto@1dfa000 { ··· 1757 1755 <&apps_smmu 0x596 0x0011>; 1758 1756 interconnects = <&aggre2_noc MASTER_CRYPTO 0 &mc_virt SLAVE_EBI1 0>; 1759 1757 interconnect-names = "memory"; 1758 + /* FIXME: dependency BAM DMA is disabled */ 1759 + status = "disabled"; 1760 1760 }; 1761 1761 1762 1762 ipa: ipa@1e40000 {
+8 -8
arch/arm64/boot/dts/rockchip/px30.dtsi
··· 291 291 }; 292 292 power-domain@PX30_PD_MMC_NAND { 293 293 reg = <PX30_PD_MMC_NAND>; 294 - clocks = <&cru HCLK_NANDC>, 295 - <&cru HCLK_EMMC>, 296 - <&cru HCLK_SDIO>, 297 - <&cru HCLK_SFC>, 298 - <&cru SCLK_EMMC>, 299 - <&cru SCLK_NANDC>, 300 - <&cru SCLK_SDIO>, 301 - <&cru SCLK_SFC>; 294 + clocks = <&cru HCLK_NANDC>, 295 + <&cru HCLK_EMMC>, 296 + <&cru HCLK_SDIO>, 297 + <&cru HCLK_SFC>, 298 + <&cru SCLK_EMMC>, 299 + <&cru SCLK_NANDC>, 300 + <&cru SCLK_SDIO>, 301 + <&cru SCLK_SFC>; 302 302 pm_qos = <&qos_emmc>, <&qos_nand>, 303 303 <&qos_sdio>, <&qos_sfc>; 304 304 #power-domain-cells = <0>;
-1
arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
··· 106 106 regulator-name = "vdd_core"; 107 107 regulator-min-microvolt = <827000>; 108 108 regulator-max-microvolt = <1340000>; 109 - regulator-init-microvolt = <1015000>; 110 109 regulator-settling-time-up-us = <250>; 111 110 regulator-always-on; 112 111 regulator-boot-on;
-1
arch/arm64/boot/dts/rockchip/rk3308-rock-pi-s.dts
··· 105 105 regulator-name = "vdd_core"; 106 106 regulator-min-microvolt = <827000>; 107 107 regulator-max-microvolt = <1340000>; 108 - regulator-init-microvolt = <1015000>; 109 108 regulator-settling-time-up-us = <250>; 110 109 regulator-always-on; 111 110 regulator-boot-on;
+1 -1
arch/arm64/boot/dts/rockchip/rk3399-eaidk-610.dts
··· 773 773 compatible = "brcm,bcm4329-fmac"; 774 774 reg = <1>; 775 775 interrupt-parent = <&gpio0>; 776 - interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>; 776 + interrupts = <RK_PA3 IRQ_TYPE_LEVEL_HIGH>; 777 777 interrupt-names = "host-wake"; 778 778 pinctrl-names = "default"; 779 779 pinctrl-0 = <&wifi_host_wake_l>;
-1
arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
··· 375 375 vcc_sdio: LDO_REG4 { 376 376 regulator-always-on; 377 377 regulator-boot-on; 378 - regulator-init-microvolt = <3000000>; 379 378 regulator-min-microvolt = <1800000>; 380 379 regulator-max-microvolt = <3300000>; 381 380 regulator-name = "vcc_sdio";
+1 -2
arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
··· 548 548 &sdhci { 549 549 max-frequency = <150000000>; 550 550 bus-width = <8>; 551 - mmc-hs400-1_8v; 551 + mmc-hs200-1_8v; 552 552 non-removable; 553 - mmc-hs400-enhanced-strobe; 554 553 status = "okay"; 555 554 }; 556 555
+3 -3
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
··· 45 45 sdio_pwrseq: sdio-pwrseq { 46 46 compatible = "mmc-pwrseq-simple"; 47 47 clocks = <&rk808 1>; 48 - clock-names = "ext_clock"; 48 + clock-names = "lpo"; 49 49 pinctrl-names = "default"; 50 50 pinctrl-0 = <&wifi_enable_h>; 51 51 reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; ··· 645 645 }; 646 646 647 647 &sdhci { 648 + max-frequency = <150000000>; 648 649 bus-width = <8>; 649 - mmc-hs400-1_8v; 650 - mmc-hs400-enhanced-strobe; 650 + mmc-hs200-1_8v; 651 651 non-removable; 652 652 status = "okay"; 653 653 };
+1 -1
arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b-plus.dts
··· 31 31 compatible = "brcm,bcm4329-fmac"; 32 32 reg = <1>; 33 33 interrupt-parent = <&gpio0>; 34 - interrupts = <RK_PA3 GPIO_ACTIVE_HIGH>; 34 + interrupts = <RK_PA3 IRQ_TYPE_LEVEL_HIGH>; 35 35 interrupt-names = "host-wake"; 36 36 pinctrl-names = "default"; 37 37 pinctrl-0 = <&wifi_host_wake_l>;
-3
arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi
··· 356 356 regulator-boot-on; 357 357 regulator-min-microvolt = <500000>; 358 358 regulator-max-microvolt = <1350000>; 359 - regulator-init-microvolt = <900000>; 360 359 regulator-ramp-delay = <6001>; 361 360 regulator-initial-mode = <0x2>; 362 361 regulator-name = "vdd_logic"; ··· 370 371 regulator-boot-on; 371 372 regulator-min-microvolt = <500000>; 372 373 regulator-max-microvolt = <1350000>; 373 - regulator-init-microvolt = <900000>; 374 374 regulator-ramp-delay = <6001>; 375 375 regulator-initial-mode = <0x2>; 376 376 regulator-name = "vdd_gpu"; ··· 531 533 regulator-boot-on; 532 534 regulator-min-microvolt = <712500>; 533 535 regulator-max-microvolt = <1390000>; 534 - regulator-init-microvolt = <900000>; 535 536 regulator-name = "vdd_cpu"; 536 537 regulator-ramp-delay = <2300>; 537 538 vin-supply = <&vcc_sys>;
+2 -2
arch/arm64/boot/dts/rockchip/rk3566-box-demo.dts
··· 239 239 240 240 &gmac1 { 241 241 assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1>; 242 - assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&gmac1_clkin>; 242 + assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&gmac1_clkin>; 243 243 phy-mode = "rgmii"; 244 244 clock_in_out = "input"; 245 245 pinctrl-names = "default"; ··· 416 416 compatible = "brcm,bcm4329-fmac"; 417 417 reg = <1>; 418 418 interrupt-parent = <&gpio2>; 419 - interrupts = <RK_PB2 GPIO_ACTIVE_HIGH>; 419 + interrupts = <RK_PB2 IRQ_TYPE_LEVEL_HIGH>; 420 420 interrupt-names = "host-wake"; 421 421 pinctrl-names = "default"; 422 422 pinctrl-0 = <&wifi_host_wake_h>;
-3
arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts
··· 218 218 regulator-boot-on; 219 219 regulator-min-microvolt = <500000>; 220 220 regulator-max-microvolt = <1350000>; 221 - regulator-init-microvolt = <900000>; 222 221 regulator-ramp-delay = <6001>; 223 222 regulator-initial-mode = <0x2>; 224 223 ··· 232 233 regulator-boot-on; 233 234 regulator-min-microvolt = <500000>; 234 235 regulator-max-microvolt = <1350000>; 235 - regulator-init-microvolt = <900000>; 236 236 regulator-ramp-delay = <6001>; 237 237 regulator-initial-mode = <0x2>; 238 238 ··· 257 259 regulator-boot-on; 258 260 regulator-min-microvolt = <500000>; 259 261 regulator-max-microvolt = <1350000>; 260 - regulator-init-microvolt = <900000>; 261 262 regulator-ramp-delay = <6001>; 262 263 regulator-initial-mode = <0x2>; 263 264
-2
arch/arm64/boot/dts/rockchip/rk3566-pinenote.dtsi
··· 264 264 regulator-always-on; 265 265 regulator-min-microvolt = <500000>; 266 266 regulator-max-microvolt = <1350000>; 267 - regulator-init-microvolt = <900000>; 268 267 regulator-ramp-delay = <6001>; 269 268 regulator-initial-mode = <0x2>; 270 269 ··· 277 278 regulator-name = "vdd_gpu_npu"; 278 279 regulator-min-microvolt = <500000>; 279 280 regulator-max-microvolt = <1350000>; 280 - regulator-init-microvolt = <900000>; 281 281 regulator-ramp-delay = <6001>; 282 282 regulator-initial-mode = <0x2>; 283 283
-2
arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts
··· 366 366 regulator-boot-on; 367 367 regulator-min-microvolt = <500000>; 368 368 regulator-max-microvolt = <1350000>; 369 - regulator-init-microvolt = <900000>; 370 369 regulator-ramp-delay = <6001>; 371 370 regulator-initial-mode = <0x2>; 372 371 regulator-name = "vdd_logic"; ··· 380 381 regulator-boot-on; 381 382 regulator-min-microvolt = <500000>; 382 383 regulator-max-microvolt = <1350000>; 383 - regulator-init-microvolt = <900000>; 384 384 regulator-ramp-delay = <6001>; 385 385 regulator-initial-mode = <0x2>; 386 386 regulator-name = "vdd_gpu";
-2
arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts
··· 277 277 regulator-boot-on; 278 278 regulator-min-microvolt = <500000>; 279 279 regulator-max-microvolt = <1350000>; 280 - regulator-init-microvolt = <900000>; 281 280 regulator-ramp-delay = <6001>; 282 281 283 282 regulator-state-mem { ··· 291 292 regulator-boot-on; 292 293 regulator-min-microvolt = <900000>; 293 294 regulator-max-microvolt = <1350000>; 294 - regulator-init-microvolt = <900000>; 295 295 regulator-ramp-delay = <6001>; 296 296 297 297 regulator-state-mem {
+2 -2
arch/arm64/boot/dts/rockchip/rk3566-radxa-cm3-io.dts
··· 137 137 138 138 &mdio1 { 139 139 rgmii_phy1: ethernet-phy@0 { 140 - compatible="ethernet-phy-ieee802.3-c22"; 141 - reg= <0x0>; 140 + compatible = "ethernet-phy-ieee802.3-c22"; 141 + reg = <0x0>; 142 142 }; 143 143 }; 144 144
-2
arch/arm64/boot/dts/rockchip/rk3566-roc-pc.dts
··· 278 278 regulator-boot-on; 279 279 regulator-min-microvolt = <500000>; 280 280 regulator-max-microvolt = <1350000>; 281 - regulator-init-microvolt = <900000>; 282 281 regulator-ramp-delay = <6001>; 283 282 284 283 regulator-state-mem { ··· 290 291 regulator-name = "vdd_gpu"; 291 292 regulator-min-microvolt = <900000>; 292 293 regulator-max-microvolt = <1350000>; 293 - regulator-init-microvolt = <900000>; 294 294 regulator-ramp-delay = <6001>; 295 295 296 296 regulator-state-mem {
-3
arch/arm64/boot/dts/rockchip/rk3566-soquartz.dtsi
··· 234 234 regulator-boot-on; 235 235 regulator-min-microvolt = <500000>; 236 236 regulator-max-microvolt = <1350000>; 237 - regulator-init-microvolt = <900000>; 238 237 regulator-ramp-delay = <6001>; 239 238 regulator-initial-mode = <0x2>; 240 239 regulator-state-mem { ··· 248 249 regulator-boot-on; 249 250 regulator-min-microvolt = <500000>; 250 251 regulator-max-microvolt = <1350000>; 251 - regulator-init-microvolt = <900000>; 252 252 regulator-ramp-delay = <6001>; 253 253 regulator-initial-mode = <0x2>; 254 254 regulator-state-mem { ··· 270 272 regulator-boot-on; 271 273 regulator-min-microvolt = <500000>; 272 274 regulator-max-microvolt = <1350000>; 273 - regulator-init-microvolt = <900000>; 274 275 regulator-initial-mode = <0x2>; 275 276 regulator-name = "vdd_npu"; 276 277 regulator-state-mem {
-3
arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
··· 308 308 regulator-name = "vdd_logic"; 309 309 regulator-always-on; 310 310 regulator-boot-on; 311 - regulator-init-microvolt = <900000>; 312 311 regulator-initial-mode = <0x2>; 313 312 regulator-min-microvolt = <500000>; 314 313 regulator-max-microvolt = <1350000>; ··· 321 322 vdd_gpu: DCDC_REG2 { 322 323 regulator-name = "vdd_gpu"; 323 324 regulator-always-on; 324 - regulator-init-microvolt = <900000>; 325 325 regulator-initial-mode = <0x2>; 326 326 regulator-min-microvolt = <500000>; 327 327 regulator-max-microvolt = <1350000>; ··· 344 346 345 347 vdd_npu: DCDC_REG4 { 346 348 regulator-name = "vdd_npu"; 347 - regulator-init-microvolt = <900000>; 348 349 regulator-initial-mode = <0x2>; 349 350 regulator-min-microvolt = <500000>; 350 351 regulator-max-microvolt = <1350000>;
-3
arch/arm64/boot/dts/rockchip/rk3568-evb1-v10.dts
··· 293 293 regulator-name = "vdd_logic"; 294 294 regulator-always-on; 295 295 regulator-boot-on; 296 - regulator-init-microvolt = <900000>; 297 296 regulator-initial-mode = <0x2>; 298 297 regulator-min-microvolt = <500000>; 299 298 regulator-max-microvolt = <1350000>; ··· 306 307 vdd_gpu: DCDC_REG2 { 307 308 regulator-name = "vdd_gpu"; 308 309 regulator-always-on; 309 - regulator-init-microvolt = <900000>; 310 310 regulator-initial-mode = <0x2>; 311 311 regulator-min-microvolt = <500000>; 312 312 regulator-max-microvolt = <1350000>; ··· 329 331 330 332 vdd_npu: DCDC_REG4 { 331 333 regulator-name = "vdd_npu"; 332 - regulator-init-microvolt = <900000>; 333 334 regulator-initial-mode = <0x2>; 334 335 regulator-min-microvolt = <500000>; 335 336 regulator-max-microvolt = <1350000>;
-4
arch/arm64/boot/dts/rockchip/rk3568-fastrhino-r66s.dtsi
··· 173 173 regulator-name = "vdd_logic"; 174 174 regulator-always-on; 175 175 regulator-boot-on; 176 - regulator-init-microvolt = <900000>; 177 176 regulator-initial-mode = <0x2>; 178 177 regulator-min-microvolt = <500000>; 179 178 regulator-max-microvolt = <1350000>; ··· 186 187 vdd_gpu: DCDC_REG2 { 187 188 regulator-name = "vdd_gpu"; 188 189 regulator-always-on; 189 - regulator-init-microvolt = <900000>; 190 190 regulator-initial-mode = <0x2>; 191 191 regulator-min-microvolt = <500000>; 192 192 regulator-max-microvolt = <1350000>; ··· 209 211 210 212 vdd_npu: DCDC_REG4 { 211 213 regulator-name = "vdd_npu"; 212 - regulator-init-microvolt = <900000>; 213 214 regulator-initial-mode = <0x2>; 214 215 regulator-min-microvolt = <500000>; 215 216 regulator-max-microvolt = <1350000>; ··· 327 330 328 331 vcca1v8_image: LDO_REG9 { 329 332 regulator-name = "vcca1v8_image"; 330 - regulator-init-microvolt = <950000>; 331 333 regulator-min-microvolt = <950000>; 332 334 regulator-max-microvolt = <1800000>; 333 335
-3
arch/arm64/boot/dts/rockchip/rk3568-lubancat-2.dts
··· 243 243 regulator-boot-on; 244 244 regulator-min-microvolt = <500000>; 245 245 regulator-max-microvolt = <1350000>; 246 - regulator-init-microvolt = <900000>; 247 246 regulator-ramp-delay = <6001>; 248 247 regulator-initial-mode = <0x2>; 249 248 ··· 257 258 regulator-boot-on; 258 259 regulator-min-microvolt = <500000>; 259 260 regulator-max-microvolt = <1350000>; 260 - regulator-init-microvolt = <900000>; 261 261 regulator-ramp-delay = <6001>; 262 262 regulator-initial-mode = <0x2>; 263 263 ··· 282 284 regulator-boot-on; 283 285 regulator-min-microvolt = <500000>; 284 286 regulator-max-microvolt = <1350000>; 285 - regulator-init-microvolt = <900000>; 286 287 regulator-ramp-delay = <6001>; 287 288 regulator-initial-mode = <0x2>; 288 289
-3
arch/arm64/boot/dts/rockchip/rk3568-nanopi-r5s.dtsi
··· 232 232 regulator-name = "vdd_logic"; 233 233 regulator-always-on; 234 234 regulator-boot-on; 235 - regulator-init-microvolt = <900000>; 236 235 regulator-initial-mode = <0x2>; 237 236 regulator-min-microvolt = <500000>; 238 237 regulator-max-microvolt = <1350000>; ··· 245 246 vdd_gpu: DCDC_REG2 { 246 247 regulator-name = "vdd_gpu"; 247 248 regulator-always-on; 248 - regulator-init-microvolt = <900000>; 249 249 regulator-initial-mode = <0x2>; 250 250 regulator-min-microvolt = <500000>; 251 251 regulator-max-microvolt = <1350000>; ··· 268 270 269 271 vdd_npu: DCDC_REG4 { 270 272 regulator-name = "vdd_npu"; 271 - regulator-init-microvolt = <900000>; 272 273 regulator-initial-mode = <0x2>; 273 274 regulator-min-microvolt = <500000>; 274 275 regulator-max-microvolt = <1350000>;
-3
arch/arm64/boot/dts/rockchip/rk3568-odroid-m1.dts
··· 291 291 regulator-name = "vdd_logic"; 292 292 regulator-always-on; 293 293 regulator-boot-on; 294 - regulator-init-microvolt = <900000>; 295 294 regulator-initial-mode = <0x2>; 296 295 regulator-min-microvolt = <500000>; 297 296 regulator-max-microvolt = <1350000>; ··· 304 305 vdd_gpu: DCDC_REG2 { 305 306 regulator-name = "vdd_gpu"; 306 307 regulator-always-on; 307 - regulator-init-microvolt = <900000>; 308 308 regulator-initial-mode = <0x2>; 309 309 regulator-min-microvolt = <500000>; 310 310 regulator-max-microvolt = <1350000>; ··· 327 329 328 330 vdd_npu: DCDC_REG4 { 329 331 regulator-name = "vdd_npu"; 330 - regulator-init-microvolt = <900000>; 331 332 regulator-initial-mode = <0x2>; 332 333 regulator-min-microvolt = <500000>; 333 334 regulator-max-microvolt = <1350000>;
-3
arch/arm64/boot/dts/rockchip/rk3568-radxa-cm3i.dtsi
··· 163 163 regulator-name = "vdd_logic"; 164 164 regulator-always-on; 165 165 regulator-boot-on; 166 - regulator-init-microvolt = <900000>; 167 166 regulator-initial-mode = <0x2>; 168 167 regulator-min-microvolt = <500000>; 169 168 regulator-max-microvolt = <1350000>; ··· 176 177 vdd_gpu: DCDC_REG2 { 177 178 regulator-name = "vdd_gpu"; 178 179 regulator-always-on; 179 - regulator-init-microvolt = <900000>; 180 180 regulator-initial-mode = <0x2>; 181 181 regulator-min-microvolt = <500000>; 182 182 regulator-max-microvolt = <1350000>; ··· 199 201 200 202 vdd_npu: DCDC_REG4 { 201 203 regulator-name = "vdd_npu"; 202 - regulator-init-microvolt = <900000>; 203 204 regulator-initial-mode = <0x2>; 204 205 regulator-min-microvolt = <500000>; 205 206 regulator-max-microvolt = <1350000>;
-3
arch/arm64/boot/dts/rockchip/rk3568-rock-3a.dts
··· 350 350 regulator-name = "vdd_logic"; 351 351 regulator-always-on; 352 352 regulator-boot-on; 353 - regulator-init-microvolt = <900000>; 354 353 regulator-initial-mode = <0x2>; 355 354 regulator-min-microvolt = <500000>; 356 355 regulator-max-microvolt = <1350000>; ··· 363 364 vdd_gpu: DCDC_REG2 { 364 365 regulator-name = "vdd_gpu"; 365 366 regulator-always-on; 366 - regulator-init-microvolt = <900000>; 367 367 regulator-initial-mode = <0x2>; 368 368 regulator-min-microvolt = <500000>; 369 369 regulator-max-microvolt = <1350000>; ··· 386 388 387 389 vdd_npu: DCDC_REG4 { 388 390 regulator-name = "vdd_npu"; 389 - regulator-init-microvolt = <900000>; 390 391 regulator-initial-mode = <0x2>; 391 392 regulator-min-microvolt = <500000>; 392 393 regulator-max-microvolt = <1350000>;
-1
arch/arm64/boot/dts/rockchip/rk3588-rock-5b.dts
··· 337 337 regulator-boot-on; 338 338 regulator-min-microvolt = <550000>; 339 339 regulator-max-microvolt = <950000>; 340 - regulator-init-microvolt = <750000>; 341 340 regulator-ramp-delay = <12500>; 342 341 regulator-name = "vdd_vdenc_s0"; 343 342
+4 -4
arch/arm64/boot/dts/rockchip/rk3588s-indiedroid-nova.dts
··· 125 125 cpu-supply = <&vdd_cpu_lit_s0>; 126 126 }; 127 127 128 - &cpu_b0{ 128 + &cpu_b0 { 129 129 cpu-supply = <&vdd_cpu_big0_s0>; 130 130 }; 131 131 132 - &cpu_b1{ 132 + &cpu_b1 { 133 133 cpu-supply = <&vdd_cpu_big0_s0>; 134 134 }; 135 135 136 - &cpu_b2{ 136 + &cpu_b2 { 137 137 cpu-supply = <&vdd_cpu_big1_s0>; 138 138 }; 139 139 140 - &cpu_b3{ 140 + &cpu_b3 { 141 141 cpu-supply = <&vdd_cpu_big1_s0>; 142 142 }; 143 143
+2 -2
arch/arm64/include/asm/fpsimd.h
··· 356 356 return vec_max_virtualisable_vl(ARM64_VEC_SME); 357 357 } 358 358 359 - extern void sme_alloc(struct task_struct *task); 359 + extern void sme_alloc(struct task_struct *task, bool flush); 360 360 extern unsigned int sme_get_vl(void); 361 361 extern int sme_set_current_vl(unsigned long arg); 362 362 extern int sme_get_current_vl(void); ··· 388 388 static inline void sme_smstop_sm(void) { } 389 389 static inline void sme_smstop(void) { } 390 390 391 - static inline void sme_alloc(struct task_struct *task) { } 391 + static inline void sme_alloc(struct task_struct *task, bool flush) { } 392 392 static inline void sme_setup(void) { } 393 393 static inline unsigned int sme_get_vl(void) { return 0; } 394 394 static inline int sme_max_vl(void) { return 0; }
+24
arch/arm64/include/uapi/asm/bitsperlong.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + /* 3 + * Copyright (C) 2012 ARM Ltd. 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + #ifndef __ASM_BITSPERLONG_H 18 + #define __ASM_BITSPERLONG_H 19 + 20 + #define __BITS_PER_LONG 64 21 + 22 + #include <asm-generic/bitsperlong.h> 23 + 24 + #endif /* __ASM_BITSPERLONG_H */
+3 -3
arch/arm64/kernel/fpsimd.c
··· 1285 1285 * the interest of testability and predictability, the architecture 1286 1286 * guarantees that when ZA is enabled it will be zeroed. 1287 1287 */ 1288 - void sme_alloc(struct task_struct *task) 1288 + void sme_alloc(struct task_struct *task, bool flush) 1289 1289 { 1290 - if (task->thread.sme_state) { 1290 + if (task->thread.sme_state && flush) { 1291 1291 memset(task->thread.sme_state, 0, sme_state_size(task)); 1292 1292 return; 1293 1293 } ··· 1515 1515 } 1516 1516 1517 1517 sve_alloc(current, false); 1518 - sme_alloc(current); 1518 + sme_alloc(current, true); 1519 1519 if (!current->thread.sve_state || !current->thread.sme_state) { 1520 1520 force_sig(SIGKILL); 1521 1521 return;
+17 -3
arch/arm64/kernel/ptrace.c
··· 881 881 break; 882 882 case ARM64_VEC_SME: 883 883 target->thread.svcr |= SVCR_SM_MASK; 884 + 885 + /* 886 + * Disable traps and ensure there is SME storage but 887 + * preserve any currently set values in ZA/ZT. 888 + */ 889 + sme_alloc(target, false); 890 + set_tsk_thread_flag(target, TIF_SME); 884 891 break; 885 892 default: 886 893 WARN_ON_ONCE(1); ··· 1107 1100 } 1108 1101 1109 1102 /* Allocate/reinit ZA storage */ 1110 - sme_alloc(target); 1103 + sme_alloc(target, true); 1111 1104 if (!target->thread.sme_state) { 1112 1105 ret = -ENOMEM; 1113 1106 goto out; ··· 1177 1170 if (!system_supports_sme2()) 1178 1171 return -EINVAL; 1179 1172 1173 + /* Ensure SVE storage in case this is first use of SME */ 1174 + sve_alloc(target, false); 1175 + if (!target->thread.sve_state) 1176 + return -ENOMEM; 1177 + 1180 1178 if (!thread_za_enabled(&target->thread)) { 1181 - sme_alloc(target); 1179 + sme_alloc(target, true); 1182 1180 if (!target->thread.sme_state) 1183 1181 return -ENOMEM; 1184 1182 } ··· 1191 1179 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, 1192 1180 thread_zt_state(&target->thread), 1193 1181 0, ZT_SIG_REG_BYTES); 1194 - if (ret == 0) 1182 + if (ret == 0) { 1195 1183 target->thread.svcr |= SVCR_ZA_MASK; 1184 + set_tsk_thread_flag(target, TIF_SME); 1185 + } 1196 1186 1197 1187 fpsimd_flush_task_state(target); 1198 1188
+1 -1
arch/arm64/kernel/signal.c
··· 475 475 fpsimd_flush_task_state(current); 476 476 /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ 477 477 478 - sme_alloc(current); 478 + sme_alloc(current, true); 479 479 if (!current->thread.sme_state) { 480 480 current->thread.svcr &= ~SVCR_ZA_MASK; 481 481 clear_thread_flag(TIF_SME);
+3 -3
arch/powerpc/kernel/rtas_flash.c
··· 709 709 if (!rtas_validate_flash_data.buf) 710 710 return -ENOMEM; 711 711 712 - flash_block_cache = kmem_cache_create("rtas_flash_cache", 713 - RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, 714 - NULL); 712 + flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache", 713 + RTAS_BLK_SIZE, RTAS_BLK_SIZE, 714 + 0, 0, RTAS_BLK_SIZE, NULL); 715 715 if (!flash_block_cache) { 716 716 printk(KERN_ERR "%s: failed to create block cache\n", 717 717 __func__);
+17 -11
arch/riscv/Kconfig
··· 570 570 config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI 571 571 def_bool y 572 572 # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc 573 - depends on AS_IS_GNU && AS_VERSION >= 23800 573 + # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=98416dbb0a62579d4a7a4a76bab51b5b52fec2cd 574 + depends on AS_IS_GNU && AS_VERSION >= 23600 574 575 help 575 - Newer binutils versions default to ISA spec version 20191213 which 576 - moves some instructions from the I extension to the Zicsr and Zifencei 577 - extensions. 576 + Binutils-2.38 and GCC-12.1.0 bumped the default ISA spec to the newer 577 + 20191213 version, which moves some instructions from the I extension to 578 + the Zicsr and Zifencei extensions. This requires explicitly specifying 579 + Zicsr and Zifencei when binutils >= 2.38 or GCC >= 12.1.0. Zicsr 580 + and Zifencei are supported in binutils from version 2.36 onwards. 581 + To make life easier, and avoid forcing toolchains that default to a 582 + newer ISA spec to version 2.2, relax the check to binutils >= 2.36. 583 + For clang < 17 or GCC < 11.1.0, for which this is not possible, this is 584 + dealt with in CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC. 578 585 579 586 config TOOLCHAIN_NEEDS_OLD_ISA_SPEC 580 587 def_bool y 581 588 depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI 582 589 # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16 583 - depends on CC_IS_CLANG && CLANG_VERSION < 170000 590 + # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=b03be74bad08c382da47e048007a78fa3fb4ef49 591 + depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110100) 584 592 help 585 - Certain versions of clang do not support zicsr and zifencei via -march 586 - but newer versions of binutils require it for the reasons noted in the 587 - help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. 
This 588 - option causes an older ISA spec compatible with these older versions 589 - of clang to be passed to GAS, which has the same result as passing zicsr 590 - and zifencei to -march. 593 + Certain versions of clang and GCC do not support zicsr and zifencei via 594 + -march. This option causes an older ISA spec compatible with these older 595 + versions of clang and GCC to be passed to GAS, which has the same result 596 + as passing zicsr and zifencei to -march. 591 597 592 598 config FPU 593 599 bool "FPU support"
+13 -2
arch/riscv/include/asm/insn.h
··· 110 110 #define RVC_INSN_FUNCT4_OPOFF 12 111 111 #define RVC_INSN_FUNCT3_MASK GENMASK(15, 13) 112 112 #define RVC_INSN_FUNCT3_OPOFF 13 113 + #define RVC_INSN_J_RS1_MASK GENMASK(11, 7) 113 114 #define RVC_INSN_J_RS2_MASK GENMASK(6, 2) 114 115 #define RVC_INSN_OPCODE_MASK GENMASK(1, 0) 115 116 #define RVC_ENCODE_FUNCT3(f_) (RVC_FUNCT3_##f_ << RVC_INSN_FUNCT3_OPOFF) ··· 246 245 __RISCV_INSN_FUNCS(auipc, RVG_MASK_AUIPC, RVG_MATCH_AUIPC) 247 246 __RISCV_INSN_FUNCS(jalr, RVG_MASK_JALR, RVG_MATCH_JALR) 248 247 __RISCV_INSN_FUNCS(jal, RVG_MASK_JAL, RVG_MATCH_JAL) 249 - __RISCV_INSN_FUNCS(c_jr, RVC_MASK_C_JR, RVC_MATCH_C_JR) 250 - __RISCV_INSN_FUNCS(c_jalr, RVC_MASK_C_JALR, RVC_MATCH_C_JALR) 251 248 __RISCV_INSN_FUNCS(c_j, RVC_MASK_C_J, RVC_MATCH_C_J) 252 249 __RISCV_INSN_FUNCS(beq, RVG_MASK_BEQ, RVG_MATCH_BEQ) 253 250 __RISCV_INSN_FUNCS(bne, RVG_MASK_BNE, RVG_MATCH_BNE) ··· 270 271 static __always_inline bool riscv_insn_is_branch(u32 code) 271 272 { 272 273 return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_BRANCH; 274 + } 275 + 276 + static __always_inline bool riscv_insn_is_c_jr(u32 code) 277 + { 278 + return (code & RVC_MASK_C_JR) == RVC_MATCH_C_JR && 279 + (code & RVC_INSN_J_RS1_MASK) != 0; 280 + } 281 + 282 + static __always_inline bool riscv_insn_is_c_jalr(u32 code) 283 + { 284 + return (code & RVC_MASK_C_JALR) == RVC_MATCH_C_JALR && 285 + (code & RVC_INSN_J_RS1_MASK) != 0; 273 286 } 274 287 275 288 #define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+14
arch/riscv/include/uapi/asm/bitsperlong.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ 2 + /* 3 + * Copyright (C) 2012 ARM Ltd. 4 + * Copyright (C) 2015 Regents of the University of California 5 + */ 6 + 7 + #ifndef _UAPI_ASM_RISCV_BITSPERLONG_H 8 + #define _UAPI_ASM_RISCV_BITSPERLONG_H 9 + 10 + #define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8) 11 + 12 + #include <asm-generic/bitsperlong.h> 13 + 14 + #endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
+7 -1
arch/riscv/kernel/compat_vdso/Makefile
··· 11 11 COMPAT_CC := $(CC) 12 12 COMPAT_LD := $(LD) 13 13 14 - COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32 14 + # binutils 2.35 does not support the zifencei extension, but in the ISA 15 + # spec 20191213, G stands for IMAFD_ZICSR_ZIFENCEI. 16 + ifdef CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI 17 + COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32 18 + else 19 + COMPAT_CC_FLAGS := -march=rv32imafd -mabi=ilp32 20 + endif 15 21 COMPAT_LD_FLAGS := -melf32lriscv 16 22 17 23 # Disable attributes, as they're useless and break the build.
+3
arch/riscv/kernel/irq.c
··· 84 84 : [sp] "r" (sp) 85 85 : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", 86 86 "t0", "t1", "t2", "t3", "t4", "t5", "t6", 87 + #ifndef CONFIG_FRAME_POINTER 88 + "s0", 89 + #endif 87 90 "memory"); 88 91 } else 89 92 #endif
+6 -3
arch/riscv/kernel/traps.c
··· 297 297 asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs) 298 298 { 299 299 if (user_mode(regs)) { 300 - ulong syscall = regs->a7; 300 + long syscall = regs->a7; 301 301 302 302 regs->epc += 4; 303 303 regs->orig_a0 = regs->a0; ··· 306 306 307 307 syscall = syscall_enter_from_user_mode(regs, syscall); 308 308 309 - if (syscall < NR_syscalls) 309 + if (syscall >= 0 && syscall < NR_syscalls) 310 310 syscall_handler(regs, syscall); 311 - else 311 + else if (syscall != -1) 312 312 regs->a0 = -ENOSYS; 313 313 314 314 syscall_exit_to_user_mode(regs); ··· 372 372 : [sp] "r" (sp), [regs] "r" (regs) 373 373 : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", 374 374 "t0", "t1", "t2", "t3", "t4", "t5", "t6", 375 + #ifndef CONFIG_FRAME_POINTER 376 + "s0", 377 + #endif 375 378 "memory"); 376 379 } else 377 380 #endif
+7 -4
arch/riscv/lib/uaccess.S
··· 17 17 li t6, SR_SUM 18 18 csrs CSR_STATUS, t6 19 19 20 - /* Save for return value */ 21 - mv t5, a2 20 + /* 21 + * Save the terminal address which will be used to compute the number 22 + * of bytes copied in case of a fixup exception. 23 + */ 24 + add t5, a0, a2 22 25 23 26 /* 24 27 * Register allocation for code below: ··· 179 176 10: 180 177 /* Disable access to user memory */ 181 178 csrc CSR_STATUS, t6 182 - mv a0, t5 179 + sub a0, t5, a0 183 180 ret 184 181 ENDPROC(__asm_copy_to_user) 185 182 ENDPROC(__asm_copy_from_user) ··· 231 228 11: 232 229 /* Disable access to user memory */ 233 230 csrc CSR_STATUS, t6 234 - mv a0, a1 231 + sub a0, a3, a0 235 232 ret 236 233 ENDPROC(__clear_user) 237 234 EXPORT_SYMBOL(__clear_user)
+1
arch/x86/include/asm/entry-common.h
··· 92 92 static __always_inline void arch_exit_to_user_mode(void) 93 93 { 94 94 mds_user_clear_cpu_buffers(); 95 + amd_clear_divider(); 95 96 } 96 97 #define arch_exit_to_user_mode arch_exit_to_user_mode 97 98
+27 -22
arch/x86/include/asm/nospec-branch.h
··· 272 272 .endm 273 273 274 274 #ifdef CONFIG_CPU_UNRET_ENTRY 275 - #define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" 275 + #define CALL_UNTRAIN_RET "call entry_untrain_ret" 276 276 #else 277 - #define CALL_ZEN_UNTRAIN_RET "" 277 + #define CALL_UNTRAIN_RET "" 278 278 #endif 279 279 280 280 /* ··· 282 282 * return thunk isn't mapped into the userspace tables (then again, AMD 283 283 * typically has NO_MELTDOWN). 284 284 * 285 - * While zen_untrain_ret() doesn't clobber anything but requires stack, 285 + * While retbleed_untrain_ret() doesn't clobber anything but requires stack, 286 286 * entry_ibpb() will clobber AX, CX, DX. 287 287 * 288 288 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point ··· 293 293 defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) 294 294 VALIDATE_UNRET_END 295 295 ALTERNATIVE_3 "", \ 296 - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ 296 + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ 297 297 "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ 298 298 __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH 299 299 #endif 300 + .endm 300 301 301 - #ifdef CONFIG_CPU_SRSO 302 - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ 303 - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS 302 + .macro UNTRAIN_RET_VM 303 + #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ 304 + defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) 305 + VALIDATE_UNRET_END 306 + ALTERNATIVE_3 "", \ 307 + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ 308 + "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \ 309 + __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH 304 310 #endif 305 311 .endm 306 312 ··· 315 309 defined(CONFIG_CALL_DEPTH_TRACKING) 316 310 VALIDATE_UNRET_END 317 311 ALTERNATIVE_3 "", \ 318 - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ 312 + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ 319 313 "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ 320 314 __stringify(RESET_CALL_DEPTH_FROM_CALL), 
X86_FEATURE_CALL_DEPTH 321 - #endif 322 - 323 - #ifdef CONFIG_CPU_SRSO 324 - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ 325 - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS 326 315 #endif 327 316 .endm 328 317 ··· 342 341 extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; 343 342 extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; 344 343 344 + #ifdef CONFIG_RETHUNK 345 345 extern void __x86_return_thunk(void); 346 - extern void zen_untrain_ret(void); 346 + #else 347 + static inline void __x86_return_thunk(void) {} 348 + #endif 349 + 350 + extern void retbleed_return_thunk(void); 351 + extern void srso_return_thunk(void); 352 + extern void srso_alias_return_thunk(void); 353 + 354 + extern void retbleed_untrain_ret(void); 347 355 extern void srso_untrain_ret(void); 348 - extern void srso_untrain_ret_alias(void); 356 + extern void srso_alias_untrain_ret(void); 357 + 358 + extern void entry_untrain_ret(void); 349 359 extern void entry_ibpb(void); 350 360 351 - #ifdef CONFIG_CALL_THUNKS 352 361 extern void (*x86_return_thunk)(void); 353 - #else 354 - #define x86_return_thunk (&__x86_return_thunk) 355 - #endif 356 362 357 363 #ifdef CONFIG_CALL_DEPTH_TRACKING 358 364 extern void __x86_return_skl(void); ··· 485 477 SPEC_STORE_BYPASS_PRCTL, 486 478 SPEC_STORE_BYPASS_SECCOMP, 487 479 }; 488 - 489 - extern char __indirect_thunk_start[]; 490 - extern char __indirect_thunk_end[]; 491 480 492 481 static __always_inline 493 482 void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
-4
arch/x86/kernel/alternative.c
··· 687 687 688 688 #ifdef CONFIG_RETHUNK 689 689 690 - #ifdef CONFIG_CALL_THUNKS 691 - void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; 692 - #endif 693 - 694 690 /* 695 691 * Rewrite the compiler generated return thunk tail-calls. 696 692 *
+1
arch/x86/kernel/cpu/amd.c
··· 1329 1329 asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) 1330 1330 :: "a" (0), "d" (0), "r" (1)); 1331 1331 } 1332 + EXPORT_SYMBOL_GPL(amd_clear_divider);
+23 -4
arch/x86/kernel/cpu/bugs.c
··· 63 63 64 64 static DEFINE_MUTEX(spec_ctrl_mutex); 65 65 66 + void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; 67 + 66 68 /* Update SPEC_CTRL MSR and its cached copy unconditionally */ 67 69 static void update_spec_ctrl(u64 val) 68 70 { ··· 167 165 md_clear_select_mitigation(); 168 166 srbds_select_mitigation(); 169 167 l1d_flush_select_mitigation(); 168 + 169 + /* 170 + * srso_select_mitigation() depends and must run after 171 + * retbleed_select_mitigation(). 172 + */ 170 173 srso_select_mitigation(); 171 174 gds_select_mitigation(); 172 175 } ··· 1042 1035 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 1043 1036 setup_force_cpu_cap(X86_FEATURE_UNRET); 1044 1037 1038 + if (IS_ENABLED(CONFIG_RETHUNK)) 1039 + x86_return_thunk = retbleed_return_thunk; 1040 + 1045 1041 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && 1046 1042 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) 1047 1043 pr_err(RETBLEED_UNTRAIN_MSG); ··· 1054 1044 1055 1045 case RETBLEED_MITIGATION_IBPB: 1056 1046 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); 1047 + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); 1057 1048 mitigate_smt = true; 1058 1049 break; 1059 1050 ··· 2428 2417 * Zen1/2 with SMT off aren't vulnerable after the right 2429 2418 * IBPB microcode has been applied. 2430 2419 */ 2431 - if ((boot_cpu_data.x86 < 0x19) && 2432 - (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) 2420 + if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { 2433 2421 setup_force_cpu_cap(X86_FEATURE_SRSO_NO); 2422 + return; 2423 + } 2434 2424 } 2435 2425 2436 2426 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { ··· 2460 2448 * like ftrace, static_call, etc. 
2461 2449 */ 2462 2450 setup_force_cpu_cap(X86_FEATURE_RETHUNK); 2451 + setup_force_cpu_cap(X86_FEATURE_UNRET); 2463 2452 2464 - if (boot_cpu_data.x86 == 0x19) 2453 + if (boot_cpu_data.x86 == 0x19) { 2465 2454 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); 2466 - else 2455 + x86_return_thunk = srso_alias_return_thunk; 2456 + } else { 2467 2457 setup_force_cpu_cap(X86_FEATURE_SRSO); 2458 + x86_return_thunk = srso_return_thunk; 2459 + } 2468 2460 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 2469 2461 } else { 2470 2462 pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); ··· 2712 2696 2713 2697 static ssize_t srso_show_state(char *buf) 2714 2698 { 2699 + if (boot_cpu_has(X86_FEATURE_SRSO_NO)) 2700 + return sysfs_emit(buf, "Mitigation: SMT disabled\n"); 2701 + 2715 2702 return sysfs_emit(buf, "%s%s\n", 2716 2703 srso_strings[srso_mitigation], 2717 2704 (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
+16 -24
arch/x86/kernel/kprobes/opt.c
··· 226 226 } 227 227 228 228 /* Check whether insn is indirect jump */ 229 - static int __insn_is_indirect_jump(struct insn *insn) 229 + static int insn_is_indirect_jump(struct insn *insn) 230 230 { 231 231 return ((insn->opcode.bytes[0] == 0xff && 232 232 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ ··· 258 258 target = (unsigned long)insn->next_byte + insn->immediate.value; 259 259 260 260 return (start <= target && target <= start + len); 261 - } 262 - 263 - static int insn_is_indirect_jump(struct insn *insn) 264 - { 265 - int ret = __insn_is_indirect_jump(insn); 266 - 267 - #ifdef CONFIG_RETPOLINE 268 - /* 269 - * Jump to x86_indirect_thunk_* is treated as an indirect jump. 270 - * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with 271 - * older gcc may use indirect jump. So we add this check instead of 272 - * replace indirect-jump check. 273 - */ 274 - if (!ret) 275 - ret = insn_jump_into_range(insn, 276 - (unsigned long)__indirect_thunk_start, 277 - (unsigned long)__indirect_thunk_end - 278 - (unsigned long)__indirect_thunk_start); 279 - #endif 280 - return ret; 281 261 } 282 262 283 263 /* Decode whole function to ensure any instructions don't jump into target */ ··· 314 334 /* Recover address */ 315 335 insn.kaddr = (void *)addr; 316 336 insn.next_byte = (void *)(addr + insn.length); 317 - /* Check any instructions don't jump into target */ 318 - if (insn_is_indirect_jump(&insn) || 319 - insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, 337 + /* 338 + * Check any instructions don't jump into target, indirectly or 339 + * directly. 340 + * 341 + * The indirect case is present to handle a code with jump 342 + * tables. When the kernel uses retpolines, the check should in 343 + * theory additionally look for jumps to indirect thunks. 344 + * However, the kernel built with retpolines or IBT has jump 345 + * tables disabled so the check can be skipped altogether. 
346 + */ 347 + if (!IS_ENABLED(CONFIG_RETPOLINE) && 348 + !IS_ENABLED(CONFIG_X86_KERNEL_IBT) && 349 + insn_is_indirect_jump(&insn)) 350 + return 0; 351 + if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, 320 352 DISP32_SIZE)) 321 353 return 0; 322 354 addr += insn.length;
+13
arch/x86/kernel/static_call.c
··· 186 186 */ 187 187 bool __static_call_fixup(void *tramp, u8 op, void *dest) 188 188 { 189 + unsigned long addr = (unsigned long)tramp; 190 + /* 191 + * Not all .return_sites are a static_call trampoline (most are not). 192 + * Check if the 3 bytes after the return are still kernel text, if not, 193 + * then this definitely is not a trampoline and we need not worry 194 + * further. 195 + * 196 + * This avoids the memcmp() below tripping over pagefaults etc.. 197 + */ 198 + if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) && 199 + !kernel_text_address(addr + 7)) 200 + return false; 201 + 189 202 if (memcmp(tramp+5, tramp_ud, 3)) { 190 203 /* Not a trampoline site, not our problem. */ 191 204 return false;
-2
arch/x86/kernel/traps.c
··· 206 206 { 207 207 do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, 208 208 FPE_INTDIV, error_get_trap_addr(regs)); 209 - 210 - amd_clear_divider(); 211 209 } 212 210 213 211 DEFINE_IDTENTRY(exc_overflow)
+9 -11
arch/x86/kernel/vmlinux.lds.S
··· 133 133 KPROBES_TEXT 134 134 SOFTIRQENTRY_TEXT 135 135 #ifdef CONFIG_RETPOLINE 136 - __indirect_thunk_start = .; 137 - *(.text.__x86.indirect_thunk) 138 - *(.text.__x86.return_thunk) 139 - __indirect_thunk_end = .; 136 + *(.text..__x86.indirect_thunk) 137 + *(.text..__x86.return_thunk) 140 138 #endif 141 139 STATIC_CALL_TEXT 142 140 143 141 ALIGN_ENTRY_TEXT_BEGIN 144 142 #ifdef CONFIG_CPU_SRSO 145 - *(.text.__x86.rethunk_untrain) 143 + *(.text..__x86.rethunk_untrain) 146 144 #endif 147 145 148 146 ENTRY_TEXT 149 147 150 148 #ifdef CONFIG_CPU_SRSO 151 149 /* 152 - * See the comment above srso_untrain_ret_alias()'s 150 + * See the comment above srso_alias_untrain_ret()'s 153 151 * definition. 154 152 */ 155 - . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); 156 - *(.text.__x86.rethunk_safe) 153 + . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); 154 + *(.text..__x86.rethunk_safe) 157 155 #endif 158 156 ALIGN_ENTRY_TEXT_END 159 157 *(.gnu.warning) ··· 521 523 #endif 522 524 523 525 #ifdef CONFIG_RETHUNK 524 - . = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); 526 + . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); 525 527 . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); 526 528 #endif 527 529 ··· 536 538 * Instead do: (A | B) - (A & B) in order to compute the XOR 537 539 * of the two function addresses: 538 540 */ 539 - . = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) - 540 - (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), 541 + . = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - 542 + (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), 541 543 "SRSO function pair won't alias"); 542 544 #endif 543 545
+2
arch/x86/kvm/svm/svm.c
··· 4006 4006 4007 4007 guest_state_enter_irqoff(); 4008 4008 4009 + amd_clear_divider(); 4010 + 4009 4011 if (sev_es_guest(vcpu->kvm)) 4010 4012 __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); 4011 4013 else
+2 -5
arch/x86/kvm/svm/vmenter.S
··· 222 222 * because interrupt handlers won't sanitize 'ret' if the return is 223 223 * from the kernel. 224 224 */ 225 - UNTRAIN_RET 226 - 227 - /* SRSO */ 228 - ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT 225 + UNTRAIN_RET_VM 229 226 230 227 /* 231 228 * Clear all general purpose registers except RSP and RAX to prevent ··· 359 362 * because interrupt handlers won't sanitize RET if the return is 360 363 * from the kernel. 361 364 */ 362 - UNTRAIN_RET 365 + UNTRAIN_RET_VM 363 366 364 367 /* "Pop" @spec_ctrl_intercepted. */ 365 368 pop %_ASM_BX
+97 -44
arch/x86/lib/retpoline.S
··· 13 13 #include <asm/frame.h> 14 14 #include <asm/nops.h> 15 15 16 - .section .text.__x86.indirect_thunk 16 + .section .text..__x86.indirect_thunk 17 17 18 18 19 19 .macro POLINE reg ··· 133 133 #ifdef CONFIG_RETHUNK 134 134 135 135 /* 136 - * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at 136 + * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at 137 137 * special addresses: 138 138 * 139 - * - srso_untrain_ret_alias() is 2M aligned 140 - * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 139 + * - srso_alias_untrain_ret() is 2M aligned 140 + * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 141 141 * and 20 in its virtual address are set (while those bits in the 142 - * srso_untrain_ret_alias() function are cleared). 142 + * srso_alias_untrain_ret() function are cleared). 143 143 * 144 144 * This guarantees that those two addresses will alias in the branch 145 145 * target buffer of Zen3/4 generations, leading to any potential 146 146 * poisoned entries at that BTB slot to get evicted. 147 147 * 148 - * As a result, srso_safe_ret_alias() becomes a safe return. 148 + * As a result, srso_alias_safe_ret() becomes a safe return. 149 149 */ 150 150 #ifdef CONFIG_CPU_SRSO 151 - .section .text.__x86.rethunk_untrain 151 + .section .text..__x86.rethunk_untrain 152 152 153 - SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) 153 + SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) 154 + UNWIND_HINT_FUNC 154 155 ANNOTATE_NOENDBR 155 156 ASM_NOP2 156 157 lfence 157 - jmp __x86_return_thunk 158 - SYM_FUNC_END(srso_untrain_ret_alias) 159 - __EXPORT_THUNK(srso_untrain_ret_alias) 158 + jmp srso_alias_return_thunk 159 + SYM_FUNC_END(srso_alias_untrain_ret) 160 + __EXPORT_THUNK(srso_alias_untrain_ret) 160 161 161 - .section .text.__x86.rethunk_safe 162 - #endif 163 - 164 - /* Needs a definition for the __x86_return_thunk alternative below. 
*/ 165 - SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) 166 - #ifdef CONFIG_CPU_SRSO 167 - add $8, %_ASM_SP 168 - UNWIND_HINT_FUNC 169 - #endif 162 + .section .text..__x86.rethunk_safe 163 + #else 164 + /* dummy definition for alternatives */ 165 + SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) 170 166 ANNOTATE_UNRET_SAFE 171 167 ret 172 168 int3 173 - SYM_FUNC_END(srso_safe_ret_alias) 169 + SYM_FUNC_END(srso_alias_untrain_ret) 170 + #endif 174 171 175 - .section .text.__x86.return_thunk 172 + SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) 173 + lea 8(%_ASM_SP), %_ASM_SP 174 + UNWIND_HINT_FUNC 175 + ANNOTATE_UNRET_SAFE 176 + ret 177 + int3 178 + SYM_FUNC_END(srso_alias_safe_ret) 179 + 180 + .section .text..__x86.return_thunk 181 + 182 + SYM_CODE_START(srso_alias_return_thunk) 183 + UNWIND_HINT_FUNC 184 + ANNOTATE_NOENDBR 185 + call srso_alias_safe_ret 186 + ud2 187 + SYM_CODE_END(srso_alias_return_thunk) 188 + 189 + /* 190 + * Some generic notes on the untraining sequences: 191 + * 192 + * They are interchangeable when it comes to flushing potentially wrong 193 + * RET predictions from the BTB. 194 + * 195 + * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the 196 + * Retbleed sequence because the return sequence done there 197 + * (srso_safe_ret()) is longer and the return sequence must fully nest 198 + * (end before) the untraining sequence. Therefore, the untraining 199 + * sequence must fully overlap the return sequence. 200 + * 201 + * Regarding alignment - the instructions which need to be untrained, 202 + * must all start at a cacheline boundary for Zen1/2 generations. That 203 + * is, instruction sequences starting at srso_safe_ret() and 204 + * the respective instruction sequences at retbleed_return_thunk() 205 + * must start at a cacheline boundary. 
206 + */ 176 207 177 208 /* 178 209 * Safety details here pertain to the AMD Zen{1,2} microarchitecture: 179 - * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for 210 + * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for 180 211 * alignment within the BTB. 181 - * 2) The instruction at zen_untrain_ret must contain, and not 212 + * 2) The instruction at retbleed_untrain_ret must contain, and not 182 213 * end with, the 0xc3 byte of the RET. 183 214 * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread 184 215 * from re-poisioning the BTB prediction. 185 216 */ 186 217 .align 64 187 - .skip 64 - (__ret - zen_untrain_ret), 0xcc 188 - SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) 218 + .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc 219 + SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) 189 220 ANNOTATE_NOENDBR 190 221 /* 191 - * As executed from zen_untrain_ret, this is: 222 + * As executed from retbleed_untrain_ret, this is: 192 223 * 193 224 * TEST $0xcc, %bl 194 225 * LFENCE 195 - * JMP __x86_return_thunk 226 + * JMP retbleed_return_thunk 196 227 * 197 228 * Executing the TEST instruction has a side effect of evicting any BTB 198 229 * prediction (potentially attacker controlled) attached to the RET, as 199 - * __x86_return_thunk + 1 isn't an instruction boundary at the moment. 230 + * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. 200 231 */ 201 232 .byte 0xf6 202 233 203 234 /* 204 - * As executed from __x86_return_thunk, this is a plain RET. 235 + * As executed from retbleed_return_thunk, this is a plain RET. 205 236 * 206 237 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. 207 238 * ··· 244 213 * With SMT enabled and STIBP active, a sibling thread cannot poison 245 214 * RET's prediction to a type of its choice, but can evict the 246 215 * prediction due to competitive sharing. 
If the prediction is 247 - * evicted, __x86_return_thunk will suffer Straight Line Speculation 216 + * evicted, retbleed_return_thunk will suffer Straight Line Speculation 248 217 * which will be contained safely by the INT3. 249 218 */ 250 - SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) 219 + SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) 251 220 ret 252 221 int3 253 - SYM_CODE_END(__ret) 222 + SYM_CODE_END(retbleed_return_thunk) 254 223 255 224 /* 256 225 * Ensure the TEST decoding / BTB invalidation is complete. ··· 261 230 * Jump back and execute the RET in the middle of the TEST instruction. 262 231 * INT3 is for SLS protection. 263 232 */ 264 - jmp __ret 233 + jmp retbleed_return_thunk 265 234 int3 266 - SYM_FUNC_END(zen_untrain_ret) 267 - __EXPORT_THUNK(zen_untrain_ret) 235 + SYM_FUNC_END(retbleed_untrain_ret) 236 + __EXPORT_THUNK(retbleed_untrain_ret) 268 237 269 238 /* 270 - * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() 239 + * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() 271 240 * above. On kernel entry, srso_untrain_ret() is executed which is a 272 241 * 273 - * movabs $0xccccccc308c48348,%rax 242 + * movabs $0xccccc30824648d48,%rax 274 243 * 275 244 * and when the return thunk executes the inner label srso_safe_ret() 276 245 * later, it is a stack manipulation and a RET which is mispredicted and ··· 282 251 ANNOTATE_NOENDBR 283 252 .byte 0x48, 0xb8 284 253 254 + /* 255 + * This forces the function return instruction to speculate into a trap 256 + * (UD2 in srso_return_thunk() below). This RET will then mispredict 257 + * and execution will continue at the return site read from the top of 258 + * the stack. 
259 + */ 285 260 SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) 286 - add $8, %_ASM_SP 261 + lea 8(%_ASM_SP), %_ASM_SP 287 262 ret 288 263 int3 289 264 int3 290 - int3 265 + /* end of movabs */ 291 266 lfence 292 267 call srso_safe_ret 293 - int3 268 + ud2 294 269 SYM_CODE_END(srso_safe_ret) 295 270 SYM_FUNC_END(srso_untrain_ret) 296 271 __EXPORT_THUNK(srso_untrain_ret) 297 272 298 - SYM_FUNC_START(__x86_return_thunk) 299 - ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ 300 - "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS 273 + SYM_CODE_START(srso_return_thunk) 274 + UNWIND_HINT_FUNC 275 + ANNOTATE_NOENDBR 276 + call srso_safe_ret 277 + ud2 278 + SYM_CODE_END(srso_return_thunk) 279 + 280 + SYM_FUNC_START(entry_untrain_ret) 281 + ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ 282 + "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ 283 + "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS 284 + SYM_FUNC_END(entry_untrain_ret) 285 + __EXPORT_THUNK(entry_untrain_ret) 286 + 287 + SYM_CODE_START(__x86_return_thunk) 288 + UNWIND_HINT_FUNC 289 + ANNOTATE_NOENDBR 290 + ANNOTATE_UNRET_SAFE 291 + ret 301 292 int3 302 293 SYM_CODE_END(__x86_return_thunk) 303 294 EXPORT_SYMBOL(__x86_return_thunk)
+2
block/blk-cgroup.c
··· 136 136 blkcg_policy[i]->pd_free_fn(blkg->pd[i]); 137 137 if (blkg->parent) 138 138 blkg_put(blkg->parent); 139 + spin_lock_irq(&q->queue_lock); 139 140 list_del_init(&blkg->q_node); 141 + spin_unlock_irq(&q->queue_lock); 140 142 mutex_unlock(&q->blkcg_mutex); 141 143 142 144 blk_put_queue(q);
+23 -13
block/blk-crypto-fallback.c
··· 78 78 struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX]; 79 79 } *blk_crypto_keyslots; 80 80 81 - static struct blk_crypto_profile blk_crypto_fallback_profile; 81 + static struct blk_crypto_profile *blk_crypto_fallback_profile; 82 82 static struct workqueue_struct *blk_crypto_wq; 83 83 static mempool_t *blk_crypto_bounce_page_pool; 84 84 static struct bio_set crypto_bio_split; ··· 292 292 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for 293 293 * this bio's algorithm and key. 294 294 */ 295 - blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile, 295 + blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile, 296 296 bc->bc_key, &slot); 297 297 if (blk_st != BLK_STS_OK) { 298 298 src_bio->bi_status = blk_st; ··· 395 395 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for 396 396 * this bio's algorithm and key. 397 397 */ 398 - blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile, 398 + blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile, 399 399 bc->bc_key, &slot); 400 400 if (blk_st != BLK_STS_OK) { 401 401 bio->bi_status = blk_st; ··· 499 499 return false; 500 500 } 501 501 502 - if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile, 502 + if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile, 503 503 &bc->bc_key->crypto_cfg)) { 504 504 bio->bi_status = BLK_STS_NOTSUPP; 505 505 return false; ··· 526 526 527 527 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key) 528 528 { 529 - return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key); 529 + return __blk_crypto_evict_key(blk_crypto_fallback_profile, key); 530 530 } 531 531 532 532 static bool blk_crypto_fallback_inited; ··· 534 534 { 535 535 int i; 536 536 int err; 537 - struct blk_crypto_profile *profile = &blk_crypto_fallback_profile; 538 537 539 538 if (blk_crypto_fallback_inited) 540 539 return 0; ··· 544 545 if (err) 545 546 goto out; 546 547 547 - err = blk_crypto_profile_init(profile, 
blk_crypto_num_keyslots); 548 - if (err) 548 + /* Dynamic allocation is needed because of lockdep_register_key(). */ 549 + blk_crypto_fallback_profile = 550 + kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL); 551 + if (!blk_crypto_fallback_profile) { 552 + err = -ENOMEM; 549 553 goto fail_free_bioset; 554 + } 555 + 556 + err = blk_crypto_profile_init(blk_crypto_fallback_profile, 557 + blk_crypto_num_keyslots); 558 + if (err) 559 + goto fail_free_profile; 550 560 err = -ENOMEM; 551 561 552 - profile->ll_ops = blk_crypto_fallback_ll_ops; 553 - profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; 562 + blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops; 563 + blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE; 554 564 555 565 /* All blk-crypto modes have a crypto API fallback. */ 556 566 for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) 557 - profile->modes_supported[i] = 0xFFFFFFFF; 558 - profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; 567 + blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF; 568 + blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0; 559 569 560 570 blk_crypto_wq = alloc_workqueue("blk_crypto_wq", 561 571 WQ_UNBOUND | WQ_HIGHPRI | ··· 605 597 fail_free_wq: 606 598 destroy_workqueue(blk_crypto_wq); 607 599 fail_destroy_profile: 608 - blk_crypto_profile_destroy(profile); 600 + blk_crypto_profile_destroy(blk_crypto_fallback_profile); 601 + fail_free_profile: 602 + kfree(blk_crypto_fallback_profile); 609 603 fail_free_bioset: 610 604 bioset_exit(&crypto_bio_split); 611 605 out:
+20 -3
block/blk-mq.c
··· 681 681 } 682 682 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 683 683 684 + static void blk_mq_finish_request(struct request *rq) 685 + { 686 + struct request_queue *q = rq->q; 687 + 688 + if (rq->rq_flags & RQF_USE_SCHED) { 689 + q->elevator->type->ops.finish_request(rq); 690 + /* 691 + * For postflush request that may need to be 692 + * completed twice, we should clear this flag 693 + * to avoid double finish_request() on the rq. 694 + */ 695 + rq->rq_flags &= ~RQF_USE_SCHED; 696 + } 697 + } 698 + 684 699 static void __blk_mq_free_request(struct request *rq) 685 700 { 686 701 struct request_queue *q = rq->q; ··· 722 707 { 723 708 struct request_queue *q = rq->q; 724 709 725 - if ((rq->rq_flags & RQF_USE_SCHED) && 726 - q->elevator->type->ops.finish_request) 727 - q->elevator->type->ops.finish_request(rq); 710 + blk_mq_finish_request(rq); 728 711 729 712 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) 730 713 laptop_io_completion(q->disk->bdi); ··· 1033 1020 if (blk_mq_need_time_stamp(rq)) 1034 1021 __blk_mq_end_request_acct(rq, ktime_get_ns()); 1035 1022 1023 + blk_mq_finish_request(rq); 1024 + 1036 1025 if (rq->end_io) { 1037 1026 rq_qos_done(rq->q, rq); 1038 1027 if (rq->end_io(rq, error) == RQ_END_IO_FREE) ··· 1088 1073 blk_complete_request(rq); 1089 1074 if (iob->need_ts) 1090 1075 __blk_mq_end_request_acct(rq, now); 1076 + 1077 + blk_mq_finish_request(rq); 1091 1078 1092 1079 rq_qos_done(rq->q, rq); 1093 1080
+3
block/elevator.c
··· 499 499 500 500 int elv_register(struct elevator_type *e) 501 501 { 502 + /* finish request is mandatory */ 503 + if (WARN_ON_ONCE(!e->ops.finish_request)) 504 + return -EINVAL; 502 505 /* insert_requests and dispatch_request are mandatory */ 503 506 if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request)) 504 507 return -EINVAL;
+2 -2
crypto/af_alg.c
··· 1241 1241 return -ENOMEM; 1242 1242 } 1243 1243 1244 + rsgl->sgl.need_unpin = 1245 + iov_iter_extract_will_pin(&msg->msg_iter); 1244 1246 rsgl->sgl.sgt.sgl = rsgl->sgl.sgl; 1245 1247 rsgl->sgl.sgt.nents = 0; 1246 1248 rsgl->sgl.sgt.orig_nents = 0; ··· 1257 1255 } 1258 1256 1259 1257 sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1); 1260 - rsgl->sgl.need_unpin = 1261 - iov_iter_extract_will_pin(&msg->msg_iter); 1262 1258 1263 1259 /* chain the new scatterlist with previous one */ 1264 1260 if (areq->last_rsgl)
+5 -1
drivers/acpi/resource.c
··· 501 501 static const struct dmi_system_id pcspecialist_laptop[] = { 502 502 { 503 503 .ident = "PCSpecialist Elimina Pro 16 M", 504 + /* 505 + * Some models have product-name "Elimina Pro 16 M", 506 + * others "GM6BGEQ". Match on board-name to match both. 507 + */ 504 508 .matches = { 505 509 DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"), 506 - DMI_MATCH(DMI_PRODUCT_NAME, "Elimina Pro 16 M"), 510 + DMI_MATCH(DMI_BOARD_NAME, "GM6BGEQ"), 507 511 }, 508 512 }, 509 513 { }
+1 -1
drivers/block/rnbd/rnbd-clt-sysfs.c
··· 25 25 26 26 static struct device *rnbd_dev; 27 27 static const struct class rnbd_dev_class = { 28 - .name = "rnbd_client", 28 + .name = "rnbd-client", 29 29 }; 30 30 static struct kobject *rnbd_devs_kobj; 31 31
+2
drivers/bus/ti-sysc.c
··· 2142 2142 sysc_val = sysc_read_sysconfig(ddata); 2143 2143 sysc_val |= sysc_mask; 2144 2144 sysc_write(ddata, sysc_offset, sysc_val); 2145 + /* Flush posted write */ 2146 + sysc_val = sysc_read_sysconfig(ddata); 2145 2147 } 2146 2148 2147 2149 if (ddata->cfg.srst_udelay)
+2 -2
drivers/crypto/caam/ctrl.c
··· 382 382 val = ent_delay; 383 383 /* min. freq. count, equal to 1/4 of the entropy sample length */ 384 384 wr_reg32(&r4tst->rtfrqmin, val >> 2); 385 - /* max. freq. count, equal to 16 times the entropy sample length */ 386 - wr_reg32(&r4tst->rtfrqmax, val << 4); 385 + /* disable maximum frequency count */ 386 + wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE); 387 387 } 388 388 389 389 wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) |
+5 -2
drivers/gpio/gpiolib-sysfs.c
··· 515 515 * they may be undone on its behalf too. 516 516 */ 517 517 if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) { 518 - status = 0; 518 + gpiod_unexport(desc); 519 519 gpiod_free(desc); 520 + status = 0; 520 521 } 521 522 done: 522 523 if (status) ··· 782 781 mutex_unlock(&sysfs_lock); 783 782 784 783 /* unregister gpiod class devices owned by sysfs */ 785 - for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) 784 + for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) { 785 + gpiod_unexport(desc); 786 786 gpiod_free(desc); 787 + } 787 788 } 788 789 789 790 static int __init gpiolib_sysfs_init(void)
+11 -5
drivers/gpio/gpiolib.c
··· 2167 2167 2168 2168 void gpiod_free(struct gpio_desc *desc) 2169 2169 { 2170 - if (desc && desc->gdev && gpiod_free_commit(desc)) { 2171 - module_put(desc->gdev->owner); 2172 - gpio_device_put(desc->gdev); 2173 - } else { 2170 + /* 2171 + * We must not use VALIDATE_DESC_VOID() as the underlying gdev->chip 2172 + * may already be NULL but we still want to put the references. 2173 + */ 2174 + if (!desc) 2175 + return; 2176 + 2177 + if (!gpiod_free_commit(desc)) 2174 2178 WARN_ON(extra_checks); 2175 - } 2179 + 2180 + module_put(desc->gdev->owner); 2181 + gpio_device_put(desc->gdev); 2176 2182 } 2177 2183 2178 2184 /**
+7 -4
drivers/i2c/busses/i2c-bcm-iproc.c
··· 233 233 u32 offset) 234 234 { 235 235 u32 val; 236 + unsigned long flags; 236 237 237 238 if (iproc_i2c->idm_base) { 238 - spin_lock(&iproc_i2c->idm_lock); 239 + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); 239 240 writel(iproc_i2c->ape_addr_mask, 240 241 iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); 241 242 val = readl(iproc_i2c->base + offset); 242 - spin_unlock(&iproc_i2c->idm_lock); 243 + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); 243 244 } else { 244 245 val = readl(iproc_i2c->base + offset); 245 246 } ··· 251 250 static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c, 252 251 u32 offset, u32 val) 253 252 { 253 + unsigned long flags; 254 + 254 255 if (iproc_i2c->idm_base) { 255 - spin_lock(&iproc_i2c->idm_lock); 256 + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); 256 257 writel(iproc_i2c->ape_addr_mask, 257 258 iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); 258 259 writel(val, iproc_i2c->base + offset); 259 - spin_unlock(&iproc_i2c->idm_lock); 260 + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); 260 261 } else { 261 262 writel(val, iproc_i2c->base + offset); 262 263 }
+14 -2
drivers/i2c/busses/i2c-designware-master.c
··· 588 588 u32 flags = msgs[dev->msg_read_idx].flags; 589 589 590 590 regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); 591 + tmp &= DW_IC_DATA_CMD_DAT; 591 592 /* Ensure length byte is a valid value */ 592 - if (flags & I2C_M_RECV_LEN && 593 - (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) { 593 + if (flags & I2C_M_RECV_LEN) { 594 + /* 595 + * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be 596 + * detected from the registers, the controller can be 597 + * disabled if the STOP bit is set. But it is only set 598 + * after receiving block data response length in 599 + * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read 600 + * another byte with STOP bit set when the block data 601 + * response length is invalid to complete the transaction. 602 + */ 603 + if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX) 604 + tmp = 1; 605 + 594 606 len = i2c_dw_recv_len(dev, tmp); 595 607 } 596 608 *buf++ = tmp;
+8
drivers/i2c/busses/i2c-hisi.c
··· 330 330 struct hisi_i2c_controller *ctlr = context; 331 331 u32 int_stat; 332 332 333 + /* 334 + * Don't handle the interrupt if cltr->completion is NULL. We may 335 + * reach here because the interrupt is spurious or the transfer is 336 + * started by another port (e.g. firmware) rather than us. 337 + */ 338 + if (!ctlr->completion) 339 + return IRQ_NONE; 340 + 333 341 int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT); 334 342 hisi_i2c_clear_int(ctlr, int_stat); 335 343 if (!(int_stat & HISI_I2C_INT_ALL))
+3
drivers/i2c/busses/i2c-imx-lpi2c.c
··· 209 209 lpi2c_imx_set_mode(lpi2c_imx); 210 210 211 211 clk_rate = clk_get_rate(lpi2c_imx->clks[0].clk); 212 + if (!clk_rate) 213 + return -EINVAL; 214 + 212 215 if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST) 213 216 filt = 0; 214 217 else
+2 -1
drivers/i2c/busses/i2c-sun6i-p2wi.c
··· 250 250 251 251 p2wi->rstc = devm_reset_control_get_exclusive(dev, NULL); 252 252 if (IS_ERR(p2wi->rstc)) { 253 - dev_err(dev, "failed to retrieve reset controller: %d\n", ret); 253 + dev_err(dev, "failed to retrieve reset controller: %pe\n", 254 + p2wi->rstc); 254 255 return PTR_ERR(p2wi->rstc); 255 256 } 256 257
+2 -1
drivers/i2c/busses/i2c-tegra.c
··· 442 442 if (IS_VI(i2c_dev)) 443 443 return 0; 444 444 445 - if (!i2c_dev->hw->has_apb_dma) { 445 + if (i2c_dev->hw->has_apb_dma) { 446 446 if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) { 447 447 dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n"); 448 448 return 0; ··· 460 460 i2c_dev->dma_chan = dma_request_chan(i2c_dev->dev, "tx"); 461 461 if (IS_ERR(i2c_dev->dma_chan)) { 462 462 err = PTR_ERR(i2c_dev->dma_chan); 463 + i2c_dev->dma_chan = NULL; 463 464 goto err_out; 464 465 } 465 466
+4 -4
drivers/leds/trigger/ledtrig-netdev.c
··· 406 406 407 407 static DEVICE_ATTR_RW(interval); 408 408 409 - static ssize_t hw_control_show(struct device *dev, 410 - struct device_attribute *attr, char *buf) 409 + static ssize_t offloaded_show(struct device *dev, 410 + struct device_attribute *attr, char *buf) 411 411 { 412 412 struct led_netdev_data *trigger_data = led_trigger_get_drvdata(dev); 413 413 414 414 return sprintf(buf, "%d\n", trigger_data->hw_control); 415 415 } 416 416 417 - static DEVICE_ATTR_RO(hw_control); 417 + static DEVICE_ATTR_RO(offloaded); 418 418 419 419 static struct attribute *netdev_trig_attrs[] = { 420 420 &dev_attr_device_name.attr, ··· 427 427 &dev_attr_rx.attr, 428 428 &dev_attr_tx.attr, 429 429 &dev_attr_interval.attr, 430 - &dev_attr_hw_control.attr, 430 + &dev_attr_offloaded.attr, 431 431 NULL 432 432 }; 433 433 ATTRIBUTE_GROUPS(netdev_trig);
+2 -2
drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
··· 1310 1310 jpeg->dev = &pdev->dev; 1311 1311 jpeg->variant = of_device_get_match_data(jpeg->dev); 1312 1312 1313 + platform_set_drvdata(pdev, jpeg); 1314 + 1313 1315 ret = devm_of_platform_populate(&pdev->dev); 1314 1316 if (ret) { 1315 1317 v4l2_err(&jpeg->v4l2_dev, "Master of platform populate failed."); ··· 1382 1380 "%s device registered as /dev/video%d (%d,%d)\n", 1383 1381 jpeg->variant->dev_name, jpeg->vdev->num, 1384 1382 VIDEO_MAJOR, jpeg->vdev->minor); 1385 - 1386 - platform_set_drvdata(pdev, jpeg); 1387 1383 1388 1384 pm_runtime_enable(&pdev->dev); 1389 1385
+5 -2
drivers/media/platform/nxp/imx7-media-csi.c
··· 9 9 #include <linux/clk.h> 10 10 #include <linux/delay.h> 11 11 #include <linux/interrupt.h> 12 + #include <linux/math.h> 12 13 #include <linux/mfd/syscon.h> 14 + #include <linux/minmax.h> 13 15 #include <linux/module.h> 14 16 #include <linux/of_device.h> 15 17 #include <linux/of_graph.h> ··· 1139 1137 * TODO: Implement configurable stride support. 1140 1138 */ 1141 1139 walign = 8 * 8 / cc->bpp; 1142 - v4l_bound_align_image(&pixfmt->width, 1, 0xffff, walign, 1143 - &pixfmt->height, 1, 0xffff, 1, 0); 1140 + pixfmt->width = clamp(round_up(pixfmt->width, walign), walign, 1141 + round_down(65535U, walign)); 1142 + pixfmt->height = clamp(pixfmt->height, 1U, 65535U); 1144 1143 1145 1144 pixfmt->bytesperline = pixfmt->width * cc->bpp / 8; 1146 1145 pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+1 -1
drivers/media/usb/uvc/uvc_v4l2.c
··· 45 45 map->menu_names = NULL; 46 46 map->menu_mapping = NULL; 47 47 48 - map->menu_mask = BIT_MASK(xmap->menu_count); 48 + map->menu_mask = GENMASK(xmap->menu_count - 1, 0); 49 49 50 50 size = xmap->menu_count * sizeof(*map->menu_mapping); 51 51 map->menu_mapping = kzalloc(size, GFP_KERNEL);
+4 -3
drivers/mmc/core/block.c
··· 2097 2097 mmc_blk_urgent_bkops(mq, mqrq); 2098 2098 } 2099 2099 2100 - static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) 2100 + static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) 2101 2101 { 2102 2102 unsigned long flags; 2103 2103 bool put_card; 2104 2104 2105 2105 spin_lock_irqsave(&mq->lock, flags); 2106 2106 2107 - mq->in_flight[mmc_issue_type(mq, req)] -= 1; 2107 + mq->in_flight[issue_type] -= 1; 2108 2108 2109 2109 put_card = (mmc_tot_in_flight(mq) == 0); 2110 2110 ··· 2117 2117 static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req, 2118 2118 bool can_sleep) 2119 2119 { 2120 + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); 2120 2121 struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); 2121 2122 struct mmc_request *mrq = &mqrq->brq.mrq; 2122 2123 struct mmc_host *host = mq->card->host; ··· 2137 2136 blk_mq_complete_request(req); 2138 2137 } 2139 2138 2140 - mmc_blk_mq_dec_in_flight(mq, req); 2139 + mmc_blk_mq_dec_in_flight(mq, issue_type); 2141 2140 } 2142 2141 2143 2142 void mmc_blk_mq_recovery(struct mmc_queue *mq)
+7 -4
drivers/mmc/host/sdhci_f_sdh30.c
··· 210 210 { 211 211 struct sdhci_host *host = platform_get_drvdata(pdev); 212 212 struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); 213 - 214 - reset_control_assert(priv->rst); 215 - clk_disable_unprepare(priv->clk); 216 - clk_disable_unprepare(priv->clk_iface); 213 + struct clk *clk_iface = priv->clk_iface; 214 + struct reset_control *rst = priv->rst; 215 + struct clk *clk = priv->clk; 217 216 218 217 sdhci_pltfm_unregister(pdev); 218 + 219 + reset_control_assert(rst); 220 + clk_disable_unprepare(clk); 221 + clk_disable_unprepare(clk_iface); 219 222 220 223 return 0; 221 224 }
+13 -13
drivers/mmc/host/sunplus-mmc.c
··· 863 863 struct spmmc_host *host; 864 864 int ret = 0; 865 865 866 - mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); 867 - if (!mmc) { 868 - ret = -ENOMEM; 869 - goto probe_free_host; 870 - } 866 + mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct spmmc_host)); 867 + if (!mmc) 868 + return -ENOMEM; 871 869 872 870 host = mmc_priv(mmc); 873 871 host->mmc = mmc; ··· 900 902 901 903 ret = mmc_of_parse(mmc); 902 904 if (ret) 903 - goto probe_free_host; 905 + goto clk_disable; 904 906 905 907 mmc->ops = &spmmc_ops; 906 908 mmc->f_min = SPMMC_MIN_CLK; ··· 909 911 910 912 ret = mmc_regulator_get_supply(mmc); 911 913 if (ret) 912 - goto probe_free_host; 914 + goto clk_disable; 913 915 914 916 if (!mmc->ocr_avail) 915 917 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; ··· 925 927 host->tuning_info.enable_tuning = 1; 926 928 pm_runtime_set_active(&pdev->dev); 927 929 pm_runtime_enable(&pdev->dev); 928 - mmc_add_host(mmc); 930 + ret = mmc_add_host(mmc); 931 + if (ret) 932 + goto pm_disable; 929 933 930 - return ret; 934 + return 0; 931 935 932 - probe_free_host: 933 - if (mmc) 934 - mmc_free_host(mmc); 936 + pm_disable: 937 + pm_runtime_disable(&pdev->dev); 935 938 939 + clk_disable: 940 + clk_disable_unprepare(host->clk); 936 941 return ret; 937 942 } 938 943 ··· 949 948 pm_runtime_put_noidle(&dev->dev); 950 949 pm_runtime_disable(&dev->dev); 951 950 platform_set_drvdata(dev, NULL); 952 - mmc_free_host(host->mmc); 953 951 954 952 return 0; 955 953 }
-2
drivers/mmc/host/wbsd.c
··· 1705 1705 1706 1706 wbsd_release_resources(host); 1707 1707 wbsd_free_mmc(dev); 1708 - 1709 - mmc_free_host(mmc); 1710 1708 return ret; 1711 1709 } 1712 1710
+3 -3
drivers/net/bonding/bond_alb.c
··· 660 660 return NULL; 661 661 arp = (struct arp_pkt *)skb_network_header(skb); 662 662 663 - /* Don't modify or load balance ARPs that do not originate locally 664 - * (e.g.,arrive via a bridge). 663 + /* Don't modify or load balance ARPs that do not originate 664 + * from the bond itself or a VLAN directly above the bond. 665 665 */ 666 - if (!bond_slave_has_mac_rx(bond, arp->mac_src)) 666 + if (!bond_slave_has_mac_rcu(bond, arp->mac_src)) 667 667 return NULL; 668 668 669 669 dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
+1 -6
drivers/net/can/vxcan.c
··· 192 192 193 193 nla_peer = data[VXCAN_INFO_PEER]; 194 194 ifmp = nla_data(nla_peer); 195 - err = rtnl_nla_parse_ifla(peer_tb, 196 - nla_data(nla_peer) + 197 - sizeof(struct ifinfomsg), 198 - nla_len(nla_peer) - 199 - sizeof(struct ifinfomsg), 200 - NULL); 195 + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); 201 196 if (err < 0) 202 197 return err; 203 198
+4
drivers/net/dsa/mt7530.c
··· 1006 1006 mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, 1007 1007 MT753X_BPDU_CPU_ONLY); 1008 1008 1009 + /* Trap 802.1X PAE frames to the CPU port(s) */ 1010 + mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK, 1011 + MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY)); 1012 + 1009 1013 /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */ 1010 1014 mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK, 1011 1015 MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
+2
drivers/net/dsa/mt7530.h
··· 66 66 /* Registers for BPDU and PAE frame control*/ 67 67 #define MT753X_BPC 0x24 68 68 #define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0) 69 + #define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16) 70 + #define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x) 69 71 70 72 /* Register for :03 and :0E MAC DA frame control */ 71 73 #define MT753X_RGAC2 0x2c
+3
drivers/net/dsa/ocelot/felix_vsc9959.c
··· 1070 1070 if (gate_len_ns == U64_MAX) 1071 1071 return U64_MAX; 1072 1072 1073 + if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS) 1074 + return 0; 1075 + 1073 1076 return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC; 1074 1077 } 1075 1078
+1 -1
drivers/net/ethernet/broadcom/bgmac.c
··· 1448 1448 int err; 1449 1449 1450 1450 phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); 1451 - if (!phy_dev || IS_ERR(phy_dev)) { 1451 + if (IS_ERR(phy_dev)) { 1452 1452 dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); 1453 1453 return PTR_ERR(phy_dev); 1454 1454 }
+2
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 1508 1508 bool cnic_loaded; 1509 1509 struct cnic_eth_dev *(*cnic_probe)(struct net_device *); 1510 1510 1511 + bool nic_stopped; 1512 + 1511 1513 /* Flag that indicates that we can start looking for FCoE L2 queue 1512 1514 * completions in the default status block. 1513 1515 */
+13 -8
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 2715 2715 bnx2x_add_all_napi(bp); 2716 2716 DP(NETIF_MSG_IFUP, "napi added\n"); 2717 2717 bnx2x_napi_enable(bp); 2718 + bp->nic_stopped = false; 2718 2719 2719 2720 if (IS_PF(bp)) { 2720 2721 /* set pf load just before approaching the MCP */ ··· 2961 2960 load_error1: 2962 2961 bnx2x_napi_disable(bp); 2963 2962 bnx2x_del_all_napi(bp); 2963 + bp->nic_stopped = true; 2964 2964 2965 2965 /* clear pf_load status, as it was already set */ 2966 2966 if (IS_PF(bp)) ··· 3097 3095 if (!CHIP_IS_E1x(bp)) 3098 3096 bnx2x_pf_disable(bp); 3099 3097 3100 - /* Disable HW interrupts, NAPI */ 3101 - bnx2x_netif_stop(bp, 1); 3102 - /* Delete all NAPI objects */ 3103 - bnx2x_del_all_napi(bp); 3104 - if (CNIC_LOADED(bp)) 3105 - bnx2x_del_all_napi_cnic(bp); 3106 - /* Release IRQs */ 3107 - bnx2x_free_irq(bp); 3098 + if (!bp->nic_stopped) { 3099 + /* Disable HW interrupts, NAPI */ 3100 + bnx2x_netif_stop(bp, 1); 3101 + /* Delete all NAPI objects */ 3102 + bnx2x_del_all_napi(bp); 3103 + if (CNIC_LOADED(bp)) 3104 + bnx2x_del_all_napi_cnic(bp); 3105 + /* Release IRQs */ 3106 + bnx2x_free_irq(bp); 3107 + bp->nic_stopped = true; 3108 + } 3108 3109 3109 3110 /* Report UNLOAD_DONE to MCP */ 3110 3111 bnx2x_send_unload_done(bp, false);
+19 -13
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 9474 9474 } 9475 9475 } 9476 9476 9477 - /* Disable HW interrupts, NAPI */ 9478 - bnx2x_netif_stop(bp, 1); 9479 - /* Delete all NAPI objects */ 9480 - bnx2x_del_all_napi(bp); 9481 - if (CNIC_LOADED(bp)) 9482 - bnx2x_del_all_napi_cnic(bp); 9477 + if (!bp->nic_stopped) { 9478 + /* Disable HW interrupts, NAPI */ 9479 + bnx2x_netif_stop(bp, 1); 9480 + /* Delete all NAPI objects */ 9481 + bnx2x_del_all_napi(bp); 9482 + if (CNIC_LOADED(bp)) 9483 + bnx2x_del_all_napi_cnic(bp); 9483 9484 9484 - /* Release IRQs */ 9485 - bnx2x_free_irq(bp); 9485 + /* Release IRQs */ 9486 + bnx2x_free_irq(bp); 9487 + bp->nic_stopped = true; 9488 + } 9486 9489 9487 9490 /* Reset the chip, unless PCI function is offline. If we reach this 9488 9491 * point following a PCI error handling, it means device is really ··· 14241 14238 } 14242 14239 bnx2x_drain_tx_queues(bp); 14243 14240 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); 14244 - bnx2x_netif_stop(bp, 1); 14245 - bnx2x_del_all_napi(bp); 14241 + if (!bp->nic_stopped) { 14242 + bnx2x_netif_stop(bp, 1); 14243 + bnx2x_del_all_napi(bp); 14246 14244 14247 - if (CNIC_LOADED(bp)) 14248 - bnx2x_del_all_napi_cnic(bp); 14245 + if (CNIC_LOADED(bp)) 14246 + bnx2x_del_all_napi_cnic(bp); 14249 14247 14250 - bnx2x_free_irq(bp); 14248 + bnx2x_free_irq(bp); 14249 + bp->nic_stopped = true; 14250 + } 14251 14251 14252 14252 /* Report UNLOAD_DONE to MCP */ 14253 14253 bnx2x_send_unload_done(bp, true);
+9 -6
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
··· 529 529 bnx2x_vfpf_finalize(bp, &req->first_tlv); 530 530 531 531 free_irq: 532 - /* Disable HW interrupts, NAPI */ 533 - bnx2x_netif_stop(bp, 0); 534 - /* Delete all NAPI objects */ 535 - bnx2x_del_all_napi(bp); 532 + if (!bp->nic_stopped) { 533 + /* Disable HW interrupts, NAPI */ 534 + bnx2x_netif_stop(bp, 0); 535 + /* Delete all NAPI objects */ 536 + bnx2x_del_all_napi(bp); 536 537 537 - /* Release IRQs */ 538 - bnx2x_free_irq(bp); 538 + /* Release IRQs */ 539 + bnx2x_free_irq(bp); 540 + bp->nic_stopped = true; 541 + } 539 542 } 540 543 541 544 static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+1 -1
drivers/net/ethernet/broadcom/genet/bcmmii.c
··· 617 617 }; 618 618 619 619 phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); 620 - if (!phydev || IS_ERR(phydev)) { 620 + if (IS_ERR(phydev)) { 621 621 dev_err(kdev, "failed to register fixed PHY device\n"); 622 622 return PTR_ERR(phydev); 623 623 }
+4 -1
drivers/net/ethernet/broadcom/tg3.c
··· 6880 6880 6881 6881 ri->data = NULL; 6882 6882 6883 - skb = build_skb(data, frag_size); 6883 + if (frag_size) 6884 + skb = build_skb(data, frag_size); 6885 + else 6886 + skb = slab_build_skb(data); 6884 6887 if (!skb) { 6885 6888 tg3_frag_free(frag_size != 0, data); 6886 6889 goto drop_it_no_recycle;
+1 -1
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
··· 1466 1466 tp->write_seq = snd_isn; 1467 1467 tp->snd_nxt = snd_isn; 1468 1468 tp->snd_una = snd_isn; 1469 - inet_sk(sk)->inet_id = get_random_u16(); 1469 + atomic_set(&inet_sk(sk)->inet_id, get_random_u16()); 1470 1470 assign_rxopt(sk, opt); 1471 1471 1472 1472 if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
+1 -1
drivers/net/ethernet/ibm/ibmveth.c
··· 203 203 unsigned long offset; 204 204 205 205 for (offset = 0; offset < length; offset += SMP_CACHE_BYTES) 206 - asm("dcbfl %0,%1" :: "b" (addr), "r" (offset)); 206 + asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset)); 207 207 } 208 208 209 209 /* replenish the buffers for a pool. note that we don't need to
+3 -2
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 2609 2609 retval = i40e_correct_mac_vlan_filters 2610 2610 (vsi, &tmp_add_list, &tmp_del_list, 2611 2611 vlan_filters); 2612 - else 2612 + else if (pf->vf) 2613 2613 retval = i40e_correct_vf_mac_vlan_filters 2614 2614 (vsi, &tmp_add_list, &tmp_del_list, 2615 2615 vlan_filters, pf->vf[vsi->vf_id].trusted); ··· 2782 2782 } 2783 2783 2784 2784 /* if the VF is not trusted do not do promisc */ 2785 - if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { 2785 + if (vsi->type == I40E_VSI_SRIOV && pf->vf && 2786 + !pf->vf[vsi->vf_id].trusted) { 2786 2787 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); 2787 2788 goto out; 2788 2789 }
+2 -1
drivers/net/ethernet/intel/ice/ice_base.c
··· 434 434 /* Receive Packet Data Buffer Size. 435 435 * The Packet Data Buffer Size is defined in 128 byte units. 436 436 */ 437 - rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; 437 + rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len, 438 + BIT_ULL(ICE_RLAN_CTX_DBUF_S)); 438 439 439 440 /* use 32 byte descriptors */ 440 441 rlan_ctx.dsize = 1;
+4 -4
drivers/net/ethernet/intel/ice/ice_sriov.c
··· 1131 1131 if (!vf) 1132 1132 return -EINVAL; 1133 1133 1134 - ret = ice_check_vf_ready_for_reset(vf); 1134 + ret = ice_check_vf_ready_for_cfg(vf); 1135 1135 if (ret) 1136 1136 goto out_put_vf; 1137 1137 ··· 1246 1246 goto out_put_vf; 1247 1247 } 1248 1248 1249 - ret = ice_check_vf_ready_for_reset(vf); 1249 + ret = ice_check_vf_ready_for_cfg(vf); 1250 1250 if (ret) 1251 1251 goto out_put_vf; 1252 1252 ··· 1300 1300 return -EOPNOTSUPP; 1301 1301 } 1302 1302 1303 - ret = ice_check_vf_ready_for_reset(vf); 1303 + ret = ice_check_vf_ready_for_cfg(vf); 1304 1304 if (ret) 1305 1305 goto out_put_vf; 1306 1306 ··· 1613 1613 if (!vf) 1614 1614 return -EINVAL; 1615 1615 1616 - ret = ice_check_vf_ready_for_reset(vf); 1616 + ret = ice_check_vf_ready_for_cfg(vf); 1617 1617 if (ret) 1618 1618 goto out_put_vf; 1619 1619
+8 -26
drivers/net/ethernet/intel/ice/ice_vf_lib.c
··· 186 186 } 187 187 188 188 /** 189 - * ice_check_vf_ready_for_reset - check if VF is ready to be reset 190 - * @vf: VF to check if it's ready to be reset 191 - * 192 - * The purpose of this function is to ensure that the VF is not in reset, 193 - * disabled, and is both initialized and active, thus enabling us to safely 194 - * initialize another reset. 195 - */ 196 - int ice_check_vf_ready_for_reset(struct ice_vf *vf) 197 - { 198 - int ret; 199 - 200 - ret = ice_check_vf_ready_for_cfg(vf); 201 - if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) 202 - ret = -EAGAIN; 203 - 204 - return ret; 205 - } 206 - 207 - /** 208 189 * ice_trigger_vf_reset - Reset a VF on HW 209 190 * @vf: pointer to the VF structure 210 191 * @is_vflr: true if VFLR was issued, false if not ··· 843 862 return 0; 844 863 } 845 864 865 + if (flags & ICE_VF_RESET_LOCK) 866 + mutex_lock(&vf->cfg_lock); 867 + else 868 + lockdep_assert_held(&vf->cfg_lock); 869 + 846 870 if (ice_is_vf_disabled(vf)) { 847 871 vsi = ice_get_vf_vsi(vf); 848 872 if (!vsi) { 849 873 dev_dbg(dev, "VF is already removed\n"); 850 - return -EINVAL; 874 + err = -EINVAL; 875 + goto out_unlock; 851 876 } 852 877 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); 853 878 ··· 862 875 863 876 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", 864 877 vf->vf_id); 865 - return 0; 878 + goto out_unlock; 866 879 } 867 - 868 - if (flags & ICE_VF_RESET_LOCK) 869 - mutex_lock(&vf->cfg_lock); 870 - else 871 - lockdep_assert_held(&vf->cfg_lock); 872 880 873 881 /* Set VF disable bit state here, before triggering reset */ 874 882 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
-1
drivers/net/ethernet/intel/ice/ice_vf_lib.h
··· 215 215 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); 216 216 bool ice_is_vf_disabled(struct ice_vf *vf); 217 217 int ice_check_vf_ready_for_cfg(struct ice_vf *vf); 218 - int ice_check_vf_ready_for_reset(struct ice_vf *vf); 219 218 void ice_set_vf_state_dis(struct ice_vf *vf); 220 219 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf); 221 220 void
-1
drivers/net/ethernet/intel/ice/ice_virtchnl.c
··· 3949 3949 ice_vc_notify_vf_link_state(vf); 3950 3950 break; 3951 3951 case VIRTCHNL_OP_RESET_VF: 3952 - clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); 3953 3952 ops->reset_vf(vf); 3954 3953 break; 3955 3954 case VIRTCHNL_OP_ADD_ETH_ADDR:
+12 -12
drivers/net/ethernet/intel/igb/igb_ptp.c
··· 1385 1385 return; 1386 1386 } 1387 1387 1388 - spin_lock_init(&adapter->tmreg_lock); 1389 - INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); 1390 - 1391 - if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) 1392 - INIT_DELAYED_WORK(&adapter->ptp_overflow_work, 1393 - igb_ptp_overflow_check); 1394 - 1395 - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 1396 - adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; 1397 - 1398 - igb_ptp_reset(adapter); 1399 - 1400 1388 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, 1401 1389 &adapter->pdev->dev); 1402 1390 if (IS_ERR(adapter->ptp_clock)) { ··· 1394 1406 dev_info(&adapter->pdev->dev, "added PHC on %s\n", 1395 1407 adapter->netdev->name); 1396 1408 adapter->ptp_flags |= IGB_PTP_ENABLED; 1409 + 1410 + spin_lock_init(&adapter->tmreg_lock); 1411 + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); 1412 + 1413 + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) 1414 + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, 1415 + igb_ptp_overflow_check); 1416 + 1417 + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 1418 + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; 1419 + 1420 + igb_ptp_reset(adapter); 1397 1421 } 1398 1422 } 1399 1423
+1 -1
drivers/net/ethernet/intel/igc/igc_defines.h
··· 546 546 #define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ 547 547 #define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ 548 548 #define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ 549 - #define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) 549 + #define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) 550 550 #define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) 551 551 552 552 #define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */
+2 -1
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
··· 4270 4270 if (link < 0) 4271 4271 return NIX_AF_ERR_RX_LINK_INVALID; 4272 4272 4273 - nix_find_link_frs(rvu, req, pcifunc); 4274 4273 4275 4274 linkcfg: 4275 + nix_find_link_frs(rvu, req, pcifunc); 4276 + 4276 4277 cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); 4277 4278 cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); 4278 4279 if (req->update_minlen)
+10 -2
drivers/net/ethernet/mediatek/mtk_wed.c
··· 222 222 223 223 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 224 224 struct mtk_wed_hw *hw = hw_list[i]; 225 - struct mtk_wed_device *dev = hw->wed_dev; 225 + struct mtk_wed_device *dev; 226 226 int err; 227 227 228 + if (!hw) 229 + break; 230 + 231 + dev = hw->wed_dev; 228 232 if (!dev || !dev->wlan.reset) 229 233 continue; 230 234 ··· 249 245 250 246 for (i = 0; i < ARRAY_SIZE(hw_list); i++) { 251 247 struct mtk_wed_hw *hw = hw_list[i]; 252 - struct mtk_wed_device *dev = hw->wed_dev; 248 + struct mtk_wed_device *dev; 253 249 250 + if (!hw) 251 + break; 252 + 253 + dev = hw->wed_dev; 254 254 if (!dev || !dev->wlan.reset_complete) 255 255 continue; 256 256
+2 -2
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
··· 32 32 MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), 33 33 MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), 34 34 MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), 35 - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 3), 36 - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 20, 8), 35 + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4), 36 + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8), 37 37 MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), 38 38 MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), 39 39 MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
+6 -2
drivers/net/ethernet/mellanox/mlxsw/pci.c
··· 517 517 struct sk_buff *skb, 518 518 enum mlxsw_pci_cqe_v cqe_v, char *cqe) 519 519 { 520 + u8 ts_type; 521 + 520 522 if (cqe_v != MLXSW_PCI_CQE_V2) 521 523 return; 522 524 523 - if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) != 524 - MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC) 525 + ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe); 526 + 527 + if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC && 528 + ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC) 525 529 return; 526 530 527 531 mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
-9
drivers/net/ethernet/mellanox/mlxsw/reg.h
··· 97 97 */ 98 98 MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12); 99 99 100 - /* reg_sspr_sub_port 101 - * Virtual port within the physical port. 102 - * Should be set to 0 when virtual ports are not enabled on the port. 103 - * 104 - * Access: RW 105 - */ 106 - MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8); 107 - 108 100 /* reg_sspr_system_port 109 101 * Unique identifier within the stacking domain that represents all the ports 110 102 * that are available in the system (external ports). ··· 112 120 MLXSW_REG_ZERO(sspr, payload); 113 121 mlxsw_reg_sspr_m_set(payload, 1); 114 122 mlxsw_reg_sspr_local_port_set(payload, local_port); 115 - mlxsw_reg_sspr_sub_port_set(payload, 0); 116 123 mlxsw_reg_sspr_system_port_set(payload, local_port); 117 124 } 118 125
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
··· 193 193 key->vrid, GENMASK(7, 0)); 194 194 mlxsw_sp_acl_rulei_keymask_u32(rulei, 195 195 MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, 196 - key->vrid >> 8, GENMASK(2, 0)); 196 + key->vrid >> 8, GENMASK(3, 0)); 197 197 switch (key->proto) { 198 198 case MLXSW_SP_L3_PROTO_IPV4: 199 199 return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key);
+2 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
··· 173 173 174 174 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = { 175 175 MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8), 176 - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 3), 176 + MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true), 177 177 }; 178 178 179 179 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { ··· 324 324 325 325 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = { 326 326 MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8), 327 - MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x04, 21, 4, 0, true), 327 + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4), 328 328 }; 329 329 330 330 static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+1 -1
drivers/net/ethernet/sfc/falcon/selftest.c
··· 428 428 for (i = 0; i < state->packet_count; i++) { 429 429 /* Allocate an skb, holding an extra reference for 430 430 * transmit completion counting */ 431 - skb = alloc_skb(EF4_LOOPBACK_PAYLOAD_LEN, GFP_KERNEL); 431 + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); 432 432 if (!skb) 433 433 return -ENOMEM; 434 434 state->skbs[i] = skb;
+1 -1
drivers/net/ethernet/sfc/selftest.c
··· 425 425 for (i = 0; i < state->packet_count; i++) { 426 426 /* Allocate an skb, holding an extra reference for 427 427 * transmit completion counting */ 428 - skb = alloc_skb(EFX_LOOPBACK_PAYLOAD_LEN, GFP_KERNEL); 428 + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); 429 429 if (!skb) 430 430 return -ENOMEM; 431 431 state->skbs[i] = skb;
+1 -1
drivers/net/ethernet/sfc/siena/selftest.c
··· 426 426 for (i = 0; i < state->packet_count; i++) { 427 427 /* Allocate an skb, holding an extra reference for 428 428 * transmit completion counting */ 429 - skb = alloc_skb(EFX_LOOPBACK_PAYLOAD_LEN, GFP_KERNEL); 429 + skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); 430 430 if (!skb) 431 431 return -ENOMEM; 432 432 state->skbs[i] = skb;
+2 -1
drivers/net/ipvlan/ipvlan_main.c
··· 748 748 749 749 write_pnet(&port->pnet, newnet); 750 750 751 - ipvlan_migrate_l3s_hook(oldnet, newnet); 751 + if (port->mode == IPVLAN_MODE_L3S) 752 + ipvlan_migrate_l3s_hook(oldnet, newnet); 752 753 break; 753 754 } 754 755 case NETDEV_UNREGISTER:
+2 -2
drivers/net/mdio/mdio-bitbang.c
··· 186 186 struct mdiobb_ctrl *ctrl = bus->priv; 187 187 188 188 mdiobb_cmd_addr(ctrl, phy, devad, reg); 189 - mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg); 189 + mdiobb_cmd(ctrl, MDIO_C45_READ, phy, devad); 190 190 191 191 return mdiobb_read_common(bus, phy); 192 192 } ··· 222 222 struct mdiobb_ctrl *ctrl = bus->priv; 223 223 224 224 mdiobb_cmd_addr(ctrl, phy, devad, reg); 225 - mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg); 225 + mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, devad); 226 226 227 227 return mdiobb_write_common(bus, val); 228 228 }
+7 -4
drivers/net/phy/phy.c
··· 1218 1218 1219 1219 static void phy_process_error(struct phy_device *phydev) 1220 1220 { 1221 - mutex_lock(&phydev->lock); 1221 + /* phydev->lock must be held for the state change to be safe */ 1222 + if (!mutex_is_locked(&phydev->lock)) 1223 + phydev_err(phydev, "PHY-device data unsafe context\n"); 1224 + 1222 1225 phydev->state = PHY_ERROR; 1223 - mutex_unlock(&phydev->lock); 1224 1226 1225 1227 phy_trigger_machine(phydev); 1226 1228 } ··· 1231 1229 const void *func, int err) 1232 1230 { 1233 1231 WARN(1, "%pS: returned: %d\n", func, err); 1232 + mutex_lock(&phydev->lock); 1234 1233 phy_process_error(phydev); 1234 + mutex_unlock(&phydev->lock); 1235 1235 } 1236 1236 1237 1237 /** ··· 1242 1238 * 1243 1239 * Moves the PHY to the ERROR state in response to a read 1244 1240 * or write error, and tells the controller the link is down. 1245 - * Must not be called from interrupt context, or while the 1246 - * phydev->lock is held. 1241 + * Must be called with phydev->lock held. 1247 1242 */ 1248 1243 void phy_error(struct phy_device *phydev) 1249 1244 {
+10
drivers/net/phy/sfp-bus.c
··· 258 258 switch (id->base.extended_cc) { 259 259 case SFF8024_ECC_UNSPEC: 260 260 break; 261 + case SFF8024_ECC_100G_25GAUI_C2M_AOC: 262 + if (br_min <= 28000 && br_max >= 25000) { 263 + /* 25GBASE-R, possibly with FEC */ 264 + __set_bit(PHY_INTERFACE_MODE_25GBASER, interfaces); 265 + /* There is currently no link mode for 25000base 266 + * with unspecified range, reuse SR. 267 + */ 268 + phylink_set(modes, 25000baseSR_Full); 269 + } 270 + break; 261 271 case SFF8024_ECC_100GBASE_SR4_25GBASE_SR: 262 272 phylink_set(modes, 100000baseSR4_Full); 263 273 phylink_set(modes, 25000baseSR_Full);
+1 -4
drivers/net/veth.c
··· 1861 1861 1862 1862 nla_peer = data[VETH_INFO_PEER]; 1863 1863 ifmp = nla_data(nla_peer); 1864 - err = rtnl_nla_parse_ifla(peer_tb, 1865 - nla_data(nla_peer) + sizeof(struct ifinfomsg), 1866 - nla_len(nla_peer) - sizeof(struct ifinfomsg), 1867 - NULL); 1864 + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); 1868 1865 if (err < 0) 1869 1866 return err; 1870 1867
+1
drivers/net/wireless/intel/iwlwifi/Kconfig
··· 66 66 tristate "Intel Wireless WiFi MVM Firmware support" 67 67 select WANT_DEV_COREDUMP 68 68 depends on MAC80211 69 + depends on PTP_1588_CLOCK_OPTIONAL 69 70 help 70 71 This is the driver that supports the MVM firmware. The list 71 72 of the devices that use this firmware is available here:
+9 -22
drivers/of/dynamic.c
··· 63 63 } 64 64 EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister); 65 65 66 - #ifdef DEBUG 67 - const char *action_names[] = { 66 + static const char *action_names[] = { 67 + [0] = "INVALID", 68 68 [OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE", 69 69 [OF_RECONFIG_DETACH_NODE] = "DETACH_NODE", 70 70 [OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY", 71 71 [OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY", 72 72 [OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY", 73 73 }; 74 - #endif 75 74 76 75 int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p) 77 76 { ··· 619 620 } 620 621 621 622 ret = __of_add_property(ce->np, ce->prop); 622 - if (ret) { 623 - pr_err("changeset: add_property failed @%pOF/%s\n", 624 - ce->np, 625 - ce->prop->name); 626 - break; 627 - } 628 623 break; 629 624 case OF_RECONFIG_REMOVE_PROPERTY: 630 625 ret = __of_remove_property(ce->np, ce->prop); 631 - if (ret) { 632 - pr_err("changeset: remove_property failed @%pOF/%s\n", 633 - ce->np, 634 - ce->prop->name); 635 - break; 636 - } 637 626 break; 638 627 639 628 case OF_RECONFIG_UPDATE_PROPERTY: ··· 635 648 } 636 649 637 650 ret = __of_update_property(ce->np, ce->prop, &old_prop); 638 - if (ret) { 639 - pr_err("changeset: update_property failed @%pOF/%s\n", 640 - ce->np, 641 - ce->prop->name); 642 - break; 643 - } 644 651 break; 645 652 default: 646 653 ret = -EINVAL; 647 654 } 648 655 raw_spin_unlock_irqrestore(&devtree_lock, flags); 649 656 650 - if (ret) 657 + if (ret) { 658 + pr_err("changeset: apply failed: %-15s %pOF:%s\n", 659 + action_names[ce->action], ce->np, ce->prop->name); 651 660 return ret; 661 + } 652 662 653 663 switch (ce->action) { 654 664 case OF_RECONFIG_ATTACH_NODE: ··· 930 946 ce = kzalloc(sizeof(*ce), GFP_KERNEL); 931 947 if (!ce) 932 948 return -ENOMEM; 949 + 950 + if (WARN_ON(action >= ARRAY_SIZE(action_names))) 951 + return -EINVAL; 933 952 934 953 /* get a reference to the node */ 935 954 ce->action = action;
+2 -1
drivers/of/kexec.c
··· 184 184 if (ret) 185 185 return ret; 186 186 187 - return memblock_phys_free(addr, size); 187 + memblock_free_late(addr, size); 188 + return 0; 188 189 } 189 190 #endif 190 191
+2 -2
drivers/of/platform.c
··· 141 141 } 142 142 143 143 /* setup generic device info */ 144 - device_set_node(&dev->dev, of_fwnode_handle(np)); 144 + device_set_node(&dev->dev, of_fwnode_handle(of_node_get(np))); 145 145 dev->dev.parent = parent ? : &platform_bus; 146 146 147 147 if (bus_id) ··· 239 239 dev->dev.dma_mask = &dev->dev.coherent_dma_mask; 240 240 241 241 /* setup generic device info */ 242 - device_set_node(&dev->dev, of_fwnode_handle(node)); 242 + device_set_node(&dev->dev, of_fwnode_handle(of_node_get(node))); 243 243 dev->dev.parent = parent ? : &platform_bus; 244 244 dev->dev.platform_data = platform_data; 245 245 if (bus_id)
+2 -2
drivers/of/unittest.c
··· 664 664 memset(&args, 0, sizeof(args)); 665 665 666 666 EXPECT_BEGIN(KERN_INFO, 667 - "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle"); 667 + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); 668 668 669 669 rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle", 670 670 "phandle", 0, &args); 671 671 EXPECT_END(KERN_INFO, 672 - "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle"); 672 + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); 673 673 674 674 unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); 675 675
+6 -3
drivers/pinctrl/qcom/pinctrl-msm.c
··· 1038 1038 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 1039 1039 struct msm_pinctrl *pctrl = gpiochip_get_data(gc); 1040 1040 const struct msm_pingroup *g; 1041 + u32 intr_target_mask = GENMASK(2, 0); 1041 1042 unsigned long flags; 1042 1043 bool was_enabled; 1043 1044 u32 val; ··· 1075 1074 * With intr_target_use_scm interrupts are routed to 1076 1075 * application cpu using scm calls. 1077 1076 */ 1077 + if (g->intr_target_width) 1078 + intr_target_mask = GENMASK(g->intr_target_width - 1, 0); 1079 + 1078 1080 if (pctrl->intr_target_use_scm) { 1079 1081 u32 addr = pctrl->phys_base[0] + g->intr_target_reg; 1080 1082 int ret; 1081 1083 1082 1084 qcom_scm_io_readl(addr, &val); 1083 - 1084 - val &= ~(7 << g->intr_target_bit); 1085 + val &= ~(intr_target_mask << g->intr_target_bit); 1085 1086 val |= g->intr_target_kpss_val << g->intr_target_bit; 1086 1087 1087 1088 ret = qcom_scm_io_writel(addr, val); ··· 1093 1090 d->hwirq); 1094 1091 } else { 1095 1092 val = msm_readl_intr_target(pctrl, g); 1096 - val &= ~(7 << g->intr_target_bit); 1093 + val &= ~(intr_target_mask << g->intr_target_bit); 1097 1094 val |= g->intr_target_kpss_val << g->intr_target_bit; 1098 1095 msm_writel_intr_target(val, pctrl, g); 1099 1096 }
+2
drivers/pinctrl/qcom/pinctrl-msm.h
··· 59 59 * @intr_status_bit: Offset in @intr_status_reg for reading and acking the interrupt 60 60 * status. 61 61 * @intr_target_bit: Offset in @intr_target_reg for configuring the interrupt routing. 62 + * @intr_target_width: Number of bits used for specifying interrupt routing target. 62 63 * @intr_target_kpss_val: Value in @intr_target_bit for specifying that the interrupt from 63 64 * this gpio should get routed to the KPSS processor. 64 65 * @intr_raw_status_bit: Offset in @intr_cfg_reg for the raw status bit. ··· 101 100 unsigned intr_ack_high:1; 102 101 103 102 unsigned intr_target_bit:5; 103 + unsigned intr_target_width:5; 104 104 unsigned intr_target_kpss_val:5; 105 105 unsigned intr_raw_status_bit:5; 106 106 unsigned intr_polarity_bit:5;
+1
drivers/pinctrl/qcom/pinctrl-sa8775p.c
··· 46 46 .intr_enable_bit = 0, \ 47 47 .intr_status_bit = 0, \ 48 48 .intr_target_bit = 5, \ 49 + .intr_target_width = 4, \ 49 50 .intr_target_kpss_val = 3, \ 50 51 .intr_raw_status_bit = 4, \ 51 52 .intr_polarity_bit = 1, \
+1
drivers/platform/mellanox/mlxbf-tmfifo.c
··· 887 887 tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE]; 888 888 mlxbf_tmfifo_console_output(tm_vdev, vring); 889 889 spin_unlock_irqrestore(&fifo->spin_lock[0], flags); 890 + set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events); 890 891 } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, 891 892 &fifo->pend_events)) { 892 893 return true;
+5
drivers/platform/x86/ideapad-laptop.c
··· 1049 1049 { KE_IGNORE, 0x03 | IDEAPAD_WMI_KEY }, 1050 1050 /* Customizable Lenovo Hotkey ("star" with 'S' inside) */ 1051 1051 { KE_KEY, 0x01 | IDEAPAD_WMI_KEY, { KEY_FAVORITES } }, 1052 + { KE_KEY, 0x04 | IDEAPAD_WMI_KEY, { KEY_SELECTIVE_SCREENSHOT } }, 1053 + /* Lenovo Support */ 1054 + { KE_KEY, 0x07 | IDEAPAD_WMI_KEY, { KEY_HELP } }, 1055 + { KE_KEY, 0x0e | IDEAPAD_WMI_KEY, { KEY_PICKUP_PHONE } }, 1056 + { KE_KEY, 0x0f | IDEAPAD_WMI_KEY, { KEY_HANGUP_PHONE } }, 1052 1057 /* Dark mode toggle */ 1053 1058 { KE_KEY, 0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } }, 1054 1059 /* Sound profile switch */
+7
drivers/platform/x86/lenovo-ymc.c
··· 36 36 DMI_MATCH(DMI_PRODUCT_NAME, "82QF"), 37 37 }, 38 38 }, 39 + { 40 + /* Lenovo Yoga 7 14ACN6 */ 41 + .matches = { 42 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 43 + DMI_MATCH(DMI_PRODUCT_NAME, "82N7"), 44 + }, 45 + }, 39 46 { } 40 47 }; 41 48
+1
drivers/soc/aspeed/aspeed-socinfo.c
··· 137 137 138 138 soc_dev = soc_device_register(attrs); 139 139 if (IS_ERR(soc_dev)) { 140 + kfree(attrs->machine); 140 141 kfree(attrs->soc_id); 141 142 kfree(attrs->serial_number); 142 143 kfree(attrs);
+1 -1
drivers/soc/aspeed/aspeed-uart-routing.c
··· 524 524 struct aspeed_uart_routing_selector *sel = to_routing_selector(attr); 525 525 int val; 526 526 527 - val = match_string(sel->options, -1, buf); 527 + val = __sysfs_match_string(sel->options, -1, buf); 528 528 if (val < 0) { 529 529 dev_err(dev, "invalid value \"%s\"\n", buf); 530 530 return -EINVAL;
+3
drivers/tty/Kconfig
··· 164 164 userspace depends on this functionality to continue operating 165 165 normally. 166 166 167 + Processes which run with CAP_SYS_ADMIN, such as BRLTTY, can 168 + use TIOCSTI even when this is set to N. 169 + 167 170 This functionality can be changed at runtime with the 168 171 dev.tty.legacy_tiocsti sysctl. This configuration option sets 169 172 the default value of the sysctl.
+2 -1
drivers/tty/n_gsm.c
··· 3042 3042 static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) 3043 3043 { 3044 3044 int i; 3045 - struct gsm_dlci *dlci = gsm->dlci[0]; 3045 + struct gsm_dlci *dlci; 3046 3046 struct gsm_msg *txq, *ntxq; 3047 3047 3048 3048 gsm->dead = true; 3049 3049 mutex_lock(&gsm->mutex); 3050 3050 3051 + dlci = gsm->dlci[0]; 3051 3052 if (dlci) { 3052 3053 if (disc && dlci->state != DLCI_CLOSED) { 3053 3054 gsm_dlci_begin_close(dlci);
+3
drivers/tty/serial/8250/8250_core.c
··· 497 497 498 498 up = &serial8250_ports[index]; 499 499 up->port.line = index; 500 + up->port.port_id = index; 500 501 501 502 serial8250_init_port(up); 502 503 if (!base_ops) ··· 1041 1040 uart_remove_one_port(&serial8250_reg, &uart->port); 1042 1041 1043 1042 uart->port.ctrl_id = up->port.ctrl_id; 1043 + uart->port.port_id = up->port.port_id; 1044 1044 uart->port.iobase = up->port.iobase; 1045 1045 uart->port.membase = up->port.membase; 1046 1046 uart->port.irq = up->port.irq; ··· 1204 1202 uart->port.flags &= ~UPF_BOOT_AUTOCONF; 1205 1203 uart->port.type = PORT_UNKNOWN; 1206 1204 uart->port.dev = &serial8250_isa_devs->dev; 1205 + uart->port.port_id = line; 1207 1206 uart->capabilities = 0; 1208 1207 serial8250_init_port(uart); 1209 1208 serial8250_apply_quirks(uart);
+1 -3
drivers/tty/serial/8250/8250_port.c
··· 703 703 704 704 static void serial8250_clear_IER(struct uart_8250_port *up) 705 705 { 706 - /* Port locked to synchronize UART_IER access against the console. */ 707 - lockdep_assert_held_once(&up->port.lock); 708 - 709 706 if (up->capabilities & UART_CAP_UUE) 710 707 serial_out(up, UART_IER, UART_IER_UUE); 711 708 else ··· 3275 3278 3276 3279 spin_lock_init(&port->lock); 3277 3280 port->ctrl_id = 0; 3281 + port->pm = NULL; 3278 3282 port->ops = &serial8250_pops; 3279 3283 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); 3280 3284
+2 -2
drivers/tty/serial/fsl_lpuart.c
··· 1139 1139 unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); 1140 1140 1141 1141 if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { 1142 - /* Read DR to clear the error flags */ 1143 - lpuart32_read(&sport->port, UARTDATA); 1142 + /* Clear the error flags */ 1143 + lpuart32_write(&sport->port, sr, UARTSTAT); 1144 1144 1145 1145 if (sr & UARTSTAT_PE) 1146 1146 sport->port.icount.parity++;
+1
drivers/tty/serial/serial_base.h
··· 16 16 17 17 struct serial_ctrl_device { 18 18 struct device dev; 19 + struct ida port_ida; 19 20 }; 20 21 21 22 struct serial_port_device {
+56 -14
drivers/tty/serial/serial_base_bus.c
··· 10 10 11 11 #include <linux/container_of.h> 12 12 #include <linux/device.h> 13 + #include <linux/idr.h> 13 14 #include <linux/module.h> 14 15 #include <linux/serial_core.h> 15 16 #include <linux/slab.h> ··· 20 19 21 20 static bool serial_base_initialized; 22 21 22 + static const struct device_type serial_ctrl_type = { 23 + .name = "ctrl", 24 + }; 25 + 26 + static const struct device_type serial_port_type = { 27 + .name = "port", 28 + }; 29 + 23 30 static int serial_base_match(struct device *dev, struct device_driver *drv) 24 31 { 25 - int len = strlen(drv->name); 32 + if (dev->type == &serial_ctrl_type && 33 + str_has_prefix(drv->name, serial_ctrl_type.name)) 34 + return 1; 26 35 27 - return !strncmp(dev_name(dev), drv->name, len); 36 + if (dev->type == &serial_port_type && 37 + str_has_prefix(drv->name, serial_port_type.name)) 38 + return 1; 39 + 40 + return 0; 28 41 } 29 42 30 43 static struct bus_type serial_base_bus_type = { ··· 63 48 struct device *parent_dev, 64 49 const struct device_type *type, 65 50 void (*release)(struct device *dev), 66 - int id) 51 + unsigned int ctrl_id, 52 + unsigned int port_id) 67 53 { 68 54 device_initialize(dev); 69 55 dev->type = type; ··· 77 61 return -EPROBE_DEFER; 78 62 } 79 63 80 - return dev_set_name(dev, "%s.%s.%d", type->name, dev_name(port->dev), id); 81 - } 64 + if (type == &serial_ctrl_type) 65 + return dev_set_name(dev, "%s:%d", dev_name(port->dev), ctrl_id); 82 66 83 - static const struct device_type serial_ctrl_type = { 84 - .name = "ctrl", 85 - }; 67 + if (type == &serial_port_type) 68 + return dev_set_name(dev, "%s:%d.%d", dev_name(port->dev), 69 + ctrl_id, port_id); 70 + 71 + return -EINVAL; 72 + } 86 73 87 74 static void serial_base_ctrl_release(struct device *dev) 88 75 { ··· 100 81 return; 101 82 102 83 device_del(&ctrl_dev->dev); 84 + put_device(&ctrl_dev->dev); 103 85 } 104 86 105 87 struct serial_ctrl_device *serial_base_ctrl_add(struct uart_port *port, ··· 113 93 if (!ctrl_dev) 114 94 return 
ERR_PTR(-ENOMEM); 115 95 96 + ida_init(&ctrl_dev->port_ida); 97 + 116 98 err = serial_base_device_init(port, &ctrl_dev->dev, 117 99 parent, &serial_ctrl_type, 118 100 serial_base_ctrl_release, 119 - port->ctrl_id); 101 + port->ctrl_id, 0); 120 102 if (err) 121 103 goto err_put_device; 122 104 ··· 134 112 return ERR_PTR(err); 135 113 } 136 114 137 - static const struct device_type serial_port_type = { 138 - .name = "port", 139 - }; 140 - 141 115 static void serial_base_port_release(struct device *dev) 142 116 { 143 117 struct serial_port_device *port_dev = to_serial_base_port_device(dev); ··· 145 127 struct serial_ctrl_device *ctrl_dev) 146 128 { 147 129 struct serial_port_device *port_dev; 130 + int min = 0, max = -1; /* Use -1 for max to apply IDA defaults */ 148 131 int err; 149 132 150 133 port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL); 151 134 if (!port_dev) 152 135 return ERR_PTR(-ENOMEM); 153 136 137 + /* Device driver specified port_id vs automatic assignment? */ 138 + if (port->port_id) { 139 + min = port->port_id; 140 + max = port->port_id; 141 + } 142 + 143 + err = ida_alloc_range(&ctrl_dev->port_ida, min, max, GFP_KERNEL); 144 + if (err < 0) { 145 + kfree(port_dev); 146 + return ERR_PTR(err); 147 + } 148 + 149 + port->port_id = err; 150 + 154 151 err = serial_base_device_init(port, &port_dev->dev, 155 152 &ctrl_dev->dev, &serial_port_type, 156 153 serial_base_port_release, 157 - port->line); 154 + port->ctrl_id, port->port_id); 158 155 if (err) 159 156 goto err_put_device; 160 157 ··· 183 150 184 151 err_put_device: 185 152 put_device(&port_dev->dev); 153 + ida_free(&ctrl_dev->port_ida, port->port_id); 186 154 187 155 return ERR_PTR(err); 188 156 } 189 157 190 158 void serial_base_port_device_remove(struct serial_port_device *port_dev) 191 159 { 160 + struct serial_ctrl_device *ctrl_dev; 161 + struct device *parent; 162 + 192 163 if (!port_dev) 193 164 return; 194 165 166 + parent = port_dev->dev.parent; 167 + ctrl_dev = 
to_serial_base_ctrl_device(parent); 168 + 195 169 device_del(&port_dev->dev); 170 + ida_free(&ctrl_dev->port_ida, port_dev->port->port_id); 171 + put_device(&port_dev->dev); 196 172 } 197 173 198 174 static int serial_base_init(void)
+1 -1
drivers/video/fbdev/amifb.c
··· 687 687 __u16 height; 688 688 __u16 xspot; 689 689 __u16 yspot; 690 - __u8 data[1]; /* field with [height][width] */ 690 + DECLARE_FLEX_ARRAY(__u8, data); /* field with [height][width] */ 691 691 }; 692 692 693 693 struct fb_cursorstate {
+1 -1
drivers/video/fbdev/atmel_lcdfb.c
··· 1308 1308 .resume = atmel_lcdfb_resume, 1309 1309 .driver = { 1310 1310 .name = "atmel_lcdfb", 1311 - .of_match_table = of_match_ptr(atmel_lcdfb_dt_ids), 1311 + .of_match_table = atmel_lcdfb_dt_ids, 1312 1312 }, 1313 1313 }; 1314 1314
+2 -2
drivers/video/fbdev/goldfishfb.c
··· 203 203 } 204 204 205 205 fb->irq = platform_get_irq(pdev, 0); 206 - if (fb->irq <= 0) { 207 - ret = -ENODEV; 206 + if (fb->irq < 0) { 207 + ret = fb->irq; 208 208 goto err_no_irq; 209 209 } 210 210
+3 -1
drivers/video/fbdev/mmp/hw/mmp_ctrl.c
··· 519 519 "unable to get clk %s\n", mi->clk_name); 520 520 goto failed; 521 521 } 522 - clk_prepare_enable(ctrl->clk); 522 + ret = clk_prepare_enable(ctrl->clk); 523 + if (ret) 524 + goto failed; 523 525 524 526 /* init global regs */ 525 527 ctrl_set_default(ctrl);
+2 -2
drivers/video/fbdev/ssd1307fb.c
··· 399 399 /* Enable the PWM */ 400 400 pwm_enable(par->pwm); 401 401 402 - dev_dbg(&par->client->dev, "Using PWM%d with a %lluns period.\n", 403 - par->pwm->pwm, pwm_get_period(par->pwm)); 402 + dev_dbg(&par->client->dev, "Using PWM %s with a %lluns period.\n", 403 + par->pwm->label, pwm_get_period(par->pwm)); 404 404 } 405 405 406 406 /* Set initial contrast */
+1
fs/btrfs/ctree.h
··· 443 443 444 444 struct btrfs_file_private { 445 445 void *filldir_buf; 446 + u64 last_index; 446 447 struct extent_state *llseek_cached_state; 447 448 }; 448 449
+3 -2
fs/btrfs/delayed-inode.c
··· 1632 1632 } 1633 1633 1634 1634 bool btrfs_readdir_get_delayed_items(struct inode *inode, 1635 + u64 last_index, 1635 1636 struct list_head *ins_list, 1636 1637 struct list_head *del_list) 1637 1638 { ··· 1652 1651 1653 1652 mutex_lock(&delayed_node->mutex); 1654 1653 item = __btrfs_first_delayed_insertion_item(delayed_node); 1655 - while (item) { 1654 + while (item && item->index <= last_index) { 1656 1655 refcount_inc(&item->refs); 1657 1656 list_add_tail(&item->readdir_list, ins_list); 1658 1657 item = __btrfs_next_delayed_item(item); 1659 1658 } 1660 1659 1661 1660 item = __btrfs_first_delayed_deletion_item(delayed_node); 1662 - while (item) { 1661 + while (item && item->index <= last_index) { 1663 1662 refcount_inc(&item->refs); 1664 1663 list_add_tail(&item->readdir_list, del_list); 1665 1664 item = __btrfs_next_delayed_item(item);
+1
fs/btrfs/delayed-inode.h
··· 148 148 149 149 /* Used for readdir() */ 150 150 bool btrfs_readdir_get_delayed_items(struct inode *inode, 151 + u64 last_index, 151 152 struct list_head *ins_list, 152 153 struct list_head *del_list); 153 154 void btrfs_readdir_put_delayed_items(struct inode *inode,
+24 -1
fs/btrfs/extent_io.c
··· 902 902 size -= len; 903 903 pg_offset += len; 904 904 disk_bytenr += len; 905 - bio_ctrl->len_to_oe_boundary -= len; 905 + 906 + /* 907 + * len_to_oe_boundary defaults to U32_MAX, which isn't page or 908 + * sector aligned. alloc_new_bio() then sets it to the end of 909 + * our ordered extent for writes into zoned devices. 910 + * 911 + * When len_to_oe_boundary is tracking an ordered extent, we 912 + * trust the ordered extent code to align things properly, and 913 + * the check above to cap our write to the ordered extent 914 + * boundary is correct. 915 + * 916 + * When len_to_oe_boundary is U32_MAX, the cap above would 917 + * result in a 4095 byte IO for the last page right before 918 + * we hit the bio limit of UINT_MAX. bio_add_page() has all 919 + * the checks required to make sure we don't overflow the bio, 920 + * and we should just ignore len_to_oe_boundary completely 921 + * unless we're using it to track an ordered extent. 922 + * 923 + * It's pretty hard to make a bio sized U32_MAX, but it can 924 + * happen when the page cache is able to feed us contiguous 925 + * pages for large extents. 926 + */ 927 + if (bio_ctrl->len_to_oe_boundary != U32_MAX) 928 + bio_ctrl->len_to_oe_boundary -= len; 906 929 907 930 /* Ordered extent boundary: move on to a new bio. */ 908 931 if (bio_ctrl->len_to_oe_boundary == 0)
+2 -4
fs/btrfs/extent_map.c
··· 760 760 761 761 if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) { 762 762 start = em_end; 763 - if (end != (u64)-1) 764 - len = start + len - em_end; 765 763 goto next; 766 764 } 767 765 ··· 827 829 if (!split) 828 830 goto remove_em; 829 831 } 830 - split->start = start + len; 831 - split->len = em_end - (start + len); 832 + split->start = end; 833 + split->len = em_end - end; 832 834 split->block_start = em->block_start; 833 835 split->flags = flags; 834 836 split->compress_type = em->compress_type;
+79 -52
fs/btrfs/inode.c
··· 5873 5873 } 5874 5874 5875 5875 /* 5876 + * Find the highest existing sequence number in a directory and then set the 5877 + * in-memory index_cnt variable to the first free sequence number. 5878 + */ 5879 + static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 5880 + { 5881 + struct btrfs_root *root = inode->root; 5882 + struct btrfs_key key, found_key; 5883 + struct btrfs_path *path; 5884 + struct extent_buffer *leaf; 5885 + int ret; 5886 + 5887 + key.objectid = btrfs_ino(inode); 5888 + key.type = BTRFS_DIR_INDEX_KEY; 5889 + key.offset = (u64)-1; 5890 + 5891 + path = btrfs_alloc_path(); 5892 + if (!path) 5893 + return -ENOMEM; 5894 + 5895 + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5896 + if (ret < 0) 5897 + goto out; 5898 + /* FIXME: we should be able to handle this */ 5899 + if (ret == 0) 5900 + goto out; 5901 + ret = 0; 5902 + 5903 + if (path->slots[0] == 0) { 5904 + inode->index_cnt = BTRFS_DIR_START_INDEX; 5905 + goto out; 5906 + } 5907 + 5908 + path->slots[0]--; 5909 + 5910 + leaf = path->nodes[0]; 5911 + btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 5912 + 5913 + if (found_key.objectid != btrfs_ino(inode) || 5914 + found_key.type != BTRFS_DIR_INDEX_KEY) { 5915 + inode->index_cnt = BTRFS_DIR_START_INDEX; 5916 + goto out; 5917 + } 5918 + 5919 + inode->index_cnt = found_key.offset + 1; 5920 + out: 5921 + btrfs_free_path(path); 5922 + return ret; 5923 + } 5924 + 5925 + static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index) 5926 + { 5927 + if (dir->index_cnt == (u64)-1) { 5928 + int ret; 5929 + 5930 + ret = btrfs_inode_delayed_dir_index_count(dir); 5931 + if (ret) { 5932 + ret = btrfs_set_inode_index_count(dir); 5933 + if (ret) 5934 + return ret; 5935 + } 5936 + } 5937 + 5938 + *index = dir->index_cnt; 5939 + 5940 + return 0; 5941 + } 5942 + 5943 + /* 5876 5944 * All this infrastructure exists because dir_emit can fault, and we are holding 5877 5945 * the tree lock when doing readdir. 
For now just allocate a buffer and copy 5878 5946 * our information into that, and then dir_emit from the buffer. This is ··· 5952 5884 static int btrfs_opendir(struct inode *inode, struct file *file) 5953 5885 { 5954 5886 struct btrfs_file_private *private; 5887 + u64 last_index; 5888 + int ret; 5889 + 5890 + ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index); 5891 + if (ret) 5892 + return ret; 5955 5893 5956 5894 private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); 5957 5895 if (!private) 5958 5896 return -ENOMEM; 5897 + private->last_index = last_index; 5959 5898 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 5960 5899 if (!private->filldir_buf) { 5961 5900 kfree(private); ··· 6029 5954 6030 5955 INIT_LIST_HEAD(&ins_list); 6031 5956 INIT_LIST_HEAD(&del_list); 6032 - put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list); 5957 + put = btrfs_readdir_get_delayed_items(inode, private->last_index, 5958 + &ins_list, &del_list); 6033 5959 6034 5960 again: 6035 5961 key.type = BTRFS_DIR_INDEX_KEY; ··· 6048 5972 break; 6049 5973 if (found_key.offset < ctx->pos) 6050 5974 continue; 5975 + if (found_key.offset > private->last_index) 5976 + break; 6051 5977 if (btrfs_should_delete_dir_index(&del_list, found_key.offset)) 6052 5978 continue; 6053 5979 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); ··· 6183 6105 if (flags & S_ATIME) 6184 6106 inode->i_atime = *now; 6185 6107 return dirty ? 
btrfs_dirty_inode(BTRFS_I(inode)) : 0; 6186 - } 6187 - 6188 - /* 6189 - * find the highest existing sequence number in a directory 6190 - * and then set the in-memory index_cnt variable to reflect 6191 - * free sequence numbers 6192 - */ 6193 - static int btrfs_set_inode_index_count(struct btrfs_inode *inode) 6194 - { 6195 - struct btrfs_root *root = inode->root; 6196 - struct btrfs_key key, found_key; 6197 - struct btrfs_path *path; 6198 - struct extent_buffer *leaf; 6199 - int ret; 6200 - 6201 - key.objectid = btrfs_ino(inode); 6202 - key.type = BTRFS_DIR_INDEX_KEY; 6203 - key.offset = (u64)-1; 6204 - 6205 - path = btrfs_alloc_path(); 6206 - if (!path) 6207 - return -ENOMEM; 6208 - 6209 - ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 6210 - if (ret < 0) 6211 - goto out; 6212 - /* FIXME: we should be able to handle this */ 6213 - if (ret == 0) 6214 - goto out; 6215 - ret = 0; 6216 - 6217 - if (path->slots[0] == 0) { 6218 - inode->index_cnt = BTRFS_DIR_START_INDEX; 6219 - goto out; 6220 - } 6221 - 6222 - path->slots[0]--; 6223 - 6224 - leaf = path->nodes[0]; 6225 - btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 6226 - 6227 - if (found_key.objectid != btrfs_ino(inode) || 6228 - found_key.type != BTRFS_DIR_INDEX_KEY) { 6229 - inode->index_cnt = BTRFS_DIR_START_INDEX; 6230 - goto out; 6231 - } 6232 - 6233 - inode->index_cnt = found_key.offset + 1; 6234 - out: 6235 - btrfs_free_path(path); 6236 - return ret; 6237 6108 } 6238 6109 6239 6110 /*
+2 -1
fs/btrfs/scrub.c
··· 605 605 btrfs_stack_header_bytenr(header), logical); 606 606 return; 607 607 } 608 - if (memcmp(header->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE) != 0) { 608 + if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid, 609 + BTRFS_FSID_SIZE) != 0) { 609 610 bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree); 610 611 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); 611 612 btrfs_warn_rl(fs_info,
+1 -2
fs/btrfs/volumes.c
··· 4638 4638 } 4639 4639 } 4640 4640 4641 - BUG_ON(fs_info->balance_ctl || 4642 - test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4641 + ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); 4643 4642 atomic_dec(&fs_info->balance_cancel_req); 4644 4643 mutex_unlock(&fs_info->balance_mutex); 4645 4644 return 0;
+16 -10
fs/nfs/direct.c
··· 472 472 return result; 473 473 } 474 474 475 - static void 476 - nfs_direct_join_group(struct list_head *list, struct inode *inode) 475 + static void nfs_direct_join_group(struct list_head *list, struct inode *inode) 477 476 { 478 - struct nfs_page *req, *next; 477 + struct nfs_page *req, *subreq; 479 478 480 479 list_for_each_entry(req, list, wb_list) { 481 - if (req->wb_head != req || req->wb_this_page == req) 480 + if (req->wb_head != req) 482 481 continue; 483 - for (next = req->wb_this_page; 484 - next != req->wb_head; 485 - next = next->wb_this_page) { 486 - nfs_list_remove_request(next); 487 - nfs_release_request(next); 488 - } 482 + subreq = req->wb_this_page; 483 + if (subreq == req) 484 + continue; 485 + do { 486 + /* 487 + * Remove subrequests from this list before freeing 488 + * them in the call to nfs_join_page_group(). 489 + */ 490 + if (!list_empty(&subreq->wb_list)) { 491 + nfs_list_remove_request(subreq); 492 + nfs_release_request(subreq); 493 + } 494 + } while ((subreq = subreq->wb_this_page) != req); 489 495 nfs_join_page_group(req, inode); 490 496 } 491 497 }
+2 -3
fs/nfs/nfs42proc.c
··· 1377 1377 for (i = 0; i < np; i++) { 1378 1378 pages[i] = alloc_page(GFP_KERNEL); 1379 1379 if (!pages[i]) { 1380 - np = i + 1; 1381 1380 err = -ENOMEM; 1382 1381 goto out; 1383 1382 } ··· 1400 1401 } while (exception.retry); 1401 1402 1402 1403 out: 1403 - while (--np >= 0) 1404 - __free_page(pages[np]); 1404 + while (--i >= 0) 1405 + __free_page(pages[i]); 1405 1406 kfree(pages); 1406 1407 1407 1408 return err;
+10 -4
fs/nfs/nfs4proc.c
··· 6004 6004 out_ok: 6005 6005 ret = res.acl_len; 6006 6006 out_free: 6007 - for (i = 0; i < npages; i++) 6008 - if (pages[i]) 6009 - __free_page(pages[i]); 6007 + while (--i >= 0) 6008 + __free_page(pages[i]); 6010 6009 if (res.acl_scratch) 6011 6010 __free_page(res.acl_scratch); 6012 6011 kfree(pages); ··· 7180 7181 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7181 7182 goto out_restart; 7182 7183 break; 7183 - case -NFS4ERR_BAD_STATEID: 7184 7184 case -NFS4ERR_OLD_STATEID: 7185 + if (data->arg.new_lock_owner != 0 && 7186 + nfs4_refresh_open_old_stateid(&data->arg.open_stateid, 7187 + lsp->ls_state)) 7188 + goto out_restart; 7189 + if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) 7190 + goto out_restart; 7191 + fallthrough; 7192 + case -NFS4ERR_BAD_STATEID: 7185 7193 case -NFS4ERR_STALE_STATEID: 7186 7194 case -NFS4ERR_EXPIRED: 7187 7195 if (data->arg.new_lock_owner != 0) {
+3 -1
fs/nfs/sysfs.c
··· 345 345 int ret = -ENOMEM; 346 346 347 347 s = kasprintf(GFP_KERNEL, "server-%d", server->s_sysfs_id); 348 - if (s) 348 + if (s) { 349 349 ret = kobject_rename(&server->kobj, s); 350 + kfree(s); 351 + } 350 352 if (ret < 0) 351 353 pr_warn("NFS: rename sysfs %s failed (%d)\n", 352 354 server->kobj.name, ret);
+4
fs/smb/client/fs_context.c
··· 231 231 break; 232 232 case Opt_sec_none: 233 233 ctx->nullauth = 1; 234 + kfree(ctx->username); 235 + ctx->username = NULL; 234 236 break; 235 237 default: 236 238 cifs_errorf(fc, "bad security option: %s\n", value); ··· 1203 1201 case Opt_user: 1204 1202 kfree(ctx->username); 1205 1203 ctx->username = NULL; 1204 + if (ctx->nullauth) 1205 + break; 1206 1206 if (strlen(param->string) == 0) { 1207 1207 /* null user, ie. anonymous authentication */ 1208 1208 ctx->nullauth = 1;
+2 -1
include/linux/serial_core.h
··· 459 459 struct serial_rs485 *rs485); 460 460 int (*iso7816_config)(struct uart_port *, 461 461 struct serial_iso7816 *iso7816); 462 - int ctrl_id; /* optional serial core controller id */ 462 + unsigned int ctrl_id; /* optional serial core controller id */ 463 + unsigned int port_id; /* optional serial core port id */ 463 464 unsigned int irq; /* irq number */ 464 465 unsigned long irqflags; /* irq flags */ 465 466 unsigned int uartclk; /* base uart clock */
+1 -10
include/net/bonding.h
··· 722 722 } 723 723 724 724 /* Caller must hold rcu_read_lock() for read */ 725 - static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) 725 + static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac) 726 726 { 727 727 struct list_head *iter; 728 728 struct slave *tmp; 729 - struct netdev_hw_addr *ha; 730 729 731 730 bond_for_each_slave_rcu(bond, tmp, iter) 732 731 if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) 733 732 return true; 734 - 735 - if (netdev_uc_empty(bond->dev)) 736 - return false; 737 - 738 - netdev_for_each_uc_addr(ha, bond->dev) 739 - if (ether_addr_equal_64bits(mac, ha->addr)) 740 - return true; 741 - 742 733 return false; 743 734 } 744 735
+1 -1
include/net/inet_sock.h
··· 223 223 __s16 uc_ttl; 224 224 __be16 inet_sport; 225 225 struct ip_options_rcu __rcu *inet_opt; 226 - __u16 inet_id; 226 + atomic_t inet_id; 227 227 228 228 __u8 tos; 229 229 __u8 min_ttl;
+13 -2
include/net/ip.h
··· 538 538 * generator as much as we can. 539 539 */ 540 540 if (sk && inet_sk(sk)->inet_daddr) { 541 - iph->id = htons(inet_sk(sk)->inet_id); 542 - inet_sk(sk)->inet_id += segs; 541 + int val; 542 + 543 + /* avoid atomic operations for TCP, 544 + * as we hold socket lock at this point. 545 + */ 546 + if (sk_is_tcp(sk)) { 547 + sock_owned_by_me(sk); 548 + val = atomic_read(&inet_sk(sk)->inet_id); 549 + atomic_set(&inet_sk(sk)->inet_id, val + segs); 550 + } else { 551 + val = atomic_add_return(segs, &inet_sk(sk)->inet_id); 552 + } 553 + iph->id = htons(val); 543 554 return; 544 555 } 545 556 if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
+1
include/net/mac80211.h
··· 6612 6612 * marks frames marked in the bitmap as having been filtered. Afterwards, it 6613 6613 * checks if any frames in the window starting from @ssn can now be released 6614 6614 * (in case they were only waiting for frames that were filtered.) 6615 + * (Only work correctly if @max_rx_aggregation_subframes <= 64 frames) 6615 6616 */ 6616 6617 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 6617 6618 u16 ssn, u64 filtered,
+6
include/net/netfilter/nf_tables.h
··· 587 587 return (void *)set->data; 588 588 } 589 589 590 + static inline bool nft_set_gc_is_pending(const struct nft_set *s) 591 + { 592 + return refcount_read(&s->refs) != 1; 593 + } 594 + 590 595 static inline struct nft_set *nft_set_container_of(const void *priv) 591 596 { 592 597 return (void *)priv - offsetof(struct nft_set, data); ··· 1734 1729 u64 table_handle; 1735 1730 unsigned int base_seq; 1736 1731 unsigned int gc_seq; 1732 + u8 validate_state; 1737 1733 }; 1738 1734 1739 1735 extern unsigned int nf_tables_net_id;
+2 -2
include/net/rtnetlink.h
··· 190 190 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm, 191 191 u32 portid, const struct nlmsghdr *nlh); 192 192 193 - int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 194 - struct netlink_ext_ack *exterr); 193 + int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, 194 + struct netlink_ext_ack *exterr); 195 195 struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid); 196 196 197 197 #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
+4 -3
include/net/sock.h
··· 1323 1323 /* 1324 1324 * Pressure flag: try to collapse. 1325 1325 * Technical note: it is used by multiple contexts non atomically. 1326 + * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes. 1326 1327 * All the __sk_mem_schedule() is of this nature: accounting 1327 1328 * is strict, actions are advisory and have some latency. 1328 1329 */ ··· 1425 1424 static inline bool sk_under_global_memory_pressure(const struct sock *sk) 1426 1425 { 1427 1426 return sk->sk_prot->memory_pressure && 1428 - !!*sk->sk_prot->memory_pressure; 1427 + !!READ_ONCE(*sk->sk_prot->memory_pressure); 1429 1428 } 1430 1429 1431 1430 static inline bool sk_under_memory_pressure(const struct sock *sk) ··· 1437 1436 mem_cgroup_under_socket_pressure(sk->sk_memcg)) 1438 1437 return true; 1439 1438 1440 - return !!*sk->sk_prot->memory_pressure; 1439 + return !!READ_ONCE(*sk->sk_prot->memory_pressure); 1441 1440 } 1442 1441 1443 1442 static inline long ··· 1514 1513 { 1515 1514 if (!prot->memory_pressure) 1516 1515 return false; 1517 - return !!*prot->memory_pressure; 1516 + return !!READ_ONCE(*prot->memory_pressure); 1518 1517 } 1519 1518 1520 1519
-12
include/video/kyro.h
··· 38 38 int wc_cookie; 39 39 }; 40 40 41 - extern int kyro_dev_init(void); 42 - extern void kyro_dev_reset(void); 43 - 44 - extern unsigned char *kyro_dev_physical_fb_ptr(void); 45 - extern unsigned char *kyro_dev_virtual_fb_ptr(void); 46 - extern void *kyro_dev_physical_regs_ptr(void); 47 - extern void *kyro_dev_virtual_regs_ptr(void); 48 - extern unsigned int kyro_dev_fb_size(void); 49 - extern unsigned int kyro_dev_regs_size(void); 50 - 51 - extern u32 kyro_dev_overlay_offset(void); 52 - 53 41 /* 54 42 * benedict.gaster@superh.com 55 43 * Added the follow IOCTLS for the creation of overlay services...
+1 -4
mm/memory.c
··· 5257 5257 5258 5258 static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) 5259 5259 { 5260 - /* Even if this succeeds, make it clear we *might* have slept */ 5261 - if (likely(mmap_read_trylock(mm))) { 5262 - might_sleep(); 5260 + if (likely(mmap_read_trylock(mm))) 5263 5261 return true; 5264 - } 5265 5262 5266 5263 if (regs && !user_mode(regs)) { 5267 5264 unsigned long ip = instruction_pointer(regs);
+2 -1
net/batman-adv/bat_v_elp.c
··· 505 505 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 506 506 struct batadv_elp_packet *elp_packet; 507 507 struct batadv_hard_iface *primary_if; 508 - struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); 508 + struct ethhdr *ethhdr; 509 509 bool res; 510 510 int ret = NET_RX_DROP; 511 511 ··· 513 513 if (!res) 514 514 goto free_skb; 515 515 516 + ethhdr = eth_hdr(skb); 516 517 if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) 517 518 goto free_skb; 518 519
+5 -2
net/batman-adv/bat_v_ogm.c
··· 123 123 { 124 124 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 125 125 126 - if (hard_iface->if_status != BATADV_IF_ACTIVE) 126 + if (hard_iface->if_status != BATADV_IF_ACTIVE) { 127 + kfree_skb(skb); 127 128 return; 129 + } 128 130 129 131 batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX); 130 132 batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES, ··· 987 985 { 988 986 struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 989 987 struct batadv_ogm2_packet *ogm_packet; 990 - struct ethhdr *ethhdr = eth_hdr(skb); 988 + struct ethhdr *ethhdr; 991 989 int ogm_offset; 992 990 u8 *packet_pos; 993 991 int ret = NET_RX_DROP; ··· 1001 999 if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN)) 1002 1000 goto free_skb; 1003 1001 1002 + ethhdr = eth_hdr(skb); 1004 1003 if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) 1005 1004 goto free_skb; 1006 1005
+13 -1
net/batman-adv/hard-interface.c
··· 631 631 */ 632 632 void batadv_update_min_mtu(struct net_device *soft_iface) 633 633 { 634 - soft_iface->mtu = batadv_hardif_min_mtu(soft_iface); 634 + struct batadv_priv *bat_priv = netdev_priv(soft_iface); 635 + int limit_mtu; 636 + int mtu; 637 + 638 + mtu = batadv_hardif_min_mtu(soft_iface); 639 + 640 + if (bat_priv->mtu_set_by_user) 641 + limit_mtu = bat_priv->mtu_set_by_user; 642 + else 643 + limit_mtu = ETH_DATA_LEN; 644 + 645 + mtu = min(mtu, limit_mtu); 646 + dev_set_mtu(soft_iface, mtu); 635 647 636 648 /* Check if the local translate table should be cleaned up to match a 637 649 * new (and smaller) MTU.
+3
net/batman-adv/netlink.c
··· 495 495 attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED]; 496 496 497 497 atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr)); 498 + 499 + rtnl_lock(); 498 500 batadv_update_min_mtu(bat_priv->soft_iface); 501 + rtnl_unlock(); 499 502 } 500 503 501 504 if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) {
+3
net/batman-adv/soft-interface.c
··· 153 153 154 154 static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) 155 155 { 156 + struct batadv_priv *bat_priv = netdev_priv(dev); 157 + 156 158 /* check ranges */ 157 159 if (new_mtu < ETH_MIN_MTU || new_mtu > batadv_hardif_min_mtu(dev)) 158 160 return -EINVAL; 159 161 160 162 dev->mtu = new_mtu; 163 + bat_priv->mtu_set_by_user = new_mtu; 161 164 162 165 return 0; 163 166 }
-1
net/batman-adv/translation-table.c
··· 774 774 if (roamed_back) { 775 775 batadv_tt_global_free(bat_priv, tt_global, 776 776 "Roaming canceled"); 777 - tt_global = NULL; 778 777 } else { 779 778 /* The global entry has to be marked as ROAMING and 780 779 * has to be kept for consistency purpose
+6
net/batman-adv/types.h
··· 1547 1547 struct net_device *soft_iface; 1548 1548 1549 1549 /** 1550 + * @mtu_set_by_user: MTU was set once by user 1551 + * protected by rtnl_lock 1552 + */ 1553 + int mtu_set_by_user; 1554 + 1555 + /** 1550 1556 * @bat_counters: mesh internal traffic statistic counters (see 1551 1557 * batadv_counters) 1552 1558 */
+7 -15
net/can/isotp.c
··· 188 188 return (isotp_bc_flags(so) == 0); 189 189 } 190 190 191 - static bool isotp_register_txecho(struct isotp_sock *so) 192 - { 193 - /* all modes but SF_BROADCAST register for tx echo skbs */ 194 - return (isotp_bc_flags(so) != CAN_ISOTP_SF_BROADCAST); 195 - } 196 - 197 191 static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer) 198 192 { 199 193 struct isotp_sock *so = container_of(hrtimer, struct isotp_sock, ··· 1203 1209 lock_sock(sk); 1204 1210 1205 1211 /* remove current filters & unregister */ 1206 - if (so->bound && isotp_register_txecho(so)) { 1212 + if (so->bound) { 1207 1213 if (so->ifindex) { 1208 1214 struct net_device *dev; 1209 1215 ··· 1326 1332 can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id), 1327 1333 isotp_rcv, sk, "isotp", sk); 1328 1334 1329 - if (isotp_register_txecho(so)) { 1330 - /* no consecutive frame echo skb in flight */ 1331 - so->cfecho = 0; 1335 + /* no consecutive frame echo skb in flight */ 1336 + so->cfecho = 0; 1332 1337 1333 - /* register for echo skb's */ 1334 - can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id), 1335 - isotp_rcv_echo, sk, "isotpe", sk); 1336 - } 1338 + /* register for echo skb's */ 1339 + can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id), 1340 + isotp_rcv_echo, sk, "isotpe", sk); 1337 1341 1338 1342 dev_put(dev); 1339 1343 ··· 1552 1560 case NETDEV_UNREGISTER: 1553 1561 lock_sock(sk); 1554 1562 /* remove current filters & unregister */ 1555 - if (so->bound && isotp_register_txecho(so)) { 1563 + if (so->bound) { 1556 1564 if (isotp_register_rxid(so)) 1557 1565 can_rx_unregister(dev_net(dev), dev, so->rxid, 1558 1566 SINGLE_MASK(so->rxid),
+26 -9
net/can/raw.c
··· 85 85 int bound; 86 86 int ifindex; 87 87 struct net_device *dev; 88 + netdevice_tracker dev_tracker; 88 89 struct list_head notifier; 89 90 int loopback; 90 91 int recv_own_msgs; ··· 286 285 case NETDEV_UNREGISTER: 287 286 lock_sock(sk); 288 287 /* remove current filters & unregister */ 289 - if (ro->bound) 288 + if (ro->bound) { 290 289 raw_disable_allfilters(dev_net(dev), dev, sk); 290 + netdev_put(dev, &ro->dev_tracker); 291 + } 291 292 292 293 if (ro->count > 1) 293 294 kfree(ro->filter); ··· 394 391 395 392 /* remove current filters & unregister */ 396 393 if (ro->bound) { 397 - if (ro->dev) 394 + if (ro->dev) { 398 395 raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); 399 - else 396 + netdev_put(ro->dev, &ro->dev_tracker); 397 + } else { 400 398 raw_disable_allfilters(sock_net(sk), NULL, sk); 399 + } 401 400 } 402 401 403 402 if (ro->count > 1) ··· 450 445 goto out; 451 446 } 452 447 if (dev->type != ARPHRD_CAN) { 453 - dev_put(dev); 454 448 err = -ENODEV; 455 - goto out; 449 + goto out_put_dev; 456 450 } 451 + 457 452 if (!(dev->flags & IFF_UP)) 458 453 notify_enetdown = 1; 459 454 ··· 461 456 462 457 /* filters set by default/setsockopt */ 463 458 err = raw_enable_allfilters(sock_net(sk), dev, sk); 464 - dev_put(dev); 459 + if (err) 460 + goto out_put_dev; 461 + 465 462 } else { 466 463 ifindex = 0; 467 464 ··· 474 467 if (!err) { 475 468 if (ro->bound) { 476 469 /* unregister old filters */ 477 - if (ro->dev) 470 + if (ro->dev) { 478 471 raw_disable_allfilters(dev_net(ro->dev), 479 472 ro->dev, sk); 480 - else 473 + /* drop reference to old ro->dev */ 474 + netdev_put(ro->dev, &ro->dev_tracker); 475 + } else { 481 476 raw_disable_allfilters(sock_net(sk), NULL, sk); 477 + } 482 478 } 483 479 ro->ifindex = ifindex; 484 480 ro->bound = 1; 481 + /* bind() ok -> hold a reference for new ro->dev */ 485 482 ro->dev = dev; 483 + if (ro->dev) 484 + netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL); 486 485 } 487 486 488 - out: 487 + out_put_dev: 488 + /* 
remove potential reference from dev_get_by_index() */ 489 + if (dev) 490 + dev_put(dev); 491 + out: 489 492 release_sock(sk); 490 493 rtnl_unlock(); 491 494
+21 -4
net/core/rtnetlink.c
··· 2267 2267 return err; 2268 2268 } 2269 2269 2270 - int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 2271 - struct netlink_ext_ack *exterr) 2270 + int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, 2271 + struct netlink_ext_ack *exterr) 2272 2272 { 2273 - return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, 2273 + const struct ifinfomsg *ifmp; 2274 + const struct nlattr *attrs; 2275 + size_t len; 2276 + 2277 + ifmp = nla_data(nla_peer); 2278 + attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); 2279 + len = nla_len(nla_peer) - sizeof(struct ifinfomsg); 2280 + 2281 + if (ifmp->ifi_index < 0) { 2282 + NL_SET_ERR_MSG_ATTR(exterr, nla_peer, 2283 + "ifindex can't be negative"); 2284 + return -EINVAL; 2285 + } 2286 + 2287 + return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, 2274 2288 exterr); 2275 2289 } 2276 - EXPORT_SYMBOL(rtnl_nla_parse_ifla); 2290 + EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); 2277 2291 2278 2292 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) 2279 2293 { ··· 3560 3546 if (ifm->ifi_index > 0) { 3561 3547 link_specified = true; 3562 3548 dev = __dev_get_by_index(net, ifm->ifi_index); 3549 + } else if (ifm->ifi_index < 0) { 3550 + NL_SET_ERR_MSG(extack, "ifindex can't be negative"); 3551 + return -EINVAL; 3563 3552 } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { 3564 3553 link_specified = true; 3565 3554 dev = rtnl_dev_get(net, tb);
+2 -2
net/dccp/ipv4.c
··· 130 130 inet->inet_daddr, 131 131 inet->inet_sport, 132 132 inet->inet_dport); 133 - inet->inet_id = get_random_u16(); 133 + atomic_set(&inet->inet_id, get_random_u16()); 134 134 135 135 err = dccp_connect(sk); 136 136 rt = NULL; ··· 430 430 RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); 431 431 newinet->mc_index = inet_iif(skb); 432 432 newinet->mc_ttl = ip_hdr(skb)->ttl; 433 - newinet->inet_id = get_random_u16(); 433 + atomic_set(&newinet->inet_id, get_random_u16()); 434 434 435 435 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) 436 436 goto put_and_exit;
+12 -8
net/dccp/proto.c
··· 315 315 __poll_t dccp_poll(struct file *file, struct socket *sock, 316 316 poll_table *wait) 317 317 { 318 - __poll_t mask; 319 318 struct sock *sk = sock->sk; 319 + __poll_t mask; 320 + u8 shutdown; 321 + int state; 320 322 321 323 sock_poll_wait(file, sock, wait); 322 - if (sk->sk_state == DCCP_LISTEN) 324 + 325 + state = inet_sk_state_load(sk); 326 + if (state == DCCP_LISTEN) 323 327 return inet_csk_listen_poll(sk); 324 328 325 329 /* Socket is not locked. We are protected from async events ··· 332 328 */ 333 329 334 330 mask = 0; 335 - if (sk->sk_err) 331 + if (READ_ONCE(sk->sk_err)) 336 332 mask = EPOLLERR; 333 + shutdown = READ_ONCE(sk->sk_shutdown); 337 334 338 - if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) 335 + if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED) 339 336 mask |= EPOLLHUP; 340 - if (sk->sk_shutdown & RCV_SHUTDOWN) 337 + if (shutdown & RCV_SHUTDOWN) 341 338 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; 342 339 343 340 /* Connected? */ 344 - if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { 341 + if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { 345 342 if (atomic_read(&sk->sk_rmem_alloc) > 0) 346 343 mask |= EPOLLIN | EPOLLRDNORM; 347 344 348 - if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { 345 + if (!(shutdown & SEND_SHUTDOWN)) { 349 346 if (sk_stream_is_writeable(sk)) { 350 347 mask |= EPOLLOUT | EPOLLWRNORM; 351 348 } else { /* send SIGIO later */ ··· 364 359 } 365 360 return mask; 366 361 } 367 - 368 362 EXPORT_SYMBOL_GPL(dccp_poll); 369 363 370 364 int dccp_ioctl(struct sock *sk, int cmd, int *karg)
+3
net/devlink/leftover.c
··· 6619 6619 struct devlink_param_item *param_item; 6620 6620 struct devlink_trap_item *trap_item; 6621 6621 struct devlink_port *devlink_port; 6622 + struct devlink_linecard *linecard; 6622 6623 struct devlink_rate *rate_node; 6623 6624 struct devlink_region *region; 6624 6625 unsigned long port_index; ··· 6648 6647 6649 6648 xa_for_each(&devlink->ports, port_index, devlink_port) 6650 6649 devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL); 6650 + list_for_each_entry_reverse(linecard, &devlink->linecard_list, list) 6651 + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); 6651 6652 devlink_notify(devlink, DEVLINK_CMD_DEL); 6652 6653 } 6653 6654
+1 -1
net/ipv4/af_inet.c
··· 346 346 else 347 347 inet->pmtudisc = IP_PMTUDISC_WANT; 348 348 349 - inet->inet_id = 0; 349 + atomic_set(&inet->inet_id, 0); 350 350 351 351 sock_init_data(sock, sk); 352 352
+1 -1
net/ipv4/datagram.c
··· 73 73 reuseport_has_conns_set(sk); 74 74 sk->sk_state = TCP_ESTABLISHED; 75 75 sk_set_txhash(sk); 76 - inet->inet_id = get_random_u16(); 76 + atomic_set(&inet->inet_id, get_random_u16()); 77 77 78 78 sk_dst_set(sk, &rt->dst); 79 79 err = 0;
+2 -2
net/ipv4/tcp_ipv4.c
··· 313 313 inet->inet_daddr)); 314 314 } 315 315 316 - inet->inet_id = get_random_u16(); 316 + atomic_set(&inet->inet_id, get_random_u16()); 317 317 318 318 if (tcp_fastopen_defer_connect(sk, &err)) 319 319 return err; ··· 1596 1596 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1597 1597 if (inet_opt) 1598 1598 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; 1599 - newinet->inet_id = get_random_u16(); 1599 + atomic_set(&newinet->inet_id, get_random_u16()); 1600 1600 1601 1601 /* Set ToS of the new socket based upon the value of incoming SYN. 1602 1602 * ECT bits are set later in tcp_init_transfer().
+10 -2
net/mac80211/rx.c
··· 1083 1083 struct sk_buff *tail = skb_peek_tail(frames); 1084 1084 struct ieee80211_rx_status *status; 1085 1085 1086 - if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) 1086 + if (tid_agg_rx->reorder_buf_filtered && 1087 + tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) 1087 1088 return true; 1088 1089 1089 1090 if (!tail) ··· 1125 1124 } 1126 1125 1127 1126 no_frame: 1128 - tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 1127 + if (tid_agg_rx->reorder_buf_filtered) 1128 + tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 1129 1129 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); 1130 1130 } 1131 1131 ··· 4266 4264 u16 ssn, u64 filtered, 4267 4265 u16 received_mpdus) 4268 4266 { 4267 + struct ieee80211_local *local; 4269 4268 struct sta_info *sta; 4270 4269 struct tid_ampdu_rx *tid_agg_rx; 4271 4270 struct sk_buff_head frames; ··· 4283 4280 __skb_queue_head_init(&frames); 4284 4281 4285 4282 sta = container_of(pubsta, struct sta_info, sta); 4283 + 4284 + local = sta->sdata->local; 4285 + WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64, 4286 + "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n", 4287 + local->hw.max_rx_aggregation_subframes); 4286 4288 4287 4289 if (!ieee80211_rx_data_set_sta(&rx, sta, -1)) 4288 4290 return;
+15 -8
net/netfilter/nf_tables_api.c
··· 1373 1373 if (table == NULL) 1374 1374 goto err_kzalloc; 1375 1375 1376 - table->validate_state = NFT_VALIDATE_SKIP; 1376 + table->validate_state = nft_net->validate_state; 1377 1377 table->name = nla_strdup(attr, GFP_KERNEL_ACCOUNT); 1378 1378 if (table->name == NULL) 1379 1379 goto err_strdup; ··· 9054 9054 return -EAGAIN; 9055 9055 9056 9056 nft_validate_state_update(table, NFT_VALIDATE_SKIP); 9057 + break; 9057 9058 } 9058 - 9059 - break; 9060 9059 } 9061 9060 9062 9061 return 0; ··· 9459 9460 struct nft_trans_gc *trans, *next; 9460 9461 LIST_HEAD(trans_gc_list); 9461 9462 9462 - spin_lock(&nf_tables_destroy_list_lock); 9463 + spin_lock(&nf_tables_gc_list_lock); 9463 9464 list_splice_init(&nf_tables_gc_list, &trans_gc_list); 9464 - spin_unlock(&nf_tables_destroy_list_lock); 9465 + spin_unlock(&nf_tables_gc_list_lock); 9465 9466 9466 9467 list_for_each_entry_safe(trans, next, &trans_gc_list, list) { 9467 9468 list_del(&trans->list); ··· 9801 9802 } 9802 9803 9803 9804 /* 0. Validate ruleset, otherwise roll back for error reporting. 
*/ 9804 - if (nf_tables_validate(net) < 0) 9805 + if (nf_tables_validate(net) < 0) { 9806 + nft_net->validate_state = NFT_VALIDATE_DO; 9805 9807 return -EAGAIN; 9808 + } 9806 9809 9807 9810 err = nft_flow_rule_offload_commit(net); 9808 9811 if (err < 0) ··· 10063 10062 nf_tables_commit_audit_log(&adl, nft_net->base_seq); 10064 10063 10065 10064 nft_gc_seq_end(nft_net, gc_seq); 10065 + nft_net->validate_state = NFT_VALIDATE_SKIP; 10066 10066 nf_tables_commit_release(net); 10067 10067 10068 10068 return 0; ··· 10340 10338 enum nfnl_abort_action action) 10341 10339 { 10342 10340 struct nftables_pernet *nft_net = nft_pernet(net); 10343 - int ret = __nf_tables_abort(net, action); 10341 + unsigned int gc_seq; 10342 + int ret; 10344 10343 10344 + gc_seq = nft_gc_seq_begin(nft_net); 10345 + ret = __nf_tables_abort(net, action); 10346 + nft_gc_seq_end(nft_net, gc_seq); 10345 10347 mutex_unlock(&nft_net->commit_mutex); 10346 10348 10347 10349 return ret; ··· 11083 11077 gc_seq = nft_gc_seq_begin(nft_net); 11084 11078 11085 11079 if (!list_empty(&nf_tables_destroy_list)) 11086 - rcu_barrier(); 11080 + nf_tables_trans_destroy_flush_work(); 11087 11081 again: 11088 11082 list_for_each_entry(table, &nft_net->tables, list) { 11089 11083 if (nft_table_has_owner(table) && ··· 11127 11121 mutex_init(&nft_net->commit_mutex); 11128 11122 nft_net->base_seq = 1; 11129 11123 nft_net->gc_seq = 0; 11124 + nft_net->validate_state = NFT_VALIDATE_SKIP; 11130 11125 11131 11126 return 0; 11132 11127 }
+3
net/netfilter/nft_set_hash.c
··· 326 326 nft_net = nft_pernet(net); 327 327 gc_seq = READ_ONCE(nft_net->gc_seq); 328 328 329 + if (nft_set_gc_is_pending(set)) 330 + goto done; 331 + 329 332 gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL); 330 333 if (!gc) 331 334 goto done;
+10 -3
net/netfilter/nft_set_pipapo.c
··· 902 902 static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k, 903 903 int mask_bits) 904 904 { 905 - int rule = f->rules++, group, ret, bit_offset = 0; 905 + int rule = f->rules, group, ret, bit_offset = 0; 906 906 907 - ret = pipapo_resize(f, f->rules - 1, f->rules); 907 + ret = pipapo_resize(f, f->rules, f->rules + 1); 908 908 if (ret) 909 909 return ret; 910 + 911 + f->rules++; 910 912 911 913 for (group = 0; group < f->groups; group++) { 912 914 int i, v; ··· 1054 1052 step++; 1055 1053 if (step >= len) { 1056 1054 if (!masks) { 1057 - pipapo_insert(f, base, 0); 1055 + err = pipapo_insert(f, base, 0); 1056 + if (err < 0) 1057 + return err; 1058 1058 masks = 1; 1059 1059 } 1060 1060 goto out; ··· 1238 1234 ret = pipapo_insert(f, start, f->groups * f->bb); 1239 1235 else 1240 1236 ret = pipapo_expand(f, start, end, f->groups * f->bb); 1237 + 1238 + if (ret < 0) 1239 + return ret; 1241 1240 1242 1241 if (f->bsize > bsize_max) 1243 1242 bsize_max = f->bsize;
+3
net/netfilter/nft_set_rbtree.c
··· 611 611 nft_net = nft_pernet(net); 612 612 gc_seq = READ_ONCE(nft_net->gc_seq); 613 613 614 + if (nft_set_gc_is_pending(set)) 615 + goto done; 616 + 614 617 gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL); 615 618 if (!gc) 616 619 goto done;
+40 -13
net/sched/sch_api.c
··· 1547 1547 return 0; 1548 1548 } 1549 1549 1550 + static bool req_create_or_replace(struct nlmsghdr *n) 1551 + { 1552 + return (n->nlmsg_flags & NLM_F_CREATE && 1553 + n->nlmsg_flags & NLM_F_REPLACE); 1554 + } 1555 + 1556 + static bool req_create_exclusive(struct nlmsghdr *n) 1557 + { 1558 + return (n->nlmsg_flags & NLM_F_CREATE && 1559 + n->nlmsg_flags & NLM_F_EXCL); 1560 + } 1561 + 1562 + static bool req_change(struct nlmsghdr *n) 1563 + { 1564 + return (!(n->nlmsg_flags & NLM_F_CREATE) && 1565 + !(n->nlmsg_flags & NLM_F_REPLACE) && 1566 + !(n->nlmsg_flags & NLM_F_EXCL)); 1567 + } 1568 + 1550 1569 /* 1551 1570 * Create/change qdisc. 1552 1571 */ 1553 - 1554 1572 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, 1555 1573 struct netlink_ext_ack *extack) 1556 1574 { ··· 1662 1644 * 1663 1645 * We know, that some child q is already 1664 1646 * attached to this parent and have choice: 1665 - * either to change it or to create/graft new one. 1647 + * 1) change it or 2) create/graft new one. 1648 + * If the requested qdisc kind is different 1649 + * than the existing one, then we choose graft. 1650 + * If they are the same then this is "change" 1651 + * operation - just let it fallthrough.. 1666 1652 * 1667 1653 * 1. We are allowed to create/graft only 1668 - * if CREATE and REPLACE flags are set. 1654 + * if the request is explicitly stating 1655 + * "please create if it doesn't exist". 1669 1656 * 1670 - * 2. If EXCL is set, requestor wanted to say, 1671 - * that qdisc tcm_handle is not expected 1657 + * 2. If the request is to exclusive create 1658 + * then the qdisc tcm_handle is not expected 1672 1659 * to exist, so that we choose create/graft too. 1673 1660 * 1674 1661 * 3. The last case is when no flags are set. 1662 + * This will happen when for example tc 1663 + * utility issues a "change" command. 1675 1664 * Alas, it is sort of hole in API, we 1676 1665 * cannot decide what to do unambiguously. 
1677 - * For now we select create/graft, if 1678 - * user gave KIND, which does not match existing. 1666 + * For now we select create/graft. 1679 1667 */ 1680 - if ((n->nlmsg_flags & NLM_F_CREATE) && 1681 - (n->nlmsg_flags & NLM_F_REPLACE) && 1682 - ((n->nlmsg_flags & NLM_F_EXCL) || 1683 - (tca[TCA_KIND] && 1684 - nla_strcmp(tca[TCA_KIND], q->ops->id)))) 1685 - goto create_n_graft; 1668 + if (tca[TCA_KIND] && 1669 + nla_strcmp(tca[TCA_KIND], q->ops->id)) { 1670 + if (req_create_or_replace(n) || 1671 + req_create_exclusive(n)) 1672 + goto create_n_graft; 1673 + else if (req_change(n)) 1674 + goto create_n_graft2; 1675 + } 1686 1676 } 1687 1677 } 1688 1678 } else { ··· 1724 1698 NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag"); 1725 1699 return -ENOENT; 1726 1700 } 1701 + create_n_graft2: 1727 1702 if (clid == TC_H_INGRESS) { 1728 1703 if (dev_ingress_queue(dev)) { 1729 1704 q = qdisc_create(dev, dev_ingress_queue(dev),
+2 -2
net/sctp/socket.c
··· 99 99 100 100 static void sctp_enter_memory_pressure(struct sock *sk) 101 101 { 102 - sctp_memory_pressure = 1; 102 + WRITE_ONCE(sctp_memory_pressure, 1); 103 103 } 104 104 105 105 ··· 9479 9479 newinet->inet_rcv_saddr = inet->inet_rcv_saddr; 9480 9480 newinet->inet_dport = htons(asoc->peer.port); 9481 9481 newinet->pmtudisc = inet->pmtudisc; 9482 - newinet->inet_id = get_random_u16(); 9482 + atomic_set(&newinet->inet_id, get_random_u16()); 9483 9483 9484 9484 newinet->uc_ttl = inet->uc_ttl; 9485 9485 inet_set_bit(MC_LOOP, newsk);
+4 -5
net/sunrpc/xprtrdma/verbs.c
··· 935 935 if (!rep->rr_rdmabuf) 936 936 goto out_free; 937 937 938 - if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) 939 - goto out_free_regbuf; 940 - 941 938 rep->rr_cid.ci_completion_id = 942 939 atomic_inc_return(&r_xprt->rx_ep->re_completion_ids); 943 940 ··· 953 956 spin_unlock(&buf->rb_lock); 954 957 return rep; 955 958 956 - out_free_regbuf: 957 - rpcrdma_regbuf_free(rep->rr_rdmabuf); 958 959 out_free: 959 960 kfree(rep); 960 961 out: ··· 1358 1363 rep = rpcrdma_rep_create(r_xprt, temp); 1359 1364 if (!rep) 1360 1365 break; 1366 + if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) { 1367 + rpcrdma_rep_put(buf, rep); 1368 + break; 1369 + } 1361 1370 1362 1371 rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id; 1363 1372 trace_xprtrdma_post_recv(rep);
+1
rust/macros/vtable.rs
··· 74 74 const {gen_const_name}: bool = false;", 75 75 ) 76 76 .unwrap(); 77 + consts.insert(gen_const_name); 77 78 } 78 79 } else { 79 80 const_items = "const USE_VTABLE_ATTR: () = ();".to_owned();
+1 -1
security/selinux/ss/policydb.c
··· 2005 2005 if (!datum) 2006 2006 goto out; 2007 2007 2008 + datum->next = NULL; 2008 2009 *dst = datum; 2009 2010 2010 2011 /* ebitmap_read() will at least init the bitmap */ ··· 2018 2017 goto out; 2019 2018 2020 2019 datum->otype = le32_to_cpu(buf[0]); 2021 - datum->next = NULL; 2022 2020 2023 2021 dst = &datum->next; 2024 2022 }
+4
sound/pci/hda/patch_cs8409-tables.c
··· 550 550 SND_PCI_QUIRK(0x1028, 0x0C50, "Dolphin", CS8409_DOLPHIN), 551 551 SND_PCI_QUIRK(0x1028, 0x0C51, "Dolphin", CS8409_DOLPHIN), 552 552 SND_PCI_QUIRK(0x1028, 0x0C52, "Dolphin", CS8409_DOLPHIN), 553 + SND_PCI_QUIRK(0x1028, 0x0C73, "Dolphin", CS8409_DOLPHIN), 554 + SND_PCI_QUIRK(0x1028, 0x0C75, "Dolphin", CS8409_DOLPHIN), 555 + SND_PCI_QUIRK(0x1028, 0x0C7D, "Dolphin", CS8409_DOLPHIN), 556 + SND_PCI_QUIRK(0x1028, 0x0C7F, "Dolphin", CS8409_DOLPHIN), 553 557 {} /* terminator */ 554 558 }; 555 559
+16 -8
sound/pci/hda/patch_realtek.c
··· 9422 9422 SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC245_FIXUP_CS35L41_SPI_2), 9423 9423 SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC245_FIXUP_CS35L41_SPI_2), 9424 9424 SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC245_FIXUP_CS35L41_SPI_2), 9425 - SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC287_FIXUP_CS35L41_I2C_2), 9426 - SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC287_FIXUP_CS35L41_I2C_2), 9427 - SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC287_FIXUP_CS35L41_I2C_2), 9428 - SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC287_FIXUP_CS35L41_I2C_2), 9429 - SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis MLK 14 RPL-P", ALC287_FIXUP_CS35L41_I2C_2), 9425 + SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2), 9426 + SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2), 9427 + SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC245_FIXUP_CS35L41_SPI_2), 9428 + SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2), 9430 9429 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 9431 9430 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 9432 9431 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), ··· 9616 9617 SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 9617 9618 SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), 9618 9619 SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED), 9619 - SND_PCI_QUIRK(0x103c, 0x8c26, "HP HP EliteBook 800G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 9620 + SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 9621 + SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 9622 + SND_PCI_QUIRK(0x103c, 0x8c48, "HP 
EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 9623 + SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), 9624 + SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), 9625 + SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), 9626 + SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), 9620 9627 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 9621 9628 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 9622 9629 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), ··· 10643 10638 spec = codec->spec; 10644 10639 spec->gen.shared_mic_vref_pin = 0x18; 10645 10640 codec->power_save_node = 0; 10641 + spec->en_3kpull_low = true; 10646 10642 10647 10643 #ifdef CONFIG_PM 10648 10644 codec->patch_ops.suspend = alc269_suspend; ··· 10726 10720 spec->shutup = alc256_shutup; 10727 10721 spec->init_hook = alc256_init; 10728 10722 spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ 10729 - if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD) 10730 - spec->en_3kpull_low = true; 10723 + if (codec->core.vendor_id == 0x10ec0236 && 10724 + codec->bus->pci->vendor != PCI_VENDOR_ID_AMD) 10725 + spec->en_3kpull_low = false; 10731 10726 break; 10732 10727 case 0x10ec0257: 10733 10728 spec->codec_variant = ALC269_TYPE_ALC257; 10734 10729 spec->shutup = alc256_shutup; 10735 10730 spec->init_hook = alc256_init; 10736 10731 spec->gen.mixer_nid = 0; 10732 + spec->en_3kpull_low = false; 10737 10733 break; 10738 10734 case 0x10ec0215: 10739 10735 case 0x10ec0245:
+5 -4
sound/soc/codecs/max98363.c
··· 185 185 pm_runtime_get_noresume(dev); 186 186 187 187 ret = regmap_read(max98363->regmap, MAX98363_R21FF_REV_ID, &reg); 188 - if (!ret) { 188 + if (!ret) 189 189 dev_info(dev, "Revision ID: %X\n", reg); 190 - return ret; 191 - } 190 + else 191 + goto out; 192 192 193 193 if (max98363->first_hw_init) { 194 194 regcache_cache_bypass(max98363->regmap, false); ··· 198 198 max98363->first_hw_init = true; 199 199 max98363->hw_init = true; 200 200 201 + out: 201 202 pm_runtime_mark_last_busy(dev); 202 203 pm_runtime_put_autosuspend(dev); 203 204 204 - return 0; 205 + return ret; 205 206 } 206 207 207 208 #define MAX98363_RATES SNDRV_PCM_RATE_8000_192000
+12 -1
sound/soc/codecs/rt1308-sdw.c
··· 52 52 case 0x300a: 53 53 case 0xc000: 54 54 case 0xc710: 55 + case 0xcf01: 55 56 case 0xc860 ... 0xc863: 56 57 case 0xc870 ... 0xc873: 57 58 return true; ··· 214 213 { 215 214 struct rt1308_sdw_priv *rt1308 = dev_get_drvdata(dev); 216 215 int ret = 0; 217 - unsigned int tmp; 216 + unsigned int tmp, hibernation_flag; 218 217 219 218 if (rt1308->hw_init) 220 219 return 0; ··· 242 241 } 243 242 244 243 pm_runtime_get_noresume(&slave->dev); 244 + 245 + regmap_read(rt1308->regmap, 0xcf01, &hibernation_flag); 246 + if ((hibernation_flag != 0x00) && rt1308->first_hw_init) 247 + goto _preset_ready_; 245 248 246 249 /* sw reset */ 247 250 regmap_write(rt1308->regmap, RT1308_SDW_RESET, 0); ··· 287 282 regmap_write(rt1308->regmap, 0xc100, 0xd7); 288 283 regmap_write(rt1308->regmap, 0xc101, 0xd7); 289 284 285 + /* apply BQ params */ 286 + rt1308_apply_bq_params(rt1308); 287 + 288 + regmap_write(rt1308->regmap, 0xcf01, 0x01); 289 + 290 + _preset_ready_: 290 291 if (rt1308->first_hw_init) { 291 292 regcache_cache_bypass(rt1308->regmap, false); 292 293 regcache_mark_dirty(rt1308->regmap);
+2
sound/soc/codecs/rt5665.c
··· 4472 4472 struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component); 4473 4473 4474 4474 regmap_write(rt5665->regmap, RT5665_RESET, 0); 4475 + 4476 + regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies); 4475 4477 } 4476 4478 4477 4479 #ifdef CONFIG_PM
+2 -2
sound/soc/fsl/fsl_micfil.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 // Copyright 2018 NXP 3 3 4 4 #include <linux/bitfield.h> ··· 1254 1254 1255 1255 MODULE_AUTHOR("Cosmin-Gabriel Samoila <cosmin.samoila@nxp.com>"); 1256 1256 MODULE_DESCRIPTION("NXP PDM Microphone Interface (MICFIL) driver"); 1257 - MODULE_LICENSE("GPL v2"); 1257 + MODULE_LICENSE("Dual BSD/GPL");
+1 -1
sound/soc/fsl/fsl_micfil.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ 2 2 /* 3 3 * PDM Microphone Interface for the NXP i.MX SoC 4 4 * Copyright 2018 NXP
+1 -1
sound/soc/intel/boards/sof_sdw.c
··· 476 476 DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), 477 477 DMI_MATCH(DMI_PRODUCT_NAME, "Lunar Lake Client Platform"), 478 478 }, 479 - .driver_data = (void *)(RT711_JD2_100K), 479 + .driver_data = (void *)(RT711_JD2), 480 480 }, 481 481 {} 482 482 };
+3 -3
sound/soc/intel/boards/sof_sdw_cs42l42.c
··· 99 99 jack = &ctx->sdw_headset; 100 100 101 101 snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); 102 - snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); 103 - snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); 104 - snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); 102 + snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP); 103 + snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN); 104 + snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND); 105 105 106 106 ret = snd_soc_component_set_jack(component, jack, NULL); 107 107
+26 -16
sound/soc/meson/axg-tdm-formatter.c
··· 30 30 struct axg_tdm_stream *ts, 31 31 unsigned int offset) 32 32 { 33 - unsigned int val, ch = ts->channels; 34 - unsigned long mask; 35 - int i, j; 33 + unsigned int ch = ts->channels; 34 + u32 val[AXG_TDM_NUM_LANES]; 35 + int i, j, k; 36 + 37 + /* 38 + * We need to mimick the slot distribution used by the HW to keep the 39 + * channel placement consistent regardless of the number of channel 40 + * in the stream. This is why the odd algorithm below is used. 41 + */ 42 + memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES); 36 43 37 44 /* 38 45 * Distribute the channels of the stream over the available slots 39 - * of each TDM lane 46 + * of each TDM lane. We need to go over the 32 slots ... 40 47 */ 41 - for (i = 0; i < AXG_TDM_NUM_LANES; i++) { 42 - val = 0; 43 - mask = ts->mask[i]; 44 - 45 - for (j = find_first_bit(&mask, 32); 46 - (j < 32) && ch; 47 - j = find_next_bit(&mask, 32, j + 1)) { 48 - val |= 1 << j; 49 - ch -= 1; 48 + for (i = 0; (i < 32) && ch; i += 2) { 49 + /* ... of all the lanes ... */ 50 + for (j = 0; j < AXG_TDM_NUM_LANES; j++) { 51 + /* ... then distribute the channels in pairs */ 52 + for (k = 0; k < 2; k++) { 53 + if ((BIT(i + k) & ts->mask[j]) && ch) { 54 + val[j] |= BIT(i + k); 55 + ch -= 1; 56 + } 57 + } 50 58 } 51 - 52 - regmap_write(map, offset, val); 53 - offset += regmap_get_reg_stride(map); 54 59 } 55 60 56 61 /* ··· 66 61 if (WARN_ON(ch != 0)) { 67 62 pr_err("channel mask error\n"); 68 63 return -EINVAL; 64 + } 65 + 66 + for (i = 0; i < AXG_TDM_NUM_LANES; i++) { 67 + regmap_write(map, offset, val[i]); 68 + offset += regmap_get_reg_stride(map); 69 69 } 70 70 71 71 return 0;
+6 -2
sound/soc/soc-pcm.c
··· 38 38 switch (ret) { 39 39 case -EPROBE_DEFER: 40 40 case -ENOTSUPP: 41 + case -EINVAL: 41 42 break; 42 43 default: 43 44 dev_err(rtd->dev, ··· 2467 2466 2468 2467 /* there is no point preparing this FE if there are no BEs */ 2469 2468 if (list_empty(&fe->dpcm[stream].be_clients)) { 2470 - dev_err(fe->dev, "ASoC: no backend DAIs enabled for %s\n", 2471 - fe->dai_link->name); 2469 + /* dev_err_once() for visibility, dev_dbg() for debugging UCM profiles */ 2470 + dev_err_once(fe->dev, "ASoC: no backend DAIs enabled for %s, possibly missing ALSA mixer-based routing or UCM profile\n", 2471 + fe->dai_link->name); 2472 + dev_dbg(fe->dev, "ASoC: no backend DAIs enabled for %s\n", 2473 + fe->dai_link->name); 2472 2474 ret = -EINVAL; 2473 2475 goto out; 2474 2476 }
+10 -1
sound/soc/sof/intel/hda-dai-ops.c
··· 372 372 static int hda_ipc3_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai, 373 373 struct snd_pcm_substream *substream, int cmd) 374 374 { 375 + struct hdac_ext_stream *hext_stream = hda_get_hext_stream(sdev, cpu_dai, substream); 375 376 struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream); 376 377 377 378 switch (cmd) { ··· 380 379 case SNDRV_PCM_TRIGGER_STOP: 381 380 { 382 381 struct snd_sof_dai_config_data data = { 0 }; 382 + int ret; 383 383 384 384 data.dai_data = DMA_CHAN_INVALID; 385 - return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data); 385 + ret = hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data); 386 + if (ret < 0) 387 + return ret; 388 + 389 + if (cmd == SNDRV_PCM_TRIGGER_STOP) 390 + return hda_link_dma_cleanup(substream, hext_stream, cpu_dai); 391 + 392 + break; 386 393 } 387 394 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 388 395 return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_PAUSE, NULL);
+2 -3
sound/soc/sof/intel/hda-dai.c
··· 107 107 return sdai->platform_private; 108 108 } 109 109 110 - static int hda_link_dma_cleanup(struct snd_pcm_substream *substream, 111 - struct hdac_ext_stream *hext_stream, 112 - struct snd_soc_dai *cpu_dai) 110 + int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream, 111 + struct snd_soc_dai *cpu_dai) 113 112 { 114 113 const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai); 115 114 struct sof_intel_hda_stream *hda_stream;
+2
sound/soc/sof/intel/hda.h
··· 963 963 hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget); 964 964 int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags, 965 965 struct snd_sof_dai_config_data *data); 966 + int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream, 967 + struct snd_soc_dai *cpu_dai); 966 968 967 969 #endif
+1 -1
sound/soc/sof/ipc3.c
··· 1001 1001 1002 1002 ipc3_log_header(sdev->dev, "ipc rx", hdr->cmd); 1003 1003 1004 - if (hdr->size < sizeof(hdr) || hdr->size > SOF_IPC_MSG_MAX_SIZE) { 1004 + if (hdr->size < sizeof(*hdr) || hdr->size > SOF_IPC_MSG_MAX_SIZE) { 1005 1005 dev_err(sdev->dev, "The received message size is invalid: %u\n", 1006 1006 hdr->size); 1007 1007 return;
+3 -3
sound/soc/sof/ipc4-topology.c
··· 1731 1731 1732 1732 *ipc_config_size = ipc_size; 1733 1733 1734 + /* update pipeline memory usage */ 1735 + sof_ipc4_update_resource_usage(sdev, swidget, &copier_data->base_config); 1736 + 1734 1737 /* copy IPC data */ 1735 1738 memcpy(*ipc_config_data, (void *)copier_data, sizeof(*copier_data)); 1736 1739 if (gtw_cfg_config_length) ··· 1745 1742 memcpy(*ipc_config_data + sizeof(*copier_data) + 1746 1743 gtw_cfg_config_length, 1747 1744 &ipc4_copier->dma_config_tlv, dma_config_tlv_size); 1748 - 1749 - /* update pipeline memory usage */ 1750 - sof_ipc4_update_resource_usage(sdev, swidget, &copier_data->base_config); 1751 1745 1752 1746 return 0; 1753 1747 }
+29
sound/usb/quirks-table.h
··· 4507 4507 } 4508 4508 } 4509 4509 }, 4510 + { 4511 + /* Advanced modes of the Mythware XA001AU. 4512 + * For the standard mode, Mythware XA001AU has ID ffad:a001 4513 + */ 4514 + USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001), 4515 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { 4516 + .vendor_name = "Mythware", 4517 + .product_name = "XA001AU", 4518 + .ifnum = QUIRK_ANY_INTERFACE, 4519 + .type = QUIRK_COMPOSITE, 4520 + .data = (const struct snd_usb_audio_quirk[]) { 4521 + { 4522 + .ifnum = 0, 4523 + .type = QUIRK_IGNORE_INTERFACE, 4524 + }, 4525 + { 4526 + .ifnum = 1, 4527 + .type = QUIRK_AUDIO_STANDARD_INTERFACE, 4528 + }, 4529 + { 4530 + .ifnum = 2, 4531 + .type = QUIRK_AUDIO_STANDARD_INTERFACE, 4532 + }, 4533 + { 4534 + .ifnum = -1 4535 + } 4536 + } 4537 + } 4538 + }, 4510 4539 4511 4540 #undef USB_DEVICE_VENDOR_SPEC 4512 4541 #undef USB_AUDIO_DEVICE
+24
tools/arch/arm64/include/uapi/asm/bitsperlong.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + /* 3 + * Copyright (C) 2012 ARM Ltd. 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + #ifndef __ASM_BITSPERLONG_H 18 + #define __ASM_BITSPERLONG_H 19 + 20 + #define __BITS_PER_LONG 64 21 + 22 + #include <asm-generic/bitsperlong.h> 23 + 24 + #endif /* __ASM_BITSPERLONG_H */
+14
tools/arch/riscv/include/uapi/asm/bitsperlong.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2012 ARM Ltd. 4 + * Copyright (C) 2015 Regents of the University of California 5 + */ 6 + 7 + #ifndef _UAPI_ASM_RISCV_BITSPERLONG_H 8 + #define _UAPI_ASM_RISCV_BITSPERLONG_H 9 + 10 + #define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8) 11 + 12 + #include <asm-generic/bitsperlong.h> 13 + 14 + #endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
+7 -4
tools/objtool/arch/x86/decode.c
··· 824 824 825 825 bool arch_is_rethunk(struct symbol *sym) 826 826 { 827 - return !strcmp(sym->name, "__x86_return_thunk") || 828 - !strcmp(sym->name, "srso_untrain_ret") || 829 - !strcmp(sym->name, "srso_safe_ret") || 830 - !strcmp(sym->name, "__ret"); 827 + return !strcmp(sym->name, "__x86_return_thunk"); 828 + } 829 + 830 + bool arch_is_embedded_insn(struct symbol *sym) 831 + { 832 + return !strcmp(sym->name, "retbleed_return_thunk") || 833 + !strcmp(sym->name, "srso_safe_ret"); 831 834 }
+35 -10
tools/objtool/check.c
··· 389 389 if (!strcmp(sec->name, ".noinstr.text") || 390 390 !strcmp(sec->name, ".entry.text") || 391 391 !strcmp(sec->name, ".cpuidle.text") || 392 - !strncmp(sec->name, ".text.__x86.", 12)) 392 + !strncmp(sec->name, ".text..__x86.", 13)) 393 393 sec->noinstr = true; 394 394 395 395 /* ··· 455 455 return -1; 456 456 } 457 457 458 - if (func->return_thunk || func->alias != func) 458 + if (func->embedded_insn || func->alias != func) 459 459 continue; 460 460 461 461 if (!find_insn(file, sec, func->offset)) { ··· 1288 1288 return 0; 1289 1289 } 1290 1290 1291 + /* 1292 + * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol 1293 + * will be added to the .retpoline_sites section. 1294 + */ 1291 1295 __weak bool arch_is_retpoline(struct symbol *sym) 1292 1296 { 1293 1297 return false; 1294 1298 } 1295 1299 1300 + /* 1301 + * Symbols that replace INSN_RETURN, every (tail) call to such a symbol 1302 + * will be added to the .return_sites section. 1303 + */ 1296 1304 __weak bool arch_is_rethunk(struct symbol *sym) 1305 + { 1306 + return false; 1307 + } 1308 + 1309 + /* 1310 + * Symbols that are embedded inside other instructions, because sometimes crazy 1311 + * code exists. These are mostly ignored for validation purposes. 1312 + */ 1313 + __weak bool arch_is_embedded_insn(struct symbol *sym) 1297 1314 { 1298 1315 return false; 1299 1316 } ··· 1593 1576 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); 1594 1577 1595 1578 /* 1596 - * This is a special case for zen_untrain_ret(). 1579 + * This is a special case for retbleed_untrain_ret(). 1597 1580 * It jumps to __x86_return_thunk(), but objtool 1598 1581 * can't find the thunk's starting RET 1599 1582 * instruction, because the RET is also in the 1600 1583 * middle of another instruction. Objtool only 1601 1584 * knows about the outer instruction. 
1602 1585 */ 1603 - if (sym && sym->return_thunk) { 1586 + if (sym && sym->embedded_insn) { 1604 1587 add_return_call(file, insn, false); 1605 1588 continue; 1606 1589 } ··· 2519 2502 if (arch_is_rethunk(func)) 2520 2503 func->return_thunk = true; 2521 2504 2505 + if (arch_is_embedded_insn(func)) 2506 + func->embedded_insn = true; 2507 + 2522 2508 if (arch_ftrace_match(func->name)) 2523 2509 func->fentry = true; 2524 2510 ··· 2650 2630 return 0; 2651 2631 } 2652 2632 2653 - static bool is_fentry_call(struct instruction *insn) 2633 + static bool is_special_call(struct instruction *insn) 2654 2634 { 2655 - if (insn->type == INSN_CALL && 2656 - insn_call_dest(insn) && 2657 - insn_call_dest(insn)->fentry) 2658 - return true; 2635 + if (insn->type == INSN_CALL) { 2636 + struct symbol *dest = insn_call_dest(insn); 2637 + 2638 + if (!dest) 2639 + return false; 2640 + 2641 + if (dest->fentry || dest->embedded_insn) 2642 + return true; 2643 + } 2659 2644 2660 2645 return false; 2661 2646 } ··· 3661 3636 if (ret) 3662 3637 return ret; 3663 3638 3664 - if (opts.stackval && func && !is_fentry_call(insn) && 3639 + if (opts.stackval && func && !is_special_call(insn) && 3665 3640 !has_valid_stack_frame(&state)) { 3666 3641 WARN_INSN(insn, "call without frame pointer save/setup"); 3667 3642 return 1;
+1
tools/objtool/include/objtool/arch.h
··· 90 90 91 91 bool arch_is_retpoline(struct symbol *sym); 92 92 bool arch_is_rethunk(struct symbol *sym); 93 + bool arch_is_embedded_insn(struct symbol *sym); 93 94 94 95 int arch_rewrite_retpolines(struct objtool_file *file); 95 96
+1
tools/objtool/include/objtool/elf.h
··· 66 66 u8 fentry : 1; 67 67 u8 profiling_func : 1; 68 68 u8 warned : 1; 69 + u8 embedded_insn : 1; 69 70 struct list_head pv_target; 70 71 struct reloc *relocs; 71 72 };
+1 -3
tools/perf/util/thread-stack.c
··· 1038 1038 1039 1039 static bool is_x86_retpoline(const char *name) 1040 1040 { 1041 - const char *p = strstr(name, "__x86_indirect_thunk_"); 1042 - 1043 - return p == name || !strcmp(name, "__indirect_thunk_start"); 1041 + return strstr(name, "__x86_indirect_thunk_") == name; 1044 1042 } 1045 1043 1046 1044 /*
+3 -1
tools/testing/selftests/drivers/net/bonding/Makefile
··· 9 9 mode-1-recovery-updelay.sh \ 10 10 mode-2-recovery-updelay.sh \ 11 11 bond_options.sh \ 12 - bond-eth-type-change.sh 12 + bond-eth-type-change.sh \ 13 + bond_macvlan.sh 13 14 14 15 TEST_FILES := \ 15 16 lag_lib.sh \ 17 + bond_topo_2d1c.sh \ 16 18 bond_topo_3d1c.sh \ 17 19 net_forwarding_lib.sh 18 20
+2 -2
tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh
··· 57 57 58 58 # add ports 59 59 ip link set fbond master fab-br0 60 - ip link set veth1-bond down master fbond 61 - ip link set veth2-bond down master fbond 60 + ip link set veth1-bond master fbond 61 + ip link set veth2-bond master fbond 62 62 63 63 # bring up 64 64 ip link set veth1-end up
+99
tools/testing/selftests/drivers/net/bonding/bond_macvlan.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + # 4 + # Test macvlan over balance-alb 5 + 6 + lib_dir=$(dirname "$0") 7 + source ${lib_dir}/bond_topo_2d1c.sh 8 + 9 + m1_ns="m1-$(mktemp -u XXXXXX)" 10 + m2_ns="m1-$(mktemp -u XXXXXX)" 11 + m1_ip4="192.0.2.11" 12 + m1_ip6="2001:db8::11" 13 + m2_ip4="192.0.2.12" 14 + m2_ip6="2001:db8::12" 15 + 16 + cleanup() 17 + { 18 + ip -n ${m1_ns} link del macv0 19 + ip netns del ${m1_ns} 20 + ip -n ${m2_ns} link del macv0 21 + ip netns del ${m2_ns} 22 + 23 + client_destroy 24 + server_destroy 25 + gateway_destroy 26 + } 27 + 28 + check_connection() 29 + { 30 + local ns=${1} 31 + local target=${2} 32 + local message=${3:-"macvlan_over_bond"} 33 + RET=0 34 + 35 + 36 + ip netns exec ${ns} ping ${target} -c 4 -i 0.1 &>/dev/null 37 + check_err $? "ping failed" 38 + log_test "$mode: $message" 39 + } 40 + 41 + macvlan_over_bond() 42 + { 43 + local param="$1" 44 + RET=0 45 + 46 + # setup new bond mode 47 + bond_reset "${param}" 48 + 49 + ip -n ${s_ns} link add link bond0 name macv0 type macvlan mode bridge 50 + ip -n ${s_ns} link set macv0 netns ${m1_ns} 51 + ip -n ${m1_ns} link set dev macv0 up 52 + ip -n ${m1_ns} addr add ${m1_ip4}/24 dev macv0 53 + ip -n ${m1_ns} addr add ${m1_ip6}/24 dev macv0 54 + 55 + ip -n ${s_ns} link add link bond0 name macv0 type macvlan mode bridge 56 + ip -n ${s_ns} link set macv0 netns ${m2_ns} 57 + ip -n ${m2_ns} link set dev macv0 up 58 + ip -n ${m2_ns} addr add ${m2_ip4}/24 dev macv0 59 + ip -n ${m2_ns} addr add ${m2_ip6}/24 dev macv0 60 + 61 + sleep 2 62 + 63 + check_connection "${c_ns}" "${s_ip4}" "IPv4: client->server" 64 + check_connection "${c_ns}" "${s_ip6}" "IPv6: client->server" 65 + check_connection "${c_ns}" "${m1_ip4}" "IPv4: client->macvlan_1" 66 + check_connection "${c_ns}" "${m1_ip6}" "IPv6: client->macvlan_1" 67 + check_connection "${c_ns}" "${m2_ip4}" "IPv4: client->macvlan_2" 68 + check_connection "${c_ns}" "${m2_ip6}" "IPv6: client->macvlan_2" 69 + check_connection 
"${m1_ns}" "${m2_ip4}" "IPv4: macvlan_1->macvlan_2" 70 + check_connection "${m1_ns}" "${m2_ip6}" "IPv6: macvlan_1->macvlan_2" 71 + 72 + 73 + sleep 5 74 + 75 + check_connection "${s_ns}" "${c_ip4}" "IPv4: server->client" 76 + check_connection "${s_ns}" "${c_ip6}" "IPv6: server->client" 77 + check_connection "${m1_ns}" "${c_ip4}" "IPv4: macvlan_1->client" 78 + check_connection "${m1_ns}" "${c_ip6}" "IPv6: macvlan_1->client" 79 + check_connection "${m2_ns}" "${c_ip4}" "IPv4: macvlan_2->client" 80 + check_connection "${m2_ns}" "${c_ip6}" "IPv6: macvlan_2->client" 81 + check_connection "${m2_ns}" "${m1_ip4}" "IPv4: macvlan_2->macvlan_1" 82 + check_connection "${m2_ns}" "${m1_ip6}" "IPv6: macvlan_2->macvlan_1" 83 + 84 + ip -n ${c_ns} neigh flush dev eth0 85 + } 86 + 87 + trap cleanup EXIT 88 + 89 + setup_prepare 90 + ip netns add ${m1_ns} 91 + ip netns add ${m2_ns} 92 + 93 + modes="active-backup balance-tlb balance-alb" 94 + 95 + for mode in $modes; do 96 + macvlan_over_bond "mode $mode" 97 + done 98 + 99 + exit $EXIT_STATUS
-3
tools/testing/selftests/drivers/net/bonding/bond_options.sh
··· 9 9 num_grat_arp 10 10 " 11 11 12 - REQUIRE_MZ=no 13 - NUM_NETIFS=0 14 12 lib_dir=$(dirname "$0") 15 - source ${lib_dir}/net_forwarding_lib.sh 16 13 source ${lib_dir}/bond_topo_3d1c.sh 17 14 18 15 skip_prio()
+158
tools/testing/selftests/drivers/net/bonding/bond_topo_2d1c.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + # 4 + # Topology for Bond mode 1,5,6 testing 5 + # 6 + # +-------------------------+ 7 + # | bond0 | Server 8 + # | + | 192.0.2.1/24 9 + # | eth0 | eth1 | 2001:db8::1/24 10 + # | +---+---+ | 11 + # | | | | 12 + # +-------------------------+ 13 + # | | 14 + # +-------------------------+ 15 + # | | | | 16 + # | +---+-------+---+ | Gateway 17 + # | | br0 | | 192.0.2.254/24 18 + # | +-------+-------+ | 2001:db8::254/24 19 + # | | | 20 + # +-------------------------+ 21 + # | 22 + # +-------------------------+ 23 + # | | | Client 24 + # | + | 192.0.2.10/24 25 + # | eth0 | 2001:db8::10/24 26 + # +-------------------------+ 27 + 28 + REQUIRE_MZ=no 29 + NUM_NETIFS=0 30 + lib_dir=$(dirname "$0") 31 + source ${lib_dir}/net_forwarding_lib.sh 32 + 33 + s_ns="s-$(mktemp -u XXXXXX)" 34 + c_ns="c-$(mktemp -u XXXXXX)" 35 + g_ns="g-$(mktemp -u XXXXXX)" 36 + s_ip4="192.0.2.1" 37 + c_ip4="192.0.2.10" 38 + g_ip4="192.0.2.254" 39 + s_ip6="2001:db8::1" 40 + c_ip6="2001:db8::10" 41 + g_ip6="2001:db8::254" 42 + 43 + gateway_create() 44 + { 45 + ip netns add ${g_ns} 46 + ip -n ${g_ns} link add br0 type bridge 47 + ip -n ${g_ns} link set br0 up 48 + ip -n ${g_ns} addr add ${g_ip4}/24 dev br0 49 + ip -n ${g_ns} addr add ${g_ip6}/24 dev br0 50 + } 51 + 52 + gateway_destroy() 53 + { 54 + ip -n ${g_ns} link del br0 55 + ip netns del ${g_ns} 56 + } 57 + 58 + server_create() 59 + { 60 + ip netns add ${s_ns} 61 + ip -n ${s_ns} link add bond0 type bond mode active-backup miimon 100 62 + 63 + for i in $(seq 0 1); do 64 + ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns} 65 + 66 + ip -n ${g_ns} link set s${i} up 67 + ip -n ${g_ns} link set s${i} master br0 68 + ip -n ${s_ns} link set eth${i} master bond0 69 + 70 + tc -n ${g_ns} qdisc add dev s${i} clsact 71 + done 72 + 73 + ip -n ${s_ns} link set bond0 up 74 + ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0 75 + ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0 76 + sleep 2 
77 + } 78 + 79 + # Reset bond with new mode and options 80 + bond_reset() 81 + { 82 + # Count the eth link number in real-time as this function 83 + # maybe called from other topologies. 84 + local link_num=$(ip -n ${s_ns} -br link show | grep -c "^eth") 85 + local param="$1" 86 + link_num=$((link_num -1)) 87 + 88 + ip -n ${s_ns} link set bond0 down 89 + ip -n ${s_ns} link del bond0 90 + 91 + ip -n ${s_ns} link add bond0 type bond $param 92 + for i in $(seq 0 ${link_num}); do 93 + ip -n ${s_ns} link set eth$i master bond0 94 + done 95 + 96 + ip -n ${s_ns} link set bond0 up 97 + ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0 98 + ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0 99 + sleep 2 100 + } 101 + 102 + server_destroy() 103 + { 104 + # Count the eth link number in real-time as this function 105 + # maybe called from other topologies. 106 + local link_num=$(ip -n ${s_ns} -br link show | grep -c "^eth") 107 + link_num=$((link_num -1)) 108 + for i in $(seq 0 ${link_num}); do 109 + ip -n ${s_ns} link del eth${i} 110 + done 111 + ip netns del ${s_ns} 112 + } 113 + 114 + client_create() 115 + { 116 + ip netns add ${c_ns} 117 + ip -n ${c_ns} link add eth0 type veth peer name c0 netns ${g_ns} 118 + 119 + ip -n ${g_ns} link set c0 up 120 + ip -n ${g_ns} link set c0 master br0 121 + 122 + ip -n ${c_ns} link set eth0 up 123 + ip -n ${c_ns} addr add ${c_ip4}/24 dev eth0 124 + ip -n ${c_ns} addr add ${c_ip6}/24 dev eth0 125 + } 126 + 127 + client_destroy() 128 + { 129 + ip -n ${c_ns} link del eth0 130 + ip netns del ${c_ns} 131 + } 132 + 133 + setup_prepare() 134 + { 135 + gateway_create 136 + server_create 137 + client_create 138 + } 139 + 140 + cleanup() 141 + { 142 + pre_cleanup 143 + 144 + client_destroy 145 + server_destroy 146 + gateway_destroy 147 + } 148 + 149 + bond_check_connection() 150 + { 151 + local msg=${1:-"check connection"} 152 + 153 + sleep 2 154 + ip netns exec ${s_ns} ping ${c_ip4} -c5 -i 0.1 &>/dev/null 155 + check_err $? 
"${msg}: ping failed" 156 + ip netns exec ${s_ns} ping6 ${c_ip6} -c5 -i 0.1 &>/dev/null 157 + check_err $? "${msg}: ping6 failed" 158 + }
+8 -110
tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh
··· 25 25 # | eth0 | 2001:db8::10/24 26 26 # +-------------------------------------+ 27 27 28 - s_ns="s-$(mktemp -u XXXXXX)" 29 - c_ns="c-$(mktemp -u XXXXXX)" 30 - g_ns="g-$(mktemp -u XXXXXX)" 31 - s_ip4="192.0.2.1" 32 - c_ip4="192.0.2.10" 33 - g_ip4="192.0.2.254" 34 - s_ip6="2001:db8::1" 35 - c_ip6="2001:db8::10" 36 - g_ip6="2001:db8::254" 37 - 38 - gateway_create() 39 - { 40 - ip netns add ${g_ns} 41 - ip -n ${g_ns} link add br0 type bridge 42 - ip -n ${g_ns} link set br0 up 43 - ip -n ${g_ns} addr add ${g_ip4}/24 dev br0 44 - ip -n ${g_ns} addr add ${g_ip6}/24 dev br0 45 - } 46 - 47 - gateway_destroy() 48 - { 49 - ip -n ${g_ns} link del br0 50 - ip netns del ${g_ns} 51 - } 52 - 53 - server_create() 54 - { 55 - ip netns add ${s_ns} 56 - ip -n ${s_ns} link add bond0 type bond mode active-backup miimon 100 57 - 58 - for i in $(seq 0 2); do 59 - ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns} 60 - 61 - ip -n ${g_ns} link set s${i} up 62 - ip -n ${g_ns} link set s${i} master br0 63 - ip -n ${s_ns} link set eth${i} master bond0 64 - 65 - tc -n ${g_ns} qdisc add dev s${i} clsact 66 - done 67 - 68 - ip -n ${s_ns} link set bond0 up 69 - ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0 70 - ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0 71 - sleep 2 72 - } 73 - 74 - # Reset bond with new mode and options 75 - bond_reset() 76 - { 77 - local param="$1" 78 - 79 - ip -n ${s_ns} link set bond0 down 80 - ip -n ${s_ns} link del bond0 81 - 82 - ip -n ${s_ns} link add bond0 type bond $param 83 - for i in $(seq 0 2); do 84 - ip -n ${s_ns} link set eth$i master bond0 85 - done 86 - 87 - ip -n ${s_ns} link set bond0 up 88 - ip -n ${s_ns} addr add ${s_ip4}/24 dev bond0 89 - ip -n ${s_ns} addr add ${s_ip6}/24 dev bond0 90 - sleep 2 91 - } 92 - 93 - server_destroy() 94 - { 95 - for i in $(seq 0 2); do 96 - ip -n ${s_ns} link del eth${i} 97 - done 98 - ip netns del ${s_ns} 99 - } 100 - 101 - client_create() 102 - { 103 - ip netns add ${c_ns} 104 - ip -n ${c_ns} link add 
eth0 type veth peer name c0 netns ${g_ns} 105 - 106 - ip -n ${g_ns} link set c0 up 107 - ip -n ${g_ns} link set c0 master br0 108 - 109 - ip -n ${c_ns} link set eth0 up 110 - ip -n ${c_ns} addr add ${c_ip4}/24 dev eth0 111 - ip -n ${c_ns} addr add ${c_ip6}/24 dev eth0 112 - } 113 - 114 - client_destroy() 115 - { 116 - ip -n ${c_ns} link del eth0 117 - ip netns del ${c_ns} 118 - } 28 + source bond_topo_2d1c.sh 119 29 120 30 setup_prepare() 121 31 { 122 32 gateway_create 123 33 server_create 124 34 client_create 125 - } 126 35 127 - cleanup() 128 - { 129 - pre_cleanup 130 - 131 - client_destroy 132 - server_destroy 133 - gateway_destroy 134 - } 135 - 136 - bond_check_connection() 137 - { 138 - local msg=${1:-"check connection"} 139 - 140 - sleep 2 141 - ip netns exec ${s_ns} ping ${c_ip4} -c5 -i 0.1 &>/dev/null 142 - check_err $? "${msg}: ping failed" 143 - ip netns exec ${s_ns} ping6 ${c_ip6} -c5 -i 0.1 &>/dev/null 144 - check_err $? "${msg}: ping6 failed" 36 + # Add the extra device as we use 3 down links for bond0 37 + local i=2 38 + ip -n ${s_ns} link add eth${i} type veth peer name s${i} netns ${g_ns} 39 + ip -n ${g_ns} link set s${i} up 40 + ip -n ${g_ns} link set s${i} master br0 41 + ip -n ${s_ns} link set eth${i} master bond0 42 + tc -n ${g_ns} qdisc add dev s${i} clsact 145 43 }
+6 -10
tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh
··· 98 98 99 99 port_pool_test() 100 100 { 101 - local exp_max_occ=288 101 + local exp_max_occ=$(devlink_cell_size_get) 102 102 local max_occ 103 103 104 104 devlink sb occupancy clearmax $DEVLINK_DEV 105 105 106 - $MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ 106 + $MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ 107 107 -t ip -q 108 108 109 109 devlink sb occupancy snapshot $DEVLINK_DEV ··· 126 126 127 127 port_tc_ip_test() 128 128 { 129 - local exp_max_occ=288 129 + local exp_max_occ=$(devlink_cell_size_get) 130 130 local max_occ 131 131 132 132 devlink sb occupancy clearmax $DEVLINK_DEV 133 133 134 - $MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ 134 + $MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ 135 135 -t ip -q 136 136 137 137 devlink sb occupancy snapshot $DEVLINK_DEV ··· 154 154 155 155 port_tc_arp_test() 156 156 { 157 - local exp_max_occ=96 157 + local exp_max_occ=$(devlink_cell_size_get) 158 158 local max_occ 159 - 160 - if [[ $MLXSW_CHIP != "mlxsw_spectrum" ]]; then 161 - exp_max_occ=144 162 - fi 163 159 164 160 devlink sb occupancy clearmax $DEVLINK_DEV 165 161 166 - $MZ $h1 -c 1 -p 160 -a $h1mac -A 192.0.1.1 -t arp -q 162 + $MZ $h1 -c 1 -p 10 -a $h1mac -A 192.0.1.1 -t arp -q 167 163 168 164 devlink sb occupancy snapshot $DEVLINK_DEV 169 165
+2
tools/testing/selftests/net/.gitignore
··· 15 15 ipsec 16 16 ipv6_flowlabel 17 17 ipv6_flowlabel_mgr 18 + log.txt 18 19 msg_zerocopy 19 20 nettest 20 21 psock_fanout ··· 46 45 timestamping 47 46 tls 48 47 toeplitz 48 + tools 49 49 tun 50 50 txring_overwrite 51 51 txtimestamp