Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge remote-tracking branch 'torvalds/master' into perf/core

To pick fixes that went via perf/urgent.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+2614 -1646
+23 -1
Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
··· 84 84 - qcom,msm8939-pcnoc 85 85 - qcom,msm8939-snoc 86 86 - qcom,msm8996-a1noc 87 - - qcom,msm8996-a2noc 88 87 - qcom,msm8996-bimc 89 88 - qcom,msm8996-cnoc 90 89 - qcom,msm8996-pnoc ··· 184 185 185 186 required: 186 187 - power-domains 188 + 189 + - if: 190 + properties: 191 + compatible: 192 + contains: 193 + enum: 194 + - qcom,msm8996-a2noc 195 + 196 + then: 197 + properties: 198 + clock-names: 199 + items: 200 + - const: bus 201 + - const: bus_a 202 + - const: aggre2_ufs_axi 203 + - const: ufs_axi 204 + 205 + clocks: 206 + items: 207 + - description: Bus Clock 208 + - description: Bus A Clock 209 + - description: Aggregate2 NoC UFS AXI Clock 210 + - description: UFS AXI Clock 187 211 188 212 - if: 189 213 properties:
+4 -4
Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml
··· 2 2 # Copyright 2019 BayLibre, SAS 3 3 %YAML 1.2 4 4 --- 5 - $id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb2-phy.yaml#" 5 + $id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb2-phy.yaml#" 6 6 $schema: "http://devicetree.org/meta-schemas/core.yaml#" 7 7 8 8 title: Amlogic G12A USB2 PHY ··· 13 13 properties: 14 14 compatible: 15 15 enum: 16 - - amlogic,meson-g12a-usb2-phy 17 - - amlogic,meson-a1-usb2-phy 16 + - amlogic,g12a-usb2-phy 17 + - amlogic,a1-usb2-phy 18 18 19 19 reg: 20 20 maxItems: 1 ··· 68 68 examples: 69 69 - | 70 70 phy@36000 { 71 - compatible = "amlogic,meson-g12a-usb2-phy"; 71 + compatible = "amlogic,g12a-usb2-phy"; 72 72 reg = <0x36000 0x2000>; 73 73 clocks = <&xtal>; 74 74 clock-names = "xtal";
+3 -3
Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml
··· 2 2 # Copyright 2019 BayLibre, SAS 3 3 %YAML 1.2 4 4 --- 5 - $id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml#" 5 + $id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb3-pcie-phy.yaml#" 6 6 $schema: "http://devicetree.org/meta-schemas/core.yaml#" 7 7 8 8 title: Amlogic G12A USB3 + PCIE Combo PHY ··· 13 13 properties: 14 14 compatible: 15 15 enum: 16 - - amlogic,meson-g12a-usb3-pcie-phy 16 + - amlogic,g12a-usb3-pcie-phy 17 17 18 18 reg: 19 19 maxItems: 1 ··· 49 49 examples: 50 50 - | 51 51 phy@46000 { 52 - compatible = "amlogic,meson-g12a-usb3-pcie-phy"; 52 + compatible = "amlogic,g12a-usb3-pcie-phy"; 53 53 reg = <0x46000 0x2000>; 54 54 clocks = <&ref_clk>; 55 55 clock-names = "ref_clk";
-1
Documentation/devicetree/bindings/phy/qcom,usb-hs-28nm.yaml
··· 16 16 compatible: 17 17 enum: 18 18 - qcom,usb-hs-28nm-femtophy 19 - - qcom,usb-hs-28nm-mdm9607 20 19 21 20 reg: 22 21 maxItems: 1
+2 -3
Documentation/devicetree/bindings/soc/qcom/qcom,apr-services.yaml
··· 39 39 qcom,protection-domain: 40 40 $ref: /schemas/types.yaml#/definitions/string-array 41 41 description: | 42 - Protection domain service name and path for APR service 43 - possible values are:: 42 + Protection domain service name and path for APR service (if supported). 43 + Possible values are:: 44 44 "avs/audio", "msm/adsp/audio_pd". 45 45 "kernel/elf_loader", "msm/modem/wlan_pd". 46 46 "tms/servreg", "msm/adsp/audio_pd". ··· 49 49 50 50 required: 51 51 - reg 52 - - qcom,protection-domain 53 52 54 53 additionalProperties: true
+1 -1
Documentation/kbuild/makefiles.rst
··· 1042 1042 1043 1043 When executing "make clean", the file "crc32table.h" will be deleted. 1044 1044 Kbuild will assume files to be in the same relative directory as the 1045 - Makefile, except if prefixed with $(objtree). 1045 + Makefile. 1046 1046 1047 1047 To exclude certain files or directories from make clean, use the 1048 1048 $(no-clean-files) variable.
+22 -3
MAINTAINERS
··· 383 383 M: Robert Moore <robert.moore@intel.com> 384 384 M: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com> 385 385 L: linux-acpi@vger.kernel.org 386 - L: devel@acpica.org 386 + L: acpica-devel@lists.linuxfoundation.org 387 387 S: Supported 388 388 W: https://acpica.org/ 389 389 W: https://github.com/acpica/acpica/ ··· 1104 1104 F: arch/arm64/boot/dts/amd/ 1105 1105 1106 1106 AMD XGBE DRIVER 1107 - M: Tom Lendacky <thomas.lendacky@amd.com> 1108 1107 M: "Shyam Sundar S K" <Shyam-sundar.S-k@amd.com> 1109 1108 L: netdev@vger.kernel.org 1110 1109 S: Supported ··· 9298 9299 9299 9300 HISILICON DMA DRIVER 9300 9301 M: Zhou Wang <wangzhou1@hisilicon.com> 9301 - M: Jie Hai <haijie1@hisilicon.com> 9302 + M: Jie Hai <haijie1@huawei.com> 9302 9303 L: dmaengine@vger.kernel.org 9303 9304 S: Maintained 9304 9305 F: drivers/dma/hisi_dma.c ··· 15749 15750 W: https://wireless.wiki.kernel.org/en/users/Drivers/p54 15750 15751 F: drivers/net/wireless/intersil/p54/ 15751 15752 15753 + PACKET SOCKETS 15754 + M: Willem de Bruijn <willemdebruijn.kernel@gmail.com> 15755 + S: Maintained 15756 + F: include/uapi/linux/if_packet.h 15757 + F: net/packet/af_packet.c 15758 + 15752 15759 PACKING 15753 15760 M: Vladimir Oltean <olteanv@gmail.com> 15754 15761 L: netdev@vger.kernel.org ··· 19331 19326 S: Orphan 19332 19327 F: sound/soc/uniphier/ 19333 19328 19329 + SOCKET TIMESTAMPING 19330 + M: Willem de Bruijn <willemdebruijn.kernel@gmail.com> 19331 + S: Maintained 19332 + F: Documentation/networking/timestamping.rst 19333 + F: include/uapi/linux/net_tstamp.h 19334 + F: tools/testing/selftests/net/so_txtime.c 19335 + 19334 19336 SOEKRIS NET48XX LED SUPPORT 19335 19337 M: Chris Boot <bootc@bootc.net> 19336 19338 S: Maintained ··· 21757 21745 T: git git://linuxtv.org/media_tree.git 21758 21746 F: Documentation/admin-guide/media/zr364xx* 21759 21747 F: drivers/staging/media/deprecated/zr364xx/ 21748 + 21749 + USER DATAGRAM PROTOCOL (UDP) 21750 + M: Willem de Bruijn 
<willemdebruijn.kernel@gmail.com> 21751 + S: Maintained 21752 + F: include/linux/udp.h 21753 + F: net/ipv4/udp.c 21754 + F: net/ipv6/udp.c 21760 21755 21761 21756 USER-MODE LINUX (UML) 21762 21757 M: Richard Weinberger <richard@nod.at>
+15 -2
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 2 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Hurr durr I'ma ninja sloth 7 7 8 8 # *DOCUMENTATION* ··· 549 549 CFLAGS_KERNEL = 550 550 RUSTFLAGS_KERNEL = 551 551 AFLAGS_KERNEL = 552 - export LDFLAGS_vmlinux = 552 + LDFLAGS_vmlinux = 553 553 554 554 # Use USERINCLUDE when you must reference the UAPI directories only. 555 555 USERINCLUDE := \ ··· 1248 1248 @: 1249 1249 1250 1250 PHONY += vmlinux 1251 + # LDFLAGS_vmlinux in the top Makefile defines linker flags for the top vmlinux, 1252 + # not for decompressors. LDFLAGS_vmlinux in arch/*/boot/compressed/Makefile is 1253 + # unrelated; the decompressors just happen to have the same base name, 1254 + # arch/*/boot/compressed/vmlinux. 1255 + # Export LDFLAGS_vmlinux only to scripts/Makefile.vmlinux. 1256 + # 1257 + # _LDFLAGS_vmlinux is a workaround for the 'private export' bug: 1258 + # https://savannah.gnu.org/bugs/?61463 1259 + # For Make > 4.4, the following simple code will work: 1260 + # vmlinux: private export LDFLAGS_vmlinux := $(LDFLAGS_vmlinux) 1261 + vmlinux: private _LDFLAGS_vmlinux := $(LDFLAGS_vmlinux) 1262 + vmlinux: export LDFLAGS_vmlinux = $(_LDFLAGS_vmlinux) 1251 1263 vmlinux: vmlinux.o $(KBUILD_LDS) modpost 1252 1264 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vmlinux 1253 1265 ··· 1545 1533 # *.ko are usually independent of vmlinux, but CONFIG_DEBUG_INFOBTF_MODULES 1546 1534 # is an exception. 1547 1535 ifdef CONFIG_DEBUG_INFO_BTF_MODULES 1536 + KBUILD_BUILTIN := 1 1548 1537 modules: vmlinux 1549 1538 endif 1550 1539
+2 -2
arch/arm/boot/dts/armada-38x.dtsi
··· 304 304 }; 305 305 306 306 gpio0: gpio@18100 { 307 - compatible = "marvell,armadaxp-gpio", 307 + compatible = "marvell,armada-370-gpio", 308 308 "marvell,orion-gpio"; 309 309 reg = <0x18100 0x40>, <0x181c0 0x08>; 310 310 reg-names = "gpio", "pwm"; ··· 323 323 }; 324 324 325 325 gpio1: gpio@18140 { 326 - compatible = "marvell,armadaxp-gpio", 326 + compatible = "marvell,armada-370-gpio", 327 327 "marvell,orion-gpio"; 328 328 reg = <0x18140 0x40>, <0x181c8 0x08>; 329 329 reg-names = "gpio", "pwm";
+2 -2
arch/arm/boot/dts/armada-39x.dtsi
··· 213 213 }; 214 214 215 215 gpio0: gpio@18100 { 216 - compatible = "marvell,armadaxp-gpio", "marvell,orion-gpio"; 216 + compatible = "marvell,orion-gpio"; 217 217 reg = <0x18100 0x40>; 218 218 ngpios = <32>; 219 219 gpio-controller; ··· 227 227 }; 228 228 229 229 gpio1: gpio@18140 { 230 - compatible = "marvell,armadaxp-gpio", "marvell,orion-gpio"; 230 + compatible = "marvell,orion-gpio"; 231 231 reg = <0x18140 0x40>; 232 232 ngpios = <28>; 233 233 gpio-controller;
+1 -1
arch/arm/boot/dts/imx53-ppd.dts
··· 488 488 scl-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>; 489 489 status = "okay"; 490 490 491 - i2c-switch@70 { 491 + i2c-mux@70 { 492 492 compatible = "nxp,pca9547"; 493 493 #address-cells = <1>; 494 494 #size-cells = <0>;
-1
arch/arm/boot/dts/imx6qdl-gw560x.dtsi
··· 632 632 &uart1 { 633 633 pinctrl-names = "default"; 634 634 pinctrl-0 = <&pinctrl_uart1>; 635 - uart-has-rtscts; 636 635 rts-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; 637 636 status = "okay"; 638 637 };
+1 -1
arch/arm/boot/dts/imx6ul-pico-dwarf.dts
··· 32 32 }; 33 33 34 34 &i2c2 { 35 - clock_frequency = <100000>; 35 + clock-frequency = <100000>; 36 36 pinctrl-names = "default"; 37 37 pinctrl-0 = <&pinctrl_i2c2>; 38 38 status = "okay";
+2 -2
arch/arm/boot/dts/imx7d-pico-dwarf.dts
··· 32 32 }; 33 33 34 34 &i2c1 { 35 - clock_frequency = <100000>; 35 + clock-frequency = <100000>; 36 36 pinctrl-names = "default"; 37 37 pinctrl-0 = <&pinctrl_i2c1>; 38 38 status = "okay"; ··· 52 52 }; 53 53 54 54 &i2c4 { 55 - clock_frequency = <100000>; 55 + clock-frequency = <100000>; 56 56 pinctrl-names = "default"; 57 57 pinctrl-0 = <&pinctrl_i2c1>; 58 58 status = "okay";
+2 -2
arch/arm/boot/dts/imx7d-pico-nymph.dts
··· 43 43 }; 44 44 45 45 &i2c1 { 46 - clock_frequency = <100000>; 46 + clock-frequency = <100000>; 47 47 pinctrl-names = "default"; 48 48 pinctrl-0 = <&pinctrl_i2c1>; 49 49 status = "okay"; ··· 64 64 }; 65 65 66 66 &i2c2 { 67 - clock_frequency = <100000>; 67 + clock-frequency = <100000>; 68 68 pinctrl-names = "default"; 69 69 pinctrl-0 = <&pinctrl_i2c2>; 70 70 status = "okay";
+11 -11
arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
··· 19 19 serial@f995e000 { 20 20 status = "okay"; 21 21 }; 22 - 23 - sdhci@f9824900 { 24 - bus-width = <8>; 25 - non-removable; 26 - status = "okay"; 27 - }; 28 - 29 - sdhci@f98a4900 { 30 - cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>; 31 - bus-width = <4>; 32 - }; 33 22 }; 23 + }; 24 + 25 + &sdhc_1 { 26 + bus-width = <8>; 27 + non-removable; 28 + status = "okay"; 29 + }; 30 + 31 + &sdhc_2 { 32 + cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>; 33 + bus-width = <4>; 34 34 };
+2 -2
arch/arm/boot/dts/qcom-apq8084.dtsi
··· 421 421 status = "disabled"; 422 422 }; 423 423 424 - mmc@f9824900 { 424 + sdhc_1: mmc@f9824900 { 425 425 compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4"; 426 426 reg = <0xf9824900 0x11c>, <0xf9824000 0x800>; 427 427 reg-names = "hc", "core"; ··· 434 434 status = "disabled"; 435 435 }; 436 436 437 - mmc@f98a4900 { 437 + sdhc_2: mmc@f98a4900 { 438 438 compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4"; 439 439 reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>; 440 440 reg-names = "hc", "core";
+1 -1
arch/arm/boot/dts/sam9x60.dtsi
··· 564 564 mpddrc: mpddrc@ffffe800 { 565 565 compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc"; 566 566 reg = <0xffffe800 0x200>; 567 - clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>; 567 + clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>; 568 568 clock-names = "ddrck", "mpddr"; 569 569 }; 570 570
+6 -2
arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
··· 101 101 102 102 &qspi { 103 103 pinctrl-names = "default", "sleep"; 104 - pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>; 105 - pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>; 104 + pinctrl-0 = <&qspi_clk_pins_a 105 + &qspi_bk1_pins_a 106 + &qspi_cs1_pins_a>; 107 + pinctrl-1 = <&qspi_clk_sleep_pins_a 108 + &qspi_bk1_sleep_pins_a 109 + &qspi_cs1_sleep_pins_a>; 106 110 reg = <0x58003000 0x1000>, <0x70000000 0x4000000>; 107 111 #address-cells = <1>; 108 112 #size-cells = <0>;
+6 -2
arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
··· 391 391 392 392 &qspi { 393 393 pinctrl-names = "default", "sleep"; 394 - pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>; 395 - pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>; 394 + pinctrl-0 = <&qspi_clk_pins_a 395 + &qspi_bk1_pins_a 396 + &qspi_cs1_pins_a>; 397 + pinctrl-1 = <&qspi_clk_sleep_pins_a 398 + &qspi_bk1_sleep_pins_a 399 + &qspi_cs1_sleep_pins_a>; 396 400 reg = <0x58003000 0x1000>, <0x70000000 0x4000000>; 397 401 #address-cells = <1>; 398 402 #size-cells = <0>;
+6 -2
arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
··· 428 428 429 429 &qspi { 430 430 pinctrl-names = "default", "sleep"; 431 - pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>; 432 - pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>; 431 + pinctrl-0 = <&qspi_clk_pins_a 432 + &qspi_bk1_pins_a 433 + &qspi_cs1_pins_a>; 434 + pinctrl-1 = <&qspi_clk_sleep_pins_a 435 + &qspi_bk1_sleep_pins_a 436 + &qspi_cs1_sleep_pins_a>; 433 437 reg = <0x58003000 0x1000>, <0x70000000 0x4000000>; 434 438 #address-cells = <1>; 435 439 #size-cells = <0>;
+6 -2
arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
··· 247 247 248 248 &qspi { 249 249 pinctrl-names = "default", "sleep"; 250 - pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>; 251 - pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>; 250 + pinctrl-0 = <&qspi_clk_pins_a 251 + &qspi_bk1_pins_a 252 + &qspi_cs1_pins_a>; 253 + pinctrl-1 = <&qspi_clk_sleep_pins_a 254 + &qspi_bk1_sleep_pins_a 255 + &qspi_cs1_sleep_pins_a>; 252 256 reg = <0x58003000 0x1000>, <0x70000000 0x200000>; 253 257 #address-cells = <1>; 254 258 #size-cells = <0>;
+1 -1
arch/arm/boot/dts/vf610-zii-dev-rev-b.dts
··· 345 345 }; 346 346 347 347 &i2c2 { 348 - tca9548@70 { 348 + i2c-mux@70 { 349 349 compatible = "nxp,pca9548"; 350 350 pinctrl-0 = <&pinctrl_i2c_mux_reset>; 351 351 pinctrl-names = "default";
+1 -1
arch/arm/boot/dts/vf610-zii-dev-rev-c.dts
··· 340 340 }; 341 341 342 342 &i2c2 { 343 - tca9548@70 { 343 + i2c-mux@70 { 344 344 compatible = "nxp,pca9548"; 345 345 pinctrl-0 = <&pinctrl_i2c_mux_reset>; 346 346 pinctrl-names = "default";
-1
arch/arm/mach-footbridge/isa-rtc.c
··· 20 20 21 21 #include <linux/init.h> 22 22 #include <linux/mc146818rtc.h> 23 - #include <linux/bcd.h> 24 23 #include <linux/io.h> 25 24 26 25 #include "common.h"
+1
arch/arm/mach-imx/cpu-imx25.c
··· 23 23 24 24 np = of_find_compatible_node(NULL, NULL, "fsl,imx25-iim"); 25 25 iim_base = of_iomap(np, 0); 26 + of_node_put(np); 26 27 BUG_ON(!iim_base); 27 28 rev = readl(iim_base + MXC_IIMSREV); 28 29 iounmap(iim_base);
+1
arch/arm/mach-imx/cpu-imx27.c
··· 28 28 29 29 np = of_find_compatible_node(NULL, NULL, "fsl,imx27-ccm"); 30 30 ccm_base = of_iomap(np, 0); 31 + of_node_put(np); 31 32 BUG_ON(!ccm_base); 32 33 /* 33 34 * now we have access to the IO registers. As we need
+1
arch/arm/mach-imx/cpu-imx31.c
··· 39 39 40 40 np = of_find_compatible_node(NULL, NULL, "fsl,imx31-iim"); 41 41 iim_base = of_iomap(np, 0); 42 + of_node_put(np); 42 43 BUG_ON(!iim_base); 43 44 44 45 /* read SREV register from IIM module */
+1
arch/arm/mach-imx/cpu-imx35.c
··· 21 21 22 22 np = of_find_compatible_node(NULL, NULL, "fsl,imx35-iim"); 23 23 iim_base = of_iomap(np, 0); 24 + of_node_put(np); 24 25 BUG_ON(!iim_base); 25 26 26 27 rev = imx_readl(iim_base + MXC_IIMSREV);
+1
arch/arm/mach-imx/cpu-imx5.c
··· 28 28 29 29 np = of_find_compatible_node(NULL, NULL, compat); 30 30 iim_base = of_iomap(np, 0); 31 + of_node_put(np); 31 32 WARN_ON(!iim_base); 32 33 33 34 srev = readl(iim_base + IIM_SREV) & 0xff;
+1 -4
arch/arm/mach-omap1/Kconfig
··· 4 4 depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 5 5 depends on CPU_LITTLE_ENDIAN 6 6 depends on ATAGS 7 + select ARCH_OMAP 7 8 select ARCH_HAS_HOLES_MEMORYMODEL 8 9 select ARCH_OMAP 9 10 select CLKSRC_MMIO ··· 45 44 select ARCH_OMAP_OTG 46 45 select CPU_ARM926T 47 46 select OMAP_DM_TIMER 48 - 49 - config ARCH_OMAP1_ANY 50 - select ARCH_OMAP 51 - def_bool ARCH_OMAP730 || ARCH_OMAP850 || ARCH_OMAP15XX || ARCH_OMAP16XX 52 47 53 48 config ARCH_OMAP 54 49 bool
-4
arch/arm/mach-omap1/Makefile
··· 3 3 # Makefile for the linux kernel. 4 4 # 5 5 6 - ifdef CONFIG_ARCH_OMAP1_ANY 7 - 8 6 # Common support 9 7 obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \ 10 8 serial.o devices.o dma.o omap-dma.o fb.o ··· 57 59 obj-$(CONFIG_ARCH_OMAP850) += gpio7xx.o 58 60 obj-$(CONFIG_ARCH_OMAP15XX) += gpio15xx.o 59 61 obj-$(CONFIG_ARCH_OMAP16XX) += gpio16xx.o 60 - 61 - endif
+1
arch/arm/mach-omap1/gpio15xx.c
··· 11 11 #include <linux/gpio.h> 12 12 #include <linux/platform_data/gpio-omap.h> 13 13 #include <linux/soc/ti/omap1-soc.h> 14 + #include <asm/irq.h> 14 15 15 16 #include "irqs.h" 16 17
+15 -17
arch/arm/mach-omap1/io.c
··· 22 22 * The machine specific code may provide the extra mapping besides the 23 23 * default mapping provided here. 24 24 */ 25 - static struct map_desc omap_io_desc[] __initdata = { 25 + #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850) 26 + static struct map_desc omap7xx_io_desc[] __initdata = { 26 27 { 27 28 .virtual = OMAP1_IO_VIRT, 28 29 .pfn = __phys_to_pfn(OMAP1_IO_PHYS), 29 30 .length = OMAP1_IO_SIZE, 30 31 .type = MT_DEVICE 31 - } 32 - }; 33 - 34 - #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850) 35 - static struct map_desc omap7xx_io_desc[] __initdata = { 32 + }, 36 33 { 37 34 .virtual = OMAP7XX_DSP_BASE, 38 35 .pfn = __phys_to_pfn(OMAP7XX_DSP_START), ··· 47 50 #ifdef CONFIG_ARCH_OMAP15XX 48 51 static struct map_desc omap1510_io_desc[] __initdata = { 49 52 { 53 + .virtual = OMAP1_IO_VIRT, 54 + .pfn = __phys_to_pfn(OMAP1_IO_PHYS), 55 + .length = OMAP1_IO_SIZE, 56 + .type = MT_DEVICE 57 + }, 58 + { 50 59 .virtual = OMAP1510_DSP_BASE, 51 60 .pfn = __phys_to_pfn(OMAP1510_DSP_START), 52 61 .length = OMAP1510_DSP_SIZE, ··· 69 66 #if defined(CONFIG_ARCH_OMAP16XX) 70 67 static struct map_desc omap16xx_io_desc[] __initdata = { 71 68 { 69 + .virtual = OMAP1_IO_VIRT, 70 + .pfn = __phys_to_pfn(OMAP1_IO_PHYS), 71 + .length = OMAP1_IO_SIZE, 72 + .type = MT_DEVICE 73 + }, 74 + { 72 75 .virtual = OMAP16XX_DSP_BASE, 73 76 .pfn = __phys_to_pfn(OMAP16XX_DSP_START), 74 77 .length = OMAP16XX_DSP_SIZE, ··· 88 79 }; 89 80 #endif 90 81 91 - /* 92 - * Maps common IO regions for omap1 93 - */ 94 - static void __init omap1_map_common_io(void) 95 - { 96 - iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc)); 97 - } 98 - 99 82 #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850) 100 83 void __init omap7xx_map_io(void) 101 84 { 102 - omap1_map_common_io(); 103 85 iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc)); 104 86 } 105 87 #endif ··· 98 98 #ifdef CONFIG_ARCH_OMAP15XX 99 99 void __init omap15xx_map_io(void) 100 100 { 
101 - omap1_map_common_io(); 102 101 iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc)); 103 102 } 104 103 #endif ··· 105 106 #if defined(CONFIG_ARCH_OMAP16XX) 106 107 void __init omap16xx_map_io(void) 107 108 { 108 - omap1_map_common_io(); 109 109 iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc)); 110 110 } 111 111 #endif
-21
arch/arm/mach-omap1/mcbsp.c
··· 89 89 #define OMAP1610_MCBSP2_BASE 0xfffb1000 90 90 #define OMAP1610_MCBSP3_BASE 0xe1017000 91 91 92 - #if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850) 93 92 struct resource omap7xx_mcbsp_res[][6] = { 94 93 { 95 94 { ··· 158 159 }; 159 160 #define OMAP7XX_MCBSP_RES_SZ ARRAY_SIZE(omap7xx_mcbsp_res[1]) 160 161 #define OMAP7XX_MCBSP_COUNT ARRAY_SIZE(omap7xx_mcbsp_res) 161 - #else 162 - #define omap7xx_mcbsp_res_0 NULL 163 - #define omap7xx_mcbsp_pdata NULL 164 - #define OMAP7XX_MCBSP_RES_SZ 0 165 - #define OMAP7XX_MCBSP_COUNT 0 166 - #endif 167 162 168 - #ifdef CONFIG_ARCH_OMAP15XX 169 163 struct resource omap15xx_mcbsp_res[][6] = { 170 164 { 171 165 { ··· 258 266 }; 259 267 #define OMAP15XX_MCBSP_RES_SZ ARRAY_SIZE(omap15xx_mcbsp_res[1]) 260 268 #define OMAP15XX_MCBSP_COUNT ARRAY_SIZE(omap15xx_mcbsp_res) 261 - #else 262 - #define omap15xx_mcbsp_res_0 NULL 263 - #define omap15xx_mcbsp_pdata NULL 264 - #define OMAP15XX_MCBSP_RES_SZ 0 265 - #define OMAP15XX_MCBSP_COUNT 0 266 - #endif 267 269 268 - #ifdef CONFIG_ARCH_OMAP16XX 269 270 struct resource omap16xx_mcbsp_res[][6] = { 270 271 { 271 272 { ··· 358 373 }; 359 374 #define OMAP16XX_MCBSP_RES_SZ ARRAY_SIZE(omap16xx_mcbsp_res[1]) 360 375 #define OMAP16XX_MCBSP_COUNT ARRAY_SIZE(omap16xx_mcbsp_res) 361 - #else 362 - #define omap16xx_mcbsp_res_0 NULL 363 - #define omap16xx_mcbsp_pdata NULL 364 - #define OMAP16XX_MCBSP_RES_SZ 0 365 - #define OMAP16XX_MCBSP_COUNT 0 366 - #endif 367 376 368 377 static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count, 369 378 struct omap_mcbsp_platform_data *config, int size)
-7
arch/arm/mach-omap1/pm.h
··· 106 106 #define OMAP7XX_IDLECT3 0xfffece24 107 107 #define OMAP7XX_IDLE_LOOP_REQUEST 0x0C00 108 108 109 - #if !defined(CONFIG_ARCH_OMAP730) && \ 110 - !defined(CONFIG_ARCH_OMAP850) && \ 111 - !defined(CONFIG_ARCH_OMAP15XX) && \ 112 - !defined(CONFIG_ARCH_OMAP16XX) 113 - #warning "Power management for this processor not implemented yet" 114 - #endif 115 - 116 109 #ifndef __ASSEMBLER__ 117 110 118 111 #include <linux/clk.h>
+2
arch/arm/mach-pxa/Kconfig
··· 45 45 config MACH_PXA3XX_DT 46 46 bool "Support PXA3xx platforms from device tree" 47 47 select CPU_PXA300 48 + select CPU_PXA310 49 + select CPU_PXA320 48 50 select PINCTRL 49 51 select POWER_SUPPLY 50 52 select PXA3xx
+2 -6
arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
··· 131 131 }; 132 132 133 133 &usb { 134 - phys = <&usb2_phy1>; 135 - phy-names = "usb2-phy1"; 136 - }; 137 - 138 - &usb2_phy0 { 139 - status = "disabled"; 134 + phys = <&usb2_phy0>, <&usb2_phy1>; 135 + phy-names = "usb2-phy0", "usb2-phy1"; 140 136 };
+1 -1
arch/arm64/boot/dts/freescale/fsl-ls1012a-qds.dts
··· 110 110 &i2c0 { 111 111 status = "okay"; 112 112 113 - pca9547@77 { 113 + i2c-mux@77 { 114 114 compatible = "nxp,pca9547"; 115 115 reg = <0x77>; 116 116 #address-cells = <1>;
+1 -1
arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts
··· 89 89 &i2c0 { 90 90 status = "okay"; 91 91 92 - pca9547@77 { 92 + i2c-mux@77 { 93 93 compatible = "nxp,pca9547"; 94 94 reg = <0x77>; 95 95 #address-cells = <1>;
+1 -1
arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts
··· 88 88 &i2c0 { 89 89 status = "okay"; 90 90 91 - pca9547@77 { 91 + i2c-mux@77 { 92 92 compatible = "nxp,pca9547"; 93 93 reg = <0x77>; 94 94 #address-cells = <1>;
+1 -1
arch/arm64/boot/dts/freescale/fsl-ls1088a-qds.dts
··· 53 53 &i2c0 { 54 54 status = "okay"; 55 55 56 - i2c-switch@77 { 56 + i2c-mux@77 { 57 57 compatible = "nxp,pca9547"; 58 58 reg = <0x77>; 59 59 #address-cells = <1>;
+1 -1
arch/arm64/boot/dts/freescale/fsl-ls1088a-rdb.dts
··· 136 136 &i2c0 { 137 137 status = "okay"; 138 138 139 - i2c-switch@77 { 139 + i2c-mux@77 { 140 140 compatible = "nxp,pca9547"; 141 141 reg = <0x77>; 142 142 #address-cells = <1>;
+1 -1
arch/arm64/boot/dts/freescale/fsl-ls1088a-ten64.dts
··· 245 245 &i2c3 { 246 246 status = "okay"; 247 247 248 - i2c-switch@70 { 248 + i2c-mux@70 { 249 249 compatible = "nxp,pca9540"; 250 250 #address-cells = <1>; 251 251 #size-cells = <0>;
+1 -1
arch/arm64/boot/dts/freescale/fsl-ls208xa-qds.dtsi
··· 103 103 104 104 &i2c0 { 105 105 status = "okay"; 106 - pca9547@77 { 106 + i2c-mux@77 { 107 107 compatible = "nxp,pca9547"; 108 108 reg = <0x77>; 109 109 #address-cells = <1>;
+1 -1
arch/arm64/boot/dts/freescale/fsl-ls208xa-rdb.dtsi
··· 44 44 45 45 &i2c0 { 46 46 status = "okay"; 47 - pca9547@75 { 47 + i2c-mux@75 { 48 48 compatible = "nxp,pca9547"; 49 49 reg = <0x75>; 50 50 #address-cells = <1>;
+1 -1
arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
··· 54 54 &i2c0 { 55 55 status = "okay"; 56 56 57 - i2c-switch@77 { 57 + i2c-mux@77 { 58 58 compatible = "nxp,pca9547"; 59 59 #address-cells = <1>; 60 60 #size-cells = <0>;
+2 -2
arch/arm64/boot/dts/freescale/imx8mm-beacon-baseboard.dtsi
··· 120 120 &ecspi2 { 121 121 pinctrl-names = "default"; 122 122 pinctrl-0 = <&pinctrl_espi2>; 123 - cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; 123 + cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; 124 124 status = "okay"; 125 125 126 126 eeprom@0 { ··· 316 316 MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82 317 317 MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82 318 318 MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82 319 - MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41 319 + MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41 320 320 >; 321 321 }; 322 322
+1 -1
arch/arm64/boot/dts/freescale/imx8mm-data-modul-edm-sbc.dts
··· 275 275 compatible = "rohm,bd71847"; 276 276 reg = <0x4b>; 277 277 #clock-cells = <0>; 278 - clocks = <&clk_xtal32k 0>; 278 + clocks = <&clk_xtal32k>; 279 279 clock-output-names = "clk-32k-out"; 280 280 pinctrl-names = "default"; 281 281 pinctrl-0 = <&pinctrl_pmic>;
+1 -1
arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
··· 214 214 pinctrl-0 = <&pinctrl_i2c3>; 215 215 status = "okay"; 216 216 217 - i2cmux@70 { 217 + i2c-mux@70 { 218 218 compatible = "nxp,pca9540"; 219 219 reg = <0x70>; 220 220 #address-cells = <1>;
+1
arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
··· 771 771 &usbotg2 { 772 772 dr_mode = "host"; 773 773 vbus-supply = <&reg_usb2_vbus>; 774 + over-current-active-low; 774 775 status = "okay"; 775 776 }; 776 777
+1
arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi
··· 9 9 simple-audio-card,bitclock-master = <&dailink_master>; 10 10 simple-audio-card,format = "i2s"; 11 11 simple-audio-card,frame-master = <&dailink_master>; 12 + simple-audio-card,mclk-fs = <256>; 12 13 simple-audio-card,name = "imx8mm-wm8904"; 13 14 simple-audio-card,routing = 14 15 "Headphone Jack", "HPOUTL",
+1
arch/arm64/boot/dts/freescale/imx8mm-verdin-dev.dtsi
··· 11 11 simple-audio-card,bitclock-master = <&dailink_master>; 12 12 simple-audio-card,format = "i2s"; 13 13 simple-audio-card,frame-master = <&dailink_master>; 14 + simple-audio-card,mclk-fs = <256>; 14 15 simple-audio-card,name = "imx8mm-nau8822"; 15 16 simple-audio-card,routing = 16 17 "Headphones", "LHP",
+2 -2
arch/arm64/boot/dts/freescale/imx8mp-evk.dts
··· 36 36 37 37 pcie0_refclk: pcie0-refclk { 38 38 compatible = "fixed-clock"; 39 - #clock-cells = <0>; 40 - clock-frequency = <100000000>; 39 + #clock-cells = <0>; 40 + clock-frequency = <100000000>; 41 41 }; 42 42 43 43 reg_can1_stby: regulator-can1-stby {
-10
arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
··· 99 99 100 100 regulators { 101 101 buck1: BUCK1 { 102 - regulator-compatible = "BUCK1"; 103 102 regulator-min-microvolt = <600000>; 104 103 regulator-max-microvolt = <2187500>; 105 104 regulator-boot-on; ··· 107 108 }; 108 109 109 110 buck2: BUCK2 { 110 - regulator-compatible = "BUCK2"; 111 111 regulator-min-microvolt = <600000>; 112 112 regulator-max-microvolt = <2187500>; 113 113 regulator-boot-on; ··· 117 119 }; 118 120 119 121 buck4: BUCK4 { 120 - regulator-compatible = "BUCK4"; 121 122 regulator-min-microvolt = <600000>; 122 123 regulator-max-microvolt = <3400000>; 123 124 regulator-boot-on; ··· 124 127 }; 125 128 126 129 buck5: BUCK5 { 127 - regulator-compatible = "BUCK5"; 128 130 regulator-min-microvolt = <600000>; 129 131 regulator-max-microvolt = <3400000>; 130 132 regulator-boot-on; ··· 131 135 }; 132 136 133 137 buck6: BUCK6 { 134 - regulator-compatible = "BUCK6"; 135 138 regulator-min-microvolt = <600000>; 136 139 regulator-max-microvolt = <3400000>; 137 140 regulator-boot-on; ··· 138 143 }; 139 144 140 145 ldo1: LDO1 { 141 - regulator-compatible = "LDO1"; 142 146 regulator-min-microvolt = <1600000>; 143 147 regulator-max-microvolt = <3300000>; 144 148 regulator-boot-on; ··· 145 151 }; 146 152 147 153 ldo2: LDO2 { 148 - regulator-compatible = "LDO2"; 149 154 regulator-min-microvolt = <800000>; 150 155 regulator-max-microvolt = <1150000>; 151 156 regulator-boot-on; ··· 152 159 }; 153 160 154 161 ldo3: LDO3 { 155 - regulator-compatible = "LDO3"; 156 162 regulator-min-microvolt = <800000>; 157 163 regulator-max-microvolt = <3300000>; 158 164 regulator-boot-on; ··· 159 167 }; 160 168 161 169 ldo4: LDO4 { 162 - regulator-compatible = "LDO4"; 163 170 regulator-min-microvolt = <800000>; 164 171 regulator-max-microvolt = <3300000>; 165 172 }; 166 173 167 174 ldo5: LDO5 { 168 - regulator-compatible = "LDO5"; 169 175 regulator-min-microvolt = <1800000>; 170 176 regulator-max-microvolt = <3300000>; 171 177 regulator-boot-on;
+8 -7
arch/arm64/boot/dts/freescale/imx8mp.dtsi
··· 524 524 compatible = "fsl,imx8mp-gpc"; 525 525 reg = <0x303a0000 0x1000>; 526 526 interrupt-parent = <&gic>; 527 + interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>; 527 528 interrupt-controller; 528 529 #interrupt-cells = <3>; 529 530 ··· 591 590 reg = <IMX8MP_POWER_DOMAIN_MIPI_PHY2>; 592 591 }; 593 592 594 - pgc_hsiomix: power-domains@17 { 593 + pgc_hsiomix: power-domain@17 { 595 594 #power-domain-cells = <0>; 596 595 reg = <IMX8MP_POWER_DOMAIN_HSIOMIX>; 597 596 clocks = <&clk IMX8MP_CLK_HSIO_AXI>, ··· 1298 1297 reg = <0x32f10100 0x8>, 1299 1298 <0x381f0000 0x20>; 1300 1299 clocks = <&clk IMX8MP_CLK_HSIO_ROOT>, 1301 - <&clk IMX8MP_CLK_USB_ROOT>; 1300 + <&clk IMX8MP_CLK_USB_SUSP>; 1302 1301 clock-names = "hsio", "suspend"; 1303 1302 interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>; 1304 1303 power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>; ··· 1311 1310 usb_dwc3_0: usb@38100000 { 1312 1311 compatible = "snps,dwc3"; 1313 1312 reg = <0x38100000 0x10000>; 1314 - clocks = <&clk IMX8MP_CLK_HSIO_AXI>, 1313 + clocks = <&clk IMX8MP_CLK_USB_ROOT>, 1315 1314 <&clk IMX8MP_CLK_USB_CORE_REF>, 1316 - <&clk IMX8MP_CLK_USB_ROOT>; 1315 + <&clk IMX8MP_CLK_USB_SUSP>; 1317 1316 clock-names = "bus_early", "ref", "suspend"; 1318 1317 interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>; 1319 1318 phys = <&usb3_phy0>, <&usb3_phy0>; ··· 1340 1339 reg = <0x32f10108 0x8>, 1341 1340 <0x382f0000 0x20>; 1342 1341 clocks = <&clk IMX8MP_CLK_HSIO_ROOT>, 1343 - <&clk IMX8MP_CLK_USB_ROOT>; 1342 + <&clk IMX8MP_CLK_USB_SUSP>; 1344 1343 clock-names = "hsio", "suspend"; 1345 1344 interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>; 1346 1345 power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>; ··· 1353 1352 usb_dwc3_1: usb@38200000 { 1354 1353 compatible = "snps,dwc3"; 1355 1354 reg = <0x38200000 0x10000>; 1356 - clocks = <&clk IMX8MP_CLK_HSIO_AXI>, 1355 + clocks = <&clk IMX8MP_CLK_USB_ROOT>, 1357 1356 <&clk IMX8MP_CLK_USB_CORE_REF>, 1358 - <&clk IMX8MP_CLK_USB_ROOT>; 1357 + <&clk IMX8MP_CLK_USB_SUSP>; 
1359 1358 clock-names = "bus_early", "ref", "suspend"; 1360 1359 interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>; 1361 1360 phys = <&usb3_phy1>, <&usb3_phy1>;
+2 -2
arch/arm64/boot/dts/freescale/imx8mq-nitrogen.dts
··· 133 133 pinctrl-0 = <&pinctrl_i2c1>; 134 134 status = "okay"; 135 135 136 - i2cmux@70 { 136 + i2c-mux@70 { 137 137 compatible = "nxp,pca9546"; 138 138 pinctrl-names = "default"; 139 139 pinctrl-0 = <&pinctrl_i2c1_pca9546>; ··· 216 216 pinctrl-0 = <&pinctrl_i2c4>; 217 217 status = "okay"; 218 218 219 - pca9546: i2cmux@70 { 219 + pca9546: i2c-mux@70 { 220 220 compatible = "nxp,pca9546"; 221 221 reg = <0x70>; 222 222 #address-cells = <1>;
+2 -2
arch/arm64/boot/dts/freescale/imx8mq-thor96.dts
··· 339 339 bus-width = <4>; 340 340 non-removable; 341 341 no-sd; 342 - no-emmc; 342 + no-mmc; 343 343 status = "okay"; 344 344 345 345 brcmf: wifi@1 { ··· 359 359 cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; 360 360 bus-width = <4>; 361 361 no-sdio; 362 - no-emmc; 362 + no-mmc; 363 363 disable-wp; 364 364 status = "okay"; 365 365 };
+1 -1
arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
··· 61 61 pinctrl-0 = <&pinctrl_lpi2c1 &pinctrl_ioexp_rst>; 62 62 status = "okay"; 63 63 64 - i2c-switch@71 { 64 + i2c-mux@71 { 65 65 compatible = "nxp,pca9646", "nxp,pca9546"; 66 66 #address-cells = <1>; 67 67 #size-cells = <0>;
+3 -3
arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
··· 74 74 75 75 pinctrl_usdhc1: usdhc1grp { 76 76 fsl,pins = < 77 - MX93_PAD_SD1_CLK__USDHC1_CLK 0x17fe 77 + MX93_PAD_SD1_CLK__USDHC1_CLK 0x15fe 78 78 MX93_PAD_SD1_CMD__USDHC1_CMD 0x13fe 79 79 MX93_PAD_SD1_DATA0__USDHC1_DATA0 0x13fe 80 80 MX93_PAD_SD1_DATA1__USDHC1_DATA1 0x13fe ··· 84 84 MX93_PAD_SD1_DATA5__USDHC1_DATA5 0x13fe 85 85 MX93_PAD_SD1_DATA6__USDHC1_DATA6 0x13fe 86 86 MX93_PAD_SD1_DATA7__USDHC1_DATA7 0x13fe 87 - MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x17fe 87 + MX93_PAD_SD1_STROBE__USDHC1_STROBE 0x15fe 88 88 >; 89 89 }; 90 90 ··· 102 102 103 103 pinctrl_usdhc2: usdhc2grp { 104 104 fsl,pins = < 105 - MX93_PAD_SD2_CLK__USDHC2_CLK 0x17fe 105 + MX93_PAD_SD2_CLK__USDHC2_CLK 0x15fe 106 106 MX93_PAD_SD2_CMD__USDHC2_CMD 0x13fe 107 107 MX93_PAD_SD2_DATA0__USDHC2_DATA0 0x13fe 108 108 MX93_PAD_SD2_DATA1__USDHC2_DATA1 0x13fe
+1 -1
arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
··· 98 98 99 99 uart1: serial@12100 { 100 100 compatible = "snps,dw-apb-uart"; 101 - reg = <0x11000 0x100>; 101 + reg = <0x12100 0x100>; 102 102 reg-shift = <2>; 103 103 interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>; 104 104 reg-io-width = <1>;
+6
arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
··· 3 3 * Copyright (c) 2015, LGE Inc. All rights reserved. 4 4 * Copyright (c) 2016, The Linux Foundation. All rights reserved. 5 5 * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com> 6 + * Copyright (c) 2022, Dominik Kobinski <dominikkobinski314@gmail.com> 6 7 */ 7 8 8 9 /dts-v1/; ··· 50 49 51 50 cont_splash_mem: memory@3400000 { 52 51 reg = <0 0x03400000 0 0x1200000>; 52 + no-map; 53 + }; 54 + 55 + removed_region: reserved@5000000 { 56 + reg = <0 0x05000000 0 0x2200000>; 53 57 no-map; 54 58 }; 55 59 };
+60 -17
arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
··· 11 11 #include <dt-bindings/gpio/gpio.h> 12 12 #include <dt-bindings/input/gpio-keys.h> 13 13 14 + /delete-node/ &adsp_mem; 15 + /delete-node/ &audio_mem; 16 + /delete-node/ &mpss_mem; 17 + /delete-node/ &peripheral_region; 18 + /delete-node/ &rmtfs_mem; 19 + 14 20 / { 15 21 model = "Xiaomi Mi 4C"; 16 22 compatible = "xiaomi,libra", "qcom,msm8992"; ··· 76 70 #size-cells = <2>; 77 71 ranges; 78 72 79 - /* This is for getting crash logs using Android downstream kernels */ 73 + memory_hole: hole@6400000 { 74 + reg = <0 0x06400000 0 0x600000>; 75 + no-map; 76 + }; 77 + 78 + memory_hole2: hole2@6c00000 { 79 + reg = <0 0x06c00000 0 0x2400000>; 80 + no-map; 81 + }; 82 + 83 + mpss_mem: mpss@9000000 { 84 + reg = <0 0x09000000 0 0x5a00000>; 85 + no-map; 86 + }; 87 + 88 + tzapp: tzapp@ea00000 { 89 + reg = <0 0x0ea00000 0 0x1900000>; 90 + no-map; 91 + }; 92 + 93 + mdm_rfsa_mem: mdm-rfsa@ca0b0000 { 94 + reg = <0 0xca0b0000 0 0x10000>; 95 + no-map; 96 + }; 97 + 98 + rmtfs_mem: rmtfs@ca100000 { 99 + compatible = "qcom,rmtfs-mem"; 100 + reg = <0 0xca100000 0 0x180000>; 101 + no-map; 102 + 103 + qcom,client-id = <1>; 104 + }; 105 + 106 + audio_mem: audio@cb400000 { 107 + reg = <0 0xcb000000 0 0x400000>; 108 + no-mem; 109 + }; 110 + 111 + qseecom_mem: qseecom@cb400000 { 112 + reg = <0 0xcb400000 0 0x1c00000>; 113 + no-mem; 114 + }; 115 + 116 + adsp_rfsa_mem: adsp-rfsa@cd000000 { 117 + reg = <0 0xcd000000 0 0x10000>; 118 + no-map; 119 + }; 120 + 121 + sensor_rfsa_mem: sensor-rfsa@cd010000 { 122 + reg = <0 0xcd010000 0 0x10000>; 123 + no-map; 124 + }; 125 + 80 126 ramoops@dfc00000 { 81 127 compatible = "ramoops"; 82 - reg = <0x0 0xdfc00000 0x0 0x40000>; 128 + reg = <0 0xdfc00000 0 0x40000>; 83 129 console-size = <0x10000>; 84 130 record-size = <0x10000>; 85 131 ftrace-size = <0x10000>; 86 132 pmsg-size = <0x20000>; 87 - }; 88 - 89 - modem_region: modem_region@9000000 { 90 - reg = <0x0 0x9000000 0x0 0x5a00000>; 91 - no-map; 92 - }; 93 - 94 - tzapp: modem_region@ea00000 { 95 - reg = 
<0x0 0xea00000 0x0 0x1900000>; 96 - no-map; 97 133 }; 98 134 }; 99 135 }; ··· 176 128 177 129 &blsp2_uart2 { 178 130 status = "okay"; 179 - }; 180 - 181 - &peripheral_region { 182 - reg = <0x0 0x7400000 0x0 0x1c00000>; 183 - no-map; 184 131 }; 185 132 186 133 &pm8994_spmi_regulators {
-4
arch/arm64/boot/dts/qcom/msm8992.dtsi
··· 37 37 compatible = "qcom,rpmcc-msm8992", "qcom,rpmcc"; 38 38 }; 39 39 40 - &tcsr_mutex { 41 - compatible = "qcom,sfpb-mutex"; 42 - }; 43 - 44 40 &timer { 45 41 interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>, 46 42 <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+16 -3
arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
··· 9 9 10 10 #include "msm8994.dtsi" 11 11 12 - /* Angler's firmware does not report where the memory is allocated */ 13 - /delete-node/ &cont_splash_mem; 14 - 15 12 / { 16 13 model = "Huawei Nexus 6P"; 17 14 compatible = "huawei,angler", "qcom,msm8994"; ··· 24 27 25 28 chosen { 26 29 stdout-path = "serial0:115200n8"; 30 + }; 31 + 32 + reserved-memory { 33 + #address-cells = <2>; 34 + #size-cells = <2>; 35 + ranges; 36 + 37 + tzapp_mem: tzapp@4800000 { 38 + reg = <0 0x04800000 0 0x1900000>; 39 + no-map; 40 + }; 41 + 42 + removed_region: reserved@6300000 { 43 + reg = <0 0x06300000 0 0xD00000>; 44 + no-map; 45 + }; 27 46 }; 28 47 }; 29 48
+26 -57
arch/arm64/boot/dts/qcom/sc8280xp.dtsi
··· 10 10 #include <dt-bindings/interconnect/qcom,sc8280xp.h> 11 11 #include <dt-bindings/interrupt-controller/arm-gic.h> 12 12 #include <dt-bindings/mailbox/qcom-ipcc.h> 13 + #include <dt-bindings/phy/phy-qcom-qmp.h> 13 14 #include <dt-bindings/power/qcom-rpmpd.h> 14 15 #include <dt-bindings/soc/qcom,rpmh-rsc.h> 15 16 #include <dt-bindings/thermal/thermal.h> ··· 763 762 <0>, 764 763 <0>, 765 764 <0>, 766 - <&usb_0_ssphy>, 765 + <&usb_0_qmpphy QMP_USB43DP_USB3_PIPE_CLK>, 767 766 <0>, 768 767 <0>, 769 768 <0>, ··· 771 770 <0>, 772 771 <0>, 773 772 <0>, 774 - <&usb_1_ssphy>, 773 + <&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>, 775 774 <0>, 776 775 <0>, 777 776 <0>, ··· 1674 1673 }; 1675 1674 }; 1676 1675 1677 - usb_0_qmpphy: phy-wrapper@88ec000 { 1676 + usb_0_qmpphy: phy@88eb000 { 1678 1677 compatible = "qcom,sc8280xp-qmp-usb43dp-phy"; 1679 - reg = <0 0x088ec000 0 0x1e4>, 1680 - <0 0x088eb000 0 0x40>, 1681 - <0 0x088ed000 0 0x1c8>; 1682 - #address-cells = <2>; 1683 - #size-cells = <2>; 1684 - ranges; 1678 + reg = <0 0x088eb000 0 0x4000>; 1685 1679 1686 1680 clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>, 1687 - <&rpmhcc RPMH_CXO_CLK>, 1688 1681 <&gcc GCC_USB4_EUD_CLKREF_CLK>, 1689 - <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>; 1690 - clock-names = "aux", "ref_clk_src", "ref", "com_aux"; 1691 - 1692 - resets = <&gcc GCC_USB3_PHY_PRIM_BCR>, 1693 - <&gcc GCC_USB3_DP_PHY_PRIM_BCR>; 1694 - reset-names = "phy", "common"; 1682 + <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>, 1683 + <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; 1684 + clock-names = "aux", "ref", "com_aux", "usb3_pipe"; 1695 1685 1696 1686 power-domains = <&gcc USB30_PRIM_GDSC>; 1697 1687 1698 - status = "disabled"; 1688 + resets = <&gcc GCC_USB3_PHY_PRIM_BCR>, 1689 + <&gcc GCC_USB4_DP_PHY_PRIM_BCR>; 1690 + reset-names = "phy", "common"; 1699 1691 1700 - usb_0_ssphy: usb3-phy@88eb400 { 1701 - reg = <0 0x088eb400 0 0x100>, 1702 - <0 0x088eb600 0 0x3ec>, 1703 - <0 0x088ec400 0 0x364>, 1704 - <0 0x088eba00 0 0x100>, 1705 - <0 0x088ebc00 0 0x3ec>, 
1706 - <0 0x088ec200 0 0x18>; 1707 - #phy-cells = <0>; 1708 - #clock-cells = <0>; 1709 - clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; 1710 - clock-names = "pipe0"; 1711 - clock-output-names = "usb0_phy_pipe_clk_src"; 1712 - }; 1692 + #clock-cells = <1>; 1693 + #phy-cells = <1>; 1694 + 1695 + status = "disabled"; 1713 1696 }; 1714 1697 1715 1698 usb_1_hsphy: phy@8902000 { ··· 1710 1725 status = "disabled"; 1711 1726 }; 1712 1727 1713 - usb_1_qmpphy: phy-wrapper@8904000 { 1728 + usb_1_qmpphy: phy@8903000 { 1714 1729 compatible = "qcom,sc8280xp-qmp-usb43dp-phy"; 1715 - reg = <0 0x08904000 0 0x1e4>, 1716 - <0 0x08903000 0 0x40>, 1717 - <0 0x08905000 0 0x1c8>; 1718 - #address-cells = <2>; 1719 - #size-cells = <2>; 1720 - ranges; 1730 + reg = <0 0x08903000 0 0x4000>; 1721 1731 1722 1732 clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>, 1723 - <&rpmhcc RPMH_CXO_CLK>, 1724 1733 <&gcc GCC_USB4_CLKREF_CLK>, 1725 - <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>; 1726 - clock-names = "aux", "ref_clk_src", "ref", "com_aux"; 1734 + <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>, 1735 + <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>; 1736 + clock-names = "aux", "ref", "com_aux", "usb3_pipe"; 1737 + 1738 + power-domains = <&gcc USB30_SEC_GDSC>; 1727 1739 1728 1740 resets = <&gcc GCC_USB3_PHY_SEC_BCR>, 1729 1741 <&gcc GCC_USB4_1_DP_PHY_PRIM_BCR>; 1730 1742 reset-names = "phy", "common"; 1731 1743 1732 - power-domains = <&gcc USB30_SEC_GDSC>; 1744 + #clock-cells = <1>; 1745 + #phy-cells = <1>; 1733 1746 1734 1747 status = "disabled"; 1735 - 1736 - usb_1_ssphy: usb3-phy@8903400 { 1737 - reg = <0 0x08903400 0 0x100>, 1738 - <0 0x08903600 0 0x3ec>, 1739 - <0 0x08904400 0 0x364>, 1740 - <0 0x08903a00 0 0x100>, 1741 - <0 0x08903c00 0 0x3ec>, 1742 - <0 0x08904200 0 0x18>; 1743 - #phy-cells = <0>; 1744 - #clock-cells = <0>; 1745 - clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>; 1746 - clock-names = "pipe0"; 1747 - clock-output-names = "usb1_phy_pipe_clk_src"; 1748 - }; 1749 1748 }; 1750 1749 1751 1750 pmu@9091000 { ··· 1879 1910 reg = <0 
0x0a600000 0 0xcd00>; 1880 1911 interrupts = <GIC_SPI 803 IRQ_TYPE_LEVEL_HIGH>; 1881 1912 iommus = <&apps_smmu 0x820 0x0>; 1882 - phys = <&usb_0_hsphy>, <&usb_0_ssphy>; 1913 + phys = <&usb_0_hsphy>, <&usb_0_qmpphy QMP_USB43DP_USB3_PHY>; 1883 1914 phy-names = "usb2-phy", "usb3-phy"; 1884 1915 }; 1885 1916 }; ··· 1933 1964 reg = <0 0x0a800000 0 0xcd00>; 1934 1965 interrupts = <GIC_SPI 810 IRQ_TYPE_LEVEL_HIGH>; 1935 1966 iommus = <&apps_smmu 0x860 0x0>; 1936 - phys = <&usb_1_hsphy>, <&usb_1_ssphy>; 1967 + phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>; 1937 1968 phy-names = "usb2-phy", "usb3-phy"; 1938 1969 }; 1939 1970 };
-1
arch/arm64/boot/dts/qcom/sm8250.dtsi
··· 334 334 exit-latency-us = <6562>; 335 335 min-residency-us = <9987>; 336 336 local-timer-stop; 337 - status = "disabled"; 338 337 }; 339 338 }; 340 339 };
+2 -2
arch/arm64/boot/dts/qcom/sm8350.dtsi
··· 2382 2382 <&rpmhcc RPMH_CXO_CLK>; 2383 2383 clock-names = "iface", "core", "xo"; 2384 2384 resets = <&gcc GCC_SDCC2_BCR>; 2385 - interconnects = <&aggre2_noc MASTER_SDCC_2 0 &mc_virt SLAVE_EBI1 0>, 2386 - <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_SDCC_2 0>; 2385 + interconnects = <&aggre2_noc MASTER_SDCC_2 &mc_virt SLAVE_EBI1>, 2386 + <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_SDCC_2>; 2387 2387 interconnect-names = "sdhc-ddr","cpu-sdhc"; 2388 2388 iommus = <&apps_smmu 0x4a0 0x0>; 2389 2389 power-domains = <&rpmhpd SM8350_CX>;
+1 -1
arch/riscv/boot/dts/sifive/fu740-c000.dtsi
··· 328 328 bus-range = <0x0 0xff>; 329 329 ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */ 330 330 <0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */ 331 - <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */ 331 + <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x10000000>, /* mem */ 332 332 <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */ 333 333 num-lanes = <0x8>; 334 334 interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>;
+3 -2
arch/s390/kernel/setup.c
··· 508 508 { 509 509 struct lowcore *abs_lc; 510 510 unsigned long flags; 511 + int i; 511 512 512 513 __ctl_clear_bit(0, 28); 513 514 S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT; ··· 524 523 abs_lc = get_abs_lowcore(&flags); 525 524 abs_lc->restart_flags = RESTART_FLAG_CTLREGS; 526 525 abs_lc->program_new_psw = S390_lowcore.program_new_psw; 527 - memcpy(abs_lc->cregs_save_area, S390_lowcore.cregs_save_area, 528 - sizeof(abs_lc->cregs_save_area)); 526 + for (i = 0; i < 16; i++) 527 + abs_lc->cregs_save_area[i] = S390_lowcore.cregs_save_area[i]; 529 528 put_abs_lowcore(abs_lc, flags); 530 529 } 531 530
+1
arch/x86/events/intel/core.c
··· 6339 6339 break; 6340 6340 6341 6341 case INTEL_FAM6_SAPPHIRERAPIDS_X: 6342 + case INTEL_FAM6_EMERALDRAPIDS_X: 6342 6343 pmem = true; 6343 6344 x86_pmu.late_ack = true; 6344 6345 memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+1
arch/x86/events/intel/cstate.c
··· 677 677 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates), 678 678 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates), 679 679 X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates), 680 + X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &icx_cstates), 680 681 681 682 X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates), 682 683 X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),
+9
arch/x86/kernel/cpu/aperfmperf.c
··· 330 330 331 331 static void disable_freq_invariance_workfn(struct work_struct *work) 332 332 { 333 + int cpu; 334 + 333 335 static_branch_disable(&arch_scale_freq_key); 336 + 337 + /* 338 + * Set arch_freq_scale to a default value on all cpus 339 + * This negates the effect of scaling 340 + */ 341 + for_each_possible_cpu(cpu) 342 + per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE; 334 343 } 335 344 336 345 static DECLARE_WORK(disable_freq_invariance_work,
+3 -5
block/bfq-cgroup.c
··· 316 316 317 317 static void bfqg_get(struct bfq_group *bfqg) 318 318 { 319 - bfqg->ref++; 319 + refcount_inc(&bfqg->ref); 320 320 } 321 321 322 322 static void bfqg_put(struct bfq_group *bfqg) 323 323 { 324 - bfqg->ref--; 325 - 326 - if (bfqg->ref == 0) 324 + if (refcount_dec_and_test(&bfqg->ref)) 327 325 kfree(bfqg); 328 326 } 329 327 ··· 528 530 } 529 531 530 532 /* see comments in bfq_bic_update_cgroup for why refcounting */ 531 - bfqg_get(bfqg); 533 + refcount_set(&bfqg->ref, 1); 532 534 return &bfqg->pd; 533 535 } 534 536
+1 -1
block/bfq-iosched.h
··· 928 928 char blkg_path[128]; 929 929 930 930 /* reference counter (see comments in bfq_bic_update_cgroup) */ 931 - int ref; 931 + refcount_t ref; 932 932 /* Is bfq_group still online? */ 933 933 bool online; 934 934
+4
block/blk-cgroup.c
··· 1455 1455 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) 1456 1456 pol->pd_init_fn(blkg->pd[pol->plid]); 1457 1457 1458 + if (pol->pd_online_fn) 1459 + list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) 1460 + pol->pd_online_fn(blkg->pd[pol->plid]); 1461 + 1458 1462 __set_bit(pol->plid, q->blkcg_pols); 1459 1463 ret = 0; 1460 1464
+5 -1
block/blk-mq.c
··· 2890 2890 struct blk_plug *plug, struct bio **bio, unsigned int nsegs) 2891 2891 { 2892 2892 struct request *rq; 2893 + enum hctx_type type, hctx_type; 2893 2894 2894 2895 if (!plug) 2895 2896 return NULL; ··· 2903 2902 return NULL; 2904 2903 } 2905 2904 2906 - if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type) 2905 + type = blk_mq_get_hctx_type((*bio)->bi_opf); 2906 + hctx_type = rq->mq_hctx->type; 2907 + if (type != hctx_type && 2908 + !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT)) 2907 2909 return NULL; 2908 2910 if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) 2909 2911 return NULL;
+3
drivers/accessibility/speakup/spk_ttyio.c
··· 354 354 { 355 355 struct tty_struct *tty = in_synth->dev; 356 356 357 + if (tty == NULL) 358 + return; 359 + 357 360 tty_lock(tty); 358 361 359 362 if (tty->ops->close)
+10
drivers/acpi/prmt.c
··· 236 236 efi_status_t status; 237 237 struct prm_context_buffer context; 238 238 239 + if (!efi_enabled(EFI_RUNTIME_SERVICES)) { 240 + pr_err_ratelimited("PRM: EFI runtime services no longer available\n"); 241 + return AE_NO_HANDLER; 242 + } 243 + 239 244 /* 240 245 * The returned acpi_status will always be AE_OK. Error values will be 241 246 * saved in the first byte of the PRM message buffer to be used by ASL. ··· 329 324 return; 330 325 331 326 pr_info("PRM: found %u modules\n", mc); 327 + 328 + if (!efi_enabled(EFI_RUNTIME_SERVICES)) { 329 + pr_err("PRM: EFI runtime services unavailable\n"); 330 + return; 331 + } 332 332 333 333 status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT, 334 334 ACPI_ADR_SPACE_PLATFORM_RT,
+8
drivers/acpi/video_detect.c
··· 517 517 }, 518 518 { 519 519 .callback = video_detect_force_native, 520 + /* Acer Aspire 4810T */ 521 + .matches = { 522 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 523 + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 4810T"), 524 + }, 525 + }, 526 + { 527 + .callback = video_detect_force_native, 520 528 /* Acer Aspire 5738z */ 521 529 .matches = { 522 530 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+12 -6
drivers/base/property.c
··· 997 997 fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode, 998 998 struct fwnode_handle *prev) 999 999 { 1000 + struct fwnode_handle *ep, *port_parent = NULL; 1000 1001 const struct fwnode_handle *parent; 1001 - struct fwnode_handle *ep; 1002 1002 1003 1003 /* 1004 1004 * If this function is in a loop and the previous iteration returned 1005 1005 * an endpoint from fwnode->secondary, then we need to use the secondary 1006 1006 * as parent rather than @fwnode. 1007 1007 */ 1008 - if (prev) 1009 - parent = fwnode_graph_get_port_parent(prev); 1010 - else 1008 + if (prev) { 1009 + port_parent = fwnode_graph_get_port_parent(prev); 1010 + parent = port_parent; 1011 + } else { 1011 1012 parent = fwnode; 1013 + } 1012 1014 if (IS_ERR_OR_NULL(parent)) 1013 1015 return NULL; 1014 1016 1015 1017 ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev); 1016 1018 if (ep) 1017 - return ep; 1019 + goto out_put_port_parent; 1018 1020 1019 - return fwnode_graph_get_next_endpoint(parent->secondary, NULL); 1021 + ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL); 1022 + 1023 + out_put_port_parent: 1024 + fwnode_handle_put(port_parent); 1025 + return ep; 1020 1026 } 1021 1027 EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); 1022 1028
+1 -1
drivers/base/test/test_async_driver_probe.c
··· 145 145 calltime = ktime_get(); 146 146 for_each_online_cpu(cpu) { 147 147 nid = cpu_to_node(cpu); 148 - pdev = &sync_dev[sync_id]; 148 + pdev = &async_dev[async_id]; 149 149 150 150 *pdev = test_platform_device_register_node("test_async_driver", 151 151 async_id,
+2
drivers/block/pktcdvd.c
··· 2400 2400 struct bio *split; 2401 2401 2402 2402 bio = bio_split_to_limits(bio); 2403 + if (!bio) 2404 + return; 2403 2405 2404 2406 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", 2405 2407 (unsigned long long)bio->bi_iter.bi_sector,
+1 -1
drivers/block/rnbd/rnbd-clt.c
··· 1440 1440 goto out_alloc; 1441 1441 } 1442 1442 1443 - ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS), 1443 + ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1, 1444 1444 GFP_KERNEL); 1445 1445 if (ret < 0) { 1446 1446 pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
+7
drivers/bluetooth/hci_qca.c
··· 2164 2164 int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS); 2165 2165 struct serdev_device *serdev = to_serdev_device(dev); 2166 2166 struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); 2167 + struct hci_uart *hu = &qcadev->serdev_hu; 2168 + struct hci_dev *hdev = hu->hdev; 2169 + struct qca_data *qca = hu->priv; 2167 2170 const u8 ibs_wake_cmd[] = { 0xFD }; 2168 2171 const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 }; 2169 2172 2170 2173 if (qcadev->btsoc_type == QCA_QCA6390) { 2174 + if (test_bit(QCA_BT_OFF, &qca->flags) || 2175 + !test_bit(HCI_RUNNING, &hdev->flags)) 2176 + return; 2177 + 2171 2178 serdev_device_write_flush(serdev); 2172 2179 ret = serdev_device_write_buf(serdev, ibs_wake_cmd, 2173 2180 sizeof(ibs_wake_cmd));
+1 -1
drivers/comedi/drivers/adv_pci1760.c
··· 58 58 #define PCI1760_CMD_CLR_IMB2 0x00 /* Clears IMB2 */ 59 59 #define PCI1760_CMD_SET_DO 0x01 /* Set output state */ 60 60 #define PCI1760_CMD_GET_DO 0x02 /* Read output status */ 61 - #define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */ 61 + #define PCI1760_CMD_GET_STATUS 0x07 /* Read current status */ 62 62 #define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */ 63 63 #define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */ 64 64 #define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */
+4 -3
drivers/dma/dmaengine.c
··· 451 451 /* The channel is already in use, update client count */ 452 452 if (chan->client_count) { 453 453 __module_get(owner); 454 - goto out; 454 + chan->client_count++; 455 + return 0; 455 456 } 456 457 457 458 if (!try_module_get(owner)) ··· 471 470 goto err_out; 472 471 } 473 472 473 + chan->client_count++; 474 + 474 475 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) 475 476 balance_ref_count(chan); 476 477 477 - out: 478 - chan->client_count++; 479 478 return 0; 480 479 481 480 err_out:
+6
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
··· 1018 1018 1019 1019 /* The bad descriptor currently is in the head of vc list */ 1020 1020 vd = vchan_next_desc(&chan->vc); 1021 + if (!vd) { 1022 + dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n", 1023 + axi_chan_name(chan)); 1024 + goto out; 1025 + } 1021 1026 /* Remove the completed descriptor from issued list */ 1022 1027 list_del(&vd->node); 1023 1028 ··· 1037 1032 /* Try to restart the controller */ 1038 1033 axi_chan_start_first_queued(chan); 1039 1034 1035 + out: 1040 1036 spin_unlock_irqrestore(&chan->vc.lock, flags); 1041 1037 } 1042 1038
+13 -3
drivers/dma/idxd/device.c
··· 1172 1172 spin_unlock(&ie->list_lock); 1173 1173 1174 1174 list_for_each_entry_safe(desc, itr, &flist, list) { 1175 + struct dma_async_tx_descriptor *tx; 1176 + 1175 1177 list_del(&desc->list); 1176 1178 ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT; 1179 + /* 1180 + * wq is being disabled. Any remaining descriptors are 1181 + * likely to be stuck and can be dropped. callback could 1182 + * point to code that is no longer accessible, for example 1183 + * if dmatest module has been unloaded. 1184 + */ 1185 + tx = &desc->txd; 1186 + tx->callback = NULL; 1187 + tx->callback_result = NULL; 1177 1188 idxd_dma_complete_txd(desc, ctype, true); 1178 1189 } 1179 1190 } ··· 1401 1390 err_irq: 1402 1391 idxd_wq_unmap_portal(wq); 1403 1392 err_map_portal: 1404 - rc = idxd_wq_disable(wq, false); 1405 - if (rc < 0) 1393 + if (idxd_wq_disable(wq, false)) 1406 1394 dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq))); 1407 1395 err: 1408 1396 return rc; ··· 1418 1408 dev_warn(dev, "Clients has claim on wq %d: %d\n", 1419 1409 wq->id, idxd_wq_refcount(wq)); 1420 1410 1421 - idxd_wq_free_resources(wq); 1422 1411 idxd_wq_unmap_portal(wq); 1423 1412 idxd_wq_drain(wq); 1424 1413 idxd_wq_free_irq(wq); 1425 1414 idxd_wq_reset(wq); 1415 + idxd_wq_free_resources(wq); 1426 1416 percpu_ref_exit(&wq->wq_active); 1427 1417 wq->type = IDXD_WQT_NONE; 1428 1418 wq->client_count = 0;
+3 -1
drivers/dma/imx-sdma.c
··· 1521 1521 sdma_config_ownership(sdmac, false, true, false); 1522 1522 1523 1523 if (sdma_load_context(sdmac)) 1524 - goto err_desc_out; 1524 + goto err_bd_out; 1525 1525 1526 1526 return desc; 1527 1527 1528 + err_bd_out: 1529 + sdma_free_bd(desc); 1528 1530 err_desc_out: 1529 1531 kfree(desc); 1530 1532 err_out:
+5 -5
drivers/dma/lgm/lgm-dma.c
··· 914 914 } 915 915 } 916 916 917 - static int ldma_cfg_init(struct ldma_dev *d) 917 + static int ldma_parse_dt(struct ldma_dev *d) 918 918 { 919 919 struct fwnode_handle *fwnode = dev_fwnode(d->dev); 920 920 struct ldma_port *p; ··· 1661 1661 p->ldev = d; 1662 1662 } 1663 1663 1664 - ret = ldma_cfg_init(d); 1665 - if (ret) 1666 - return ret; 1667 - 1668 1664 dma_dev->dev = &pdev->dev; 1669 1665 1670 1666 ch_mask = (unsigned long)d->channels_mask; ··· 1670 1674 else 1671 1675 ldma_dma_init_v3X(j, d); 1672 1676 } 1677 + 1678 + ret = ldma_parse_dt(d); 1679 + if (ret) 1680 + return ret; 1673 1681 1674 1682 dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources; 1675 1683 dma_dev->device_free_chan_resources = ldma_free_chan_resources;
+4 -3
drivers/dma/ptdma/ptdma-dev.c
··· 71 71 bool soc = FIELD_GET(DWORD0_SOC, desc->dw0); 72 72 u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx]; 73 73 u32 tail; 74 + unsigned long flags; 74 75 75 76 if (soc) { 76 77 desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0); 77 78 desc->dw0 &= ~DWORD0_SOC; 78 79 } 79 - mutex_lock(&cmd_q->q_mutex); 80 + spin_lock_irqsave(&cmd_q->q_lock, flags); 80 81 81 82 /* Copy 32-byte command descriptor to hw queue. */ 82 83 memcpy(q_desc, desc, 32); ··· 92 91 93 92 /* Turn the queue back on using our cached control register */ 94 93 pt_start_queue(cmd_q); 95 - mutex_unlock(&cmd_q->q_mutex); 94 + spin_unlock_irqrestore(&cmd_q->q_lock, flags); 96 95 97 96 return 0; 98 97 } ··· 200 199 201 200 cmd_q->pt = pt; 202 201 cmd_q->dma_pool = dma_pool; 203 - mutex_init(&cmd_q->q_mutex); 202 + spin_lock_init(&cmd_q->q_lock); 204 203 205 204 /* Page alignment satisfies our needs for N <= 128 */ 206 205 cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+1 -1
drivers/dma/ptdma/ptdma.h
··· 196 196 struct ptdma_desc *qbase; 197 197 198 198 /* Aligned queue start address (per requirement) */ 199 - struct mutex q_mutex ____cacheline_aligned; 199 + spinlock_t q_lock ____cacheline_aligned; 200 200 unsigned int qidx; 201 201 202 202 unsigned int qsize;
+1
drivers/dma/qcom/gpi.c
··· 1756 1756 tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE); 1757 1757 if (spi->cmd == SPI_RX) { 1758 1758 tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB); 1759 + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK); 1759 1760 } else if (spi->cmd == SPI_TX) { 1760 1761 tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN); 1761 1762 } else { /* SPI_DUPLEX */
+1
drivers/dma/tegra186-gpc-dma.c
··· 711 711 return err; 712 712 } 713 713 714 + vchan_terminate_vdesc(&tdc->dma_desc->vd); 714 715 tegra_dma_disable(tdc); 715 716 tdc->dma_desc = NULL; 716 717 }
+1 -1
drivers/dma/tegra210-adma.c
··· 221 221 int ret; 222 222 223 223 /* Clear any interrupts */ 224 - tdma_write(tdma, tdma->cdata->global_int_clear, 0x1); 224 + tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1); 225 225 226 226 /* Assert soft reset */ 227 227 tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
+3 -2
drivers/dma/ti/k3-udma.c
··· 762 762 if (uc->desc->dir == DMA_DEV_TO_MEM) { 763 763 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 764 764 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 765 - udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 765 + if (uc->config.ep_type != PSIL_EP_NATIVE) 766 + udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 766 767 } else { 767 768 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val); 768 769 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val); 769 - if (!uc->bchan) 770 + if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE) 770 771 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val); 771 772 } 772 773 }
+3 -1
drivers/dma/xilinx/xilinx_dma.c
··· 3143 3143 /* Initialize the channels */ 3144 3144 for_each_child_of_node(node, child) { 3145 3145 err = xilinx_dma_child_probe(xdev, child); 3146 - if (err < 0) 3146 + if (err < 0) { 3147 + of_node_put(child); 3147 3148 goto error; 3149 + } 3148 3150 } 3149 3151 3150 3152 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+7 -8
drivers/edac/edac_device.c
··· 34 34 static DEFINE_MUTEX(device_ctls_mutex); 35 35 static LIST_HEAD(edac_device_list); 36 36 37 + /* Default workqueue processing interval on this instance, in msecs */ 38 + #define DEFAULT_POLL_INTERVAL 1000 39 + 37 40 #ifdef CONFIG_EDAC_DEBUG 38 41 static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) 39 42 { ··· 339 336 * whole one second to save timers firing all over the period 340 337 * between integral seconds 341 338 */ 342 - if (edac_dev->poll_msec == 1000) 339 + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) 343 340 edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); 344 341 else 345 342 edac_queue_work(&edac_dev->work, edac_dev->delay); ··· 369 366 * timers firing on sub-second basis, while they are happy 370 367 * to fire together on the 1 second exactly 371 368 */ 372 - if (edac_dev->poll_msec == 1000) 369 + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) 373 370 edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); 374 371 else 375 372 edac_queue_work(&edac_dev->work, edac_dev->delay); ··· 403 400 edac_dev->delay = msecs_to_jiffies(msec); 404 401 405 402 /* See comment in edac_device_workq_setup() above */ 406 - if (edac_dev->poll_msec == 1000) 403 + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) 407 404 edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); 408 405 else 409 406 edac_mod_work(&edac_dev->work, edac_dev->delay); ··· 445 442 /* This instance is NOW RUNNING */ 446 443 edac_dev->op_state = OP_RUNNING_POLL; 447 444 448 - /* 449 - * enable workq processing on this instance, 450 - * default = 1000 msec 451 - */ 452 - edac_device_workq_setup(edac_dev, 1000); 445 + edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL); 453 446 } else { 454 447 edac_dev->op_state = OP_RUNNING_INTERRUPT; 455 448 }
+2 -3
drivers/edac/qcom_edac.c
··· 252 252 static int 253 253 dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank) 254 254 { 255 - struct llcc_drv_data *drv = edev_ctl->pvt_info; 255 + struct llcc_drv_data *drv = edev_ctl->dev->platform_data; 256 256 int ret; 257 257 258 258 ret = dump_syn_reg_values(drv, bank, err_type); ··· 289 289 llcc_ecc_irq_handler(int irq, void *edev_ctl) 290 290 { 291 291 struct edac_device_ctl_info *edac_dev_ctl = edev_ctl; 292 - struct llcc_drv_data *drv = edac_dev_ctl->pvt_info; 292 + struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data; 293 293 irqreturn_t irq_rc = IRQ_NONE; 294 294 u32 drp_error, trp_error, i; 295 295 int ret; ··· 358 358 edev_ctl->dev_name = dev_name(dev); 359 359 edev_ctl->ctl_name = "llcc"; 360 360 edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE; 361 - edev_ctl->pvt_info = llcc_driv_data; 362 361 363 362 rc = edac_device_add_device(edev_ctl); 364 363 if (rc)
+2
drivers/firmware/arm_scmi/driver.c
··· 910 910 xfer->hdr.protocol_id, xfer->hdr.seq, 911 911 xfer->hdr.poll_completion); 912 912 913 + /* Clear any stale status */ 914 + xfer->hdr.status = SCMI_SUCCESS; 913 915 xfer->state = SCMI_XFER_SENT_OK; 914 916 /* 915 917 * Even though spinlocking is not needed here since no race is possible
+6 -3
drivers/firmware/arm_scmi/shmem.c
··· 81 81 void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, 82 82 struct scmi_xfer *xfer) 83 83 { 84 + size_t len = ioread32(&shmem->length); 85 + 84 86 xfer->hdr.status = ioread32(shmem->msg_payload); 85 87 /* Skip the length of header and status in shmem area i.e 8 bytes */ 86 - xfer->rx.len = min_t(size_t, xfer->rx.len, 87 - ioread32(&shmem->length) - 8); 88 + xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0); 88 89 89 90 /* Take a copy to the rx buffer.. */ 90 91 memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len); ··· 94 93 void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem, 95 94 size_t max_len, struct scmi_xfer *xfer) 96 95 { 96 + size_t len = ioread32(&shmem->length); 97 + 97 98 /* Skip only the length of header in shmem area i.e 4 bytes */ 98 - xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4); 99 + xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0); 99 100 100 101 /* Take a copy to the rx buffer.. */ 101 102 memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
+6 -1
drivers/firmware/arm_scmi/virtio.c
··· 160 160 } 161 161 162 162 vioch->shutdown_done = &vioch_shutdown_done; 163 - virtio_break_device(vioch->vqueue->vdev); 164 163 if (!vioch->is_rx && vioch->deferred_tx_wq) 165 164 /* Cannot be kicked anymore after this...*/ 166 165 vioch->deferred_tx_wq = NULL; ··· 481 482 struct scmi_chan_info *cinfo = p; 482 483 struct scmi_vio_channel *vioch = cinfo->transport_info; 483 484 485 + /* 486 + * Break device to inhibit further traffic flowing while shutting down 487 + * the channels: doing it later holding vioch->lock creates unsafe 488 + * locking dependency chains as reported by LOCKDEP. 489 + */ 490 + virtio_break_device(vioch->vqueue->vdev); 484 491 scmi_vio_channel_cleanup_sync(vioch); 485 492 486 493 scmi_free_channel(cinfo, data, id);
+4 -3
drivers/firmware/google/gsmi.c
··· 361 361 memcpy(data, gsmi_dev.data_buf->start, *data_size); 362 362 363 363 /* All variables are have the following attributes */ 364 - *attr = EFI_VARIABLE_NON_VOLATILE | 365 - EFI_VARIABLE_BOOTSERVICE_ACCESS | 366 - EFI_VARIABLE_RUNTIME_ACCESS; 364 + if (attr) 365 + *attr = EFI_VARIABLE_NON_VOLATILE | 366 + EFI_VARIABLE_BOOTSERVICE_ACCESS | 367 + EFI_VARIABLE_RUNTIME_ACCESS; 367 368 } 368 369 369 370 spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+12 -1
drivers/gpio/gpio-mxc.c
··· 18 18 #include <linux/module.h> 19 19 #include <linux/platform_device.h> 20 20 #include <linux/slab.h> 21 + #include <linux/spinlock.h> 21 22 #include <linux/syscore_ops.h> 22 23 #include <linux/gpio/driver.h> 23 24 #include <linux/of.h> ··· 160 159 { 161 160 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 162 161 struct mxc_gpio_port *port = gc->private; 162 + unsigned long flags; 163 163 u32 bit, val; 164 164 u32 gpio_idx = d->hwirq; 165 165 int edge; ··· 199 197 return -EINVAL; 200 198 } 201 199 200 + raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags); 201 + 202 202 if (GPIO_EDGE_SEL >= 0) { 203 203 val = readl(port->base + GPIO_EDGE_SEL); 204 204 if (edge == GPIO_INT_BOTH_EDGES) ··· 221 217 writel(1 << gpio_idx, port->base + GPIO_ISR); 222 218 port->pad_type[gpio_idx] = type; 223 219 224 - return 0; 220 + raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags); 221 + 222 + return port->gc.direction_input(&port->gc, gpio_idx); 225 223 } 226 224 227 225 static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio) 228 226 { 229 227 void __iomem *reg = port->base; 228 + unsigned long flags; 230 229 u32 bit, val; 231 230 int edge; 231 + 232 + raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags); 232 233 233 234 reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */ 234 235 bit = gpio & 0xf; ··· 252 243 return; 253 244 } 254 245 writel(val | (edge << (bit << 1)), reg); 246 + 247 + raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags); 255 248 } 256 249 257 250 /* handle 32 interrupts in one status register */
+15 -2
drivers/gpio/gpiolib-acpi.c
··· 385 385 } 386 386 387 387 static bool acpi_gpio_irq_is_wake(struct device *parent, 388 - struct acpi_resource_gpio *agpio) 388 + const struct acpi_resource_gpio *agpio) 389 389 { 390 390 unsigned int pin = agpio->pin_table[0]; 391 391 ··· 778 778 lookup->info.pin_config = agpio->pin_config; 779 779 lookup->info.debounce = agpio->debounce_timeout; 780 780 lookup->info.gpioint = gpioint; 781 - lookup->info.wake_capable = agpio->wake_capable == ACPI_WAKE_CAPABLE; 781 + lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio); 782 782 783 783 /* 784 784 * Polarity and triggering are only specified for GpioInt ··· 1621 1621 }, 1622 1622 .driver_data = &(struct acpi_gpiolib_dmi_quirk) { 1623 1623 .ignore_interrupt = "AMDI0030:00@18", 1624 + }, 1625 + }, 1626 + { 1627 + /* 1628 + * Spurious wakeups from TP_ATTN# pin 1629 + * Found in BIOS 1.7.8 1630 + * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627 1631 + */ 1632 + .matches = { 1633 + DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), 1634 + }, 1635 + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { 1636 + .ignore_wake = "ELAN0415:00@9", 1624 1637 }, 1625 1638 }, 1626 1639 {} /* Terminating entry */
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 156 156 return amdgpu_compute_multipipe == 1; 157 157 } 158 158 159 + if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)) 160 + return true; 161 + 159 162 /* FIXME: spreading the queues across pipes causes perf regressions 160 163 * on POLARIS11 compute workloads */ 161 164 if (adev->asic_type == CHIP_POLARIS11)
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 497 497 !--id_mgr->reserved_use_count) { 498 498 /* give the reserved ID back to normal round robin */ 499 499 list_add(&id_mgr->reserved->list, &id_mgr->ids_lru); 500 + id_mgr->reserved = NULL; 500 501 } 501 502 vm->reserved_vmid[vmhub] = false; 502 503 mutex_unlock(&id_mgr->lock);
+8 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
··· 161 161 struct dma_fence *f; 162 162 unsigned i; 163 163 164 - /* use sched fence if available */ 165 - f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence; 164 + /* Check if any fences where initialized */ 165 + if (job->base.s_fence && job->base.s_fence->finished.ops) 166 + f = &job->base.s_fence->finished; 167 + else if (job->hw_fence.ops) 168 + f = &job->hw_fence; 169 + else 170 + f = NULL; 171 + 166 172 for (i = 0; i < job->num_ibs; ++i) 167 173 amdgpu_ib_free(ring->adev, &job->ibs[i], f); 168 174 }
+9 -2
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 1287 1287 1288 1288 switch (adev->ip_versions[GC_HWIP][0]) { 1289 1289 case IP_VERSION(11, 0, 0): 1290 - case IP_VERSION(11, 0, 1): 1291 1290 case IP_VERSION(11, 0, 2): 1292 1291 case IP_VERSION(11, 0, 3): 1293 - case IP_VERSION(11, 0, 4): 1294 1292 adev->gfx.me.num_me = 1; 1295 1293 adev->gfx.me.num_pipe_per_me = 1; 1296 1294 adev->gfx.me.num_queue_per_pipe = 1; 1297 1295 adev->gfx.mec.num_mec = 2; 1296 + adev->gfx.mec.num_pipe_per_mec = 4; 1297 + adev->gfx.mec.num_queue_per_pipe = 4; 1298 + break; 1299 + case IP_VERSION(11, 0, 1): 1300 + case IP_VERSION(11, 0, 4): 1301 + adev->gfx.me.num_me = 1; 1302 + adev->gfx.me.num_pipe_per_me = 1; 1303 + adev->gfx.me.num_queue_per_pipe = 1; 1304 + adev->gfx.mec.num_mec = 1; 1298 1305 adev->gfx.mec.num_pipe_per_mec = 4; 1299 1306 adev->gfx.mec.num_queue_per_pipe = 4; 1300 1307 break;
+4 -10
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1503 1503 case IP_VERSION(3, 0, 1): 1504 1504 case IP_VERSION(3, 1, 2): 1505 1505 case IP_VERSION(3, 1, 3): 1506 - case IP_VERSION(3, 1, 4): 1507 - case IP_VERSION(3, 1, 5): 1508 1506 case IP_VERSION(3, 1, 6): 1509 1507 init_data.flags.gpu_vm_support = true; 1510 1508 break; ··· 1726 1728 if (adev->dm.vblank_control_workqueue) { 1727 1729 destroy_workqueue(adev->dm.vblank_control_workqueue); 1728 1730 adev->dm.vblank_control_workqueue = NULL; 1729 - } 1730 - 1731 - for (i = 0; i < adev->dm.display_indexes_num; i++) { 1732 - drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); 1733 1731 } 1734 1732 1735 1733 amdgpu_dm_destroy_drm_device(&adev->dm); ··· 5305 5311 5306 5312 timing_out->aspect_ratio = get_aspect_ratio(mode_in); 5307 5313 5308 - stream->output_color_space = get_output_color_space(timing_out); 5309 - 5310 5314 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 5311 5315 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 5312 5316 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { ··· 5315 5323 adjust_colour_depth_from_display_info(timing_out, info); 5316 5324 } 5317 5325 } 5326 + 5327 + stream->output_color_space = get_output_color_space(timing_out); 5318 5328 } 5319 5329 5320 5330 static void fill_audio_info(struct audio_info *audio_info, ··· 9524 9530 goto fail; 9525 9531 } 9526 9532 9527 - if (dm_old_con_state->abm_level != 9528 - dm_new_con_state->abm_level) 9533 + if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || 9534 + dm_old_con_state->scaling != dm_new_con_state->scaling) 9529 9535 new_crtc_state->connectors_changed = true; 9530 9536 } 9531 9537
-1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 468 468 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) 469 469 { 470 470 drm_encoder_cleanup(encoder); 471 - kfree(encoder); 472 471 } 473 472 474 473 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+2 -2
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
··· 90 90 { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3, 91 91 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }, 92 92 { COLOR_SPACE_YCBCR2020_TYPE, 93 - { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2, 94 - 0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} }, 93 + { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2, 94 + 0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} }, 95 95 { COLOR_SPACE_YCBCR709_BLACK_TYPE, 96 96 { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 97 97 0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
+6 -1
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
··· 1171 1171 int ret = 0; 1172 1172 uint32_t apu_percent = 0; 1173 1173 uint32_t dgpu_percent = 0; 1174 + struct amdgpu_device *adev = smu->adev; 1174 1175 1175 1176 1176 1177 ret = smu_cmn_get_metrics_table(smu, ··· 1197 1196 *value = metrics->AverageUvdActivity / 100; 1198 1197 break; 1199 1198 case METRICS_AVERAGE_SOCKETPOWER: 1200 - *value = (metrics->CurrentSocketPower << 8) / 1000; 1199 + if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) || 1200 + ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200))) 1201 + *value = metrics->CurrentSocketPower << 8; 1202 + else 1203 + *value = (metrics->CurrentSocketPower << 8) / 1000; 1201 1204 break; 1202 1205 case METRICS_TEMPERATURE_EDGE: 1203 1206 *value = (metrics->GfxTemperature / 100) *
+7
drivers/gpu/drm/drm_fb_helper.c
··· 30 30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 31 31 32 32 #include <linux/console.h> 33 + #include <linux/pci.h> 33 34 #include <linux/sysrq.h> 35 + #include <linux/vga_switcheroo.h> 34 36 35 37 #include <drm/drm_atomic.h> 36 38 #include <drm/drm_drv.h> ··· 1911 1909 return ret; 1912 1910 1913 1911 strcpy(fb_helper->fb->comm, "[fbcon]"); 1912 + 1913 + /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */ 1914 + if (dev_is_pci(dev->dev)) 1915 + vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info); 1916 + 1914 1917 return 0; 1915 1918 } 1916 1919
+1 -1
drivers/gpu/drm/i915/display/skl_universal_plane.c
··· 1627 1627 u32 offset; 1628 1628 int ret; 1629 1629 1630 - if (w > max_width || w < min_width || h > max_height) { 1630 + if (w > max_width || w < min_width || h > max_height || h < 1) { 1631 1631 drm_dbg_kms(&dev_priv->drm, 1632 1632 "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n", 1633 1633 w, h, min_width, max_width, max_height);
+4 -4
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
··· 1847 1847 I915_SHRINK_ACTIVE); 1848 1848 i915_vma_unpin(vma); 1849 1849 if (err) 1850 - goto out_put; 1850 + goto out_wf; 1851 1851 1852 1852 /* 1853 1853 * Now that the pages are *unpinned* shrinking should invoke ··· 1863 1863 pr_err("unexpected pages mismatch, should_swap=%s\n", 1864 1864 str_yes_no(should_swap)); 1865 1865 err = -EINVAL; 1866 - goto out_put; 1866 + goto out_wf; 1867 1867 } 1868 1868 1869 1869 if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) { 1870 1870 pr_err("unexpected residual page-size bits, should_swap=%s\n", 1871 1871 str_yes_no(should_swap)); 1872 1872 err = -EINVAL; 1873 - goto out_put; 1873 + goto out_wf; 1874 1874 } 1875 1875 1876 1876 err = i915_vma_pin(vma, 0, 0, flags); 1877 1877 if (err) 1878 - goto out_put; 1878 + goto out_wf; 1879 1879 1880 1880 while (n--) { 1881 1881 err = cpu_check(obj, n, 0xdeadbeaf);
+7 -3
drivers/gpu/drm/i915/gt/intel_gt_regs.h
··· 429 429 #define RC_OP_FLUSH_ENABLE (1 << 0) 430 430 #define HIZ_RAW_STALL_OPT_DISABLE (1 << 2) 431 431 #define CACHE_MODE_1 _MMIO(0x7004) /* IVB+ */ 432 - #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1 << 6) 433 - #define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6) 434 - #define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1) 432 + #define MSAA_OPTIMIZATION_REDUC_DISABLE REG_BIT(11) 433 + #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE REG_BIT(6) 434 + #define GEN8_4x4_STC_OPTIMIZATION_DISABLE REG_BIT(6) 435 + #define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE REG_BIT(1) 435 436 436 437 #define GEN7_GT_MODE _MMIO(0x7008) 437 438 #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) ··· 457 456 458 457 #define GEN8_L3CNTLREG _MMIO(0x7034) 459 458 #define GEN8_ERRDETBCTRL (1 << 9) 459 + 460 + #define PSS_MODE2 _MMIO(0x703c) 461 + #define SCOREBOARD_STALL_FLUSH_CONTROL REG_BIT(5) 460 462 461 463 #define GEN7_SC_INSTDONE _MMIO(0x7100) 462 464 #define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
+9 -1
drivers/gpu/drm/i915/gt/intel_workarounds.c
··· 771 771 772 772 /* Wa_14014947963:dg2 */ 773 773 if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) || 774 - IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915)) 774 + IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915)) 775 775 wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000); 776 + 777 + /* Wa_18018764978:dg2 */ 778 + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) || 779 + IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915)) 780 + wa_masked_en(wal, PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL); 776 781 777 782 /* Wa_15010599737:dg2 */ 778 783 wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN); 784 + 785 + /* Wa_18019271663:dg2 */ 786 + wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE); 779 787 } 780 788 781 789 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
+1 -4
drivers/gpu/drm/i915/i915_driver.c
··· 1069 1069 */ 1070 1070 static void i915_driver_lastclose(struct drm_device *dev) 1071 1071 { 1072 - struct drm_i915_private *i915 = to_i915(dev); 1073 - 1074 1072 intel_fbdev_restore_mode(dev); 1075 1073 1076 - if (HAS_DISPLAY(i915)) 1077 - vga_switcheroo_process_delayed_switch(); 1074 + vga_switcheroo_process_delayed_switch(); 1078 1075 } 1079 1076 1080 1077 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+2 -1
drivers/gpu/drm/i915/i915_pci.c
··· 423 423 .has_coherent_ggtt = true, \ 424 424 .has_llc = 1, \ 425 425 .has_rc6 = 1, \ 426 - .has_rc6p = 1, \ 426 + /* snb does support rc6p, but enabling it causes various issues */ \ 427 + .has_rc6p = 0, \ 427 428 .has_rps = true, \ 428 429 .dma_mask_size = 40, \ 429 430 .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
+5 -1
drivers/gpu/drm/i915/i915_switcheroo.c
··· 19 19 dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n"); 20 20 return; 21 21 } 22 + if (!HAS_DISPLAY(i915)) { 23 + dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n"); 24 + return; 25 + } 22 26 23 27 if (state == VGA_SWITCHEROO_ON) { 24 28 drm_info(&i915->drm, "switched on\n"); ··· 48 44 * locking inversion with the driver load path. And the access here is 49 45 * completely racy anyway. So don't bother with locking for now. 50 46 */ 51 - return i915 && atomic_read(&i915->drm.open_count) == 0; 47 + return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0; 52 48 } 53 49 54 50 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
+3 -2
drivers/gpu/drm/msm/adreno/adreno_device.c
··· 551 551 return 0; 552 552 } 553 553 554 + static int adreno_system_suspend(struct device *dev); 554 555 static void adreno_unbind(struct device *dev, struct device *master, 555 556 void *data) 556 557 { 557 558 struct msm_drm_private *priv = dev_get_drvdata(master); 558 559 struct msm_gpu *gpu = dev_to_gpu(dev); 559 560 560 - pm_runtime_force_suspend(dev); 561 + WARN_ON_ONCE(adreno_system_suspend(dev)); 561 562 gpu->funcs->destroy(gpu); 562 563 563 564 priv->gpu_pdev = NULL; ··· 610 609 611 610 static void adreno_shutdown(struct platform_device *pdev) 612 611 { 613 - pm_runtime_force_suspend(&pdev->dev); 612 + WARN_ON_ONCE(adreno_system_suspend(&pdev->dev)); 614 613 } 615 614 616 615 static const struct of_device_id dt_match[] = {
+4
drivers/gpu/drm/msm/adreno/adreno_gpu.c
··· 352 352 /* Ensure string is null terminated: */ 353 353 str[len] = '\0'; 354 354 355 + mutex_lock(&gpu->lock); 356 + 355 357 if (param == MSM_PARAM_COMM) { 356 358 paramp = &ctx->comm; 357 359 } else { ··· 362 360 363 361 kfree(*paramp); 364 362 *paramp = str; 363 + 364 + mutex_unlock(&gpu->lock); 365 365 366 366 return 0; 367 367 }
+2
drivers/gpu/drm/msm/msm_gpu.c
··· 335 335 struct msm_file_private *ctx = submit->queue->ctx; 336 336 struct task_struct *task; 337 337 338 + WARN_ON(!mutex_is_locked(&submit->gpu->lock)); 339 + 338 340 /* Note that kstrdup will return NULL if argument is NULL: */ 339 341 *comm = kstrdup(ctx->comm, GFP_KERNEL); 340 342 *cmd = kstrdup(ctx->cmdline, GFP_KERNEL);
+10 -2
drivers/gpu/drm/msm/msm_gpu.h
··· 376 376 */ 377 377 int sysprof; 378 378 379 - /** comm: Overridden task comm, see MSM_PARAM_COMM */ 379 + /** 380 + * comm: Overridden task comm, see MSM_PARAM_COMM 381 + * 382 + * Accessed under msm_gpu::lock 383 + */ 380 384 char *comm; 381 385 382 - /** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */ 386 + /** 387 + * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE 388 + * 389 + * Accessed under msm_gpu::lock 390 + */ 383 391 char *cmdline; 384 392 385 393 /**
+2 -1
drivers/gpu/drm/panfrost/Kconfig
··· 3 3 config DRM_PANFROST 4 4 tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)" 5 5 depends on DRM 6 - depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64) 6 + depends on ARM || ARM64 || COMPILE_TEST 7 + depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE 7 8 depends on MMU 8 9 select DRM_SCHED 9 10 select IOMMU_SUPPORT
+2 -4
drivers/gpu/drm/vc4/vc4_bo.c
··· 179 179 bo->validated_shader = NULL; 180 180 } 181 181 182 + mutex_destroy(&bo->madv_lock); 182 183 drm_gem_dma_free(&bo->base); 183 184 } 184 185 ··· 395 394 { 396 395 struct vc4_dev *vc4 = to_vc4_dev(dev); 397 396 struct vc4_bo *bo; 398 - int ret; 399 397 400 398 if (WARN_ON_ONCE(vc4->is_vc5)) 401 399 return ERR_PTR(-ENODEV); ··· 406 406 bo->madv = VC4_MADV_WILLNEED; 407 407 refcount_set(&bo->usecnt, 0); 408 408 409 - ret = drmm_mutex_init(dev, &bo->madv_lock); 410 - if (ret) 411 - return ERR_PTR(ret); 409 + mutex_init(&bo->madv_lock); 412 410 413 411 mutex_lock(&vc4->bo_lock); 414 412 bo->label = VC4_BO_TYPE_KERNEL;
+5 -2
drivers/infiniband/core/verbs.c
··· 2957 2957 bool __rdma_block_iter_next(struct ib_block_iter *biter) 2958 2958 { 2959 2959 unsigned int block_offset; 2960 + unsigned int sg_delta; 2960 2961 2961 2962 if (!biter->__sg_nents || !biter->__sg) 2962 2963 return false; 2963 2964 2964 2965 biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; 2965 2966 block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); 2966 - biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset; 2967 + sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; 2967 2968 2968 - if (biter->__sg_advance >= sg_dma_len(biter->__sg)) { 2969 + if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { 2970 + biter->__sg_advance += sg_delta; 2971 + } else { 2969 2972 biter->__sg_advance = 0; 2970 2973 biter->__sg = sg_next(biter->__sg); 2971 2974 biter->__sg_nents--;
+141 -61
drivers/infiniband/hw/hfi1/user_exp_rcv.c
··· 23 23 static bool tid_rb_invalidate(struct mmu_interval_notifier *mni, 24 24 const struct mmu_notifier_range *range, 25 25 unsigned long cur_seq); 26 + static bool tid_cover_invalidate(struct mmu_interval_notifier *mni, 27 + const struct mmu_notifier_range *range, 28 + unsigned long cur_seq); 26 29 static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *, 27 30 struct tid_group *grp, 28 31 unsigned int start, u16 count, 29 32 u32 *tidlist, unsigned int *tididx, 30 33 unsigned int *pmapped); 31 - static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, 32 - struct tid_group **grp); 34 + static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo); 35 + static void __clear_tid_node(struct hfi1_filedata *fd, 36 + struct tid_rb_node *node); 33 37 static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node); 34 38 35 39 static const struct mmu_interval_notifier_ops tid_mn_ops = { 36 40 .invalidate = tid_rb_invalidate, 41 + }; 42 + static const struct mmu_interval_notifier_ops tid_cover_ops = { 43 + .invalidate = tid_cover_invalidate, 37 44 }; 38 45 39 46 /* ··· 260 253 tididx = 0, mapped, mapped_pages = 0; 261 254 u32 *tidlist = NULL; 262 255 struct tid_user_buf *tidbuf; 256 + unsigned long mmu_seq = 0; 263 257 264 258 if (!PAGE_ALIGNED(tinfo->vaddr)) 259 + return -EINVAL; 260 + if (tinfo->length == 0) 265 261 return -EINVAL; 266 262 267 263 tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL); 268 264 if (!tidbuf) 269 265 return -ENOMEM; 270 266 267 + mutex_init(&tidbuf->cover_mutex); 271 268 tidbuf->vaddr = tinfo->vaddr; 272 269 tidbuf->length = tinfo->length; 273 270 tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets), 274 271 GFP_KERNEL); 275 272 if (!tidbuf->psets) { 276 - kfree(tidbuf); 277 - return -ENOMEM; 273 + ret = -ENOMEM; 274 + goto fail_release_mem; 275 + } 276 + 277 + if (fd->use_mn) { 278 + ret = mmu_interval_notifier_insert( 279 + &tidbuf->notifier, current->mm, 280 + tidbuf->vaddr, 
tidbuf->npages * PAGE_SIZE, 281 + &tid_cover_ops); 282 + if (ret) 283 + goto fail_release_mem; 284 + mmu_seq = mmu_interval_read_begin(&tidbuf->notifier); 278 285 } 279 286 280 287 pinned = pin_rcv_pages(fd, tidbuf); 281 288 if (pinned <= 0) { 282 - kfree(tidbuf->psets); 283 - kfree(tidbuf); 284 - return pinned; 289 + ret = (pinned < 0) ? pinned : -ENOSPC; 290 + goto fail_unpin; 285 291 } 286 292 287 293 /* Find sets of physically contiguous pages */ 288 294 tidbuf->n_psets = find_phys_blocks(tidbuf, pinned); 289 295 290 - /* 291 - * We don't need to access this under a lock since tid_used is per 292 - * process and the same process cannot be in hfi1_user_exp_rcv_clear() 293 - * and hfi1_user_exp_rcv_setup() at the same time. 294 - */ 296 + /* Reserve the number of expected tids to be used. */ 295 297 spin_lock(&fd->tid_lock); 296 298 if (fd->tid_used + tidbuf->n_psets > fd->tid_limit) 297 299 pageset_count = fd->tid_limit - fd->tid_used; 298 300 else 299 301 pageset_count = tidbuf->n_psets; 302 + fd->tid_used += pageset_count; 300 303 spin_unlock(&fd->tid_lock); 301 304 302 - if (!pageset_count) 303 - goto bail; 305 + if (!pageset_count) { 306 + ret = -ENOSPC; 307 + goto fail_unreserve; 308 + } 304 309 305 310 ngroups = pageset_count / dd->rcv_entries.group_size; 306 311 tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL); 307 312 if (!tidlist) { 308 313 ret = -ENOMEM; 309 - goto nomem; 314 + goto fail_unreserve; 310 315 } 311 316 312 317 tididx = 0; ··· 414 395 } 415 396 unlock: 416 397 mutex_unlock(&uctxt->exp_mutex); 417 - nomem: 418 398 hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx, 419 399 mapped_pages, ret); 420 - if (tididx) { 421 - spin_lock(&fd->tid_lock); 422 - fd->tid_used += tididx; 423 - spin_unlock(&fd->tid_lock); 424 - tinfo->tidcnt = tididx; 425 - tinfo->length = mapped_pages * PAGE_SIZE; 426 400 427 - if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), 428 - tidlist, sizeof(tidlist[0]) * tididx)) { 429 - /* 430 - * On 
failure to copy to the user level, we need to undo 431 - * everything done so far so we don't leak resources. 432 - */ 433 - tinfo->tidlist = (unsigned long)&tidlist; 434 - hfi1_user_exp_rcv_clear(fd, tinfo); 435 - tinfo->tidlist = 0; 436 - ret = -EFAULT; 437 - goto bail; 401 + /* fail if nothing was programmed, set error if none provided */ 402 + if (tididx == 0) { 403 + if (ret >= 0) 404 + ret = -ENOSPC; 405 + goto fail_unreserve; 406 + } 407 + 408 + /* adjust reserved tid_used to actual count */ 409 + spin_lock(&fd->tid_lock); 410 + fd->tid_used -= pageset_count - tididx; 411 + spin_unlock(&fd->tid_lock); 412 + 413 + /* unpin all pages not covered by a TID */ 414 + unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages, 415 + false); 416 + 417 + if (fd->use_mn) { 418 + /* check for an invalidate during setup */ 419 + bool fail = false; 420 + 421 + mutex_lock(&tidbuf->cover_mutex); 422 + fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq); 423 + mutex_unlock(&tidbuf->cover_mutex); 424 + 425 + if (fail) { 426 + ret = -EBUSY; 427 + goto fail_unprogram; 438 428 } 439 429 } 440 430 441 - /* 442 - * If not everything was mapped (due to insufficient RcvArray entries, 443 - * for example), unpin all unmapped pages so we can pin them nex time. 444 - */ 445 - if (mapped_pages != pinned) 446 - unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, 447 - (pinned - mapped_pages), false); 448 - bail: 449 - kfree(tidbuf->psets); 450 - kfree(tidlist); 431 + tinfo->tidcnt = tididx; 432 + tinfo->length = mapped_pages * PAGE_SIZE; 433 + 434 + if (copy_to_user(u64_to_user_ptr(tinfo->tidlist), 435 + tidlist, sizeof(tidlist[0]) * tididx)) { 436 + ret = -EFAULT; 437 + goto fail_unprogram; 438 + } 439 + 440 + if (fd->use_mn) 441 + mmu_interval_notifier_remove(&tidbuf->notifier); 451 442 kfree(tidbuf->pages); 443 + kfree(tidbuf->psets); 452 444 kfree(tidbuf); 453 - return ret > 0 ? 
0 : ret; 445 + kfree(tidlist); 446 + return 0; 447 + 448 + fail_unprogram: 449 + /* unprogram, unmap, and unpin all allocated TIDs */ 450 + tinfo->tidlist = (unsigned long)tidlist; 451 + hfi1_user_exp_rcv_clear(fd, tinfo); 452 + tinfo->tidlist = 0; 453 + pinned = 0; /* nothing left to unpin */ 454 + pageset_count = 0; /* nothing left reserved */ 455 + fail_unreserve: 456 + spin_lock(&fd->tid_lock); 457 + fd->tid_used -= pageset_count; 458 + spin_unlock(&fd->tid_lock); 459 + fail_unpin: 460 + if (fd->use_mn) 461 + mmu_interval_notifier_remove(&tidbuf->notifier); 462 + if (pinned > 0) 463 + unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false); 464 + fail_release_mem: 465 + kfree(tidbuf->pages); 466 + kfree(tidbuf->psets); 467 + kfree(tidbuf); 468 + kfree(tidlist); 469 + return ret; 454 470 } 455 471 456 472 int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd, ··· 506 452 507 453 mutex_lock(&uctxt->exp_mutex); 508 454 for (tididx = 0; tididx < tinfo->tidcnt; tididx++) { 509 - ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL); 455 + ret = unprogram_rcvarray(fd, tidinfo[tididx]); 510 456 if (ret) { 511 457 hfi1_cdbg(TID, "Failed to unprogram rcv array %d", 512 458 ret); ··· 760 706 } 761 707 762 708 node->fdata = fd; 709 + mutex_init(&node->invalidate_mutex); 763 710 node->phys = page_to_phys(pages[0]); 764 711 node->npages = npages; 765 712 node->rcventry = rcventry; ··· 776 721 &tid_mn_ops); 777 722 if (ret) 778 723 goto out_unmap; 779 - /* 780 - * FIXME: This is in the wrong order, the notifier should be 781 - * established before the pages are pinned by pin_rcv_pages. 
782 - */ 783 - mmu_interval_read_begin(&node->notifier); 784 724 } 785 725 fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node; 786 726 ··· 795 745 return -EFAULT; 796 746 } 797 747 798 - static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo, 799 - struct tid_group **grp) 748 + static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo) 800 749 { 801 750 struct hfi1_ctxtdata *uctxt = fd->uctxt; 802 751 struct hfi1_devdata *dd = uctxt->dd; ··· 818 769 if (!node || node->rcventry != (uctxt->expected_base + rcventry)) 819 770 return -EBADF; 820 771 821 - if (grp) 822 - *grp = node->grp; 823 - 824 772 if (fd->use_mn) 825 773 mmu_interval_notifier_remove(&node->notifier); 826 774 cacheless_tid_rb_remove(fd, node); ··· 825 779 return 0; 826 780 } 827 781 828 - static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) 782 + static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) 829 783 { 830 784 struct hfi1_ctxtdata *uctxt = fd->uctxt; 831 785 struct hfi1_devdata *dd = uctxt->dd; 786 + 787 + mutex_lock(&node->invalidate_mutex); 788 + if (node->freed) 789 + goto done; 790 + node->freed = true; 832 791 833 792 trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry, 834 793 node->npages, 835 794 node->notifier.interval_tree.start, node->phys, 836 795 node->dma_addr); 837 796 838 - /* 839 - * Make sure device has seen the write before we unpin the 840 - * pages. 
841 - */ 797 + /* Make sure device has seen the write before pages are unpinned */ 842 798 hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0); 843 799 844 800 unpin_rcv_pages(fd, NULL, node, 0, node->npages, true); 801 + done: 802 + mutex_unlock(&node->invalidate_mutex); 803 + } 804 + 805 + static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node) 806 + { 807 + struct hfi1_ctxtdata *uctxt = fd->uctxt; 808 + 809 + __clear_tid_node(fd, node); 845 810 846 811 node->grp->used--; 847 812 node->grp->map &= ~(1 << (node->rcventry - node->grp->base)); ··· 911 854 if (node->freed) 912 855 return true; 913 856 857 + /* take action only if unmapping */ 858 + if (range->event != MMU_NOTIFY_UNMAP) 859 + return true; 860 + 914 861 trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, 915 862 node->notifier.interval_tree.start, 916 863 node->rcventry, node->npages, node->dma_addr); 917 - node->freed = true; 864 + 865 + /* clear the hardware rcvarray entry */ 866 + __clear_tid_node(fdata, node); 918 867 919 868 spin_lock(&fdata->invalid_lock); 920 869 if (fdata->invalid_tid_idx < uctxt->expected_count) { ··· 947 884 fdata->invalid_tid_idx++; 948 885 } 949 886 spin_unlock(&fdata->invalid_lock); 887 + return true; 888 + } 889 + 890 + static bool tid_cover_invalidate(struct mmu_interval_notifier *mni, 891 + const struct mmu_notifier_range *range, 892 + unsigned long cur_seq) 893 + { 894 + struct tid_user_buf *tidbuf = 895 + container_of(mni, struct tid_user_buf, notifier); 896 + 897 + /* take action only if unmapping */ 898 + if (range->event == MMU_NOTIFY_UNMAP) { 899 + mutex_lock(&tidbuf->cover_mutex); 900 + mmu_interval_set_seq(mni, cur_seq); 901 + mutex_unlock(&tidbuf->cover_mutex); 902 + } 903 + 950 904 return true; 951 905 } 952 906
+3
drivers/infiniband/hw/hfi1/user_exp_rcv.h
··· 16 16 }; 17 17 18 18 struct tid_user_buf { 19 + struct mmu_interval_notifier notifier; 20 + struct mutex cover_mutex; 19 21 unsigned long vaddr; 20 22 unsigned long length; 21 23 unsigned int npages; ··· 29 27 struct tid_rb_node { 30 28 struct mmu_interval_notifier notifier; 31 29 struct hfi1_filedata *fdata; 30 + struct mutex invalidate_mutex; /* covers hw removal */ 32 31 unsigned long phys; 33 32 struct tid_group *grp; 34 33 u32 rcventry;
+5 -5
drivers/infiniband/sw/rxe/rxe_param.h
··· 98 98 RXE_MAX_SRQ = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX, 99 99 100 100 RXE_MIN_MR_INDEX = 0x00000001, 101 - RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE, 102 - RXE_MAX_MR = DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX, 103 - RXE_MIN_MW_INDEX = 0x00010001, 104 - RXE_MAX_MW_INDEX = 0x00020000, 105 - RXE_MAX_MW = 0x00001000, 101 + RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE >> 1, 102 + RXE_MAX_MR = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX, 103 + RXE_MIN_MW_INDEX = RXE_MAX_MR_INDEX + 1, 104 + RXE_MAX_MW_INDEX = DEFAULT_MAX_VALUE, 105 + RXE_MAX_MW = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX, 106 106 107 107 RXE_MAX_PKT_PER_ACK = 64, 108 108
+11 -11
drivers/infiniband/sw/rxe/rxe_pool.c
··· 23 23 .size = sizeof(struct rxe_ucontext), 24 24 .elem_offset = offsetof(struct rxe_ucontext, elem), 25 25 .min_index = 1, 26 - .max_index = UINT_MAX, 27 - .max_elem = UINT_MAX, 26 + .max_index = RXE_MAX_UCONTEXT, 27 + .max_elem = RXE_MAX_UCONTEXT, 28 28 }, 29 29 [RXE_TYPE_PD] = { 30 30 .name = "pd", 31 31 .size = sizeof(struct rxe_pd), 32 32 .elem_offset = offsetof(struct rxe_pd, elem), 33 33 .min_index = 1, 34 - .max_index = UINT_MAX, 35 - .max_elem = UINT_MAX, 34 + .max_index = RXE_MAX_PD, 35 + .max_elem = RXE_MAX_PD, 36 36 }, 37 37 [RXE_TYPE_AH] = { 38 38 .name = "ah", ··· 40 40 .elem_offset = offsetof(struct rxe_ah, elem), 41 41 .min_index = RXE_MIN_AH_INDEX, 42 42 .max_index = RXE_MAX_AH_INDEX, 43 - .max_elem = RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1, 43 + .max_elem = RXE_MAX_AH, 44 44 }, 45 45 [RXE_TYPE_SRQ] = { 46 46 .name = "srq", ··· 49 49 .cleanup = rxe_srq_cleanup, 50 50 .min_index = RXE_MIN_SRQ_INDEX, 51 51 .max_index = RXE_MAX_SRQ_INDEX, 52 - .max_elem = RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1, 52 + .max_elem = RXE_MAX_SRQ, 53 53 }, 54 54 [RXE_TYPE_QP] = { 55 55 .name = "qp", ··· 58 58 .cleanup = rxe_qp_cleanup, 59 59 .min_index = RXE_MIN_QP_INDEX, 60 60 .max_index = RXE_MAX_QP_INDEX, 61 - .max_elem = RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1, 61 + .max_elem = RXE_MAX_QP, 62 62 }, 63 63 [RXE_TYPE_CQ] = { 64 64 .name = "cq", ··· 66 66 .elem_offset = offsetof(struct rxe_cq, elem), 67 67 .cleanup = rxe_cq_cleanup, 68 68 .min_index = 1, 69 - .max_index = UINT_MAX, 70 - .max_elem = UINT_MAX, 69 + .max_index = RXE_MAX_CQ, 70 + .max_elem = RXE_MAX_CQ, 71 71 }, 72 72 [RXE_TYPE_MR] = { 73 73 .name = "mr", ··· 76 76 .cleanup = rxe_mr_cleanup, 77 77 .min_index = RXE_MIN_MR_INDEX, 78 78 .max_index = RXE_MAX_MR_INDEX, 79 - .max_elem = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1, 79 + .max_elem = RXE_MAX_MR, 80 80 }, 81 81 [RXE_TYPE_MW] = { 82 82 .name = "mw", ··· 85 85 .cleanup = rxe_mw_cleanup, 86 86 .min_index = RXE_MIN_MW_INDEX, 87 87 .max_index = 
RXE_MAX_MW_INDEX, 88 - .max_elem = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1, 88 + .max_elem = RXE_MAX_MW, 89 89 }, 90 90 }; 91 91
+1 -1
drivers/interconnect/qcom/icc-rpm.c
··· 488 488 } 489 489 490 490 regmap_done: 491 - ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks); 491 + ret = devm_clk_bulk_get_optional(dev, qp->num_clks, qp->bus_clks); 492 492 if (ret) 493 493 return ret; 494 494
+14 -5
drivers/interconnect/qcom/msm8996.c
··· 33 33 "aggre0_noc_mpu_cfg" 34 34 }; 35 35 36 + static const char * const bus_a2noc_clocks[] = { 37 + "bus", 38 + "bus_a", 39 + "aggre2_ufs_axi", 40 + "ufs_axi" 41 + }; 42 + 36 43 static const u16 mas_a0noc_common_links[] = { 37 44 MSM8996_SLAVE_A0NOC_SNOC 38 45 }; ··· 1813 1806 .reg_bits = 32, 1814 1807 .reg_stride = 4, 1815 1808 .val_bits = 32, 1816 - .max_register = 0x9000, 1809 + .max_register = 0x6000, 1817 1810 .fast_io = true 1818 1811 }; 1819 1812 ··· 1837 1830 .reg_bits = 32, 1838 1831 .reg_stride = 4, 1839 1832 .val_bits = 32, 1840 - .max_register = 0x7000, 1833 + .max_register = 0x5000, 1841 1834 .fast_io = true 1842 1835 }; 1843 1836 ··· 1858 1851 .reg_bits = 32, 1859 1852 .reg_stride = 4, 1860 1853 .val_bits = 32, 1861 - .max_register = 0xa000, 1854 + .max_register = 0x7000, 1862 1855 .fast_io = true 1863 1856 }; 1864 1857 ··· 1866 1859 .type = QCOM_ICC_NOC, 1867 1860 .nodes = a2noc_nodes, 1868 1861 .num_nodes = ARRAY_SIZE(a2noc_nodes), 1862 + .clocks = bus_a2noc_clocks, 1863 + .num_clocks = ARRAY_SIZE(bus_a2noc_clocks), 1869 1864 .regmap_cfg = &msm8996_a2noc_regmap_config 1870 1865 }; 1871 1866 ··· 1886 1877 .reg_bits = 32, 1887 1878 .reg_stride = 4, 1888 1879 .val_bits = 32, 1889 - .max_register = 0x62000, 1880 + .max_register = 0x5a000, 1890 1881 .fast_io = true 1891 1882 }; 1892 1883 ··· 1997 1988 .reg_bits = 32, 1998 1989 .reg_stride = 4, 1999 1990 .val_bits = 32, 2000 - .max_register = 0x20000, 1991 + .max_register = 0x1c000, 2001 1992 .fast_io = true 2002 1993 }; 2003 1994
+2 -2
drivers/md/md.c
··· 3644 3644 */ 3645 3645 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3646 3646 { 3647 - static struct md_rdev *claim_rdev; /* just for claiming the bdev */ 3647 + static struct md_rdev claim_rdev; /* just for claiming the bdev */ 3648 3648 struct md_rdev *rdev; 3649 3649 sector_t size; 3650 3650 int err; ··· 3662 3662 3663 3663 rdev->bdev = blkdev_get_by_dev(newdev, 3664 3664 FMODE_READ | FMODE_WRITE | FMODE_EXCL, 3665 - super_format == -2 ? claim_rdev : rdev); 3665 + super_format == -2 ? &claim_rdev : rdev); 3666 3666 if (IS_ERR(rdev->bdev)) { 3667 3667 pr_warn("md: could not open device unknown-block(%u,%u).\n", 3668 3668 MAJOR(newdev), MINOR(newdev));
+2 -4
drivers/memory/atmel-sdramc.c
··· 47 47 caps = of_device_get_match_data(&pdev->dev); 48 48 49 49 if (caps->has_ddrck) { 50 - clk = devm_clk_get(&pdev->dev, "ddrck"); 50 + clk = devm_clk_get_enabled(&pdev->dev, "ddrck"); 51 51 if (IS_ERR(clk)) 52 52 return PTR_ERR(clk); 53 - clk_prepare_enable(clk); 54 53 } 55 54 56 55 if (caps->has_mpddr_clk) { 57 - clk = devm_clk_get(&pdev->dev, "mpddr"); 56 + clk = devm_clk_get_enabled(&pdev->dev, "mpddr"); 58 57 if (IS_ERR(clk)) { 59 58 pr_err("AT91 RAMC: couldn't get mpddr clock\n"); 60 59 return PTR_ERR(clk); 61 60 } 62 - clk_prepare_enable(clk); 63 61 } 64 62 65 63 return 0;
+1 -2
drivers/memory/mvebu-devbus.c
··· 280 280 if (IS_ERR(devbus->base)) 281 281 return PTR_ERR(devbus->base); 282 282 283 - clk = devm_clk_get(&pdev->dev, NULL); 283 + clk = devm_clk_get_enabled(&pdev->dev, NULL); 284 284 if (IS_ERR(clk)) 285 285 return PTR_ERR(clk); 286 - clk_prepare_enable(clk); 287 286 288 287 /* 289 288 * Obtain clock period in picoseconds,
+2 -1
drivers/memory/omap-gpmc.c
··· 1918 1918 } 1919 1919 } 1920 1920 1921 - if (p->wait_pin > gpmc_nr_waitpins) { 1921 + if (p->wait_pin != GPMC_WAITPIN_INVALID && 1922 + p->wait_pin > gpmc_nr_waitpins) { 1922 1923 pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin); 1923 1924 return -EINVAL; 1924 1925 }
-36
drivers/memory/tegra/tegra186.c
··· 22 22 #define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16) 23 23 #define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8) 24 24 25 - static void tegra186_mc_program_sid(struct tegra_mc *mc) 26 - { 27 - unsigned int i; 28 - 29 - for (i = 0; i < mc->soc->num_clients; i++) { 30 - const struct tegra_mc_client *client = &mc->soc->clients[i]; 31 - u32 override, security; 32 - 33 - override = readl(mc->regs + client->regs.sid.override); 34 - security = readl(mc->regs + client->regs.sid.security); 35 - 36 - dev_dbg(mc->dev, "client %s: override: %x security: %x\n", 37 - client->name, override, security); 38 - 39 - dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid, 40 - client->name); 41 - writel(client->sid, mc->regs + client->regs.sid.override); 42 - 43 - override = readl(mc->regs + client->regs.sid.override); 44 - security = readl(mc->regs + client->regs.sid.security); 45 - 46 - dev_dbg(mc->dev, "client %s: override: %x security: %x\n", 47 - client->name, override, security); 48 - } 49 - } 50 - 51 25 static int tegra186_mc_probe(struct tegra_mc *mc) 52 26 { 53 27 struct platform_device *pdev = to_platform_device(mc->dev); ··· 59 85 if (err < 0) 60 86 return err; 61 87 62 - tegra186_mc_program_sid(mc); 63 - 64 88 return 0; 65 89 } 66 90 67 91 static void tegra186_mc_remove(struct tegra_mc *mc) 68 92 { 69 93 of_platform_depopulate(mc->dev); 70 - } 71 - 72 - static int tegra186_mc_resume(struct tegra_mc *mc) 73 - { 74 - tegra186_mc_program_sid(mc); 75 - 76 - return 0; 77 94 } 78 95 79 96 #if IS_ENABLED(CONFIG_IOMMU_API) ··· 138 173 const struct tegra_mc_ops tegra186_mc_ops = { 139 174 .probe = tegra186_mc_probe, 140 175 .remove = tegra186_mc_remove, 141 - .resume = tegra186_mc_resume, 142 176 .probe_device = tegra186_mc_probe_device, 143 177 .handle_irq = tegra30_mc_handle_irq, 144 178 };
+41 -42
drivers/misc/fastrpc.c
··· 321 321 perm.vmid = QCOM_SCM_VMID_HLOS; 322 322 perm.perm = QCOM_SCM_PERM_RWX; 323 323 err = qcom_scm_assign_mem(map->phys, map->size, 324 - &(map->fl->cctx->vmperms[0].vmid), &perm, 1); 324 + &map->fl->cctx->perms, &perm, 1); 325 325 if (err) { 326 326 dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", 327 327 map->phys, map->size, err); ··· 334 334 dma_buf_put(map->buf); 335 335 } 336 336 337 + if (map->fl) { 338 + spin_lock(&map->fl->lock); 339 + list_del(&map->node); 340 + spin_unlock(&map->fl->lock); 341 + map->fl = NULL; 342 + } 343 + 337 344 kfree(map); 338 345 } 339 346 ··· 350 343 kref_put(&map->refcount, fastrpc_free_map); 351 344 } 352 345 353 - static void fastrpc_map_get(struct fastrpc_map *map) 346 + static int fastrpc_map_get(struct fastrpc_map *map) 354 347 { 355 - if (map) 356 - kref_get(&map->refcount); 348 + if (!map) 349 + return -ENOENT; 350 + 351 + return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT; 357 352 } 358 353 359 354 360 355 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd, 361 - struct fastrpc_map **ppmap) 356 + struct fastrpc_map **ppmap, bool take_ref) 362 357 { 358 + struct fastrpc_session_ctx *sess = fl->sctx; 363 359 struct fastrpc_map *map = NULL; 360 + int ret = -ENOENT; 364 361 365 - mutex_lock(&fl->mutex); 362 + spin_lock(&fl->lock); 366 363 list_for_each_entry(map, &fl->maps, node) { 367 - if (map->fd == fd) { 368 - *ppmap = map; 369 - mutex_unlock(&fl->mutex); 370 - return 0; 364 + if (map->fd != fd) 365 + continue; 366 + 367 + if (take_ref) { 368 + ret = fastrpc_map_get(map); 369 + if (ret) { 370 + dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n", 371 + __func__, fd, ret); 372 + break; 373 + } 371 374 } 375 + 376 + *ppmap = map; 377 + ret = 0; 378 + break; 372 379 } 373 - mutex_unlock(&fl->mutex); 374 - 375 - return -ENOENT; 376 - } 377 - 378 - static int fastrpc_map_find(struct fastrpc_user *fl, int fd, 379 - struct fastrpc_map **ppmap) 380 - { 381 - int 
ret = fastrpc_map_lookup(fl, fd, ppmap); 382 - 383 - if (!ret) 384 - fastrpc_map_get(*ppmap); 380 + spin_unlock(&fl->lock); 385 381 386 382 return ret; 387 383 } ··· 756 746 struct fastrpc_map *map = NULL; 757 747 int err = 0; 758 748 759 - if (!fastrpc_map_find(fl, fd, ppmap)) 749 + if (!fastrpc_map_lookup(fl, fd, ppmap, true)) 760 750 return 0; 761 751 762 752 map = kzalloc(sizeof(*map), GFP_KERNEL); ··· 798 788 * If subsystem VMIDs are defined in DTSI, then do 799 789 * hyp_assign from HLOS to those VM(s) 800 790 */ 801 - unsigned int perms = BIT(QCOM_SCM_VMID_HLOS); 802 - 803 791 map->attr = attr; 804 - err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms, 792 + err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms, 805 793 fl->cctx->vmperms, fl->cctx->vmcount); 806 794 if (err) { 807 795 dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", ··· 1078 1070 for (i = 0; i < FASTRPC_MAX_FDLIST; i++) { 1079 1071 if (!fdlist[i]) 1080 1072 break; 1081 - if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap)) 1073 + if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false)) 1082 1074 fastrpc_map_put(mmap); 1083 1075 } 1084 1076 ··· 1266 1258 1267 1259 /* Map if we have any heap VMIDs associated with this ADSP Static Process. 
*/ 1268 1260 if (fl->cctx->vmcount) { 1269 - unsigned int perms = BIT(QCOM_SCM_VMID_HLOS); 1270 - 1271 1261 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, 1272 - (u64)fl->cctx->remote_heap->size, &perms, 1262 + (u64)fl->cctx->remote_heap->size, 1263 + &fl->cctx->perms, 1273 1264 fl->cctx->vmperms, fl->cctx->vmcount); 1274 1265 if (err) { 1275 1266 dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", ··· 1316 1309 perm.perm = QCOM_SCM_PERM_RWX; 1317 1310 err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys, 1318 1311 (u64)fl->cctx->remote_heap->size, 1319 - &(fl->cctx->vmperms[0].vmid), &perm, 1); 1312 + &fl->cctx->perms, &perm, 1); 1320 1313 if (err) 1321 1314 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", 1322 1315 fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); ··· 1440 1433 fl->init_mem = NULL; 1441 1434 fastrpc_buf_free(imem); 1442 1435 err_alloc: 1443 - if (map) { 1444 - spin_lock(&fl->lock); 1445 - list_del(&map->node); 1446 - spin_unlock(&fl->lock); 1447 - fastrpc_map_put(map); 1448 - } 1436 + fastrpc_map_put(map); 1449 1437 err: 1450 1438 kfree(args); 1451 1439 ··· 1516 1514 fastrpc_context_put(ctx); 1517 1515 } 1518 1516 1519 - list_for_each_entry_safe(map, m, &fl->maps, node) { 1520 - list_del(&map->node); 1517 + list_for_each_entry_safe(map, m, &fl->maps, node) 1521 1518 fastrpc_map_put(map); 1522 - } 1523 1519 1524 1520 list_for_each_entry_safe(buf, b, &fl->mmaps, node) { 1525 1521 list_del(&buf->node); ··· 1894 1894 /* Add memory to static PD pool, protection thru hypervisor */ 1895 1895 if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { 1896 1896 struct qcom_scm_vmperm perm; 1897 - int err = 0; 1898 1897 1899 1898 perm.vmid = QCOM_SCM_VMID_HLOS; 1900 1899 perm.perm = QCOM_SCM_PERM_RWX; 1901 1900 err = qcom_scm_assign_mem(buf->phys, buf->size, 1902 - &(fl->cctx->vmperms[0].vmid), &perm, 1); 1901 + &fl->cctx->perms, &perm, 1); 1903 1902 if (err) 
{ 1904 1903 dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", 1905 1904 buf->phys, buf->size, err);
+8 -4
drivers/misc/mei/bus.c
··· 702 702 if (cl->state == MEI_FILE_UNINITIALIZED) { 703 703 ret = mei_cl_link(cl); 704 704 if (ret) 705 - goto out; 705 + goto notlinked; 706 706 /* update pointers */ 707 707 cl->cldev = cldev; 708 708 } 709 709 710 710 ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size); 711 - out: 711 + if (ret) 712 + mei_cl_unlink(cl); 713 + notlinked: 712 714 mutex_unlock(&bus->device_lock); 713 715 if (ret) 714 716 return ERR_PTR(ret); ··· 760 758 if (cl->state == MEI_FILE_UNINITIALIZED) { 761 759 ret = mei_cl_link(cl); 762 760 if (ret) 763 - goto out; 761 + goto notlinked; 764 762 /* update pointers */ 765 763 cl->cldev = cldev; 766 764 } ··· 787 785 } 788 786 789 787 out: 788 + if (ret) 789 + mei_cl_unlink(cl); 790 + notlinked: 790 791 mutex_unlock(&bus->device_lock); 791 792 792 793 return ret; ··· 1282 1277 mei_cl_flush_queues(cldev->cl, NULL); 1283 1278 mei_me_cl_put(cldev->me_cl); 1284 1279 mei_dev_bus_put(cldev->bus); 1285 - mei_cl_unlink(cldev->cl); 1286 1280 kfree(cldev->cl); 1287 1281 kfree(cldev); 1288 1282 }
+2
drivers/misc/mei/hw-me-regs.h
··· 111 111 112 112 #define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ 113 113 114 + #define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */ 115 + 114 116 /* 115 117 * MEI HW Section 116 118 */
+2
drivers/misc/mei/pci-me.c
··· 118 118 119 119 {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, 120 120 121 + {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)}, 122 + 121 123 /* required last entry */ 122 124 {0, } 123 125 };
+19 -30
drivers/misc/vmw_vmci/vmci_guest.c
··· 56 56 57 57 bool exclusive_vectors; 58 58 59 - struct tasklet_struct datagram_tasklet; 60 - struct tasklet_struct bm_tasklet; 61 59 struct wait_queue_head inout_wq; 62 60 63 61 void *data_buffer; ··· 302 304 * This function assumes that it has exclusive access to the data 303 305 * in register(s) for the duration of the call. 304 306 */ 305 - static void vmci_dispatch_dgs(unsigned long data) 307 + static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev) 306 308 { 307 - struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data; 308 309 u8 *dg_in_buffer = vmci_dev->data_buffer; 309 310 struct vmci_datagram *dg; 310 311 size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE; ··· 462 465 * Scans the notification bitmap for raised flags, clears them 463 466 * and handles the notifications. 464 467 */ 465 - static void vmci_process_bitmap(unsigned long data) 468 + static void vmci_process_bitmap(struct vmci_guest_device *dev) 466 469 { 467 - struct vmci_guest_device *dev = (struct vmci_guest_device *)data; 468 - 469 470 if (!dev->notification_bitmap) { 470 471 dev_dbg(dev->dev, "No bitmap present in %s\n", __func__); 471 472 return; ··· 481 486 struct vmci_guest_device *dev = _dev; 482 487 483 488 /* 484 - * If we are using MSI-X with exclusive vectors then we simply schedule 485 - * the datagram tasklet, since we know the interrupt was meant for us. 489 + * If we are using MSI-X with exclusive vectors then we simply call 490 + * vmci_dispatch_dgs(), since we know the interrupt was meant for us. 486 491 * Otherwise we must read the ICR to determine what to do. 
487 492 */ 488 493 489 494 if (dev->exclusive_vectors) { 490 - tasklet_schedule(&dev->datagram_tasklet); 495 + vmci_dispatch_dgs(dev); 491 496 } else { 492 497 unsigned int icr; 493 498 ··· 497 502 return IRQ_NONE; 498 503 499 504 if (icr & VMCI_ICR_DATAGRAM) { 500 - tasklet_schedule(&dev->datagram_tasklet); 505 + vmci_dispatch_dgs(dev); 501 506 icr &= ~VMCI_ICR_DATAGRAM; 502 507 } 503 508 504 509 if (icr & VMCI_ICR_NOTIFICATION) { 505 - tasklet_schedule(&dev->bm_tasklet); 510 + vmci_process_bitmap(dev); 506 511 icr &= ~VMCI_ICR_NOTIFICATION; 507 512 } 508 513 ··· 531 536 struct vmci_guest_device *dev = _dev; 532 537 533 538 /* For MSI-X we can just assume it was meant for us. */ 534 - tasklet_schedule(&dev->bm_tasklet); 539 + vmci_process_bitmap(dev); 535 540 536 541 return IRQ_HANDLED; 537 542 } ··· 633 638 vmci_dev->iobase = iobase; 634 639 vmci_dev->mmio_base = mmio_base; 635 640 636 - tasklet_init(&vmci_dev->datagram_tasklet, 637 - vmci_dispatch_dgs, (unsigned long)vmci_dev); 638 - tasklet_init(&vmci_dev->bm_tasklet, 639 - vmci_process_bitmap, (unsigned long)vmci_dev); 640 641 init_waitqueue_head(&vmci_dev->inout_wq); 641 642 642 643 if (mmio_base != NULL) { ··· 799 808 * Request IRQ for legacy or MSI interrupts, or for first 800 809 * MSI-X vector. 801 810 */ 802 - error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt, 803 - IRQF_SHARED, KBUILD_MODNAME, vmci_dev); 811 + error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL, 812 + vmci_interrupt, IRQF_SHARED, 813 + KBUILD_MODNAME, vmci_dev); 804 814 if (error) { 805 815 dev_err(&pdev->dev, "Irq %u in use: %d\n", 806 816 pci_irq_vector(pdev, 0), error); ··· 815 823 * between the vectors. 
816 824 */ 817 825 if (vmci_dev->exclusive_vectors) { 818 - error = request_irq(pci_irq_vector(pdev, 1), 819 - vmci_interrupt_bm, 0, KBUILD_MODNAME, 820 - vmci_dev); 826 + error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL, 827 + vmci_interrupt_bm, 0, 828 + KBUILD_MODNAME, vmci_dev); 821 829 if (error) { 822 830 dev_err(&pdev->dev, 823 831 "Failed to allocate irq %u: %d\n", ··· 825 833 goto err_free_irq; 826 834 } 827 835 if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) { 828 - error = request_irq(pci_irq_vector(pdev, 2), 829 - vmci_interrupt_dma_datagram, 830 - 0, KBUILD_MODNAME, vmci_dev); 836 + error = request_threaded_irq(pci_irq_vector(pdev, 2), 837 + NULL, 838 + vmci_interrupt_dma_datagram, 839 + 0, KBUILD_MODNAME, 840 + vmci_dev); 831 841 if (error) { 832 842 dev_err(&pdev->dev, 833 843 "Failed to allocate irq %u: %d\n", ··· 865 871 866 872 err_free_irq: 867 873 free_irq(pci_irq_vector(pdev, 0), vmci_dev); 868 - tasklet_kill(&vmci_dev->datagram_tasklet); 869 - tasklet_kill(&vmci_dev->bm_tasklet); 870 874 871 875 err_disable_msi: 872 876 pci_free_irq_vectors(pdev); ··· 934 942 } 935 943 free_irq(pci_irq_vector(pdev, 0), vmci_dev); 936 944 pci_free_irq_vectors(pdev); 937 - 938 - tasklet_kill(&vmci_dev->datagram_tasklet); 939 - tasklet_kill(&vmci_dev->bm_tasklet); 940 945 941 946 if (vmci_dev->notification_bitmap) { 942 947 /*
+15 -7
drivers/mmc/host/sdhci-esdhc-imx.c
··· 107 107 #define ESDHC_TUNING_START_TAP_DEFAULT 0x1 108 108 #define ESDHC_TUNING_START_TAP_MASK 0x7f 109 109 #define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7) 110 + #define ESDHC_TUNING_STEP_DEFAULT 0x1 110 111 #define ESDHC_TUNING_STEP_MASK 0x00070000 111 112 #define ESDHC_TUNING_STEP_SHIFT 16 112 113 ··· 1369 1368 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 1370 1369 struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); 1371 1370 struct cqhci_host *cq_host = host->mmc->cqe_private; 1372 - int tmp; 1371 + u32 tmp; 1373 1372 1374 1373 if (esdhc_is_usdhc(imx_data)) { 1375 1374 /* ··· 1424 1423 1425 1424 if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { 1426 1425 tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL); 1427 - tmp |= ESDHC_STD_TUNING_EN | 1428 - ESDHC_TUNING_START_TAP_DEFAULT; 1429 - if (imx_data->boarddata.tuning_start_tap) { 1430 - tmp &= ~ESDHC_TUNING_START_TAP_MASK; 1426 + tmp |= ESDHC_STD_TUNING_EN; 1427 + 1428 + /* 1429 + * ROM code or bootloader may config the start tap 1430 + * and step, unmask them first. 1431 + */ 1432 + tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK); 1433 + if (imx_data->boarddata.tuning_start_tap) 1431 1434 tmp |= imx_data->boarddata.tuning_start_tap; 1432 - } 1435 + else 1436 + tmp |= ESDHC_TUNING_START_TAP_DEFAULT; 1433 1437 1434 1438 if (imx_data->boarddata.tuning_step) { 1435 - tmp &= ~ESDHC_TUNING_STEP_MASK; 1436 1439 tmp |= imx_data->boarddata.tuning_step 1440 + << ESDHC_TUNING_STEP_SHIFT; 1441 + } else { 1442 + tmp |= ESDHC_TUNING_STEP_DEFAULT 1437 1443 << ESDHC_TUNING_STEP_SHIFT; 1438 1444 } 1439 1445
+5 -3
drivers/mmc/host/sunxi-mmc.c
··· 1492 1492 struct sunxi_mmc_host *host = mmc_priv(mmc); 1493 1493 1494 1494 mmc_remove_host(mmc); 1495 - pm_runtime_force_suspend(&pdev->dev); 1496 - disable_irq(host->irq); 1497 - sunxi_mmc_disable(host); 1495 + pm_runtime_disable(&pdev->dev); 1496 + if (!pm_runtime_status_suspended(&pdev->dev)) { 1497 + disable_irq(host->irq); 1498 + sunxi_mmc_disable(host); 1499 + } 1498 1500 dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 1499 1501 mmc_free_host(mmc); 1500 1502
+2 -2
drivers/net/dsa/microchip/ksz9477.c
··· 540 540 ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]); 541 541 542 542 /* clear forwarding port */ 543 - alu_table[2] &= ~BIT(port); 543 + alu_table[1] &= ~BIT(port); 544 544 545 545 /* if there is no port to forward, clear table */ 546 - if ((alu_table[2] & ALU_V_PORT_MAP) == 0) { 546 + if ((alu_table[1] & ALU_V_PORT_MAP) == 0) { 547 547 alu_table[0] = 0; 548 548 alu_table[1] = 0; 549 549 alu_table[2] = 0;
+15 -8
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
··· 524 524 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n"); 525 525 } 526 526 527 + static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata) 528 + { 529 + unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 530 + 531 + /* From MAC ver 30H the TFCR is per priority, instead of per queue */ 532 + if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) 533 + return max_q_count; 534 + else 535 + return min_t(unsigned int, pdata->tx_q_count, max_q_count); 536 + } 537 + 527 538 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) 528 539 { 529 - unsigned int max_q_count, q_count; 530 540 unsigned int reg, reg_val; 531 - unsigned int i; 541 + unsigned int i, q_count; 532 542 533 543 /* Clear MTL flow control */ 534 544 for (i = 0; i < pdata->rx_q_count; i++) 535 545 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); 536 546 537 547 /* Clear MAC flow control */ 538 - max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 539 - q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); 548 + q_count = xgbe_get_fc_queue_count(pdata); 540 549 reg = MAC_Q0TFCR; 541 550 for (i = 0; i < q_count; i++) { 542 551 reg_val = XGMAC_IOREAD(pdata, reg); ··· 562 553 { 563 554 struct ieee_pfc *pfc = pdata->pfc; 564 555 struct ieee_ets *ets = pdata->ets; 565 - unsigned int max_q_count, q_count; 566 556 unsigned int reg, reg_val; 567 - unsigned int i; 557 + unsigned int i, q_count; 568 558 569 559 /* Set MTL flow control */ 570 560 for (i = 0; i < pdata->rx_q_count; i++) { ··· 587 579 } 588 580 589 581 /* Set MAC flow control */ 590 - max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 591 - q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); 582 + q_count = xgbe_get_fc_queue_count(pdata); 592 583 reg = MAC_Q0TFCR; 593 584 for (i = 0; i < q_count; i++) { 594 585 reg_val = XGMAC_IOREAD(pdata, reg);
+24
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
··· 496 496 reg |= XGBE_KR_TRAINING_ENABLE; 497 497 reg |= XGBE_KR_TRAINING_START; 498 498 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); 499 + pdata->kr_start_time = jiffies; 499 500 500 501 netif_dbg(pdata, link, pdata->netdev, 501 502 "KR training initiated\n"); ··· 632 631 xgbe_an_disable(pdata); 633 632 634 633 xgbe_switch_mode(pdata); 634 + 635 + pdata->an_result = XGBE_AN_READY; 635 636 636 637 xgbe_an_restart(pdata); 637 638 ··· 1278 1275 static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata) 1279 1276 { 1280 1277 unsigned long link_timeout; 1278 + unsigned long kr_time; 1279 + int wait; 1281 1280 1282 1281 link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ); 1283 1282 if (time_after(jiffies, link_timeout)) { 1283 + if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) && 1284 + pdata->phy.autoneg == AUTONEG_ENABLE) { 1285 + /* AN restart should not happen while KR training is in progress. 1286 + * The while loop ensures no AN restart during KR training, 1287 + * waits up to 500ms and AN restart is triggered only if KR 1288 + * training is failed. 1289 + */ 1290 + wait = XGBE_KR_TRAINING_WAIT_ITER; 1291 + while (wait--) { 1292 + kr_time = pdata->kr_start_time + 1293 + msecs_to_jiffies(XGBE_AN_MS_TIMEOUT); 1294 + if (time_after(jiffies, kr_time)) 1295 + break; 1296 + /* AN restart is not required, if AN result is COMPLETE */ 1297 + if (pdata->an_result == XGBE_AN_COMPLETE) 1298 + return; 1299 + usleep_range(10000, 11000); 1300 + } 1301 + } 1284 1302 netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n"); 1285 1303 xgbe_phy_config_aneg(pdata); 1286 1304 }
+2
drivers/net/ethernet/amd/xgbe/xgbe.h
··· 290 290 /* Auto-negotiation */ 291 291 #define XGBE_AN_MS_TIMEOUT 500 292 292 #define XGBE_LINK_TIMEOUT 5 293 + #define XGBE_KR_TRAINING_WAIT_ITER 50 293 294 294 295 #define XGBE_SGMII_AN_LINK_STATUS BIT(1) 295 296 #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) ··· 1281 1280 unsigned int parallel_detect; 1282 1281 unsigned int fec_ability; 1283 1282 unsigned long an_start; 1283 + unsigned long kr_start_time; 1284 1284 enum xgbe_an_mode an_mode; 1285 1285 1286 1286 /* I2C support */
+4 -9
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 3969 3969 test_info->timeout = HWRM_CMD_TIMEOUT; 3970 3970 for (i = 0; i < bp->num_tests; i++) { 3971 3971 char *str = test_info->string[i]; 3972 - char *fw_str = resp->test0_name + i * 32; 3972 + char *fw_str = resp->test_name[i]; 3973 3973 3974 3974 if (i == BNXT_MACLPBK_TEST_IDX) { 3975 3975 strcpy(str, "Mac loopback test (offline)"); ··· 3980 3980 } else if (i == BNXT_IRQ_TEST_IDX) { 3981 3981 strcpy(str, "Interrupt_test (offline)"); 3982 3982 } else { 3983 - strscpy(str, fw_str, ETH_GSTRING_LEN); 3984 - strncat(str, " test", ETH_GSTRING_LEN - strlen(str)); 3985 - if (test_info->offline_mask & (1 << i)) 3986 - strncat(str, " (offline)", 3987 - ETH_GSTRING_LEN - strlen(str)); 3988 - else 3989 - strncat(str, " (online)", 3990 - ETH_GSTRING_LEN - strlen(str)); 3983 + snprintf(str, ETH_GSTRING_LEN, "%s test (%s)", 3984 + fw_str, test_info->offline_mask & (1 << i) ? 3985 + "offline" : "online"); 3991 3986 } 3992 3987 } 3993 3988
+1 -8
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
··· 10249 10249 u8 unused_0; 10250 10250 __le16 test_timeout; 10251 10251 u8 unused_1[2]; 10252 - char test0_name[32]; 10253 - char test1_name[32]; 10254 - char test2_name[32]; 10255 - char test3_name[32]; 10256 - char test4_name[32]; 10257 - char test5_name[32]; 10258 - char test6_name[32]; 10259 - char test7_name[32]; 10252 + char test_name[8][32]; 10260 10253 u8 eyescope_target_BER_support; 10261 10254 #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL 10262 10255 #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
+1 -8
drivers/net/ethernet/cadence/macb_main.c
··· 2187 2187 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || 2188 2188 skb_is_nonlinear(*skb); 2189 2189 int padlen = ETH_ZLEN - (*skb)->len; 2190 - int headroom = skb_headroom(*skb); 2191 2190 int tailroom = skb_tailroom(*skb); 2192 2191 struct sk_buff *nskb; 2193 2192 u32 fcs; ··· 2200 2201 /* FCS could be appeded to tailroom. */ 2201 2202 if (tailroom >= ETH_FCS_LEN) 2202 2203 goto add_fcs; 2203 - /* FCS could be appeded by moving data to headroom. */ 2204 - else if (!cloned && headroom + tailroom >= ETH_FCS_LEN) 2205 - padlen = 0; 2206 2204 /* No room for FCS, need to reallocate skb. */ 2207 2205 else 2208 2206 padlen = ETH_FCS_LEN; ··· 2208 2212 padlen += ETH_FCS_LEN; 2209 2213 } 2210 2214 2211 - if (!cloned && headroom + tailroom >= padlen) { 2212 - (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len); 2213 - skb_set_tail_pointer(*skb, (*skb)->len); 2214 - } else { 2215 + if (cloned || tailroom < padlen) { 2215 2216 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC); 2216 2217 if (!nskb) 2217 2218 return -ENOMEM;
+2 -2
drivers/net/ethernet/freescale/enetc/enetc.c
··· 2290 2290 2291 2291 priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp); 2292 2292 2293 - netif_tx_lock(priv->ndev); 2293 + netif_tx_lock_bh(priv->ndev); 2294 2294 2295 2295 clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags); 2296 2296 skb = skb_dequeue(&priv->tx_skbs); 2297 2297 if (skb) 2298 2298 enetc_start_xmit(skb, priv->ndev); 2299 2299 2300 - netif_tx_unlock(priv->ndev); 2300 + netif_tx_unlock_bh(priv->ndev); 2301 2301 } 2302 2302 2303 2303 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
+2 -9
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
··· 1012 1012 rbpool = cq->rbpool; 1013 1013 free_ptrs = cq->pool_ptrs; 1014 1014 1015 - get_cpu(); 1016 1015 while (cq->pool_ptrs) { 1017 1016 if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) { 1018 1017 /* Schedule a WQ if we fails to free atleast half of the ··· 1031 1032 pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); 1032 1033 cq->pool_ptrs--; 1033 1034 } 1034 - put_cpu(); 1035 1035 cq->refill_task_sched = false; 1036 1036 } 1037 1037 ··· 1368 1370 if (err) 1369 1371 goto fail; 1370 1372 1371 - get_cpu(); 1372 1373 /* Allocate pointers and free them to aura/pool */ 1373 1374 for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { 1374 1375 pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); ··· 1391 1394 } 1392 1395 1393 1396 err_mem: 1394 - put_cpu(); 1395 1397 return err ? -ENOMEM : 0; 1396 1398 1397 1399 fail: ··· 1431 1435 if (err) 1432 1436 goto fail; 1433 1437 1434 - get_cpu(); 1435 1438 /* Allocate pointers and free them to aura/pool */ 1436 1439 for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { 1437 1440 pool = &pfvf->qset.pool[pool_id]; 1438 1441 for (ptr = 0; ptr < num_ptrs; ptr++) { 1439 1442 err = otx2_alloc_rbuf(pfvf, pool, &bufptr); 1440 1443 if (err) 1441 - goto err_mem; 1444 + return -ENOMEM; 1442 1445 pfvf->hw_ops->aura_freeptr(pfvf, pool_id, 1443 1446 bufptr + OTX2_HEAD_ROOM); 1444 1447 } 1445 1448 } 1446 - err_mem: 1447 - put_cpu(); 1448 - return err ? -ENOMEM : 0; 1449 + return 0; 1449 1450 fail: 1450 1451 otx2_mbox_reset(&pfvf->mbox.mbox, 0); 1451 1452 otx2_aura_pool_free(pfvf);
+2
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
··· 736 736 u64 ptrs[2]; 737 737 738 738 ptrs[1] = buf; 739 + get_cpu(); 739 740 /* Free only one buffer at time during init and teardown */ 740 741 __cn10k_aura_freeptr(pfvf, aura, ptrs, 2); 742 + put_cpu(); 741 743 } 742 744 743 745 /* Alloc pointer from pool/aura */
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
··· 637 637 if (child->bw_share == old_bw_share) 638 638 continue; 639 639 640 - err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share, 640 + err_one = mlx5_qos_update_node(htb->mdev, child->bw_share, 641 641 child->max_average_bw, child->hw_id); 642 642 if (!err && err_one) { 643 643 err = err_one; ··· 671 671 mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share); 672 672 mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw); 673 673 674 - err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share, 674 + err = mlx5_qos_update_node(htb->mdev, bw_share, 675 675 max_average_bw, node->hw_id); 676 676 if (err) { 677 677 NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 578 578 { 579 579 enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk); 580 580 u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk); 581 - bool unaligned = xsk ? xsk->unaligned : false; 582 581 u16 max_mtu_pkts; 583 582 584 583 if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) ··· 590 591 * needed number of WQEs exceeds the maximum. 591 592 */ 592 593 max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE, 593 - mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned)); 594 + mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned)); 594 595 if (params->log_rq_mtu_frames > max_mtu_pkts) { 595 596 mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n", 596 597 1 << params->log_rq_mtu_frames, xsk->chunk_size);
+2 -3
drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
··· 477 477 struct mlx5e_sample_flow *sample_flow; 478 478 struct mlx5e_sample_attr *sample_attr; 479 479 struct mlx5_flow_attr *pre_attr; 480 - u32 tunnel_id = attr->tunnel_id; 481 480 struct mlx5_eswitch *esw; 482 481 u32 default_tbl_id; 483 482 u32 obj_id; ··· 521 522 restore_obj.sample.group_id = sample_attr->group_num; 522 523 restore_obj.sample.rate = sample_attr->rate; 523 524 restore_obj.sample.trunc_size = sample_attr->trunc_size; 524 - restore_obj.sample.tunnel_id = tunnel_id; 525 + restore_obj.sample.tunnel_id = attr->tunnel_id; 525 526 err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id); 526 527 if (err) 527 528 goto err_obj_id; ··· 547 548 /* For decap action, do decap in the original flow table instead of the 548 549 * default flow table. 549 550 */ 550 - if (tunnel_id) 551 + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) 551 552 pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; 552 553 pre_attr->modify_hdr = sample_flow->restore->modify_hdr; 553 554 pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
+2 -5
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
··· 122 122 u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)]; 123 123 dma_addr_t dma_addr; 124 124 struct mlx5_aso *aso; 125 - /* IPsec ASO caches data on every query call, 126 - * so in nested calls, we can use this boolean to save 127 - * recursive calls to mlx5e_ipsec_aso_query() 128 - */ 129 - u8 use_cache : 1; 125 + /* Protect ASO WQ access, as it is global to whole IPsec */ 126 + spinlock_t lock; 130 127 }; 131 128 132 129 struct mlx5e_ipsec {
+6 -6
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
··· 320 320 if (ret) 321 321 goto unlock; 322 322 323 - aso->use_cache = true; 324 323 if (attrs->esn_trigger && 325 324 !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) { 326 325 u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter); ··· 332 333 !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) || 333 334 !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable)) 334 335 xfrm_state_check_expire(sa_entry->x); 335 - aso->use_cache = false; 336 336 337 337 unlock: 338 338 spin_unlock(&sa_entry->x->lock); ··· 396 398 goto err_aso_create; 397 399 } 398 400 401 + spin_lock_init(&aso->lock); 399 402 ipsec->nb.notifier_call = mlx5e_ipsec_event; 400 403 mlx5_notifier_register(mdev, &ipsec->nb); 401 404 ··· 455 456 struct mlx5e_hw_objs *res; 456 457 struct mlx5_aso_wqe *wqe; 457 458 u8 ds_cnt; 459 + int ret; 458 460 459 461 lockdep_assert_held(&sa_entry->x->lock); 460 - if (aso->use_cache) 461 - return 0; 462 - 463 462 res = &mdev->mlx5e_res.hw_objs; 464 463 464 + spin_lock_bh(&aso->lock); 465 465 memset(aso->ctx, 0, sizeof(aso->ctx)); 466 466 wqe = mlx5_aso_get_wqe(aso->aso); 467 467 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); ··· 475 477 mlx5e_ipsec_aso_copy(ctrl, data); 476 478 477 479 mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl); 478 - return mlx5_aso_poll_cq(aso->aso, false); 480 + ret = mlx5_aso_poll_cq(aso->aso, false); 481 + spin_unlock_bh(&aso->lock); 482 + return ret; 479 483 } 480 484 481 485 void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+3
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 166 166 * it's different than the ht->mutex here. 167 167 */ 168 168 static struct lock_class_key tc_ht_lock_key; 169 + static struct lock_class_key tc_ht_wq_key; 169 170 170 171 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow); 171 172 static void free_flow_post_acts(struct mlx5e_tc_flow *flow); ··· 5183 5182 return err; 5184 5183 5185 5184 lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); 5185 + lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0); 5186 5186 5187 5187 mapping_id = mlx5_query_nic_system_image_guid(dev); 5188 5188 ··· 5290 5288 return err; 5291 5289 5292 5290 lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key); 5291 + lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0); 5293 5292 5294 5293 return 0; 5295 5294 }
+3 -15
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
··· 22 22 }; 23 23 24 24 static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx, 25 - u32 parent_ix, u32 tsar_ix, 26 - u32 max_rate, u32 bw_share) 25 + u32 tsar_ix, u32 max_rate, u32 bw_share) 27 26 { 28 27 u32 bitmask = 0; 29 28 30 29 if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) 31 30 return -EOPNOTSUPP; 32 31 33 - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix); 34 32 MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); 35 33 MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); 36 34 bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; ··· 49 51 int err; 50 52 51 53 err = esw_qos_tsar_config(dev, sched_ctx, 52 - esw->qos.root_tsar_ix, group->tsar_ix, 54 + group->tsar_ix, 53 55 max_rate, bw_share); 54 56 if (err) 55 57 NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed"); ··· 65 67 struct netlink_ext_ack *extack) 66 68 { 67 69 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; 68 - struct mlx5_esw_rate_group *group = vport->qos.group; 69 70 struct mlx5_core_dev *dev = esw->dev; 70 - u32 parent_tsar_ix; 71 - void *vport_elem; 72 71 int err; 73 72 74 73 if (!vport->qos.enabled) 75 74 return -EIO; 76 75 77 - parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix; 78 - MLX5_SET(scheduling_context, sched_ctx, element_type, 79 - SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); 80 - vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, 81 - element_attributes); 82 - MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); 83 - 84 - err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix, 76 + err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix, 85 77 max_rate, bw_share); 86 78 if (err) { 87 79 esw_warn(esw->dev,
+1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1464 1464 mlx5_lag_disable_change(esw->dev); 1465 1465 down_write(&esw->mode_lock); 1466 1466 mlx5_eswitch_disable_locked(esw); 1467 + esw->mode = MLX5_ESWITCH_LEGACY; 1467 1468 up_write(&esw->mode_lock); 1468 1469 mlx5_lag_enable_change(esw->dev); 1469 1470 }
+1
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 677 677 mutex_lock(&dev->intf_state_mutex); 678 678 if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) { 679 679 mlx5_core_err(dev, "health works are not permitted at this stage\n"); 680 + mutex_unlock(&dev->intf_state_mutex); 680 681 return; 681 682 } 682 683 mutex_unlock(&dev->intf_state_mutex);
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 2098 2098 } 2099 2099 } 2100 2100 2101 - static int __init init(void) 2101 + static int __init mlx5_init(void) 2102 2102 { 2103 2103 int err; 2104 2104 ··· 2133 2133 return err; 2134 2134 } 2135 2135 2136 - static void __exit cleanup(void) 2136 + static void __exit mlx5_cleanup(void) 2137 2137 { 2138 2138 mlx5e_cleanup(); 2139 2139 mlx5_sf_driver_unregister(); ··· 2141 2141 mlx5_unregister_debugfs(); 2142 2142 } 2143 2143 2144 - module_init(init); 2145 - module_exit(cleanup); 2144 + module_init(mlx5_init); 2145 + module_exit(mlx5_cleanup);
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/qos.c
··· 62 62 return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id); 63 63 } 64 64 65 - int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, 65 + int mlx5_qos_update_node(struct mlx5_core_dev *mdev, 66 66 u32 bw_share, u32 max_avg_bw, u32 id) 67 67 { 68 68 u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; 69 69 u32 bitmask = 0; 70 70 71 - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id); 72 71 MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); 73 72 MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw); 74 73
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/qos.h
··· 23 23 int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id, 24 24 u32 bw_share, u32 max_avg_bw, u32 *id); 25 25 int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id); 26 - int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share, 26 + int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share, 27 27 u32 max_avg_bw, u32 id); 28 28 int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id); 29 29
+8 -5
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
··· 1043 1043 lan966x->base_mac[5] &= 0xf0; 1044 1044 } 1045 1045 1046 - ports = device_get_named_child_node(&pdev->dev, "ethernet-ports"); 1047 - if (!ports) 1048 - return dev_err_probe(&pdev->dev, -ENODEV, 1049 - "no ethernet-ports child found\n"); 1050 - 1051 1046 err = lan966x_create_targets(pdev, lan966x); 1052 1047 if (err) 1053 1048 return dev_err_probe(&pdev->dev, err, ··· 1120 1125 } 1121 1126 } 1122 1127 1128 + ports = device_get_named_child_node(&pdev->dev, "ethernet-ports"); 1129 + if (!ports) 1130 + return dev_err_probe(&pdev->dev, -ENODEV, 1131 + "no ethernet-ports child found\n"); 1132 + 1123 1133 /* init switch */ 1124 1134 lan966x_init(lan966x); 1125 1135 lan966x_stats_init(lan966x); ··· 1162 1162 goto cleanup_ports; 1163 1163 } 1164 1164 1165 + fwnode_handle_put(ports); 1166 + 1165 1167 lan966x_mdb_init(lan966x); 1166 1168 err = lan966x_fdb_init(lan966x); 1167 1169 if (err) ··· 1193 1191 lan966x_fdb_deinit(lan966x); 1194 1192 1195 1193 cleanup_ports: 1194 + fwnode_handle_put(ports); 1196 1195 fwnode_handle_put(portnp); 1197 1196 1198 1197 lan966x_cleanup_ports(lan966x);
+14
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
··· 186 186 int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp, 187 187 struct stmmac_safety_feature_cfg *safety_feat_cfg) 188 188 { 189 + struct stmmac_safety_feature_cfg all_safety_feats = { 190 + .tsoee = 1, 191 + .mrxpee = 1, 192 + .mestee = 1, 193 + .mrxee = 1, 194 + .mtxee = 1, 195 + .epsi = 1, 196 + .edpp = 1, 197 + .prtyen = 1, 198 + .tmouten = 1, 199 + }; 189 200 u32 value; 190 201 191 202 if (!asp) 192 203 return -EINVAL; 204 + 205 + if (!safety_feat_cfg) 206 + safety_feat_cfg = &all_safety_feats; 193 207 194 208 /* 1. Enable Safety Features */ 195 209 value = readl(ioaddr + MTL_ECC_CONTROL);
+4 -4
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
··· 551 551 p = (char *)priv + offsetof(struct stmmac_priv, 552 552 xstats.txq_stats[q].tx_pkt_n); 553 553 for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { 554 - *data++ = (*(u64 *)p); 555 - p += sizeof(u64 *); 554 + *data++ = (*(unsigned long *)p); 555 + p += sizeof(unsigned long); 556 556 } 557 557 } 558 558 for (q = 0; q < rx_cnt; q++) { 559 559 p = (char *)priv + offsetof(struct stmmac_priv, 560 560 xstats.rxq_stats[q].rx_pkt_n); 561 561 for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { 562 - *data++ = (*(u64 *)p); 563 - p += sizeof(u64 *); 562 + *data++ = (*(unsigned long *)p); 563 + p += sizeof(unsigned long); 564 564 } 565 565 } 566 566 }
+5
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1150 1150 int addr = priv->plat->phy_addr; 1151 1151 struct phy_device *phydev; 1152 1152 1153 + if (addr < 0) { 1154 + netdev_err(priv->dev, "no phy found\n"); 1155 + return -ENODEV; 1156 + } 1157 + 1153 1158 phydev = mdiobus_get_phy(priv->mii, addr); 1154 1159 if (!phydev) { 1155 1160 netdev_err(priv->dev, "no phy at addr %d\n", addr);
+10
drivers/net/ipa/ipa_interrupt.c
··· 127 127 return IRQ_HANDLED; 128 128 } 129 129 130 + void ipa_interrupt_irq_disable(struct ipa *ipa) 131 + { 132 + disable_irq(ipa->interrupt->irq); 133 + } 134 + 135 + void ipa_interrupt_irq_enable(struct ipa *ipa) 136 + { 137 + enable_irq(ipa->interrupt->irq); 138 + } 139 + 130 140 /* Common function used to enable/disable TX_SUSPEND for an endpoint */ 131 141 static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt, 132 142 u32 endpoint_id, bool enable)
+16
drivers/net/ipa/ipa_interrupt.h
··· 86 86 void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt); 87 87 88 88 /** 89 + * ipa_interrupt_irq_enable() - Enable IPA interrupts 90 + * @ipa: IPA pointer 91 + * 92 + * This enables the IPA interrupt line 93 + */ 94 + void ipa_interrupt_irq_enable(struct ipa *ipa); 95 + 96 + /** 97 + * ipa_interrupt_irq_disable() - Disable IPA interrupts 98 + * @ipa: IPA pointer 99 + * 100 + * This disables the IPA interrupt line 101 + */ 102 + void ipa_interrupt_irq_disable(struct ipa *ipa); 103 + 104 + /** 89 105 * ipa_interrupt_config() - Configure the IPA interrupt framework 90 106 * @ipa: IPA pointer 91 107 *
+17
drivers/net/ipa/ipa_power.c
··· 181 181 182 182 __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); 183 183 184 + /* Increment the disable depth to ensure that the IRQ won't 185 + * be re-enabled until the matching _enable call in 186 + * ipa_resume(). We do this to ensure that the interrupt 187 + * handler won't run whilst PM runtime is disabled. 188 + * 189 + * Note that disabling the IRQ is NOT the same as disabling 190 + * irq wake. If wakeup is enabled for the IPA then the IRQ 191 + * will still cause the system to wake up, see irq_set_irq_wake(). 192 + */ 193 + ipa_interrupt_irq_disable(ipa); 194 + 184 195 return pm_runtime_force_suspend(dev); 185 196 } 186 197 ··· 203 192 ret = pm_runtime_force_resume(dev); 204 193 205 194 __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); 195 + 196 + /* Now that PM runtime is enabled again it's safe 197 + * to turn the IRQ back on and process any data 198 + * that was received during suspend. 199 + */ 200 + ipa_interrupt_irq_enable(ipa); 206 201 207 202 return ret; 208 203 }
+6 -1
drivers/net/phy/mdio_bus.c
··· 108 108 109 109 struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) 110 110 { 111 - struct mdio_device *mdiodev = bus->mdio_map[addr]; 111 + struct mdio_device *mdiodev; 112 + 113 + if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map)) 114 + return NULL; 115 + 116 + mdiodev = bus->mdio_map[addr]; 112 117 113 118 if (!mdiodev) 114 119 return NULL;
-2
drivers/net/team/team.c
··· 1044 1044 goto err_port_enter; 1045 1045 } 1046 1046 } 1047 - port->dev->priv_flags |= IFF_NO_ADDRCONF; 1048 1047 1049 1048 return 0; 1050 1049 ··· 1057 1058 { 1058 1059 if (team->ops.port_leave) 1059 1060 team->ops.port_leave(team, port); 1060 - port->dev->priv_flags &= ~IFF_NO_ADDRCONF; 1061 1061 dev_put(team->dev); 1062 1062 } 1063 1063
+1 -1
drivers/net/usb/sr9700.c
··· 413 413 /* ignore the CRC length */ 414 414 len = (skb->data[1] | (skb->data[2] << 8)) - 4; 415 415 416 - if (len > ETH_FRAME_LEN || len > skb->len) 416 + if (len > ETH_FRAME_LEN || len > skb->len || len < 0) 417 417 return 0; 418 418 419 419 /* the last packet of current skb */
+4 -2
drivers/net/virtio_net.c
··· 1877 1877 */ 1878 1878 if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { 1879 1879 netif_stop_subqueue(dev, qnum); 1880 - if (!use_napi && 1881 - unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 1880 + if (use_napi) { 1881 + if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) 1882 + virtqueue_napi_schedule(&sq->napi, sq->vq); 1883 + } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { 1882 1884 /* More just got used, free them then recheck. */ 1883 1885 free_old_xmit_skbs(sq, false); 1884 1886 if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+4 -2
drivers/net/wan/fsl_ucc_hdlc.c
··· 1243 1243 free_dev: 1244 1244 free_netdev(dev); 1245 1245 undo_uhdlc_init: 1246 - iounmap(utdm->siram); 1246 + if (utdm) 1247 + iounmap(utdm->siram); 1247 1248 unmap_si_regs: 1248 - iounmap(utdm->si_regs); 1249 + if (utdm) 1250 + iounmap(utdm->si_regs); 1249 1251 free_utdm: 1250 1252 if (uhdlc_priv->tsa) 1251 1253 kfree(utdm);
+19 -18
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 7937 7937 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 7938 7938 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); 7939 7939 7940 + if (chan->flags & IEEE80211_CHAN_DISABLED) 7941 + return -EINVAL; 7942 + 7940 7943 /* set_channel */ 7941 7944 chspec = channel_to_chanspec(&cfg->d11inf, chan); 7942 7945 if (chspec != INVCHANSPEC) { ··· 7964 7961 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); 7965 7962 struct brcmf_dump_survey survey = {}; 7966 7963 struct ieee80211_supported_band *band; 7967 - struct ieee80211_channel *chan; 7964 + enum nl80211_band band_id; 7968 7965 struct cca_msrmnt_query req; 7969 7966 u32 noise; 7970 7967 int err; ··· 7977 7974 return -EBUSY; 7978 7975 } 7979 7976 7980 - band = wiphy->bands[NL80211_BAND_2GHZ]; 7981 - if (band && idx >= band->n_channels) { 7982 - idx -= band->n_channels; 7983 - band = NULL; 7984 - } 7977 + for (band_id = 0; band_id < NUM_NL80211_BANDS; band_id++) { 7978 + band = wiphy->bands[band_id]; 7979 + if (!band) 7980 + continue; 7981 + if (idx >= band->n_channels) { 7982 + idx -= band->n_channels; 7983 + continue; 7984 + } 7985 7985 7986 - if (!band || idx >= band->n_channels) { 7987 - band = wiphy->bands[NL80211_BAND_5GHZ]; 7988 - if (idx >= band->n_channels) 7989 - return -ENOENT; 7986 + info->channel = &band->channels[idx]; 7987 + break; 7990 7988 } 7989 + if (band_id == NUM_NL80211_BANDS) 7990 + return -ENOENT; 7991 7991 7992 7992 /* Setting current channel to the requested channel */ 7993 - chan = &band->channels[idx]; 7994 - err = cfg80211_set_channel(wiphy, ndev, chan, NL80211_CHAN_HT20); 7995 - if (err) { 7996 - info->channel = chan; 7997 - info->filled = 0; 7993 + info->filled = 0; 7994 + if (cfg80211_set_channel(wiphy, ndev, info->channel, NL80211_CHAN_HT20)) 7998 7995 return 0; 7999 - } 8000 7996 8001 7997 /* Disable mpc */ 8002 7998 brcmf_set_mpc(ifp, 0); ··· 8030 8028 if (err) 8031 8029 goto exit; 8032 8030 8033 - info->channel = chan; 8034 8031 info->noise = noise; 8035 8032 
info->time = ACS_MSRMNT_DELAY; 8036 8033 info->time_busy = ACS_MSRMNT_DELAY - survey.idle; ··· 8041 8040 SURVEY_INFO_TIME_TX; 8042 8041 8043 8042 brcmf_dbg(INFO, "OBSS dump: channel %d: survey duration %d\n", 8044 - ieee80211_frequency_to_channel(chan->center_freq), 8043 + ieee80211_frequency_to_channel(info->channel->center_freq), 8045 8044 ACS_MSRMNT_DELAY); 8046 8045 brcmf_dbg(INFO, "noise(%d) busy(%llu) rx(%llu) tx(%llu)\n", 8047 8046 info->noise, info->time_busy, info->time_rx, info->time_tx);
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 1228 1228 BRCMF_NROF_H2D_COMMON_MSGRINGS; 1229 1229 max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS; 1230 1230 } 1231 - if (max_flowrings > 256) { 1231 + if (max_flowrings > 512) { 1232 1232 brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings); 1233 1233 return -EIO; 1234 1234 }
+78 -49
drivers/net/wireless/mediatek/mt76/dma.c
··· 206 206 } 207 207 208 208 static int 209 + mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, 210 + struct mt76_queue_buf *buf, void *data) 211 + { 212 + struct mt76_desc *desc = &q->desc[q->head]; 213 + struct mt76_queue_entry *entry = &q->entry[q->head]; 214 + struct mt76_txwi_cache *txwi = NULL; 215 + u32 buf1 = 0, ctrl; 216 + int idx = q->head; 217 + int rx_token; 218 + 219 + ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); 220 + 221 + if ((q->flags & MT_QFLAG_WED) && 222 + FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { 223 + txwi = mt76_get_rxwi(dev); 224 + if (!txwi) 225 + return -ENOMEM; 226 + 227 + rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr); 228 + if (rx_token < 0) { 229 + mt76_put_rxwi(dev, txwi); 230 + return -ENOMEM; 231 + } 232 + 233 + buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token); 234 + ctrl |= MT_DMA_CTL_TO_HOST; 235 + } 236 + 237 + WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr)); 238 + WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); 239 + WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); 240 + WRITE_ONCE(desc->info, 0); 241 + 242 + entry->dma_addr[0] = buf->addr; 243 + entry->dma_len[0] = buf->len; 244 + entry->txwi = txwi; 245 + entry->buf = data; 246 + entry->wcid = 0xffff; 247 + entry->skip_buf1 = true; 248 + q->head = (q->head + 1) % q->ndesc; 249 + q->queued++; 250 + 251 + return idx; 252 + } 253 + 254 + static int 209 255 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, 210 256 struct mt76_queue_buf *buf, int nbufs, u32 info, 211 257 struct sk_buff *skb, void *txwi) 212 258 { 213 259 struct mt76_queue_entry *entry; 214 260 struct mt76_desc *desc; 215 - u32 ctrl; 216 261 int i, idx = -1; 262 + u32 ctrl, next; 263 + 264 + if (txwi) { 265 + q->entry[q->head].txwi = DMA_DUMMY_DATA; 266 + q->entry[q->head].skip_buf0 = true; 267 + } 217 268 218 269 for (i = 0; i < nbufs; i += 2, buf += 2) { 219 270 u32 buf0 = buf[0].addr, buf1 = 0; 220 271 221 272 idx = q->head; 222 - q->head = (q->head + 1) % 
q->ndesc; 273 + next = (q->head + 1) % q->ndesc; 223 274 224 275 desc = &q->desc[idx]; 225 276 entry = &q->entry[idx]; 226 277 227 - if ((q->flags & MT_QFLAG_WED) && 228 - FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { 229 - struct mt76_txwi_cache *t = txwi; 230 - int rx_token; 278 + if (buf[0].skip_unmap) 279 + entry->skip_buf0 = true; 280 + entry->skip_buf1 = i == nbufs - 1; 231 281 232 - if (!t) 233 - return -ENOMEM; 282 + entry->dma_addr[0] = buf[0].addr; 283 + entry->dma_len[0] = buf[0].len; 234 284 235 - rx_token = mt76_rx_token_consume(dev, (void *)skb, t, 236 - buf[0].addr); 237 - buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token); 238 - ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) | 239 - MT_DMA_CTL_TO_HOST; 240 - } else { 241 - if (txwi) { 242 - q->entry[q->head].txwi = DMA_DUMMY_DATA; 243 - q->entry[q->head].skip_buf0 = true; 244 - } 245 - 246 - if (buf[0].skip_unmap) 247 - entry->skip_buf0 = true; 248 - entry->skip_buf1 = i == nbufs - 1; 249 - 250 - entry->dma_addr[0] = buf[0].addr; 251 - entry->dma_len[0] = buf[0].len; 252 - 253 - ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); 254 - if (i < nbufs - 1) { 255 - entry->dma_addr[1] = buf[1].addr; 256 - entry->dma_len[1] = buf[1].len; 257 - buf1 = buf[1].addr; 258 - ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); 259 - if (buf[1].skip_unmap) 260 - entry->skip_buf1 = true; 261 - } 262 - 263 - if (i == nbufs - 1) 264 - ctrl |= MT_DMA_CTL_LAST_SEC0; 265 - else if (i == nbufs - 2) 266 - ctrl |= MT_DMA_CTL_LAST_SEC1; 285 + ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len); 286 + if (i < nbufs - 1) { 287 + entry->dma_addr[1] = buf[1].addr; 288 + entry->dma_len[1] = buf[1].len; 289 + buf1 = buf[1].addr; 290 + ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len); 291 + if (buf[1].skip_unmap) 292 + entry->skip_buf1 = true; 267 293 } 294 + 295 + if (i == nbufs - 1) 296 + ctrl |= MT_DMA_CTL_LAST_SEC0; 297 + else if (i == nbufs - 2) 298 + ctrl |= MT_DMA_CTL_LAST_SEC1; 268 299 269 300 
WRITE_ONCE(desc->buf0, cpu_to_le32(buf0)); 270 301 WRITE_ONCE(desc->buf1, cpu_to_le32(buf1)); 271 302 WRITE_ONCE(desc->info, cpu_to_le32(info)); 272 303 WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl)); 273 304 305 + q->head = next; 274 306 q->queued++; 275 307 } 276 308 ··· 609 577 spin_lock_bh(&q->lock); 610 578 611 579 while (q->queued < q->ndesc - 1) { 612 - struct mt76_txwi_cache *t = NULL; 613 580 struct mt76_queue_buf qbuf; 614 581 void *buf = NULL; 615 - 616 - if ((q->flags & MT_QFLAG_WED) && 617 - FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) { 618 - t = mt76_get_rxwi(dev); 619 - if (!t) 620 - break; 621 - } 622 582 623 583 buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC); 624 584 if (!buf) ··· 625 601 qbuf.addr = addr + offset; 626 602 qbuf.len = len - offset; 627 603 qbuf.skip_unmap = false; 628 - mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t); 604 + if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { 605 + dma_unmap_single(dev->dma_dev, addr, len, 606 + DMA_FROM_DEVICE); 607 + skb_free_frag(buf); 608 + break; 609 + } 629 610 frames++; 630 611 } 631 612
+7
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
··· 653 653 654 654 desc->buf0 = cpu_to_le32(phy_addr); 655 655 token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr); 656 + if (token < 0) { 657 + dma_unmap_single(dev->mt76.dma_dev, phy_addr, 658 + wed->wlan.rx_size, DMA_TO_DEVICE); 659 + skb_free_frag(ptr); 660 + goto unmap; 661 + } 662 + 656 663 desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN, 657 664 token)); 658 665 desc++;
+4 -3
drivers/net/wireless/mediatek/mt76/tx.c
··· 764 764 spin_lock_bh(&dev->rx_token_lock); 765 765 token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size, 766 766 GFP_ATOMIC); 767 + if (token >= 0) { 768 + t->ptr = ptr; 769 + t->dma_addr = phys; 770 + } 767 771 spin_unlock_bh(&dev->rx_token_lock); 768 - 769 - t->ptr = ptr; 770 - t->dma_addr = phys; 771 772 772 773 return token; 773 774 }
+6 -13
drivers/net/wireless/rndis_wlan.c
··· 696 696 struct rndis_query *get; 697 697 struct rndis_query_c *get_c; 698 698 } u; 699 - int ret, buflen; 700 - int resplen, respoffs, copylen; 699 + int ret; 700 + size_t buflen, resplen, respoffs, copylen; 701 701 702 702 buflen = *len + sizeof(*u.get); 703 703 if (buflen < CONTROL_BUFFER_SIZE) ··· 732 732 733 733 if (respoffs > buflen) { 734 734 /* Device returned data offset outside buffer, error. */ 735 - netdev_dbg(dev->net, "%s(%s): received invalid " 736 - "data offset: %d > %d\n", __func__, 737 - oid_to_string(oid), respoffs, buflen); 735 + netdev_dbg(dev->net, 736 + "%s(%s): received invalid data offset: %zu > %zu\n", 737 + __func__, oid_to_string(oid), respoffs, buflen); 738 738 739 739 ret = -EINVAL; 740 740 goto exit_unlock; 741 741 } 742 742 743 - if ((resplen + respoffs) > buflen) { 744 - /* Device would have returned more data if buffer would 745 - * have been big enough. Copy just the bits that we got. 746 - */ 747 - copylen = buflen - respoffs; 748 - } else { 749 - copylen = resplen; 750 - } 743 + copylen = min(resplen, buflen - respoffs); 751 744 752 745 if (copylen > *len) 753 746 copylen = *len;
+20 -4
drivers/nvme/host/apple.c
··· 829 829 apple_nvme_remove_cq(anv); 830 830 } 831 831 832 - nvme_disable_ctrl(&anv->ctrl, shutdown); 832 + /* 833 + * Always disable the NVMe controller after shutdown. 834 + * We need to do this to bring it back up later anyway, and we 835 + * can't do it while the firmware is not running (e.g. in the 836 + * resume reset path before RTKit is initialized), so for Apple 837 + * controllers it makes sense to unconditionally do it here. 838 + * Additionally, this sequence of events is reliable, while 839 + * others (like disabling after bringing back the firmware on 840 + * resume) seem to run into trouble under some circumstances. 841 + * 842 + * Both U-Boot and m1n1 also use this convention (i.e. an ANS 843 + * NVMe controller is handed off with firmware shut down, in an 844 + * NVMe disabled state, after a clean shutdown). 845 + */ 846 + if (shutdown) 847 + nvme_disable_ctrl(&anv->ctrl, shutdown); 848 + nvme_disable_ctrl(&anv->ctrl, false); 833 849 } 834 850 835 851 WRITE_ONCE(anv->ioq.enabled, false); ··· 1001 985 goto out; 1002 986 } 1003 987 1004 - if (anv->ctrl.ctrl_config & NVME_CC_ENABLE) 1005 - apple_nvme_disable(anv, false); 1006 - 1007 988 /* RTKit must be shut down cleanly for the (soft)-reset to work */ 1008 989 if (apple_rtkit_is_running(anv->rtk)) { 990 + /* reset the controller if it is enabled */ 991 + if (anv->ctrl.ctrl_config & NVME_CC_ENABLE) 992 + apple_nvme_disable(anv, false); 1009 993 dev_dbg(anv->dev, "Trying to shut down RTKit before reset."); 1010 994 ret = apple_rtkit_shutdown(anv->rtk); 1011 995 if (ret)
+1 -1
drivers/nvme/host/pci.c
··· 1362 1362 else 1363 1363 nvme_poll_irqdisable(nvmeq); 1364 1364 1365 - if (blk_mq_request_completed(req)) { 1365 + if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { 1366 1366 dev_warn(dev->ctrl.device, 1367 1367 "I/O %d QID %d timeout, completion polled\n", 1368 1368 req->tag, nvmeq->qid);
+1 -1
drivers/phy/freescale/phy-fsl-imx8m-pcie.c
··· 255 255 imx8_phy->perst = 256 256 devm_reset_control_get_exclusive(dev, "perst"); 257 257 if (IS_ERR(imx8_phy->perst)) 258 - dev_err_probe(dev, PTR_ERR(imx8_phy->perst), 258 + return dev_err_probe(dev, PTR_ERR(imx8_phy->perst), 259 259 "Failed to get PCIE PHY PERST control\n"); 260 260 } 261 261
+3 -2
drivers/phy/phy-can-transceiver.c
··· 99 99 struct gpio_desc *standby_gpio; 100 100 struct gpio_desc *enable_gpio; 101 101 u32 max_bitrate = 0; 102 + int err; 102 103 103 104 can_transceiver_phy = devm_kzalloc(dev, sizeof(struct can_transceiver_phy), GFP_KERNEL); 104 105 if (!can_transceiver_phy) ··· 125 124 return PTR_ERR(phy); 126 125 } 127 126 128 - device_property_read_u32(dev, "max-bitrate", &max_bitrate); 129 - if (!max_bitrate) 127 + err = device_property_read_u32(dev, "max-bitrate", &max_bitrate); 128 + if ((err != -EINVAL) && !max_bitrate) 130 129 dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n"); 131 130 phy->attrs.max_link_rate = max_bitrate; 132 131
-13
drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c
··· 401 401 HSPHY_INIT_CFG(0x90, 0x60, 0), 402 402 }; 403 403 404 - static const struct hsphy_init_seq init_seq_mdm9607[] = { 405 - HSPHY_INIT_CFG(0x80, 0x44, 0), 406 - HSPHY_INIT_CFG(0x81, 0x38, 0), 407 - HSPHY_INIT_CFG(0x82, 0x24, 0), 408 - HSPHY_INIT_CFG(0x83, 0x13, 0), 409 - }; 410 - 411 404 static const struct hsphy_data hsphy_data_femtophy = { 412 405 .init_seq = init_seq_femtophy, 413 406 .init_seq_num = ARRAY_SIZE(init_seq_femtophy), 414 407 }; 415 408 416 - static const struct hsphy_data hsphy_data_mdm9607 = { 417 - .init_seq = init_seq_mdm9607, 418 - .init_seq_num = ARRAY_SIZE(init_seq_mdm9607), 419 - }; 420 - 421 409 static const struct of_device_id qcom_snps_hsphy_match[] = { 422 410 { .compatible = "qcom,usb-hs-28nm-femtophy", .data = &hsphy_data_femtophy, }, 423 - { .compatible = "qcom,usb-hs-28nm-mdm9607", .data = &hsphy_data_mdm9607, }, 424 411 { }, 425 412 }; 426 413 MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_match);
+1 -1
drivers/phy/renesas/r8a779f0-ether-serdes.c
··· 126 126 r8a779f0_eth_serdes_write32(channel->addr, 0x0160, 0x180, 0x0007); 127 127 r8a779f0_eth_serdes_write32(channel->addr, 0x01ac, 0x180, 0x0000); 128 128 r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x0310); 129 - r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x380, 0x0101); 129 + r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0101); 130 130 ret = r8a779f0_eth_serdes_reg_wait(channel, 0x00c8, 0x0180, BIT(0), 0); 131 131 if (ret) 132 132 return ret;
+3 -1
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
··· 485 485 return ret; 486 486 487 487 ret = property_enable(base, &rport->port_cfg->phy_sus, false); 488 - if (ret) 488 + if (ret) { 489 + clk_disable_unprepare(rphy->clk480m); 489 490 return ret; 491 + } 490 492 491 493 /* waiting for the utmi_clk to become stable */ 492 494 usleep_range(1500, 2000);
+3
drivers/phy/sunplus/phy-sunplus-usb2.c
··· 254 254 return PTR_ERR(usbphy->phy_regs); 255 255 256 256 usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4"); 257 + if (!usbphy->moon4_res_mem) 258 + return -EINVAL; 259 + 257 260 usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start, 258 261 resource_size(usbphy->moon4_res_mem)); 259 262 if (!usbphy->moon4_regs)
+2 -2
drivers/phy/ti/Kconfig
··· 23 23 24 24 config PHY_AM654_SERDES 25 25 tristate "TI AM654 SERDES support" 26 - depends on OF && ARCH_K3 || COMPILE_TEST 26 + depends on OF && (ARCH_K3 || COMPILE_TEST) 27 27 depends on COMMON_CLK 28 28 select GENERIC_PHY 29 29 select MULTIPLEXER ··· 35 35 36 36 config PHY_J721E_WIZ 37 37 tristate "TI J721E WIZ (SERDES Wrapper) support" 38 - depends on OF && ARCH_K3 || COMPILE_TEST 38 + depends on OF && (ARCH_K3 || COMPILE_TEST) 39 39 depends on HAS_IOMEM && OF_ADDRESS 40 40 depends on COMMON_CLK 41 41 select GENERIC_PHY
+2 -1
drivers/pinctrl/nomadik/pinctrl-ab8500.c
··· 6 6 */ 7 7 8 8 #include <linux/kernel.h> 9 - #include <linux/gpio/driver.h> 10 9 #include <linux/pinctrl/pinctrl.h> 10 + 11 11 #include <linux/mfd/abx500/ab8500.h> 12 + 12 13 #include "pinctrl-abx500.h" 13 14 14 15 /* All the pins that can be used for GPIO and some other functions */
+2 -1
drivers/pinctrl/nomadik/pinctrl-ab8505.c
··· 6 6 */ 7 7 8 8 #include <linux/kernel.h> 9 - #include <linux/gpio/driver.h> 10 9 #include <linux/pinctrl/pinctrl.h> 10 + 11 11 #include <linux/mfd/abx500/ab8500.h> 12 + 12 13 #include "pinctrl-abx500.h" 13 14 14 15 /* All the pins that can be used for GPIO and some other functions */
+20 -16
drivers/pinctrl/nomadik/pinctrl-abx500.c
··· 6 6 * 7 7 * Driver allows to use AxB5xx unused pins to be used as GPIO 8 8 */ 9 - #include <linux/kernel.h> 10 - #include <linux/types.h> 11 - #include <linux/slab.h> 12 - #include <linux/init.h> 9 + #include <linux/bitops.h> 13 10 #include <linux/err.h> 11 + #include <linux/gpio/driver.h> 12 + #include <linux/init.h> 13 + #include <linux/interrupt.h> 14 + #include <linux/irq.h> 15 + #include <linux/irqdomain.h> 16 + #include <linux/kernel.h> 14 17 #include <linux/of.h> 15 18 #include <linux/of_device.h> 16 19 #include <linux/platform_device.h> 17 - #include <linux/gpio/driver.h> 18 - #include <linux/irq.h> 19 - #include <linux/irqdomain.h> 20 - #include <linux/interrupt.h> 21 - #include <linux/bitops.h> 20 + #include <linux/seq_file.h> 21 + #include <linux/slab.h> 22 + #include <linux/types.h> 23 + 22 24 #include <linux/mfd/abx500.h> 23 25 #include <linux/mfd/abx500/ab8500.h> 24 - #include <linux/pinctrl/pinctrl.h> 25 - #include <linux/pinctrl/consumer.h> 26 - #include <linux/pinctrl/pinmux.h> 27 - #include <linux/pinctrl/pinconf.h> 28 - #include <linux/pinctrl/pinconf-generic.h> 29 - #include <linux/pinctrl/machine.h> 30 26 31 - #include "pinctrl-abx500.h" 27 + #include <linux/pinctrl/consumer.h> 28 + #include <linux/pinctrl/machine.h> 29 + #include <linux/pinctrl/pinconf-generic.h> 30 + #include <linux/pinctrl/pinconf.h> 31 + #include <linux/pinctrl/pinctrl.h> 32 + #include <linux/pinctrl/pinmux.h> 33 + 32 34 #include "../core.h" 33 35 #include "../pinconf.h" 34 36 #include "../pinctrl-utils.h" 37 + 38 + #include "pinctrl-abx500.h" 35 39 36 40 /* 37 41 * GPIO registers offset
+4
drivers/pinctrl/nomadik/pinctrl-abx500.h
··· 2 2 #ifndef PINCTRL_PINCTRL_ABx500_H 3 3 #define PINCTRL_PINCTRL_ABx500_H 4 4 5 + #include <linux/types.h> 6 + 7 + struct pinctrl_pin_desc; 8 + 5 9 /* Package definitions */ 6 10 #define PINCTRL_AB8500 0 7 11 #define PINCTRL_AB8505 1
+3
drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <linux/kernel.h> 3 + #include <linux/types.h> 4 + 3 5 #include <linux/pinctrl/pinctrl.h> 6 + 4 7 #include "pinctrl-nomadik.h" 5 8 6 9 /* All the pins that can be used for GPIO and some other functions */
+3
drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <linux/kernel.h> 3 + #include <linux/types.h> 4 + 3 5 #include <linux/pinctrl/pinctrl.h> 6 + 4 7 #include "pinctrl-nomadik.h" 5 8 6 9 /* All the pins that can be used for GPIO and some other functions */
+18 -16
drivers/pinctrl/nomadik/pinctrl-nomadik.c
··· 7 7 * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com> 8 8 * Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org> 9 9 */ 10 - #include <linux/kernel.h> 11 - #include <linux/init.h> 12 - #include <linux/device.h> 13 - #include <linux/platform_device.h> 14 - #include <linux/io.h> 10 + #include <linux/bitops.h> 15 11 #include <linux/clk.h> 12 + #include <linux/device.h> 16 13 #include <linux/err.h> 17 14 #include <linux/gpio/driver.h> 18 - #include <linux/spinlock.h> 15 + #include <linux/init.h> 19 16 #include <linux/interrupt.h> 20 - #include <linux/slab.h> 21 - #include <linux/of_device.h> 17 + #include <linux/io.h> 18 + #include <linux/kernel.h> 22 19 #include <linux/of_address.h> 23 - #include <linux/bitops.h> 24 - #include <linux/pinctrl/machine.h> 25 - #include <linux/pinctrl/pinctrl.h> 26 - #include <linux/pinctrl/pinmux.h> 27 - #include <linux/pinctrl/pinconf.h> 20 + #include <linux/of_device.h> 21 + #include <linux/platform_device.h> 22 + #include <linux/seq_file.h> 23 + #include <linux/slab.h> 24 + #include <linux/spinlock.h> 25 + 28 26 /* Since we request GPIOs from ourself */ 29 27 #include <linux/pinctrl/consumer.h> 30 - #include "pinctrl-nomadik.h" 28 + #include <linux/pinctrl/machine.h> 29 + #include <linux/pinctrl/pinconf.h> 30 + #include <linux/pinctrl/pinctrl.h> 31 + #include <linux/pinctrl/pinmux.h> 32 + 31 33 #include "../core.h" 32 34 #include "../pinctrl-utils.h" 35 + 36 + #include "pinctrl-nomadik.h" 33 37 34 38 /* 35 39 * The GPIO module in the Nomadik family of Systems-on-Chip is an ··· 910 906 911 907 return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0); 912 908 } 913 - 914 - #include <linux/seq_file.h> 915 909 916 910 static void nmk_gpio_dbg_show_one(struct seq_file *s, 917 911 struct pinctrl_dev *pctldev, struct gpio_chip *chip,
+5
drivers/pinctrl/nomadik/pinctrl-nomadik.h
··· 2 2 #ifndef PINCTRL_PINCTRL_NOMADIK_H 3 3 #define PINCTRL_PINCTRL_NOMADIK_H 4 4 5 + #include <linux/kernel.h> 6 + #include <linux/types.h> 7 + 8 + #include <linux/pinctrl/pinctrl.h> 9 + 5 10 /* Package definitions */ 6 11 #define PINCTRL_NMK_STN8815 0 7 12 #define PINCTRL_NMK_DB8500 1
+20 -11
drivers/pinctrl/pinctrl-rockchip.c
··· 926 926 RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */ 927 927 RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */ 928 928 RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */ 929 - RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */ 929 + RK_MUXROUTE_GRF(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */ 930 930 RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */ 931 931 RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */ 932 932 RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */ 933 933 RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */ 934 934 RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */ 935 935 RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */ 936 - RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */ 936 + RK_MUXROUTE_GRF(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */ 937 937 RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */ 938 938 RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */ 939 939 RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */ 940 - RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */ 941 - RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */ 940 + RK_MUXROUTE_GRF(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */ 941 + RK_MUXROUTE_GRF(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */ 942 942 RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */ 943 943 RK_MUXROUTE_GRF(1, RK_PA0, 
1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */ 944 944 RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */ ··· 964 964 RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */ 965 965 RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */ 966 966 RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */ 967 - RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */ 967 + RK_MUXROUTE_GRF(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */ 968 968 RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */ 969 969 RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */ 970 970 RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */ ··· 973 973 RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */ 974 974 RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */ 975 975 RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */ 976 - RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */ 977 - RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */ 976 + RK_MUXROUTE_GRF(3, RK_PD6, 4, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */ 977 + RK_MUXROUTE_GRF(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */ 978 978 RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */ 979 979 RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */ 980 980 RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */ ··· 1004 1004 RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */ 1005 1005 RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, 
WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */ 1006 1006 RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */ 1007 - RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */ 1007 + RK_MUXROUTE_GRF(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */ 1008 1008 RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */ 1009 1009 RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */ 1010 - RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */ 1010 + RK_MUXROUTE_GRF(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */ 1011 1011 RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */ 1012 1012 RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */ 1013 - RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */ 1013 + RK_MUXROUTE_GRF(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */ 1014 1014 RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */ 1015 1015 RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */ 1016 1016 }; ··· 2436 2436 case RK3308: 2437 2437 case RK3368: 2438 2438 case RK3399: 2439 + case RK3568: 2439 2440 case RK3588: 2440 2441 pull_type = bank->pull_type[pin_num / 8]; 2441 2442 data >>= bit; 2442 2443 data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1; 2444 + /* 2445 + * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6, 2446 + * where that pull up value becomes 3. 
2447 + */ 2448 + if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) { 2449 + if (data == 3) 2450 + data = 1; 2451 + } 2443 2452 2444 2453 return rockchip_pull_list[pull_type][data]; 2445 2454 default: ··· 2506 2497 } 2507 2498 } 2508 2499 /* 2509 - * In the TRM, pull-up being 1 for everything except the GPIO0_D0-D6, 2500 + * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6, 2510 2501 * where that pull up value becomes 3. 2511 2502 */ 2512 2503 if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
+2 -5
drivers/pinctrl/sunplus/sppctl.c
··· 499 499 return 0; 500 500 } 501 501 502 - #ifdef CONFIG_DEBUG_FS 503 502 static void sppctl_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) 504 503 { 505 504 const char *label; ··· 520 521 seq_puts(s, "\n"); 521 522 } 522 523 } 523 - #endif 524 524 525 525 static int sppctl_gpio_new(struct platform_device *pdev, struct sppctl_pdata *pctl) 526 526 { ··· 548 550 gchip->get = sppctl_gpio_get; 549 551 gchip->set = sppctl_gpio_set; 550 552 gchip->set_config = sppctl_gpio_set_config; 551 - #ifdef CONFIG_DEBUG_FS 552 - gchip->dbg_show = sppctl_gpio_dbg_show; 553 - #endif 553 + gchip->dbg_show = IS_ENABLED(CONFIG_DEBUG_FS) ? 554 + sppctl_gpio_dbg_show : NULL; 554 555 gchip->base = -1; 555 556 gchip->ngpio = sppctl_gpio_list_sz; 556 557 gchip->names = sppctl_gpio_list_s;
+1 -1
drivers/reset/Kconfig
··· 257 257 258 258 config RESET_TI_SCI 259 259 tristate "TI System Control Interface (TI-SCI) reset driver" 260 - depends on TI_SCI_PROTOCOL || COMPILE_TEST 260 + depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n) 261 261 help 262 262 This enables the reset driver support over TI System Control Interface 263 263 available on some new TI's SoCs. If you wish to use reset resources
+1 -3
drivers/reset/reset-uniphier-glue.c
··· 47 47 struct device *dev = &pdev->dev; 48 48 struct uniphier_glue_reset_priv *priv; 49 49 struct resource *res; 50 - resource_size_t size; 51 50 int i, ret; 52 51 53 52 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); ··· 59 60 return -EINVAL; 60 61 61 62 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 62 - size = resource_size(res); 63 63 priv->rdata.membase = devm_ioremap_resource(dev, res); 64 64 if (IS_ERR(priv->rdata.membase)) 65 65 return PTR_ERR(priv->rdata.membase); ··· 94 96 95 97 spin_lock_init(&priv->rdata.lock); 96 98 priv->rdata.rcdev.owner = THIS_MODULE; 97 - priv->rdata.rcdev.nr_resets = size * BITS_PER_BYTE; 99 + priv->rdata.rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE; 98 100 priv->rdata.rcdev.ops = &reset_simple_ops; 99 101 priv->rdata.rcdev.of_node = dev->of_node; 100 102 priv->rdata.active_low = true;
+4 -3
drivers/soc/imx/imx8mp-blk-ctrl.c
··· 212 212 break; 213 213 case IMX8MP_HDMIBLK_PD_LCDIF: 214 214 regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, 215 - BIT(7) | BIT(16) | BIT(17) | BIT(18) | 215 + BIT(16) | BIT(17) | BIT(18) | 216 216 BIT(19) | BIT(20)); 217 217 regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11)); 218 218 regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, ··· 241 241 regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(1)); 242 242 break; 243 243 case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY: 244 + regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7)); 244 245 regmap_set_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24)); 245 246 regmap_set_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12)); 246 247 regmap_clear_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3)); ··· 271 270 BIT(4) | BIT(5) | BIT(6)); 272 271 regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(11)); 273 272 regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, 274 - BIT(7) | BIT(16) | BIT(17) | BIT(18) | 273 + BIT(16) | BIT(17) | BIT(18) | 275 274 BIT(19) | BIT(20)); 276 275 break; 277 276 case IMX8MP_HDMIBLK_PD_PAI: ··· 299 298 case IMX8MP_HDMIBLK_PD_HDMI_TX_PHY: 300 299 regmap_set_bits(bc->regmap, HDMI_TX_CONTROL0, BIT(3)); 301 300 regmap_clear_bits(bc->regmap, HDMI_RTX_RESET_CTL0, BIT(12)); 301 + regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL0, BIT(7)); 302 302 regmap_clear_bits(bc->regmap, HDMI_RTX_CLK_CTL1, BIT(22) | BIT(24)); 303 303 break; 304 304 case IMX8MP_HDMIBLK_PD_HDCP: ··· 592 590 ret = PTR_ERR(domain->power_dev); 593 591 goto cleanup_pds; 594 592 } 595 - dev_set_name(domain->power_dev, "%s", data->name); 596 593 597 594 domain->genpd.name = data->name; 598 595 domain->genpd.power_on = imx8mp_blk_ctrl_power_on;
+2 -2
drivers/soc/imx/soc-imx8m.c
··· 66 66 ocotp_base = of_iomap(np, 0); 67 67 WARN_ON(!ocotp_base); 68 68 clk = of_clk_get_by_name(np, NULL); 69 - if (!clk) { 70 - WARN_ON(!clk); 69 + if (IS_ERR(clk)) { 70 + WARN_ON(IS_ERR(clk)); 71 71 return 0; 72 72 } 73 73
+2 -1
drivers/soc/qcom/apr.c
··· 461 461 goto out; 462 462 } 463 463 464 + /* Protection domain is optional, it does not exist on older platforms */ 464 465 ret = of_property_read_string_index(np, "qcom,protection-domain", 465 466 1, &adev->service_path); 466 - if (ret < 0) { 467 + if (ret < 0 && ret != -EINVAL) { 467 468 dev_err(dev, "Failed to read second value of qcom,protection-domain\n"); 468 469 goto out; 469 470 }
+5 -1
drivers/soc/qcom/cpr.c
··· 1708 1708 1709 1709 ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd); 1710 1710 if (ret) 1711 - return ret; 1711 + goto err_remove_genpd; 1712 1712 1713 1713 platform_set_drvdata(pdev, drv); 1714 1714 cpr_debugfs_init(drv); 1715 1715 1716 1716 return 0; 1717 + 1718 + err_remove_genpd: 1719 + pm_genpd_remove(&drv->pd); 1720 + return ret; 1717 1721 } 1718 1722 1719 1723 static int cpr_remove(struct platform_device *pdev)
+1 -1
drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
··· 86 86 87 87 struct vchiq_instance; 88 88 89 - extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance); 89 + extern int vchiq_initialise(struct vchiq_instance **pinstance); 90 90 extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance); 91 91 extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance); 92 92 extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
+2 -2
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
··· 100 100 extern void 101 101 vchiq_dump_service_use_state(struct vchiq_state *state); 102 102 103 - extern enum vchiq_status 103 + extern int 104 104 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, 105 105 enum USE_TYPE_E use_type); 106 - extern enum vchiq_status 106 + extern int 107 107 vchiq_release_internal(struct vchiq_state *state, 108 108 struct vchiq_service *service); 109 109
+10 -3
drivers/thermal/thermal_core.c
··· 909 909 cdev->devdata = devdata; 910 910 911 911 ret = cdev->ops->get_max_state(cdev, &cdev->max_state); 912 - if (ret) 913 - goto out_kfree_type; 912 + if (ret) { 913 + kfree(cdev->type); 914 + goto out_ida_remove; 915 + } 914 916 915 917 thermal_cooling_device_setup_sysfs(cdev); 918 + 916 919 ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id); 917 920 if (ret) { 921 + kfree(cdev->type); 918 922 thermal_cooling_device_destroy_sysfs(cdev); 919 - goto out_kfree_type; 923 + goto out_ida_remove; 920 924 } 925 + 921 926 ret = device_register(&cdev->device); 922 927 if (ret) 923 928 goto out_kfree_type; ··· 948 943 thermal_cooling_device_destroy_sysfs(cdev); 949 944 kfree(cdev->type); 950 945 put_device(&cdev->device); 946 + 947 + /* thermal_release() takes care of the rest */ 951 948 cdev = NULL; 952 949 out_ida_remove: 953 950 ida_free(&thermal_cdev_ida, id);
+4 -16
drivers/thunderbolt/retimer.c
··· 427 427 { 428 428 u32 status[TB_MAX_RETIMER_INDEX + 1] = {}; 429 429 int ret, i, last_idx = 0; 430 - struct usb4_port *usb4; 431 - 432 - usb4 = port->usb4; 433 - if (!usb4) 434 - return 0; 435 - 436 - pm_runtime_get_sync(&usb4->dev); 437 430 438 431 /* 439 432 * Send broadcast RT to make sure retimer indices facing this ··· 434 441 */ 435 442 ret = usb4_port_enumerate_retimers(port); 436 443 if (ret) 437 - goto out; 444 + return ret; 438 445 439 446 /* 440 447 * Enable sideband channel for each retimer. We can do this ··· 464 471 break; 465 472 } 466 473 467 - if (!last_idx) { 468 - ret = 0; 469 - goto out; 470 - } 474 + if (!last_idx) 475 + return 0; 471 476 472 477 /* Add on-board retimers if they do not exist already */ 478 + ret = 0; 473 479 for (i = 1; i <= last_idx; i++) { 474 480 struct tb_retimer *rt; 475 481 ··· 481 489 break; 482 490 } 483 491 } 484 - 485 - out: 486 - pm_runtime_mark_last_busy(&usb4->dev); 487 - pm_runtime_put_autosuspend(&usb4->dev); 488 492 489 493 return ret; 490 494 }
+15 -5
drivers/thunderbolt/tb.c
··· 628 628 * Downstream switch is reachable through two ports. 629 629 * Only scan on the primary port (link_nr == 0). 630 630 */ 631 + 632 + if (port->usb4) 633 + pm_runtime_get_sync(&port->usb4->dev); 634 + 631 635 if (tb_wait_for_port(port, false) <= 0) 632 - return; 636 + goto out_rpm_put; 633 637 if (port->remote) { 634 638 tb_port_dbg(port, "port already has a remote\n"); 635 - return; 639 + goto out_rpm_put; 636 640 } 637 641 638 642 tb_retimer_scan(port, true); ··· 651 647 */ 652 648 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) 653 649 tb_scan_xdomain(port); 654 - return; 650 + goto out_rpm_put; 655 651 } 656 652 657 653 if (tb_switch_configure(sw)) { 658 654 tb_switch_put(sw); 659 - return; 655 + goto out_rpm_put; 660 656 } 661 657 662 658 /* ··· 685 681 686 682 if (tb_switch_add(sw)) { 687 683 tb_switch_put(sw); 688 - return; 684 + goto out_rpm_put; 689 685 } 690 686 691 687 /* Link the switches using both links if available */ ··· 737 733 738 734 tb_add_dp_resources(sw); 739 735 tb_scan_switch(sw); 736 + 737 + out_rpm_put: 738 + if (port->usb4) { 739 + pm_runtime_mark_last_busy(&port->usb4->dev); 740 + pm_runtime_put_autosuspend(&port->usb4->dev); 741 + } 740 742 } 741 743 742 744 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
+1 -1
drivers/thunderbolt/tunnel.c
··· 1275 1275 return; 1276 1276 } else if (!ret) { 1277 1277 /* Use maximum link rate if the link valid is not set */ 1278 - ret = usb4_usb3_port_max_link_rate(tunnel->src_port); 1278 + ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port); 1279 1279 if (ret < 0) { 1280 1280 tb_tunnel_warn(tunnel, "failed to read maximum link rate\n"); 1281 1281 return;
+12 -5
drivers/thunderbolt/xdomain.c
··· 1419 1419 * registered, we notify the userspace that it has changed. 1420 1420 */ 1421 1421 if (!update) { 1422 - struct tb_port *port; 1422 + /* 1423 + * Now disable lane 1 if bonding was not enabled. Do 1424 + * this only if bonding was possible at the beginning 1425 + * (that is we are the connection manager and there are 1426 + * two lanes). 1427 + */ 1428 + if (xd->bonding_possible) { 1429 + struct tb_port *port; 1423 1430 1424 - /* Now disable lane 1 if bonding was not enabled */ 1425 - port = tb_port_at(xd->route, tb_xdomain_parent(xd)); 1426 - if (!port->bonded) 1427 - tb_port_disable(port->dual_link_port); 1431 + port = tb_port_at(xd->route, tb_xdomain_parent(xd)); 1432 + if (!port->bonded) 1433 + tb_port_disable(port->dual_link_port); 1434 + } 1428 1435 1429 1436 if (device_add(&xd->dev)) { 1430 1437 dev_err(&xd->dev, "failed to add XDomain device\n");
+14
drivers/tty/serial/8250/8250_exar.c
··· 43 43 #define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358 44 44 #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 45 45 46 + #define PCI_DEVICE_ID_SEALEVEL_710xC 0x1001 47 + #define PCI_DEVICE_ID_SEALEVEL_720xC 0x1002 48 + #define PCI_DEVICE_ID_SEALEVEL_740xC 0x1004 49 + #define PCI_DEVICE_ID_SEALEVEL_780xC 0x1008 50 + #define PCI_DEVICE_ID_SEALEVEL_716xC 0x1010 51 + 46 52 #define UART_EXAR_INT0 0x80 47 53 #define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */ 48 54 #define UART_EXAR_SLEEP 0x8b /* Sleep mode */ ··· 644 638 nr_ports = BIT(((pcidev->device & 0x38) >> 3) - 1); 645 639 else if (board->num_ports) 646 640 nr_ports = board->num_ports; 641 + else if (pcidev->vendor == PCI_VENDOR_ID_SEALEVEL) 642 + nr_ports = pcidev->device & 0xff; 647 643 else 648 644 nr_ports = pcidev->device & 0x0f; 649 645 ··· 872 864 EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4), 873 865 EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4), 874 866 EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8), 867 + 868 + EXAR_DEVICE(SEALEVEL, 710xC, pbn_exar_XR17V35x), 869 + EXAR_DEVICE(SEALEVEL, 720xC, pbn_exar_XR17V35x), 870 + EXAR_DEVICE(SEALEVEL, 740xC, pbn_exar_XR17V35x), 871 + EXAR_DEVICE(SEALEVEL, 780xC, pbn_exar_XR17V35x), 872 + EXAR_DEVICE(SEALEVEL, 716xC, pbn_exar_XR17V35x), 875 873 { 0, } 876 874 }; 877 875 MODULE_DEVICE_TABLE(pci, exar_pci_tbl);
+4 -4
drivers/tty/serial/amba-pl011.c
··· 1466 1466 struct circ_buf *xmit = &uap->port.state->xmit; 1467 1467 int count = uap->fifosize >> 1; 1468 1468 1469 + if ((uap->port.rs485.flags & SER_RS485_ENABLED) && 1470 + !uap->rs485_tx_started) 1471 + pl011_rs485_tx_start(uap); 1472 + 1469 1473 if (uap->port.x_char) { 1470 1474 if (!pl011_tx_char(uap, uap->port.x_char, from_irq)) 1471 1475 return true; ··· 1480 1476 pl011_stop_tx(&uap->port); 1481 1477 return false; 1482 1478 } 1483 - 1484 - if ((uap->port.rs485.flags & SER_RS485_ENABLED) && 1485 - !uap->rs485_tx_started) 1486 - pl011_rs485_tx_start(uap); 1487 1479 1488 1480 /* If we are using DMA mode, try to send some characters. */ 1489 1481 if (pl011_dma_tx_irq(uap))
+1 -7
drivers/tty/serial/atmel_serial.c
··· 2657 2657 else if (mr == ATMEL_US_PAR_ODD) 2658 2658 *parity = 'o'; 2659 2659 2660 - /* 2661 - * The serial core only rounds down when matching this to a 2662 - * supported baud rate. Make sure we don't end up slightly 2663 - * lower than one of those, as it would make us fall through 2664 - * to a much lower baud rate than we really want. 2665 - */ 2666 - *baud = port->uartclk / (16 * (quot - 1)); 2660 + *baud = port->uartclk / (16 * quot); 2667 2661 } 2668 2662 2669 2663 static int __init atmel_console_setup(struct console *co, char *options)
+5 -15
drivers/tty/serial/kgdboc.c
··· 171 171 int err = -ENODEV; 172 172 char *cptr = config; 173 173 struct console *cons; 174 + int cookie; 174 175 175 176 if (!strlen(config) || isspace(config[0])) { 176 177 err = 0; ··· 190 189 if (kgdboc_register_kbd(&cptr)) 191 190 goto do_register; 192 191 193 - /* 194 - * tty_find_polling_driver() can call uart_set_options() 195 - * (via poll_init) to configure the uart. Take the console_list_lock 196 - * in order to synchronize against register_console(), which can also 197 - * configure the uart via uart_set_options(). This also allows safe 198 - * traversal of the console list. 199 - */ 200 - console_list_lock(); 201 - 202 192 p = tty_find_polling_driver(cptr, &tty_line); 203 - if (!p) { 204 - console_list_unlock(); 193 + if (!p) 205 194 goto noconfig; 206 - } 207 195 208 196 /* 209 197 * Take console_lock to serialize device() callback with ··· 201 211 */ 202 212 console_lock(); 203 213 204 - for_each_console(cons) { 214 + cookie = console_srcu_read_lock(); 215 + for_each_console_srcu(cons) { 205 216 int idx; 206 217 if (cons->device && cons->device(cons, &idx) == p && 207 218 idx == tty_line) { ··· 210 219 break; 211 220 } 212 221 } 222 + console_srcu_read_unlock(cookie); 213 223 214 224 console_unlock(); 215 - 216 - console_list_unlock(); 217 225 218 226 kgdb_tty_driver = p; 219 227 kgdb_tty_line = tty_line;
+1 -1
drivers/tty/serial/pch_uart.c
··· 749 749 uart_xmit_advance(port, sg_dma_len(sg)); 750 750 751 751 async_tx_ack(priv->desc_tx); 752 - dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE); 752 + dma_unmap_sg(port->dev, priv->sg_tx_p, priv->orig_nent, DMA_TO_DEVICE); 753 753 priv->tx_dma_use = 0; 754 754 priv->nent = 0; 755 755 priv->orig_nent = 0;
+24 -8
drivers/tty/serial/qcom_geni_serial.c
··· 864 864 return IRQ_HANDLED; 865 865 } 866 866 867 - static void get_tx_fifo_size(struct qcom_geni_serial_port *port) 867 + static int setup_fifos(struct qcom_geni_serial_port *port) 868 868 { 869 869 struct uart_port *uport; 870 + u32 old_rx_fifo_depth = port->rx_fifo_depth; 870 871 871 872 uport = &port->uport; 872 873 port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se); ··· 875 874 port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se); 876 875 uport->fifosize = 877 876 (port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE; 877 + 878 + if (port->rx_fifo && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) { 879 + port->rx_fifo = devm_krealloc(uport->dev, port->rx_fifo, 880 + port->rx_fifo_depth * sizeof(u32), 881 + GFP_KERNEL); 882 + if (!port->rx_fifo) 883 + return -ENOMEM; 884 + } 885 + 886 + return 0; 878 887 } 879 888 880 889 ··· 899 888 u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT; 900 889 u32 proto; 901 890 u32 pin_swap; 891 + int ret; 902 892 903 893 proto = geni_se_read_proto(&port->se); 904 894 if (proto != GENI_SE_UART) { ··· 909 897 910 898 qcom_geni_serial_stop_rx(uport); 911 899 912 - get_tx_fifo_size(port); 900 + ret = setup_fifos(port); 901 + if (ret) 902 + return ret; 913 903 914 904 writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT); 915 905 ··· 1530 1516 return 0; 1531 1517 } 1532 1518 1533 - static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev) 1519 + static int qcom_geni_serial_sys_suspend(struct device *dev) 1534 1520 { 1535 1521 struct qcom_geni_serial_port *port = dev_get_drvdata(dev); 1536 1522 struct uart_port *uport = &port->uport; ··· 1547 1533 return uart_suspend_port(private_data->drv, uport); 1548 1534 } 1549 1535 1550 - static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev) 1536 + static int qcom_geni_serial_sys_resume(struct device *dev) 1551 1537 { 1552 1538 int ret; 1553 1539 struct qcom_geni_serial_port *port = dev_get_drvdata(dev); ··· 1595 
1581 } 1596 1582 1597 1583 static const struct dev_pm_ops qcom_geni_serial_pm_ops = { 1598 - SET_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend, 1599 - qcom_geni_serial_sys_resume) 1600 - .restore = qcom_geni_serial_sys_hib_resume, 1601 - .thaw = qcom_geni_serial_sys_hib_resume, 1584 + .suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend), 1585 + .resume = pm_sleep_ptr(qcom_geni_serial_sys_resume), 1586 + .freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend), 1587 + .poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend), 1588 + .restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume), 1589 + .thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume), 1602 1590 }; 1603 1591 1604 1592 static const struct of_device_id qcom_geni_serial_match_table[] = {
+5
drivers/tty/serial/serial_core.c
··· 2212 2212 * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even) 2213 2213 * @bits: number of data bits 2214 2214 * @flow: flow control character - 'r' (rts) 2215 + * 2216 + * Locking: Caller must hold console_list_lock in order to serialize 2217 + * early initialization of the serial-console lock. 2215 2218 */ 2216 2219 int 2217 2220 uart_set_options(struct uart_port *port, struct console *co, ··· 2622 2619 2623 2620 if (!ret && options) { 2624 2621 uart_parse_options(options, &baud, &parity, &bits, &flow); 2622 + console_list_lock(); 2625 2623 ret = uart_set_options(port, NULL, baud, parity, bits, flow); 2624 + console_list_unlock(); 2626 2625 } 2627 2626 out: 2628 2627 mutex_unlock(&tport->mutex);
+12
drivers/usb/cdns3/cdns3-gadget.c
··· 2614 2614 u8 req_on_hw_ring = 0; 2615 2615 unsigned long flags; 2616 2616 int ret = 0; 2617 + int val; 2617 2618 2618 2619 if (!ep || !request || !ep->desc) 2619 2620 return -EINVAL; ··· 2650 2649 2651 2650 /* Update ring only if removed request is on pending_req_list list */ 2652 2651 if (req_on_hw_ring && link_trb) { 2652 + /* Stop DMA */ 2653 + writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd); 2654 + 2655 + /* wait for DFLUSH cleared */ 2656 + readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val, 2657 + !(val & EP_CMD_DFLUSH), 1, 1000); 2658 + 2653 2659 link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma + 2654 2660 ((priv_req->end_trb + 1) * TRB_SIZE))); 2655 2661 link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) | ··· 2667 2659 } 2668 2660 2669 2661 cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET); 2662 + 2663 + req = cdns3_next_request(&priv_ep->pending_req_list); 2664 + if (req) 2665 + cdns3_rearm_transfer(priv_ep, 1); 2670 2666 2671 2667 not_found: 2672 2668 spin_unlock_irqrestore(&priv_dev->lock, flags);
+2 -2
drivers/usb/chipidea/core.c
··· 1294 1294 cable_id = &ci->platdata->id_extcon; 1295 1295 cable_vbus = &ci->platdata->vbus_extcon; 1296 1296 1297 - if ((!IS_ERR(cable_id->edev) || !IS_ERR(ci->role_switch)) 1297 + if ((!IS_ERR(cable_id->edev) || ci->role_switch) 1298 1298 && ci->is_otg && 1299 1299 (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) 1300 1300 ci_irq(ci); 1301 1301 1302 - if ((!IS_ERR(cable_vbus->edev) || !IS_ERR(ci->role_switch)) 1302 + if ((!IS_ERR(cable_vbus->edev) || ci->role_switch) 1303 1303 && ci->is_otg && 1304 1304 (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) 1305 1305 ci_irq(ci);
+13
drivers/usb/core/hub.c
··· 44 44 #define USB_PRODUCT_USB5534B 0x5534 45 45 #define USB_VENDOR_CYPRESS 0x04b4 46 46 #define USB_PRODUCT_CY7C65632 0x6570 47 + #define USB_VENDOR_TEXAS_INSTRUMENTS 0x0451 48 + #define USB_PRODUCT_TUSB8041_USB3 0x8140 49 + #define USB_PRODUCT_TUSB8041_USB2 0x8142 47 50 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01 48 51 #define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02 49 52 ··· 5857 5854 .idVendor = USB_VENDOR_GENESYS_LOGIC, 5858 5855 .bInterfaceClass = USB_CLASS_HUB, 5859 5856 .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND}, 5857 + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR 5858 + | USB_DEVICE_ID_MATCH_PRODUCT, 5859 + .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS, 5860 + .idProduct = USB_PRODUCT_TUSB8041_USB2, 5861 + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, 5862 + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR 5863 + | USB_DEVICE_ID_MATCH_PRODUCT, 5864 + .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS, 5865 + .idProduct = USB_PRODUCT_TUSB8041_USB3, 5866 + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND}, 5860 5867 { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS, 5861 5868 .bDeviceClass = USB_CLASS_HUB}, 5862 5869 { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
+65
drivers/usb/core/usb-acpi.c
··· 37 37 } 38 38 EXPORT_SYMBOL_GPL(usb_acpi_power_manageable); 39 39 40 + #define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899" 41 + #define USB_DSM_DISABLE_U1_U2_FOR_PORT 5 42 + 43 + /** 44 + * usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port. 45 + * @hdev: USB device belonging to the usb hub 46 + * @index: zero based port index 47 + * 48 + * Some USB3 ports may not support USB3 link power management U1/U2 states 49 + * due to different retimer setup. ACPI provides _DSM method which returns 0x01 50 + * if U1 and U2 states should be disabled. Evaluate _DSM with: 51 + * Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899 52 + * Arg1: Revision ID = 0 53 + * Arg2: Function Index = 5 54 + * Arg3: (empty) 55 + * 56 + * Return 1 if USB3 port is LPM incapable, negative on error, otherwise 0 57 + */ 58 + 59 + int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index) 60 + { 61 + union acpi_object *obj; 62 + acpi_handle port_handle; 63 + int port1 = index + 1; 64 + guid_t guid; 65 + int ret; 66 + 67 + ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid); 68 + if (ret) 69 + return ret; 70 + 71 + port_handle = usb_get_hub_port_acpi_handle(hdev, port1); 72 + if (!port_handle) { 73 + dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1); 74 + return -ENODEV; 75 + } 76 + 77 + if (!acpi_check_dsm(port_handle, &guid, 0, 78 + BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) { 79 + dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n", 80 + port1, USB_DSM_DISABLE_U1_U2_FOR_PORT); 81 + return -ENODEV; 82 + } 83 + 84 + obj = acpi_evaluate_dsm(port_handle, &guid, 0, 85 + USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL); 86 + 87 + if (!obj) 88 + return -ENODEV; 89 + 90 + if (obj->type != ACPI_TYPE_INTEGER) { 91 + dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1); 92 + ACPI_FREE(obj); 93 + return -EINVAL; 94 + } 95 + 96 + if (obj->integer.value == 0x01) 97 + ret = 1; 98 + 99 + ACPI_FREE(obj); 100 + 101 + return ret; 102 + } 103 + 
EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable); 104 + 40 105 /** 41 106 * usb_acpi_set_power_state - control usb port's power via acpi power 42 107 * resource
+1 -1
drivers/usb/dwc3/Kconfig
··· 3 3 config USB_DWC3 4 4 tristate "DesignWare USB3 DRD Core Support" 5 5 depends on (USB || USB_GADGET) && HAS_DMA 6 + depends on (EXTCON || EXTCON=n) 6 7 select USB_XHCI_PLATFORM if USB_XHCI_HCD 7 8 select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE 8 9 help ··· 45 44 config USB_DWC3_DUAL_ROLE 46 45 bool "Dual Role mode" 47 46 depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3)) 48 - depends on (EXTCON=y || EXTCON=USB_DWC3) 49 47 help 50 48 This is the default mode of working of DWC3 controller where 51 49 both host and gadget features are enabled.
+10 -2
drivers/usb/gadget/configfs.c
··· 393 393 WARN_ON(!list_empty(&gi->string_list)); 394 394 WARN_ON(!list_empty(&gi->available_func)); 395 395 kfree(gi->composite.gadget_driver.function); 396 + kfree(gi->composite.gadget_driver.driver.name); 396 397 kfree(gi); 397 398 } 398 399 ··· 1573 1572 .max_speed = USB_SPEED_SUPER_PLUS, 1574 1573 .driver = { 1575 1574 .owner = THIS_MODULE, 1576 - .name = "configfs-gadget", 1577 1575 }, 1578 1576 .match_existing_only = 1, 1579 1577 }; ··· 1623 1623 1624 1624 gi->composite.gadget_driver = configfs_driver_template; 1625 1625 1626 + gi->composite.gadget_driver.driver.name = kasprintf(GFP_KERNEL, 1627 + "configfs-gadget.%s", name); 1628 + if (!gi->composite.gadget_driver.driver.name) 1629 + goto err; 1630 + 1626 1631 gi->composite.gadget_driver.function = kstrdup(name, GFP_KERNEL); 1627 1632 gi->composite.name = gi->composite.gadget_driver.function; 1628 1633 1629 1634 if (!gi->composite.gadget_driver.function) 1630 - goto err; 1635 + goto out_free_driver_name; 1631 1636 1632 1637 return &gi->group; 1638 + 1639 + out_free_driver_name: 1640 + kfree(gi->composite.gadget_driver.driver.name); 1633 1641 err: 1634 1642 kfree(gi); 1635 1643 return ERR_PTR(-ENOMEM);
+7
drivers/usb/gadget/function/f_fs.c
··· 279 279 struct usb_request *req = ffs->ep0req; 280 280 int ret; 281 281 282 + if (!req) 283 + return -EINVAL; 284 + 282 285 req->zero = len < le16_to_cpu(ffs->ev.setup.wLength); 283 286 284 287 spin_unlock_irq(&ffs->ev.waitq.lock); ··· 1895 1892 ENTER(); 1896 1893 1897 1894 if (!WARN_ON(!ffs->gadget)) { 1895 + /* dequeue before freeing ep0req */ 1896 + usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req); 1897 + mutex_lock(&ffs->mutex); 1898 1898 usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req); 1899 1899 ffs->ep0req = NULL; 1900 1900 ffs->gadget = NULL; 1901 1901 clear_bit(FFS_FL_BOUND, &ffs->flags); 1902 + mutex_unlock(&ffs->mutex); 1902 1903 ffs_data_put(ffs); 1903 1904 } 1904 1905 }
+3 -1
drivers/usb/gadget/function/f_ncm.c
··· 83 83 /* peak (theoretical) bulk transfer rate in bits-per-second */ 84 84 static inline unsigned ncm_bitrate(struct usb_gadget *g) 85 85 { 86 - if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) 86 + if (!g) 87 + return 0; 88 + else if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) 87 89 return 4250000000U; 88 90 else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) 89 91 return 3750000000U;
+21 -7
drivers/usb/gadget/legacy/inode.c
··· 229 229 */ 230 230 231 231 static const char *CHIP; 232 + static DEFINE_MUTEX(sb_mutex); /* Serialize superblock operations */ 232 233 233 234 /*----------------------------------------------------------------------*/ 234 235 ··· 2011 2010 { 2012 2011 struct inode *inode; 2013 2012 struct dev_data *dev; 2013 + int rc; 2014 2014 2015 - if (the_device) 2016 - return -ESRCH; 2015 + mutex_lock(&sb_mutex); 2016 + 2017 + if (the_device) { 2018 + rc = -ESRCH; 2019 + goto Done; 2020 + } 2017 2021 2018 2022 CHIP = usb_get_gadget_udc_name(); 2019 - if (!CHIP) 2020 - return -ENODEV; 2023 + if (!CHIP) { 2024 + rc = -ENODEV; 2025 + goto Done; 2026 + } 2021 2027 2022 2028 /* superblock */ 2023 2029 sb->s_blocksize = PAGE_SIZE; ··· 2061 2053 * from binding to a controller. 2062 2054 */ 2063 2055 the_device = dev; 2064 - return 0; 2056 + rc = 0; 2057 + goto Done; 2065 2058 2066 - Enomem: 2059 + Enomem: 2067 2060 kfree(CHIP); 2068 2061 CHIP = NULL; 2062 + rc = -ENOMEM; 2069 2063 2070 - return -ENOMEM; 2064 + Done: 2065 + mutex_unlock(&sb_mutex); 2066 + return rc; 2071 2067 } 2072 2068 2073 2069 /* "mount -t gadgetfs path /dev/gadget" ends up here */ ··· 2093 2081 static void 2094 2082 gadgetfs_kill_sb (struct super_block *sb) 2095 2083 { 2084 + mutex_lock(&sb_mutex); 2096 2085 kill_litter_super (sb); 2097 2086 if (the_device) { 2098 2087 put_dev (the_device); ··· 2101 2088 } 2102 2089 kfree(CHIP); 2103 2090 CHIP = NULL; 2091 + mutex_unlock(&sb_mutex); 2104 2092 } 2105 2093 2106 2094 /*----------------------------------------------------------------------*/
+3
drivers/usb/gadget/legacy/webcam.c
··· 293 293 (const struct uvc_descriptor_header *) &uvc_format_yuv, 294 294 (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p, 295 295 (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p, 296 + (const struct uvc_descriptor_header *) &uvc_color_matching, 296 297 (const struct uvc_descriptor_header *) &uvc_format_mjpg, 297 298 (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p, 298 299 (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p, ··· 306 305 (const struct uvc_descriptor_header *) &uvc_format_yuv, 307 306 (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p, 308 307 (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p, 308 + (const struct uvc_descriptor_header *) &uvc_color_matching, 309 309 (const struct uvc_descriptor_header *) &uvc_format_mjpg, 310 310 (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p, 311 311 (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p, ··· 319 317 (const struct uvc_descriptor_header *) &uvc_format_yuv, 320 318 (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p, 321 319 (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p, 320 + (const struct uvc_descriptor_header *) &uvc_color_matching, 322 321 (const struct uvc_descriptor_header *) &uvc_format_mjpg, 323 322 (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p, 324 323 (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
+1 -1
drivers/usb/host/ehci-fsl.c
··· 29 29 #include "ehci-fsl.h" 30 30 31 31 #define DRIVER_DESC "Freescale EHCI Host controller driver" 32 - #define DRV_NAME "ehci-fsl" 32 + #define DRV_NAME "fsl-ehci" 33 33 34 34 static struct hc_driver __read_mostly fsl_ehci_hc_driver; 35 35
+45
drivers/usb/host/xhci-pci.c
··· 78 78 static struct hc_driver __read_mostly xhci_pci_hc_driver; 79 79 80 80 static int xhci_pci_setup(struct usb_hcd *hcd); 81 + static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 82 + struct usb_tt *tt, gfp_t mem_flags); 81 83 82 84 static const struct xhci_driver_overrides xhci_pci_overrides __initconst = { 83 85 .reset = xhci_pci_setup, 86 + .update_hub_device = xhci_pci_update_hub_device, 84 87 }; 85 88 86 89 /* called after powerup, by probe or system-pm "wakeup" */ ··· 355 352 NULL); 356 353 ACPI_FREE(obj); 357 354 } 355 + 356 + static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) 357 + { 358 + struct xhci_hcd *xhci = hcd_to_xhci(hcd); 359 + struct xhci_hub *rhub = &xhci->usb3_rhub; 360 + int ret; 361 + int i; 362 + 363 + /* This is not the usb3 roothub we are looking for */ 364 + if (hcd != rhub->hcd) 365 + return; 366 + 367 + if (hdev->maxchild > rhub->num_ports) { 368 + dev_err(&hdev->dev, "USB3 roothub port number mismatch\n"); 369 + return; 370 + } 371 + 372 + for (i = 0; i < hdev->maxchild; i++) { 373 + ret = usb_acpi_port_lpm_incapable(hdev, i); 374 + 375 + dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret); 376 + 377 + if (ret >= 0) { 378 + rhub->ports[i]->lpm_incapable = ret; 379 + continue; 380 + } 381 + } 382 + } 383 + 358 384 #else 359 385 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } 386 + static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { } 360 387 #endif /* CONFIG_ACPI */ 361 388 362 389 /* called during probe() after chip reset completes */ ··· 417 384 418 385 /* Find any debug ports */ 419 386 return xhci_pci_reinit(xhci, pdev); 387 + } 388 + 389 + static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 390 + struct usb_tt *tt, gfp_t mem_flags) 391 + { 392 + /* Check if acpi claims some USB3 roothub ports are lpm incapable */ 393 + if (!hdev->parent) 394 + 
xhci_find_lpm_incapable_ports(hcd, hdev); 395 + 396 + return xhci_update_hub_device(hcd, hdev, tt, mem_flags); 420 397 } 421 398 422 399 /* ··· 497 454 498 455 if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW) 499 456 pm_runtime_allow(&dev->dev); 457 + 458 + dma_set_max_seg_size(&dev->dev, UINT_MAX); 500 459 501 460 return 0; 502 461
+4 -1
drivers/usb/host/xhci-ring.c
··· 1169 1169 struct xhci_virt_ep *ep; 1170 1170 struct xhci_ring *ring; 1171 1171 1172 - ep = &xhci->devs[slot_id]->eps[ep_index]; 1172 + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); 1173 + if (!ep) 1174 + return; 1175 + 1173 1176 if ((ep->ep_state & EP_HAS_STREAMS) || 1174 1177 (ep->ep_state & EP_GETTING_NO_STREAMS)) { 1175 1178 int stream_id;
+17 -1
drivers/usb/host/xhci.c
··· 3974 3974 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3975 3975 struct xhci_virt_device *virt_dev; 3976 3976 struct xhci_slot_ctx *slot_ctx; 3977 + unsigned long flags; 3977 3978 int i, ret; 3978 3979 3979 3980 /* ··· 4001 4000 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; 4002 4001 virt_dev->udev = NULL; 4003 4002 xhci_disable_slot(xhci, udev->slot_id); 4003 + 4004 + spin_lock_irqsave(&xhci->lock, flags); 4004 4005 xhci_free_virt_device(xhci, udev->slot_id); 4006 + spin_unlock_irqrestore(&xhci->lock, flags); 4007 + 4005 4008 } 4006 4009 4007 4010 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) ··· 5049 5044 struct usb_device *udev, enum usb3_link_state state) 5050 5045 { 5051 5046 struct xhci_hcd *xhci; 5047 + struct xhci_port *port; 5052 5048 u16 hub_encoded_timeout; 5053 5049 int mel; 5054 5050 int ret; ··· 5065 5059 5066 5060 if (xhci_check_tier_policy(xhci, udev, state) < 0) 5067 5061 return USB3_LPM_DISABLED; 5062 + 5063 + /* If connected to root port then check port can handle lpm */ 5064 + if (udev->parent && !udev->parent->parent) { 5065 + port = xhci->usb3_rhub.ports[udev->portnum - 1]; 5066 + if (port->lpm_incapable) 5067 + return USB3_LPM_DISABLED; 5068 + } 5068 5069 5069 5070 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); 5070 5071 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); ··· 5132 5119 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 5133 5120 * internal data structures for the device. 
5134 5121 */ 5135 - static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 5122 + int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 5136 5123 struct usb_tt *tt, gfp_t mem_flags) 5137 5124 { 5138 5125 struct xhci_hcd *xhci = hcd_to_xhci(hcd); ··· 5232 5219 xhci_free_command(xhci, config_cmd); 5233 5220 return ret; 5234 5221 } 5222 + EXPORT_SYMBOL_GPL(xhci_update_hub_device); 5235 5223 5236 5224 static int xhci_get_frame(struct usb_hcd *hcd) 5237 5225 { ··· 5516 5502 drv->check_bandwidth = over->check_bandwidth; 5517 5503 if (over->reset_bandwidth) 5518 5504 drv->reset_bandwidth = over->reset_bandwidth; 5505 + if (over->update_hub_device) 5506 + drv->update_hub_device = over->update_hub_device; 5519 5507 } 5520 5508 } 5521 5509 EXPORT_SYMBOL_GPL(xhci_init_driver);
+5
drivers/usb/host/xhci.h
··· 1735 1735 int hcd_portnum; 1736 1736 struct xhci_hub *rhub; 1737 1737 struct xhci_port_cap *port_cap; 1738 + unsigned int lpm_incapable:1; 1738 1739 }; 1739 1740 1740 1741 struct xhci_hub { ··· 1944 1943 struct usb_host_endpoint *ep); 1945 1944 int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); 1946 1945 void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); 1946 + int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev, 1947 + struct usb_tt *tt, gfp_t mem_flags); 1947 1948 }; 1948 1949 1949 1950 #define XHCI_CFC_DELAY 10 ··· 2125 2122 struct usb_host_endpoint *ep); 2126 2123 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); 2127 2124 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); 2125 + int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 2126 + struct usb_tt *tt, gfp_t mem_flags); 2128 2127 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); 2129 2128 int xhci_ext_cap_init(struct xhci_hcd *xhci); 2130 2129
+1 -1
drivers/usb/misc/iowarrior.c
··· 814 814 break; 815 815 816 816 case USB_DEVICE_ID_CODEMERCS_IOW100: 817 - dev->report_size = 13; 817 + dev->report_size = 12; 818 818 break; 819 819 } 820 820 }
+9 -9
drivers/usb/misc/onboard_usb_hub.c
··· 27 27 28 28 #include "onboard_usb_hub.h" 29 29 30 + static void onboard_hub_attach_usb_driver(struct work_struct *work); 31 + 30 32 static struct usb_device_driver onboard_hub_usbdev_driver; 33 + static DECLARE_WORK(attach_usb_driver_work, onboard_hub_attach_usb_driver); 31 34 32 35 /************************** Platform driver **************************/ 33 36 ··· 48 45 bool is_powered_on; 49 46 bool going_away; 50 47 struct list_head udev_list; 51 - struct work_struct attach_usb_driver_work; 52 48 struct mutex lock; 53 49 }; 54 50 ··· 273 271 * This needs to be done deferred to avoid self-deadlocks on systems 274 272 * with nested onboard hubs. 275 273 */ 276 - INIT_WORK(&hub->attach_usb_driver_work, onboard_hub_attach_usb_driver); 277 - schedule_work(&hub->attach_usb_driver_work); 274 + schedule_work(&attach_usb_driver_work); 278 275 279 276 return 0; 280 277 } ··· 285 284 struct usb_device *udev; 286 285 287 286 hub->going_away = true; 288 - 289 - if (&hub->attach_usb_driver_work != current_work()) 290 - cancel_work_sync(&hub->attach_usb_driver_work); 291 287 292 288 mutex_lock(&hub->lock); 293 289 ··· 431 433 { 432 434 int ret; 433 435 434 - ret = platform_driver_register(&onboard_hub_driver); 436 + ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE); 435 437 if (ret) 436 438 return ret; 437 439 438 - ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE); 440 + ret = platform_driver_register(&onboard_hub_driver); 439 441 if (ret) 440 - platform_driver_unregister(&onboard_hub_driver); 442 + usb_deregister_device_driver(&onboard_hub_usbdev_driver); 441 443 442 444 return ret; 443 445 } ··· 447 449 { 448 450 usb_deregister_device_driver(&onboard_hub_usbdev_driver); 449 451 platform_driver_unregister(&onboard_hub_driver); 452 + 453 + cancel_work_sync(&attach_usb_driver_work); 450 454 } 451 455 module_exit(onboard_hub_exit); 452 456
+3 -1
drivers/usb/musb/omap2430.c
··· 411 411 memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res)); 412 412 413 413 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 414 - if (!res) 414 + if (!res) { 415 + ret = -EINVAL; 415 416 goto err2; 417 + } 416 418 417 419 musb_res[i].start = res->start; 418 420 musb_res[i].end = res->end;
+1
drivers/usb/serial/cp210x.c
··· 60 60 { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ 61 61 { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ 62 62 { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ 63 + { USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */ 63 64 { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ 64 65 { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ 65 66 { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
+17
drivers/usb/serial/option.c
··· 255 255 #define QUECTEL_PRODUCT_EP06 0x0306 256 256 #define QUECTEL_PRODUCT_EM05G 0x030a 257 257 #define QUECTEL_PRODUCT_EM060K 0x030b 258 + #define QUECTEL_PRODUCT_EM05G_CS 0x030c 259 + #define QUECTEL_PRODUCT_EM05CN_SG 0x0310 258 260 #define QUECTEL_PRODUCT_EM05G_SG 0x0311 261 + #define QUECTEL_PRODUCT_EM05CN 0x0312 262 + #define QUECTEL_PRODUCT_EM05G_GR 0x0313 263 + #define QUECTEL_PRODUCT_EM05G_RS 0x0314 259 264 #define QUECTEL_PRODUCT_EM12 0x0512 260 265 #define QUECTEL_PRODUCT_RM500Q 0x0800 261 266 #define QUECTEL_PRODUCT_RM520N 0x0801 267 + #define QUECTEL_PRODUCT_EC200U 0x0901 262 268 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 263 269 #define QUECTEL_PRODUCT_EC200T 0x6026 264 270 #define QUECTEL_PRODUCT_RM500K 0x7001 ··· 1165 1159 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), 1166 1160 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, 1167 1161 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, 1162 + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN, 0xff), 1163 + .driver_info = RSVD(6) | ZLP }, 1164 + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN_SG, 0xff), 1165 + .driver_info = RSVD(6) | ZLP }, 1168 1166 { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff), 1167 + .driver_info = RSVD(6) | ZLP }, 1168 + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff), 1169 + .driver_info = RSVD(6) | ZLP }, 1170 + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff), 1171 + .driver_info = RSVD(6) | ZLP }, 1172 + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff), 1169 1173 .driver_info = RSVD(6) | ZLP }, 1170 1174 { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff), 1171 1175 .driver_info = RSVD(6) | ZLP }, ··· 1196 1180 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 
QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) }, 1197 1181 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) }, 1198 1182 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, 1183 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) }, 1199 1184 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, 1200 1185 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, 1201 1186 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
+13
drivers/usb/storage/uas-detect.h
··· 116 116 if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2) 117 117 flags |= US_FL_NO_ATA_1X; 118 118 119 + /* 120 + * RTL9210-based enclosure from HIKSEMI, MD202 reportedly have issues 121 + * with UAS. This isn't distinguishable with just idVendor and 122 + * idProduct, use manufacturer and product too. 123 + * 124 + * Reported-by: Hongling Zeng <zenghongling@kylinos.cn> 125 + */ 126 + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda && 127 + le16_to_cpu(udev->descriptor.idProduct) == 0x9210 && 128 + (udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) && 129 + (udev->product && !strcmp(udev->product, "MD202"))) 130 + flags |= US_FL_IGNORE_UAS; 131 + 119 132 usb_stor_adjust_quirks(udev, &flags); 120 133 121 134 if (flags & US_FL_IGNORE_UAS) {
-7
drivers/usb/storage/unusual_uas.h
··· 83 83 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 84 84 US_FL_NO_REPORT_LUNS), 85 85 86 - /* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */ 87 - UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999, 88 - "Hiksemi", 89 - "External HDD", 90 - USB_SC_DEVICE, USB_PR_DEVICE, NULL, 91 - US_FL_IGNORE_UAS), 92 - 93 86 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */ 94 87 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, 95 88 "Initio Corporation",
+14 -8
drivers/usb/typec/altmodes/displayport.c
··· 419 419 [DP_PIN_ASSIGN_F] = "F", 420 420 }; 421 421 422 + /* 423 + * Helper function to extract a peripheral's currently supported 424 + * Pin Assignments from its DisplayPort alternate mode state. 425 + */ 426 + static u8 get_current_pin_assignments(struct dp_altmode *dp) 427 + { 428 + if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_UFP_U_AS_DFP_D) 429 + return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo); 430 + else 431 + return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo); 432 + } 433 + 422 434 static ssize_t 423 435 pin_assignment_store(struct device *dev, struct device_attribute *attr, 424 436 const char *buf, size_t size) ··· 457 445 goto out_unlock; 458 446 } 459 447 460 - if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D) 461 - assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo); 462 - else 463 - assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo); 448 + assignments = get_current_pin_assignments(dp); 464 449 465 450 if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) { 466 451 ret = -EINVAL; ··· 494 485 495 486 cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf)); 496 487 497 - if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D) 498 - assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo); 499 - else 500 - assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo); 488 + assignments = get_current_pin_assignments(dp); 501 489 502 490 for (i = 0; assignments; assignments >>= 1, i++) { 503 491 if (assignments & 1) {
+3 -4
drivers/usb/typec/tcpm/tcpm.c
··· 4594 4594 tcpm_set_state(port, ready_state(port), 0); 4595 4595 break; 4596 4596 case DR_SWAP_CHANGE_DR: 4597 - if (port->data_role == TYPEC_HOST) { 4598 - tcpm_unregister_altmodes(port); 4597 + tcpm_unregister_altmodes(port); 4598 + if (port->data_role == TYPEC_HOST) 4599 4599 tcpm_set_roles(port, true, port->pwr_role, 4600 4600 TYPEC_DEVICE); 4601 - } else { 4601 + else 4602 4602 tcpm_set_roles(port, true, port->pwr_role, 4603 4603 TYPEC_HOST); 4604 - } 4605 4604 tcpm_ams_finish(port); 4606 4605 tcpm_set_state(port, ready_state(port), 0); 4607 4606 break;
+21 -3
drivers/usb/typec/ucsi/ucsi.c
··· 187 187 188 188 struct ucsi_work { 189 189 struct delayed_work work; 190 + struct list_head node; 190 191 unsigned long delay; 191 192 unsigned int count; 192 193 struct ucsi_connector *con; ··· 203 202 mutex_lock(&con->lock); 204 203 205 204 if (!con->partner) { 205 + list_del(&uwork->node); 206 206 mutex_unlock(&con->lock); 207 207 kfree(uwork); 208 208 return; ··· 211 209 212 210 ret = uwork->cb(con); 213 211 214 - if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) 212 + if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) { 215 213 queue_delayed_work(con->wq, &uwork->work, uwork->delay); 216 - else 214 + } else { 215 + list_del(&uwork->node); 217 216 kfree(uwork); 217 + } 218 218 219 219 mutex_unlock(&con->lock); 220 220 } ··· 240 236 uwork->con = con; 241 237 uwork->cb = cb; 242 238 239 + list_add_tail(&uwork->node, &con->partner_tasks); 243 240 queue_delayed_work(con->wq, &uwork->work, delay); 244 241 245 242 return 0; ··· 1061 1056 INIT_WORK(&con->work, ucsi_handle_connector_change); 1062 1057 init_completion(&con->complete); 1063 1058 mutex_init(&con->lock); 1059 + INIT_LIST_HEAD(&con->partner_tasks); 1064 1060 con->num = index + 1; 1065 1061 con->ucsi = ucsi; 1066 1062 ··· 1426 1420 ucsi_unregister_altmodes(&ucsi->connector[i], 1427 1421 UCSI_RECIPIENT_CON); 1428 1422 ucsi_unregister_port_psy(&ucsi->connector[i]); 1429 - if (ucsi->connector[i].wq) 1423 + 1424 + if (ucsi->connector[i].wq) { 1425 + struct ucsi_work *uwork; 1426 + 1427 + mutex_lock(&ucsi->connector[i].lock); 1428 + /* 1429 + * queue delayed items immediately so they can execute 1430 + * and free themselves before the wq is destroyed 1431 + */ 1432 + list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node) 1433 + mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0); 1434 + mutex_unlock(&ucsi->connector[i].lock); 1430 1435 destroy_workqueue(ucsi->connector[i].wq); 1436 + } 1431 1437 typec_unregister_port(ucsi->connector[i].port); 1432 1438 } 1433 1439
+1
drivers/usb/typec/ucsi/ucsi.h
··· 322 322 struct work_struct work; 323 323 struct completion complete; 324 324 struct workqueue_struct *wq; 325 + struct list_head partner_tasks; 325 326 326 327 struct typec_port *port; 327 328 struct typec_partner *partner;
+5 -1
drivers/w1/w1.c
··· 1166 1166 /* remainder if it woke up early */ 1167 1167 unsigned long jremain = 0; 1168 1168 1169 + atomic_inc(&dev->refcnt); 1170 + 1169 1171 for (;;) { 1170 1172 1171 1173 if (!jremain && dev->search_count) { ··· 1195 1193 */ 1196 1194 mutex_unlock(&dev->list_mutex); 1197 1195 1198 - if (kthread_should_stop()) 1196 + if (kthread_should_stop()) { 1197 + __set_current_state(TASK_RUNNING); 1199 1198 break; 1199 + } 1200 1200 1201 1201 /* Only sleep when the search is active. */ 1202 1202 if (dev->search_count) {
+2 -3
drivers/w1/w1_int.c
··· 51 51 dev->search_count = w1_search_count; 52 52 dev->enable_pullup = w1_enable_pullup; 53 53 54 - /* 1 for w1_process to decrement 55 - * 1 for __w1_remove_master_device to decrement 54 + /* For __w1_remove_master_device to decrement 56 55 */ 57 - atomic_set(&dev->refcnt, 2); 56 + atomic_set(&dev->refcnt, 1); 58 57 59 58 INIT_LIST_HEAD(&dev->slist); 60 59 INIT_LIST_HEAD(&dev->async_list);
+10 -3
fs/btrfs/file.c
··· 3541 3541 struct extent_buffer *leaf = path->nodes[0]; 3542 3542 struct btrfs_file_extent_item *extent; 3543 3543 u64 extent_end; 3544 + u8 type; 3544 3545 3545 3546 if (path->slots[0] >= btrfs_header_nritems(leaf)) { 3546 3547 ret = btrfs_next_leaf(root, path); ··· 3597 3596 3598 3597 extent = btrfs_item_ptr(leaf, path->slots[0], 3599 3598 struct btrfs_file_extent_item); 3599 + type = btrfs_file_extent_type(leaf, extent); 3600 3600 3601 - if (btrfs_file_extent_disk_bytenr(leaf, extent) == 0 || 3602 - btrfs_file_extent_type(leaf, extent) == 3603 - BTRFS_FILE_EXTENT_PREALLOC) { 3601 + /* 3602 + * Can't access the extent's disk_bytenr field if this is an 3603 + * inline extent, since at that offset, it's where the extent 3604 + * data starts. 3605 + */ 3606 + if (type == BTRFS_FILE_EXTENT_PREALLOC || 3607 + (type == BTRFS_FILE_EXTENT_REG && 3608 + btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) { 3604 3609 /* 3605 3610 * Explicit hole or prealloc extent, search for delalloc. 3606 3611 * A prealloc extent is treated like a hole.
+17 -8
fs/btrfs/qgroup.c
··· 3367 3367 int err = -ENOMEM; 3368 3368 int ret = 0; 3369 3369 bool stopped = false; 3370 + bool did_leaf_rescans = false; 3370 3371 3371 3372 path = btrfs_alloc_path(); 3372 3373 if (!path) ··· 3388 3387 } 3389 3388 3390 3389 err = qgroup_rescan_leaf(trans, path); 3390 + did_leaf_rescans = true; 3391 3391 3392 3392 if (err > 0) 3393 3393 btrfs_commit_transaction(trans); ··· 3409 3407 mutex_unlock(&fs_info->qgroup_rescan_lock); 3410 3408 3411 3409 /* 3412 - * only update status, since the previous part has already updated the 3413 - * qgroup info. 3410 + * Only update status, since the previous part has already updated the 3411 + * qgroup info, and only if we did any actual work. This also prevents 3412 + * race with a concurrent quota disable, which has already set 3413 + * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at 3414 + * btrfs_quota_disable(). 3414 3415 */ 3415 - trans = btrfs_start_transaction(fs_info->quota_root, 1); 3416 - if (IS_ERR(trans)) { 3417 - err = PTR_ERR(trans); 3416 + if (did_leaf_rescans) { 3417 + trans = btrfs_start_transaction(fs_info->quota_root, 1); 3418 + if (IS_ERR(trans)) { 3419 + err = PTR_ERR(trans); 3420 + trans = NULL; 3421 + btrfs_err(fs_info, 3422 + "fail to start transaction for status update: %d", 3423 + err); 3424 + } 3425 + } else { 3418 3426 trans = NULL; 3419 - btrfs_err(fs_info, 3420 - "fail to start transaction for status update: %d", 3421 - err); 3422 3427 } 3423 3428 3424 3429 mutex_lock(&fs_info->qgroup_rescan_lock);
+25 -25
fs/btrfs/volumes.c
··· 2014 2014 return num_devices; 2015 2015 } 2016 2016 2017 + static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info, 2018 + struct block_device *bdev, int copy_num) 2019 + { 2020 + struct btrfs_super_block *disk_super; 2021 + const size_t len = sizeof(disk_super->magic); 2022 + const u64 bytenr = btrfs_sb_offset(copy_num); 2023 + int ret; 2024 + 2025 + disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr); 2026 + if (IS_ERR(disk_super)) 2027 + return; 2028 + 2029 + memset(&disk_super->magic, 0, len); 2030 + folio_mark_dirty(virt_to_folio(disk_super)); 2031 + btrfs_release_disk_super(disk_super); 2032 + 2033 + ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1); 2034 + if (ret) 2035 + btrfs_warn(fs_info, "error clearing superblock number %d (%d)", 2036 + copy_num, ret); 2037 + } 2038 + 2017 2039 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, 2018 2040 struct block_device *bdev, 2019 2041 const char *device_path) 2020 2042 { 2021 - struct btrfs_super_block *disk_super; 2022 2043 int copy_num; 2023 2044 2024 2045 if (!bdev) 2025 2046 return; 2026 2047 2027 2048 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) { 2028 - struct page *page; 2029 - int ret; 2030 - 2031 - disk_super = btrfs_read_dev_one_super(bdev, copy_num, false); 2032 - if (IS_ERR(disk_super)) 2033 - continue; 2034 - 2035 - if (bdev_is_zoned(bdev)) { 2049 + if (bdev_is_zoned(bdev)) 2036 2050 btrfs_reset_sb_log_zones(bdev, copy_num); 2037 - continue; 2038 - } 2039 - 2040 - memset(&disk_super->magic, 0, sizeof(disk_super->magic)); 2041 - 2042 - page = virt_to_page(disk_super); 2043 - set_page_dirty(page); 2044 - lock_page(page); 2045 - /* write_on_page() unlocks the page */ 2046 - ret = write_one_page(page); 2047 - if (ret) 2048 - btrfs_warn(fs_info, 2049 - "error clearing superblock number %d (%d)", 2050 - copy_num, ret); 2051 - btrfs_release_disk_super(disk_super); 2052 - 2051 + else 2052 + btrfs_scratch_superblock(fs_info, bdev, copy_num); 2053 2053 
} 2054 2054 2055 2055 /* Notify udev that device has changed */
+95 -146
fs/cifs/dfs_cache.c
··· 269 269 list_for_each_entry(t, &ce->tlist, list) { 270 270 seq_printf(m, " %s%s\n", 271 271 t->name, 272 - ce->tgthint == t ? " (target hint)" : ""); 272 + READ_ONCE(ce->tgthint) == t ? " (target hint)" : ""); 273 273 } 274 274 } 275 275 } ··· 321 321 cifs_dbg(FYI, "target list:\n"); 322 322 list_for_each_entry(t, &ce->tlist, list) { 323 323 cifs_dbg(FYI, " %s%s\n", t->name, 324 - ce->tgthint == t ? " (target hint)" : ""); 324 + READ_ONCE(ce->tgthint) == t ? " (target hint)" : ""); 325 325 } 326 326 } 327 327 ··· 427 427 /* Return target hint of a DFS cache entry */ 428 428 static inline char *get_tgt_name(const struct cache_entry *ce) 429 429 { 430 - struct cache_dfs_tgt *t = ce->tgthint; 430 + struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint); 431 431 432 432 return t ? t->name : ERR_PTR(-ENOENT); 433 433 } ··· 470 470 static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs, 471 471 struct cache_entry *ce, const char *tgthint) 472 472 { 473 + struct cache_dfs_tgt *target; 473 474 int i; 474 475 475 476 ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL); ··· 497 496 ce->numtgts++; 498 497 } 499 498 500 - ce->tgthint = list_first_entry_or_null(&ce->tlist, 501 - struct cache_dfs_tgt, list); 499 + target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt, 500 + list); 501 + WRITE_ONCE(ce->tgthint, target); 502 502 503 503 return 0; 504 504 } ··· 560 558 } 561 559 562 560 /* Add a new DFS cache entry */ 563 - static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs) 561 + static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs, 562 + int numrefs) 564 563 { 565 564 int rc; 566 565 struct cache_entry *ce; ··· 576 573 577 574 rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash); 578 575 if (rc) 579 - return rc; 576 + return ERR_PTR(rc); 580 577 581 578 ce = alloc_cache_entry(refs, numrefs); 582 579 if (IS_ERR(ce)) 583 - return PTR_ERR(ce); 580 + return ce; 584 581 585 582 
spin_lock(&cache_ttl_lock); 586 583 if (!cache_ttl) { ··· 597 594 598 595 atomic_inc(&cache_count); 599 596 600 - return 0; 597 + return ce; 601 598 } 602 599 603 600 /* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */ ··· 644 641 * 645 642 * Use whole path components in the match. Must be called with htable_rw_lock held. 646 643 * 644 + * Return cached entry if successful. 647 645 * Return ERR_PTR(-ENOENT) if the entry is not found. 646 + * Return error ptr otherwise. 648 647 */ 649 648 static struct cache_entry *lookup_cache_entry(const char *path) 650 649 { ··· 716 711 static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs, 717 712 int numrefs) 718 713 { 714 + struct cache_dfs_tgt *target; 715 + char *th = NULL; 719 716 int rc; 720 - char *s, *th = NULL; 721 717 722 718 WARN_ON(!rwsem_is_locked(&htable_rw_lock)); 723 719 724 - if (ce->tgthint) { 725 - s = ce->tgthint->name; 726 - th = kstrdup(s, GFP_ATOMIC); 720 + target = READ_ONCE(ce->tgthint); 721 + if (target) { 722 + th = kstrdup(target->name, GFP_ATOMIC); 727 723 if (!th) 728 724 return -ENOMEM; 729 725 } ··· 773 767 * 774 768 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to 775 769 * handle them properly. 770 + * 771 + * On success, return entry with acquired lock for reading, otherwise error ptr. 
776 772 */ 777 - static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path) 773 + static struct cache_entry *cache_refresh_path(const unsigned int xid, 774 + struct cifs_ses *ses, 775 + const char *path, 776 + bool force_refresh) 778 777 { 779 - int rc; 780 - struct cache_entry *ce; 781 778 struct dfs_info3_param *refs = NULL; 779 + struct cache_entry *ce; 782 780 int numrefs = 0; 783 - bool newent = false; 781 + int rc; 784 782 785 783 cifs_dbg(FYI, "%s: search path: %s\n", __func__, path); 786 784 787 - down_write(&htable_rw_lock); 785 + down_read(&htable_rw_lock); 788 786 789 787 ce = lookup_cache_entry(path); 790 788 if (!IS_ERR(ce)) { 791 - if (!cache_entry_expired(ce)) { 792 - dump_ce(ce); 793 - up_write(&htable_rw_lock); 794 - return 0; 795 - } 796 - } else { 797 - newent = true; 789 + if (!force_refresh && !cache_entry_expired(ce)) 790 + return ce; 791 + } else if (PTR_ERR(ce) != -ENOENT) { 792 + up_read(&htable_rw_lock); 793 + return ce; 798 794 } 799 795 800 796 /* 801 - * Either the entry was not found, or it is expired. 797 + * Unlock shared access as we don't want to hold any locks while getting 798 + * a new referral. The @ses used for performing the I/O could be 799 + * reconnecting and it acquires @htable_rw_lock to look up the dfs cache 800 + * in order to failover -- if necessary. 801 + */ 802 + up_read(&htable_rw_lock); 803 + 804 + /* 805 + * Either the entry was not found, or it is expired, or it is a forced 806 + * refresh. 802 807 * Request a new DFS referral in order to create or update a cache entry. 
803 808 */ 804 809 rc = get_dfs_referral(xid, ses, path, &refs, &numrefs); 805 - if (rc) 806 - goto out_unlock; 810 + if (rc) { 811 + ce = ERR_PTR(rc); 812 + goto out; 813 + } 807 814 808 815 dump_refs(refs, numrefs); 809 816 810 - if (!newent) { 811 - rc = update_cache_entry_locked(ce, refs, numrefs); 812 - goto out_unlock; 817 + down_write(&htable_rw_lock); 818 + /* Re-check as another task might have it added or refreshed already */ 819 + ce = lookup_cache_entry(path); 820 + if (!IS_ERR(ce)) { 821 + if (force_refresh || cache_entry_expired(ce)) { 822 + rc = update_cache_entry_locked(ce, refs, numrefs); 823 + if (rc) 824 + ce = ERR_PTR(rc); 825 + } 826 + } else if (PTR_ERR(ce) == -ENOENT) { 827 + ce = add_cache_entry_locked(refs, numrefs); 813 828 } 814 829 815 - rc = add_cache_entry_locked(refs, numrefs); 830 + if (IS_ERR(ce)) { 831 + up_write(&htable_rw_lock); 832 + goto out; 833 + } 816 834 817 - out_unlock: 818 - up_write(&htable_rw_lock); 835 + downgrade_write(&htable_rw_lock); 836 + out: 819 837 free_dfs_info_array(refs, numrefs); 820 - return rc; 838 + return ce; 821 839 } 822 840 823 841 /* ··· 908 878 } 909 879 it->it_path_consumed = t->path_consumed; 910 880 911 - if (ce->tgthint == t) 881 + if (READ_ONCE(ce->tgthint) == t) 912 882 list_add(&it->it_list, head); 913 883 else 914 884 list_add_tail(&it->it_list, head); ··· 961 931 if (IS_ERR(npath)) 962 932 return PTR_ERR(npath); 963 933 964 - rc = cache_refresh_path(xid, ses, npath); 965 - if (rc) 966 - goto out_free_path; 967 - 968 - down_read(&htable_rw_lock); 969 - 970 - ce = lookup_cache_entry(npath); 934 + ce = cache_refresh_path(xid, ses, npath, false); 971 935 if (IS_ERR(ce)) { 972 - up_read(&htable_rw_lock); 973 936 rc = PTR_ERR(ce); 974 937 goto out_free_path; 975 938 } ··· 1026 1003 } 1027 1004 1028 1005 /** 1029 - * dfs_cache_update_tgthint - update target hint of a DFS cache entry 1030 - * 1031 - * If it doesn't find the cache entry, then it will get a DFS referral for @path 1032 - * and 
create a new entry. 1033 - * 1034 - * In case the cache entry exists but expired, it will get a DFS referral 1035 - * for @path and then update the respective cache entry. 1036 - * 1037 - * @xid: syscall id 1038 - * @ses: smb session 1039 - * @cp: codepage 1040 - * @remap: type of character remapping for paths 1041 - * @path: path to lookup in DFS referral cache 1042 - * @it: DFS target iterator 1043 - * 1044 - * Return zero if the target hint was updated successfully, otherwise non-zero. 1045 - */ 1046 - int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses, 1047 - const struct nls_table *cp, int remap, const char *path, 1048 - const struct dfs_cache_tgt_iterator *it) 1049 - { 1050 - int rc; 1051 - const char *npath; 1052 - struct cache_entry *ce; 1053 - struct cache_dfs_tgt *t; 1054 - 1055 - npath = dfs_cache_canonical_path(path, cp, remap); 1056 - if (IS_ERR(npath)) 1057 - return PTR_ERR(npath); 1058 - 1059 - cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath); 1060 - 1061 - rc = cache_refresh_path(xid, ses, npath); 1062 - if (rc) 1063 - goto out_free_path; 1064 - 1065 - down_write(&htable_rw_lock); 1066 - 1067 - ce = lookup_cache_entry(npath); 1068 - if (IS_ERR(ce)) { 1069 - rc = PTR_ERR(ce); 1070 - goto out_unlock; 1071 - } 1072 - 1073 - t = ce->tgthint; 1074 - 1075 - if (likely(!strcasecmp(it->it_name, t->name))) 1076 - goto out_unlock; 1077 - 1078 - list_for_each_entry(t, &ce->tlist, list) { 1079 - if (!strcasecmp(t->name, it->it_name)) { 1080 - ce->tgthint = t; 1081 - cifs_dbg(FYI, "%s: new target hint: %s\n", __func__, 1082 - it->it_name); 1083 - break; 1084 - } 1085 - } 1086 - 1087 - out_unlock: 1088 - up_write(&htable_rw_lock); 1089 - out_free_path: 1090 - kfree(npath); 1091 - return rc; 1092 - } 1093 - 1094 - /** 1095 1006 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry 1096 1007 * without sending any requests to the currently connected server. 
1097 1008 * ··· 1049 1092 1050 1093 cifs_dbg(FYI, "%s: path: %s\n", __func__, path); 1051 1094 1052 - if (!down_write_trylock(&htable_rw_lock)) 1053 - return; 1095 + down_read(&htable_rw_lock); 1054 1096 1055 1097 ce = lookup_cache_entry(path); 1056 1098 if (IS_ERR(ce)) 1057 1099 goto out_unlock; 1058 1100 1059 - t = ce->tgthint; 1101 + t = READ_ONCE(ce->tgthint); 1060 1102 1061 1103 if (unlikely(!strcasecmp(it->it_name, t->name))) 1062 1104 goto out_unlock; 1063 1105 1064 1106 list_for_each_entry(t, &ce->tlist, list) { 1065 1107 if (!strcasecmp(t->name, it->it_name)) { 1066 - ce->tgthint = t; 1108 + WRITE_ONCE(ce->tgthint, t); 1067 1109 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__, 1068 1110 it->it_name); 1069 1111 break; ··· 1070 1114 } 1071 1115 1072 1116 out_unlock: 1073 - up_write(&htable_rw_lock); 1117 + up_read(&htable_rw_lock); 1074 1118 } 1075 1119 1076 1120 /** ··· 1276 1320 * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new 1277 1321 * target shares in @refs. 
1278 1322 */ 1279 - static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl, 1280 - const struct dfs_info3_param *refs, int numrefs) 1323 + static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server, 1324 + struct dfs_cache_tgt_list *old_tl, 1325 + struct dfs_cache_tgt_list *new_tl) 1281 1326 { 1282 - struct dfs_cache_tgt_iterator *it; 1283 - int i; 1327 + struct dfs_cache_tgt_iterator *oit, *nit; 1284 1328 1285 - for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) { 1286 - for (i = 0; i < numrefs; i++) { 1287 - if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it), 1288 - refs[i].node_name)) 1329 + for (oit = dfs_cache_get_tgt_iterator(old_tl); oit; 1330 + oit = dfs_cache_get_next_tgt(old_tl, oit)) { 1331 + for (nit = dfs_cache_get_tgt_iterator(new_tl); nit; 1332 + nit = dfs_cache_get_next_tgt(new_tl, nit)) { 1333 + if (target_share_equal(server, 1334 + dfs_cache_get_tgt_name(oit), 1335 + dfs_cache_get_tgt_name(nit))) 1289 1336 return; 1290 1337 } 1291 1338 } 1292 1339 1293 1340 cifs_dbg(FYI, "%s: no cached or matched targets. 
mark dfs share for reconnect.\n", __func__); 1294 - cifs_signal_cifsd_for_reconnect(tcon->ses->server, true); 1341 + cifs_signal_cifsd_for_reconnect(server, true); 1295 1342 } 1296 1343 1297 1344 /* Refresh dfs referral of tcon and mark it for reconnect if needed */ 1298 1345 static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh) 1299 1346 { 1300 - struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl); 1347 + struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl); 1348 + struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl); 1301 1349 struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses); 1302 1350 struct cifs_tcon *ipc = ses->tcon_ipc; 1303 - struct dfs_info3_param *refs = NULL; 1304 1351 bool needs_refresh = false; 1305 1352 struct cache_entry *ce; 1306 1353 unsigned int xid; 1307 - int numrefs = 0; 1308 1354 int rc = 0; 1309 1355 1310 1356 xid = get_xid(); ··· 1315 1357 ce = lookup_cache_entry(path); 1316 1358 needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce); 1317 1359 if (!IS_ERR(ce)) { 1318 - rc = get_targets(ce, &tl); 1319 - if (rc) 1320 - cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc); 1360 + rc = get_targets(ce, &old_tl); 1361 + cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc); 1321 1362 } 1322 1363 up_read(&htable_rw_lock); 1323 1364 ··· 1333 1376 } 1334 1377 spin_unlock(&ipc->tc_lock); 1335 1378 1336 - rc = get_dfs_referral(xid, ses, path, &refs, &numrefs); 1337 - if (!rc) { 1338 - /* Create or update a cache entry with the new referral */ 1339 - dump_refs(refs, numrefs); 1340 - 1341 - down_write(&htable_rw_lock); 1342 - ce = lookup_cache_entry(path); 1343 - if (IS_ERR(ce)) 1344 - add_cache_entry_locked(refs, numrefs); 1345 - else if (force_refresh || cache_entry_expired(ce)) 1346 - update_cache_entry_locked(ce, refs, numrefs); 1347 - up_write(&htable_rw_lock); 1348 - 1349 - mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs); 1379 + ce = 
cache_refresh_path(xid, ses, path, true); 1380 + if (!IS_ERR(ce)) { 1381 + rc = get_targets(ce, &new_tl); 1382 + up_read(&htable_rw_lock); 1383 + cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc); 1384 + mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl); 1350 1385 } 1351 1386 1352 1387 out: 1353 1388 free_xid(xid); 1354 - dfs_cache_free_tgts(&tl); 1355 - free_dfs_info_array(refs, numrefs); 1389 + dfs_cache_free_tgts(&old_tl); 1390 + dfs_cache_free_tgts(&new_tl); 1356 1391 return rc; 1357 1392 } 1358 1393
-3
fs/cifs/dfs_cache.h
··· 35 35 struct dfs_cache_tgt_list *tgt_list); 36 36 int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref, 37 37 struct dfs_cache_tgt_list *tgt_list); 38 - int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses, 39 - const struct nls_table *cp, int remap, const char *path, 40 - const struct dfs_cache_tgt_iterator *it); 41 38 void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it); 42 39 int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it, 43 40 struct dfs_info3_param *ref);
+9 -6
fs/cifs/smb2pdu.c
··· 4163 4163 (struct smb2_hdr *)rdata->iov[0].iov_base; 4164 4164 struct cifs_credits credits = { .value = 0, .instance = 0 }; 4165 4165 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], 4166 - .rq_nvec = 1, 4167 - .rq_pages = rdata->pages, 4168 - .rq_offset = rdata->page_offset, 4169 - .rq_npages = rdata->nr_pages, 4170 - .rq_pagesz = rdata->pagesz, 4171 - .rq_tailsz = rdata->tailsz }; 4166 + .rq_nvec = 1, }; 4167 + 4168 + if (rdata->got_bytes) { 4169 + rqst.rq_pages = rdata->pages; 4170 + rqst.rq_offset = rdata->page_offset; 4171 + rqst.rq_npages = rdata->nr_pages; 4172 + rqst.rq_pagesz = rdata->pagesz; 4173 + rqst.rq_tailsz = rdata->tailsz; 4174 + } 4172 4175 4173 4176 WARN_ONCE(rdata->server != mid->server, 4174 4177 "rdata server %p != mid server %p",
+39 -2
fs/ext4/xattr.c
··· 81 81 struct mb_cache_entry **); 82 82 static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value, 83 83 size_t value_count); 84 + static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value, 85 + size_t value_count); 84 86 static void ext4_xattr_rehash(struct ext4_xattr_header *); 85 87 86 88 static const struct xattr_handler * const ext4_xattr_handler_map[] = { ··· 472 470 tmp_data = cpu_to_le32(hash); 473 471 e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len, 474 472 &tmp_data, 1); 475 - if (e_hash != entry->e_hash) 476 - return -EFSCORRUPTED; 473 + /* All good? */ 474 + if (e_hash == entry->e_hash) 475 + return 0; 476 + 477 + /* 478 + * Not good. Maybe the entry hash was calculated 479 + * using the buggy signed char version? 480 + */ 481 + e_hash = ext4_xattr_hash_entry_signed(entry->e_name, entry->e_name_len, 482 + &tmp_data, 1); 483 + if (e_hash == entry->e_hash) 484 + return 0; 485 + 486 + /* Still no match - bad */ 487 + return -EFSCORRUPTED; 477 488 } 478 489 return 0; 479 490 } ··· 3097 3082 hash = (hash << NAME_HASH_SHIFT) ^ 3098 3083 (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ 3099 3084 *name++; 3085 + } 3086 + while (value_count--) { 3087 + hash = (hash << VALUE_HASH_SHIFT) ^ 3088 + (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^ 3089 + le32_to_cpu(*value++); 3090 + } 3091 + return cpu_to_le32(hash); 3092 + } 3093 + 3094 + /* 3095 + * ext4_xattr_hash_entry_signed() 3096 + * 3097 + * Compute the hash of an extended attribute incorrectly. 3098 + */ 3099 + static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value, size_t value_count) 3100 + { 3101 + __u32 hash = 0; 3102 + 3103 + while (name_len--) { 3104 + hash = (hash << NAME_HASH_SHIFT) ^ 3105 + (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ 3106 + (signed char)*name++; 3100 3107 } 3101 3108 while (value_count--) { 3102 3109 hash = (hash << VALUE_HASH_SHIFT) ^
+10 -1
fs/gfs2/log.c
··· 80 80 brelse(bd->bd_bh); 81 81 } 82 82 83 + static int __gfs2_writepage(struct page *page, struct writeback_control *wbc, 84 + void *data) 85 + { 86 + struct address_space *mapping = data; 87 + int ret = mapping->a_ops->writepage(page, wbc); 88 + mapping_set_error(mapping, ret); 89 + return ret; 90 + } 91 + 83 92 /** 84 93 * gfs2_ail1_start_one - Start I/O on a transaction 85 94 * @sdp: The superblock ··· 140 131 if (!mapping) 141 132 continue; 142 133 spin_unlock(&sdp->sd_ail_lock); 143 - ret = filemap_fdatawrite_wbc(mapping, wbc); 134 + ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping); 144 135 if (need_resched()) { 145 136 blk_finish_plug(plug); 146 137 cond_resched();
+22
fs/zonefs/super.c
··· 442 442 data_size = zonefs_check_zone_condition(inode, zone, 443 443 false, false); 444 444 } 445 + } else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO && 446 + data_size > isize) { 447 + /* Do not expose garbage data */ 448 + data_size = isize; 445 449 } 446 450 447 451 /* ··· 808 804 bio_set_polled(bio, iocb); 809 805 810 806 ret = submit_bio_wait(bio); 807 + 808 + /* 809 + * If the file zone was written underneath the file system, the zone 810 + * write pointer may not be where we expect it to be, but the zone 811 + * append write can still succeed. So check manually that we wrote where 812 + * we intended to, that is, at zi->i_wpoffset. 813 + */ 814 + if (!ret) { 815 + sector_t wpsector = 816 + zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT); 817 + 818 + if (bio->bi_iter.bi_sector != wpsector) { 819 + zonefs_warn(inode->i_sb, 820 + "Corrupted write pointer %llu for zone at %llu\n", 821 + wpsector, zi->i_zsector); 822 + ret = -EIO; 823 + } 824 + } 811 825 812 826 zonefs_file_write_dio_end_io(iocb, size, ret, 0); 813 827 trace_zonefs_file_dio_append(inode, size, ret);
+1 -1
include/linux/bpf.h
··· 1832 1832 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); 1833 1833 void bpf_prog_put(struct bpf_prog *prog); 1834 1834 1835 - void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); 1835 + void bpf_prog_free_id(struct bpf_prog *prog); 1836 1836 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); 1837 1837 1838 1838 struct btf_field *btf_record_find(const struct btf_record *rec,
+4 -4
include/linux/firmware/xlnx-zynqmp.h
··· 545 545 const u64 address, 546 546 const enum zynqmp_pm_request_ack ack); 547 547 int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode); 548 - int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1); 549 - int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1); 548 + int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode); 549 + int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode); 550 550 int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value); 551 551 int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config, 552 552 u32 value); ··· 845 845 return -ENODEV; 846 846 } 847 847 848 - static inline int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1) 848 + static inline int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode) 849 849 { 850 850 return -ENODEV; 851 851 } 852 852 853 - static inline int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1) 853 + static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode) 854 854 { 855 855 return -ENODEV; 856 856 }
+2 -2
include/linux/soc/ti/omap1-io.h
··· 5 5 #ifndef __ASSEMBLER__ 6 6 #include <linux/types.h> 7 7 8 - #ifdef CONFIG_ARCH_OMAP1_ANY 8 + #ifdef CONFIG_ARCH_OMAP1 9 9 /* 10 10 * NOTE: Please use ioremap + __raw_read/write where possible instead of these 11 11 */ ··· 15 15 extern void omap_writeb(u8 v, u32 pa); 16 16 extern void omap_writew(u16 v, u32 pa); 17 17 extern void omap_writel(u32 v, u32 pa); 18 - #else 18 + #elif defined(CONFIG_COMPILE_TEST) 19 19 static inline u8 omap_readb(u32 pa) { return 0; } 20 20 static inline u16 omap_readw(u32 pa) { return 0; } 21 21 static inline u32 omap_readl(u32 pa) { return 0; }
+10 -8
include/linux/usb.h
··· 267 267 } 268 268 269 269 /** 270 - * usb_set_intfdata() - associate driver-specific data with the interface 271 - * @intf: the usb interface 272 - * @data: pointer to the device priv structure or %NULL 270 + * usb_set_intfdata() - associate driver-specific data with an interface 271 + * @intf: USB interface 272 + * @data: driver data 273 273 * 274 - * Drivers should use this function in their probe() to associate their 275 - * driver-specific data with the usb interface. 274 + * Drivers can use this function in their probe() callbacks to associate 275 + * driver-specific data with an interface. 276 276 * 277 - * When disconnecting, the core will take care of setting @intf back to %NULL, 278 - * so no actions are needed on the driver side. The interface should not be set 279 - * to %NULL before all actions completed (e.g. no outsanding URB remaining). 277 + * Note that there is generally no need to clear the driver-data pointer even 278 + * if some drivers do so for historical or implementation-specific reasons. 280 279 */ 281 280 static inline void usb_set_intfdata(struct usb_interface *intf, void *data) 282 281 { ··· 773 774 extern int usb_acpi_set_power_state(struct usb_device *hdev, int index, 774 775 bool enable); 775 776 extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index); 777 + extern int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index); 776 778 #else 777 779 static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index, 778 780 bool enable) { return 0; } 779 781 static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index) 780 782 { return true; } 783 + static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index) 784 + { return 0; } 781 785 #endif 782 786 783 787 /* USB autosuspend and autoresume */
-4
include/net/mac80211.h
··· 1832 1832 * @drv_priv: data area for driver use, will always be aligned to 1833 1833 * sizeof(void \*). 1834 1834 * @txq: the multicast data TX queue 1835 - * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped, 1836 - * protected by fq->lock. 1837 1835 * @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see 1838 1836 * &enum ieee80211_offload_flags. 1839 1837 * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled. ··· 1860 1862 1861 1863 bool probe_req_reg; 1862 1864 bool rx_mcast_action_reg; 1863 - 1864 - bool txqs_stopped[IEEE80211_NUM_ACS]; 1865 1865 1866 1866 struct ieee80211_vif *mbssid_tx_vif; 1867 1867
+7
include/net/sch_generic.h
··· 1288 1288 1289 1289 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb)); 1290 1290 1291 + /* Make sure qdisc is no longer in SCHED state. */ 1292 + static inline void qdisc_synchronize(const struct Qdisc *q) 1293 + { 1294 + while (test_bit(__QDISC_STATE_SCHED, &q->state)) 1295 + msleep(1); 1296 + } 1297 + 1291 1298 #endif
+1 -1
include/soc/bcm2835/raspberrypi-firmware.h
··· 170 170 171 171 #define RPI_FIRMWARE_CLK_RATE_REQUEST(_id) \ 172 172 { \ 173 - .id = _id, \ 173 + .id = cpu_to_le32(_id), \ 174 174 } 175 175 176 176 #if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
+1 -1
init/Kconfig
··· 204 204 appended after any matching localversion* files, and after the value 205 205 set in CONFIG_LOCALVERSION. 206 206 207 - (The actual string used here is the first eight characters produced 207 + (The actual string used here is the first 12 characters produced 208 208 by running the command: 209 209 210 210 $ git rev-parse --verify HEAD
-1
init/version-timestamp.c
··· 2 2 3 3 #include <generated/compile.h> 4 4 #include <generated/utsrelease.h> 5 - #include <linux/version.h> 6 5 #include <linux/proc_ns.h> 7 6 #include <linux/refcount.h> 8 7 #include <linux/uts.h>
+2 -2
io_uring/io_uring.c
··· 3674 3674 3675 3675 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER 3676 3676 && !(ctx->flags & IORING_SETUP_R_DISABLED)) 3677 - ctx->submitter_task = get_task_struct(current); 3677 + WRITE_ONCE(ctx->submitter_task, get_task_struct(current)); 3678 3678 3679 3679 file = io_uring_get_file(ctx); 3680 3680 if (IS_ERR(file)) { ··· 3868 3868 return -EBADFD; 3869 3869 3870 3870 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) 3871 - ctx->submitter_task = get_task_struct(current); 3871 + WRITE_ONCE(ctx->submitter_task, get_task_struct(current)); 3872 3872 3873 3873 if (ctx->restrictions.registered) 3874 3874 ctx->restricted = 1;
+98 -64
io_uring/msg_ring.c
··· 25 25 u32 flags; 26 26 }; 27 27 28 - void io_msg_ring_cleanup(struct io_kiocb *req) 29 - { 30 - struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); 31 - 32 - if (WARN_ON_ONCE(!msg->src_file)) 33 - return; 34 - 35 - fput(msg->src_file); 36 - msg->src_file = NULL; 37 - } 38 - 39 - static void io_msg_tw_complete(struct callback_head *head) 40 - { 41 - struct io_msg *msg = container_of(head, struct io_msg, tw); 42 - struct io_kiocb *req = cmd_to_io_kiocb(msg); 43 - struct io_ring_ctx *target_ctx = req->file->private_data; 44 - int ret = 0; 45 - 46 - if (current->flags & PF_EXITING) 47 - ret = -EOWNERDEAD; 48 - else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0)) 49 - ret = -EOVERFLOW; 50 - 51 - if (ret < 0) 52 - req_set_fail(req); 53 - io_req_queue_tw_complete(req, ret); 54 - } 55 - 56 - static int io_msg_ring_data(struct io_kiocb *req) 57 - { 58 - struct io_ring_ctx *target_ctx = req->file->private_data; 59 - struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); 60 - 61 - if (msg->src_fd || msg->dst_fd || msg->flags) 62 - return -EINVAL; 63 - 64 - if (target_ctx->task_complete && current != target_ctx->submitter_task) { 65 - init_task_work(&msg->tw, io_msg_tw_complete); 66 - if (task_work_add(target_ctx->submitter_task, &msg->tw, 67 - TWA_SIGNAL_NO_IPI)) 68 - return -EOWNERDEAD; 69 - 70 - atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags); 71 - return IOU_ISSUE_SKIP_COMPLETE; 72 - } 73 - 74 - if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0)) 75 - return 0; 76 - 77 - return -EOVERFLOW; 78 - } 79 - 80 - static void io_double_unlock_ctx(struct io_ring_ctx *octx, 81 - unsigned int issue_flags) 28 + static void io_double_unlock_ctx(struct io_ring_ctx *octx) 82 29 { 83 30 mutex_unlock(&octx->uring_lock); 84 31 } ··· 45 98 } 46 99 mutex_lock(&octx->uring_lock); 47 100 return 0; 101 + } 102 + 103 + void io_msg_ring_cleanup(struct io_kiocb *req) 104 + { 105 + struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); 106 + 107 + 
if (WARN_ON_ONCE(!msg->src_file)) 108 + return; 109 + 110 + fput(msg->src_file); 111 + msg->src_file = NULL; 112 + } 113 + 114 + static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx) 115 + { 116 + if (!target_ctx->task_complete) 117 + return false; 118 + return current != target_ctx->submitter_task; 119 + } 120 + 121 + static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func) 122 + { 123 + struct io_ring_ctx *ctx = req->file->private_data; 124 + struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); 125 + struct task_struct *task = READ_ONCE(ctx->submitter_task); 126 + 127 + if (unlikely(!task)) 128 + return -EOWNERDEAD; 129 + 130 + init_task_work(&msg->tw, func); 131 + if (task_work_add(ctx->submitter_task, &msg->tw, TWA_SIGNAL)) 132 + return -EOWNERDEAD; 133 + 134 + return IOU_ISSUE_SKIP_COMPLETE; 135 + } 136 + 137 + static void io_msg_tw_complete(struct callback_head *head) 138 + { 139 + struct io_msg *msg = container_of(head, struct io_msg, tw); 140 + struct io_kiocb *req = cmd_to_io_kiocb(msg); 141 + struct io_ring_ctx *target_ctx = req->file->private_data; 142 + int ret = 0; 143 + 144 + if (current->flags & PF_EXITING) { 145 + ret = -EOWNERDEAD; 146 + } else { 147 + /* 148 + * If the target ring is using IOPOLL mode, then we need to be 149 + * holding the uring_lock for posting completions. Other ring 150 + * types rely on the regular completion locking, which is 151 + * handled while posting. 
152 + */ 153 + if (target_ctx->flags & IORING_SETUP_IOPOLL) 154 + mutex_lock(&target_ctx->uring_lock); 155 + if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0)) 156 + ret = -EOVERFLOW; 157 + if (target_ctx->flags & IORING_SETUP_IOPOLL) 158 + mutex_unlock(&target_ctx->uring_lock); 159 + } 160 + 161 + if (ret < 0) 162 + req_set_fail(req); 163 + io_req_queue_tw_complete(req, ret); 164 + } 165 + 166 + static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) 167 + { 168 + struct io_ring_ctx *target_ctx = req->file->private_data; 169 + struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); 170 + int ret; 171 + 172 + if (msg->src_fd || msg->dst_fd || msg->flags) 173 + return -EINVAL; 174 + if (target_ctx->flags & IORING_SETUP_R_DISABLED) 175 + return -EBADFD; 176 + 177 + if (io_msg_need_remote(target_ctx)) 178 + return io_msg_exec_remote(req, io_msg_tw_complete); 179 + 180 + ret = -EOVERFLOW; 181 + if (target_ctx->flags & IORING_SETUP_IOPOLL) { 182 + if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) 183 + return -EAGAIN; 184 + if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0)) 185 + ret = 0; 186 + io_double_unlock_ctx(target_ctx); 187 + } else { 188 + if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0)) 189 + ret = 0; 190 + } 191 + return ret; 48 192 } 49 193 50 194 static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) ··· 186 148 if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0)) 187 149 ret = -EOVERFLOW; 188 150 out_unlock: 189 - io_double_unlock_ctx(target_ctx, issue_flags); 151 + io_double_unlock_ctx(target_ctx); 190 152 return ret; 191 153 } 192 154 ··· 212 174 213 175 if (target_ctx == ctx) 214 176 return -EINVAL; 177 + if (target_ctx->flags & IORING_SETUP_R_DISABLED) 178 + return -EBADFD; 215 179 if (!src_file) { 216 180 src_file = io_msg_grab_file(req, issue_flags); 217 181 if (!src_file) ··· 222 182 req->flags |= REQ_F_NEED_CLEANUP; 223 183 } 224 184 225 - if 
(target_ctx->task_complete && current != target_ctx->submitter_task) { 226 - init_task_work(&msg->tw, io_msg_tw_fd_complete); 227 - if (task_work_add(target_ctx->submitter_task, &msg->tw, 228 - TWA_SIGNAL)) 229 - return -EOWNERDEAD; 230 - 231 - return IOU_ISSUE_SKIP_COMPLETE; 232 - } 185 + if (io_msg_need_remote(target_ctx)) 186 + return io_msg_exec_remote(req, io_msg_tw_fd_complete); 233 187 return io_msg_install_complete(req, issue_flags); 234 188 } 235 189 ··· 258 224 259 225 switch (msg->cmd) { 260 226 case IORING_MSG_DATA: 261 - ret = io_msg_ring_data(req); 227 + ret = io_msg_ring_data(req, issue_flags); 262 228 break; 263 229 case IORING_MSG_SEND_FD: 264 230 ret = io_msg_send_fd(req, issue_flags);
+5 -1
io_uring/poll.c
··· 283 283 * to the waitqueue, so if we get nothing back, we 284 284 * should be safe and attempt a reissue. 285 285 */ 286 - if (unlikely(!req->cqe.res)) 286 + if (unlikely(!req->cqe.res)) { 287 + /* Multishot armed need not reissue */ 288 + if (!(req->apoll_events & EPOLLONESHOT)) 289 + continue; 287 290 return IOU_POLL_REISSUE; 291 + } 288 292 } 289 293 if (req->apoll_events & EPOLLONESHOT) 290 294 return IOU_POLL_DONE;
+2 -2
kernel/bpf/hashtab.c
··· 152 152 { 153 153 unsigned long flags; 154 154 155 - hash = hash & HASHTAB_MAP_LOCK_MASK; 155 + hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); 156 156 157 157 preempt_disable(); 158 158 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { ··· 171 171 struct bucket *b, u32 hash, 172 172 unsigned long flags) 173 173 { 174 - hash = hash & HASHTAB_MAP_LOCK_MASK; 174 + hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); 175 175 raw_spin_unlock_irqrestore(&b->raw_lock, flags); 176 176 __this_cpu_dec(*(htab->map_locked[hash])); 177 177 preempt_enable();
-3
kernel/bpf/offload.c
··· 216 216 if (offload->dev_state) 217 217 offload->offdev->ops->destroy(prog); 218 218 219 - /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */ 220 - bpf_prog_free_id(prog, true); 221 - 222 219 list_del_init(&offload->offloads); 223 220 kfree(offload); 224 221 prog->aux->offload = NULL;
+7 -17
kernel/bpf/syscall.c
··· 1972 1972 return; 1973 1973 if (audit_enabled == AUDIT_OFF) 1974 1974 return; 1975 - if (op == BPF_AUDIT_LOAD) 1975 + if (!in_irq() && !irqs_disabled()) 1976 1976 ctx = audit_context(); 1977 1977 ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF); 1978 1978 if (unlikely(!ab)) ··· 2001 2001 return id > 0 ? 0 : id; 2002 2002 } 2003 2003 2004 - void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) 2004 + void bpf_prog_free_id(struct bpf_prog *prog) 2005 2005 { 2006 2006 unsigned long flags; 2007 2007 ··· 2013 2013 if (!prog->aux->id) 2014 2014 return; 2015 2015 2016 - if (do_idr_lock) 2017 - spin_lock_irqsave(&prog_idr_lock, flags); 2018 - else 2019 - __acquire(&prog_idr_lock); 2020 - 2016 + spin_lock_irqsave(&prog_idr_lock, flags); 2021 2017 idr_remove(&prog_idr, prog->aux->id); 2022 2018 prog->aux->id = 0; 2023 - 2024 - if (do_idr_lock) 2025 - spin_unlock_irqrestore(&prog_idr_lock, flags); 2026 - else 2027 - __release(&prog_idr_lock); 2019 + spin_unlock_irqrestore(&prog_idr_lock, flags); 2028 2020 } 2029 2021 2030 2022 static void __bpf_prog_put_rcu(struct rcu_head *rcu) ··· 2059 2067 prog = aux->prog; 2060 2068 perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0); 2061 2069 bpf_audit_prog(prog, BPF_AUDIT_UNLOAD); 2070 + bpf_prog_free_id(prog); 2062 2071 __bpf_prog_put_noref(prog, true); 2063 2072 } 2064 2073 2065 - static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) 2074 + static void __bpf_prog_put(struct bpf_prog *prog) 2066 2075 { 2067 2076 struct bpf_prog_aux *aux = prog->aux; 2068 2077 2069 2078 if (atomic64_dec_and_test(&aux->refcnt)) { 2070 - /* bpf_prog_free_id() must be called first */ 2071 - bpf_prog_free_id(prog, do_idr_lock); 2072 - 2073 2079 if (in_irq() || irqs_disabled()) { 2074 2080 INIT_WORK(&aux->work, bpf_prog_put_deferred); 2075 2081 schedule_work(&aux->work); ··· 2079 2089 2080 2090 void bpf_prog_put(struct bpf_prog *prog) 2081 2091 { 2082 - __bpf_prog_put(prog, true); 2092 + __bpf_prog_put(prog); 2083 2093 } 
2084 2094 EXPORT_SYMBOL_GPL(bpf_prog_put); 2085 2095
+9 -1
kernel/bpf/verifier.c
··· 2748 2748 */ 2749 2749 if (insn->src_reg == 0 && is_callback_calling_function(insn->imm)) 2750 2750 return -ENOTSUPP; 2751 + /* kfunc with imm==0 is invalid and fixup_kfunc_call will 2752 + * catch this error later. Make backtracking conservative 2753 + * with ENOTSUPP. 2754 + */ 2755 + if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) 2756 + return -ENOTSUPP; 2751 2757 /* regular helper call sets R0 */ 2752 2758 *reg_mask &= ~1; 2753 2759 if (*reg_mask & 0x3f) { ··· 3295 3289 bool sanitize = reg && is_spillable_regtype(reg->type); 3296 3290 3297 3291 for (i = 0; i < size; i++) { 3298 - if (state->stack[spi].slot_type[i] == STACK_INVALID) { 3292 + u8 type = state->stack[spi].slot_type[i]; 3293 + 3294 + if (type != STACK_MISC && type != STACK_ZERO) { 3299 3295 sanitize = true; 3300 3296 break; 3301 3297 }
+2
kernel/gen_kheaders.sh
··· 14 14 arch/$SRCARCH/include/ 15 15 " 16 16 17 + type cpio > /dev/null 18 + 17 19 # Support incremental builds by skipping archive generation 18 20 # if timestamps of files being archived are not changed. 19 21
+2
kernel/printk/printk.c
··· 123 123 { 124 124 return srcu_read_lock_held(&console_srcu); 125 125 } 126 + EXPORT_SYMBOL(console_srcu_read_lock_is_held); 126 127 #endif 127 128 128 129 enum devkmsg_log_bits { ··· 1892 1891 /** 1893 1892 * console_lock_spinning_disable_and_check - mark end of code where another 1894 1893 * thread was able to busy wait and check if there is a waiter 1894 + * @cookie: cookie returned from console_srcu_read_lock() 1895 1895 * 1896 1896 * This is called at the end of the section where spinning is allowed. 1897 1897 * It has two functions. First, it is a signal that it is no longer
+8 -2
kernel/sched/core.c
··· 8290 8290 if (retval) 8291 8291 goto out_put_task; 8292 8292 8293 + /* 8294 + * With non-SMP configs, user_cpus_ptr/user_mask isn't used and 8295 + * alloc_user_cpus_ptr() returns NULL. 8296 + */ 8293 8297 user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE); 8294 - if (IS_ENABLED(CONFIG_SMP) && !user_mask) { 8298 + if (user_mask) { 8299 + cpumask_copy(user_mask, in_mask); 8300 + } else if (IS_ENABLED(CONFIG_SMP)) { 8295 8301 retval = -ENOMEM; 8296 8302 goto out_put_task; 8297 8303 } 8298 - cpumask_copy(user_mask, in_mask); 8304 + 8299 8305 ac = (struct affinity_context){ 8300 8306 .new_mask = in_mask, 8301 8307 .user_mask = user_mask,
+26 -20
kernel/sched/fair.c
··· 7229 7229 eenv_task_busy_time(&eenv, p, prev_cpu); 7230 7230 7231 7231 for (; pd; pd = pd->next) { 7232 + unsigned long util_min = p_util_min, util_max = p_util_max; 7232 7233 unsigned long cpu_cap, cpu_thermal_cap, util; 7233 7234 unsigned long cur_delta, max_spare_cap = 0; 7234 7235 unsigned long rq_util_min, rq_util_max; 7235 - unsigned long util_min, util_max; 7236 7236 unsigned long prev_spare_cap = 0; 7237 7237 int max_spare_cap_cpu = -1; 7238 7238 unsigned long base_energy; ··· 7251 7251 eenv.pd_cap = 0; 7252 7252 7253 7253 for_each_cpu(cpu, cpus) { 7254 + struct rq *rq = cpu_rq(cpu); 7255 + 7254 7256 eenv.pd_cap += cpu_thermal_cap; 7255 7257 7256 7258 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) ··· 7271 7269 * much capacity we can get out of the CPU; this is 7272 7270 * aligned with sched_cpu_util(). 7273 7271 */ 7274 - if (uclamp_is_used()) { 7275 - if (uclamp_rq_is_idle(cpu_rq(cpu))) { 7276 - util_min = p_util_min; 7277 - util_max = p_util_max; 7278 - } else { 7279 - /* 7280 - * Open code uclamp_rq_util_with() except for 7281 - * the clamp() part. Ie: apply max aggregation 7282 - * only. util_fits_cpu() logic requires to 7283 - * operate on non clamped util but must use the 7284 - * max-aggregated uclamp_{min, max}. 7285 - */ 7286 - rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN); 7287 - rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX); 7272 + if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) { 7273 + /* 7274 + * Open code uclamp_rq_util_with() except for 7275 + * the clamp() part. Ie: apply max aggregation 7276 + * only. util_fits_cpu() logic requires to 7277 + * operate on non clamped util but must use the 7278 + * max-aggregated uclamp_{min, max}. 
7279 + */ 7280 + rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN); 7281 + rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX); 7288 7282 7289 - util_min = max(rq_util_min, p_util_min); 7290 - util_max = max(rq_util_max, p_util_max); 7291 - } 7283 + util_min = max(rq_util_min, p_util_min); 7284 + util_max = max(rq_util_max, p_util_max); 7292 7285 } 7293 7286 if (!util_fits_cpu(util, util_min, util_max, cpu)) 7294 7287 continue; ··· 8868 8871 * * Thermal pressure will impact all cpus in this perf domain 8869 8872 * equally. 8870 8873 */ 8871 - if (static_branch_unlikely(&sched_asym_cpucapacity)) { 8874 + if (sched_energy_enabled()) { 8872 8875 unsigned long inv_cap = capacity_orig - thermal_load_avg(rq); 8873 - struct perf_domain *pd = rcu_dereference(rq->rd->pd); 8876 + struct perf_domain *pd; 8874 8877 8878 + rcu_read_lock(); 8879 + 8880 + pd = rcu_dereference(rq->rd->pd); 8875 8881 rq->cpu_capacity_inverted = 0; 8876 8882 8877 8883 for (; pd; pd = pd->next) { 8878 8884 struct cpumask *pd_span = perf_domain_span(pd); 8879 8885 unsigned long pd_cap_orig, pd_cap; 8886 + 8887 + /* We can't be inverted against our own pd */ 8888 + if (cpumask_test_cpu(cpu_of(rq), pd_span)) 8889 + continue; 8880 8890 8881 8891 cpu = cpumask_any(pd_span); 8882 8892 pd_cap_orig = arch_scale_cpu_capacity(cpu); ··· 8909 8905 break; 8910 8906 } 8911 8907 } 8908 + 8909 + rcu_read_unlock(); 8912 8910 } 8913 8911 8914 8912 trace_sched_cpu_capacity_tp(rq);
+2
kernel/sys.c
··· 1442 1442 1443 1443 if (resource >= RLIM_NLIMITS) 1444 1444 return -EINVAL; 1445 + resource = array_index_nospec(resource, RLIM_NLIMITS); 1446 + 1445 1447 if (new_rlim) { 1446 1448 if (new_rlim->rlim_cur > new_rlim->rlim_max) 1447 1449 return -EINVAL;
+3
kernel/trace/bpf_trace.c
··· 848 848 return -EPERM; 849 849 if (unlikely(!nmi_uaccess_okay())) 850 850 return -EPERM; 851 + /* Task should not be pid=1 to avoid kernel panic. */ 852 + if (unlikely(is_global_init(current))) 853 + return -EPERM; 851 854 852 855 if (irqs_disabled()) { 853 856 /* Do an early check on signal validity. Otherwise,
+15 -10
lib/scatterlist.c
··· 470 470 return -EOPNOTSUPP; 471 471 472 472 if (sgt_append->prv) { 473 + unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) + 474 + sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE; 475 + 473 476 if (WARN_ON(offset)) 474 477 return -EINVAL; 475 478 476 479 /* Merge contiguous pages into the last SG */ 477 480 prv_len = sgt_append->prv->length; 478 - last_pg = sg_page(sgt_append->prv); 479 - while (n_pages && pages_are_mergeable(pages[0], last_pg)) { 480 - if (sgt_append->prv->length + PAGE_SIZE > max_segment) 481 - break; 482 - sgt_append->prv->length += PAGE_SIZE; 483 - last_pg = pages[0]; 484 - pages++; 485 - n_pages--; 481 + if (page_to_pfn(pages[0]) == next_pfn) { 482 + last_pg = pfn_to_page(next_pfn - 1); 483 + while (n_pages && pages_are_mergeable(pages[0], last_pg)) { 484 + if (sgt_append->prv->length + PAGE_SIZE > max_segment) 485 + break; 486 + sgt_append->prv->length += PAGE_SIZE; 487 + last_pg = pages[0]; 488 + pages++; 489 + n_pages--; 490 + } 491 + if (!n_pages) 492 + goto out; 486 493 } 487 - if (!n_pages) 488 - goto out; 489 494 } 490 495 491 496 /* compute number of contiguous chunks */
+2
mm/slab.c
··· 2211 2211 raw_spin_unlock_irq(&n->list_lock); 2212 2212 slab_destroy(cache, slab); 2213 2213 nr_freed++; 2214 + 2215 + cond_resched(); 2214 2216 } 2215 2217 out: 2216 2218 return nr_freed;
+14 -4
net/bluetooth/hci_conn.c
··· 821 821 static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis) 822 822 { 823 823 struct iso_list_data *d; 824 + int ret; 824 825 825 826 bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis); 826 827 ··· 832 831 d->big = big; 833 832 d->bis = bis; 834 833 835 - return hci_cmd_sync_queue(hdev, terminate_big_sync, d, 836 - terminate_big_destroy); 834 + ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d, 835 + terminate_big_destroy); 836 + if (ret) 837 + kfree(d); 838 + 839 + return ret; 837 840 } 838 841 839 842 static int big_terminate_sync(struct hci_dev *hdev, void *data) ··· 862 857 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle) 863 858 { 864 859 struct iso_list_data *d; 860 + int ret; 865 861 866 862 bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle); 867 863 ··· 873 867 d->big = big; 874 868 d->sync_handle = sync_handle; 875 869 876 - return hci_cmd_sync_queue(hdev, big_terminate_sync, d, 877 - terminate_big_destroy); 870 + ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d, 871 + terminate_big_destroy); 872 + if (ret) 873 + kfree(d); 874 + 875 + return ret; 878 876 } 879 877 880 878 /* Cleanup BIS connection
+4 -1
net/bluetooth/hci_event.c
··· 3848 3848 conn->handle, conn->link); 3849 3849 3850 3850 /* Create CIS if LE is already connected */ 3851 - if (conn->link && conn->link->state == BT_CONNECTED) 3851 + if (conn->link && conn->link->state == BT_CONNECTED) { 3852 + rcu_read_unlock(); 3852 3853 hci_le_create_cis(conn->link); 3854 + rcu_read_lock(); 3855 + } 3853 3856 3854 3857 if (i == rp->num_handles) 3855 3858 break;
+6 -13
net/bluetooth/hci_sync.c
··· 3572 3572 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) 3573 3573 { 3574 3574 /* Use Read LE Buffer Size V2 if supported */ 3575 - if (hdev->commands[41] & 0x20) 3575 + if (iso_capable(hdev) && hdev->commands[41] & 0x20) 3576 3576 return __hci_cmd_sync_status(hdev, 3577 3577 HCI_OP_LE_READ_BUFFER_SIZE_V2, 3578 3578 0, NULL, HCI_CMD_TIMEOUT); ··· 3597 3597 3598 3598 /* LE Controller init stage 2 command sequence */ 3599 3599 static const struct hci_init_stage le_init2[] = { 3600 - /* HCI_OP_LE_READ_BUFFER_SIZE */ 3601 - HCI_INIT(hci_le_read_buffer_size_sync), 3602 3600 /* HCI_OP_LE_READ_LOCAL_FEATURES */ 3603 3601 HCI_INIT(hci_le_read_local_features_sync), 3602 + /* HCI_OP_LE_READ_BUFFER_SIZE */ 3603 + HCI_INIT(hci_le_read_buffer_size_sync), 3604 3604 /* HCI_OP_LE_READ_SUPPORTED_STATES */ 3605 3605 HCI_INIT(hci_le_read_supported_states_sync), 3606 3606 {} ··· 6187 6187 6188 6188 static int _update_adv_data_sync(struct hci_dev *hdev, void *data) 6189 6189 { 6190 - u8 instance = *(u8 *)data; 6191 - 6192 - kfree(data); 6190 + u8 instance = PTR_ERR(data); 6193 6191 6194 6192 return hci_update_adv_data_sync(hdev, instance); 6195 6193 } 6196 6194 6197 6195 int hci_update_adv_data(struct hci_dev *hdev, u8 instance) 6198 6196 { 6199 - u8 *inst_ptr = kmalloc(1, GFP_KERNEL); 6200 - 6201 - if (!inst_ptr) 6202 - return -ENOMEM; 6203 - 6204 - *inst_ptr = instance; 6205 - return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL); 6197 + return hci_cmd_sync_queue(hdev, _update_adv_data_sync, 6198 + ERR_PTR(instance), NULL); 6206 6199 }
+26 -38
net/bluetooth/iso.c
··· 289 289 hci_dev_unlock(hdev); 290 290 hci_dev_put(hdev); 291 291 292 + err = iso_chan_add(conn, sk, NULL); 293 + if (err) 294 + return err; 295 + 292 296 lock_sock(sk); 293 297 294 298 /* Update source addr of the socket */ 295 299 bacpy(&iso_pi(sk)->src, &hcon->src); 296 - 297 - err = iso_chan_add(conn, sk, NULL); 298 - if (err) 299 - goto release; 300 300 301 301 if (hcon->state == BT_CONNECTED) { 302 302 iso_sock_clear_timer(sk); ··· 306 306 iso_sock_set_timer(sk, sk->sk_sndtimeo); 307 307 } 308 308 309 - release: 310 309 release_sock(sk); 311 310 return err; 312 311 ··· 371 372 hci_dev_unlock(hdev); 372 373 hci_dev_put(hdev); 373 374 375 + err = iso_chan_add(conn, sk, NULL); 376 + if (err) 377 + return err; 378 + 374 379 lock_sock(sk); 375 380 376 381 /* Update source addr of the socket */ 377 382 bacpy(&iso_pi(sk)->src, &hcon->src); 378 - 379 - err = iso_chan_add(conn, sk, NULL); 380 - if (err) 381 - goto release; 382 383 383 384 if (hcon->state == BT_CONNECTED) { 384 385 iso_sock_clear_timer(sk); ··· 391 392 iso_sock_set_timer(sk, sk->sk_sndtimeo); 392 393 } 393 394 394 - release: 395 395 release_sock(sk); 396 396 return err; 397 397 ··· 893 895 if (!hdev) 894 896 return -EHOSTUNREACH; 895 897 896 - hci_dev_lock(hdev); 897 - 898 898 err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, 899 899 le_addr_type(iso_pi(sk)->dst_type), 900 900 iso_pi(sk)->bc_sid); 901 901 902 - hci_dev_unlock(hdev); 903 902 hci_dev_put(hdev); 904 903 905 904 return err; ··· 1427 1432 struct sock *parent; 1428 1433 struct sock *sk = conn->sk; 1429 1434 struct hci_ev_le_big_sync_estabilished *ev; 1435 + struct hci_conn *hcon; 1430 1436 1431 1437 BT_DBG("conn %p", conn); 1432 1438 1433 1439 if (sk) { 1434 1440 iso_sock_ready(conn->sk); 1435 1441 } else { 1436 - iso_conn_lock(conn); 1437 - 1438 - if (!conn->hcon) { 1439 - iso_conn_unlock(conn); 1442 + hcon = conn->hcon; 1443 + if (!hcon) 1440 1444 return; 1441 - } 1442 1445 1443 - ev = hci_recv_event_data(conn->hcon->hdev, 1446 + ev = 
hci_recv_event_data(hcon->hdev, 1444 1447 HCI_EVT_LE_BIG_SYNC_ESTABILISHED); 1445 1448 if (ev) 1446 - parent = iso_get_sock_listen(&conn->hcon->src, 1447 - &conn->hcon->dst, 1449 + parent = iso_get_sock_listen(&hcon->src, 1450 + &hcon->dst, 1448 1451 iso_match_big, ev); 1449 1452 else 1450 - parent = iso_get_sock_listen(&conn->hcon->src, 1453 + parent = iso_get_sock_listen(&hcon->src, 1451 1454 BDADDR_ANY, NULL, NULL); 1452 1455 1453 - if (!parent) { 1454 - iso_conn_unlock(conn); 1456 + if (!parent) 1455 1457 return; 1456 - } 1457 1458 1458 1459 lock_sock(parent); 1459 1460 ··· 1457 1466 BTPROTO_ISO, GFP_ATOMIC, 0); 1458 1467 if (!sk) { 1459 1468 release_sock(parent); 1460 - iso_conn_unlock(conn); 1461 1469 return; 1462 1470 } 1463 1471 1464 1472 iso_sock_init(sk, parent); 1465 1473 1466 - bacpy(&iso_pi(sk)->src, &conn->hcon->src); 1467 - iso_pi(sk)->src_type = conn->hcon->src_type; 1474 + bacpy(&iso_pi(sk)->src, &hcon->src); 1475 + iso_pi(sk)->src_type = hcon->src_type; 1468 1476 1469 1477 /* If hcon has no destination address (BDADDR_ANY) it means it 1470 1478 * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to 1471 1479 * initialize using the parent socket destination address. 
1472 1480 */ 1473 - if (!bacmp(&conn->hcon->dst, BDADDR_ANY)) { 1474 - bacpy(&conn->hcon->dst, &iso_pi(parent)->dst); 1475 - conn->hcon->dst_type = iso_pi(parent)->dst_type; 1476 - conn->hcon->sync_handle = iso_pi(parent)->sync_handle; 1481 + if (!bacmp(&hcon->dst, BDADDR_ANY)) { 1482 + bacpy(&hcon->dst, &iso_pi(parent)->dst); 1483 + hcon->dst_type = iso_pi(parent)->dst_type; 1484 + hcon->sync_handle = iso_pi(parent)->sync_handle; 1477 1485 } 1478 1486 1479 - bacpy(&iso_pi(sk)->dst, &conn->hcon->dst); 1480 - iso_pi(sk)->dst_type = conn->hcon->dst_type; 1487 + bacpy(&iso_pi(sk)->dst, &hcon->dst); 1488 + iso_pi(sk)->dst_type = hcon->dst_type; 1481 1489 1482 - hci_conn_hold(conn->hcon); 1483 - __iso_chan_add(conn, sk, parent); 1490 + hci_conn_hold(hcon); 1491 + iso_chan_add(conn, sk, parent); 1484 1492 1485 1493 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) 1486 1494 sk->sk_state = BT_CONNECT2; ··· 1490 1500 parent->sk_data_ready(parent); 1491 1501 1492 1502 release_sock(parent); 1493 - 1494 - iso_conn_unlock(conn); 1495 1503 } 1496 1504 } 1497 1505
+1 -1
net/bluetooth/mgmt_util.h
··· 27 27 struct sock *sk; 28 28 u8 handle; 29 29 u8 instance; 30 - u8 param[sizeof(struct mgmt_cp_mesh_send) + 29]; 30 + u8 param[sizeof(struct mgmt_cp_mesh_send) + 31]; 31 31 }; 32 32 33 33 struct mgmt_pending_cmd {
+6 -1
net/bluetooth/rfcomm/sock.c
··· 391 391 addr->sa_family != AF_BLUETOOTH) 392 392 return -EINVAL; 393 393 394 + sock_hold(sk); 394 395 lock_sock(sk); 395 396 396 397 if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { ··· 411 410 d->sec_level = rfcomm_pi(sk)->sec_level; 412 411 d->role_switch = rfcomm_pi(sk)->role_switch; 413 412 413 + /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */ 414 + release_sock(sk); 414 415 err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr, 415 416 sa->rc_channel); 416 - if (!err) 417 + lock_sock(sk); 418 + if (!err && !sock_flag(sk, SOCK_ZAPPED)) 417 419 err = bt_sock_wait_state(sk, BT_CONNECTED, 418 420 sock_sndtimeo(sk, flags & O_NONBLOCK)); 419 421 420 422 done: 421 423 release_sock(sk); 424 + sock_put(sk); 422 425 return err; 423 426 } 424 427
+7 -4
net/ethtool/rss.c
··· 122 122 { 123 123 const struct rss_reply_data *data = RSS_REPDATA(reply_base); 124 124 125 - if (nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc) || 126 - nla_put(skb, ETHTOOL_A_RSS_INDIR, 127 - sizeof(u32) * data->indir_size, data->indir_table) || 128 - nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey)) 125 + if ((data->hfunc && 126 + nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) || 127 + (data->indir_size && 128 + nla_put(skb, ETHTOOL_A_RSS_INDIR, 129 + sizeof(u32) * data->indir_size, data->indir_table)) || 130 + (data->hkey_size && 131 + nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey))) 129 132 return -EMSGSIZE; 130 133 131 134 return 0;
+15 -2
net/ipv4/inet_hashtables.c
··· 650 650 spin_lock(lock); 651 651 if (osk) { 652 652 WARN_ON_ONCE(sk->sk_hash != osk->sk_hash); 653 - ret = sk_nulls_del_node_init_rcu(osk); 654 - } else if (found_dup_sk) { 653 + ret = sk_hashed(osk); 654 + if (ret) { 655 + /* Before deleting the node, we insert a new one to make 656 + * sure that the look-up-sk process would not miss either 657 + * of them and that at least one node would exist in ehash 658 + * table all the time. Otherwise there's a tiny chance 659 + * that lookup process could find nothing in ehash table. 660 + */ 661 + __sk_nulls_add_node_tail_rcu(sk, list); 662 + sk_nulls_del_node_init_rcu(osk); 663 + } 664 + goto unlock; 665 + } 666 + if (found_dup_sk) { 655 667 *found_dup_sk = inet_ehash_lookup_by_sk(sk, list); 656 668 if (*found_dup_sk) 657 669 ret = false; ··· 672 660 if (ret) 673 661 __sk_nulls_add_node_rcu(sk, list); 674 662 663 + unlock: 675 664 spin_unlock(lock); 676 665 677 666 return ret;
+4 -4
net/ipv4/inet_timewait_sock.c
··· 91 91 } 92 92 EXPORT_SYMBOL_GPL(inet_twsk_put); 93 93 94 - static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw, 95 - struct hlist_nulls_head *list) 94 + static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw, 95 + struct hlist_nulls_head *list) 96 96 { 97 - hlist_nulls_add_head_rcu(&tw->tw_node, list); 97 + hlist_nulls_add_tail_rcu(&tw->tw_node, list); 98 98 } 99 99 100 100 static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw, ··· 147 147 148 148 spin_lock(lock); 149 149 150 - inet_twsk_add_node_rcu(tw, &ehead->chain); 150 + inet_twsk_add_node_tail_rcu(tw, &ehead->chain); 151 151 152 152 /* Step 3: Remove SK from hash chain */ 153 153 if (__sk_nulls_del_node_init_rcu(sk))
+2
net/ipv4/tcp.c
··· 435 435 436 436 /* There's a bubble in the pipe until at least the first ACK. */ 437 437 tp->app_limited = ~0U; 438 + tp->rate_app_limited = 1; 438 439 439 440 /* See draft-stevens-tcpca-spec-01 for discussion of the 440 441 * initialization of these values. ··· 3179 3178 tp->plb_rehash = 0; 3180 3179 /* There's a bubble in the pipe until at least the first ACK. */ 3181 3180 tp->app_limited = ~0U; 3181 + tp->rate_app_limited = 1; 3182 3182 tp->rack.mstamp = 0; 3183 3183 tp->rack.advanced = 0; 3184 3184 tp->rack.reo_wnd_steps = 1;
+1 -1
net/ipv4/tcp_ulp.c
··· 139 139 if (sk->sk_socket) 140 140 clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 141 141 142 - err = -EINVAL; 142 + err = -ENOTCONN; 143 143 if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN) 144 144 goto out_err; 145 145
+50 -52
net/l2tp/l2tp_core.c
··· 104 104 /* per-net private data for this module */ 105 105 static unsigned int l2tp_net_id; 106 106 struct l2tp_net { 107 - struct list_head l2tp_tunnel_list; 108 - /* Lock for write access to l2tp_tunnel_list */ 109 - spinlock_t l2tp_tunnel_list_lock; 107 + /* Lock for write access to l2tp_tunnel_idr */ 108 + spinlock_t l2tp_tunnel_idr_lock; 109 + struct idr l2tp_tunnel_idr; 110 110 struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2]; 111 111 /* Lock for write access to l2tp_session_hlist */ 112 112 spinlock_t l2tp_session_hlist_lock; ··· 208 208 struct l2tp_tunnel *tunnel; 209 209 210 210 rcu_read_lock_bh(); 211 - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 212 - if (tunnel->tunnel_id == tunnel_id && 213 - refcount_inc_not_zero(&tunnel->ref_count)) { 214 - rcu_read_unlock_bh(); 215 - 216 - return tunnel; 217 - } 211 + tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id); 212 + if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) { 213 + rcu_read_unlock_bh(); 214 + return tunnel; 218 215 } 219 216 rcu_read_unlock_bh(); 220 217 ··· 221 224 222 225 struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) 223 226 { 224 - const struct l2tp_net *pn = l2tp_pernet(net); 227 + struct l2tp_net *pn = l2tp_pernet(net); 228 + unsigned long tunnel_id, tmp; 225 229 struct l2tp_tunnel *tunnel; 226 230 int count = 0; 227 231 228 232 rcu_read_lock_bh(); 229 - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 230 - if (++count > nth && 233 + idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { 234 + if (tunnel && ++count > nth && 231 235 refcount_inc_not_zero(&tunnel->ref_count)) { 232 236 rcu_read_unlock_bh(); 233 237 return tunnel; ··· 1041 1043 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); 1042 1044 nf_reset_ct(skb); 1043 1045 1044 - bh_lock_sock(sk); 1046 + bh_lock_sock_nested(sk); 1045 1047 if (sock_owned_by_user(sk)) { 1046 1048 kfree_skb(skb); 1047 1049 ret = 
NET_XMIT_DROP; ··· 1225 1227 l2tp_tunnel_delete(tunnel); 1226 1228 } 1227 1229 1230 + static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel) 1231 + { 1232 + struct l2tp_net *pn = l2tp_pernet(net); 1233 + 1234 + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); 1235 + idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id); 1236 + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); 1237 + } 1238 + 1228 1239 /* Workqueue tunnel deletion function */ 1229 1240 static void l2tp_tunnel_del_work(struct work_struct *work) 1230 1241 { ··· 1241 1234 del_work); 1242 1235 struct sock *sk = tunnel->sock; 1243 1236 struct socket *sock = sk->sk_socket; 1244 - struct l2tp_net *pn; 1245 1237 1246 1238 l2tp_tunnel_closeall(tunnel); 1247 1239 ··· 1254 1248 } 1255 1249 } 1256 1250 1257 - /* Remove the tunnel struct from the tunnel list */ 1258 - pn = l2tp_pernet(tunnel->l2tp_net); 1259 - spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1260 - list_del_rcu(&tunnel->list); 1261 - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1262 - 1251 + l2tp_tunnel_remove(tunnel->l2tp_net, tunnel); 1263 1252 /* drop initial ref */ 1264 1253 l2tp_tunnel_dec_refcount(tunnel); 1265 1254 ··· 1385 1384 return err; 1386 1385 } 1387 1386 1388 - static struct lock_class_key l2tp_socket_class; 1389 - 1390 1387 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, 1391 1388 struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) 1392 1389 { ··· 1454 1455 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, 1455 1456 struct l2tp_tunnel_cfg *cfg) 1456 1457 { 1457 - struct l2tp_tunnel *tunnel_walk; 1458 - struct l2tp_net *pn; 1458 + struct l2tp_net *pn = l2tp_pernet(net); 1459 + u32 tunnel_id = tunnel->tunnel_id; 1459 1460 struct socket *sock; 1460 1461 struct sock *sk; 1461 1462 int ret; 1463 + 1464 + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); 1465 + ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id, 1466 + GFP_ATOMIC); 1467 + 
spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); 1468 + if (ret) 1469 + return ret == -ENOSPC ? -EEXIST : ret; 1462 1470 1463 1471 if (tunnel->fd < 0) { 1464 1472 ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id, ··· 1480 1474 } 1481 1475 1482 1476 sk = sock->sk; 1477 + lock_sock(sk); 1483 1478 write_lock_bh(&sk->sk_callback_lock); 1484 1479 ret = l2tp_validate_socket(sk, net, tunnel->encap); 1485 1480 if (ret < 0) 1486 1481 goto err_inval_sock; 1487 1482 rcu_assign_sk_user_data(sk, tunnel); 1488 1483 write_unlock_bh(&sk->sk_callback_lock); 1489 - 1490 - tunnel->l2tp_net = net; 1491 - pn = l2tp_pernet(net); 1492 - 1493 - sock_hold(sk); 1494 - tunnel->sock = sk; 1495 - 1496 - spin_lock_bh(&pn->l2tp_tunnel_list_lock); 1497 - list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) { 1498 - if (tunnel_walk->tunnel_id == tunnel->tunnel_id) { 1499 - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1500 - sock_put(sk); 1501 - ret = -EEXIST; 1502 - goto err_sock; 1503 - } 1504 - } 1505 - list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list); 1506 - spin_unlock_bh(&pn->l2tp_tunnel_list_lock); 1507 1484 1508 1485 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { 1509 1486 struct udp_tunnel_sock_cfg udp_cfg = { ··· 1501 1512 1502 1513 tunnel->old_sk_destruct = sk->sk_destruct; 1503 1514 sk->sk_destruct = &l2tp_tunnel_destruct; 1504 - lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, 1505 - "l2tp_sock"); 1506 1515 sk->sk_allocation = GFP_ATOMIC; 1516 + release_sock(sk); 1517 + 1518 + sock_hold(sk); 1519 + tunnel->sock = sk; 1520 + tunnel->l2tp_net = net; 1521 + 1522 + spin_lock_bh(&pn->l2tp_tunnel_idr_lock); 1523 + idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id); 1524 + spin_unlock_bh(&pn->l2tp_tunnel_idr_lock); 1507 1525 1508 1526 trace_register_tunnel(tunnel); 1509 1527 ··· 1519 1523 1520 1524 return 0; 1521 1525 1522 - err_sock: 1523 - write_lock_bh(&sk->sk_callback_lock); 1524 - rcu_assign_sk_user_data(sk, NULL); 1525 1526 err_inval_sock: 1526 1527 
write_unlock_bh(&sk->sk_callback_lock); 1528 + release_sock(sk); 1527 1529 1528 1530 if (tunnel->fd < 0) 1529 1531 sock_release(sock); 1530 1532 else 1531 1533 sockfd_put(sock); 1532 1534 err: 1535 + l2tp_tunnel_remove(net, tunnel); 1533 1536 return ret; 1534 1537 } 1535 1538 EXPORT_SYMBOL_GPL(l2tp_tunnel_register); ··· 1642 1647 struct l2tp_net *pn = net_generic(net, l2tp_net_id); 1643 1648 int hash; 1644 1649 1645 - INIT_LIST_HEAD(&pn->l2tp_tunnel_list); 1646 - spin_lock_init(&pn->l2tp_tunnel_list_lock); 1650 + idr_init(&pn->l2tp_tunnel_idr); 1651 + spin_lock_init(&pn->l2tp_tunnel_idr_lock); 1647 1652 1648 1653 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) 1649 1654 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]); ··· 1657 1662 { 1658 1663 struct l2tp_net *pn = l2tp_pernet(net); 1659 1664 struct l2tp_tunnel *tunnel = NULL; 1665 + unsigned long tunnel_id, tmp; 1660 1666 int hash; 1661 1667 1662 1668 rcu_read_lock_bh(); 1663 - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 1664 - l2tp_tunnel_delete(tunnel); 1669 + idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { 1670 + if (tunnel) 1671 + l2tp_tunnel_delete(tunnel); 1665 1672 } 1666 1673 rcu_read_unlock_bh(); 1667 1674 ··· 1673 1676 1674 1677 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) 1675 1678 WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash])); 1679 + idr_destroy(&pn->l2tp_tunnel_idr); 1676 1680 } 1677 1681 1678 1682 static struct pernet_operations l2tp_net_ops = {
+5 -3
net/mac80211/agg-tx.c
··· 491 491 { 492 492 struct tid_ampdu_tx *tid_tx; 493 493 struct ieee80211_local *local = sta->local; 494 - struct ieee80211_sub_if_data *sdata = sta->sdata; 494 + struct ieee80211_sub_if_data *sdata; 495 495 struct ieee80211_ampdu_params params = { 496 496 .sta = &sta->sta, 497 497 .action = IEEE80211_AMPDU_TX_START, ··· 511 511 */ 512 512 clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); 513 513 514 - ieee80211_agg_stop_txq(sta, tid); 515 - 516 514 /* 517 515 * Make sure no packets are being processed. This ensures that 518 516 * we have a valid starting sequence number and that in-flight ··· 519 521 */ 520 522 synchronize_net(); 521 523 524 + sdata = sta->sdata; 522 525 params.ssn = sta->tid_seq[tid] >> 4; 523 526 ret = drv_ampdu_action(local, sdata, &params); 524 527 tid_tx->ssn = params.ssn; ··· 533 534 */ 534 535 set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state); 535 536 } else if (ret) { 537 + if (!sdata) 538 + return; 539 + 536 540 ht_dbg(sdata, 537 541 "BA request denied - HW unavailable for %pM tid %d\n", 538 542 sta->sta.addr, tid);
+7
net/mac80211/cfg.c
··· 147 147 link_conf->bssid_index = 0; 148 148 link_conf->nontransmitted = false; 149 149 link_conf->ema_ap = false; 150 + link_conf->bssid_indicator = 0; 150 151 151 152 if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev) 152 153 return -EINVAL; ··· 1511 1510 1512 1511 kfree(link_conf->ftmr_params); 1513 1512 link_conf->ftmr_params = NULL; 1513 + 1514 + sdata->vif.mbssid_tx_vif = NULL; 1515 + link_conf->bssid_index = 0; 1516 + link_conf->nontransmitted = false; 1517 + link_conf->ema_ap = false; 1518 + link_conf->bssid_indicator = 0; 1514 1519 1515 1520 __sta_info_flush(sdata, true); 1516 1521 ieee80211_free_keys(sdata, true);
+3 -2
net/mac80211/debugfs_sta.c
··· 167 167 continue; 168 168 txqi = to_txq_info(sta->sta.txq[i]); 169 169 p += scnprintf(p, bufsz + buf - p, 170 - "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n", 170 + "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n", 171 171 txqi->txq.tid, 172 172 txqi->txq.ac, 173 173 txqi->tin.backlog_bytes, ··· 182 182 txqi->flags, 183 183 test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN", 184 184 test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "", 185 - test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : ""); 185 + test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "", 186 + test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : ""); 186 187 } 187 188 188 189 rcu_read_unlock();
+3
net/mac80211/driver-ops.c
··· 392 392 393 393 might_sleep(); 394 394 395 + if (!sdata) 396 + return -EIO; 397 + 395 398 sdata = get_bss_sdata(sdata); 396 399 if (!check_sdata_in_driver(sdata)) 397 400 return -EIO;
+1 -1
net/mac80211/driver-ops.h
··· 1199 1199 1200 1200 /* In reconfig don't transmit now, but mark for waking later */ 1201 1201 if (local->in_reconfig) { 1202 - set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags); 1202 + set_bit(IEEE80211_TXQ_DIRTY, &txq->flags); 1203 1203 return; 1204 1204 } 1205 1205
+31
net/mac80211/ht.c
··· 391 391 392 392 tid_tx = sta->ampdu_mlme.tid_start_tx[tid]; 393 393 if (!blocked && tid_tx) { 394 + struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); 395 + struct ieee80211_sub_if_data *sdata = 396 + vif_to_sdata(txqi->txq.vif); 397 + struct fq *fq = &sdata->local->fq; 398 + 399 + spin_lock_bh(&fq->lock); 400 + 401 + /* Allow only frags to be dequeued */ 402 + set_bit(IEEE80211_TXQ_STOP, &txqi->flags); 403 + 404 + if (!skb_queue_empty(&txqi->frags)) { 405 + /* Fragmented Tx is ongoing, wait for it to 406 + * finish. Reschedule worker to retry later. 407 + */ 408 + 409 + spin_unlock_bh(&fq->lock); 410 + spin_unlock_bh(&sta->lock); 411 + 412 + /* Give the task working on the txq a chance 413 + * to send out the queued frags 414 + */ 415 + synchronize_net(); 416 + 417 + mutex_unlock(&sta->ampdu_mlme.mtx); 418 + 419 + ieee80211_queue_work(&sdata->local->hw, work); 420 + return; 421 + } 422 + 423 + spin_unlock_bh(&fq->lock); 424 + 394 425 /* 395 426 * Assign it over to the normal tid_tx array 396 427 * where it "goes live".
+1 -1
net/mac80211/ieee80211_i.h
··· 838 838 IEEE80211_TXQ_STOP, 839 839 IEEE80211_TXQ_AMPDU, 840 840 IEEE80211_TXQ_NO_AMSDU, 841 - IEEE80211_TXQ_STOP_NETIF_TX, 841 + IEEE80211_TXQ_DIRTY, 842 842 }; 843 843 844 844 /**
+3 -2
net/mac80211/iface.c
··· 364 364 365 365 /* No support for VLAN with MLO yet */ 366 366 if (iftype == NL80211_IFTYPE_AP_VLAN && 367 - nsdata->wdev.use_4addr) 367 + sdata->wdev.use_4addr && 368 + nsdata->vif.type == NL80211_IFTYPE_AP && 369 + nsdata->vif.valid_links) 368 370 return -EOPNOTSUPP; 369 371 370 372 /* ··· 2197 2195 2198 2196 ret = cfg80211_register_netdevice(ndev); 2199 2197 if (ret) { 2200 - ieee80211_if_free(ndev); 2201 2198 free_netdev(ndev); 2202 2199 return ret; 2203 2200 }
+102 -123
net/mac80211/rx.c
··· 4049 4049 #undef CALL_RXH 4050 4050 } 4051 4051 4052 + static bool 4053 + ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) 4054 + { 4055 + if (!sta->mlo) 4056 + return false; 4057 + 4058 + return !!(sta->valid_links & BIT(link_id)); 4059 + } 4060 + 4061 + static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx, 4062 + u8 link_id) 4063 + { 4064 + rx->link_id = link_id; 4065 + rx->link = rcu_dereference(rx->sdata->link[link_id]); 4066 + 4067 + if (!rx->sta) 4068 + return rx->link; 4069 + 4070 + if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id)) 4071 + return false; 4072 + 4073 + rx->link_sta = rcu_dereference(rx->sta->link[link_id]); 4074 + 4075 + return rx->link && rx->link_sta; 4076 + } 4077 + 4078 + static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx, 4079 + struct ieee80211_sta *pubsta, 4080 + int link_id) 4081 + { 4082 + struct sta_info *sta; 4083 + 4084 + sta = container_of(pubsta, struct sta_info, sta); 4085 + 4086 + rx->link_id = link_id; 4087 + rx->sta = sta; 4088 + 4089 + if (sta) { 4090 + rx->local = sta->sdata->local; 4091 + if (!rx->sdata) 4092 + rx->sdata = sta->sdata; 4093 + rx->link_sta = &sta->deflink; 4094 + } 4095 + 4096 + if (link_id < 0) 4097 + rx->link = &rx->sdata->deflink; 4098 + else if (!ieee80211_rx_data_set_link(rx, link_id)) 4099 + return false; 4100 + 4101 + return true; 4102 + } 4103 + 4052 4104 /* 4053 4105 * This function makes calls into the RX path, therefore 4054 4106 * it has to be invoked under RCU read lock. 
··· 4109 4057 { 4110 4058 struct sk_buff_head frames; 4111 4059 struct ieee80211_rx_data rx = { 4112 - .sta = sta, 4113 - .sdata = sta->sdata, 4114 - .local = sta->local, 4115 4060 /* This is OK -- must be QoS data frame */ 4116 4061 .security_idx = tid, 4117 4062 .seqno_idx = tid, 4118 - .link_id = -1, 4119 4063 }; 4120 4064 struct tid_ampdu_rx *tid_agg_rx; 4121 - u8 link_id; 4065 + int link_id = -1; 4066 + 4067 + /* FIXME: statistics won't be right with this */ 4068 + if (sta->sta.valid_links) 4069 + link_id = ffs(sta->sta.valid_links) - 1; 4070 + 4071 + if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id)) 4072 + return; 4122 4073 4123 4074 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 4124 4075 if (!tid_agg_rx) ··· 4141 4086 }; 4142 4087 drv_event_callback(rx.local, rx.sdata, &event); 4143 4088 } 4144 - /* FIXME: statistics won't be right with this */ 4145 - link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0; 4146 - rx.link = rcu_dereference(sta->sdata->link[link_id]); 4147 - rx.link_sta = rcu_dereference(sta->link[link_id]); 4148 4089 4149 4090 ieee80211_rx_handlers(&rx, &frames); 4150 4091 } ··· 4156 4105 /* This is OK -- must be QoS data frame */ 4157 4106 .security_idx = tid, 4158 4107 .seqno_idx = tid, 4159 - .link_id = -1, 4160 4108 }; 4161 4109 int i, diff; 4162 4110 ··· 4166 4116 4167 4117 sta = container_of(pubsta, struct sta_info, sta); 4168 4118 4169 - rx.sta = sta; 4170 - rx.sdata = sta->sdata; 4171 - rx.link = &rx.sdata->deflink; 4172 - rx.local = sta->local; 4119 + if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1)) 4120 + return; 4173 4121 4174 4122 rcu_read_lock(); 4175 4123 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); ··· 4554 4506 mutex_unlock(&local->sta_mtx); 4555 4507 } 4556 4508 4557 - static bool 4558 - ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) 4559 - { 4560 - if (!sta->mlo) 4561 - return false; 4562 - 4563 - return !!(sta->valid_links & BIT(link_id)); 4564 - } 4565 
- 4566 4509 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, 4567 4510 struct ieee80211_fast_rx *fast_rx, 4568 4511 int orig_len) ··· 4664 4625 struct sk_buff *skb = rx->skb; 4665 4626 struct ieee80211_hdr *hdr = (void *)skb->data; 4666 4627 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4667 - struct sta_info *sta = rx->sta; 4668 4628 int orig_len = skb->len; 4669 4629 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 4670 4630 int snap_offs = hdrlen; ··· 4675 4637 u8 da[ETH_ALEN]; 4676 4638 u8 sa[ETH_ALEN]; 4677 4639 } addrs __aligned(2); 4678 - struct link_sta_info *link_sta; 4679 4640 struct ieee80211_sta_rx_stats *stats; 4680 4641 4681 4642 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write ··· 4777 4740 drop: 4778 4741 dev_kfree_skb(skb); 4779 4742 4780 - if (rx->link_id >= 0) { 4781 - link_sta = rcu_dereference(sta->link[rx->link_id]); 4782 - if (!link_sta) 4783 - return true; 4784 - } else { 4785 - link_sta = &sta->deflink; 4786 - } 4787 - 4788 4743 if (fast_rx->uses_rss) 4789 - stats = this_cpu_ptr(link_sta->pcpu_rx_stats); 4744 + stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); 4790 4745 else 4791 - stats = &link_sta->rx_stats; 4746 + stats = &rx->link_sta->rx_stats; 4792 4747 4793 4748 stats->dropped++; 4794 4749 return true; ··· 4798 4769 struct ieee80211_local *local = rx->local; 4799 4770 struct ieee80211_sub_if_data *sdata = rx->sdata; 4800 4771 struct ieee80211_hdr *hdr = (void *)skb->data; 4801 - struct link_sta_info *link_sta = NULL; 4802 - struct ieee80211_link_data *link; 4772 + struct link_sta_info *link_sta = rx->link_sta; 4773 + struct ieee80211_link_data *link = rx->link; 4803 4774 4804 4775 rx->skb = skb; 4805 4776 ··· 4821 4792 if (!ieee80211_accept_frame(rx)) 4822 4793 return false; 4823 4794 4824 - if (rx->link_id >= 0) { 4825 - link = rcu_dereference(rx->sdata->link[rx->link_id]); 4826 - 4827 - /* we might race link removal */ 4828 - if (!link) 4829 - return true; 4830 - rx->link = link; 
4831 - 4832 - if (rx->sta) { 4833 - rx->link_sta = 4834 - rcu_dereference(rx->sta->link[rx->link_id]); 4835 - if (!rx->link_sta) 4836 - return true; 4837 - } 4838 - } else { 4839 - if (rx->sta) 4840 - rx->link_sta = &rx->sta->deflink; 4841 - 4842 - rx->link = &sdata->deflink; 4843 - } 4844 - 4845 - if (unlikely(!is_multicast_ether_addr(hdr->addr1) && 4846 - rx->link_id >= 0 && rx->sta && rx->sta->sta.mlo)) { 4847 - link_sta = rcu_dereference(rx->sta->link[rx->link_id]); 4848 - 4849 - if (WARN_ON_ONCE(!link_sta)) 4850 - return true; 4851 - } 4852 - 4853 4795 if (!consume) { 4854 4796 struct skb_shared_hwtstamps *shwt; 4855 4797 ··· 4838 4838 */ 4839 4839 shwt = skb_hwtstamps(rx->skb); 4840 4840 shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp; 4841 + 4842 + /* Update the hdr pointer to the new skb for translation below */ 4843 + hdr = (struct ieee80211_hdr *)rx->skb->data; 4841 4844 } 4842 4845 4843 - if (unlikely(link_sta)) { 4846 + if (unlikely(rx->sta && rx->sta->sta.mlo)) { 4844 4847 /* translate to MLD addresses */ 4845 4848 if (ether_addr_equal(link->conf->addr, hdr->addr1)) 4846 4849 ether_addr_copy(hdr->addr1, rx->sdata->vif.addr); ··· 4873 4870 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4874 4871 struct ieee80211_fast_rx *fast_rx; 4875 4872 struct ieee80211_rx_data rx; 4873 + int link_id = -1; 4876 4874 4877 4875 memset(&rx, 0, sizeof(rx)); 4878 4876 rx.skb = skb; ··· 4890 4886 if (!pubsta) 4891 4887 goto drop; 4892 4888 4893 - rx.sta = container_of(pubsta, struct sta_info, sta); 4894 - rx.sdata = rx.sta->sdata; 4895 - 4896 - if (status->link_valid && 4897 - !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id)) 4898 - goto drop; 4889 + if (status->link_valid) 4890 + link_id = status->link_id; 4899 4891 4900 4892 /* 4901 4893 * TODO: Should the frame be dropped if the right link_id is not ··· 4900 4900 * link_id is used only for stats purpose and updating the stats on 4901 4901 * the deflink is fine? 
4902 4902 */ 4903 - if (status->link_valid) 4904 - rx.link_id = status->link_id; 4905 - 4906 - if (rx.link_id >= 0) { 4907 - struct ieee80211_link_data *link; 4908 - 4909 - link = rcu_dereference(rx.sdata->link[rx.link_id]); 4910 - if (!link) 4911 - goto drop; 4912 - rx.link = link; 4913 - } else { 4914 - rx.link = &rx.sdata->deflink; 4915 - } 4903 + if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id)) 4904 + goto drop; 4916 4905 4917 4906 fast_rx = rcu_dereference(rx.sta->fast_rx); 4918 4907 if (!fast_rx) ··· 4919 4930 { 4920 4931 struct link_sta_info *link_sta; 4921 4932 struct ieee80211_hdr *hdr = (void *)skb->data; 4933 + struct sta_info *sta; 4934 + int link_id = -1; 4922 4935 4923 4936 /* 4924 4937 * Look up link station first, in case there's a ··· 4930 4939 */ 4931 4940 link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2); 4932 4941 if (link_sta) { 4933 - rx->sta = link_sta->sta; 4934 - rx->link_id = link_sta->link_id; 4942 + sta = link_sta->sta; 4943 + link_id = link_sta->link_id; 4935 4944 } else { 4936 4945 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4937 4946 4938 - rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2); 4939 - if (rx->sta) { 4940 - if (status->link_valid && 4941 - !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, 4942 - status->link_id)) 4943 - return false; 4944 - 4945 - rx->link_id = status->link_valid ? 
status->link_id : -1; 4946 - } else { 4947 - rx->link_id = -1; 4948 - } 4947 + sta = sta_info_get_bss(rx->sdata, hdr->addr2); 4948 + if (status->link_valid) 4949 + link_id = status->link_id; 4949 4950 } 4951 + 4952 + if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id)) 4953 + return false; 4950 4954 4951 4955 return ieee80211_prepare_and_rx_handle(rx, skb, consume); 4952 4956 } ··· 5001 5015 5002 5016 if (ieee80211_is_data(fc)) { 5003 5017 struct sta_info *sta, *prev_sta; 5004 - u8 link_id = status->link_id; 5018 + int link_id = -1; 5019 + 5020 + if (status->link_valid) 5021 + link_id = status->link_id; 5005 5022 5006 5023 if (pubsta) { 5007 - rx.sta = container_of(pubsta, struct sta_info, sta); 5008 - rx.sdata = rx.sta->sdata; 5009 - 5010 - if (status->link_valid && 5011 - !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id)) 5024 + if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id)) 5012 5025 goto out; 5013 - 5014 - if (status->link_valid) 5015 - rx.link_id = status->link_id; 5016 5026 5017 5027 /* 5018 5028 * In MLO connection, fetch the link_id using addr2 ··· 5027 5045 if (!link_sta) 5028 5046 goto out; 5029 5047 5030 - rx.link_id = link_sta->link_id; 5048 + ieee80211_rx_data_set_link(&rx, link_sta->link_id); 5031 5049 } 5032 5050 5033 5051 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) ··· 5043 5061 continue; 5044 5062 } 5045 5063 5046 - if ((status->link_valid && 5047 - !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta, 5048 - link_id)) || 5049 - (!status->link_valid && prev_sta->sta.mlo)) 5064 + rx.sdata = prev_sta->sdata; 5065 + if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta, 5066 + link_id)) 5067 + goto out; 5068 + 5069 + if (!status->link_valid && prev_sta->sta.mlo) 5050 5070 continue; 5051 5071 5052 - rx.link_id = status->link_valid ? 
link_id : -1; 5053 - rx.sta = prev_sta; 5054 - rx.sdata = prev_sta->sdata; 5055 5072 ieee80211_prepare_and_rx_handle(&rx, skb, false); 5056 5073 5057 5074 prev_sta = sta; 5058 5075 } 5059 5076 5060 5077 if (prev_sta) { 5061 - if ((status->link_valid && 5062 - !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta, 5063 - link_id)) || 5064 - (!status->link_valid && prev_sta->sta.mlo)) 5078 + rx.sdata = prev_sta->sdata; 5079 + if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta, 5080 + link_id)) 5065 5081 goto out; 5066 5082 5067 - rx.link_id = status->link_valid ? link_id : -1; 5068 - rx.sta = prev_sta; 5069 - rx.sdata = prev_sta->sdata; 5083 + if (!status->link_valid && prev_sta->sta.mlo) 5084 + goto out; 5070 5085 5071 5086 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 5072 5087 return;
+17 -17
net/mac80211/tx.c
··· 1129 1129 struct sk_buff *purge_skb = NULL; 1130 1130 1131 1131 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { 1132 - info->flags |= IEEE80211_TX_CTL_AMPDU; 1133 1132 reset_agg_timer = true; 1134 1133 } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { 1135 1134 /* ··· 1160 1161 if (!tid_tx) { 1161 1162 /* do nothing, let packet pass through */ 1162 1163 } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { 1163 - info->flags |= IEEE80211_TX_CTL_AMPDU; 1164 1164 reset_agg_timer = true; 1165 1165 } else { 1166 1166 queued = true; ··· 3675 3677 info->band = fast_tx->band; 3676 3678 info->control.vif = &sdata->vif; 3677 3679 info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT | 3678 - IEEE80211_TX_CTL_DONTFRAG | 3679 - (ampdu ? IEEE80211_TX_CTL_AMPDU : 0); 3680 + IEEE80211_TX_CTL_DONTFRAG; 3680 3681 info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT | 3681 3682 u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, 3682 3683 IEEE80211_TX_CTRL_MLO_LINK); ··· 3780 3783 struct ieee80211_tx_data tx; 3781 3784 ieee80211_tx_result r; 3782 3785 struct ieee80211_vif *vif = txq->vif; 3786 + int q = vif->hw_queue[txq->ac]; 3787 + bool q_stopped; 3783 3788 3784 3789 WARN_ON_ONCE(softirq_count() == 0); 3785 3790 ··· 3789 3790 return NULL; 3790 3791 3791 3792 begin: 3792 - spin_lock_bh(&fq->lock); 3793 + spin_lock(&local->queue_stop_reason_lock); 3794 + q_stopped = local->queue_stop_reasons[q]; 3795 + spin_unlock(&local->queue_stop_reason_lock); 3793 3796 3794 - if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) || 3795 - test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags)) 3796 - goto out; 3797 - 3798 - if (vif->txqs_stopped[txq->ac]) { 3799 - set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags); 3800 - goto out; 3797 + if (unlikely(q_stopped)) { 3798 + /* mark for waking later */ 3799 + set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags); 3800 + return NULL; 3801 3801 } 3802 + 3803 + spin_lock_bh(&fq->lock); 3802 3804 3803 3805 /* Make sure fragments stay together. 
*/ 3804 3806 skb = __skb_dequeue(&txqi->frags); ··· 3810 3810 IEEE80211_SKB_CB(skb)->control.flags &= 3811 3811 ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING; 3812 3812 } else { 3813 + if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags))) 3814 + goto out; 3815 + 3813 3816 skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func); 3814 3817 } 3815 3818 ··· 3863 3860 } 3864 3861 3865 3862 if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) 3866 - info->flags |= IEEE80211_TX_CTL_AMPDU; 3867 - else 3868 - info->flags &= ~IEEE80211_TX_CTL_AMPDU; 3863 + info->flags |= (IEEE80211_TX_CTL_AMPDU | 3864 + IEEE80211_TX_CTL_DONTFRAG); 3869 3865 3870 3866 if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { 3871 3867 if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { ··· 4598 4596 4599 4597 info = IEEE80211_SKB_CB(skb); 4600 4598 memset(info, 0, sizeof(*info)); 4601 - if (tid_tx) 4602 - info->flags |= IEEE80211_TX_CTL_AMPDU; 4603 4599 4604 4600 info->hw_queue = sdata->vif.hw_queue[queue]; 4605 4601
+3 -39
net/mac80211/util.c
··· 292 292 struct ieee80211_sub_if_data *sdata, 293 293 struct ieee80211_txq *queue) 294 294 { 295 - int q = sdata->vif.hw_queue[queue->ac]; 296 295 struct ieee80211_tx_control control = { 297 296 .sta = queue->sta, 298 297 }; 299 298 struct sk_buff *skb; 300 - unsigned long flags; 301 - bool q_stopped; 302 299 303 300 while (1) { 304 - spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 305 - q_stopped = local->queue_stop_reasons[q]; 306 - spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 307 - 308 - if (q_stopped) 309 - break; 310 - 311 301 skb = ieee80211_tx_dequeue(&local->hw, queue); 312 302 if (!skb) 313 303 break; ··· 337 347 local_bh_disable(); 338 348 spin_lock(&fq->lock); 339 349 340 - sdata->vif.txqs_stopped[ac] = false; 341 - 342 350 if (!test_bit(SDATA_STATE_RUNNING, &sdata->state)) 343 351 goto out; 344 352 ··· 358 370 if (ac != txq->ac) 359 371 continue; 360 372 361 - if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, 373 + if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, 362 374 &txqi->flags)) 363 375 continue; 364 376 ··· 373 385 374 386 txqi = to_txq_info(vif->txq); 375 387 376 - if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) || 388 + if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) || 377 389 (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac) 378 390 goto out; 379 391 ··· 505 517 bool refcounted) 506 518 { 507 519 struct ieee80211_local *local = hw_to_local(hw); 508 - struct ieee80211_sub_if_data *sdata; 509 - int n_acs = IEEE80211_NUM_ACS; 510 520 511 521 trace_stop_queue(local, queue, reason); 512 522 ··· 516 530 else 517 531 local->q_stop_reasons[queue][reason]++; 518 532 519 - if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue])) 520 - return; 521 - 522 - if (local->hw.queues < IEEE80211_NUM_ACS) 523 - n_acs = 1; 524 - 525 - rcu_read_lock(); 526 - list_for_each_entry_rcu(sdata, &local->interfaces, list) { 527 - int ac; 528 - 529 - if (!sdata->dev) 530 - continue; 531 - 532 
- for (ac = 0; ac < n_acs; ac++) { 533 - if (sdata->vif.hw_queue[ac] == queue || 534 - sdata->vif.cab_queue == queue) { 535 - spin_lock(&local->fq.lock); 536 - sdata->vif.txqs_stopped[ac] = true; 537 - spin_unlock(&local->fq.lock); 538 - } 539 - } 540 - } 541 - rcu_read_unlock(); 533 + set_bit(reason, &local->queue_stop_reasons[queue]); 542 534 } 543 535 544 536 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
+25
net/mptcp/pm.c
··· 420 420 } 421 421 } 422 422 423 + /* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses, 424 + * otherwise allow any matching local/remote pair 425 + */ 426 + bool mptcp_pm_addr_families_match(const struct sock *sk, 427 + const struct mptcp_addr_info *loc, 428 + const struct mptcp_addr_info *rem) 429 + { 430 + bool mptcp_is_v4 = sk->sk_family == AF_INET; 431 + 432 + #if IS_ENABLED(CONFIG_MPTCP_IPV6) 433 + bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6); 434 + bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6); 435 + 436 + if (mptcp_is_v4) 437 + return loc_is_v4 && rem_is_v4; 438 + 439 + if (ipv6_only_sock(sk)) 440 + return !loc_is_v4 && !rem_is_v4; 441 + 442 + return loc_is_v4 == rem_is_v4; 443 + #else 444 + return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET; 445 + #endif 446 + } 447 + 423 448 void mptcp_pm_data_reset(struct mptcp_sock *msk) 424 449 { 425 450 u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
+7
net/mptcp/pm_userspace.c
··· 294 294 } 295 295 296 296 sk = (struct sock *)msk; 297 + 298 + if (!mptcp_pm_addr_families_match(sk, &addr_l, &addr_r)) { 299 + GENL_SET_ERR_MSG(info, "families mismatch"); 300 + err = -EINVAL; 301 + goto create_err; 302 + } 303 + 297 304 lock_sock(sk); 298 305 299 306 err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
+1 -1
net/mptcp/protocol.c
··· 98 98 struct socket *ssock; 99 99 int err; 100 100 101 - err = mptcp_subflow_create_socket(sk, &ssock); 101 + err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock); 102 102 if (err) 103 103 return err; 104 104
+5 -1
net/mptcp/protocol.h
··· 641 641 /* called with sk socket lock held */ 642 642 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, 643 643 const struct mptcp_addr_info *remote); 644 - int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock); 644 + int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, 645 + struct socket **new_sock); 645 646 void mptcp_info2sockaddr(const struct mptcp_addr_info *info, 646 647 struct sockaddr_storage *addr, 647 648 unsigned short family); ··· 777 776 int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, 778 777 bool require_family, 779 778 struct mptcp_pm_addr_entry *entry); 779 + bool mptcp_pm_addr_families_match(const struct sock *sk, 780 + const struct mptcp_addr_info *loc, 781 + const struct mptcp_addr_info *rem); 780 782 void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk); 781 783 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk); 782 784 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
+5 -4
net/mptcp/subflow.c
··· 1547 1547 if (!mptcp_is_fully_established(sk)) 1548 1548 goto err_out; 1549 1549 1550 - err = mptcp_subflow_create_socket(sk, &sf); 1550 + err = mptcp_subflow_create_socket(sk, loc->family, &sf); 1551 1551 if (err) 1552 1552 goto err_out; 1553 1553 ··· 1660 1660 #endif 1661 1661 ssk->sk_prot = &tcp_prot; 1662 1662 } 1663 - int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock) 1663 + 1664 + int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, 1665 + struct socket **new_sock) 1664 1666 { 1665 1667 struct mptcp_subflow_context *subflow; 1666 1668 struct net *net = sock_net(sk); ··· 1675 1673 if (unlikely(!sk->sk_socket)) 1676 1674 return -EINVAL; 1677 1675 1678 - err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP, 1679 - &sf); 1676 + err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf); 1680 1677 if (err) 1681 1678 return err; 1682 1679
+2 -2
net/netfilter/ipset/ip_set_bitmap_ip.c
··· 308 308 return -IPSET_ERR_BITMAP_RANGE; 309 309 310 310 pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask); 311 - hosts = 2 << (32 - netmask - 1); 312 - elements = 2 << (netmask - mask_bits - 1); 311 + hosts = 2U << (32 - netmask - 1); 312 + elements = 2UL << (netmask - mask_bits - 1); 313 313 } 314 314 if (elements > IPSET_BITMAP_MAX_RANGE + 1) 315 315 return -IPSET_ERR_BITMAP_RANGE_SIZE;
+15
net/netfilter/nf_conntrack_proto_tcp.c
··· 1068 1068 ct->proto.tcp.last_flags |= 1069 1069 IP_CT_EXP_CHALLENGE_ACK; 1070 1070 } 1071 + 1072 + /* possible challenge ack reply to syn */ 1073 + if (old_state == TCP_CONNTRACK_SYN_SENT && 1074 + index == TCP_ACK_SET && 1075 + dir == IP_CT_DIR_REPLY) 1076 + ct->proto.tcp.last_ack = ntohl(th->ack_seq); 1077 + 1071 1078 spin_unlock_bh(&ct->lock); 1072 1079 nf_ct_l4proto_log_invalid(skb, ct, state, 1073 1080 "packet (index %d) in dir %d ignored, state %s", ··· 1200 1193 * segments we ignored. */ 1201 1194 goto in_window; 1202 1195 } 1196 + 1197 + /* Reset in response to a challenge-ack we let through earlier */ 1198 + if (old_state == TCP_CONNTRACK_SYN_SENT && 1199 + ct->proto.tcp.last_index == TCP_ACK_SET && 1200 + ct->proto.tcp.last_dir == IP_CT_DIR_REPLY && 1201 + ntohl(th->seq) == ct->proto.tcp.last_ack) 1202 + goto in_window; 1203 + 1203 1204 break; 1204 1205 default: 1205 1206 /* Keep compilers happy. */
+1 -1
net/netfilter/nft_payload.c
··· 63 63 return false; 64 64 65 65 if (offset + len > VLAN_ETH_HLEN + vlan_hlen) 66 - ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen; 66 + ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen; 67 67 68 68 memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen); 69 69
+1
net/nfc/llcp_core.c
··· 157 157 cancel_work_sync(&local->rx_work); 158 158 cancel_work_sync(&local->timeout_work); 159 159 kfree_skb(local->rx_pending); 160 + local->rx_pending = NULL; 160 161 del_timer_sync(&local->sdreq_timer); 161 162 cancel_work_sync(&local->sdreq_timeout_work); 162 163 nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
+1 -1
net/rxrpc/call_object.c
··· 294 294 static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp) 295 295 { 296 296 struct rxrpc_local *local = call->local; 297 - int ret = 0; 297 + int ret = -ENOMEM; 298 298 299 299 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 300 300
+2
net/sched/sch_gred.c
··· 377 377 /* Even if driver returns failure adjust the stats - in case offload 378 378 * ended but driver still wants to adjust the values. 379 379 */ 380 + sch_tree_lock(sch); 380 381 for (i = 0; i < MAX_DPs; i++) { 381 382 if (!table->tab[i]) 382 383 continue; ··· 394 393 sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits; 395 394 } 396 395 _bstats_update(&sch->bstats, bytes, packets); 396 + sch_tree_unlock(sch); 397 397 398 398 kfree(hw_stats); 399 399 return ret;
+16 -11
net/sched/sch_htb.c
··· 1549 1549 struct tc_htb_qopt_offload offload_opt; 1550 1550 struct netdev_queue *dev_queue; 1551 1551 struct Qdisc *q = cl->leaf.q; 1552 - struct Qdisc *old = NULL; 1552 + struct Qdisc *old; 1553 1553 int err; 1554 1554 1555 1555 if (cl->level) ··· 1557 1557 1558 1558 WARN_ON(!q); 1559 1559 dev_queue = htb_offload_get_queue(cl); 1560 - old = htb_graft_helper(dev_queue, NULL); 1561 - if (destroying) 1562 - /* Before HTB is destroyed, the kernel grafts noop_qdisc to 1563 - * all queues. 1560 + /* When destroying, caller qdisc_graft grafts the new qdisc and invokes 1561 + * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload 1562 + * does not need to graft or qdisc_put the qdisc being destroyed. 1563 + */ 1564 + if (!destroying) { 1565 + old = htb_graft_helper(dev_queue, NULL); 1566 + /* Last qdisc grafted should be the same as cl->leaf.q when 1567 + * calling htb_delete. 1564 1568 */ 1565 - WARN_ON(!(old->flags & TCQ_F_BUILTIN)); 1566 - else 1567 1569 WARN_ON(old != q); 1570 + } 1568 1571 1569 1572 if (cl->parent) { 1570 1573 _bstats_update(&cl->parent->bstats_bias, ··· 1584 1581 }; 1585 1582 err = htb_offload(qdisc_dev(sch), &offload_opt); 1586 1583 1587 - if (!err || destroying) 1588 - qdisc_put(old); 1589 - else 1590 - htb_graft_helper(dev_queue, old); 1584 + if (!destroying) { 1585 + if (!err) 1586 + qdisc_put(old); 1587 + else 1588 + htb_graft_helper(dev_queue, old); 1589 + } 1591 1590 1592 1591 if (last_child) 1593 1592 return err;
+3
net/sched/sch_taprio.c
··· 1700 1700 int i; 1701 1701 1702 1702 hrtimer_cancel(&q->advance_timer); 1703 + qdisc_synchronize(sch); 1704 + 1703 1705 if (q->qdiscs) { 1704 1706 for (i = 0; i < dev->num_tx_queues; i++) 1705 1707 if (q->qdiscs[i]) ··· 1722 1720 * happens in qdisc_create(), after taprio_init() has been called. 1723 1721 */ 1724 1722 hrtimer_cancel(&q->advance_timer); 1723 + qdisc_synchronize(sch); 1725 1724 1726 1725 taprio_disable_offload(dev, q, NULL); 1727 1726
+14 -5
scripts/jobserver-exec
··· 26 26 # If the MAKEFLAGS variable contains multiple instances of the 27 27 # --jobserver-auth= option, the last one is relevant. 28 28 fds = opts[-1].split("=", 1)[1] 29 - reader, writer = [int(x) for x in fds.split(",", 1)] 30 - # Open a private copy of reader to avoid setting nonblocking 31 - # on an unexpecting process with the same reader fd. 32 - reader = os.open("/proc/self/fd/%d" % (reader), 33 - os.O_RDONLY | os.O_NONBLOCK) 29 + 30 + # Starting with GNU Make 4.4, named pipes are used for reader and writer. 31 + # Example argument: --jobserver-auth=fifo:/tmp/GMfifo8134 32 + _, _, path = fds.partition('fifo:') 33 + 34 + if path: 35 + reader = os.open(path, os.O_RDONLY | os.O_NONBLOCK) 36 + writer = os.open(path, os.O_WRONLY) 37 + else: 38 + reader, writer = [int(x) for x in fds.split(",", 1)] 39 + # Open a private copy of reader to avoid setting nonblocking 40 + # on an unexpecting process with the same reader fd. 41 + reader = os.open("/proc/self/fd/%d" % (reader), 42 + os.O_RDONLY | os.O_NONBLOCK) 34 43 35 44 # Read out as many jobserver slots as possible. 36 45 while True:
+1 -1
scripts/kconfig/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 /conf 3 3 /[gmnq]conf 4 + /[gmnq]conf-bin 4 5 /[gmnq]conf-cflags 5 6 /[gmnq]conf-libs 6 - /qconf-bin 7 7 /qconf-moc.cc
+1 -1
scripts/kconfig/Makefile
··· 209 209 $(obj)/gconf.o: | $(obj)/gconf-cflags 210 210 211 211 # check if necessary packages are available, and configure build flags 212 - cmd_conf_cfg = $< $(addprefix $(obj)/$*conf-, cflags libs bin) 212 + cmd_conf_cfg = $< $(addprefix $(obj)/$*conf-, cflags libs bin); touch $(obj)/$*conf-bin 213 213 214 214 $(obj)/%conf-cflags $(obj)/%conf-libs $(obj)/%conf-bin: $(src)/%conf-cfg.sh 215 215 $(call cmd,conf_cfg)
+1 -1
scripts/package/mkspec
··· 1 1 #!/bin/sh 2 2 # 3 3 # Output a simple RPM spec file. 4 - # This version assumes a minimum of RPM 4.0.3. 4 + # This version assumes a minimum of RPM 4.13 5 5 # 6 6 # The only gothic bit here is redefining install_post to avoid 7 7 # stripping the symbols from files in the kernel which we want
+7 -1
tools/arch/arm64/include/asm/cputype.h
··· 41 41 (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) 42 42 43 43 #define MIDR_CPU_MODEL(imp, partnum) \ 44 - (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ 44 + ((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \ 45 45 (0xf << MIDR_ARCHITECTURE_SHIFT) | \ 46 46 ((partnum) << MIDR_PARTNUM_SHIFT)) 47 47 ··· 80 80 #define ARM_CPU_PART_CORTEX_X1 0xD44 81 81 #define ARM_CPU_PART_CORTEX_A510 0xD46 82 82 #define ARM_CPU_PART_CORTEX_A710 0xD47 83 + #define ARM_CPU_PART_CORTEX_A715 0xD4D 83 84 #define ARM_CPU_PART_CORTEX_X2 0xD48 84 85 #define ARM_CPU_PART_NEOVERSE_N2 0xD49 85 86 #define ARM_CPU_PART_CORTEX_A78C 0xD4B ··· 124 123 #define APPLE_CPU_PART_M1_FIRESTORM_PRO 0x025 125 124 #define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028 126 125 #define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029 126 + #define APPLE_CPU_PART_M2_BLIZZARD 0x032 127 + #define APPLE_CPU_PART_M2_AVALANCHE 0x033 127 128 128 129 #define AMPERE_CPU_PART_AMPERE1 0xAC3 129 130 ··· 145 142 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) 146 143 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) 147 144 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) 145 + #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715) 148 146 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) 149 147 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) 150 148 #define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) ··· 179 175 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO) 180 176 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX) 181 177 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX) 178 + #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, 
APPLE_CPU_PART_M2_BLIZZARD) 179 + #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE) 182 180 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) 183 181 184 182 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
+1
tools/arch/arm64/include/uapi/asm/kvm.h
··· 43 43 #define __KVM_HAVE_VCPU_EVENTS 44 44 45 45 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 46 + #define KVM_DIRTY_LOG_PAGE_OFFSET 64 46 47 47 48 #define KVM_REG_SIZE(id) \ 48 49 (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+5
tools/arch/x86/include/uapi/asm/kvm.h
··· 206 206 struct kvm_msr_filter_range { 207 207 #define KVM_MSR_FILTER_READ (1 << 0) 208 208 #define KVM_MSR_FILTER_WRITE (1 << 1) 209 + #define KVM_MSR_FILTER_RANGE_VALID_MASK (KVM_MSR_FILTER_READ | \ 210 + KVM_MSR_FILTER_WRITE) 209 211 __u32 flags; 210 212 __u32 nmsrs; /* number of msrs in bitmap */ 211 213 __u32 base; /* MSR index the bitmap starts at */ ··· 216 214 217 215 #define KVM_MSR_FILTER_MAX_RANGES 16 218 216 struct kvm_msr_filter { 217 + #ifndef __KERNEL__ 219 218 #define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0) 219 + #endif 220 220 #define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0) 221 + #define KVM_MSR_FILTER_VALID_MASK (KVM_MSR_FILTER_DEFAULT_DENY) 221 222 __u32 flags; 222 223 struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES]; 223 224 };
+9
tools/include/linux/build_bug.h
··· 79 79 #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) 80 80 #endif // static_assert 81 81 82 + 83 + /* 84 + * Compile time check that field has an expected offset 85 + */ 86 + #define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \ 87 + BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \ 88 + "Offset of " #field " in " #type " has changed.") 89 + 90 + 82 91 #endif /* _LINUX_BUILD_BUG_H */
+3
tools/include/uapi/linux/kvm.h
··· 1767 1767 __u8 runstate_update_flag; 1768 1768 struct { 1769 1769 __u64 gfn; 1770 + #define KVM_XEN_INVALID_GFN ((__u64)-1) 1770 1771 } shared_info; 1771 1772 struct { 1772 1773 __u32 send_port; ··· 1799 1798 } u; 1800 1799 }; 1801 1800 1801 + 1802 1802 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ 1803 1803 #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 1804 1804 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 ··· 1825 1823 __u16 pad[3]; 1826 1824 union { 1827 1825 __u64 gpa; 1826 + #define KVM_XEN_INVALID_GPA ((__u64)-1) 1828 1827 __u64 pad[8]; 1829 1828 struct { 1830 1829 __u64 state;
+14 -1
tools/perf/tests/shell/buildid.sh
··· 77 77 file=${build_id_dir}/.build-id/${id:0:2}/`readlink ${link}`/elf 78 78 echo "file: ${file}" 79 79 80 - if [ ! -x $file ]; then 80 + # Check for file permission of original file 81 + # in case of pe-file.exe file 82 + echo $1 | grep ".exe" 83 + if [ $? -eq 0 ]; then 84 + if [ -x $1 -a ! -x $file ]; then 85 + echo "failed: file ${file} executable does not exist" 86 + exit 1 87 + fi 88 + 89 + if [ ! -x $file -a ! -e $file ]; then 90 + echo "failed: file ${file} does not exist" 91 + exit 1 92 + fi 93 + elif [ ! -x $file ]; then 81 94 echo "failed: file ${file} does not exist" 82 95 exit 1 83 96 fi
+4 -1
tools/perf/trace/beauty/include/linux/socket.h
··· 33 33 34 34 struct sockaddr { 35 35 sa_family_t sa_family; /* address family, AF_xxx */ 36 - char sa_data[14]; /* 14 bytes of protocol address */ 36 + union { 37 + char sa_data_min[14]; /* Minimum 14 bytes of protocol address */ 38 + DECLARE_FLEX_ARRAY(char, sa_data); 39 + }; 37 40 }; 38 41 39 42 struct linger {
+7 -3
tools/perf/util/build-id.c
··· 715 715 } else if (nsi && nsinfo__need_setns(nsi)) { 716 716 if (copyfile_ns(name, filename, nsi)) 717 717 goto out_free; 718 - } else if (link(realname, filename) && errno != EEXIST && 719 - copyfile(name, filename)) 720 - goto out_free; 718 + } else if (link(realname, filename) && errno != EEXIST) { 719 + struct stat f_stat; 720 + 721 + if (!(stat(name, &f_stat) < 0) && 722 + copyfile_mode(name, filename, f_stat.st_mode)) 723 + goto out_free; 724 + } 721 725 } 722 726 723 727 /* Some binaries are stripped, but have .debug files with their symbol
+4 -1
tools/perf/util/expr.l
··· 42 42 char *dst = str; 43 43 44 44 while (*str) { 45 - if (*str == '\\') 45 + if (*str == '\\') { 46 46 *dst++ = *++str; 47 + if (!*str) 48 + break; 49 + } 47 50 else if (*str == '?') { 48 51 char *paramval; 49 52 int i = 0;
+1 -1
tools/testing/selftests/lib.mk
··· 20 20 21 21 ifeq ($(CROSS_COMPILE),) 22 22 ifeq ($(CLANG_TARGET_FLAGS),) 23 - $(error Specify CROSS_COMPILE or add '--target=' option to lib.mk 23 + $(error Specify CROSS_COMPILE or add '--target=' option to lib.mk) 24 24 else 25 25 CLANG_FLAGS += --target=$(CLANG_TARGET_FLAGS) 26 26 endif # CLANG_TARGET_FLAGS
+47
tools/testing/selftests/net/mptcp/userspace_pm.sh
··· 752 752 "$server4_token" > /dev/null 2>&1 753 753 } 754 754 755 + test_subflows_v4_v6_mix() 756 + { 757 + # Attempt to add a listener at 10.0.2.1:<subflow-port> 758 + ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\ 759 + $app6_port > /dev/null 2>&1 & 760 + local listener_pid=$! 761 + 762 + # ADD_ADDR4 from server to client machine reusing the subflow port on 763 + # the established v6 connection 764 + :>"$client_evts" 765 + ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\ 766 + $server_addr_id dev ns1eth2 > /dev/null 2>&1 767 + stdbuf -o0 -e0 printf "ADD_ADDR4 id:%d 10.0.2.1 (ns1) => ns2, reuse port\t\t" $server_addr_id 768 + sleep 0.5 769 + verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\ 770 + "$server_addr_id" "$app6_port" 771 + 772 + # CREATE_SUBFLOW from client to server machine 773 + :>"$client_evts" 774 + ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\ 775 + $app6_port token "$client6_token" > /dev/null 2>&1 776 + sleep 0.5 777 + verify_subflow_events "$client_evts" "$SUB_ESTABLISHED" "$client6_token"\ 778 + "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\ 779 + "$server_addr_id" "ns2" "ns1" 780 + 781 + # Delete the listener from the server ns, if one was created 782 + kill_wait $listener_pid 783 + 784 + sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts") 785 + 786 + # DESTROY_SUBFLOW from client to server machine 787 + :>"$client_evts" 788 + ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\ 789 + $app6_port token "$client6_token" > /dev/null 2>&1 790 + sleep 0.5 791 + verify_subflow_events "$client_evts" "$SUB_CLOSED" "$client6_token" \ 792 + "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\ 793 + "$server_addr_id" "ns2" "ns1" 794 + 795 + # RM_ADDR from server to client machine 796 + ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\ 797 + "$server6_token" > /dev/null 2>&1 798 + sleep 0.5
799 + } 800 + 755 801 test_prio() { 757 803 local count ··· 907 861 test_announce 908 862 test_remove 909 863 test_subflows 864 + test_subflows_v4_v6_mix 910 865 test_prio 911 866 test_listener 912 867
+7 -5
tools/testing/selftests/net/toeplitz.c
··· 215 215 } 216 216 217 217 /* A single TPACKET_V3 block can hold multiple frames */ 218 - static void recv_block(struct ring_state *ring) 218 + static bool recv_block(struct ring_state *ring) 219 219 { 220 220 struct tpacket_block_desc *block; 221 221 char *frame; ··· 223 223 224 224 block = (void *)(ring->mmap + ring->idx * ring_block_sz); 225 225 if (!(block->hdr.bh1.block_status & TP_STATUS_USER)) 226 - return; 226 + return false; 227 227 228 228 frame = (char *)block; 229 229 frame += block->hdr.bh1.offset_to_first_pkt; ··· 235 235 236 236 block->hdr.bh1.block_status = TP_STATUS_KERNEL; 237 237 ring->idx = (ring->idx + 1) % ring_block_nr; 238 + 239 + return true; 238 240 } 239 241 240 242 /* simple test: sleep once unconditionally and then process all rings */ ··· 247 245 usleep(1000 * cfg_timeout_msec); 248 246 249 247 for (i = 0; i < num_cpus; i++) 250 - recv_block(&rings[i]); 248 + do {} while (recv_block(&rings[i])); 251 249 252 250 fprintf(stderr, "count: pass=%u nohash=%u fail=%u\n", 253 251 frames_received - frames_nohash - frames_error, ··· 259 257 struct tpacket_req3 req3 = {0}; 260 258 void *ring; 261 259 262 - req3.tp_retire_blk_tov = cfg_timeout_msec; 260 + req3.tp_retire_blk_tov = cfg_timeout_msec / 8; 263 261 req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH; 264 262 265 263 req3.tp_frame_size = 2048; 266 264 req3.tp_frame_nr = 1 << 10; 267 - req3.tp_block_nr = 2; 265 + req3.tp_block_nr = 16; 268 266 269 267 req3.tp_block_size = req3.tp_frame_size * req3.tp_frame_nr; 270 268 req3.tp_block_size /= req3.tp_block_nr;
+9 -7
tools/testing/selftests/netfilter/nft_trans_stress.sh
··· 10 10 ksft_skip=4 11 11 12 12 testns=testns-$(mktemp -u "XXXXXXXX") 13 + tmp="" 13 14 14 15 tables="foo bar baz quux" 15 16 global_ret=0 16 17 eret=0 17 18 lret=0 19 + 20 + cleanup() { 21 + ip netns pids "$testns" | xargs kill 2>/dev/null 22 + ip netns del "$testns" 23 + 24 + rm -f "$tmp" 25 + } 18 26 19 27 check_result() 20 28 { ··· 51 43 exit $ksft_skip 52 44 fi 53 45 46 + trap cleanup EXIT 54 47 tmp=$(mktemp) 55 48 56 49 for table in $tables; do ··· 147 138 done 148 139 149 140 check_result $lret "add/delete with nftrace enabled" 150 - 151 - pkill -9 ping 152 - 153 - wait 154 - 155 - rm -f "$tmp" 156 - ip netns del "$testns" 157 141 158 142 exit $global_ret
+1
tools/testing/selftests/netfilter/settings
··· 1 + timeout=120