Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge 6.18-rc3 into tty-next

We need the tty/serial fixes in here as well to build on top of.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

+2584 -1553
+1
.mailmap
··· 27 27 Alan Cox <root@hraefn.swansea.linux.org.uk> 28 28 Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com> 29 29 Aleksey Gorelov <aleksey_gorelov@phoenix.com> 30 + Alex Williamson <alex@shazbot.org> <alex.williamson@redhat.com> 30 31 Alexander Lobakin <alobakin@pm.me> <alobakin@dlink.ru> 31 32 Alexander Lobakin <alobakin@pm.me> <alobakin@marvell.com> 32 33 Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru>
+3 -1
Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
··· 142 142 required: 143 143 - orientation-switch 144 144 then: 145 - $ref: /schemas/usb/usb-switch.yaml# 145 + allOf: 146 + - $ref: /schemas/usb/usb-switch.yaml# 147 + - $ref: /schemas/usb/usb-switch-ports.yaml# 146 148 147 149 unevaluatedProperties: false 148 150
+4
Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
··· 24 24 - enum: 25 25 - qcom,qcs8300-qmp-ufs-phy 26 26 - const: qcom,sa8775p-qmp-ufs-phy 27 + - items: 28 + - enum: 29 + - qcom,kaanapali-qmp-ufs-phy 30 + - const: qcom,sm8750-qmp-ufs-phy 27 31 - enum: 28 32 - qcom,msm8996-qmp-ufs-phy 29 33 - qcom,msm8998-qmp-ufs-phy
+3 -1
Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
··· 125 125 contains: 126 126 const: google,gs101-usb31drd-phy 127 127 then: 128 - $ref: /schemas/usb/usb-switch.yaml# 128 + allOf: 129 + - $ref: /schemas/usb/usb-switch.yaml# 130 + - $ref: /schemas/usb/usb-switch-ports.yaml# 129 131 130 132 properties: 131 133 clocks:
+1
Documentation/devicetree/bindings/serial/renesas,scif.yaml
··· 197 197 - renesas,rcar-gen2-scif 198 198 - renesas,rcar-gen3-scif 199 199 - renesas,rcar-gen4-scif 200 + - renesas,rcar-gen5-scif 200 201 then: 201 202 properties: 202 203 interrupts:
+8 -3
Documentation/devicetree/bindings/spi/spi-cadence.yaml
··· 14 14 15 15 properties: 16 16 compatible: 17 - enum: 18 - - cdns,spi-r1p6 19 - - xlnx,zynq-spi-r1p6 17 + oneOf: 18 + - enum: 19 + - xlnx,zynq-spi-r1p6 20 + - items: 21 + - enum: 22 + - xlnx,zynqmp-spi-r1p6 23 + - xlnx,versal-net-spi-r1p6 24 + - const: cdns,spi-r1p6 20 25 21 26 reg: 22 27 maxItems: 1
+1
Documentation/devicetree/bindings/spi/spi-rockchip.yaml
··· 34 34 - rockchip,rk3328-spi 35 35 - rockchip,rk3368-spi 36 36 - rockchip,rk3399-spi 37 + - rockchip,rk3506-spi 37 38 - rockchip,rk3528-spi 38 39 - rockchip,rk3562-spi 39 40 - rockchip,rk3568-spi
+2
Documentation/devicetree/bindings/ufs/qcom,sm8650-ufshc.yaml
··· 15 15 compatible: 16 16 contains: 17 17 enum: 18 + - qcom,kaanapali-ufshc 18 19 - qcom,sm8650-ufshc 19 20 - qcom,sm8750-ufshc 20 21 required: ··· 25 24 compatible: 26 25 items: 27 26 - enum: 27 + - qcom,kaanapali-ufshc 28 28 - qcom,sm8650-ufshc 29 29 - qcom,sm8750-ufshc 30 30 - const: qcom,ufshc
+1
Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
··· 76 76 77 77 allOf: 78 78 - $ref: usb-switch.yaml# 79 + - $ref: usb-switch-ports.yaml# 79 80 80 81 additionalProperties: false 81 82
+9 -1
Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
··· 89 89 - reg 90 90 - "#address-cells" 91 91 - "#size-cells" 92 - - dma-ranges 93 92 - ranges 94 93 - clocks 95 94 - clock-names 96 95 - interrupts 97 96 - power-domains 97 + 98 + allOf: 99 + - if: 100 + properties: 101 + compatible: 102 + const: fsl,imx8mp-dwc3 103 + then: 104 + required: 105 + - dma-ranges 98 106 99 107 additionalProperties: false 100 108
+1
Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
··· 52 52 53 53 allOf: 54 54 - $ref: usb-switch.yaml# 55 + - $ref: usb-switch-ports.yaml# 55 56 - if: 56 57 required: 57 58 - mode-switch
+1
Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
··· 46 46 47 47 allOf: 48 48 - $ref: usb-switch.yaml# 49 + - $ref: usb-switch-ports.yaml# 49 50 50 51 additionalProperties: false 51 52
+1
Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
··· 91 91 92 92 allOf: 93 93 - $ref: usb-switch.yaml# 94 + - $ref: usb-switch-ports.yaml# 94 95 95 96 additionalProperties: false 96 97
+1
Documentation/devicetree/bindings/usb/parade,ps8830.yaml
··· 81 81 82 82 allOf: 83 83 - $ref: usb-switch.yaml# 84 + - $ref: usb-switch-ports.yaml# 84 85 85 86 additionalProperties: false 86 87
+3
Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
··· 68 68 - qcom,sm8550-dwc3 69 69 - qcom,sm8650-dwc3 70 70 - qcom,x1e80100-dwc3 71 + - qcom,x1e80100-dwc3-mp 71 72 - const: qcom,snps-dwc3 72 73 73 74 reg: ··· 461 460 then: 462 461 properties: 463 462 interrupts: 463 + minItems: 4 464 464 maxItems: 5 465 465 interrupt-names: 466 + minItems: 4 466 467 items: 467 468 - const: dwc_usb3 468 469 - const: pwr_event
+1
Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
··· 60 60 61 61 allOf: 62 62 - $ref: usb-switch.yaml# 63 + - $ref: usb-switch-ports.yaml# 63 64 64 65 additionalProperties: false 65 66
+1
Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
··· 11 11 12 12 allOf: 13 13 - $ref: usb-switch.yaml# 14 + - $ref: usb-switch-ports.yaml# 14 15 15 16 properties: 16 17 compatible:
+68
Documentation/devicetree/bindings/usb/usb-switch-ports.yaml
··· 1 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + %YAML 1.2 3 + --- 4 + $id: http://devicetree.org/schemas/usb/usb-switch-ports.yaml# 5 + $schema: http://devicetree.org/meta-schemas/core.yaml# 6 + 7 + title: USB Orientation and Mode Switches Ports Graph Properties 8 + 9 + maintainers: 10 + - Greg Kroah-Hartman <gregkh@linuxfoundation.org> 11 + 12 + description: 13 + Ports Graph properties for devices handling USB mode and orientation switching. 14 + 15 + properties: 16 + port: 17 + $ref: /schemas/graph.yaml#/$defs/port-base 18 + description: 19 + A port node to link the device to a TypeC controller for the purpose of 20 + handling altmode muxing and orientation switching. 21 + 22 + properties: 23 + endpoint: 24 + $ref: /schemas/graph.yaml#/$defs/endpoint-base 25 + unevaluatedProperties: false 26 + properties: 27 + data-lanes: 28 + $ref: /schemas/types.yaml#/definitions/uint32-array 29 + minItems: 1 30 + maxItems: 8 31 + uniqueItems: true 32 + items: 33 + maximum: 8 34 + 35 + ports: 36 + $ref: /schemas/graph.yaml#/properties/ports 37 + properties: 38 + port@0: 39 + $ref: /schemas/graph.yaml#/properties/port 40 + description: 41 + Super Speed (SS) Output endpoint to the Type-C connector 42 + 43 + port@1: 44 + $ref: /schemas/graph.yaml#/$defs/port-base 45 + description: 46 + Super Speed (SS) Input endpoint from the Super-Speed PHY 47 + unevaluatedProperties: false 48 + 49 + properties: 50 + endpoint: 51 + $ref: /schemas/graph.yaml#/$defs/endpoint-base 52 + unevaluatedProperties: false 53 + properties: 54 + data-lanes: 55 + $ref: /schemas/types.yaml#/definitions/uint32-array 56 + minItems: 1 57 + maxItems: 8 58 + uniqueItems: true 59 + items: 60 + maximum: 8 61 + 62 + oneOf: 63 + - required: 64 + - port 65 + - required: 66 + - ports 67 + 68 + additionalProperties: true
-52
Documentation/devicetree/bindings/usb/usb-switch.yaml
··· 25 25 description: Possible handler of SuperSpeed signals retiming 26 26 type: boolean 27 27 28 - port: 29 - $ref: /schemas/graph.yaml#/$defs/port-base 30 - description: 31 - A port node to link the device to a TypeC controller for the purpose of 32 - handling altmode muxing and orientation switching. 33 - 34 - properties: 35 - endpoint: 36 - $ref: /schemas/graph.yaml#/$defs/endpoint-base 37 - unevaluatedProperties: false 38 - properties: 39 - data-lanes: 40 - $ref: /schemas/types.yaml#/definitions/uint32-array 41 - minItems: 1 42 - maxItems: 8 43 - uniqueItems: true 44 - items: 45 - maximum: 8 46 - 47 - ports: 48 - $ref: /schemas/graph.yaml#/properties/ports 49 - properties: 50 - port@0: 51 - $ref: /schemas/graph.yaml#/properties/port 52 - description: 53 - Super Speed (SS) Output endpoint to the Type-C connector 54 - 55 - port@1: 56 - $ref: /schemas/graph.yaml#/$defs/port-base 57 - description: 58 - Super Speed (SS) Input endpoint from the Super-Speed PHY 59 - unevaluatedProperties: false 60 - 61 - properties: 62 - endpoint: 63 - $ref: /schemas/graph.yaml#/$defs/endpoint-base 64 - unevaluatedProperties: false 65 - properties: 66 - data-lanes: 67 - $ref: /schemas/types.yaml#/definitions/uint32-array 68 - minItems: 1 69 - maxItems: 8 70 - uniqueItems: true 71 - items: 72 - maximum: 8 73 - 74 - oneOf: 75 - - required: 76 - - port 77 - - required: 78 - - ports 79 - 80 28 additionalProperties: true
+4 -3
Documentation/networking/ax25.rst
··· 11 11 12 12 There is a mailing list for discussing Linux amateur radio matters 13 13 called linux-hams@vger.kernel.org. To subscribe to it, send a message to 14 - majordomo@vger.kernel.org with the words "subscribe linux-hams" in the body 15 - of the message, the subject field is ignored. You don't need to be 16 - subscribed to post but of course that means you might miss an answer. 14 + linux-hams+subscribe@vger.kernel.org or use the web interface at 15 + https://vger.kernel.org. The subject and body of the message are 16 + ignored. You don't need to be subscribed to post but of course that 17 + means you might miss an answer.
+9 -3
Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
··· 137 137 138 138 Checksum offload header fields are in big endian format. 139 139 140 + Packet format:: 141 + 140 142 Bit 0 - 6 7 8-15 16-31 141 143 Function Header Type Next Header Checksum Valid Reserved 142 144 143 145 Header Type is to indicate the type of header, this usually is set to CHECKSUM 144 146 145 147 Header types 146 - = ========================================== 148 + 149 + = =============== 147 150 0 Reserved 148 151 1 Reserved 149 152 2 checksum header 153 + = =============== 150 154 151 155 Checksum Valid is to indicate whether the header checksum is valid. Value of 1 152 156 implies that checksum is calculated on this packet and is valid, value of 0 ··· 187 183 packets and either ACK the MAP command or deliver the IP packet to the 188 184 network stack as needed 189 185 190 - MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding.... 186 + Packet format:: 191 187 192 - MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad... 188 + MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding.... 189 + 190 + MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad... 193 191 194 192 3. Userspace configuration 195 193 ==========================
+2 -4
Documentation/networking/net_failover.rst
··· 96 96 received only on the 'failover' device. 97 97 98 98 Below is the patch snippet used with 'cloud-ifupdown-helper' script found on 99 - Debian cloud images: 99 + Debian cloud images:: 100 100 101 - :: 102 101 @@ -27,6 +27,8 @@ do_setup() { 103 102 local working="$cfgdir/.$INTERFACE" 104 103 local final="$cfgdir/$INTERFACE" ··· 171 172 172 173 The following script is executed on the destination hypervisor once migration 173 174 completes, and it reattaches the VF to the VM and brings down the virtio-net 174 - interface. 175 + interface:: 175 176 176 - :: 177 177 # reattach-vf.sh 178 178 #!/bin/bash 179 179
+17 -2
MAINTAINERS
··· 1997 1997 1998 1998 ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS) 1999 1999 M: Arnd Bergmann <arnd@arndb.de> 2000 + M: Krzysztof Kozlowski <krzk@kernel.org> 2001 + M: Alexandre Belloni <alexandre.belloni@bootlin.com> 2002 + M: Linus Walleij <linus.walleij@linaro.org> 2003 + R: Drew Fustini <fustini@kernel.org> 2000 2004 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2001 2005 L: soc@lists.linux.dev 2002 2006 S: Maintained ··· 3845 3841 ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS 3846 3842 M: Corentin Chary <corentin.chary@gmail.com> 3847 3843 M: Luke D. Jones <luke@ljones.dev> 3844 + M: Denis Benato <benato.denis96@gmail.com> 3848 3845 L: platform-driver-x86@vger.kernel.org 3849 3846 S: Maintained 3850 3847 W: https://asus-linux.org/ ··· 13116 13111 F: include/uapi/linux/io_uring/ 13117 13112 F: io_uring/ 13118 13113 13114 + IO_URING ZCRX 13115 + M: Pavel Begunkov <asml.silence@gmail.com> 13116 + L: io-uring@vger.kernel.org 13117 + L: netdev@vger.kernel.org 13118 + T: git https://github.com/isilence/linux.git zcrx/for-next 13119 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git 13120 + S: Maintained 13121 + F: io_uring/zcrx.* 13122 + 13119 13123 IPMI SUBSYSTEM 13120 13124 M: Corey Minyard <corey@minyard.net> 13121 13125 L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers) ··· 14408 14394 14409 14395 LINUX-NEXT TREE 14410 14396 M: Stephen Rothwell <sfr@canb.auug.org.au> 14397 + M: Mark Brown <broonie@kernel.org> 14411 14398 L: linux-next@vger.kernel.org 14412 14399 S: Supported 14413 14400 B: mailto:linux-next@vger.kernel.org and the appropriate development tree ··· 26900 26885 F: drivers/vfio/cdx/* 26901 26886 26902 26887 VFIO DRIVER 26903 - M: Alex Williamson <alex.williamson@redhat.com> 26888 + M: Alex Williamson <alex@shazbot.org> 26904 26889 L: kvm@vger.kernel.org 26905 26890 S: Maintained 26906 26891 T: git https://github.com/awilliam/linux-vfio.git ··· 27063 27048 F: drivers/media/test-drivers/vimc/* 27064 27049 27065 27050 VIRT LIB 27066 - M: Alex Williamson <alex.williamson@redhat.com> 27051 + M: Alex Williamson <alex@shazbot.org> 27067 27052 M: Paolo Bonzini <pbonzini@redhat.com> 27068 27053 L: kvm@vger.kernel.org 27069 27054 S: Supported
+1 -1
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 18 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc2 5 + EXTRAVERSION = -rc3 6 6 NAME = Baby Opossum Posse 7 7 8 8 # *DOCUMENTATION*
+8
arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
··· 77 77 /delete-property/ pinctrl-0; 78 78 }; 79 79 80 + &pm { 81 + clocks = <&firmware_clocks 5>, 82 + <&clocks BCM2835_CLOCK_PERI_IMAGE>, 83 + <&clocks BCM2835_CLOCK_H264>, 84 + <&clocks BCM2835_CLOCK_ISP>; 85 + clock-names = "v3d", "peri_image", "h264", "isp"; 86 + }; 87 + 80 88 &rmem { 81 89 /* 82 90 * RPi4's co-processor will copy the board's bootloader configuration
+9
arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
··· 13 13 clock-names = "pixel", "hdmi"; 14 14 }; 15 15 16 + &pm { 17 + clocks = <&firmware_clocks 5>, 18 + <&clocks BCM2835_CLOCK_PERI_IMAGE>, 19 + <&clocks BCM2835_CLOCK_H264>, 20 + <&clocks BCM2835_CLOCK_ISP>; 21 + clock-names = "v3d", "peri_image", "h264", "isp"; 22 + }; 23 + 16 24 &v3d { 25 + clocks = <&firmware_clocks 5>; 17 26 power-domains = <&power RPI_POWER_DOMAIN_V3D>; 18 27 }; 19 28
+2
arch/arm64/boot/dts/broadcom/bcm2712.dtsi
··· 326 326 <0x7fffe000 0x2000>; 327 327 interrupt-controller; 328 328 #address-cells = <0>; 329 + interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | 330 + IRQ_TYPE_LEVEL_HIGH)>; 329 331 #interrupt-cells = <3>; 330 332 }; 331 333
+2 -1
arch/arm64/include/asm/pgtable.h
··· 293 293 static inline pte_t pte_mkwrite_novma(pte_t pte) 294 294 { 295 295 pte = set_pte_bit(pte, __pgprot(PTE_WRITE)); 296 - pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 296 + if (pte_sw_dirty(pte)) 297 + pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 297 298 return pte; 298 299 } 299 300
+8 -3
arch/arm64/mm/copypage.c
··· 35 35 from != folio_page(src, 0)) 36 36 return; 37 37 38 - WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst)); 38 + folio_try_hugetlb_mte_tagging(dst); 39 39 40 40 /* 41 41 * Populate tags for all subpages. ··· 51 51 } 52 52 folio_set_hugetlb_mte_tagged(dst); 53 53 } else if (page_mte_tagged(from)) { 54 - /* It's a new page, shouldn't have been tagged yet */ 55 - WARN_ON_ONCE(!try_page_mte_tagging(to)); 54 + /* 55 + * Most of the time it's a new page that shouldn't have been 56 + * tagged yet. However, folio migration can end up reusing the 57 + * same page without untagging it. Ignore the warning if the 58 + * page is already tagged. 59 + */ 60 + try_page_mte_tagging(to); 56 61 57 62 mte_copy_page_tags(kto, kfrom); 58 63 set_page_mte_tagged(to);
+1 -1
arch/csky/abiv2/cacheflush.c
··· 21 21 22 22 folio = page_folio(pfn_to_page(pfn)); 23 23 24 - if (test_and_set_bit(PG_dcache_clean, &folio->flags)) 24 + if (test_and_set_bit(PG_dcache_clean, &folio->flags.f)) 25 25 return; 26 26 27 27 icache_inv_range(address, address + nr*PAGE_SIZE);
+2 -2
arch/csky/abiv2/inc/abi/cacheflush.h
··· 20 20 21 21 static inline void flush_dcache_folio(struct folio *folio) 22 22 { 23 - if (test_bit(PG_dcache_clean, &folio->flags)) 24 - clear_bit(PG_dcache_clean, &folio->flags); 23 + if (test_bit(PG_dcache_clean, &folio->flags.f)) 24 + clear_bit(PG_dcache_clean, &folio->flags.f); 25 25 } 26 26 #define flush_dcache_folio flush_dcache_folio 27 27
+2 -2
arch/mips/mti-malta/malta-setup.c
··· 47 47 .name = "keyboard", 48 48 .start = 0x60, 49 49 .end = 0x6f, 50 - .flags = IORESOURCE_IO | IORESOURCE_BUSY 50 + .flags = IORESOURCE_IO 51 51 }, 52 52 { 53 53 .name = "dma page reg", ··· 213 213 214 214 /* Request I/O space for devices used on the Malta board. */ 215 215 for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++) 216 - request_resource(&ioport_resource, standard_io_resources+i); 216 + insert_resource(&ioport_resource, standard_io_resources + i); 217 217 218 218 /* 219 219 * Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.
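The substantive change here is the switch from request_resource() to insert_resource() (plus dropping IORESOURCE_BUSY): request_resource() fails on any overlap with an existing entry at that level of the resource tree, whereas insert_resource() nests the new entry above or below an existing occupant and only fails on partial overlaps. A rough sketch of the idiom, with a made-up region and a hypothetical helper name::

  static struct resource kbd_res = {
          .name  = "keyboard",
          .start = 0x60,
          .end   = 0x6f,
          .flags = IORESOURCE_IO,
  };

  static void __init claim_legacy_ports(void)
  {
          /*
           * Unlike request_resource(), insert_resource() tolerates an
           * existing resource that fully contains (or sits inside)
           * [0x60, 0x6f], reparenting tree entries as needed.
           */
          if (insert_resource(&ioport_resource, &kbd_res))
                  pr_warn("keyboard ports could not be inserted\n");
  }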
+1 -2
arch/mips/pci/pci-malta.c
··· 230 230 } 231 231 232 232 /* PIIX4 ACPI starts at 0x1000 */ 233 - if (controller->io_resource->start < 0x00001000UL) 234 - controller->io_resource->start = 0x00001000UL; 233 + PCIBIOS_MIN_IO = 0x1000; 235 234 236 235 iomem_resource.end &= 0xfffffffffULL; /* 64 GB */ 237 236 ioport_resource.end = controller->io_resource->end;
+1 -7
arch/riscv/include/asm/asm.h
··· 84 84 .endm 85 85 86 86 #ifdef CONFIG_SMP 87 - #ifdef CONFIG_32BIT 88 - #define PER_CPU_OFFSET_SHIFT 2 89 - #else 90 - #define PER_CPU_OFFSET_SHIFT 3 91 - #endif 92 - 93 87 .macro asm_per_cpu dst sym tmp 94 88 lw \tmp, TASK_TI_CPU_NUM(tp) 95 - slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT 89 + slli \tmp, \tmp, RISCV_LGPTR 96 90 la \dst, __per_cpu_offset 97 91 add \dst, \dst, \tmp 98 92 REG_L \tmp, 0(\dst)
+2
arch/riscv/include/asm/cpufeature.h
··· 31 31 32 32 DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); 33 33 34 + extern const struct seq_operations cpuinfo_op; 35 + 34 36 /* Per-cpu ISA extensions. */ 35 37 extern struct riscv_isainfo hart_isa[NR_CPUS]; 36 38
+7
arch/riscv/include/asm/hwprobe.h
··· 42 42 return pair->value == other_pair->value; 43 43 } 44 44 45 + #ifdef CONFIG_MMU 46 + void riscv_hwprobe_register_async_probe(void); 47 + void riscv_hwprobe_complete_async_probe(void); 48 + #else 49 + static inline void riscv_hwprobe_register_async_probe(void) {} 50 + static inline void riscv_hwprobe_complete_async_probe(void) {} 51 + #endif 45 52 #endif
+2
arch/riscv/include/asm/pgtable-64.h
··· 69 69 70 70 #define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t)) 71 71 72 + #define MAX_POSSIBLE_PHYSMEM_BITS 56 73 + 72 74 /* 73 75 * rv64 PTE format: 74 76 * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+2
arch/riscv/include/asm/pgtable.h
··· 654 654 return __pgprot(prot); 655 655 } 656 656 657 + #define pgprot_dmacoherent pgprot_writecombine 658 + 657 659 /* 658 660 * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By 659 661 * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in
+6
arch/riscv/include/asm/vdso/arch_data.h
··· 12 12 13 13 /* Boolean indicating all CPUs have the same static hwprobe values. */ 14 14 __u8 homogeneous_cpus; 15 + 16 + /* 17 + * A gate to check and see if the hwprobe data is actually ready, as 18 + * probing is deferred to avoid boot slowdowns. 19 + */ 20 + __u8 ready; 15 21 }; 16 22 17 23 #endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */
+1 -3
arch/riscv/kernel/cpu.c
··· 62 62 return -ENODEV; 63 63 } 64 64 65 - if (!of_device_is_available(node)) { 66 - pr_info("CPU with hartid=%lu is not available\n", *hart); 65 + if (!of_device_is_available(node)) 67 66 return -ENODEV; 68 - } 69 67 70 68 if (of_property_read_string(node, "riscv,isa-base", &isa)) 71 69 goto old_interface;
+2 -2
arch/riscv/kernel/cpufeature.c
··· 932 932 { 933 933 int cpu; 934 934 u32 prev_vlenb = 0; 935 - u32 vlenb; 935 + u32 vlenb = 0; 936 936 937 - /* Ignore thead,vlenb property if xtheavector is not enabled in the kernel */ 937 + /* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */ 938 938 if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR)) 939 939 return 0; 940 940
+12 -12
arch/riscv/kernel/smp.c
··· 40 40 IPI_MAX 41 41 }; 42 42 43 + static const char * const ipi_names[] = { 44 + [IPI_RESCHEDULE] = "Rescheduling interrupts", 45 + [IPI_CALL_FUNC] = "Function call interrupts", 46 + [IPI_CPU_STOP] = "CPU stop interrupts", 47 + [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", 48 + [IPI_IRQ_WORK] = "IRQ work interrupts", 49 + [IPI_TIMER] = "Timer broadcast interrupts", 50 + [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", 51 + [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts", 52 + }; 53 + 43 54 unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = { 44 55 [0 ... NR_CPUS-1] = INVALID_HARTID 45 56 }; ··· 210 199 /* Request IPIs */ 211 200 for (i = 0; i < nr_ipi; i++) { 212 201 err = request_percpu_irq(ipi_virq_base + i, handle_IPI, 213 - "IPI", &ipi_dummy_dev); 202 + ipi_names[i], &ipi_dummy_dev); 214 203 WARN_ON(err); 215 204 216 205 ipi_desc[i] = irq_to_desc(ipi_virq_base + i); ··· 220 209 /* Enabled IPIs for boot CPU immediately */ 221 210 riscv_ipi_enable(); 222 211 } 223 - 224 - static const char * const ipi_names[] = { 225 - [IPI_RESCHEDULE] = "Rescheduling interrupts", 226 - [IPI_CALL_FUNC] = "Function call interrupts", 227 - [IPI_CPU_STOP] = "CPU stop interrupts", 228 - [IPI_CPU_CRASH_STOP] = "CPU stop (for crash dump) interrupts", 229 - [IPI_IRQ_WORK] = "IRQ work interrupts", 230 - [IPI_TIMER] = "Timer broadcast interrupts", 231 - [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts", 232 - [IPI_KGDB_ROUNDUP] = "KGDB roundup interrupts", 233 - }; 234 212 235 213 void show_ipi_stats(struct seq_file *p, int prec) 236 214 {
+66 -14
arch/riscv/kernel/sys_hwprobe.c
··· 5 5 * more details. 6 6 */ 7 7 #include <linux/syscalls.h> 8 + #include <linux/completion.h> 9 + #include <linux/atomic.h> 10 + #include <linux/once.h> 8 11 #include <asm/cacheflush.h> 9 12 #include <asm/cpufeature.h> 10 13 #include <asm/hwprobe.h> ··· 30 27 u64 id = -1ULL; 31 28 bool first = true; 32 29 int cpu; 30 + 31 + if (pair->key != RISCV_HWPROBE_KEY_MVENDORID && 32 + pair->key != RISCV_HWPROBE_KEY_MIMPID && 33 + pair->key != RISCV_HWPROBE_KEY_MARCHID) 34 + goto out; 33 35 34 36 for_each_cpu(cpu, cpus) { 35 37 u64 cpu_id; ··· 66 58 } 67 59 } 68 60 61 + out: 69 62 pair->value = id; 70 63 } 71 64 ··· 463 454 return 0; 464 455 } 465 456 466 - static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, 467 - size_t pair_count, size_t cpusetsize, 468 - unsigned long __user *cpus_user, 469 - unsigned int flags) 470 - { 471 - if (flags & RISCV_HWPROBE_WHICH_CPUS) 472 - return hwprobe_get_cpus(pairs, pair_count, cpusetsize, 473 - cpus_user, flags); 474 - 475 - return hwprobe_get_values(pairs, pair_count, cpusetsize, 476 - cpus_user, flags); 477 - } 478 - 479 457 #ifdef CONFIG_MMU 480 458 481 - static int __init init_hwprobe_vdso_data(void) 459 + static DECLARE_COMPLETION(boot_probes_done); 460 + static atomic_t pending_boot_probes = ATOMIC_INIT(1); 461 + 462 + void riscv_hwprobe_register_async_probe(void) 463 + { 464 + atomic_inc(&pending_boot_probes); 465 + } 466 + 467 + void riscv_hwprobe_complete_async_probe(void) 468 + { 469 + if (atomic_dec_and_test(&pending_boot_probes)) 470 + complete(&boot_probes_done); 471 + } 472 + 473 + static int complete_hwprobe_vdso_data(void) 482 474 { 483 475 struct vdso_arch_data *avd = vdso_k_arch_data; 484 476 u64 id_bitsmash = 0; 485 477 struct riscv_hwprobe pair; 486 478 int key; 479 + 480 + if (unlikely(!atomic_dec_and_test(&pending_boot_probes))) 481 + wait_for_completion(&boot_probes_done); 487 482 488 483 /* 489 484 * Initialize vDSO data with the answers for the "all CPUs" case, to ··· 516 503 * vDSO should defer to the kernel for exotic cpu masks. 517 504 */ 518 505 avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; 506 + 507 + /* 508 + * Make sure all the VDSO values are visible before we look at them. 509 + * This pairs with the implicit "no speculativly visible accesses" 510 + * barrier in the VDSO hwprobe code. 511 + */ 512 + smp_wmb(); 513 + avd->ready = true; 514 + return 0; 515 + } 516 + 517 + static int __init init_hwprobe_vdso_data(void) 518 + { 519 + struct vdso_arch_data *avd = vdso_k_arch_data; 520 + 521 + /* 522 + * Prevent the vDSO cached values from being used, as they're not ready 523 + * yet. 524 + */ 525 + avd->ready = false; 519 526 return 0; 520 527 } 521 528 522 529 arch_initcall_sync(init_hwprobe_vdso_data); 523 530 531 + #else 532 + 533 + static int complete_hwprobe_vdso_data(void) { return 0; } 534 + 524 535 #endif /* CONFIG_MMU */ 536 + 537 + static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, 538 + size_t pair_count, size_t cpusetsize, 539 + unsigned long __user *cpus_user, 540 + unsigned int flags) 541 + { 542 + DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data); 543 + 544 + if (flags & RISCV_HWPROBE_WHICH_CPUS) 545 + return hwprobe_get_cpus(pairs, pair_count, cpusetsize, 546 + cpus_user, flags); 547 + 548 + return hwprobe_get_values(pairs, pair_count, cpusetsize, 549 + cpus_user, flags); 550 + } 525 551 526 552 SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, 527 553 size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
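The deferral machinery above boils down to a reusable shape: an atomic count of in-flight probes seeded with one boot-time reference, a completion fired by the last prober, and a DO_ONCE_SLEEPABLE() gate on the consumer path. A minimal sketch with hypothetical names, using only the atomic and completion APIs seen in the diff::

  #include <linux/atomic.h>
  #include <linux/completion.h>

  static DECLARE_COMPLETION(probes_done);
  /* Seeded to 1 so the consumer's own decrement balances out. */
  static atomic_t pending_probes = ATOMIC_INIT(1);

  void register_async_probe(void)
  {
          atomic_inc(&pending_probes);
  }

  void complete_async_probe(void)
  {
          if (atomic_dec_and_test(&pending_probes))
                  complete(&probes_done);
  }

  /* First consumer drops the boot reference; sleeps only if probes remain. */
  static int wait_for_async_probes(void)
  {
          if (unlikely(!atomic_dec_and_test(&pending_probes)))
                  wait_for_completion(&probes_done);
          return 0;
  }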
+7 -2
arch/riscv/kernel/unaligned_access_speed.c
··· 379 379 static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused) 380 380 { 381 381 schedule_on_each_cpu(check_vector_unaligned_access); 382 + riscv_hwprobe_complete_async_probe(); 382 383 383 384 return 0; 384 385 } ··· 474 473 per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param; 475 474 } else if (!check_vector_unaligned_access_emulated_all_cpus() && 476 475 IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) { 477 - kthread_run(vec_check_unaligned_access_speed_all_cpus, 478 - NULL, "vec_check_unaligned_access_speed_all_cpus"); 476 + riscv_hwprobe_register_async_probe(); 477 + if (IS_ERR(kthread_run(vec_check_unaligned_access_speed_all_cpus, 478 + NULL, "vec_check_unaligned_access_speed_all_cpus"))) { 479 + pr_warn("Failed to create vec_unalign_check kthread\n"); 480 + riscv_hwprobe_complete_async_probe(); 481 + } 479 482 } 480 483 481 484 /*
+1 -1
arch/riscv/kernel/vdso/hwprobe.c
··· 27 27 * homogeneous, then this function can handle requests for arbitrary 28 28 * masks. 29 29 */ 30 - if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus)) 30 + if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus) || unlikely(!avd->ready)) 31 31 return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); 32 32 33 33 /* This is something we can handle, fill out the pairs. */
+3 -8
arch/x86/kernel/cpu/bugs.c
··· 1463 1463 break; 1464 1464 default: 1465 1465 if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) { 1466 - pr_err(RETBLEED_INTEL_MSG); 1466 + if (retbleed_mitigation != RETBLEED_MITIGATION_NONE) 1467 + pr_err(RETBLEED_INTEL_MSG); 1468 + 1467 1469 retbleed_mitigation = RETBLEED_MITIGATION_NONE; 1468 1470 } 1469 1471 } ··· 1826 1824 } 1827 1825 } 1828 1826 #endif 1829 - 1830 - static inline bool match_option(const char *arg, int arglen, const char *opt) 1831 - { 1832 - int len = strlen(opt); 1833 - 1834 - return len == arglen && !strncmp(arg, opt, len); 1835 - } 1836 1827 1837 1828 /* The kernel command line selection for spectre v2 */ 1838 1829 enum spectre_v2_mitigation_cmd {
+1 -1
arch/x86/kernel/cpu/microcode/amd.c
··· 194 194 } 195 195 196 196 switch (cur_rev >> 8) { 197 - case 0x80012: return cur_rev <= 0x800126f; break; 197 + case 0x80012: return cur_rev <= 0x8001277; break; 198 198 case 0x80082: return cur_rev <= 0x800820f; break; 199 199 case 0x83010: return cur_rev <= 0x830107c; break; 200 200 case 0x86001: return cur_rev <= 0x860010e; break;
+10 -1
arch/x86/kernel/cpu/resctrl/monitor.c
··· 458 458 r->mon.mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; 459 459 } 460 460 461 - if (rdt_cpu_has(X86_FEATURE_ABMC)) { 461 + /* 462 + * resctrl assumes a system that supports assignable counters can 463 + * switch to "default" mode. Ensure that there is a "default" mode 464 + * to switch to. This enforces a dependency between the independent 465 + * X86_FEATURE_ABMC and X86_FEATURE_CQM_MBM_TOTAL/X86_FEATURE_CQM_MBM_LOCAL 466 + * hardware features. 467 + */ 468 + if (rdt_cpu_has(X86_FEATURE_ABMC) && 469 + (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL) || 470 + rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))) { 462 471 r->mon.mbm_cntr_assignable = true; 463 472 cpuid_count(0x80000020, 5, &eax, &ebx, &ecx, &edx); 464 473 r->mon.num_mbm_cntrs = (ebx & GENMASK(15, 0)) + 1;
+10
block/blk-settings.c
··· 184 184 if (!bi->interval_exp) 185 185 bi->interval_exp = ilog2(lim->logical_block_size); 186 186 187 + /* 188 + * The PI generation / validation helpers do not expect intervals to 189 + * straddle multiple bio_vecs. Enforce alignment so that those are 190 + * never generated, and that each buffer is aligned as expected. 191 + */ 192 + if (bi->csum_type) { 193 + lim->dma_alignment = max(lim->dma_alignment, 194 + (1U << bi->interval_exp) - 1); 195 + } 196 + 187 197 return 0; 188 198 } 189 199
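As a worked example: with a common 4096-byte protection interval (bi->interval_exp = 12), the new clamp raises lim->dma_alignment to at least (1U << 12) - 1 = 0xfff, i.e. data buffers must be 4 KiB aligned, so a single integrity interval can never straddle a bio_vec boundary.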
+6
drivers/acpi/acpica/tbprint.c
··· 95 95 { 96 96 struct acpi_table_header local_header; 97 97 98 + #pragma GCC diagnostic push 99 + #if defined(__GNUC__) && __GNUC__ >= 11 100 + #pragma GCC diagnostic ignored "-Wstringop-overread" 101 + #endif 102 + 98 103 if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) { 99 104 100 105 /* FACS only has signature and length fields */ ··· 148 143 local_header.asl_compiler_id, 149 144 local_header.asl_compiler_revision)); 150 145 } 146 + #pragma GCC diagnostic pop 151 147 }
+1 -1
drivers/acpi/property.c
··· 1107 1107 size_t num_args, 1108 1108 struct fwnode_reference_args *args) 1109 1109 { 1110 - return acpi_fwnode_get_reference_args(fwnode, propname, NULL, index, num_args, args); 1110 + return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args); 1111 1111 } 1112 1112 EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); 1113 1113
+61 -61
drivers/acpi/riscv/rimt.c
··· 61 61 return 0; 62 62 } 63 63 64 - /** 65 - * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node 66 - * 67 - * @node: RIMT table node to be looked-up 68 - * 69 - * Returns: fwnode_handle pointer on success, NULL on failure 70 - */ 71 - static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node) 72 - { 73 - struct fwnode_handle *fwnode = NULL; 74 - struct rimt_fwnode *curr; 75 - 76 - spin_lock(&rimt_fwnode_lock); 77 - list_for_each_entry(curr, &rimt_fwnode_list, list) { 78 - if (curr->rimt_node == node) { 79 - fwnode = curr->fwnode; 80 - break; 81 - } 82 - } 83 - spin_unlock(&rimt_fwnode_lock); 84 - 85 - return fwnode; 86 - } 87 - 88 64 static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node, 89 65 void *context) 90 66 { ··· 178 202 return NULL; 179 203 } 180 204 205 + /* 206 + * RISC-V supports IOMMU as a PCI device or a platform device. 207 + * When it is a platform device, there should be a namespace device as 208 + * well along with RIMT. To create the link between RIMT information and 209 + * the platform device, the IOMMU driver should register itself with the 210 + * RIMT module. This is true for PCI based IOMMU as well. 211 + */ 212 + int rimt_iommu_register(struct device *dev) 213 + { 214 + struct fwnode_handle *rimt_fwnode; 215 + struct acpi_rimt_node *node; 216 + 217 + node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev); 218 + if (!node) { 219 + pr_err("Could not find IOMMU node in RIMT\n"); 220 + return -ENODEV; 221 + } 222 + 223 + if (dev_is_pci(dev)) { 224 + rimt_fwnode = acpi_alloc_fwnode_static(); 225 + if (!rimt_fwnode) 226 + return -ENOMEM; 227 + 228 + rimt_fwnode->dev = dev; 229 + if (!dev->fwnode) 230 + dev->fwnode = rimt_fwnode; 231 + 232 + rimt_set_fwnode(node, rimt_fwnode); 233 + } else { 234 + rimt_set_fwnode(node, dev->fwnode); 235 + } 236 + 237 + return 0; 238 + } 239 + 240 + #ifdef CONFIG_IOMMU_API 241 + 242 + /** 243 + * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node 244 + * 245 + * @node: RIMT table node to be looked-up 246 + * 247 + * Returns: fwnode_handle pointer on success, NULL on failure 248 + */ 249 + static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node) 250 + { 251 + struct fwnode_handle *fwnode = NULL; 252 + struct rimt_fwnode *curr; 253 + 254 + spin_lock(&rimt_fwnode_lock); 255 + list_for_each_entry(curr, &rimt_fwnode_list, list) { 256 + if (curr->rimt_node == node) { 257 + fwnode = curr->fwnode; 258 + break; 259 + } 260 + } 261 + spin_unlock(&rimt_fwnode_lock); 262 + 263 + return fwnode; 264 + } 265 + 181 266 static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node) 182 267 { 183 268 struct acpi_rimt_pcie_rc *pci_rc; ··· 326 289 327 290 return NULL; 328 291 } 329 - 330 - /* 331 - * RISC-V supports IOMMU as a PCI device or a platform device. 332 - * When it is a platform device, there should be a namespace device as 333 - * well along with RIMT. To create the link between RIMT information and 334 - * the platform device, the IOMMU driver should register itself with the 335 - * RIMT module. This is true for PCI based IOMMU as well. 336 - */ 337 - int rimt_iommu_register(struct device *dev) 338 - { 339 - struct fwnode_handle *rimt_fwnode; 340 - struct acpi_rimt_node *node; 341 - 342 - node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev); 343 - if (!node) { 344 - pr_err("Could not find IOMMU node in RIMT\n"); 345 - return -ENODEV; 346 - } 347 - 348 - if (dev_is_pci(dev)) { 349 - rimt_fwnode = acpi_alloc_fwnode_static(); 350 - if (!rimt_fwnode) 351 - return -ENOMEM; 352 - 353 - rimt_fwnode->dev = dev; 354 - if (!dev->fwnode) 355 - dev->fwnode = rimt_fwnode; 356 - 357 - rimt_set_fwnode(node, rimt_fwnode); 358 - } else { 359 - rimt_set_fwnode(node, dev->fwnode); 360 - } 361 - 362 - return 0; 363 - } 364 - 365 - #ifdef CONFIG_IOMMU_API 366 292 367 293 static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node, 368 294 u32 id_in, u32 *id_out,
+15 -23
drivers/android/binder.c
··· 851 851 } else { 852 852 if (!internal) 853 853 node->local_weak_refs++; 854 - if (!node->has_weak_ref && list_empty(&node->work.entry)) { 855 - if (target_list == NULL) { 856 - pr_err("invalid inc weak node for %d\n", 857 - node->debug_id); 858 - return -EINVAL; 859 - } 860 - /* 861 - * See comment above 862 - */ 854 + if (!node->has_weak_ref && target_list && list_empty(&node->work.entry)) 863 855 binder_enqueue_work_ilocked(&node->work, target_list); 864 - } 865 856 } 866 857 return 0; 867 858 } ··· 2409 2418 2410 2419 /** 2411 2420 * struct binder_ptr_fixup - data to be fixed-up in target buffer 2412 - * @offset offset in target buffer to fixup 2413 - * @skip_size bytes to skip in copy (fixup will be written later) 2414 - * @fixup_data data to write at fixup offset 2415 - * @node list node 2421 + * @offset: offset in target buffer to fixup 2422 + * @skip_size: bytes to skip in copy (fixup will be written later) 2423 + * @fixup_data: data to write at fixup offset 2424 + * @node: list node 2416 2425 * 2417 2426 * This is used for the pointer fixup list (pf) which is created and consumed 2418 2427 * during binder_transaction() and is only accessed locally. No ··· 2429 2438 2430 2439 /** 2431 2440 * struct binder_sg_copy - scatter-gather data to be copied 2432 - * @offset offset in target buffer 2433 - * @sender_uaddr user address in source buffer 2434 - * @length bytes to copy 2435 - * @node list node 2441 + * @offset: offset in target buffer 2442 + * @sender_uaddr: user address in source buffer 2443 + * @length: bytes to copy 2444 + * @node: list node 2436 2445 * 2437 2446 * This is used for the sg copy list (sgc) which is created and consumed 2438 2447 * during binder_transaction() and is only accessed locally. No ··· 4054 4063 4055 4064 /** 4056 4065 * binder_free_buf() - free the specified buffer 4057 - * @proc: binder proc that owns buffer 4058 - * @buffer: buffer to be freed 4059 - * @is_failure: failed to send transaction 4066 + * @proc: binder proc that owns buffer 4067 + * @thread: binder thread performing the buffer release 4068 + * @buffer: buffer to be freed 4069 + * @is_failure: failed to send transaction 4060 4070 * 4061 - * If buffer for an async transaction, enqueue the next async 4071 + * If the buffer is for an async transaction, enqueue the next async 4062 4072 * transaction from the node. 4063 4073 * 4064 - * Cleanup buffer and free it. 4074 + * Cleanup the buffer and free it. 4065 4075 */ 4066 4076 static void 4067 4077 binder_free_buf(struct binder_proc *proc,
+14 -4
drivers/android/binder/freeze.rs
··· 106 106 return Ok(true); 107 107 } 108 108 if freeze.is_clearing { 109 - _removed_listener = freeze_entry.remove_node(); 109 + kernel::warn_on!(freeze.num_cleared_duplicates != 0); 110 + if freeze.num_pending_duplicates > 0 { 111 + // The primary freeze listener was deleted, so convert a pending duplicate back 112 + // into the primary one. 113 + freeze.num_pending_duplicates -= 1; 114 + freeze.is_pending = true; 115 + freeze.is_clearing = true; 116 + } else { 117 + _removed_listener = freeze_entry.remove_node(); 118 + } 110 119 drop(node_refs); 111 120 writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?; 112 121 writer.write_payload(&self.cookie.0)?; 113 122 Ok(true) 114 123 } else { 115 - let is_frozen = freeze.node.owner.inner.lock().is_frozen; 124 + let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen(); 116 125 if freeze.last_is_frozen == Some(is_frozen) { 117 126 return Ok(true); 118 127 } ··· 254 245 ); 255 246 return Err(EINVAL); 256 247 } 257 - if freeze.is_clearing { 258 - // Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE. 248 + let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen(); 249 + if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) { 250 + // Immediately send another FreezeMessage. 259 251 clear_msg = Some(FreezeMessage::init(alloc, cookie)); 260 252 } 261 253 freeze.is_pending = false;
+1 -1
drivers/android/binder/node.rs
··· 687 687 ); 688 688 } 689 689 if inner.freeze_list.is_empty() { 690 - _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new()); 690 + _unused_capacity = mem::take(&mut inner.freeze_list); 691 691 } 692 692 } 693 693
+37 -13
drivers/android/binder/process.rs
··· 72 72 const PROC_DEFER_FLUSH: u8 = 1; 73 73 const PROC_DEFER_RELEASE: u8 = 2; 74 74 75 + #[derive(Copy, Clone)] 76 + pub(crate) enum IsFrozen { 77 + Yes, 78 + No, 79 + InProgress, 80 + } 81 + 82 + impl IsFrozen { 83 + /// Whether incoming transactions should be rejected due to freeze. 84 + pub(crate) fn is_frozen(self) -> bool { 85 + match self { 86 + IsFrozen::Yes => true, 87 + IsFrozen::No => false, 88 + IsFrozen::InProgress => true, 89 + } 90 + } 91 + 92 + /// Whether freeze notifications consider this process frozen. 93 + pub(crate) fn is_fully_frozen(self) -> bool { 94 + match self { 95 + IsFrozen::Yes => true, 96 + IsFrozen::No => false, 97 + IsFrozen::InProgress => false, 98 + } 99 + } 100 + } 101 + 75 102 /// The fields of `Process` protected by the spinlock. 76 103 pub(crate) struct ProcessInner { 77 104 is_manager: bool, ··· 125 98 /// are woken up. 126 99 outstanding_txns: u32, 127 100 /// Process is frozen and unable to service binder transactions. 128 - pub(crate) is_frozen: bool, 101 + pub(crate) is_frozen: IsFrozen, 129 102 /// Process received sync transactions since last frozen. 130 103 pub(crate) sync_recv: bool, 131 104 /// Process received async transactions since last frozen. ··· 151 124 started_thread_count: 0, 152 125 defer_work: 0, 153 126 outstanding_txns: 0, 154 - is_frozen: false, 127 + is_frozen: IsFrozen::No, 155 128 sync_recv: false, 156 129 async_recv: false, 157 130 binderfs_file: None, ··· 1287 1260 let is_manager = { 1288 1261 let mut inner = self.inner.lock(); 1289 1262 inner.is_dead = true; 1290 - inner.is_frozen = false; 1263 + inner.is_frozen = IsFrozen::No; 1291 1264 inner.sync_recv = false; 1292 1265 inner.async_recv = false; 1293 1266 inner.is_manager ··· 1373 1346 .alloc 1374 1347 .take_for_each(|offset, size, debug_id, odata| { 1375 1348 let ptr = offset + address; 1376 - pr_warn!( 1377 - "{}: removing orphan mapping {offset}:{size}\n", 1378 - self.pid_in_current_ns() 1379 - ); 1380 1349 let mut alloc = 1381 1350 Allocation::new(self.clone(), debug_id, offset, size, ptr, false); 1382 1351 if let Some(data) = odata { ··· 1394 1371 return; 1395 1372 } 1396 1373 inner.outstanding_txns -= 1; 1397 - inner.is_frozen && inner.outstanding_txns == 0 1374 + inner.is_frozen.is_frozen() && inner.outstanding_txns == 0 1398 1375 }; 1399 1376 1400 1377 if wake { ··· 1408 1385 let mut inner = self.inner.lock(); 1409 1386 inner.sync_recv = false; 1410 1387 inner.async_recv = false; 1411 - inner.is_frozen = false; 1388 + inner.is_frozen = IsFrozen::No; 1412 1389 drop(inner); 1413 1390 msgs.send_messages(); 1414 1391 return Ok(()); ··· 1417 1394 let mut inner = self.inner.lock(); 1418 1395 inner.sync_recv = false; 1419 1396 inner.async_recv = false; 1420 - inner.is_frozen = true; 1397 + inner.is_frozen = IsFrozen::InProgress; 1421 1398 1422 1399 if info.timeout_ms > 0 { 1423 1400 let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms); ··· 1431 1408 .wait_interruptible_timeout(&mut inner, jiffies) 1432 1409 { 1433 1410 CondVarTimeoutResult::Signal { .. } => { 1434 - inner.is_frozen = false; 1411 + inner.is_frozen = IsFrozen::No; 1435 1412 return Err(ERESTARTSYS); 1436 1413 } 1437 1414 CondVarTimeoutResult::Woken { jiffies: remaining } => { ··· 1445 1422 } 1446 1423 1447 1424 if inner.txns_pending_locked() { 1448 - inner.is_frozen = false; 1425 + inner.is_frozen = IsFrozen::No; 1449 1426 Err(EAGAIN) 1450 1427 } else { 1451 1428 drop(inner); 1452 1429 match self.prepare_freeze_messages() { 1453 1430 Ok(batch) => { 1431 + self.inner.lock().is_frozen = IsFrozen::Yes; 1454 1432 batch.send_messages(); 1455 1433 Ok(()) 1456 1434 } 1457 1435 Err(kernel::alloc::AllocError) => { 1458 - self.inner.lock().is_frozen = false; 1436 + self.inner.lock().is_frozen = IsFrozen::No; 1459 1437 Err(ENOMEM) 1460 1438 } 1461 1439 }
+3 -3
drivers/android/binder/transaction.rs
··· 249 249 250 250 if oneway { 251 251 if let Some(target_node) = self.target_node.clone() { 252 - if process_inner.is_frozen { 252 + if process_inner.is_frozen.is_frozen() { 253 253 process_inner.async_recv = true; 254 254 if self.flags & TF_UPDATE_TXN != 0 { 255 255 if let Some(t_outdated) = ··· 270 270 } 271 271 } 272 272 273 - if process_inner.is_frozen { 273 + if process_inner.is_frozen.is_frozen() { 274 274 return Err(BinderError::new_frozen_oneway()); 275 275 } else { 276 276 return Ok(()); ··· 280 280 } 281 281 } 282 282 283 - if process_inner.is_frozen { 283 + if process_inner.is_frozen.is_frozen() { 284 284 process_inner.sync_recv = true; 285 285 return Err(BinderError::new_frozen()); 286 286 }
+1 -1
drivers/base/arch_topology.c
··· 292 292 * frequency (by keeping the initial capacity_freq_ref value). 293 293 */ 294 294 cpu_clk = of_clk_get(cpu_node, 0); 295 - if (!PTR_ERR_OR_ZERO(cpu_clk)) { 295 + if (!IS_ERR_OR_NULL(cpu_clk)) { 296 296 per_cpu(capacity_freq_ref, cpu) = 297 297 clk_get_rate(cpu_clk) / HZ_PER_KHZ; 298 298 clk_put(cpu_clk);
+1 -1
drivers/base/core.c
··· 1784 1784 return 0; 1785 1785 1786 1786 if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) { 1787 - dev_warn(sup, "sync_state() pending due to %s\n", 1787 + dev_info(sup, "sync_state() pending due to %s\n", 1788 1788 dev_name(link->consumer)); 1789 1789 return 0; 1790 1790 }
+83 -53
drivers/base/devcoredump.c
··· 23 23 void *data; 24 24 size_t datalen; 25 25 /* 26 - * Here, mutex is required to serialize the calls to del_wk work between 27 - * user/kernel space which happens when devcd is added with device_add() 28 - * and that sends uevent to user space. User space reads the uevents, 29 - * and calls to devcd_data_write() which try to modify the work which is 30 - * not even initialized/queued from devcoredump. 26 + * There are 2 races for which mutex is required. 31 27 * 28 + * The first race is between device creation and userspace writing to 29 + * schedule immediately destruction. 32 30 * 31 + * This race is handled by arming the timer before device creation, but 32 + * when device creation fails the timer still exists. 33 33 * 34 - * cpu0(X) cpu1(Y) 34 + * To solve this, hold the mutex during device_add(), and set 35 + * init_completed on success before releasing the mutex. 35 36 * 36 - * dev_coredump() uevent sent to user space 37 - * device_add() ======================> user space process Y reads the 38 - * uevents writes to devcd fd 39 - * which results into writes to 37 + * That way the timer will never fire until device_add() is called, 38 + * it will do nothing if init_completed is not set. The timer is also 39 + * cancelled in that case. 40 - * 41 - * devcd_data_write() 42 - * mod_delayed_work() 43 - * try_to_grab_pending() 44 - * timer_delete() 45 - * debug_assert_init() 46 - * INIT_DELAYED_WORK() 47 - * schedule_delayed_work() 48 - * 49 - * 50 - * Also, mutex alone would not be enough to avoid scheduling of 51 - * del_wk work after it get flush from a call to devcd_free() 52 - * mentioned as below. 53 - * 54 - * disabled_store() 55 - * devcd_free() 56 - * mutex_lock() devcd_data_write() 57 - * flush_delayed_work() 58 - * mutex_unlock() 59 - * mutex_lock() 60 - * mod_delayed_work() 61 - * mutex_unlock() 62 - * So, delete_work flag is required. 41 + * The second race involves multiple parallel invocations of devcd_free(), 42 + * add a deleted flag so only 1 can call the destructor. 63 43 */ 64 44 struct mutex mutex; 65 - bool delete_work; 45 + bool init_completed, deleted; 66 46 struct module *owner; 67 47 ssize_t (*read)(char *buffer, loff_t offset, size_t count, 68 48 void *data, size_t datalen); 69 49 void (*free)(void *data); 50 + /* 51 + * If nothing interferes and device_add() was returns success, 52 + * del_wk will destroy the device after the timer fires. 53 + * 54 + * Multiple userspace processes can interfere in the working of the timer: 55 + * - Writing to the coredump will reschedule the timer to run immediately, 56 + * if still armed. 57 + * 58 + * This is handled by using "if (cancel_delayed_work()) { 59 + * schedule_delayed_work() }", to prevent re-arming after having 60 + * been previously fired. 61 + * - Writing to /sys/class/devcoredump/disabled will destroy the 62 + * coredump synchronously. 63 + * This is handled by using disable_delayed_work_sync(), and then 64 + * checking if deleted flag is set with &devcd->mutex held. 65 + */ 70 66 struct delayed_work del_wk; 71 67 struct device *failing_dev; 72 68 }; ··· 91 95 kfree(devcd); 92 96 } 93 97 98 + static void __devcd_del(struct devcd_entry *devcd) 99 + { 100 + devcd->deleted = true; 101 + device_del(&devcd->devcd_dev); 102 + put_device(&devcd->devcd_dev); 103 + } 104 + 94 105 static void devcd_del(struct work_struct *wk) 95 106 { 96 107 struct devcd_entry *devcd; 108 + bool init_completed; 97 109 98 110 devcd = container_of(wk, struct devcd_entry, del_wk.work); 99 111 100 - device_del(&devcd->devcd_dev); 101 - put_device(&devcd->devcd_dev); 112 + /* devcd->mutex serializes against dev_coredumpm_timeout */ 113 + mutex_lock(&devcd->mutex); 114 + init_completed = devcd->init_completed; 115 + mutex_unlock(&devcd->mutex); 116 + 117 + if (init_completed) 118 + __devcd_del(devcd); 102 119 } 103 120 104 121 static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, ··· 131 122 struct device *dev = kobj_to_dev(kobj); 132 123 struct devcd_entry *devcd = dev_to_devcd(dev); 133 124 134 - mutex_lock(&devcd->mutex); 135 - if (!devcd->delete_work) { 136 - devcd->delete_work = true; 137 - mod_delayed_work(system_wq, &devcd->del_wk, 0); 138 - } 139 - mutex_unlock(&devcd->mutex); 125 + /* 126 + * Although it's tempting to use mod_delayed work here, 127 + * that will cause a reschedule if the timer already fired. 128 + */ 129 + if (cancel_delayed_work(&devcd->del_wk)) 130 + schedule_delayed_work(&devcd->del_wk, 0); 140 131 141 132 return count; 142 133 } ··· 160 151 { 161 152 struct devcd_entry *devcd = dev_to_devcd(dev); 162 153 154 + /* 155 + * To prevent a race with devcd_data_write(), disable work and 156 + * complete manually instead. 157 + * 158 + * We cannot rely on the return value of 159 + * disable_delayed_work_sync() here, because it might be in the 160 + * middle of a cancel_delayed_work + schedule_delayed_work pair. 161 + * 162 + * devcd->mutex here guards against multiple parallel invocations 163 + * of devcd_free(). 164 + */ 165 + disable_delayed_work_sync(&devcd->del_wk); 163 166 mutex_lock(&devcd->mutex); 164 - if (!devcd->delete_work) 165 - devcd->delete_work = true; 166 - 167 - flush_delayed_work(&devcd->del_wk); 167 + if (!devcd->deleted) 168 + __devcd_del(devcd); 168 169 mutex_unlock(&devcd->mutex); 169 170 return 0; 170 171 } ··· 198 179 * put_device() <- last reference 199 180 * error = fn(dev, data) devcd_dev_release() 200 181 * devcd_free(dev, data) kfree(devcd) 201 - * mutex_lock(&devcd->mutex); 202 182 * 203 183 * 204 184 * In the above diagram, it looks like disabled_store() would be racing with parallelly 205 - * running devcd_del() and result in memory abort while acquiring devcd->mutex which 206 - * is called after kfree of devcd memory after dropping its last reference with 185 + * running devcd_del() and result in memory abort after dropping its last reference with 207 186 * put_device(). However, this will not happens as fn(dev, data) runs 208 187 * with its own reference to device via klist_node so it is not its last reference. 209 188 * so, above situation would not occur.
··· 391 374 devcd->read = read; 392 375 devcd->free = free; 393 376 devcd->failing_dev = get_device(dev); 394 - devcd->delete_work = false; 377 + devcd->deleted = false; 395 378 396 379 mutex_init(&devcd->mutex); 397 380 device_initialize(&devcd->devcd_dev); ··· 400 383 atomic_inc_return(&devcd_count)); 401 384 devcd->devcd_dev.class = &devcd_class; 402 385 403 - mutex_lock(&devcd->mutex); 404 386 dev_set_uevent_suppress(&devcd->devcd_dev, true); 387 + 388 + /* devcd->mutex prevents devcd_del() completing until init finishes */ 389 + mutex_lock(&devcd->mutex); 390 + devcd->init_completed = false; 391 + INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); 392 + schedule_delayed_work(&devcd->del_wk, timeout); 393 + 405 394 if (device_add(&devcd->devcd_dev)) 406 395 goto put_device; 407 396 ··· 424 401 425 402 dev_set_uevent_suppress(&devcd->devcd_dev, false); 426 403 kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD); 427 - INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); 428 - schedule_delayed_work(&devcd->del_wk, timeout); 404 + 405 + /* 406 + * Safe to run devcd_del() now that we are done with devcd_dev. 407 + * Alternatively we could have taken a ref on devcd_dev before 408 + * dropping the lock. 409 + */ 410 + devcd->init_completed = true; 429 411 mutex_unlock(&devcd->mutex); 430 412 return; 431 413 put_device: 432 - put_device(&devcd->devcd_dev); 433 414 mutex_unlock(&devcd->mutex); 415 + cancel_delayed_work_sync(&devcd->del_wk); 416 + put_device(&devcd->devcd_dev); 417 + 434 418 put_module: 435 419 module_put(owner); 436 420 free:
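The rewritten write-side path encodes the key rule in a single conditional: expedite a timer that is still armed, never resurrect one that has already fired. A minimal sketch of the idiom, under a hypothetical name::

  /*
   * cancel_delayed_work() returns true only if it dequeued still-pending
   * work, so the reschedule below cannot re-arm work whose timer has
   * already fired; mod_delayed_work() would re-arm it unconditionally.
   */
  static void expedite_teardown(struct delayed_work *dwork)
  {
          if (cancel_delayed_work(dwork))
                  schedule_delayed_work(dwork, 0);
  }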
+15
drivers/block/nbd.c
··· 52 52 static DEFINE_IDR(nbd_index_idr); 53 53 static DEFINE_MUTEX(nbd_index_mutex); 54 54 static struct workqueue_struct *nbd_del_wq; 55 + static struct cred *nbd_cred; 55 56 static int nbd_total_devices = 0; 56 57 57 58 struct nbd_sock { ··· 555 554 int result; 556 555 struct msghdr msg = {} ; 557 556 unsigned int noreclaim_flag; 557 + const struct cred *old_cred; 558 558 559 559 if (unlikely(!sock)) { 560 560 dev_err_ratelimited(disk_to_dev(nbd->disk), ··· 563 561 (send ? "send" : "recv")); 564 562 return -EINVAL; 565 563 } 564 + 565 + old_cred = override_creds(nbd_cred); 566 566 567 567 msg.msg_iter = *iter; 568 568 ··· 589 585 } while (msg_data_left(&msg)); 590 586 591 587 memalloc_noreclaim_restore(noreclaim_flag); 588 + 589 + revert_creds(old_cred); 592 590 593 591 return result; 594 592 } ··· 2683 2677 return -ENOMEM; 2684 2678 } 2685 2679 2680 + nbd_cred = prepare_kernel_cred(&init_task); 2681 + if (!nbd_cred) { 2682 + destroy_workqueue(nbd_del_wq); 2683 + unregister_blkdev(NBD_MAJOR, "nbd"); 2684 + return -ENOMEM; 2685 + } 2686 + 2686 2687 if (genl_register_family(&nbd_genl_family)) { 2688 + put_cred(nbd_cred); 2687 2689 destroy_workqueue(nbd_del_wq); 2688 2690 unregister_blkdev(NBD_MAJOR, "nbd"); 2689 2691 return -EINVAL; ··· 2746 2732 /* Also wait for nbd_dev_remove_work() completes */ 2747 2733 destroy_workqueue(nbd_del_wq); 2748 2734 2735 + put_cred(nbd_cred); 2749 2736 idr_destroy(&nbd_index_idr); 2750 2737 unregister_blkdev(NBD_MAJOR, "nbd"); 2751 2738 }
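The credential handling follows the usual prepare_kernel_cred() / override_creds() / revert_creds() pattern for doing I/O with the kernel's identity rather than the calling task's. A minimal sketch (hypothetical helper, setup and error paths elided)::

  #include <linux/cred.h>

  static struct cred *io_cred;    /* prepare_kernel_cred(&init_task) at init */

  static int do_io_as_kernel(void)
  {
          const struct cred *old = override_creds(io_cred);
          int ret;

          /* socket sends/receives here run with kernel credentials */
          ret = 0;

          revert_creds(old);
          return ret;
  }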
+1 -1
drivers/comedi/comedi_buf.c
··· 317 317 unsigned int count = 0; 318 318 const unsigned int num_sample_bytes = comedi_bytes_per_sample(s); 319 319 320 - if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) { 320 + if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) { 321 321 async->munge_count += num_bytes; 322 322 return num_bytes; 323 323 }
+5 -1
drivers/cpufreq/amd-pstate.c
··· 1614 1614 * min_perf value across kexec reboots. If this CPU is just onlined normally after this, the 1615 1615 * limits, epp and desired perf will get reset to the cached values in cpudata struct 1616 1616 */ 1617 - return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false); 1617 + return amd_pstate_update_perf(policy, perf.bios_min_perf, 1618 + FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached), 1619 + FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached), 1620 + FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached), 1621 + false); 1618 1622 } 1619 1623 1620 1624 static int amd_pstate_suspend(struct cpufreq_policy *policy)
+9 -12
drivers/cpuidle/governors/menu.c
··· 188 188 * 189 189 * This can deal with workloads that have long pauses interspersed 190 190 * with sporadic activity with a bunch of short pauses. 191 + * 192 + * However, if the number of remaining samples is too small to exclude 193 + * any more outliers, allow the deepest available idle state to be 194 + * selected because there are systems where the time spent by CPUs in 195 + * deep idle states is correlated to the maximum frequency the CPUs 196 + * can get to. On those systems, shallow idle states should be avoided 197 + * unless there is a clear indication that the given CPU is most likley 198 + * going to be woken up shortly. 191 199 */ 192 - if (divisor * 4 <= INTERVALS * 3) { 193 - /* 194 - * If there are sufficiently many data points still under 195 - * consideration after the outliers have been eliminated, 196 - * returning without a prediction would be a mistake because it 197 - * is likely that the next interval will not exceed the current 198 - * maximum, so return the latter in that case. 199 - */ 200 - if (divisor >= INTERVALS / 2) 201 - return max; 202 - 200 + if (divisor * 4 <= INTERVALS * 3) 203 201 return UINT_MAX; 204 - } 205 202 206 203 /* Update the thresholds for the next round. */ 207 204 if (avg - min > max - avg)
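For scale: the menu governor keeps INTERVALS = 8 recent measurements, so the divisor * 4 <= INTERVALS * 3 test fires once at least two of the eight samples (a quarter) have been discarded as outliers. The deleted divisor >= INTERVALS / 2 branch used to keep returning max while four or more samples survived; after this change any such case returns UINT_MAX and lets the deepest available state be considered.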
+1 -1
drivers/firewire/core-transaction.c
··· 269 269 } 270 270 271 271 static int allocate_tlabel(struct fw_card *card) 272 - __must_hold(&card->transactions_lock) 272 + __must_hold(&card->transactions.lock) 273 273 { 274 274 int tlabel; 275 275
+10
drivers/firewire/init_ohci1394_dma.c
··· 167 167 168 168 /** 169 169 * init_ohci1394_wait_for_busresets - wait until bus resets are completed 170 + * @ohci: Pointer to the OHCI-1394 controller structure 170 171 * 171 172 * OHCI1394 initialization itself and any device going on- or offline 172 173 * and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec ··· 190 189 191 190 /** 192 191 * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging 192 + * @ohci: Pointer to the OHCI-1394 controller structure 193 + * 193 194 * This enables remote DMA access over IEEE1394 from every host for the low 194 195 * 4GB of address space. DMA accesses above 4GB are not available currently. 195 196 */ ··· 204 201 205 202 /** 206 203 * init_ohci1394_reset_and_init_dma - init controller and enable DMA 204 + * @ohci: Pointer to the OHCI-1394 controller structure 205 + * 207 206 * This initializes the given controller and enables physical DMA engine in it. 208 207 */ 209 208 static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci) ··· 235 230 236 231 /** 237 232 * init_ohci1394_controller - Map the registers of the controller and init DMA 233 + * @num: PCI bus number 234 + * @slot: PCI device number 235 + * @func: PCI function number 236 + * 238 237 * This maps the registers of the specified controller and initializes it 239 238 */ 240 239 static inline void __init init_ohci1394_controller(int num, int slot, int func) ··· 293 284 294 285 /** 295 286 * setup_ohci1394_dma - enables early OHCI1394 DMA initialization 287 + * @opt: Kernel command line parameter string 296 288 */ 297 289 static int __init setup_ohci1394_dma(char *opt) 298 290 {
+27 -10
drivers/firmware/arm_ffa/driver.c
··· 649 649 return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE; 650 650 } 651 651 652 + static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src) 653 + { 654 + struct ffa_mem_region_attributes *ep_mem_access; 655 + 656 + if (FFA_EMAD_HAS_IMPDEF_FIELD(version)) 657 + memcpy(dst, src, sizeof(ep_mem_access->impdef_val)); 658 + } 659 + 660 + static void 661 + ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region) 662 + { 663 + if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) { 664 + mem_region->ep_mem_size = 0; 665 + } else { 666 + mem_region->ep_mem_size = ffa_emad_size_get(version); 667 + mem_region->ep_mem_offset = sizeof(*mem_region); 668 + memset(mem_region->reserved, 0, 12); 669 + } 670 + } 671 + 652 672 static int 653 673 ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, 654 674 struct ffa_mem_ops_args *args) ··· 687 667 mem_region->flags = args->flags; 688 668 mem_region->sender_id = drv_info->vm_id; 689 669 mem_region->attributes = ffa_memory_attributes_get(func_id); 690 - ep_mem_access = buffer + 691 - ffa_mem_desc_offset(buffer, 0, drv_info->version); 692 670 composite_offset = ffa_mem_desc_offset(buffer, args->nattrs, 693 671 drv_info->version); 694 672 695 - for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) { 673 + for (idx = 0; idx < args->nattrs; idx++) { 674 + ep_mem_access = buffer + 675 + ffa_mem_desc_offset(buffer, idx, drv_info->version); 696 676 ep_mem_access->receiver = args->attrs[idx].receiver; 697 677 ep_mem_access->attrs = args->attrs[idx].attrs; 698 678 ep_mem_access->composite_off = composite_offset; 699 679 ep_mem_access->flag = 0; 700 680 ep_mem_access->reserved = 0; 681 + ffa_emad_impdef_value_init(drv_info->version, 682 + ep_mem_access->impdef_val, 683 + args->attrs[idx].impdef_val); 701 684 } 702 685 mem_region->handle = 0; 703 686 mem_region->ep_count = args->nattrs; 704 - if (drv_info->version <= FFA_VERSION_1_0) { 705 - mem_region->ep_mem_size = 0; 706 - } else { 707 - mem_region->ep_mem_size = sizeof(*ep_mem_access); 708 - mem_region->ep_mem_offset = sizeof(*mem_region); 709 - memset(mem_region->reserved, 0, 12); 710 - } 687 + ffa_mem_region_additional_setup(drv_info->version, mem_region); 711 688 712 689 composite = buffer + composite_offset; 713 690 composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
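Context for the loop rework: from FF-A v1.1 the endpoint memory-access descriptor may carry an implementation-defined field, so its size is version-dependent and ep_mem_access++ no longer steps to the next descriptor. A sketch of the indexed addressing this switches to (offsets illustrative; the driver's ffa_mem_desc_offset() does the real computation):

	/* Descriptors sit at a version-dependent stride from the base, so
	 * each one is located by index rather than by pointer increment.
	 */
	static void *example_emad_at(void *buffer, unsigned int base_offset,
				     unsigned int emad_size, unsigned int idx)
	{
		return buffer + base_offset + idx * emad_size;
	}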
+26 -6
drivers/firmware/arm_scmi/common.h
··· 309 309 SCMI_DEBUG_COUNTERS_LAST 310 310 }; 311 311 312 - static inline void scmi_inc_count(atomic_t *arr, int stat) 312 + /** 313 + * struct scmi_debug_info - Debug common info 314 + * @top_dentry: A reference to the top debugfs dentry 315 + * @name: Name of this SCMI instance 316 + * @type: Type of this SCMI instance 317 + * @is_atomic: Flag to state if the transport of this instance is atomic 318 + * @counters: An array of atomic_t's used for tracking statistics (if enabled) 319 + */ 320 + struct scmi_debug_info { 321 + struct dentry *top_dentry; 322 + const char *name; 323 + const char *type; 324 + bool is_atomic; 325 + atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; 326 + }; 327 + 328 + static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat) 313 329 { 314 - if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) 315 - atomic_inc(&arr[stat]); 330 + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) { 331 + if (dbg) 332 + atomic_inc(&dbg->counters[stat]); 333 + } 316 334 } 317 335 318 - static inline void scmi_dec_count(atomic_t *arr, int stat) 336 + static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat) 319 337 { 320 - if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) 321 - atomic_dec(&arr[stat]); 338 + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) { 339 + if (dbg) 340 + atomic_dec(&dbg->counters[stat]); 341 + } 322 342 } 323 343 324 344 enum scmi_bad_msg {
+21 -38
drivers/firmware/arm_scmi/driver.c
··· 116 116 #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph) 117 117 118 118 /** 119 - * struct scmi_debug_info - Debug common info 120 - * @top_dentry: A reference to the top debugfs dentry 121 - * @name: Name of this SCMI instance 122 - * @type: Type of this SCMI instance 123 - * @is_atomic: Flag to state if the transport of this instance is atomic 124 - * @counters: An array of atomic_c's used for tracking statistics (if enabled) 125 - */ 126 - struct scmi_debug_info { 127 - struct dentry *top_dentry; 128 - const char *name; 129 - const char *type; 130 - bool is_atomic; 131 - atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; 132 - }; 133 - 134 - /** 135 119 * struct scmi_info - Structure representing a SCMI instance 136 120 * 137 121 * @id: A sequence number starting from zero identifying this instance ··· 594 610 /* Set in-flight */ 595 611 set_bit(xfer->hdr.seq, minfo->xfer_alloc_table); 596 612 hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq); 597 - scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT); 613 + scmi_inc_count(info->dbg, XFERS_INFLIGHT); 598 614 599 615 xfer->pending = true; 600 616 } ··· 803 819 hash_del(&xfer->node); 804 820 xfer->pending = false; 805 821 806 - scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT); 822 + scmi_dec_count(info->dbg, XFERS_INFLIGHT); 807 823 } 824 + xfer->flags = 0; 808 825 hlist_add_head(&xfer->node, &minfo->free_xfers); 809 826 } 810 827 spin_unlock_irqrestore(&minfo->xfer_lock, flags); ··· 824 839 { 825 840 struct scmi_info *info = handle_to_scmi_info(handle); 826 841 827 - xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW; 828 - xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET; 829 842 return __scmi_xfer_put(&info->tx_minfo, xfer); 830 843 } 831 844 ··· 1017 1034 spin_unlock_irqrestore(&minfo->xfer_lock, flags); 1018 1035 1019 1036 scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED); 1020 - scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); 1037 + scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED); 1021 1038 1022 1039 return xfer; 1023 1040 } ··· 1045 1062 msg_type, xfer_id, msg_hdr, xfer->state); 1046 1063 1047 1064 scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID); 1048 - scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); 1065 + scmi_inc_count(info->dbg, ERR_MSG_INVALID); 1049 1066 1050 1067 /* On error the refcount incremented above has to be dropped */ 1051 1068 __scmi_xfer_put(minfo, xfer); ··· 1090 1107 PTR_ERR(xfer)); 1091 1108 1092 1109 scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM); 1093 - scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); 1110 + scmi_inc_count(info->dbg, ERR_MSG_NOMEM); 1094 1111 1095 1112 scmi_clear_channel(info, cinfo); 1096 1113 return; ··· 1106 1123 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, 1107 1124 xfer->hdr.id, "NOTI", xfer->hdr.seq, 1108 1125 xfer->hdr.status, xfer->rx.buf, xfer->rx.len); 1109 - scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); 1126 + scmi_inc_count(info->dbg, NOTIFICATION_OK); 1110 1127 1111 1128 scmi_notify(cinfo->handle, xfer->hdr.protocol_id, 1112 1129 xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); ··· 1166 1183 if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { 1167 1184 scmi_clear_channel(info, cinfo); 1168 1185 complete(xfer->async_done); 1169 - scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); 1186 + scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK); 1170 1187 } else { 1171 1188 complete(&xfer->done); 1172 - scmi_inc_count(info->dbg->counters, RESPONSE_OK); 1189 + scmi_inc_count(info->dbg, RESPONSE_OK); 1173 1190 } 1174 1191 1175 1192 
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { ··· 1279 1296 "timed out in resp(caller: %pS) - polling\n", 1280 1297 (void *)_RET_IP_); 1281 1298 ret = -ETIMEDOUT; 1282 - scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); 1299 + scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT); 1283 1300 } 1284 1301 } 1285 1302 ··· 1304 1321 "RESP" : "resp", 1305 1322 xfer->hdr.seq, xfer->hdr.status, 1306 1323 xfer->rx.buf, xfer->rx.len); 1307 - scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); 1324 + scmi_inc_count(info->dbg, RESPONSE_POLLED_OK); 1308 1325 1309 1326 if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { 1310 1327 scmi_raw_message_report(info->raw, xfer, ··· 1319 1336 dev_err(dev, "timed out in resp(caller: %pS)\n", 1320 1337 (void *)_RET_IP_); 1321 1338 ret = -ETIMEDOUT; 1322 - scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); 1339 + scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT); 1323 1340 } 1324 1341 } 1325 1342 ··· 1403 1420 !is_transport_polling_capable(info->desc)) { 1404 1421 dev_warn_once(dev, 1405 1422 "Polling mode is not supported by transport.\n"); 1406 - scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); 1423 + scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED); 1407 1424 return -EINVAL; 1408 1425 } 1409 1426 1410 1427 cinfo = idr_find(&info->tx_idr, pi->proto->id); 1411 1428 if (unlikely(!cinfo)) { 1412 - scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); 1429 + scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND); 1413 1430 return -EINVAL; 1414 1431 } 1415 1432 /* True ONLY if also supported by transport. */ ··· 1444 1461 ret = info->desc->ops->send_message(cinfo, xfer); 1445 1462 if (ret < 0) { 1446 1463 dev_dbg(dev, "Failed to send message %d\n", ret); 1447 - scmi_inc_count(info->dbg->counters, SENT_FAIL); 1464 + scmi_inc_count(info->dbg, SENT_FAIL); 1448 1465 return ret; 1449 1466 } 1450 1467 1451 1468 trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, 1452 1469 xfer->hdr.id, "CMND", xfer->hdr.seq, 1453 1470 xfer->hdr.status, xfer->tx.buf, xfer->tx.len); 1454 - scmi_inc_count(info->dbg->counters, SENT_OK); 1471 + scmi_inc_count(info->dbg, SENT_OK); 1455 1472 1456 1473 ret = scmi_wait_for_message_response(cinfo, xfer); 1457 1474 if (!ret && xfer->hdr.status) { 1458 1475 ret = scmi_to_linux_errno(xfer->hdr.status); 1459 - scmi_inc_count(info->dbg->counters, ERR_PROTOCOL); 1476 + scmi_inc_count(info->dbg, ERR_PROTOCOL); 1460 1477 } 1461 1478 1462 1479 if (info->desc->ops->mark_txdone) ··· 3027 3044 u8 channels[SCMI_MAX_CHANNELS] = {}; 3028 3045 DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {}; 3029 3046 3030 - if (!info->dbg) 3031 - return -EINVAL; 3032 - 3033 3047 /* Enumerate all channels to collect their ids */ 3034 3048 idr_for_each_entry(&info->tx_idr, cinfo, id) { 3035 3049 /* ··· 3198 3218 if (!info->dbg) 3199 3219 dev_warn(dev, "Failed to setup SCMI debugfs.\n"); 3200 3220 3201 - if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { 3221 + if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { 3202 3222 ret = scmi_debugfs_raw_mode_setup(info); 3203 3223 if (!coex) { 3204 3224 if (ret) ··· 3402 3422 { 3403 3423 if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) { 3404 3424 struct scmi_info *info = handle_to_scmi_info(handle); 3425 + 3426 + if (!info->dbg) 3427 + return 0; 3405 3428 3406 3429 return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]); 3407 3430 } else {
+1
drivers/gpio/gpio-104-idio-16.c
··· 59 59 .reg_stride = 1, 60 60 .val_bits = 8, 61 61 .io_port = true, 62 + .max_register = 0x5, 62 63 .wr_table = &idio_16_wr_table, 63 64 .rd_table = &idio_16_rd_table, 64 65 .volatile_table = &idio_16_rd_table,
+5
drivers/gpio/gpio-idio-16.c
··· 6 6 7 7 #define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16" 8 8 9 + #include <linux/bitmap.h> 9 10 #include <linux/bits.h> 10 11 #include <linux/device.h> 11 12 #include <linux/err.h> ··· 108 107 struct idio_16_data *data; 109 108 struct regmap_irq_chip *chip; 110 109 struct regmap_irq_chip_data *chip_data; 110 + DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO); 111 111 112 112 if (!config->parent) 113 113 return -EINVAL; ··· 165 163 gpio_config.reg_stride = IDIO_16_REG_STRIDE; 166 164 gpio_config.irq_domain = regmap_irq_get_domain(chip_data); 167 165 gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate; 166 + 167 + bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0)); 168 + gpio_config.fixed_direction_output = fixed_direction_output; 168 169 169 170 return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config)); 170 171 }
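For context, fixed_direction_output is the new gpio-regmap hook (see its hunk below) taking a bitmap of lines whose direction is hardwired; on this device all 16 lines are output-only. A minimal sketch of filling such a bitmap:

	#include <linux/bitmap.h>
	#include <linux/bits.h>

	static void example_mark_fixed_outputs(unsigned long *fixed_out)
	{
		/* Lines 0..15 are wired as outputs on this hardware. */
		bitmap_from_u64(fixed_out, GENMASK_U64(15, 0));
	}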
+3 -11
drivers/gpio/gpio-ljca.c
··· 286 286 { 287 287 const struct ljca_gpio_packet *packet = evt_data; 288 288 struct ljca_gpio_dev *ljca_gpio = context; 289 - int i, irq; 289 + int i; 290 290 291 291 if (cmd != LJCA_GPIO_INT_EVENT) 292 292 return; 293 293 294 294 for (i = 0; i < packet->num; i++) { 295 - irq = irq_find_mapping(ljca_gpio->gc.irq.domain, 296 - packet->item[i].index); 297 - if (!irq) { 298 - dev_err(ljca_gpio->gc.parent, 299 - "gpio_id %u does not mapped to IRQ yet\n", 300 - packet->item[i].index); 301 - return; 302 - } 303 - 304 - generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq); 295 + generic_handle_domain_irq(ljca_gpio->gc.irq.domain, 296 + packet->item[i].index); 305 297 set_bit(packet->item[i].index, ljca_gpio->reenable_irqs); 306 298 } 307 299
+1
drivers/gpio/gpio-pci-idio-16.c
··· 41 41 .reg_stride = 1, 42 42 .val_bits = 8, 43 43 .io_port = true, 44 + .max_register = 0x7, 44 45 .wr_table = &idio_16_wr_table, 45 46 .rd_table = &idio_16_rd_table, 46 47 .volatile_table = &idio_16_rd_table,
+24 -2
drivers/gpio/gpio-regmap.c
··· 31 31 unsigned int reg_clr_base; 32 32 unsigned int reg_dir_in_base; 33 33 unsigned int reg_dir_out_base; 34 + unsigned long *fixed_direction_output; 34 35 35 36 #ifdef CONFIG_REGMAP_IRQ 36 37 int regmap_irq_line; ··· 134 133 struct gpio_regmap *gpio = gpiochip_get_data(chip); 135 134 unsigned int base, val, reg, mask; 136 135 int invert, ret; 136 + 137 + if (gpio->fixed_direction_output) { 138 + if (test_bit(offset, gpio->fixed_direction_output)) 139 + return GPIO_LINE_DIRECTION_OUT; 140 + else 141 + return GPIO_LINE_DIRECTION_IN; 142 + } 137 143 138 144 if (gpio->reg_dat_base && !gpio->reg_set_base) 139 145 return GPIO_LINE_DIRECTION_IN; ··· 292 284 goto err_free_gpio; 293 285 } 294 286 287 + if (config->fixed_direction_output) { 288 + gpio->fixed_direction_output = bitmap_alloc(chip->ngpio, 289 + GFP_KERNEL); 290 + if (!gpio->fixed_direction_output) { 291 + ret = -ENOMEM; 292 + goto err_free_gpio; 293 + } 294 + bitmap_copy(gpio->fixed_direction_output, 295 + config->fixed_direction_output, chip->ngpio); 296 + } 297 + 295 298 /* if not set, assume there is only one register */ 296 299 gpio->ngpio_per_reg = config->ngpio_per_reg; 297 300 if (!gpio->ngpio_per_reg) ··· 319 300 320 301 ret = gpiochip_add_data(chip, gpio); 321 302 if (ret < 0) 322 - goto err_free_gpio; 303 + goto err_free_bitmap; 323 304 324 305 #ifdef CONFIG_REGMAP_IRQ 325 306 if (config->regmap_irq_chip) { ··· 328 309 config->regmap_irq_line, config->regmap_irq_flags, 329 310 0, config->regmap_irq_chip, &gpio->irq_chip_data); 330 311 if (ret) 331 - goto err_free_gpio; 312 + goto err_free_bitmap; 332 313 333 314 irq_domain = regmap_irq_get_domain(gpio->irq_chip_data); 334 315 } else ··· 345 326 346 327 err_remove_gpiochip: 347 328 gpiochip_remove(chip); 329 + err_free_bitmap: 330 + bitmap_free(gpio->fixed_direction_output); 348 331 err_free_gpio: 349 332 kfree(gpio); 350 333 return ERR_PTR(ret); ··· 365 344 #endif 366 345 367 346 gpiochip_remove(&gpio->gpio_chip); 347 + bitmap_free(gpio->fixed_direction_output); 368 348 kfree(gpio); 369 349 } 370 350 EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
+17 -14
drivers/gpio/gpiolib-acpi-core.c
··· 291 291 return GPIOD_ASIS; 292 292 } 293 293 294 + static void acpi_gpio_set_debounce_timeout(struct gpio_desc *desc, 295 + unsigned int acpi_debounce) 296 + { 297 + int ret; 298 + 299 + /* ACPI uses hundredths of milliseconds units */ 300 + acpi_debounce *= 10; 301 + ret = gpio_set_debounce_timeout(desc, acpi_debounce); 302 + if (ret) 303 + gpiod_warn(desc, "Failed to set debounce-timeout %u: %d\n", 304 + acpi_debounce, ret); 305 + } 306 + 294 307 static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, 295 308 struct acpi_resource_gpio *agpio, 296 309 unsigned int index, ··· 313 300 enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity); 314 301 unsigned int pin = agpio->pin_table[index]; 315 302 struct gpio_desc *desc; 316 - int ret; 317 303 318 304 desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags); 319 305 if (IS_ERR(desc)) 320 306 return desc; 321 307 322 - /* ACPI uses hundredths of milliseconds units */ 323 - ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10); 324 - if (ret) 325 - dev_warn(chip->parent, 326 - "Failed to set debounce-timeout for pin 0x%04X, err %d\n", 327 - pin, ret); 308 + acpi_gpio_set_debounce_timeout(desc, agpio->debounce_timeout); 328 309 329 310 return desc; 330 311 } ··· 382 375 desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event"); 383 376 if (IS_ERR(desc)) { 384 377 dev_err(chip->parent, 385 - "Failed to request GPIO for pin 0x%04X, err %ld\n", 386 - pin, PTR_ERR(desc)); 378 + "Failed to request GPIO for pin 0x%04X, err %pe\n", 379 + pin, desc); 387 380 return AE_OK; 388 381 } 389 382 ··· 951 944 bool can_fallback = acpi_can_fallback_to_crs(adev, con_id); 952 945 struct acpi_gpio_info info = {}; 953 946 struct gpio_desc *desc; 954 - int ret; 955 947 956 948 desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info); 957 949 if (IS_ERR(desc)) ··· 965 959 acpi_gpio_update_gpiod_flags(dflags, &info); 966 960 acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info); 967 961 968 - /* ACPI uses hundredths of milliseconds units */ 969 - ret = gpio_set_debounce_timeout(desc, info.debounce * 10); 970 - if (ret) 971 - return ERR_PTR(ret); 962 + acpi_gpio_set_debounce_timeout(desc, info.debounce); 972 963 973 964 return desc; 974 965 }
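Unit bookkeeping behind the new helper: ACPI expresses DebounceTimeout in hundredths of a millisecond (10 us units), while gpio_set_debounce_timeout() takes microseconds, hence the multiply by 10. For illustration:

	/* An ACPI value of 250 (hundredths of ms) becomes 2500 us. */
	static inline unsigned int example_acpi_debounce_to_us(unsigned int acpi_val)
	{
		return acpi_val * 10;
	}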
+2 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 551 551 struct dc_stream_state *stream, 552 552 struct dc_crtc_timing_adjust *adjust) 553 553 { 554 - struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL); 554 + struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT); 555 555 if (!offload_work) { 556 556 drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n"); 557 557 return; 558 558 } 559 559 560 - struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL); 560 + struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT); 561 561 if (!adjust_copy) { 562 562 drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n"); 563 563 kfree(offload_work);
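Reviewer note: presumably this path can run in atomic context (the vupdate offload is driven from interrupt-time timing updates), where a GFP_KERNEL allocation might sleep; GFP_NOWAIT neither sleeps nor enters direct reclaim, and both call sites already tolerate allocation failure by bailing out.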
+3
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 200 200 */ 201 201 struct dc_link *link = dc->links[i]; 202 202 203 + if (link->ep_type != DISPLAY_ENDPOINT_PHY) 204 + continue; 205 + 203 206 link->link_enc->funcs->hw_init(link->link_enc); 204 207 205 208 /* Check for enabled DIG to identify enabled display */
+7 -1
drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
··· 44 44 */ 45 45 #define MAX_PIPES 6 46 46 #define MAX_PHANTOM_PIPES (MAX_PIPES / 2) 47 - #define MAX_LINKS (MAX_PIPES * 2 +2) 47 + 48 + #define MAX_DPIA 6 49 + #define MAX_CONNECTOR 6 50 + #define MAX_VIRTUAL_LINKS 4 51 + 52 + #define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS) 53 + 48 54 #define MAX_DIG_LINK_ENCODERS 7 49 55 #define MAX_DWB_PIPES 1 50 56 #define MAX_HPO_DP2_ENCODERS 4
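For the arithmetic: the old bound evaluated to MAX_PIPES * 2 + 2 = 6 * 2 + 2 = 14 links, while the explicit per-type budget gives MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS = 6 + 6 + 4 = 16, so arrays sized by MAX_LINKS gain two entries and the limit now documents where each link may come from.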
+54 -6
drivers/gpu/drm/drm_panic.c
··· 174 174 *p = color & 0xff; 175 175 } 176 176 177 + /* 178 + * Special case if the pixel crosses page boundaries 179 + */ 180 + static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page, 181 + unsigned int offset, u32 color) 182 + { 183 + u8 *vaddr2; 184 + u8 *p = vaddr + offset; 185 + 186 + vaddr2 = kmap_local_page_try_from_panic(next_page); 187 + 188 + *p++ = color & 0xff; 189 + color >>= 8; 190 + 191 + if (offset == PAGE_SIZE - 1) 192 + p = vaddr2; 193 + 194 + *p++ = color & 0xff; 195 + color >>= 8; 196 + 197 + if (offset == PAGE_SIZE - 2) 198 + p = vaddr2; 199 + 200 + *p = color & 0xff; 201 + kunmap_local(vaddr2); 202 + } 203 + 177 204 static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color) 178 205 { 179 206 u32 *p = vaddr + offset; ··· 258 231 page = new_page; 259 232 vaddr = kmap_local_page_try_from_panic(pages[page]); 260 233 } 261 - if (vaddr) 234 + if (!vaddr) 235 + continue; 236 + 237 + // Special case for 24bit, as a pixel might cross page boundaries 238 + if (cpp == 3 && offset + 3 > PAGE_SIZE) 239 + drm_panic_write_pixel24_xpage(vaddr, pages[page + 1], 240 + offset, fg32); 241 + else 262 242 drm_panic_write_pixel(vaddr, offset, fg32, cpp); 263 243 } 264 244 } ··· 355 321 page = new_page; 356 322 vaddr = kmap_local_page_try_from_panic(pages[page]); 357 323 } 358 - drm_panic_write_pixel(vaddr, offset, color, cpp); 324 + if (!vaddr) 325 + continue; 326 + 327 + // Special case for 24bit, as a pixel might cross page boundaries 328 + if (cpp == 3 && offset + 3 > PAGE_SIZE) 329 + drm_panic_write_pixel24_xpage(vaddr, pages[page + 1], 330 + offset, color); 331 + else 332 + drm_panic_write_pixel(vaddr, offset, color, cpp); 359 333 } 360 334 } 361 335 if (vaddr) ··· 471 429 static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect, 472 430 const struct font_desc *font, u32 fg_color) 473 431 { 432 + if (rect->x2 > sb->width || rect->y2 > sb->height) 433 + return; 434 + 474 435 if (logo_mono) 475 436 drm_panic_blit(sb, rect, logo_mono->data, 476 437 DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color); ··· 522 477 struct drm_panic_line *line, int yoffset, u32 fg_color) 523 478 { 524 479 int chars_per_row = sb->width / font->width; 525 - struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height); 480 + struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height); 526 481 struct drm_panic_line line_wrap; 527 482 528 483 if (line->len > chars_per_row) { ··· 565 520 struct drm_panic_line line; 566 521 int yoffset; 567 522 568 - if (!font) 523 + if (!font || font->width > sb->width) 569 524 return; 570 525 571 526 yoffset = sb->height - font->height - (sb->height % font->height) / 2; ··· 778 733 pr_debug("QR width %d and scale %d\n", qr_width, scale); 779 734 r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale); 780 735 781 - v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5; 736 + v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg); 737 + if (v_margin < 0) 738 + return -ENOSPC; 739 + v_margin /= 5; 782 740 783 741 drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin); 784 742 r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale, ··· 794 746 /* Fill with the background color, and draw text on top */ 795 747 drm_panic_fill(sb, &r_screen, bg_color); 796 748 797 - if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr)) 
749 + if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas)) 798 750 drm_panic_logo_draw(sb, &r_logo, font, fg_color); 799 751 800 752 draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
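The cross-page case is easiest to see with numbers: a 24-bit pixel starting at byte offset PAGE_SIZE - 2 puts bytes 0 and 1 on the current page and byte 2 on the next, which is why the helper above maps the following page and switches pointers mid-pixel. The trigger condition as a standalone predicate:

	#include <linux/mm.h>

	/* A 3-byte (24bpp) pixel straddles a page boundary iff its last
	 * byte would land past the end of the current page.
	 */
	static inline bool example_pixel24_crosses_page(unsigned int offset)
	{
		return offset + 3 > PAGE_SIZE;
	}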
+13 -12
drivers/gpu/drm/i915/display/intel_fb.c
··· 2117 2117 2118 2118 intel_frontbuffer_put(intel_fb->frontbuffer); 2119 2119 2120 + kfree(intel_fb->panic); 2120 2121 kfree(intel_fb); 2121 2122 } 2122 2123 ··· 2216 2215 struct intel_display *display = to_intel_display(obj->dev); 2217 2216 struct drm_framebuffer *fb = &intel_fb->base; 2218 2217 u32 max_stride; 2219 - int ret = -EINVAL; 2218 + int ret; 2220 2219 int i; 2220 + 2221 + intel_fb->panic = intel_panic_alloc(); 2222 + if (!intel_fb->panic) 2223 + return -ENOMEM; 2221 2224 2222 2225 /* 2223 2226 * intel_frontbuffer_get() must be done before 2224 2227 * intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race. 2225 2228 */ 2226 2229 intel_fb->frontbuffer = intel_frontbuffer_get(obj); 2227 - if (!intel_fb->frontbuffer) 2228 - return -ENOMEM; 2230 + if (!intel_fb->frontbuffer) { 2231 + ret = -ENOMEM; 2232 + goto err_free_panic; 2233 + } 2229 2234 2230 2235 ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd); 2231 2236 if (ret) ··· 2330 2323 intel_fb_bo_framebuffer_fini(obj); 2331 2324 err_frontbuffer_put: 2332 2325 intel_frontbuffer_put(intel_fb->frontbuffer); 2326 + err_free_panic: 2327 + kfree(intel_fb->panic); 2328 + 2333 2329 return ret; 2334 2330 } 2335 2331 ··· 2359 2349 struct intel_framebuffer *intel_framebuffer_alloc(void) 2360 2350 { 2361 2351 struct intel_framebuffer *intel_fb; 2362 - struct intel_panic *panic; 2363 2352 2364 2353 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 2365 2354 if (!intel_fb) 2366 2355 return NULL; 2367 - 2368 - panic = intel_panic_alloc(); 2369 - if (!panic) { 2370 - kfree(intel_fb); 2371 - return NULL; 2372 - } 2373 - 2374 - intel_fb->panic = panic; 2375 2356 2376 2357 return intel_fb; 2377 2358 }
+7 -3
drivers/gpu/drm/panthor/panthor_mmu.c
··· 1175 1175 break; 1176 1176 1177 1177 case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: 1178 - /* Partial unmaps might trigger a remap with either a prev or a next VA, 1179 - * but not both. 1178 + /* Two VMAs can be needed for an unmap, as an unmap can happen 1179 + * in the middle of a drm_gpuva, requiring a remap with both 1180 + * prev & next VA. Or an unmap can span more than one drm_gpuva 1181 + * where the first and last ones are covered partially, requiring 1182 + * a remap for the first with a prev VA and remap for the last 1183 + * with a next VA. 1180 1184 */ 1181 - vma_count = 1; 1185 + vma_count = 2; 1182 1186 break; 1183 1187 1184 1188 default:
+1 -1
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
··· 361 361 362 362 regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2, 363 363 FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) | 364 - FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1)); 364 + FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1)); 365 365 } 366 366 367 367 static enum drm_connector_status
+3
drivers/gpu/drm/xe/xe_ggtt.c
··· 292 292 ggtt->pt_ops = &xelp_pt_ops; 293 293 294 294 ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM); 295 + if (!ggtt->wq) 296 + return -ENOMEM; 297 + 295 298 __xe_ggtt_init_early(ggtt, xe_wopcm_size(xe)); 296 299 297 300 err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
+2 -2
drivers/gpu/drm/xe/xe_pt.c
··· 2022 2022 case DRM_GPUVA_OP_MAP: 2023 2023 if ((!op->map.immediate && xe_vm_in_fault_mode(vm) && 2024 2024 !op->map.invalidate_on_bind) || 2025 - op->map.is_cpu_addr_mirror) 2025 + (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) 2026 2026 break; 2027 2027 2028 2028 err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma, ··· 2252 2252 switch (op->base.op) { 2253 2253 case DRM_GPUVA_OP_MAP: 2254 2254 if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) || 2255 - op->map.is_cpu_addr_mirror) 2255 + (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) 2256 2256 break; 2257 2257 2258 2258 bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
+5
drivers/gpu/drm/xe/xe_svm.c
··· 302 302 if (!vma) 303 303 return -EINVAL; 304 304 305 + if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) { 306 + drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n"); 307 + return 0; 308 + } 309 + 305 310 if (xe_vma_has_default_mem_attrs(vma)) 306 311 return 0; 307 312
+37 -59
drivers/gpu/drm/xe/xe_vm.c
··· 616 616 vops->pt_update_ops[i].num_ops += inc_val; 617 617 } 618 618 619 + #define XE_VMA_CREATE_MASK ( \ 620 + XE_VMA_READ_ONLY | \ 621 + XE_VMA_DUMPABLE | \ 622 + XE_VMA_SYSTEM_ALLOCATOR | \ 623 + DRM_GPUVA_SPARSE | \ 624 + XE_VMA_MADV_AUTORESET) 625 + 619 626 static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma, 620 627 u8 tile_mask) 621 628 { ··· 635 628 op->base.map.gem.offset = vma->gpuva.gem.offset; 636 629 op->map.vma = vma; 637 630 op->map.immediate = true; 638 - op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE; 639 - op->map.is_null = xe_vma_is_null(vma); 631 + op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK; 640 632 } 641 633 642 634 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma, ··· 938 932 kfree(vma); 939 933 } 940 934 941 - #define VMA_CREATE_FLAG_READ_ONLY BIT(0) 942 - #define VMA_CREATE_FLAG_IS_NULL BIT(1) 943 - #define VMA_CREATE_FLAG_DUMPABLE BIT(2) 944 - #define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3) 945 - 946 935 static struct xe_vma *xe_vma_create(struct xe_vm *vm, 947 936 struct xe_bo *bo, 948 937 u64 bo_offset_or_userptr, ··· 948 947 struct xe_vma *vma; 949 948 struct xe_tile *tile; 950 949 u8 id; 951 - bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY); 952 - bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL); 953 - bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE); 954 - bool is_cpu_addr_mirror = 955 - (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR); 950 + bool is_null = (flags & DRM_GPUVA_SPARSE); 951 + bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR); 956 952 957 953 xe_assert(vm->xe, start < end); 958 954 xe_assert(vm->xe, end < vm->size); ··· 970 972 if (!vma) 971 973 return ERR_PTR(-ENOMEM); 972 974 973 - if (is_cpu_addr_mirror) 974 - vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR; 975 - if (is_null) 976 - vma->gpuva.flags |= DRM_GPUVA_SPARSE; 977 975 if (bo) 978 976 vma->gpuva.gem.obj = &bo->ttm.base; 979 977 } ··· 980 986 vma->gpuva.vm = &vm->gpuvm; 981 987 vma->gpuva.va.addr = start; 982 988 vma->gpuva.va.range = end - start + 1; 983 - if (read_only) 984 - vma->gpuva.flags |= XE_VMA_READ_ONLY; 985 - if (dumpable) 986 - vma->gpuva.flags |= XE_VMA_DUMPABLE; 989 + vma->gpuva.flags = flags; 987 990 988 991 for_each_tile(tile, vm->xe, id) 989 992 vma->tile_mask |= 0x1 << id; ··· 2263 2272 if (__op->op == DRM_GPUVA_OP_MAP) { 2264 2273 op->map.immediate = 2265 2274 flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE; 2266 - op->map.read_only = 2267 - flags & DRM_XE_VM_BIND_FLAG_READONLY; 2268 - op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; 2269 - op->map.is_cpu_addr_mirror = flags & 2270 - DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR; 2271 - op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE; 2275 + if (flags & DRM_XE_VM_BIND_FLAG_READONLY) 2276 + op->map.vma_flags |= XE_VMA_READ_ONLY; 2277 + if (flags & DRM_XE_VM_BIND_FLAG_NULL) 2278 + op->map.vma_flags |= DRM_GPUVA_SPARSE; 2279 + if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR) 2280 + op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR; 2281 + if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE) 2282 + op->map.vma_flags |= XE_VMA_DUMPABLE; 2283 + if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) 2284 + op->map.vma_flags |= XE_VMA_MADV_AUTORESET; 2272 2285 op->map.pat_index = pat_index; 2273 2286 op->map.invalidate_on_bind = 2274 2287 __xe_vm_needs_clear_scratch_pages(vm, flags); ··· 2585 2590 .pat_index = op->map.pat_index, 2586 2591 }; 2587 2592 2588 - flags |= op->map.read_only ? 2589 - VMA_CREATE_FLAG_READ_ONLY : 0; 2590 - flags |= op->map.is_null ? 
2591 - VMA_CREATE_FLAG_IS_NULL : 0; 2592 - flags |= op->map.dumpable ? 2593 - VMA_CREATE_FLAG_DUMPABLE : 0; 2594 - flags |= op->map.is_cpu_addr_mirror ? 2595 - VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0; 2593 + flags |= op->map.vma_flags & XE_VMA_CREATE_MASK; 2596 2594 2597 2595 vma = new_vma(vm, &op->base.map, &default_attr, 2598 2596 flags); ··· 2594 2606 2595 2607 op->map.vma = vma; 2596 2608 if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) && 2597 - !op->map.is_cpu_addr_mirror) || 2609 + !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) || 2598 2610 op->map.invalidate_on_bind) 2599 2611 xe_vma_ops_incr_pt_update_ops(vops, 2600 2612 op->tile_mask, 1); ··· 2625 2637 op->remap.start = xe_vma_start(old); 2626 2638 op->remap.range = xe_vma_size(old); 2627 2639 2628 - flags |= op->base.remap.unmap->va->flags & 2629 - XE_VMA_READ_ONLY ? 2630 - VMA_CREATE_FLAG_READ_ONLY : 0; 2631 - flags |= op->base.remap.unmap->va->flags & 2632 - DRM_GPUVA_SPARSE ? 2633 - VMA_CREATE_FLAG_IS_NULL : 0; 2634 - flags |= op->base.remap.unmap->va->flags & 2635 - XE_VMA_DUMPABLE ? 2636 - VMA_CREATE_FLAG_DUMPABLE : 0; 2637 - flags |= xe_vma_is_cpu_addr_mirror(old) ? 2638 - VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0; 2639 - 2640 + flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK; 2640 2641 if (op->base.remap.prev) { 2641 2642 vma = new_vma(vm, op->base.remap.prev, 2642 2643 &old->attr, flags); ··· 3256 3279 DRM_XE_VM_BIND_FLAG_NULL | \ 3257 3280 DRM_XE_VM_BIND_FLAG_DUMPABLE | \ 3258 3281 DRM_XE_VM_BIND_FLAG_CHECK_PXP | \ 3259 - DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR) 3282 + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \ 3283 + DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) 3260 3284 3261 3285 #ifdef TEST_VM_OPS_ERROR 3262 3286 #define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR) ··· 3372 3394 XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC && 3373 3395 !(BIT(prefetch_region) & xe->info.mem_region_mask))) || 3374 3396 XE_IOCTL_DBG(xe, obj && 3375 - op == DRM_XE_VM_BIND_OP_UNMAP)) { 3397 + op == DRM_XE_VM_BIND_OP_UNMAP) || 3398 + XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) && 3399 + (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) { 3376 3400 err = -EINVAL; 3377 3401 goto free_bind_ops; 3378 3402 } ··· 4192 4212 struct xe_vma_ops vops; 4193 4213 struct drm_gpuva_ops *ops = NULL; 4194 4214 struct drm_gpuva_op *__op; 4195 - bool is_cpu_addr_mirror = false; 4215 + unsigned int vma_flags = 0; 4196 4216 bool remap_op = false; 4197 4217 struct xe_vma_mem_attr tmp_attr; 4198 4218 u16 default_pat; ··· 4222 4242 vma = gpuva_to_vma(op->base.unmap.va); 4223 4243 XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma)); 4224 4244 default_pat = vma->attr.default_pat_index; 4245 + vma_flags = vma->gpuva.flags; 4225 4246 } 4226 4247 4227 4248 if (__op->op == DRM_GPUVA_OP_REMAP) { 4228 4249 vma = gpuva_to_vma(op->base.remap.unmap->va); 4229 4250 default_pat = vma->attr.default_pat_index; 4251 + vma_flags = vma->gpuva.flags; 4230 4252 } 4231 4253 4232 4254 if (__op->op == DRM_GPUVA_OP_MAP) { 4233 - op->map.is_cpu_addr_mirror = true; 4255 + op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK; 4234 4256 op->map.pat_index = default_pat; 4235 4257 } 4236 4258 } else { ··· 4241 4259 xe_assert(vm->xe, !remap_op); 4242 4260 xe_assert(vm->xe, xe_vma_has_no_bo(vma)); 4243 4261 remap_op = true; 4244 - 4245 - if (xe_vma_is_cpu_addr_mirror(vma)) 4246 - is_cpu_addr_mirror = true; 4247 - else 4248 - is_cpu_addr_mirror = false; 4262 + vma_flags = vma->gpuva.flags; 4249 4263 } 4250 4264 4251 4265 if (__op->op 
== DRM_GPUVA_OP_MAP) { ··· 4250 4272 /* 4251 4273 * In case of madvise ops DRM_GPUVA_OP_MAP is 4252 4274 * always after DRM_GPUVA_OP_REMAP, so ensure 4253 - * we assign op->map.is_cpu_addr_mirror true 4254 - * if REMAP is for xe_vma_is_cpu_addr_mirror vma 4275 + * to propagate the flags from the vma we're 4276 + * unmapping. 4255 4277 */ 4256 - op->map.is_cpu_addr_mirror = is_cpu_addr_mirror; 4278 + op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK; 4257 4279 } 4258 4280 } 4259 4281 print_op(vm->xe, __op);
+2 -8
drivers/gpu/drm/xe/xe_vm_types.h
··· 46 46 #define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7) 47 47 #define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8) 48 48 #define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9) 49 + #define XE_VMA_MADV_AUTORESET (DRM_GPUVA_USERBITS << 10) 49 50 50 51 /** 51 52 * struct xe_vma_mem_attr - memory attributes associated with vma ··· 346 345 struct xe_vma_op_map { 347 346 /** @vma: VMA to map */ 348 347 struct xe_vma *vma; 348 + unsigned int vma_flags; 349 349 /** @immediate: Immediate bind */ 350 350 bool immediate; 351 351 /** @read_only: Read only */ 352 - bool read_only; 353 - /** @is_null: is NULL binding */ 354 - bool is_null; 355 - /** @is_cpu_addr_mirror: is CPU address mirror binding */ 356 - bool is_cpu_addr_mirror; 357 - /** @dumpable: whether BO is dumped on GPU hang */ 358 - bool dumpable; 359 - /** @invalidate: invalidate the VMA before bind */ 360 352 bool invalidate_on_bind; 361 353 /** @pat_index: The pat index to use for this operation. */ 362 354 u16 pat_index;
+3
drivers/hwmon/cgbc-hwmon.c
··· 107 107 nb_sensors = data[0]; 108 108 109 109 hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL); 110 + if (!hwmon->sensors) 111 + return -ENOMEM; 112 + 110 113 sensor = hwmon->sensors; 111 114 112 115 for (i = 0; i < nb_sensors; i++) {
+5 -5
drivers/hwmon/gpd-fan.c
··· 615 615 const struct device *hwdev; 616 616 617 617 res = platform_get_resource(pdev, IORESOURCE_IO, 0); 618 - if (IS_ERR(res)) 619 - return dev_err_probe(dev, PTR_ERR(res), 618 + if (!res) 619 + return dev_err_probe(dev, -EINVAL, 620 620 "Failed to get platform resource\n"); 621 621 622 622 region = devm_request_region(dev, res->start, 623 623 resource_size(res), DRIVER_NAME); 624 - if (IS_ERR(region)) 625 - return dev_err_probe(dev, PTR_ERR(region), 624 + if (!region) 625 + return dev_err_probe(dev, -EBUSY, 626 626 "Failed to request region\n"); 627 627 628 628 hwdev = devm_hwmon_device_register_with_info(dev, ··· 631 631 &gpd_fan_chip_info, 632 632 NULL); 633 633 if (IS_ERR(hwdev)) 634 - return dev_err_probe(dev, PTR_ERR(region), 634 + return dev_err_probe(dev, PTR_ERR(hwdev), 635 635 "Failed to register hwmon device\n"); 636 636 637 637 return 0;
+1 -2
drivers/hwmon/pmbus/isl68137.c
··· 336 336 struct isl68137_data *data) 337 337 { 338 338 const struct device_node *np = dev->of_node; 339 - struct device_node *child; 340 339 int err; 341 340 342 - for_each_child_of_node(np, child) { 341 + for_each_child_of_node_scoped(np, child) { 343 342 if (strcmp(child->name, "channel")) 344 343 continue; 345 344
+6 -6
drivers/hwmon/pmbus/max34440.c
··· 336 336 .format[PSC_CURRENT_IN] = direct, 337 337 .format[PSC_CURRENT_OUT] = direct, 338 338 .format[PSC_TEMPERATURE] = direct, 339 - .m[PSC_VOLTAGE_IN] = 1, 339 + .m[PSC_VOLTAGE_IN] = 125, 340 340 .b[PSC_VOLTAGE_IN] = 0, 341 341 .R[PSC_VOLTAGE_IN] = 0, 342 - .m[PSC_VOLTAGE_OUT] = 1, 342 + .m[PSC_VOLTAGE_OUT] = 125, 343 343 .b[PSC_VOLTAGE_OUT] = 0, 344 344 .R[PSC_VOLTAGE_OUT] = 0, 345 - .m[PSC_CURRENT_IN] = 1, 345 + .m[PSC_CURRENT_IN] = 250, 346 346 .b[PSC_CURRENT_IN] = 0, 347 - .R[PSC_CURRENT_IN] = 2, 348 - .m[PSC_CURRENT_OUT] = 1, 347 + .R[PSC_CURRENT_IN] = -1, 348 + .m[PSC_CURRENT_OUT] = 250, 349 349 .b[PSC_CURRENT_OUT] = 0, 350 - .R[PSC_CURRENT_OUT] = 2, 350 + .R[PSC_CURRENT_OUT] = -1, 351 351 .m[PSC_TEMPERATURE] = 1, 352 352 .b[PSC_TEMPERATURE] = 0, 353 353 .R[PSC_TEMPERATURE] = 2,
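Sanity check on the corrected coefficients via the PMBus direct-format relation X = (Y * 10^-R - b) / m: with m = 250, b = 0, R = -1, a raw output-current word of 500 decodes to 500 * 10 / 250 = 20 A. An integer-only decoding sketch (the pmbus core performs this conversion internally, with additional per-class scaling):

	/* Direct format: X = (Y * 10^-R - b) / m. */
	static long example_pmbus_direct_decode(long y, long m, long b, int r)
	{
		long val = y;

		for (; r < 0; r++)	/* R = -1 -> multiply by 10 */
			val *= 10;
		for (; r > 0; r--)
			val /= 10;

		return (val - b) / m;
	}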
+17 -10
drivers/hwmon/sht3x.c
··· 291 291 return data; 292 292 } 293 293 294 - static int temp1_input_read(struct device *dev) 294 + static int temp1_input_read(struct device *dev, long *temp) 295 295 { 296 296 struct sht3x_data *data = sht3x_update_client(dev); 297 297 298 298 if (IS_ERR(data)) 299 299 return PTR_ERR(data); 300 300 301 - return data->temperature; 301 + *temp = data->temperature; 302 + return 0; 302 303 } 303 304 304 - static int humidity1_input_read(struct device *dev) 305 + static int humidity1_input_read(struct device *dev, long *humidity) 305 306 { 306 307 struct sht3x_data *data = sht3x_update_client(dev); 307 308 308 309 if (IS_ERR(data)) 309 310 return PTR_ERR(data); 310 311 311 - return data->humidity; 312 + *humidity = data->humidity; 313 + return 0; 312 314 } 313 315 314 316 /* ··· 708 706 u32 attr, int channel, long *val) 709 707 { 710 708 enum sht3x_limits index; 709 + int ret; 711 710 712 711 switch (type) { 713 712 case hwmon_chip: ··· 723 720 case hwmon_temp: 724 721 switch (attr) { 725 722 case hwmon_temp_input: 726 - *val = temp1_input_read(dev); 727 - break; 723 + return temp1_input_read(dev, val); 728 724 case hwmon_temp_alarm: 729 - *val = temp1_alarm_read(dev); 725 + ret = temp1_alarm_read(dev); 726 + if (ret < 0) 727 + return ret; 728 + *val = ret; 730 729 break; 731 730 case hwmon_temp_max: 732 731 index = limit_max; ··· 753 748 case hwmon_humidity: 754 749 switch (attr) { 755 750 case hwmon_humidity_input: 756 - *val = humidity1_input_read(dev); 757 - break; 751 + return humidity1_input_read(dev, val); 758 752 case hwmon_humidity_alarm: 759 - *val = humidity1_alarm_read(dev); 753 + ret = humidity1_alarm_read(dev); 754 + if (ret < 0) 755 + return ret; 756 + *val = ret; 760 757 break; 761 758 case hwmon_humidity_max: 762 759 index = limit_max;
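The bug class behind this rework: the old helpers returned the measurement itself, so a legitimately negative temperature was indistinguishable from a -errno value. Splitting status from data removes the ambiguity; a minimal sketch of the corrected convention, with a made-up reading:

	/* The return value carries only 0 or -errno; the measurement goes
	 * out through the pointer, so -5000 (-5 C in millidegrees) cannot
	 * be mistaken for an error code.
	 */
	static int example_temp_read(long *temp_mdeg)
	{
		*temp_mdeg = -5000;	/* hypothetical valid sub-zero reading */
		return 0;
	}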
+2
drivers/misc/amd-sbi/Kconfig
··· 2 2 config AMD_SBRMI_I2C 3 3 tristate "AMD side band RMI support" 4 4 depends on I2C 5 + depends on ARM || ARM64 || COMPILE_TEST 5 6 select REGMAP_I2C 6 7 help 7 8 Side band RMI over I2C support for AMD out of band management. 9 + This driver is intended to run on the BMC, not the managed node. 8 10 9 11 This driver can also be built as a module. If so, the module will 10 12 be called sbrmi-i2c.
+2
drivers/misc/fastrpc.c
··· 381 381 } 382 382 spin_unlock(&fl->lock); 383 383 384 + dma_buf_put(buf); 385 + 384 386 return ret; 385 387 } 386 388
+2
drivers/misc/mei/hw-me-regs.h
··· 120 120 #define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */ 121 121 #define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ 122 122 123 + #define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */ 124 + 123 125 /* 124 126 * MEI HW Section 125 127 */
+1 -2
drivers/misc/mei/mei_lb.c
··· 134 134 return true; 135 135 } 136 136 137 - static int mei_lb_push_payload(struct device *dev, 138 - enum intel_lb_type type, u32 flags, 137 + static int mei_lb_push_payload(struct device *dev, u32 type, u32 flags, 139 138 const void *payload, size_t payload_size) 140 139 { 141 140 struct mei_cl_device *cldev;
+2
drivers/misc/mei/pci-me.c
··· 127 127 {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)}, 128 128 {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, 129 129 130 + {MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)}, 131 + 130 132 /* required last entry */ 131 133 {0, } 132 134 };
+7 -7
drivers/misc/mei/pci-txe.c
··· 109 109 goto end; 110 110 } 111 111 112 + err = mei_register(dev, &pdev->dev); 113 + if (err) 114 + goto release_irq; 115 + 112 116 if (mei_start(dev)) { 113 117 dev_err(&pdev->dev, "init hw failure.\n"); 114 118 err = -ENODEV; 115 - goto release_irq; 119 + goto deregister; 116 120 } 117 121 118 122 pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); 119 123 pm_runtime_use_autosuspend(&pdev->dev); 120 - 121 - err = mei_register(dev, &pdev->dev); 122 - if (err) 123 - goto stop; 124 124 125 125 pci_set_drvdata(pdev, dev); 126 126 ··· 144 144 145 145 return 0; 146 146 147 - stop: 148 - mei_stop(dev); 147 + deregister: 148 + mei_deregister(dev); 149 149 release_irq: 150 150 mei_cancel_work(dev); 151 151 mei_disable_interrupts(dev);
+3 -5
drivers/misc/vmw_balloon.c
··· 1737 1737 { 1738 1738 unsigned long status, flags; 1739 1739 struct vmballoon *b; 1740 - int ret; 1740 + int ret = 0; 1741 1741 1742 1742 b = container_of(b_dev_info, struct vmballoon, b_dev_info); 1743 1743 ··· 1796 1796 * A failure happened. While we can deflate the page we just 1797 1797 * inflated, this deflation can also encounter an error. Instead 1798 1798 * we will decrease the size of the balloon to reflect the 1799 - * change and report failure. 1799 + * change. 1800 1800 */ 1801 1801 atomic64_dec(&b->size); 1802 - ret = -EBUSY; 1803 1802 } else { 1804 1803 /* 1805 1804 * Success. Take a reference for the page, and we will add it to 1806 1805 * the list after acquiring the lock. 1807 1806 */ 1808 1807 get_page(newpage); 1809 - ret = 0; 1810 1808 } 1811 1809 1812 1810 /* Update the balloon list under the @pages_lock */ ··· 1815 1817 * If we succeed just insert it to the list and update the statistics 1816 1818 * under the lock. 1817 1819 */ 1818 - if (!ret) { 1820 + if (status == VMW_BALLOON_SUCCESS) { 1819 1821 balloon_page_insert(&b->b_dev_info, newpage); 1820 1822 __count_vm_event(BALLOON_MIGRATE); 1821 1823 }
+5 -8
drivers/most/most_usb.c
··· 929 929 { 930 930 struct most_dev *mdev = to_mdev_from_dev(dev); 931 931 932 + kfree(mdev->busy_urbs); 933 + kfree(mdev->cap); 934 + kfree(mdev->conf); 935 + kfree(mdev->ep_address); 932 936 kfree(mdev); 933 937 } 934 938 /** ··· 1097 1093 err_free_conf: 1098 1094 kfree(mdev->conf); 1099 1095 err_free_mdev: 1100 - put_device(&mdev->dev); 1096 + kfree(mdev); 1101 1097 return ret; 1102 1098 } 1103 1099 ··· 1125 1121 if (mdev->dci) 1126 1122 device_unregister(&mdev->dci->dev); 1127 1123 most_deregister_interface(&mdev->iface); 1128 - 1129 - kfree(mdev->busy_urbs); 1130 - kfree(mdev->cap); 1131 - kfree(mdev->conf); 1132 - kfree(mdev->ep_address); 1133 - put_device(&mdev->dci->dev); 1134 - put_device(&mdev->dev); 1135 1124 } 1136 1125 1137 1126 static int hdm_suspend(struct usb_interface *interface, pm_message_t message)
+23 -24
drivers/net/bonding/bond_main.c
··· 2287 2287 unblock_netpoll_tx(); 2288 2288 } 2289 2289 2290 - if (bond_mode_can_use_xmit_hash(bond)) 2290 + /* broadcast mode uses the all_slaves to loop through slaves. */ 2291 + if (bond_mode_can_use_xmit_hash(bond) || 2292 + BOND_MODE(bond) == BOND_MODE_BROADCAST) 2291 2293 bond_update_slave_arr(bond, NULL); 2292 2294 2293 2295 if (!slave_dev->netdev_ops->ndo_bpf || ··· 2465 2463 2466 2464 bond_upper_dev_unlink(bond, slave); 2467 2465 2468 - if (bond_mode_can_use_xmit_hash(bond)) 2466 + if (bond_mode_can_use_xmit_hash(bond) || 2467 + BOND_MODE(bond) == BOND_MODE_BROADCAST) 2469 2468 bond_update_slave_arr(bond, slave); 2470 2469 2471 2470 slave_info(bond_dev, slave_dev, "Releasing %s interface\n", ··· 2874 2871 { 2875 2872 struct bonding *bond = container_of(work, struct bonding, 2876 2873 mii_work.work); 2877 - bool should_notify_peers = false; 2874 + bool should_notify_peers; 2878 2875 bool commit; 2879 2876 unsigned long delay; 2880 2877 struct slave *slave; ··· 2886 2883 goto re_arm; 2887 2884 2888 2885 rcu_read_lock(); 2886 + 2889 2887 should_notify_peers = bond_should_notify_peers(bond); 2890 2888 commit = !!bond_miimon_inspect(bond); 2891 - if (bond->send_peer_notif) { 2892 - rcu_read_unlock(); 2893 - if (rtnl_trylock()) { 2894 - bond->send_peer_notif--; 2895 - rtnl_unlock(); 2896 - } 2897 - } else { 2898 - rcu_read_unlock(); 2899 - } 2900 2889 2901 - if (commit) { 2890 + rcu_read_unlock(); 2891 + 2892 + if (commit || bond->send_peer_notif) { 2902 2893 /* Race avoidance with bond_close cancel of workqueue */ 2903 2894 if (!rtnl_trylock()) { 2904 2895 delay = 1; 2905 - should_notify_peers = false; 2906 2896 goto re_arm; 2907 2897 } 2908 2898 2909 - bond_for_each_slave(bond, slave, iter) { 2910 - bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER); 2899 + if (commit) { 2900 + bond_for_each_slave(bond, slave, iter) { 2901 + bond_commit_link_state(slave, 2902 + BOND_SLAVE_NOTIFY_LATER); 2903 + } 2904 + bond_miimon_commit(bond); 2911 2905 } 2912 - bond_miimon_commit(bond); 2906 + 2907 + if (bond->send_peer_notif) { 2908 + bond->send_peer_notif--; 2909 + if (should_notify_peers) 2910 + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, 2911 + bond->dev); 2912 + } 2913 2913 2914 2914 rtnl_unlock(); /* might sleep, hold no other locks */ 2915 2915 } ··· 2920 2914 re_arm: 2921 2915 if (bond->params.miimon) 2922 2916 queue_delayed_work(bond->wq, &bond->mii_work, delay); 2923 - 2924 - if (should_notify_peers) { 2925 - if (!rtnl_trylock()) 2926 - return; 2927 - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); 2928 - rtnl_unlock(); 2929 - } 2930 2917 } 2931 2918 2932 2919 static int bond_upper_dev_walk(struct net_device *upper,
+1 -1
drivers/net/can/bxcan.c
··· 842 842 u32 id; 843 843 int i, j; 844 844 845 - if (can_dropped_invalid_skb(ndev, skb)) 845 + if (can_dev_dropped_skb(ndev, skb)) 846 846 return NETDEV_TX_OK; 847 847 848 848 if (bxcan_tx_busy(priv))
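The same substitution recurs in the esdacc and rockchip_canfd hunks below. As I understand it, can_dev_dropped_skb() wraps can_dropped_invalid_skb() and additionally refuses to transmit while the device is in listen-only mode; a simplified sketch of the distinction (the real helper lives in include/linux/can/dev.h):

	#include <linux/can/dev.h>

	/* Simplified: drop TX attempts in listen-only mode, then fall back
	 * to the validity checks the old call already performed.
	 */
	static bool example_tx_should_drop(struct net_device *dev,
					   struct sk_buff *skb)
	{
		struct can_priv *priv = netdev_priv(dev);

		if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) {
			kfree_skb(skb);
			return true;
		}
		return can_dropped_invalid_skb(dev, skb);
	}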
+4 -2
drivers/net/can/dev/netlink.c
··· 452 452 } 453 453 454 454 if (data[IFLA_CAN_RESTART_MS]) { 455 - if (!priv->do_set_mode) { 455 + unsigned int restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); 456 + 457 + if (restart_ms != 0 && !priv->do_set_mode) { 456 458 NL_SET_ERR_MSG(extack, 457 459 "Device doesn't support restart from Bus Off"); 458 460 return -EOPNOTSUPP; ··· 463 461 /* Do not allow changing restart delay while running */ 464 462 if (dev->flags & IFF_UP) 465 463 return -EBUSY; 466 - priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]); 464 + priv->restart_ms = restart_ms; 467 465 } 468 466 469 467 if (data[IFLA_CAN_RESTART]) {
+1 -1
drivers/net/can/esd/esdacc.c
··· 254 254 u32 acc_id; 255 255 u32 acc_dlc; 256 256 257 - if (can_dropped_invalid_skb(netdev, skb)) 257 + if (can_dev_dropped_skb(netdev, skb)) 258 258 return NETDEV_TX_OK; 259 259 260 260 /* Access core->tx_fifo_tail only once because it may be changed
+1 -1
drivers/net/can/rockchip/rockchip_canfd-tx.c
··· 72 72 int err; 73 73 u8 i; 74 74 75 - if (can_dropped_invalid_skb(ndev, skb)) 75 + if (can_dev_dropped_skb(ndev, skb)) 76 76 return NETDEV_TX_OK; 77 77 78 78 if (!netif_subqueue_maybe_stop(priv->ndev, 0,
+1 -1
drivers/net/ethernet/dlink/dl2k.c
··· 733 733 u64 tfc_vlan_tag = 0; 734 734 735 735 if (np->link_status == 0) { /* Link Down */ 736 - dev_kfree_skb(skb); 736 + dev_kfree_skb_any(skb); 737 737 return NETDEV_TX_OK; 738 738 } 739 739 entry = np->cur_tx % TX_RING_SIZE;
+1 -2
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
··· 1077 1077 dma_addr_t addr; 1078 1078 1079 1079 buffer_start = skb->data - dpaa2_eth_needed_headroom(skb); 1080 - aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 1081 - DPAA2_ETH_TX_BUF_ALIGN); 1080 + aligned_start = PTR_ALIGN(buffer_start, DPAA2_ETH_TX_BUF_ALIGN); 1082 1081 if (aligned_start >= skb->head) 1083 1082 buffer_start = aligned_start; 1084 1083 else
+21 -4
drivers/net/ethernet/freescale/enetc/enetc.c
··· 1595 1595 /* next descriptor to process */ 1596 1596 i = rx_ring->next_to_clean; 1597 1597 1598 + enetc_lock_mdio(); 1599 + 1598 1600 while (likely(rx_frm_cnt < work_limit)) { 1599 1601 union enetc_rx_bd *rxbd; 1600 1602 struct sk_buff *skb; ··· 1632 1630 rx_byte_cnt += skb->len + ETH_HLEN; 1633 1631 rx_frm_cnt++; 1634 1632 1633 + enetc_unlock_mdio(); 1635 1634 napi_gro_receive(napi, skb); 1635 + enetc_lock_mdio(); 1636 1636 } 1637 1637 1638 1638 rx_ring->next_to_clean = i; 1639 1639 1640 1640 rx_ring->stats.packets += rx_frm_cnt; 1641 1641 rx_ring->stats.bytes += rx_byte_cnt; 1642 + 1643 + enetc_unlock_mdio(); 1642 1644 1643 1645 return rx_frm_cnt; 1644 1646 } ··· 1953 1947 /* next descriptor to process */ 1954 1948 i = rx_ring->next_to_clean; 1955 1949 1950 + enetc_lock_mdio(); 1951 + 1956 1952 while (likely(rx_frm_cnt < work_limit)) { 1957 1953 union enetc_rx_bd *rxbd, *orig_rxbd; 1958 1954 struct xdp_buff xdp_buff; ··· 2018 2010 */ 2019 2011 enetc_bulk_flip_buff(rx_ring, orig_i, i); 2020 2012 2013 + enetc_unlock_mdio(); 2021 2014 napi_gro_receive(napi, skb); 2015 + enetc_lock_mdio(); 2022 2016 break; 2023 2017 case XDP_TX: 2024 2018 tx_ring = priv->xdp_tx_ring[rx_ring->index]; ··· 2055 2045 } 2056 2046 break; 2057 2047 case XDP_REDIRECT: 2048 + enetc_unlock_mdio(); 2058 2049 err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog); 2050 + enetc_lock_mdio(); 2059 2051 if (unlikely(err)) { 2060 2052 enetc_xdp_drop(rx_ring, orig_i, i); 2061 2053 rx_ring->stats.xdp_redirect_failures++; ··· 2077 2065 rx_ring->stats.packets += rx_frm_cnt; 2078 2066 rx_ring->stats.bytes += rx_byte_cnt; 2079 2067 2080 - if (xdp_redirect_frm_cnt) 2068 + if (xdp_redirect_frm_cnt) { 2069 + enetc_unlock_mdio(); 2081 2070 xdp_do_flush(); 2071 + enetc_lock_mdio(); 2072 + } 2082 2073 2083 2074 if (xdp_tx_frm_cnt) 2084 2075 enetc_update_tx_ring_tail(tx_ring); ··· 2089 2074 if (cleaned_cnt > rx_ring->xdp.xdp_tx_in_flight) 2090 2075 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) - 2091 2076 rx_ring->xdp.xdp_tx_in_flight); 2077 + 2078 + enetc_unlock_mdio(); 2092 2079 2093 2080 return rx_frm_cnt; 2094 2081 } ··· 2110 2093 for (i = 0; i < v->count_tx_rings; i++) 2111 2094 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) 2112 2095 complete = false; 2096 + enetc_unlock_mdio(); 2113 2097 2114 2098 prog = rx_ring->xdp.prog; 2115 2099 if (prog) ··· 2122 2104 if (work_done) 2123 2105 v->rx_napi_work = true; 2124 2106 2125 - if (!complete) { 2126 - enetc_unlock_mdio(); 2107 + if (!complete) 2127 2108 return budget; 2128 - } 2129 2109 2130 2110 napi_complete_done(napi, work_done); 2131 2111 ··· 2132 2116 2133 2117 v->rx_napi_work = false; 2134 2118 2119 + enetc_lock_mdio(); 2135 2120 /* enable interrupts */ 2136 2121 enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE); 2137 2122
+1 -1
drivers/net/ethernet/freescale/enetc/enetc.h
··· 76 76 #define ENETC_LSO_MAX_DATA_LEN SZ_256K 77 77 78 78 #define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE 79 - #define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */ 79 + #define ENETC_RXB_TRUESIZE (PAGE_SIZE >> 1) 80 80 #define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ 81 81 #define ENETC_RXB_DMA_SIZE \ 82 82 (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD)
+1
drivers/net/ethernet/hisilicon/Kconfig
··· 148 148 tristate "Hisilicon BMC Gigabit Ethernet Device Support" 149 149 depends on PCI && PCI_MSI 150 150 select PHYLIB 151 + select FIXED_PHY 151 152 select MOTORCOMM_PHY 152 153 select REALTEK_PHY 153 154 help
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 100 100 return sizeof(struct mlx5_ksm) * 4; 101 101 } 102 102 WARN_ONCE(1, "MPWRQ UMR mode %d is not known\n", mode); 103 - return 0; 103 + return 1; 104 104 } 105 105 106 106 u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
+5
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
··· 342 342 void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv, 343 343 struct mlx5e_priv *master_priv); 344 344 void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event); 345 + void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv); 345 346 346 347 static inline struct mlx5_core_dev * 347 348 mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry) ··· 386 385 } 387 386 388 387 static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event) 388 + { 389 + } 390 + 391 + static inline void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv) 389 392 { 390 393 } 391 394 #endif
+23 -2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
··· 2893 2893 2894 2894 void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event) 2895 2895 { 2896 - if (!priv->ipsec) 2897 - return; /* IPsec not supported */ 2896 + if (!priv->ipsec || mlx5_devcom_comp_get_size(priv->devcom) < 2) 2897 + return; /* IPsec not supported or no peers */ 2898 2898 2899 2899 mlx5_devcom_send_event(priv->devcom, event, event, priv); 2900 2900 wait_for_completion(&priv->ipsec->comp); 2901 + } 2902 + 2903 + void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv) 2904 + { 2905 + struct mlx5_devcom_comp_dev *tmp = NULL; 2906 + struct mlx5e_priv *peer_priv; 2907 + 2908 + if (!priv->devcom) 2909 + return; 2910 + 2911 + if (!mlx5_devcom_for_each_peer_begin(priv->devcom)) 2912 + goto out; 2913 + 2914 + peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp); 2915 + if (peer_priv) 2916 + complete_all(&peer_priv->ipsec->comp); 2917 + 2918 + mlx5_devcom_for_each_peer_end(priv->devcom); 2919 + out: 2920 + mlx5_devcom_unregister_component(priv->devcom); 2921 + priv->devcom = NULL; 2901 2922 }
+5 -3
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 242 242 &attr, 243 243 mlx5e_devcom_event_mpv, 244 244 priv); 245 - if (IS_ERR(priv->devcom)) 246 - return PTR_ERR(priv->devcom); 245 + if (!priv->devcom) 246 + return -EINVAL; 247 247 248 248 if (mlx5_core_is_mp_master(priv->mdev)) { 249 249 mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP, ··· 256 256 257 257 static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv) 258 258 { 259 - if (IS_ERR_OR_NULL(priv->devcom)) 259 + if (!priv->devcom) 260 260 return; 261 261 262 262 if (mlx5_core_is_mp_master(priv->mdev)) { ··· 266 266 } 267 267 268 268 mlx5_devcom_unregister_component(priv->devcom); 269 + priv->devcom = NULL; 269 270 } 270 271 271 272 static int blocking_event(struct notifier_block *nb, unsigned long event, void *data) ··· 6121 6120 if (mlx5e_monitor_counter_supported(priv)) 6122 6121 mlx5e_monitor_counter_cleanup(priv); 6123 6122 6123 + mlx5e_ipsec_disable_events(priv); 6124 6124 mlx5e_disable_blocking_events(priv); 6125 6125 mlx5e_disable_async_events(priv); 6126 6126 mlx5_lag_remove_netdev(mdev, priv->netdev);
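This caller-side hunk, together with the eswitch, lag, clock and devcom changes below, is one convention switch: devcom registration now reports failure as plain NULL instead of a mix of NULL and ERR_PTR(). The pattern callers converge on, sketched with names mirroring the en_main.c call site above:

	static int example_register_devcom(struct mlx5e_priv *priv,
					   struct mlx5_devcom_dev *devc,
					   const struct mlx5_devcom_match_attr *attr)
	{
		priv->devcom = mlx5_devcom_register_component(devc,
							      MLX5_DEVCOM_MPV,
							      attr,
							      mlx5e_devcom_event_mpv,
							      priv);
		if (!priv->devcom)	/* plain NULL on failure now */
			return -EINVAL;
		return 0;
	}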
+42 -9
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 1794 1794 } 1795 1795 1796 1796 prog = rcu_dereference(rq->xdp_prog); 1797 - if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) { 1798 - if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { 1799 - struct mlx5e_wqe_frag_info *pwi; 1797 + if (prog) { 1798 + u8 nr_frags_free, old_nr_frags = sinfo->nr_frags; 1800 1799 1801 - for (pwi = head_wi; pwi < wi; pwi++) 1802 - pwi->frag_page->frags++; 1800 + if (mlx5e_xdp_handle(rq, prog, mxbuf)) { 1801 + if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, 1802 + rq->flags)) { 1803 + struct mlx5e_wqe_frag_info *pwi; 1804 + 1805 + wi -= old_nr_frags - sinfo->nr_frags; 1806 + 1807 + for (pwi = head_wi; pwi < wi; pwi++) 1808 + pwi->frag_page->frags++; 1809 + } 1810 + return NULL; /* page/packet was consumed by XDP */ 1803 1811 } 1804 - return NULL; /* page/packet was consumed by XDP */ 1812 + 1813 + nr_frags_free = old_nr_frags - sinfo->nr_frags; 1814 + if (unlikely(nr_frags_free)) { 1815 + wi -= nr_frags_free; 1816 + truesize -= nr_frags_free * frag_info->frag_stride; 1817 + } 1805 1818 } 1806 1819 1807 1820 skb = mlx5e_build_linear_skb( ··· 2040 2027 u32 byte_cnt = cqe_bcnt; 2041 2028 struct skb_shared_info *sinfo; 2042 2029 unsigned int truesize = 0; 2030 + u32 pg_consumed_bytes; 2043 2031 struct bpf_prog *prog; 2044 2032 struct sk_buff *skb; 2045 2033 u32 linear_frame_sz; ··· 2094 2080 2095 2081 while (byte_cnt) { 2096 2082 /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */ 2097 - u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); 2083 + pg_consumed_bytes = 2084 + min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); 2098 2085 2099 2086 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) 2100 2087 truesize += pg_consumed_bytes; ··· 2111 2096 } 2112 2097 2113 2098 if (prog) { 2099 + u8 nr_frags_free, old_nr_frags = sinfo->nr_frags; 2100 + u32 len; 2101 + 2114 2102 if (mlx5e_xdp_handle(rq, prog, mxbuf)) { 2115 2103 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { 2116 2104 struct mlx5e_frag_page *pfp; 2105 + 2106 + frag_page -= old_nr_frags - sinfo->nr_frags; 2117 2107 2118 2108 for (pfp = head_page; pfp < frag_page; pfp++) 2119 2109 pfp->frags++; ··· 2130 2110 return NULL; /* page/packet was consumed by XDP */ 2131 2111 } 2132 2112 2113 + nr_frags_free = old_nr_frags - sinfo->nr_frags; 2114 + if (unlikely(nr_frags_free)) { 2115 + frag_page -= nr_frags_free; 2116 + truesize -= (nr_frags_free - 1) * PAGE_SIZE + 2117 + ALIGN(pg_consumed_bytes, 2118 + BIT(rq->mpwqe.log_stride_sz)); 2119 + } 2120 + 2121 + len = mxbuf->xdp.data_end - mxbuf->xdp.data; 2122 + 2133 2123 skb = mlx5e_build_linear_skb( 2134 2124 rq, mxbuf->xdp.data_hard_start, linear_frame_sz, 2135 - mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0, 2125 + mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len, 2136 2126 mxbuf->xdp.data - mxbuf->xdp.data_meta); 2137 2127 if (unlikely(!skb)) { 2138 2128 mlx5e_page_release_fragmented(rq->page_pool, ··· 2167 2137 do 2168 2138 pagep->frags++; 2169 2139 while (++pagep < frag_page); 2140 + 2141 + headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len, 2142 + skb->data_len); 2143 + __pskb_pull_tail(skb, headlen); 2170 2144 } 2171 - __pskb_pull_tail(skb, headlen); 2172 2145 } else { 2173 2146 dma_addr_t addr; 2174 2147
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
··· 1614 1614 1615 1615 fec_set_corrected_bits_total(priv, fec_stats); 1616 1616 fec_set_block_stats(priv, mode, fec_stats); 1617 - fec_set_histograms_stats(priv, mode, hist); 1617 + 1618 + if (MLX5_CAP_PCAM_REG(priv->mdev, pphcr)) 1619 + fec_set_histograms_stats(priv, mode, hist); 1618 1620 } 1619 1621 1620 1622 #define PPORT_ETH_EXT_OFF(c) \
+4 -3
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 256 256 u8 mode; 257 257 258 258 #ifdef CONFIG_MLX5_EN_TLS 259 - if (accel && accel->tls.tls_tisn) 259 + if (accel->tls.tls_tisn) 260 260 return MLX5_INLINE_MODE_TCP_UDP; 261 261 #endif 262 262 ··· 982 982 struct mlx5e_tx_attr attr; 983 983 struct mlx5i_tx_wqe *wqe; 984 984 985 + struct mlx5e_accel_tx_state accel = {}; 985 986 struct mlx5_wqe_datagram_seg *datagram; 986 987 struct mlx5_wqe_ctrl_seg *cseg; 987 988 struct mlx5_wqe_eth_seg *eseg; ··· 993 992 int num_dma; 994 993 u16 pi; 995 994 996 - mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr); 995 + mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr); 997 996 mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr); 998 997 999 998 pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs); ··· 1010 1009 1011 1010 mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram); 1012 1011 1013 - mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg); 1012 + mlx5e_txwqe_build_eseg_csum(sq, skb, &accel, eseg); 1014 1013 1015 1014 eseg->mss = attr.mss; 1016 1015
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 3129 3129 attr, 3130 3130 mlx5_esw_offloads_devcom_event, 3131 3131 esw); 3132 - if (IS_ERR(esw->devcom)) 3132 + if (!esw->devcom) 3133 3133 return; 3134 3134 3135 3135 mlx5_devcom_send_event(esw->devcom, ··· 3140 3140 3141 3141 void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) 3142 3142 { 3143 - if (IS_ERR_OR_NULL(esw->devcom)) 3143 + if (!esw->devcom) 3144 3144 return; 3145 3145 3146 3146 mlx5_devcom_send_event(esw->devcom,
+3 -4
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
··· 1430 1430 mlx5_devcom_register_component(dev->priv.devc, 1431 1431 MLX5_DEVCOM_HCA_PORTS, 1432 1432 &attr, NULL, dev); 1433 - if (IS_ERR(dev->priv.hca_devcom_comp)) { 1433 + if (!dev->priv.hca_devcom_comp) { 1434 1434 mlx5_core_err(dev, 1435 - "Failed to register devcom HCA component, err: %ld\n", 1436 - PTR_ERR(dev->priv.hca_devcom_comp)); 1437 - return PTR_ERR(dev->priv.hca_devcom_comp); 1435 + "Failed to register devcom HCA component."); 1436 + return -EINVAL; 1438 1437 } 1439 1438 1440 1439 return 0;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
··· 1444 1444 compd = mlx5_devcom_register_component(mdev->priv.devc, 1445 1445 MLX5_DEVCOM_SHARED_CLOCK, 1446 1446 &attr, NULL, mdev); 1447 - if (IS_ERR(compd)) 1447 + if (!compd) 1448 1448 return; 1449 1449 1450 1450 mdev->clock_state->compdev = compd;
+26 -27
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
··· 76 76 struct mlx5_devcom_dev *
77 77 mlx5_devcom_register_device(struct mlx5_core_dev *dev)
78 78 {
79 - struct mlx5_devcom_dev *devc;
79 + struct mlx5_devcom_dev *devc = NULL;
80 80
81 81 mutex_lock(&dev_list_lock);
82 82
83 83 if (devcom_dev_exists(dev)) {
84 - devc = ERR_PTR(-EEXIST);
84 + mlx5_core_err(dev, "devcom device already exists");
85 85 goto out;
86 86 }
87 87
88 88 devc = mlx5_devcom_dev_alloc(dev);
89 - if (!devc) {
90 - devc = ERR_PTR(-ENOMEM);
89 + if (!devc)
91 90 goto out;
92 - }
93 91
94 92 list_add_tail(&devc->list, &devcom_dev_list);
95 93 out:
··· 108 110
109 111 void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
110 112 {
111 - if (!IS_ERR_OR_NULL(devc))
112 - kref_put(&devc->ref, mlx5_devcom_dev_release);
113 + if (!devc)
114 + return;
115 +
116 + kref_put(&devc->ref, mlx5_devcom_dev_release);
113 117 }
114 118
115 119 static struct mlx5_devcom_comp *
··· 122 122
123 123 comp = kzalloc(sizeof(*comp), GFP_KERNEL);
124 124 if (!comp)
125 - return ERR_PTR(-ENOMEM);
125 + return NULL;
126 126
127 127 comp->id = id;
128 128 comp->key.key = attr->key;
··· 160 160
161 161 devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
162 162 if (!devcom)
163 - return ERR_PTR(-ENOMEM);
163 + return NULL;
164 164
165 165 kref_get(&devc->ref);
166 166 devcom->devc = devc;
··· 240 240 mlx5_devcom_event_handler_t handler,
241 241 void *data)
242 242 {
243 - struct mlx5_devcom_comp_dev *devcom;
243 + struct mlx5_devcom_comp_dev *devcom = NULL;
244 244 struct mlx5_devcom_comp *comp;
245 245
246 - if (IS_ERR_OR_NULL(devc))
247 - return ERR_PTR(-EINVAL);
246 + if (!devc)
247 + return NULL;
248 248
249 249 mutex_lock(&comp_list_lock);
250 250 comp = devcom_component_get(devc, id, attr, handler);
251 - if (IS_ERR(comp)) {
252 - devcom = ERR_PTR(-EINVAL);
251 + if (IS_ERR(comp))
253 252 goto out_unlock;
254 - }
255 253
256 254 if (!comp) {
257 255 comp = mlx5_devcom_comp_alloc(id, attr, handler);
258 - if (IS_ERR(comp)) {
259 - devcom = ERR_CAST(comp);
256 + if (!comp)
260 257 goto out_unlock;
261 - }
258 +
262 259 list_add_tail(&comp->comp_list, &devcom_comp_list);
263 260 }
264 261 mutex_unlock(&comp_list_lock);
265 262
266 263 devcom = devcom_alloc_comp_dev(devc, comp, data);
267 - if (IS_ERR(devcom))
264 + if (!devcom)
268 265 kref_put(&comp->ref, mlx5_devcom_comp_release);
269 266
270 267 return devcom;
··· 273 276
274 277 void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
275 278 {
276 - if (!IS_ERR_OR_NULL(devcom))
277 - devcom_free_comp_dev(devcom);
279 + if (!devcom)
280 + return;
281 +
282 + devcom_free_comp_dev(devcom);
278 283 }
279 284
280 285 int mlx5_devcom_comp_get_size(struct mlx5_devcom_comp_dev *devcom)
··· 295 296 int err = 0;
296 297 void *data;
297 298
298 - if (IS_ERR_OR_NULL(devcom))
299 + if (!devcom)
299 300 return -ENODEV;
300 301
301 302 comp = devcom->comp;
··· 337 338
338 339 bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
339 340 {
340 - if (IS_ERR_OR_NULL(devcom))
341 + if (!devcom)
341 342 return false;
342 343
343 344 return READ_ONCE(devcom->comp->ready);
··· 347 348 {
348 349 struct mlx5_devcom_comp *comp;
349 350
350 - if (IS_ERR_OR_NULL(devcom))
351 + if (!devcom)
351 352 return false;
352 353
353 354 comp = devcom->comp;
··· 420 421
421 422 void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
422 423 {
423 - if (IS_ERR_OR_NULL(devcom))
424 + if (!devcom)
424 425 return;
425 426 down_write(&devcom->comp->sem);
426 427 }
427 428
428 429 void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
429 430 {
430 - if (!IS_ERR_OR_NULL(devcom))
431 + if (!devcom)
431 432 return;
432 433 up_write(&devcom->comp->sem);
433 434 }
434 435
435 436 int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
436 437 {
437 - if (IS_ERR_OR_NULL(devcom))
438 + if (!devcom)
438 439 return 0;
439 440 return down_write_trylock(&devcom->comp->sem);
440 441 }
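The devcom hunks above switch the whole mlx5_devcom_* API from ERR_PTR()-encoded failures to plain NULL returns. A stand-alone sketch of the two calling conventions, with ERR_PTR()/IS_ERR() re-implemented locally for the demo and hypothetical register_old()/register_new() helpers standing in for the mlx5 functions:

```c
/* Minimal sketch of the error-return convention change; not mlx5 code. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Old style: failure is an errno encoded in the pointer, so every
 * caller must test with IS_ERR()/IS_ERR_OR_NULL(). */
static void *register_old(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : malloc(16);
}

/* New style: failure is plain NULL; the reason is logged at the
 * point of failure instead of being carried in the return value. */
static void *register_new(int fail)
{
	if (fail) {
		fprintf(stderr, "register: allocation failed\n");
		return NULL;
	}
	return malloc(16);
}

int main(void)
{
	void *h = register_old(1);
	if (IS_ERR(h))
		printf("old style: err=%ld\n", PTR_ERR(h));

	h = register_new(1);
	if (!h)
		printf("new style: caller maps NULL to -EINVAL\n");
	return 0;
}
```

The trade-off is that a NULL return can no longer distinguish -EEXIST from -ENOMEM, which is why the hunks also move the diagnostics (mlx5_core_err()) to the point of failure.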
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
··· 221 221 attr.net = mlx5_core_net(dev); 222 222 devcom = mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_SD_GROUP, 223 223 &attr, NULL, dev); 224 - if (IS_ERR(devcom)) 225 - return PTR_ERR(devcom); 224 + if (!devcom) 225 + return -EINVAL; 226 226 227 227 sd->devcom = devcom; 228 228
+2 -3
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 978 978 int err; 979 979 980 980 dev->priv.devc = mlx5_devcom_register_device(dev); 981 - if (IS_ERR(dev->priv.devc)) 982 - mlx5_core_warn(dev, "failed to register devcom device %pe\n", 983 - dev->priv.devc); 981 + if (!dev->priv.devc) 982 + mlx5_core_warn(dev, "failed to register devcom device\n"); 984 983 985 984 err = mlx5_query_board_id(dev); 986 985 if (err) {
+22 -2
drivers/net/ethernet/renesas/ravb_main.c
··· 2211 2211 2212 2212 skb_tx_timestamp(skb); 2213 2213 } 2214 - /* Descriptor type must be set after all the above writes */ 2215 - dma_wmb(); 2214 + 2216 2215 if (num_tx_desc > 1) { 2217 2216 desc->die_dt = DT_FEND; 2218 2217 desc--; 2218 + /* When using multi-descriptors, DT_FEND needs to get written 2219 + * before DT_FSTART, but the compiler may reorder the memory 2220 + * writes in an attempt to optimize the code. 2221 + * Use a dma_wmb() barrier to make sure DT_FEND and DT_FSTART 2222 + * are written exactly in the order shown in the code. 2223 + * This is particularly important for cases where the DMA engine 2224 + * is already running when we are running this code. If the DMA 2225 + * sees DT_FSTART without the corresponding DT_FEND it will enter 2226 + * an error condition. 2227 + */ 2228 + dma_wmb(); 2219 2229 desc->die_dt = DT_FSTART; 2220 2230 } else { 2231 + /* Descriptor type must be set after all the above writes */ 2232 + dma_wmb(); 2221 2233 desc->die_dt = DT_FSINGLE; 2222 2234 } 2235 + 2236 + /* Before ringing the doorbell we need to make sure that the latest 2237 + * writes have been committed to memory, otherwise it could delay 2238 + * things until the doorbell is rang again. 2239 + * This is in replacement of the read operation mentioned in the HW 2240 + * manuals. 2241 + */ 2242 + dma_wmb(); 2223 2243 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); 2224 2244 2225 2245 priv->cur_tx[q] += num_tx_desc;
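The ravb hunk is entirely about write ordering: DT_FEND must be visible before DT_FSTART, and both before the doorbell write. A user-space sketch of the same publish pattern, with C11 release fences standing in for dma_wmb() and a hypothetical descriptor layout (a real dma_wmb() additionally orders writes against device DMA, which plain fences do not model):

```c
/* Publish-ordering sketch mirroring the dma_wmb() placement above;
 * stdatomic fences are stand-ins for dma_wmb(), not equivalents. */
#include <stdatomic.h>
#include <stdint.h>

struct desc {
	uint8_t die_dt;		/* descriptor type: polled by the DMA engine */
	uint8_t data[7];	/* hypothetical payload layout */
};

enum { DT_FEND = 0x40, DT_FSTART = 0x50 };

static void publish_multi(struct desc *first, struct desc *last)
{
	last->die_dt = DT_FEND;
	/* All prior writes, including DT_FEND, must be visible before
	 * DT_FSTART: a running DMA engine that sees a frame start with
	 * no matching end enters an error condition. */
	atomic_thread_fence(memory_order_release);
	first->die_dt = DT_FSTART;
	/* One more barrier before ringing the doorbell register, so the
	 * latest descriptor writes are committed first. */
	atomic_thread_fence(memory_order_release);
	/* ravb_modify(ndev, TCCR, ...) would be issued here. */
}

int main(void)
{
	struct desc ring[4] = {0};

	publish_multi(&ring[0], &ring[3]);
	return 0;
}
```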
+5 -4
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
··· 1446 1446 } 1447 1447 } else { 1448 1448 if (bsp_priv->clk_enabled) { 1449 + if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) { 1450 + bsp_priv->ops->set_clock_selection(bsp_priv, 1451 + bsp_priv->clock_input, false); 1452 + } 1453 + 1449 1454 clk_bulk_disable_unprepare(bsp_priv->num_clks, 1450 1455 bsp_priv->clks); 1451 1456 clk_disable_unprepare(bsp_priv->clk_phy); 1452 - 1453 - if (bsp_priv->ops && bsp_priv->ops->set_clock_selection) 1454 - bsp_priv->ops->set_clock_selection(bsp_priv, 1455 - bsp_priv->clock_input, false); 1456 1457 1457 1458 bsp_priv->clk_enabled = false; 1458 1459 }
+43 -20
drivers/net/ethernet/ti/am65-cpts.c
··· 163 163 struct device_node *clk_mux_np;
164 164 struct clk *refclk;
165 165 u32 refclk_freq;
166 - struct list_head events;
166 + /* separate lists to handle TX and RX timestamp independently */
167 + struct list_head events_tx;
168 + struct list_head events_rx;
167 169 struct list_head pool;
168 170 struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
169 171 spinlock_t lock; /* protects events lists*/
··· 229 227 am65_cpts_write32(cpts, 0, int_enable);
230 228 }
231 229
230 + static int am65_cpts_purge_event_list(struct am65_cpts *cpts,
231 + struct list_head *events)
232 + {
233 + struct list_head *this, *next;
234 + struct am65_cpts_event *event;
235 + int removed = 0;
236 +
237 + list_for_each_safe(this, next, events) {
238 + event = list_entry(this, struct am65_cpts_event, list);
239 + if (time_after(jiffies, event->tmo)) {
240 + list_del_init(&event->list);
241 + list_add(&event->list, &cpts->pool);
242 + ++removed;
243 + }
244 + }
245 + return removed;
246 + }
247 +
232 248 static int am65_cpts_event_get_port(struct am65_cpts_event *event)
233 249 {
234 250 return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
··· 259 239 AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
260 240 }
261 241
262 - static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
242 + static int am65_cpts_purge_events(struct am65_cpts *cpts)
263 243 {
264 - struct list_head *this, *next;
265 - struct am65_cpts_event *event;
266 244 int removed = 0;
267 245
268 - list_for_each_safe(this, next, &cpts->events) {
269 - event = list_entry(this, struct am65_cpts_event, list);
270 - if (time_after(jiffies, event->tmo)) {
271 - list_del_init(&event->list);
272 - list_add(&event->list, &cpts->pool);
273 - ++removed;
274 - }
275 - }
246 + removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx);
247 + removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx);
276 248
277 249 if (removed)
278 250 dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
··· 299 287 struct am65_cpts_event, list);
300 288
301 289 if (!event) {
302 - if (am65_cpts_cpts_purge_events(cpts)) {
290 + if (am65_cpts_purge_events(cpts)) {
303 291 dev_err(cpts->dev, "cpts: event pool empty\n");
304 292 ret = -1;
305 293 goto out;
··· 318 306 cpts->timestamp);
319 307 break;
320 308 case AM65_CPTS_EV_RX:
309 + event->tmo = jiffies +
310 + msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
311 +
312 + list_move_tail(&event->list, &cpts->events_rx);
313 +
314 + dev_dbg(cpts->dev,
315 + "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n",
316 + event->event1, event->event2,
317 + event->timestamp);
318 + break;
321 319 case AM65_CPTS_EV_TX:
322 320 event->tmo = jiffies +
323 321 msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
324 322
325 - list_move_tail(&event->list, &cpts->events);
323 + list_move_tail(&event->list, &cpts->events_tx);
326 324
327 325 dev_dbg(cpts->dev,
328 326 "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
··· 850 828 return found;
851 829 }
852 830
853 - static void am65_cpts_find_ts(struct am65_cpts *cpts)
831 + static void am65_cpts_find_tx_ts(struct am65_cpts *cpts)
854 832 {
855 833 struct am65_cpts_event *event;
856 834 struct list_head *this, *next;
··· 859 837 LIST_HEAD(events);
860 838
861 839 spin_lock_irqsave(&cpts->lock, flags);
862 - list_splice_init(&cpts->events, &events);
840 + list_splice_init(&cpts->events_tx, &events);
863 841 spin_unlock_irqrestore(&cpts->lock, flags);
864 842
865 843 list_for_each_safe(this, next, &events) {
··· 872 850 }
873 851
874 852 spin_lock_irqsave(&cpts->lock, flags);
875 - list_splice_tail(&events, &cpts->events);
853 + list_splice_tail(&events, &cpts->events_tx);
876 854 list_splice_tail(&events_free, &cpts->pool);
877 855 spin_unlock_irqrestore(&cpts->lock, flags);
878 856 }
··· 883 861 unsigned long flags;
884 862 long delay = -1;
885 863
886 - am65_cpts_find_ts(cpts);
864 + am65_cpts_find_tx_ts(cpts);
887 865
888 866 spin_lock_irqsave(&cpts->txq.lock, flags);
889 867 if (!skb_queue_empty(&cpts->txq))
··· 927 905
928 906 spin_lock_irqsave(&cpts->lock, flags);
929 907 __am65_cpts_fifo_read(cpts);
930 - list_for_each_safe(this, next, &cpts->events) {
908 + list_for_each_safe(this, next, &cpts->events_rx) {
931 909 event = list_entry(this, struct am65_cpts_event, list);
932 910 if (time_after(jiffies, event->tmo)) {
933 911 list_move(&event->list, &cpts->pool);
··· 1177 1155 return ERR_PTR(ret);
1178 1156
1179 1157 mutex_init(&cpts->ptp_clk_lock);
1180 - INIT_LIST_HEAD(&cpts->events);
1158 + INIT_LIST_HEAD(&cpts->events_tx);
1159 + INIT_LIST_HEAD(&cpts->events_rx);
1181 1160 INIT_LIST_HEAD(&cpts->pool);
1182 1161 spin_lock_init(&cpts->lock);
1183 1162 skb_queue_head_init(&cpts->txq);
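The CPTS hunk splits the single shared event list into independent TX and RX lists and factors the timeout purge into one helper applied to each. A minimal sketch of that shape with a hand-rolled singly linked list; the names and the jiffies stand-in are illustrative, not the driver's:

```c
/* Sketch: purge expired entries from several lists into a free pool,
 * mirroring am65_cpts_purge_event_list()/am65_cpts_purge_events(). */
struct event {
	long tmo;		/* expiry time, jiffies-style */
	struct event *next;
};

static long jiffies;		/* stand-in for the kernel tick counter */

/* Move expired events from one list onto the pool; returns how many
 * were moved, like the driver's per-list walk with list_for_each_safe(). */
static int purge_list(struct event **events, struct event **pool)
{
	struct event **pp = events;
	int removed = 0;

	while (*pp) {
		struct event *ev = *pp;

		if (jiffies > ev->tmo) {
			*pp = ev->next;		/* unlink from the list */
			ev->next = *pool;	/* push onto the pool */
			*pool = ev;
			removed++;
		} else {
			pp = &ev->next;
		}
	}
	return removed;
}

/* TX and RX keep independent lists but share the purge logic, just as
 * the new am65_cpts_purge_events() sums the two per-list calls. */
static int purge_all(struct event **tx, struct event **rx, struct event **pool)
{
	return purge_list(tx, pool) + purge_list(rx, pool);
}
```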
+22 -4
drivers/net/ovpn/tcp.c
··· 560 560 static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock, 561 561 poll_table *wait) 562 562 { 563 - __poll_t mask = datagram_poll(file, sock, wait); 563 + struct sk_buff_head *queue = &sock->sk->sk_receive_queue; 564 564 struct ovpn_socket *ovpn_sock; 565 + struct ovpn_peer *peer = NULL; 566 + __poll_t mask; 565 567 566 568 rcu_read_lock(); 567 569 ovpn_sock = rcu_dereference_sk_user_data(sock->sk); 568 - if (ovpn_sock && ovpn_sock->peer && 569 - !skb_queue_empty(&ovpn_sock->peer->tcp.user_queue)) 570 - mask |= EPOLLIN | EPOLLRDNORM; 570 + /* if we landed in this callback, we expect to have a 571 + * meaningful state. The ovpn_socket lifecycle would 572 + * prevent it otherwise. 573 + */ 574 + if (WARN(!ovpn_sock || !ovpn_sock->peer, 575 + "ovpn: null state in ovpn_tcp_poll!")) { 576 + rcu_read_unlock(); 577 + return 0; 578 + } 579 + 580 + if (ovpn_peer_hold(ovpn_sock->peer)) { 581 + peer = ovpn_sock->peer; 582 + queue = &peer->tcp.user_queue; 583 + } 571 584 rcu_read_unlock(); 585 + 586 + mask = datagram_poll_queue(file, sock, wait, queue); 587 + 588 + if (peer) 589 + ovpn_peer_put(peer); 572 590 573 591 return mask; 574 592 }
+2 -2
drivers/net/phy/micrel.c
··· 4262 4262 { 4263 4263 struct lan8814_shared_priv *shared = phy_package_get_priv(phydev); 4264 4264 4265 + shared->phydev = phydev; 4266 + 4265 4267 /* Initialise shared lock for clock*/ 4266 4268 mutex_init(&shared->shared_lock); 4267 4269 ··· 4318 4316 return 0; 4319 4317 4320 4318 phydev_dbg(phydev, "successfully registered ptp clock\n"); 4321 - 4322 - shared->phydev = phydev; 4323 4319 4324 4320 /* The EP.4 is shared between all the PHYs in the package and also it 4325 4321 * can be accessed by any of the PHYs
+8 -8
drivers/net/phy/realtek/realtek_main.c
··· 154 154 #define RTL_8211FVD_PHYID 0x001cc878 155 155 #define RTL_8221B 0x001cc840 156 156 #define RTL_8221B_VB_CG 0x001cc849 157 - #define RTL_8221B_VN_CG 0x001cc84a 157 + #define RTL_8221B_VM_CG 0x001cc84a 158 158 #define RTL_8251B 0x001cc862 159 159 #define RTL_8261C 0x001cc890 160 160 ··· 1523 1523 return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true); 1524 1524 } 1525 1525 1526 - static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev, 1526 + static int rtl8221b_vm_cg_c22_match_phy_device(struct phy_device *phydev, 1527 1527 const struct phy_driver *phydrv) 1528 1528 { 1529 - return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false); 1529 + return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, false); 1530 1530 } 1531 1531 1532 - static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev, 1532 + static int rtl8221b_vm_cg_c45_match_phy_device(struct phy_device *phydev, 1533 1533 const struct phy_driver *phydrv) 1534 1534 { 1535 - return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true); 1535 + return rtlgen_is_c45_match(phydev, RTL_8221B_VM_CG, true); 1536 1536 } 1537 1537 1538 1538 static int rtl_internal_nbaset_match_phy_device(struct phy_device *phydev, ··· 1879 1879 .suspend = genphy_c45_pma_suspend, 1880 1880 .resume = rtlgen_c45_resume, 1881 1881 }, { 1882 - .match_phy_device = rtl8221b_vn_cg_c22_match_phy_device, 1882 + .match_phy_device = rtl8221b_vm_cg_c22_match_phy_device, 1883 1883 .name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)", 1884 1884 .probe = rtl822x_probe, 1885 1885 .get_features = rtl822x_get_features, ··· 1892 1892 .read_page = rtl821x_read_page, 1893 1893 .write_page = rtl821x_write_page, 1894 1894 }, { 1895 - .match_phy_device = rtl8221b_vn_cg_c45_match_phy_device, 1896 - .name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)", 1895 + .match_phy_device = rtl8221b_vm_cg_c45_match_phy_device, 1896 + .name = "RTL8221B-VM-CG 2.5Gbps PHY (C45)", 1897 1897 .probe = rtl822x_probe, 1898 1898 .config_init = rtl822xb_config_init, 1899 1899 .get_rate_matching = rtl822xb_get_rate_matching,
+9 -2
drivers/net/usb/rtl8150.c
··· 685 685 rtl8150_t *dev = netdev_priv(netdev); 686 686 int count, res; 687 687 688 + /* pad the frame and ensure terminating USB packet, datasheet 9.2.3 */ 689 + count = max(skb->len, ETH_ZLEN); 690 + if (count % 64 == 0) 691 + count++; 692 + if (skb_padto(skb, count)) { 693 + netdev->stats.tx_dropped++; 694 + return NETDEV_TX_OK; 695 + } 696 + 688 697 netif_stop_queue(netdev); 689 - count = (skb->len < 60) ? 60 : skb->len; 690 - count = (count & 0x3f) ? count : count + 1; 691 698 dev->tx_skb = skb; 692 699 usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), 693 700 skb->data, count, write_bulk_callback, dev);
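The rtl8150 fix pads the frame to at least ETH_ZLEN and then bumps any length that is an exact multiple of the 64-byte bulk packet size, so the transfer always ends in a terminating short packet. The length math in isolation, as a runnable sketch:

```c
#include <stdio.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length */

/* Matches the hunk above: pad to ETH_ZLEN, then avoid a length that is
 * an exact multiple of the 64-byte bulk endpoint size, which would end
 * the USB transfer without a short packet (datasheet 9.2.3). */
static unsigned int tx_len(unsigned int skb_len)
{
	unsigned int count = skb_len > ETH_ZLEN ? skb_len : ETH_ZLEN;

	if (count % 64 == 0)
		count++;
	return count;
}

int main(void)
{
	printf("%u -> %u\n", 42u, tx_len(42));	/* 42 -> 60: padded */
	printf("%u -> %u\n", 64u, tx_len(64));	/* 64 -> 65: +1 byte */
	printf("%u -> %u\n", 100u, tx_len(100));	/* 100 -> 100: unchanged */
	return 0;
}
```

The old code computed the same count but never actually padded the skb; the fix adds skb_padto() so the extra bytes are real, not stale memory past the buffer.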
+1
drivers/nvmem/rcar-efuse.c
··· 127 127 { .compatible = "renesas,r8a779h0-otp", .data = &rcar_fuse_v4m }, 128 128 { /* sentinel */ } 129 129 }; 130 + MODULE_DEVICE_TABLE(of, rcar_fuse_match); 130 131 131 132 static struct platform_driver rcar_fuse_driver = { 132 133 .probe = rcar_fuse_probe,
+40 -4
drivers/of/irq.c
··· 671 671 } 672 672 } 673 673 674 + static int of_check_msi_parent(struct device_node *dev_node, struct device_node **msi_node) 675 + { 676 + struct of_phandle_args msi_spec; 677 + int ret; 678 + 679 + /* 680 + * An msi-parent phandle with a missing or == 0 #msi-cells 681 + * property identifies a 1:1 ID translation mapping. 682 + * 683 + * Set the msi controller node if the firmware matches this 684 + * condition. 685 + */ 686 + ret = of_parse_phandle_with_optional_args(dev_node, "msi-parent", "#msi-cells", 687 + 0, &msi_spec); 688 + if (ret) 689 + return ret; 690 + 691 + if ((*msi_node && *msi_node != msi_spec.np) || msi_spec.args_count != 0) 692 + ret = -EINVAL; 693 + 694 + if (!ret) { 695 + /* Return with a node reference held */ 696 + *msi_node = msi_spec.np; 697 + return 0; 698 + } 699 + of_node_put(msi_spec.np); 700 + 701 + return ret; 702 + } 703 + 674 704 /** 675 705 * of_msi_xlate - map a MSI ID and find relevant MSI controller node 676 706 * @dev: device for which the mapping is to be done. ··· 708 678 * @id_in: Device ID. 709 679 * 710 680 * Walk up the device hierarchy looking for devices with a "msi-map" 711 - * property. If found, apply the mapping to @id_in. 681 + * or "msi-parent" property. If found, apply the mapping to @id_in. 712 682 * If @msi_np points to a non-NULL device node pointer, only entries targeting 713 683 * that node will be matched; if it points to a NULL value, it will receive the 714 684 * device node of the first matching target phandle, with a reference held. ··· 722 692 723 693 /* 724 694 * Walk up the device parent links looking for one with a 725 - * "msi-map" property. 695 + * "msi-map" or an "msi-parent" property. 726 696 */ 727 - for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) 697 + for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) { 728 698 if (!of_map_id(parent_dev->of_node, id_in, "msi-map", 729 699 "msi-map-mask", msi_np, &id_out)) 730 700 break; 701 + if (!of_check_msi_parent(parent_dev->of_node, msi_np)) 702 + break; 703 + } 731 704 return id_out; 732 705 } 706 + EXPORT_SYMBOL_GPL(of_msi_xlate); 733 707 734 708 /** 735 709 * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain ··· 775 741 776 742 of_for_each_phandle(&it, err, np, "msi-parent", "#msi-cells", 0) { 777 743 d = irq_find_matching_host(it.node, token); 778 - if (d) 744 + if (d) { 745 + of_node_put(it.node); 779 746 return d; 747 + } 780 748 } 781 749 782 750 return NULL;
+24 -4
drivers/pci/controller/dwc/pcie-designware-host.c
··· 23 23 #include "pcie-designware.h" 24 24 25 25 static struct pci_ops dw_pcie_ops; 26 + static struct pci_ops dw_pcie_ecam_ops; 26 27 static struct pci_ops dw_child_pcie_ops; 27 28 28 29 #define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ ··· 472 471 if (IS_ERR(pp->cfg)) 473 472 return PTR_ERR(pp->cfg); 474 473 475 - pci->dbi_base = pp->cfg->win; 476 - pci->dbi_phys_addr = res->start; 477 - 478 474 return 0; 479 475 } 480 476 ··· 527 529 if (ret) 528 530 return ret; 529 531 530 - pp->bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops; 532 + pp->bridge->ops = &dw_pcie_ecam_ops; 531 533 pp->bridge->sysdata = pp->cfg; 532 534 pp->cfg->priv = pp; 533 535 } else { ··· 840 842 } 841 843 EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus); 842 844 845 + static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where) 846 + { 847 + struct pci_config_window *cfg = bus->sysdata; 848 + struct dw_pcie_rp *pp = cfg->priv; 849 + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 850 + unsigned int busn = bus->number; 851 + 852 + if (busn > 0) 853 + return pci_ecam_map_bus(bus, devfn, where); 854 + 855 + if (PCI_SLOT(devfn) > 0) 856 + return NULL; 857 + 858 + return pci->dbi_base + where; 859 + } 860 + 843 861 static struct pci_ops dw_pcie_ops = { 844 862 .map_bus = dw_pcie_own_conf_map_bus, 863 + .read = pci_generic_config_read, 864 + .write = pci_generic_config_write, 865 + }; 866 + 867 + static struct pci_ops dw_pcie_ecam_ops = { 868 + .map_bus = dw_pcie_ecam_conf_map_bus, 845 869 .read = pci_generic_config_read, 846 870 .write = pci_generic_config_write, 847 871 };
-68
drivers/pci/controller/dwc/pcie-qcom.c
··· 55 55 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8 56 56 #define PARF_Q2A_FLUSH 0x1ac 57 57 #define PARF_LTSSM 0x1b0 58 - #define PARF_SLV_DBI_ELBI 0x1b4 59 58 #define PARF_INT_ALL_STATUS 0x224 60 59 #define PARF_INT_ALL_CLEAR 0x228 61 60 #define PARF_INT_ALL_MASK 0x22c ··· 64 65 #define PARF_DBI_BASE_ADDR_V2_HI 0x354 65 66 #define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358 66 67 #define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c 67 - #define PARF_BLOCK_SLV_AXI_WR_BASE 0x360 68 - #define PARF_BLOCK_SLV_AXI_WR_BASE_HI 0x364 69 - #define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368 70 - #define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI 0x36c 71 - #define PARF_BLOCK_SLV_AXI_RD_BASE 0x370 72 - #define PARF_BLOCK_SLV_AXI_RD_BASE_HI 0x374 73 - #define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378 74 - #define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI 0x37c 75 - #define PARF_ECAM_BASE 0x380 76 - #define PARF_ECAM_BASE_HI 0x384 77 68 #define PARF_NO_SNOOP_OVERRIDE 0x3d4 78 69 #define PARF_ATU_BASE_ADDR 0x634 79 70 #define PARF_ATU_BASE_ADDR_HI 0x638 ··· 87 98 88 99 /* PARF_SYS_CTRL register fields */ 89 100 #define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29) 90 - #define PCIE_ECAM_BLOCKER_EN BIT(26) 91 101 #define MST_WAKEUP_EN BIT(13) 92 102 #define SLV_WAKEUP_EN BIT(12) 93 103 #define MSTR_ACLK_CGC_DIS BIT(10) ··· 133 145 134 146 /* PARF_LTSSM register fields */ 135 147 #define LTSSM_EN BIT(8) 136 - 137 - /* PARF_SLV_DBI_ELBI */ 138 - #define SLV_DBI_ELBI_ADDR_BASE GENMASK(11, 0) 139 148 140 149 /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */ 141 150 #define PARF_INT_ALL_LINK_UP BIT(13) ··· 309 324 /* Ensure that PERST has been asserted for at least 100 ms */ 310 325 msleep(PCIE_T_PVPERL_MS); 311 326 qcom_perst_assert(pcie, false); 312 - } 313 - 314 - static void qcom_pci_config_ecam(struct dw_pcie_rp *pp) 315 - { 316 - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 317 - struct qcom_pcie *pcie = to_qcom_pcie(pci); 318 - u64 addr, addr_end; 319 - u32 val; 320 - 321 - writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE); 322 - writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI); 323 - 324 - /* 325 - * The only device on the root bus is a single Root Port. If we try to 326 - * access any devices other than Device/Function 00.0 on Bus 0, the TLP 327 - * will go outside of the controller to the PCI bus. But with CFG Shift 328 - * Feature (ECAM) enabled in iATU, there is no guarantee that the 329 - * response is going to be all F's. Hence, to make sure that the 330 - * requester gets all F's response for accesses other than the Root 331 - * Port, configure iATU to block the transactions starting from 332 - * function 1 of the root bus to the end of the root bus (i.e., from 333 - * dbi_base + 4KB to dbi_base + 1MB). 
334 - */ 335 - addr = pci->dbi_phys_addr + SZ_4K; 336 - writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE); 337 - writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI); 338 - 339 - writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE); 340 - writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI); 341 - 342 - addr_end = pci->dbi_phys_addr + SZ_1M - 1; 343 - 344 - writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT); 345 - writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI); 346 - 347 - writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT); 348 - writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI); 349 - 350 - val = readl_relaxed(pcie->parf + PARF_SYS_CTRL); 351 - val |= PCIE_ECAM_BLOCKER_EN; 352 - writel_relaxed(val, pcie->parf + PARF_SYS_CTRL); 353 327 } 354 328 355 329 static int qcom_pcie_start_link(struct dw_pcie *pci) ··· 1264 1320 { 1265 1321 struct dw_pcie *pci = to_dw_pcie_from_pp(pp); 1266 1322 struct qcom_pcie *pcie = to_qcom_pcie(pci); 1267 - u16 offset; 1268 1323 int ret; 1269 1324 1270 1325 qcom_ep_reset_assert(pcie); ··· 1271 1328 ret = pcie->cfg->ops->init(pcie); 1272 1329 if (ret) 1273 1330 return ret; 1274 - 1275 - if (pp->ecam_enabled) { 1276 - /* 1277 - * Override ELBI when ECAM is enabled, as when ECAM is enabled, 1278 - * ELBI moves under the 'config' space. 1279 - */ 1280 - offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI)); 1281 - pci->elbi_base = pci->dbi_base + offset; 1282 - 1283 - qcom_pci_config_ecam(pp); 1284 - } 1285 1331 1286 1332 ret = qcom_pcie_phy_power_on(pcie); 1287 1333 if (ret)
+9 -25
drivers/pci/pcie/aspm.c
··· 243 243 /* Clock PM state */ 244 244 u32 clkpm_capable:1; /* Clock PM capable? */ 245 245 u32 clkpm_enabled:1; /* Current Clock PM state */ 246 - u32 clkpm_default:1; /* Default Clock PM state by BIOS or 247 - override */ 246 + u32 clkpm_default:1; /* Default Clock PM state by BIOS */ 248 247 u32 clkpm_disable:1; /* Clock PM disabled */ 249 248 }; 250 249 ··· 375 376 pcie_set_clkpm_nocheck(link, enable); 376 377 } 377 378 378 - static void pcie_clkpm_override_default_link_state(struct pcie_link_state *link, 379 - int enabled) 380 - { 381 - struct pci_dev *pdev = link->downstream; 382 - 383 - /* For devicetree platforms, enable ClockPM by default */ 384 - if (of_have_populated_dt() && !enabled) { 385 - link->clkpm_default = 1; 386 - pci_info(pdev, "ASPM: DT platform, enabling ClockPM\n"); 387 - } 388 - } 389 - 390 379 static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) 391 380 { 392 381 int capable = 1, enabled = 1; ··· 397 410 } 398 411 link->clkpm_enabled = enabled; 399 412 link->clkpm_default = enabled; 400 - pcie_clkpm_override_default_link_state(link, enabled); 401 413 link->clkpm_capable = capable; 402 414 link->clkpm_disable = blacklist ? 1 : 0; 403 415 } ··· 797 811 struct pci_dev *pdev = link->downstream; 798 812 u32 override; 799 813 800 - /* For devicetree platforms, enable all ASPM states by default */ 814 + /* For devicetree platforms, enable L0s and L1 by default */ 801 815 if (of_have_populated_dt()) { 802 - link->aspm_default = PCIE_LINK_STATE_ASPM_ALL; 816 + if (link->aspm_support & PCIE_LINK_STATE_L0S) 817 + link->aspm_default |= PCIE_LINK_STATE_L0S; 818 + if (link->aspm_support & PCIE_LINK_STATE_L1) 819 + link->aspm_default |= PCIE_LINK_STATE_L1; 803 820 override = link->aspm_default & ~link->aspm_enabled; 804 821 if (override) 805 - pci_info(pdev, "ASPM: DT platform, enabling%s%s%s%s%s%s%s\n", 806 - FLAG(override, L0S_UP, " L0s-up"), 807 - FLAG(override, L0S_DW, " L0s-dw"), 808 - FLAG(override, L1, " L1"), 809 - FLAG(override, L1_1, " ASPM-L1.1"), 810 - FLAG(override, L1_2, " ASPM-L1.2"), 811 - FLAG(override, L1_1_PCIPM, " PCI-PM-L1.1"), 812 - FLAG(override, L1_2_PCIPM, " PCI-PM-L1.2")); 822 + pci_info(pdev, "ASPM: default states%s%s\n", 823 + FLAG(override, L0S, " L0s"), 824 + FLAG(override, L1, " L1")); 813 825 } 814 826 } 815 827
+1
drivers/platform/mellanox/mlxbf-pmc.c
··· 2015 2015 if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) { 2016 2016 /* Program crspace counters to count clock cycles using "count_clock" sysfs */ 2017 2017 attr = &pmc->block[blk_num].attr_count_clock; 2018 + sysfs_attr_init(&attr->dev_attr.attr); 2018 2019 attr->dev_attr.attr.mode = 0644; 2019 2020 attr->dev_attr.show = mlxbf_pmc_count_clock_show; 2020 2021 attr->dev_attr.store = mlxbf_pmc_count_clock_store;
+10 -2
drivers/platform/x86/dell/alienware-wmi-wmax.c
··· 210 210 .driver_data = &g_series_quirks, 211 211 }, 212 212 { 213 + .ident = "Dell Inc. G15 5530", 214 + .matches = { 215 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 216 + DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5530"), 217 + }, 218 + .driver_data = &g_series_quirks, 219 + }, 220 + { 213 221 .ident = "Dell Inc. G16 7630", 214 222 .matches = { 215 223 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), ··· 1647 1639 1648 1640 static int wmax_wmi_suspend(struct device *dev) 1649 1641 { 1650 - if (awcc->hwmon) 1642 + if (awcc && awcc->hwmon) 1651 1643 awcc_hwmon_suspend(dev); 1652 1644 1653 1645 return 0; ··· 1655 1647 1656 1648 static int wmax_wmi_resume(struct device *dev) 1657 1649 { 1658 - if (awcc->hwmon) 1650 + if (awcc && awcc->hwmon) 1659 1651 awcc_hwmon_resume(dev); 1660 1652 1661 1653 return 0;
+1 -1
drivers/ptp/ptp_ocp.c
··· 2548 2548 for (i = 0; i < OCP_SMA_NUM; i++) { 2549 2549 bp->sma[i].fixed_fcn = true; 2550 2550 bp->sma[i].fixed_dir = true; 2551 - bp->sma[1].dpll_prop.capabilities &= 2551 + bp->sma[i].dpll_prop.capabilities &= 2552 2552 ~DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE; 2553 2553 } 2554 2554 return;
+1 -1
drivers/scsi/libfc/fc_fcp.c
··· 503 503 host_bcode = FC_ERROR; 504 504 goto err; 505 505 } 506 - if (offset + len > fsp->data_len) { 506 + if (size_add(offset, len) > fsp->data_len) { 507 507 /* this should never happen */ 508 508 if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) && 509 509 fc_frame_crc_check(fp))
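The libfc change replaces `offset + len` with `size_add(offset, len)`, which saturates on overflow, so a wrapped sum cannot slip past the bounds check. A stand-alone demonstration, re-implementing the saturating helper with the GCC/Clang overflow builtin (the kernel's size_add() behaves the same way):

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Saturating add: on overflow the result pins to SIZE_MAX, so a
 * subsequent "sum > limit" comparison stays true. */
static size_t size_add(size_t a, size_t b)
{
	size_t sum;

	if (__builtin_add_overflow(a, b, &sum))
		return SIZE_MAX;
	return sum;
}

int main(void)
{
	size_t data_len = 4096;
	size_t offset = SIZE_MAX - 8, len = 64;

	/* Unchecked: the sum wraps to a small value and the bogus
	 * frame would pass the bounds test. */
	if (offset + len > data_len)
		puts("unchecked sum: rejected");
	else
		puts("unchecked sum: WRONGLY accepted (wrapped)");

	/* Saturating: the same frame is rejected. */
	if (size_add(offset, len) > data_len)
		puts("size_add: rejected");
	return 0;
}
```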
+4 -4
drivers/scsi/qla4xxx/ql4_os.c
··· 4104 4104 * The mid-level driver tries to ensure that queuecommand never gets 4105 4105 * invoked concurrently with itself or the interrupt handler (although 4106 4106 * the interrupt handler may call this routine as part of request- 4107 - * completion handling). Unfortunely, it sometimes calls the scheduler 4107 + * completion handling). Unfortunately, it sometimes calls the scheduler 4108 4108 * in interrupt context which is a big NO! NO!. 4109 4109 **/ 4110 4110 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ··· 4647 4647 cmd = scsi_host_find_tag(ha->host, index); 4648 4648 /* 4649 4649 * We cannot just check if the index is valid, 4650 - * becase if we are run from the scsi eh, then 4650 + * because if we are run from the scsi eh, then 4651 4651 * the scsi/block layer is going to prevent 4652 4652 * the tag from being released. 4653 4653 */ ··· 4952 4952 /* Upon successful firmware/chip reset, re-initialize the adapter */ 4953 4953 if (status == QLA_SUCCESS) { 4954 4954 /* For ISP-4xxx, force function 1 to always initialize 4955 - * before function 3 to prevent both funcions from 4955 + * before function 3 to prevent both functions from 4956 4956 * stepping on top of the other */ 4957 4957 if (is_qla40XX(ha) && (ha->mac_index == 3)) 4958 4958 ssleep(6); ··· 6914 6914 struct ddb_entry *ddb_entry = NULL; 6915 6915 6916 6916 /* Create session object, with INVALID_ENTRY, 6917 - * the targer_id would get set when we issue the login 6917 + * the target_id would get set when we issue the login 6918 6918 */ 6919 6919 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host, 6920 6920 cmds_max, sizeof(struct ddb_entry),
+46 -52
drivers/scsi/storvsc_drv.c
··· 1406 1406 }
1407 1407
1408 1408 /*
1409 - * Our channel array is sparsley populated and we
1409 + * Our channel array could be sparsley populated and we
1410 1410 * initiated I/O on a processor/hw-q that does not
1411 1411 * currently have a designated channel. Fix this.
1412 1412 * The strategy is simple:
1413 - * I. Ensure NUMA locality
1414 - * II. Distribute evenly (best effort)
1413 + * I. Prefer the channel associated with the current CPU
1414 + * II. Ensure NUMA locality
1415 + * III. Distribute evenly (best effort)
1415 1416 */
1417 +
1418 + /* Prefer the channel on the I/O issuing processor/hw-q */
1419 + if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus))
1420 + return stor_device->stor_chns[q_num];
1416 1421
1417 1422 node_mask = cpumask_of_node(cpu_to_node(q_num));
1418 1423
··· 1474 1469 /* See storvsc_change_target_cpu(). */
1475 1470 outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
1476 1471 if (outgoing_channel != NULL) {
1477 - if (outgoing_channel->target_cpu == q_num) {
1478 - /*
1479 - * Ideally, we want to pick a different channel if
1480 - * available on the same NUMA node.
1481 - */
1482 - node_mask = cpumask_of_node(cpu_to_node(q_num));
1483 - for_each_cpu_wrap(tgt_cpu,
1484 - &stor_device->alloced_cpus, q_num + 1) {
1485 - if (!cpumask_test_cpu(tgt_cpu, node_mask))
1486 - continue;
1487 - if (tgt_cpu == q_num)
1488 - continue;
1489 - channel = READ_ONCE(
1490 - stor_device->stor_chns[tgt_cpu]);
1491 - if (channel == NULL)
1492 - continue;
1493 - if (hv_get_avail_to_write_percent(
1494 - &channel->outbound)
1495 - > ring_avail_percent_lowater) {
1496 - outgoing_channel = channel;
1497 - goto found_channel;
1498 - }
1499 - }
1472 + if (hv_get_avail_to_write_percent(&outgoing_channel->outbound)
1473 + > ring_avail_percent_lowater)
1474 + goto found_channel;
1500 1475
1501 - /*
1502 - * All the other channels on the same NUMA node are
1503 - * busy. Try to use the channel on the current CPU
1504 - */
1505 - if (hv_get_avail_to_write_percent(
1506 - &outgoing_channel->outbound)
1507 - > ring_avail_percent_lowater)
1476 + /*
1477 + * Channel is busy, try to find a channel on the same NUMA node
1478 + */
1479 + node_mask = cpumask_of_node(cpu_to_node(q_num));
1480 + for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
1481 + q_num + 1) {
1482 + if (!cpumask_test_cpu(tgt_cpu, node_mask))
1483 + continue;
1484 + channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
1485 + if (!channel)
1486 + continue;
1487 + if (hv_get_avail_to_write_percent(&channel->outbound)
1488 + > ring_avail_percent_lowater) {
1489 + outgoing_channel = channel;
1508 1490 goto found_channel;
1509 -
1510 - /*
1511 - * If we reach here, all the channels on the current
1512 - * NUMA node are busy. Try to find a channel in
1513 - * other NUMA nodes
1514 - */
1515 - for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
1516 - if (cpumask_test_cpu(tgt_cpu, node_mask))
1517 - continue;
1518 - channel = READ_ONCE(
1519 - stor_device->stor_chns[tgt_cpu]);
1520 - if (channel == NULL)
1521 - continue;
1522 - if (hv_get_avail_to_write_percent(
1523 - &channel->outbound)
1524 - > ring_avail_percent_lowater) {
1525 - outgoing_channel = channel;
1526 - goto found_channel;
1527 - }
1528 1491 }
1529 1492 }
1493 +
1494 + /*
1495 + * If we reach here, all the channels on the current
1496 + * NUMA node are busy. Try to find a channel in
1497 + * all NUMA nodes
1498 + */
1499 + for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
1500 + q_num + 1) {
1501 + channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
1502 + if (!channel)
1503 + continue;
1504 + if (hv_get_avail_to_write_percent(&channel->outbound)
1505 + > ring_avail_percent_lowater) {
1506 + outgoing_channel = channel;
1507 + goto found_channel;
1508 + }
1509 + }
1510 + /*
1511 + * If we reach here, all the channels are busy. Use the
1512 + * original channel found.
1513 + */
1530 1514 } else {
1531 1515 spin_lock_irqsave(&stor_device->lock, flags);
1532 1516 outgoing_channel = stor_device->stor_chns[q_num];
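The rewritten storvsc path tries the issuing CPU's channel first, then wraps through same-node CPUs, then all CPUs, and finally falls back to the originally found channel. A sketch of that three-tier scan over a plain array; the channel and topology stubs are invented for illustration, with has_room() standing in for the ring-buffer watermark check:

```c
#include <stdbool.h>

#define NCPU 8

struct channel { int fill_percent; };

static struct channel *chan[NCPU];	/* sparse: entries may be NULL */

/* Stand-in for hv_get_avail_to_write_percent() > ring lowater. */
static bool has_room(const struct channel *c)
{
	return c->fill_percent < 90;
}

/* Stand-in for NUMA topology: pretend 4 CPUs per node. */
static bool same_node(int a, int b)
{
	return a / 4 == b / 4;
}

static struct channel *pick_channel(int q)
{
	struct channel *c = chan[q];
	int i, cpu;

	/* I. Prefer the issuing CPU's channel, if it has room. */
	if (c && has_room(c))
		return c;

	/* II. Wrap through the other CPUs on the same NUMA node,
	 * starting just after q, like for_each_cpu_wrap(..., q + 1). */
	for (i = 1; i < NCPU; i++) {
		cpu = (q + i) % NCPU;
		if (!same_node(q, cpu) || !chan[cpu])
			continue;
		if (has_room(chan[cpu]))
			return chan[cpu];
	}

	/* III. Wrap through every CPU regardless of node. */
	for (i = 1; i < NCPU; i++) {
		cpu = (q + i) % NCPU;
		if (chan[cpu] && has_room(chan[cpu]))
			return chan[cpu];
	}

	/* All busy: fall back to the originally found channel. */
	return c;
}
```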
+97 -31
drivers/spi/spi-airoha-snfi.c
··· 192 192 #define SPI_NAND_OP_RESET 0xff 193 193 #define SPI_NAND_OP_DIE_SELECT 0xc2 194 194 195 + /* SNAND FIFO commands */ 196 + #define SNAND_FIFO_TX_BUSWIDTH_SINGLE 0x08 197 + #define SNAND_FIFO_TX_BUSWIDTH_DUAL 0x09 198 + #define SNAND_FIFO_TX_BUSWIDTH_QUAD 0x0a 199 + #define SNAND_FIFO_RX_BUSWIDTH_SINGLE 0x0c 200 + #define SNAND_FIFO_RX_BUSWIDTH_DUAL 0x0e 201 + #define SNAND_FIFO_RX_BUSWIDTH_QUAD 0x0f 202 + 195 203 #define SPI_NAND_CACHE_SIZE (SZ_4K + SZ_256) 196 204 #define SPI_MAX_TRANSFER_SIZE 511 197 205 ··· 395 387 return regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0); 396 388 } 397 389 398 - static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd, 399 - const u8 *data, int len) 390 + static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, 391 + const u8 *data, int len, int buswidth) 400 392 { 401 393 int i, data_len; 394 + u8 cmd; 395 + 396 + switch (buswidth) { 397 + case 0: 398 + case 1: 399 + cmd = SNAND_FIFO_TX_BUSWIDTH_SINGLE; 400 + break; 401 + case 2: 402 + cmd = SNAND_FIFO_TX_BUSWIDTH_DUAL; 403 + break; 404 + case 4: 405 + cmd = SNAND_FIFO_TX_BUSWIDTH_QUAD; 406 + break; 407 + default: 408 + return -EINVAL; 409 + } 402 410 403 411 for (i = 0; i < len; i += data_len) { 404 412 int err; ··· 433 409 return 0; 434 410 } 435 411 436 - static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data, 437 - int len) 412 + static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, 413 + u8 *data, int len, int buswidth) 438 414 { 439 415 int i, data_len; 416 + u8 cmd; 417 + 418 + switch (buswidth) { 419 + case 0: 420 + case 1: 421 + cmd = SNAND_FIFO_RX_BUSWIDTH_SINGLE; 422 + break; 423 + case 2: 424 + cmd = SNAND_FIFO_RX_BUSWIDTH_DUAL; 425 + break; 426 + case 4: 427 + cmd = SNAND_FIFO_RX_BUSWIDTH_QUAD; 428 + break; 429 + default: 430 + return -EINVAL; 431 + } 440 432 441 433 for (i = 0; i < len; i += data_len) { 442 434 int err; 443 435 444 436 data_len = min(len - i, SPI_MAX_TRANSFER_SIZE); 445 - err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len); 437 + err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len); 446 438 if (err) 447 439 return err; 448 440 ··· 658 618 if (desc->info.offset + desc->info.length > U32_MAX) 659 619 return -EINVAL; 660 620 621 + /* continuous reading is not supported */ 622 + if (desc->info.length > SPI_NAND_CACHE_SIZE) 623 + return -E2BIG; 624 + 661 625 if (!airoha_snand_supports_op(desc->mem, &desc->info.op_tmpl)) 662 626 return -EOPNOTSUPP; 663 627 ··· 698 654 699 655 err = airoha_snand_nfi_config(as_ctrl); 700 656 if (err) 701 - return err; 657 + goto error_dma_mode_off; 702 658 703 659 dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE, 704 660 DMA_FROM_DEVICE); 705 661 err = dma_mapping_error(as_ctrl->dev, dma_addr); 706 662 if (err) 707 - return err; 663 + goto error_dma_mode_off; 708 664 709 665 /* set dma addr */ 710 666 err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR, ··· 733 689 if (err) 734 690 goto error_dma_unmap; 735 691 736 - /* set read addr */ 737 - err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0); 692 + /* set read addr: zero page offset + descriptor read offset */ 693 + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 694 + desc->info.offset); 738 695 if (err) 739 696 goto error_dma_unmap; 740 697 ··· 805 760 error_dma_unmap: 806 761 dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE, 807 762 DMA_FROM_DEVICE); 763 + error_dma_mode_off: 764 + airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); 
808 765 return err; 809 766 } 810 767 ··· 871 824 if (err) 872 825 goto error_dma_unmap; 873 826 874 - err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0); 827 + /* set write addr: zero page offset + descriptor write offset */ 828 + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 829 + desc->info.offset); 875 830 if (err) 876 831 goto error_dma_unmap; 877 832 ··· 941 892 error_dma_unmap: 942 893 dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE, 943 894 DMA_TO_DEVICE); 895 + airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); 944 896 return err; 945 897 } 946 898 947 899 static int airoha_snand_exec_op(struct spi_mem *mem, 948 900 const struct spi_mem_op *op) 949 901 { 950 - u8 data[8], cmd, opcode = op->cmd.opcode; 951 902 struct airoha_snand_ctrl *as_ctrl; 903 + int op_len, addr_len, dummy_len; 904 + u8 buf[20], *data; 952 905 int i, err; 953 906 954 907 as_ctrl = spi_controller_get_devdata(mem->spi->controller); 908 + 909 + op_len = op->cmd.nbytes; 910 + addr_len = op->addr.nbytes; 911 + dummy_len = op->dummy.nbytes; 912 + 913 + if (op_len + dummy_len + addr_len > sizeof(buf)) 914 + return -EIO; 915 + 916 + data = buf; 917 + for (i = 0; i < op_len; i++) 918 + *data++ = op->cmd.opcode >> (8 * (op_len - i - 1)); 919 + for (i = 0; i < addr_len; i++) 920 + *data++ = op->addr.val >> (8 * (addr_len - i - 1)); 921 + for (i = 0; i < dummy_len; i++) 922 + *data++ = 0xff; 955 923 956 924 /* switch to manual mode */ 957 925 err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); ··· 980 914 return err; 981 915 982 916 /* opcode */ 983 - err = airoha_snand_write_data(as_ctrl, 0x8, &opcode, sizeof(opcode)); 917 + data = buf; 918 + err = airoha_snand_write_data(as_ctrl, data, op_len, 919 + op->cmd.buswidth); 984 920 if (err) 985 921 return err; 986 922 987 923 /* addr part */ 988 - cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 0x11 : 0x8; 989 - put_unaligned_be64(op->addr.val, data); 990 - 991 - for (i = ARRAY_SIZE(data) - op->addr.nbytes; 992 - i < ARRAY_SIZE(data); i++) { 993 - err = airoha_snand_write_data(as_ctrl, cmd, &data[i], 994 - sizeof(data[0])); 924 + data += op_len; 925 + if (addr_len) { 926 + err = airoha_snand_write_data(as_ctrl, data, addr_len, 927 + op->addr.buswidth); 995 928 if (err) 996 929 return err; 997 930 } 998 931 999 932 /* dummy */ 1000 - data[0] = 0xff; 1001 - for (i = 0; i < op->dummy.nbytes; i++) { 1002 - err = airoha_snand_write_data(as_ctrl, 0x8, &data[0], 1003 - sizeof(data[0])); 933 + data += addr_len; 934 + if (dummy_len) { 935 + err = airoha_snand_write_data(as_ctrl, data, dummy_len, 936 + op->dummy.buswidth); 1004 937 if (err) 1005 938 return err; 1006 939 } 1007 940 1008 941 /* data */ 1009 - if (op->data.dir == SPI_MEM_DATA_IN) { 1010 - err = airoha_snand_read_data(as_ctrl, op->data.buf.in, 1011 - op->data.nbytes); 1012 - if (err) 1013 - return err; 1014 - } else { 1015 - err = airoha_snand_write_data(as_ctrl, 0x8, op->data.buf.out, 1016 - op->data.nbytes); 942 + if (op->data.nbytes) { 943 + if (op->data.dir == SPI_MEM_DATA_IN) 944 + err = airoha_snand_read_data(as_ctrl, op->data.buf.in, 945 + op->data.nbytes, 946 + op->data.buswidth); 947 + else 948 + err = airoha_snand_write_data(as_ctrl, op->data.buf.out, 949 + op->data.nbytes, 950 + op->data.buswidth); 1017 951 if (err) 1018 952 return err; 1019 953 }
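The airoha change derives the SNFI FIFO opcode from the spi-mem phase buswidth instead of hard-coding single-lane commands. The TX mapping in isolation, as a sketch (opcode values taken from the defines in the hunk; the helper name is illustrative):

```c
#include <errno.h>
#include <stdint.h>

/* FIFO opcodes from the SNAND_FIFO_* defines in the hunk above. */
#define SNAND_FIFO_TX_BUSWIDTH_SINGLE	0x08
#define SNAND_FIFO_TX_BUSWIDTH_DUAL	0x09
#define SNAND_FIFO_TX_BUSWIDTH_QUAD	0x0a

/* Buswidth 0 means "unspecified" and is treated as single-lane,
 * matching the switch added to airoha_snand_write_data(). */
static int tx_fifo_cmd(int buswidth, uint8_t *cmd)
{
	switch (buswidth) {
	case 0:
	case 1:
		*cmd = SNAND_FIFO_TX_BUSWIDTH_SINGLE;
		return 0;
	case 2:
		*cmd = SNAND_FIFO_TX_BUSWIDTH_DUAL;
		return 0;
	case 4:
		*cmd = SNAND_FIFO_TX_BUSWIDTH_QUAD;
		return 0;
	default:
		return -EINVAL;	/* 3- and 8-lane are not supported */
	}
}
```

The RX side follows the same shape with the SNAND_FIFO_RX_* opcodes, which is why the hunk replaces the old hard-coded 0x8/0xc command bytes in both helpers.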
+2 -2
drivers/spi/spi-amlogic-spifc-a4.c
··· 286 286 287 287 for (i = 0; i <= LANE_MAX; i++) { 288 288 if (buswidth == 1 << i) { 289 - conf = i << __bf_shf(mask); 289 + conf = i << __ffs(mask); 290 290 return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, 291 291 mask, conf); 292 292 } ··· 566 566 if (!op->data.nbytes) 567 567 goto end_xfer; 568 568 569 - conf = (op->data.nbytes >> RAW_SIZE_BW) << __bf_shf(RAW_EXT_SIZE); 569 + conf = (op->data.nbytes >> RAW_SIZE_BW) << __ffs(RAW_EXT_SIZE); 570 570 ret = regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, RAW_EXT_SIZE, conf); 571 571 if (ret) 572 572 goto err_out;
+3 -2
drivers/spi/spi-cadence-quadspi.c
··· 1995 1995 if (cqspi->use_direct_mode) { 1996 1996 ret = cqspi_request_mmap_dma(cqspi); 1997 1997 if (ret == -EPROBE_DEFER) 1998 - goto probe_setup_failed; 1998 + goto probe_dma_failed; 1999 1999 } 2000 2000 2001 2001 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { ··· 2019 2019 2020 2020 return 0; 2021 2021 probe_setup_failed: 2022 - cqspi_controller_enable(cqspi, 0); 2023 2022 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) 2024 2023 pm_runtime_disable(dev); 2024 + probe_dma_failed: 2025 + cqspi_controller_enable(cqspi, 0); 2025 2026 probe_reset_failed: 2026 2027 if (cqspi->is_jh7110) 2027 2028 cqspi_jh7110_disable_clk(pdev, cqspi);
+3 -1
drivers/spi/spi-dw-mmio.c
··· 358 358 if (IS_ERR(dwsmmio->rstc)) 359 359 return PTR_ERR(dwsmmio->rstc); 360 360 361 - reset_control_deassert(dwsmmio->rstc); 361 + ret = reset_control_deassert(dwsmmio->rstc); 362 + if (ret) 363 + return dev_err_probe(&pdev->dev, ret, "Failed to deassert resets\n"); 362 364 363 365 dws->bus_num = pdev->id; 364 366
+2
drivers/spi/spi-intel-pci.c
··· 75 75 { PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info }, 76 76 { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info }, 77 77 { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, 78 + { PCI_VDEVICE(INTEL, 0x4d23), (unsigned long)&cnl_info }, 78 79 { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info }, 79 80 { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info }, 80 81 { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info }, 81 82 { PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info }, 83 + { PCI_VDEVICE(INTEL, 0x7723), (unsigned long)&cnl_info }, 82 84 { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info }, 83 85 { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, 84 86 { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
+6
drivers/spi/spi-intel.c
··· 132 132 #define FLCOMP_C0DEN_16M 0x05 133 133 #define FLCOMP_C0DEN_32M 0x06 134 134 #define FLCOMP_C0DEN_64M 0x07 135 + #define FLCOMP_C0DEN_128M 0x08 135 136 136 137 #define INTEL_SPI_TIMEOUT 5000 /* ms */ 137 138 #define INTEL_SPI_FIFO_SZ 64 ··· 1348 1347 case FLCOMP_C0DEN_64M: 1349 1348 ispi->chip0_size = SZ_64M; 1350 1349 break; 1350 + case FLCOMP_C0DEN_128M: 1351 + ispi->chip0_size = SZ_128M; 1352 + break; 1351 1353 default: 1354 + dev_warn(ispi->dev, "unsupported C0DEN: %#lx\n", 1355 + flcomp & FLCOMP_C0DEN_MASK); 1352 1356 return -EINVAL; 1353 1357 } 1354 1358
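The spi-intel hunk extends the FLCOMP C0DEN decode with the 128M density code and warns on unknown codes instead of failing silently. The decode, reduced to a sketch that maps only the codes visible in this hunk:

```c
#include <stddef.h>

/* Component-density decode per the FLCOMP_C0DEN_* defines above;
 * smaller densities handled by the driver are elided here. */
static size_t chip0_size(unsigned int c0den)
{
	switch (c0den) {
	case 0x05: return 16u << 20;	/* FLCOMP_C0DEN_16M */
	case 0x06: return 32u << 20;	/* FLCOMP_C0DEN_32M */
	case 0x07: return 64u << 20;	/* FLCOMP_C0DEN_64M */
	case 0x08: return 128u << 20;	/* FLCOMP_C0DEN_128M, newly added */
	default:   return 0;		/* caller warns on unknown code */
	}
}
```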
+27 -5
drivers/spi/spi-nxp-fspi.c
··· 404 404 #define FSPI_NEED_INIT BIT(0) 405 405 #define FSPI_DTR_MODE BIT(1) 406 406 int flags; 407 + /* save the previous operation clock rate */ 408 + unsigned long pre_op_rate; 409 + /* the max clock rate fspi output to device */ 410 + unsigned long max_rate; 407 411 }; 408 412 409 413 static inline int needs_ip_only(struct nxp_fspi *f) ··· 689 685 * change the mode back to mode 0. 690 686 */ 691 687 reg = fspi_readl(f, f->iobase + FSPI_MCR0); 692 - if (op_is_dtr) 688 + if (op_is_dtr) { 693 689 reg |= FSPI_MCR0_RXCLKSRC(3); 694 - else /*select mode 0 */ 690 + f->max_rate = 166000000; 691 + } else { /*select mode 0 */ 695 692 reg &= ~FSPI_MCR0_RXCLKSRC(3); 693 + f->max_rate = 66000000; 694 + } 696 695 fspi_writel(f, reg, f->iobase + FSPI_MCR0); 697 696 } 698 697 ··· 726 719 0, POLL_TOUT, true); 727 720 if (ret) 728 721 dev_warn(f->dev, "DLL lock failed, please fix it!\n"); 722 + 723 + /* 724 + * For ERR050272, DLL lock status bit is not accurate, 725 + * wait for 4us more as a workaround. 726 + */ 727 + udelay(4); 729 728 } 730 729 731 730 /* ··· 793 780 uint64_t size_kb; 794 781 795 782 /* 796 - * Return, if previously selected target device is same as current 797 - * requested target device. Also the DTR or STR mode do not change. 783 + * Return when following condition all meet, 784 + * 1, if previously selected target device is same as current 785 + * requested target device. 786 + * 2, the DTR or STR mode do not change. 787 + * 3, previous operation max rate equals current one. 788 + * 789 + * For other case, need to re-config. 798 790 */ 799 791 if ((f->selected == spi_get_chipselect(spi, 0)) && 800 - (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr)) 792 + (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr) && 793 + (f->pre_op_rate == op->max_freq)) 801 794 return; 802 795 803 796 /* Reset FLSHxxCR0 registers */ ··· 821 802 dev_dbg(f->dev, "Target device [CS:%x] selected\n", spi_get_chipselect(spi, 0)); 822 803 823 804 nxp_fspi_select_rx_sample_clk_source(f, op_is_dtr); 805 + rate = min(f->max_rate, op->max_freq); 824 806 825 807 if (op_is_dtr) { 826 808 f->flags |= FSPI_DTR_MODE; ··· 851 831 nxp_fspi_dll_calibration(f); 852 832 else 853 833 nxp_fspi_dll_override(f); 834 + 835 + f->pre_op_rate = op->max_freq; 854 836 855 837 f->selected = spi_get_chipselect(spi, 0); 856 838 }
+11 -1
drivers/spi/spi-rockchip-sfc.c
··· 704 704 ret = -ENOMEM; 705 705 goto err_dma; 706 706 } 707 - sfc->dma_buffer = virt_to_phys(sfc->buffer); 707 + sfc->dma_buffer = dma_map_single(dev, sfc->buffer, 708 + sfc->max_iosize, DMA_BIDIRECTIONAL); 709 + if (dma_mapping_error(dev, sfc->dma_buffer)) { 710 + ret = -ENOMEM; 711 + goto err_dma_map; 712 + } 708 713 } 709 714 710 715 ret = devm_spi_register_controller(dev, host); ··· 720 715 721 716 return 0; 722 717 err_register: 718 + dma_unmap_single(dev, sfc->dma_buffer, sfc->max_iosize, 719 + DMA_BIDIRECTIONAL); 720 + err_dma_map: 723 721 free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize)); 724 722 err_dma: 725 723 pm_runtime_get_sync(dev); ··· 744 736 struct spi_controller *host = sfc->host; 745 737 746 738 spi_unregister_controller(host); 739 + dma_unmap_single(&pdev->dev, sfc->dma_buffer, sfc->max_iosize, 740 + DMA_BIDIRECTIONAL); 747 741 free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize)); 748 742 749 743 clk_disable_unprepare(sfc->clk);
+7 -5
drivers/staging/gpib/agilent_82350b/agilent_82350b.c
··· 182 182 return retval; 183 183 #endif 184 184 185 - retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes); 186 - *bytes_written += num_bytes; 187 - if (retval < 0) 188 - return retval; 185 + if (fifotransferlength > 0) { 186 + retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes); 187 + *bytes_written += num_bytes; 188 + if (retval < 0) 189 + return retval; 190 + } 189 191 190 192 write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BOIE, IMR0); 191 193 for (i = 1; i < fifotransferlength;) { ··· 219 217 break; 220 218 } 221 219 write_byte(tms_priv, tms_priv->imr0_bits, IMR0); 222 - if (retval) 220 + if (retval < 0) 223 221 return retval; 224 222 225 223 if (send_eoi) {
+5
drivers/staging/gpib/fmh_gpib/fmh_gpib.c
··· 1517 1517 resource_size(e_priv->gpib_iomem_res)); 1518 1518 } 1519 1519 fmh_gpib_generic_detach(board); 1520 + 1521 + if (board->dev) { 1522 + put_device(board->dev); 1523 + board->dev = NULL; 1524 + } 1520 1525 } 1521 1526 1522 1527 static int fmh_gpib_pci_attach_impl(struct gpib_board *board,
+10 -3
drivers/staging/gpib/ni_usb/ni_usb_gpib.c
··· 327 327 board->status &= ~clear_mask; 328 328 board->status &= ~ni_usb_ibsta_mask; 329 329 board->status |= ni_usb_ibsta & ni_usb_ibsta_mask; 330 - // FIXME should generate events on DTAS and DCAS 330 + if (ni_usb_ibsta & DCAS) 331 + push_gpib_event(board, EVENT_DEV_CLR); 332 + if (ni_usb_ibsta & DTAS) 333 + push_gpib_event(board, EVENT_DEV_TRG); 331 334 332 335 spin_lock_irqsave(&board->spinlock, flags); 333 336 /* remove set status bits from monitored set why ?***/ ··· 697 694 */ 698 695 break; 699 696 case NIUSB_ATN_STATE_ERROR: 700 - retval = -EIO; 701 - dev_err(&usb_dev->dev, "read when ATN set\n"); 697 + if (status.ibsta & DCAS) { 698 + retval = -EINTR; 699 + } else { 700 + retval = -EIO; 701 + dev_dbg(&usb_dev->dev, "read when ATN set stat: 0x%06x\n", status.ibsta); 702 + } 702 703 break; 703 704 case NIUSB_ADDRESSING_ERROR: 704 705 retval = -EIO;
+1
drivers/tee/qcomtee/Kconfig
··· 2 2 # Qualcomm Trusted Execution Environment Configuration 3 3 config QCOMTEE 4 4 tristate "Qualcomm TEE Support" 5 + depends on ARCH_QCOM || COMPILE_TEST 5 6 depends on !CPU_BIG_ENDIAN 6 7 select QCOM_SCM 7 8 select QCOM_TZMEM_MODE_SHMBRIDGE
+1 -1
drivers/tee/qcomtee/call.c
··· 308 308 } 309 309 310 310 /* Release any IO and OO objects not processed. */ 311 - for (; u[i].type && i < num_params; i++) { 311 + for (; i < num_params && u[i].type; i++) { 312 312 if (u[i].type == QCOMTEE_ARG_TYPE_OO || 313 313 u[i].type == QCOMTEE_ARG_TYPE_IO) 314 314 qcomtee_object_put(u[i].o);
+1 -1
drivers/tee/qcomtee/core.c
··· 424 424 if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR)) 425 425 memcpy(msgptr, u[i].b.addr, u[i].b.size); 426 426 else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size)) 427 - return -EINVAL; 427 + return -EFAULT; 428 428 429 429 offset += qcomtee_msg_offset_align(u[i].b.size); 430 430 ib++;
+3 -1
drivers/tty/serial/8250/8250_dw.c
··· 635 635 if (IS_ERR(data->rst)) 636 636 return PTR_ERR(data->rst); 637 637 638 - reset_control_deassert(data->rst); 638 + err = reset_control_deassert(data->rst); 639 + if (err) 640 + return dev_err_probe(dev, err, "failed to deassert resets\n"); 639 641 640 642 err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst); 641 643 if (err)
+11
drivers/tty/serial/8250/8250_exar.c
··· 40 40 #define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db 41 41 #define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea 42 42 43 + #define PCI_DEVICE_ID_ADVANTECH_XR17V352 0x0018 44 + 43 45 #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002 44 46 #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004 45 47 #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a ··· 1624 1622 .exit = pci_xr17v35x_exit, 1625 1623 }; 1626 1624 1625 + static const struct exar8250_board pbn_adv_XR17V352 = { 1626 + .num_ports = 2, 1627 + .setup = pci_xr17v35x_setup, 1628 + .exit = pci_xr17v35x_exit, 1629 + }; 1630 + 1627 1631 static const struct exar8250_board pbn_exar_XR17V4358 = { 1628 1632 .num_ports = 12, 1629 1633 .setup = pci_xr17v35x_setup, ··· 1703 1695 /* USRobotics USR298x-OEM PCI Modems */ 1704 1696 USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x), 1705 1697 USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x), 1698 + 1699 + /* ADVANTECH devices */ 1700 + EXAR_DEVICE(ADVANTECH, XR17V352, pbn_adv_XR17V352), 1706 1701 1707 1702 /* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */ 1708 1703 EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
+4 -2
drivers/tty/serial/8250/8250_mtk.c
··· 435 435 while 436 436 (serial_in(up, MTK_UART_DEBUG0)); 437 437 438 + clk_disable_unprepare(data->uart_clk); 438 439 clk_disable_unprepare(data->bus_clk); 439 440 440 441 return 0; ··· 446 445 struct mtk8250_data *data = dev_get_drvdata(dev); 447 446 448 447 clk_prepare_enable(data->bus_clk); 448 + clk_prepare_enable(data->uart_clk); 449 449 450 450 return 0; 451 451 } ··· 477 475 int dmacnt; 478 476 #endif 479 477 480 - data->uart_clk = devm_clk_get(&pdev->dev, "baud"); 478 + data->uart_clk = devm_clk_get_enabled(&pdev->dev, "baud"); 481 479 if (IS_ERR(data->uart_clk)) { 482 480 /* 483 481 * For compatibility with older device trees try unnamed 484 482 * clk when no baud clk can be found. 485 483 */ 486 - data->uart_clk = devm_clk_get(&pdev->dev, NULL); 484 + data->uart_clk = devm_clk_get_enabled(&pdev->dev, NULL); 487 485 if (IS_ERR(data->uart_clk)) { 488 486 dev_warn(&pdev->dev, "Can't get uart clock\n"); 489 487 return PTR_ERR(data->uart_clk);
-7
drivers/tty/serial/sc16is7xx.c
··· 588 588 div /= prescaler; 589 589 } 590 590 591 - /* Enable enhanced features */ 592 - sc16is7xx_efr_lock(port); 593 - sc16is7xx_port_update(port, SC16IS7XX_EFR_REG, 594 - SC16IS7XX_EFR_ENABLE_BIT, 595 - SC16IS7XX_EFR_ENABLE_BIT); 596 - sc16is7xx_efr_unlock(port); 597 - 598 591 /* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */ 599 592 sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, 600 593 SC16IS7XX_MCR_CLKSEL_BIT,
+8 -6
drivers/tty/serial/sh-sci.c
··· 1014 1014 struct sci_port *s = to_sci_port(port); 1015 1015 const struct plat_sci_reg *reg; 1016 1016 int copied = 0; 1017 - u16 status; 1017 + u32 status; 1018 1018 1019 - reg = sci_getreg(port, s->params->overrun_reg); 1020 - if (!reg->size) 1021 - return 0; 1019 + if (s->type != SCI_PORT_RSCI) { 1020 + reg = sci_getreg(port, s->params->overrun_reg); 1021 + if (!reg->size) 1022 + return 0; 1023 + } 1022 1024 1023 - status = sci_serial_in(port, s->params->overrun_reg); 1025 + status = s->ops->read_reg(port, s->params->overrun_reg); 1024 1026 if (status & s->params->overrun_mask) { 1025 1027 status &= ~s->params->overrun_mask; 1026 - sci_serial_out(port, s->params->overrun_reg, status); 1028 + s->ops->write_reg(port, s->params->overrun_reg, status); 1027 1029 1028 1030 port->icount.overrun++; 1029 1031
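Indirecting through s->ops->read_reg()/write_reg(), with the status widened to u32, lets port variants that supply their own accessors and register layout, such as RSCI here, share this overrun-clearing path instead of going through the fixed sci_serial_in()/sci_serial_out() helpers. A hypothetical shape for such an accessor table (the driver's real ops struct carries more hooks than this):

        struct sci_port_ops {
                u32  (*read_reg)(struct uart_port *port, int reg);
                void (*write_reg)(struct uart_port *port, int reg, u32 val);
        };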
+2
drivers/usb/core/quirks.c
··· 467 467 /* Huawei 4G LTE module */ 468 468 { USB_DEVICE(0x12d1, 0x15bb), .driver_info = 469 469 USB_QUIRK_DISCONNECT_SUSPEND }, 470 + { USB_DEVICE(0x12d1, 0x15c1), .driver_info = 471 + USB_QUIRK_DISCONNECT_SUSPEND }, 470 472 { USB_DEVICE(0x12d1, 0x15c3), .driver_info = 471 473 USB_QUIRK_DISCONNECT_SUSPEND }, 472 474
-3
drivers/usb/dwc3/dwc3-generic-plat.c
··· 85 85 static void dwc3_generic_remove(struct platform_device *pdev) 86 86 { 87 87 struct dwc3 *dwc = platform_get_drvdata(pdev); 88 - struct dwc3_generic *dwc3g = to_dwc3_generic(dwc); 89 88 90 89 dwc3_core_remove(dwc); 91 - 92 - clk_bulk_disable_unprepare(dwc3g->num_clocks, dwc3g->clks); 93 90 } 94 91 95 92 static int dwc3_generic_suspend(struct device *dev)
-2
drivers/usb/gadget/legacy/raw_gadget.c
··· 667 667 return ERR_PTR(-EINVAL); 668 668 if (!usb_raw_io_flags_valid(io->flags)) 669 669 return ERR_PTR(-EINVAL); 670 - if (io->length > PAGE_SIZE) 671 - return ERR_PTR(-EINVAL); 672 670 if (get_from_user) 673 671 data = memdup_user(ptr + sizeof(*io), io->length); 674 672 else {
+12 -3
drivers/usb/host/xhci-dbgcap.c
··· 892 892 dev_info(dbc->dev, "DbC configured\n"); 893 893 portsc = readl(&dbc->regs->portsc); 894 894 writel(portsc, &dbc->regs->portsc); 895 - return EVT_GSER; 895 + ret = EVT_GSER; 896 + break; 896 897 } 897 898 898 899 return EVT_DONE; ··· 955 954 break; 956 955 case TRB_TYPE(TRB_TRANSFER): 957 956 dbc_handle_xfer_event(dbc, evt); 958 - ret = EVT_XFER_DONE; 957 + if (ret != EVT_GSER) 958 + ret = EVT_XFER_DONE; 959 959 break; 960 960 default: 961 961 break; ··· 1392 1390 if (!dbc) 1393 1391 return 0; 1394 1392 1395 - if (dbc->state == DS_CONFIGURED) 1393 + switch (dbc->state) { 1394 + case DS_ENABLED: 1395 + case DS_CONNECTED: 1396 + case DS_CONFIGURED: 1396 1397 dbc->resume_required = 1; 1398 + break; 1399 + default: 1400 + break; 1401 + } 1397 1402 1398 1403 xhci_dbc_stop(dbc); 1399 1404
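The switch-level break records EVT_GSER without abandoning the rest of the event processing, and the added guard keeps a later transfer event from downgrading that remembered result. A sketch of the loop shape, with hypothetical helpers next_event() and type() standing in for the driver's ring handling:

        /* Remember the most significant result, but keep draining events. */
        while ((evt = next_event(ring)) != NULL) {
                switch (type(evt)) {
                case PORT_STATUS:
                        ret = EVT_GSER;
                        break;                  /* breaks switch, not loop */
                case TRANSFER:
                        if (ret != EVT_GSER)    /* don't overwrite GSER */
                                ret = EVT_XFER_DONE;
                        break;
                }
        }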
+2 -1
drivers/usb/host/xhci-pci.c
··· 582 582 if (!usb_hcd_is_primary_hcd(hcd)) 583 583 return 0; 584 584 585 + xhci->allow_single_roothub = 1; 586 + 585 587 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 586 588 xhci_pme_acpi_rtd3_enable(pdev); 587 589 ··· 639 637 xhci = hcd_to_xhci(hcd); 640 638 xhci->reset = reset; 641 639 642 - xhci->allow_single_roothub = 1; 643 640 if (!xhci_has_one_roothub(xhci)) { 644 641 xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev, 645 642 pci_name(dev), hcd);
+1
drivers/usb/misc/Kconfig
··· 182 182 config USB_USBIO 183 183 tristate "Intel USBIO Bridge support" 184 184 depends on USB && ACPI 185 + depends on X86 || COMPILE_TEST 185 186 select AUXILIARY_BUS 186 187 help 187 188 This adds support for Intel USBIO drivers.
+10
drivers/usb/serial/option.c
··· 273 273 #define QUECTEL_PRODUCT_EM05CN 0x0312 274 274 #define QUECTEL_PRODUCT_EM05G_GR 0x0313 275 275 #define QUECTEL_PRODUCT_EM05G_RS 0x0314 276 + #define QUECTEL_PRODUCT_RG255C 0x0316 276 277 #define QUECTEL_PRODUCT_EM12 0x0512 277 278 #define QUECTEL_PRODUCT_RM500Q 0x0800 278 279 #define QUECTEL_PRODUCT_RM520N 0x0801 ··· 618 617 #define UNISOC_VENDOR_ID 0x1782 619 618 /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */ 620 619 #define TOZED_PRODUCT_LT70C 0x4055 620 + #define UNISOC_PRODUCT_UIS7720 0x4064 621 621 /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */ 622 622 #define LUAT_PRODUCT_AIR720U 0x4e00 623 623 ··· 1272 1270 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, 1273 1271 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) }, 1274 1272 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) }, 1273 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x30) }, 1274 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0, 0) }, 1275 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x40) }, 1275 1276 1276 1277 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1277 1278 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, ··· 1403 1398 .driver_info = RSVD(0) | NCTRL(3) }, 1404 1399 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */ 1405 1400 .driver_info = NCTRL(4) }, 1401 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a3, 0xff), /* Telit FN920C04 (ECM) */ 1402 + .driver_info = NCTRL(4) }, 1406 1403 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */ 1407 1404 .driver_info = RSVD(0) | NCTRL(3) }, 1408 1405 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */ 1406 + .driver_info = NCTRL(4) }, 1407 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a8, 0xff), /* Telit FN920C04 (ECM) */ 1409 1408 .driver_info = NCTRL(4) }, 1410 1409 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */ 1411 1410 .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, ··· 2475 2466 { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) }, 2476 2467 { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) }, 2477 2468 { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, 2469 + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, UNISOC_PRODUCT_UIS7720, 0xff, 0, 0) }, 2478 2470 { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, 2479 2471 { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */ 2480 2472 .driver_info = NCTRL(1) },
+2 -2
drivers/usb/typec/tcpm/tcpm.c
··· 7876 7876 7877 7877 port->partner_desc.identity = &port->partner_ident; 7878 7878 7879 - port->role_sw = usb_role_switch_get(port->dev); 7879 + port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); 7880 7880 if (!port->role_sw) 7881 - port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); 7881 + port->role_sw = usb_role_switch_get(port->dev); 7882 7882 if (IS_ERR(port->role_sw)) { 7883 7883 err = PTR_ERR(port->role_sw); 7884 7884 goto out_destroy_wq;
+2 -8
fs/9p/vfs_dentry.c
··· 66 66 struct p9_fid *fid; 67 67 struct inode *inode; 68 68 struct v9fs_inode *v9inode; 69 - unsigned int cached; 70 69 71 70 if (flags & LOOKUP_RCU) 72 71 return -ECHILD; ··· 75 76 goto out_valid; 76 77 77 78 v9inode = V9FS_I(inode); 78 - struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); 79 - 80 - cached = v9ses->cache & (CACHE_META | CACHE_LOOSE); 81 - 82 - if (!cached || v9inode->cache_validity & V9FS_INO_INVALID_ATTR) { 79 + if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) { 83 80 int retval; 84 81 struct v9fs_session_info *v9ses; 85 82 ··· 109 114 p9_debug(P9_DEBUG_VFS, 110 115 "refresh inode: dentry = %pd (%p), got error %pe\n", 111 116 dentry, dentry, ERR_PTR(retval)); 117 + if (retval < 0) 112 118 return retval; 113 119 } 114 120 } ··· 146 150 }; 147 151 148 152 const struct dentry_operations v9fs_dentry_operations = { 149 - .d_revalidate = v9fs_lookup_revalidate, 150 - .d_weak_revalidate = __v9fs_lookup_revalidate, 151 153 .d_release = v9fs_dentry_release, 152 154 .d_unalias_trylock = v9fs_dentry_unalias_trylock, 153 155 .d_unalias_unlock = v9fs_dentry_unalias_unlock,
+1 -7
fs/9p/vfs_inode.c
··· 1339 1339 * Don't update inode if the file type is different 1340 1340 */ 1341 1341 umode = p9mode2unixmode(v9ses, st, &rdev); 1342 - if (inode_wrong_type(inode, umode)) { 1343 - /* 1344 - * Do this as a way of letting the caller know the inode should not 1345 - * be reused 1346 - */ 1347 - v9fs_invalidate_inode_attr(inode); 1342 + if (inode_wrong_type(inode, umode)) 1348 1343 goto out; 1349 - } 1350 1344 1351 1345 /* 1352 1346 * We don't want to refresh inode->i_size,
+1 -7
fs/9p/vfs_inode_dotl.c
··· 897 897 /* 898 898 * Don't update inode if the file type is different 899 899 */ 900 - if (inode_wrong_type(inode, st->st_mode)) { 901 - /* 902 - * Do this as a way of letting the caller know the inode should not 903 - * be reused 904 - */ 905 - v9fs_invalidate_inode_attr(inode); 900 + if (inode_wrong_type(inode, st->st_mode)) 906 901 goto out; 907 - } 908 902 909 903 /* 910 904 * We don't want to refresh inode->i_size,
+1 -1
fs/btrfs/delayed-inode.c
··· 2110 2110 2111 2111 for (int i = 0; i < count; i++) { 2112 2112 __btrfs_kill_delayed_node(delayed_nodes[i]); 2113 + btrfs_delayed_node_ref_tracker_dir_print(delayed_nodes[i]); 2113 2114 btrfs_release_delayed_node(delayed_nodes[i], 2114 2115 &delayed_node_trackers[i]); 2115 - btrfs_delayed_node_ref_tracker_dir_print(delayed_nodes[i]); 2116 2116 } 2117 2117 } 2118 2118 }
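The swap above is a lifetime fix: btrfs_release_delayed_node() may drop the last reference and free the node, so printing its ref-tracker state afterwards was a potential use-after-free. The general shape, with hypothetical names:

        inspect(node);          /* dereferences node: must come first */
        node_put(node);         /* may be the final put and free node */
        /* node must not be touched past this point */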
+7
fs/btrfs/delayed-inode.h
··· 219 219 if (!btrfs_test_opt(node->root->fs_info, REF_TRACKER)) 220 220 return; 221 221 222 + /* 223 + * Only print if there are leaked references. The caller is 224 + * holding one reference, so if refs == 1 there is no leak. 225 + */ 226 + if (refcount_read(&node->refs) == 1) 227 + return; 228 + 222 229 ref_tracker_dir_print(&node->ref_dir.dir, 223 230 BTRFS_DELAYED_NODE_REF_TRACKER_DISPLAY_LIMIT); 224 231 }
+1 -1
fs/btrfs/ref-verify.c
··· 982 982 983 983 extent_root = btrfs_extent_root(fs_info, 0); 984 984 /* If the extent tree is damaged we cannot ignore it (IGNOREBADROOTS). */ 985 - if (IS_ERR(extent_root)) { 985 + if (!extent_root) { 986 986 btrfs_warn(fs_info, "ref-verify: extent tree not available, disabling"); 987 987 btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY); 988 988 return 0;
+48 -8
fs/btrfs/send.c
··· 4102 4102 return ret; 4103 4103 } 4104 4104 4105 + static int rbtree_check_dir_ref_comp(const void *k, const struct rb_node *node) 4106 + { 4107 + const struct recorded_ref *data = k; 4108 + const struct recorded_ref *ref = rb_entry(node, struct recorded_ref, node); 4109 + 4110 + if (data->dir > ref->dir) 4111 + return 1; 4112 + if (data->dir < ref->dir) 4113 + return -1; 4114 + if (data->dir_gen > ref->dir_gen) 4115 + return 1; 4116 + if (data->dir_gen < ref->dir_gen) 4117 + return -1; 4118 + return 0; 4119 + } 4120 + 4121 + static bool rbtree_check_dir_ref_less(struct rb_node *node, const struct rb_node *parent) 4122 + { 4123 + const struct recorded_ref *entry = rb_entry(node, struct recorded_ref, node); 4124 + 4125 + return rbtree_check_dir_ref_comp(entry, parent) < 0; 4126 + } 4127 + 4128 + static int record_check_dir_ref_in_tree(struct rb_root *root, 4129 + struct recorded_ref *ref, struct list_head *list) 4130 + { 4131 + struct recorded_ref *tmp_ref; 4132 + int ret; 4133 + 4134 + if (rb_find(ref, root, rbtree_check_dir_ref_comp)) 4135 + return 0; 4136 + 4137 + ret = dup_ref(ref, list); 4138 + if (ret < 0) 4139 + return ret; 4140 + 4141 + tmp_ref = list_last_entry(list, struct recorded_ref, list); 4142 + rb_add(&tmp_ref->node, root, rbtree_check_dir_ref_less); 4143 + tmp_ref->root = root; 4144 + return 0; 4145 + } 4146 + 4105 4147 static int rename_current_inode(struct send_ctx *sctx, 4106 4148 struct fs_path *current_path, 4107 4149 struct fs_path *new_path) ··· 4171 4129 struct recorded_ref *cur; 4172 4130 struct recorded_ref *cur2; 4173 4131 LIST_HEAD(check_dirs); 4132 + struct rb_root rbtree_check_dirs = RB_ROOT; 4174 4133 struct fs_path *valid_path = NULL; 4175 4134 u64 ow_inode = 0; 4176 4135 u64 ow_gen; 4177 4136 u64 ow_mode; 4178 - u64 last_dir_ino_rm = 0; 4179 4137 bool did_overwrite = false; 4180 4138 bool is_orphan = false; 4181 4139 bool can_rename = true; ··· 4479 4437 goto out; 4480 4438 } 4481 4439 } 4482 - ret = dup_ref(cur, &check_dirs); 4440 + ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs); 4483 4441 if (ret < 0) 4484 4442 goto out; 4485 4443 } ··· 4507 4465 } 4508 4466 4509 4467 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4510 - ret = dup_ref(cur, &check_dirs); 4468 + ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs); 4511 4469 if (ret < 0) 4512 4470 goto out; 4513 4471 } ··· 4517 4475 * We have a moved dir. 
Add the old parent to check_dirs 4518 4476 */ 4519 4477 cur = list_first_entry(&sctx->deleted_refs, struct recorded_ref, list); 4520 - ret = dup_ref(cur, &check_dirs); 4478 + ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs); 4521 4479 if (ret < 0) 4522 4480 goto out; 4523 4481 } else if (!S_ISDIR(sctx->cur_inode_mode)) { ··· 4551 4509 if (is_current_inode_path(sctx, cur->full_path)) 4552 4510 fs_path_reset(&sctx->cur_inode_path); 4553 4511 } 4554 - ret = dup_ref(cur, &check_dirs); 4512 + ret = record_check_dir_ref_in_tree(&rbtree_check_dirs, cur, &check_dirs); 4555 4513 if (ret < 0) 4556 4514 goto out; 4557 4515 } ··· 4594 4552 ret = cache_dir_utimes(sctx, cur->dir, cur->dir_gen); 4595 4553 if (ret < 0) 4596 4554 goto out; 4597 - } else if (ret == inode_state_did_delete && 4598 - cur->dir != last_dir_ino_rm) { 4555 + } else if (ret == inode_state_did_delete) { 4599 4556 ret = can_rmdir(sctx, cur->dir, cur->dir_gen); 4600 4557 if (ret < 0) 4601 4558 goto out; ··· 4606 4565 ret = send_rmdir(sctx, valid_path); 4607 4566 if (ret < 0) 4608 4567 goto out; 4609 - last_dir_ino_rm = cur->dir; 4610 4568 } 4611 4569 } 4612 4570 }
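The new rb-tree keys check_dirs entries by (dir, dir_gen), so each directory is queued for post-processing once no matter how many of its children changed; this replaces the old last_dir_ino_rm memo, which only suppressed immediately repeated directories. For illustration (not kernel code), a user-space sketch of the same insert-if-absent dedup over that two-field key:

#include <stdlib.h>

struct key { unsigned long long dir, dir_gen; };

struct node {
        struct key k;
        struct node *l, *r;
};

static int key_cmp(const struct key *a, const struct key *b)
{
        if (a->dir != b->dir)
                return a->dir < b->dir ? -1 : 1;
        if (a->dir_gen != b->dir_gen)
                return a->dir_gen < b->dir_gen ? -1 : 1;
        return 0;
}

/* Returns 1 if newly inserted, 0 if already present, -1 on ENOMEM. */
static int insert_unique(struct node **root, struct key k)
{
        while (*root) {
                int c = key_cmp(&k, &(*root)->k);

                if (c == 0)
                        return 0;       /* duplicate: skip */
                root = c < 0 ? &(*root)->l : &(*root)->r;
        }
        *root = calloc(1, sizeof(**root));
        if (!*root)
                return -1;
        (*root)->k = k;
        return 1;
}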
+7 -1
fs/btrfs/super.c
··· 2068 2068 fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); 2069 2069 fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); 2070 2070 if (!fs_info->super_copy || !fs_info->super_for_commit) { 2071 - btrfs_free_fs_info(fs_info); 2071 + /* 2072 + * Don't call btrfs_free_fs_info() to free it, as it's still 2073 + * only partially initialized. 2074 + */ 2075 + kfree(fs_info->super_copy); 2076 + kfree(fs_info->super_for_commit); 2077 + kvfree(fs_info); 2072 2078 return -ENOMEM; 2073 2079 } 2074 2080 btrfs_init_fs_info(fs_info);
+30 -29
fs/erofs/zmap.c
··· 55 55 } else { 56 56 m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF); 57 57 m->clusterofs = le16_to_cpu(di->di_clusterofs); 58 - if (m->clusterofs >= 1 << vi->z_lclusterbits) { 59 - DBG_BUGON(1); 60 - return -EFSCORRUPTED; 61 - } 62 58 m->pblk = le32_to_cpu(di->di_u.blkaddr); 63 59 } 64 60 return 0; ··· 236 240 static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m, 237 241 unsigned int lcn, bool lookahead) 238 242 { 243 + struct erofs_inode *vi = EROFS_I(m->inode); 244 + int err; 245 + 246 + if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT) { 247 + err = z_erofs_load_compact_lcluster(m, lcn, lookahead); 248 + } else { 249 + DBG_BUGON(vi->datalayout != EROFS_INODE_COMPRESSED_FULL); 250 + err = z_erofs_load_full_lcluster(m, lcn); 251 + } 252 + if (err) 253 + return err; 254 + 239 255 if (m->type >= Z_EROFS_LCLUSTER_TYPE_MAX) { 240 256 erofs_err(m->inode->i_sb, "unknown type %u @ lcn %u of nid %llu", 241 - m->type, lcn, EROFS_I(m->inode)->nid); 257 + m->type, lcn, EROFS_I(m->inode)->nid); 242 258 DBG_BUGON(1); 243 259 return -EOPNOTSUPP; 260 + } else if (m->type != Z_EROFS_LCLUSTER_TYPE_NONHEAD && 261 + m->clusterofs >= (1 << vi->z_lclusterbits)) { 262 + DBG_BUGON(1); 263 + return -EFSCORRUPTED; 244 264 } 245 - 246 - switch (EROFS_I(m->inode)->datalayout) { 247 - case EROFS_INODE_COMPRESSED_FULL: 248 - return z_erofs_load_full_lcluster(m, lcn); 249 - case EROFS_INODE_COMPRESSED_COMPACT: 250 - return z_erofs_load_compact_lcluster(m, lcn, lookahead); 251 - default: 252 - return -EINVAL; 253 - } 265 + return 0; 254 266 } 255 267 256 268 static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m, ··· 272 268 unsigned long lcn = m->lcn - lookback_distance; 273 269 int err; 274 270 271 + if (!lookback_distance) 272 + break; 273 + 275 274 err = z_erofs_load_lcluster_from_disk(m, lcn, false); 276 275 if (err) 277 276 return err; 278 - 279 277 if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { 280 278 lookback_distance = m->delta[0]; 281 - if (!lookback_distance) 282 - break; 283 279 continue; 284 - } else { 285 - m->headtype = m->type; 286 - m->map->m_la = (lcn << lclusterbits) | m->clusterofs; 287 - return 0; 288 280 } 281 + m->headtype = m->type; 282 + m->map->m_la = (lcn << lclusterbits) | m->clusterofs; 283 + return 0; 289 284 } 290 285 erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu", 291 286 lookback_distance, m->lcn, vi->nid); ··· 434 431 end = inode->i_size; 435 432 } else { 436 433 if (m.type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) { 437 - /* m.lcn should be >= 1 if endoff < m.clusterofs */ 438 - if (!m.lcn) { 439 - erofs_err(sb, "invalid logical cluster 0 at nid %llu", 440 - vi->nid); 441 - err = -EFSCORRUPTED; 442 - goto unmap_out; 443 - } 444 434 end = (m.lcn << lclusterbits) | m.clusterofs; 445 435 map->m_flags |= EROFS_MAP_FULL_MAPPED; 446 436 m.delta[0] = 1; ··· 592 596 vi->z_fragmentoff = map->m_plen; 593 597 if (recsz > offsetof(struct z_erofs_extent, pstart_lo)) 594 598 vi->z_fragmentoff |= map->m_pa << 32; 595 - } else if (map->m_plen) { 599 + } else if (map->m_plen & Z_EROFS_EXTENT_PLEN_MASK) { 596 600 map->m_flags |= EROFS_MAP_MAPPED | 597 601 EROFS_MAP_FULL_MAPPED | EROFS_MAP_ENCODED; 598 602 fmt = map->m_plen >> Z_EROFS_EXTENT_PLEN_FMT_BIT; ··· 711 715 struct erofs_map_blocks *map) 712 716 { 713 717 struct erofs_sb_info *sbi = EROFS_I_SB(inode); 718 + u64 pend; 714 719 715 720 if (!(map->m_flags & EROFS_MAP_ENCODED)) 716 721 return 0; ··· 729 732 if (unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE || 730 733 map->m_llen > 
Z_EROFS_PCLUSTER_MAX_DSIZE)) 731 734 return -EOPNOTSUPP; 735 + /* Filesystems beyond 48-bit physical block addresses are invalid */ 736 + if (unlikely(check_add_overflow(map->m_pa, map->m_plen, &pend) || 737 + (pend >> sbi->blkszbits) >= BIT_ULL(48))) 738 + return -EFSCORRUPTED; 732 739 return 0; 733 740 } 734 741
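The added check uses check_add_overflow() so the case where m_pa + m_plen wraps around is rejected explicitly before the 48-bit block-address ceiling is tested. A compilable user-space analog using the compiler builtin that the kernel macro wraps (GCC/Clang):

#include <stdint.h>
#include <stdbool.h>

/* Valid iff pa + plen neither wraps nor ends at or beyond 2^48 blocks;
 * blkszbits is log2 of the filesystem block size. */
static bool extent_end_valid(uint64_t pa, uint64_t plen, unsigned int blkszbits)
{
        uint64_t pend;

        if (__builtin_add_overflow(pa, plen, &pend))
                return false;                   /* wrapped past 2^64 */
        return (pend >> blkszbits) < (1ULL << 48);
}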
-9
fs/hugetlbfs/inode.c
··· 478 478 if (!hugetlb_vma_trylock_write(vma)) 479 479 continue; 480 480 481 - /* 482 - * Skip VMAs without shareable locks. Per the design in commit 483 - * 40549ba8f8e0, these will be handled by remove_inode_hugepages() 484 - * called after this function with proper locking. 485 - */ 486 - if (!__vma_shareable_lock(vma)) 487 - goto skip; 488 - 489 481 v_start = vma_offset_start(vma, start); 490 482 v_end = vma_offset_end(vma, end); 491 483 ··· 488 496 * vmas. Therefore, lock is not held when calling 489 497 * unmap_hugepage_range for private vmas. 490 498 */ 491 - skip: 492 499 hugetlb_vma_unlock_write(vma); 493 500 } 494 501 }
+6
fs/notify/fdinfo.c
··· 17 17 #include "fanotify/fanotify.h" 18 18 #include "fdinfo.h" 19 19 #include "fsnotify.h" 20 + #include "../internal.h" 20 21 21 22 #if defined(CONFIG_PROC_FS) 22 23 ··· 47 46 48 47 size = f->handle_bytes >> 2; 49 48 49 + if (!super_trylock_shared(inode->i_sb)) 50 + return; 51 + 50 52 ret = exportfs_encode_fid(inode, (struct fid *)f->f_handle, &size); 53 + up_read(&inode->i_sb->s_umount); 54 + 51 55 if ((ret == FILEID_INVALID) || (ret < 0)) 52 56 return; 53 57
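super_trylock_shared() is used rather than a blocking read lock presumably because this runs from a /proc fdinfo show path, where sleeping on s_umount while an unmount in progress holds it would be risky; on contention the optional file handle is simply left out. A hedged sketch of the trylock-and-skip shape (the emit helper is hypothetical):

        if (!super_trylock_shared(inode->i_sb))
                return;                         /* contended: omit the field */
        emit_file_handle(m, inode);             /* hypothetical helper */
        up_read(&inode->i_sb->s_umount);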
+5
fs/ocfs2/move_extents.c
··· 867 867 mlog_errno(ret); 868 868 goto out; 869 869 } 870 + /* 871 + * Invalidate extent cache after moving/defragging to prevent 872 + * stale cached data with outdated extent flags. 873 + */ 874 + ocfs2_extent_map_trunc(inode, cpos); 870 875 871 876 context->clusters_moved += alloc_size; 872 877 next:
+7 -9
fs/resctrl/monitor.c
··· 1782 1782 mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID; 1783 1783 1784 1784 if (r->mon.mbm_cntr_assignable) { 1785 - if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID)) 1786 - resctrl_enable_mon_event(QOS_L3_MBM_TOTAL_EVENT_ID); 1787 - if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID)) 1788 - resctrl_enable_mon_event(QOS_L3_MBM_LOCAL_EVENT_ID); 1789 - mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask; 1790 - mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask & 1791 - (READS_TO_LOCAL_MEM | 1792 - READS_TO_LOCAL_S_MEM | 1793 - NON_TEMP_WRITE_TO_LOCAL_MEM); 1785 + if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID)) 1786 + mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask; 1787 + if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID)) 1788 + mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask & 1789 + (READS_TO_LOCAL_MEM | 1790 + READS_TO_LOCAL_S_MEM | 1791 + NON_TEMP_WRITE_TO_LOCAL_MEM); 1794 1792 r->mon.mbm_assign_on_mkdir = true; 1795 1793 resctrl_file_fflags_init("num_mbm_cntrs", 1796 1794 RFTYPE_MON_INFO | RFTYPE_RES_CACHE);
+1 -3
fs/smb/client/cifsglob.h
··· 534 534 void (*new_lease_key)(struct cifs_fid *); 535 535 int (*generate_signingkey)(struct cifs_ses *ses, 536 536 struct TCP_Server_Info *server); 537 - int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *, 538 - bool allocate_crypto); 539 537 int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon, 540 538 struct cifsFileInfo *src_file); 541 539 int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon, ··· 730 732 bool nosharesock; 731 733 bool tcp_nodelay; 732 734 bool terminate; 733 - unsigned int credits; /* send no more requests at once */ 735 + int credits; /* send no more requests at once */ 734 736 unsigned int max_credits; /* can override large 32000 default at mnt */ 735 737 unsigned int in_flight; /* number of requests on the wire to server */ 736 738 unsigned int max_in_flight; /* max number of requests that were on wire */
+1
fs/smb/client/cifsproto.h
··· 9 9 #define _CIFSPROTO_H 10 10 #include <linux/nls.h> 11 11 #include <linux/ctype.h> 12 + #include "cifsglob.h" 12 13 #include "trace.h" 13 14 #ifdef CONFIG_CIFS_DFS_UPCALL 14 15 #include "dfs_cache.h"
+8
fs/smb/client/cifssmb.c
··· 1311 1311 .rreq_debug_id = rdata->rreq->debug_id, 1312 1312 .rreq_debug_index = rdata->subreq.debug_index, 1313 1313 }; 1314 + unsigned int rreq_debug_id = rdata->rreq->debug_id; 1315 + unsigned int subreq_debug_index = rdata->subreq.debug_index; 1314 1316 1315 1317 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n", 1316 1318 __func__, mid->mid, mid->mid_state, rdata->result, ··· 1376 1374 __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags); 1377 1375 } 1378 1376 1377 + trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value, 1378 + server->credits, server->in_flight, 1379 + 0, cifs_trace_rw_credits_read_response_clear); 1379 1380 rdata->credits.value = 0; 1380 1381 rdata->subreq.error = rdata->result; 1381 1382 rdata->subreq.transferred += rdata->got_bytes; ··· 1386 1381 netfs_read_subreq_terminated(&rdata->subreq); 1387 1382 release_mid(mid); 1388 1383 add_credits(server, &credits, 0); 1384 + trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0, 1385 + server->credits, server->in_flight, 1386 + credits.value, cifs_trace_rw_credits_read_response_add); 1389 1387 } 1390 1388 1391 1389 /* cifs_async_readv - send an async write, and set up mid to handle result */
+1 -4
fs/smb/client/inode.c
··· 2484 2484 } 2485 2485 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2486 2486 do_rename_exit: 2487 - if (rc == 0) { 2487 + if (rc == 0) 2488 2488 d_move(from_dentry, to_dentry); 2489 - /* Force a new lookup */ 2490 - d_drop(from_dentry); 2491 - } 2492 2489 cifs_put_tlink(tlink); 2493 2490 return rc; 2494 2491 }
-4
fs/smb/client/smb2ops.c
··· 5446 5446 .get_lease_key = smb2_get_lease_key, 5447 5447 .set_lease_key = smb2_set_lease_key, 5448 5448 .new_lease_key = smb2_new_lease_key, 5449 - .calc_signature = smb2_calc_signature, 5450 5449 .is_read_op = smb2_is_read_op, 5451 5450 .set_oplock_level = smb2_set_oplock_level, 5452 5451 .create_lease_buf = smb2_create_lease_buf, ··· 5549 5550 .get_lease_key = smb2_get_lease_key, 5550 5551 .set_lease_key = smb2_set_lease_key, 5551 5552 .new_lease_key = smb2_new_lease_key, 5552 - .calc_signature = smb2_calc_signature, 5553 5553 .is_read_op = smb21_is_read_op, 5554 5554 .set_oplock_level = smb21_set_oplock_level, 5555 5555 .create_lease_buf = smb2_create_lease_buf, ··· 5658 5660 .set_lease_key = smb2_set_lease_key, 5659 5661 .new_lease_key = smb2_new_lease_key, 5660 5662 .generate_signingkey = generate_smb30signingkey, 5661 - .calc_signature = smb3_calc_signature, 5662 5663 .set_integrity = smb3_set_integrity, 5663 5664 .is_read_op = smb21_is_read_op, 5664 5665 .set_oplock_level = smb3_set_oplock_level, ··· 5774 5777 .set_lease_key = smb2_set_lease_key, 5775 5778 .new_lease_key = smb2_new_lease_key, 5776 5779 .generate_signingkey = generate_smb311signingkey, 5777 - .calc_signature = smb3_calc_signature, 5778 5780 .set_integrity = smb3_set_integrity, 5779 5781 .is_read_op = smb21_is_read_op, 5780 5782 .set_oplock_level = smb3_set_oplock_level,
-6
fs/smb/client/smb2proto.h
··· 39 39 struct TCP_Server_Info *server, struct smb_rqst *rqst); 40 40 extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server, 41 41 __u64 ses_id, __u32 tid); 42 - extern int smb2_calc_signature(struct smb_rqst *rqst, 43 - struct TCP_Server_Info *server, 44 - bool allocate_crypto); 45 - extern int smb3_calc_signature(struct smb_rqst *rqst, 46 - struct TCP_Server_Info *server, 47 - bool allocate_crypto); 48 42 extern void smb2_echo_request(struct work_struct *work); 49 43 extern __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode); 50 44 extern bool smb2_is_valid_oplock_break(char *buffer,
+9 -9
fs/smb/client/smb2transport.c
··· 209 209 return tcon; 210 210 } 211 211 212 - int 212 + static int 213 213 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, 214 - bool allocate_crypto) 214 + bool allocate_crypto) 215 215 { 216 216 int rc; 217 217 unsigned char smb2_signature[SMB2_HMACSHA256_SIZE]; ··· 465 465 return generate_smb3signingkey(ses, server, &triplet); 466 466 } 467 467 468 - int 468 + static int 469 469 smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, 470 - bool allocate_crypto) 470 + bool allocate_crypto) 471 471 { 472 472 int rc; 473 473 unsigned char smb3_signature[SMB2_CMACAES_SIZE]; ··· 476 476 struct shash_desc *shash = NULL; 477 477 struct smb_rqst drqst; 478 478 u8 key[SMB3_SIGN_KEY_SIZE]; 479 + 480 + if (server->vals->protocol_id <= SMB21_PROT_ID) 481 + return smb2_calc_signature(rqst, server, allocate_crypto); 479 482 480 483 rc = smb3_get_sign_key(le64_to_cpu(shdr->SessionId), server, key); 481 484 if (unlikely(rc)) { ··· 550 547 static int 551 548 smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server) 552 549 { 553 - int rc = 0; 554 550 struct smb2_hdr *shdr; 555 551 struct smb2_sess_setup_req *ssr; 556 552 bool is_binding; ··· 576 574 return 0; 577 575 } 578 576 579 - rc = server->ops->calc_signature(rqst, server, false); 580 - 581 - return rc; 577 + return smb3_calc_signature(rqst, server, false); 582 578 } 583 579 584 580 int ··· 612 612 613 613 memset(shdr->Signature, 0, SMB2_SIGNATURE_SIZE); 614 614 615 - rc = server->ops->calc_signature(rqst, server, true); 615 + rc = smb3_calc_signature(rqst, server, true); 616 616 617 617 if (rc) 618 618 return rc;
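With the ops->calc_signature hook removed, one entry point now dispatches on the negotiated dialect, which is why smb3_calc_signature() grows the protocol_id check. A condensed restatement of that dispatch, as visible in the hunk:

        /* SMB 2.0/2.1 sign with HMAC-SHA256; SMB 3.0+ with AES-CMAC. */
        if (server->vals->protocol_id <= SMB21_PROT_ID)
                return smb2_calc_signature(rqst, server, allocate_crypto);
        /* otherwise continue on the smb3 (AES-CMAC) path */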
+65 -38
fs/smb/client/smbdirect.c
··· 172 172 * in order to notice the broken connection. 173 173 */ 174 174 wake_up_all(&sc->status_wait); 175 + wake_up_all(&sc->send_io.lcredits.wait_queue); 175 176 wake_up_all(&sc->send_io.credits.wait_queue); 176 177 wake_up_all(&sc->send_io.pending.dec_wait_queue); 177 178 wake_up_all(&sc->send_io.pending.zero_wait_queue); ··· 496 495 struct smbdirect_send_io *request = 497 496 container_of(wc->wr_cqe, struct smbdirect_send_io, cqe); 498 497 struct smbdirect_socket *sc = request->socket; 498 + int lcredits = 0; 499 499 500 500 log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%s\n", 501 501 request, ib_wc_status_msg(wc->status)); ··· 506 504 request->sge[i].addr, 507 505 request->sge[i].length, 508 506 DMA_TO_DEVICE); 507 + mempool_free(request, sc->send_io.mem.pool); 508 + lcredits += 1; 509 509 510 510 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { 511 511 if (wc->status != IB_WC_WR_FLUSH_ERR) 512 512 log_rdma_send(ERR, "wc->status=%s wc->opcode=%d\n", 513 513 ib_wc_status_msg(wc->status), wc->opcode); 514 - mempool_free(request, sc->send_io.mem.pool); 515 514 smbd_disconnect_rdma_connection(sc); 516 515 return; 517 516 } 517 + 518 + atomic_add(lcredits, &sc->send_io.lcredits.count); 519 + wake_up(&sc->send_io.lcredits.wait_queue); 518 520 519 521 if (atomic_dec_and_test(&sc->send_io.pending.count)) 520 522 wake_up(&sc->send_io.pending.zero_wait_queue); 521 523 522 524 wake_up(&sc->send_io.pending.dec_wait_queue); 523 - 524 - mempool_free(request, sc->send_io.mem.pool); 525 525 } 526 526 527 527 static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp) ··· 571 567 log_rdma_event(ERR, "error: credits_granted==0\n"); 572 568 return false; 573 569 } 570 + atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target); 574 571 atomic_set(&sc->send_io.credits.count, le16_to_cpu(packet->credits_granted)); 575 572 576 573 if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) { ··· 1119 1114 struct smbdirect_data_transfer *packet; 1120 1115 int new_credits = 0; 1121 1116 1117 + wait_lcredit: 1118 + /* Wait for local send credits */ 1119 + rc = wait_event_interruptible(sc->send_io.lcredits.wait_queue, 1120 + atomic_read(&sc->send_io.lcredits.count) > 0 || 1121 + sc->status != SMBDIRECT_SOCKET_CONNECTED); 1122 + if (rc) 1123 + goto err_wait_lcredit; 1124 + 1125 + if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { 1126 + log_outgoing(ERR, "disconnected not sending on wait_credit\n"); 1127 + rc = -EAGAIN; 1128 + goto err_wait_lcredit; 1129 + } 1130 + if (unlikely(atomic_dec_return(&sc->send_io.lcredits.count) < 0)) { 1131 + atomic_inc(&sc->send_io.lcredits.count); 1132 + goto wait_lcredit; 1133 + } 1134 + 1122 1135 wait_credit: 1123 1136 /* Wait for send credits. 
A SMBD packet needs one credit */ 1124 1137 rc = wait_event_interruptible(sc->send_io.credits.wait_queue, ··· 1153 1130 if (unlikely(atomic_dec_return(&sc->send_io.credits.count) < 0)) { 1154 1131 atomic_inc(&sc->send_io.credits.count); 1155 1132 goto wait_credit; 1156 - } 1157 - 1158 - wait_send_queue: 1159 - wait_event(sc->send_io.pending.dec_wait_queue, 1160 - atomic_read(&sc->send_io.pending.count) < sp->send_credit_target || 1161 - sc->status != SMBDIRECT_SOCKET_CONNECTED); 1162 - 1163 - if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { 1164 - log_outgoing(ERR, "disconnected not sending on wait_send_queue\n"); 1165 - rc = -EAGAIN; 1166 - goto err_wait_send_queue; 1167 - } 1168 - 1169 - if (unlikely(atomic_inc_return(&sc->send_io.pending.count) > 1170 - sp->send_credit_target)) { 1171 - atomic_dec(&sc->send_io.pending.count); 1172 - goto wait_send_queue; 1173 1133 } 1174 1134 1175 1135 request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL); ··· 1235 1229 le32_to_cpu(packet->data_length), 1236 1230 le32_to_cpu(packet->remaining_data_length)); 1237 1231 1232 + /* 1233 + * Now that we got a local and a remote credit 1234 + * we add us as pending 1235 + */ 1236 + atomic_inc(&sc->send_io.pending.count); 1237 + 1238 1238 rc = smbd_post_send(sc, request); 1239 1239 if (!rc) 1240 1240 return 0; 1241 + 1242 + if (atomic_dec_and_test(&sc->send_io.pending.count)) 1243 + wake_up(&sc->send_io.pending.zero_wait_queue); 1244 + 1245 + wake_up(&sc->send_io.pending.dec_wait_queue); 1241 1246 1242 1247 err_dma: 1243 1248 for (i = 0; i < request->num_sge; i++) ··· 1263 1246 atomic_sub(new_credits, &sc->recv_io.credits.count); 1264 1247 1265 1248 err_alloc: 1266 - if (atomic_dec_and_test(&sc->send_io.pending.count)) 1267 - wake_up(&sc->send_io.pending.zero_wait_queue); 1268 - 1269 - err_wait_send_queue: 1270 - /* roll back send credits and pending */ 1271 1249 atomic_inc(&sc->send_io.credits.count); 1250 + wake_up(&sc->send_io.credits.wait_queue); 1272 1251 1273 1252 err_wait_credit: 1253 + atomic_inc(&sc->send_io.lcredits.count); 1254 + wake_up(&sc->send_io.lcredits.wait_queue); 1255 + 1256 + err_wait_lcredit: 1274 1257 return rc; 1275 1258 } 1276 1259 ··· 1784 1767 struct smbdirect_socket *sc; 1785 1768 struct smbdirect_socket_parameters *sp; 1786 1769 struct rdma_conn_param conn_param; 1770 + struct ib_qp_cap qp_cap; 1787 1771 struct ib_qp_init_attr qp_attr; 1788 1772 struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr; 1789 1773 struct ib_port_immutable port_immutable; ··· 1856 1838 goto config_failed; 1857 1839 } 1858 1840 1841 + sp->responder_resources = 1842 + min_t(u8, sp->responder_resources, 1843 + sc->ib.dev->attrs.max_qp_rd_atom); 1844 + log_rdma_mr(INFO, "responder_resources=%d\n", 1845 + sp->responder_resources); 1846 + 1847 + /* 1848 + * We use allocate sp->responder_resources * 2 MRs 1849 + * and each MR needs WRs for REG and INV, so 1850 + * we use '* 4'. 
1851 + * 1852 + * +1 for ib_drain_qp() 1853 + */ 1854 + memset(&qp_cap, 0, sizeof(qp_cap)); 1855 + qp_cap.max_send_wr = sp->send_credit_target + sp->responder_resources * 4 + 1; 1856 + qp_cap.max_recv_wr = sp->recv_credit_max + 1; 1857 + qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE; 1858 + qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE; 1859 + 1859 1860 sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0); 1860 1861 if (IS_ERR(sc->ib.pd)) { 1861 1862 rc = PTR_ERR(sc->ib.pd); ··· 1885 1848 1886 1849 sc->ib.send_cq = 1887 1850 ib_alloc_cq_any(sc->ib.dev, sc, 1888 - sp->send_credit_target, IB_POLL_SOFTIRQ); 1851 + qp_cap.max_send_wr, IB_POLL_SOFTIRQ); 1889 1852 if (IS_ERR(sc->ib.send_cq)) { 1890 1853 sc->ib.send_cq = NULL; 1891 1854 goto alloc_cq_failed; ··· 1893 1856 1894 1857 sc->ib.recv_cq = 1895 1858 ib_alloc_cq_any(sc->ib.dev, sc, 1896 - sp->recv_credit_max, IB_POLL_SOFTIRQ); 1859 + qp_cap.max_recv_wr, IB_POLL_SOFTIRQ); 1897 1860 if (IS_ERR(sc->ib.recv_cq)) { 1898 1861 sc->ib.recv_cq = NULL; 1899 1862 goto alloc_cq_failed; ··· 1902 1865 memset(&qp_attr, 0, sizeof(qp_attr)); 1903 1866 qp_attr.event_handler = smbd_qp_async_error_upcall; 1904 1867 qp_attr.qp_context = sc; 1905 - qp_attr.cap.max_send_wr = sp->send_credit_target; 1906 - qp_attr.cap.max_recv_wr = sp->recv_credit_max; 1907 - qp_attr.cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE; 1908 - qp_attr.cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE; 1909 - qp_attr.cap.max_inline_data = 0; 1868 + qp_attr.cap = qp_cap; 1910 1869 qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 1911 1870 qp_attr.qp_type = IB_QPT_RC; 1912 1871 qp_attr.send_cq = sc->ib.send_cq; ··· 1915 1882 goto create_qp_failed; 1916 1883 } 1917 1884 sc->ib.qp = sc->rdma.cm_id->qp; 1918 - 1919 - sp->responder_resources = 1920 - min_t(u8, sp->responder_resources, 1921 - sc->ib.dev->attrs.max_qp_rd_atom); 1922 - log_rdma_mr(INFO, "responder_resources=%d\n", 1923 - sp->responder_resources); 1924 1885 1925 1886 memset(&conn_param, 0, sizeof(conn_param)); 1926 1887 conn_param.initiator_depth = sp->initiator_depth;
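The new lcredits counter is the classic counting-semaphore-over-wait-queue pattern: take a credit with a speculative decrement and roll back on underflow, return it with an increment plus a wake-up. A compilable user-space analog of just the counter discipline, using C11 atomics (the kernel version additionally blocks on a wait queue and handles signals and disconnect):

#include <stdatomic.h>

/* Try to take one credit; returns 1 on success, 0 if the caller should
 * wait and retry. atomic_fetch_sub() returns the old value, so
 * old <= 0 means we drove the counter negative and must undo it. */
static int credit_try_take(atomic_int *credits)
{
        if (atomic_fetch_sub(credits, 1) <= 0) {
                atomic_fetch_add(credits, 1);   /* roll back the underflow */
                return 0;
        }
        return 1;
}

static void credit_put(atomic_int *credits)
{
        atomic_fetch_add(credits, 1);
        /* the kernel follows this with wake_up(&wait_queue) */
}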
+1
fs/smb/client/trace.c
··· 4 4 * 5 5 * Author(s): Steve French <stfrench@microsoft.com> 6 6 */ 7 + #include "cifsglob.h" 7 8 #define CREATE_TRACE_POINTS 8 9 #include "trace.h"
+12 -1
fs/smb/common/smbdirect/smbdirect_socket.h
··· 142 142 } mem; 143 143 144 144 /* 145 - * The credit state for the send side 145 + * The local credit state for ib_post_send() 146 + */ 147 + struct { 148 + atomic_t count; 149 + wait_queue_head_t wait_queue; 150 + } lcredits; 151 + 152 + /* 153 + * The remote credit state for the send side 146 154 */ 147 155 struct { 148 156 atomic_t count; ··· 344 336 disable_work_sync(&sc->idle.immediate_work); 345 337 INIT_DELAYED_WORK(&sc->idle.timer_work, __smbdirect_socket_disabled_work); 346 338 disable_delayed_work_sync(&sc->idle.timer_work); 339 + 340 + atomic_set(&sc->send_io.lcredits.count, 0); 341 + init_waitqueue_head(&sc->send_io.lcredits.wait_queue); 347 342 348 343 atomic_set(&sc->send_io.credits.count, 0); 349 344 init_waitqueue_head(&sc->send_io.credits.wait_queue);
+219 -125
fs/smb/server/transport_rdma.c
··· 219 219 * in order to notice the broken connection. 220 220 */ 221 221 wake_up_all(&sc->status_wait); 222 + wake_up_all(&sc->send_io.lcredits.wait_queue); 222 223 wake_up_all(&sc->send_io.credits.wait_queue); 223 224 wake_up_all(&sc->send_io.pending.zero_wait_queue); 224 225 wake_up_all(&sc->recv_io.reassembly.wait_queue); ··· 451 450 struct smbdirect_recv_io *recvmsg; 452 451 453 452 disable_work_sync(&sc->disconnect_work); 454 - if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING) { 453 + if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING) 455 454 smb_direct_disconnect_rdma_work(&sc->disconnect_work); 456 - wait_event_interruptible(sc->status_wait, 457 - sc->status == SMBDIRECT_SOCKET_DISCONNECTED); 458 - } 455 + if (sc->status < SMBDIRECT_SOCKET_DISCONNECTED) 456 + wait_event(sc->status_wait, sc->status == SMBDIRECT_SOCKET_DISCONNECTED); 459 457 460 458 /* 461 459 * Wake up all waiters in all wait queues ··· 471 471 472 472 if (sc->ib.qp) { 473 473 ib_drain_qp(sc->ib.qp); 474 - ib_mr_pool_destroy(sc->ib.qp, &sc->ib.qp->rdma_mrs); 475 474 sc->ib.qp = NULL; 476 475 rdma_destroy_qp(sc->rdma.cm_id); 477 476 } ··· 522 523 struct smbdirect_send_io *msg) 523 524 { 524 525 int i; 526 + 527 + /* 528 + * The list needs to be empty! 529 + * The caller should take care of it. 530 + */ 531 + WARN_ON_ONCE(!list_empty(&msg->sibling_list)); 525 532 526 533 if (msg->num_sge > 0) { 527 534 ib_dma_unmap_single(sc->ib.dev, ··· 914 909 915 910 static void send_done(struct ib_cq *cq, struct ib_wc *wc) 916 911 { 917 - struct smbdirect_send_io *sendmsg, *sibling; 912 + struct smbdirect_send_io *sendmsg, *sibling, *next; 918 913 struct smbdirect_socket *sc; 919 - struct list_head *pos, *prev, *end; 914 + int lcredits = 0; 920 915 921 916 sendmsg = container_of(wc->wr_cqe, struct smbdirect_send_io, cqe); 922 917 sc = sendmsg->socket; ··· 925 920 ib_wc_status_msg(wc->status), wc->status, 926 921 wc->opcode); 927 922 923 + /* 924 + * Free possible siblings and then the main send_io 925 + */ 926 + list_for_each_entry_safe(sibling, next, &sendmsg->sibling_list, sibling_list) { 927 + list_del_init(&sibling->sibling_list); 928 + smb_direct_free_sendmsg(sc, sibling); 929 + lcredits += 1; 930 + } 931 + /* Note this frees wc->wr_cqe, but not wc */ 932 + smb_direct_free_sendmsg(sc, sendmsg); 933 + lcredits += 1; 934 + 928 935 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { 929 936 pr_err("Send error. status='%s (%d)', opcode=%d\n", 930 937 ib_wc_status_msg(wc->status), wc->status, 931 938 wc->opcode); 932 939 smb_direct_disconnect_rdma_connection(sc); 940 + return; 933 941 } 942 + 943 + atomic_add(lcredits, &sc->send_io.lcredits.count); 944 + wake_up(&sc->send_io.lcredits.wait_queue); 934 945 935 946 if (atomic_dec_and_test(&sc->send_io.pending.count)) 936 947 wake_up(&sc->send_io.pending.zero_wait_queue); 937 - 938 - /* iterate and free the list of messages in reverse. the list's head 939 - * is invalid. 
940 - */ 941 - for (pos = &sendmsg->sibling_list, prev = pos->prev, end = sendmsg->sibling_list.next; 942 - prev != end; pos = prev, prev = prev->prev) { 943 - sibling = container_of(pos, struct smbdirect_send_io, sibling_list); 944 - smb_direct_free_sendmsg(sc, sibling); 945 - } 946 - 947 - sibling = container_of(pos, struct smbdirect_send_io, sibling_list); 948 - smb_direct_free_sendmsg(sc, sibling); 949 948 } 950 949 951 950 static int manage_credits_prior_sending(struct smbdirect_socket *sc) ··· 997 988 ret = ib_post_send(sc->ib.qp, wr, NULL); 998 989 if (ret) { 999 990 pr_err("failed to post send: %d\n", ret); 1000 - if (atomic_dec_and_test(&sc->send_io.pending.count)) 1001 - wake_up(&sc->send_io.pending.zero_wait_queue); 1002 991 smb_direct_disconnect_rdma_connection(sc); 1003 992 } 1004 993 return ret; ··· 1039 1032 last->wr.send_flags = IB_SEND_SIGNALED; 1040 1033 last->wr.wr_cqe = &last->cqe; 1041 1034 1035 + /* 1036 + * Remove last from send_ctx->msg_list 1037 + * and splice the rest of send_ctx->msg_list 1038 + * to last->sibling_list. 1039 + * 1040 + * send_ctx->msg_list is a valid empty list 1041 + * at the end. 1042 + */ 1043 + list_del_init(&last->sibling_list); 1044 + list_splice_tail_init(&send_ctx->msg_list, &last->sibling_list); 1045 + send_ctx->wr_cnt = 0; 1046 + 1042 1047 ret = smb_direct_post_send(sc, &first->wr); 1043 - if (!ret) { 1044 - smb_direct_send_ctx_init(send_ctx, 1045 - send_ctx->need_invalidate_rkey, 1046 - send_ctx->remote_key); 1047 - } else { 1048 - atomic_add(send_ctx->wr_cnt, &sc->send_io.credits.count); 1049 - wake_up(&sc->send_io.credits.wait_queue); 1050 - list_for_each_entry_safe(first, last, &send_ctx->msg_list, 1051 - sibling_list) { 1052 - smb_direct_free_sendmsg(sc, first); 1048 + if (ret) { 1049 + struct smbdirect_send_io *sibling, *next; 1050 + 1051 + list_for_each_entry_safe(sibling, next, &last->sibling_list, sibling_list) { 1052 + list_del_init(&sibling->sibling_list); 1053 + smb_direct_free_sendmsg(sc, sibling); 1053 1054 } 1055 + smb_direct_free_sendmsg(sc, last); 1054 1056 } 1057 + 1055 1058 return ret; 1056 1059 } 1057 1060 ··· 1085 1068 else if (ret < 0) 1086 1069 return ret; 1087 1070 } while (true); 1071 + } 1072 + 1073 + static int wait_for_send_lcredit(struct smbdirect_socket *sc, 1074 + struct smbdirect_send_batch *send_ctx) 1075 + { 1076 + if (send_ctx && (atomic_read(&sc->send_io.lcredits.count) <= 1)) { 1077 + int ret; 1078 + 1079 + ret = smb_direct_flush_send_list(sc, send_ctx, false); 1080 + if (ret) 1081 + return ret; 1082 + } 1083 + 1084 + return wait_for_credits(sc, 1085 + &sc->send_io.lcredits.wait_queue, 1086 + &sc->send_io.lcredits.count, 1087 + 1); 1088 1088 } 1089 1089 1090 1090 static int wait_for_send_credits(struct smbdirect_socket *sc, ··· 1291 1257 int data_length; 1292 1258 struct scatterlist sg[SMBDIRECT_SEND_IO_MAX_SGE - 1]; 1293 1259 1260 + ret = wait_for_send_lcredit(sc, send_ctx); 1261 + if (ret) 1262 + goto lcredit_failed; 1263 + 1294 1264 ret = wait_for_send_credits(sc, send_ctx); 1295 1265 if (ret) 1296 - return ret; 1266 + goto credit_failed; 1297 1267 1298 1268 data_length = 0; 1299 1269 for (i = 0; i < niov; i++) ··· 1305 1267 1306 1268 ret = smb_direct_create_header(sc, data_length, remaining_data_length, 1307 1269 &msg); 1308 - if (ret) { 1309 - atomic_inc(&sc->send_io.credits.count); 1310 - return ret; 1311 - } 1270 + if (ret) 1271 + goto header_failed; 1312 1272 1313 1273 for (i = 0; i < niov; i++) { 1314 1274 struct ib_sge *sge; ··· 1344 1308 return 0; 1345 1309 err: 1346 1310 
smb_direct_free_sendmsg(sc, msg); 1311 + header_failed: 1347 1312 atomic_inc(&sc->send_io.credits.count); 1313 + credit_failed: 1314 + atomic_inc(&sc->send_io.lcredits.count); 1315 + lcredit_failed: 1348 1316 return ret; 1349 1317 } 1350 1318 ··· 1911 1871 return ret; 1912 1872 } 1913 1873 1914 - static unsigned int smb_direct_get_max_fr_pages(struct smbdirect_socket *sc) 1915 - { 1916 - return min_t(unsigned int, 1917 - sc->ib.dev->attrs.max_fast_reg_page_list_len, 1918 - 256); 1919 - } 1920 - 1921 - static int smb_direct_init_params(struct smbdirect_socket *sc, 1922 - struct ib_qp_cap *cap) 1874 + static int smb_direct_init_params(struct smbdirect_socket *sc) 1923 1875 { 1924 1876 struct smbdirect_socket_parameters *sp = &sc->parameters; 1925 - struct ib_device *device = sc->ib.dev; 1926 - int max_send_sges, max_rw_wrs, max_send_wrs; 1927 - unsigned int max_sge_per_wr, wrs_per_credit; 1877 + int max_send_sges; 1878 + unsigned int maxpages; 1928 1879 1929 1880 /* need 3 more sge. because a SMB_DIRECT header, SMB2 header, 1930 1881 * SMB2 response could be mapped. ··· 1926 1895 return -EINVAL; 1927 1896 } 1928 1897 1929 - /* Calculate the number of work requests for RDMA R/W. 1930 - * The maximum number of pages which can be registered 1931 - * with one Memory region can be transferred with one 1932 - * R/W credit. And at least 4 work requests for each credit 1933 - * are needed for MR registration, RDMA R/W, local & remote 1934 - * MR invalidation. 1935 - */ 1936 - sc->rw_io.credits.num_pages = smb_direct_get_max_fr_pages(sc); 1937 - sc->rw_io.credits.max = DIV_ROUND_UP(sp->max_read_write_size, 1938 - (sc->rw_io.credits.num_pages - 1) * 1939 - PAGE_SIZE); 1898 + atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target); 1940 1899 1941 - max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge, 1942 - device->attrs.max_sge_rd); 1943 - max_sge_per_wr = max_t(unsigned int, max_sge_per_wr, 1944 - max_send_sges); 1945 - wrs_per_credit = max_t(unsigned int, 4, 1946 - DIV_ROUND_UP(sc->rw_io.credits.num_pages, 1947 - max_sge_per_wr) + 1); 1948 - max_rw_wrs = sc->rw_io.credits.max * wrs_per_credit; 1949 - 1950 - max_send_wrs = sp->send_credit_target + max_rw_wrs; 1951 - if (max_send_wrs > device->attrs.max_cqe || 1952 - max_send_wrs > device->attrs.max_qp_wr) { 1953 - pr_err("consider lowering send_credit_target = %d\n", 1954 - sp->send_credit_target); 1955 - pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", 1956 - device->attrs.max_cqe, device->attrs.max_qp_wr); 1957 - return -EINVAL; 1958 - } 1959 - 1960 - if (sp->recv_credit_max > device->attrs.max_cqe || 1961 - sp->recv_credit_max > device->attrs.max_qp_wr) { 1962 - pr_err("consider lowering receive_credit_max = %d\n", 1963 - sp->recv_credit_max); 1964 - pr_err("Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n", 1965 - device->attrs.max_cqe, device->attrs.max_qp_wr); 1966 - return -EINVAL; 1967 - } 1968 - 1969 - if (device->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE) { 1970 - pr_err("warning: device max_send_sge = %d too small\n", 1971 - device->attrs.max_send_sge); 1972 - return -EINVAL; 1973 - } 1974 - if (device->attrs.max_recv_sge < SMBDIRECT_RECV_IO_MAX_SGE) { 1975 - pr_err("warning: device max_recv_sge = %d too small\n", 1976 - device->attrs.max_recv_sge); 1977 - return -EINVAL; 1978 - } 1900 + maxpages = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE); 1901 + sc->rw_io.credits.max = rdma_rw_mr_factor(sc->ib.dev, 1902 + sc->rdma.cm_id->port_num, 1903 + maxpages); 1904 + 
sc->rw_io.credits.num_pages = DIV_ROUND_UP(maxpages, sc->rw_io.credits.max); 1905 + /* add one extra in order to handle unaligned pages */ 1906 + sc->rw_io.credits.max += 1; 1979 1907 1980 1908 sc->recv_io.credits.target = 1; 1981 1909 1982 1910 atomic_set(&sc->rw_io.credits.count, sc->rw_io.credits.max); 1983 1911 1984 - cap->max_send_wr = max_send_wrs; 1985 - cap->max_recv_wr = sp->recv_credit_max; 1986 - cap->max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE; 1987 - cap->max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE; 1988 - cap->max_inline_data = 0; 1989 - cap->max_rdma_ctxs = sc->rw_io.credits.max; 1990 1912 return 0; 1991 1913 } 1992 1914 ··· 2013 2029 return -ENOMEM; 2014 2030 } 2015 2031 2016 - static int smb_direct_create_qpair(struct smbdirect_socket *sc, 2017 - struct ib_qp_cap *cap) 2032 + static u32 smb_direct_rdma_rw_send_wrs(struct ib_device *dev, const struct ib_qp_init_attr *attr) 2033 + { 2034 + /* 2035 + * This could be split out of rdma_rw_init_qp() 2036 + * and be a helper function next to rdma_rw_mr_factor() 2037 + * 2038 + * We can't check unlikely(rdma_rw_force_mr) here, 2039 + * but that is most likely 0 anyway. 2040 + */ 2041 + u32 factor; 2042 + 2043 + WARN_ON_ONCE(attr->port_num == 0); 2044 + 2045 + /* 2046 + * Each context needs at least one RDMA READ or WRITE WR. 2047 + * 2048 + * For some hardware we might need more, eventually we should ask the 2049 + * HCA driver for a multiplier here. 2050 + */ 2051 + factor = 1; 2052 + 2053 + /* 2054 + * If the device needs MRs to perform RDMA READ or WRITE operations, 2055 + * we'll need two additional MRs for the registrations and the 2056 + * invalidation. 2057 + */ 2058 + if (rdma_protocol_iwarp(dev, attr->port_num) || dev->attrs.max_sgl_rd) 2059 + factor += 2; /* inv + reg */ 2060 + 2061 + return factor * attr->cap.max_rdma_ctxs; 2062 + } 2063 + 2064 + static int smb_direct_create_qpair(struct smbdirect_socket *sc) 2018 2065 { 2019 2066 struct smbdirect_socket_parameters *sp = &sc->parameters; 2020 2067 int ret; 2068 + struct ib_qp_cap qp_cap; 2021 2069 struct ib_qp_init_attr qp_attr; 2022 - int pages_per_rw; 2070 + u32 max_send_wr; 2071 + u32 rdma_send_wr; 2072 + 2073 + /* 2074 + * Note that {rdma,ib}_create_qp() will call 2075 + * rdma_rw_init_qp() if cap->max_rdma_ctxs is not 0. 2076 + * It will adjust cap->max_send_wr to the required 2077 + * number of additional WRs for the RDMA RW operations. 2078 + * It will cap cap->max_send_wr to the device limit. 2079 + * 2080 + * +1 for ib_drain_qp 2081 + */ 2082 + qp_cap.max_send_wr = sp->send_credit_target + 1; 2083 + qp_cap.max_recv_wr = sp->recv_credit_max + 1; 2084 + qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE; 2085 + qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE; 2086 + qp_cap.max_inline_data = 0; 2087 + qp_cap.max_rdma_ctxs = sc->rw_io.credits.max; 2088 + 2089 + /* 2090 + * Find out the number of max_send_wr 2091 + * after rdma_rw_init_qp() adjusted it. 2092 + * 2093 + * We only do it on a temporary variable, 2094 + * as rdma_create_qp() will trigger 2095 + * rdma_rw_init_qp() again. 
2096 + */ 2097 + memset(&qp_attr, 0, sizeof(qp_attr)); 2098 + qp_attr.cap = qp_cap; 2099 + qp_attr.port_num = sc->rdma.cm_id->port_num; 2100 + rdma_send_wr = smb_direct_rdma_rw_send_wrs(sc->ib.dev, &qp_attr); 2101 + max_send_wr = qp_cap.max_send_wr + rdma_send_wr; 2102 + 2103 + if (qp_cap.max_send_wr > sc->ib.dev->attrs.max_cqe || 2104 + qp_cap.max_send_wr > sc->ib.dev->attrs.max_qp_wr) { 2105 + pr_err("Possible CQE overrun: max_send_wr %d\n", 2106 + qp_cap.max_send_wr); 2107 + pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n", 2108 + IB_DEVICE_NAME_MAX, 2109 + sc->ib.dev->name, 2110 + sc->ib.dev->attrs.max_cqe, 2111 + sc->ib.dev->attrs.max_qp_wr); 2112 + pr_err("consider lowering send_credit_target = %d\n", 2113 + sp->send_credit_target); 2114 + return -EINVAL; 2115 + } 2116 + 2117 + if (qp_cap.max_rdma_ctxs && 2118 + (max_send_wr >= sc->ib.dev->attrs.max_cqe || 2119 + max_send_wr >= sc->ib.dev->attrs.max_qp_wr)) { 2120 + pr_err("Possible CQE overrun: rdma_send_wr %d + max_send_wr %d = %d\n", 2121 + rdma_send_wr, qp_cap.max_send_wr, max_send_wr); 2122 + pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n", 2123 + IB_DEVICE_NAME_MAX, 2124 + sc->ib.dev->name, 2125 + sc->ib.dev->attrs.max_cqe, 2126 + sc->ib.dev->attrs.max_qp_wr); 2127 + pr_err("consider lowering send_credit_target = %d, max_rdma_ctxs = %d\n", 2128 + sp->send_credit_target, qp_cap.max_rdma_ctxs); 2129 + return -EINVAL; 2130 + } 2131 + 2132 + if (qp_cap.max_recv_wr > sc->ib.dev->attrs.max_cqe || 2133 + qp_cap.max_recv_wr > sc->ib.dev->attrs.max_qp_wr) { 2134 + pr_err("Possible CQE overrun: max_recv_wr %d\n", 2135 + qp_cap.max_recv_wr); 2136 + pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n", 2137 + IB_DEVICE_NAME_MAX, 2138 + sc->ib.dev->name, 2139 + sc->ib.dev->attrs.max_cqe, 2140 + sc->ib.dev->attrs.max_qp_wr); 2141 + pr_err("consider lowering receive_credit_max = %d\n", 2142 + sp->recv_credit_max); 2143 + return -EINVAL; 2144 + } 2145 + 2146 + if (qp_cap.max_send_sge > sc->ib.dev->attrs.max_send_sge || 2147 + qp_cap.max_recv_sge > sc->ib.dev->attrs.max_recv_sge) { 2148 + pr_err("device %.*s max_send_sge/max_recv_sge = %d/%d too small\n", 2149 + IB_DEVICE_NAME_MAX, 2150 + sc->ib.dev->name, 2151 + sc->ib.dev->attrs.max_send_sge, 2152 + sc->ib.dev->attrs.max_recv_sge); 2153 + return -EINVAL; 2154 + } 2023 2155 2024 2156 sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0); 2025 2157 if (IS_ERR(sc->ib.pd)) { ··· 2146 2046 } 2147 2047 2148 2048 sc->ib.send_cq = ib_alloc_cq_any(sc->ib.dev, sc, 2149 - sp->send_credit_target + 2150 - cap->max_rdma_ctxs, 2049 + max_send_wr, 2151 2050 IB_POLL_WORKQUEUE); 2152 2051 if (IS_ERR(sc->ib.send_cq)) { 2153 2052 pr_err("Can't create RDMA send CQ\n"); ··· 2156 2057 } 2157 2058 2158 2059 sc->ib.recv_cq = ib_alloc_cq_any(sc->ib.dev, sc, 2159 - sp->recv_credit_max, 2060 + qp_cap.max_recv_wr, 2160 2061 IB_POLL_WORKQUEUE); 2161 2062 if (IS_ERR(sc->ib.recv_cq)) { 2162 2063 pr_err("Can't create RDMA recv CQ\n"); ··· 2165 2066 goto err; 2166 2067 } 2167 2068 2069 + /* 2070 + * We reset completely here! 2071 + * As the above use was just temporary 2072 + * to calc max_send_wr and rdma_send_wr. 2073 + * 2074 + * rdma_create_qp() will trigger rdma_rw_init_qp() 2075 + * again if max_rdma_ctxs is not 0. 
2076 + */ 2168 2077 memset(&qp_attr, 0, sizeof(qp_attr)); 2169 2078 qp_attr.event_handler = smb_direct_qpair_handler; 2170 2079 qp_attr.qp_context = sc; 2171 - qp_attr.cap = *cap; 2080 + qp_attr.cap = qp_cap; 2172 2081 qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 2173 2082 qp_attr.qp_type = IB_QPT_RC; 2174 2083 qp_attr.send_cq = sc->ib.send_cq; ··· 2191 2084 2192 2085 sc->ib.qp = sc->rdma.cm_id->qp; 2193 2086 sc->rdma.cm_id->event_handler = smb_direct_cm_handler; 2194 - 2195 - pages_per_rw = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE) + 1; 2196 - if (pages_per_rw > sc->ib.dev->attrs.max_sgl_rd) { 2197 - ret = ib_mr_pool_init(sc->ib.qp, &sc->ib.qp->rdma_mrs, 2198 - sc->rw_io.credits.max, IB_MR_TYPE_MEM_REG, 2199 - sc->rw_io.credits.num_pages, 0); 2200 - if (ret) { 2201 - pr_err("failed to init mr pool count %zu pages %zu\n", 2202 - sc->rw_io.credits.max, sc->rw_io.credits.num_pages); 2203 - goto err; 2204 - } 2205 - } 2206 2087 2207 2088 return 0; 2208 2089 err: ··· 2278 2183 2279 2184 static int smb_direct_connect(struct smbdirect_socket *sc) 2280 2185 { 2281 - struct ib_qp_cap qp_cap; 2282 2186 int ret; 2283 2187 2284 - ret = smb_direct_init_params(sc, &qp_cap); 2188 + ret = smb_direct_init_params(sc); 2285 2189 if (ret) { 2286 2190 pr_err("Can't configure RDMA parameters\n"); 2287 2191 return ret; ··· 2292 2198 return ret; 2293 2199 } 2294 2200 2295 - ret = smb_direct_create_qpair(sc, &qp_cap); 2201 + ret = smb_direct_create_qpair(sc); 2296 2202 if (ret) { 2297 2203 pr_err("Can't accept RDMA client: %d\n", ret); 2298 2204 return ret;
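The work-request budget above is plain arithmetic once rdma_rw_mr_factor() is pinned down; a worked example with assumed values (illustrative only, not taken from the patch):

        /* Assume max_read_write_size = 8 MiB and PAGE_SIZE = 4 KiB:
         *   maxpages          = DIV_ROUND_UP(8 MiB, 4 KiB)   = 2048
         * If rdma_rw_mr_factor() reports 8 MRs for 2048 pages:
         *   credits.max       = 8
         *   credits.num_pages = DIV_ROUND_UP(2048, 8)        = 256
         *   credits.max      += 1 (unaligned tail)           = 9
         * On iWARP (MRs required): 1 R/W WR + 2 (REG + INV)  = 3 per ctx,
         *   rdma_send_wr      = 3 * 9                        = 27
         * so the send CQ is sized for send_credit_target + 1 + 27 WRs. */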
+21 -5
fs/sysfs/group.c
··· 498 498 } 499 499 EXPORT_SYMBOL_GPL(compat_only_sysfs_link_entry_to_kobj); 500 500 501 - static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn, 501 + static int sysfs_group_attrs_change_owner(struct kobject *kobj, 502 + struct kernfs_node *grp_kn, 502 503 const struct attribute_group *grp, 503 504 struct iattr *newattrs) 504 505 { 505 506 struct kernfs_node *kn; 506 - int error; 507 + int error, i; 508 + umode_t mode; 507 509 508 510 if (grp->attrs) { 509 511 struct attribute *const *attr; 510 512 511 - for (attr = grp->attrs; *attr; attr++) { 513 + for (i = 0, attr = grp->attrs; *attr; i++, attr++) { 514 + if (grp->is_visible) { 515 + mode = grp->is_visible(kobj, *attr, i); 516 + if (mode & SYSFS_GROUP_INVISIBLE) 517 + break; 518 + if (!mode) 519 + continue; 520 + } 512 521 kn = kernfs_find_and_get(grp_kn, (*attr)->name); 513 522 if (!kn) 514 523 return -ENOENT; ··· 532 523 if (grp->bin_attrs) { 533 524 const struct bin_attribute *const *bin_attr; 534 525 535 - for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++) { 526 + for (i = 0, bin_attr = grp->bin_attrs; *bin_attr; i++, bin_attr++) { 527 + if (grp->is_bin_visible) { 528 + mode = grp->is_bin_visible(kobj, *bin_attr, i); 529 + if (mode & SYSFS_GROUP_INVISIBLE) 530 + break; 531 + if (!mode) 532 + continue; 533 + } 536 534 kn = kernfs_find_and_get(grp_kn, (*bin_attr)->attr.name); 537 535 if (!kn) 538 536 return -ENOENT; ··· 589 573 590 574 error = kernfs_setattr(grp_kn, &newattrs); 591 575 if (!error) 592 - error = sysfs_group_attrs_change_owner(grp_kn, grp, &newattrs); 576 + error = sysfs_group_attrs_change_owner(kobj, grp_kn, grp, &newattrs); 593 577 594 578 kernfs_put(grp_kn); 595 579
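The ownership walk now applies the same visibility rules used when the group was created, so it no longer trips over attributes that were never instantiated. The contract, in brief:

        /* is_visible()/is_bin_visible() return value (sketch):
         *   0                            -> attribute never created: skip it
         *   mode & SYSFS_GROUP_INVISIBLE -> whole group absent: stop walking
         *   any other mode               -> kernfs node exists: chown it */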
+10 -1
fs/xfs/Kconfig
··· 119 119 120 120 See the xfs man page in section 5 for additional information. 121 121 122 + This option is mandatory to support zoned block devices. For these 123 + devices, the realtime subvolume must be backed by a zoned block 124 + device and a regular block device used as the main device (for 125 + metadata). If the zoned block device is a host-managed SMR hard-disk 126 + containing conventional zones at the beginning of its address space, 127 + XFS will use the disk conventional zones as the main device and the 128 + remaining sequential write required zones as the backing storage for 129 + the realtime subvolume. 130 + 122 131 If unsure, say N. 123 132 124 133 config XFS_DRAIN_INTENTS ··· 165 156 bool "XFS online metadata check usage data collection" 166 157 default y 167 158 depends on XFS_ONLINE_SCRUB 168 - select DEBUG_FS 159 + depends on DEBUG_FS 169 160 help 170 161 If you say Y here, the kernel will gather usage data about 171 162 the online metadata check subsystem. This includes the number
+31 -3
fs/xfs/scrub/nlinks.c
··· 376 376 return error; 377 377 } 378 378 379 + static uint 380 + xchk_nlinks_ilock_dir( 381 + struct xfs_inode *ip) 382 + { 383 + uint lock_mode = XFS_ILOCK_SHARED; 384 + 385 + /* 386 + * We're going to scan the directory entries, so we must be ready to 387 + * pull the data fork mappings into memory if they aren't already. 388 + */ 389 + if (xfs_need_iread_extents(&ip->i_df)) 390 + lock_mode = XFS_ILOCK_EXCL; 391 + 392 + /* 393 + * We're going to scan the parent pointers, so we must be ready to 394 + * pull the attr fork mappings into memory if they aren't already. 395 + */ 396 + if (xfs_has_parent(ip->i_mount) && xfs_inode_has_attr_fork(ip) && 397 + xfs_need_iread_extents(&ip->i_af)) 398 + lock_mode = XFS_ILOCK_EXCL; 399 + 400 + /* 401 + * Take the IOLOCK so that other threads cannot start a directory 402 + * update while we're scanning. 403 + */ 404 + lock_mode |= XFS_IOLOCK_SHARED; 405 + xfs_ilock(ip, lock_mode); 406 + return lock_mode; 407 + } 408 + 379 409 /* Walk a directory to bump the observed link counts of the children. */ 380 410 STATIC int 381 411 xchk_nlinks_collect_dir( ··· 424 394 return 0; 425 395 426 396 /* Prevent anyone from changing this directory while we walk it. */ 427 - xfs_ilock(dp, XFS_IOLOCK_SHARED); 428 - lock_mode = xfs_ilock_data_map_shared(dp); 397 + lock_mode = xchk_nlinks_ilock_dir(dp); 429 398 430 399 /* 431 400 * The dotdot entry of an unlinked directory still points to the last ··· 481 452 xchk_iscan_abort(&xnc->collect_iscan); 482 453 out_unlock: 483 454 xfs_iunlock(dp, lock_mode); 484 - xfs_iunlock(dp, XFS_IOLOCK_SHARED); 485 455 return error; 486 456 } 487 457
+1 -1
fs/xfs/xfs_buf.c
··· 1751 1751 const char *descr) 1752 1752 { 1753 1753 /* The maximum size of the buftarg is only known once the sb is read. */ 1754 - btp->bt_nr_sectors = (xfs_daddr_t)-1; 1754 + btp->bt_nr_sectors = XFS_BUF_DADDR_MAX; 1755 1755 1756 1756 /* Set up device logical sector size mask */ 1757 1757 btp->bt_logical_sectorsize = logical_sectorsize;
+1
fs/xfs/xfs_buf.h
··· 22 22 */ 23 23 struct xfs_buf; 24 24 25 + #define XFS_BUF_DADDR_MAX ((xfs_daddr_t) S64_MAX) 25 26 #define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) 26 27 27 28 #define XBF_READ (1u << 0) /* buffer intended for reading from device */
-1
fs/xfs/xfs_mount.h
··· 236 236 bool m_update_sb; /* sb needs update in mount */ 237 237 unsigned int m_max_open_zones; 238 238 unsigned int m_zonegc_low_space; 239 - struct xfs_mru_cache *m_zone_cache; /* Inode to open zone cache */ 240 239 241 240 /* max_atomic_write mount option value */ 242 241 unsigned long long m_awu_max_bytes;
+42 -11
fs/xfs/xfs_super.c
··· 102 102 * Table driven mount option parser. 103 103 */ 104 104 enum { 105 - Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, 105 + Op_deprecated, Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, 106 106 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid, 107 107 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups, 108 108 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, ··· 114 114 Opt_lifetime, Opt_nolifetime, Opt_max_atomic_write, 115 115 }; 116 116 117 + #define fsparam_dead(NAME) \ 118 + __fsparam(NULL, (NAME), Op_deprecated, fs_param_deprecated, NULL) 119 + 117 120 static const struct fs_parameter_spec xfs_fs_parameters[] = { 121 + /* 122 + * These mount options were supposed to be deprecated in September 2025 123 + * but the deprecation warning was buggy, so not all users were 124 + * notified. The deprecation is now obnoxiously loud and postponed to 125 + * September 2030. 126 + */ 127 + fsparam_dead("attr2"), 128 + fsparam_dead("noattr2"), 129 + fsparam_dead("ikeep"), 130 + fsparam_dead("noikeep"), 131 + 118 132 fsparam_u32("logbufs", Opt_logbufs), 119 133 fsparam_string("logbsize", Opt_logbsize), 120 134 fsparam_string("logdev", Opt_logdev), ··· 800 786 801 787 truncate_inode_pages_final(&inode->i_data); 802 788 clear_inode(inode); 789 + 790 + if (IS_ENABLED(CONFIG_XFS_RT) && 791 + S_ISREG(inode->i_mode) && inode->i_private) { 792 + xfs_open_zone_put(inode->i_private); 793 + inode->i_private = NULL; 794 + } 803 795 } 804 796 805 797 static void ··· 1393 1373 static inline void 1394 1374 xfs_fs_warn_deprecated( 1395 1375 struct fs_context *fc, 1396 - struct fs_parameter *param, 1397 - uint64_t flag, 1398 - bool value) 1376 + struct fs_parameter *param) 1399 1377 { 1400 - /* Don't print the warning if reconfiguring and current mount point 1401 - * already had the flag set 1378 + /* 1379 + * Always warn about someone passing in a deprecated mount option. 1380 + * Previously we wouldn't print the warning if we were reconfiguring 1381 + * and current mount point already had the flag set, but that was not 1382 + * the right thing to do. 1383 + * 1384 + * Many distributions mount the root filesystem with no options in the 1385 + * initramfs and rely on mount -a to remount the root fs with the 1386 + * options in fstab. However, the old behavior meant that there would 1387 + * never be a warning about deprecated mount options for the root fs in 1388 + * /etc/fstab. On a single-fs system, that means no warning at all. 1389 + * 1390 + * Compounding this problem are distribution scripts that copy 1391 + * /proc/mounts to fstab, which means that we can't remove mount 1392 + * options unless we're 100% sure they have only ever been advertised 1393 + * in /proc/mounts in response to explicitly provided mount options. 
1402 1394 */ 1403 - if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && 1404 - !!(XFS_M(fc->root->d_sb)->m_features & flag) == value) 1405 - return; 1406 1395 xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); 1407 1396 } 1408 1397 ··· 1437 1408 return opt; 1438 1409 1439 1410 switch (opt) { 1411 + case Op_deprecated: 1412 + xfs_fs_warn_deprecated(fc, param); 1413 + return 0; 1440 1414 case Opt_logbufs: 1441 1415 parsing_mp->m_logbufs = result.uint_32; 1442 1416 return 0; ··· 1560 1528 xfs_mount_set_dax_mode(parsing_mp, result.uint_32); 1561 1529 return 0; 1562 1530 #endif 1563 - /* Following mount options will be removed in September 2025 */ 1564 1531 case Opt_max_open_zones: 1565 1532 parsing_mp->m_max_open_zones = result.uint_32; 1566 1533 return 0; ··· 2252 2221 struct xfs_mount *mp; 2253 2222 int i; 2254 2223 2255 - mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL); 2224 + mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); 2256 2225 if (!mp) 2257 2226 return -ENOMEM; 2258 2227
+60 -88
fs/xfs/xfs_zone_alloc.c
··· 26 26 #include "xfs_trace.h" 27 27 #include "xfs_mru_cache.h" 28 28 29 + static void 30 + xfs_open_zone_free_rcu( 31 + struct callback_head *cb) 32 + { 33 + struct xfs_open_zone *oz = container_of(cb, typeof(*oz), oz_rcu); 34 + 35 + xfs_rtgroup_rele(oz->oz_rtg); 36 + kfree(oz); 37 + } 38 + 29 39 void 30 40 xfs_open_zone_put( 31 41 struct xfs_open_zone *oz) 32 42 { 33 - if (atomic_dec_and_test(&oz->oz_ref)) { 34 - xfs_rtgroup_rele(oz->oz_rtg); 35 - kfree(oz); 36 - } 43 + if (atomic_dec_and_test(&oz->oz_ref)) 44 + call_rcu(&oz->oz_rcu, xfs_open_zone_free_rcu); 37 45 } 38 46 39 47 static inline uint32_t ··· 622 614 } 623 615 624 616 /* 625 - * Try to pack inodes that are written back after they were closed tight instead 626 - * of trying to open new zones for them or spread them to the least recently 627 - * used zone. This optimizes the data layout for workloads that untar or copy 628 - * a lot of small files. Right now this does not separate multiple such 617 + * Try to tightly pack small files that are written back after they were closed 618 + * instead of trying to open new zones for them or spread them to the least 619 + * recently used zone. This optimizes the data layout for workloads that untar 620 + * or copy a lot of small files. Right now this does not separate multiple such 629 621 * streams. 630 622 */ 631 623 static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip) 632 624 { 625 + struct xfs_mount *mp = ip->i_mount; 626 + size_t zone_capacity = 627 + XFS_FSB_TO_B(mp, mp->m_groups[XG_TYPE_RTG].blocks); 628 + 629 + /* 630 + * Do not tightly pack files that already use a full zone, to avoid 631 + * fragmentation. 632 + */ 633 + if (i_size_read(VFS_I(ip)) >= zone_capacity) 634 + return false; 635 + 633 636 return !inode_is_open_for_write(VFS_I(ip)) && 634 637 !(ip->i_diflags & XFS_DIFLAG_APPEND); 635 638 } ··· 765 746 } 766 747 767 748 /* 768 - * Cache the last zone written to for an inode so that it is considered first 769 - * for subsequent writes. 770 - */ 771 - struct xfs_zone_cache_item { 772 - struct xfs_mru_cache_elem mru; 773 - struct xfs_open_zone *oz; 774 - }; 775 - 776 - static inline struct xfs_zone_cache_item * 777 - xfs_zone_cache_item(struct xfs_mru_cache_elem *mru) 778 - { 779 - return container_of(mru, struct xfs_zone_cache_item, mru); 780 - } 781 - 782 - static void 783 - xfs_zone_cache_free_func( 784 - void *data, 785 - struct xfs_mru_cache_elem *mru) 786 - { 787 - struct xfs_zone_cache_item *item = xfs_zone_cache_item(mru); 788 - 789 - xfs_open_zone_put(item->oz); 790 - kfree(item); 791 - } 792 - 793 - /* 794 749 * Check if we have a cached last open zone available for the inode and 795 750 * if yes return a reference to it. 796 751 */ 797 752 static struct xfs_open_zone * 798 - xfs_cached_zone( 799 - struct xfs_mount *mp, 800 - struct xfs_inode *ip) 753 + xfs_get_cached_zone( 754 + struct xfs_inode *ip) 801 755 { 802 - struct xfs_mru_cache_elem *mru; 803 - struct xfs_open_zone *oz; 756 + struct xfs_open_zone *oz; 804 757 805 - mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino); 806 - if (!mru) 807 - return NULL; 808 - oz = xfs_zone_cache_item(mru)->oz; 758 + rcu_read_lock(); 759 + oz = VFS_I(ip)->i_private; 809 760 if (oz) { 810 761 /* 811 762 * GC only steals open zones at mount time, so no GC zones 812 763 * should end up in the cache. 
813 764 */ 814 765 ASSERT(!oz->oz_is_gc); 815 - ASSERT(atomic_read(&oz->oz_ref) > 0); 816 - atomic_inc(&oz->oz_ref); 766 + if (!atomic_inc_not_zero(&oz->oz_ref)) 767 + oz = NULL; 817 - } 768 + } 818 - xfs_mru_cache_done(mp->m_zone_cache); 769 + rcu_read_unlock(); 770 + 819 771 return oz; 820 772 } 821 773 822 774 /* 823 - * Update the last used zone cache for a given inode. 775 + * Stash our zone in the inode so that it is reused for future allocations. 824 776 * 825 - * The caller must have a reference on the open zone. 777 + * The open_zone structure will be pinned until either the inode is freed or 778 + * until the cached open zone is replaced with a different one because the 779 + * current one was full when we tried to use it. This means we keep any 780 + * open zone around forever as long as any inode that used it for the last 781 + * write is cached, which slightly increases the memory use of cached inodes 782 + * that were ever written to, but significantly simplifies the cached zone 783 + * lookup. Because the open_zone is clearly marked as full when all data 784 + * in the underlying RTG was written, the caching is always safe. 826 785 */ 827 786 static void 828 - xfs_zone_cache_create_association( 829 - struct xfs_inode *ip, 830 - struct xfs_open_zone *oz) 787 + xfs_set_cached_zone( 788 + struct xfs_inode *ip, 789 + struct xfs_open_zone *oz) 831 790 { 832 - struct xfs_mount *mp = ip->i_mount; 833 - struct xfs_zone_cache_item *item = NULL; 834 - struct xfs_mru_cache_elem *mru; 791 + struct xfs_open_zone *old_oz; 835 792 836 - ASSERT(atomic_read(&oz->oz_ref) > 0); 837 793 atomic_inc(&oz->oz_ref); 838 - 839 - mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino); 840 - if (mru) { 841 - /* 842 - * If we have an association already, update it to point to the 843 - * new zone. 844 - */ 845 - item = xfs_zone_cache_item(mru); 846 - xfs_open_zone_put(item->oz); 847 - item->oz = oz; 848 - xfs_mru_cache_done(mp->m_zone_cache); 849 - return; 850 - } 851 - 852 - item = kmalloc(sizeof(*item), GFP_KERNEL); 853 - if (!item) { 854 - xfs_open_zone_put(oz); 855 - return; 856 - } 857 - item->oz = oz; 858 - xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru); 794 + old_oz = xchg(&VFS_I(ip)->i_private, oz); 795 + if (old_oz) 796 + xfs_open_zone_put(old_oz); 859 797 } 860 798 861 799 static void ··· 856 880 * the inode is still associated with a zone and use that if so. 857 881 */ 858 882 if (!*oz) 859 - *oz = xfs_cached_zone(mp, ip); 883 + *oz = xfs_get_cached_zone(ip); 860 884 861 885 if (!*oz) { 862 886 select_zone: 863 887 *oz = xfs_select_zone(mp, write_hint, pack_tight); 864 888 if (!*oz) 865 889 goto out_error; 866 - 867 - xfs_zone_cache_create_association(ip, *oz); 890 + xfs_set_cached_zone(ip, *oz); 868 891 } 869 892 870 893 alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size), ··· 941 966 xfs_open_zone_put(oz); 942 967 } 943 968 spin_unlock(&zi->zi_open_zones_lock); 969 + 970 + /* 971 + * Wait for all open zones to be freed so that they drop the group 972 + * references: 973 + */ 974 + rcu_barrier(); 944 975 945 976 struct xfs_init_zones { ··· 1260 1279 error = xfs_zone_gc_mount(mp); 1261 1280 if (error) 1262 1281 goto out_free_zone_info; 1263 - 1264 - /* 1265 - * Set up a mru cache to track inode to open zone for data placement 1266 - * purposes. The magic values for group count and life time is the 1267 - * same as the defaults for file streams, which seems sane enough. 
1268 - */ 1269 - xfs_mru_cache_create(&mp->m_zone_cache, mp, 1270 - 5000, 10, xfs_zone_cache_free_func); 1271 1282 return 0; 1272 1283 1273 1284 out_free_zone_info: ··· 1273 1300 { 1274 1301 xfs_zone_gc_unmount(mp); 1275 1302 xfs_free_zone_info(mp->m_zone_info); 1276 - xfs_mru_cache_destroy(mp->m_zone_cache); 1277 1303 }
+46 -35
fs/xfs/xfs_zone_gc.c
··· 491 491 struct xfs_rtgroup *victim_rtg = NULL; 492 492 unsigned int bucket; 493 493 494 - if (xfs_is_shutdown(mp)) 495 - return false; 496 - 497 - if (iter->victim_rtg) 498 - return true; 499 - 500 - /* 501 - * Don't start new work if we are asked to stop or park. 502 - */ 503 - if (kthread_should_stop() || kthread_should_park()) 504 - return false; 505 - 506 - if (!xfs_zoned_need_gc(mp)) 507 - return false; 508 - 509 494 spin_lock(&zi->zi_used_buckets_lock); 510 495 for (bucket = 0; bucket < XFS_ZONE_USED_BUCKETS; bucket++) { 511 496 victim_rtg = xfs_zone_gc_pick_victim_from(mp, bucket); ··· 960 975 } while (next); 961 976 } 962 977 978 + static bool 979 + xfs_zone_gc_should_start_new_work( 980 + struct xfs_zone_gc_data *data) 981 + { 982 + if (xfs_is_shutdown(data->mp)) 983 + return false; 984 + if (!xfs_zone_gc_space_available(data)) 985 + return false; 986 + 987 + if (!data->iter.victim_rtg) { 988 + if (kthread_should_stop() || kthread_should_park()) 989 + return false; 990 + if (!xfs_zoned_need_gc(data->mp)) 991 + return false; 992 + if (!xfs_zone_gc_select_victim(data)) 993 + return false; 994 + } 995 + 996 + return true; 997 + } 998 + 963 999 /* 964 1000 * Handle the work to read and write data for GC and to reset the zones, 965 1001 * including handling all completions. ··· 988 982 * Note that the order of the chunks is preserved so that we don't undo the 989 983 * optimal order established by xfs_zone_gc_query(). 990 984 */ 991 - static bool 985 + static void 992 986 xfs_zone_gc_handle_work( 993 987 struct xfs_zone_gc_data *data) 994 988 { ··· 1002 996 zi->zi_reset_list = NULL; 1003 997 spin_unlock(&zi->zi_reset_list_lock); 1004 998 1005 - if (!xfs_zone_gc_select_victim(data) || 1006 - !xfs_zone_gc_space_available(data)) { 1007 - if (list_empty(&data->reading) && 1008 - list_empty(&data->writing) && 1009 - list_empty(&data->resetting) && 1010 - !reset_list) 1011 - return false; 1012 - } 1013 - 1014 - __set_current_state(TASK_RUNNING); 1015 - try_to_freeze(); 1016 - 1017 - if (reset_list) 999 + if (reset_list) { 1000 + set_current_state(TASK_RUNNING); 1018 1001 xfs_zone_gc_reset_zones(data, reset_list); 1002 + } 1019 1003 1020 1004 list_for_each_entry_safe(chunk, next, &data->resetting, entry) { 1021 1005 if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) 1022 1006 break; 1007 + set_current_state(TASK_RUNNING); 1023 1008 xfs_zone_gc_finish_reset(chunk); 1024 1009 } 1025 1010 1026 1011 list_for_each_entry_safe(chunk, next, &data->writing, entry) { 1027 1012 if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) 1028 1013 break; 1014 + set_current_state(TASK_RUNNING); 1029 1015 xfs_zone_gc_finish_chunk(chunk); 1030 1016 } 1031 1017 ··· 1025 1027 list_for_each_entry_safe(chunk, next, &data->reading, entry) { 1026 1028 if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) 1027 1029 break; 1030 + set_current_state(TASK_RUNNING); 1028 1031 xfs_zone_gc_write_chunk(chunk); 1029 1032 } 1030 1033 blk_finish_plug(&plug); 1031 1034 1032 - blk_start_plug(&plug); 1033 - while (xfs_zone_gc_start_chunk(data)) 1034 - ; 1035 - blk_finish_plug(&plug); 1036 - return true; 1035 + if (xfs_zone_gc_should_start_new_work(data)) { 1036 + set_current_state(TASK_RUNNING); 1037 + blk_start_plug(&plug); 1038 + while (xfs_zone_gc_start_chunk(data)) 1039 + ; 1040 + blk_finish_plug(&plug); 1041 + } 1037 1042 } 1038 1043 1039 1044 /* ··· 1060 1059 for (;;) { 1061 1060 set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE); 1062 1061 xfs_set_zonegc_running(mp); 1063 - if (xfs_zone_gc_handle_work(data)) 1062 + 1063 + 
xfs_zone_gc_handle_work(data); 1064 + 1065 + /* 1066 + * Only sleep if nothing set the state to running. Else check for 1067 + * work again as someone might have queued up more work and woken 1068 + * us in the meantime. 1069 + */ 1070 + if (get_current_state() == TASK_RUNNING) { 1071 + try_to_freeze(); 1064 1072 continue; 1073 + } 1065 1074 1066 1075 if (list_empty(&data->reading) && 1067 1076 list_empty(&data->writing) &&
+2
fs/xfs/xfs_zone_priv.h
··· 44 44 * the life time of an open zone. 45 45 */ 46 46 struct xfs_rtgroup *oz_rtg; 47 + 48 + struct rcu_head oz_rcu; 47 49 }; 48 50 49 51 /*
+19 -2
include/linux/arm_ffa.h
··· 338 338 * an `struct ffa_mem_region_addr_range`. 339 339 */ 340 340 u32 composite_off; 341 + u8 impdef_val[16]; 341 342 u64 reserved; 342 343 }; 343 344 ··· 418 417 #define CONSTITUENTS_OFFSET(x) \ 419 418 (offsetof(struct ffa_composite_mem_region, constituents[x])) 420 419 420 + #define FFA_EMAD_HAS_IMPDEF_FIELD(version) ((version) >= FFA_VERSION_1_2) 421 + #define FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version) ((version) > FFA_VERSION_1_0) 422 + 423 + static inline u32 ffa_emad_size_get(u32 ffa_version) 424 + { 425 + u32 sz; 426 + struct ffa_mem_region_attributes *ep_mem_access; 427 + 428 + if (FFA_EMAD_HAS_IMPDEF_FIELD(ffa_version)) 429 + sz = sizeof(*ep_mem_access); 430 + else 431 + sz = sizeof(*ep_mem_access) - sizeof(ep_mem_access->impdef_val); 432 + 433 + return sz; 434 + } 435 + 421 436 static inline u32 422 437 ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version) 423 438 { 424 - u32 offset = count * sizeof(struct ffa_mem_region_attributes); 439 + u32 offset = count * ffa_emad_size_get(ffa_version); 425 440 /* 426 441 * Earlier to v1.1, the endpoint memory descriptor array started at 427 442 * offset 32(i.e. offset of ep_mem_offset in the current structure) 428 443 */ 429 - if (ffa_version <= FFA_VERSION_1_0) 444 + if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(ffa_version)) 430 445 offset += offsetof(struct ffa_mem_region, ep_mem_offset); 431 446 else 432 447 offset += sizeof(struct ffa_mem_region);
+1 -1
include/linux/cgroup-defs.h
··· 452 452 int nr_frozen_tasks; 453 453 454 454 /* Freeze time data consistency protection */ 455 - seqcount_t freeze_seq; 455 + seqcount_spinlock_t freeze_seq; 456 456 457 457 /* 458 458 * Most recent time the cgroup was requested to freeze.
+4 -3
include/linux/exportfs.h
··· 320 320 static inline bool exportfs_can_encode_fh(const struct export_operations *nop, 321 321 int fh_flags) 322 322 { 323 - if (!nop) 324 - return false; 325 - 326 323 /* 327 324 * If a non-decodeable file handle was requested, we only need to make 328 325 * sure that filesystem did not opt-out of encoding fid. 329 326 */ 330 327 if (fh_flags & EXPORT_FH_FID) 331 328 return exportfs_can_encode_fid(nop); 329 + 330 + /* Normal file handles cannot be created without export ops */ 331 + if (!nop) 332 + return false; 332 333 333 334 /* 334 335 * If a connectable file handle was requested, we need to make sure that
+5
include/linux/gpio/regmap.h
··· 38 38 * offset to a register/bitmask pair. If not 39 39 * given the default gpio_regmap_simple_xlate() 40 40 * is used. 41 + * @fixed_direction_output: 42 + * (Optional) Bitmap representing the fixed direction of 43 + * the GPIO lines. Useful when there are GPIO lines with a 44 + * fixed direction mixed together in the same register. 41 45 * @drvdata: (Optional) Pointer to driver specific data which is 42 46 * not used by gpio-remap but is provided "as is" to the 43 47 * driver callback(s). ··· 89 85 int reg_stride; 90 86 int ngpio_per_reg; 91 87 struct irq_domain *irq_domain; 88 + unsigned long *fixed_direction_output; 92 89 93 90 #ifdef CONFIG_REGMAP_IRQ 94 91 struct regmap_irq_chip *regmap_irq_chip;
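As a sketch of how the new field is meant to be consumed (hypothetical chip with invented register offsets and foo_* names; only gpio_regmap_config, devm_gpio_regmap_register() and the bitmap helpers are real kernel APIs): lines with a hard-wired direction are flagged in a bitmap that the core then treats as fixed outputs, while the remaining lines keep using the direction registers.

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/regmap.h>
#include <linux/regmap.h>

/* Hypothetical chip: eight lines share one register bank, but lines 0-1
 * are wired as outputs only while lines 2-7 have a direction register. */
static DECLARE_BITMAP(foo_fixed_out, 8);

static int foo_gpio_setup(struct device *dev, struct regmap *map)
{
	struct gpio_regmap_config cfg = {
		.parent = dev,
		.regmap = map,
		.ngpio = 8,
		.reg_dat_base = 0x00,	/* invented register layout */
		.reg_set_base = 0x04,
		.reg_dir_out_base = 0x08,
	};

	bitmap_set(foo_fixed_out, 0, 2);	/* lines 0-1: fixed outputs */
	cfg.fixed_direction_output = foo_fixed_out;

	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &cfg));
}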
+5 -3
include/linux/hung_task.h
··· 20 20 * always zero. So we can use these bits to encode the specific blocking 21 21 * type. 22 22 * 23 + * Note that on architectures where this is not guaranteed, or for any 24 + * unaligned lock, this tracking mechanism is silently skipped for that 25 + * lock. 26 + * 23 27 * Type encoding: 24 28 * 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX) 25 29 * 01 - Blocked on semaphore (BLOCKER_TYPE_SEM) ··· 49 45 * If the lock pointer matches the BLOCKER_TYPE_MASK, return 50 46 * without writing anything. 51 47 */ 52 - if (WARN_ON_ONCE(lock_ptr & BLOCKER_TYPE_MASK)) 48 + if (lock_ptr & BLOCKER_TYPE_MASK) 53 49 return; 54 50 55 51 WRITE_ONCE(current->blocker, lock_ptr | type); ··· 57 53 58 54 static inline void hung_task_clear_blocker(void) 59 55 { 60 - WARN_ON_ONCE(!READ_ONCE(current->blocker)); 61 - 62 56 WRITE_ONCE(current->blocker, 0UL); 63 57 } 64 58
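A minimal standalone illustration of the encoding described above (hypothetical function, not a real call site; the actual callers live in the locking primitives). With at least 4-byte alignment the two low bits of the lock pointer are zero, so the type tag can be ORed in and masked back out; after this change a misaligned pointer is simply not tracked instead of triggering a WARN.

#include <linux/hung_task.h>
#include <linux/semaphore.h>

static void foo_blocker_encoding_demo(struct semaphore *sem)
{
	/* Tag the pointer with its blocker type in the two low bits... */
	unsigned long encoded = (unsigned long)sem | BLOCKER_TYPE_SEM;
	/* ...and recover both the pointer and the type later. */
	struct semaphore *lock =
		(struct semaphore *)(encoded & ~BLOCKER_TYPE_MASK);
	unsigned long type = encoded & BLOCKER_TYPE_MASK;

	WARN_ON(lock != sem || type != BLOCKER_TYPE_SEM);
}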
+1 -1
include/linux/misc_cgroup.h
··· 19 19 MISC_CG_RES_SEV_ES, 20 20 #endif 21 21 #ifdef CONFIG_INTEL_TDX_HOST 22 - /* Intel TDX HKIDs resource */ 22 + /** @MISC_CG_RES_TDX: Intel TDX HKIDs resource */ 23 23 MISC_CG_RES_TDX, 24 24 #endif 25 25 /** @MISC_CG_RES_TYPES: count of enum misc_res_type constants */
+3 -1
include/linux/mlx5/mlx5_ifc.h
··· 10833 10833 u8 port_access_reg_cap_mask_127_to_96[0x20]; 10834 10834 u8 port_access_reg_cap_mask_95_to_64[0x20]; 10835 10835 10836 - u8 port_access_reg_cap_mask_63_to_36[0x1c]; 10836 + u8 port_access_reg_cap_mask_63[0x1]; 10837 + u8 pphcr[0x1]; 10838 + u8 port_access_reg_cap_mask_61_to_36[0x1a]; 10837 10839 u8 pplm[0x1]; 10838 10840 u8 port_access_reg_cap_mask_34_to_32[0x3]; 10839 10841
+4 -4
include/linux/pm_runtime.h
··· 629 629 * device. 630 630 */ 631 631 DEFINE_GUARD_COND(pm_runtime_active, _try, 632 - pm_runtime_get_active(_T, RPM_TRANSPARENT)) 632 + pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0) 633 633 DEFINE_GUARD_COND(pm_runtime_active, _try_enabled, 634 - pm_runtime_resume_and_get(_T)) 634 + pm_runtime_resume_and_get(_T), _RET == 0) 635 635 DEFINE_GUARD_COND(pm_runtime_active_auto, _try, 636 - pm_runtime_get_active(_T, RPM_TRANSPARENT)) 636 + pm_runtime_get_active(_T, RPM_TRANSPARENT), _RET == 0) 637 637 DEFINE_GUARD_COND(pm_runtime_active_auto, _try_enabled, 638 - pm_runtime_resume_and_get(_T)) 638 + pm_runtime_resume_and_get(_T), _RET == 0) 639 639 640 640 /** 641 641 * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
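For context, a hedged sketch of how one of these conditional guards is consumed (hypothetical driver, invented foo_* names and register offset). The explicit _RET == 0 condition matters because pm_runtime_get_active() and pm_runtime_resume_and_get() report success as 0 and failure as a negative errno, so without it the guard would take the wrong branch.

#include <linux/cleanup.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>

struct foo_priv {
	struct device *dev;
	void __iomem *base;
};

static int foo_read_status(struct foo_priv *priv)
{
	/* Body runs only if the device was resumed (_RET == 0); otherwise
	 * the failure statement 'return -ENXIO' executes. The usage count
	 * taken by the guard is dropped when the scope is left. */
	scoped_cond_guard(pm_runtime_active_try_enabled, return -ENXIO,
			  priv->dev)
		return readl(priv->base + 0x10);	/* invented offset */

	return -ENXIO;	/* not reached; keeps the compiler satisfied */
}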
+3
include/linux/skbuff.h
··· 4204 4204 struct sk_buff_head *sk_queue, 4205 4205 unsigned int flags, int *off, int *err); 4206 4206 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err); 4207 + __poll_t datagram_poll_queue(struct file *file, struct socket *sock, 4208 + struct poll_table_struct *wait, 4209 + struct sk_buff_head *rcv_queue); 4207 4210 __poll_t datagram_poll(struct file *file, struct socket *sock, 4208 4211 struct poll_table_struct *wait); 4209 4212 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
+4
include/linux/virtio_net.h
··· 401 401 if (!tnl_hdr_negotiated) 402 402 return -EINVAL; 403 403 404 + vhdr->hash_hdr.hash_value = 0; 405 + vhdr->hash_hdr.hash_report = 0; 406 + vhdr->hash_hdr.padding = 0; 407 + 404 408 /* Let the basic parsing deal with plain GSO features. */ 405 409 skb_shinfo(skb)->gso_type &= ~tnl_gso_type; 406 410 ret = virtio_net_hdr_from_skb(skb, hdr, true, false, vlan_hlen);
+15
include/uapi/drm/xe_drm.h
··· 1013 1013 * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address 1014 1014 * mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO 1015 1015 * handle MBZ, and the BO offset MBZ. 1016 + * - %DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET - Can be used in combination with 1017 + * %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR to reset madvises when the underlying 1018 + * CPU address space range is unmapped (typically with munmap(2) or brk(2)). 1019 + * The madvise values set with &DRM_IOCTL_XE_MADVISE are reset to the values 1020 + * that were present immediately after the &DRM_IOCTL_XE_VM_BIND. 1021 + * The reset GPU virtual address range is the intersection of the range bound 1022 + * using &DRM_IOCTL_XE_VM_BIND and the virtual CPU address space range 1023 + * unmapped. 1024 + * This functionality is present to mimic the behaviour of CPU address space 1025 + * madvises set using madvise(2), which are typically reset on unmap. 1026 + * Note: free(3) may or may not call munmap(2) and/or brk(2), and may thus 1027 + * not invoke autoreset. Neither will stack variables going out of scope. 1028 + * Therefore it's recommended to always explicitly reset the madvises when 1029 + * freeing the memory backing a region used in a &DRM_IOCTL_XE_MADVISE call. 1016 1030 * 1017 1031 * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be: 1018 1032 * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in ··· 1133 1119 #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3) 1134 1120 #define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4) 1135 1121 #define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5) 1122 + #define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET (1 << 6) 1136 1123 /** @flags: Bind flags */ 1137 1124 __u32 flags; 1138 1125
+4 -4
io_uring/fdinfo.c
··· 59 59 { 60 60 struct io_overflow_cqe *ocqe; 61 61 struct io_rings *r = ctx->rings; 62 - struct rusage sq_usage; 63 62 unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1; 64 63 unsigned int sq_head = READ_ONCE(r->sq.head); 65 64 unsigned int sq_tail = READ_ONCE(r->sq.tail); ··· 151 152 * thread termination. 152 153 */ 153 154 if (tsk) { 155 + u64 usec; 156 + 154 157 get_task_struct(tsk); 155 158 rcu_read_unlock(); 156 - getrusage(tsk, RUSAGE_SELF, &sq_usage); 159 + usec = io_sq_cpu_usec(tsk); 157 160 put_task_struct(tsk); 158 161 sq_pid = sq->task_pid; 159 162 sq_cpu = sq->sq_cpu; 160 - sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000 161 - + sq_usage.ru_stime.tv_usec); 163 + sq_total_time = usec; 162 164 sq_work_time = sq->work_time; 163 165 } else { 164 166 rcu_read_unlock();
+1 -1
io_uring/filetable.c
··· 57 57 58 58 static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file, 59 59 u32 slot_index) 60 - __must_hold(&req->ctx->uring_lock) 60 + __must_hold(&ctx->uring_lock) 61 61 { 62 62 struct io_rsrc_node *node; 63 63
+1 -1
io_uring/io_uring.c
··· 879 879 } 880 880 881 881 static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe, 882 - struct io_big_cqe *big_cqe) 882 + struct io_big_cqe *big_cqe) 883 883 { 884 884 struct io_overflow_cqe *ocqe; 885 885
+22 -11
io_uring/kbuf.c
··· 155 155 return 1; 156 156 } 157 157 158 + static bool io_should_commit(struct io_kiocb *req, unsigned int issue_flags) 159 + { 160 + /* 161 + * If we came in unlocked, we have no choice but to consume the 162 + * buffer here, otherwise nothing ensures that the buffer won't 163 + * get used by others. This does mean it'll be pinned until the 164 + * IO completes; coming in unlocked means we're being called from 165 + * io-wq context and there may be further retries in async hybrid 166 + * mode. For the locked case, the caller must call commit when 167 + * the transfer completes (or if we get -EAGAIN and must poll or 168 + * retry). 169 + */ 170 + if (issue_flags & IO_URING_F_UNLOCKED) 171 + return true; 172 + 173 + /* uring_cmd commits kbuf upfront, no need to auto-commit */ 174 + if (!io_file_can_poll(req) && req->opcode != IORING_OP_URING_CMD) 175 + return true; 176 + return false; 177 + } 178 + 158 179 static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len, 159 180 struct io_buffer_list *bl, 160 181 unsigned int issue_flags) ··· 202 181 sel.buf_list = bl; 203 182 sel.addr = u64_to_user_ptr(buf->addr); 204 183 205 - if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) { 206 - /* 207 - * If we came in unlocked, we have no choice but to consume the 208 - * buffer here, otherwise nothing ensures that the buffer won't 209 - * get used by others. This does mean it'll be pinned until the 210 - * IO completes, coming in unlocked means we're being called from 211 - * io-wq context and there may be further retries in async hybrid 212 - * mode. For the locked case, the caller must call commit when 213 - * the transfer completes (or if we get -EAGAIN and must poll of 214 - * retry). 215 - */ 184 + if (io_should_commit(req, issue_flags)) { 216 185 io_kbuf_commit(req, sel.buf_list, *len, 1); 217 186 sel.buf_list = NULL; 218 187 }
+1 -1
io_uring/net.c
··· 383 383 return 0; 384 384 385 385 if (sr->flags & IORING_SEND_VECTORIZED) 386 - return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE); 386 + return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE); 387 387 388 388 return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter); 389 389 }
+45 -20
io_uring/sqpoll.c
··· 11 11 #include <linux/audit.h> 12 12 #include <linux/security.h> 13 13 #include <linux/cpuset.h> 14 + #include <linux/sched/cputime.h> 14 15 #include <linux/io_uring.h> 15 16 16 17 #include <uapi/linux/io_uring.h> ··· 170 169 return READ_ONCE(sqd->state); 171 170 } 172 171 173 - static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) 172 + struct io_sq_time { 173 + bool started; 174 + u64 usec; 175 + }; 176 + 177 + u64 io_sq_cpu_usec(struct task_struct *tsk) 178 + { 179 + u64 utime, stime; 180 + 181 + task_cputime_adjusted(tsk, &utime, &stime); 182 + do_div(stime, 1000); 183 + return stime; 184 + } 185 + 186 + static void io_sq_update_worktime(struct io_sq_data *sqd, struct io_sq_time *ist) 187 + { 188 + if (!ist->started) 189 + return; 190 + ist->started = false; 191 + sqd->work_time += io_sq_cpu_usec(current) - ist->usec; 192 + } 193 + 194 + static void io_sq_start_worktime(struct io_sq_time *ist) 195 + { 196 + if (ist->started) 197 + return; 198 + ist->started = true; 199 + ist->usec = io_sq_cpu_usec(current); 200 + } 201 + 202 + static int __io_sq_thread(struct io_ring_ctx *ctx, struct io_sq_data *sqd, 203 + bool cap_entries, struct io_sq_time *ist) 174 204 { 175 205 unsigned int to_submit; 176 206 int ret = 0; ··· 213 181 214 182 if (to_submit || !wq_list_empty(&ctx->iopoll_list)) { 215 183 const struct cred *creds = NULL; 184 + 185 + io_sq_start_worktime(ist); 216 186 217 187 if (ctx->sq_creds != current_cred()) 218 188 creds = override_creds(ctx->sq_creds); ··· 289 255 return retry_list || !llist_empty(&tctx->task_list); 290 256 } 291 257 292 - static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start) 293 - { 294 - struct rusage end; 295 - 296 - getrusage(current, RUSAGE_SELF, &end); 297 - end.ru_stime.tv_sec -= start->ru_stime.tv_sec; 298 - end.ru_stime.tv_usec -= start->ru_stime.tv_usec; 299 - 300 - sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000; 301 - } 302 - 303 258 static int io_sq_thread(void *data) 304 259 { 305 260 struct llist_node *retry_list = NULL; 306 261 struct io_sq_data *sqd = data; 307 262 struct io_ring_ctx *ctx; 308 - struct rusage start; 309 263 unsigned long timeout = 0; 310 264 char buf[TASK_COMM_LEN] = {}; 311 265 DEFINE_WAIT(wait); ··· 331 309 mutex_lock(&sqd->lock); 332 310 while (1) { 333 311 bool cap_entries, sqt_spin = false; 312 + struct io_sq_time ist = { }; 334 313 335 314 if (io_sqd_events_pending(sqd) || signal_pending(current)) { 336 315 if (io_sqd_handle_event(sqd)) ··· 340 317 } 341 318 342 319 cap_entries = !list_is_singular(&sqd->ctx_list); 343 - getrusage(current, RUSAGE_SELF, &start); 344 320 list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { 345 - int ret = __io_sq_thread(ctx, cap_entries); 321 + int ret = __io_sq_thread(ctx, sqd, cap_entries, &ist); 346 322 347 323 if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list))) 348 324 sqt_spin = true; ··· 349 327 if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE)) 350 328 sqt_spin = true; 351 329 352 - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) 353 - if (io_napi(ctx)) 330 + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { 331 + if (io_napi(ctx)) { 332 + io_sq_start_worktime(&ist); 354 333 io_napi_sqpoll_busy_poll(ctx); 334 + } 335 + } 336 + 337 + io_sq_update_worktime(sqd, &ist); 355 338 356 339 if (sqt_spin || !time_after(jiffies, timeout)) { 357 - if (sqt_spin) { 358 - io_sq_update_worktime(sqd, &start); 340 + if (sqt_spin) 359 341 timeout = jiffies + sqd->sq_thread_idle; 360 - } 361 342 if 
(unlikely(need_resched())) { 362 343 mutex_unlock(&sqd->lock); 363 344 cond_resched();
+1
io_uring/sqpoll.h
··· 29 29 void io_put_sq_data(struct io_sq_data *sqd); 30 30 void io_sqpoll_wait_sq(struct io_ring_ctx *ctx); 31 31 int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask); 32 + u64 io_sq_cpu_usec(struct task_struct *tsk); 32 33 33 34 static inline struct task_struct *sqpoll_task_locked(struct io_sq_data *sqd) 34 35 {
+1 -1
io_uring/waitid.c
··· 250 250 return -EINVAL; 251 251 252 252 iwa = io_uring_alloc_async_data(NULL, req); 253 - if (!unlikely(iwa)) 253 + if (unlikely(!iwa)) 254 254 return -ENOMEM; 255 255 iwa->req = req; 256 256
+1 -1
kernel/cgroup/cgroup.c
··· 5892 5892 * if the parent has to be frozen, the child has too. 5893 5893 */ 5894 5894 cgrp->freezer.e_freeze = parent->freezer.e_freeze; 5895 - seqcount_init(&cgrp->freezer.freeze_seq); 5895 + seqcount_spinlock_init(&cgrp->freezer.freeze_seq, &css_set_lock); 5896 5896 if (cgrp->freezer.e_freeze) { 5897 5897 /* 5898 5898 * Set the CGRP_FREEZE flag, so when a process will be
+4 -1
kernel/dma/debug.c
··· 23 23 #include <linux/ctype.h> 24 24 #include <linux/list.h> 25 25 #include <linux/slab.h> 26 + #include <linux/swiotlb.h> 26 27 #include <asm/sections.h> 27 28 #include "debug.h" 28 29 ··· 595 594 if (rc == -ENOMEM) { 596 595 pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n"); 597 596 global_disable = true; 598 - } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { 597 + } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && 598 + !(IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && 599 + is_swiotlb_active(entry->dev))) { 599 600 err_printk(entry->dev, entry, 600 601 "cacheline tracking EEXIST, overlapping mappings aren't supported\n"); 601 602 }
+1 -1
kernel/irq/chip.c
··· 1030 1030 void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 1031 1031 const char *name) 1032 1032 { 1033 - scoped_irqdesc_get_and_lock(irq, 0) 1033 + scoped_irqdesc_get_and_buslock(irq, 0) 1034 1034 __irq_do_set_handler(scoped_irqdesc, handle, is_chained, name); 1035 1035 } 1036 1036 EXPORT_SYMBOL_GPL(__irq_set_handler);
+2 -2
kernel/irq/manage.c
··· 659 659 660 660 static int __disable_irq_nosync(unsigned int irq) 661 661 { 662 - scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 662 + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 663 663 __disable_irq(scoped_irqdesc); 664 664 return 0; 665 665 } ··· 789 789 */ 790 790 void enable_irq(unsigned int irq) 791 791 { 792 - scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 792 + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { 793 793 struct irq_desc *desc = scoped_irqdesc; 794 794 795 795 if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq))
+12
kernel/sched/fair.c
··· 6437 6437 6438 6438 cfs_rq->throttle_count = pcfs_rq->throttle_count; 6439 6439 cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu)); 6440 + 6441 + /* 6442 + * It is not enough to sync the "pelt_clock_throttled" indicator 6443 + * with the parent cfs_rq when the hierarchy is not queued. 6444 + * Always join a throttled hierarchy with PELT clock throttled 6445 + * and leave it to the first enqueue, or to distribution, to 6446 + * unthrottle the PELT clock. 6447 + */ 6448 + if (cfs_rq->throttle_count) 6449 + cfs_rq->pelt_clock_throttled = 1; 6440 6450 } 6441 6451 6442 6452 /* conditionally throttle active cfs_rq's from put_prev_entity() */ ··· 13197 13187 if (!cfs_rq_pelt_clock_throttled(cfs_rq)) 13198 13188 list_add_leaf_cfs_rq(cfs_rq); 13199 13189 } 13190 + 13191 + assert_list_leaf_cfs_rq(rq_of(cfs_rq)); 13200 13192 } 13201 13193 #else /* !CONFIG_FAIR_GROUP_SCHED: */ 13202 13194 static void propagate_entity_cfs_rq(struct sched_entity *se) { }
-2
kernel/sched/sched.h
··· 3740 3740 struct mm_struct *mm) 3741 3741 { 3742 3742 struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; 3743 - struct cpumask *cpumask; 3744 3743 int cid; 3745 3744 3746 3745 lockdep_assert_rq_held(rq); 3747 - cpumask = mm_cidmask(mm); 3748 3746 cid = __this_cpu_read(pcpu_cid->cid); 3749 3747 if (mm_cid_is_valid(cid)) { 3750 3748 mm_cid_snapshot_time(rq, mm);
+1 -1
kernel/time/timekeeping.c
··· 3070 3070 return -ENOMEM; 3071 3071 } 3072 3072 3073 - for (int i = 0; i <= MAX_AUX_CLOCKS; i++) { 3073 + for (int i = 0; i < MAX_AUX_CLOCKS; i++) { 3074 3074 char id[2] = { [0] = '0' + i, }; 3075 3075 struct kobject *clk = kobject_create_and_add(id, auxo); 3076 3076
+1
kernel/trace/rv/monitors/pagefault/Kconfig
··· 5 5 select RV_LTL_MONITOR 6 6 depends on RV_MON_RTAPP 7 7 depends on X86 || RISCV 8 + depends on MMU 8 9 default y 9 10 select LTL_MON_EVENTS_ID 10 11 bool "pagefault monitor"
+6 -6
kernel/trace/rv/rv.c
··· 501 501 502 502 list_for_each_entry_continue(mon, &rv_monitors_list, list) { 503 503 if (mon->enabled) 504 - return mon; 504 + return &mon->list; 505 505 } 506 506 507 507 return NULL; ··· 509 509 510 510 static void *enabled_monitors_start(struct seq_file *m, loff_t *pos) 511 511 { 512 - struct rv_monitor *mon; 512 + struct list_head *head; 513 513 loff_t l; 514 514 515 515 mutex_lock(&rv_interface_lock); ··· 517 517 if (list_empty(&rv_monitors_list)) 518 518 return NULL; 519 519 520 - mon = list_entry(&rv_monitors_list, struct rv_monitor, list); 520 + head = &rv_monitors_list; 521 521 522 522 for (l = 0; l <= *pos; ) { 523 - mon = enabled_monitors_next(m, mon, &l); 524 - if (!mon) 523 + head = enabled_monitors_next(m, head, &l); 524 + if (!head) 525 525 break; 526 526 } 527 527 528 - return mon; 528 + return head; 529 529 } 530 530 531 531 /*
+1 -1
lib/crypto/Kconfig
··· 97 97 98 98 config CRYPTO_LIB_POLY1305_ARCH 99 99 bool 100 - depends on CRYPTO_LIB_POLY1305 && !UML 100 + depends on CRYPTO_LIB_POLY1305 && !UML && !KMSAN 101 101 default y if ARM 102 102 default y if ARM64 && KERNEL_MODE_NEON 103 103 default y if MIPS
+5 -2
mm/damon/core.c
··· 452 452 damos_for_each_filter_safe(f, next, s) 453 453 damos_destroy_filter(f); 454 454 455 + damos_for_each_ops_filter_safe(f, next, s) 456 + damos_destroy_filter(f); 457 + 455 458 kfree(s->migrate_dests.node_id_arr); 456 459 kfree(s->migrate_dests.weight_arr); 457 460 damon_del_scheme(s); ··· 835 832 src_goal->metric, src_goal->target_value); 836 833 if (!new_goal) 837 834 return -ENOMEM; 838 - damos_commit_quota_goal_union(new_goal, src_goal); 835 + damos_commit_quota_goal(new_goal, src_goal); 839 836 damos_add_quota_goal(dst, new_goal); 840 837 } 841 838 return 0; ··· 1453 1450 INIT_LIST_HEAD(&control->list); 1454 1451 1455 1452 mutex_lock(&ctx->call_controls_lock); 1456 - list_add_tail(&ctx->call_controls, &control->list); 1453 + list_add_tail(&control->list, &ctx->call_controls); 1457 1454 mutex_unlock(&ctx->call_controls_lock); 1458 1455 if (!damon_is_running(ctx)) 1459 1456 return -EINVAL;
+4 -3
mm/damon/sysfs.c
··· 1473 1473 if (IS_ERR(param_ctx)) 1474 1474 return PTR_ERR(param_ctx); 1475 1475 test_ctx = damon_new_ctx(); 1476 + if (!test_ctx) 1477 + return -ENOMEM; 1476 1478 err = damon_commit_ctx(test_ctx, param_ctx); 1477 - if (err) { 1478 - damon_destroy_ctx(test_ctx); 1479 + if (err) 1479 1480 goto out; 1480 - } 1481 1481 err = damon_commit_ctx(kdamond->damon_ctx, param_ctx); 1482 1482 out: 1483 + damon_destroy_ctx(test_ctx); 1483 1484 damon_destroy_ctx(param_ctx); 1484 1485 return err; 1485 1486 }
+3
mm/huge_memory.c
··· 4109 4109 if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1) 4110 4110 return false; 4111 4111 4112 + if (folio_contain_hwpoisoned_page(folio)) 4113 + return false; 4114 + 4112 4115 for (i = 0; i < folio_nr_pages(folio); i++) { 4113 4116 if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) { 4114 4117 if (++num_zero_pages > khugepaged_max_ptes_none)
+2 -3
mm/hugetlb.c
··· 7614 7614 p4d_t *p4d = p4d_offset(pgd, addr); 7615 7615 pud_t *pud = pud_offset(p4d, addr); 7616 7616 7617 - i_mmap_assert_write_locked(vma->vm_file->f_mapping); 7618 - hugetlb_vma_assert_locked(vma); 7619 7617 if (sz != PMD_SIZE) 7620 7618 return 0; 7621 7619 if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep))) 7622 7620 return 0; 7623 - 7621 + i_mmap_assert_write_locked(vma->vm_file->f_mapping); 7622 + hugetlb_vma_assert_locked(vma); 7624 7623 pud_clear(pud); 7625 7624 /* 7626 7625 * Once our caller drops the rmap lock, some other process might be
+2 -1
mm/migrate.c
··· 301 301 struct page *page = folio_page(folio, idx); 302 302 pte_t newpte; 303 303 304 - if (PageCompound(page)) 304 + if (PageCompound(page) || PageHWPoison(page)) 305 305 return false; 306 + 306 307 VM_BUG_ON_PAGE(!PageAnon(page), page); 307 308 VM_BUG_ON_PAGE(!PageLocked(page), page); 308 309 VM_BUG_ON_PAGE(pte_present(old_pte), page);
+6 -9
mm/mremap.c
··· 1237 1237 } 1238 1238 1239 1239 /* 1240 - * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() and 1241 - * account flags on remaining VMA by convention (it cannot be mlock()'d any 1242 - * longer, as pages in range are no longer mapped), and removing anon_vma_chain 1243 - * links from it (if the entire VMA was copied over). 1240 + * Perform final tasks for MADV_DONTUNMAP operation, clearing mlock() flag on 1241 + * remaining VMA by convention (it cannot be mlock()'d any longer, as pages in 1242 + * range are no longer mapped), and removing anon_vma_chain links from it if the 1243 + * entire VMA was copied over. 1244 1244 */ 1245 1245 static void dontunmap_complete(struct vma_remap_struct *vrm, 1246 1246 struct vm_area_struct *new_vma) ··· 1250 1250 unsigned long old_start = vrm->vma->vm_start; 1251 1251 unsigned long old_end = vrm->vma->vm_end; 1252 1252 1253 - /* 1254 - * We always clear VM_LOCKED[ONFAULT] | VM_ACCOUNT on the old 1255 - * vma. 1256 - */ 1257 - vm_flags_clear(vrm->vma, VM_LOCKED_MASK | VM_ACCOUNT); 1253 + /* We always clear VM_LOCKED[ONFAULT] on the old VMA. */ 1254 + vm_flags_clear(vrm->vma, VM_LOCKED_MASK); 1258 1255 1259 1256 /* 1260 1257 * anon_vma links of the old vma is no longer needed after its page
+3
mm/page_owner.c
··· 168 168 unsigned long flags; 169 169 struct stack *stack; 170 170 171 + if (!gfpflags_allow_spinning(gfp_mask)) 172 + return; 173 + 171 174 set_current_in_page_owner(); 172 175 stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask)); 173 176 if (!stack) {
+21 -10
mm/slub.c
··· 2052 2052 } 2053 2053 } 2054 2054 2055 - static inline void mark_failed_objexts_alloc(struct slab *slab) 2055 + static inline bool mark_failed_objexts_alloc(struct slab *slab) 2056 2056 { 2057 - slab->obj_exts = OBJEXTS_ALLOC_FAIL; 2057 + return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0; 2058 2058 } 2059 2059 2060 2060 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, ··· 2076 2076 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ 2077 2077 2078 2078 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} 2079 - static inline void mark_failed_objexts_alloc(struct slab *slab) {} 2079 + static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; } 2080 2080 static inline void handle_failed_objexts_alloc(unsigned long obj_exts, 2081 2081 struct slabobj_ext *vec, unsigned int objects) {} 2082 2082 ··· 2124 2124 slab_nid(slab)); 2125 2125 } 2126 2126 if (!vec) { 2127 - /* Mark vectors which failed to allocate */ 2128 - mark_failed_objexts_alloc(slab); 2127 + /* 2128 + * Try to mark vectors which failed to allocate. 2129 + * If this operation fails, there may be a racing process 2130 + * that has already completed the allocation. 2131 + */ 2132 + if (!mark_failed_objexts_alloc(slab) && 2133 + slab_obj_exts(slab)) 2134 + return 0; 2129 2135 2130 2136 return -ENOMEM; 2131 2137 } ··· 2142 2136 #ifdef CONFIG_MEMCG 2143 2137 new_exts |= MEMCG_DATA_OBJEXTS; 2144 2138 #endif 2139 + retry: 2145 2140 old_exts = READ_ONCE(slab->obj_exts); 2146 2141 handle_failed_objexts_alloc(old_exts, vec, objects); 2147 2142 if (new_slab) { ··· 2152 2145 * be simply assigned. 2153 2146 */ 2154 2147 slab->obj_exts = new_exts; 2155 - } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) || 2156 - cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { 2148 + } else if (old_exts & ~OBJEXTS_FLAGS_MASK) { 2157 2149 /* 2158 2150 * If the slab is already in use, somebody can allocate and 2159 2151 * assign slabobj_exts in parallel. In this case the existing ··· 2164 2158 else 2165 2159 kfree(vec); 2166 2160 return 0; 2161 + } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { 2162 + /* Retry if a racing thread changed slab->obj_exts from under us. */ 2163 + goto retry; 2167 2164 } 2168 2165 2169 2166 if (allow_spin) ··· 3428 3419 3429 3420 if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) { 3430 3421 /* Unlucky, discard newly allocated slab */ 3431 - slab->frozen = 1; 3432 3422 defer_deactivate_slab(slab, NULL); 3433 3423 return NULL; 3434 3424 } ··· 6476 6468 struct slab *slab = container_of(pos, struct slab, llnode); 6477 6469 6478 6470 #ifdef CONFIG_SLUB_TINY 6479 - discard_slab(slab->slab_cache, slab); 6471 + free_slab(slab->slab_cache, slab); 6480 6472 #else 6481 - deactivate_slab(slab->slab_cache, slab, slab->flush_freelist); 6473 + if (slab->frozen) 6474 + deactivate_slab(slab->slab_cache, slab, slab->flush_freelist); 6475 + else 6476 + free_slab(slab->slab_cache, slab); 6482 6477 #endif 6483 6478 } 6484 6479 }
+34 -10
net/core/datagram.c
··· 920 920 EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg); 921 921 922 922 /** 923 - * datagram_poll - generic datagram poll 923 + * datagram_poll_queue - same as datagram_poll, but on a specific receive 924 + * queue 924 925 * @file: file struct 925 926 * @sock: socket 926 927 * @wait: poll table 928 + * @rcv_queue: receive queue to poll 927 929 * 928 - * Datagram poll: Again totally generic. This also handles 929 - * sequenced packet sockets providing the socket receive queue 930 - * is only ever holding data ready to receive. 930 + * Performs polling on the given receive queue, handling shutdown, error, 931 + * and connection state. This is useful for protocols that deliver 932 + * userspace-bound packets through a custom queue instead of 933 + * sk->sk_receive_queue. 931 934 * 932 - * Note: when you *don't* use this routine for this protocol, 933 - * and you use a different write policy from sock_writeable() 934 - * then please supply your own write_space callback. 935 + * Return: poll bitmask indicating the socket's current state 935 936 */ 936 - __poll_t datagram_poll(struct file *file, struct socket *sock, 937 - poll_table *wait) 937 + __poll_t datagram_poll_queue(struct file *file, struct socket *sock, 938 + poll_table *wait, struct sk_buff_head *rcv_queue) 938 939 { 939 940 struct sock *sk = sock->sk; 940 941 __poll_t mask; ··· 957 956 mask |= EPOLLHUP; 958 957 959 958 /* readable? */ 960 - if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) 959 + if (!skb_queue_empty_lockless(rcv_queue)) 961 960 mask |= EPOLLIN | EPOLLRDNORM; 962 961 963 962 /* Connection-based need to check for termination and startup */ ··· 978 977 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 979 978 980 979 return mask; 980 + } 981 + EXPORT_SYMBOL(datagram_poll_queue); 982 + 983 + /** 984 + * datagram_poll - generic datagram poll 985 + * @file: file struct 986 + * @sock: socket 987 + * @wait: poll table 988 + * 989 + * Datagram poll: Again totally generic. This also handles 990 + * sequenced packet sockets providing the socket receive queue 991 + * is only ever holding data ready to receive. 992 + * 993 + * Note: when you *don't* use this routine for this protocol, 994 + * and you use a different write policy from sock_writeable() 995 + * then please supply your own write_space callback. 996 + * 997 + * Return: poll bitmask indicating the socket's current state 998 + */ 999 + __poll_t datagram_poll(struct file *file, struct socket *sock, poll_table *wait) 1000 + { 1001 + return datagram_poll_queue(file, sock, wait, 1002 + &sock->sk->sk_receive_queue); 981 1003 } 982 1004 EXPORT_SYMBOL(datagram_poll);
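The espintcp conversion further below is the in-tree user of the new helper; as a hedged sketch (invented foo_* names, assuming the protocol context hangs off sk_user_data), a protocol that parks userspace-bound skbs on its own queue would implement .poll as:

#include <linux/poll.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Hypothetical protocol context with a dedicated userspace-bound queue. */
struct foo_ctx {
	struct sk_buff_head rx_queue;
};

static __poll_t foo_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct foo_ctx *ctx = sock->sk->sk_user_data;

	/* One call replaces datagram_poll() plus a manual queue check. */
	return datagram_poll_queue(file, sock, wait, &ctx->rx_queue);
}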
+8 -2
net/core/gro.c
··· 639 639 640 640 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 641 641 { 642 + struct skb_shared_info *shinfo; 643 + 642 644 if (unlikely(skb->pfmemalloc)) { 643 645 consume_skb(skb); 644 646 return; ··· 657 655 658 656 skb->encapsulation = 0; 659 657 skb->ip_summed = CHECKSUM_NONE; 660 - skb_shinfo(skb)->gso_type = 0; 661 - skb_shinfo(skb)->gso_size = 0; 658 + 659 + shinfo = skb_shinfo(skb); 660 + shinfo->gso_type = 0; 661 + shinfo->gso_size = 0; 662 + shinfo->hwtstamps.hwtstamp = 0; 663 + 662 664 if (unlikely(skb->slow_gro)) { 663 665 skb_orphan(skb); 664 666 skb_ext_reset(skb);
+2 -3
net/core/gro_cells.c
··· 43 43 if (skb_queue_len(&cell->napi_skbs) == 1) 44 44 napi_schedule(&cell->napi); 45 45 46 - if (have_bh_lock) 47 - local_unlock_nested_bh(&gcells->cells->bh_lock); 48 - 49 46 res = NET_RX_SUCCESS; 50 47 51 48 unlock: 49 + if (have_bh_lock) 50 + local_unlock_nested_bh(&gcells->cells->bh_lock); 52 51 rcu_read_unlock(); 53 52 return res; 54 53 }
-3
net/core/rtnetlink.c
··· 4715 4715 int err; 4716 4716 u16 vid; 4717 4717 4718 - if (!netlink_capable(skb, CAP_NET_ADMIN)) 4719 - return -EPERM; 4720 - 4721 4718 if (!del_bulk) { 4722 4719 err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX, 4723 4720 NULL, extack);
+7 -1
net/hsr/hsr_netlink.c
··· 34 34 struct netlink_ext_ack *extack) 35 35 { 36 36 struct net *link_net = rtnl_newlink_link_net(params); 37 + struct net_device *link[2], *interlink = NULL; 37 38 struct nlattr **data = params->data; 38 39 enum hsr_version proto_version; 39 40 unsigned char multicast_spec; 40 41 u8 proto = HSR_PROTOCOL_HSR; 41 42 42 - struct net_device *link[2], *interlink = NULL; 43 + if (!net_eq(link_net, dev_net(dev))) { 44 + NL_SET_ERR_MSG_MOD(extack, 45 + "HSR slaves/interlink must be in the same net namespace as the HSR link"); 46 + return -EINVAL; 47 + } 48 + 43 49 if (!data) { 44 50 NL_SET_ERR_MSG_MOD(extack, "No slave devices specified"); 45 51 return -EINVAL; 46 52 }
+6
net/mptcp/pm_kernel.c
··· 370 370 } 371 371 372 372 subflow: 373 + /* No need to try establishing subflows to remote id0 if not allowed */ 374 + if (mptcp_pm_add_addr_c_flag_case(msk)) 375 + goto exit; 376 + 373 377 /* check if should create a new subflow */ 374 378 while (msk->pm.local_addr_used < endp_subflow_max && 375 379 msk->pm.extra_subflows < limit_extra_subflows) { ··· 405 401 __mptcp_subflow_connect(sk, &local, &addrs[i]); 406 402 spin_lock_bh(&msk->pm.lock); 407 403 } 404 + 405 + exit: 408 406 mptcp_pm_nl_check_work_pending(msk); 409 407 } 410 408
+7 -6
net/sctp/inqueue.c
··· 169 169 chunk->head_skb = chunk->skb; 170 170 171 171 /* skbs with "cover letter" */ 172 - if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len) 172 + if (chunk->head_skb && chunk->skb->data_len == chunk->skb->len) { 173 + if (WARN_ON(!skb_shinfo(chunk->skb)->frag_list)) { 174 + __SCTP_INC_STATS(dev_net(chunk->skb->dev), 175 + SCTP_MIB_IN_PKT_DISCARDS); 176 + sctp_chunk_free(chunk); 177 + goto next_chunk; 178 + } 173 179 chunk->skb = skb_shinfo(chunk->skb)->frag_list; 174 - 175 - if (WARN_ON(!chunk->skb)) { 176 - __SCTP_INC_STATS(dev_net(chunk->skb->dev), SCTP_MIB_IN_PKT_DISCARDS); 177 - sctp_chunk_free(chunk); 178 - goto next_chunk; 179 180 } 180 181 } 181 182
-13
net/smc/smc_inet.c
··· 56 56 .protocol = IPPROTO_SMC, 57 57 .prot = &smc_inet_prot, 58 58 .ops = &smc_inet_stream_ops, 59 - .flags = INET_PROTOSW_ICSK, 60 59 }; 61 60 62 61 #if IS_ENABLED(CONFIG_IPV6) ··· 103 104 .protocol = IPPROTO_SMC, 104 105 .prot = &smc_inet6_prot, 105 106 .ops = &smc_inet6_stream_ops, 106 - .flags = INET_PROTOSW_ICSK, 107 107 }; 108 108 #endif /* CONFIG_IPV6 */ 109 - 110 - static unsigned int smc_sync_mss(struct sock *sk, u32 pmtu) 111 - { 112 - /* No need pass it through to clcsock, mss can always be set by 113 - * sock_create_kern or smc_setsockopt. 114 - */ 115 - return 0; 116 - } 117 109 118 110 static int smc_inet_init_sock(struct sock *sk) 119 111 { ··· 112 122 113 123 /* init common smc sock */ 114 124 smc_sk_init(net, sk, IPPROTO_SMC); 115 - 116 - inet_csk(sk)->icsk_sync_mss = smc_sync_mss; 117 - 118 125 /* create clcsock */ 119 126 return smc_create_clcsk(net, sk, sk->sk_family); 120 127 }
+19 -19
net/vmw_vsock/af_vsock.c
··· 487 487 goto err; 488 488 } 489 489 490 - if (vsk->transport) { 491 - if (vsk->transport == new_transport) { 492 - ret = 0; 493 - goto err; 494 - } 490 + if (vsk->transport && vsk->transport == new_transport) { 491 + ret = 0; 492 + goto err; 493 + } 495 494 495 + /* We increase the module refcnt to prevent the transport unloading 496 + * while there are open sockets assigned to it. 497 + */ 498 + if (!new_transport || !try_module_get(new_transport->module)) { 499 + ret = -ENODEV; 500 + goto err; 501 + } 502 + 503 + /* It's safe to release the mutex after a successful try_module_get(). 504 + * Whichever transport `new_transport` points at, it won't go away until 505 + * the last module_put() below or in vsock_deassign_transport(). 506 + */ 507 + mutex_unlock(&vsock_register_mutex); 508 + 509 + if (vsk->transport) { 496 510 /* transport->release() must be called with sock lock acquired. 497 511 * This path can only be taken during vsock_connect(), where we 498 512 * have already held the sock lock. In the other cases, this ··· 525 511 sk->sk_state = TCP_CLOSE; 526 512 vsk->peer_shutdown = 0; 527 513 } 528 - 529 - /* We increase the module refcnt to prevent the transport unloading 530 - * while there are open sockets assigned to it. 531 - */ 532 - if (!new_transport || !try_module_get(new_transport->module)) { 533 - ret = -ENODEV; 534 - goto err; 535 - } 536 - 537 - /* It's safe to release the mutex after a successful try_module_get(). 538 - * Whichever transport `new_transport` points at, it won't go away until 539 - * the last module_put() below or in vsock_deassign_transport(). 540 - */ 541 - mutex_unlock(&vsock_register_mutex); 542 514 543 515 if (sk->sk_type == SOCK_SEQPACKET) { 544 516 if (!new_transport->seqpacket_allow ||
+1 -5
net/xfrm/espintcp.c
··· 555 555 static __poll_t espintcp_poll(struct file *file, struct socket *sock, 556 556 poll_table *wait) 557 557 { 558 - __poll_t mask = datagram_poll(file, sock, wait); 559 558 struct sock *sk = sock->sk; 560 559 struct espintcp_ctx *ctx = espintcp_getctx(sk); 561 560 562 - if (!skb_queue_empty(&ctx->ike_queue)) 563 - mask |= EPOLLIN | EPOLLRDNORM; 564 - 565 - return mask; 561 + return datagram_poll_queue(file, sock, wait, &ctx->ike_queue); 566 562 } 567 563 568 564 static void build_protos(struct proto *espintcp_prot,
+1 -7
rust/kernel/auxiliary.rs
··· 217 217 218 218 /// Returns a reference to the parent [`device::Device`], if any. 219 219 pub fn parent(&self) -> Option<&device::Device> { 220 - let ptr: *const Self = self; 221 - // CAST: `Device<Ctx: DeviceContext>` types are transparent to each other. 222 - let ptr: *const Device = ptr.cast(); 223 - // SAFETY: `ptr` was derived from `&self`. 224 - let this = unsafe { &*ptr }; 225 - 226 - this.as_ref().parent() 220 + self.as_ref().parent() 227 221 } 228 222 } 229 223
+2 -2
rust/kernel/device.rs
··· 251 251 252 252 /// Returns a reference to the parent device, if any. 253 253 #[cfg_attr(not(CONFIG_AUXILIARY_BUS), expect(dead_code))] 254 - pub(crate) fn parent(&self) -> Option<&Self> { 254 + pub(crate) fn parent(&self) -> Option<&Device> { 255 255 // SAFETY: 256 256 // - By the type invariant `self.as_raw()` is always valid. 257 257 // - The parent device is only ever set at device creation. ··· 264 264 // - Since `parent` is not NULL, it must be a valid pointer to a `struct device`. 265 265 // - `parent` is valid for the lifetime of `self`, since a `struct device` holds a 266 266 // reference count of its parent. 267 - Some(unsafe { Self::from_raw(parent) }) 267 + Some(unsafe { Device::from_raw(parent) }) 268 268 } 269 269 } 270 270
+3 -2
tools/objtool/check.c
··· 217 217 * these come from the Rust standard library). 218 218 */ 219 219 return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") || 220 + str_ends_with(func->name, "_4core6option13expect_failed") || 220 221 str_ends_with(func->name, "_4core6option13unwrap_failed") || 221 222 str_ends_with(func->name, "_4core6result13unwrap_failed") || 222 223 str_ends_with(func->name, "_4core9panicking5panic") || ··· 4711 4710 4712 4711 for_each_reloc(sec->rsec, reloc) { 4713 4712 if (arch_absolute_reloc(file->elf, reloc)) { 4714 - WARN("section %s has absolute relocation at offset 0x%lx", 4715 - sec->name, reloc_offset(reloc)); 4713 + WARN("section %s has absolute relocation at offset 0x%llx", 4714 + sec->name, (unsigned long long)reloc_offset(reloc)); 4716 4715 ret++; 4717 4716 } 4718 4717 }
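The second objtool hunk is a printf-portability fix: the offset being printed is not guaranteed to be an unsigned long on every host, so "%lx" can mismatch the argument width; casting to unsigned long long and printing with "%llx" is well-defined everywhere. The same idiom in a standalone host tool (a minimal userspace example, not objtool code):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t off = 0x1000;  /* e.g. a relocation offset */

        /* Portable on both 32-bit and 64-bit hosts. */
        printf("offset 0x%llx\n", (unsigned long long)off);

        /* Equivalent, using the width-exact macro from <inttypes.h>. */
        printf("offset 0x%" PRIx64 "\n", off);
        return 0;
}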
+20
tools/testing/selftests/cgroup/lib/include/cgroup_util.h
··· 25 25 return labs(a - b) <= (a + b) / 100 * err; 26 26 } 27 27 28 + /* 29 + * Checks if two given values differ by less than err% of their sum and assert 30 + * with detailed debug info if not. 31 + */ 32 + static inline int values_close_report(long a, long b, int err) 33 + { 34 + long diff = labs(a - b); 35 + long limit = (a + b) / 100 * err; 36 + double actual_err = (a + b) ? (100.0 * diff / (a + b)) : 0.0; 37 + int close = diff <= limit; 38 + 39 + if (!close) 40 + fprintf(stderr, 41 + "[FAIL] actual=%ld expected=%ld | diff=%ld | limit=%ld | " 42 + "tolerance=%d%% | actual_error=%.2f%%\n", 43 + a, b, diff, limit, err, actual_err); 44 + 45 + return close; 46 + } 47 + 28 48 extern ssize_t read_text(const char *path, char *buf, size_t max_len); 29 49 extern ssize_t write_text(const char *path, char *buf, ssize_t len); 30 50
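values_close_report() keeps the pass/fail semantics of values_close() but, on failure, prints both operands, their absolute difference, the tolerance limit, and the realized error percentage, so a failing threshold check can be diagnosed straight from the selftest log; the test_cpu.c hunks below simply swap in the new name. A sketch of what a caller sees, using made-up numbers:

/* Inside a selftest: 970000 vs 1000000 at 1% tolerance fails, since
 * diff = 30000 exceeds limit = (a + b) / 100 * 1 = 19700, and stderr
 * gets:
 *   [FAIL] actual=970000 expected=1000000 | diff=30000 | limit=19700 |
 *   tolerance=1% | actual_error=1.52%
 */
long usage_usec = 970000, expected_usage_usec = 1000000;

if (!values_close_report(usage_usec, expected_usage_usec, 1))
        return KSFT_FAIL;       /* the reason is already in the log */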
+9 -9
tools/testing/selftests/cgroup/test_cpu.c
··· 219 219 if (user_usec <= 0) 220 220 goto cleanup; 221 221 222 - if (!values_close(usage_usec, expected_usage_usec, 1)) 222 + if (!values_close_report(usage_usec, expected_usage_usec, 1)) 223 223 goto cleanup; 224 224 225 225 ret = KSFT_PASS; ··· 291 291 292 292 user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec"); 293 293 nice_usec = cg_read_key_long(cpucg, "cpu.stat", "nice_usec"); 294 - if (!values_close(nice_usec, expected_nice_usec, 1)) 294 + if (!values_close_report(nice_usec, expected_nice_usec, 1)) 295 295 goto cleanup; 296 296 297 297 ret = KSFT_PASS; ··· 404 404 goto cleanup; 405 405 406 406 delta = children[i + 1].usage - children[i].usage; 407 - if (!values_close(delta, children[0].usage, 35)) 407 + if (!values_close_report(delta, children[0].usage, 35)) 408 408 goto cleanup; 409 409 } 410 410 ··· 444 444 int ret = KSFT_FAIL, i; 445 445 446 446 for (i = 0; i < num_children - 1; i++) { 447 - if (!values_close(children[i + 1].usage, children[0].usage, 15)) 447 + if (!values_close_report(children[i + 1].usage, children[0].usage, 15)) 448 448 goto cleanup; 449 449 } 450 450 ··· 573 573 574 574 nested_leaf_usage = leaf[1].usage + leaf[2].usage; 575 575 if (overprovisioned) { 576 - if (!values_close(leaf[0].usage, nested_leaf_usage, 15)) 576 + if (!values_close_report(leaf[0].usage, nested_leaf_usage, 15)) 577 577 goto cleanup; 578 - } else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15)) 578 + } else if (!values_close_report(leaf[0].usage * 2, nested_leaf_usage, 15)) 579 579 goto cleanup; 580 580 581 581 582 582 child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec"); 583 583 if (child_usage <= 0) 584 584 goto cleanup; 585 - if (!values_close(child_usage, nested_leaf_usage, 1)) 585 + if (!values_close_report(child_usage, nested_leaf_usage, 1)) 586 586 goto cleanup; 587 587 588 588 ret = KSFT_PASS; ··· 691 691 expected_usage_usec 692 692 = n_periods * quota_usec + MIN(remainder_usec, quota_usec); 693 693 694 - if (!values_close(usage_usec, expected_usage_usec, 10)) 694 + if (!values_close_report(usage_usec, expected_usage_usec, 10)) 695 695 goto cleanup; 696 696 697 697 ret = KSFT_PASS; ··· 762 762 expected_usage_usec 763 763 = n_periods * quota_usec + MIN(remainder_usec, quota_usec); 764 764 765 - if (!values_close(usage_usec, expected_usage_usec, 10)) 765 + if (!values_close_report(usage_usec, expected_usage_usec, 10)) 766 766 goto cleanup; 767 767 768 768 ret = KSFT_PASS;
+9 -9
tools/testing/selftests/net/mptcp/mptcp_join.sh
··· 2324 2324 { 2325 2325 # no laminar endpoints: routing rules are used 2326 2326 if reset_with_tcp_filter "without a laminar endpoint" ns1 10.0.2.2 REJECT && 2327 - mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2327 + continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2328 2328 pm_nl_set_limits $ns1 0 2 2329 2329 pm_nl_set_limits $ns2 2 2 2330 2330 pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ··· 2336 2336 2337 2337 # laminar endpoints: this endpoint is used 2338 2338 if reset_with_tcp_filter "with a laminar endpoint" ns1 10.0.2.2 REJECT && 2339 - mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2339 + continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2340 2340 pm_nl_set_limits $ns1 0 2 2341 2341 pm_nl_set_limits $ns2 2 2 2342 2342 pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ··· 2348 2348 2349 2349 # laminar endpoints: these endpoints are used 2350 2350 if reset_with_tcp_filter "with multiple laminar endpoints" ns1 10.0.2.2 REJECT && 2351 - mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2351 + continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2352 2352 pm_nl_set_limits $ns1 0 2 2353 2353 pm_nl_set_limits $ns2 2 2 2354 2354 pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ··· 2363 2363 2364 2364 # laminar endpoints: only one endpoint is used 2365 2365 if reset_with_tcp_filter "single laminar endpoint" ns1 10.0.2.2 REJECT && 2366 - mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2366 + continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2367 2367 pm_nl_set_limits $ns1 0 2 2368 2368 pm_nl_set_limits $ns2 2 2 2369 2369 pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ··· 2376 2376 2377 2377 # laminar endpoints: subflow and laminar flags 2378 2378 if reset_with_tcp_filter "sublow + laminar endpoints" ns1 10.0.2.2 REJECT && 2379 - mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2379 + continue_if mptcp_lib_kallsyms_has "mptcp_pm_get_endp_laminar_max$"; then 2380 2380 pm_nl_set_limits $ns1 0 4 2381 2381 pm_nl_set_limits $ns2 2 4 2382 2382 pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ··· 3939 3939 # subflow_rebuild_header is needed to support the implicit flag 3940 3940 # userspace pm type prevents add_addr 3941 3941 if reset "implicit EP" && 3942 - mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 3942 + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 3943 3943 pm_nl_set_limits $ns1 2 2 3944 3944 pm_nl_set_limits $ns2 2 2 3945 3945 pm_nl_add_endpoint $ns1 10.0.2.1 flags signal ··· 3964 3964 fi 3965 3965 3966 3966 if reset_with_tcp_filter "delete and re-add" ns2 10.0.3.2 REJECT OUTPUT && 3967 - mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 3967 + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 3968 3968 start_events 3969 3969 pm_nl_set_limits $ns1 0 3 3970 3970 pm_nl_set_limits $ns2 0 3 ··· 4040 4040 4041 4041 # remove and re-add 4042 4042 if reset_with_events "delete re-add signal" && 4043 - mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 4043 + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 4044 4044 ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=0 4045 4045 pm_nl_set_limits $ns1 0 3 4046 4046 pm_nl_set_limits $ns2 3 3 ··· 4115 4115 4116 4116 # flush and re-add 4117 4117 if reset_with_tcp_filter "flush re-add" ns2 10.0.3.2 REJECT OUTPUT && 4118 - mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 4118 + continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then 4119 4119 pm_nl_set_limits $ns1 0 2 4120 4120 pm_nl_set_limits $ns2 1 2 4121 4121 # broadcast IP: no packet for this address will be received on ns1
+2 -15
tools/testing/selftests/net/sctp_hello.c
··· 29 29 static int do_client(int argc, char *argv[]) 30 30 { 31 31 struct sockaddr_storage ss; 32 - char buf[] = "hello"; 33 32 int csk, ret, len; 34 33 35 34 if (argc < 5) { ··· 55 56 56 57 set_addr(&ss, argv[3], argv[4], &len); 57 58 ret = connect(csk, (struct sockaddr *)&ss, len); 58 - if (ret < 0) { 59 - printf("failed to connect to peer\n"); 59 + if (ret < 0) 60 60 return -1; 61 - } 62 61 63 - ret = send(csk, buf, strlen(buf) + 1, 0); 64 - if (ret < 0) { 65 - printf("failed to send msg %d\n", ret); 66 - return -1; 67 - } 62 + recv(csk, NULL, 0, 0); 68 63 close(csk); 69 64 70 65 return 0; ··· 68 75 { 69 76 struct sockaddr_storage ss; 70 77 int lsk, csk, ret, len; 71 - char buf[20]; 72 78 73 79 if (argc < 2 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) { 74 80 printf("%s server|client ...\n", argv[0]); ··· 117 125 return -1; 118 126 } 119 127 120 - ret = recv(csk, buf, sizeof(buf), 0); 121 - if (ret <= 0) { 122 - printf("failed to recv msg %d\n", ret); 123 - return -1; 124 - } 125 128 close(csk); 126 129 close(lsk); 127 130
+45 -28
tools/testing/selftests/net/sctp_vrf.sh
··· 20 20 modprobe sctp_diag 21 21 setup_ns CLIENT_NS1 CLIENT_NS2 SERVER_NS 22 22 23 - ip net exec $CLIENT_NS1 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null 24 - ip net exec $CLIENT_NS2 sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null 25 - ip net exec $SERVER_NS sysctl -w net.ipv6.conf.default.accept_dad=0 2>&1 >/dev/null 23 + ip net exec $CLIENT_NS1 sysctl -wq net.ipv6.conf.default.accept_dad=0 24 + ip net exec $CLIENT_NS2 sysctl -wq net.ipv6.conf.default.accept_dad=0 25 + ip net exec $SERVER_NS sysctl -wq net.ipv6.conf.default.accept_dad=0 26 26 27 27 ip -n $SERVER_NS link add veth1 type veth peer name veth1 netns $CLIENT_NS1 28 28 ip -n $SERVER_NS link add veth2 type veth peer name veth1 netns $CLIENT_NS2 ··· 62 62 } 63 63 64 64 cleanup() { 65 - ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null 65 + wait_client $CLIENT_NS1 66 + wait_client $CLIENT_NS2 67 + stop_server 66 68 cleanup_ns $CLIENT_NS1 $CLIENT_NS2 $SERVER_NS 67 69 } 68 70 69 - wait_server() { 71 + start_server() { 70 72 local IFACE=$1 71 73 local CNT=0 72 74 73 - until ip netns exec $SERVER_NS ss -lS src $SERVER_IP:$SERVER_PORT | \ 74 - grep LISTEN | grep "$IFACE" 2>&1 >/dev/null; do 75 - [ $((CNT++)) = "20" ] && { RET=3; return $RET; } 75 + ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP $SERVER_PORT $IFACE & 76 + disown 77 + until ip netns exec $SERVER_NS ss -SlH | grep -q "$IFACE"; do 78 + [ $((CNT++)) -eq 30 ] && { RET=3; return $RET; } 79 + sleep 0.1 80 + done 81 + } 82 + 83 + stop_server() { 84 + local CNT=0 85 + 86 + ip netns exec $SERVER_NS pkill sctp_hello 87 + while ip netns exec $SERVER_NS ss -SaH | grep -q .; do 88 + [ $((CNT++)) -eq 30 ] && break 89 + sleep 0.1 90 + done 91 + } 92 + 93 + wait_client() { 94 + local CLIENT_NS=$1 95 + local CNT=0 96 + 97 + while ip netns exec $CLIENT_NS ss -SaH | grep -q .; do 98 + [ $((CNT++)) -eq 30 ] && break 76 99 sleep 0.1 77 100 done 78 101 } ··· 104 81 local CLIENT_NS=$1 105 82 local IFACE=$2 106 83 107 - ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null 108 - ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \ 109 - $SERVER_PORT $IFACE 2>&1 >/dev/null & 110 - disown 111 - wait_server $IFACE || return $RET 84 + start_server $IFACE || return $RET 112 85 timeout 3 ip netns exec $CLIENT_NS ./sctp_hello client $AF \ 113 - $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null 86 + $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 114 87 RET=$? 88 + wait_client $CLIENT_NS 89 + stop_server 115 90 return $RET 116 91 } ··· 117 96 local IFACE1=$1 118 97 local IFACE2=$2 119 98 120 - ip netns exec $SERVER_NS pkill sctp_hello 2>&1 >/dev/null 121 - ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \ 122 - $SERVER_PORT $IFACE1 2>&1 >/dev/null & 123 - disown 124 - wait_server $IFACE1 || return $RET 125 - ip netns exec $SERVER_NS ./sctp_hello server $AF $SERVER_IP \ 126 - $SERVER_PORT $IFACE2 2>&1 >/dev/null & 127 - disown 128 - wait_server $IFACE2 || return $RET 99 + start_server $IFACE1 || return $RET 100 + start_server $IFACE2 || return $RET 129 101 timeout 3 ip netns exec $CLIENT_NS1 ./sctp_hello client $AF \ 130 102 $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 2>&1 >/dev/null && \ 131 103 timeout 3 ip netns exec $CLIENT_NS2 ./sctp_hello client $AF \ 132 104 $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT 133 105 RET=$? 106 + wait_client $CLIENT_NS1 107 + wait_client $CLIENT_NS2 108 + stop_server 134 109 return $RET 135 110 } 136 111 137 112 testup() { 138 - ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=1 2>&1 >/dev/null 113 + ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=1 139 114 echo -n "TEST 01: nobind, connect from client 1, l3mdev_accept=1, Y " 140 115 do_test $CLIENT_NS1 || { echo "[FAIL]"; return $RET; } 141 116 echo "[PASS]" ··· 140 123 do_test $CLIENT_NS2 && { echo "[FAIL]"; return $RET; } 141 124 echo "[PASS]" 142 125 143 - ip netns exec $SERVER_NS sysctl -w net.sctp.l3mdev_accept=0 2>&1 >/dev/null 126 + ip netns exec $SERVER_NS sysctl -wq net.sctp.l3mdev_accept=0 144 127 echo -n "TEST 03: nobind, connect from client 1, l3mdev_accept=0, N " 145 128 do_test $CLIENT_NS1 && { echo "[FAIL]"; return $RET; } 146 129 echo "[PASS]" ··· 177 160 do_testx vrf-1 vrf-2 || { echo "[FAIL]"; return $RET; } 178 161 echo "[PASS]" 179 162 180 - echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, N " 163 + echo -n "TEST 12: bind vrf-2 & 1 in server, connect from client 1 & 2, Y " 181 164 do_testx vrf-2 vrf-1 || { echo "[FAIL]"; return $RET; } 182 165 echo "[PASS]" 183 166 }