Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Conflicts were all overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>

+3574 -1837
+8 -5
Documentation/admin-guide/device-mapper/dm-integrity.rst
··· 182 182 space-efficient. If this option is not present, large padding is 183 183 used - that is for compatibility with older kernels. 184 184 185 + allow_discards 186 + Allow block discard requests (a.k.a. TRIM) for the integrity device. 187 + Discards are only allowed to devices using internal hash. 185 188 186 - The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can 187 - be changed when reloading the target (load an inactive table and swap the 188 - tables with suspend and resume). The other arguments should not be changed 189 - when reloading the target because the layout of disk data depend on them 190 - and the reloaded target would be non-functional. 189 + The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and 190 + allow_discards can be changed when reloading the target (load an inactive 191 + table and swap the tables with suspend and resume). The other arguments 192 + should not be changed when reloading the target because the layout of disk 193 + data depend on them and the reloaded target would be non-functional. 191 194 192 195 193 196 The layout of the formatted block device:
+1 -2
Documentation/admin-guide/kernel-parameters.txt
··· 5187 5187 5188 5188 usbcore.old_scheme_first= 5189 5189 [USB] Start with the old device initialization 5190 - scheme, applies only to low and full-speed devices 5191 - (default 0 = off). 5190 + scheme (default 0 = off). 5192 5191 5193 5192 usbcore.usbfs_memory_mb= 5194 5193 [USB] Memory limit (in MB) for buffers allocated by
+13 -10
Documentation/devicetree/bindings/Makefile
··· 2 2 DT_DOC_CHECKER ?= dt-doc-validate 3 3 DT_EXTRACT_EX ?= dt-extract-example 4 4 DT_MK_SCHEMA ?= dt-mk-schema 5 + DT_MK_SCHEMA_USERONLY_FLAG := $(if $(DT_SCHEMA_FILES), -u) 5 6 6 7 quiet_cmd_chk_binding = CHKDT $(patsubst $(srctree)/%,%,$<) 7 8 cmd_chk_binding = $(DT_DOC_CHECKER) -u $(srctree)/$(src) $< ; \ ··· 14 13 # Use full schemas when checking %.example.dts 15 14 DT_TMP_SCHEMA := $(obj)/processed-schema-examples.yaml 16 15 17 - quiet_cmd_mk_schema = SCHEMA $@ 18 - cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(real-prereqs) 19 - 20 - DT_DOCS = $(addprefix $(src)/, \ 21 - $(shell \ 22 - cd $(srctree)/$(src) && \ 23 - find * \( -name '*.yaml' ! \ 16 + find_cmd = find $(srctree)/$(src) \( -name '*.yaml' ! \ 24 17 -name 'processed-schema*' ! \ 25 - -name '*.example.dt.yaml' \) \ 26 - )) 18 + -name '*.example.dt.yaml' \) 19 + 20 + quiet_cmd_mk_schema = SCHEMA $@ 21 + cmd_mk_schema = rm -f $@ ; \ 22 + $(if $(DT_MK_SCHEMA_FLAGS), \ 23 + echo $(real-prereqs), \ 24 + $(find_cmd)) | \ 25 + xargs $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) >> $@ 26 + 27 + DT_DOCS = $(shell $(find_cmd) | sed -e 's|^$(srctree)/||') 27 28 28 29 DT_SCHEMA_FILES ?= $(DT_DOCS) 29 30 ··· 40 37 $(obj)/processed-schema-examples.yaml: $(DT_DOCS) FORCE 41 38 $(call if_changed,mk_schema) 42 39 43 - $(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := -u 40 + $(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := $(DT_MK_SCHEMA_USERONLY_FLAG) 44 41 $(obj)/processed-schema.yaml: $(DT_SCHEMA_FILES) FORCE 45 42 $(call if_changed,mk_schema) 46 43
+3 -4
Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml
··· 22 22 const: socionext,uniphier-xdmac 23 23 24 24 reg: 25 - items: 26 - - description: XDMAC base register region (offset and length) 27 - - description: XDMAC extension register region (offset and length) 25 + maxItems: 1 28 26 29 27 interrupts: 30 28 maxItems: 1 ··· 47 49 - reg 48 50 - interrupts 49 51 - "#dma-cells" 52 + - dma-channels 50 53 51 54 examples: 52 55 - | 53 56 xdmac: dma-controller@5fc10000 { 54 57 compatible = "socionext,uniphier-xdmac"; 55 - reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>; 58 + reg = <0x5fc10000 0x5300>; 56 59 interrupts = <0 188 4>; 57 60 #dma-cells = <2>; 58 61 dma-channels = <16>;
+1 -1
Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
··· 1 1 # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) 2 2 %YAML 1.2 3 3 --- 4 - $id: "http://devicetree.org/schemas/bindings/iio/adc/st,stm32-adc.yaml#" 4 + $id: "http://devicetree.org/schemas/iio/adc/st,stm32-adc.yaml#" 5 5 $schema: "http://devicetree.org/meta-schemas/core.yaml#" 6 6 7 7 title: STMicroelectronics STM32 ADC bindings
-2
Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
··· 259 259 260 260 additionalProperties: false 261 261 262 - additionalProperties: false 263 - 264 262 additionalProperties: false 265 263 266 264 required:
+3 -3
Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
··· 97 97 - $ref: /schemas/types.yaml#/definitions/uint32 98 98 - minimum: 0 99 99 maximum: 63 100 - default: 0 100 + default: 32 101 101 102 102 qcom,charge-ctrl-value: 103 103 description: ··· 130 130 - $ref: /schemas/types.yaml#/definitions/uint32 131 131 - minimum: 0 132 132 maximum: 3 133 - default: 2 133 + default: 0 134 134 135 135 qcom,preemphasis-width: 136 136 description: ··· 152 152 - $ref: /schemas/types.yaml#/definitions/uint32 153 153 - minimum: 0 154 154 maximum: 3 155 - default: 0 155 + default: 1 156 156 157 157 required: 158 158 - compatible
-1
Documentation/devicetree/bindings/regulator/mps,mp5416.yaml
··· 37 37 type: object 38 38 39 39 additionalProperties: false 40 - additionalProperties: false 41 40 42 41 required: 43 42 - compatible
+2 -1
Documentation/devicetree/bindings/regulator/mps,mpq7920.yaml
··· 75 75 description: | 76 76 disables over voltage protection of this buck 77 77 78 - additionalProperties: false 78 + unevaluatedProperties: false 79 + 79 80 additionalProperties: false 80 81 81 82 required:
+5 -1
Documentation/devicetree/bindings/regulator/rohm,bd71828-regulator.yaml
··· 35 35 description: 36 36 should be "ldo1", ..., "ldo7" 37 37 38 + unevaluatedProperties: false 39 + 38 40 "^BUCK[1-7]$": 39 41 type: object 40 42 allOf: ··· 105 103 106 104 required: 107 105 - regulator-name 108 - additionalProperties: false 106 + 107 + unevaluatedProperties: false 108 + 109 109 additionalProperties: false
+5 -1
Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.yaml
··· 41 41 description: 42 42 should be "ldo1", ..., "ldo7" 43 43 44 + unevaluatedProperties: false 45 + 44 46 "^BUCK[1-8]$": 45 47 type: object 46 48 allOf: ··· 101 99 102 100 required: 103 101 - regulator-name 104 - additionalProperties: false 102 + 103 + unevaluatedProperties: false 104 + 105 105 additionalProperties: false
+5 -1
Documentation/devicetree/bindings/regulator/rohm,bd71847-regulator.yaml
··· 40 40 description: 41 41 should be "ldo1", ..., "ldo6" 42 42 43 + unevaluatedProperties: false 44 + 43 45 "^BUCK[1-6]$": 44 46 type: object 45 47 allOf: ··· 95 93 96 94 required: 97 95 - regulator-name 98 - additionalProperties: false 96 + 97 + unevaluatedProperties: false 98 + 99 99 additionalProperties: false
+1
Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml
··· 18 18 - renesas,r8a774c0-usb3-peri # RZ/G2E 19 19 - renesas,r8a7795-usb3-peri # R-Car H3 20 20 - renesas,r8a7796-usb3-peri # R-Car M3-W 21 + - renesas,r8a77961-usb3-peri # R-Car M3-W+ 21 22 - renesas,r8a77965-usb3-peri # R-Car M3-N 22 23 - renesas,r8a77990-usb3-peri # R-Car E3 23 24 - const: renesas,rcar-gen3-usb3-peri
+1
Documentation/devicetree/bindings/usb/renesas,usbhs.yaml
··· 40 40 - renesas,usbhs-r8a774c0 # RZ/G2E 41 41 - renesas,usbhs-r8a7795 # R-Car H3 42 42 - renesas,usbhs-r8a7796 # R-Car M3-W 43 + - renesas,usbhs-r8a77961 # R-Car M3-W+ 43 44 - renesas,usbhs-r8a77965 # R-Car M3-N 44 45 - renesas,usbhs-r8a77990 # R-Car E3 45 46 - renesas,usbhs-r8a77995 # R-Car D3
+2 -1
Documentation/devicetree/bindings/usb/usb-xhci.txt
··· 16 16 - "renesas,xhci-r8a7791" for r8a7791 SoC 17 17 - "renesas,xhci-r8a7793" for r8a7793 SoC 18 18 - "renesas,xhci-r8a7795" for r8a7795 SoC 19 - - "renesas,xhci-r8a7796" for r8a7796 SoC 19 + - "renesas,xhci-r8a7796" for r8a77960 SoC 20 + - "renesas,xhci-r8a77961" for r8a77961 SoC 20 21 - "renesas,xhci-r8a77965" for r8a77965 SoC 21 22 - "renesas,xhci-r8a77990" for r8a77990 SoC 22 23 - "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
+2 -2
Documentation/filesystems/debugfs.rst
··· 79 79 struct dentry *parent, u8 *value); 80 80 void debugfs_create_u16(const char *name, umode_t mode, 81 81 struct dentry *parent, u16 *value); 82 - struct dentry *debugfs_create_u32(const char *name, umode_t mode, 83 - struct dentry *parent, u32 *value); 82 + void debugfs_create_u32(const char *name, umode_t mode, 83 + struct dentry *parent, u32 *value); 84 84 void debugfs_create_u64(const char *name, umode_t mode, 85 85 struct dentry *parent, u64 *value); 86 86
+2 -2
Documentation/networking/devlink/ice.rst
··· 61 61 - running 62 62 - ICE OS Default Package 63 63 - The name of the DDP package that is active in the device. The DDP 64 - package is loaded by the driver during initialization. Each varation 65 - of DDP package shall have a unique name. 64 + package is loaded by the driver during initialization. Each 65 + variation of the DDP package has a unique name. 66 66 * - ``fw.app`` 67 67 - running 68 68 - 1.3.1.0
+9 -10
MAINTAINERS
··· 570 570 F: drivers/input/misc/adxl34x.c 571 571 572 572 ADXL372 THREE-AXIS DIGITAL ACCELEROMETER DRIVER 573 - M: Stefan Popa <stefan.popa@analog.com> 573 + M: Michael Hennerich <michael.hennerich@analog.com> 574 574 S: Supported 575 575 W: http://ez.analog.com/community/linux-device-drivers 576 576 F: Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml ··· 922 922 F: drivers/net/ethernet/amd/xgbe/ 923 923 924 924 ANALOG DEVICES INC AD5686 DRIVER 925 - M: Stefan Popa <stefan.popa@analog.com> 925 + M: Michael Hennerich <Michael.Hennerich@analog.com> 926 926 L: linux-pm@vger.kernel.org 927 927 S: Supported 928 928 W: http://ez.analog.com/community/linux-device-drivers ··· 930 930 F: drivers/iio/dac/ad5696* 931 931 932 932 ANALOG DEVICES INC AD5758 DRIVER 933 - M: Stefan Popa <stefan.popa@analog.com> 933 + M: Michael Hennerich <Michael.Hennerich@analog.com> 934 934 L: linux-iio@vger.kernel.org 935 935 S: Supported 936 936 W: http://ez.analog.com/community/linux-device-drivers ··· 946 946 F: drivers/iio/adc/ad7091r5.c 947 947 948 948 ANALOG DEVICES INC AD7124 DRIVER 949 - M: Stefan Popa <stefan.popa@analog.com> 949 + M: Michael Hennerich <Michael.Hennerich@analog.com> 950 950 L: linux-iio@vger.kernel.org 951 951 S: Supported 952 952 W: http://ez.analog.com/community/linux-device-drivers ··· 970 970 F: drivers/iio/adc/ad7292.c 971 971 972 972 ANALOG DEVICES INC AD7606 DRIVER 973 - M: Stefan Popa <stefan.popa@analog.com> 973 + M: Michael Hennerich <Michael.Hennerich@analog.com> 974 974 M: Beniamin Bia <beniamin.bia@analog.com> 975 975 L: linux-iio@vger.kernel.org 976 976 S: Supported ··· 979 979 F: drivers/iio/adc/ad7606.c 980 980 981 981 ANALOG DEVICES INC AD7768-1 DRIVER 982 - M: Stefan Popa <stefan.popa@analog.com> 982 + M: Michael Hennerich <Michael.Hennerich@analog.com> 983 983 L: linux-iio@vger.kernel.org 984 984 S: Supported 985 985 W: http://ez.analog.com/community/linux-device-drivers ··· 1040 1040 F: drivers/hwmon/adm1177.c 1041 1041 1042 1042 
ANALOG DEVICES INC ADP5061 DRIVER 1043 - M: Stefan Popa <stefan.popa@analog.com> 1043 + M: Michael Hennerich <Michael.Hennerich@analog.com> 1044 1044 L: linux-pm@vger.kernel.org 1045 1045 S: Supported 1046 1046 W: http://ez.analog.com/community/linux-device-drivers ··· 1109 1109 ANALOG DEVICES INC IIO DRIVERS 1110 1110 M: Lars-Peter Clausen <lars@metafoo.de> 1111 1111 M: Michael Hennerich <Michael.Hennerich@analog.com> 1112 - M: Stefan Popa <stefan.popa@analog.com> 1113 1112 S: Supported 1114 1113 W: http://wiki.analog.com/ 1115 1114 W: http://ez.analog.com/community/linux-device-drivers ··· 3657 3658 S: Maintained 3658 3659 W: http://btrfs.wiki.kernel.org/ 3659 3660 Q: http://patchwork.kernel.org/project/linux-btrfs/list/ 3660 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git 3661 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git 3661 3662 F: Documentation/filesystems/btrfs.rst 3662 3663 F: fs/btrfs/ 3663 3664 F: include/linux/btrfs* ··· 5935 5936 DYNAMIC INTERRUPT MODERATION 5936 5937 M: Tal Gilboa <talgi@mellanox.com> 5937 5938 S: Maintained 5939 + F: Documentation/networking/net_dim.rst 5938 5940 F: include/linux/dim.h 5939 5941 F: lib/dim/ 5940 - F: Documentation/networking/net_dim.rst 5941 5942 5942 5943 DZ DECSTATION DZ11 SERIAL DRIVER 5943 5944 M: "Maciej W. Rozycki" <macro@linux-mips.org>
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 7 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc2 5 + EXTRAVERSION = -rc4 6 6 NAME = Kleptomaniac Octopus 7 7 8 8 # *DOCUMENTATION*
+11 -3
arch/arm/crypto/chacha-glue.c
··· 91 91 return; 92 92 } 93 93 94 - kernel_neon_begin(); 95 - chacha_doneon(state, dst, src, bytes, nrounds); 96 - kernel_neon_end(); 94 + do { 95 + unsigned int todo = min_t(unsigned int, bytes, SZ_4K); 96 + 97 + kernel_neon_begin(); 98 + chacha_doneon(state, dst, src, todo, nrounds); 99 + kernel_neon_end(); 100 + 101 + bytes -= todo; 102 + src += todo; 103 + dst += todo; 104 + } while (bytes); 97 105 } 98 106 EXPORT_SYMBOL(chacha_crypt_arch); 99 107
+1 -1
arch/arm/crypto/nhpoly1305-neon-glue.c
··· 30 30 return crypto_nhpoly1305_update(desc, src, srclen); 31 31 32 32 do { 33 - unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); 33 + unsigned int n = min_t(unsigned int, srclen, SZ_4K); 34 34 35 35 kernel_neon_begin(); 36 36 crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
+11 -4
arch/arm/crypto/poly1305-glue.c
··· 160 160 unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); 161 161 162 162 if (static_branch_likely(&have_neon) && do_neon) { 163 - kernel_neon_begin(); 164 - poly1305_blocks_neon(&dctx->h, src, len, 1); 165 - kernel_neon_end(); 163 + do { 164 + unsigned int todo = min_t(unsigned int, len, SZ_4K); 165 + 166 + kernel_neon_begin(); 167 + poly1305_blocks_neon(&dctx->h, src, todo, 1); 168 + kernel_neon_end(); 169 + 170 + len -= todo; 171 + src += todo; 172 + } while (len); 166 173 } else { 167 174 poly1305_blocks_arm(&dctx->h, src, len, 1); 175 + src += len; 168 176 } 169 - src += len; 170 177 nbytes %= POLY1305_BLOCK_SIZE; 171 178 } 172 179
+11 -3
arch/arm64/crypto/chacha-neon-glue.c
··· 87 87 !crypto_simd_usable()) 88 88 return chacha_crypt_generic(state, dst, src, bytes, nrounds); 89 89 90 - kernel_neon_begin(); 91 - chacha_doneon(state, dst, src, bytes, nrounds); 92 - kernel_neon_end(); 90 + do { 91 + unsigned int todo = min_t(unsigned int, bytes, SZ_4K); 92 + 93 + kernel_neon_begin(); 94 + chacha_doneon(state, dst, src, todo, nrounds); 95 + kernel_neon_end(); 96 + 97 + bytes -= todo; 98 + src += todo; 99 + dst += todo; 100 + } while (bytes); 93 101 } 94 102 EXPORT_SYMBOL(chacha_crypt_arch); 95 103
+1 -1
arch/arm64/crypto/nhpoly1305-neon-glue.c
··· 30 30 return crypto_nhpoly1305_update(desc, src, srclen); 31 31 32 32 do { 33 - unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); 33 + unsigned int n = min_t(unsigned int, srclen, SZ_4K); 34 34 35 35 kernel_neon_begin(); 36 36 crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
+11 -4
arch/arm64/crypto/poly1305-glue.c
··· 143 143 unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); 144 144 145 145 if (static_branch_likely(&have_neon) && crypto_simd_usable()) { 146 - kernel_neon_begin(); 147 - poly1305_blocks_neon(&dctx->h, src, len, 1); 148 - kernel_neon_end(); 146 + do { 147 + unsigned int todo = min_t(unsigned int, len, SZ_4K); 148 + 149 + kernel_neon_begin(); 150 + poly1305_blocks_neon(&dctx->h, src, todo, 1); 151 + kernel_neon_end(); 152 + 153 + len -= todo; 154 + src += todo; 155 + } while (len); 149 156 } else { 150 157 poly1305_blocks(&dctx->h, src, len, 1); 158 + src += len; 151 159 } 152 - src += len; 153 160 nbytes %= POLY1305_BLOCK_SIZE; 154 161 } 155 162
+1 -1
arch/arm64/kernel/vdso/Makefile
··· 32 32 OBJECT_FILES_NON_STANDARD := y 33 33 KCOV_INSTRUMENT := n 34 34 35 - CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny 35 + CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables 36 36 37 37 ifneq ($(c-gettimeofday-y),) 38 38 CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+1 -1
arch/powerpc/kernel/entry_32.S
··· 732 732 stw r10,_CCR(r1) 733 733 stw r1,KSP(r3) /* Set old stack pointer */ 734 734 735 - kuap_check r2, r4 735 + kuap_check r2, r0 736 736 #ifdef CONFIG_SMP 737 737 /* We need a sync somewhere here to make sure that if the 738 738 * previous task gets rescheduled on another CPU, it sees all
+2
arch/powerpc/kernel/setup_64.c
··· 534 534 lsizep = of_get_property(np, propnames[3], NULL); 535 535 if (bsizep == NULL) 536 536 bsizep = lsizep; 537 + if (lsizep == NULL) 538 + lsizep = bsizep; 537 539 if (lsizep != NULL) 538 540 lsize = be32_to_cpu(*lsizep); 539 541 if (bsizep != NULL)
+3
arch/powerpc/mm/nohash/8xx.c
··· 185 185 mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL); 186 186 } 187 187 } 188 + _tlbil_all(); 188 189 } 189 190 190 191 #ifdef CONFIG_STRICT_KERNEL_RWX ··· 199 198 -__pa(((unsigned long)_sinittext) & 200 199 ~(LARGE_PAGE_SIZE_8M - 1))); 201 200 mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext)); 201 + 202 + _tlbil_all(); 202 203 203 204 /* Update page tables for PTDUMP and BDI */ 204 205 mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
+1 -1
arch/powerpc/platforms/Kconfig.cputype
··· 397 397 398 398 config PPC_KUAP_DEBUG 399 399 bool "Extra debugging for Kernel Userspace Access Protection" 400 - depends on PPC_KUAP && (PPC_RADIX_MMU || PPC_32) 400 + depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32) 401 401 help 402 402 Add extra debugging for Kernel Userspace Access Protection (KUAP) 403 403 If you're unsure, say N.
+1 -1
arch/riscv/Kconfig
··· 60 60 select ARCH_HAS_GIGANTIC_PAGE 61 61 select ARCH_HAS_SET_DIRECT_MAP 62 62 select ARCH_HAS_SET_MEMORY 63 - select ARCH_HAS_STRICT_KERNEL_RWX 63 + select ARCH_HAS_STRICT_KERNEL_RWX if MMU 64 64 select ARCH_WANT_HUGE_PMD_SHARE if 64BIT 65 65 select SPARSEMEM_STATIC if 32BIT 66 66 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
+10 -7
arch/riscv/kernel/sbi.c
··· 102 102 { 103 103 sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0); 104 104 } 105 - EXPORT_SYMBOL(sbi_set_timer); 105 + EXPORT_SYMBOL(sbi_shutdown); 106 106 107 107 /** 108 108 * sbi_clear_ipi() - Clear any pending IPIs for the calling hart. ··· 113 113 { 114 114 sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0); 115 115 } 116 - EXPORT_SYMBOL(sbi_shutdown); 116 + EXPORT_SYMBOL(sbi_clear_ipi); 117 117 118 118 /** 119 119 * sbi_set_timer_v01() - Program the timer for next timer event. ··· 167 167 168 168 return result; 169 169 } 170 + 171 + static void sbi_set_power_off(void) 172 + { 173 + pm_power_off = sbi_shutdown; 174 + } 170 175 #else 171 176 static void __sbi_set_timer_v01(uint64_t stime_value) 172 177 { ··· 196 191 197 192 return 0; 198 193 } 194 + 195 + static void sbi_set_power_off(void) {} 199 196 #endif /* CONFIG_RISCV_SBI_V01 */ 200 197 201 198 static void __sbi_set_timer_v02(uint64_t stime_value) ··· 547 540 return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION); 548 541 } 549 542 550 - static void sbi_power_off(void) 551 - { 552 - sbi_shutdown(); 553 - } 554 543 555 544 int __init sbi_init(void) 556 545 { 557 546 int ret; 558 547 559 - pm_power_off = sbi_power_off; 548 + sbi_set_power_off(); 560 549 ret = sbi_get_spec_version(); 561 550 if (ret > 0) 562 551 sbi_spec_version = ret;
+2 -2
arch/riscv/kernel/stacktrace.c
··· 12 12 #include <linux/stacktrace.h> 13 13 #include <linux/ftrace.h> 14 14 15 + register unsigned long sp_in_global __asm__("sp"); 16 + 15 17 #ifdef CONFIG_FRAME_POINTER 16 18 17 19 struct stackframe { 18 20 unsigned long fp; 19 21 unsigned long ra; 20 22 }; 21 - 22 - register unsigned long sp_in_global __asm__("sp"); 23 23 24 24 void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, 25 25 bool (*fn)(unsigned long, void *), void *arg)
+3 -3
arch/riscv/kernel/vdso/Makefile
··· 33 33 $(call if_changed,vdsold) 34 34 35 35 # We also create a special relocatable object that should mirror the symbol 36 - # table and layout of the linked DSO. With ld -R we can then refer to 37 - # these symbols in the kernel code rather than hand-coded addresses. 36 + # table and layout of the linked DSO. With ld --just-symbols we can then 37 + # refer to these symbols in the kernel code rather than hand-coded addresses. 38 38 39 39 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ 40 40 -Wl,--build-id -Wl,--hash-style=both 41 41 $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE 42 42 $(call if_changed,vdsold) 43 43 44 - LDFLAGS_vdso-syms.o := -r -R 44 + LDFLAGS_vdso-syms.o := -r --just-symbols 45 45 $(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE 46 46 $(call if_changed,ld) 47 47
-2
arch/s390/boot/uv.c
··· 7 7 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST 8 8 int __bootdata_preserved(prot_virt_guest); 9 9 #endif 10 - #if IS_ENABLED(CONFIG_KVM) 11 10 struct uv_info __bootdata_preserved(uv_info); 12 - #endif 13 11 14 12 void uv_query_info(void) 15 13 {
+1 -1
arch/s390/kernel/diag.c
··· 133 133 } 134 134 EXPORT_SYMBOL(diag_stat_inc); 135 135 136 - void diag_stat_inc_norecursion(enum diag_stat_enum nr) 136 + void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr) 137 137 { 138 138 this_cpu_inc(diag_stat.counter[nr]); 139 139 trace_s390_diagnose_norecursion(diag_map[nr].code);
+2 -2
arch/s390/kernel/smp.c
··· 403 403 return -1; 404 404 } 405 405 406 - bool arch_vcpu_is_preempted(int cpu) 406 + bool notrace arch_vcpu_is_preempted(int cpu) 407 407 { 408 408 if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu)) 409 409 return false; ··· 413 413 } 414 414 EXPORT_SYMBOL(arch_vcpu_is_preempted); 415 415 416 - void smp_yield_cpu(int cpu) 416 + void notrace smp_yield_cpu(int cpu) 417 417 { 418 418 if (!MACHINE_HAS_DIAG9C) 419 419 return;
+1 -1
arch/s390/kernel/trace.c
··· 14 14 15 15 static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); 16 16 17 - void trace_s390_diagnose_norecursion(int diag_nr) 17 + void notrace trace_s390_diagnose_norecursion(int diag_nr) 18 18 { 19 19 unsigned long flags; 20 20 unsigned int *depth;
+2 -1
arch/s390/kernel/uv.c
··· 23 23 int __bootdata_preserved(prot_virt_guest); 24 24 #endif 25 25 26 + struct uv_info __bootdata_preserved(uv_info); 27 + 26 28 #if IS_ENABLED(CONFIG_KVM) 27 29 int prot_virt_host; 28 30 EXPORT_SYMBOL(prot_virt_host); 29 - struct uv_info __bootdata_preserved(uv_info); 30 31 EXPORT_SYMBOL(uv_info); 31 32 32 33 static int __init prot_virt_setup(char *val)
+4
arch/s390/lib/uaccess.c
··· 64 64 { 65 65 mm_segment_t old_fs; 66 66 unsigned long asce, cr; 67 + unsigned long flags; 67 68 68 69 old_fs = current->thread.mm_segment; 69 70 if (old_fs & 1) 70 71 return old_fs; 72 + /* protect against a concurrent page table upgrade */ 73 + local_irq_save(flags); 71 74 current->thread.mm_segment |= 1; 72 75 asce = S390_lowcore.kernel_asce; 73 76 if (likely(old_fs == USER_DS)) { ··· 86 83 __ctl_load(asce, 7, 7); 87 84 set_cpu_flag(CIF_ASCE_SECONDARY); 88 85 } 86 + local_irq_restore(flags); 89 87 return old_fs; 90 88 } 91 89 EXPORT_SYMBOL(enable_sacf_uaccess);
+14 -2
arch/s390/mm/pgalloc.c
··· 70 70 { 71 71 struct mm_struct *mm = arg; 72 72 73 - if (current->active_mm == mm) 74 - set_user_asce(mm); 73 + /* we must change all active ASCEs to avoid the creation of new TLBs */ 74 + if (current->active_mm == mm) { 75 + S390_lowcore.user_asce = mm->context.asce; 76 + if (current->thread.mm_segment == USER_DS) { 77 + __ctl_load(S390_lowcore.user_asce, 1, 1); 78 + /* Mark user-ASCE present in CR1 */ 79 + clear_cpu_flag(CIF_ASCE_PRIMARY); 80 + } 81 + if (current->thread.mm_segment == USER_DS_SACF) { 82 + __ctl_load(S390_lowcore.user_asce, 7, 7); 83 + /* enable_sacf_uaccess does all or nothing */ 84 + WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY)); 85 + } 86 + } 75 87 __tlb_flush_local(); 76 88 } 77 89
+3 -2
arch/s390/pci/pci_irq.c
··· 115 115 .name = "PCI-MSI", 116 116 .irq_unmask = pci_msi_unmask_irq, 117 117 .irq_mask = pci_msi_mask_irq, 118 - .irq_set_affinity = zpci_set_irq_affinity, 119 118 }; 120 119 121 120 static void zpci_handle_cpu_local_irq(bool rescan) ··· 275 276 rc = -EIO; 276 277 if (hwirq - bit >= msi_vecs) 277 278 break; 278 - irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, msi->affinity); 279 + irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, 280 + (irq_delivery == DIRECTED) ? 281 + msi->affinity : NULL); 279 282 if (irq < 0) 280 283 return -ENOMEM; 281 284 rc = irq_set_msi_desc(irq, msi);
+4 -6
arch/x86/crypto/blake2s-glue.c
··· 32 32 const u32 inc) 33 33 { 34 34 /* SIMD disables preemption, so relax after processing each page. */ 35 - BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8); 35 + BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8); 36 36 37 37 if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) { 38 38 blake2s_compress_generic(state, block, nblocks, inc); 39 39 return; 40 40 } 41 41 42 - for (;;) { 42 + do { 43 43 const size_t blocks = min_t(size_t, nblocks, 44 - PAGE_SIZE / BLAKE2S_BLOCK_SIZE); 44 + SZ_4K / BLAKE2S_BLOCK_SIZE); 45 45 46 46 kernel_fpu_begin(); 47 47 if (IS_ENABLED(CONFIG_AS_AVX512) && ··· 52 52 kernel_fpu_end(); 53 53 54 54 nblocks -= blocks; 55 - if (!nblocks) 56 - break; 57 55 block += blocks * BLAKE2S_BLOCK_SIZE; 58 - } 56 + } while (nblocks); 59 57 } 60 58 EXPORT_SYMBOL(blake2s_compress_arch); 61 59
+11 -3
arch/x86/crypto/chacha_glue.c
··· 153 153 bytes <= CHACHA_BLOCK_SIZE) 154 154 return chacha_crypt_generic(state, dst, src, bytes, nrounds); 155 155 156 - kernel_fpu_begin(); 157 - chacha_dosimd(state, dst, src, bytes, nrounds); 158 - kernel_fpu_end(); 156 + do { 157 + unsigned int todo = min_t(unsigned int, bytes, SZ_4K); 158 + 159 + kernel_fpu_begin(); 160 + chacha_dosimd(state, dst, src, todo, nrounds); 161 + kernel_fpu_end(); 162 + 163 + bytes -= todo; 164 + src += todo; 165 + dst += todo; 166 + } while (bytes); 159 167 } 160 168 EXPORT_SYMBOL(chacha_crypt_arch); 161 169
+1 -1
arch/x86/crypto/nhpoly1305-avx2-glue.c
··· 29 29 return crypto_nhpoly1305_update(desc, src, srclen); 30 30 31 31 do { 32 - unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); 32 + unsigned int n = min_t(unsigned int, srclen, SZ_4K); 33 33 34 34 kernel_fpu_begin(); 35 35 crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2);
+1 -1
arch/x86/crypto/nhpoly1305-sse2-glue.c
··· 29 29 return crypto_nhpoly1305_update(desc, src, srclen); 30 30 31 31 do { 32 - unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE); 32 + unsigned int n = min_t(unsigned int, srclen, SZ_4K); 33 33 34 34 kernel_fpu_begin(); 35 35 crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);
+6 -7
arch/x86/crypto/poly1305_glue.c
··· 91 91 struct poly1305_arch_internal *state = ctx; 92 92 93 93 /* SIMD disables preemption, so relax after processing each page. */ 94 - BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE || 95 - PAGE_SIZE % POLY1305_BLOCK_SIZE); 94 + BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE || 95 + SZ_4K % POLY1305_BLOCK_SIZE); 96 96 97 97 if (!static_branch_likely(&poly1305_use_avx) || 98 98 (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) || ··· 102 102 return; 103 103 } 104 104 105 - for (;;) { 106 - const size_t bytes = min_t(size_t, len, PAGE_SIZE); 105 + do { 106 + const size_t bytes = min_t(size_t, len, SZ_4K); 107 107 108 108 kernel_fpu_begin(); 109 109 if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512)) ··· 113 113 else 114 114 poly1305_blocks_avx(ctx, inp, bytes, padbit); 115 115 kernel_fpu_end(); 116 + 116 117 len -= bytes; 117 - if (!len) 118 - break; 119 118 inp += bytes; 120 - } 119 + } while (len); 121 120 } 122 121 123 122 static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
+10 -2
arch/x86/hyperv/hv_init.c
··· 73 73 struct page *pg; 74 74 75 75 input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg); 76 - pg = alloc_page(GFP_KERNEL); 76 + /* hv_cpu_init() can be called with IRQs disabled from hv_resume() */ 77 + pg = alloc_page(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL); 77 78 if (unlikely(!pg)) 78 79 return -ENOMEM; 79 80 *input_arg = page_address(pg); ··· 255 254 static int hv_suspend(void) 256 255 { 257 256 union hv_x64_msr_hypercall_contents hypercall_msr; 257 + int ret; 258 258 259 259 /* 260 260 * Reset the hypercall page as it is going to be invalidated ··· 272 270 hypercall_msr.enable = 0; 273 271 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); 274 272 275 - return 0; 273 + ret = hv_cpu_die(0); 274 + return ret; 276 275 } 277 276 278 277 static void hv_resume(void) 279 278 { 280 279 union hv_x64_msr_hypercall_contents hypercall_msr; 280 + int ret; 281 + 282 + ret = hv_cpu_init(0); 283 + WARN_ON(ret); 281 284 282 285 /* Re-enable the hypercall page */ 283 286 rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); ··· 295 288 hv_hypercall_pg_saved = NULL; 296 289 } 297 290 291 + /* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */ 298 292 static struct syscore_ops hv_syscore_ops = { 299 293 .suspend = hv_suspend, 300 294 .resume = hv_resume,
+2
arch/x86/include/asm/mshyperv.h
··· 35 35 rdmsrl(HV_X64_MSR_SINT0 + int_num, val) 36 36 #define hv_set_synint_state(int_num, val) \ 37 37 wrmsrl(HV_X64_MSR_SINT0 + int_num, val) 38 + #define hv_recommend_using_aeoi() \ 39 + (!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)) 38 40 39 41 #define hv_get_crash_ctl(val) \ 40 42 rdmsrl(HV_X64_MSR_CRASH_CTL, val)
+1 -1
block/partitions/core.c
··· 496 496 497 497 if (!disk_part_scan_enabled(disk)) 498 498 return 0; 499 - if (bdev->bd_part_count || bdev->bd_openers > 1) 499 + if (bdev->bd_part_count) 500 500 return -EBUSY; 501 501 res = invalidate_partition(disk, 0); 502 502 if (res)
+2 -2
drivers/acpi/device_pm.c
··· 273 273 end: 274 274 if (result) { 275 275 dev_warn(&device->dev, "Failed to change power state to %s\n", 276 - acpi_power_state_string(state)); 276 + acpi_power_state_string(target_state)); 277 277 } else { 278 278 device->power.state = target_state; 279 279 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 280 280 "Device [%s] transitioned to %s\n", 281 281 device->pnp.bus_id, 282 - acpi_power_state_string(state))); 282 + acpi_power_state_string(target_state))); 283 283 } 284 284 285 285 return result;
-1
drivers/base/firmware_loader/fallback_table.c
··· 45 45 }, 46 46 { } 47 47 }; 48 - EXPORT_SYMBOL_GPL(firmware_config_table); 49 48 #endif
+78 -8
drivers/block/virtio_blk.c
··· 33 33 } ____cacheline_aligned_in_smp; 34 34 35 35 struct virtio_blk { 36 + /* 37 + * This mutex must be held by anything that may run after 38 + * virtblk_remove() sets vblk->vdev to NULL. 39 + * 40 + * blk-mq, virtqueue processing, and sysfs attribute code paths are 41 + * shut down before vblk->vdev is set to NULL and therefore do not need 42 + * to hold this mutex. 43 + */ 44 + struct mutex vdev_mutex; 36 45 struct virtio_device *vdev; 37 46 38 47 /* The disk structure for the kernel. */ ··· 52 43 53 44 /* Process context for config space updates */ 54 45 struct work_struct config_work; 46 + 47 + /* 48 + * Tracks references from block_device_operations open/release and 49 + * virtio_driver probe/remove so this object can be freed once no 50 + * longer in use. 51 + */ 52 + refcount_t refs; 55 53 56 54 /* What host tells us, plus 2 for header & tailer. */ 57 55 unsigned int sg_elems; ··· 311 295 return err; 312 296 } 313 297 298 + static void virtblk_get(struct virtio_blk *vblk) 299 + { 300 + refcount_inc(&vblk->refs); 301 + } 302 + 303 + static void virtblk_put(struct virtio_blk *vblk) 304 + { 305 + if (refcount_dec_and_test(&vblk->refs)) { 306 + ida_simple_remove(&vd_index_ida, vblk->index); 307 + mutex_destroy(&vblk->vdev_mutex); 308 + kfree(vblk); 309 + } 310 + } 311 + 312 + static int virtblk_open(struct block_device *bd, fmode_t mode) 313 + { 314 + struct virtio_blk *vblk = bd->bd_disk->private_data; 315 + int ret = 0; 316 + 317 + mutex_lock(&vblk->vdev_mutex); 318 + 319 + if (vblk->vdev) 320 + virtblk_get(vblk); 321 + else 322 + ret = -ENXIO; 323 + 324 + mutex_unlock(&vblk->vdev_mutex); 325 + return ret; 326 + } 327 + 328 + static void virtblk_release(struct gendisk *disk, fmode_t mode) 329 + { 330 + struct virtio_blk *vblk = disk->private_data; 331 + 332 + virtblk_put(vblk); 333 + } 334 + 314 335 /* We provide getgeo only to please some old bootloader/partitioning tools */ 315 336 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry 
*geo) 316 337 { 317 338 struct virtio_blk *vblk = bd->bd_disk->private_data; 339 + int ret = 0; 340 + 341 + mutex_lock(&vblk->vdev_mutex); 342 + 343 + if (!vblk->vdev) { 344 + ret = -ENXIO; 345 + goto out; 346 + } 318 347 319 348 /* see if the host passed in geometry config */ 320 349 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { ··· 375 314 geo->sectors = 1 << 5; 376 315 geo->cylinders = get_capacity(bd->bd_disk) >> 11; 377 316 } 378 - return 0; 317 + out: 318 + mutex_unlock(&vblk->vdev_mutex); 319 + return ret; 379 320 } 380 321 381 322 static const struct block_device_operations virtblk_fops = { 382 323 .owner = THIS_MODULE, 324 + .open = virtblk_open, 325 + .release = virtblk_release, 383 326 .getgeo = virtblk_getgeo, 384 327 }; 385 328 ··· 720 655 goto out_free_index; 721 656 } 722 657 658 + /* This reference is dropped in virtblk_remove(). */ 659 + refcount_set(&vblk->refs, 1); 660 + mutex_init(&vblk->vdev_mutex); 661 + 723 662 vblk->vdev = vdev; 724 663 vblk->sg_elems = sg_elems; 725 664 ··· 889 820 static void virtblk_remove(struct virtio_device *vdev) 890 821 { 891 822 struct virtio_blk *vblk = vdev->priv; 892 - int index = vblk->index; 893 - int refc; 894 823 895 824 /* Make sure no work handler is accessing the device. */ 896 825 flush_work(&vblk->config_work); ··· 898 831 899 832 blk_mq_free_tag_set(&vblk->tag_set); 900 833 834 + mutex_lock(&vblk->vdev_mutex); 835 + 901 836 /* Stop all the virtqueues. */ 902 837 vdev->config->reset(vdev); 903 838 904 - refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref); 839 + /* Virtqueues are stopped, nothing can use vblk->vdev anymore. 
*/ 840 + vblk->vdev = NULL; 841 + 905 842 put_disk(vblk->disk); 906 843 vdev->config->del_vqs(vdev); 907 844 kfree(vblk->vqs); 908 - kfree(vblk); 909 845 910 - /* Only free device id if we don't have any users */ 911 - if (refc == 1) 912 - ida_simple_remove(&vd_index_ida, index); 846 + mutex_unlock(&vblk->vdev_mutex); 847 + 848 + virtblk_put(vblk); 913 849 } 914 850 915 851 #ifdef CONFIG_PM_SLEEP
+160 -34
drivers/counter/104-quad-8.c
··· 44 44 * @base: base port address of the IIO device 45 45 */ 46 46 struct quad8_iio { 47 + struct mutex lock; 47 48 struct counter_device counter; 48 49 unsigned int fck_prescaler[QUAD8_NUM_COUNTERS]; 49 50 unsigned int preset[QUAD8_NUM_COUNTERS]; ··· 124 123 /* Borrow XOR Carry effectively doubles count range */ 125 124 *val = (borrow ^ carry) << 24; 126 125 126 + mutex_lock(&priv->lock); 127 + 127 128 /* Reset Byte Pointer; transfer Counter to Output Latch */ 128 129 outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT, 129 130 base_offset + 1); 130 131 131 132 for (i = 0; i < 3; i++) 132 133 *val |= (unsigned int)inb(base_offset) << (8 * i); 134 + 135 + mutex_unlock(&priv->lock); 133 136 134 137 return IIO_VAL_INT; 135 138 case IIO_CHAN_INFO_ENABLE: ··· 165 160 if ((unsigned int)val > 0xFFFFFF) 166 161 return -EINVAL; 167 162 163 + mutex_lock(&priv->lock); 164 + 168 165 /* Reset Byte Pointer */ 169 166 outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); 170 167 ··· 190 183 /* Reset Error flag */ 191 184 outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1); 192 185 186 + mutex_unlock(&priv->lock); 187 + 193 188 return 0; 194 189 case IIO_CHAN_INFO_ENABLE: 195 190 /* only boolean values accepted */ 196 191 if (val < 0 || val > 1) 197 192 return -EINVAL; 193 + 194 + mutex_lock(&priv->lock); 198 195 199 196 priv->ab_enable[chan->channel] = val; 200 197 ··· 207 196 /* Load I/O control configuration */ 208 197 outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); 209 198 199 + mutex_unlock(&priv->lock); 200 + 210 201 return 0; 211 202 case IIO_CHAN_INFO_SCALE: 203 + mutex_lock(&priv->lock); 204 + 212 205 /* Quadrature scaling only available in quadrature mode */ 213 - if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1)) 206 + if (!priv->quadrature_mode[chan->channel] && 207 + (val2 || val != 1)) { 208 + mutex_unlock(&priv->lock); 214 209 return -EINVAL; 210 + } 215 211 216 212 /* Only three gain states (1, 0.5, 0.25) */ 217 213 if (val == 1 
&& !val2) ··· 232 214 priv->quadrature_scale[chan->channel] = 2; 233 215 break; 234 216 default: 217 + mutex_unlock(&priv->lock); 235 218 return -EINVAL; 236 219 } 237 - else 220 + else { 221 + mutex_unlock(&priv->lock); 238 222 return -EINVAL; 223 + } 239 224 225 + mutex_unlock(&priv->lock); 240 226 return 0; 241 227 } 242 228 ··· 277 255 if (preset > 0xFFFFFF) 278 256 return -EINVAL; 279 257 258 + mutex_lock(&priv->lock); 259 + 280 260 priv->preset[chan->channel] = preset; 281 261 282 262 /* Reset Byte Pointer */ ··· 287 263 /* Set Preset Register */ 288 264 for (i = 0; i < 3; i++) 289 265 outb(preset >> (8 * i), base_offset); 266 + 267 + mutex_unlock(&priv->lock); 290 268 291 269 return len; 292 270 } ··· 319 293 /* Preset enable is active low in Input/Output Control register */ 320 294 preset_enable = !preset_enable; 321 295 296 + mutex_lock(&priv->lock); 297 + 322 298 priv->preset_enable[chan->channel] = preset_enable; 323 299 324 300 ior_cfg = priv->ab_enable[chan->channel] | ··· 328 300 329 301 /* Load I/O control configuration to Input / Output Control Register */ 330 302 outb(QUAD8_CTR_IOR | ior_cfg, base_offset); 303 + 304 + mutex_unlock(&priv->lock); 331 305 332 306 return len; 333 307 } ··· 388 358 unsigned int mode_cfg = cnt_mode << 1; 389 359 const int base_offset = priv->base + 2 * chan->channel + 1; 390 360 361 + mutex_lock(&priv->lock); 362 + 391 363 priv->count_mode[chan->channel] = cnt_mode; 392 364 393 365 /* Add quadrature mode configuration */ ··· 398 366 399 367 /* Load mode configuration to Counter Mode Register */ 400 368 outb(QUAD8_CTR_CMR | mode_cfg, base_offset); 369 + 370 + mutex_unlock(&priv->lock); 401 371 402 372 return 0; 403 373 } ··· 428 394 const struct iio_chan_spec *chan, unsigned int synchronous_mode) 429 395 { 430 396 struct quad8_iio *const priv = iio_priv(indio_dev); 431 - const unsigned int idr_cfg = synchronous_mode | 432 - priv->index_polarity[chan->channel] << 1; 433 397 const int base_offset = priv->base + 2 * 
chan->channel + 1; 398 + unsigned int idr_cfg = synchronous_mode; 399 + 400 + mutex_lock(&priv->lock); 401 + 402 + idr_cfg |= priv->index_polarity[chan->channel] << 1; 434 403 435 404 /* Index function must be non-synchronous in non-quadrature mode */ 436 - if (synchronous_mode && !priv->quadrature_mode[chan->channel]) 405 + if (synchronous_mode && !priv->quadrature_mode[chan->channel]) { 406 + mutex_unlock(&priv->lock); 437 407 return -EINVAL; 408 + } 438 409 439 410 priv->synchronous_mode[chan->channel] = synchronous_mode; 440 411 441 412 /* Load Index Control configuration to Index Control Register */ 442 413 outb(QUAD8_CTR_IDR | idr_cfg, base_offset); 414 + 415 + mutex_unlock(&priv->lock); 443 416 444 417 return 0; 445 418 } ··· 475 434 const struct iio_chan_spec *chan, unsigned int quadrature_mode) 476 435 { 477 436 struct quad8_iio *const priv = iio_priv(indio_dev); 478 - unsigned int mode_cfg = priv->count_mode[chan->channel] << 1; 479 437 const int base_offset = priv->base + 2 * chan->channel + 1; 438 + unsigned int mode_cfg; 439 + 440 + mutex_lock(&priv->lock); 441 + 442 + mode_cfg = priv->count_mode[chan->channel] << 1; 480 443 481 444 if (quadrature_mode) 482 445 mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3; ··· 497 452 498 453 /* Load mode configuration to Counter Mode Register */ 499 454 outb(QUAD8_CTR_CMR | mode_cfg, base_offset); 455 + 456 + mutex_unlock(&priv->lock); 500 457 501 458 return 0; 502 459 } ··· 527 480 const struct iio_chan_spec *chan, unsigned int index_polarity) 528 481 { 529 482 struct quad8_iio *const priv = iio_priv(indio_dev); 530 - const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] | 531 - index_polarity << 1; 532 483 const int base_offset = priv->base + 2 * chan->channel + 1; 484 + unsigned int idr_cfg = index_polarity << 1; 485 + 486 + mutex_lock(&priv->lock); 487 + 488 + idr_cfg |= priv->synchronous_mode[chan->channel]; 533 489 534 490 priv->index_polarity[chan->channel] = index_polarity; 535 491 
536 492 /* Load Index Control configuration to Index Control Register */ 537 493 outb(QUAD8_CTR_IDR | idr_cfg, base_offset); 494 + 495 + mutex_unlock(&priv->lock); 538 496 539 497 return 0; 540 498 } ··· 641 589 static int quad8_count_read(struct counter_device *counter, 642 590 struct counter_count *count, unsigned long *val) 643 591 { 644 - const struct quad8_iio *const priv = counter->priv; 592 + struct quad8_iio *const priv = counter->priv; 645 593 const int base_offset = priv->base + 2 * count->id; 646 594 unsigned int flags; 647 595 unsigned int borrow; ··· 655 603 /* Borrow XOR Carry effectively doubles count range */ 656 604 *val = (unsigned long)(borrow ^ carry) << 24; 657 605 606 + mutex_lock(&priv->lock); 607 + 658 608 /* Reset Byte Pointer; transfer Counter to Output Latch */ 659 609 outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT, 660 610 base_offset + 1); ··· 664 610 for (i = 0; i < 3; i++) 665 611 *val |= (unsigned long)inb(base_offset) << (8 * i); 666 612 613 + mutex_unlock(&priv->lock); 614 + 667 615 return 0; 668 616 } 669 617 670 618 static int quad8_count_write(struct counter_device *counter, 671 619 struct counter_count *count, unsigned long val) 672 620 { 673 - const struct quad8_iio *const priv = counter->priv; 621 + struct quad8_iio *const priv = counter->priv; 674 622 const int base_offset = priv->base + 2 * count->id; 675 623 int i; 676 624 677 625 /* Only 24-bit values are supported */ 678 626 if (val > 0xFFFFFF) 679 627 return -EINVAL; 628 + 629 + mutex_lock(&priv->lock); 680 630 681 631 /* Reset Byte Pointer */ 682 632 outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); ··· 705 647 /* Reset Error flag */ 706 648 outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1); 707 649 650 + mutex_unlock(&priv->lock); 651 + 708 652 return 0; 709 653 } 710 654 ··· 727 667 static int quad8_function_get(struct counter_device *counter, 728 668 struct counter_count *count, size_t *function) 729 669 { 730 - const struct quad8_iio 
*const priv = counter->priv; 670 + struct quad8_iio *const priv = counter->priv; 731 671 const int id = count->id; 732 - const unsigned int quadrature_mode = priv->quadrature_mode[id]; 733 - const unsigned int scale = priv->quadrature_scale[id]; 734 672 735 - if (quadrature_mode) 736 - switch (scale) { 673 + mutex_lock(&priv->lock); 674 + 675 + if (priv->quadrature_mode[id]) 676 + switch (priv->quadrature_scale[id]) { 737 677 case 0: 738 678 *function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1; 739 679 break; ··· 747 687 else 748 688 *function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION; 749 689 690 + mutex_unlock(&priv->lock); 691 + 750 692 return 0; 751 693 } 752 694 ··· 759 697 const int id = count->id; 760 698 unsigned int *const quadrature_mode = priv->quadrature_mode + id; 761 699 unsigned int *const scale = priv->quadrature_scale + id; 762 - unsigned int mode_cfg = priv->count_mode[id] << 1; 763 700 unsigned int *const synchronous_mode = priv->synchronous_mode + id; 764 - const unsigned int idr_cfg = priv->index_polarity[id] << 1; 765 701 const int base_offset = priv->base + 2 * id + 1; 702 + unsigned int mode_cfg; 703 + unsigned int idr_cfg; 704 + 705 + mutex_lock(&priv->lock); 706 + 707 + mode_cfg = priv->count_mode[id] << 1; 708 + idr_cfg = priv->index_polarity[id] << 1; 766 709 767 710 if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) { 768 711 *quadrature_mode = 0; ··· 802 735 803 736 /* Load mode configuration to Counter Mode Register */ 804 737 outb(QUAD8_CTR_CMR | mode_cfg, base_offset); 738 + 739 + mutex_unlock(&priv->lock); 805 740 806 741 return 0; 807 742 } ··· 921 852 { 922 853 struct quad8_iio *const priv = counter->priv; 923 854 const size_t channel_id = signal->id - 16; 924 - const unsigned int idr_cfg = priv->synchronous_mode[channel_id] | 925 - index_polarity << 1; 926 855 const int base_offset = priv->base + 2 * channel_id + 1; 856 + unsigned int idr_cfg = index_polarity << 1; 857 + 858 + mutex_lock(&priv->lock); 859 + 860 + idr_cfg |= 
priv->synchronous_mode[channel_id]; 927 861 928 862 priv->index_polarity[channel_id] = index_polarity; 929 863 930 864 /* Load Index Control configuration to Index Control Register */ 931 865 outb(QUAD8_CTR_IDR | idr_cfg, base_offset); 866 + 867 + mutex_unlock(&priv->lock); 932 868 933 869 return 0; 934 870 } ··· 961 887 { 962 888 struct quad8_iio *const priv = counter->priv; 963 889 const size_t channel_id = signal->id - 16; 964 - const unsigned int idr_cfg = synchronous_mode | 965 - priv->index_polarity[channel_id] << 1; 966 890 const int base_offset = priv->base + 2 * channel_id + 1; 891 + unsigned int idr_cfg = synchronous_mode; 892 + 893 + mutex_lock(&priv->lock); 894 + 895 + idr_cfg |= priv->index_polarity[channel_id] << 1; 967 896 968 897 /* Index function must be non-synchronous in non-quadrature mode */ 969 - if (synchronous_mode && !priv->quadrature_mode[channel_id]) 898 + if (synchronous_mode && !priv->quadrature_mode[channel_id]) { 899 + mutex_unlock(&priv->lock); 970 900 return -EINVAL; 901 + } 971 902 972 903 priv->synchronous_mode[channel_id] = synchronous_mode; 973 904 974 905 /* Load Index Control configuration to Index Control Register */ 975 906 outb(QUAD8_CTR_IDR | idr_cfg, base_offset); 907 + 908 + mutex_unlock(&priv->lock); 976 909 977 910 return 0; 978 911 } ··· 1045 964 break; 1046 965 } 1047 966 967 + mutex_lock(&priv->lock); 968 + 1048 969 priv->count_mode[count->id] = cnt_mode; 1049 970 1050 971 /* Set count mode configuration value */ ··· 1058 975 1059 976 /* Load mode configuration to Counter Mode Register */ 1060 977 outb(QUAD8_CTR_CMR | mode_cfg, base_offset); 978 + 979 + mutex_unlock(&priv->lock); 1061 980 1062 981 return 0; 1063 982 } ··· 1102 1017 if (err) 1103 1018 return err; 1104 1019 1020 + mutex_lock(&priv->lock); 1021 + 1105 1022 priv->ab_enable[count->id] = ab_enable; 1106 1023 1107 1024 ior_cfg = ab_enable | priv->preset_enable[count->id] << 1; 1108 1025 1109 1026 /* Load I/O control configuration */ 1110 1027 
outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1); 1028 + 1029 + mutex_unlock(&priv->lock); 1111 1030 1112 1031 return len; 1113 1032 } ··· 1141 1052 return sprintf(buf, "%u\n", priv->preset[count->id]); 1142 1053 } 1143 1054 1055 + static void quad8_preset_register_set(struct quad8_iio *quad8iio, int id, 1056 + unsigned int preset) 1057 + { 1058 + const unsigned int base_offset = quad8iio->base + 2 * id; 1059 + int i; 1060 + 1061 + quad8iio->preset[id] = preset; 1062 + 1063 + /* Reset Byte Pointer */ 1064 + outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); 1065 + 1066 + /* Set Preset Register */ 1067 + for (i = 0; i < 3; i++) 1068 + outb(preset >> (8 * i), base_offset); 1069 + } 1070 + 1144 1071 static ssize_t quad8_count_preset_write(struct counter_device *counter, 1145 1072 struct counter_count *count, void *private, const char *buf, size_t len) 1146 1073 { 1147 1074 struct quad8_iio *const priv = counter->priv; 1148 - const int base_offset = priv->base + 2 * count->id; 1149 1075 unsigned int preset; 1150 1076 int ret; 1151 - int i; 1152 1077 1153 1078 ret = kstrtouint(buf, 0, &preset); 1154 1079 if (ret) ··· 1172 1069 if (preset > 0xFFFFFF) 1173 1070 return -EINVAL; 1174 1071 1175 - priv->preset[count->id] = preset; 1072 + mutex_lock(&priv->lock); 1176 1073 1177 - /* Reset Byte Pointer */ 1178 - outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1); 1074 + quad8_preset_register_set(priv, count->id, preset); 1179 1075 1180 - /* Set Preset Register */ 1181 - for (i = 0; i < 3; i++) 1182 - outb(preset >> (8 * i), base_offset); 1076 + mutex_unlock(&priv->lock); 1183 1077 1184 1078 return len; 1185 1079 } ··· 1184 1084 static ssize_t quad8_count_ceiling_read(struct counter_device *counter, 1185 1085 struct counter_count *count, void *private, char *buf) 1186 1086 { 1187 - const struct quad8_iio *const priv = counter->priv; 1087 + struct quad8_iio *const priv = counter->priv; 1088 + 1089 + mutex_lock(&priv->lock); 1188 1090 1189 1091 /* Range Limit and 
Modulo-N count modes use preset value as ceiling */ 1190 1092 switch (priv->count_mode[count->id]) { 1191 1093 case 1: 1192 1094 case 3: 1193 - return quad8_count_preset_read(counter, count, private, buf); 1095 + mutex_unlock(&priv->lock); 1096 + return sprintf(buf, "%u\n", priv->preset[count->id]); 1194 1097 } 1098 + 1099 + mutex_unlock(&priv->lock); 1195 1100 1196 1101 /* By default 0x1FFFFFF (25 bits unsigned) is maximum count */ 1197 1102 return sprintf(buf, "33554431\n"); ··· 1206 1101 struct counter_count *count, void *private, const char *buf, size_t len) 1207 1102 { 1208 1103 struct quad8_iio *const priv = counter->priv; 1104 + unsigned int ceiling; 1105 + int ret; 1106 + 1107 + ret = kstrtouint(buf, 0, &ceiling); 1108 + if (ret) 1109 + return ret; 1110 + 1111 + /* Only 24-bit values are supported */ 1112 + if (ceiling > 0xFFFFFF) 1113 + return -EINVAL; 1114 + 1115 + mutex_lock(&priv->lock); 1209 1116 1210 1117 /* Range Limit and Modulo-N count modes use preset value as ceiling */ 1211 1118 switch (priv->count_mode[count->id]) { 1212 1119 case 1: 1213 1120 case 3: 1214 - return quad8_count_preset_write(counter, count, private, buf, 1215 - len); 1121 + quad8_preset_register_set(priv, count->id, ceiling); 1122 + break; 1216 1123 } 1124 + 1125 + mutex_unlock(&priv->lock); 1217 1126 1218 1127 return len; 1219 1128 } ··· 1256 1137 /* Preset enable is active low in Input/Output Control register */ 1257 1138 preset_enable = !preset_enable; 1258 1139 1140 + mutex_lock(&priv->lock); 1141 + 1259 1142 priv->preset_enable[count->id] = preset_enable; 1260 1143 1261 1144 ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1; 1262 1145 1263 1146 /* Load I/O control configuration to Input / Output Control Register */ 1264 1147 outb(QUAD8_CTR_IOR | ior_cfg, base_offset); 1148 + 1149 + mutex_unlock(&priv->lock); 1265 1150 1266 1151 return len; 1267 1152 } ··· 1551 1428 quad8iio->counter.num_signals = ARRAY_SIZE(quad8_signals); 1552 1429 
quad8iio->counter.priv = quad8iio; 1553 1430 quad8iio->base = base[id]; 1431 + 1432 + /* Initialize mutex */ 1433 + mutex_init(&quad8iio->lock); 1554 1434 1555 1435 /* Reset all counters and disable interrupt function */ 1556 1436 outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
+1 -1
drivers/cpufreq/intel_pstate.c
··· 1059 1059 1060 1060 update_turbo_state(); 1061 1061 if (global.turbo_disabled) { 1062 - pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 1062 + pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); 1063 1063 mutex_unlock(&intel_pstate_limits_lock); 1064 1064 mutex_unlock(&intel_pstate_driver_lock); 1065 1065 return -EPERM;
+7 -3
drivers/crypto/caam/caamalg.c
··· 963 963 struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); 964 964 struct aead_edesc *edesc; 965 965 int ecode = 0; 966 + bool has_bklog; 966 967 967 968 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 968 969 969 970 edesc = rctx->edesc; 971 + has_bklog = edesc->bklog; 970 972 971 973 if (err) 972 974 ecode = caam_jr_strstatus(jrdev, err); ··· 981 979 * If no backlog flag, the completion of the request is done 982 980 * by CAAM, not crypto engine. 983 981 */ 984 - if (!edesc->bklog) 982 + if (!has_bklog) 985 983 aead_request_complete(req, ecode); 986 984 else 987 985 crypto_finalize_aead_request(jrp->engine, req, ecode); ··· 997 995 struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); 998 996 int ivsize = crypto_skcipher_ivsize(skcipher); 999 997 int ecode = 0; 998 + bool has_bklog; 1000 999 1001 1000 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 1002 1001 1003 1002 edesc = rctx->edesc; 1003 + has_bklog = edesc->bklog; 1004 1004 if (err) 1005 1005 ecode = caam_jr_strstatus(jrdev, err); 1006 1006 ··· 1032 1028 * If no backlog flag, the completion of the request is done 1033 1029 * by CAAM, not crypto engine. 1034 1030 */ 1035 - if (!edesc->bklog) 1031 + if (!has_bklog) 1036 1032 skcipher_request_complete(req, ecode); 1037 1033 else 1038 1034 crypto_finalize_skcipher_request(jrp->engine, req, ecode); ··· 1715 1711 1716 1712 if (ivsize || mapped_dst_nents > 1) 1717 1713 sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx + 1718 - mapped_dst_nents); 1714 + mapped_dst_nents - 1 + !!ivsize); 1719 1715 1720 1716 if (sec4_sg_bytes) { 1721 1717 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+6 -2
drivers/crypto/caam/caamhash.c
··· 583 583 struct caam_hash_state *state = ahash_request_ctx(req); 584 584 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); 585 585 int ecode = 0; 586 + bool has_bklog; 586 587 587 588 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 588 589 589 590 edesc = state->edesc; 591 + has_bklog = edesc->bklog; 590 592 591 593 if (err) 592 594 ecode = caam_jr_strstatus(jrdev, err); ··· 605 603 * If no backlog flag, the completion of the request is done 606 604 * by CAAM, not crypto engine. 607 605 */ 608 - if (!edesc->bklog) 606 + if (!has_bklog) 609 607 req->base.complete(&req->base, ecode); 610 608 else 611 609 crypto_finalize_hash_request(jrp->engine, req, ecode); ··· 634 632 struct caam_hash_state *state = ahash_request_ctx(req); 635 633 int digestsize = crypto_ahash_digestsize(ahash); 636 634 int ecode = 0; 635 + bool has_bklog; 637 636 638 637 dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); 639 638 640 639 edesc = state->edesc; 640 + has_bklog = edesc->bklog; 641 641 if (err) 642 642 ecode = caam_jr_strstatus(jrdev, err); 643 643 ··· 667 663 * If no backlog flag, the completion of the request is done 668 664 * by CAAM, not crypto engine. 669 665 */ 670 - if (!edesc->bklog) 666 + if (!has_bklog) 671 667 req->base.complete(&req->base, ecode); 672 668 else 673 669 crypto_finalize_hash_request(jrp->engine, req, ecode);
+6 -2
drivers/crypto/caam/caampkc.c
··· 121 121 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); 122 122 struct rsa_edesc *edesc; 123 123 int ecode = 0; 124 + bool has_bklog; 124 125 125 126 if (err) 126 127 ecode = caam_jr_strstatus(dev, err); 127 128 128 129 edesc = req_ctx->edesc; 130 + has_bklog = edesc->bklog; 129 131 130 132 rsa_pub_unmap(dev, edesc, req); 131 133 rsa_io_unmap(dev, edesc, req); ··· 137 135 * If no backlog flag, the completion of the request is done 138 136 * by CAAM, not crypto engine. 139 137 */ 140 - if (!edesc->bklog) 138 + if (!has_bklog) 141 139 akcipher_request_complete(req, ecode); 142 140 else 143 141 crypto_finalize_akcipher_request(jrp->engine, req, ecode); ··· 154 152 struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); 155 153 struct rsa_edesc *edesc; 156 154 int ecode = 0; 155 + bool has_bklog; 157 156 158 157 if (err) 159 158 ecode = caam_jr_strstatus(dev, err); 160 159 161 160 edesc = req_ctx->edesc; 161 + has_bklog = edesc->bklog; 162 162 163 163 switch (key->priv_form) { 164 164 case FORM1: ··· 180 176 * If no backlog flag, the completion of the request is done 181 177 * by CAAM, not crypto engine. 182 178 */ 183 - if (!edesc->bklog) 179 + if (!has_bklog) 184 180 akcipher_request_complete(req, ecode); 185 181 else 186 182 crypto_finalize_akcipher_request(jrp->engine, req, ecode);
+46 -37
drivers/crypto/chelsio/chcr_ktls.c
··· 673 673 return 0; 674 674 } 675 675 676 - /* 677 - * chcr_write_cpl_set_tcb_ulp: update tcb values. 678 - * TCB is responsible to create tcp headers, so all the related values 679 - * should be correctly updated. 680 - * @tx_info - driver specific tls info. 681 - * @q - tx queue on which packet is going out. 682 - * @tid - TCB identifier. 683 - * @pos - current index where should we start writing. 684 - * @word - TCB word. 685 - * @mask - TCB word related mask. 686 - * @val - TCB word related value. 687 - * @reply - set 1 if looking for TP response. 688 - * return - next position to write. 689 - */ 690 - static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, 691 - struct sge_eth_txq *q, u32 tid, 692 - void *pos, u16 word, u64 mask, 676 + static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, 677 + u32 tid, void *pos, u16 word, u64 mask, 693 678 u64 val, u32 reply) 694 679 { 695 680 struct cpl_set_tcb_field_core *cpl; 696 681 struct ulptx_idata *idata; 697 682 struct ulp_txpkt *txpkt; 698 - void *save_pos = NULL; 699 - u8 buf[48] = {0}; 700 - int left; 701 683 702 - left = (void *)q->q.stat - pos; 703 - if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) { 704 - if (!left) { 705 - pos = q->q.desc; 706 - } else { 707 - save_pos = pos; 708 - pos = buf; 709 - } 710 - } 711 684 /* ULP_TXPKT */ 712 685 txpkt = pos; 713 686 txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0)); ··· 705 732 idata = (struct ulptx_idata *)(cpl + 1); 706 733 idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP)); 707 734 idata->len = htonl(0); 735 + pos = idata + 1; 708 736 709 - if (save_pos) { 710 - pos = chcr_copy_to_txd(buf, &q->q, save_pos, 711 - CHCR_SET_TCB_FIELD_LEN); 712 - } else { 713 - /* check again if we are at the end of the queue */ 714 - if (left == CHCR_SET_TCB_FIELD_LEN) 737 + return pos; 738 + } 739 + 740 + 741 + /* 742 + * chcr_write_cpl_set_tcb_ulp: update tcb values. 
743 + * TCB is responsible to create tcp headers, so all the related values 744 + * should be correctly updated. 745 + * @tx_info - driver specific tls info. 746 + * @q - tx queue on which packet is going out. 747 + * @tid - TCB identifier. 748 + * @pos - current index where should we start writing. 749 + * @word - TCB word. 750 + * @mask - TCB word related mask. 751 + * @val - TCB word related value. 752 + * @reply - set 1 if looking for TP response. 753 + * return - next position to write. 754 + */ 755 + static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info, 756 + struct sge_eth_txq *q, u32 tid, 757 + void *pos, u16 word, u64 mask, 758 + u64 val, u32 reply) 759 + { 760 + int left = (void *)q->q.stat - pos; 761 + 762 + if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) { 763 + if (!left) { 715 764 pos = q->q.desc; 716 - else 717 - pos = idata + 1; 765 + } else { 766 + u8 buf[48] = {0}; 767 + 768 + __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, 769 + mask, val, reply); 770 + 771 + return chcr_copy_to_txd(buf, &q->q, pos, 772 + CHCR_SET_TCB_FIELD_LEN); 773 + } 718 774 } 775 + 776 + pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, 777 + mask, val, reply); 778 + 779 + /* check again if we are at the end of the queue */ 780 + if (left == CHCR_SET_TCB_FIELD_LEN) 781 + pos = q->q.desc; 719 782 720 783 return pos; 721 784 }
+4 -3
drivers/dma-buf/dma-buf.c
··· 388 388 389 389 return ret; 390 390 391 - case DMA_BUF_SET_NAME: 391 + case DMA_BUF_SET_NAME_A: 392 + case DMA_BUF_SET_NAME_B: 392 393 return dma_buf_set_name(dmabuf, (const char __user *)arg); 393 394 394 395 default: ··· 656 655 * calls attach() of dma_buf_ops to allow device-specific attach functionality 657 656 * @dmabuf: [in] buffer to attach device to. 658 657 * @dev: [in] device to be attached. 659 - * @importer_ops [in] importer operations for the attachment 660 - * @importer_priv [in] importer private pointer for the attachment 658 + * @importer_ops: [in] importer operations for the attachment 659 + * @importer_priv: [in] importer private pointer for the attachment 661 660 * 662 661 * Returns struct dma_buf_attachment pointer for this attachment. Attachments 663 662 * must be cleaned up by calling dma_buf_detach().
+2 -1
drivers/dma/Kconfig
··· 241 241 242 242 config HISI_DMA 243 243 tristate "HiSilicon DMA Engine support" 244 - depends on ARM64 || (COMPILE_TEST && PCI_MSI) 244 + depends on ARM64 || COMPILE_TEST 245 + depends on PCI_MSI 245 246 select DMA_ENGINE 246 247 select DMA_VIRTUAL_CHANNELS 247 248 help
+26 -34
drivers/dma/dmaengine.c
··· 232 232 struct dma_chan_dev *chan_dev; 233 233 234 234 chan_dev = container_of(dev, typeof(*chan_dev), device); 235 - if (atomic_dec_and_test(chan_dev->idr_ref)) { 236 - ida_free(&dma_ida, chan_dev->dev_id); 237 - kfree(chan_dev->idr_ref); 238 - } 239 235 kfree(chan_dev); 240 236 } 241 237 ··· 1039 1043 } 1040 1044 1041 1045 static int __dma_async_device_channel_register(struct dma_device *device, 1042 - struct dma_chan *chan, 1043 - int chan_id) 1046 + struct dma_chan *chan) 1044 1047 { 1045 1048 int rc = 0; 1046 - int chancnt = device->chancnt; 1047 - atomic_t *idr_ref; 1048 - struct dma_chan *tchan; 1049 - 1050 - tchan = list_first_entry_or_null(&device->channels, 1051 - struct dma_chan, device_node); 1052 - if (!tchan) 1053 - return -ENODEV; 1054 - 1055 - if (tchan->dev) { 1056 - idr_ref = tchan->dev->idr_ref; 1057 - } else { 1058 - idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); 1059 - if (!idr_ref) 1060 - return -ENOMEM; 1061 - atomic_set(idr_ref, 0); 1062 - } 1063 1049 1064 1050 chan->local = alloc_percpu(typeof(*chan->local)); 1065 1051 if (!chan->local) ··· 1057 1079 * When the chan_id is a negative value, we are dynamically adding 1058 1080 * the channel. Otherwise we are static enumerating. 1059 1081 */ 1060 - chan->chan_id = chan_id < 0 ? 
chancnt : chan_id; 1082 + mutex_lock(&device->chan_mutex); 1083 + chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); 1084 + mutex_unlock(&device->chan_mutex); 1085 + if (chan->chan_id < 0) { 1086 + pr_err("%s: unable to alloc ida for chan: %d\n", 1087 + __func__, chan->chan_id); 1088 + goto err_out; 1089 + } 1090 + 1061 1091 chan->dev->device.class = &dma_devclass; 1062 1092 chan->dev->device.parent = device->dev; 1063 1093 chan->dev->chan = chan; 1064 - chan->dev->idr_ref = idr_ref; 1065 1094 chan->dev->dev_id = device->dev_id; 1066 - atomic_inc(idr_ref); 1067 1095 dev_set_name(&chan->dev->device, "dma%dchan%d", 1068 1096 device->dev_id, chan->chan_id); 1069 - 1070 1097 rc = device_register(&chan->dev->device); 1071 1098 if (rc) 1072 - goto err_out; 1099 + goto err_out_ida; 1073 1100 chan->client_count = 0; 1074 - device->chancnt = chan->chan_id + 1; 1101 + device->chancnt++; 1075 1102 1076 1103 return 0; 1077 1104 1105 + err_out_ida: 1106 + mutex_lock(&device->chan_mutex); 1107 + ida_free(&device->chan_ida, chan->chan_id); 1108 + mutex_unlock(&device->chan_mutex); 1078 1109 err_out: 1079 1110 free_percpu(chan->local); 1080 1111 kfree(chan->dev); 1081 - if (atomic_dec_return(idr_ref) == 0) 1082 - kfree(idr_ref); 1083 1112 return rc; 1084 1113 } 1085 1114 ··· 1095 1110 { 1096 1111 int rc; 1097 1112 1098 - rc = __dma_async_device_channel_register(device, chan, -1); 1113 + rc = __dma_async_device_channel_register(device, chan); 1099 1114 if (rc < 0) 1100 1115 return rc; 1101 1116 ··· 1115 1130 device->chancnt--; 1116 1131 chan->dev->chan = NULL; 1117 1132 mutex_unlock(&dma_list_mutex); 1133 + mutex_lock(&device->chan_mutex); 1134 + ida_free(&device->chan_ida, chan->chan_id); 1135 + mutex_unlock(&device->chan_mutex); 1118 1136 device_unregister(&chan->dev->device); 1119 1137 free_percpu(chan->local); 1120 1138 } ··· 1140 1152 */ 1141 1153 int dma_async_device_register(struct dma_device *device) 1142 1154 { 1143 - int rc, i = 0; 1155 + int rc; 1144 1156 struct 
dma_chan* chan; 1145 1157 1146 1158 if (!device) ··· 1245 1257 if (rc != 0) 1246 1258 return rc; 1247 1259 1260 + mutex_init(&device->chan_mutex); 1261 + ida_init(&device->chan_ida); 1262 + 1248 1263 /* represent channels in sysfs. Probably want devs too */ 1249 1264 list_for_each_entry(chan, &device->channels, device_node) { 1250 - rc = __dma_async_device_channel_register(device, chan, i++); 1265 + rc = __dma_async_device_channel_register(device, chan); 1251 1266 if (rc < 0) 1252 1267 goto err_out; 1253 1268 } ··· 1325 1334 */ 1326 1335 dma_cap_set(DMA_PRIVATE, device->cap_mask); 1327 1336 dma_channel_rebalance(); 1337 + ida_free(&dma_ida, device->dev_id); 1328 1338 dma_device_put(device); 1329 1339 mutex_unlock(&dma_list_mutex); 1330 1340 }
+3 -3
drivers/dma/dmatest.c
··· 240 240 struct dmatest_thread *thread; 241 241 242 242 list_for_each_entry(thread, &dtc->threads, node) { 243 - if (!thread->done) 243 + if (!thread->done && !thread->pending) 244 244 return true; 245 245 } 246 246 } ··· 662 662 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 663 663 664 664 ktime = ktime_get(); 665 - while (!kthread_should_stop() 666 - && !(params->iterations && total_tests >= params->iterations)) { 665 + while (!(kthread_should_stop() || 666 + (params->iterations && total_tests >= params->iterations))) { 667 667 struct dma_async_tx_descriptor *tx = NULL; 668 668 struct dmaengine_unmap_data *um; 669 669 dma_addr_t *dsts;
+4 -1
drivers/dma/mmp_tdma.c
··· 363 363 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, 364 364 size); 365 365 tdmac->desc_arr = NULL; 366 + if (tdmac->status == DMA_ERROR) 367 + tdmac->status = DMA_COMPLETE; 366 368 367 369 return; 368 370 } ··· 445 443 if (!desc) 446 444 goto err_out; 447 445 448 - mmp_tdma_config_write(chan, direction, &tdmac->slave_config); 446 + if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config)) 447 + goto err_out; 449 448 450 449 while (buf < buf_len) { 451 450 desc = &tdmac->desc_arr[i];
+1 -1
drivers/dma/pch_dma.c
··· 865 865 } 866 866 867 867 pci_set_master(pdev); 868 + pd->dma.dev = &pdev->dev; 868 869 869 870 err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); 870 871 if (err) { ··· 881 880 goto err_free_irq; 882 881 } 883 882 884 - pd->dma.dev = &pdev->dev; 885 883 886 884 INIT_LIST_HEAD(&pd->dma.channels); 887 885
+9
drivers/dma/tegra20-apb-dma.c
··· 816 816 static void tegra_dma_synchronize(struct dma_chan *dc) 817 817 { 818 818 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 819 + int err; 820 + 821 + err = pm_runtime_get_sync(tdc->tdma->dev); 822 + if (err < 0) { 823 + dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err); 824 + return; 825 + } 819 826 820 827 /* 821 828 * CPU, which handles interrupt, could be busy in ··· 832 825 wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc)); 833 826 834 827 tasklet_kill(&tdc->tasklet); 828 + 829 + pm_runtime_put(tdc->tdma->dev); 835 830 } 836 831 837 832 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
+1
drivers/dma/ti/k3-psil.c
··· 27 27 soc_ep_map = &j721e_ep_map; 28 28 } else { 29 29 pr_err("PSIL: No compatible machine found for map\n"); 30 + mutex_unlock(&ep_map_mutex); 30 31 return ERR_PTR(-ENOTSUPP); 31 32 } 32 33 pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
+10 -10
drivers/dma/xilinx/xilinx_dma.c
··· 1230 1230 return ret; 1231 1231 1232 1232 spin_lock_irqsave(&chan->lock, flags); 1233 - 1234 - desc = list_last_entry(&chan->active_list, 1235 - struct xilinx_dma_tx_descriptor, node); 1236 - /* 1237 - * VDMA and simple mode do not support residue reporting, so the 1238 - * residue field will always be 0. 1239 - */ 1240 - if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) 1241 - residue = xilinx_dma_get_residue(chan, desc); 1242 - 1233 + if (!list_empty(&chan->active_list)) { 1234 + desc = list_last_entry(&chan->active_list, 1235 + struct xilinx_dma_tx_descriptor, node); 1236 + /* 1237 + * VDMA and simple mode do not support residue reporting, so the 1238 + * residue field will always be 0. 1239 + */ 1240 + if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA) 1241 + residue = xilinx_dma_get_residue(chan, desc); 1242 + } 1243 1243 spin_unlock_irqrestore(&chan->lock, flags); 1244 1244 1245 1245 dma_set_residue(txstate, residue);
+2 -2
drivers/firmware/imx/Kconfig
··· 12 12 13 13 config IMX_SCU 14 14 bool "IMX SCU Protocol driver" 15 - depends on IMX_MBOX || COMPILE_TEST 15 + depends on IMX_MBOX 16 16 help 17 17 The System Controller Firmware (SCFW) is a low-level system function 18 18 which runs on a dedicated Cortex-M core to provide power, clock, and ··· 24 24 25 25 config IMX_SCU_PD 26 26 bool "IMX SCU Power Domain driver" 27 - depends on IMX_SCU || COMPILE_TEST 27 + depends on IMX_SCU 28 28 help 29 29 The System Controller Firmware (SCFW) based power domain driver.
+4 -2
drivers/fpga/dfl-pci.c
··· 248 248 return ret; 249 249 250 250 ret = pci_enable_sriov(pcidev, num_vfs); 251 - if (ret) 251 + if (ret) { 252 252 dfl_fpga_cdev_config_ports_pf(cdev); 253 + return ret; 254 + } 253 255 } 254 256 255 - return ret; 257 + return num_vfs; 256 258 } 257 259 258 260 static void cci_pci_remove(struct pci_dev *pcidev)
+2 -1
drivers/fpga/zynq-fpga.c
··· 583 583 584 584 priv->clk = devm_clk_get(dev, "ref_clk"); 585 585 if (IS_ERR(priv->clk)) { 586 - dev_err(dev, "input clock not found\n"); 586 + if (PTR_ERR(priv->clk) != -EPROBE_DEFER) 587 + dev_err(dev, "input clock not found\n"); 587 588 return PTR_ERR(priv->clk); 588 589 } 589 590
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 85 85 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches 86 86 * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask 87 87 * - 3.36.0 - Allow reading more status registers on si/cik 88 + * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness 88 89 */ 89 90 #define KMS_DRIVER_MAJOR 3 90 - #define KMS_DRIVER_MINOR 36 91 + #define KMS_DRIVER_MINOR 37 91 92 #define KMS_DRIVER_PATCHLEVEL 0 92 93 93 94 int amdgpu_vram_limit = 0;
+16
drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
··· 73 73 #define SDMA_OP_AQL_COPY 0 74 74 #define SDMA_OP_AQL_BARRIER_OR 0 75 75 76 + #define SDMA_GCR_RANGE_IS_PA (1 << 18) 77 + #define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16) 78 + #define SDMA_GCR_GL2_WB (1 << 15) 79 + #define SDMA_GCR_GL2_INV (1 << 14) 80 + #define SDMA_GCR_GL2_DISCARD (1 << 13) 81 + #define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11) 82 + #define SDMA_GCR_GL2_US (1 << 10) 83 + #define SDMA_GCR_GL1_INV (1 << 9) 84 + #define SDMA_GCR_GLV_INV (1 << 8) 85 + #define SDMA_GCR_GLK_INV (1 << 7) 86 + #define SDMA_GCR_GLK_WB (1 << 6) 87 + #define SDMA_GCR_GLM_INV (1 << 5) 88 + #define SDMA_GCR_GLM_WB (1 << 4) 89 + #define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2) 90 + #define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0) 91 + 76 92 /*define for op field*/ 77 93 #define SDMA_PKT_HEADER_op_offset 0 78 94 #define SDMA_PKT_HEADER_op_mask 0x000000FF
+13 -1
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 382 382 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 383 383 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); 384 384 385 + /* Invalidate L2, because if we don't do it, we might get stale cache 386 + * lines from previous IBs. 387 + */ 388 + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); 389 + amdgpu_ring_write(ring, 0); 390 + amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV | 391 + SDMA_GCR_GL2_WB | 392 + SDMA_GCR_GLM_INV | 393 + SDMA_GCR_GLM_WB) << 16); 394 + amdgpu_ring_write(ring, 0xffffff80); 395 + amdgpu_ring_write(ring, 0xffff); 396 + 385 397 /* An IB packet must end on a 8 DW boundary--the next dword 386 398 * must be on a 8-dword boundary. Our IB packet below is 6 387 399 * dwords long, thus add x number of NOPs, such that, in ··· 1607 1595 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 1608 1596 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 + 1609 1597 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ 1610 - .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */ 1598 + .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */ 1611 1599 .emit_ib = sdma_v5_0_ring_emit_ib, 1612 1600 .emit_fence = sdma_v5_0_ring_emit_fence, 1613 1601 .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
+29 -9
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 3340 3340 const union dc_tiling_info *tiling_info, 3341 3341 const uint64_t info, 3342 3342 struct dc_plane_dcc_param *dcc, 3343 - struct dc_plane_address *address) 3343 + struct dc_plane_address *address, 3344 + bool force_disable_dcc) 3344 3345 { 3345 3346 struct dc *dc = adev->dm.dc; 3346 3347 struct dc_dcc_surface_param input; ··· 3352 3351 3353 3352 memset(&input, 0, sizeof(input)); 3354 3353 memset(&output, 0, sizeof(output)); 3354 + 3355 + if (force_disable_dcc) 3356 + return 0; 3355 3357 3356 3358 if (!offset) 3357 3359 return 0; ··· 3405 3401 union dc_tiling_info *tiling_info, 3406 3402 struct plane_size *plane_size, 3407 3403 struct dc_plane_dcc_param *dcc, 3408 - struct dc_plane_address *address) 3404 + struct dc_plane_address *address, 3405 + bool force_disable_dcc) 3409 3406 { 3410 3407 const struct drm_framebuffer *fb = &afb->base; 3411 3408 int ret; ··· 3512 3507 3513 3508 ret = fill_plane_dcc_attributes(adev, afb, format, rotation, 3514 3509 plane_size, tiling_info, 3515 - tiling_flags, dcc, address); 3510 + tiling_flags, dcc, address, 3511 + force_disable_dcc); 3516 3512 if (ret) 3517 3513 return ret; 3518 3514 } ··· 3605 3599 const struct drm_plane_state *plane_state, 3606 3600 const uint64_t tiling_flags, 3607 3601 struct dc_plane_info *plane_info, 3608 - struct dc_plane_address *address) 3602 + struct dc_plane_address *address, 3603 + bool force_disable_dcc) 3609 3604 { 3610 3605 const struct drm_framebuffer *fb = plane_state->fb; 3611 3606 const struct amdgpu_framebuffer *afb = ··· 3688 3681 plane_info->rotation, tiling_flags, 3689 3682 &plane_info->tiling_info, 3690 3683 &plane_info->plane_size, 3691 - &plane_info->dcc, address); 3684 + &plane_info->dcc, address, 3685 + force_disable_dcc); 3692 3686 if (ret) 3693 3687 return ret; 3694 3688 ··· 3712 3704 struct dc_plane_info plane_info; 3713 3705 uint64_t tiling_flags; 3714 3706 int ret; 3707 + bool force_disable_dcc = false; 3715 3708 3716 3709 ret = fill_dc_scaling_info(plane_state, 
&scaling_info); 3717 3710 if (ret) ··· 3727 3718 if (ret) 3728 3719 return ret; 3729 3720 3721 + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; 3730 3722 ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags, 3731 3723 &plane_info, 3732 - &dc_plane_state->address); 3724 + &dc_plane_state->address, 3725 + force_disable_dcc); 3733 3726 if (ret) 3734 3727 return ret; 3735 3728 ··· 5353 5342 uint64_t tiling_flags; 5354 5343 uint32_t domain; 5355 5344 int r; 5345 + bool force_disable_dcc = false; 5356 5346 5357 5347 dm_plane_state_old = to_dm_plane_state(plane->state); 5358 5348 dm_plane_state_new = to_dm_plane_state(new_state); ··· 5412 5400 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { 5413 5401 struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; 5414 5402 5403 + force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend; 5415 5404 fill_plane_buffer_attributes( 5416 5405 adev, afb, plane_state->format, plane_state->rotation, 5417 5406 tiling_flags, &plane_state->tiling_info, 5418 5407 &plane_state->plane_size, &plane_state->dcc, 5419 - &plane_state->address); 5408 + &plane_state->address, 5409 + force_disable_dcc); 5420 5410 } 5421 5411 5422 5412 return 0; ··· 6690 6676 fill_dc_plane_info_and_addr( 6691 6677 dm->adev, new_plane_state, tiling_flags, 6692 6678 &bundle->plane_infos[planes_count], 6693 - &bundle->flip_addrs[planes_count].address); 6679 + &bundle->flip_addrs[planes_count].address, 6680 + false); 6681 + 6682 + DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n", 6683 + new_plane_state->plane->index, 6684 + bundle->plane_infos[planes_count].dcc.enable); 6694 6685 6695 6686 bundle->surface_updates[planes_count].plane_info = 6696 6687 &bundle->plane_infos[planes_count]; ··· 8115 8096 ret = fill_dc_plane_info_and_addr( 8116 8097 dm->adev, new_plane_state, tiling_flags, 8117 8098 plane_info, 8118 - &flip_addr->address); 8099 + &flip_addr->address, 8100 + false); 8119 8101 if (ret) 8120 8102 
goto cleanup; 8121 8103
+4 -36
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 231 231 return dc_stream_get_status_from_state(dc->current_state, stream); 232 232 } 233 233 234 - static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) 235 - { 236 - #if defined(CONFIG_DRM_AMD_DC_DCN) 237 - unsigned int vupdate_line; 238 - unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos; 239 - struct dc_stream_state *stream = pipe_ctx->stream; 240 - unsigned int us_per_line; 241 - 242 - if (stream->ctx->asic_id.chip_family == FAMILY_RV && 243 - ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { 244 - 245 - vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx); 246 - if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos)) 247 - return; 248 - 249 - if (vpos >= vupdate_line) 250 - return; 251 - 252 - us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz; 253 - lines_to_vupdate = vupdate_line - vpos; 254 - us_to_vupdate = lines_to_vupdate * us_per_line; 255 - 256 - /* 70 us is a conservative estimate of cursor update time*/ 257 - if (us_to_vupdate < 70) 258 - udelay(us_to_vupdate); 259 - } 260 - #endif 261 - } 262 234 263 235 /** 264 236 * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address ··· 270 298 271 299 if (!pipe_to_program) { 272 300 pipe_to_program = pipe_ctx; 273 - 274 - delay_cursor_until_vupdate(pipe_ctx, dc); 275 - dc->hwss.pipe_control_lock(dc, pipe_to_program, true); 301 + dc->hwss.cursor_lock(dc, pipe_to_program, true); 276 302 } 277 303 278 304 dc->hwss.set_cursor_attribute(pipe_ctx); ··· 279 309 } 280 310 281 311 if (pipe_to_program) 282 - dc->hwss.pipe_control_lock(dc, pipe_to_program, false); 312 + dc->hwss.cursor_lock(dc, pipe_to_program, false); 283 313 284 314 return true; 285 315 } ··· 319 349 320 350 if (!pipe_to_program) { 321 351 pipe_to_program = pipe_ctx; 322 - 323 - delay_cursor_until_vupdate(pipe_ctx, dc); 324 - dc->hwss.pipe_control_lock(dc, pipe_to_program, true); 352 + dc->hwss.cursor_lock(dc, 
pipe_to_program, true); 325 353 } 326 354 327 355 dc->hwss.set_cursor_position(pipe_ctx); 328 356 } 329 357 330 358 if (pipe_to_program) 331 - dc->hwss.pipe_control_lock(dc, pipe_to_program, false); 359 + dc->hwss.cursor_lock(dc, pipe_to_program, false); 332 360 333 361 return true; 334 362 }
+1
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
··· 2757 2757 .disable_plane = dce110_power_down_fe, 2758 2758 .pipe_control_lock = dce_pipe_control_lock, 2759 2759 .interdependent_update_lock = NULL, 2760 + .cursor_lock = dce_pipe_control_lock, 2760 2761 .prepare_bandwidth = dce110_prepare_bandwidth, 2761 2762 .optimize_bandwidth = dce110_optimize_bandwidth, 2762 2763 .set_drr = set_drr,
+10
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 1625 1625 hws->funcs.verify_allow_pstate_change_high(dc); 1626 1626 } 1627 1627 1628 + void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock) 1629 + { 1630 + /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */ 1631 + if (!pipe || pipe->top_pipe) 1632 + return; 1633 + 1634 + dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc, 1635 + pipe->stream_res.opp->inst, lock); 1636 + } 1637 + 1628 1638 static bool wait_for_reset_trigger_to_occur( 1629 1639 struct dc_context *dc_ctx, 1630 1640 struct timing_generator *tg)
+1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
··· 49 49 struct dc *dc, 50 50 struct pipe_ctx *pipe, 51 51 bool lock); 52 + void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock); 52 53 void dcn10_blank_pixel_data( 53 54 struct dc *dc, 54 55 struct pipe_ctx *pipe_ctx,
+1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
··· 50 50 .disable_audio_stream = dce110_disable_audio_stream, 51 51 .disable_plane = dcn10_disable_plane, 52 52 .pipe_control_lock = dcn10_pipe_control_lock, 53 + .cursor_lock = dcn10_cursor_lock, 53 54 .interdependent_update_lock = dcn10_lock_all_pipes, 54 55 .prepare_bandwidth = dcn10_prepare_bandwidth, 55 56 .optimize_bandwidth = dcn10_optimize_bandwidth,
+15
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
··· 223 223 REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id); 224 224 REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id); 225 225 226 + /* Configure VUPDATE lock set for this MPCC to map to the OPP */ 227 + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id); 228 + 226 229 /* update mpc tree mux setting */ 227 230 if (tree->opp_list == insert_above_mpcc) { 228 231 /* insert the toppest mpcc */ ··· 321 318 REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); 322 319 REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); 323 320 REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); 321 + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); 324 322 325 323 /* mark this mpcc as not in use */ 326 324 mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id); ··· 332 328 REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); 333 329 REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); 334 330 REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); 331 + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); 335 332 } 336 333 } 337 334 ··· 366 361 REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); 367 362 REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); 368 363 REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); 364 + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); 369 365 370 366 mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); 371 367 } ··· 387 381 REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf); 388 382 REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); 389 383 REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf); 384 + REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf); 390 385 391 386 mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id); 392 387 ··· 460 453 MPCC_BUSY, &s->busy); 461 454 } 462 455 456 + void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock) 457 + { 458 + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); 459 + 460 + REG_SET(CUR[opp_id], 0, 
CUR_VUPDATE_LOCK_SET, lock ? 1 : 0); 461 + } 462 + 463 463 static const struct mpc_funcs dcn10_mpc_funcs = { 464 464 .read_mpcc_state = mpc1_read_mpcc_state, 465 465 .insert_plane = mpc1_insert_plane, ··· 478 464 .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect, 479 465 .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw, 480 466 .update_blending = mpc1_update_blending, 467 + .cursor_lock = mpc1_cursor_lock, 481 468 .set_denorm = NULL, 482 469 .set_denorm_clamp = NULL, 483 470 .set_output_csc = NULL,
+14 -6
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
··· 39 39 SRII(MPCC_BG_G_Y, MPCC, inst),\ 40 40 SRII(MPCC_BG_R_CR, MPCC, inst),\ 41 41 SRII(MPCC_BG_B_CB, MPCC, inst),\ 42 - SRII(MPCC_BG_B_CB, MPCC, inst),\ 43 - SRII(MPCC_SM_CONTROL, MPCC, inst) 42 + SRII(MPCC_SM_CONTROL, MPCC, inst),\ 43 + SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst) 44 44 45 45 #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \ 46 - SRII(MUX, MPC_OUT, inst) 46 + SRII(MUX, MPC_OUT, inst),\ 47 + VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst) 47 48 48 49 #define MPC_COMMON_REG_VARIABLE_LIST \ 49 50 uint32_t MPCC_TOP_SEL[MAX_MPCC]; \ ··· 56 55 uint32_t MPCC_BG_R_CR[MAX_MPCC]; \ 57 56 uint32_t MPCC_BG_B_CB[MAX_MPCC]; \ 58 57 uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \ 59 - uint32_t MUX[MAX_OPP]; 58 + uint32_t MUX[MAX_OPP]; \ 59 + uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \ 60 + uint32_t CUR[MAX_OPP]; 60 61 61 62 #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\ 62 63 SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\ ··· 81 78 SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\ 82 79 SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\ 83 80 SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\ 84 - SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh) 81 + SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\ 82 + SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh) 85 83 86 84 #define MPC_REG_FIELD_LIST(type) \ 87 85 type MPCC_TOP_SEL;\ ··· 105 101 type MPCC_SM_FIELD_ALT;\ 106 102 type MPCC_SM_FORCE_NEXT_FRAME_POL;\ 107 103 type MPCC_SM_FORCE_NEXT_TOP_POL;\ 108 - type MPC_OUT_MUX; 104 + type MPC_OUT_MUX;\ 105 + type MPCC_UPDATE_LOCK_SEL;\ 106 + type CUR_VUPDATE_LOCK_SET; 109 107 110 108 struct dcn_mpc_registers { 111 109 MPC_COMMON_REG_VARIABLE_LIST ··· 197 191 struct mpc *mpc, 198 192 int mpcc_inst, 199 193 struct mpcc_state *s); 194 + 195 + void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock); 200 196 201 197 #endif
+12 -2
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
··· 181 181 .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ 182 182 mm ## block ## id ## _ ## reg_name 183 183 184 + #define VUPDATE_SRII(reg_name, block, id)\ 185 + .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \ 186 + mm ## reg_name ## 0 ## _ ## block ## id 187 + 188 + /* set field/register/bitfield name */ 189 + #define SFRB(field_name, reg_name, bitfield, post_fix)\ 190 + .field_name = reg_name ## __ ## bitfield ## post_fix 191 + 184 192 /* NBIO */ 185 193 #define NBIO_BASE_INNER(seg) \ 186 194 NBIF_BASE__INST0_SEG ## seg ··· 427 419 }; 428 420 429 421 static const struct dcn_mpc_shift mpc_shift = { 430 - MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT) 422 + MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\ 423 + SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT) 431 424 }; 432 425 433 426 static const struct dcn_mpc_mask mpc_mask = { 434 - MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK), 427 + MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\ 428 + SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK) 435 429 }; 436 430 437 431 #define tg_regs(id)\
+2 -1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 2294 2294 2295 2295 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); 2296 2296 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); 2297 - REG_WRITE(REFCLK_CNTL, 0); 2297 + if (REG(REFCLK_CNTL)) 2298 + REG_WRITE(REFCLK_CNTL, 0); 2298 2299 // 2299 2300 2300 2301
+1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
··· 52 52 .disable_plane = dcn20_disable_plane, 53 53 .pipe_control_lock = dcn20_pipe_control_lock, 54 54 .interdependent_update_lock = dcn10_lock_all_pipes, 55 + .cursor_lock = dcn10_cursor_lock, 55 56 .prepare_bandwidth = dcn20_prepare_bandwidth, 56 57 .optimize_bandwidth = dcn20_optimize_bandwidth, 57 58 .update_bandwidth = dcn20_update_bandwidth,
+1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
··· 545 545 .mpc_init = mpc1_mpc_init, 546 546 .mpc_init_single_inst = mpc1_mpc_init_single_inst, 547 547 .update_blending = mpc2_update_blending, 548 + .cursor_lock = mpc1_cursor_lock, 548 549 .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp, 549 550 .wait_for_idle = mpc2_assert_idle_mpcc, 550 551 .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
+2 -1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
··· 179 179 SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\ 180 180 SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\ 181 181 SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\ 182 - SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh) 182 + SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\ 183 + SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh) 183 184 184 185 /* 185 186 * DCN2 MPC_OCSC debug status register:
+4
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 508 508 .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ 509 509 mm ## block ## id ## _ ## reg_name 510 510 511 + #define VUPDATE_SRII(reg_name, block, id)\ 512 + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ 513 + mm ## reg_name ## _ ## block ## id 514 + 511 515 /* NBIO */ 512 516 #define NBIO_BASE_INNER(seg) \ 513 517 NBIO_BASE__INST0_SEG ## seg
+1
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
··· 53 53 .disable_plane = dcn20_disable_plane, 54 54 .pipe_control_lock = dcn20_pipe_control_lock, 55 55 .interdependent_update_lock = dcn10_lock_all_pipes, 56 + .cursor_lock = dcn10_cursor_lock, 56 57 .prepare_bandwidth = dcn20_prepare_bandwidth, 57 58 .optimize_bandwidth = dcn20_optimize_bandwidth, 58 59 .update_bandwidth = dcn20_update_bandwidth,
+32 -43
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
··· 284 284 .dram_channel_width_bytes = 4, 285 285 .fabric_datapath_to_dcn_data_return_bytes = 32, 286 286 .dcn_downspread_percent = 0.5, 287 - .downspread_percent = 0.5, 287 + .downspread_percent = 0.38, 288 288 .dram_page_open_time_ns = 50.0, 289 289 .dram_rw_turnaround_time_ns = 17.5, 290 290 .dram_return_buffer_per_channel_bytes = 8192, ··· 339 339 #define DCCG_SRII(reg_name, block, id)\ 340 340 .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ 341 341 mm ## block ## id ## _ ## reg_name 342 + 343 + #define VUPDATE_SRII(reg_name, block, id)\ 344 + .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \ 345 + mm ## reg_name ## _ ## block ## id 342 346 343 347 /* NBIO */ 344 348 #define NBIO_BASE_INNER(seg) \ ··· 1378 1374 { 1379 1375 struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); 1380 1376 struct clk_limit_table *clk_table = &bw_params->clk_table; 1381 - unsigned int i, j, k; 1382 - int closest_clk_lvl; 1377 + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; 1378 + unsigned int i, j, closest_clk_lvl; 1383 1379 1384 1380 // Default clock levels are used for diags, which may lead to overclocking. 
1385 - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) { 1381 + if (!IS_DIAG_DC(dc->ctx->dce_environment)) { 1386 1382 dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; 1387 1383 dcn2_1_ip.max_num_dpp = pool->base.pipe_count; 1388 1384 dcn2_1_soc.num_chans = bw_params->num_channels; 1389 1385 1390 - /* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */ 1391 - dcn2_1_soc.clock_limits[0].state = 0; 1392 - dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz; 1393 - dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz; 1394 - dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz; 1395 - dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2; 1396 - 1397 - /* 1398 - * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk 1399 - * as indicator 1400 - */ 1401 - 1402 - closest_clk_lvl = -1; 1403 - /* index currently being filled */ 1404 - k = 1; 1405 - for (i = 1; i < clk_table->num_entries; i++) { 1406 - /* loop backwards, skip duplicate state*/ 1407 - for (j = dcn2_1_soc.num_states - 1; j >= k; j--) { 1386 + ASSERT(clk_table->num_entries); 1387 + for (i = 0; i < clk_table->num_entries; i++) { 1388 + /* loop backwards*/ 1389 + for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) { 1408 1390 if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { 1409 1391 closest_clk_lvl = j; 1410 1392 break; 1411 1393 } 1412 1394 } 1413 1395 1414 - /* if found a lvl that fits, use the DCN clks from it, if not, go to next clk limit*/ 1415 - if (closest_clk_lvl != -1) { 1416 - dcn2_1_soc.clock_limits[k].state = i; 1417 - dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; 1418 - dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz; 1419 - dcn2_1_soc.clock_limits[k].socclk_mhz = 
clk_table->entries[i].socclk_mhz; 1420 - dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; 1396 + clock_limits[i].state = i; 1397 + clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; 1398 + clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; 1399 + clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; 1400 + clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; 1421 1401 1422 - dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; 1423 - dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; 1424 - dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; 1425 - dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; 1426 - dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; 1427 - dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; 1428 - dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; 1429 - k++; 1430 - } 1402 + clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz; 1403 + clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz; 1404 + clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps; 1405 + clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz; 1406 + clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz; 1407 + clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz; 1408 + clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz; 1431 1409 } 1432 - dcn2_1_soc.num_states = k; 1410 + for (i = 0; i < clk_table->num_entries; i++) 1411 + dcn2_1_soc.clock_limits[i] = 
clock_limits[i]; 1412 + if (clk_table->num_entries) { 1413 + dcn2_1_soc.num_states = clk_table->num_entries; 1414 + /* duplicate last level */ 1415 + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; 1416 + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states; 1417 + } 1433 1418 } 1434 - 1435 - /* duplicate last level */ 1436 - dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; 1437 - dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states; 1438 1419 1439 1420 dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); 1440 1421 }
+16
drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
··· 210 210 struct mpcc_blnd_cfg *blnd_cfg, 211 211 int mpcc_id); 212 212 213 + /* 214 + * Lock cursor updates for the specified OPP. 215 + * OPP defines the set of MPCC that are locked together for cursor. 216 + * 217 + * Parameters: 218 + * [in] mpc - MPC context. 219 + * [in] opp_id - The OPP to lock cursor updates on 220 + * [in] lock - lock/unlock the OPP 221 + * 222 + * Return: void 223 + */ 224 + void (*cursor_lock)( 225 + struct mpc *mpc, 226 + int opp_id, 227 + bool lock); 228 + 213 229 struct mpcc* (*get_mpcc_for_dpp)( 214 230 struct mpc_tree *tree, 215 231 int dpp_id);
+1
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
··· 86 86 struct dc_state *context, bool lock); 87 87 void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx, 88 88 bool flip_immediate); 89 + void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock); 89 90 90 91 /* Timing Related */ 91 92 void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
+5 -4
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
··· 1435 1435 if (!hwmgr) 1436 1436 return -EINVAL; 1437 1437 1438 - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability) 1438 + if (!(hwmgr->not_vf && amdgpu_dpm) || 1439 + !hwmgr->hwmgr_func->get_asic_baco_capability) 1439 1440 return 0; 1440 1441 1441 1442 mutex_lock(&hwmgr->smu_lock); ··· 1453 1452 if (!hwmgr) 1454 1453 return -EINVAL; 1455 1454 1456 - if (!(hwmgr->not_vf && amdgpu_dpm) || 1457 - !hwmgr->hwmgr_func->get_asic_baco_state) 1455 + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state) 1458 1456 return 0; 1459 1457 1460 1458 mutex_lock(&hwmgr->smu_lock); ··· 1470 1470 if (!hwmgr) 1471 1471 return -EINVAL; 1472 1472 1473 - if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state) 1473 + if (!(hwmgr->not_vf && amdgpu_dpm) || 1474 + !hwmgr->hwmgr_func->set_asic_baco_state) 1474 1475 return 0; 1475 1476 1476 1477 mutex_lock(&hwmgr->smu_lock);
+6 -2
drivers/gpu/drm/drm_dp_mst_topology.c
··· 3442 3442 drm_dp_queue_down_tx(mgr, txmsg); 3443 3443 3444 3444 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3445 - if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3446 - ret = -EIO; 3445 + if (ret > 0) { 3446 + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3447 + ret = -EIO; 3448 + else 3449 + ret = size; 3450 + } 3447 3451 3448 3452 kfree(txmsg); 3449 3453 fail_put:
+1 -1
drivers/gpu/drm/drm_edid.c
··· 5111 5111 struct drm_display_mode *mode; 5112 5112 unsigned pixel_clock = (timings->pixel_clock[0] | 5113 5113 (timings->pixel_clock[1] << 8) | 5114 - (timings->pixel_clock[2] << 16)); 5114 + (timings->pixel_clock[2] << 16)) + 1; 5115 5115 unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1; 5116 5116 unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1; 5117 5117 unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
+20 -4
drivers/gpu/drm/i915/gem/i915_gem_tiling.c
··· 182 182 int tiling_mode, unsigned int stride) 183 183 { 184 184 struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; 185 - struct i915_vma *vma; 185 + struct i915_vma *vma, *vn; 186 + LIST_HEAD(unbind); 186 187 int ret = 0; 187 188 188 189 if (tiling_mode == I915_TILING_NONE) 189 190 return 0; 190 191 191 192 mutex_lock(&ggtt->vm.mutex); 193 + 194 + spin_lock(&obj->vma.lock); 192 195 for_each_ggtt_vma(vma, obj) { 196 + GEM_BUG_ON(vma->vm != &ggtt->vm); 197 + 193 198 if (i915_vma_fence_prepare(vma, tiling_mode, stride)) 194 199 continue; 195 200 196 - ret = __i915_vma_unbind(vma); 197 - if (ret) 198 - break; 201 + list_move(&vma->vm_link, &unbind); 199 202 } 203 + spin_unlock(&obj->vma.lock); 204 + 205 + list_for_each_entry_safe(vma, vn, &unbind, vm_link) { 206 + ret = __i915_vma_unbind(vma); 207 + if (ret) { 208 + /* Restore the remaining vma on an error */ 209 + list_splice(&unbind, &ggtt->vm.bound_list); 210 + break; 211 + } 212 + } 213 + 200 214 mutex_unlock(&ggtt->vm.mutex); 201 215 202 216 return ret; ··· 282 268 } 283 269 mutex_unlock(&obj->mm.lock); 284 270 271 + spin_lock(&obj->vma.lock); 285 272 for_each_ggtt_vma(vma, obj) { 286 273 vma->fence_size = 287 274 i915_gem_fence_size(i915, vma->size, tiling, stride); ··· 293 278 if (vma->fence) 294 279 vma->fence->dirty = true; 295 280 } 281 + spin_unlock(&obj->vma.lock); 296 282 297 283 obj->tiling_and_stride = tiling | stride; 298 284 i915_gem_object_unlock(obj);
+8 -4
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
··· 1477 1477 unsigned int page_size = BIT(first); 1478 1478 1479 1479 obj = i915_gem_object_create_internal(dev_priv, page_size); 1480 - if (IS_ERR(obj)) 1481 - return PTR_ERR(obj); 1480 + if (IS_ERR(obj)) { 1481 + err = PTR_ERR(obj); 1482 + goto out_vm; 1483 + } 1482 1484 1483 1485 vma = i915_vma_instance(obj, vm, NULL); 1484 1486 if (IS_ERR(vma)) { ··· 1533 1531 } 1534 1532 1535 1533 obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE); 1536 - if (IS_ERR(obj)) 1537 - return PTR_ERR(obj); 1534 + if (IS_ERR(obj)) { 1535 + err = PTR_ERR(obj); 1536 + goto out_vm; 1537 + } 1538 1538 1539 1539 vma = i915_vma_instance(obj, vm, NULL); 1540 1540 if (IS_ERR(vma)) {
+2
drivers/gpu/drm/i915/gt/intel_timeline.c
··· 521 521 522 522 rcu_read_lock(); 523 523 cl = rcu_dereference(from->hwsp_cacheline); 524 + if (i915_request_completed(from)) /* confirm cacheline is valid */ 525 + goto unlock; 524 526 if (unlikely(!i915_active_acquire_if_busy(&cl->active))) 525 527 goto unlock; /* seqno wrapped and completed! */ 526 528 if (unlikely(i915_request_completed(from)))
+2 -4
drivers/gpu/drm/i915/i915_irq.c
··· 3358 3358 { 3359 3359 struct intel_uncore *uncore = &dev_priv->uncore; 3360 3360 3361 - u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3361 + u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | 3362 + GEN8_PIPE_CDCLK_CRC_DONE; 3362 3363 u32 de_pipe_enables; 3363 3364 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3364 3365 u32 de_port_enables; ··· 3370 3369 de_misc_masked |= GEN8_DE_MISC_GSE; 3371 3370 3372 3371 if (INTEL_GEN(dev_priv) >= 9) { 3373 - de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3374 3372 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3375 3373 GEN9_AUX_CHANNEL_D; 3376 3374 if (IS_GEN9_LP(dev_priv)) 3377 3375 de_port_masked |= BXT_DE_PORT_GMBUS; 3378 - } else { 3379 - de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3380 3376 } 3381 3377 3382 3378 if (INTEL_GEN(dev_priv) >= 11)
+6 -4
drivers/gpu/drm/i915/i915_vma.c
··· 158 158 159 159 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE)); 160 160 161 + spin_lock(&obj->vma.lock); 162 + 161 163 if (i915_is_ggtt(vm)) { 162 164 if (unlikely(overflows_type(vma->size, u32))) 163 - goto err_vma; 165 + goto err_unlock; 164 166 165 167 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size, 166 168 i915_gem_object_get_tiling(obj), 167 169 i915_gem_object_get_stride(obj)); 168 170 if (unlikely(vma->fence_size < vma->size || /* overflow */ 169 171 vma->fence_size > vm->total)) 170 - goto err_vma; 172 + goto err_unlock; 171 173 172 174 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT)); 173 175 ··· 180 178 181 179 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); 182 180 } 183 - 184 - spin_lock(&obj->vma.lock); 185 181 186 182 rb = NULL; 187 183 p = &obj->vma.tree.rb_node; ··· 225 225 226 226 return vma; 227 227 228 + err_unlock: 229 + spin_unlock(&obj->vma.lock); 228 230 err_vma: 229 231 i915_vma_free(vma); 230 232 return ERR_PTR(-E2BIG);
+5 -5
drivers/gpu/drm/qxl/qxl_cmd.c
··· 480 480 return ret; 481 481 482 482 ret = qxl_release_reserve_list(release, true); 483 - if (ret) 483 + if (ret) { 484 + qxl_release_free(qdev, release); 484 485 return ret; 485 - 486 + } 486 487 cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); 487 488 cmd->type = QXL_SURFACE_CMD_CREATE; 488 489 cmd->flags = QXL_SURF_FLAG_KEEP_DATA; ··· 500 499 /* no need to add a release to the fence for this surface bo, 501 500 since it is only released when we ask to destroy the surface 502 501 and it would never signal otherwise */ 503 - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 504 502 qxl_release_fence_buffer_objects(release); 503 + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 505 504 506 505 surf->hw_surf_alloc = true; 507 506 spin_lock(&qdev->surf_id_idr_lock); ··· 543 542 cmd->surface_id = id; 544 543 qxl_release_unmap(qdev, release, &cmd->release_info); 545 544 546 - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 547 - 548 545 qxl_release_fence_buffer_objects(release); 546 + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); 549 547 550 548 return 0; 551 549 }
+3 -3
drivers/gpu/drm/qxl/qxl_display.c
··· 510 510 cmd->u.set.visible = 1; 511 511 qxl_release_unmap(qdev, release, &cmd->release_info); 512 512 513 - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 514 513 qxl_release_fence_buffer_objects(release); 514 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 515 515 516 516 return ret; 517 517 ··· 652 652 cmd->u.position.y = plane->state->crtc_y + fb->hot_y; 653 653 654 654 qxl_release_unmap(qdev, release, &cmd->release_info); 655 - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 656 655 qxl_release_fence_buffer_objects(release); 656 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 657 657 658 658 if (old_cursor_bo != NULL) 659 659 qxl_bo_unpin(old_cursor_bo); ··· 700 700 cmd->type = QXL_CURSOR_HIDE; 701 701 qxl_release_unmap(qdev, release, &cmd->release_info); 702 702 703 - qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 704 703 qxl_release_fence_buffer_objects(release); 704 + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 705 705 } 706 706 707 707 static void qxl_update_dumb_head(struct qxl_device *qdev,
+4 -3
drivers/gpu/drm/qxl/qxl_draw.c
··· 209 209 goto out_release_backoff; 210 210 211 211 rects = drawable_set_clipping(qdev, num_clips, clips_bo); 212 - if (!rects) 212 + if (!rects) { 213 + ret = -EINVAL; 213 214 goto out_release_backoff; 214 - 215 + } 215 216 drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); 216 217 217 218 drawable->clip.type = SPICE_CLIP_TYPE_RECTS; ··· 243 242 } 244 243 qxl_bo_kunmap(clips_bo); 245 244 246 - qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 247 245 qxl_release_fence_buffer_objects(release); 246 + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); 248 247 249 248 out_release_backoff: 250 249 if (ret)
+2 -1
drivers/gpu/drm/qxl/qxl_image.c
··· 212 212 break; 213 213 default: 214 214 DRM_ERROR("unsupported image bit depth\n"); 215 - return -EINVAL; /* TODO: cleanup */ 215 + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); 216 + return -EINVAL; 216 217 } 217 218 image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN; 218 219 image->u.bitmap.x = width;
+1 -4
drivers/gpu/drm/qxl/qxl_ioctl.c
··· 261 261 apply_surf_reloc(qdev, &reloc_info[i]); 262 262 } 263 263 264 + qxl_release_fence_buffer_objects(release); 264 265 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); 265 - if (ret) 266 - qxl_release_backoff_reserve_list(release); 267 - else 268 - qxl_release_fence_buffer_objects(release); 269 266 270 267 out_free_bos: 271 268 out_free_release:
+6 -11
drivers/gpu/drm/virtio/virtgpu_kms.c
··· 53 53 events_clear, &events_clear); 54 54 } 55 55 56 - static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev, 57 - uint32_t ctx_id) 58 - { 59 - virtio_gpu_cmd_context_destroy(vgdev, ctx_id); 60 - virtio_gpu_notify(vgdev); 61 - ida_free(&vgdev->ctx_id_ida, ctx_id - 1); 62 - } 63 - 64 56 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, 65 57 void (*work_func)(struct work_struct *work)) 66 58 { ··· 267 275 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file) 268 276 { 269 277 struct virtio_gpu_device *vgdev = dev->dev_private; 270 - struct virtio_gpu_fpriv *vfpriv; 278 + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; 271 279 272 280 if (!vgdev->has_virgl_3d) 273 281 return; 274 282 275 - vfpriv = file->driver_priv; 283 + if (vfpriv->context_created) { 284 + virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id); 285 + virtio_gpu_notify(vgdev); 286 + } 276 287 277 - virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id); 288 + ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1); 278 289 mutex_destroy(&vfpriv->context_lock); 279 290 kfree(vfpriv); 280 291 file->driver_priv = NULL;
+1
drivers/hid/Kconfig
··· 1155 1155 config HID_MCP2221 1156 1156 tristate "Microchip MCP2221 HID USB-to-I2C/SMbus host support" 1157 1157 depends on USB_HID && I2C 1158 + depends on GPIOLIB 1158 1159 ---help--- 1159 1160 Provides I2C and SMBUS host adapter functionality over USB-HID 1160 1161 through MCP2221 device.
+1
drivers/hid/hid-alps.c
··· 802 802 break; 803 803 case HID_DEVICE_ID_ALPS_U1_DUAL: 804 804 case HID_DEVICE_ID_ALPS_U1: 805 + case HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY: 805 806 data->dev_type = U1; 806 807 break; 807 808 default:
+7 -1
drivers/hid/hid-ids.h
··· 79 79 #define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F 80 80 #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 81 81 #define HID_DEVICE_ID_ALPS_U1 0x1215 82 + #define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY 0x121E 82 83 #define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C 83 84 #define HID_DEVICE_ID_ALPS_1222 0x1222 84 - 85 85 86 86 #define USB_VENDOR_ID_AMI 0x046b 87 87 #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10 ··· 385 385 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 386 386 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 387 387 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 388 + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002 0xc002 388 389 389 390 #define USB_VENDOR_ID_ELAN 0x04f3 390 391 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401 ··· 760 759 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218 761 760 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219 762 761 #define USB_DEVICE_ID_LOGITECH_G15_LCD 0xc222 762 + #define USB_DEVICE_ID_LOGITECH_G11 0xc225 763 763 #define USB_DEVICE_ID_LOGITECH_G15_V2_LCD 0xc227 764 764 #define USB_DEVICE_ID_LOGITECH_G510 0xc22d 765 765 #define USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO 0xc22e ··· 1099 1097 #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300 1100 1098 #define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200 1101 1099 1100 + #define I2C_VENDOR_ID_SYNAPTICS 0x06cb 1101 + #define I2C_PRODUCT_ID_SYNAPTICS_SYNA2393 0x7a13 1102 + 1102 1103 #define USB_VENDOR_ID_SYNAPTICS 0x06cb 1103 1104 #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 1104 1105 #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002 ··· 1116 1111 #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 1117 1112 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 1118 1113 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 1114 + #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819 1119 1115 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 1120 1116 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 1121 1117 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
+4
drivers/hid/hid-lg-g15.c
··· 872 872 } 873 873 874 874 static const struct hid_device_id lg_g15_devices[] = { 875 + /* The G11 is a G15 without the LCD, treat it as a G15 */ 876 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 877 + USB_DEVICE_ID_LOGITECH_G11), 878 + .driver_data = LG_G15 }, 875 879 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 876 880 USB_DEVICE_ID_LOGITECH_G15_LCD), 877 881 .driver_data = LG_G15 },
+3
drivers/hid/hid-multitouch.c
··· 1922 1922 { .driver_data = MT_CLS_EGALAX_SERIAL, 1923 1923 MT_USB_DEVICE(USB_VENDOR_ID_DWAV, 1924 1924 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, 1925 + { .driver_data = MT_CLS_EGALAX, 1926 + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, 1927 + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) }, 1925 1928 1926 1929 /* Elitegroup panel */ 1927 1930 { .driver_data = MT_CLS_SERIAL,
+1
drivers/hid/hid-quirks.c
··· 163 163 { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS }, 164 164 { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS }, 165 165 { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS }, 166 + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS }, 166 167 { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD }, 167 168 { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT }, 168 169 { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
+2
drivers/hid/i2c-hid/i2c-hid-core.c
··· 177 177 I2C_HID_QUIRK_BOGUS_IRQ }, 178 178 { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID, 179 179 I2C_HID_QUIRK_RESET_ON_RESUME }, 180 + { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393, 181 + I2C_HID_QUIRK_RESET_ON_RESUME }, 180 182 { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, 181 183 I2C_HID_QUIRK_BAD_INPUT_SIZE }, 182 184 { 0, 0 }
+29 -8
drivers/hid/usbhid/hid-core.c
··· 682 682 struct usbhid_device *usbhid = hid->driver_data; 683 683 int res; 684 684 685 + mutex_lock(&usbhid->mutex); 686 + 685 687 set_bit(HID_OPENED, &usbhid->iofl); 686 688 687 - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) 688 - return 0; 689 + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { 690 + res = 0; 691 + goto Done; 692 + } 689 693 690 694 res = usb_autopm_get_interface(usbhid->intf); 691 695 /* the device must be awake to reliably request remote wakeup */ 692 696 if (res < 0) { 693 697 clear_bit(HID_OPENED, &usbhid->iofl); 694 - return -EIO; 698 + res = -EIO; 699 + goto Done; 695 700 } 696 701 697 702 usbhid->intf->needs_remote_wakeup = 1; ··· 730 725 msleep(50); 731 726 732 727 clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); 728 + 729 + Done: 730 + mutex_unlock(&usbhid->mutex); 733 731 return res; 734 732 } 735 733 736 734 static void usbhid_close(struct hid_device *hid) 737 735 { 738 736 struct usbhid_device *usbhid = hid->driver_data; 737 + 738 + mutex_lock(&usbhid->mutex); 739 739 740 740 /* 741 741 * Make sure we don't restart data acquisition due to ··· 753 743 clear_bit(HID_IN_POLLING, &usbhid->iofl); 754 744 spin_unlock_irq(&usbhid->lock); 755 745 756 - if (hid->quirks & HID_QUIRK_ALWAYS_POLL) 757 - return; 746 + if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { 747 + hid_cancel_delayed_stuff(usbhid); 748 + usb_kill_urb(usbhid->urbin); 749 + usbhid->intf->needs_remote_wakeup = 0; 750 + } 758 751 759 - hid_cancel_delayed_stuff(usbhid); 760 - usb_kill_urb(usbhid->urbin); 761 - usbhid->intf->needs_remote_wakeup = 0; 752 + mutex_unlock(&usbhid->mutex); 762 753 } 763 754 764 755 /* ··· 1068 1057 unsigned int n, insize = 0; 1069 1058 int ret; 1070 1059 1060 + mutex_lock(&usbhid->mutex); 1061 + 1071 1062 clear_bit(HID_DISCONNECTED, &usbhid->iofl); 1072 1063 1073 1064 usbhid->bufsize = HID_MIN_BUFFER_SIZE; ··· 1190 1177 usbhid_set_leds(hid); 1191 1178 device_set_wakeup_enable(&dev->dev, 1); 1192 1179 } 1180 + 1181 + mutex_unlock(&usbhid->mutex); 1193 1182 return 0; 
1194 1183 1195 1184 fail: ··· 1202 1187 usbhid->urbout = NULL; 1203 1188 usbhid->urbctrl = NULL; 1204 1189 hid_free_buffers(dev, hid); 1190 + mutex_unlock(&usbhid->mutex); 1205 1191 return ret; 1206 1192 } 1207 1193 ··· 1217 1201 clear_bit(HID_IN_POLLING, &usbhid->iofl); 1218 1202 usbhid->intf->needs_remote_wakeup = 0; 1219 1203 } 1204 + 1205 + mutex_lock(&usbhid->mutex); 1220 1206 1221 1207 clear_bit(HID_STARTED, &usbhid->iofl); 1222 1208 spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ ··· 1240 1222 usbhid->urbout = NULL; 1241 1223 1242 1224 hid_free_buffers(hid_to_usb_dev(hid), hid); 1225 + 1226 + mutex_unlock(&usbhid->mutex); 1243 1227 } 1244 1228 1245 1229 static int usbhid_power(struct hid_device *hid, int lvl) ··· 1402 1382 INIT_WORK(&usbhid->reset_work, hid_reset); 1403 1383 timer_setup(&usbhid->io_retry, hid_retry_timeout, 0); 1404 1384 spin_lock_init(&usbhid->lock); 1385 + mutex_init(&usbhid->mutex); 1405 1386 1406 1387 ret = hid_add_device(hid); 1407 1388 if (ret) {
+1
drivers/hid/usbhid/usbhid.h
··· 80 80 dma_addr_t outbuf_dma; /* Output buffer dma */ 81 81 unsigned long last_out; /* record of last output for timeouts */ 82 82 83 + struct mutex mutex; /* start/stop/open/close */ 83 84 spinlock_t lock; /* fifo spinlock */ 84 85 unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ 85 86 struct timer_list io_retry; /* Retry timer */
+3 -1
drivers/hid/wacom_sys.c
··· 319 319 data[0] = field->report->id; 320 320 ret = wacom_get_report(hdev, HID_FEATURE_REPORT, 321 321 data, n, WAC_CMD_RETRIES); 322 - if (ret == n) { 322 + if (ret == n && features->type == HID_GENERIC) { 323 323 ret = hid_report_raw_event(hdev, 324 324 HID_FEATURE_REPORT, data, n, 0); 325 + } else if (ret == 2 && features->type != HID_GENERIC) { 326 + features->touch_max = data[1]; 325 327 } else { 326 328 features->touch_max = 16; 327 329 hid_warn(hdev, "wacom_feature_mapping: "
+22 -66
drivers/hid/wacom_wac.c
··· 1427 1427 { 1428 1428 struct input_dev *pad_input = wacom->pad_input; 1429 1429 unsigned char *data = wacom->data; 1430 + int nbuttons = wacom->features.numbered_buttons; 1430 1431 1431 - int buttons = data[282] | ((data[281] & 0x40) << 2); 1432 + int expresskeys = data[282]; 1433 + int center = (data[281] & 0x40) >> 6; 1432 1434 int ring = data[285] & 0x7F; 1433 1435 bool ringstatus = data[285] & 0x80; 1434 - bool prox = buttons || ringstatus; 1436 + bool prox = expresskeys || center || ringstatus; 1435 1437 1436 1438 /* Fix touchring data: userspace expects 0 at left and increasing clockwise */ 1437 1439 ring = 71 - ring; ··· 1441 1439 if (ring > 71) 1442 1440 ring -= 72; 1443 1441 1444 - wacom_report_numbered_buttons(pad_input, 9, buttons); 1442 + wacom_report_numbered_buttons(pad_input, nbuttons, 1443 + expresskeys | (center << (nbuttons - 1))); 1445 1444 1446 1445 input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0); 1447 1446 ··· 2640 2637 case HID_DG_TIPSWITCH: 2641 2638 hid_data->last_slot_field = equivalent_usage; 2642 2639 break; 2640 + case HID_DG_CONTACTCOUNT: 2641 + hid_data->cc_report = report->id; 2642 + hid_data->cc_index = i; 2643 + hid_data->cc_value_index = j; 2644 + break; 2643 2645 } 2644 2646 } 2647 + } 2648 + 2649 + if (hid_data->cc_report != 0 && 2650 + hid_data->cc_index >= 0) { 2651 + struct hid_field *field = report->field[hid_data->cc_index]; 2652 + int value = field->value[hid_data->cc_value_index]; 2653 + if (value) 2654 + hid_data->num_expected = value; 2655 + } 2656 + else { 2657 + hid_data->num_expected = wacom_wac->features.touch_max; 2645 2658 } 2646 2659 } 2647 2660 ··· 2668 2649 struct wacom_wac *wacom_wac = &wacom->wacom_wac; 2669 2650 struct input_dev *input = wacom_wac->touch_input; 2670 2651 unsigned touch_max = wacom_wac->features.touch_max; 2671 - struct hid_data *hid_data = &wacom_wac->hid_data; 2672 2652 2673 2653 /* If more packets of data are expected, give us a chance to 2674 2654 * process them rather 
than immediately syncing a partial ··· 2681 2663 2682 2664 input_sync(input); 2683 2665 wacom_wac->hid_data.num_received = 0; 2684 - hid_data->num_expected = 0; 2685 2666 2686 2667 /* keep touch state for pen event */ 2687 2668 wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac); ··· 2755 2738 } 2756 2739 } 2757 2740 2758 - static void wacom_set_num_expected(struct hid_device *hdev, 2759 - struct hid_report *report, 2760 - int collection_index, 2761 - struct hid_field *field, 2762 - int field_index) 2763 - { 2764 - struct wacom *wacom = hid_get_drvdata(hdev); 2765 - struct wacom_wac *wacom_wac = &wacom->wacom_wac; 2766 - struct hid_data *hid_data = &wacom_wac->hid_data; 2767 - unsigned int original_collection_level = 2768 - hdev->collection[collection_index].level; 2769 - bool end_collection = false; 2770 - int i; 2771 - 2772 - if (hid_data->num_expected) 2773 - return; 2774 - 2775 - // find the contact count value for this segment 2776 - for (i = field_index; i < report->maxfield && !end_collection; i++) { 2777 - struct hid_field *field = report->field[i]; 2778 - unsigned int field_level = 2779 - hdev->collection[field->usage[0].collection_index].level; 2780 - unsigned int j; 2781 - 2782 - if (field_level != original_collection_level) 2783 - continue; 2784 - 2785 - for (j = 0; j < field->maxusage; j++) { 2786 - struct hid_usage *usage = &field->usage[j]; 2787 - 2788 - if (usage->collection_index != collection_index) { 2789 - end_collection = true; 2790 - break; 2791 - } 2792 - if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) { 2793 - hid_data->cc_report = report->id; 2794 - hid_data->cc_index = i; 2795 - hid_data->cc_value_index = j; 2796 - 2797 - if (hid_data->cc_report != 0 && 2798 - hid_data->cc_index >= 0) { 2799 - 2800 - struct hid_field *field = 2801 - report->field[hid_data->cc_index]; 2802 - int value = 2803 - field->value[hid_data->cc_value_index]; 2804 - 2805 - if (value) 2806 - hid_data->num_expected = value; 2807 - } 
2808 - } 2809 - } 2810 - } 2811 - 2812 - if (hid_data->cc_report == 0 || hid_data->cc_index < 0) 2813 - hid_data->num_expected = wacom_wac->features.touch_max; 2814 - } 2815 - 2816 2741 static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report, 2817 2742 int collection_index, struct hid_field *field, 2818 2743 int field_index) 2819 2744 { 2820 2745 struct wacom *wacom = hid_get_drvdata(hdev); 2821 2746 2822 - if (WACOM_FINGER_FIELD(field)) 2823 - wacom_set_num_expected(hdev, report, collection_index, field, 2824 - field_index); 2825 2747 wacom_report_events(hdev, report, collection_index, field_index); 2826 2748 2827 2749 /*
+1 -5
drivers/hv/hv.c
··· 184 184 185 185 shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR; 186 186 shared_sint.masked = false; 187 - if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED) 188 - shared_sint.auto_eoi = false; 189 - else 190 - shared_sint.auto_eoi = true; 191 - 187 + shared_sint.auto_eoi = hv_recommend_using_aeoi(); 192 188 hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64); 193 189 194 190 /* Enable the global synic bit */
+2 -2
drivers/hv/hv_trace.h
··· 286 286 __field(int, ret) 287 287 ), 288 288 TP_fast_assign( 289 - memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16); 290 - memcpy(__entry->host_id, &msg->host_service_id.b, 16); 289 + export_guid(__entry->guest_id, &msg->guest_endpoint_id); 290 + export_guid(__entry->host_id, &msg->host_service_id); 291 291 __entry->ret = ret; 292 292 ), 293 293 TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
+34 -9
drivers/hv/vmbus_drv.c
··· 978 978 979 979 return drv->resume(dev); 980 980 } 981 + #else 982 + #define vmbus_suspend NULL 983 + #define vmbus_resume NULL 981 984 #endif /* CONFIG_PM_SLEEP */ 982 985 983 986 /* ··· 1000 997 } 1001 998 1002 999 /* 1003 - * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than 1004 - * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm. 1000 + * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm. 1001 + * 1002 + * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we 1003 + * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there 1004 + * is no way to wake up a Generation-2 VM. 1005 + * 1006 + * The other 4 ops are for hibernation. 1005 1007 */ 1008 + 1006 1009 static const struct dev_pm_ops vmbus_pm = { 1007 - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume) 1010 + .suspend_noirq = NULL, 1011 + .resume_noirq = NULL, 1012 + .freeze_noirq = vmbus_suspend, 1013 + .thaw_noirq = vmbus_resume, 1014 + .poweroff_noirq = vmbus_suspend, 1015 + .restore_noirq = vmbus_resume, 1008 1016 }; 1009 1017 1010 1018 /* The one and only one */ ··· 2295 2281 2296 2282 return 0; 2297 2283 } 2284 + #else 2285 + #define vmbus_bus_suspend NULL 2286 + #define vmbus_bus_resume NULL 2298 2287 #endif /* CONFIG_PM_SLEEP */ 2299 2288 2300 2289 static const struct acpi_device_id vmbus_acpi_device_ids[] = { ··· 2308 2291 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids); 2309 2292 2310 2293 /* 2311 - * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than 2312 - * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the 2313 - * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the 2314 - * pci "noirq" restore callback runs before "non-noirq" callbacks (see 2294 + * Note: we must use the "no_irq" ops, otherwise hibernation can not work with 2295 + * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in 2296 + * the resume path, the pci "noirq" restore 
op runs before "non-noirq" op (see 2315 2297 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() -> 2316 2298 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's 2317 - * resume callback must also run via the "noirq" callbacks. 2299 + * resume callback must also run via the "noirq" ops. 2300 + * 2301 + * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment 2302 + * earlier in this file before vmbus_pm. 2318 2303 */ 2304 + 2319 2305 static const struct dev_pm_ops vmbus_bus_pm = { 2320 - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume) 2306 + .suspend_noirq = NULL, 2307 + .resume_noirq = NULL, 2308 + .freeze_noirq = vmbus_bus_suspend, 2309 + .thaw_noirq = vmbus_bus_resume, 2310 + .poweroff_noirq = vmbus_bus_suspend, 2311 + .restore_noirq = vmbus_bus_resume 2321 2312 }; 2322 2313 2323 2314 static struct acpi_driver vmbus_acpi_driver = {
+1 -1
drivers/i2c/busses/i2c-amd-mp2-pci.c
··· 349 349 if (!privdata) 350 350 return -ENOMEM; 351 351 352 + privdata->pci_dev = pci_dev; 352 353 rc = amd_mp2_pci_init(privdata, pci_dev); 353 354 if (rc) 354 355 return rc; 355 356 356 357 mutex_init(&privdata->c2p_lock); 357 - privdata->pci_dev = pci_dev; 358 358 359 359 pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000); 360 360 pm_runtime_use_autosuspend(&pci_dev->dev);
+4 -1
drivers/i2c/busses/i2c-aspeed.c
··· 603 603 /* Ack all interrupts except for Rx done */ 604 604 writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE, 605 605 bus->base + ASPEED_I2C_INTR_STS_REG); 606 + readl(bus->base + ASPEED_I2C_INTR_STS_REG); 606 607 irq_remaining = irq_received; 607 608 608 609 #if IS_ENABLED(CONFIG_I2C_SLAVE) ··· 646 645 irq_received, irq_handled); 647 646 648 647 /* Ack Rx done */ 649 - if (irq_received & ASPEED_I2CD_INTR_RX_DONE) 648 + if (irq_received & ASPEED_I2CD_INTR_RX_DONE) { 650 649 writel(ASPEED_I2CD_INTR_RX_DONE, 651 650 bus->base + ASPEED_I2C_INTR_STS_REG); 651 + readl(bus->base + ASPEED_I2C_INTR_STS_REG); 652 + } 652 653 spin_unlock(&bus->lock); 653 654 return irq_remaining ? IRQ_NONE : IRQ_HANDLED; 654 655 }
+3
drivers/i2c/busses/i2c-bcm-iproc.c
··· 360 360 value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK); 361 361 i2c_slave_event(iproc_i2c->slave, 362 362 I2C_SLAVE_WRITE_RECEIVED, &value); 363 + if (rx_status == I2C_SLAVE_RX_END) 364 + i2c_slave_event(iproc_i2c->slave, 365 + I2C_SLAVE_STOP, &value); 363 366 } 364 367 } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) { 365 368 /* Master read other than start */
+12 -24
drivers/i2c/busses/i2c-tegra.c
··· 996 996 do { 997 997 u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS); 998 998 999 - if (status) 999 + if (status) { 1000 1000 tegra_i2c_isr(i2c_dev->irq, i2c_dev); 1001 1001 1002 - if (completion_done(complete)) { 1003 - s64 delta = ktime_ms_delta(ktimeout, ktime); 1002 + if (completion_done(complete)) { 1003 + s64 delta = ktime_ms_delta(ktimeout, ktime); 1004 1004 1005 - return msecs_to_jiffies(delta) ?: 1; 1005 + return msecs_to_jiffies(delta) ?: 1; 1006 + } 1006 1007 } 1007 1008 1008 1009 ktime = ktime_get(); ··· 1030 1029 disable_irq(i2c_dev->irq); 1031 1030 1032 1031 /* 1033 - * Under some rare circumstances (like running KASAN + 1034 - * NFS root) CPU, which handles interrupt, may stuck in 1035 - * uninterruptible state for a significant time. In this 1036 - * case we will get timeout if I2C transfer is running on 1037 - * a sibling CPU, despite of IRQ being raised. 1038 - * 1039 - * In order to handle this rare condition, the IRQ status 1040 - * needs to be checked after timeout. 1032 + * There is a chance that completion may happen after IRQ 1033 + * synchronization, which is done by disable_irq(). 1041 1034 */ 1042 - if (ret == 0) 1043 - ret = tegra_i2c_poll_completion_timeout(i2c_dev, 1044 - complete, 0); 1035 + if (ret == 0 && completion_done(complete)) { 1036 + dev_warn(i2c_dev->dev, 1037 + "completion done after timeout\n"); 1038 + ret = 1; 1039 + } 1045 1040 } 1046 1041 1047 1042 return ret; ··· 1215 1218 if (dma) { 1216 1219 time_left = tegra_i2c_wait_completion_timeout( 1217 1220 i2c_dev, &i2c_dev->dma_complete, xfer_time); 1218 - 1219 - /* 1220 - * Synchronize DMA first, since dmaengine_terminate_sync() 1221 - * performs synchronization after the transfer's termination 1222 - * and we want to get a completion if transfer succeeded. 1223 - */ 1224 - dmaengine_synchronize(i2c_dev->msg_read ? 1225 - i2c_dev->rx_dma_chan : 1226 - i2c_dev->tx_dma_chan); 1227 1221 1228 1222 dmaengine_terminate_sync(i2c_dev->msg_read ? 1229 1223 i2c_dev->rx_dma_chan :
+47 -16
drivers/iio/adc/ad7192.c
··· 125 125 #define AD7193_CH_AINCOM 0x600 /* AINCOM - AINCOM */ 126 126 127 127 /* ID Register Bit Designations (AD7192_REG_ID) */ 128 - #define ID_AD7190 0x4 129 - #define ID_AD7192 0x0 130 - #define ID_AD7193 0x2 131 - #define ID_AD7195 0x6 128 + #define CHIPID_AD7190 0x4 129 + #define CHIPID_AD7192 0x0 130 + #define CHIPID_AD7193 0x2 131 + #define CHIPID_AD7195 0x6 132 132 #define AD7192_ID_MASK 0x0F 133 133 134 134 /* GPOCON Register Bit Designations (AD7192_REG_GPOCON) */ ··· 161 161 AD7192_SYSCALIB_FULL_SCALE, 162 162 }; 163 163 164 + enum { 165 + ID_AD7190, 166 + ID_AD7192, 167 + ID_AD7193, 168 + ID_AD7195, 169 + }; 170 + 171 + struct ad7192_chip_info { 172 + unsigned int chip_id; 173 + const char *name; 174 + }; 175 + 164 176 struct ad7192_state { 177 + const struct ad7192_chip_info *chip_info; 165 178 struct regulator *avdd; 166 179 struct regulator *dvdd; 167 180 struct clk *mclk; ··· 185 172 u32 conf; 186 173 u32 scale_avail[8][2]; 187 174 u8 gpocon; 188 - u8 devid; 189 175 u8 clock_sel; 190 176 struct mutex lock; /* protect sensor state */ 191 177 u8 syscalib_mode[8]; ··· 360 348 361 349 id &= AD7192_ID_MASK; 362 350 363 - if (id != st->devid) 351 + if (id != st->chip_info->chip_id) 364 352 dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n", 365 353 id); 366 354 ··· 375 363 st->mode |= AD7192_MODE_REJ60; 376 364 377 365 refin2_en = of_property_read_bool(np, "adi,refin2-pins-enable"); 378 - if (refin2_en && st->devid != ID_AD7195) 366 + if (refin2_en && st->chip_info->chip_id != CHIPID_AD7195) 379 367 st->conf |= AD7192_CONF_REFSEL; 380 368 381 369 st->conf &= ~AD7192_CONF_CHOP; ··· 871 859 IIO_CHAN_SOFT_TIMESTAMP(14), 872 860 }; 873 861 862 + static const struct ad7192_chip_info ad7192_chip_info_tbl[] = { 863 + [ID_AD7190] = { 864 + .chip_id = CHIPID_AD7190, 865 + .name = "ad7190", 866 + }, 867 + [ID_AD7192] = { 868 + .chip_id = CHIPID_AD7192, 869 + .name = "ad7192", 870 + }, 871 + [ID_AD7193] = { 872 + .chip_id = CHIPID_AD7193, 873 + .name = 
"ad7193", 874 + }, 875 + [ID_AD7195] = { 876 + .chip_id = CHIPID_AD7195, 877 + .name = "ad7195", 878 + }, 879 + }; 880 + 874 881 static int ad7192_channels_config(struct iio_dev *indio_dev) 875 882 { 876 883 struct ad7192_state *st = iio_priv(indio_dev); 877 884 878 - switch (st->devid) { 879 - case ID_AD7193: 885 + switch (st->chip_info->chip_id) { 886 + case CHIPID_AD7193: 880 887 indio_dev->channels = ad7193_channels; 881 888 indio_dev->num_channels = ARRAY_SIZE(ad7193_channels); 882 889 break; ··· 909 878 } 910 879 911 880 static const struct of_device_id ad7192_of_match[] = { 912 - { .compatible = "adi,ad7190", .data = (void *)ID_AD7190 }, 913 - { .compatible = "adi,ad7192", .data = (void *)ID_AD7192 }, 914 - { .compatible = "adi,ad7193", .data = (void *)ID_AD7193 }, 915 - { .compatible = "adi,ad7195", .data = (void *)ID_AD7195 }, 881 + { .compatible = "adi,ad7190", .data = &ad7192_chip_info_tbl[ID_AD7190] }, 882 + { .compatible = "adi,ad7192", .data = &ad7192_chip_info_tbl[ID_AD7192] }, 883 + { .compatible = "adi,ad7193", .data = &ad7192_chip_info_tbl[ID_AD7193] }, 884 + { .compatible = "adi,ad7195", .data = &ad7192_chip_info_tbl[ID_AD7195] }, 916 885 {} 917 886 }; 918 887 MODULE_DEVICE_TABLE(of, ad7192_of_match); ··· 969 938 } 970 939 971 940 spi_set_drvdata(spi, indio_dev); 972 - st->devid = (unsigned long)of_device_get_match_data(&spi->dev); 941 + st->chip_info = of_device_get_match_data(&spi->dev); 973 942 indio_dev->dev.parent = &spi->dev; 974 - indio_dev->name = spi_get_device_id(spi)->name; 943 + indio_dev->name = st->chip_info->name; 975 944 indio_dev->modes = INDIO_DIRECT_MODE; 976 945 977 946 ret = ad7192_channels_config(indio_dev); 978 947 if (ret < 0) 979 948 goto error_disable_dvdd; 980 949 981 - if (st->devid == ID_AD7195) 950 + if (st->chip_info->chip_id == CHIPID_AD7195) 982 951 indio_dev->info = &ad7195_info; 983 952 else 984 953 indio_dev->info = &ad7192_info;
+1 -1
drivers/iio/adc/ad7793.c
··· 542 542 .read_raw = &ad7793_read_raw, 543 543 .write_raw = &ad7793_write_raw, 544 544 .write_raw_get_fmt = &ad7793_write_raw_get_fmt, 545 - .attrs = &ad7793_attribute_group, 545 + .attrs = &ad7797_attribute_group, 546 546 .validate_trigger = ad_sd_validate_trigger, 547 547 }; 548 548
+28 -3
drivers/iio/adc/stm32-adc.c
··· 1418 1418 static void stm32_adc_dma_buffer_done(void *data) 1419 1419 { 1420 1420 struct iio_dev *indio_dev = data; 1421 + struct stm32_adc *adc = iio_priv(indio_dev); 1422 + int residue = stm32_adc_dma_residue(adc); 1421 1423 1422 - iio_trigger_poll_chained(indio_dev->trig); 1424 + /* 1425 + * In DMA mode the trigger services of IIO are not used 1426 + * (e.g. no call to iio_trigger_poll). 1427 + * Calling irq handler associated to the hardware trigger is not 1428 + * relevant as the conversions have already been done. Data 1429 + * transfers are performed directly in DMA callback instead. 1430 + * This implementation avoids to call trigger irq handler that 1431 + * may sleep, in an atomic context (DMA irq handler context). 1432 + */ 1433 + dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi); 1434 + 1435 + while (residue >= indio_dev->scan_bytes) { 1436 + u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi]; 1437 + 1438 + iio_push_to_buffers(indio_dev, buffer); 1439 + 1440 + residue -= indio_dev->scan_bytes; 1441 + adc->bufi += indio_dev->scan_bytes; 1442 + if (adc->bufi >= adc->rx_buf_sz) 1443 + adc->bufi = 0; 1444 + } 1423 1445 } 1424 1446 1425 1447 static int stm32_adc_dma_start(struct iio_dev *indio_dev) ··· 1867 1845 { 1868 1846 struct iio_dev *indio_dev; 1869 1847 struct device *dev = &pdev->dev; 1848 + irqreturn_t (*handler)(int irq, void *p) = NULL; 1870 1849 struct stm32_adc *adc; 1871 1850 int ret; 1872 1851 ··· 1934 1911 if (ret < 0) 1935 1912 return ret; 1936 1913 1914 + if (!adc->dma_chan) 1915 + handler = &stm32_adc_trigger_handler; 1916 + 1937 1917 ret = iio_triggered_buffer_setup(indio_dev, 1938 - &iio_pollfunc_store_time, 1939 - &stm32_adc_trigger_handler, 1918 + &iio_pollfunc_store_time, handler, 1940 1919 &stm32_adc_buffer_setup_ops); 1941 1920 if (ret) { 1942 1921 dev_err(&pdev->dev, "buffer setup failed\n");
+3 -3
drivers/iio/adc/ti-ads8344.c
··· 29 29 struct mutex lock; 30 30 31 31 u8 tx_buf ____cacheline_aligned; 32 - u16 rx_buf; 32 + u8 rx_buf[3]; 33 33 }; 34 34 35 35 #define ADS8344_VOLTAGE_CHANNEL(chan, si) \ ··· 89 89 90 90 udelay(9); 91 91 92 - ret = spi_read(spi, &adc->rx_buf, 2); 92 + ret = spi_read(spi, adc->rx_buf, sizeof(adc->rx_buf)); 93 93 if (ret) 94 94 return ret; 95 95 96 - return adc->rx_buf; 96 + return adc->rx_buf[0] << 9 | adc->rx_buf[1] << 1 | adc->rx_buf[2] >> 7; 97 97 } 98 98 99 99 static int ads8344_read_raw(struct iio_dev *iio,
+74 -21
drivers/iio/adc/xilinx-xadc-core.c
··· 102 102 103 103 #define XADC_FLAGS_BUFFERED BIT(0) 104 104 105 + /* 106 + * The XADC hardware supports a samplerate of up to 1MSPS. Unfortunately it does 107 + * not have a hardware FIFO. Which means an interrupt is generated for each 108 + * conversion sequence. At 1MSPS sample rate the CPU in ZYNQ7000 is completely 109 + * overloaded by the interrupts that it soft-lockups. For this reason the driver 110 + * limits the maximum samplerate 150kSPS. At this rate the CPU is fairly busy, 111 + * but still responsive. 112 + */ 113 + #define XADC_MAX_SAMPLERATE 150000 114 + 105 115 static void xadc_write_reg(struct xadc *xadc, unsigned int reg, 106 116 uint32_t val) 107 117 { ··· 684 674 685 675 spin_lock_irqsave(&xadc->lock, flags); 686 676 xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val); 687 - xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS); 677 + xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS); 688 678 if (state) 689 679 val |= XADC_AXI_INT_EOS; 690 680 else ··· 732 722 { 733 723 uint16_t val; 734 724 725 + /* Powerdown the ADC-B when it is not needed. */ 735 726 switch (seq_mode) { 736 727 case XADC_CONF1_SEQ_SIMULTANEOUS: 737 728 case XADC_CONF1_SEQ_INDEPENDENT: 738 - val = XADC_CONF2_PD_ADC_B; 729 + val = 0; 739 730 break; 740 731 default: 741 - val = 0; 732 + val = XADC_CONF2_PD_ADC_B; 742 733 break; 743 734 } 744 735 ··· 808 797 if (ret) 809 798 goto err; 810 799 800 + /* 801 + * In simultaneous mode the upper and lower aux channels are samples at 802 + * the same time. In this mode the upper 8 bits in the sequencer 803 + * register are don't care and the lower 8 bits control two channels 804 + * each. As such we must set the bit if either the channel in the lower 805 + * group or the upper group is enabled. 
806 + */ 807 + if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS) 808 + scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000; 809 + 811 810 ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16); 812 811 if (ret) 813 812 goto err; ··· 844 823 .postdisable = &xadc_postdisable, 845 824 }; 846 825 826 + static int xadc_read_samplerate(struct xadc *xadc) 827 + { 828 + unsigned int div; 829 + uint16_t val16; 830 + int ret; 831 + 832 + ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); 833 + if (ret) 834 + return ret; 835 + 836 + div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; 837 + if (div < 2) 838 + div = 2; 839 + 840 + return xadc_get_dclk_rate(xadc) / div / 26; 841 + } 842 + 847 843 static int xadc_read_raw(struct iio_dev *indio_dev, 848 844 struct iio_chan_spec const *chan, int *val, int *val2, long info) 849 845 { 850 846 struct xadc *xadc = iio_priv(indio_dev); 851 - unsigned int div; 852 847 uint16_t val16; 853 848 int ret; 854 849 ··· 917 880 *val = -((273150 << 12) / 503975); 918 881 return IIO_VAL_INT; 919 882 case IIO_CHAN_INFO_SAMP_FREQ: 920 - ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16); 921 - if (ret) 883 + ret = xadc_read_samplerate(xadc); 884 + if (ret < 0) 922 885 return ret; 923 886 924 - div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET; 925 - if (div < 2) 926 - div = 2; 927 - 928 - *val = xadc_get_dclk_rate(xadc) / div / 26; 929 - 887 + *val = ret; 930 888 return IIO_VAL_INT; 931 889 default: 932 890 return -EINVAL; 933 891 } 934 892 } 935 893 936 - static int xadc_write_raw(struct iio_dev *indio_dev, 937 - struct iio_chan_spec const *chan, int val, int val2, long info) 894 + static int xadc_write_samplerate(struct xadc *xadc, int val) 938 895 { 939 - struct xadc *xadc = iio_priv(indio_dev); 940 896 unsigned long clk_rate = xadc_get_dclk_rate(xadc); 941 897 unsigned int div; 942 898 943 899 if (!clk_rate) 944 900 return -EINVAL; 945 901 946 - if (info != IIO_CHAN_INFO_SAMP_FREQ) 947 - return -EINVAL; 948 - 
949 902 if (val <= 0) 950 903 return -EINVAL; 951 904 952 905 /* Max. 150 kSPS */ 953 - if (val > 150000) 954 - val = 150000; 906 + if (val > XADC_MAX_SAMPLERATE) 907 + val = XADC_MAX_SAMPLERATE; 955 908 956 909 val *= 26; 957 910 ··· 954 927 * limit. 955 928 */ 956 929 div = clk_rate / val; 957 - if (clk_rate / div / 26 > 150000) 930 + if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE) 958 931 div++; 959 932 if (div < 2) 960 933 div = 2; ··· 963 936 964 937 return xadc_update_adc_reg(xadc, XADC_REG_CONF2, XADC_CONF2_DIV_MASK, 965 938 div << XADC_CONF2_DIV_OFFSET); 939 + } 940 + 941 + static int xadc_write_raw(struct iio_dev *indio_dev, 942 + struct iio_chan_spec const *chan, int val, int val2, long info) 943 + { 944 + struct xadc *xadc = iio_priv(indio_dev); 945 + 946 + if (info != IIO_CHAN_INFO_SAMP_FREQ) 947 + return -EINVAL; 948 + 949 + return xadc_write_samplerate(xadc, val); 966 950 } 967 951 968 952 static const struct iio_event_spec xadc_temp_events[] = { ··· 1260 1222 ret = clk_prepare_enable(xadc->clk); 1261 1223 if (ret) 1262 1224 goto err_free_samplerate_trigger; 1225 + 1226 + /* 1227 + * Make sure not to exceed the maximum samplerate since otherwise the 1228 + * resulting interrupt storm will soft-lock the system. 1229 + */ 1230 + if (xadc->ops->flags & XADC_FLAGS_BUFFERED) { 1231 + ret = xadc_read_samplerate(xadc); 1232 + if (ret < 0) 1233 + goto err_free_samplerate_trigger; 1234 + if (ret > XADC_MAX_SAMPLERATE) { 1235 + ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE); 1236 + if (ret < 0) 1237 + goto err_free_samplerate_trigger; 1238 + } 1239 + } 1263 1240 1264 1241 ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0, 1265 1242 dev_name(&pdev->dev), indio_dev);
+1 -1
drivers/iio/common/st_sensors/st_sensors_core.c
··· 79 79 struct st_sensor_odr_avl odr_out = {0, 0}; 80 80 struct st_sensor_data *sdata = iio_priv(indio_dev); 81 81 82 - if (!sdata->sensor_settings->odr.addr) 82 + if (!sdata->sensor_settings->odr.mask) 83 83 return 0; 84 84 85 85 err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
+1 -1
drivers/iio/dac/ad5770r.c
··· 525 525 ret = fwnode_property_read_u32(child, "num", &num); 526 526 if (ret) 527 527 return ret; 528 - if (num > AD5770R_MAX_CHANNELS) 528 + if (num >= AD5770R_MAX_CHANNELS) 529 529 return -EINVAL; 530 530 531 531 ret = fwnode_property_read_u32_array(child,
+10 -1
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
··· 1617 1617 if (result) 1618 1618 goto out_unlock; 1619 1619 1620 + pm_runtime_disable(dev); 1621 + pm_runtime_set_active(dev); 1622 + pm_runtime_enable(dev); 1623 + 1620 1624 result = inv_mpu6050_switch_engine(st, true, st->suspended_sensors); 1621 1625 if (result) 1622 1626 goto out_unlock; ··· 1642 1638 1643 1639 mutex_lock(&st->lock); 1644 1640 1641 + st->suspended_sensors = 0; 1642 + if (pm_runtime_suspended(dev)) { 1643 + result = 0; 1644 + goto out_unlock; 1645 + } 1646 + 1645 1647 if (iio_buffer_enabled(indio_dev)) { 1646 1648 result = inv_mpu6050_prepare_fifo(st, false); 1647 1649 if (result) 1648 1650 goto out_unlock; 1649 1651 } 1650 1652 1651 - st->suspended_sensors = 0; 1652 1653 if (st->chip_config.accl_en) 1653 1654 st->suspended_sensors |= INV_MPU6050_SENSOR_ACCL; 1654 1655 if (st->chip_config.gyro_en)
+3
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
··· 337 337 * @gain: Configured sensor sensitivity. 338 338 * @odr: Output data rate of the sensor [Hz]. 339 339 * @watermark: Sensor watermark level. 340 + * @decimator: Sensor decimation factor. 340 341 * @sip: Number of samples in a given pattern. 341 342 * @ts_ref: Sensor timestamp reference for hw one. 342 343 * @ext_info: Sensor settings if it is connected to i2c controller ··· 351 350 u32 odr; 352 351 353 352 u16 watermark; 353 + u8 decimator; 354 354 u8 sip; 355 355 s64 ts_ref; 356 356 357 357 struct { 358 358 const struct st_lsm6dsx_ext_dev_settings *settings; 359 + u32 slv_odr; 359 360 u8 addr; 360 361 } ext_info; 361 362 };
+16 -7
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
··· 93 93 break; 94 94 } 95 95 96 + sensor->decimator = decimator; 96 97 return i == max_size ? 0 : st_lsm6dsx_decimator_table[i].val; 97 98 } 98 99 ··· 338 337 int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw) 339 338 { 340 339 struct st_lsm6dsx_sensor *acc_sensor, *gyro_sensor, *ext_sensor = NULL; 341 - int err, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset; 340 + int err, sip, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset; 342 341 u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE; 343 342 u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask; 344 343 u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE]; ··· 400 399 acc_sip = acc_sensor->sip; 401 400 ts_sip = hw->ts_sip; 402 401 offset = 0; 402 + sip = 0; 403 403 404 404 while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) { 405 - if (gyro_sip > 0) { 405 + if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) { 406 406 memcpy(gyro_buff, &hw->buff[offset], 407 407 ST_LSM6DSX_SAMPLE_SIZE); 408 408 offset += ST_LSM6DSX_SAMPLE_SIZE; 409 409 } 410 - if (acc_sip > 0) { 410 + if (acc_sip > 0 && !(sip % acc_sensor->decimator)) { 411 411 memcpy(acc_buff, &hw->buff[offset], 412 412 ST_LSM6DSX_SAMPLE_SIZE); 413 413 offset += ST_LSM6DSX_SAMPLE_SIZE; 414 414 } 415 - if (ext_sip > 0) { 415 + if (ext_sip > 0 && !(sip % ext_sensor->decimator)) { 416 416 memcpy(ext_buff, &hw->buff[offset], 417 417 ST_LSM6DSX_SAMPLE_SIZE); 418 418 offset += ST_LSM6DSX_SAMPLE_SIZE; ··· 443 441 offset += ST_LSM6DSX_SAMPLE_SIZE; 444 442 } 445 443 446 - if (gyro_sip-- > 0) 444 + if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) { 447 445 iio_push_to_buffers_with_timestamp( 448 446 hw->iio_devs[ST_LSM6DSX_ID_GYRO], 449 447 gyro_buff, gyro_sensor->ts_ref + ts); 450 - if (acc_sip-- > 0) 448 + gyro_sip--; 449 + } 450 + if (acc_sip > 0 && !(sip % acc_sensor->decimator)) { 451 451 iio_push_to_buffers_with_timestamp( 452 452 hw->iio_devs[ST_LSM6DSX_ID_ACC], 453 453 acc_buff, acc_sensor->ts_ref + ts); 454 - if (ext_sip-- > 0) 454 + acc_sip--; 
455 + } 456 + if (ext_sip > 0 && !(sip % ext_sensor->decimator)) { 455 457 iio_push_to_buffers_with_timestamp( 456 458 hw->iio_devs[ST_LSM6DSX_ID_EXT0], 457 459 ext_buff, ext_sensor->ts_ref + ts); 460 + ext_sip--; 461 + } 462 + sip++; 458 463 } 459 464 } 460 465
+23 -1
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
··· 2036 2036 return 0; 2037 2037 } 2038 2038 2039 - static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw) 2039 + static int st_lsm6dsx_reset_device(struct st_lsm6dsx_hw *hw) 2040 2040 { 2041 2041 const struct st_lsm6dsx_reg *reg; 2042 2042 int err; 2043 + 2044 + /* 2045 + * flush hw FIFO before device reset in order to avoid 2046 + * possible races on interrupt line 1. If the first interrupt 2047 + * line is asserted during hw reset the device will work in 2048 + * I3C-only mode (if it is supported) 2049 + */ 2050 + err = st_lsm6dsx_flush_fifo(hw); 2051 + if (err < 0 && err != -ENOTSUPP) 2052 + return err; 2043 2053 2044 2054 /* device sw reset */ 2045 2055 reg = &hw->settings->reset; ··· 2068 2058 return err; 2069 2059 2070 2060 msleep(50); 2061 + 2062 + return 0; 2063 + } 2064 + 2065 + static int st_lsm6dsx_init_device(struct st_lsm6dsx_hw *hw) 2066 + { 2067 + const struct st_lsm6dsx_reg *reg; 2068 + int err; 2069 + 2070 + err = st_lsm6dsx_reset_device(hw); 2071 + if (err < 0) 2072 + return err; 2071 2073 2072 2074 /* enable Block Data Update */ 2073 2075 reg = &hw->settings->bdu;
+22 -7
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
··· 421 421 422 422 settings = sensor->ext_info.settings; 423 423 if (enable) { 424 - err = st_lsm6dsx_shub_set_odr(sensor, sensor->odr); 424 + err = st_lsm6dsx_shub_set_odr(sensor, 425 + sensor->ext_info.slv_odr); 425 426 if (err < 0) 426 427 return err; 427 428 } else { ··· 460 459 if (err < 0) 461 460 return err; 462 461 463 - delay = 1000000000 / sensor->odr; 462 + delay = 1000000000 / sensor->ext_info.slv_odr; 464 463 usleep_range(delay, 2 * delay); 465 464 466 465 len = min_t(int, sizeof(data), ch->scan_type.realbits >> 3); ··· 501 500 iio_device_release_direct_mode(iio_dev); 502 501 break; 503 502 case IIO_CHAN_INFO_SAMP_FREQ: 504 - *val = sensor->odr / 1000; 505 - *val2 = (sensor->odr % 1000) * 1000; 503 + *val = sensor->ext_info.slv_odr / 1000; 504 + *val2 = (sensor->ext_info.slv_odr % 1000) * 1000; 506 505 ret = IIO_VAL_INT_PLUS_MICRO; 507 506 break; 508 507 case IIO_CHAN_INFO_SCALE: ··· 536 535 537 536 val = val * 1000 + val2 / 1000; 538 537 err = st_lsm6dsx_shub_get_odr_val(sensor, val, &data); 539 - if (!err) 540 - sensor->odr = val; 538 + if (!err) { 539 + struct st_lsm6dsx_hw *hw = sensor->hw; 540 + struct st_lsm6dsx_sensor *ref_sensor; 541 + u8 odr_val; 542 + int odr; 543 + 544 + ref_sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]); 545 + odr = st_lsm6dsx_check_odr(ref_sensor, val, &odr_val); 546 + if (odr < 0) 547 + return odr; 548 + 549 + sensor->ext_info.slv_odr = val; 550 + sensor->odr = odr; 551 + } 541 552 break; 542 553 } 543 554 default: ··· 626 613 const struct st_lsm6dsx_ext_dev_settings *info, 627 614 u8 i2c_addr, const char *name) 628 615 { 616 + enum st_lsm6dsx_sensor_id ref_id = ST_LSM6DSX_ID_ACC; 629 617 struct iio_chan_spec *ext_channels; 630 618 struct st_lsm6dsx_sensor *sensor; 631 619 struct iio_dev *iio_dev; ··· 642 628 sensor = iio_priv(iio_dev); 643 629 sensor->id = id; 644 630 sensor->hw = hw; 645 - sensor->odr = info->odr_table.odr_avl[0].milli_hz; 631 + sensor->odr = hw->settings->odr_table[ref_id].odr_avl[0].milli_hz; 632 
+ sensor->ext_info.slv_odr = info->odr_table.odr_avl[0].milli_hz; 646 633 sensor->gain = info->fs_table.fs_avl[0].gain; 647 634 sensor->ext_info.settings = info; 648 635 sensor->ext_info.addr = i2c_addr;
+2 -5
drivers/iio/industrialio-core.c
··· 915 915 return -EINVAL; 916 916 integer = ch; 917 917 } else { 918 - ret = iio_str_to_fixpoint(buf, fract_mult, &integer, &fract); 918 + ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract, 919 + scale_db); 919 920 if (ret) 920 921 return ret; 921 922 } 922 - ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract, 923 - scale_db); 924 - if (ret) 925 - return ret; 926 923 927 924 ret = indio_dev->info->write_raw(indio_dev, this_attr->c, 928 925 integer, fract, this_attr->address);
+14 -12
drivers/infiniband/core/cm.c
··· 862 862 863 863 ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b, 864 864 &cm.local_id_next, GFP_KERNEL); 865 - if (ret) 865 + if (ret < 0) 866 866 goto error; 867 867 cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand; 868 868 ··· 1828 1828 1829 1829 static void cm_format_rej(struct cm_rej_msg *rej_msg, 1830 1830 struct cm_id_private *cm_id_priv, 1831 - enum ib_cm_rej_reason reason, 1832 - void *ari, 1833 - u8 ari_length, 1834 - const void *private_data, 1835 - u8 private_data_len) 1831 + enum ib_cm_rej_reason reason, void *ari, 1832 + u8 ari_length, const void *private_data, 1833 + u8 private_data_len, enum ib_cm_state state) 1836 1834 { 1837 1835 lockdep_assert_held(&cm_id_priv->lock); 1838 1836 ··· 1838 1840 IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, 1839 1841 be32_to_cpu(cm_id_priv->id.remote_id)); 1840 1842 1841 - switch(cm_id_priv->id.state) { 1843 + switch (state) { 1842 1844 case IB_CM_REQ_RCVD: 1843 1845 IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0)); 1844 1846 IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); ··· 1903 1905 cm_id_priv->private_data_len); 1904 1906 break; 1905 1907 case IB_CM_TIMEWAIT: 1906 - cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv, 1907 - IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0); 1908 + cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, 1909 + IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0, 1910 + IB_CM_TIMEWAIT); 1908 1911 break; 1909 1912 default: 1910 1913 goto unlock; ··· 2903 2904 u8 ari_length, const void *private_data, 2904 2905 u8 private_data_len) 2905 2906 { 2907 + enum ib_cm_state state = cm_id_priv->id.state; 2906 2908 struct ib_mad_send_buf *msg; 2907 2909 int ret; 2908 2910 ··· 2913 2913 (ari && ari_length > IB_CM_REJ_ARI_LENGTH)) 2914 2914 return -EINVAL; 2915 2915 2916 - switch (cm_id_priv->id.state) { 2916 + switch (state) { 2917 2917 case IB_CM_REQ_SENT: 2918 2918 case IB_CM_MRA_REQ_RCVD: 2919 2919 case IB_CM_REQ_RCVD: ··· 2925 2925 if (ret) 2926 
2926 return ret; 2927 2927 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, 2928 - ari, ari_length, private_data, private_data_len); 2928 + ari, ari_length, private_data, private_data_len, 2929 + state); 2929 2930 break; 2930 2931 case IB_CM_REP_SENT: 2931 2932 case IB_CM_MRA_REP_RCVD: ··· 2935 2934 if (ret) 2936 2935 return ret; 2937 2936 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason, 2938 - ari, ari_length, private_data, private_data_len); 2937 + ari, ari_length, private_data, private_data_len, 2938 + state); 2939 2939 break; 2940 2940 default: 2941 2941 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
+4 -5
drivers/infiniband/core/rdma_core.c
··· 360 360 * uverbs_uobject_fd_release(), and the caller is expected to ensure 361 361 * that release is never done while a call to lookup is possible. 362 362 */ 363 - if (f->f_op != fd_type->fops) { 363 + if (f->f_op != fd_type->fops || uobject->ufile != ufile) { 364 364 fput(f); 365 365 return ERR_PTR(-EBADF); 366 366 } ··· 474 474 filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL, 475 475 fd_type->flags); 476 476 if (IS_ERR(filp)) { 477 + uverbs_uobject_put(uobj); 477 478 uobj = ERR_CAST(filp); 478 - goto err_uobj; 479 + goto err_fd; 479 480 } 480 481 uobj->object = filp; 481 482 482 483 uobj->id = new_fd; 483 484 return uobj; 484 485 485 - err_uobj: 486 - uverbs_uobject_put(uobj); 487 486 err_fd: 488 487 put_unused_fd(new_fd); 489 488 return uobj; ··· 678 679 enum rdma_lookup_mode mode) 679 680 { 680 681 assert_uverbs_usecnt(uobj, mode); 681 - uobj->uapi_object->type_class->lookup_put(uobj, mode); 682 682 /* 683 683 * In order to unlock an object, either decrease its usecnt for 684 684 * read access or zero it in case of exclusive access. See ··· 694 696 break; 695 697 } 696 698 699 + uobj->uapi_object->type_class->lookup_put(uobj, mode); 697 700 /* Pairs with the kref obtained by type->lookup_get */ 698 701 uverbs_uobject_put(uobj); 699 702 }
+4
drivers/infiniband/core/uverbs_main.c
··· 820 820 ret = mmget_not_zero(mm); 821 821 if (!ret) { 822 822 list_del_init(&priv->list); 823 + if (priv->entry) { 824 + rdma_user_mmap_entry_put(priv->entry); 825 + priv->entry = NULL; 826 + } 823 827 mm = NULL; 824 828 continue; 825 829 }
+1 -1
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
··· 1046 1046 u64 header; 1047 1047 1048 1048 wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); 1049 - if (wqe) 1049 + if (!wqe) 1050 1050 return I40IW_ERR_RING_FULL; 1051 1051 1052 1052 set_64bit_val(wqe, 32, feat_mem->pa);
+2 -1
drivers/infiniband/hw/mlx4/main.c
··· 1499 1499 int i; 1500 1500 1501 1501 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) { 1502 + union ib_flow_spec ib_spec = {}; 1502 1503 int ret; 1503 - union ib_flow_spec ib_spec; 1504 + 1504 1505 switch (pdefault_rules->rules_create_list[i]) { 1505 1506 case 0: 1506 1507 /* no rule */
+3 -1
drivers/infiniband/hw/mlx5/qp.c
··· 5554 5554 rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f); 5555 5555 rdma_ah_set_static_rate(ah_attr, 5556 5556 path->static_rate ? path->static_rate - 5 : 0); 5557 - if (path->grh_mlid & (1 << 7)) { 5557 + 5558 + if (path->grh_mlid & (1 << 7) || 5559 + ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { 5558 5560 u32 tc_fl = be32_to_cpu(path->tclass_flowlabel); 5559 5561 5560 5562 rdma_ah_set_grh(ah_attr, NULL,
+2 -2
drivers/infiniband/sw/rdmavt/cq.c
··· 248 248 */ 249 249 if (udata && udata->outlen >= sizeof(__u64)) { 250 250 cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc); 251 - if (!cq->ip) { 252 - err = -ENOMEM; 251 + if (IS_ERR(cq->ip)) { 252 + err = PTR_ERR(cq->ip); 253 253 goto bail_wc; 254 254 } 255 255
+2 -2
drivers/infiniband/sw/rdmavt/mmap.c
··· 154 154 * @udata: user data (must be valid!) 155 155 * @obj: opaque pointer to a cq, wq etc 156 156 * 157 - * Return: rvt_mmap struct on success 157 + * Return: rvt_mmap struct on success, ERR_PTR on failure 158 158 */ 159 159 struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size, 160 160 struct ib_udata *udata, void *obj) ··· 166 166 167 167 ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node); 168 168 if (!ip) 169 - return ip; 169 + return ERR_PTR(-ENOMEM); 170 170 171 171 size = PAGE_ALIGN(size); 172 172
+2 -2
drivers/infiniband/sw/rdmavt/qp.c
··· 1244 1244 1245 1245 qp->ip = rvt_create_mmap_info(rdi, s, udata, 1246 1246 qp->r_rq.wq); 1247 - if (!qp->ip) { 1248 - ret = ERR_PTR(-ENOMEM); 1247 + if (IS_ERR(qp->ip)) { 1248 + ret = ERR_CAST(qp->ip); 1249 1249 goto bail_qpn; 1250 1250 } 1251 1251
+2 -2
drivers/infiniband/sw/rdmavt/srq.c
··· 111 111 u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; 112 112 113 113 srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq); 114 - if (!srq->ip) { 115 - ret = -ENOMEM; 114 + if (IS_ERR(srq->ip)) { 115 + ret = PTR_ERR(srq->ip); 116 116 goto bail_wq; 117 117 } 118 118
+11 -4
drivers/infiniband/sw/siw/siw_qp_tx.c
··· 920 920 { 921 921 struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr; 922 922 struct siw_device *sdev = to_siw_dev(pd->device); 923 - struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); 923 + struct siw_mem *mem; 924 924 int rv = 0; 925 925 926 926 siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey); 927 927 928 - if (unlikely(!mem || !base_mr)) { 928 + if (unlikely(!base_mr)) { 929 929 pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); 930 930 return -EINVAL; 931 931 } 932 + 932 933 if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) { 933 934 pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey); 934 - rv = -EINVAL; 935 - goto out; 935 + return -EINVAL; 936 936 } 937 + 938 + mem = siw_mem_id2obj(sdev, sqe->rkey >> 8); 939 + if (unlikely(!mem)) { 940 + pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey); 941 + return -EINVAL; 942 + } 943 + 937 944 if (unlikely(mem->pd != pd)) { 938 945 pr_warn("siw: fastreg: PD mismatch\n"); 939 946 rv = -EINVAL;
+3 -2
drivers/interconnect/qcom/bcm-voter.c
··· 96 96 if (!cmd) 97 97 return; 98 98 99 + memset(cmd, 0, sizeof(*cmd)); 100 + 99 101 if (vote_x == 0 && vote_y == 0) 100 102 valid = false; 101 103 ··· 114 112 * Set the wait for completion flag on command that need to be completed 115 113 * before the next command. 116 114 */ 117 - if (commit) 118 - cmd->wait = true; 115 + cmd->wait = commit; 119 116 } 120 117 121 118 static void tcs_list_gen(struct list_head *bcm_list, int bucket,
+2 -2
drivers/iommu/Kconfig
··· 362 362 363 363 config SPAPR_TCE_IOMMU 364 364 bool "sPAPR TCE IOMMU Support" 365 - depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST) 365 + depends on PPC_POWERNV || PPC_PSERIES 366 366 select IOMMU_API 367 367 help 368 368 Enables bits of IOMMU API required by VFIO. The iommu_ops ··· 457 457 458 458 config MTK_IOMMU 459 459 bool "MTK IOMMU Support" 460 - depends on ARM || ARM64 || COMPILE_TEST 460 + depends on HAS_DMA 461 461 depends on ARCH_MEDIATEK || COMPILE_TEST 462 462 select ARM_DMA_USE_IOMMU 463 463 select IOMMU_API
+1 -1
drivers/iommu/amd_iommu_init.c
··· 2936 2936 { 2937 2937 for (; *str; ++str) { 2938 2938 if (strncmp(str, "legacy", 6) == 0) { 2939 - amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; 2939 + amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 2940 2940 break; 2941 2941 } 2942 2942 if (strncmp(str, "vapic", 5) == 0) {
+2 -2
drivers/iommu/intel-iommu.c
··· 371 371 int dmar_disabled = 1; 372 372 #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */ 373 373 374 - #ifdef INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON 374 + #ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON 375 375 int intel_iommu_sm = 1; 376 376 #else 377 377 int intel_iommu_sm; 378 - #endif /* INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */ 378 + #endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */ 379 379 380 380 int intel_iommu_enabled = 0; 381 381 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+2 -1
drivers/iommu/iommu.c
··· 170 170 171 171 static void dev_iommu_free(struct device *dev) 172 172 { 173 + iommu_fwspec_free(dev); 173 174 kfree(dev->iommu); 174 175 dev->iommu = NULL; 175 176 } ··· 1429 1428 1430 1429 return group; 1431 1430 } 1432 - EXPORT_SYMBOL(iommu_group_get_for_dev); 1431 + EXPORT_SYMBOL_GPL(iommu_group_get_for_dev); 1433 1432 1434 1433 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) 1435 1434 {
+4 -1
drivers/iommu/qcom_iommu.c
··· 824 824 qcom_iommu->dev = dev; 825 825 826 826 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 827 - if (res) 827 + if (res) { 828 828 qcom_iommu->local_base = devm_ioremap_resource(dev, res); 829 + if (IS_ERR(qcom_iommu->local_base)) 830 + return PTR_ERR(qcom_iommu->local_base); 831 + } 829 832 830 833 qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); 831 834 if (IS_ERR(qcom_iommu->iface_clk)) {
+4 -2
drivers/md/dm-mpath.c
··· 585 585 586 586 /* Do we need to select a new pgpath? */ 587 587 pgpath = READ_ONCE(m->current_pgpath); 588 - queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); 589 - if (!pgpath || !queue_io) 588 + if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) 590 589 pgpath = choose_pgpath(m, bio->bi_iter.bi_size); 590 + 591 + /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */ 592 + queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); 591 593 592 594 if ((pgpath && queue_io) || 593 595 (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
+1 -1
drivers/md/dm-verity-fec.c
··· 435 435 fio->level++; 436 436 437 437 if (type == DM_VERITY_BLOCK_TYPE_METADATA) 438 - block += v->data_blocks; 438 + block = block - v->hash_start + v->data_blocks; 439 439 440 440 /* 441 441 * For RS(M, N), the continuous FEC data is divided into blocks of N
+37 -15
drivers/md/dm-writecache.c
··· 931 931 return 0; 932 932 } 933 933 934 + static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors) 935 + { 936 + struct dm_io_region region; 937 + struct dm_io_request req; 938 + 939 + region.bdev = wc->ssd_dev->bdev; 940 + region.sector = wc->start_sector; 941 + region.count = n_sectors; 942 + req.bi_op = REQ_OP_READ; 943 + req.bi_op_flags = REQ_SYNC; 944 + req.mem.type = DM_IO_VMA; 945 + req.mem.ptr.vma = (char *)wc->memory_map; 946 + req.client = wc->dm_io; 947 + req.notify.fn = NULL; 948 + 949 + return dm_io(&req, 1, &region, NULL); 950 + } 951 + 934 952 static void writecache_resume(struct dm_target *ti) 935 953 { 936 954 struct dm_writecache *wc = ti->private; ··· 959 941 960 942 wc_lock(wc); 961 943 962 - if (WC_MODE_PMEM(wc)) 944 + if (WC_MODE_PMEM(wc)) { 963 945 persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size); 946 + } else { 947 + r = writecache_read_metadata(wc, wc->metadata_sectors); 948 + if (r) { 949 + size_t sb_entries_offset; 950 + writecache_error(wc, r, "unable to read metadata: %d", r); 951 + sb_entries_offset = offsetof(struct wc_memory_superblock, entries); 952 + memset((char *)wc->memory_map + sb_entries_offset, -1, 953 + (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset); 954 + } 955 + } 964 956 965 957 wc->tree = RB_ROOT; 966 958 INIT_LIST_HEAD(&wc->lru); ··· 2130 2102 ti->error = "Invalid block size"; 2131 2103 goto bad; 2132 2104 } 2105 + if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) || 2106 + wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) { 2107 + r = -EINVAL; 2108 + ti->error = "Block size is smaller than device logical block size"; 2109 + goto bad; 2110 + } 2133 2111 wc->block_size_bits = __ffs(wc->block_size); 2134 2112 2135 2113 wc->max_writeback_jobs = MAX_WRITEBACK_JOBS; ··· 2234 2200 goto bad; 2235 2201 } 2236 2202 } else { 2237 - struct dm_io_region region; 2238 - struct dm_io_request req; 2239 2203 size_t n_blocks, n_metadata_blocks; 2240 
2204 uint64_t n_bitmap_bits; 2241 2205 ··· 2290 2258 goto bad; 2291 2259 } 2292 2260 2293 - region.bdev = wc->ssd_dev->bdev; 2294 - region.sector = wc->start_sector; 2295 - region.count = wc->metadata_sectors; 2296 - req.bi_op = REQ_OP_READ; 2297 - req.bi_op_flags = REQ_SYNC; 2298 - req.mem.type = DM_IO_VMA; 2299 - req.mem.ptr.vma = (char *)wc->memory_map; 2300 - req.client = wc->dm_io; 2301 - req.notify.fn = NULL; 2302 - 2303 - r = dm_io(&req, 1, &region, NULL); 2261 + r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT); 2304 2262 if (r) { 2305 - ti->error = "Unable to read metadata"; 2263 + ti->error = "Unable to read first block of metadata"; 2306 2264 goto bad; 2307 2265 } 2308 2266 }
+2 -1
drivers/misc/mei/pci-me.c
··· 203 203 } 204 204 hw = to_me_hw(dev); 205 205 hw->mem_addr = pcim_iomap_table(pdev)[0]; 206 - hw->irq = pdev->irq; 207 206 hw->read_fws = mei_me_read_fws; 208 207 209 208 pci_enable_msi(pdev); 209 + 210 + hw->irq = pdev->irq; 210 211 211 212 /* request and enable interrupt */ 212 213 irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
+1 -1
drivers/mmc/core/mmc_ops.c
··· 878 878 * Issued High Priority Interrupt, and check for card status 879 879 * until out-of prg-state. 880 880 */ 881 - int mmc_interrupt_hpi(struct mmc_card *card) 881 + static int mmc_interrupt_hpi(struct mmc_card *card) 882 882 { 883 883 int err; 884 884 u32 status;
+10 -11
drivers/mmc/host/cqhci.c
··· 5 5 #include <linux/delay.h> 6 6 #include <linux/highmem.h> 7 7 #include <linux/io.h> 8 + #include <linux/iopoll.h> 8 9 #include <linux/module.h> 9 10 #include <linux/dma-mapping.h> 10 11 #include <linux/slab.h> ··· 350 349 /* CQHCI is idle and should halt immediately, so set a small timeout */ 351 350 #define CQHCI_OFF_TIMEOUT 100 352 351 352 + static u32 cqhci_read_ctl(struct cqhci_host *cq_host) 353 + { 354 + return cqhci_readl(cq_host, CQHCI_CTL); 355 + } 356 + 353 357 static void cqhci_off(struct mmc_host *mmc) 354 358 { 355 359 struct cqhci_host *cq_host = mmc->cqe_private; 356 - ktime_t timeout; 357 - bool timed_out; 358 360 u32 reg; 361 + int err; 359 362 360 363 if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt) 361 364 return; ··· 369 364 370 365 cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL); 371 366 372 - timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT); 373 - while (1) { 374 - timed_out = ktime_compare(ktime_get(), timeout) > 0; 375 - reg = cqhci_readl(cq_host, CQHCI_CTL); 376 - if ((reg & CQHCI_HALT) || timed_out) 377 - break; 378 - } 379 - 380 - if (timed_out) 367 + err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg, 368 + reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT); 369 + if (err < 0) 381 370 pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc)); 382 371 else 383 372 pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
+1 -10
drivers/mmc/host/meson-mx-sdio.c
··· 357 357 meson_mx_mmc_start_cmd(mmc, mrq->cmd); 358 358 } 359 359 360 - static int meson_mx_mmc_card_busy(struct mmc_host *mmc) 361 - { 362 - struct meson_mx_mmc_host *host = mmc_priv(mmc); 363 - u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC); 364 - 365 - return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK); 366 - } 367 - 368 360 static void meson_mx_mmc_read_response(struct mmc_host *mmc, 369 361 struct mmc_command *cmd) 370 362 { ··· 498 506 static struct mmc_host_ops meson_mx_mmc_ops = { 499 507 .request = meson_mx_mmc_request, 500 508 .set_ios = meson_mx_mmc_set_ios, 501 - .card_busy = meson_mx_mmc_card_busy, 502 509 .get_cd = mmc_gpio_get_cd, 503 510 .get_ro = mmc_gpio_get_ro, 504 511 }; ··· 561 570 mmc->f_max = clk_round_rate(host->cfg_div_clk, 562 571 clk_get_rate(host->parent_clk)); 563 572 564 - mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; 573 + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY; 565 574 mmc->ops = &meson_mx_mmc_ops; 566 575 567 576 ret = mmc_of_parse(mmc);
+2
drivers/mmc/host/sdhci-msm.c
··· 2087 2087 goto clk_disable; 2088 2088 } 2089 2089 2090 + msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY; 2091 + 2090 2092 pm_runtime_get_noresume(&pdev->dev); 2091 2093 pm_runtime_set_active(&pdev->dev); 2092 2094 pm_runtime_enable(&pdev->dev);
+3
drivers/mmc/host/sdhci-pci-core.c
··· 601 601 struct sdhci_pci_slot *slot = sdhci_priv(host); 602 602 struct intel_host *intel_host = sdhci_pci_priv(slot); 603 603 604 + if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv)) 605 + return 0; 606 + 604 607 return intel_host->drv_strength; 605 608 } 606 609
+10
drivers/mmc/host/sdhci-xenon.c
··· 235 235 { 236 236 /* Wait for 5ms after set 1.8V signal enable bit */ 237 237 usleep_range(5000, 5500); 238 + 239 + /* 240 + * For some reason the controller's Host Control2 register reports 241 + * the bit representing 1.8V signaling as 0 when read after it was 242 + * written as 1. Subsequent read reports 1. 243 + * 244 + * Since this may cause some issues, do an empty read of the Host 245 + * Control2 register here to circumvent this. 246 + */ 247 + sdhci_readw(host, SDHCI_HOST_CONTROL2); 238 248 } 239 249 240 250 static const struct sdhci_ops sdhci_xenon_ops = {
+1 -1
drivers/net/dsa/mv88e6xxx/Kconfig
··· 24 24 bool "PTP support for Marvell 88E6xxx" 25 25 default n 26 26 depends on NET_DSA_MV88E6XXX_GLOBAL2 27 + depends on PTP_1588_CLOCK 27 28 imply NETWORK_PHY_TIMESTAMPING 28 - imply PTP_1588_CLOCK 29 29 help 30 30 Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch 31 31 chips that support it.
-4
drivers/net/dsa/mv88e6xxx/chip.c
··· 3962 3962 .serdes_get_stats = mv88e6390_serdes_get_stats, 3963 3963 .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, 3964 3964 .serdes_get_regs = mv88e6390_serdes_get_regs, 3965 - .phylink_validate = mv88e6390_phylink_validate, 3966 3965 .gpio_ops = &mv88e6352_gpio_ops, 3967 3966 .phylink_validate = mv88e6390_phylink_validate, 3968 3967 }; ··· 4020 4021 .serdes_get_stats = mv88e6390_serdes_get_stats, 4021 4022 .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, 4022 4023 .serdes_get_regs = mv88e6390_serdes_get_regs, 4023 - .phylink_validate = mv88e6390_phylink_validate, 4024 4024 .gpio_ops = &mv88e6352_gpio_ops, 4025 4025 .phylink_validate = mv88e6390x_phylink_validate, 4026 4026 }; ··· 4077 4079 .serdes_get_stats = mv88e6390_serdes_get_stats, 4078 4080 .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, 4079 4081 .serdes_get_regs = mv88e6390_serdes_get_regs, 4080 - .phylink_validate = mv88e6390_phylink_validate, 4081 4082 .avb_ops = &mv88e6390_avb_ops, 4082 4083 .ptp_ops = &mv88e6352_ptp_ops, 4083 4084 .phylink_validate = mv88e6390_phylink_validate, ··· 4232 4235 .serdes_get_stats = mv88e6390_serdes_get_stats, 4233 4236 .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, 4234 4237 .serdes_get_regs = mv88e6390_serdes_get_regs, 4235 - .phylink_validate = mv88e6390_phylink_validate, 4236 4238 .gpio_ops = &mv88e6352_gpio_ops, 4237 4239 .avb_ops = &mv88e6390_avb_ops, 4238 4240 .ptp_ops = &mv88e6352_ptp_ops,
+1
drivers/net/dsa/ocelot/felix.c
··· 401 401 ocelot->stats_layout = felix->info->stats_layout; 402 402 ocelot->num_stats = felix->info->num_stats; 403 403 ocelot->shared_queue_sz = felix->info->shared_queue_sz; 404 + ocelot->num_mact_rows = felix->info->num_mact_rows; 404 405 ocelot->vcap_is2_keys = felix->info->vcap_is2_keys; 405 406 ocelot->vcap_is2_actions= felix->info->vcap_is2_actions; 406 407 ocelot->vcap = felix->info->vcap;
+1
drivers/net/dsa/ocelot/felix.h
··· 16 16 const u32 *const *map; 17 17 const struct ocelot_ops *ops; 18 18 int shared_queue_sz; 19 + int num_mact_rows; 19 20 const struct ocelot_stat_layout *stats_layout; 20 21 unsigned int num_stats; 21 22 int num_ports;
+1
drivers/net/dsa/ocelot/felix_vsc9959.c
··· 1222 1222 .vcap_is2_actions = vsc9959_vcap_is2_actions, 1223 1223 .vcap = vsc9959_vcap_props, 1224 1224 .shared_queue_sz = 128 * 1024, 1225 + .num_mact_rows = 2048, 1225 1226 .num_ports = 6, 1226 1227 .switch_pci_bar = 4, 1227 1228 .imdio_pci_bar = 0,
+1
drivers/net/dsa/sja1105/Kconfig
··· 20 20 config NET_DSA_SJA1105_PTP 21 21 bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch" 22 22 depends on NET_DSA_SJA1105 23 + depends on PTP_1588_CLOCK 23 24 help 24 25 This enables support for timestamping and PTP clock manipulations in 25 26 the SJA1105 DSA driver.
+18 -8
drivers/net/dsa/sja1105/sja1105_ptp.c
··· 16 16 17 17 /* PTPSYNCTS has no interrupt or update mechanism, because the intended 18 18 * hardware use case is for the timestamp to be collected synchronously, 19 - * immediately after the CAS_MASTER SJA1105 switch has triggered a CASSYNC 20 - * pulse on the PTP_CLK pin. When used as a generic extts source, it needs 21 - * polling and a comparison with the old value. The polling interval is just 22 - * the Nyquist rate of a canonical PPS input (e.g. from a GPS module). 23 - * Anything of higher frequency than 1 Hz will be lost, since there is no 24 - * timestamp FIFO. 19 + * immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC 20 + * one-shot toggle (no return to level) on the PTP_CLK pin. When used as a 21 + * generic extts source, the PTPSYNCTS register needs polling and a comparison 22 + * with the old value. The polling interval is configured as the Nyquist rate 23 + * of a signal with 50% duty cycle and 1Hz frequency, which is sadly all that 24 + * this hardware can do (but may be enough for some setups). Anything of higher 25 + * frequency than 1 Hz will be lost, since there is no timestamp FIFO. 25 26 */ 26 - #define SJA1105_EXTTS_INTERVAL (HZ / 2) 27 + #define SJA1105_EXTTS_INTERVAL (HZ / 4) 27 28 28 29 /* This range is actually +/- SJA1105_MAX_ADJ_PPB 29 30 * divided by 1000 (ppb -> ppm) and with a 16-bit ··· 755 754 return -EOPNOTSUPP; 756 755 757 756 /* Reject requests with unsupported flags */ 758 - if (extts->flags) 757 + if (extts->flags & ~(PTP_ENABLE_FEATURE | 758 + PTP_RISING_EDGE | 759 + PTP_FALLING_EDGE | 760 + PTP_STRICT_FLAGS)) 761 + return -EOPNOTSUPP; 762 + 763 + /* We can only enable time stamping on both edges, sadly. */ 764 + if ((extts->flags & PTP_STRICT_FLAGS) && 765 + (extts->flags & PTP_ENABLE_FEATURE) && 766 + (extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) 759 767 return -EOPNOTSUPP; 760 768 761 769 rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);
+1 -1
drivers/net/ethernet/amazon/ena/ena_netdev.h
··· 69 69 * 16kB. 70 70 */ 71 71 #if PAGE_SIZE > SZ_16K 72 - #define ENA_PAGE_SIZE SZ_16K 72 + #define ENA_PAGE_SIZE (_AC(SZ_16K, UL)) 73 73 #else 74 74 #define ENA_PAGE_SIZE PAGE_SIZE 75 75 #endif
+1 -1
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
··· 65 65 { AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, }, 66 66 { AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, }, 67 67 68 - { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, 68 + { AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, }, 69 69 { AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, }, 70 70 { AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, }, 71 71 { AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
+15 -9
drivers/net/ethernet/broadcom/bgmac-platform.c
··· 172 172 { 173 173 struct device_node *np = pdev->dev.of_node; 174 174 struct bgmac *bgmac; 175 + struct resource *regs; 175 176 const u8 *mac_addr; 176 177 177 178 bgmac = bgmac_alloc(&pdev->dev); ··· 207 206 if (IS_ERR(bgmac->plat.base)) 208 207 return PTR_ERR(bgmac->plat.base); 209 208 210 - bgmac->plat.idm_base = 211 - devm_platform_ioremap_resource_byname(pdev, "idm_base"); 212 - if (IS_ERR(bgmac->plat.idm_base)) 213 - return PTR_ERR(bgmac->plat.idm_base); 214 - bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK; 209 + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); 210 + if (regs) { 211 + bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs); 212 + if (IS_ERR(bgmac->plat.idm_base)) 213 + return PTR_ERR(bgmac->plat.idm_base); 214 + bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK; 215 + } 215 216 216 - bgmac->plat.nicpm_base = 217 - devm_platform_ioremap_resource_byname(pdev, "nicpm_base"); 218 - if (IS_ERR(bgmac->plat.nicpm_base)) 219 - return PTR_ERR(bgmac->plat.nicpm_base); 217 + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base"); 218 + if (regs) { 219 + bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev, 220 + regs); 221 + if (IS_ERR(bgmac->plat.nicpm_base)) 222 + return PTR_ERR(bgmac->plat.nicpm_base); 223 + } 220 224 221 225 bgmac->read = platform_bgmac_read; 222 226 bgmac->write = platform_bgmac_write;
+13 -7
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 6661 6661 int rc; 6662 6662 6663 6663 if (!mem_size) 6664 - return 0; 6664 + return -EINVAL; 6665 6665 6666 6666 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); 6667 6667 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { ··· 9803 9803 netdev_features_t features) 9804 9804 { 9805 9805 struct bnxt *bp = netdev_priv(dev); 9806 + netdev_features_t vlan_features; 9806 9807 9807 9808 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) 9808 9809 features &= ~NETIF_F_NTUPLE; ··· 9820 9819 /* Both CTAG and STAG VLAN accelaration on the RX side have to be 9821 9820 * turned on or off together. 9822 9821 */ 9823 - if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != 9824 - (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { 9822 + vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | 9823 + NETIF_F_HW_VLAN_STAG_RX); 9824 + if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | 9825 + NETIF_F_HW_VLAN_STAG_RX)) { 9825 9826 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) 9826 9827 features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 9827 9828 NETIF_F_HW_VLAN_STAG_RX); 9828 - else 9829 + else if (vlan_features) 9829 9830 features |= NETIF_F_HW_VLAN_CTAG_RX | 9830 9831 NETIF_F_HW_VLAN_STAG_RX; 9831 9832 } ··· 12251 12248 bnxt_ulp_start(bp, err); 12252 12249 } 12253 12250 12254 - if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) 12255 - dev_close(netdev); 12251 + if (result != PCI_ERS_RESULT_RECOVERED) { 12252 + if (netif_running(netdev)) 12253 + dev_close(netdev); 12254 + pci_disable_device(pdev); 12255 + } 12256 12256 12257 12257 rtnl_unlock(); 12258 12258 12259 - return PCI_ERS_RESULT_RECOVERED; 12259 + return result; 12260 12260 } 12261 12261 12262 12262 /**
-1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 1082 1082 #define BNXT_VF_LINK_FORCED 0x4 1083 1083 #define BNXT_VF_LINK_UP 0x8 1084 1084 #define BNXT_VF_TRUST 0x10 1085 - u32 func_flags; /* func cfg flags */ 1086 1085 u32 min_tx_rate; 1087 1086 u32 max_tx_rate; 1088 1087 void *hwrm_cmd_req_addr;
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
··· 43 43 #define BNXT_NVM_CFG_VER_BITS 24 44 44 #define BNXT_NVM_CFG_VER_BYTES 4 45 45 46 - #define BNXT_MSIX_VEC_MAX 1280 46 + #define BNXT_MSIX_VEC_MAX 512 47 47 #define BNXT_MSIX_VEC_MIN_MAX 128 48 48 49 49 enum bnxt_nvm_dir_type {
+2 -8
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
··· 85 85 if (old_setting == setting) 86 86 return 0; 87 87 88 - func_flags = vf->func_flags; 89 88 if (setting) 90 - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; 89 + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; 91 90 else 92 - func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; 91 + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; 93 92 /*TODO: if the driver supports VLAN filter on guest VLAN, 94 93 * the spoof check should also include vlan anti-spoofing 95 94 */ ··· 97 98 req.flags = cpu_to_le32(func_flags); 98 99 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 99 100 if (!rc) { 100 - vf->func_flags = func_flags; 101 101 if (setting) 102 102 vf->flags |= BNXT_VF_SPOOFCHK; 103 103 else ··· 226 228 memcpy(vf->mac_addr, mac, ETH_ALEN); 227 229 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 228 230 req.fid = cpu_to_le16(vf->fw_fid); 229 - req.flags = cpu_to_le32(vf->func_flags); 230 231 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); 231 232 memcpy(req.dflt_mac_addr, mac, ETH_ALEN); 232 233 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); ··· 263 266 264 267 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 265 268 req.fid = cpu_to_le16(vf->fw_fid); 266 - req.flags = cpu_to_le32(vf->func_flags); 267 269 req.dflt_vlan = cpu_to_le16(vlan_tag); 268 270 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); 269 271 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); ··· 301 305 return 0; 302 306 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 303 307 req.fid = cpu_to_le16(vf->fw_fid); 304 - req.flags = cpu_to_le32(vf->func_flags); 305 308 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); 306 309 req.max_bw = cpu_to_le32(max_tx_rate); 307 310 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); ··· 472 477 vf = &bp->pf.vf[vf_id]; 473 478 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 474 479 req.fid = 
cpu_to_le16(vf->fw_fid); 475 - req.flags = cpu_to_le32(vf->func_flags); 476 480 477 481 if (is_valid_ether_addr(vf->mac_addr)) { 478 482 req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+1 -1
drivers/net/ethernet/cadence/Kconfig
··· 35 35 config MACB_USE_HWSTAMP 36 36 bool "Use IEEE 1588 hwstamp" 37 37 depends on MACB 38 + depends on PTP_1588_CLOCK 38 39 default y 39 - imply PTP_1588_CLOCK 40 40 ---help--- 41 41 Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB. 42 42
+12 -12
drivers/net/ethernet/cadence/macb_main.c
··· 334 334 int status; 335 335 336 336 status = pm_runtime_get_sync(&bp->pdev->dev); 337 - if (status < 0) 337 + if (status < 0) { 338 + pm_runtime_put_noidle(&bp->pdev->dev); 338 339 goto mdio_pm_exit; 340 + } 339 341 340 342 status = macb_mdio_wait_for_idle(bp); 341 343 if (status < 0) ··· 388 386 int status; 389 387 390 388 status = pm_runtime_get_sync(&bp->pdev->dev); 391 - if (status < 0) 389 + if (status < 0) { 390 + pm_runtime_put_noidle(&bp->pdev->dev); 392 391 goto mdio_pm_exit; 392 + } 393 393 394 394 status = macb_mdio_wait_for_idle(bp); 395 395 if (status < 0) ··· 3820 3816 int ret; 3821 3817 3822 3818 ret = pm_runtime_get_sync(&lp->pdev->dev); 3823 - if (ret < 0) 3819 + if (ret < 0) { 3820 + pm_runtime_put_noidle(&lp->pdev->dev); 3824 3821 return ret; 3822 + } 3825 3823 3826 3824 /* Clear internal statistics */ 3827 3825 ctl = macb_readl(lp, NCR); ··· 4178 4172 4179 4173 static int fu540_c000_init(struct platform_device *pdev) 4180 4174 { 4181 - struct resource *res; 4182 - 4183 - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 4184 - if (!res) 4185 - return -ENODEV; 4186 - 4187 - mgmt->reg = ioremap(res->start, resource_size(res)); 4188 - if (!mgmt->reg) 4189 - return -ENOMEM; 4175 + mgmt->reg = devm_platform_ioremap_resource(pdev, 1); 4176 + if (IS_ERR(mgmt->reg)) 4177 + return PTR_ERR(mgmt->reg); 4190 4178 4191 4179 return macb_init(pdev); 4192 4180 }
+1 -1
drivers/net/ethernet/cavium/Kconfig
··· 54 54 config CAVIUM_PTP 55 55 tristate "Cavium PTP coprocessor as PTP clock" 56 56 depends on 64BIT && PCI 57 - imply PTP_1588_CLOCK 57 + depends on PTP_1588_CLOCK 58 58 ---help--- 59 59 This driver adds support for the Precision Time Protocol Clocks and 60 60 Timestamping coprocessor (PTP) found on Cavium processors.
+37 -3
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 2207 2207 if (unlikely(skip_eotx_wr)) { 2208 2208 start = (u64 *)wr; 2209 2209 eosw_txq->state = next_state; 2210 + eosw_txq->cred -= wrlen16; 2211 + eosw_txq->ncompl++; 2212 + eosw_txq->last_compl = 0; 2210 2213 goto write_wr_headers; 2211 2214 } 2212 2215 ··· 2368 2365 return cxgb4_eth_xmit(skb, dev); 2369 2366 } 2370 2367 2368 + static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) 2369 + { 2370 + int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; 2371 + int pidx = eosw_txq->pidx; 2372 + struct sk_buff *skb; 2373 + 2374 + if (!pktcount) 2375 + return; 2376 + 2377 + if (pktcount < 0) 2378 + pktcount += eosw_txq->ndesc; 2379 + 2380 + while (pktcount--) { 2381 + pidx--; 2382 + if (pidx < 0) 2383 + pidx += eosw_txq->ndesc; 2384 + 2385 + skb = eosw_txq->desc[pidx].skb; 2386 + if (skb) { 2387 + dev_consume_skb_any(skb); 2388 + eosw_txq->desc[pidx].skb = NULL; 2389 + eosw_txq->inuse--; 2390 + } 2391 + } 2392 + 2393 + eosw_txq->pidx = eosw_txq->last_pidx + 1; 2394 + } 2395 + 2371 2396 /** 2372 2397 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc. 2373 2398 * @dev - netdevice ··· 2471 2440 FW_FLOWC_MNEM_EOSTATE_CLOSING : 2472 2441 FW_FLOWC_MNEM_EOSTATE_ESTABLISHED); 2473 2442 2474 - eosw_txq->cred -= len16; 2475 - eosw_txq->ncompl++; 2476 - eosw_txq->last_compl = 0; 2443 + /* Free up any pending skbs to ensure there's room for 2444 + * termination FLOWC. 2445 + */ 2446 + if (tc == FW_SCHED_CLS_NONE) 2447 + eosw_txq_flush_pending_skbs(eosw_txq); 2477 2448 2478 2449 ret = eosw_txq_enqueue(eosw_txq, skb); 2479 2450 if (ret) { ··· 2728 2695 * is ever running at a time ... 2729 2696 */ 2730 2697 static void service_ofldq(struct sge_uld_txq *q) 2698 + __must_hold(&q->sendq.lock) 2731 2699 { 2732 2700 u64 *pos, *before, *end; 2733 2701 int credits;
+1 -1
drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
··· 74 74 pci_disable_device(pdev); 75 75 err_pci_enable: 76 76 err_mdiobus_alloc: 77 - iounmap(port_regs); 78 77 err_hw_alloc: 78 + iounmap(port_regs); 79 79 err_ioremap: 80 80 return err; 81 81 }
+2 -1
drivers/net/ethernet/ibm/ibmvnic.c
··· 2189 2189 rc = do_hard_reset(adapter, rwi, reset_state); 2190 2190 rtnl_unlock(); 2191 2191 } 2192 - } else { 2192 + } else if (!(rwi->reset_reason == VNIC_RESET_FATAL && 2193 + adapter->from_passive_init)) { 2193 2194 rc = do_reset(adapter, rwi, reset_state); 2194 2195 } 2195 2196 kfree(rwi);
+3
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
··· 1428 1428 struct mvpp2_ethtool_fs *efs; 1429 1429 int ret; 1430 1430 1431 + if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW) 1432 + return -EINVAL; 1433 + 1431 1434 efs = port->rfs_rules[info->fs.location]; 1432 1435 if (!efs) 1433 1436 return -EINVAL;
+2
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 4329 4329 4330 4330 if (!mvpp22_rss_is_supported()) 4331 4331 return -EOPNOTSUPP; 4332 + if (rss_context >= MVPP22_N_RSS_TABLES) 4333 + return -EINVAL; 4332 4334 4333 4335 if (hfunc) 4334 4336 *hfunc = ETH_RSS_HASH_CRC32;
+3 -1
drivers/net/ethernet/mellanox/mlx4/main.c
··· 2550 2550 2551 2551 if (!err || err == -ENOSPC) { 2552 2552 priv->def_counter[port] = idx; 2553 + err = 0; 2553 2554 } else if (err == -ENOENT) { 2554 2555 err = 0; 2555 2556 continue; ··· 2601 2600 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2602 2601 if (!err) 2603 2602 *idx = get_param_l(&out_param); 2604 - 2603 + if (WARN_ON(err == -ENOSPC)) 2604 + err = -EINVAL; 2605 2605 return err; 2606 2606 } 2607 2607 return __mlx4_counter_alloc(dev, idx);
+5 -1
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 888 888 } 889 889 890 890 cmd->ent_arr[ent->idx] = ent; 891 - set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); 892 891 lay = get_inst(cmd, ent->idx); 893 892 ent->lay = lay; 894 893 memset(lay, 0, sizeof(*lay)); ··· 909 910 910 911 if (ent->callback) 911 912 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); 913 + set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); 912 914 913 915 /* Skip sending command to fw if internal error */ 914 916 if (pci_channel_offline(dev->pdev) || ··· 922 922 MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); 923 923 924 924 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); 925 + /* no doorbell, no need to keep the entry */ 926 + free_ent(cmd, ent->idx); 927 + if (ent->callback) 928 + free_cmd(ent); 925 929 return; 926 930 } 927 931
+2 -7
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 1773 1773 1774 1774 static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv) 1775 1775 { 1776 - int err = mlx5e_init_rep_rx(priv); 1777 - 1778 - if (err) 1779 - return err; 1780 - 1781 1776 mlx5e_create_q_counters(priv); 1782 - return 0; 1777 + return mlx5e_init_rep_rx(priv); 1783 1778 } 1784 1779 1785 1780 static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv) 1786 1781 { 1787 - mlx5e_destroy_q_counters(priv); 1788 1782 mlx5e_cleanup_rep_rx(priv); 1783 + mlx5e_destroy_q_counters(priv); 1789 1784 } 1790 1785 1791 1786 static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+9 -9
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 1550 1550 MLX5_FLOW_NAMESPACE_KERNEL, 1, 1551 1551 modact); 1552 1552 if (IS_ERR(mod_hdr)) { 1553 + err = PTR_ERR(mod_hdr); 1553 1554 esw_warn(dev, "Failed to create restore mod header, err: %d\n", 1554 1555 err); 1555 - err = PTR_ERR(mod_hdr); 1556 1556 goto err_mod_hdr; 1557 1557 } 1558 1558 ··· 2219 2219 total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev); 2220 2220 2221 2221 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); 2222 + mutex_init(&esw->fdb_table.offloads.vports.lock); 2223 + hash_init(esw->fdb_table.offloads.vports.table); 2222 2224 2223 2225 err = esw_create_uplink_offloads_acl_tables(esw); 2224 2226 if (err) 2225 - return err; 2227 + goto create_acl_err; 2226 2228 2227 2229 err = esw_create_offloads_table(esw, total_vports); 2228 2230 if (err) ··· 2242 2240 if (err) 2243 2241 goto create_fg_err; 2244 2242 2245 - mutex_init(&esw->fdb_table.offloads.vports.lock); 2246 - hash_init(esw->fdb_table.offloads.vports.table); 2247 - 2248 2243 return 0; 2249 2244 2250 2245 create_fg_err: ··· 2252 2253 esw_destroy_offloads_table(esw); 2253 2254 create_offloads_err: 2254 2255 esw_destroy_uplink_offloads_acl_tables(esw); 2255 - 2256 + create_acl_err: 2257 + mutex_destroy(&esw->fdb_table.offloads.vports.lock); 2256 2258 return err; 2257 2259 } 2258 2260 2259 2261 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) 2260 2262 { 2261 - mutex_destroy(&esw->fdb_table.offloads.vports.lock); 2262 2263 esw_destroy_vport_rx_group(esw); 2263 2264 esw_destroy_offloads_fdb_tables(esw); 2264 2265 esw_destroy_restore_table(esw); 2265 2266 esw_destroy_offloads_table(esw); 2266 2267 esw_destroy_uplink_offloads_acl_tables(esw); 2268 + mutex_destroy(&esw->fdb_table.offloads.vports.lock); 2267 2269 } 2268 2270 2269 2271 static void ··· 2377 2377 err_vports: 2378 2378 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK); 2379 2379 err_uplink: 2380 - esw_set_passing_vport_metadata(esw, false); 2381 - err_steering_init: 2382 2380 
esw_offloads_steering_cleanup(esw); 2381 + err_steering_init: 2382 + esw_set_passing_vport_metadata(esw, false); 2383 2383 err_vport_metadata: 2384 2384 mlx5_rdma_disable_roce(esw->dev); 2385 2385 mutex_destroy(&esw->offloads.termtbl_mutex);
+14 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
··· 693 693 return 0; 694 694 } 695 695 696 + static void dr_cq_complete(struct mlx5_core_cq *mcq, 697 + struct mlx5_eqe *eqe) 698 + { 699 + pr_err("CQ completion CQ: #%u\n", mcq->cqn); 700 + } 701 + 696 702 static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, 697 703 struct mlx5_uars_page *uar, 698 704 size_t ncqe) ··· 759 753 pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); 760 754 mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas); 761 755 756 + cq->mcq.comp = dr_cq_complete; 757 + 762 758 err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out)); 763 759 kvfree(in); 764 760 ··· 771 763 cq->mcq.set_ci_db = cq->wq_ctrl.db.db; 772 764 cq->mcq.arm_db = cq->wq_ctrl.db.db + 1; 773 765 *cq->mcq.set_ci_db = 0; 774 - *cq->mcq.arm_db = 0; 766 + 767 + /* set no-zero value, in order to avoid the HW to run db-recovery on 768 + * CQ that used in polling mode. 769 + */ 770 + *cq->mcq.arm_db = cpu_to_be32(2 << 28); 771 + 775 772 cq->mcq.vector = 0; 776 773 cq->mcq.irqn = irqn; 777 774 cq->mcq.uar = uar;
+10 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
··· 986 986 unsigned int priority, 987 987 struct mlxsw_afk_element_usage *elusage) 988 988 { 989 + struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2; 989 990 struct mlxsw_sp_acl_tcam_vregion *vregion; 990 - struct mlxsw_sp_acl_tcam_vchunk *vchunk; 991 + struct list_head *pos; 991 992 int err; 992 993 993 994 if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) ··· 1026 1025 } 1027 1026 1028 1027 mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion); 1029 - list_add_tail(&vchunk->list, &vregion->vchunk_list); 1028 + 1029 + /* Position the vchunk inside the list according to priority */ 1030 + list_for_each(pos, &vregion->vchunk_list) { 1031 + vchunk2 = list_entry(pos, typeof(*vchunk2), list); 1032 + if (vchunk2->priority > priority) 1033 + break; 1034 + } 1035 + list_add_tail(&vchunk->list, pos); 1030 1036 mutex_unlock(&vregion->lock); 1031 1037 1032 1038 return vchunk;
+2 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
··· 36 36 err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack); 37 37 if (err) 38 38 return err; 39 - } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) { 39 + } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED && 40 + act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) { 40 41 NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type"); 41 42 return -EOPNOTSUPP; 42 43 }
+1 -1
drivers/net/ethernet/moxa/moxart_ether.c
··· 565 565 struct net_device *ndev = platform_get_drvdata(pdev); 566 566 567 567 unregister_netdev(ndev); 568 - free_irq(ndev->irq, ndev); 568 + devm_free_irq(&pdev->dev, ndev->irq, ndev); 569 569 moxart_mac_free_memory(ndev); 570 570 free_netdev(ndev); 571 571
+11 -6
drivers/net/ethernet/mscc/ocelot.c
··· 1030 1030 { 1031 1031 int i, j; 1032 1032 1033 - /* Loop through all the mac tables entries. There are 1024 rows of 4 1034 - * entries. 1035 - */ 1036 - for (i = 0; i < 1024; i++) { 1033 + /* Loop through all the mac tables entries. */ 1034 + for (i = 0; i < ocelot->num_mact_rows; i++) { 1037 1035 for (j = 0; j < 4; j++) { 1038 1036 struct ocelot_mact_entry entry; 1039 1037 bool is_static; ··· 1456 1458 1457 1459 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs) 1458 1460 { 1459 - ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2), 1460 - ANA_AUTOAGE); 1461 + unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000); 1462 + 1463 + /* Setting AGE_PERIOD to zero effectively disables automatic aging, 1464 + * which is clearly not what our intention is. So avoid that. 1465 + */ 1466 + if (!age_period) 1467 + age_period = 1; 1468 + 1469 + ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE); 1461 1470 } 1462 1471 EXPORT_SYMBOL(ocelot_set_ageing_time); 1463 1472
+1
drivers/net/ethernet/mscc/ocelot_regs.c
··· 433 433 ocelot->stats_layout = ocelot_stats_layout; 434 434 ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); 435 435 ocelot->shared_queue_sz = 224 * 1024; 436 + ocelot->num_mact_rows = 1024; 436 437 ocelot->ops = ops; 437 438 438 439 ret = ocelot_regfields_init(ocelot, ocelot_regfields);
+4 -2
drivers/net/ethernet/natsemi/jazzsonic.c
··· 208 208 209 209 err = register_netdev(dev); 210 210 if (err) 211 - goto out1; 211 + goto undo_probe1; 212 212 213 213 return 0; 214 214 215 - out1: 215 + undo_probe1: 216 + dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), 217 + lp->descriptors, lp->descriptors_laddr); 216 218 release_mem_region(dev->base_addr, SONIC_MEM_SIZE); 217 219 out: 218 220 free_netdev(dev);
+1
drivers/net/ethernet/netronome/nfp/abm/main.c
··· 283 283 if (!nfp_nsp_has_hwinfo_lookup(nsp)) { 284 284 nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n"); 285 285 eth_hw_addr_random(nn->dp.netdev); 286 + nfp_nsp_close(nsp); 286 287 return; 287 288 } 288 289
+1 -2
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
··· 170 170 debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa); 171 171 debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs); 172 172 debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size); 173 - debugfs_create_u8("done_color", 0400, cq_dentry, 174 - (u8 *)&cq->done_color); 173 + debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color); 175 174 176 175 debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops); 177 176
+2 -2
drivers/net/ethernet/pensando/ionic/ionic_lif.c
··· 2101 2101 ionic_txrx_free(lif); 2102 2102 } 2103 2103 ionic_lifs_deinit(ionic); 2104 + ionic_reset(ionic); 2104 2105 ionic_qcqs_free(lif); 2105 2106 2106 2107 dev_info(ionic->dev, "FW Down: LIFs stopped\n"); ··· 2117 2116 2118 2117 dev_info(ionic->dev, "FW Up: restarting LIFs\n"); 2119 2118 2119 + ionic_init_devinfo(ionic); 2120 2120 err = ionic_qcqs_alloc(lif); 2121 2121 if (err) 2122 2122 goto err_out; ··· 2551 2549 dev_err(ionic->dev, "Cannot register net device, aborting\n"); 2552 2550 return err; 2553 2551 } 2554 - 2555 - ionic_link_status_check_request(ionic->master_lif); 2556 2552 ionic->master_lif->registered = true; 2557 2553 2558 2554 return 0;
+1 -1
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
··· 624 624 total_offset += offset; 625 625 } 626 626 627 - total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000; 627 + total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL; 628 628 total_ctr += total_offset; 629 629 630 630 ctr_low = do_div(total_ctr, 1000000000);
+1 -6
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 4051 4051 /** 4052 4052 * stmmac_interrupt - main ISR 4053 4053 * @irq: interrupt number. 4054 - * @dev_id: to pass the net device pointer. 4054 + * @dev_id: to pass the net device pointer (must be valid). 4055 4055 * Description: this is the main driver interrupt service routine. 4056 4056 * It can call: 4057 4057 * o DMA service routine (to manage incoming frame reception and transmission ··· 4074 4074 4075 4075 if (priv->irq_wake) 4076 4076 pm_wakeup_event(priv->device, 0); 4077 - 4078 - if (unlikely(!dev)) { 4079 - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 4080 - return IRQ_NONE; 4081 - } 4082 4077 4083 4078 /* Check if adapter is up */ 4084 4079 if (test_bit(STMMAC_DOWN, &priv->state))
+1 -2
drivers/net/ethernet/ti/Kconfig
··· 90 90 config TI_CPTS_MOD 91 91 tristate 92 92 depends on TI_CPTS 93 + depends on PTP_1588_CLOCK 93 94 default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y 94 - select NET_PTP_CLASSIFY 95 - imply PTP_1588_CLOCK 96 95 default m 97 96 98 97 config TI_K3_AM65_CPSW_NUSS
+3 -2
drivers/net/ethernet/ti/am65-cpsw-nuss.c
··· 1887 1887 1888 1888 ret = devm_request_irq(dev, tx_chn->irq, 1889 1889 am65_cpsw_nuss_tx_irq, 1890 - 0, tx_chn->tx_chn_name, tx_chn); 1890 + IRQF_TRIGGER_HIGH, 1891 + tx_chn->tx_chn_name, tx_chn); 1891 1892 if (ret) { 1892 1893 dev_err(dev, "failure requesting tx%u irq %u, %d\n", 1893 1894 tx_chn->id, tx_chn->irq, ret); ··· 1913 1912 1914 1913 ret = devm_request_irq(dev, common->rx_chns.irq, 1915 1914 am65_cpsw_nuss_rx_irq, 1916 - 0, dev_name(dev), common); 1915 + IRQF_TRIGGER_HIGH, dev_name(dev), common); 1917 1916 if (ret) { 1918 1917 dev_err(dev, "failure requesting rx irq %u, %d\n", 1919 1918 common->rx_chns.irq, ret);
+1 -1
drivers/net/ethernet/toshiba/tc35815.c
··· 643 643 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); 644 644 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); 645 645 } 646 - linkmode_and(phydev->supported, phydev->supported, mask); 646 + linkmode_andnot(phydev->supported, phydev->supported, mask); 647 647 linkmode_copy(phydev->advertising, phydev->supported); 648 648 649 649 lp->link = 0;
+5 -4
drivers/net/gtp.c
··· 1169 1169 static struct genl_family gtp_genl_family; 1170 1170 1171 1171 static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq, 1172 - u32 type, struct pdp_ctx *pctx) 1172 + int flags, u32 type, struct pdp_ctx *pctx) 1173 1173 { 1174 1174 void *genlh; 1175 1175 1176 - genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0, 1176 + genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags, 1177 1177 type); 1178 1178 if (genlh == NULL) 1179 1179 goto nlmsg_failure; ··· 1227 1227 goto err_unlock; 1228 1228 } 1229 1229 1230 - err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, 1231 - info->snd_seq, info->nlhdr->nlmsg_type, pctx); 1230 + err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq, 1231 + 0, info->nlhdr->nlmsg_type, pctx); 1232 1232 if (err < 0) 1233 1233 goto err_unlock_free; 1234 1234 ··· 1271 1271 gtp_genl_fill_info(skb, 1272 1272 NETLINK_CB(cb->skb).portid, 1273 1273 cb->nlh->nlmsg_seq, 1274 + NLM_F_MULTI, 1274 1275 cb->nlh->nlmsg_type, pctx)) { 1275 1276 cb->args[0] = i; 1276 1277 cb->args[1] = j;
+2 -1
drivers/net/hyperv/netvsc_drv.c
··· 707 707 goto drop; 708 708 } 709 709 710 - static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev) 710 + static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb, 711 + struct net_device *ndev) 711 712 { 712 713 return netvsc_xmit(skb, ndev, false); 713 714 }
+9 -2
drivers/net/ipa/gsi.c
··· 1063 1063 1064 1064 complete(&gsi->completion); 1065 1065 } 1066 + 1066 1067 /* Inter-EE interrupt handler */ 1067 1068 static void gsi_isr_glob_ee(struct gsi *gsi) 1068 1069 { ··· 1516 1515 struct completion *completion = &gsi->completion; 1517 1516 u32 val; 1518 1517 1518 + /* First zero the result code field */ 1519 + val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1520 + val &= ~GENERIC_EE_RESULT_FMASK; 1521 + iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET); 1522 + 1523 + /* Now issue the command */ 1519 1524 val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK); 1520 1525 val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK); 1521 1526 val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK); ··· 1827 1820 1828 1821 /* Worst case we need an event for every outstanding TRE */ 1829 1822 if (data->channel.tre_count > data->channel.event_count) { 1830 - dev_warn(gsi->dev, "channel %u limited to %u TREs\n", 1831 - data->channel_id, data->channel.tre_count); 1832 1823 tre_count = data->channel.event_count; 1824 + dev_warn(gsi->dev, "channel %u limited to %u TREs\n", 1825 + data->channel_id, tre_count); 1833 1826 } else { 1834 1827 tre_count = data->channel.tre_count; 1835 1828 }
+2
drivers/net/ipa/gsi_reg.h
··· 410 410 #define INTER_EE_RESULT_FMASK GENMASK(2, 0) 411 411 #define GENERIC_EE_RESULT_FMASK GENMASK(7, 5) 412 412 #define GENERIC_EE_SUCCESS_FVAL 1 413 + #define GENERIC_EE_INCORRECT_DIRECTION_FVAL 3 414 + #define GENERIC_EE_INCORRECT_CHANNEL_FVAL 5 413 415 #define GENERIC_EE_NO_RESOURCES_FVAL 7 414 416 #define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */ 415 417 #define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24)
+61
drivers/net/ipa/ipa_endpoint.c
··· 1269 1269 ret, endpoint->channel_id, endpoint->endpoint_id); 1270 1270 } 1271 1271 1272 + static int ipa_endpoint_stop_rx_dma(struct ipa *ipa) 1273 + { 1274 + u16 size = IPA_ENDPOINT_STOP_RX_SIZE; 1275 + struct gsi_trans *trans; 1276 + dma_addr_t addr; 1277 + int ret; 1278 + 1279 + trans = ipa_cmd_trans_alloc(ipa, 1); 1280 + if (!trans) { 1281 + dev_err(&ipa->pdev->dev, 1282 + "no transaction for RX endpoint STOP workaround\n"); 1283 + return -EBUSY; 1284 + } 1285 + 1286 + /* Read into the highest part of the zero memory area */ 1287 + addr = ipa->zero_addr + ipa->zero_size - size; 1288 + 1289 + ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false); 1290 + 1291 + ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT); 1292 + if (ret) 1293 + gsi_trans_free(trans); 1294 + 1295 + return ret; 1296 + } 1297 + 1298 + /** 1299 + * ipa_endpoint_stop() - Stops a GSI channel in IPA 1300 + * @client: Client whose endpoint should be stopped 1301 + * 1302 + * This function implements the sequence to stop a GSI channel 1303 + * in IPA. This function returns when the channel is is STOP state. 1304 + * 1305 + * Return value: 0 on success, negative otherwise 1306 + */ 1307 + int ipa_endpoint_stop(struct ipa_endpoint *endpoint) 1308 + { 1309 + u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES; 1310 + int ret; 1311 + 1312 + do { 1313 + struct ipa *ipa = endpoint->ipa; 1314 + struct gsi *gsi = &ipa->gsi; 1315 + 1316 + ret = gsi_channel_stop(gsi, endpoint->channel_id); 1317 + if (ret != -EAGAIN || endpoint->toward_ipa) 1318 + break; 1319 + 1320 + /* For IPA v3.5.1, send a DMA read task and check again */ 1321 + if (ipa->version == IPA_VERSION_3_5_1) { 1322 + ret = ipa_endpoint_stop_rx_dma(ipa); 1323 + if (ret) 1324 + break; 1325 + } 1326 + 1327 + msleep(1); 1328 + } while (retries--); 1329 + 1330 + return retries ? ret : -EIO; 1331 + } 1332 + 1272 1333 static void ipa_endpoint_program(struct ipa_endpoint *endpoint) 1273 1334 { 1274 1335 if (endpoint->toward_ipa) {
+4 -2
drivers/net/macsec.c
··· 1305 1305 struct crypto_aead *tfm; 1306 1306 int ret; 1307 1307 1308 - tfm = crypto_alloc_aead("gcm(aes)", 0, 0); 1308 + /* Pick a sync gcm(aes) cipher to ensure order is preserved. */ 1309 + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); 1309 1310 1310 1311 if (IS_ERR(tfm)) 1311 1312 return tfm; ··· 2641 2640 if (ret) 2642 2641 goto rollback; 2643 2642 2644 - rtnl_unlock(); 2645 2643 /* Force features update, since they are different for SW MACSec and 2646 2644 * HW offloading cases. 2647 2645 */ 2648 2646 netdev_update_features(dev); 2647 + 2648 + rtnl_unlock(); 2649 2649 return 0; 2650 2650 2651 2651 rollback:
+1 -1
drivers/net/phy/dp83640.c
··· 1120 1120 goto out; 1121 1121 } 1122 1122 dp83640_clock_init(clock, bus); 1123 - list_add_tail(&phyter_clocks, &clock->list); 1123 + list_add_tail(&clock->list, &phyter_clocks); 1124 1124 out: 1125 1125 mutex_unlock(&phyter_clocks_lock); 1126 1126
+15 -17
drivers/net/phy/dp83822.c
··· 137 137 value &= ~DP83822_WOL_SECURE_ON; 138 138 } 139 139 140 - value |= (DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL | 141 - DP83822_WOL_CLR_INDICATION); 142 - phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, 143 - value); 144 - } else { 145 - value = phy_read_mmd(phydev, DP83822_DEVADDR, 146 - MII_DP83822_WOL_CFG); 147 - value &= ~DP83822_WOL_EN; 148 - phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, 149 - value); 150 - } 140 + /* Clear any pending WoL interrupt */ 141 + phy_read(phydev, MII_DP83822_MISR2); 151 142 152 - return 0; 143 + value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL | 144 + DP83822_WOL_CLR_INDICATION; 145 + 146 + return phy_write_mmd(phydev, DP83822_DEVADDR, 147 + MII_DP83822_WOL_CFG, value); 148 + } else { 149 + return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, 150 + MII_DP83822_WOL_CFG, DP83822_WOL_EN); 151 + } 153 152 } 154 153 155 154 static void dp83822_get_wol(struct phy_device *phydev, ··· 257 258 258 259 static int dp83822_config_init(struct phy_device *phydev) 259 260 { 260 - int value; 261 + int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN | 262 + DP83822_WOL_SECURE_ON; 261 263 262 - value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN; 263 - 264 - return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG, 265 - value); 264 + return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, 265 + MII_DP83822_WOL_CFG, value); 266 266 } 267 267 268 268 static int dp83822_phy_reset(struct phy_device *phydev)
+12 -9
drivers/net/phy/dp83tc811.c
··· 139 139 value &= ~DP83811_WOL_SECURE_ON; 140 140 } 141 141 142 - value |= (DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL | 143 - DP83811_WOL_CLR_INDICATION); 144 - phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, 145 - value); 142 + /* Clear any pending WoL interrupt */ 143 + phy_read(phydev, MII_DP83811_INT_STAT1); 144 + 145 + value |= DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL | 146 + DP83811_WOL_CLR_INDICATION; 147 + 148 + return phy_write_mmd(phydev, DP83811_DEVADDR, 149 + MII_DP83811_WOL_CFG, value); 146 150 } else { 147 - phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, 148 - DP83811_WOL_EN); 151 + return phy_clear_bits_mmd(phydev, DP83811_DEVADDR, 152 + MII_DP83811_WOL_CFG, DP83811_WOL_EN); 149 153 } 150 154 151 - return 0; 152 155 } 153 156 154 157 static void dp83811_get_wol(struct phy_device *phydev, ··· 295 292 296 293 value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN; 297 294 298 - return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, 299 - value); 295 + return phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, 296 + value); 300 297 } 301 298 302 299 static int dp83811_phy_reset(struct phy_device *phydev)
+26 -1
drivers/net/phy/marvell10g.c
··· 66 66 MV_PCS_CSSR1_SPD2_2500 = 0x0004, 67 67 MV_PCS_CSSR1_SPD2_10000 = 0x0000, 68 68 69 + /* Temperature read register (88E2110 only) */ 70 + MV_PCS_TEMP = 0x8042, 71 + 69 72 /* These registers appear at 0x800X and 0xa00X - the 0xa00X control 70 73 * registers appear to set themselves to the 0x800X when AN is 71 74 * restarted, but status registers appear readable from either. ··· 80 77 MV_V2_PORT_CTRL = 0xf001, 81 78 MV_V2_PORT_CTRL_SWRST = BIT(15), 82 79 MV_V2_PORT_CTRL_PWRDOWN = BIT(11), 80 + /* Temperature control/read registers (88X3310 only) */ 83 81 MV_V2_TEMP_CTRL = 0xf08a, 84 82 MV_V2_TEMP_CTRL_MASK = 0xc000, 85 83 MV_V2_TEMP_CTRL_SAMPLE = 0x0000, ··· 108 104 return 0; 109 105 } 110 106 107 + static int mv3310_hwmon_read_temp_reg(struct phy_device *phydev) 108 + { 109 + return phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP); 110 + } 111 + 112 + static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev) 113 + { 114 + return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP); 115 + } 116 + 117 + static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev) 118 + { 119 + if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310) 120 + return mv3310_hwmon_read_temp_reg(phydev); 121 + else /* MARVELL_PHY_ID_88E2110 */ 122 + return mv2110_hwmon_read_temp_reg(phydev); 123 + } 124 + 111 125 static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type, 112 126 u32 attr, int channel, long *value) 113 127 { ··· 138 116 } 139 117 140 118 if (type == hwmon_temp && attr == hwmon_temp_input) { 141 - temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP); 119 + temp = mv10g_hwmon_read_temp_reg(phydev); 142 120 if (temp < 0) 143 121 return temp; 144 122 ··· 190 168 { 191 169 u16 val; 192 170 int ret; 171 + 172 + if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310) 173 + return 0; 193 174 194 175 ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP, 195 176 MV_V2_TEMP_UNKNOWN);
+1
drivers/net/usb/qmi_wwan.c
··· 1359 1359 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 1360 1360 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ 1361 1361 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ 1362 + {QMI_FIXED_INTF(0x413c, 0x81cc, 8)}, /* Dell Wireless 5816e */ 1362 1363 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ 1363 1364 {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */ 1364 1365 {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
+3 -1
drivers/net/wireguard/queueing.c
··· 35 35 if (multicore) { 36 36 queue->worker = wg_packet_percpu_multicore_worker_alloc( 37 37 function, queue); 38 - if (!queue->worker) 38 + if (!queue->worker) { 39 + ptr_ring_cleanup(&queue->ring, NULL); 39 40 return -ENOMEM; 41 + } 40 42 } else { 41 43 INIT_WORK(&queue->work, function); 42 44 }
+10 -11
drivers/net/wireguard/receive.c
··· 226 226 static void keep_key_fresh(struct wg_peer *peer) 227 227 { 228 228 struct noise_keypair *keypair; 229 - bool send = false; 229 + bool send; 230 230 231 231 if (peer->sent_lastminute_handshake) 232 232 return; 233 233 234 234 rcu_read_lock_bh(); 235 235 keypair = rcu_dereference_bh(peer->keypairs.current_keypair); 236 - if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && 237 - keypair->i_am_the_initiator && 238 - unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, 239 - REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT))) 240 - send = true; 236 + send = keypair && READ_ONCE(keypair->sending.is_valid) && 237 + keypair->i_am_the_initiator && 238 + wg_birthdate_has_expired(keypair->sending.birthdate, 239 + REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT); 241 240 rcu_read_unlock_bh(); 242 241 243 - if (send) { 242 + if (unlikely(send)) { 244 243 peer->sent_lastminute_handshake = true; 245 244 wg_packet_send_queued_handshake_initiation(peer, false); 246 245 } ··· 392 393 len = ntohs(ip_hdr(skb)->tot_len); 393 394 if (unlikely(len < sizeof(struct iphdr))) 394 395 goto dishonest_packet_size; 395 - if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) 396 - IP_ECN_set_ce(ip_hdr(skb)); 396 + INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); 397 397 } else if (skb->protocol == htons(ETH_P_IPV6)) { 398 398 len = ntohs(ipv6_hdr(skb)->payload_len) + 399 399 sizeof(struct ipv6hdr); 400 - if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) 401 - IP6_ECN_set_ce(skb, ipv6_hdr(skb)); 400 + INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); 402 401 } else { 403 402 goto dishonest_packet_type; 404 403 } ··· 515 518 &PACKET_CB(skb)->keypair->receiving)) ? 516 519 PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; 517 520 wg_queue_enqueue_per_peer_napi(skb, state); 521 + if (need_resched()) 522 + cond_resched(); 518 523 } 519 524 } 520 525
+2 -2
drivers/net/wireguard/selftest/ratelimiter.c
··· 120 120 enum { TRIALS_BEFORE_GIVING_UP = 5000 }; 121 121 bool success = false; 122 122 int test = 0, trials; 123 - struct sk_buff *skb4, *skb6; 123 + struct sk_buff *skb4, *skb6 = NULL; 124 124 struct iphdr *hdr4; 125 - struct ipv6hdr *hdr6; 125 + struct ipv6hdr *hdr6 = NULL; 126 126 127 127 if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) 128 128 return true;
+10 -10
drivers/net/wireguard/send.c
··· 124 124 static void keep_key_fresh(struct wg_peer *peer) 125 125 { 126 126 struct noise_keypair *keypair; 127 - bool send = false; 127 + bool send; 128 128 129 129 rcu_read_lock_bh(); 130 130 keypair = rcu_dereference_bh(peer->keypairs.current_keypair); 131 - if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && 132 - (unlikely(atomic64_read(&keypair->sending.counter.counter) > 133 - REKEY_AFTER_MESSAGES) || 134 - (keypair->i_am_the_initiator && 135 - unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, 136 - REKEY_AFTER_TIME))))) 137 - send = true; 131 + send = keypair && READ_ONCE(keypair->sending.is_valid) && 132 + (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || 133 + (keypair->i_am_the_initiator && 134 + wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); 138 135 rcu_read_unlock_bh(); 139 136 140 - if (send) 137 + if (unlikely(send)) 141 138 wg_packet_send_queued_handshake_initiation(peer, false); 142 139 } 143 140 ··· 278 281 279 282 wg_noise_keypair_put(keypair, false); 280 283 wg_peer_put(peer); 284 + if (need_resched()) 285 + cond_resched(); 281 286 } 282 287 } 283 288 ··· 303 304 } 304 305 wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, 305 306 state); 306 - 307 + if (need_resched()) 308 + cond_resched(); 307 309 } 308 310 } 309 311
-12
drivers/net/wireguard/socket.c
··· 76 76 net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", 77 77 wg->dev->name, &endpoint->addr, ret); 78 78 goto err; 79 - } else if (unlikely(rt->dst.dev == skb->dev)) { 80 - ip_rt_put(rt); 81 - ret = -ELOOP; 82 - net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", 83 - wg->dev->name, &endpoint->addr); 84 - goto err; 85 79 } 86 80 if (cache) 87 81 dst_cache_set_ip4(cache, &rt->dst, fl.saddr); ··· 142 148 ret = PTR_ERR(dst); 143 149 net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", 144 150 wg->dev->name, &endpoint->addr, ret); 145 - goto err; 146 - } else if (unlikely(dst->dev == skb->dev)) { 147 - dst_release(dst); 148 - ret = -ELOOP; 149 - net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", 150 - wg->dev->name, &endpoint->addr); 151 151 goto err; 152 152 } 153 153 if (cache)
+2
drivers/nvme/host/core.c
··· 3642 3642 3643 3643 return; 3644 3644 out_put_disk: 3645 + /* prevent double queue cleanup */ 3646 + ns->disk->queue = NULL; 3645 3647 put_disk(ns->disk); 3646 3648 out_unlink_ns: 3647 3649 mutex_lock(&ctrl->subsys->lock);
+2 -1
drivers/phy/tegra/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 config PHY_TEGRA_XUSB 3 3 tristate "NVIDIA Tegra XUSB pad controller driver" 4 - depends on ARCH_TEGRA 4 + depends on ARCH_TEGRA && USB_SUPPORT 5 + select USB_COMMON 5 6 select USB_CONN_GPIO 6 7 select USB_PHY 7 8 help
+46 -34
drivers/platform/chrome/cros_ec_sensorhub.c
··· 52 52 int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 }; 53 53 struct cros_ec_command *msg = sensorhub->msg; 54 54 struct cros_ec_dev *ec = sensorhub->ec; 55 - int ret, i, sensor_num; 55 + int ret, i; 56 56 char *name; 57 57 58 - sensor_num = cros_ec_get_sensor_count(ec); 59 - if (sensor_num < 0) { 60 - dev_err(dev, 61 - "Unable to retrieve sensor information (err:%d)\n", 62 - sensor_num); 63 - return sensor_num; 64 - } 65 - 66 - sensorhub->sensor_num = sensor_num; 67 - if (sensor_num == 0) { 68 - dev_err(dev, "Zero sensors reported.\n"); 69 - return -EINVAL; 70 - } 71 58 72 59 msg->version = 1; 73 60 msg->insize = sizeof(struct ec_response_motion_sense); 74 61 msg->outsize = sizeof(struct ec_params_motion_sense); 75 62 76 - for (i = 0; i < sensor_num; i++) { 63 + for (i = 0; i < sensorhub->sensor_num; i++) { 77 64 sensorhub->params->cmd = MOTIONSENSE_CMD_INFO; 78 65 sensorhub->params->info.sensor_num = i; 79 66 ··· 127 140 struct cros_ec_dev *ec = dev_get_drvdata(dev->parent); 128 141 struct cros_ec_sensorhub *data; 129 142 struct cros_ec_command *msg; 130 - int ret; 131 - int i; 143 + int ret, i, sensor_num; 132 144 133 145 msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) + 134 146 max((u16)sizeof(struct ec_params_motion_sense), ··· 152 166 dev_set_drvdata(dev, data); 153 167 154 168 /* Check whether this EC is a sensor hub. */ 155 - if (cros_ec_check_features(data->ec, EC_FEATURE_MOTION_SENSE)) { 169 + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE)) { 170 + sensor_num = cros_ec_get_sensor_count(ec); 171 + if (sensor_num < 0) { 172 + dev_err(dev, 173 + "Unable to retrieve sensor information (err:%d)\n", 174 + sensor_num); 175 + return sensor_num; 176 + } 177 + if (sensor_num == 0) { 178 + dev_err(dev, "Zero sensors reported.\n"); 179 + return -EINVAL; 180 + } 181 + data->sensor_num = sensor_num; 182 + 183 + /* 184 + * Prepare the ring handler before enumering the 185 + * sensors. 186 + */ 187 + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { 188 + ret = cros_ec_sensorhub_ring_allocate(data); 189 + if (ret) 190 + return ret; 191 + } 192 + 193 + /* Enumerate the sensors.*/ 156 194 ret = cros_ec_sensorhub_register(dev, data); 157 195 if (ret) 158 196 return ret; 197 + 198 + /* 199 + * When the EC does not have a FIFO, the sensors will query 200 + * their data themselves via sysfs or a software trigger. 201 + */ 202 + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { 203 + ret = cros_ec_sensorhub_ring_add(data); 204 + if (ret) 205 + return ret; 206 + /* 207 + * The msg and its data is not under the control of the 208 + * ring handler. 209 + */ 210 + return devm_add_action_or_reset(dev, 211 + cros_ec_sensorhub_ring_remove, 212 + data); 213 + } 214 + 159 215 } else { 160 216 /* 161 217 * If the device has sensors but does not claim to ··· 212 184 } 213 185 } 214 186 215 - /* 216 - * If the EC does not have a FIFO, the sensors will query their data 217 - * themselves via sysfs or a software trigger. 218 - */ 219 - if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { 220 - ret = cros_ec_sensorhub_ring_add(data); 221 - if (ret) 222 - return ret; 223 - /* 224 - * The msg and its data is not under the control of the ring 225 - * handler. 226 - */ 227 - return devm_add_action_or_reset(dev, 228 - cros_ec_sensorhub_ring_remove, 229 - data); 230 - } 231 187 232 188 return 0; 233 189 }
+47 -28
drivers/platform/chrome/cros_ec_sensorhub_ring.c
··· 957 957 } 958 958 959 959 /** 960 + * cros_ec_sensorhub_ring_allocate() - Prepare the FIFO functionality if the EC 961 + * supports it. 962 + * 963 + * @sensorhub : Sensor Hub object. 964 + * 965 + * Return: 0 on success. 966 + */ 967 + int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub) 968 + { 969 + int fifo_info_length = 970 + sizeof(struct ec_response_motion_sense_fifo_info) + 971 + sizeof(u16) * sensorhub->sensor_num; 972 + 973 + /* Allocate the array for lost events. */ 974 + sensorhub->fifo_info = devm_kzalloc(sensorhub->dev, fifo_info_length, 975 + GFP_KERNEL); 976 + if (!sensorhub->fifo_info) 977 + return -ENOMEM; 978 + 979 + /* 980 + * Allocate the callback area based on the number of sensors. 981 + * Add one for the sensor ring. 982 + */ 983 + sensorhub->push_data = devm_kcalloc(sensorhub->dev, 984 + sensorhub->sensor_num, 985 + sizeof(*sensorhub->push_data), 986 + GFP_KERNEL); 987 + if (!sensorhub->push_data) 988 + return -ENOMEM; 989 + 990 + sensorhub->tight_timestamps = cros_ec_check_features( 991 + sensorhub->ec, 992 + EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS); 993 + 994 + if (sensorhub->tight_timestamps) { 995 + sensorhub->batch_state = devm_kcalloc(sensorhub->dev, 996 + sensorhub->sensor_num, 997 + sizeof(*sensorhub->batch_state), 998 + GFP_KERNEL); 999 + if (!sensorhub->batch_state) 1000 + return -ENOMEM; 1001 + } 1002 + 1003 + return 0; 1004 + } 1005 + 1006 + /** 960 1007 * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC 961 1008 * supports it. 962 1009 * ··· 1018 971 int fifo_info_length = 1019 972 sizeof(struct ec_response_motion_sense_fifo_info) + 1020 973 sizeof(u16) * sensorhub->sensor_num; 1021 - 1022 - /* Allocate the array for lost events. */ 1023 - sensorhub->fifo_info = devm_kzalloc(sensorhub->dev, fifo_info_length, 1024 - GFP_KERNEL); 1025 - if (!sensorhub->fifo_info) 1026 - return -ENOMEM; 1027 - 1028 975 1029 976 /* Retrieve FIFO information */ 1030 977 sensorhub->msg->version = 2; ··· 1039 998 if (!sensorhub->ring) 1040 999 return -ENOMEM; 1041 1000 1042 - /* 1043 - * Allocate the callback area based on the number of sensors. 1044 - */ 1045 - sensorhub->push_data = devm_kcalloc( 1046 - sensorhub->dev, sensorhub->sensor_num, 1047 - sizeof(*sensorhub->push_data), 1048 - GFP_KERNEL); 1049 - if (!sensorhub->push_data) 1050 - return -ENOMEM; 1051 - 1052 1001 sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = 1053 1002 cros_ec_get_time_ns(); 1054 - 1055 - sensorhub->tight_timestamps = cros_ec_check_features( 1056 - ec, EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS); 1057 - 1058 - if (sensorhub->tight_timestamps) { 1059 - sensorhub->batch_state = devm_kcalloc(sensorhub->dev, 1060 - sensorhub->sensor_num, 1061 - sizeof(*sensorhub->batch_state), 1062 - GFP_KERNEL); 1063 - if (!sensorhub->batch_state) 1064 - return -ENOMEM; 1065 - } 1066 1003 1067 1004 /* Register the notifier that will act as a top half interrupt. */ 1068 1005 sensorhub->notifier.notifier_call = cros_ec_sensorhub_event;
+24
drivers/platform/x86/asus-nb-wmi.c
··· 515 515 .detect_quirks = asus_nb_wmi_quirks, 516 516 }; 517 517 518 + static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = { 519 + { 520 + /* 521 + * asus-nb-wm adds no functionality. The T100TA has a detachable 522 + * USB kbd, so no hotkeys and it has no WMI rfkill; and loading 523 + * asus-nb-wm causes the camera LED to turn and _stay_ on. 524 + */ 525 + .matches = { 526 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 527 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), 528 + }, 529 + }, 530 + { 531 + /* The Asus T200TA has the same issue as the T100TA */ 532 + .matches = { 533 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 534 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"), 535 + }, 536 + }, 537 + {} /* Terminating entry */ 538 + }; 518 539 519 540 static int __init asus_nb_wmi_init(void) 520 541 { 542 + if (dmi_check_system(asus_nb_wmi_blacklist)) 543 + return -ENODEV; 544 + 521 545 return asus_wmi_register_driver(&asus_nb_wmi_driver); 522 546 } 523 547
+1 -1
drivers/platform/x86/intel-uncore-frequency.c
··· 53 53 /* Storage for uncore data for all instances */ 54 54 static struct uncore_data *uncore_instances; 55 55 /* Root of the all uncore sysfs kobjs */ 56 - struct kobject *uncore_root_kobj; 56 + static struct kobject *uncore_root_kobj; 57 57 /* Stores the CPU mask of the target CPUs to use during uncore read/write */ 58 58 static cpumask_t uncore_cpu_mask; 59 59 /* CPU online callback register instance */
+5 -19
drivers/platform/x86/intel_pmc_core.c
··· 255 255 }; 256 256 257 257 static const struct pmc_bit_map icl_pfear_map[] = { 258 - /* Ice Lake generation onwards only */ 258 + /* Ice Lake and Jasper Lake generation onwards only */ 259 259 {"RES_65", BIT(0)}, 260 260 {"RES_66", BIT(1)}, 261 261 {"RES_67", BIT(2)}, ··· 274 274 }; 275 275 276 276 static const struct pmc_bit_map tgl_pfear_map[] = { 277 - /* Tiger Lake, Elkhart Lake and Jasper Lake generation onwards only */ 277 + /* Tiger Lake and Elkhart Lake generation onwards only */ 278 278 {"PSF9", BIT(0)}, 279 279 {"RES_66", BIT(1)}, 280 280 {"RES_67", BIT(2)}, ··· 692 692 kfree(lpm_regs); 693 693 } 694 694 695 - #if IS_ENABLED(CONFIG_DEBUG_FS) 696 695 static bool slps0_dbg_latch; 697 696 698 697 static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) ··· 1132 1133 &pmc_core_substate_l_sts_regs_fops); 1133 1134 } 1134 1135 } 1135 - #else 1136 - static inline void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) 1137 - { 1138 - } 1139 - 1140 - static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) 1141 - { 1142 - } 1143 - #endif /* CONFIG_DEBUG_FS */ 1144 1136 1145 1137 static const struct x86_cpu_id intel_pmc_core_ids[] = { 1146 1138 X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &spt_reg_map), ··· 1146 1156 X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_reg_map), 1147 1157 X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_reg_map), 1148 1158 X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map), 1149 - X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &tgl_reg_map), 1159 + X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &icl_reg_map), {} ··· 1250 1260 return 0; 1251 1261 } 1252 1262 1253 - #ifdef CONFIG_PM_SLEEP 1254 - 1255 1263 static bool warn_on_s0ix_failures; 1256 1264 module_param(warn_on_s0ix_failures, bool, 0644); 1257 1265 MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures"); 1258 1266 1259 - static int pmc_core_suspend(struct device *dev) 1267 + static __maybe_unused int pmc_core_suspend(struct device *dev) 1260 1268 { 1261 1269 struct pmc_dev *pmcdev = dev_get_drvdata(dev); ··· 1306 1318 return false; 1307 1319 } 1308 1320 1309 - static int pmc_core_resume(struct device *dev) 1321 + static __maybe_unused int pmc_core_resume(struct device *dev) 1310 1322 { 1311 1323 struct pmc_dev *pmcdev = dev_get_drvdata(dev); 1312 1324 const struct pmc_bit_map **maps = pmcdev->map->lpm_sts; ··· 1335 1347 1336 1348 return 0; 1337 1349 } 1338 - 1339 - #endif 1340 1350 1341 1351 static const struct dev_pm_ops pmc_core_pm_ops = { 1342 1352 SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
-2
drivers/platform/x86/intel_pmc_core.h
··· 282 282 u32 base_addr; 283 283 void __iomem *regbase; 284 284 const struct pmc_reg_map *map; 285 - #if IS_ENABLED(CONFIG_DEBUG_FS) 286 285 struct dentry *dbgfs_dir; 287 - #endif /* CONFIG_DEBUG_FS */ 288 286 int pmc_xram_read_bit; 289 287 struct mutex lock; /* generic mutex lock for PMC Core */ 290 288
+2 -2
drivers/platform/x86/surface3_power.c
··· 522 522 strlcpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE); 523 523 524 524 bat0 = i2c_acpi_new_device(dev, 1, &board_info); 525 - if (!bat0) 526 - return -ENOMEM; 525 + if (IS_ERR(bat0)) 526 + return PTR_ERR(bat0); 527 527 528 528 data->bat0 = bat0; 529 529 i2c_set_clientdata(bat0, data);
+1 -1
drivers/platform/x86/thinkpad_acpi.c
··· 9548 9548 if (!battery_info.batteries[battery].start_support) 9549 9549 return -ENODEV; 9550 9550 /* valid values are [0, 99] */ 9551 - if (value < 0 || value > 99) 9551 + if (value > 99) 9552 9552 return -EINVAL; 9553 9553 if (value > battery_info.batteries[battery].charge_stop) 9554 9554 return -EINVAL;
+2 -2
drivers/platform/x86/xiaomi-wmi.c
··· 23 23 unsigned int key_code; 24 24 }; 25 25 26 - int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context) 26 + static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context) 27 27 { 28 28 struct xiaomi_wmi *data; 29 29 ··· 48 48 return input_register_device(data->input_dev); 49 49 } 50 50 51 - void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy) 51 + static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy) 52 52 { 53 53 struct xiaomi_wmi *data; 54 54
+5 -5
drivers/s390/net/qeth_core_main.c
··· 7048 7048 unsigned int i; 7049 7049 7050 7050 /* Quiesce the NAPI instances: */ 7051 - qeth_for_each_output_queue(card, queue, i) { 7051 + qeth_for_each_output_queue(card, queue, i) 7052 7052 napi_disable(&queue->napi); 7053 - del_timer_sync(&queue->timer); 7054 - } 7055 7053 7056 7054 /* Stop .ndo_start_xmit, might still access queue->napi. */ 7057 7055 netif_tx_disable(dev); 7058 7056 7059 - /* Queues may get re-allocated, so remove the NAPIs here. */ 7060 - qeth_for_each_output_queue(card, queue, i) 7057 + qeth_for_each_output_queue(card, queue, i) { 7058 + del_timer_sync(&queue->timer); 7059 + /* Queues may get re-allocated, so remove the NAPIs. */ 7061 7060 netif_napi_del(&queue->napi); 7061 + } 7062 7062 } else { 7063 7063 netif_tx_disable(dev); 7064 7064 }
+17 -18
drivers/scsi/qla2xxx/qla_os.c
··· 3732 3732 } 3733 3733 qla2x00_wait_for_hba_ready(base_vha); 3734 3734 3735 + /* 3736 + * if UNLOADING flag is already set, then continue unload, 3737 + * where it was set first. 3738 + */ 3739 + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) 3740 + return; 3741 + 3735 3742 if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || 3736 3743 IS_QLA28XX(ha)) { 3737 3744 if (ha->flags.fw_started) ··· 3756 3749 } 3757 3750 3758 3751 qla2x00_wait_for_sess_deletion(base_vha); 3759 - 3760 - /* 3761 - * if UNLOAD flag is already set, then continue unload, 3762 - * where it was set first. 3763 - */ 3764 - if (test_bit(UNLOADING, &base_vha->dpc_flags)) 3765 - return; 3766 - 3767 - set_bit(UNLOADING, &base_vha->dpc_flags); 3768 3752 3769 3753 qla_nvme_delete(base_vha); 3770 3754 ··· 4861 4863 { 4862 4864 struct qla_work_evt *e; 4863 4865 uint8_t bail; 4866 + 4867 + if (test_bit(UNLOADING, &vha->dpc_flags)) 4868 + return NULL; 4864 4869 4865 4870 QLA_VHA_MARK_BUSY(vha, bail); 4866 4871 if (bail) ··· 6629 6628 struct pci_dev *pdev = ha->pdev; 6630 6629 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6631 6630 6632 - /* 6633 - * if UNLOAD flag is already set, then continue unload, 6634 - * where it was set first. 6635 - */ 6636 - if (test_bit(UNLOADING, &base_vha->dpc_flags)) 6637 - return; 6638 - 6639 6631 ql_log(ql_log_warn, base_vha, 0x015b, 6640 6632 "Disabling adapter.\n"); 6641 6633 ··· 6639 6645 return; 6640 6646 } 6641 6647 6642 - qla2x00_wait_for_sess_deletion(base_vha); 6648 + /* 6649 + * if UNLOADING flag is already set, then continue unload, 6650 + * where it was set first. 6651 + */ 6652 + if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) 6653 + return; 6643 6654 6644 - set_bit(UNLOADING, &base_vha->dpc_flags); 6655 + qla2x00_wait_for_sess_deletion(base_vha); 6645 6656 6646 6657 qla2x00_delete_all_vps(ha, base_vha); 6647 6658
+1
drivers/scsi/scsi_lib.c
··· 2284 2284 switch (oldstate) { 2285 2285 case SDEV_RUNNING: 2286 2286 case SDEV_CREATED_BLOCK: 2287 + case SDEV_QUIESCE: 2287 2288 case SDEV_OFFLINE: 2288 2289 break; 2289 2290 default:
+3 -1
drivers/staging/comedi/comedi_fops.c
··· 2725 2725 } 2726 2726 2727 2727 cfp = kzalloc(sizeof(*cfp), GFP_KERNEL); 2728 - if (!cfp) 2728 + if (!cfp) { 2729 + comedi_dev_put(dev); 2729 2730 return -ENOMEM; 2731 + } 2730 2732 2731 2733 cfp->dev = dev; 2732 2734
+3
drivers/staging/comedi/drivers/dt2815.c
··· 92 92 int ret; 93 93 94 94 for (i = 0; i < insn->n; i++) { 95 + /* FIXME: lo bit 0 chooses voltage output or current output */ 95 96 lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01; 96 97 hi = (data[i] & 0xff0) >> 4; 97 98 ··· 105 104 ret = comedi_timeout(dev, s, insn, dt2815_ao_status, 0x10); 106 105 if (ret) 107 106 return ret; 107 + 108 + outb(hi, dev->iobase + DT2815_DATA); 108 109 109 110 devpriv->ao_readback[chan] = data[i]; 110 111 }
+1 -2
drivers/staging/gasket/gasket_sysfs.c
··· 228 228 } 229 229 230 230 mutex_lock(&mapping->mutex); 231 - for (i = 0; strcmp(attrs[i].attr.attr.name, GASKET_ARRAY_END_MARKER); 232 - i++) { 231 + for (i = 0; attrs[i].attr.attr.name != NULL; i++) { 233 232 if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) { 234 233 dev_err(device, 235 234 "Maximum number of sysfs nodes reached for device\n");
-4
drivers/staging/gasket/gasket_sysfs.h
··· 30 30 */ 31 31 #define GASKET_SYSFS_MAX_NODES 196 32 32 33 - /* End markers for sysfs struct arrays. */ 34 - #define GASKET_ARRAY_END_TOKEN GASKET_RESERVED_ARRAY_END 35 - #define GASKET_ARRAY_END_MARKER __stringify(GASKET_ARRAY_END_TOKEN) 36 - 37 33 /* 38 34 * Terminator struct for a gasket_sysfs_attr array. Must be at the end of 39 35 * all gasket_sysfs_attribute arrays.
+3 -11
drivers/staging/vt6656/key.c
··· 83 83 case VNT_KEY_PAIRWISE: 84 84 key_mode |= mode; 85 85 key_inx = 4; 86 - /* Don't save entry for pairwise key for station mode */ 87 - if (priv->op_mode == NL80211_IFTYPE_STATION) 88 - clear_bit(entry, &priv->key_entry_inuse); 89 86 break; 90 87 default: 91 88 return -EINVAL; ··· 106 109 int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta, 107 110 struct ieee80211_vif *vif, struct ieee80211_key_conf *key) 108 111 { 109 - struct ieee80211_bss_conf *conf = &vif->bss_conf; 110 112 struct vnt_private *priv = hw->priv; 111 113 u8 *mac_addr = NULL; 112 114 u8 key_dec_mode = 0; ··· 150 154 return -EOPNOTSUPP; 151 155 } 152 156 153 - if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 157 + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 154 158 vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE, 155 159 key_dec_mode, true); 156 - } else { 157 - vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY, 160 + else 161 + vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS, 158 162 key_dec_mode, true); 159 - 160 - vnt_set_keymode(hw, (u8 *)conf->bssid, key, 161 - VNT_KEY_GROUP_ADDRESS, key_dec_mode, true); 162 - } 163 163 164 164 return 0; 165 165 }
+17 -14
drivers/staging/vt6656/main_usb.c
··· 625 625 626 626 priv->op_mode = vif->type; 627 627 628 - vnt_set_bss_mode(priv); 629 - 630 628 /* LED blink on TX */ 631 629 vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER); 632 630 ··· 711 713 priv->basic_rates = conf->basic_rates; 712 714 713 715 vnt_update_top_rates(priv); 714 - vnt_set_bss_mode(priv); 715 716 716 717 dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates); 717 718 } ··· 739 742 priv->short_slot_time = false; 740 743 741 744 vnt_set_short_slot_time(priv); 742 - vnt_update_ifs(priv); 743 745 vnt_set_vga_gain_offset(priv, priv->bb_vga[0]); 744 746 vnt_update_pre_ed_threshold(priv, false); 745 747 } 748 + 749 + if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE | 750 + BSS_CHANGED_ERP_SLOT)) 751 + vnt_set_bss_mode(priv); 746 752 747 753 if (changed & BSS_CHANGED_TXPOWER) 748 754 vnt_rf_setpower(priv, priv->current_rate, ··· 770 770 vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL, 771 771 TFTCTL_TSFCNTREN); 772 772 773 - vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, 774 - conf->sync_tsf, priv->current_tsf); 775 - 776 773 vnt_mac_set_beacon_interval(priv, conf->beacon_int); 777 774 778 775 vnt_reset_next_tbtt(priv, conf->beacon_int); 776 + 777 + vnt_adjust_tsf(priv, conf->beacon_rate->hw_value, 778 + conf->sync_tsf, priv->current_tsf); 779 + 780 + vnt_update_next_tbtt(priv, 781 + conf->sync_tsf, conf->beacon_int); 779 782 } else { 780 783 vnt_clear_current_tsf(priv); 781 784 ··· 812 809 { 813 810 struct vnt_private *priv = hw->priv; 814 811 u8 rx_mode = 0; 815 - int rc; 816 812 817 813 *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC; 818 814 819 - rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, 820 - MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode); 821 - 822 - if (!rc) 823 - rx_mode = RCR_MULTICAST | RCR_BROADCAST; 815 + vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR, 816 + MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode); 824 817 825 818 dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode); 
826 819 ··· 855 856 case SET_KEY: 856 857 return vnt_set_keys(hw, sta, vif, key); 857 858 case DISABLE_KEY: 858 - if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) 859 + if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) { 859 860 clear_bit(key->hw_key_idx, &priv->key_entry_inuse); 861 + 862 + vnt_mac_disable_keyentry(priv, key->hw_key_idx); 863 + } 864 + 860 865 default: 861 866 break; 862 867 }
+2 -1
drivers/staging/vt6656/usbpipe.c
··· 207 207 priv->wake_up_count = 208 208 priv->hw->conf.listen_interval; 209 209 210 - --priv->wake_up_count; 210 + if (priv->wake_up_count) 211 + --priv->wake_up_count; 211 212 212 213 /* Turn on wake up to listen next beacon */ 213 214 if (priv->wake_up_count == 1)
+1 -1
drivers/target/target_core_iblock.c
··· 432 432 target_to_linux_sector(dev, cmd->t_task_lba), 433 433 target_to_linux_sector(dev, 434 434 sbc_get_write_same_sectors(cmd)), 435 - GFP_KERNEL, false); 435 + GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); 436 436 if (ret) 437 437 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 438 438
+1 -1
drivers/tty/hvc/Kconfig
··· 88 88 89 89 config HVC_RISCV_SBI 90 90 bool "RISC-V SBI console support" 91 - depends on RISCV_SBI 91 + depends on RISCV_SBI_V01 92 92 select HVC_DRIVER 93 93 help 94 94 This enables support for console output via RISC-V SBI calls, which
+14 -9
drivers/tty/hvc/hvc_console.c
··· 302 302 vtermnos[index] = vtermno; 303 303 cons_ops[index] = ops; 304 304 305 - /* reserve all indices up to and including this index */ 306 - if (last_hvc < index) 307 - last_hvc = index; 308 - 309 305 /* check if we need to re-register the kernel console */ 310 306 hvc_check_console(index); 311 307 ··· 956 960 cons_ops[i] == hp->ops) 957 961 break; 958 962 959 - /* no matching slot, just use a counter */ 960 - if (i >= MAX_NR_HVC_CONSOLES) 961 - i = ++last_hvc; 963 + if (i >= MAX_NR_HVC_CONSOLES) { 964 + 965 + /* find 'empty' slot for console */ 966 + for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) { 967 + } 968 + 969 + /* no matching slot, just use a counter */ 970 + if (i == MAX_NR_HVC_CONSOLES) 971 + i = ++last_hvc + MAX_NR_HVC_CONSOLES; 972 + } 962 973 963 974 hp->index = i; 964 - cons_ops[i] = ops; 965 - vtermnos[i] = vtermno; 975 + if (i < MAX_NR_HVC_CONSOLES) { 976 + cons_ops[i] = ops; 977 + vtermnos[i] = vtermno; 978 + } 966 979 967 980 list_add_tail(&(hp->next), &hvc_structs); 968 981 mutex_unlock(&hvc_structs_mutex);
+14 -11
drivers/tty/rocket.c
··· 632 632 tty_port_init(&info->port); 633 633 info->port.ops = &rocket_port_ops; 634 634 info->flags &= ~ROCKET_MODE_MASK; 635 - switch (pc104[board][line]) { 636 - case 422: 637 - info->flags |= ROCKET_MODE_RS422; 638 - break; 639 - case 485: 640 - info->flags |= ROCKET_MODE_RS485; 641 - break; 642 - case 232: 643 - default: 635 + if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1)) 636 + switch (pc104[board][line]) { 637 + case 422: 638 + info->flags |= ROCKET_MODE_RS422; 639 + break; 640 + case 485: 641 + info->flags |= ROCKET_MODE_RS485; 642 + break; 643 + case 232: 644 + default: 645 + info->flags |= ROCKET_MODE_RS232; 646 + break; 647 + } 648 + else 644 649 info->flags |= ROCKET_MODE_RS232; 645 - break; 646 - } 647 650 648 651 info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR; 649 652 if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
+1 -1
drivers/tty/serial/Kconfig
··· 86 86 87 87 config SERIAL_EARLYCON_RISCV_SBI 88 88 bool "Early console using RISC-V SBI" 89 - depends on RISCV_SBI 89 + depends on RISCV_SBI_V01 90 90 select SERIAL_CORE 91 91 select SERIAL_CORE_CONSOLE 92 92 select SERIAL_EARLYCON
+3 -1
drivers/tty/serial/bcm63xx_uart.c
··· 843 843 if (IS_ERR(clk) && pdev->dev.of_node) 844 844 clk = of_clk_get(pdev->dev.of_node, 0); 845 845 846 - if (IS_ERR(clk)) 846 + if (IS_ERR(clk)) { 847 + clk_put(clk); 847 848 return -ENODEV; 849 + } 848 850 849 851 port->iotype = UPIO_MEM; 850 852 port->irq = res_irq->start;
+7
drivers/tty/serial/owl-uart.c
··· 680 680 return PTR_ERR(owl_port->clk); 681 681 } 682 682 683 + ret = clk_prepare_enable(owl_port->clk); 684 + if (ret) { 685 + dev_err(&pdev->dev, "could not enable clk\n"); 686 + return ret; 687 + } 688 + 683 689 owl_port->port.dev = &pdev->dev; 684 690 owl_port->port.line = pdev->id; 685 691 owl_port->port.type = PORT_OWL; ··· 718 712 719 713 uart_remove_one_port(&owl_uart_driver, &owl_port->port); 720 714 owl_uart_ports[pdev->id] = NULL; 715 + clk_disable_unprepare(owl_port->clk); 721 716 722 717 return 0; 723 718 }
+9 -2
drivers/tty/serial/sh-sci.c
··· 870 870 tty_insert_flip_char(tport, c, TTY_NORMAL); 871 871 } else { 872 872 for (i = 0; i < count; i++) { 873 - char c = serial_port_in(port, SCxRDR); 873 + char c; 874 874 875 - status = serial_port_in(port, SCxSR); 875 + if (port->type == PORT_SCIF || 876 + port->type == PORT_HSCIF) { 877 + status = serial_port_in(port, SCxSR); 878 + c = serial_port_in(port, SCxRDR); 879 + } else { 880 + c = serial_port_in(port, SCxRDR); 881 + status = serial_port_in(port, SCxSR); 882 + } 876 883 if (uart_handle_sysrq_char(port, c)) { 877 884 count--; i--; 878 885 continue;
+3
drivers/tty/serial/sunhv.c
··· 567 567 sunserial_console_match(&sunhv_console, op->dev.of_node, 568 568 &sunhv_reg, port->line, false); 569 569 570 + /* We need to initialize lock even for non-registered console */ 571 + spin_lock_init(&port->lock); 572 + 570 573 err = uart_add_one_port(&sunhv_reg, port); 571 574 if (err) 572 575 goto out_unregister_driver;
+49 -162
drivers/tty/serial/xilinx_uartps.c
··· 26 26 27 27 #define CDNS_UART_TTY_NAME "ttyPS" 28 28 #define CDNS_UART_NAME "xuartps" 29 + #define CDNS_UART_MAJOR 0 /* use dynamic node allocation */ 30 + #define CDNS_UART_MINOR 0 /* works best with devtmpfs */ 31 + #define CDNS_UART_NR_PORTS 16 29 32 #define CDNS_UART_FIFO_SIZE 64 /* FIFO size */ 30 33 #define CDNS_UART_REGISTER_SPACE 0x1000 31 34 #define TX_TIMEOUT 500000 32 35 33 36 /* Rx Trigger level */ 34 37 static int rx_trigger_level = 56; 35 - static int uartps_major; 36 38 module_param(rx_trigger_level, uint, 0444); 37 39 MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes"); 38 40 ··· 190 188 * @pclk: APB clock 191 189 * @cdns_uart_driver: Pointer to UART driver 192 190 * @baud: Current baud rate 193 - * @id: Port ID 194 191 * @clk_rate_change_nb: Notifier block for clock changes 195 192 * @quirks: Flags for RXBS support. 196 193 */ ··· 199 198 struct clk *pclk; 200 199 struct uart_driver *cdns_uart_driver; 201 200 unsigned int baud; 202 - int id; 203 201 struct notifier_block clk_rate_change_nb; 204 202 u32 quirks; 205 203 bool cts_override; ··· 1133 1133 #endif 1134 1134 }; 1135 1135 1136 + static struct uart_driver cdns_uart_uart_driver; 1137 + 1136 1138 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE 1137 1139 /** 1138 1140 * cdns_uart_console_putchar - write the character to the FIFO buffer ··· 1274 1272 1275 1273 return uart_set_options(port, co, baud, parity, bits, flow); 1276 1274 } 1275 + 1276 + static struct console cdns_uart_console = { 1277 + .name = CDNS_UART_TTY_NAME, 1278 + .write = cdns_uart_console_write, 1279 + .device = uart_console_device, 1280 + .setup = cdns_uart_console_setup, 1281 + .flags = CON_PRINTBUFFER, 1282 + .index = -1, /* Specified on the cmdline (e.g. 
console=ttyPS ) */ 1283 + .data = &cdns_uart_uart_driver, 1284 + }; 1277 1285 #endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */ 1278 1286 1279 1287 #ifdef CONFIG_PM_SLEEP ··· 1415 1403 }; 1416 1404 MODULE_DEVICE_TABLE(of, cdns_uart_of_match); 1417 1405 1418 - /* 1419 - * Maximum number of instances without alias IDs but if there is alias 1420 - * which target "< MAX_UART_INSTANCES" range this ID can't be used. 1421 - */ 1422 - #define MAX_UART_INSTANCES 32 1423 - 1424 - /* Stores static aliases list */ 1425 - static DECLARE_BITMAP(alias_bitmap, MAX_UART_INSTANCES); 1426 - static int alias_bitmap_initialized; 1427 - 1428 - /* Stores actual bitmap of allocated IDs with alias IDs together */ 1429 - static DECLARE_BITMAP(bitmap, MAX_UART_INSTANCES); 1430 - /* Protect bitmap operations to have unique IDs */ 1431 - static DEFINE_MUTEX(bitmap_lock); 1432 - 1433 - static int cdns_get_id(struct platform_device *pdev) 1434 - { 1435 - int id, ret; 1436 - 1437 - mutex_lock(&bitmap_lock); 1438 - 1439 - /* Alias list is stable that's why get alias bitmap only once */ 1440 - if (!alias_bitmap_initialized) { 1441 - ret = of_alias_get_alias_list(cdns_uart_of_match, "serial", 1442 - alias_bitmap, MAX_UART_INSTANCES); 1443 - if (ret && ret != -EOVERFLOW) { 1444 - mutex_unlock(&bitmap_lock); 1445 - return ret; 1446 - } 1447 - 1448 - alias_bitmap_initialized++; 1449 - } 1450 - 1451 - /* Make sure that alias ID is not taken by instance without alias */ 1452 - bitmap_or(bitmap, bitmap, alias_bitmap, MAX_UART_INSTANCES); 1453 - 1454 - dev_dbg(&pdev->dev, "Alias bitmap: %*pb\n", 1455 - MAX_UART_INSTANCES, bitmap); 1456 - 1457 - /* Look for a serialN alias */ 1458 - id = of_alias_get_id(pdev->dev.of_node, "serial"); 1459 - if (id < 0) { 1460 - dev_warn(&pdev->dev, 1461 - "No serial alias passed. Using the first free id\n"); 1462 - 1463 - /* 1464 - * Start with id 0 and check if there is no serial0 alias 1465 - * which points to device which is compatible with this driver. 
1466 - * If alias exists then try next free position. 1467 - */ 1468 - id = 0; 1469 - 1470 - for (;;) { 1471 - dev_info(&pdev->dev, "Checking id %d\n", id); 1472 - id = find_next_zero_bit(bitmap, MAX_UART_INSTANCES, id); 1473 - 1474 - /* No free empty instance */ 1475 - if (id == MAX_UART_INSTANCES) { 1476 - dev_err(&pdev->dev, "No free ID\n"); 1477 - mutex_unlock(&bitmap_lock); 1478 - return -EINVAL; 1479 - } 1480 - 1481 - dev_dbg(&pdev->dev, "The empty id is %d\n", id); 1482 - /* Check if ID is empty */ 1483 - if (!test_and_set_bit(id, bitmap)) { 1484 - /* Break the loop if bit is taken */ 1485 - dev_dbg(&pdev->dev, 1486 - "Selected ID %d allocation passed\n", 1487 - id); 1488 - break; 1489 - } 1490 - dev_dbg(&pdev->dev, 1491 - "Selected ID %d allocation failed\n", id); 1492 - /* if taking bit fails then try next one */ 1493 - id++; 1494 - } 1495 - } 1496 - 1497 - mutex_unlock(&bitmap_lock); 1498 - 1499 - return id; 1500 - } 1406 + /* Temporary variable for storing number of instances */ 1407 + static int instances; 1501 1408 1502 1409 /** 1503 1410 * cdns_uart_probe - Platform driver probe ··· 1426 1495 */ 1427 1496 static int cdns_uart_probe(struct platform_device *pdev) 1428 1497 { 1429 - int rc, irq; 1498 + int rc, id, irq; 1430 1499 struct uart_port *port; 1431 1500 struct resource *res; 1432 1501 struct cdns_uart *cdns_uart_data; 1433 1502 const struct of_device_id *match; 1434 - struct uart_driver *cdns_uart_uart_driver; 1435 - char *driver_name; 1436 - #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE 1437 - struct console *cdns_uart_console; 1438 - #endif 1439 1503 1440 1504 cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), 1441 1505 GFP_KERNEL); ··· 1440 1514 if (!port) 1441 1515 return -ENOMEM; 1442 1516 1443 - cdns_uart_uart_driver = devm_kzalloc(&pdev->dev, 1444 - sizeof(*cdns_uart_uart_driver), 1445 - GFP_KERNEL); 1446 - if (!cdns_uart_uart_driver) 1447 - return -ENOMEM; 1517 + /* Look for a serialN alias */ 1518 + id = 
of_alias_get_id(pdev->dev.of_node, "serial"); 1519 + if (id < 0) 1520 + id = 0; 1448 1521 1449 - cdns_uart_data->id = cdns_get_id(pdev); 1450 - if (cdns_uart_data->id < 0) 1451 - return cdns_uart_data->id; 1452 - 1453 - /* There is a need to use unique driver name */ 1454 - driver_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%d", 1455 - CDNS_UART_NAME, cdns_uart_data->id); 1456 - if (!driver_name) { 1457 - rc = -ENOMEM; 1458 - goto err_out_id; 1522 + if (id >= CDNS_UART_NR_PORTS) { 1523 + dev_err(&pdev->dev, "Cannot get uart_port structure\n"); 1524 + return -ENODEV; 1459 1525 } 1460 1526 1461 - cdns_uart_uart_driver->owner = THIS_MODULE; 1462 - cdns_uart_uart_driver->driver_name = driver_name; 1463 - cdns_uart_uart_driver->dev_name = CDNS_UART_TTY_NAME; 1464 - cdns_uart_uart_driver->major = uartps_major; 1465 - cdns_uart_uart_driver->minor = cdns_uart_data->id; 1466 - cdns_uart_uart_driver->nr = 1; 1467 - 1527 + if (!cdns_uart_uart_driver.state) { 1528 + cdns_uart_uart_driver.owner = THIS_MODULE; 1529 + cdns_uart_uart_driver.driver_name = CDNS_UART_NAME; 1530 + cdns_uart_uart_driver.dev_name = CDNS_UART_TTY_NAME; 1531 + cdns_uart_uart_driver.major = CDNS_UART_MAJOR; 1532 + cdns_uart_uart_driver.minor = CDNS_UART_MINOR; 1533 + cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS; 1468 1534 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE 1469 - cdns_uart_console = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_console), 1470 - GFP_KERNEL); 1471 - if (!cdns_uart_console) { 1472 - rc = -ENOMEM; 1473 - goto err_out_id; 1474 - } 1475 - 1476 - strncpy(cdns_uart_console->name, CDNS_UART_TTY_NAME, 1477 - sizeof(cdns_uart_console->name)); 1478 - cdns_uart_console->index = cdns_uart_data->id; 1479 - cdns_uart_console->write = cdns_uart_console_write; 1480 - cdns_uart_console->device = uart_console_device; 1481 - cdns_uart_console->setup = cdns_uart_console_setup; 1482 - cdns_uart_console->flags = CON_PRINTBUFFER; 1483 - cdns_uart_console->data = cdns_uart_uart_driver; 1484 - 
cdns_uart_uart_driver->cons = cdns_uart_console; 1535 + cdns_uart_uart_driver.cons = &cdns_uart_console; 1485 1536 #endif 1486 1537 1487 - rc = uart_register_driver(cdns_uart_uart_driver); 1488 - if (rc < 0) { 1489 - dev_err(&pdev->dev, "Failed to register driver\n"); 1490 - goto err_out_id; 1538 + rc = uart_register_driver(&cdns_uart_uart_driver); 1539 + if (rc < 0) { 1540 + dev_err(&pdev->dev, "Failed to register driver\n"); 1541 + return rc; 1542 + } 1491 1543 } 1492 1544 1493 - cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver; 1494 - 1495 - /* 1496 - * Setting up proper name_base needs to be done after uart 1497 - * registration because tty_driver structure is not filled. 1498 - * name_base is 0 by default. 1499 - */ 1500 - cdns_uart_uart_driver->tty_driver->name_base = cdns_uart_data->id; 1545 + cdns_uart_data->cdns_uart_driver = &cdns_uart_uart_driver; 1501 1546 1502 1547 match = of_match_node(cdns_uart_of_match, pdev->dev.of_node); 1503 1548 if (match && match->data) { ··· 1546 1649 port->ops = &cdns_uart_ops; 1547 1650 port->fifosize = CDNS_UART_FIFO_SIZE; 1548 1651 port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_XILINX_PS_UART_CONSOLE); 1652 + port->line = id; 1549 1653 1550 1654 /* 1551 1655 * Register the port. 
··· 1578 1680 console_port = port; 1579 1681 #endif 1580 1682 1581 - rc = uart_add_one_port(cdns_uart_uart_driver, port); 1683 + rc = uart_add_one_port(&cdns_uart_uart_driver, port); 1582 1684 if (rc) { 1583 1685 dev_err(&pdev->dev, 1584 1686 "uart_add_one_port() failed; err=%i\n", rc); ··· 1588 1690 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE 1589 1691 /* This is not port which is used for console that's why clean it up */ 1590 1692 if (console_port == port && 1591 - !(cdns_uart_uart_driver->cons->flags & CON_ENABLED)) 1693 + !(cdns_uart_uart_driver.cons->flags & CON_ENABLED)) 1592 1694 console_port = NULL; 1593 1695 #endif 1594 1696 1595 - uartps_major = cdns_uart_uart_driver->tty_driver->major; 1596 1697 cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node, 1597 1698 "cts-override"); 1699 + 1700 + instances++; 1701 + 1598 1702 return 0; 1599 1703 1600 1704 err_out_pm_disable: ··· 1612 1712 err_out_clk_dis_pclk: 1613 1713 clk_disable_unprepare(cdns_uart_data->pclk); 1614 1714 err_out_unregister_driver: 1615 - uart_unregister_driver(cdns_uart_data->cdns_uart_driver); 1616 - err_out_id: 1617 - mutex_lock(&bitmap_lock); 1618 - if (cdns_uart_data->id < MAX_UART_INSTANCES) 1619 - clear_bit(cdns_uart_data->id, bitmap); 1620 - mutex_unlock(&bitmap_lock); 1715 + if (!instances) 1716 + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); 1621 1717 return rc; 1622 1718 } 1623 1719 ··· 1636 1740 #endif 1637 1741 rc = uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port); 1638 1742 port->mapbase = 0; 1639 - mutex_lock(&bitmap_lock); 1640 - if (cdns_uart_data->id < MAX_UART_INSTANCES) 1641 - clear_bit(cdns_uart_data->id, bitmap); 1642 - mutex_unlock(&bitmap_lock); 1643 1743 clk_disable_unprepare(cdns_uart_data->uartclk); 1644 1744 clk_disable_unprepare(cdns_uart_data->pclk); 1645 1745 pm_runtime_disable(&pdev->dev); ··· 1648 1756 console_port = NULL; 1649 1757 #endif 1650 1758 1651 - /* If this is last instance major number should be initialized 
*/ 1652 - mutex_lock(&bitmap_lock); 1653 - if (bitmap_empty(bitmap, MAX_UART_INSTANCES)) 1654 - uartps_major = 0; 1655 - mutex_unlock(&bitmap_lock); 1656 - 1657 - uart_unregister_driver(cdns_uart_data->cdns_uart_driver); 1759 + if (!--instances) 1760 + uart_unregister_driver(cdns_uart_data->cdns_uart_driver); 1658 1761 return rc; 1659 1762 } 1660 1763
+2
drivers/tty/sysrq.c
··· 74 74 return 1; 75 75 return sysrq_enabled; 76 76 } 77 + EXPORT_SYMBOL_GPL(sysrq_mask); 77 78 78 79 /* 79 80 * A value of 1 means 'all', other nonzero values are an op mask: ··· 1059 1058 1060 1059 return 0; 1061 1060 } 1061 + EXPORT_SYMBOL_GPL(sysrq_toggle_support); 1062 1062 1063 1063 static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p, 1064 1064 struct sysrq_key_op *remove_op_p)
+4 -3
drivers/tty/vt/vt.c
··· 81 81 #include <linux/errno.h> 82 82 #include <linux/kd.h> 83 83 #include <linux/slab.h> 84 + #include <linux/vmalloc.h> 84 85 #include <linux/major.h> 85 86 #include <linux/mm.h> 86 87 #include <linux/console.h> ··· 351 350 /* allocate everything in one go */ 352 351 memsize = cols * rows * sizeof(char32_t); 353 352 memsize += rows * sizeof(char32_t *); 354 - p = kmalloc(memsize, GFP_KERNEL); 353 + p = vmalloc(memsize); 355 354 if (!p) 356 355 return NULL; 357 356 ··· 367 366 368 367 static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr) 369 368 { 370 - kfree(vc->vc_uni_screen); 369 + vfree(vc->vc_uni_screen); 371 370 vc->vc_uni_screen = new_uniscr; 372 371 } 373 372 ··· 1207 1206 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows) 1208 1207 return 0; 1209 1208 1210 - if (new_screen_size > (4 << 20)) 1209 + if (new_screen_size > KMALLOC_MAX_SIZE) 1211 1210 return -EINVAL; 1212 1211 newscreen = kzalloc(new_screen_size, GFP_USER); 1213 1212 if (!newscreen)
+31 -5
drivers/usb/class/cdc-acm.c
··· 412 412 413 413 exit: 414 414 retval = usb_submit_urb(urb, GFP_ATOMIC); 415 - if (retval && retval != -EPERM) 415 + if (retval && retval != -EPERM && retval != -ENODEV) 416 416 dev_err(&acm->control->dev, 417 417 "%s - usb_submit_urb failed: %d\n", __func__, retval); 418 + else 419 + dev_vdbg(&acm->control->dev, 420 + "control resubmission terminated %d\n", retval); 418 421 } 419 422 420 423 static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags) ··· 433 430 dev_err(&acm->data->dev, 434 431 "urb %d failed submission with %d\n", 435 432 index, res); 433 + } else { 434 + dev_vdbg(&acm->data->dev, "intended failure %d\n", res); 436 435 } 437 436 set_bit(index, &acm->read_urbs_free); 438 437 return res; ··· 476 471 int status = urb->status; 477 472 bool stopped = false; 478 473 bool stalled = false; 474 + bool cooldown = false; 479 475 480 476 dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n", 481 477 rb->index, urb->actual_length, status); ··· 503 497 __func__, status); 504 498 stopped = true; 505 499 break; 500 + case -EOVERFLOW: 501 + case -EPROTO: 502 + dev_dbg(&acm->data->dev, 503 + "%s - cooling babbling device\n", __func__); 504 + usb_mark_last_busy(acm->dev); 505 + set_bit(rb->index, &acm->urbs_in_error_delay); 506 + cooldown = true; 507 + break; 506 508 default: 507 509 dev_dbg(&acm->data->dev, 508 510 "%s - nonzero urb status received: %d\n", ··· 532 518 */ 533 519 smp_mb__after_atomic(); 534 520 535 - if (stopped || stalled) { 521 + if (stopped || stalled || cooldown) { 536 522 if (stalled) 537 523 schedule_work(&acm->work); 524 + else if (cooldown) 525 + schedule_delayed_work(&acm->dwork, HZ / 2); 538 526 return; 539 527 } 540 528 ··· 573 557 struct acm *acm = container_of(work, struct acm, work); 574 558 575 559 if (test_bit(EVENT_RX_STALL, &acm->flags)) { 576 - if (!(usb_autopm_get_interface(acm->data))) { 560 + smp_mb(); /* against acm_suspend() */ 561 + if (!acm->susp_count) { 577 562 for (i = 0; i < acm->rx_buflimit; 
i++) 578 563 usb_kill_urb(acm->read_urbs[i]); 579 564 usb_clear_halt(acm->dev, acm->in); 580 565 acm_submit_read_urbs(acm, GFP_KERNEL); 581 - usb_autopm_put_interface(acm->data); 566 + clear_bit(EVENT_RX_STALL, &acm->flags); 582 567 } 583 - clear_bit(EVENT_RX_STALL, &acm->flags); 568 + } 569 + 570 + if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) { 571 + for (i = 0; i < ACM_NR; i++) 572 + if (test_and_clear_bit(i, &acm->urbs_in_error_delay)) 573 + acm_submit_read_urb(acm, i, GFP_NOIO); 584 574 } 585 575 586 576 if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags)) ··· 1355 1333 acm->readsize = readsize; 1356 1334 acm->rx_buflimit = num_rx_buf; 1357 1335 INIT_WORK(&acm->work, acm_softint); 1336 + INIT_DELAYED_WORK(&acm->dwork, acm_softint); 1358 1337 init_waitqueue_head(&acm->wioctl); 1359 1338 spin_lock_init(&acm->write_lock); 1360 1339 spin_lock_init(&acm->read_lock); ··· 1565 1542 1566 1543 acm_kill_urbs(acm); 1567 1544 cancel_work_sync(&acm->work); 1545 + cancel_delayed_work_sync(&acm->dwork); 1568 1546 1569 1547 tty_unregister_device(acm_tty_driver, acm->minor); 1570 1548 ··· 1608 1584 1609 1585 acm_kill_urbs(acm); 1610 1586 cancel_work_sync(&acm->work); 1587 + cancel_delayed_work_sync(&acm->dwork); 1588 + acm->urbs_in_error_delay = 0; 1611 1589 1612 1590 return 0; 1613 1591 }
+4 -1
drivers/usb/class/cdc-acm.h
··· 109 109 # define EVENT_TTY_WAKEUP 0 110 110 # define EVENT_RX_STALL 1 111 111 # define ACM_THROTTLED 2 112 + # define ACM_ERROR_DELAY 3 113 + unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */ 112 114 struct usb_cdc_line_coding line; /* bits, stop, parity */ 113 - struct work_struct work; /* work queue entry for line discipline waking up */ 115 + struct work_struct work; /* work queue entry for various purposes*/ 116 + struct delayed_work dwork; /* for cool downs needed in error recovery */ 114 117 unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ 115 118 unsigned int ctrlout; /* output control lines (DTR, RTS) */ 116 119 struct async_icount iocount; /* counters for control line changes */
+15 -3
drivers/usb/core/hub.c
··· 1223 1223 #ifdef CONFIG_PM 1224 1224 udev->reset_resume = 1; 1225 1225 #endif 1226 + /* Don't set the change_bits when the device 1227 + * was powered off. 1228 + */ 1229 + if (test_bit(port1, hub->power_bits)) 1230 + set_bit(port1, hub->change_bits); 1226 1231 1227 1232 } else { 1228 1233 /* The power session is gone; tell hub_wq */ ··· 2728 2723 { 2729 2724 int old_scheme_first_port = 2730 2725 port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME; 2731 - int quick_enumeration = (udev->speed == USB_SPEED_HIGH); 2732 2726 2733 2727 if (udev->speed >= USB_SPEED_SUPER) 2734 2728 return false; 2735 2729 2736 - return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first 2737 - || quick_enumeration); 2730 + return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first); 2738 2731 } 2739 2732 2740 2733 /* Is a USB 3.0 port in the Inactive or Compliance Mode state? ··· 3091 3088 if (portchange & USB_PORT_STAT_C_ENABLE) 3092 3089 usb_clear_port_feature(hub->hdev, port1, 3093 3090 USB_PORT_FEAT_C_ENABLE); 3091 + 3092 + /* 3093 + * Whatever made this reset-resume necessary may have 3094 + * turned on the port1 bit in hub->change_bits. But after 3095 + * a successful reset-resume we want the bit to be clear; 3096 + * if it was on it would indicate that something happened 3097 + * following the reset-resume. 3098 + */ 3099 + clear_bit(port1, hub->change_bits); 3094 3100 } 3095 3101 3096 3102 return status;
+8 -1
drivers/usb/core/message.c
··· 589 589 int i, retval; 590 590 591 591 spin_lock_irqsave(&io->lock, flags); 592 - if (io->status) { 592 + if (io->status || io->count == 0) { 593 593 spin_unlock_irqrestore(&io->lock, flags); 594 594 return; 595 595 } 596 596 /* shut everything down */ 597 597 io->status = -ECONNRESET; 598 + io->count++; /* Keep the request alive until we're done */ 598 599 spin_unlock_irqrestore(&io->lock, flags); 599 600 600 601 for (i = io->entries - 1; i >= 0; --i) { ··· 609 608 dev_warn(&io->dev->dev, "%s, unlink --> %d\n", 610 609 __func__, retval); 611 610 } 611 + 612 + spin_lock_irqsave(&io->lock, flags); 613 + io->count--; 614 + if (!io->count) 615 + complete(&io->complete); 616 + spin_unlock_irqrestore(&io->lock, flags); 612 617 } 613 618 EXPORT_SYMBOL_GPL(usb_sg_cancel); 614 619
+4
drivers/usb/core/quirks.c
··· 430 430 /* Corsair K70 LUX */ 431 431 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, 432 432 433 + /* Corsair K70 RGB RAPDIFIRE */ 434 + { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT | 435 + USB_QUIRK_DELAY_CTRL_MSG }, 436 + 433 437 /* MIDI keyboard WORLDE MINI */ 434 438 { USB_DEVICE(0x1c75, 0x0204), .driver_info = 435 439 USB_QUIRK_CONFIG_INTF_STRINGS },
+6 -2
drivers/usb/dwc3/core.h
··· 307 307 308 308 /* Global TX Fifo Size Register */ 309 309 #define DWC31_GTXFIFOSIZ_TXFRAMNUM BIT(15) /* DWC_usb31 only */ 310 - #define DWC31_GTXFIFOSIZ_TXFDEF(n) ((n) & 0x7fff) /* DWC_usb31 only */ 311 - #define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff) 310 + #define DWC31_GTXFIFOSIZ_TXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ 311 + #define DWC3_GTXFIFOSIZ_TXFDEP(n) ((n) & 0xffff) 312 312 #define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) 313 + 314 + /* Global RX Fifo Size Register */ 315 + #define DWC31_GRXFIFOSIZ_RXFDEP(n) ((n) & 0x7fff) /* DWC_usb31 only */ 316 + #define DWC3_GRXFIFOSIZ_RXFDEP(n) ((n) & 0xffff) 313 317 314 318 /* Global Event Size Registers */ 315 319 #define DWC3_GEVNTSIZ_INTMASK BIT(31)
+47 -29
drivers/usb/dwc3/gadget.c
··· 1728 1728 u32 reg; 1729 1729 1730 1730 u8 link_state; 1731 - u8 speed; 1732 1731 1733 1732 /* 1734 1733 * According to the Databook Remote wakeup request should ··· 1737 1738 */ 1738 1739 reg = dwc3_readl(dwc->regs, DWC3_DSTS); 1739 1740 1740 - speed = reg & DWC3_DSTS_CONNECTSPD; 1741 - if ((speed == DWC3_DSTS_SUPERSPEED) || 1742 - (speed == DWC3_DSTS_SUPERSPEED_PLUS)) 1743 - return 0; 1744 - 1745 1741 link_state = DWC3_DSTS_USBLNKST(reg); 1746 1742 1747 1743 switch (link_state) { 1744 + case DWC3_LINK_STATE_RESET: 1748 1745 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */ 1749 1746 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */ 1747 + case DWC3_LINK_STATE_RESUME: 1750 1748 break; 1751 1749 default: 1752 1750 return -EINVAL; ··· 2223 2227 { 2224 2228 struct dwc3 *dwc = dep->dwc; 2225 2229 int mdwidth; 2226 - int kbytes; 2227 2230 int size; 2228 2231 2229 2232 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); ··· 2231 2236 2232 2237 size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1)); 2233 2238 if (dwc3_is_usb31(dwc)) 2234 - size = DWC31_GTXFIFOSIZ_TXFDEF(size); 2239 + size = DWC31_GTXFIFOSIZ_TXFDEP(size); 2235 2240 else 2236 - size = DWC3_GTXFIFOSIZ_TXFDEF(size); 2241 + size = DWC3_GTXFIFOSIZ_TXFDEP(size); 2237 2242 2238 2243 /* FIFO Depth is in MDWDITH bytes. Multiply */ 2239 2244 size *= mdwidth; 2240 2245 2241 - kbytes = size / 1024; 2242 - if (kbytes == 0) 2243 - kbytes = 1; 2244 - 2245 2246 /* 2246 - * FIFO sizes account an extra MDWIDTH * (kbytes + 1) bytes for 2247 - * internal overhead. We don't really know how these are used, 2248 - * but documentation say it exists. 2247 + * To meet performance requirement, a minimum TxFIFO size of 3x 2248 + * MaxPacketSize is recommended for endpoints that support burst and a 2249 + * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't 2250 + * support burst. Use those numbers and we can calculate the max packet 2251 + * limit as below. 
2249 2252 */ 2250 - size -= mdwidth * (kbytes + 1); 2251 - size /= kbytes; 2253 + if (dwc->maximum_speed >= USB_SPEED_SUPER) 2254 + size /= 3; 2255 + else 2256 + size /= 2; 2252 2257 2253 2258 usb_ep_set_maxpacket_limit(&dep->endpoint, size); 2254 2259 ··· 2266 2271 static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep) 2267 2272 { 2268 2273 struct dwc3 *dwc = dep->dwc; 2274 + int mdwidth; 2275 + int size; 2269 2276 2270 - usb_ep_set_maxpacket_limit(&dep->endpoint, 1024); 2277 + mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 2278 + 2279 + /* MDWIDTH is represented in bits, convert to bytes */ 2280 + mdwidth /= 8; 2281 + 2282 + /* All OUT endpoints share a single RxFIFO space */ 2283 + size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0)); 2284 + if (dwc3_is_usb31(dwc)) 2285 + size = DWC31_GRXFIFOSIZ_RXFDEP(size); 2286 + else 2287 + size = DWC3_GRXFIFOSIZ_RXFDEP(size); 2288 + 2289 + /* FIFO depth is in MDWDITH bytes */ 2290 + size *= mdwidth; 2291 + 2292 + /* 2293 + * To meet performance requirement, a minimum recommended RxFIFO size 2294 + * is defined as follow: 2295 + * RxFIFO size >= (3 x MaxPacketSize) + 2296 + * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin) 2297 + * 2298 + * Then calculate the max packet limit as below. 2299 + */ 2300 + size -= (3 * 8) + 16; 2301 + if (size < 0) 2302 + size = 0; 2303 + else 2304 + size /= 3; 2305 + 2306 + usb_ep_set_maxpacket_limit(&dep->endpoint, size); 2271 2307 dep->endpoint.max_streams = 15; 2272 2308 dep->endpoint.ops = &dwc3_gadget_ep_ops; 2273 2309 list_add_tail(&dep->endpoint.ep_list, ··· 2510 2484 2511 2485 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) 2512 2486 { 2513 - /* 2514 - * For OUT direction, host may send less than the setup 2515 - * length. Return true for all OUT requests. 
2516 - */ 2517 - if (!req->direction) 2518 - return true; 2519 - 2520 - return req->request.actual == req->request.length; 2487 + return req->num_pending_sgs == 0; 2521 2488 } 2522 2489 2523 2490 static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, ··· 2534 2515 2535 2516 req->request.actual = req->request.length - req->remaining; 2536 2517 2537 - if (!dwc3_gadget_ep_request_completed(req) || 2538 - req->num_pending_sgs) { 2518 + if (!dwc3_gadget_ep_request_completed(req)) { 2539 2519 __dwc3_gadget_kick_transfer(dep); 2540 2520 goto out; 2541 2521 }
+4 -4
drivers/usb/early/xhci-dbc.c
··· 728 728 case COMP_USB_TRANSACTION_ERROR: 729 729 case COMP_STALL_ERROR: 730 730 default: 731 - if (ep_id == XDBC_EPID_OUT) 731 + if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) 732 732 xdbc.flags |= XDBC_FLAGS_OUT_STALL; 733 - if (ep_id == XDBC_EPID_IN) 733 + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) 734 734 xdbc.flags |= XDBC_FLAGS_IN_STALL; 735 735 736 736 xdbc_trace("endpoint %d stalled\n", ep_id); 737 737 break; 738 738 } 739 739 740 - if (ep_id == XDBC_EPID_IN) { 740 + if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) { 741 741 xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS; 742 742 xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true); 743 - } else if (ep_id == XDBC_EPID_OUT) { 743 + } else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) { 744 744 xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS; 745 745 } else { 746 746 xdbc_trace("invalid endpoint id %d\n", ep_id);
+16 -2
drivers/usb/early/xhci-dbc.h
··· 120 120 u32 cycle_state; 121 121 }; 122 122 123 - #define XDBC_EPID_OUT 2 124 - #define XDBC_EPID_IN 3 123 + /* 124 + * These are the "Endpoint ID" (also known as "Context Index") values for the 125 + * OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data 126 + * structure. 127 + * According to the "eXtensible Host Controller Interface for Universal Serial 128 + * Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer 129 + * Rings", these should be 0 and 1, and those are the values AMD machines give 130 + * you; but Intel machines seem to use the formula from section "4.5.1 Device 131 + * Context Index", which is supposed to be used for the Device Context only. 132 + * Luckily the values from Intel don't overlap with those from AMD, so we can 133 + * just test for both. 134 + */ 135 + #define XDBC_EPID_OUT 0 136 + #define XDBC_EPID_IN 1 137 + #define XDBC_EPID_OUT_INTEL 2 138 + #define XDBC_EPID_IN_INTEL 3 125 139 126 140 struct xdbc_state { 127 141 u16 vendor;
+4
drivers/usb/gadget/function/f_fs.c
··· 1813 1813 ffs->state = FFS_READ_DESCRIPTORS; 1814 1814 ffs->setup_state = FFS_NO_SETUP; 1815 1815 ffs->flags = 0; 1816 + 1817 + ffs->ms_os_descs_ext_prop_count = 0; 1818 + ffs->ms_os_descs_ext_prop_name_len = 0; 1819 + ffs->ms_os_descs_ext_prop_data_len = 0; 1816 1820 } 1817 1821 1818 1822
+42 -29
drivers/usb/gadget/legacy/raw_gadget.c
··· 81 81 static struct usb_raw_event *raw_event_queue_fetch( 82 82 struct raw_event_queue *queue) 83 83 { 84 + int ret; 84 85 unsigned long flags; 85 86 struct usb_raw_event *event; 86 87 ··· 90 89 * there's at least one event queued by decrementing the semaphore, 91 90 * and then take the lock to protect queue struct fields. 92 91 */ 93 - if (down_interruptible(&queue->sema)) 94 - return NULL; 92 + ret = down_interruptible(&queue->sema); 93 + if (ret) 94 + return ERR_PTR(ret); 95 95 spin_lock_irqsave(&queue->lock, flags); 96 - if (WARN_ON(!queue->size)) 97 - return NULL; 96 + /* 97 + * queue->size must have the same value as queue->sema counter (before 98 + * the down_interruptible() call above), so this check is a fail-safe. 99 + */ 100 + if (WARN_ON(!queue->size)) { 101 + spin_unlock_irqrestore(&queue->lock, flags); 102 + return ERR_PTR(-ENODEV); 103 + } 98 104 event = queue->events[0]; 99 105 queue->size--; 100 106 memmove(&queue->events[0], &queue->events[1], ··· 400 392 char *udc_device_name; 401 393 unsigned long flags; 402 394 403 - ret = copy_from_user(&arg, (void __user *)value, sizeof(arg)); 404 - if (ret) 405 - return ret; 395 + if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) 396 + return -EFAULT; 406 397 407 398 switch (arg.speed) { 408 399 case USB_SPEED_UNKNOWN: ··· 508 501 509 502 static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value) 510 503 { 511 - int ret = 0; 512 504 struct usb_raw_event arg; 513 505 unsigned long flags; 514 506 struct usb_raw_event *event; 515 507 uint32_t length; 516 508 517 - ret = copy_from_user(&arg, (void __user *)value, sizeof(arg)); 518 - if (ret) 519 - return ret; 509 + if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) 510 + return -EFAULT; 520 511 521 512 spin_lock_irqsave(&dev->lock, flags); 522 513 if (dev->state != STATE_DEV_RUNNING) { ··· 530 525 spin_unlock_irqrestore(&dev->lock, flags); 531 526 532 527 event = raw_event_queue_fetch(&dev->queue); 533 - if (!event) { 528 
+ if (PTR_ERR(event) == -EINTR) { 534 529 dev_dbg(&dev->gadget->dev, "event fetching interrupted\n"); 535 530 return -EINTR; 536 531 } 532 + if (IS_ERR(event)) { 533 + dev_err(&dev->gadget->dev, "failed to fetch event\n"); 534 + spin_lock_irqsave(&dev->lock, flags); 535 + dev->state = STATE_DEV_FAILED; 536 + spin_unlock_irqrestore(&dev->lock, flags); 537 + return -ENODEV; 538 + } 537 539 length = min(arg.length, event->length); 538 - ret = copy_to_user((void __user *)value, event, 539 - sizeof(*event) + length); 540 - return ret; 540 + if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) 541 + return -EFAULT; 542 + 543 + return 0; 541 544 } 542 545 543 546 static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr, 544 547 bool get_from_user) 545 548 { 546 - int ret; 547 549 void *data; 548 550 549 - ret = copy_from_user(io, ptr, sizeof(*io)); 550 - if (ret) 551 - return ERR_PTR(ret); 551 + if (copy_from_user(io, ptr, sizeof(*io))) 552 + return ERR_PTR(-EFAULT); 552 553 if (io->ep >= USB_RAW_MAX_ENDPOINTS) 553 554 return ERR_PTR(-EINVAL); 554 555 if (!usb_raw_io_flags_valid(io->flags)) ··· 669 658 if (IS_ERR(data)) 670 659 return PTR_ERR(data); 671 660 ret = raw_process_ep0_io(dev, &io, data, false); 672 - if (ret < 0) { 673 - kfree(data); 674 - return ret; 675 - } 661 + if (ret) 662 + goto free; 663 + 676 664 length = min(io.length, (unsigned int)ret); 677 - ret = copy_to_user((void __user *)(value + sizeof(io)), data, length); 665 + if (copy_to_user((void __user *)(value + sizeof(io)), data, length)) 666 + ret = -EFAULT; 667 + free: 678 668 kfree(data); 679 669 return ret; 680 670 } ··· 964 952 if (IS_ERR(data)) 965 953 return PTR_ERR(data); 966 954 ret = raw_process_ep_io(dev, &io, data, false); 967 - if (ret < 0) { 968 - kfree(data); 969 - return ret; 970 - } 955 + if (ret) 956 + goto free; 957 + 971 958 length = min(io.length, (unsigned int)ret); 972 - ret = copy_to_user((void __user *)(value + sizeof(io)), data, length); 959 
+ if (copy_to_user((void __user *)(value + sizeof(io)), data, length)) 960 + ret = -EFAULT; 961 + free: 973 962 kfree(data); 974 963 return ret; 975 964 }
+2 -2
drivers/usb/gadget/udc/atmel_usba_udc.c
··· 1951 1951 usba_start(udc); 1952 1952 } else { 1953 1953 udc->suspended = false; 1954 - usba_stop(udc); 1955 - 1956 1954 if (udc->driver->disconnect) 1957 1955 udc->driver->disconnect(&udc->gadget); 1956 + 1957 + usba_stop(udc); 1958 1958 } 1959 1959 udc->vbus_prev = vbus; 1960 1960 }
+1 -1
drivers/usb/gadget/udc/bdc/bdc_ep.c
··· 540 540 { 541 541 struct bdc *bdc = ep->bdc; 542 542 543 - if (req == NULL || &req->queue == NULL || &req->usb_req == NULL) 543 + if (req == NULL) 544 544 return; 545 545 546 546 dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
+9
drivers/usb/host/xhci-hub.c
··· 1571 1571 } 1572 1572 if ((temp & PORT_RC)) 1573 1573 reset_change = true; 1574 + if (temp & PORT_OC) 1575 + status = 1; 1574 1576 } 1575 1577 if (!status && !reset_change) { 1576 1578 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); ··· 1637 1635 xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n", 1638 1636 port_index); 1639 1637 goto retry; 1638 + } 1639 + /* bail out if port detected a over-current condition */ 1640 + if (t1 & PORT_OC) { 1641 + bus_state->bus_suspended = 0; 1642 + spin_unlock_irqrestore(&xhci->lock, flags); 1643 + xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n"); 1644 + return -EBUSY; 1640 1645 } 1641 1646 /* suspend ports in U0, or bail out for new connect changes */ 1642 1647 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
+40 -6
drivers/usb/host/xhci-ring.c
··· 547 547 stream_id); 548 548 return; 549 549 } 550 + /* 551 + * A cancelled TD can complete with a stall if HW cached the trb. 552 + * In this case driver can't find cur_td, but if the ring is empty we 553 + * can move the dequeue pointer to the current enqueue position. 554 + */ 555 + if (!cur_td) { 556 + if (list_empty(&ep_ring->td_list)) { 557 + state->new_deq_seg = ep_ring->enq_seg; 558 + state->new_deq_ptr = ep_ring->enqueue; 559 + state->new_cycle_state = ep_ring->cycle_state; 560 + goto done; 561 + } else { 562 + xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n"); 563 + return; 564 + } 565 + } 566 + 550 567 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 551 568 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 552 569 "Finding endpoint context"); ··· 609 592 state->new_deq_seg = new_seg; 610 593 state->new_deq_ptr = new_deq; 611 594 595 + done: 612 596 /* Don't update the ring cycle state for the producer (us). */ 613 597 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, 614 598 "Cycle state = 0x%x", state->new_cycle_state); ··· 1874 1856 1875 1857 if (reset_type == EP_HARD_RESET) { 1876 1858 ep->ep_state |= EP_HARD_CLEAR_TOGGLE; 1877 - xhci_cleanup_stalled_ring(xhci, ep_index, stream_id, td); 1878 - xhci_clear_hub_tt_buffer(xhci, td, ep); 1859 + xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id, 1860 + td); 1879 1861 } 1880 1862 xhci_ring_cmd_db(xhci); 1881 1863 } ··· 1996 1978 if (trb_comp_code == COMP_STALL_ERROR || 1997 1979 xhci_requires_manual_halt_cleanup(xhci, ep_ctx, 1998 1980 trb_comp_code)) { 1999 - /* Issue a reset endpoint command to clear the host side 2000 - * halt, followed by a set dequeue command to move the 2001 - * dequeue pointer past the TD. 2002 - * The class driver clears the device side halt later. 1981 + /* 1982 + * xhci internal endpoint state will go to a "halt" state for 1983 + * any stall, including default control pipe protocol stall. 
1984 + * To clear the host side halt we need to issue a reset endpoint 1985 + * command, followed by a set dequeue command to move past the 1986 + * TD. 1987 + * Class drivers clear the device side halt from a functional 1988 + * stall later. Hub TT buffer should only be cleared for FS/LS 1989 + * devices behind HS hubs for functional stalls. 2003 1990 */ 1991 + if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR)) 1992 + xhci_clear_hub_tt_buffer(xhci, td, ep); 2004 1993 xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 2005 1994 ep_ring->stream_id, td, EP_HARD_RESET); 2006 1995 } else { ··· 2563 2538 ep->skip = false; 2564 2539 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n", 2565 2540 slot_id, ep_index); 2541 + } 2542 + if (trb_comp_code == COMP_STALL_ERROR || 2543 + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, 2544 + trb_comp_code)) { 2545 + xhci_cleanup_halted_endpoint(xhci, slot_id, 2546 + ep_index, 2547 + ep_ring->stream_id, 2548 + NULL, 2549 + EP_HARD_RESET); 2566 2550 } 2567 2551 goto cleanup; 2568 2552 }
+7 -7
drivers/usb/host/xhci.c
··· 3031 3031 added_ctxs, added_ctxs); 3032 3032 } 3033 3033 3034 - void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, 3035 - unsigned int stream_id, struct xhci_td *td) 3034 + void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, 3035 + unsigned int ep_index, unsigned int stream_id, 3036 + struct xhci_td *td) 3036 3037 { 3037 3038 struct xhci_dequeue_state deq_state; 3038 - struct usb_device *udev = td->urb->dev; 3039 3039 3040 3040 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, 3041 3041 "Cleaning up stalled endpoint ring"); 3042 3042 /* We need to move the HW's dequeue pointer past this TD, 3043 3043 * or it will attempt to resend it on the next doorbell ring. 3044 3044 */ 3045 - xhci_find_new_dequeue_state(xhci, udev->slot_id, 3046 - ep_index, stream_id, td, &deq_state); 3045 + xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td, 3046 + &deq_state); 3047 3047 3048 3048 if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) 3049 3049 return; ··· 3054 3054 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 3055 3055 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, 3056 3056 "Queueing new dequeue state"); 3057 - xhci_queue_new_dequeue_state(xhci, udev->slot_id, 3057 + xhci_queue_new_dequeue_state(xhci, slot_id, 3058 3058 ep_index, &deq_state); 3059 3059 } else { 3060 3060 /* Better hope no one uses the input context between now and the ··· 3065 3065 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3066 3066 "Setting up input context for " 3067 3067 "configure endpoint command"); 3068 - xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, 3068 + xhci_setup_input_ctx_for_quirk(xhci, slot_id, 3069 3069 ep_index, &deq_state); 3070 3070 } 3071 3071 }
+3 -2
drivers/usb/host/xhci.h
··· 2116 2116 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, 2117 2117 unsigned int slot_id, unsigned int ep_index, 2118 2118 struct xhci_dequeue_state *deq_state); 2119 - void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, 2120 - unsigned int stream_id, struct xhci_td *td); 2119 + void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id, 2120 + unsigned int ep_index, unsigned int stream_id, 2121 + struct xhci_td *td); 2121 2122 void xhci_stop_endpoint_command_watchdog(struct timer_list *t); 2122 2123 void xhci_handle_command_timeout(struct work_struct *work); 2123 2124
+10 -10
drivers/usb/misc/sisusbvga/sisusb.c
··· 1199 1199 /* High level: Gfx (indexed) register access */ 1200 1200 1201 1201 #ifdef CONFIG_USB_SISUSBVGA_CON 1202 - int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data) 1202 + int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data) 1203 1203 { 1204 1204 return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); 1205 1205 } 1206 1206 1207 - int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data) 1207 + int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data) 1208 1208 { 1209 1209 return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data); 1210 1210 } 1211 1211 #endif 1212 1212 1213 - int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, 1213 + int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, 1214 1214 u8 index, u8 data) 1215 1215 { 1216 1216 int ret; ··· 1220 1220 return ret; 1221 1221 } 1222 1222 1223 - int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, 1223 + int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, 1224 1224 u8 index, u8 *data) 1225 1225 { 1226 1226 int ret; ··· 1230 1230 return ret; 1231 1231 } 1232 1232 1233 - int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx, 1233 + int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx, 1234 1234 u8 myand, u8 myor) 1235 1235 { 1236 1236 int ret; ··· 1245 1245 } 1246 1246 1247 1247 static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb, 1248 - int port, u8 idx, u8 data, u8 mask) 1248 + u32 port, u8 idx, u8 data, u8 mask) 1249 1249 { 1250 1250 int ret; 1251 1251 u8 tmp; ··· 1258 1258 return ret; 1259 1259 } 1260 1260 1261 - int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, 1261 + int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, 1262 1262 u8 index, u8 myor) 1263 1263 { 1264 1264 return sisusb_setidxregandor(sisusb, port, index, 0xff, myor); 1265 1265 } 1266 1266 1267 - int sisusb_setidxregand(struct sisusb_usb_data 
*sisusb, int port, 1267 + int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, 1268 1268 u8 idx, u8 myand) 1269 1269 { 1270 1270 return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00); ··· 2785 2785 static int sisusb_handle_command(struct sisusb_usb_data *sisusb, 2786 2786 struct sisusb_command *y, unsigned long arg) 2787 2787 { 2788 - int retval, port, length; 2789 - u32 address; 2788 + int retval, length; 2789 + u32 port, address; 2790 2790 2791 2791 /* All our commands require the device 2792 2792 * to be initialized.
+7 -7
drivers/usb/misc/sisusbvga/sisusb_init.h
··· 812 812 int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); 813 813 int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo); 814 814 815 - extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data); 816 - extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data); 817 - extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, 815 + extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data); 816 + extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data); 817 + extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port, 818 818 u8 index, u8 data); 819 - extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, 819 + extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port, 820 820 u8 index, u8 * data); 821 - extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, 821 + extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, 822 822 u8 idx, u8 myand, u8 myor); 823 - extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, 823 + extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port, 824 824 u8 index, u8 myor); 825 - extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, 825 + extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port, 826 826 u8 idx, u8 myand); 827 827 828 828 void sisusb_delete(struct kref *kref);
+43 -3
drivers/usb/storage/uas.c
··· 81 81 static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix, 82 82 int status); 83 83 84 + /* 85 + * This driver needs its own workqueue, as we need to control memory allocation. 86 + * 87 + * In the course of error handling and power management uas_wait_for_pending_cmnds() 88 + * needs to flush pending work items. In these contexts we cannot allocate memory 89 + * by doing block IO as we would deadlock. For the same reason we cannot wait 90 + * for anything allocating memory not heeding these constraints. 91 + * 92 + * So we have to control all work items that can be on the workqueue we flush. 93 + * Hence we cannot share a queue and need our own. 94 + */ 95 + static struct workqueue_struct *workqueue; 96 + 84 97 static void uas_do_work(struct work_struct *work) 85 98 { 86 99 struct uas_dev_info *devinfo = ··· 122 109 if (!err) 123 110 cmdinfo->state &= ~IS_IN_WORK_LIST; 124 111 else 125 - schedule_work(&devinfo->work); 112 + queue_work(workqueue, &devinfo->work); 126 113 } 127 114 out: 128 115 spin_unlock_irqrestore(&devinfo->lock, flags); ··· 147 134 148 135 lockdep_assert_held(&devinfo->lock); 149 136 cmdinfo->state |= IS_IN_WORK_LIST; 150 - schedule_work(&devinfo->work); 137 + queue_work(workqueue, &devinfo->work); 151 138 } 152 139 153 140 static void uas_zap_pending(struct uas_dev_info *devinfo, int result) ··· 202 189 { 203 190 struct uas_cmd_info *ci = (void *)&cmnd->SCp; 204 191 struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp; 192 + 193 + if (status == -ENODEV) /* too late */ 194 + return; 205 195 206 196 scmd_printk(KERN_INFO, cmnd, 207 197 "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ", ··· 1242 1226 .id_table = uas_usb_ids, 1243 1227 }; 1244 1228 1245 - module_usb_driver(uas_driver); 1229 + static int __init uas_init(void) 1230 + { 1231 + int rv; 1232 + 1233 + workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0); 1234 + if (!workqueue) 1235 + return -ENOMEM; 1236 + 1237 + rv = usb_register(&uas_driver); 1238 + if (rv) { 
1239 + destroy_workqueue(workqueue); 1240 + return -ENOMEM; 1241 + } 1242 + 1243 + return 0; 1244 + } 1245 + 1246 + static void __exit uas_exit(void) 1247 + { 1248 + usb_deregister(&uas_driver); 1249 + destroy_workqueue(workqueue); 1250 + } 1251 + 1252 + module_init(uas_init); 1253 + module_exit(uas_exit); 1246 1254 1247 1255 MODULE_LICENSE("GPL"); 1248 1256 MODULE_IMPORT_NS(USB_STORAGE);
+7
drivers/usb/storage/unusual_devs.h
··· 2323 2323 USB_SC_DEVICE,USB_PR_DEVICE,NULL, 2324 2324 US_FL_MAX_SECTORS_64 ), 2325 2325 2326 + /* Reported by Cyril Roelandt <tipecaml@gmail.com> */ 2327 + UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114, 2328 + "JMicron", 2329 + "USB to ATA/ATAPI Bridge", 2330 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2331 + US_FL_BROKEN_FUA ), 2332 + 2326 2333 /* Reported by Andrey Rahmatullin <wrar@altlinux.org> */ 2327 2334 UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100, 2328 2335 "iRiver",
+4 -1
drivers/usb/typec/bus.c
··· 198 198 const struct typec_altmode * 199 199 typec_altmode_get_partner(struct typec_altmode *adev) 200 200 { 201 - return adev ? &to_altmode(adev)->partner->adev : NULL; 201 + if (!adev || !to_altmode(adev)->partner) 202 + return NULL; 203 + 204 + return &to_altmode(adev)->partner->adev; 202 205 } 203 206 EXPORT_SYMBOL_GPL(typec_altmode_get_partner); 204 207
+2 -2
drivers/usb/typec/mux/pi3usb30532.c
··· 114 114 static int pi3usb30532_probe(struct i2c_client *client) 115 115 { 116 116 struct device *dev = &client->dev; 117 - struct typec_switch_desc sw_desc; 118 - struct typec_mux_desc mux_desc; 117 + struct typec_switch_desc sw_desc = { }; 118 + struct typec_mux_desc mux_desc = { }; 119 119 struct pi3usb30532 *pi; 120 120 int ret; 121 121
+26
drivers/usb/typec/tcpm/tcpm.c
··· 3794 3794 */ 3795 3795 break; 3796 3796 3797 + case PORT_RESET: 3798 + case PORT_RESET_WAIT_OFF: 3799 + /* 3800 + * State set back to default mode once the timer completes. 3801 + * Ignore CC changes here. 3802 + */ 3803 + break; 3804 + 3797 3805 default: 3798 3806 if (tcpm_port_is_disconnected(port)) 3799 3807 tcpm_set_state(port, unattached_state(port), 0); ··· 3863 3855 case SRC_TRY_DEBOUNCE: 3864 3856 /* Do nothing, waiting for sink detection */ 3865 3857 break; 3858 + 3859 + case PORT_RESET: 3860 + case PORT_RESET_WAIT_OFF: 3861 + /* 3862 + * State set back to default mode once the timer completes. 3863 + * Ignore vbus changes here. 3864 + */ 3865 + break; 3866 + 3866 3867 default: 3867 3868 break; 3868 3869 } ··· 3925 3908 case PORT_RESET_WAIT_OFF: 3926 3909 tcpm_set_state(port, tcpm_default_state(port), 0); 3927 3910 break; 3911 + 3928 3912 case SRC_TRY_WAIT: 3929 3913 case SRC_TRY_DEBOUNCE: 3930 3914 /* Do nothing, waiting for sink detection */ 3931 3915 break; 3916 + 3917 + case PORT_RESET: 3918 + /* 3919 + * State set back to default mode once the timer completes. 3920 + * Ignore vbus changes here. 3921 + */ 3922 + break; 3923 + 3932 3924 default: 3933 3925 if (port->pwr_role == TYPEC_SINK && 3934 3926 port->attached)
+5 -5
drivers/vfio/vfio_iommu_type1.c
··· 342 342 vma = find_vma_intersection(mm, vaddr, vaddr + 1); 343 343 344 344 if (vma && vma->vm_flags & VM_PFNMAP) { 345 - *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 346 - if (is_invalid_reserved_pfn(*pfn)) 345 + if (!follow_pfn(vma, vaddr, pfn) && 346 + is_invalid_reserved_pfn(*pfn)) 347 347 ret = 0; 348 348 } 349 349 done: ··· 555 555 continue; 556 556 } 557 557 558 - remote_vaddr = dma->vaddr + iova - dma->iova; 558 + remote_vaddr = dma->vaddr + (iova - dma->iova); 559 559 ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i], 560 560 do_accounting); 561 561 if (ret) ··· 2345 2345 vaddr = dma->vaddr + offset; 2346 2346 2347 2347 if (write) 2348 - *copied = __copy_to_user((void __user *)vaddr, data, 2348 + *copied = copy_to_user((void __user *)vaddr, data, 2349 2349 count) ? 0 : count; 2350 2350 else 2351 - *copied = __copy_from_user(data, (void __user *)vaddr, 2351 + *copied = copy_from_user(data, (void __user *)vaddr, 2352 2352 count) ? 0 : count; 2353 2353 if (kthread) 2354 2354 unuse_mm(mm);
+16 -5
drivers/vhost/vsock.c
··· 181 181 break; 182 182 } 183 183 184 - vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); 185 - added = true; 186 - 187 - /* Deliver to monitoring devices all correctly transmitted 188 - * packets. 184 + /* Deliver to monitoring devices all packets that we 185 + * will transmit. 189 186 */ 190 187 virtio_transport_deliver_tap_pkt(pkt); 188 + 189 + vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len); 190 + added = true; 191 191 192 192 pkt->off += payload_len; 193 193 total_len += payload_len; ··· 196 196 * to send it with the next available buffer. 197 197 */ 198 198 if (pkt->off < pkt->len) { 199 + /* We are queueing the same virtio_vsock_pkt to handle 200 + * the remaining bytes, and we want to deliver it 201 + * to monitoring devices in the next iteration. 202 + */ 203 + pkt->tap_delivered = false; 204 + 199 205 spin_lock_bh(&vsock->send_pkt_list_lock); 200 206 list_add(&pkt->list, &vsock->send_pkt_list); 201 207 spin_unlock_bh(&vsock->send_pkt_list_lock); ··· 548 542 549 543 mutex_unlock(&vq->mutex); 550 544 } 545 + 546 + /* Some packets may have been queued before the device was started, 547 + * let's kick the send worker to send them. 548 + */ 549 + vhost_work_queue(&vsock->dev, &vsock->send_pkt_work); 551 550 552 551 mutex_unlock(&vsock->dev.mutex); 553 552 return 0;
+1 -1
fs/btrfs/backref.c
··· 391 391 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; 392 392 struct rb_node *parent = NULL; 393 393 struct prelim_ref *ref = NULL; 394 - struct prelim_ref target = {0}; 394 + struct prelim_ref target = {}; 395 395 int result; 396 396 397 397 target.parent = bytenr;
+14 -6
fs/btrfs/block-group.c
··· 916 916 path = btrfs_alloc_path(); 917 917 if (!path) { 918 918 ret = -ENOMEM; 919 - goto out; 919 + goto out_put_group; 920 920 } 921 921 922 922 /* ··· 954 954 ret = btrfs_orphan_add(trans, BTRFS_I(inode)); 955 955 if (ret) { 956 956 btrfs_add_delayed_iput(inode); 957 - goto out; 957 + goto out_put_group; 958 958 } 959 959 clear_nlink(inode); 960 960 /* One for the block groups ref */ ··· 977 977 978 978 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1); 979 979 if (ret < 0) 980 - goto out; 980 + goto out_put_group; 981 981 if (ret > 0) 982 982 btrfs_release_path(path); 983 983 if (ret == 0) { 984 984 ret = btrfs_del_item(trans, tree_root, path); 985 985 if (ret) 986 - goto out; 986 + goto out_put_group; 987 987 btrfs_release_path(path); 988 988 } 989 989 ··· 1102 1102 1103 1103 ret = remove_block_group_free_space(trans, block_group); 1104 1104 if (ret) 1105 - goto out; 1105 + goto out_put_group; 1106 1106 1107 - btrfs_put_block_group(block_group); 1107 + /* Once for the block groups rbtree */ 1108 1108 btrfs_put_block_group(block_group); 1109 1109 1110 1110 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); ··· 1127 1127 /* once for the tree */ 1128 1128 free_extent_map(em); 1129 1129 } 1130 + 1131 + out_put_group: 1132 + /* Once for the lookup reference */ 1133 + btrfs_put_block_group(block_group); 1130 1134 out: 1131 1135 if (remove_rsv) 1132 1136 btrfs_delayed_refs_rsv_release(fs_info, 1); ··· 1292 1288 if (ret) 1293 1289 goto err; 1294 1290 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1291 + if (prev_trans) 1292 + btrfs_put_transaction(prev_trans); 1295 1293 1296 1294 return true; 1297 1295 1298 1296 err: 1299 1297 mutex_unlock(&fs_info->unused_bg_unpin_mutex); 1298 + if (prev_trans) 1299 + btrfs_put_transaction(prev_trans); 1300 1300 btrfs_dec_block_group_ro(bg); 1301 1301 return false; 1302 1302 }
+1 -1
fs/btrfs/discard.h
··· 1 - // SPDX-License-Identifier: GPL-2.0 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 3 3 #ifndef BTRFS_DISCARD_H 4 4 #define BTRFS_DISCARD_H
+32 -4
fs/btrfs/disk-io.c
··· 2036 2036 for (i = 0; i < ret; i++) 2037 2037 btrfs_drop_and_free_fs_root(fs_info, gang[i]); 2038 2038 } 2039 - 2040 - if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) 2041 - btrfs_free_log_root_tree(NULL, fs_info); 2042 2039 } 2043 2040 2044 2041 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) ··· 3885 3888 spin_unlock(&fs_info->fs_roots_radix_lock); 3886 3889 3887 3890 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { 3888 - btrfs_free_log(NULL, root); 3891 + ASSERT(root->log_root == NULL); 3889 3892 if (root->reloc_root) { 3890 3893 btrfs_put_root(root->reloc_root); 3891 3894 root->reloc_root = NULL; ··· 4206 4209 4207 4210 down_write(&fs_info->cleanup_work_sem); 4208 4211 up_write(&fs_info->cleanup_work_sem); 4212 + } 4213 + 4214 + static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info) 4215 + { 4216 + struct btrfs_root *gang[8]; 4217 + u64 root_objectid = 0; 4218 + int ret; 4219 + 4220 + spin_lock(&fs_info->fs_roots_radix_lock); 4221 + while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, 4222 + (void **)gang, root_objectid, 4223 + ARRAY_SIZE(gang))) != 0) { 4224 + int i; 4225 + 4226 + for (i = 0; i < ret; i++) 4227 + gang[i] = btrfs_grab_root(gang[i]); 4228 + spin_unlock(&fs_info->fs_roots_radix_lock); 4229 + 4230 + for (i = 0; i < ret; i++) { 4231 + if (!gang[i]) 4232 + continue; 4233 + root_objectid = gang[i]->root_key.objectid; 4234 + btrfs_free_log(NULL, gang[i]); 4235 + btrfs_put_root(gang[i]); 4236 + } 4237 + root_objectid++; 4238 + spin_lock(&fs_info->fs_roots_radix_lock); 4239 + } 4240 + spin_unlock(&fs_info->fs_roots_radix_lock); 4241 + btrfs_free_log_root_tree(NULL, fs_info); 4209 4242 } 4210 4243 4211 4244 static void btrfs_destroy_ordered_extents(struct btrfs_root *root) ··· 4630 4603 btrfs_destroy_delayed_inodes(fs_info); 4631 4604 btrfs_assert_delayed_root_empty(fs_info); 4632 4605 btrfs_destroy_all_delalloc_inodes(fs_info); 4606 + btrfs_drop_all_logs(fs_info); 4633 4607 
mutex_unlock(&fs_info->transaction_kthread_mutex); 4634 4608 4635 4609 return 0;
+1
fs/btrfs/relocation.c
··· 4559 4559 if (IS_ERR(fs_root)) { 4560 4560 err = PTR_ERR(fs_root); 4561 4561 list_add_tail(&reloc_root->root_list, &reloc_roots); 4562 + btrfs_end_transaction(trans); 4562 4563 goto out_unset; 4563 4564 } 4564 4565
+11 -2
fs/btrfs/transaction.c
··· 662 662 } 663 663 664 664 got_it: 665 - btrfs_record_root_in_trans(h, root); 666 - 667 665 if (!current->journal_info) 668 666 current->journal_info = h; 667 + 668 + /* 669 + * btrfs_record_root_in_trans() needs to alloc new extents, and may 670 + * call btrfs_join_transaction() while we're also starting a 671 + * transaction. 672 + * 673 + * Thus it need to be called after current->journal_info initialized, 674 + * or we can deadlock. 675 + */ 676 + btrfs_record_root_in_trans(h, root); 677 + 669 678 return h; 670 679 671 680 join_fail:
+40 -3
fs/btrfs/tree-log.c
··· 4226 4226 const u64 ino = btrfs_ino(inode); 4227 4227 struct btrfs_path *dst_path = NULL; 4228 4228 bool dropped_extents = false; 4229 + u64 truncate_offset = i_size; 4230 + struct extent_buffer *leaf; 4231 + int slot; 4229 4232 int ins_nr = 0; 4230 4233 int start_slot; 4231 4234 int ret; ··· 4243 4240 if (ret < 0) 4244 4241 goto out; 4245 4242 4243 + /* 4244 + * We must check if there is a prealloc extent that starts before the 4245 + * i_size and crosses the i_size boundary. This is to ensure later we 4246 + * truncate down to the end of that extent and not to the i_size, as 4247 + * otherwise we end up losing part of the prealloc extent after a log 4248 + * replay and with an implicit hole if there is another prealloc extent 4249 + * that starts at an offset beyond i_size. 4250 + */ 4251 + ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY); 4252 + if (ret < 0) 4253 + goto out; 4254 + 4255 + if (ret == 0) { 4256 + struct btrfs_file_extent_item *ei; 4257 + 4258 + leaf = path->nodes[0]; 4259 + slot = path->slots[0]; 4260 + ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 4261 + 4262 + if (btrfs_file_extent_type(leaf, ei) == 4263 + BTRFS_FILE_EXTENT_PREALLOC) { 4264 + u64 extent_end; 4265 + 4266 + btrfs_item_key_to_cpu(leaf, &key, slot); 4267 + extent_end = key.offset + 4268 + btrfs_file_extent_num_bytes(leaf, ei); 4269 + 4270 + if (extent_end > i_size) 4271 + truncate_offset = extent_end; 4272 + } 4273 + } else { 4274 + ret = 0; 4275 + } 4276 + 4246 4277 while (true) { 4247 - struct extent_buffer *leaf = path->nodes[0]; 4248 - int slot = path->slots[0]; 4278 + leaf = path->nodes[0]; 4279 + slot = path->slots[0]; 4249 4280 4250 4281 if (slot >= btrfs_header_nritems(leaf)) { 4251 4282 if (ins_nr > 0) { ··· 4317 4280 ret = btrfs_truncate_inode_items(trans, 4318 4281 root->log_root, 4319 4282 &inode->vfs_inode, 4320 - i_size, 4283 + truncate_offset, 4321 4284 BTRFS_EXTENT_DATA_KEY); 4322 4285 } while (ret == -EAGAIN); 4323 4286 if (ret)
+2 -1
fs/cifs/cifsglob.h
··· 1891 1891 /* 1892 1892 * This lock protects the cifs_tcp_ses_list, the list of smb sessions per 1893 1893 * tcp session, and the list of tcon's per smb session. It also protects 1894 - * the reference counters for the server, smb session, and tcon. Finally, 1894 + * the reference counters for the server, smb session, and tcon. It also 1895 + * protects some fields in the TCP_Server_Info struct such as dstaddr. Finally, 1895 1896 * changes to the tcon->tidStatus should be done while holding this lock. 1896 1897 * generally the locks should be taken in order tcp_ses_lock before 1897 1898 * tcon->open_file_lock and that before file->file_info_lock since the
+6
fs/cifs/connect.c
··· 375 375 return rc; 376 376 } 377 377 378 + spin_lock(&cifs_tcp_ses_lock); 378 379 rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr, 379 380 strlen(ipaddr)); 381 + spin_unlock(&cifs_tcp_ses_lock); 380 382 kfree(ipaddr); 381 383 382 384 return !rc ? -1 : 0; ··· 3375 3373 spin_lock(&cifs_tcp_ses_lock); 3376 3374 list_for_each(tmp, &ses->tcon_list) { 3377 3375 tcon = list_entry(tmp, struct cifs_tcon, tcon_list); 3376 + #ifdef CONFIG_CIFS_DFS_UPCALL 3377 + if (tcon->dfs_path) 3378 + continue; 3379 + #endif 3378 3380 if (!match_tcon(tcon, volume_info)) 3379 3381 continue; 3380 3382 ++tcon->tc_count;
+65 -17
fs/cifs/misc.c
··· 1025 1025 } 1026 1026 1027 1027 struct super_cb_data { 1028 - struct TCP_Server_Info *server; 1028 + void *data; 1029 1029 struct super_block *sb; 1030 1030 }; 1031 1031 1032 - static void super_cb(struct super_block *sb, void *arg) 1032 + static void tcp_super_cb(struct super_block *sb, void *arg) 1033 1033 { 1034 - struct super_cb_data *d = arg; 1034 + struct super_cb_data *sd = arg; 1035 + struct TCP_Server_Info *server = sd->data; 1035 1036 struct cifs_sb_info *cifs_sb; 1036 1037 struct cifs_tcon *tcon; 1037 1038 1038 - if (d->sb) 1039 + if (sd->sb) 1039 1040 return; 1040 1041 1041 1042 cifs_sb = CIFS_SB(sb); 1042 1043 tcon = cifs_sb_master_tcon(cifs_sb); 1043 - if (tcon->ses->server == d->server) 1044 - d->sb = sb; 1044 + if (tcon->ses->server == server) 1045 + sd->sb = sb; 1045 1046 } 1046 1047 1047 - struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server) 1048 + static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *), 1049 + void *data) 1048 1050 { 1049 - struct super_cb_data d = { 1050 - .server = server, 1051 + struct super_cb_data sd = { 1052 + .data = data, 1051 1053 .sb = NULL, 1052 1054 }; 1053 1055 1054 - iterate_supers_type(&cifs_fs_type, super_cb, &d); 1056 + iterate_supers_type(&cifs_fs_type, f, &sd); 1055 1057 1056 - if (unlikely(!d.sb)) 1057 - return ERR_PTR(-ENOENT); 1058 + if (!sd.sb) 1059 + return ERR_PTR(-EINVAL); 1058 1060 /* 1059 1061 * Grab an active reference in order to prevent automounts (DFS links) 1060 1062 * of expiring and then freeing up our cifs superblock pointer while 1061 1063 * we're doing failover. 
1062 1064 */ 1063 - cifs_sb_active(d.sb); 1064 - return d.sb; 1065 + cifs_sb_active(sd.sb); 1066 + return sd.sb; 1065 1067 } 1066 1068 1067 - void cifs_put_tcp_super(struct super_block *sb) 1069 + static void __cifs_put_super(struct super_block *sb) 1068 1070 { 1069 1071 if (!IS_ERR_OR_NULL(sb)) 1070 1072 cifs_sb_deactive(sb); 1071 1073 } 1074 + 1075 + struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server) 1076 + { 1077 + return __cifs_get_super(tcp_super_cb, server); 1078 + } 1079 + 1080 + void cifs_put_tcp_super(struct super_block *sb) 1081 + { 1082 + __cifs_put_super(sb); 1083 + } 1084 + 1085 + #ifdef CONFIG_CIFS_DFS_UPCALL 1086 + static void tcon_super_cb(struct super_block *sb, void *arg) 1087 + { 1088 + struct super_cb_data *sd = arg; 1089 + struct cifs_tcon *tcon = sd->data; 1090 + struct cifs_sb_info *cifs_sb; 1091 + 1092 + if (sd->sb) 1093 + return; 1094 + 1095 + cifs_sb = CIFS_SB(sb); 1096 + if (tcon->dfs_path && cifs_sb->origin_fullpath && 1097 + !strcasecmp(tcon->dfs_path, cifs_sb->origin_fullpath)) 1098 + sd->sb = sb; 1099 + } 1100 + 1101 + static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon) 1102 + { 1103 + return __cifs_get_super(tcon_super_cb, tcon); 1104 + } 1105 + 1106 + static inline void cifs_put_tcon_super(struct super_block *sb) 1107 + { 1108 + __cifs_put_super(sb); 1109 + } 1110 + #else 1111 + static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon) 1112 + { 1113 + return ERR_PTR(-EOPNOTSUPP); 1114 + } 1115 + 1116 + static inline void cifs_put_tcon_super(struct super_block *sb) 1117 + { 1118 + } 1119 + #endif 1072 1120 1073 1121 int update_super_prepath(struct cifs_tcon *tcon, const char *prefix, 1074 1122 size_t prefix_len) ··· 1125 1077 struct cifs_sb_info *cifs_sb; 1126 1078 int rc = 0; 1127 1079 1128 - sb = cifs_get_tcp_super(tcon->ses->server); 1080 + sb = cifs_get_tcon_super(tcon); 1129 1081 if (IS_ERR(sb)) 1130 1082 return PTR_ERR(sb); 1131 1083 ··· 1147 1099 
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; 1148 1100 1149 1101 out: 1150 - cifs_put_tcp_super(sb); 1102 + cifs_put_tcon_super(sb); 1151 1103 return rc; 1152 1104 }
+5
fs/cifs/smb2ops.c
··· 687 687 if (smb3_encryption_required(tcon)) 688 688 flags |= CIFS_TRANSFORM_REQ; 689 689 690 + if (!server->ops->new_lease_key) 691 + return -EIO; 692 + 693 + server->ops->new_lease_key(pfid); 694 + 690 695 memset(rqst, 0, sizeof(rqst)); 691 696 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; 692 697 memset(rsp_iov, 0, sizeof(rsp_iov));
+3 -12
fs/debugfs/file.c
··· 506 506 * This function creates a file in debugfs with the given name that 507 507 * contains the value of the variable @value. If the @mode variable is so 508 508 * set, it can be read from, and written to. 509 - * 510 - * This function will return a pointer to a dentry if it succeeds. This 511 - * pointer must be passed to the debugfs_remove() function when the file is 512 - * to be removed (no automatic cleanup happens if your module is unloaded, 513 - * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be 514 - * returned. 515 - * 516 - * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will 517 - * be returned. 518 509 */ 519 - struct dentry *debugfs_create_u32(const char *name, umode_t mode, 520 - struct dentry *parent, u32 *value) 510 + void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, 511 + u32 *value) 521 512 { 522 - return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u32, 513 + debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u32, 523 514 &fops_u32_ro, &fops_u32_wo); 524 515 } 525 516 EXPORT_SYMBOL_GPL(debugfs_create_u32);
+31 -27
fs/io_uring.c
··· 524 524 REQ_F_OVERFLOW_BIT, 525 525 REQ_F_POLLED_BIT, 526 526 REQ_F_BUFFER_SELECTED_BIT, 527 + REQ_F_NO_FILE_TABLE_BIT, 527 528 528 529 /* not a real bit, just to check we're not overflowing the space */ 529 530 __REQ_F_LAST_BIT, ··· 578 577 REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), 579 578 /* buffer already selected */ 580 579 REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), 580 + /* doesn't need file table for this request */ 581 + REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT), 581 582 }; 582 583 583 584 struct async_poll { ··· 802 799 .needs_file = 1, 803 800 .fd_non_neg = 1, 804 801 .needs_fs = 1, 802 + .file_table = 1, 805 803 }, 806 804 [IORING_OP_READ] = { 807 805 .needs_mm = 1, ··· 1295 1291 struct io_kiocb *req; 1296 1292 1297 1293 req = ctx->fallback_req; 1298 - if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req)) 1294 + if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req)) 1299 1295 return req; 1300 1296 1301 1297 return NULL; ··· 1382 1378 if (likely(!io_is_fallback_req(req))) 1383 1379 kmem_cache_free(req_cachep, req); 1384 1380 else 1385 - clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req); 1381 + clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req); 1386 1382 } 1387 1383 1388 1384 struct req_batch { ··· 2038 2034 * any file. For now, just ensure that anything potentially problematic is done 2039 2035 * inline. 
2040 2036 */ 2041 - static bool io_file_supports_async(struct file *file) 2037 + static bool io_file_supports_async(struct file *file, int rw) 2042 2038 { 2043 2039 umode_t mode = file_inode(file)->i_mode; 2044 2040 ··· 2047 2043 if (S_ISREG(mode) && file->f_op != &io_uring_fops) 2048 2044 return true; 2049 2045 2050 - return false; 2046 + if (!(file->f_mode & FMODE_NOWAIT)) 2047 + return false; 2048 + 2049 + if (rw == READ) 2050 + return file->f_op->read_iter != NULL; 2051 + 2052 + return file->f_op->write_iter != NULL; 2051 2053 } 2052 2054 2053 2055 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, ··· 2581 2571 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so 2582 2572 * we know to async punt it even if it was opened O_NONBLOCK 2583 2573 */ 2584 - if (force_nonblock && !io_file_supports_async(req->file)) 2574 + if (force_nonblock && !io_file_supports_async(req->file, READ)) 2585 2575 goto copy_iov; 2586 2576 2587 2577 iov_count = iov_iter_count(&iter); ··· 2604 2594 if (ret) 2605 2595 goto out_free; 2606 2596 /* any defer here is final, must blocking retry */ 2607 - if (!(req->flags & REQ_F_NOWAIT)) 2597 + if (!(req->flags & REQ_F_NOWAIT) && 2598 + !file_can_poll(req->file)) 2608 2599 req->flags |= REQ_F_MUST_PUNT; 2609 2600 return -EAGAIN; 2610 2601 } ··· 2673 2662 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so 2674 2663 * we know to async punt it even if it was opened O_NONBLOCK 2675 2664 */ 2676 - if (force_nonblock && !io_file_supports_async(req->file)) 2665 + if (force_nonblock && !io_file_supports_async(req->file, WRITE)) 2677 2666 goto copy_iov; 2678 2667 2679 2668 /* file path doesn't support NOWAIT for non-direct_IO */ ··· 2727 2716 if (ret) 2728 2717 goto out_free; 2729 2718 /* any defer here is final, must blocking retry */ 2730 - req->flags |= REQ_F_MUST_PUNT; 2719 + if (!file_can_poll(req->file)) 2720 + req->flags |= REQ_F_MUST_PUNT; 2731 2721 return -EAGAIN; 2732 2722 } 2733 2723 } 
··· 2768 2756 return 0; 2769 2757 } 2770 2758 2771 - static bool io_splice_punt(struct file *file) 2772 - { 2773 - if (get_pipe_info(file)) 2774 - return false; 2775 - if (!io_file_supports_async(file)) 2776 - return true; 2777 - return !(file->f_flags & O_NONBLOCK); 2778 - } 2779 - 2780 2759 static int io_splice(struct io_kiocb *req, bool force_nonblock) 2781 2760 { 2782 2761 struct io_splice *sp = &req->splice; ··· 2777 2774 loff_t *poff_in, *poff_out; 2778 2775 long ret; 2779 2776 2780 - if (force_nonblock) { 2781 - if (io_splice_punt(in) || io_splice_punt(out)) 2782 - return -EAGAIN; 2783 - flags |= SPLICE_F_NONBLOCK; 2784 - } 2777 + if (force_nonblock) 2778 + return -EAGAIN; 2785 2779 2786 2780 poff_in = (sp->off_in == -1) ? NULL : &sp->off_in; 2787 2781 poff_out = (sp->off_out == -1) ? NULL : &sp->off_out; ··· 3355 3355 struct kstat stat; 3356 3356 int ret; 3357 3357 3358 - if (force_nonblock) 3358 + if (force_nonblock) { 3359 + /* only need file table for an actual valid fd */ 3360 + if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD) 3361 + req->flags |= REQ_F_NO_FILE_TABLE; 3359 3362 return -EAGAIN; 3363 + } 3360 3364 3361 3365 if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags)) 3362 3366 return -EINVAL; ··· 3506 3502 if (io_req_cancelled(req)) 3507 3503 return; 3508 3504 __io_sync_file_range(req); 3509 - io_put_req(req); /* put submission ref */ 3505 + io_steal_work(req, workptr); 3510 3506 } 3511 3507 3512 3508 static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) ··· 5019 5015 int ret; 5020 5016 5021 5017 /* Still need defer if there is pending req in defer list. 
*/ 5022 - if (!req_need_defer(req) && list_empty(&ctx->defer_list)) 5018 + if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list)) 5023 5019 return 0; 5024 5020 5025 5021 if (!req->io && io_alloc_async_ctx(req)) ··· 5433 5429 int ret = -EBADF; 5434 5430 struct io_ring_ctx *ctx = req->ctx; 5435 5431 5436 - if (req->work.files) 5432 + if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE)) 5437 5433 return 0; 5438 5434 if (!ctx->ring_file) 5439 5435 return -EBADF; ··· 7331 7327 * it could cause shutdown to hang. 7332 7328 */ 7333 7329 while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait)) 7334 - cpu_relax(); 7330 + cond_resched(); 7335 7331 7336 7332 io_kill_timeouts(ctx); 7337 7333 io_poll_remove_all(ctx);
+8
fs/ioctl.c
··· 55 55 static int ioctl_fibmap(struct file *filp, int __user *p) 56 56 { 57 57 struct inode *inode = file_inode(filp); 58 + struct super_block *sb = inode->i_sb; 58 59 int error, ur_block; 59 60 sector_t block; 60 61 ··· 71 70 72 71 block = ur_block; 73 72 error = bmap(inode, &block); 73 + 74 + if (block > INT_MAX) { 75 + error = -ERANGE; 76 + pr_warn_ratelimited("[%s/%d] FS: %s File: %pD4 would truncate fibmap result\n", 77 + current->comm, task_pid_nr(current), 78 + sb->s_id, filp); 79 + } 74 80 75 81 if (error) 76 82 ur_block = 0;
+1 -4
fs/iomap/fiemap.c
··· 117 117 118 118 if (iomap->type == IOMAP_MAPPED) { 119 119 addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits; 120 - if (addr > INT_MAX) 121 - WARN(1, "would truncate bmap result\n"); 122 - else 123 - *bno = addr; 120 + *bno = addr; 124 121 } 125 122 return 0; 126 123 }
+15 -7
fs/nfs/nfs3acl.c
··· 253 253 254 254 int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type) 255 255 { 256 - struct posix_acl *alloc = NULL, *dfacl = NULL; 256 + struct posix_acl *orig = acl, *dfacl = NULL, *alloc; 257 257 int status; 258 258 259 259 if (S_ISDIR(inode->i_mode)) { 260 260 switch(type) { 261 261 case ACL_TYPE_ACCESS: 262 - alloc = dfacl = get_acl(inode, ACL_TYPE_DEFAULT); 262 + alloc = get_acl(inode, ACL_TYPE_DEFAULT); 263 263 if (IS_ERR(alloc)) 264 264 goto fail; 265 + dfacl = alloc; 265 266 break; 266 267 267 268 case ACL_TYPE_DEFAULT: 268 - dfacl = acl; 269 - alloc = acl = get_acl(inode, ACL_TYPE_ACCESS); 269 + alloc = get_acl(inode, ACL_TYPE_ACCESS); 270 270 if (IS_ERR(alloc)) 271 271 goto fail; 272 + dfacl = acl; 273 + acl = alloc; 272 274 break; 273 275 } 274 276 } 275 277 276 278 if (acl == NULL) { 277 - alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); 279 + alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); 278 280 if (IS_ERR(alloc)) 279 281 goto fail; 282 + acl = alloc; 280 283 } 281 284 status = __nfs3_proc_setacls(inode, acl, dfacl); 282 - posix_acl_release(alloc); 285 + out: 286 + if (acl != orig) 287 + posix_acl_release(acl); 288 + if (dfacl != orig) 289 + posix_acl_release(dfacl); 283 290 return status; 284 291 285 292 fail: 286 - return PTR_ERR(alloc); 293 + status = PTR_ERR(alloc); 294 + goto out; 287 295 } 288 296 289 297 const struct xattr_handler *nfs3_xattr_handlers[] = {
+9 -2
fs/nfs/nfs4proc.c
··· 7891 7891 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 7892 7892 { 7893 7893 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 7894 + struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 7894 7895 struct nfs_client *clp = args->client; 7895 7896 7896 7897 switch (task->tk_status) { ··· 7899 7898 case -NFS4ERR_DEADSESSION: 7900 7899 nfs4_schedule_session_recovery(clp->cl_session, 7901 7900 task->tk_status); 7901 + } 7902 + if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 7903 + res->dir != NFS4_CDFS4_BOTH) { 7904 + rpc_task_close_connection(task); 7905 + if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 7906 + rpc_restart_call(task); 7902 7907 } 7903 7908 } 7904 7909 ··· 7928 7921 struct nfs41_bind_conn_to_session_args args = { 7929 7922 .client = clp, 7930 7923 .dir = NFS4_CDFC4_FORE_OR_BOTH, 7924 + .retries = 0, 7931 7925 }; 7932 7926 struct nfs41_bind_conn_to_session_res res; 7933 7927 struct rpc_message msg = { ··· 9199 9191 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 9200 9192 9201 9193 task = rpc_run_task(&task_setup_data); 9202 - if (IS_ERR(task)) 9203 - return ERR_CAST(task); 9194 + 9204 9195 status = rpc_wait_for_completion_task(task); 9205 9196 if (status != 0) 9206 9197 goto out;
+5 -6
fs/nfs/pnfs.c
··· 1332 1332 !valid_layout) { 1333 1333 spin_unlock(&ino->i_lock); 1334 1334 dprintk("NFS: %s no layout segments to return\n", __func__); 1335 - goto out_put_layout_hdr; 1335 + goto out_wait_layoutreturn; 1336 1336 } 1337 1337 1338 1338 send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL); 1339 1339 spin_unlock(&ino->i_lock); 1340 1340 if (send) 1341 1341 status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true); 1342 + out_wait_layoutreturn: 1343 + wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE); 1342 1344 out_put_layout_hdr: 1343 1345 pnfs_free_lseg_list(&tmp_list); 1344 1346 pnfs_put_layout_hdr(lo); ··· 1458 1456 /* lo ref dropped in pnfs_roc_release() */ 1459 1457 layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode); 1460 1458 /* If the creds don't match, we can't compound the layoutreturn */ 1461 - if (!layoutreturn) 1459 + if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0) 1462 1460 goto out_noroc; 1463 - if (cred_fscmp(cred, lc_cred) != 0) 1464 - goto out_noroc_put_cred; 1465 1461 1466 1462 roc = layoutreturn; 1467 1463 pnfs_init_layoutreturn_args(args, lo, &stateid, iomode); 1468 1464 res->lrs_present = 0; 1469 1465 layoutreturn = false; 1470 - 1471 - out_noroc_put_cred: 1472 1466 put_cred(lc_cred); 1467 + 1473 1468 out_noroc: 1474 1469 spin_unlock(&ino->i_lock); 1475 1470 rcu_read_unlock();
+1 -1
fs/nfs/super.c
··· 185 185 186 186 rcu_read_lock(); 187 187 list_for_each_entry_rcu(server, head, client_link) { 188 - if (!nfs_sb_active(server->super)) 188 + if (!(server->super && nfs_sb_active(server->super))) 189 189 continue; 190 190 rcu_read_unlock(); 191 191 if (last)
+12 -15
fs/ocfs2/dlmfs/dlmfs.c
··· 275 275 loff_t *ppos) 276 276 { 277 277 int bytes_left; 278 - ssize_t writelen; 279 278 char *lvb_buf; 280 279 struct inode *inode = file_inode(filp); 281 280 ··· 284 285 if (*ppos >= i_size_read(inode)) 285 286 return -ENOSPC; 286 287 288 + /* don't write past the lvb */ 289 + if (count > i_size_read(inode) - *ppos) 290 + count = i_size_read(inode) - *ppos; 291 + 287 292 if (!count) 288 293 return 0; 289 294 290 295 if (!access_ok(buf, count)) 291 296 return -EFAULT; 292 297 293 - /* don't write past the lvb */ 294 - if ((count + *ppos) > i_size_read(inode)) 295 - writelen = i_size_read(inode) - *ppos; 296 - else 297 - writelen = count - *ppos; 298 - 299 - lvb_buf = kmalloc(writelen, GFP_NOFS); 298 + lvb_buf = kmalloc(count, GFP_NOFS); 300 299 if (!lvb_buf) 301 300 return -ENOMEM; 302 301 303 - bytes_left = copy_from_user(lvb_buf, buf, writelen); 304 - writelen -= bytes_left; 305 - if (writelen) 306 - user_dlm_write_lvb(inode, lvb_buf, writelen); 302 + bytes_left = copy_from_user(lvb_buf, buf, count); 303 + count -= bytes_left; 304 + if (count) 305 + user_dlm_write_lvb(inode, lvb_buf, count); 307 306 308 307 kfree(lvb_buf); 309 308 310 - *ppos = *ppos + writelen; 311 - mlog(0, "wrote %zd bytes\n", writelen); 312 - return writelen; 309 + *ppos = *ppos + count; 310 + mlog(0, "wrote %zu bytes\n", count); 311 + return count; 313 312 } 314 313 315 314 static void dlmfs_init_once(void *foo)
+4 -5
fs/pnode.c
··· 261 261 child = copy_tree(last_source, last_source->mnt.mnt_root, type); 262 262 if (IS_ERR(child)) 263 263 return PTR_ERR(child); 264 + read_seqlock_excl(&mount_lock); 264 265 mnt_set_mountpoint(m, mp, child); 266 + if (m->mnt_master != dest_master) 267 + SET_MNT_MARK(m->mnt_master); 268 + read_sequnlock_excl(&mount_lock); 265 269 last_dest = m; 266 270 last_source = child; 267 - if (m->mnt_master != dest_master) { 268 - read_seqlock_excl(&mount_lock); 269 - SET_MNT_MARK(m->mnt_master); 270 - read_sequnlock_excl(&mount_lock); 271 - } 272 271 hlist_add_head(&child->mnt_hash, list); 273 272 return count_mounts(m->mnt_ns, child); 274 273 }
+1 -1
fs/super.c
··· 1302 1302 mutex_lock(&bdev->bd_fsfreeze_mutex); 1303 1303 if (bdev->bd_fsfreeze_count > 0) { 1304 1304 mutex_unlock(&bdev->bd_fsfreeze_mutex); 1305 - blkdev_put(bdev, mode); 1306 1305 warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev); 1306 + blkdev_put(bdev, mode); 1307 1307 return -EBUSY; 1308 1308 } 1309 1309
+4 -8
include/linux/debugfs.h
··· 103 103 u8 *value); 104 104 void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent, 105 105 u16 *value); 106 - struct dentry *debugfs_create_u32(const char *name, umode_t mode, 107 - struct dentry *parent, u32 *value); 106 + void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, 107 + u32 *value); 108 108 void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, 109 109 u64 *value); 110 110 struct dentry *debugfs_create_ulong(const char *name, umode_t mode, ··· 250 250 static inline void debugfs_create_u16(const char *name, umode_t mode, 251 251 struct dentry *parent, u16 *value) { } 252 252 253 - static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode, 254 - struct dentry *parent, 255 - u32 *value) 256 - { 257 - return ERR_PTR(-ENODEV); 258 - } 253 + static inline void debugfs_create_u32(const char *name, umode_t mode, 254 + struct dentry *parent, u32 *value) { } 259 255 260 256 static inline void debugfs_create_u64(const char *name, umode_t mode, 261 257 struct dentry *parent, u64 *value) { }
+1 -2
include/linux/dma-buf.h
··· 329 329 330 330 /** 331 331 * struct dma_buf_attach_ops - importer operations for an attachment 332 - * @move_notify: [optional] notification that the DMA-buf is moving 333 332 * 334 333 * Attachment operations implemented by the importer. 335 334 */ 336 335 struct dma_buf_attach_ops { 337 336 /** 338 - * @move_notify 337 + * @move_notify: [optional] notification that the DMA-buf is moving 339 338 * 340 339 * If this callback is provided the framework can avoid pinning the 341 340 * backing store while mappings exists.
+6 -6
include/linux/dmaengine.h
··· 83 83 /** 84 84 * Interleaved Transfer Request 85 85 * ---------------------------- 86 - * A chunk is collection of contiguous bytes to be transfered. 86 + * A chunk is collection of contiguous bytes to be transferred. 87 87 * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG). 88 - * ICGs may or maynot change between chunks. 88 + * ICGs may or may not change between chunks. 89 89 * A FRAME is the smallest series of contiguous {chunk,icg} pairs, 90 90 * that when repeated an integral number of times, specifies the transfer. 91 91 * A transfer template is specification of a Frame, the number of times ··· 341 341 * @chan: driver channel device 342 342 * @device: sysfs device 343 343 * @dev_id: parent dma_device dev_id 344 - * @idr_ref: reference count to gate release of dma_device dev_id 345 344 */ 346 345 struct dma_chan_dev { 347 346 struct dma_chan *chan; 348 347 struct device device; 349 348 int dev_id; 350 - atomic_t *idr_ref; 351 349 }; 352 350 353 351 /** ··· 833 835 int dev_id; 834 836 struct device *dev; 835 837 struct module *owner; 838 + struct ida chan_ida; 839 + struct mutex chan_mutex; /* to protect chan_ida */ 836 840 837 841 u32 src_addr_widths; 838 842 u32 dst_addr_widths; ··· 1069 1069 * dmaengine_synchronize() needs to be called before it is safe to free 1070 1070 * any memory that is accessed by previously submitted descriptors or before 1071 1071 * freeing any resources accessed from within the completion callback of any 1072 - * perviously submitted descriptors. 1072 + * previously submitted descriptors. 1073 1073 * 1074 1074 * This function can be called from atomic context as well as from within a 1075 1075 * complete callback of a descriptor submitted on the same channel. ··· 1091 1091 * 1092 1092 * Synchronizes to the DMA channel termination to the current context. 
When this 1093 1093 * function returns it is guaranteed that all transfers for previously issued 1094 - * descriptors have stopped and and it is safe to free the memory assoicated 1094 + * descriptors have stopped and it is safe to free the memory associated 1095 1095 * with them. Furthermore it is guaranteed that all complete callback functions 1096 1096 * for a previously submitted descriptor have finished running and it is safe to 1097 1097 * free resources accessed from within the complete callbacks.
+1 -1
include/linux/fs.h
··· 983 983 __u32 handle_bytes; 984 984 int handle_type; 985 985 /* file identifier */ 986 - unsigned char f_handle[0]; 986 + unsigned char f_handle[]; 987 987 }; 988 988 989 989 static inline struct file *get_file(struct file *f)
+1 -1
include/linux/iio/iio.h
··· 600 600 * 0 on success, negative error number on failure. 601 601 */ 602 602 #define devm_iio_device_register(dev, indio_dev) \ 603 - __devm_iio_device_register((dev), (indio_dev), THIS_MODULE); 603 + __devm_iio_device_register((dev), (indio_dev), THIS_MODULE) 604 604 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, 605 605 struct module *this_mod); 606 606 void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
+2
include/linux/nfs_xdr.h
··· 1317 1317 struct nfstime4 date; 1318 1318 }; 1319 1319 1320 + #define MAX_BIND_CONN_TO_SESSION_RETRIES 3 1320 1321 struct nfs41_bind_conn_to_session_args { 1321 1322 struct nfs_client *client; 1322 1323 struct nfs4_sessionid sessionid; 1323 1324 u32 dir; 1324 1325 bool use_conn_in_rdma_mode; 1326 + int retries; 1325 1327 }; 1326 1328 1327 1329 struct nfs41_bind_conn_to_session_res {
+1
include/linux/platform_data/cros_ec_sensorhub.h
··· 185 185 void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub, 186 186 u8 sensor_num); 187 187 188 + int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub); 188 189 int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub); 189 190 void cros_ec_sensorhub_ring_remove(void *arg); 190 191 int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
+12 -1
include/linux/sunrpc/clnt.h
··· 71 71 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 72 72 struct dentry *cl_debugfs; /* debugfs directory */ 73 73 #endif 74 - struct rpc_xprt_iter cl_xpi; 74 + /* cl_work is only needed after cl_xpi is no longer used, 75 + * and that are of similar size 76 + */ 77 + union { 78 + struct rpc_xprt_iter cl_xpi; 79 + struct work_struct cl_work; 80 + }; 75 81 const struct cred *cl_cred; 76 82 }; 77 83 ··· 242 236 (task->tk_msg.rpc_proc->p_decode != NULL); 243 237 } 244 238 239 + static inline void rpc_task_close_connection(struct rpc_task *task) 240 + { 241 + if (task->tk_xprt) 242 + xprt_force_disconnect(task->tk_xprt); 243 + } 245 244 #endif /* _LINUX_SUNRPC_CLNT_H */
-51
include/linux/tcp.h
··· 78 78 #define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */ 79 79 #define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/ 80 80 81 - #if IS_ENABLED(CONFIG_MPTCP) 82 - struct mptcp_options_received { 83 - u64 sndr_key; 84 - u64 rcvr_key; 85 - u64 data_ack; 86 - u64 data_seq; 87 - u32 subflow_seq; 88 - u16 data_len; 89 - u16 mp_capable : 1, 90 - mp_join : 1, 91 - dss : 1, 92 - add_addr : 1, 93 - rm_addr : 1, 94 - family : 4, 95 - echo : 1, 96 - backup : 1; 97 - u32 token; 98 - u32 nonce; 99 - u64 thmac; 100 - u8 hmac[20]; 101 - u8 join_id; 102 - u8 use_map:1, 103 - dsn64:1, 104 - data_fin:1, 105 - use_ack:1, 106 - ack64:1, 107 - mpc_map:1, 108 - __unused:2; 109 - u8 addr_id; 110 - u8 rm_id; 111 - union { 112 - struct in_addr addr; 113 - #if IS_ENABLED(CONFIG_MPTCP_IPV6) 114 - struct in6_addr addr6; 115 - #endif 116 - }; 117 - u64 ahmac; 118 - u16 port; 119 - }; 120 - #endif 121 - 122 81 struct tcp_options_received { 123 82 /* PAWS/RTTM data */ 124 83 int ts_recent_stamp;/* Time we stored ts_recent (for aging) */ ··· 95 136 u8 num_sacks; /* Number of SACK blocks */ 96 137 u16 user_mss; /* mss requested by user in ioctl */ 97 138 u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ 98 - #if IS_ENABLED(CONFIG_MPTCP) 99 - struct mptcp_options_received mptcp; 100 - #endif 101 139 }; 102 140 103 141 static inline void tcp_clear_options(struct tcp_options_received *rx_opt) ··· 103 147 rx_opt->wscale_ok = rx_opt->snd_wscale = 0; 104 148 #if IS_ENABLED(CONFIG_SMC) 105 149 rx_opt->smc_ok = 0; 106 - #endif 107 - #if IS_ENABLED(CONFIG_MPTCP) 108 - rx_opt->mptcp.mp_capable = 0; 109 - rx_opt->mptcp.mp_join = 0; 110 - rx_opt->mptcp.add_addr = 0; 111 - rx_opt->mptcp.rm_addr = 0; 112 - rx_opt->mptcp.dss = 0; 113 150 #endif 114 151 } 115 152
+1 -1
include/linux/tty.h
··· 66 66 int read; 67 67 int flags; 68 68 /* Data points here */ 69 - unsigned long data[0]; 69 + unsigned long data[]; 70 70 }; 71 71 72 72 /* Values for .flags field of tty_buffer */
+24 -2
include/linux/virtio_net.h
··· 3 3 #define _LINUX_VIRTIO_NET_H 4 4 5 5 #include <linux/if_vlan.h> 6 + #include <uapi/linux/tcp.h> 7 + #include <uapi/linux/udp.h> 6 8 #include <uapi/linux/virtio_net.h> 7 9 8 10 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, ··· 30 28 bool little_endian) 31 29 { 32 30 unsigned int gso_type = 0; 31 + unsigned int thlen = 0; 32 + unsigned int ip_proto; 33 33 34 34 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 35 35 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 36 36 case VIRTIO_NET_HDR_GSO_TCPV4: 37 37 gso_type = SKB_GSO_TCPV4; 38 + ip_proto = IPPROTO_TCP; 39 + thlen = sizeof(struct tcphdr); 38 40 break; 39 41 case VIRTIO_NET_HDR_GSO_TCPV6: 40 42 gso_type = SKB_GSO_TCPV6; 43 + ip_proto = IPPROTO_TCP; 44 + thlen = sizeof(struct tcphdr); 41 45 break; 42 46 case VIRTIO_NET_HDR_GSO_UDP: 43 47 gso_type = SKB_GSO_UDP; 48 + ip_proto = IPPROTO_UDP; 49 + thlen = sizeof(struct udphdr); 44 50 break; 45 51 default: 46 52 return -EINVAL; ··· 67 57 68 58 if (!skb_partial_csum_set(skb, start, off)) 69 59 return -EINVAL; 60 + 61 + if (skb_transport_offset(skb) + thlen > skb_headlen(skb)) 62 + return -EINVAL; 70 63 } else { 71 64 /* gso packets without NEEDS_CSUM do not set transport_offset. 72 65 * probe and drop if does not match one of the above types. 
73 66 */ 74 67 if (gso_type && skb->network_header) { 68 + struct flow_keys_basic keys; 69 + 75 70 if (!skb->protocol) 76 71 virtio_net_hdr_set_proto(skb, hdr); 77 72 retry: 78 - skb_probe_transport_header(skb); 79 - if (!skb_transport_header_was_set(skb)) { 73 + if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, 74 + NULL, 0, 0, 0, 75 + 0)) { 80 76 /* UFO does not specify ipv4 or 6: try both */ 81 77 if (gso_type & SKB_GSO_UDP && 82 78 skb->protocol == htons(ETH_P_IP)) { ··· 91 75 } 92 76 return -EINVAL; 93 77 } 78 + 79 + if (keys.control.thoff + thlen > skb_headlen(skb) || 80 + keys.basic.ip_proto != ip_proto) 81 + return -EINVAL; 82 + 83 + skb_set_transport_header(skb, keys.control.thoff); 94 84 } 95 85 } 96 86
+1
include/linux/virtio_vsock.h
··· 48 48 u32 len; 49 49 u32 off; 50 50 bool reply; 51 + bool tap_delivered; 51 52 }; 52 53 53 54 struct virtio_vsock_pkt_info {
+8 -1
include/net/flow_offload.h
··· 167 167 enum flow_action_hw_stats_bit { 168 168 FLOW_ACTION_HW_STATS_IMMEDIATE_BIT, 169 169 FLOW_ACTION_HW_STATS_DELAYED_BIT, 170 + FLOW_ACTION_HW_STATS_DISABLED_BIT, 170 171 }; 171 172 172 173 enum flow_action_hw_stats { 173 - FLOW_ACTION_HW_STATS_DISABLED = 0, 174 + FLOW_ACTION_HW_STATS_DONT_CARE = 0, 174 175 FLOW_ACTION_HW_STATS_IMMEDIATE = 175 176 BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT), 176 177 FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT), 177 178 FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE | 178 179 FLOW_ACTION_HW_STATS_DELAYED, 180 + FLOW_ACTION_HW_STATS_DISABLED = 181 + BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT), 179 182 }; 180 183 181 184 typedef void (*action_destr)(void *priv); ··· 338 335 return true; 339 336 if (!flow_action_mixed_hw_stats_check(action, extack)) 340 337 return false; 338 + 341 339 action_entry = flow_action_first_entry_get(action); 340 + if (action_entry->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE) 341 + return true; 342 + 342 343 if (!check_allow_bit && 343 344 action_entry->hw_stats != FLOW_ACTION_HW_STATS_ANY) { 344 345 NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
+55 -2
include/net/inet_ecn.h
··· 99 99 return 1; 100 100 } 101 101 102 + static inline int IP_ECN_set_ect1(struct iphdr *iph) 103 + { 104 + u32 check = (__force u32)iph->check; 105 + 106 + if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0) 107 + return 0; 108 + 109 + check += (__force u16)htons(0x100); 110 + 111 + iph->check = (__force __sum16)(check + (check>=0xFFFF)); 112 + iph->tos ^= INET_ECN_MASK; 113 + return 1; 114 + } 115 + 102 116 static inline void IP_ECN_clear(struct iphdr *iph) 103 117 { 104 118 iph->tos &= ~INET_ECN_MASK; ··· 148 134 return 1; 149 135 } 150 136 137 + static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph) 138 + { 139 + __be32 from, to; 140 + 141 + if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0) 142 + return 0; 143 + 144 + from = *(__be32 *)iph; 145 + to = from ^ htonl(INET_ECN_MASK << 20); 146 + *(__be32 *)iph = to; 147 + if (skb->ip_summed == CHECKSUM_COMPLETE) 148 + skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), 149 + (__force __wsum)to); 150 + return 1; 151 + } 152 + 151 153 static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) 152 154 { 153 155 dscp &= ~INET_ECN_MASK; ··· 183 153 if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= 184 154 skb_tail_pointer(skb)) 185 155 return IP6_ECN_set_ce(skb, ipv6_hdr(skb)); 156 + break; 157 + } 158 + 159 + return 0; 160 + } 161 + 162 + static inline int INET_ECN_set_ect1(struct sk_buff *skb) 163 + { 164 + switch (skb->protocol) { 165 + case cpu_to_be16(ETH_P_IP): 166 + if (skb_network_header(skb) + sizeof(struct iphdr) <= 167 + skb_tail_pointer(skb)) 168 + return IP_ECN_set_ect1(ip_hdr(skb)); 169 + break; 170 + 171 + case cpu_to_be16(ETH_P_IPV6): 172 + if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= 173 + skb_tail_pointer(skb)) 174 + return IP6_ECN_set_ect1(skb, ipv6_hdr(skb)); 186 175 break; 187 176 } 188 177 ··· 257 208 int rc; 258 209 259 210 rc = __INET_ECN_decapsulate(outer, inner, &set_ce); 260 - if (!rc && set_ce) 261 - 
INET_ECN_set_ce(skb); 211 + if (!rc) { 212 + if (set_ce) 213 + INET_ECN_set_ce(skb); 214 + else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1) 215 + INET_ECN_set_ect1(skb); 216 + } 262 217 263 218 return rc; 264 219 }
+4
include/net/ip6_fib.h
··· 203 203 struct rt6_info { 204 204 struct dst_entry dst; 205 205 struct fib6_info __rcu *from; 206 + int sernum; 206 207 207 208 struct rt6key rt6i_dst; 208 209 struct rt6key rt6i_src; ··· 291 290 { 292 291 struct fib6_info *from; 293 292 u32 cookie = 0; 293 + 294 + if (rt->sernum) 295 + return rt->sernum; 294 296 295 297 rcu_read_lock(); 296 298
-4
include/net/mptcp.h
··· 69 69 } 70 70 71 71 void mptcp_space(const struct sock *ssk, int *space, int *full_space); 72 - 73 - void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr, 74 - int opsize, struct tcp_options_received *opt_rx); 75 72 bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, 76 73 unsigned int *size, struct mptcp_out_options *opts); 77 - void mptcp_rcv_synsent(struct sock *sk); 78 74 bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, 79 75 struct mptcp_out_options *opts); 80 76 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
+7
include/net/net_namespace.h
··· 437 437 return atomic_read(&net->ipv4.rt_genid); 438 438 } 439 439 440 + #if IS_ENABLED(CONFIG_IPV6) 441 + static inline int rt_genid_ipv6(const struct net *net) 442 + { 443 + return atomic_read(&net->ipv6.fib6_sernum); 444 + } 445 + #endif 446 + 440 447 static inline void rt_genid_bump_ipv4(struct net *net) 441 448 { 442 449 atomic_inc(&net->ipv4.rt_genid);
+1
include/net/sch_generic.h
··· 407 407 struct mutex lock; 408 408 struct list_head chain_list; 409 409 u32 index; /* block index for shared blocks */ 410 + u32 classid; /* which class this block belongs to */ 410 411 refcount_t refcnt; 411 412 struct net *net; 412 413 struct Qdisc *q;
+1
include/soc/mscc/ocelot.h
··· 507 507 unsigned int num_stats; 508 508 509 509 int shared_queue_sz; 510 + int num_mact_rows; 510 511 511 512 struct net_device *hw_bridge_dev; 512 513 u16 bridge_mask;
+4 -8
include/trace/events/rpcrdma.h
··· 692 692 693 693 TRACE_EVENT(xprtrdma_post_send, 694 694 TP_PROTO( 695 - const struct rpcrdma_req *req, 696 - int status 695 + const struct rpcrdma_req *req 697 696 ), 698 697 699 - TP_ARGS(req, status), 698 + TP_ARGS(req), 700 699 701 700 TP_STRUCT__entry( 702 701 __field(const void *, req) ··· 704 705 __field(unsigned int, client_id) 705 706 __field(int, num_sge) 706 707 __field(int, signaled) 707 - __field(int, status) 708 708 ), 709 709 710 710 TP_fast_assign( ··· 716 718 __entry->sc = req->rl_sendctx; 717 719 __entry->num_sge = req->rl_wr.num_sge; 718 720 __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; 719 - __entry->status = status; 720 721 ), 721 722 722 - TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %sstatus=%d", 723 + TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s", 723 724 __entry->task_id, __entry->client_id, 724 725 __entry->req, __entry->sc, __entry->num_sge, 725 726 (__entry->num_sge == 1 ? "" : "s"), 726 - (__entry->signaled ? "signaled " : ""), 727 - __entry->status 727 + (__entry->signaled ? "signaled" : "") 728 728 ) 729 729 ); 730 730
+4
include/uapi/drm/amdgpu_drm.h
··· 346 346 #define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF 347 347 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43 348 348 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1 349 + #define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44 350 + #define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1 351 + #define AMDGPU_TILING_SCANOUT_SHIFT 63 352 + #define AMDGPU_TILING_SCANOUT_MASK 0x1 349 353 350 354 /* Set/Get helpers for tiling flags. */ 351 355 #define AMDGPU_TILING_SET(field, value) \
+1 -1
include/uapi/linux/bpf.h
··· 73 73 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ 74 74 struct bpf_lpm_trie_key { 75 75 __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ 76 - __u8 data[]; /* Arbitrary size */ 76 + __u8 data[0]; /* Arbitrary size */ 77 77 }; 78 78 79 79 struct bpf_cgroup_storage_key {
+2 -2
include/uapi/linux/dlm_device.h
··· 45 45 void __user *bastaddr; 46 46 struct dlm_lksb __user *lksb; 47 47 char lvb[DLM_USER_LVB_LEN]; 48 - char name[]; 48 + char name[0]; 49 49 }; 50 50 51 51 struct dlm_lspace_params { 52 52 __u32 flags; 53 53 __u32 minor; 54 - char name[]; 54 + char name[0]; 55 55 }; 56 56 57 57 struct dlm_purge_params {
+6
include/uapi/linux/dma-buf.h
··· 39 39 40 40 #define DMA_BUF_BASE 'b' 41 41 #define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) 42 + 43 + /* 32/64bitness of this uapi was botched in android, there's no difference 44 + * between them in actual uapi, they're just different numbers. 45 + */ 42 46 #define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) 47 + #define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32) 48 + #define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64) 43 49 44 50 #endif
+1 -1
include/uapi/linux/fiemap.h
··· 34 34 __u32 fm_mapped_extents;/* number of extents that were mapped (out) */ 35 35 __u32 fm_extent_count; /* size of fm_extents array (in) */ 36 36 __u32 fm_reserved; 37 - struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */ 37 + struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */ 38 38 }; 39 39 40 40 #define FIEMAP_MAX_OFFSET (~0ULL)
+2 -2
include/uapi/linux/hyperv.h
··· 119 119 120 120 struct hv_fcopy_hdr { 121 121 __u32 operation; 122 - uuid_le service_id0; /* currently unused */ 123 - uuid_le service_id1; /* currently unused */ 122 + __u8 service_id0[16]; /* currently unused */ 123 + __u8 service_id1[16]; /* currently unused */ 124 124 } __attribute__((packed)); 125 125 126 126 #define OVER_WRITE 0x1
+3 -3
include/uapi/linux/if_arcnet.h
··· 60 60 __u8 proto; /* protocol ID field - varies */ 61 61 __u8 split_flag; /* for use with split packets */ 62 62 __be16 sequence; /* sequence number */ 63 - __u8 payload[]; /* space remaining in packet (504 bytes)*/ 63 + __u8 payload[0]; /* space remaining in packet (504 bytes)*/ 64 64 }; 65 65 #define RFC1201_HDR_SIZE 4 66 66 ··· 69 69 */ 70 70 struct arc_rfc1051 { 71 71 __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ 72 - __u8 payload[]; /* 507 bytes */ 72 + __u8 payload[0]; /* 507 bytes */ 73 73 }; 74 74 #define RFC1051_HDR_SIZE 1 75 75 ··· 80 80 struct arc_eth_encap { 81 81 __u8 proto; /* Always ARC_P_ETHER */ 82 82 struct ethhdr eth; /* standard ethernet header (yuck!) */ 83 - __u8 payload[]; /* 493 bytes */ 83 + __u8 payload[0]; /* 493 bytes */ 84 84 }; 85 85 #define ETH_ENCAP_HDR_SIZE 14 86 86
+1 -1
include/uapi/linux/mmc/ioctl.h
··· 57 57 */ 58 58 struct mmc_ioc_multi_cmd { 59 59 __u64 num_of_cmds; 60 - struct mmc_ioc_cmd cmds[]; 60 + struct mmc_ioc_cmd cmds[0]; 61 61 }; 62 62 63 63 #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
+2 -2
include/uapi/linux/net_dropmon.h
··· 29 29 30 30 struct net_dm_config_msg { 31 31 __u32 entries; 32 - struct net_dm_config_entry options[]; 32 + struct net_dm_config_entry options[0]; 33 33 }; 34 34 35 35 struct net_dm_alert_msg { 36 36 __u32 entries; 37 - struct net_dm_drop_point points[]; 37 + struct net_dm_drop_point points[0]; 38 38 }; 39 39 40 40 struct net_dm_user_msg {
+1 -1
include/uapi/linux/netfilter_bridge/ebt_among.h
··· 40 40 struct ebt_mac_wormhash { 41 41 int table[257]; 42 42 int poolsize; 43 - struct ebt_mac_wormhash_tuple pool[]; 43 + struct ebt_mac_wormhash_tuple pool[0]; 44 44 }; 45 45 46 46 #define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \
+1 -1
include/uapi/scsi/scsi_bsg_fc.h
··· 209 209 __u64 vendor_id; 210 210 211 211 /* start of vendor command area */ 212 - __u32 vendor_cmd[]; 212 + __u32 vendor_cmd[0]; 213 213 }; 214 214 215 215 /* Response:
+7
kernel/power/hibernate.c
··· 898 898 error = freeze_processes(); 899 899 if (error) 900 900 goto Close_Finish; 901 + 902 + error = freeze_kernel_threads(); 903 + if (error) { 904 + thaw_processes(); 905 + goto Close_Finish; 906 + } 907 + 901 908 error = load_image_and_restore(); 902 909 thaw_processes(); 903 910 Finish:
+1 -1
lib/kunit/test.c
··· 93 93 * representation. 94 94 */ 95 95 if (suite) 96 - pr_info("%s %zd - %s", 96 + pr_info("%s %zd - %s\n", 97 97 kunit_status_to_string(is_ok), 98 98 test_number, description); 99 99 else
+17 -17
lib/mpi/longlong.h
··· 722 722 do { \ 723 723 if (__builtin_constant_p(bh) && (bh) == 0) \ 724 724 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ 725 - : "=r" ((USItype)(sh)), \ 726 - "=&r" ((USItype)(sl)) \ 725 + : "=r" (sh), \ 726 + "=&r" (sl) \ 727 727 : "%r" ((USItype)(ah)), \ 728 728 "%r" ((USItype)(al)), \ 729 729 "rI" ((USItype)(bl))); \ 730 730 else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ 731 731 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ 732 - : "=r" ((USItype)(sh)), \ 733 - "=&r" ((USItype)(sl)) \ 732 + : "=r" (sh), \ 733 + "=&r" (sl) \ 734 734 : "%r" ((USItype)(ah)), \ 735 735 "%r" ((USItype)(al)), \ 736 736 "rI" ((USItype)(bl))); \ 737 737 else \ 738 738 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ 739 - : "=r" ((USItype)(sh)), \ 740 - "=&r" ((USItype)(sl)) \ 739 + : "=r" (sh), \ 740 + "=&r" (sl) \ 741 741 : "%r" ((USItype)(ah)), \ 742 742 "r" ((USItype)(bh)), \ 743 743 "%r" ((USItype)(al)), \ ··· 747 747 do { \ 748 748 if (__builtin_constant_p(ah) && (ah) == 0) \ 749 749 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ 750 - : "=r" ((USItype)(sh)), \ 751 - "=&r" ((USItype)(sl)) \ 750 + : "=r" (sh), \ 751 + "=&r" (sl) \ 752 752 : "r" ((USItype)(bh)), \ 753 753 "rI" ((USItype)(al)), \ 754 754 "r" ((USItype)(bl))); \ 755 755 else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \ 756 756 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ 757 - : "=r" ((USItype)(sh)), \ 758 - "=&r" ((USItype)(sl)) \ 757 + : "=r" (sh), \ 758 + "=&r" (sl) \ 759 759 : "r" ((USItype)(bh)), \ 760 760 "rI" ((USItype)(al)), \ 761 761 "r" ((USItype)(bl))); \ 762 762 else if (__builtin_constant_p(bh) && (bh) == 0) \ 763 763 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ 764 - : "=r" ((USItype)(sh)), \ 765 - "=&r" ((USItype)(sl)) \ 764 + : "=r" (sh), \ 765 + "=&r" (sl) \ 766 766 : "r" ((USItype)(ah)), \ 767 767 "rI" ((USItype)(al)), \ 768 768 "r" ((USItype)(bl))); \ 769 769 else if 
(__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ 770 770 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ 771 - : "=r" ((USItype)(sh)), \ 772 - "=&r" ((USItype)(sl)) \ 771 + : "=r" (sh), \ 772 + "=&r" (sl) \ 773 773 : "r" ((USItype)(ah)), \ 774 774 "rI" ((USItype)(al)), \ 775 775 "r" ((USItype)(bl))); \ 776 776 else \ 777 777 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ 778 - : "=r" ((USItype)(sh)), \ 779 - "=&r" ((USItype)(sl)) \ 778 + : "=r" (sh), \ 779 + "=&r" (sl) \ 780 780 : "r" ((USItype)(ah)), \ 781 781 "r" ((USItype)(bh)), \ 782 782 "rI" ((USItype)(al)), \ ··· 787 787 do { \ 788 788 USItype __m0 = (m0), __m1 = (m1); \ 789 789 __asm__ ("mulhwu %0,%1,%2" \ 790 - : "=r" ((USItype) ph) \ 790 + : "=r" (ph) \ 791 791 : "%r" (__m0), \ 792 792 "r" (__m1)); \ 793 793 (pl) = __m0 * __m1; \
+10 -10
net/atm/common.c
··· 177 177 178 178 set_bit(ATM_VF_CLOSE, &vcc->flags); 179 179 clear_bit(ATM_VF_READY, &vcc->flags); 180 - if (vcc->dev) { 181 - if (vcc->dev->ops->close) 182 - vcc->dev->ops->close(vcc); 183 - if (vcc->push) 184 - vcc->push(vcc, NULL); /* atmarpd has no push */ 185 - module_put(vcc->owner); 180 + if (vcc->dev && vcc->dev->ops->close) 181 + vcc->dev->ops->close(vcc); 182 + if (vcc->push) 183 + vcc->push(vcc, NULL); /* atmarpd has no push */ 184 + module_put(vcc->owner); 186 185 187 - while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 188 - atm_return(vcc, skb->truesize); 189 - kfree_skb(skb); 190 - } 186 + while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { 187 + atm_return(vcc, skb->truesize); 188 + kfree_skb(skb); 189 + } 191 190 191 + if (vcc->dev && vcc->dev->ops->owner) { 192 192 module_put(vcc->dev->ops->owner); 193 193 atm_dev_put(vcc->dev); 194 194 }
+6
net/atm/lec.c
··· 1264 1264 entry->vcc = NULL; 1265 1265 } 1266 1266 if (entry->recv_vcc) { 1267 + struct atm_vcc *vcc = entry->recv_vcc; 1268 + struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc); 1269 + 1270 + kfree(vpriv); 1271 + vcc->user_back = NULL; 1272 + 1267 1273 entry->recv_vcc->push = entry->old_recv_push; 1268 1274 vcc_release_async(entry->recv_vcc, -EPIPE); 1269 1275 entry->recv_vcc = NULL;
+1 -1
net/batman-adv/bat_v_ogm.c
··· 893 893 894 894 orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig); 895 895 if (!orig_node) 896 - return; 896 + goto out; 897 897 898 898 neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming, 899 899 ethhdr->h_source);
+1 -8
net/batman-adv/network-coding.c
··· 1009 1009 */ 1010 1010 static u8 batadv_nc_random_weight_tq(u8 tq) 1011 1011 { 1012 - u8 rand_val, rand_tq; 1013 - 1014 - get_random_bytes(&rand_val, sizeof(rand_val)); 1015 - 1016 1012 /* randomize the estimated packet loss (max TQ - estimated TQ) */ 1017 - rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq); 1018 - 1019 - /* normalize the randomized packet loss */ 1020 - rand_tq /= BATADV_TQ_MAX_VALUE; 1013 + u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq); 1021 1014 1022 1015 /* convert to (randomized) estimated tq again */ 1023 1016 return BATADV_TQ_MAX_VALUE - rand_tq;
+2 -1
net/batman-adv/sysfs.c
··· 1150 1150 ret = batadv_parse_throughput(net_dev, buff, "throughput_override", 1151 1151 &tp_override); 1152 1152 if (!ret) 1153 - return count; 1153 + goto out; 1154 1154 1155 1155 old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override); 1156 1156 if (old_tp_override == tp_override) ··· 1190 1190 1191 1191 tp_override = atomic_read(&hard_iface->bat_v.throughput_override); 1192 1192 1193 + batadv_hardif_put(hard_iface); 1193 1194 return sprintf(buff, "%u.%u MBit\n", tp_override / 10, 1194 1195 tp_override % 10); 1195 1196 }
+1
net/bridge/br_netlink.c
··· 615 615 v - 1, rtm_cmd); 616 616 v_change_start = 0; 617 617 } 618 + cond_resched(); 618 619 } 619 620 /* v_change_start is set only if the last/whole range changed */ 620 621 if (v_change_start)
+10 -2
net/core/devlink.c
··· 4331 4331 end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]); 4332 4332 end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]); 4333 4333 dump = false; 4334 + 4335 + if (start_offset == end_offset) { 4336 + err = 0; 4337 + goto nla_put_failure; 4338 + } 4334 4339 } 4335 4340 4336 4341 err = devlink_nl_region_read_snapshot_fill(skb, devlink, ··· 5416 5411 { 5417 5412 enum devlink_health_reporter_state prev_health_state; 5418 5413 struct devlink *devlink = reporter->devlink; 5414 + unsigned long recover_ts_threshold; 5419 5415 5420 5416 /* write a log message of the current error */ 5421 5417 WARN_ON(!msg); ··· 5427 5421 devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER); 5428 5422 5429 5423 /* abort if the previous error wasn't recovered */ 5424 + recover_ts_threshold = reporter->last_recovery_ts + 5425 + msecs_to_jiffies(reporter->graceful_period); 5430 5426 if (reporter->auto_recover && 5431 5427 (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY || 5432 - jiffies - reporter->last_recovery_ts < 5433 - msecs_to_jiffies(reporter->graceful_period))) { 5428 + (reporter->last_recovery_ts && reporter->recovery_count && 5429 + time_is_after_jiffies(recover_ts_threshold)))) { 5434 5430 trace_devlink_health_recover_aborted(devlink, 5435 5431 reporter->ops->name, 5436 5432 reporter->health_state,
+7 -4
net/core/drop_monitor.c
··· 213 213 static void trace_drop_common(struct sk_buff *skb, void *location) 214 214 { 215 215 struct net_dm_alert_msg *msg; 216 + struct net_dm_drop_point *point; 216 217 struct nlmsghdr *nlh; 217 218 struct nlattr *nla; 218 219 int i; ··· 232 231 nlh = (struct nlmsghdr *)dskb->data; 233 232 nla = genlmsg_data(nlmsg_data(nlh)); 234 233 msg = nla_data(nla); 234 + point = msg->points; 235 235 for (i = 0; i < msg->entries; i++) { 236 - if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { 237 - msg->points[i].count++; 236 + if (!memcmp(&location, &point->pc, sizeof(void *))) { 237 + point->count++; 238 238 goto out; 239 239 } 240 + point++; 240 241 } 241 242 if (msg->entries == dm_hit_limit) 242 243 goto out; ··· 247 244 */ 248 245 __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point)); 249 246 nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); 250 - memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); 251 - msg->points[msg->entries].count = 1; 247 + memcpy(point->pc, &location, sizeof(void *)); 248 + point->count = 1; 252 249 msg->entries++; 253 250 254 251 if (!timer_pending(&data->send_timer)) {
+3 -3
net/core/neighbour.c
··· 1956 1956 NEIGH_UPDATE_F_OVERRIDE_ISROUTER); 1957 1957 } 1958 1958 1959 + if (protocol) 1960 + neigh->protocol = protocol; 1961 + 1959 1962 if (ndm->ndm_flags & NTF_EXT_LEARNED) 1960 1963 flags |= NEIGH_UPDATE_F_EXT_LEARNED; 1961 1964 ··· 1971 1968 } else 1972 1969 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags, 1973 1970 NETLINK_CB(skb).portid, extack); 1974 - 1975 - if (protocol) 1976 - neigh->protocol = protocol; 1977 1971 1978 1972 neigh_release(neigh); 1979 1973
-1
net/core/sock.c
··· 2364 2364 } 2365 2365 } 2366 2366 2367 - /* On 32bit arches, an skb frag is limited to 2^15 */ 2368 2367 #define SKB_FRAG_PAGE_ORDER get_order(32768) 2369 2368 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); 2370 2369
+1 -1
net/dsa/dsa2.c
··· 459 459 list_for_each_entry(dp, &dst->ports, list) { 460 460 err = dsa_port_setup(dp); 461 461 if (err) 462 - goto teardown; 462 + continue; 463 463 } 464 464 465 465 return 0;
+2 -1
net/dsa/master.c
··· 289 289 { 290 290 struct dsa_port *cpu_dp = dev->dsa_ptr; 291 291 292 - dev->netdev_ops = cpu_dp->orig_ndo_ops; 292 + if (cpu_dp->orig_ndo_ops) 293 + dev->netdev_ops = cpu_dp->orig_ndo_ops; 293 294 cpu_dp->orig_ndo_ops = NULL; 294 295 } 295 296
+3 -5
net/dsa/slave.c
··· 856 856 struct dsa_port *to_dp; 857 857 int err; 858 858 859 - act = &cls->rule->action.entries[0]; 860 - 861 859 if (!ds->ops->port_mirror_add) 862 860 return -EOPNOTSUPP; 863 - 864 - if (!act->dev) 865 - return -EINVAL; 866 861 867 862 if (!flow_action_basic_hw_stats_check(&cls->rule->action, 868 863 cls->common.extack)) 869 864 return -EOPNOTSUPP; 870 865 871 866 act = &cls->rule->action.entries[0]; 867 + 868 + if (!act->dev) 869 + return -EINVAL; 872 870 873 871 if (!dsa_slave_dev_check(act->dev)) 874 872 return -EOPNOTSUPP;
+1 -1
net/hsr/hsr_slave.c
··· 18 18 { 19 19 struct sk_buff *skb = *pskb; 20 20 struct hsr_port *port; 21 - u16 protocol; 21 + __be16 protocol; 22 22 23 23 if (!skb_mac_header_was_set(skb)) { 24 24 WARN_ONCE(1, "%s: skb invalid", __func__);
-7
net/ipv4/tcp_input.c
··· 3926 3926 */ 3927 3927 break; 3928 3928 #endif 3929 - case TCPOPT_MPTCP: 3930 - mptcp_parse_option(skb, ptr, opsize, opt_rx); 3931 - break; 3932 - 3933 3929 case TCPOPT_FASTOPEN: 3934 3930 tcp_parse_fastopen_option( 3935 3931 opsize - TCPOLEN_FASTOPEN_BASE, ··· 6014 6018 6015 6019 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); 6016 6020 tcp_initialize_rcv_mss(sk); 6017 - 6018 - if (sk_is_mptcp(sk)) 6019 - mptcp_rcv_synsent(sk); 6020 6021 6021 6022 /* Remember, tcp_poll() does not lock socket! 6022 6023 * Change state from SYN-SENT only after copied_seq
+25
net/ipv6/route.c
··· 1385 1385 } 1386 1386 ip6_rt_copy_init(pcpu_rt, res); 1387 1387 pcpu_rt->rt6i_flags |= RTF_PCPU; 1388 + 1389 + if (f6i->nh) 1390 + pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev)); 1391 + 1388 1392 return pcpu_rt; 1393 + } 1394 + 1395 + static bool rt6_is_valid(const struct rt6_info *rt6) 1396 + { 1397 + return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev)); 1389 1398 } 1390 1399 1391 1400 /* It should be called with rcu_read_lock() acquired */ ··· 1403 1394 struct rt6_info *pcpu_rt; 1404 1395 1405 1396 pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu); 1397 + 1398 + if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) { 1399 + struct rt6_info *prev, **p; 1400 + 1401 + p = this_cpu_ptr(res->nh->rt6i_pcpu); 1402 + prev = xchg(p, NULL); 1403 + if (prev) { 1404 + dst_dev_put(&prev->dst); 1405 + dst_release(&prev->dst); 1406 + } 1407 + 1408 + pcpu_rt = NULL; 1409 + } 1406 1410 1407 1411 return pcpu_rt; 1408 1412 } ··· 2614 2592 struct rt6_info *rt; 2615 2593 2616 2594 rt = container_of(dst, struct rt6_info, dst); 2595 + 2596 + if (rt->sernum) 2597 + return rt6_is_valid(rt) ? dst : NULL; 2617 2598 2618 2599 rcu_read_lock(); 2619 2600
+8 -2
net/ipv6/seg6.c
··· 27 27 28 28 bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len) 29 29 { 30 - int trailing; 31 30 unsigned int tlv_offset; 31 + int max_last_entry; 32 + int trailing; 32 33 33 34 if (srh->type != IPV6_SRCRT_TYPE_4) 34 35 return false; ··· 37 36 if (((srh->hdrlen + 1) << 3) != len) 38 37 return false; 39 38 40 - if (srh->segments_left > srh->first_segment) 39 + max_last_entry = (srh->hdrlen / 2) - 1; 40 + 41 + if (srh->first_segment > max_last_entry) 42 + return false; 43 + 44 + if (srh->segments_left > srh->first_segment + 1) 41 45 return false; 42 46 43 47 tlv_offset = sizeof(*srh) + ((srh->first_segment + 1) << 4);
+41 -54
net/mptcp/options.c
··· 16 16 return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256; 17 17 } 18 18 19 - void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr, 20 - int opsize, struct tcp_options_received *opt_rx) 19 + static void mptcp_parse_option(const struct sk_buff *skb, 20 + const unsigned char *ptr, int opsize, 21 + struct mptcp_options_received *mp_opt) 21 22 { 22 - struct mptcp_options_received *mp_opt = &opt_rx->mptcp; 23 23 u8 subtype = *ptr >> 4; 24 24 int expected_opsize; 25 25 u8 version; ··· 283 283 } 284 284 285 285 void mptcp_get_options(const struct sk_buff *skb, 286 - struct tcp_options_received *opt_rx) 286 + struct mptcp_options_received *mp_opt) 287 287 { 288 - const unsigned char *ptr; 289 288 const struct tcphdr *th = tcp_hdr(skb); 290 - int length = (th->doff * 4) - sizeof(struct tcphdr); 289 + const unsigned char *ptr; 290 + int length; 291 291 292 + /* initialize option status */ 293 + mp_opt->mp_capable = 0; 294 + mp_opt->mp_join = 0; 295 + mp_opt->add_addr = 0; 296 + mp_opt->rm_addr = 0; 297 + mp_opt->dss = 0; 298 + 299 + length = (th->doff * 4) - sizeof(struct tcphdr); 292 300 ptr = (const unsigned char *)(th + 1); 293 301 294 302 while (length > 0) { ··· 316 308 if (opsize > length) 317 309 return; /* don't parse partial options */ 318 310 if (opcode == TCPOPT_MPTCP) 319 - mptcp_parse_option(skb, ptr, opsize, opt_rx); 311 + mptcp_parse_option(skb, ptr, opsize, mp_opt); 320 312 ptr += opsize - 2; 321 313 length -= opsize; 322 314 } ··· 350 342 return true; 351 343 } 352 344 return false; 353 - } 354 - 355 - void mptcp_rcv_synsent(struct sock *sk) 356 - { 357 - struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 358 - struct tcp_sock *tp = tcp_sk(sk); 359 - 360 - if (subflow->request_mptcp && tp->rx_opt.mptcp.mp_capable) { 361 - subflow->mp_capable = 1; 362 - subflow->can_ack = 1; 363 - subflow->remote_key = tp->rx_opt.mptcp.sndr_key; 364 - pr_debug("subflow=%p, remote_key=%llu", subflow, 365 - subflow->remote_key); 
366 - } else if (subflow->request_join && tp->rx_opt.mptcp.mp_join) { 367 - subflow->mp_join = 1; 368 - subflow->thmac = tp->rx_opt.mptcp.thmac; 369 - subflow->remote_nonce = tp->rx_opt.mptcp.nonce; 370 - pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow, 371 - subflow->thmac, subflow->remote_nonce); 372 - } else if (subflow->request_mptcp) { 373 - tcp_sk(sk)->is_mptcp = 0; 374 - } 375 345 } 376 346 377 347 /* MP_JOIN client subflow must wait for 4th ack before sending any data: ··· 695 709 if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) 696 710 return subflow->mp_capable; 697 711 698 - if (mp_opt->use_ack) { 712 + if (mp_opt->dss && mp_opt->use_ack) { 699 713 /* subflows are fully established as soon as we get any 700 714 * additional ack. 701 715 */ 702 716 subflow->fully_established = 1; 703 717 goto fully_established; 704 718 } 705 - 706 - WARN_ON_ONCE(subflow->can_ack); 707 719 708 720 /* If the first established packet does not contain MP_CAPABLE + data 709 721 * then fallback to TCP ··· 712 728 return false; 713 729 } 714 730 731 + if (unlikely(!READ_ONCE(msk->pm.server_side))) 732 + pr_warn_once("bogus mpc option on established client sk"); 715 733 subflow->fully_established = 1; 716 734 subflow->remote_key = mp_opt->sndr_key; 717 735 subflow->can_ack = 1; ··· 805 819 { 806 820 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 807 821 struct mptcp_sock *msk = mptcp_sk(subflow->conn); 808 - struct mptcp_options_received *mp_opt; 822 + struct mptcp_options_received mp_opt; 809 823 struct mptcp_ext *mpext; 810 824 811 - mp_opt = &opt_rx->mptcp; 812 - if (!check_fully_established(msk, sk, subflow, skb, mp_opt)) 825 + mptcp_get_options(skb, &mp_opt); 826 + if (!check_fully_established(msk, sk, subflow, skb, &mp_opt)) 813 827 return; 814 828 815 - if (mp_opt->add_addr && add_addr_hmac_valid(msk, mp_opt)) { 829 + if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) { 816 830 struct mptcp_addr_info addr; 817 831 818 - addr.port = 
htons(mp_opt->port); 819 - addr.id = mp_opt->addr_id; 820 - if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) { 832 + addr.port = htons(mp_opt.port); 833 + addr.id = mp_opt.addr_id; 834 + if (mp_opt.family == MPTCP_ADDR_IPVERSION_4) { 821 835 addr.family = AF_INET; 822 - addr.addr = mp_opt->addr; 836 + addr.addr = mp_opt.addr; 823 837 } 824 838 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 825 - else if (mp_opt->family == MPTCP_ADDR_IPVERSION_6) { 839 + else if (mp_opt.family == MPTCP_ADDR_IPVERSION_6) { 826 840 addr.family = AF_INET6; 827 - addr.addr6 = mp_opt->addr6; 841 + addr.addr6 = mp_opt.addr6; 828 842 } 829 843 #endif 830 - if (!mp_opt->echo) 844 + if (!mp_opt.echo) 831 845 mptcp_pm_add_addr_received(msk, &addr); 832 - mp_opt->add_addr = 0; 846 + mp_opt.add_addr = 0; 833 847 } 834 848 835 - if (!mp_opt->dss) 849 + if (!mp_opt.dss) 836 850 return; 837 851 838 852 /* we can't wait for recvmsg() to update the ack_seq, otherwise 839 853 * monodirectional flows will stuck 840 854 */ 841 - if (mp_opt->use_ack) 842 - update_una(msk, mp_opt); 855 + if (mp_opt.use_ack) 856 + update_una(msk, &mp_opt); 843 857 844 858 mpext = skb_ext_add(skb, SKB_EXT_MPTCP); 845 859 if (!mpext) ··· 847 861 848 862 memset(mpext, 0, sizeof(*mpext)); 849 863 850 - if (mp_opt->use_map) { 851 - if (mp_opt->mpc_map) { 864 + if (mp_opt.use_map) { 865 + if (mp_opt.mpc_map) { 852 866 /* this is an MP_CAPABLE carrying MPTCP data 853 867 * we know this map the first chunk of data 854 868 */ ··· 858 872 mpext->subflow_seq = 1; 859 873 mpext->dsn64 = 1; 860 874 mpext->mpc_map = 1; 875 + mpext->data_fin = 0; 861 876 } else { 862 - mpext->data_seq = mp_opt->data_seq; 863 - mpext->subflow_seq = mp_opt->subflow_seq; 864 - mpext->dsn64 = mp_opt->dsn64; 865 - mpext->data_fin = mp_opt->data_fin; 877 + mpext->data_seq = mp_opt.data_seq; 878 + mpext->subflow_seq = mp_opt.subflow_seq; 879 + mpext->dsn64 = mp_opt.dsn64; 880 + mpext->data_fin = mp_opt.data_fin; 866 881 } 867 - mpext->data_len = mp_opt->data_len; 882 + 
mpext->data_len = mp_opt.data_len; 868 883 mpext->use_map = 1; 869 884 } 870 885 }
+9 -8
net/mptcp/protocol.c
··· 1316 1316 1317 1317 static int mptcp_disconnect(struct sock *sk, int flags) 1318 1318 { 1319 - lock_sock(sk); 1320 - __mptcp_clear_xmit(sk); 1321 - release_sock(sk); 1322 - mptcp_cancel_work(sk); 1323 - return tcp_disconnect(sk, flags); 1319 + /* Should never be called. 1320 + * inet_stream_connect() calls ->disconnect, but that 1321 + * refers to the subflow socket, not the mptcp one. 1322 + */ 1323 + WARN_ON_ONCE(1); 1324 + return 0; 1324 1325 } 1325 1326 1326 1327 #if IS_ENABLED(CONFIG_MPTCP_IPV6) ··· 1334 1333 #endif 1335 1334 1336 1335 struct sock *mptcp_sk_clone(const struct sock *sk, 1337 - const struct tcp_options_received *opt_rx, 1336 + const struct mptcp_options_received *mp_opt, 1338 1337 struct request_sock *req) 1339 1338 { 1340 1339 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); ··· 1373 1372 1374 1373 msk->write_seq = subflow_req->idsn + 1; 1375 1374 atomic64_set(&msk->snd_una, msk->write_seq); 1376 - if (opt_rx->mptcp.mp_capable) { 1375 + if (mp_opt->mp_capable) { 1377 1376 msk->can_ack = true; 1378 - msk->remote_key = opt_rx->mptcp.sndr_key; 1377 + msk->remote_key = mp_opt->sndr_key; 1379 1378 mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq); 1380 1379 ack_seq++; 1381 1380 msk->ack_seq = ack_seq;
+41 -2
net/mptcp/protocol.h
··· 91 91 #define MPTCP_WORK_RTX 2 92 92 #define MPTCP_WORK_EOF 3 93 93 94 + struct mptcp_options_received { 95 + u64 sndr_key; 96 + u64 rcvr_key; 97 + u64 data_ack; 98 + u64 data_seq; 99 + u32 subflow_seq; 100 + u16 data_len; 101 + u16 mp_capable : 1, 102 + mp_join : 1, 103 + dss : 1, 104 + add_addr : 1, 105 + rm_addr : 1, 106 + family : 4, 107 + echo : 1, 108 + backup : 1; 109 + u32 token; 110 + u32 nonce; 111 + u64 thmac; 112 + u8 hmac[20]; 113 + u8 join_id; 114 + u8 use_map:1, 115 + dsn64:1, 116 + data_fin:1, 117 + use_ack:1, 118 + ack64:1, 119 + mpc_map:1, 120 + __unused:2; 121 + u8 addr_id; 122 + u8 rm_id; 123 + union { 124 + struct in_addr addr; 125 + #if IS_ENABLED(CONFIG_MPTCP_IPV6) 126 + struct in6_addr addr6; 127 + #endif 128 + }; 129 + u64 ahmac; 130 + u16 port; 131 + }; 132 + 94 133 static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field) 95 134 { 96 135 return htonl((TCPOPT_MPTCP << 24) | (len << 16) | (subopt << 12) | ··· 370 331 #endif 371 332 372 333 struct sock *mptcp_sk_clone(const struct sock *sk, 373 - const struct tcp_options_received *opt_rx, 334 + const struct mptcp_options_received *mp_opt, 374 335 struct request_sock *req); 375 336 void mptcp_get_options(const struct sk_buff *skb, 376 - struct tcp_options_received *opt_rx); 337 + struct mptcp_options_received *mp_opt); 377 338 378 339 void mptcp_finish_connect(struct sock *sk); 379 340 void mptcp_data_ready(struct sock *sk, struct sock *ssk);
+55 -31
net/mptcp/subflow.c
··· 124 124 { 125 125 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); 126 126 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 127 - struct tcp_options_received rx_opt; 127 + struct mptcp_options_received mp_opt; 128 128 129 129 pr_debug("subflow_req=%p, listener=%p", subflow_req, listener); 130 130 131 - memset(&rx_opt.mptcp, 0, sizeof(rx_opt.mptcp)); 132 - mptcp_get_options(skb, &rx_opt); 131 + mptcp_get_options(skb, &mp_opt); 133 132 134 133 subflow_req->mp_capable = 0; 135 134 subflow_req->mp_join = 0; ··· 141 142 return; 142 143 #endif 143 144 144 - if (rx_opt.mptcp.mp_capable) { 145 + if (mp_opt.mp_capable) { 145 146 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE); 146 147 147 - if (rx_opt.mptcp.mp_join) 148 + if (mp_opt.mp_join) 148 149 return; 149 - } else if (rx_opt.mptcp.mp_join) { 150 + } else if (mp_opt.mp_join) { 150 151 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX); 151 152 } 152 153 153 - if (rx_opt.mptcp.mp_capable && listener->request_mptcp) { 154 + if (mp_opt.mp_capable && listener->request_mptcp) { 154 155 int err; 155 156 156 157 err = mptcp_token_new_request(req); ··· 158 159 subflow_req->mp_capable = 1; 159 160 160 161 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; 161 - } else if (rx_opt.mptcp.mp_join && listener->request_mptcp) { 162 + } else if (mp_opt.mp_join && listener->request_mptcp) { 162 163 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; 163 164 subflow_req->mp_join = 1; 164 - subflow_req->backup = rx_opt.mptcp.backup; 165 - subflow_req->remote_id = rx_opt.mptcp.join_id; 166 - subflow_req->token = rx_opt.mptcp.token; 167 - subflow_req->remote_nonce = rx_opt.mptcp.nonce; 165 + subflow_req->backup = mp_opt.backup; 166 + subflow_req->remote_id = mp_opt.join_id; 167 + subflow_req->token = mp_opt.token; 168 + subflow_req->remote_nonce = mp_opt.nonce; 168 169 pr_debug("token=%u, remote_nonce=%u", subflow_req->token, 169 170 subflow_req->remote_nonce); 170 171 if 
(!subflow_token_join_request(req, skb)) { ··· 220 221 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) 221 222 { 222 223 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); 224 + struct mptcp_options_received mp_opt; 223 225 struct sock *parent = subflow->conn; 226 + struct tcp_sock *tp = tcp_sk(sk); 224 227 225 228 subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); 226 229 227 - if (inet_sk_state_load(parent) != TCP_ESTABLISHED) { 230 + if (inet_sk_state_load(parent) == TCP_SYN_SENT) { 228 231 inet_sk_state_store(parent, TCP_ESTABLISHED); 229 232 parent->sk_state_change(parent); 230 233 } 231 234 232 - if (subflow->conn_finished || !tcp_sk(sk)->is_mptcp) 235 + /* be sure no special action on any packet other than syn-ack */ 236 + if (subflow->conn_finished) 237 + return; 238 + 239 + subflow->conn_finished = 1; 240 + 241 + mptcp_get_options(skb, &mp_opt); 242 + if (subflow->request_mptcp && mp_opt.mp_capable) { 243 + subflow->mp_capable = 1; 244 + subflow->can_ack = 1; 245 + subflow->remote_key = mp_opt.sndr_key; 246 + pr_debug("subflow=%p, remote_key=%llu", subflow, 247 + subflow->remote_key); 248 + } else if (subflow->request_join && mp_opt.mp_join) { 249 + subflow->mp_join = 1; 250 + subflow->thmac = mp_opt.thmac; 251 + subflow->remote_nonce = mp_opt.nonce; 252 + pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow, 253 + subflow->thmac, subflow->remote_nonce); 254 + } else if (subflow->request_mptcp) { 255 + tp->is_mptcp = 0; 256 + } 257 + 258 + if (!tp->is_mptcp) 233 259 return; 234 260 235 261 if (subflow->mp_capable) { 236 262 pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk), 237 263 subflow->remote_key); 238 264 mptcp_finish_connect(sk); 239 - subflow->conn_finished = 1; 240 265 241 266 if (skb) { 242 267 pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq); ··· 287 264 if (!mptcp_finish_join(sk)) 288 265 goto do_reset; 289 266 290 - subflow->conn_finished = 1; 291 267 MPTCP_INC_STATS(sock_net(sk), 
MPTCP_MIB_JOINSYNACKRX); 292 268 } else { 293 269 do_reset: ··· 344 322 345 323 /* validate hmac received in third ACK */ 346 324 static bool subflow_hmac_valid(const struct request_sock *req, 347 - const struct tcp_options_received *rx_opt) 325 + const struct mptcp_options_received *mp_opt) 348 326 { 349 327 const struct mptcp_subflow_request_sock *subflow_req; 350 328 u8 hmac[MPTCPOPT_HMAC_LEN]; ··· 361 339 subflow_req->local_nonce, hmac); 362 340 363 341 ret = true; 364 - if (crypto_memneq(hmac, rx_opt->mptcp.hmac, sizeof(hmac))) 342 + if (crypto_memneq(hmac, mp_opt->hmac, sizeof(hmac))) 365 343 ret = false; 366 344 367 345 sock_put((struct sock *)msk); ··· 417 395 { 418 396 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk); 419 397 struct mptcp_subflow_request_sock *subflow_req; 420 - struct tcp_options_received opt_rx; 398 + struct mptcp_options_received mp_opt; 421 399 bool fallback_is_fatal = false; 422 400 struct sock *new_msk = NULL; 423 401 bool fallback = false; ··· 425 403 426 404 pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn); 427 405 428 - opt_rx.mptcp.mp_capable = 0; 406 + /* we need later a valid 'mp_capable' value even when options are not 407 + * parsed 408 + */ 409 + mp_opt.mp_capable = 0; 429 410 if (tcp_rsk(req)->is_mptcp == 0) 430 411 goto create_child; 431 412 ··· 443 418 goto create_msk; 444 419 } 445 420 446 - mptcp_get_options(skb, &opt_rx); 447 - if (!opt_rx.mptcp.mp_capable) { 421 + mptcp_get_options(skb, &mp_opt); 422 + if (!mp_opt.mp_capable) { 448 423 fallback = true; 449 424 goto create_child; 450 425 } 451 426 452 427 create_msk: 453 - new_msk = mptcp_sk_clone(listener->conn, &opt_rx, req); 428 + new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req); 454 429 if (!new_msk) 455 430 fallback = true; 456 431 } else if (subflow_req->mp_join) { 457 432 fallback_is_fatal = true; 458 - opt_rx.mptcp.mp_join = 0; 459 - mptcp_get_options(skb, &opt_rx); 460 - if (!opt_rx.mptcp.mp_join || 461 - 
!subflow_hmac_valid(req, &opt_rx)) { 433 + mptcp_get_options(skb, &mp_opt); 434 + if (!mp_opt.mp_join || 435 + !subflow_hmac_valid(req, &mp_opt)) { 462 436 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); 463 437 return NULL; 464 438 } ··· 497 473 /* with OoO packets we can reach here without ingress 498 474 * mpc option 499 475 */ 500 - ctx->remote_key = opt_rx.mptcp.sndr_key; 501 - ctx->fully_established = opt_rx.mptcp.mp_capable; 502 - ctx->can_ack = opt_rx.mptcp.mp_capable; 476 + ctx->remote_key = mp_opt.sndr_key; 477 + ctx->fully_established = mp_opt.mp_capable; 478 + ctx->can_ack = mp_opt.mp_capable; 503 479 } else if (ctx->mp_join) { 504 480 struct mptcp_sock *owner; 505 481 ··· 523 499 /* check for expected invariant - should never trigger, just help 524 500 * catching eariler subtle bugs 525 501 */ 526 - WARN_ON_ONCE(*own_req && child && tcp_sk(child)->is_mptcp && 502 + WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp && 527 503 (!mptcp_subflow_ctx(child) || 528 504 !mptcp_subflow_ctx(child)->conn)); 529 505 return child;
+1 -3
net/netfilter/nf_nat_proto.c
··· 68 68 enum nf_nat_manip_type maniptype) 69 69 { 70 70 struct udphdr *hdr; 71 - bool do_csum; 72 71 73 72 if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) 74 73 return false; 75 74 76 75 hdr = (struct udphdr *)(skb->data + hdroff); 77 - do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL; 76 + __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check); 78 77 79 - __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum); 80 78 return true; 81 79 } 82 80
+16 -6
net/sched/cls_api.c
··· 2074 2074 err = PTR_ERR(block); 2075 2075 goto errout; 2076 2076 } 2077 + block->classid = parent; 2077 2078 2078 2079 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2079 2080 if (chain_index > TC_ACT_EXT_VAL_MASK) { ··· 2617 2616 return skb->len; 2618 2617 2619 2618 parent = tcm->tcm_parent; 2620 - if (!parent) { 2619 + if (!parent) 2621 2620 q = dev->qdisc; 2622 - parent = q->handle; 2623 - } else { 2621 + else 2624 2622 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2625 - } 2626 2623 if (!q) 2627 2624 goto out; 2628 2625 cops = q->ops->cl_ops; ··· 2636 2637 block = cops->tcf_block(q, cl, NULL); 2637 2638 if (!block) 2638 2639 goto out; 2640 + parent = block->classid; 2639 2641 if (tcf_block_shared(block)) 2640 2642 q = NULL; 2641 2643 } ··· 3548 3548 return 0; 3549 3549 } 3550 3550 3551 + static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats) 3552 + { 3553 + if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY)) 3554 + return FLOW_ACTION_HW_STATS_DONT_CARE; 3555 + else if (!hw_stats) 3556 + return FLOW_ACTION_HW_STATS_DISABLED; 3557 + 3558 + return hw_stats; 3559 + } 3560 + 3551 3561 int tc_setup_flow_action(struct flow_action *flow_action, 3552 3562 const struct tcf_exts *exts) 3553 3563 { ··· 3581 3571 if (err) 3582 3572 goto err_out_locked; 3583 3573 3584 - entry->hw_stats = act->hw_stats; 3574 + entry->hw_stats = tc_act_hw_stats(act->hw_stats); 3585 3575 3586 3576 if (is_tcf_gact_ok(act)) { 3587 3577 entry->id = FLOW_ACTION_ACCEPT; ··· 3649 3639 entry->mangle.mask = tcf_pedit_mask(act, k); 3650 3640 entry->mangle.val = tcf_pedit_val(act, k); 3651 3641 entry->mangle.offset = tcf_pedit_offset(act, k); 3652 - entry->hw_stats = act->hw_stats; 3642 + entry->hw_stats = tc_act_hw_stats(act->hw_stats); 3653 3643 entry = &flow_action->entries[++j]; 3654 3644 } 3655 3645 } else if (is_tcf_csum(act)) {
+2 -1
net/sched/sch_choke.c
··· 317 317 318 318 sch->q.qlen = 0; 319 319 sch->qstats.backlog = 0; 320 - memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); 320 + if (q->tab) 321 + memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); 321 322 q->head = q->tail = 0; 322 323 red_restart(&q->vars); 323 324 }
+1 -1
net/sched/sch_fq_codel.c
··· 416 416 q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM])); 417 417 418 418 if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]) 419 - q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); 419 + q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])); 420 420 421 421 if (tb[TCA_FQ_CODEL_MEMORY_LIMIT]) 422 422 q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
+9
net/sched/sch_sfq.c
··· 637 637 if (ctl->divisor && 638 638 (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) 639 639 return -EINVAL; 640 + 641 + /* slot->allot is a short, make sure quantum is not too big. */ 642 + if (ctl->quantum) { 643 + unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum); 644 + 645 + if (scaled <= 0 || scaled > SHRT_MAX) 646 + return -EINVAL; 647 + } 648 + 640 649 if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, 641 650 ctl_v1->Wlog)) 642 651 return -EINVAL;
+3
net/sched/sch_skbprio.c
··· 169 169 { 170 170 struct tc_skbprio_qopt *ctl = nla_data(opt); 171 171 172 + if (opt->nla_len != nla_attr_size(sizeof(*ctl))) 173 + return -EINVAL; 174 + 172 175 sch->limit = ctl->limit; 173 176 return 0; 174 177 }
+18 -6
net/sunrpc/clnt.c
··· 880 880 /* 881 881 * Free an RPC client 882 882 */ 883 + static void rpc_free_client_work(struct work_struct *work) 884 + { 885 + struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work); 886 + 887 + /* These might block on processes that might allocate memory, 888 + * so they cannot be called in rpciod, so they are handled separately 889 + * here. 890 + */ 891 + rpc_clnt_debugfs_unregister(clnt); 892 + rpc_clnt_remove_pipedir(clnt); 893 + 894 + kfree(clnt); 895 + rpciod_down(); 896 + } 883 897 static struct rpc_clnt * 884 898 rpc_free_client(struct rpc_clnt *clnt) 885 899 { ··· 904 890 rcu_dereference(clnt->cl_xprt)->servername); 905 891 if (clnt->cl_parent != clnt) 906 892 parent = clnt->cl_parent; 907 - rpc_clnt_debugfs_unregister(clnt); 908 - rpc_clnt_remove_pipedir(clnt); 909 893 rpc_unregister_client(clnt); 910 894 rpc_free_iostats(clnt->cl_metrics); 911 895 clnt->cl_metrics = NULL; 912 896 xprt_put(rcu_dereference_raw(clnt->cl_xprt)); 913 897 xprt_iter_destroy(&clnt->cl_xpi); 914 - rpciod_down(); 915 898 put_cred(clnt->cl_cred); 916 899 rpc_free_clid(clnt); 917 - kfree(clnt); 900 + 901 + INIT_WORK(&clnt->cl_work, rpc_free_client_work); 902 + schedule_work(&clnt->cl_work); 918 903 return parent; 919 904 } 920 905 ··· 2821 2808 task = rpc_call_null_helper(clnt, xprt, NULL, 2822 2809 RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC|RPC_TASK_NULLCREDS, 2823 2810 &rpc_cb_add_xprt_call_ops, data); 2824 - if (IS_ERR(task)) 2825 - return PTR_ERR(task); 2811 + 2826 2812 rpc_put_task(task); 2827 2813 success: 2828 2814 return 1;
+11 -4
net/sunrpc/xprtrdma/rpc_rdma.c
··· 388 388 } while (nsegs); 389 389 390 390 done: 391 - return xdr_stream_encode_item_absent(xdr); 391 + if (xdr_stream_encode_item_absent(xdr) < 0) 392 + return -EMSGSIZE; 393 + return 0; 392 394 } 393 395 394 396 /* Register and XDR encode the Write list. Supports encoding a list ··· 456 454 *segcount = cpu_to_be32(nchunks); 457 455 458 456 done: 459 - return xdr_stream_encode_item_absent(xdr); 457 + if (xdr_stream_encode_item_absent(xdr) < 0) 458 + return -EMSGSIZE; 459 + return 0; 460 460 } 461 461 462 462 /* Register and XDR encode the Reply chunk. Supports encoding an array ··· 484 480 int nsegs, nchunks; 485 481 __be32 *segcount; 486 482 487 - if (wtype != rpcrdma_replych) 488 - return xdr_stream_encode_item_absent(xdr); 483 + if (wtype != rpcrdma_replych) { 484 + if (xdr_stream_encode_item_absent(xdr) < 0) 485 + return -EMSGSIZE; 486 + return 0; 487 + } 489 488 490 489 seg = req->rl_segments; 491 490 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
+2 -1
net/sunrpc/xprtrdma/verbs.c
··· 289 289 case RDMA_CM_EVENT_DISCONNECTED: 290 290 ep->re_connect_status = -ECONNABORTED; 291 291 disconnected: 292 + xprt_force_disconnect(xprt); 292 293 return rpcrdma_ep_destroy(ep); 293 294 default: 294 295 break; ··· 1356 1355 --ep->re_send_count; 1357 1356 } 1358 1357 1358 + trace_xprtrdma_post_send(req); 1359 1359 rc = frwr_send(r_xprt, req); 1360 - trace_xprtrdma_post_send(req, rc); 1361 1360 if (rc) 1362 1361 return -ENOTCONN; 1363 1362 return 0;
+3 -2
net/tipc/topsrv.c
··· 402 402 read_lock_bh(&sk->sk_callback_lock); 403 403 ret = tipc_conn_rcv_sub(srv, con, &s); 404 404 read_unlock_bh(&sk->sk_callback_lock); 405 + if (!ret) 406 + return 0; 405 407 } 406 - if (ret < 0) 407 - tipc_conn_close(con); 408 408 409 + tipc_conn_close(con); 409 410 return ret; 410 411 } 411 412
+5 -2
net/tls/tls_sw.c
··· 800 800 *copied -= sk_msg_free(sk, msg); 801 801 tls_free_open_rec(sk); 802 802 } 803 + if (psock) 804 + sk_psock_put(sk, psock); 803 805 return err; 804 806 } 805 807 more_data: ··· 2083 2081 strp_data_ready(&ctx->strp); 2084 2082 2085 2083 psock = sk_psock_get(sk); 2086 - if (psock && !list_empty(&psock->ingress_msg)) { 2087 - ctx->saved_data_ready(sk); 2084 + if (psock) { 2085 + if (!list_empty(&psock->ingress_msg)) 2086 + ctx->saved_data_ready(sk); 2088 2087 sk_psock_put(sk, psock); 2089 2088 } 2090 2089 }
+4
net/vmw_vsock/virtio_transport_common.c
··· 157 157 158 158 void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt) 159 159 { 160 + if (pkt->tap_delivered) 161 + return; 162 + 160 163 vsock_deliver_tap(virtio_transport_build_skb, pkt); 164 + pkt->tap_delivered = true; 161 165 } 162 166 EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt); 163 167
+6
net/x25/x25_subr.c
··· 357 357 sk->sk_state_change(sk); 358 358 sock_set_flag(sk, SOCK_DEAD); 359 359 } 360 + if (x25->neighbour) { 361 + read_lock_bh(&x25_list_lock); 362 + x25_neigh_put(x25->neighbour); 363 + x25->neighbour = NULL; 364 + read_unlock_bh(&x25_list_lock); 365 + } 360 366 } 361 367 362 368 /*
+1
scripts/gcc-plugins/Makefile
··· 4 4 HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti 5 5 HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb 6 6 HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat 7 + HOST_EXTRACXXFLAGS += -Wno-format-diag 7 8 8 9 $(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h 9 10 quiet_cmd_create_randomize_layout_seed = GENSEED $@
+4
scripts/gcc-plugins/gcc-common.h
··· 35 35 #include "ggc.h" 36 36 #include "timevar.h" 37 37 38 + #if BUILDING_GCC_VERSION < 10000 38 39 #include "params.h" 40 + #endif 39 41 40 42 #if BUILDING_GCC_VERSION <= 4009 41 43 #include "pointer-set.h" ··· 849 847 return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT); 850 848 } 851 849 850 + #if BUILDING_GCC_VERSION < 10000 852 851 template <> 853 852 template <> 854 853 inline bool is_a_helper<const ggoto *>::test(const_gimple gs) ··· 863 860 { 864 861 return gs->code == GIMPLE_RETURN; 865 862 } 863 + #endif 866 864 867 865 static inline gasm *as_a_gasm(gimple stmt) 868 866 {
+2 -3
scripts/gcc-plugins/stackleak_plugin.c
··· 51 51 gimple stmt; 52 52 gcall *stackleak_track_stack; 53 53 cgraph_node_ptr node; 54 - int frequency; 55 54 basic_block bb; 56 55 57 56 /* Insert call to void stackleak_track_stack(void) */ ··· 67 68 bb = gimple_bb(stackleak_track_stack); 68 69 node = cgraph_get_create_node(track_function_decl); 69 70 gcc_assert(node); 70 - frequency = compute_call_stmt_bb_frequency(current_function_decl, bb); 71 71 cgraph_create_edge(cgraph_get_node(current_function_decl), node, 72 - stackleak_track_stack, bb->count, frequency); 72 + stackleak_track_stack, bb->count, 73 + compute_call_stmt_bb_frequency(current_function_decl, bb)); 73 74 } 74 75 75 76 static bool is_alloca(gimple stmt)
+1 -1
scripts/kallsyms.c
··· 34 34 unsigned int len; 35 35 unsigned int start_pos; 36 36 unsigned int percpu_absolute; 37 - unsigned char sym[0]; 37 + unsigned char sym[]; 38 38 }; 39 39 40 40 struct addr_range {
+45 -25
security/selinux/hooks.c
··· 5842 5842 5843 5843 static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) 5844 5844 { 5845 - int err = 0; 5846 - u32 perm; 5845 + int rc = 0; 5846 + unsigned int msg_len; 5847 + unsigned int data_len = skb->len; 5848 + unsigned char *data = skb->data; 5847 5849 struct nlmsghdr *nlh; 5848 5850 struct sk_security_struct *sksec = sk->sk_security; 5851 + u16 sclass = sksec->sclass; 5852 + u32 perm; 5849 5853 5850 - if (skb->len < NLMSG_HDRLEN) { 5851 - err = -EINVAL; 5852 - goto out; 5853 - } 5854 - nlh = nlmsg_hdr(skb); 5854 + while (data_len >= nlmsg_total_size(0)) { 5855 + nlh = (struct nlmsghdr *)data; 5855 5856 5856 - err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm); 5857 - if (err) { 5858 - if (err == -EINVAL) { 5857 + /* NOTE: the nlmsg_len field isn't reliably set by some netlink 5858 + * users which means we can't reject skb's with bogus 5859 + * length fields; our solution is to follow what 5860 + * netlink_rcv_skb() does and simply skip processing at 5861 + * messages with length fields that are clearly junk 5862 + */ 5863 + if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len) 5864 + return 0; 5865 + 5866 + rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm); 5867 + if (rc == 0) { 5868 + rc = sock_has_perm(sk, perm); 5869 + if (rc) 5870 + return rc; 5871 + } else if (rc == -EINVAL) { 5872 + /* -EINVAL is a missing msg/perm mapping */ 5859 5873 pr_warn_ratelimited("SELinux: unrecognized netlink" 5860 - " message: protocol=%hu nlmsg_type=%hu sclass=%s" 5861 - " pid=%d comm=%s\n", 5862 - sk->sk_protocol, nlh->nlmsg_type, 5863 - secclass_map[sksec->sclass - 1].name, 5864 - task_pid_nr(current), current->comm); 5865 - if (!enforcing_enabled(&selinux_state) || 5866 - security_get_allow_unknown(&selinux_state)) 5867 - err = 0; 5874 + " message: protocol=%hu nlmsg_type=%hu sclass=%s" 5875 + " pid=%d comm=%s\n", 5876 + sk->sk_protocol, nlh->nlmsg_type, 5877 + secclass_map[sclass - 1].name, 5878 + 
task_pid_nr(current), current->comm); 5879 + if (enforcing_enabled(&selinux_state) && 5880 + !security_get_allow_unknown(&selinux_state)) 5881 + return rc; 5882 + rc = 0; 5883 + } else if (rc == -ENOENT) { 5884 + /* -ENOENT is a missing socket/class mapping, ignore */ 5885 + rc = 0; 5886 + } else { 5887 + return rc; 5868 5888 } 5869 5889 5870 - /* Ignore */ 5871 - if (err == -ENOENT) 5872 - err = 0; 5873 - goto out; 5890 + /* move to the next message after applying netlink padding */ 5891 + msg_len = NLMSG_ALIGN(nlh->nlmsg_len); 5892 + if (msg_len >= data_len) 5893 + return 0; 5894 + data_len -= msg_len; 5895 + data += msg_len; 5874 5896 } 5875 5897 5876 - err = sock_has_perm(sk, perm); 5877 - out: 5878 - return err; 5898 + return rc; 5879 5899 } 5880 5900 5881 5901 static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
+1 -1
security/selinux/ss/conditional.c
··· 429 429 430 430 p->cond_list = kcalloc(len, sizeof(*p->cond_list), GFP_KERNEL); 431 431 if (!p->cond_list) 432 - return rc; 432 + return -ENOMEM; 433 433 434 434 rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel); 435 435 if (rc)
+6 -4
sound/core/oss/pcm_plugin.c
··· 205 205 plugin = snd_pcm_plug_first(plug); 206 206 while (plugin && frames > 0) { 207 207 plugin_next = plugin->next; 208 + if (check_size && plugin->buf_frames && 209 + frames > plugin->buf_frames) 210 + frames = plugin->buf_frames; 208 211 if (plugin->dst_frames) { 209 212 frames = plugin->dst_frames(plugin, frames); 210 213 if (frames < 0) 211 214 return frames; 212 215 } 213 - if (check_size && frames > plugin->buf_frames) 214 - frames = plugin->buf_frames; 215 216 plugin = plugin_next; 216 217 } 217 218 return frames; ··· 226 225 227 226 plugin = snd_pcm_plug_last(plug); 228 227 while (plugin && frames > 0) { 229 - if (check_size && frames > plugin->buf_frames) 230 - frames = plugin->buf_frames; 231 228 plugin_prev = plugin->prev; 232 229 if (plugin->src_frames) { 233 230 frames = plugin->src_frames(plugin, frames); 234 231 if (frames < 0) 235 232 return frames; 236 233 } 234 + if (check_size && plugin->buf_frames && 235 + frames > plugin->buf_frames) 236 + frames = plugin->buf_frames; 237 237 plugin = plugin_prev; 238 238 } 239 239 return frames;
+6 -3
sound/isa/opti9xx/miro.c
··· 867 867 spin_unlock_irqrestore(&chip->lock, flags); 868 868 } 869 869 870 + static inline void snd_miro_write_mask(struct snd_miro *chip, 871 + unsigned char reg, unsigned char value, unsigned char mask) 872 + { 873 + unsigned char oldval = snd_miro_read(chip, reg); 870 874 871 - #define snd_miro_write_mask(chip, reg, value, mask) \ 872 - snd_miro_write(chip, reg, \ 873 - (snd_miro_read(chip, reg) & ~(mask)) | ((value) & (mask))) 875 + snd_miro_write(chip, reg, (oldval & ~mask) | (value & mask)); 876 + } 874 877 875 878 /* 876 879 * Proc Interface
+6 -3
sound/isa/opti9xx/opti92x-ad1848.c
··· 317 317 } 318 318 319 319 320 - #define snd_opti9xx_write_mask(chip, reg, value, mask) \ 321 - snd_opti9xx_write(chip, reg, \ 322 - (snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask))) 320 + static inline void snd_opti9xx_write_mask(struct snd_opti9xx *chip, 321 + unsigned char reg, unsigned char value, unsigned char mask) 322 + { 323 + unsigned char oldval = snd_opti9xx_read(chip, reg); 323 324 325 + snd_opti9xx_write(chip, reg, (oldval & ~mask) | (value & mask)); 326 + } 324 327 325 328 static int snd_opti9xx_configure(struct snd_opti9xx *chip, 326 329 long port,
+5 -4
sound/pci/hda/hda_intel.c
··· 2078 2078 * some HD-audio PCI entries are exposed without any codecs, and such devices 2079 2079 * should be ignored from the beginning. 2080 2080 */ 2081 - static const struct snd_pci_quirk driver_blacklist[] = { 2082 - SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0), 2083 - SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0), 2081 + static const struct pci_device_id driver_blacklist[] = { 2082 + { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */ 2083 + { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */ 2084 + { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */ 2084 2085 {} 2085 2086 }; 2086 2087 ··· 2101 2100 bool schedule_probe; 2102 2101 int err; 2103 2102 2104 - if (snd_pci_quirk_lookup(pci, driver_blacklist)) { 2103 + if (pci_match_id(driver_blacklist, pci)) { 2105 2104 dev_info(&pci->dev, "Skipping the blacklisted device\n"); 2106 2105 return -ENODEV; 2107 2106 }
+5 -1
sound/pci/hda/patch_hdmi.c
··· 1848 1848 /* Add sanity check to pass klockwork check. 1849 1849 * This should never happen. 1850 1850 */ 1851 - if (WARN_ON(spdif == NULL)) 1851 + if (WARN_ON(spdif == NULL)) { 1852 + mutex_unlock(&codec->spdif_mutex); 1852 1853 return true; 1854 + } 1853 1855 non_pcm = !!(spdif->status & IEC958_AES0_NONAUDIO); 1854 1856 mutex_unlock(&codec->spdif_mutex); 1855 1857 return non_pcm; ··· 2200 2198 2201 2199 for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) { 2202 2200 struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); 2201 + struct hdmi_eld *pin_eld = &per_pin->sink_eld; 2203 2202 2203 + pin_eld->eld_valid = false; 2204 2204 hdmi_present_sense(per_pin, 0); 2205 2205 } 2206 2206
+1
sound/pci/hda/patch_realtek.c
··· 7420 7420 SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), 7421 7421 SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC), 7422 7422 SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), 7423 + SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), 7423 7424 SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), 7424 7425 SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), 7425 7426 SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+5 -17
sound/usb/line6/podhd.c
··· 21 21 enum { 22 22 LINE6_PODHD300, 23 23 LINE6_PODHD400, 24 - LINE6_PODHD500_0, 25 - LINE6_PODHD500_1, 24 + LINE6_PODHD500, 26 25 LINE6_PODX3, 27 26 LINE6_PODX3LIVE, 28 27 LINE6_PODHD500X, ··· 317 318 /* TODO: no need to alloc data interfaces when only audio is used */ 318 319 { LINE6_DEVICE(0x5057), .driver_info = LINE6_PODHD300 }, 319 320 { LINE6_DEVICE(0x5058), .driver_info = LINE6_PODHD400 }, 320 - { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500_0 }, 321 - { LINE6_IF_NUM(0x414D, 1), .driver_info = LINE6_PODHD500_1 }, 321 + { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500 }, 322 322 { LINE6_IF_NUM(0x414A, 0), .driver_info = LINE6_PODX3 }, 323 323 { LINE6_IF_NUM(0x414B, 0), .driver_info = LINE6_PODX3LIVE }, 324 324 { LINE6_IF_NUM(0x4159, 0), .driver_info = LINE6_PODHD500X }, ··· 350 352 .ep_audio_r = 0x82, 351 353 .ep_audio_w = 0x01, 352 354 }, 353 - [LINE6_PODHD500_0] = { 355 + [LINE6_PODHD500] = { 354 356 .id = "PODHD500", 355 357 .name = "POD HD500", 356 - .capabilities = LINE6_CAP_PCM 358 + .capabilities = LINE6_CAP_PCM | LINE6_CAP_CONTROL 357 359 | LINE6_CAP_HWMON, 358 360 .altsetting = 1, 359 - .ep_ctrl_r = 0x81, 360 - .ep_ctrl_w = 0x01, 361 - .ep_audio_r = 0x86, 362 - .ep_audio_w = 0x02, 363 - }, 364 - [LINE6_PODHD500_1] = { 365 - .id = "PODHD500", 366 - .name = "POD HD500", 367 - .capabilities = LINE6_CAP_PCM 368 - | LINE6_CAP_HWMON, 369 - .altsetting = 0, 361 + .ctrl_if = 1, 370 362 .ep_ctrl_r = 0x81, 371 363 .ep_ctrl_w = 0x01, 372 364 .ep_audio_r = 0x86,
+1 -1
sound/usb/quirks.c
··· 1687 1687 1688 1688 case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ 1689 1689 case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */ 1690 - case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */ 1690 + case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */ 1691 1691 case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ 1692 1692 case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */ 1693 1693 case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
+1 -4
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
··· 10 10 exit_unsupported 11 11 fi 12 12 13 - if [ ! -f set_ftrace_filter ]; then 14 - echo "set_ftrace_filter not found? Is dynamic ftrace not set?" 15 - exit_unsupported 16 - fi 13 + check_filter_file set_ftrace_filter 17 14 18 15 do_reset() { 19 16 if [ -e /proc/sys/kernel/stack_tracer_enabled ]; then
+2
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
··· 9 9 exit_unsupported 10 10 fi 11 11 12 + check_filter_file set_ftrace_filter 13 + 12 14 fail() { # msg 13 15 echo $1 14 16 exit_fail
+2
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
··· 9 9 exit_unsupported 10 10 fi 11 11 12 + check_filter_file set_ftrace_filter 13 + 12 14 disable_tracing 13 15 clear_trace 14 16
+1 -4
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc
··· 15 15 exit_unsupported 16 16 fi 17 17 18 - if [ ! -f set_ftrace_filter ]; then 19 - echo "set_ftrace_filter not found? Is function tracer not set?" 20 - exit_unsupported 21 - fi 18 + check_filter_file set_ftrace_filter 22 19 23 20 do_function_fork=1 24 21
+1 -4
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
··· 16 16 exit_unsupported 17 17 fi 18 18 19 - if [ ! -f set_ftrace_filter ]; then 20 - echo "set_ftrace_filter not found? Is function tracer not set?" 21 - exit_unsupported 22 - fi 19 + check_filter_file set_ftrace_filter 23 20 24 21 do_function_fork=1 25 22
+1 -1
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc
··· 3 3 # description: ftrace - stacktrace filter command 4 4 # flags: instance 5 5 6 - [ ! -f set_ftrace_filter ] && exit_unsupported 6 + check_filter_file set_ftrace_filter 7 7 8 8 echo _do_fork:stacktrace >> set_ftrace_filter 9 9
+1 -4
tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
··· 11 11 # 12 12 13 13 # The triggers are set within the set_ftrace_filter file 14 - if [ ! -f set_ftrace_filter ]; then 15 - echo "set_ftrace_filter not found? Is dynamic ftrace not set?" 16 - exit_unsupported 17 - fi 14 + check_filter_file set_ftrace_filter 18 15 19 16 do_reset() { 20 17 reset_ftrace_filter
+1 -1
tools/testing/selftests/ftrace/test.d/ftrace/func_mod_trace.tc
··· 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 # description: ftrace - function trace on module 4 4 5 - [ ! -f set_ftrace_filter ] && exit_unsupported 5 + check_filter_file set_ftrace_filter 6 6 7 7 : "mod: allows to filter a non exist function" 8 8 echo 'non_exist_func:mod:non_exist_module' > set_ftrace_filter
+1 -4
tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
··· 18 18 exit_unsupported; 19 19 fi 20 20 21 - if [ ! -f set_ftrace_filter ]; then 22 - echo "set_ftrace_filter not found? Is dynamic ftrace not set?" 23 - exit_unsupported 24 - fi 21 + check_filter_file set_ftrace_filter 25 22 26 23 if [ ! -f function_profile_enabled ]; then 27 24 echo "function_profile_enabled not found, function profiling enabled?"
+1 -4
tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
··· 10 10 # 11 11 12 12 # The triggers are set within the set_ftrace_filter file 13 - if [ ! -f set_ftrace_filter ]; then 14 - echo "set_ftrace_filter not found? Is dynamic ftrace not set?" 15 - exit_unsupported 16 - fi 13 + check_filter_file set_ftrace_filter 17 14 18 15 fail() { # mesg 19 16 echo $1
+2
tools/testing/selftests/ftrace/test.d/ftrace/func_stack_tracer.tc
··· 8 8 exit_unsupported 9 9 fi 10 10 11 + check_filter_file stack_trace_filter 12 + 11 13 echo > stack_trace_filter 12 14 echo 0 > stack_max_size 13 15 echo 1 > /proc/sys/kernel/stack_tracer_enabled
+1 -4
tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
··· 11 11 # 12 12 13 13 # The triggers are set within the set_ftrace_filter file 14 - if [ ! -f set_ftrace_filter ]; then 15 - echo "set_ftrace_filter not found? Is dynamic ftrace not set?" 16 - exit_unsupported 17 - fi 14 + check_filter_file set_ftrace_filter 18 15 19 16 fail() { # mesg 20 17 echo $1
+6
tools/testing/selftests/ftrace/test.d/functions
··· 1 + check_filter_file() { # check filter file introduced by dynamic ftrace 2 + if [ ! -f "$1" ]; then 3 + echo "$1 not found? Is dynamic ftrace not set?" 4 + exit_unsupported 5 + fi 6 + } 1 7 2 8 clear_trace() { # reset trace output 3 9 echo > trace
+1 -1
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
··· 38 38 echo 0 > events/kprobes/testprobe/enable 39 39 40 40 : "Confirm the arguments is recorded in given types correctly" 41 - ARGS=`grep "testprobe" trace | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'` 41 + ARGS=`grep "testprobe" trace | head -n 1 | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'` 42 42 check_types $ARGS $width 43 43 44 44 : "Clear event for next loop"
+2
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc
··· 5 5 [ -f kprobe_events ] || exit_unsupported # this is configurable 6 6 grep "function" available_tracers || exit_unsupported # this is configurable 7 7 8 + check_filter_file set_ftrace_filter 9 + 8 10 # prepare 9 11 echo nop > current_tracer 10 12 echo _do_fork > set_ftrace_filter
+6 -6
tools/testing/selftests/gpio/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 - MOUNT_CFLAGS := $(shell pkg-config --cflags mount 2>/dev/null) 4 - MOUNT_LDLIBS := $(shell pkg-config --libs mount 2>/dev/null) 5 - ifeq ($(MOUNT_LDLIBS),) 6 - MOUNT_LDLIBS := -lmount -I/usr/include/libmount 3 + VAR_CFLAGS := $(shell pkg-config --cflags mount 2>/dev/null) 4 + VAR_LDLIBS := $(shell pkg-config --libs mount 2>/dev/null) 5 + ifeq ($(VAR_LDLIBS),) 6 + VAR_LDLIBS := -lmount -I/usr/include/libmount 7 7 endif 8 8 9 - CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ $(MOUNT_CFLAGS) 10 - LDLIBS += $(MOUNT_LDLIBS) 9 + CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ $(VAR_CFLAGS) 10 + LDLIBS += $(VAR_LDLIBS) 11 11 12 12 TEST_PROGS := gpio-mockup.sh 13 13 TEST_FILES := gpio-mockup-sysfs.sh
+1 -1
tools/testing/selftests/intel_pstate/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE 3 - LDLIBS := $(LDLIBS) -lm 3 + LDLIBS += -lm 4 4 5 5 uname_M := $(shell uname -m 2>/dev/null || echo not) 6 6 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
+272
tools/testing/selftests/kselftest_deps.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: GPL-2.0 3 + # kselftest_deps.sh 4 + # 5 + # Checks for kselftest build dependencies on the build system. 6 + # Copyright (c) 2020 Shuah Khan <skhan@linuxfoundation.org> 7 + # 8 + # 9 + 10 + usage() 11 + { 12 + 13 + echo -e "Usage: $0 -[p] <compiler> [test_name]\n" 14 + echo -e "\tkselftest_deps.sh [-p] gcc" 15 + echo -e "\tkselftest_deps.sh [-p] gcc vm" 16 + echo -e "\tkselftest_deps.sh [-p] aarch64-linux-gnu-gcc" 17 + echo -e "\tkselftest_deps.sh [-p] aarch64-linux-gnu-gcc vm\n" 18 + echo "- Should be run in selftests directory in the kernel repo." 19 + echo "- Checks if Kselftests can be built/cross-built on a system." 20 + echo "- Parses all test/sub-test Makefile to find library dependencies." 21 + echo "- Runs compile test on a trivial C file with LDLIBS specified" 22 + echo " in the test Makefiles to identify missing library dependencies." 23 + echo "- Prints suggested target list for a system filtering out tests" 24 + echo " failed the build dependency check from the TARGETS in Selftests" 25 + echo " main Makefile when optional -p is specified." 26 + echo "- Prints pass/fail dependency check for each tests/sub-test." 27 + echo "- Prints pass/fail targets and libraries." 28 + echo "- Default: runs dependency checks on all tests." 29 + echo "- Optional test name can be specified to check dependencies for it." 30 + exit 1 31 + 32 + } 33 + 34 + # Start main() 35 + main() 36 + { 37 + 38 + base_dir=`pwd` 39 + # Make sure we're in the selftests top-level directory. 40 + if [ $(basename "$base_dir") != "selftests" ]; then 41 + echo -e "\tPlease run $0 in" 42 + echo -e "\ttools/testing/selftests directory ..." 
43 + exit 1 44 + fi 45 + 46 + print_targets=0 47 + 48 + while getopts "p" arg; do 49 + case $arg in 50 + p) 51 + print_targets=1 52 + shift;; 53 + esac 54 + done 55 + 56 + if [ $# -eq 0 ] 57 + then 58 + usage 59 + fi 60 + 61 + # Compiler 62 + CC=$1 63 + 64 + tmp_file=$(mktemp).c 65 + trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT 66 + #echo $tmp_file 67 + 68 + pass=$(mktemp).out 69 + trap "rm -f $pass" EXIT 70 + #echo $pass 71 + 72 + fail=$(mktemp).out 73 + trap "rm -f $fail" EXIT 74 + #echo $fail 75 + 76 + # Generate tmp source fire for compile test 77 + cat << "EOF" > $tmp_file 78 + int main() 79 + { 80 + } 81 + EOF 82 + 83 + # Save results 84 + total_cnt=0 85 + fail_trgts=() 86 + fail_libs=() 87 + fail_cnt=0 88 + pass_trgts=() 89 + pass_libs=() 90 + pass_cnt=0 91 + 92 + # Get all TARGETS from selftests Makefile 93 + targets=$(egrep "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2) 94 + 95 + # Single test case 96 + if [ $# -eq 2 ] 97 + then 98 + test=$2/Makefile 99 + 100 + l1_test $test 101 + l2_test $test 102 + l3_test $test 103 + 104 + print_results $1 $2 105 + exit $? 106 + fi 107 + 108 + # Level 1: LDLIBS set static. 109 + # 110 + # Find all LDLIBS set statically for all executables built by a Makefile 111 + # and filter out VAR_LDLIBS to discard the following: 112 + # gpio/Makefile:LDLIBS += $(VAR_LDLIBS) 113 + # Append space at the end of the list to append more tests. 114 + 115 + l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \ 116 + grep -v "VAR_LDLIBS" | awk -F: '{print $1}') 117 + 118 + # Level 2: LDLIBS set dynamically. 119 + # 120 + # Level 2 121 + # Some tests have multiple valid LDLIBS lines for individual sub-tests 122 + # that need dependency checks. 
Find them and append them to the tests 123 + # e.g: vm/Makefile:$(OUTPUT)/userfaultfd: LDLIBS += -lpthread 124 + # Filter out VAR_LDLIBS to discard the following: 125 + # memfd/Makefile:$(OUTPUT)/fuse_mnt: LDLIBS += $(VAR_LDLIBS) 126 + # Append space at the end of the list to append more tests. 127 + 128 + l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \ 129 + grep -v "VAR_LDLIBS" | awk -F: '{print $1}') 130 + 131 + # Level 3 132 + # gpio, memfd and others use pkg-config to find mount and fuse libs 133 + # respectively and save it in VAR_LDLIBS. If pkg-config doesn't find 134 + # any, VAR_LDLIBS set to default. 135 + # Use the default value and filter out pkg-config for dependency check. 136 + # e.g: 137 + # gpio/Makefile 138 + # VAR_LDLIBS := $(shell pkg-config --libs mount) 2>/dev/null) 139 + # memfd/Makefile 140 + # VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null) 141 + 142 + l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \ 143 + grep -v "pkg-config" | awk -F: '{print $1}') 144 + 145 + #echo $l1_tests 146 + #echo $l2_1_tests 147 + #echo $l3_tests 148 + 149 + all_tests 150 + print_results $1 $2 151 + 152 + exit $? 153 + } 154 + # end main() 155 + 156 + all_tests() 157 + { 158 + for test in $l1_tests; do 159 + l1_test $test 160 + done 161 + 162 + for test in $l2_tests; do 163 + l2_test $test 164 + done 165 + 166 + for test in $l3_tests; do 167 + l3_test $test 168 + done 169 + } 170 + 171 + # Use same parsing used for l1_tests and pick libraries this time. 172 + l1_test() 173 + { 174 + test_libs=$(grep --include=Makefile "^LDLIBS" $test | \ 175 + grep -v "VAR_LDLIBS" | \ 176 + sed -e 's/\:/ /' | \ 177 + sed -e 's/+/ /' | cut -d "=" -f 2) 178 + 179 + check_libs $test $test_libs 180 + } 181 + 182 + # Use same parsing used for l2__tests and pick libraries this time. 
183 + l2_test() 184 + { 185 + test_libs=$(grep --include=Makefile ": LDLIBS" $test | \ 186 + grep -v "VAR_LDLIBS" | \ 187 + sed -e 's/\:/ /' | sed -e 's/+/ /' | \ 188 + cut -d "=" -f 2) 189 + 190 + check_libs $test $test_libs 191 + } 192 + 193 + l3_test() 194 + { 195 + test_libs=$(grep --include=Makefile "^VAR_LDLIBS" $test | \ 196 + grep -v "pkg-config" | sed -e 's/\:/ /' | 197 + sed -e 's/+/ /' | cut -d "=" -f 2) 198 + 199 + check_libs $test $test_libs 200 + } 201 + 202 + check_libs() 203 + { 204 + 205 + if [[ ! -z "${test_libs// }" ]] 206 + then 207 + 208 + #echo $test_libs 209 + 210 + for lib in $test_libs; do 211 + 212 + let total_cnt+=1 213 + $CC -o $tmp_file.bin $lib $tmp_file > /dev/null 2>&1 214 + if [ $? -ne 0 ]; then 215 + echo "FAIL: $test dependency check: $lib" >> $fail 216 + let fail_cnt+=1 217 + fail_libs+="$lib " 218 + fail_target=$(echo "$test" | cut -d "/" -f1) 219 + fail_trgts+="$fail_target " 220 + targets=$(echo "$targets" | grep -v "$fail_target") 221 + else 222 + echo "PASS: $test dependency check passed $lib" >> $pass 223 + let pass_cnt+=1 224 + pass_libs+="$lib " 225 + pass_trgts+="$(echo "$test" | cut -d "/" -f1) " 226 + fi 227 + 228 + done 229 + fi 230 + } 231 + 232 + print_results() 233 + { 234 + echo -e "========================================================"; 235 + echo -e "Kselftest Dependency Check for [$0 $1 $2] results..." 
236 + 237 + if [ $print_targets -ne 0 ] 238 + then 239 + echo -e "Suggested Selftest Targets for your configuration:" 240 + echo -e "$targets"; 241 + fi 242 + 243 + echo -e "========================================================"; 244 + echo -e "Checked tests defining LDLIBS dependencies" 245 + echo -e "--------------------------------------------------------"; 246 + echo -e "Total tests with Dependencies:" 247 + echo -e "$total_cnt Pass: $pass_cnt Fail: $fail_cnt"; 248 + 249 + if [ $pass_cnt -ne 0 ]; then 250 + echo -e "--------------------------------------------------------"; 251 + cat $pass 252 + echo -e "--------------------------------------------------------"; 253 + echo -e "Targets passed build dependency check on system:" 254 + echo -e "$(echo "$pass_trgts" | xargs -n1 | sort -u | xargs)" 255 + fi 256 + 257 + if [ $fail_cnt -ne 0 ]; then 258 + echo -e "--------------------------------------------------------"; 259 + cat $fail 260 + echo -e "--------------------------------------------------------"; 261 + echo -e "Targets failed build dependency check on system:" 262 + echo -e "$(echo "$fail_trgts" | xargs -n1 | sort -u | xargs)" 263 + echo -e "--------------------------------------------------------"; 264 + echo -e "Missing libraries system" 265 + echo -e "$(echo "$fail_libs" | xargs -n1 | sort -u | xargs)" 266 + fi 267 + 268 + echo -e "--------------------------------------------------------"; 269 + echo -e "========================================================"; 270 + } 271 + 272 + main "$@"
+12 -2
tools/testing/selftests/memfd/Makefile
··· 8 8 TEST_PROGS := run_fuse_test.sh run_hugetlbfs_test.sh 9 9 TEST_GEN_FILES := fuse_test fuse_mnt 10 10 11 - fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags) 11 + VAR_CFLAGS := $(shell pkg-config fuse --cflags 2>/dev/null) 12 + ifeq ($(VAR_CFLAGS),) 13 + VAR_CFLAGS := -D_FILE_OFFSET_BITS=64 -I/usr/include/fuse 14 + endif 15 + 16 + VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null) 17 + ifeq ($(VAR_LDLIBS),) 18 + VAR_LDLIBS := -lfuse -pthread 19 + endif 20 + 21 + fuse_mnt.o: CFLAGS += $(VAR_CFLAGS) 12 22 13 23 include ../lib.mk 14 24 15 - $(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs) 25 + $(OUTPUT)/fuse_mnt: LDLIBS += $(VAR_LDLIBS) 16 26 17 27 $(OUTPUT)/memfd_test: memfd_test.c common.c 18 28 $(OUTPUT)/fuse_test: fuse_test.c common.c
+5 -2
tools/testing/selftests/net/tcp_mmap.c
··· 165 165 socklen_t zc_len = sizeof(zc); 166 166 int res; 167 167 168 + memset(&zc, 0, sizeof(zc)); 168 169 zc.address = (__u64)((unsigned long)addr); 169 170 zc.length = chunk_size; 170 - zc.recv_skip_hint = 0; 171 + 171 172 res = getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, 172 173 &zc, &zc_len); 173 174 if (res == -1) ··· 282 281 static void do_accept(int fdlisten) 283 282 { 284 283 pthread_attr_t attr; 284 + int rcvlowat; 285 285 286 286 pthread_attr_init(&attr); 287 287 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 288 288 289 + rcvlowat = chunk_size; 289 290 if (setsockopt(fdlisten, SOL_SOCKET, SO_RCVLOWAT, 290 - &chunk_size, sizeof(chunk_size)) == -1) { 291 + &rcvlowat, sizeof(rcvlowat)) == -1) { 291 292 perror("setsockopt SO_RCVLOWAT"); 292 293 } 293 294
+51 -3
tools/testing/selftests/wireguard/netns.sh
··· 48 48 exec 2>/dev/null 49 49 printf "$orig_message_cost" > /proc/sys/net/core/message_cost 50 50 ip0 link del dev wg0 51 + ip0 link del dev wg1 51 52 ip1 link del dev wg0 53 + ip1 link del dev wg1 52 54 ip2 link del dev wg0 55 + ip2 link del dev wg1 53 56 local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)" 54 57 [[ -n $to_kill ]] && kill $to_kill 55 58 pp ip netns del $netns1 ··· 80 77 key1="$(pp wg genkey)" 81 78 key2="$(pp wg genkey)" 82 79 key3="$(pp wg genkey)" 80 + key4="$(pp wg genkey)" 83 81 pub1="$(pp wg pubkey <<<"$key1")" 84 82 pub2="$(pp wg pubkey <<<"$key2")" 85 83 pub3="$(pp wg pubkey <<<"$key3")" 84 + pub4="$(pp wg pubkey <<<"$key4")" 86 85 psk="$(pp wg genpsk)" 87 86 [[ -n $key1 && -n $key2 && -n $psk ]] 88 87 89 88 configure_peers() { 90 89 ip1 addr add 192.168.241.1/24 dev wg0 91 - ip1 addr add fd00::1/24 dev wg0 90 + ip1 addr add fd00::1/112 dev wg0 92 91 93 92 ip2 addr add 192.168.241.2/24 dev wg0 94 - ip2 addr add fd00::2/24 dev wg0 93 + ip2 addr add fd00::2/112 dev wg0 95 94 96 95 n1 wg set wg0 \ 97 96 private-key <(echo "$key1") \ ··· 235 230 n1 wg set wg0 private-key <(echo "$key3") 236 231 n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove 237 232 n1 ping -W 1 -c 1 192.168.241.2 233 + n2 wg set wg0 peer "$pub3" remove 238 234 239 - ip1 link del wg0 235 + # Test that we can route wg through wg 236 + ip1 addr flush dev wg0 237 + ip2 addr flush dev wg0 238 + ip1 addr add fd00::5:1/112 dev wg0 239 + ip2 addr add fd00::5:2/112 dev wg0 240 + n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2 241 + n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998 242 + ip1 link add wg1 type wireguard 243 + ip2 link add wg1 type wireguard 244 + ip1 addr add 192.168.241.1/24 dev wg1 245 + 
ip1 addr add fd00::1/112 dev wg1 246 + ip2 addr add 192.168.241.2/24 dev wg1 247 + ip2 addr add fd00::2/112 dev wg1 248 + ip1 link set mtu 1340 up dev wg1 249 + ip2 link set mtu 1340 up dev wg1 250 + n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5 251 + n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5 252 + tests 253 + # Try to set up a routing loop between the two namespaces 254 + ip1 link set netns $netns0 dev wg1 255 + ip0 addr add 192.168.241.1/24 dev wg1 256 + ip0 link set up dev wg1 257 + n0 ping -W 1 -c 1 192.168.241.2 258 + n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7 240 259 ip2 link del wg0 260 + ip2 link del wg1 261 + ! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel 262 + 263 + ip0 link del wg1 264 + ip1 link del wg0 241 265 242 266 # Test using NAT. We now change the topology to this: 243 267 # ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐ ··· 315 281 pp sleep 3 316 282 n2 ping -W 1 -c 1 192.168.241.1 317 283 n1 wg set wg0 peer "$pub2" persistent-keepalive 0 284 + 285 + # Test that onion routing works, even when it loops 286 + n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 287 + ip1 addr add 192.168.242.1/24 dev wg0 288 + ip2 link add wg1 type wireguard 289 + ip2 addr add 192.168.242.2/24 dev wg1 290 + n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32 291 + ip2 link set wg1 up 292 + n1 ping -W 1 -c 1 192.168.242.2 293 + ip2 link del wg1 294 + n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5 295 + ! 
n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel 296 + n1 wg set wg0 peer "$pub3" remove 297 + ip1 addr del 192.168.242.1/24 dev wg0 318 298 319 299 # Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs. 320 300 ip1 -6 addr add fc00::9/96 dev vethc
+1
tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config
··· 10 10 CONFIG_CMDLINE="console=hvc0 wg.success=hvc1" 11 11 CONFIG_SECTION_MISMATCH_WARN_ONLY=y 12 12 CONFIG_FRAME_WARN=1280 13 + CONFIG_THREAD_SHIFT=14