Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-s390-master-6.1-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

A PCI allocation fix and a PV clock fix.

+3491 -1735
+37 -1
Documentation/arm64/cpu-feature-registers.rst
··· 92 92 93 93 The infrastructure emulates only the following system register space:: 94 94 95 - Op0=3, Op1=0, CRn=0, CRm=0,4,5,6,7 95 + Op0=3, Op1=0, CRn=0, CRm=0,2,3,4,5,6,7 96 96 97 97 (See Table C5-6 'System instruction encodings for non-Debug System 98 98 register accesses' in ARMv8 ARM DDI 0487A.h, for the list of ··· 291 291 | RPRES | [7-4] | y | 292 292 +------------------------------+---------+---------+ 293 293 | WFXT | [3-0] | y | 294 + +------------------------------+---------+---------+ 295 + 296 + 10) MVFR0_EL1 - AArch32 Media and VFP Feature Register 0 297 + 298 + +------------------------------+---------+---------+ 299 + | Name | bits | visible | 300 + +------------------------------+---------+---------+ 301 + | FPDP | [11-8] | y | 302 + +------------------------------+---------+---------+ 303 + 304 + 11) MVFR1_EL1 - AArch32 Media and VFP Feature Register 1 305 + 306 + +------------------------------+---------+---------+ 307 + | Name | bits | visible | 308 + +------------------------------+---------+---------+ 309 + | SIMDFMAC | [31-28] | y | 310 + +------------------------------+---------+---------+ 311 + | SIMDSP | [19-16] | y | 312 + +------------------------------+---------+---------+ 313 + | SIMDInt | [15-12] | y | 314 + +------------------------------+---------+---------+ 315 + | SIMDLS | [11-8] | y | 316 + +------------------------------+---------+---------+ 317 + 318 + 12) ID_ISAR5_EL1 - AArch32 Instruction Set Attribute Register 5 319 + 320 + +------------------------------+---------+---------+ 321 + | Name | bits | visible | 322 + +------------------------------+---------+---------+ 323 + | CRC32 | [19-16] | y | 324 + +------------------------------+---------+---------+ 325 + | SHA2 | [15-12] | y | 326 + +------------------------------+---------+---------+ 327 + | SHA1 | [11-8] | y | 328 + +------------------------------+---------+---------+ 329 + | AES | [7-4] | y | 294 330 +------------------------------+---------+---------+ 295 331 296 
332
+3
Documentation/devicetree/bindings/power/fsl,imx-gpcv2.yaml
··· 81 81 82 82 power-supply: true 83 83 84 + power-domains: 85 + maxItems: 1 86 + 84 87 resets: 85 88 description: | 86 89 A number of phandles to resets that need to be asserted during
+1
Documentation/driver-api/driver-model/devres.rst
··· 279 279 devm_gpio_request_one() 280 280 281 281 I2C 282 + devm_i2c_add_adapter() 282 283 devm_i2c_new_dummy_device() 283 284 284 285 IIO
+10
Documentation/kbuild/reproducible-builds.rst
··· 119 119 kernel versions by including an arbitrary string of "salt" in it. 120 120 This is specified by the Kconfig symbol ``CONFIG_BUILD_SALT``. 121 121 122 + Git 123 + --- 124 + 125 + Uncommitted changes or different commit ids in git can also lead 126 + to different compilation results. For example, after executing 127 + ``git reset HEAD^``, even if the code is the same, the 128 + ``include/config/kernel.release`` generated during compilation 129 + will be different, which will eventually lead to binary differences. 130 + See ``scripts/setlocalversion`` for details. 131 + 122 132 .. _KBUILD_BUILD_TIMESTAMP: kbuild.html#kbuild-build-timestamp 123 133 .. _KBUILD_BUILD_USER and KBUILD_BUILD_HOST: kbuild.html#kbuild-build-user-kbuild-build-host 124 134 .. _KCFLAGS: kbuild.html#kcflags
+3
Documentation/virt/kvm/devices/vm.rst
··· 215 215 :Parameters: address of a buffer in user space to store the data (u8) to 216 216 :Returns: -EFAULT if the given address is not accessible from kernel space; 217 217 -EINVAL if setting the TOD clock extension to != 0 is not supported 218 + -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) 218 219 219 220 3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW 220 221 ----------------------------------- ··· 225 224 226 225 :Parameters: address of a buffer in user space to store the data (u64) to 227 226 :Returns: -EFAULT if the given address is not accessible from kernel space 227 + -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) 228 228 229 229 3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT 230 230 ----------------------------------- ··· 239 237 (kvm_s390_vm_tod_clock) to 240 238 :Returns: -EFAULT if the given address is not accessible from kernel space; 241 239 -EINVAL if setting the TOD clock extension to != 0 is not supported 240 + -EOPNOTSUPP for a PV guest (TOD managed by the ultravisor) 242 241 243 242 4. GROUP: KVM_S390_VM_CRYPTO 244 243 ============================
+13 -10
MAINTAINERS
··· 3984 3984 R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> 3985 3985 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 3986 3986 S: Maintained 3987 - T: git git://github.com/broadcom/stblinux.git 3987 + T: git https://github.com/broadcom/stblinux.git 3988 3988 F: Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml 3989 3989 F: arch/arm64/boot/dts/broadcom/bcmbca/* 3990 3990 N: bcmbca ··· 4009 4009 L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) 4010 4010 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 4011 4011 S: Maintained 4012 - T: git git://github.com/broadcom/stblinux.git 4012 + T: git https://github.com/broadcom/stblinux.git 4013 4013 F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml 4014 4014 F: drivers/pci/controller/pcie-brcmstb.c 4015 4015 F: drivers/staging/vc04_services ··· 4023 4023 M: Scott Branden <sbranden@broadcom.com> 4024 4024 R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> 4025 4025 S: Maintained 4026 - T: git git://github.com/broadcom/mach-bcm 4026 + T: git https://github.com/broadcom/mach-bcm 4027 4027 F: arch/arm/mach-bcm/ 4028 4028 N: bcm281* 4029 4029 N: bcm113* ··· 4088 4088 R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> 4089 4089 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 4090 4090 S: Maintained 4091 - T: git git://github.com/broadcom/stblinux.git 4091 + T: git https://github.com/broadcom/stblinux.git 4092 4092 F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml 4093 4093 F: arch/arm/boot/dts/bcm7*.dts* 4094 4094 F: arch/arm/include/asm/hardware/cache-b15-rac.h ··· 4121 4121 R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> 4122 4122 L: linux-mips@vger.kernel.org 4123 4123 S: Maintained 4124 - T: git git://github.com/broadcom/stblinux.git 4124 + T: git 
https://github.com/broadcom/stblinux.git 4125 4125 F: arch/mips/bmips/* 4126 4126 F: arch/mips/boot/dts/brcm/bcm*.dts* 4127 4127 F: arch/mips/include/asm/mach-bmips/* ··· 4262 4262 R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> 4263 4263 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 4264 4264 S: Maintained 4265 - T: git git://github.com/broadcom/stblinux.git 4265 + T: git https://github.com/broadcom/stblinux.git 4266 4266 F: arch/arm64/boot/dts/broadcom/northstar2/* 4267 4267 F: arch/arm64/boot/dts/broadcom/stingray/* 4268 4268 F: drivers/clk/bcm/clk-ns* ··· 4332 4332 R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com> 4333 4333 L: linux-pm@vger.kernel.org 4334 4334 S: Maintained 4335 - T: git git://github.com/broadcom/stblinux.git 4335 + T: git https://github.com/broadcom/stblinux.git 4336 4336 F: drivers/soc/bcm/bcm63xx/bcm-pmb.c 4337 4337 F: include/dt-bindings/soc/bcm-pmb.h 4338 4338 ··· 5041 5041 5042 5042 CISCO VIC ETHERNET NIC DRIVER 5043 5043 M: Christian Benvenuti <benve@cisco.com> 5044 - M: Govindarajulu Varadarajan <_govind@gmx.com> 5044 + M: Satish Kharat <satishkh@cisco.com> 5045 5045 S: Supported 5046 5046 F: drivers/net/ethernet/cisco/enic/ 5047 5047 ··· 9217 9217 F: drivers/i2c/busses/i2c-hisi.c 9218 9218 9219 9219 HISILICON LPC BUS DRIVER 9220 - M: john.garry@huawei.com 9220 + M: Jay Fang <f.fangjian@huawei.com> 9221 9221 S: Maintained 9222 9222 W: http://www.hisilicon.com 9223 9223 F: Documentation/devicetree/bindings/arm/hisilicon/low-pin-count.yaml ··· 9778 9778 F: drivers/pci/hotplug/rpaphp* 9779 9779 9780 9780 IBM Power SRIOV Virtual NIC Device Driver 9781 - M: Dany Madden <drt@linux.ibm.com> 9781 + M: Haren Myneni <haren@linux.ibm.com> 9782 + M: Rick Lindsley <ricklind@linux.ibm.com> 9783 + R: Nick Child <nnac123@linux.ibm.com> 9784 + R: Dany Madden <danymadden@us.ibm.com> 9782 9785 R: Thomas Falcon <tlfalcon@linux.ibm.com> 9783 9786 L: 
netdev@vger.kernel.org 9784 9787 S: Supported
+2 -2
Makefile
··· 2 2 VERSION = 6 3 3 PATCHLEVEL = 1 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc3 5 + EXTRAVERSION = -rc4 6 6 NAME = Hurr durr I'ma ninja sloth 7 7 8 8 # *DOCUMENTATION* ··· 1218 1218 cmd_ar_vmlinux.a = \ 1219 1219 rm -f $@; \ 1220 1220 $(AR) cDPrST $@ $(KBUILD_VMLINUX_OBJS); \ 1221 - $(AR) mPiT $$($(AR) t $@ | head -n1) $@ $$($(AR) t $@ | grep -F --file=$(srctree)/scripts/head-object-list.txt) 1221 + $(AR) mPiT $$($(AR) t $@ | sed -n 1p) $@ $$($(AR) t $@ | grep -F -f $(srctree)/scripts/head-object-list.txt) 1222 1222 1223 1223 targets += vmlinux.a 1224 1224 vmlinux.a: $(KBUILD_VMLINUX_OBJS) scripts/head-object-list.txt autoksyms_recursive FORCE
+4
arch/arm/boot/dts/imx6q-yapp4-crux.dts
··· 33 33 status = "okay"; 34 34 }; 35 35 36 + &reg_pu { 37 + regulator-always-on; 38 + }; 39 + 36 40 &reg_usb_h1_vbus { 37 41 status = "okay"; 38 42 };
+1 -1
arch/arm/boot/dts/imx6qdl-gw5910.dtsi
··· 29 29 30 30 user-pb { 31 31 label = "user_pb"; 32 - gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; 32 + gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; 33 33 linux,code = <BTN_0>; 34 34 }; 35 35
+1 -1
arch/arm/boot/dts/imx6qdl-gw5913.dtsi
··· 26 26 27 27 user-pb { 28 28 label = "user_pb"; 29 - gpios = <&gsc_gpio 0 GPIO_ACTIVE_LOW>; 29 + gpios = <&gsc_gpio 2 GPIO_ACTIVE_LOW>; 30 30 linux,code = <BTN_0>; 31 31 }; 32 32
+4
arch/arm/boot/dts/imx6qp-yapp4-crux-plus.dts
··· 33 33 status = "okay"; 34 34 }; 35 35 36 + &reg_pu { 37 + regulator-always-on; 38 + }; 39 + 36 40 &reg_usb_h1_vbus { 37 41 status = "okay"; 38 42 };
+8
arch/arm/boot/dts/ste-href.dtsi
··· 24 24 polling-delay = <0>; 25 25 polling-delay-passive = <0>; 26 26 thermal-sensors = <&bat_therm>; 27 + 28 + trips { 29 + battery-crit-hi { 30 + temperature = <70000>; 31 + hysteresis = <2000>; 32 + type = "critical"; 33 + }; 34 + }; 27 35 }; 28 36 }; 29 37
+8
arch/arm/boot/dts/ste-snowball.dts
··· 28 28 polling-delay = <0>; 29 29 polling-delay-passive = <0>; 30 30 thermal-sensors = <&bat_therm>; 31 + 32 + trips { 33 + battery-crit-hi { 34 + temperature = <70000>; 35 + hysteresis = <2000>; 36 + type = "critical"; 37 + }; 38 + }; 31 39 }; 32 40 }; 33 41
+8
arch/arm/boot/dts/ste-ux500-samsung-codina-tmo.dts
··· 44 44 polling-delay = <0>; 45 45 polling-delay-passive = <0>; 46 46 thermal-sensors = <&bat_therm>; 47 + 48 + trips { 49 + battery-crit-hi { 50 + temperature = <70000>; 51 + hysteresis = <2000>; 52 + type = "critical"; 53 + }; 54 + }; 47 55 }; 48 56 }; 49 57
+8
arch/arm/boot/dts/ste-ux500-samsung-codina.dts
··· 57 57 polling-delay = <0>; 58 58 polling-delay-passive = <0>; 59 59 thermal-sensors = <&bat_therm>; 60 + 61 + trips { 62 + battery-crit-hi { 63 + temperature = <70000>; 64 + hysteresis = <2000>; 65 + type = "critical"; 66 + }; 67 + }; 60 68 }; 61 69 }; 62 70
+8
arch/arm/boot/dts/ste-ux500-samsung-gavini.dts
··· 30 30 polling-delay = <0>; 31 31 polling-delay-passive = <0>; 32 32 thermal-sensors = <&bat_therm>; 33 + 34 + trips { 35 + battery-crit-hi { 36 + temperature = <70000>; 37 + hysteresis = <2000>; 38 + type = "critical"; 39 + }; 40 + }; 33 41 }; 34 42 }; 35 43
+8
arch/arm/boot/dts/ste-ux500-samsung-golden.dts
··· 35 35 polling-delay = <0>; 36 36 polling-delay-passive = <0>; 37 37 thermal-sensors = <&bat_therm>; 38 + 39 + trips { 40 + battery-crit-hi { 41 + temperature = <70000>; 42 + hysteresis = <2000>; 43 + type = "critical"; 44 + }; 45 + }; 38 46 }; 39 47 }; 40 48
+8
arch/arm/boot/dts/ste-ux500-samsung-janice.dts
··· 30 30 polling-delay = <0>; 31 31 polling-delay-passive = <0>; 32 32 thermal-sensors = <&bat_therm>; 33 + 34 + trips { 35 + battery-crit-hi { 36 + temperature = <70000>; 37 + hysteresis = <2000>; 38 + type = "critical"; 39 + }; 40 + }; 33 41 }; 34 42 }; 35 43
+8
arch/arm/boot/dts/ste-ux500-samsung-kyle.dts
··· 34 34 polling-delay = <0>; 35 35 polling-delay-passive = <0>; 36 36 thermal-sensors = <&bat_therm>; 37 + 38 + trips { 39 + battery-crit-hi { 40 + temperature = <70000>; 41 + hysteresis = <2000>; 42 + type = "critical"; 43 + }; 44 + }; 37 45 }; 38 46 }; 39 47
+8
arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
··· 30 30 polling-delay = <0>; 31 31 polling-delay-passive = <0>; 32 32 thermal-sensors = <&bat_therm>; 33 + 34 + trips { 35 + battery-crit-hi { 36 + temperature = <70000>; 37 + hysteresis = <2000>; 38 + type = "critical"; 39 + }; 40 + }; 33 41 }; 34 42 }; 35 43
+14
arch/arm64/boot/dts/arm/juno-base.dtsi
··· 751 751 polling-delay = <1000>; 752 752 polling-delay-passive = <100>; 753 753 thermal-sensors = <&scpi_sensors0 0>; 754 + trips { 755 + pmic_crit0: trip0 { 756 + temperature = <90000>; 757 + hysteresis = <2000>; 758 + type = "critical"; 759 + }; 760 + }; 754 761 }; 755 762 756 763 soc { 757 764 polling-delay = <1000>; 758 765 polling-delay-passive = <100>; 759 766 thermal-sensors = <&scpi_sensors0 3>; 767 + trips { 768 + soc_crit0: trip0 { 769 + temperature = <80000>; 770 + hysteresis = <2000>; 771 + type = "critical"; 772 + }; 773 + }; 760 774 }; 761 775 762 776 big_cluster_thermal_zone: big-cluster {
+6
arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi
··· 779 779 little-endian; 780 780 #address-cells = <1>; 781 781 #size-cells = <0>; 782 + clock-frequency = <2500000>; 783 + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL 784 + QORIQ_CLK_PLL_DIV(1)>; 782 785 status = "disabled"; 783 786 }; 784 787 ··· 791 788 little-endian; 792 789 #address-cells = <1>; 793 790 #size-cells = <0>; 791 + clock-frequency = <2500000>; 792 + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL 793 + QORIQ_CLK_PLL_DIV(1)>; 794 794 status = "disabled"; 795 795 }; 796 796
+6
arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
··· 532 532 little-endian; 533 533 #address-cells = <1>; 534 534 #size-cells = <0>; 535 + clock-frequency = <2500000>; 536 + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL 537 + QORIQ_CLK_PLL_DIV(2)>; 535 538 status = "disabled"; 536 539 }; 537 540 ··· 544 541 little-endian; 545 542 #address-cells = <1>; 546 543 #size-cells = <0>; 544 + clock-frequency = <2500000>; 545 + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL 546 + QORIQ_CLK_PLL_DIV(2)>; 547 547 status = "disabled"; 548 548 }; 549 549
+6
arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
··· 1385 1385 #address-cells = <1>; 1386 1386 #size-cells = <0>; 1387 1387 little-endian; 1388 + clock-frequency = <2500000>; 1389 + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL 1390 + QORIQ_CLK_PLL_DIV(2)>; 1388 1391 status = "disabled"; 1389 1392 }; 1390 1393 ··· 1398 1395 little-endian; 1399 1396 #address-cells = <1>; 1400 1397 #size-cells = <0>; 1398 + clock-frequency = <2500000>; 1399 + clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL 1400 + QORIQ_CLK_PLL_DIV(2)>; 1401 1401 status = "disabled"; 1402 1402 }; 1403 1403
+9 -9
arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
··· 38 38 interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>; 39 39 reg = <0x5b010000 0x10000>; 40 40 clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>, 41 - <&sdhc0_lpcg IMX_LPCG_CLK_5>, 42 - <&sdhc0_lpcg IMX_LPCG_CLK_0>; 43 - clock-names = "ipg", "per", "ahb"; 41 + <&sdhc0_lpcg IMX_LPCG_CLK_0>, 42 + <&sdhc0_lpcg IMX_LPCG_CLK_5>; 43 + clock-names = "ipg", "ahb", "per"; 44 44 power-domains = <&pd IMX_SC_R_SDHC_0>; 45 45 status = "disabled"; 46 46 }; ··· 49 49 interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>; 50 50 reg = <0x5b020000 0x10000>; 51 51 clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>, 52 - <&sdhc1_lpcg IMX_LPCG_CLK_5>, 53 - <&sdhc1_lpcg IMX_LPCG_CLK_0>; 54 - clock-names = "ipg", "per", "ahb"; 52 + <&sdhc1_lpcg IMX_LPCG_CLK_0>, 53 + <&sdhc1_lpcg IMX_LPCG_CLK_5>; 54 + clock-names = "ipg", "ahb", "per"; 55 55 power-domains = <&pd IMX_SC_R_SDHC_1>; 56 56 fsl,tuning-start-tap = <20>; 57 57 fsl,tuning-step = <2>; ··· 62 62 interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>; 63 63 reg = <0x5b030000 0x10000>; 64 64 clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>, 65 - <&sdhc2_lpcg IMX_LPCG_CLK_5>, 66 - <&sdhc2_lpcg IMX_LPCG_CLK_0>; 67 - clock-names = "ipg", "per", "ahb"; 65 + <&sdhc2_lpcg IMX_LPCG_CLK_0>, 66 + <&sdhc2_lpcg IMX_LPCG_CLK_5>; 67 + clock-names = "ipg", "ahb", "per"; 68 68 power-domains = <&pd IMX_SC_R_SDHC_2>; 69 69 status = "disabled"; 70 70 };
+8 -8
arch/arm64/boot/dts/freescale/imx8mm-mx8menlo.dts
··· 250 250 /* SODIMM 96 */ 251 251 MX8MM_IOMUXC_SAI1_RXD2_GPIO4_IO4 0x1c4 252 252 /* CPLD_D[7] */ 253 - MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x1c4 253 + MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x184 254 254 /* CPLD_D[6] */ 255 - MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x1c4 255 + MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x184 256 256 /* CPLD_D[5] */ 257 - MX8MM_IOMUXC_SAI1_TXC_GPIO4_IO11 0x1c4 257 + MX8MM_IOMUXC_SAI1_TXC_GPIO4_IO11 0x184 258 258 /* CPLD_D[4] */ 259 - MX8MM_IOMUXC_SAI1_TXD0_GPIO4_IO12 0x1c4 259 + MX8MM_IOMUXC_SAI1_TXD0_GPIO4_IO12 0x184 260 260 /* CPLD_D[3] */ 261 - MX8MM_IOMUXC_SAI1_TXD1_GPIO4_IO13 0x1c4 261 + MX8MM_IOMUXC_SAI1_TXD1_GPIO4_IO13 0x184 262 262 /* CPLD_D[2] */ 263 - MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x1c4 263 + MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x184 264 264 /* CPLD_D[1] */ 265 - MX8MM_IOMUXC_SAI1_TXD3_GPIO4_IO15 0x1c4 265 + MX8MM_IOMUXC_SAI1_TXD3_GPIO4_IO15 0x184 266 266 /* CPLD_D[0] */ 267 - MX8MM_IOMUXC_SAI1_TXD4_GPIO4_IO16 0x1c4 267 + MX8MM_IOMUXC_SAI1_TXD4_GPIO4_IO16 0x184 268 268 /* KBD_intK */ 269 269 MX8MM_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x1c4 270 270 /* DISP_reset */
+4 -4
arch/arm64/boot/dts/freescale/imx8mm.dtsi
··· 276 276 assigned-clocks = <&clk IMX8MM_CLK_USB_PHY_REF>; 277 277 assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_100M>; 278 278 clock-names = "main_clk"; 279 + power-domains = <&pgc_otg1>; 279 280 }; 280 281 281 282 usbphynop2: usbphynop2 { ··· 286 285 assigned-clocks = <&clk IMX8MM_CLK_USB_PHY_REF>; 287 286 assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_100M>; 288 287 clock-names = "main_clk"; 288 + power-domains = <&pgc_otg2>; 289 289 }; 290 290 291 291 soc: soc@0 { ··· 676 674 pgc_otg1: power-domain@2 { 677 675 #power-domain-cells = <0>; 678 676 reg = <IMX8MM_POWER_DOMAIN_OTG1>; 679 - power-domains = <&pgc_hsiomix>; 680 677 }; 681 678 682 679 pgc_otg2: power-domain@3 { 683 680 #power-domain-cells = <0>; 684 681 reg = <IMX8MM_POWER_DOMAIN_OTG2>; 685 - power-domains = <&pgc_hsiomix>; 686 682 }; 687 683 688 684 pgc_gpumix: power-domain@4 { ··· 1186 1186 assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>; 1187 1187 phys = <&usbphynop1>; 1188 1188 fsl,usbmisc = <&usbmisc1 0>; 1189 - power-domains = <&pgc_otg1>; 1189 + power-domains = <&pgc_hsiomix>; 1190 1190 status = "disabled"; 1191 1191 }; 1192 1192 ··· 1206 1206 assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>; 1207 1207 phys = <&usbphynop2>; 1208 1208 fsl,usbmisc = <&usbmisc2 0>; 1209 - power-domains = <&pgc_otg2>; 1209 + power-domains = <&pgc_hsiomix>; 1210 1210 status = "disabled"; 1211 1211 }; 1212 1212
+2 -2
arch/arm64/boot/dts/freescale/imx8mn.dtsi
··· 662 662 pgc_otg1: power-domain@1 { 663 663 #power-domain-cells = <0>; 664 664 reg = <IMX8MN_POWER_DOMAIN_OTG1>; 665 - power-domains = <&pgc_hsiomix>; 666 665 }; 667 666 668 667 pgc_gpumix: power-domain@2 { ··· 1075 1076 assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>; 1076 1077 phys = <&usbphynop1>; 1077 1078 fsl,usbmisc = <&usbmisc1 0>; 1078 - power-domains = <&pgc_otg1>; 1079 + power-domains = <&pgc_hsiomix>; 1079 1080 status = "disabled"; 1080 1081 }; 1081 1082 ··· 1174 1175 assigned-clocks = <&clk IMX8MN_CLK_USB_PHY_REF>; 1175 1176 assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_100M>; 1176 1177 clock-names = "main_clk"; 1178 + power-domains = <&pgc_otg1>; 1177 1179 }; 1178 1180 };
+10 -10
arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
··· 354 354 "SODIMM_82", 355 355 "SODIMM_70", 356 356 "SODIMM_72"; 357 - 358 - ctrl-sleep-moci-hog { 359 - gpio-hog; 360 - /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */ 361 - gpios = <29 GPIO_ACTIVE_HIGH>; 362 - line-name = "CTRL_SLEEP_MOCI#"; 363 - output-high; 364 - pinctrl-names = "default"; 365 - pinctrl-0 = <&pinctrl_ctrl_sleep_moci>; 366 - }; 367 357 }; 368 358 369 359 &gpio3 { ··· 422 432 "SODIMM_256", 423 433 "SODIMM_48", 424 434 "SODIMM_44"; 435 + 436 + ctrl-sleep-moci-hog { 437 + gpio-hog; 438 + /* Verdin CTRL_SLEEP_MOCI# (SODIMM 256) */ 439 + gpios = <29 GPIO_ACTIVE_HIGH>; 440 + line-name = "CTRL_SLEEP_MOCI#"; 441 + output-high; 442 + pinctrl-names = "default"; 443 + pinctrl-0 = <&pinctrl_ctrl_sleep_moci>; 444 + }; 425 445 }; 426 446 427 447 /* On-module I2C */
+6 -5
arch/arm64/boot/dts/freescale/imx93.dtsi
··· 451 451 clocks = <&clk IMX93_CLK_GPIO2_GATE>, 452 452 <&clk IMX93_CLK_GPIO2_GATE>; 453 453 clock-names = "gpio", "port"; 454 - gpio-ranges = <&iomuxc 0 32 32>; 454 + gpio-ranges = <&iomuxc 0 4 30>; 455 455 }; 456 456 457 457 gpio3: gpio@43820080 { ··· 465 465 clocks = <&clk IMX93_CLK_GPIO3_GATE>, 466 466 <&clk IMX93_CLK_GPIO3_GATE>; 467 467 clock-names = "gpio", "port"; 468 - gpio-ranges = <&iomuxc 0 64 32>; 468 + gpio-ranges = <&iomuxc 0 84 8>, <&iomuxc 8 66 18>, 469 + <&iomuxc 26 34 2>, <&iomuxc 28 0 4>; 469 470 }; 470 471 471 472 gpio4: gpio@43830080 { ··· 480 479 clocks = <&clk IMX93_CLK_GPIO4_GATE>, 481 480 <&clk IMX93_CLK_GPIO4_GATE>; 482 481 clock-names = "gpio", "port"; 483 - gpio-ranges = <&iomuxc 0 96 32>; 482 + gpio-ranges = <&iomuxc 0 38 28>, <&iomuxc 28 36 2>; 484 483 }; 485 484 486 485 gpio1: gpio@47400080 { ··· 494 493 clocks = <&clk IMX93_CLK_GPIO1_GATE>, 495 494 <&clk IMX93_CLK_GPIO1_GATE>; 496 495 clock-names = "gpio", "port"; 497 - gpio-ranges = <&iomuxc 0 0 32>; 496 + gpio-ranges = <&iomuxc 0 92 16>; 498 497 }; 499 498 500 499 s4muap: mailbox@47520000 { ··· 502 501 reg = <0x47520000 0x10000>; 503 502 interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, 504 503 <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; 505 - interrupt-names = "txirq", "rxirq"; 504 + interrupt-names = "tx", "rx"; 506 505 #mbox-cells = <2>; 507 506 }; 508 507
+8
arch/arm64/include/asm/efi.h
··· 14 14 15 15 #ifdef CONFIG_EFI 16 16 extern void efi_init(void); 17 + 18 + bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg); 17 19 #else 18 20 #define efi_init() 21 + 22 + static inline 23 + bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg) 24 + { 25 + return false; 26 + } 19 27 #endif 20 28 21 29 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+33 -9
arch/arm64/kernel/cpufeature.c
··· 428 428 ARM64_FTR_END, 429 429 }; 430 430 431 + static const struct arm64_ftr_bits ftr_mvfr0[] = { 432 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPROUND_SHIFT, 4, 0), 433 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSHVEC_SHIFT, 4, 0), 434 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSQRT_SHIFT, 4, 0), 435 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDIVIDE_SHIFT, 4, 0), 436 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPTRAP_SHIFT, 4, 0), 437 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPDP_SHIFT, 4, 0), 438 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_FPSP_SHIFT, 4, 0), 439 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_SIMD_SHIFT, 4, 0), 440 + ARM64_FTR_END, 441 + }; 442 + 443 + static const struct arm64_ftr_bits ftr_mvfr1[] = { 444 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDFMAC_SHIFT, 4, 0), 445 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPHP_SHIFT, 4, 0), 446 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDHP_SHIFT, 4, 0), 447 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDSP_SHIFT, 4, 0), 448 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDINT_SHIFT, 4, 0), 449 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_SIMDLS_SHIFT, 4, 0), 450 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPDNAN_SHIFT, 4, 0), 451 + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_FPFTZ_SHIFT, 4, 0), 452 + ARM64_FTR_END, 453 + }; 454 + 431 455 static const struct arm64_ftr_bits ftr_mvfr2[] = { 432 456 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0), 433 457 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0), ··· 482 458 483 459 static const struct arm64_ftr_bits ftr_id_isar5[] = { 484 460 ARM64_FTR_BITS(FTR_HIDDEN, 
FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0), 485 - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0), 486 - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0), 487 - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0), 488 - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0), 461 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0), 462 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0), 463 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0), 464 + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0), 489 465 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0), 490 466 ARM64_FTR_END, 491 467 }; ··· 598 574 * Common ftr bits for a 32bit register with all hidden, strict 599 575 * attributes, with 4bit feature fields and a default safe value of 600 576 * 0. Covers the following 32bit registers: 601 - * id_isar[1-4], id_mmfr[1-3], id_pfr1, mvfr[0-1] 577 + * id_isar[1-3], id_mmfr[1-3] 602 578 */ 603 579 static const struct arm64_ftr_bits ftr_generic_32bits[] = { 604 580 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0), ··· 669 645 ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6), 670 646 671 647 /* Op1 = 0, CRn = 0, CRm = 3 */ 672 - ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits), 673 - ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits), 648 + ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_mvfr0), 649 + ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_mvfr1), 674 650 ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2), 675 651 ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2), 676 652 ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1), ··· 3363 3339 3364 3340 /* 3365 3341 * We emulate only the following system register space. 
3366 - * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7] 3342 + * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 2 - 7] 3367 3343 * See Table C5-6 System instruction encodings for System register accesses, 3368 3344 * ARMv8 ARM(ARM DDI 0487A.f) for more details. 3369 3345 */ ··· 3373 3349 sys_reg_CRn(id) == 0x0 && 3374 3350 sys_reg_Op1(id) == 0x0 && 3375 3351 (sys_reg_CRm(id) == 0 || 3376 - ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7)))); 3352 + ((sys_reg_CRm(id) >= 2) && (sys_reg_CRm(id) <= 7)))); 3377 3353 } 3378 3354 3379 3355 /*
+31 -2
arch/arm64/kernel/efi-rt-wrapper.S
··· 6 6 #include <linux/linkage.h> 7 7 8 8 SYM_FUNC_START(__efi_rt_asm_wrapper) 9 - stp x29, x30, [sp, #-32]! 9 + stp x29, x30, [sp, #-112]! 10 10 mov x29, sp 11 11 12 12 /* ··· 15 15 * (such as UEFI) should never touch it. 16 16 */ 17 17 stp x1, x18, [sp, #16] 18 + 19 + /* 20 + * Preserve all callee saved registers and record the stack pointer 21 + * value in a per-CPU variable so we can recover from synchronous 22 + * exceptions occurring while running the firmware routines. 23 + */ 24 + stp x19, x20, [sp, #32] 25 + stp x21, x22, [sp, #48] 26 + stp x23, x24, [sp, #64] 27 + stp x25, x26, [sp, #80] 28 + stp x27, x28, [sp, #96] 29 + 30 + adr_this_cpu x8, __efi_rt_asm_recover_sp, x9 31 + str x29, [x8] 18 32 19 33 /* 20 34 * We are lucky enough that no EFI runtime services take more than ··· 45 31 46 32 ldp x1, x2, [sp, #16] 47 33 cmp x2, x18 48 - ldp x29, x30, [sp], #32 34 + ldp x29, x30, [sp], #112 49 35 b.ne 0f 50 36 ret 51 37 0: ··· 59 45 mov x18, x2 60 46 b efi_handle_corrupted_x18 // tail call 61 47 SYM_FUNC_END(__efi_rt_asm_wrapper) 48 + 49 + SYM_FUNC_START(__efi_rt_asm_recover) 50 + ldr_this_cpu x8, __efi_rt_asm_recover_sp, x9 51 + mov sp, x8 52 + 53 + ldp x0, x18, [sp, #16] 54 + ldp x19, x20, [sp, #32] 55 + ldp x21, x22, [sp, #48] 56 + ldp x23, x24, [sp, #64] 57 + ldp x25, x26, [sp, #80] 58 + ldp x27, x28, [sp, #96] 59 + ldp x29, x30, [sp], #112 60 + 61 + b efi_handle_runtime_exception 62 + SYM_FUNC_END(__efi_rt_asm_recover)
+26
arch/arm64/kernel/efi.c
··· 9 9 10 10 #include <linux/efi.h> 11 11 #include <linux/init.h> 12 + #include <linux/percpu.h> 12 13 13 14 #include <asm/efi.h> 14 15 ··· 128 127 { 129 128 pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f); 130 129 return s; 130 + } 131 + 132 + asmlinkage DEFINE_PER_CPU(u64, __efi_rt_asm_recover_sp); 133 + 134 + asmlinkage efi_status_t __efi_rt_asm_recover(void); 135 + 136 + asmlinkage efi_status_t efi_handle_runtime_exception(const char *f) 137 + { 138 + pr_err(FW_BUG "Synchronous exception occurred in EFI runtime service %s()\n", f); 139 + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 140 + return EFI_ABORTED; 141 + } 142 + 143 + bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg) 144 + { 145 + /* Check whether the exception occurred while running the firmware */ 146 + if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64) 147 + return false; 148 + 149 + pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg); 150 + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); 151 + dump_stack(); 152 + 153 + regs->pc = (u64)__efi_rt_asm_recover; 154 + return true; 131 155 }
+2 -1
arch/arm64/kernel/entry-common.c
··· 329 329 __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0); 330 330 } 331 331 332 - static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) 332 + static __always_inline bool 333 + cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) 333 334 { 334 335 if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa)) 335 336 return false;
+4
arch/arm64/mm/fault.c
··· 30 30 #include <asm/bug.h> 31 31 #include <asm/cmpxchg.h> 32 32 #include <asm/cpufeature.h> 33 + #include <asm/efi.h> 33 34 #include <asm/exception.h> 34 35 #include <asm/daifflags.h> 35 36 #include <asm/debug-monitors.h> ··· 391 390 392 391 msg = "paging request"; 393 392 } 393 + 394 + if (efi_runtime_fixup_exception(regs, msg)) 395 + return; 394 396 395 397 die_kernel_fault(msg, addr, esr, regs); 396 398 }
+2 -1
arch/powerpc/Kconfig
··· 147 147 select ARCH_MIGHT_HAVE_PC_SERIO 148 148 select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX 149 149 select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT 150 + select ARCH_SPLIT_ARG64 if PPC32 150 151 select ARCH_STACKWALK 151 152 select ARCH_SUPPORTS_ATOMIC_RMW 152 153 select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x ··· 286 285 # 287 286 288 287 config PPC_LONG_DOUBLE_128 289 - depends on PPC64 288 + depends on PPC64 && ALTIVEC 290 289 def_bool $(success,test "$(shell,echo __LONG_DOUBLE_128__ | $(CC) -E -P -)" = 1) 291 290 292 291 config PPC_BARRIER_NOSPEC
+7
arch/powerpc/include/asm/syscalls.h
··· 104 104 unsigned long len1, unsigned long len2); 105 105 long sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2, 106 106 size_t len, int advice); 107 + long sys_ppc_sync_file_range2(int fd, unsigned int flags, 108 + unsigned int offset1, 109 + unsigned int offset2, 110 + unsigned int nbytes1, 111 + unsigned int nbytes2); 112 + long sys_ppc_fallocate(int fd, int mode, u32 offset1, u32 offset2, 113 + u32 len1, u32 len2); 107 114 #endif 108 115 #ifdef CONFIG_COMPAT 109 116 long compat_sys_mmap2(unsigned long addr, size_t len,
+12 -1
arch/powerpc/kernel/sys_ppc32.c
··· 112 112 advice); 113 113 } 114 114 115 - COMPAT_SYSCALL_DEFINE6(ppc_sync_file_range2, 115 + PPC32_SYSCALL_DEFINE6(ppc_sync_file_range2, 116 116 int, fd, unsigned int, flags, 117 117 unsigned int, offset1, unsigned int, offset2, 118 118 unsigned int, nbytes1, unsigned int, nbytes2) ··· 122 122 123 123 return ksys_sync_file_range(fd, offset, nbytes, flags); 124 124 } 125 + 126 + #ifdef CONFIG_PPC32 127 + SYSCALL_DEFINE6(ppc_fallocate, 128 + int, fd, int, mode, 129 + u32, offset1, u32, offset2, u32, len1, u32, len2) 130 + { 131 + return ksys_fallocate(fd, mode, 132 + merge_64(offset1, offset2), 133 + merge_64(len1, len2)); 134 + } 135 + #endif
+5 -2
arch/powerpc/kernel/syscalls/syscall.tbl
··· 394 394 305 common signalfd sys_signalfd compat_sys_signalfd 395 395 306 common timerfd_create sys_timerfd_create 396 396 307 common eventfd sys_eventfd 397 - 308 common sync_file_range2 sys_sync_file_range2 compat_sys_ppc_sync_file_range2 398 - 309 nospu fallocate sys_fallocate compat_sys_fallocate 397 + 308 32 sync_file_range2 sys_ppc_sync_file_range2 compat_sys_ppc_sync_file_range2 398 + 308 64 sync_file_range2 sys_sync_file_range2 399 + 308 spu sync_file_range2 sys_sync_file_range2 400 + 309 32 fallocate sys_ppc_fallocate compat_sys_fallocate 401 + 309 64 fallocate sys_fallocate 399 402 310 nospu subpage_prot sys_subpage_prot 400 403 311 32 timerfd_settime sys_timerfd_settime32 401 404 311 64 timerfd_settime sys_timerfd_settime
+17 -9
arch/s390/kvm/kvm-s390.c
··· 1207 1207 return 0; 1208 1208 } 1209 1209 1210 + static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod); 1211 + 1210 1212 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) 1211 1213 { 1212 1214 struct kvm_s390_vm_tod_clock gtod; ··· 1218 1216 1219 1217 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) 1220 1218 return -EINVAL; 1221 - kvm_s390_set_tod_clock(kvm, &gtod); 1219 + __kvm_s390_set_tod_clock(kvm, &gtod); 1222 1220 1223 1221 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", 1224 1222 gtod.epoch_idx, gtod.tod); ··· 1249 1247 sizeof(gtod.tod))) 1250 1248 return -EFAULT; 1251 1249 1252 - kvm_s390_set_tod_clock(kvm, &gtod); 1250 + __kvm_s390_set_tod_clock(kvm, &gtod); 1253 1251 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); 1254 1252 return 0; 1255 1253 } ··· 1260 1258 1261 1259 if (attr->flags) 1262 1260 return -EINVAL; 1261 + 1262 + mutex_lock(&kvm->lock); 1263 + /* 1264 + * For protected guests, the TOD is managed by the ultravisor, so trying 1265 + * to change it will never bring the expected results. 1266 + */ 1267 + if (kvm_s390_pv_is_protected(kvm)) { 1268 + ret = -EOPNOTSUPP; 1269 + goto out_unlock; 1270 + } 1263 1271 1264 1272 switch (attr->attr) { 1265 1273 case KVM_S390_VM_TOD_EXT: ··· 1285 1273 ret = -ENXIO; 1286 1274 break; 1287 1275 } 1276 + 1277 + out_unlock: 1278 + mutex_unlock(&kvm->lock); 1288 1279 return ret; 1289 1280 } 1290 1281 ··· 4390 4375 4391 4376 kvm_s390_vcpu_unblock_all(kvm); 4392 4377 preempt_enable(); 4393 - } 4394 - 4395 - void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) 4396 - { 4397 - mutex_lock(&kvm->lock); 4398 - __kvm_s390_set_tod_clock(kvm, gtod); 4399 - mutex_unlock(&kvm->lock); 4400 4378 } 4401 4379 4402 4380 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
-1
arch/s390/kvm/kvm-s390.h
··· 363 363 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); 364 364 365 365 /* implemented in kvm-s390.c */ 366 - void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod); 367 366 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod); 368 367 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); 369 368 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+1 -1
arch/s390/kvm/pci.c
··· 126 126 return -EPERM; 127 127 128 128 mutex_lock(&aift->aift_lock); 129 - aift->kzdev = kcalloc(ZPCI_NR_DEVICES, sizeof(struct kvm_zdev), 129 + aift->kzdev = kcalloc(ZPCI_NR_DEVICES, sizeof(struct kvm_zdev *), 130 130 GFP_KERNEL); 131 131 if (!aift->kzdev) { 132 132 rc = -ENOMEM;
+19 -8
arch/x86/coco/tdx/tdx.c
··· 34 34 #define VE_GET_PORT_NUM(e) ((e) >> 16) 35 35 #define VE_IS_IO_STRING(e) ((e) & BIT(4)) 36 36 37 + #define ATTR_SEPT_VE_DISABLE BIT(28) 38 + 37 39 /* 38 40 * Wrapper for standard use of __tdx_hypercall with no output aside from 39 41 * return code. ··· 100 98 panic("TDCALL %lld failed (Buggy TDX module!)\n", fn); 101 99 } 102 100 103 - static u64 get_cc_mask(void) 101 + static void tdx_parse_tdinfo(u64 *cc_mask) 104 102 { 105 103 struct tdx_module_output out; 106 104 unsigned int gpa_width; 105 + u64 td_attr; 107 106 108 107 /* 109 108 * TDINFO TDX module call is used to get the TD execution environment ··· 112 109 * information, etc. More details about the ABI can be found in TDX 113 110 * Guest-Host-Communication Interface (GHCI), section 2.4.2 TDCALL 114 111 * [TDG.VP.INFO]. 115 - * 116 - * The GPA width that comes out of this call is critical. TDX guests 117 - * can not meaningfully run without it. 118 112 */ 119 113 tdx_module_call(TDX_GET_INFO, 0, 0, 0, 0, &out); 120 - 121 - gpa_width = out.rcx & GENMASK(5, 0); 122 114 123 115 /* 124 116 * The highest bit of a guest physical address is the "sharing" bit. 125 117 * Set it for shared pages and clear it for private pages. 118 + * 119 + * The GPA width that comes out of this call is critical. TDX guests 120 + * can not meaningfully run without it. 126 121 */ 127 - return BIT_ULL(gpa_width - 1); 122 + gpa_width = out.rcx & GENMASK(5, 0); 123 + *cc_mask = BIT_ULL(gpa_width - 1); 124 + 125 + /* 126 + * The kernel can not handle #VE's when accessing normal kernel 127 + * memory. Ensure that no #VE will be delivered for accesses to 128 + * TD-private memory. Only VMM-shared memory (MMIO) will #VE. 
129 + */ 130 + td_attr = out.rdx; 131 + if (!(td_attr & ATTR_SEPT_VE_DISABLE)) 132 + panic("TD misconfiguration: SEPT_VE_DISABLE attibute must be set.\n"); 128 133 } 129 134 130 135 /* ··· 769 758 setup_force_cpu_cap(X86_FEATURE_TDX_GUEST); 770 759 771 760 cc_set_vendor(CC_VENDOR_INTEL); 772 - cc_mask = get_cc_mask(); 761 + tdx_parse_tdinfo(&cc_mask); 773 762 cc_set_mask(cc_mask); 774 763 775 764 /*
+1
arch/x86/events/intel/core.c
··· 4911 4911 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000), 4912 4912 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 6, 0x00000000), 4913 4913 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 7, 0x00000000), 4914 + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 11, 0x00000000), 4914 4915 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_L, 3, 0x0000007c), 4915 4916 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE, 3, 0x0000007c), 4916 4917 INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE, 9, 0x0000004e),
+14 -4
arch/x86/events/intel/ds.c
··· 982 982 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */ 983 983 984 984 INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 985 - INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */ 986 - INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), /* MEM_INST_RETIRED.STORE */ 985 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ 986 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ 987 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ 988 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ 989 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ 990 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ 991 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ 987 992 988 993 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */ 989 994 ··· 1009 1004 INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe), 1010 1005 INTEL_PLD_CONSTRAINT(0x1cd, 0xfe), 1011 1006 INTEL_PSD_CONSTRAINT(0x2cd, 0x1), 1012 - INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), 1013 - INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x2d0, 0xf), 1007 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ 1008 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ 1009 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */ 1010 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */ 1011 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */ 1012 + INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */ 1013 + 
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */ 1014 1014 1015 1015 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), 1016 1016
+1 -5
arch/x86/events/rapl.c
··· 619 619 case RAPL_UNIT_QUIRK_INTEL_HSW: 620 620 rapl_hw_unit[PERF_RAPL_RAM] = 16; 621 621 break; 622 - /* 623 - * SPR shares the same DRAM domain energy unit as HSW, plus it 624 - * also has a fixed energy unit for Psys domain. 625 - */ 622 + /* SPR uses a fixed energy unit for Psys domain. */ 626 623 case RAPL_UNIT_QUIRK_INTEL_SPR: 627 - rapl_hw_unit[PERF_RAPL_RAM] = 16; 628 624 rapl_hw_unit[PERF_RAPL_PSYS] = 0; 629 625 break; 630 626 default:
+10 -1
arch/x86/include/asm/intel-family.h
··· 107 107 108 108 #define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */ 109 109 110 + #define INTEL_FAM6_EMERALDRAPIDS_X 0xCF 111 + 112 + #define INTEL_FAM6_GRANITERAPIDS_X 0xAD 113 + #define INTEL_FAM6_GRANITERAPIDS_D 0xAE 114 + 110 115 #define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */ 111 116 #define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */ 112 117 #define INTEL_FAM6_ALDERLAKE_N 0xBE ··· 123 118 #define INTEL_FAM6_METEORLAKE 0xAC 124 119 #define INTEL_FAM6_METEORLAKE_L 0xAA 125 120 126 - /* "Small Core" Processors (Atom) */ 121 + /* "Small Core" Processors (Atom/E-Core) */ 127 122 128 123 #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ 129 124 #define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */ ··· 149 144 #define INTEL_FAM6_ATOM_TREMONT_D 0x86 /* Jacobsville */ 150 145 #define INTEL_FAM6_ATOM_TREMONT 0x96 /* Elkhart Lake */ 151 146 #define INTEL_FAM6_ATOM_TREMONT_L 0x9C /* Jasper Lake */ 147 + 148 + #define INTEL_FAM6_SIERRAFOREST_X 0xAF 149 + 150 + #define INTEL_FAM6_GRANDRIDGE 0xB6 152 151 153 152 /* Xeon Phi */ 154 153
+1 -1
arch/x86/include/asm/syscall_wrapper.h
··· 6 6 #ifndef _ASM_X86_SYSCALL_WRAPPER_H 7 7 #define _ASM_X86_SYSCALL_WRAPPER_H 8 8 9 - struct pt_regs; 9 + #include <asm/ptrace.h> 10 10 11 11 extern long __x64_sys_ni_syscall(const struct pt_regs *regs); 12 12 extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
+1 -1
arch/x86/xen/pmu.c
··· 302 302 static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read, 303 303 bool *emul) 304 304 { 305 - int type, index; 305 + int type, index = 0; 306 306 307 307 if (is_amd_pmu_msr(msr)) 308 308 *emul = xen_amd_pmu_emulate(msr, val, is_read);
+6 -17
arch/x86/xen/setup.c
··· 910 910 911 911 void xen_enable_sysenter(void) 912 912 { 913 - int ret; 914 - unsigned sysenter_feature; 915 - 916 - sysenter_feature = X86_FEATURE_SYSENTER32; 917 - 918 - if (!boot_cpu_has(sysenter_feature)) 919 - return; 920 - 921 - ret = register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat); 922 - if(ret != 0) 923 - setup_clear_cpu_cap(sysenter_feature); 913 + if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) && 914 + register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat)) 915 + setup_clear_cpu_cap(X86_FEATURE_SYSENTER32); 924 916 } 925 917 926 918 void xen_enable_syscall(void) ··· 926 934 mechanism for syscalls. */ 927 935 } 928 936 929 - if (boot_cpu_has(X86_FEATURE_SYSCALL32)) { 930 - ret = register_callback(CALLBACKTYPE_syscall32, 931 - xen_entry_SYSCALL_compat); 932 - if (ret != 0) 933 - setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); 934 - } 937 + if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) && 938 + register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat)) 939 + setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); 935 940 } 936 941 937 942 static void __init xen_pvmmu_arch_setup(void)
+2 -3
block/blk-mq.c
··· 1262 1262 (!blk_queue_nomerges(rq->q) && 1263 1263 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { 1264 1264 blk_mq_flush_plug_list(plug, false); 1265 + last = NULL; 1265 1266 trace_block_plug(rq->q); 1266 1267 } 1267 1268 ··· 4194 4193 return 0; 4195 4194 4196 4195 err_hctxs: 4197 - xa_destroy(&q->hctx_table); 4198 - q->nr_hw_queues = 0; 4199 - blk_mq_sysfs_deinit(q); 4196 + blk_mq_release(q); 4200 4197 err_poll: 4201 4198 blk_stat_free_callback(q->poll_cb); 4202 4199 q->poll_cb = NULL;
+1
block/genhd.c
··· 527 527 bdi_unregister(disk->bdi); 528 528 out_unregister_queue: 529 529 blk_unregister_queue(disk); 530 + rq_qos_exit(disk->queue); 530 531 out_put_slave_dir: 531 532 kobject_put(disk->slave_dir); 532 533 out_put_holder_dir:
+1
drivers/acpi/numa/srat.c
··· 327 327 pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n", 328 328 node, start, end); 329 329 } 330 + node_set(node, numa_nodes_parsed); 330 331 331 332 /* Set the next available fake_pxm value */ 332 333 (*fake_pxm)++;
+6
drivers/acpi/x86/utils.c
··· 219 219 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"), 220 220 } 221 221 }, 222 + { 223 + .matches = { 224 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 225 + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5625"), 226 + } 227 + }, 222 228 {} 223 229 }; 224 230
+3 -2
drivers/ata/pata_legacy.c
··· 315 315 outb(inb(0x1F4) & 0x07, 0x1F4); 316 316 317 317 rt = inb(0x1F3); 318 - rt &= 0x07 << (3 * adev->devno); 318 + rt &= ~(0x07 << (3 * !adev->devno)); 319 319 if (pio) 320 - rt |= (1 + 3 * pio) << (3 * adev->devno); 320 + rt |= (1 + 3 * pio) << (3 * !adev->devno); 321 + outb(rt, 0x1F3); 321 322 322 323 udelay(100); 323 324 outb(inb(0x1F2) | 0x01, 0x1F2);
+2 -2
drivers/ata/pata_palmld.c
··· 63 63 64 64 /* remap drive's physical memory address */ 65 65 mem = devm_platform_ioremap_resource(pdev, 0); 66 - if (!mem) 67 - return -ENOMEM; 66 + if (IS_ERR(mem)) 67 + return PTR_ERR(mem); 68 68 69 69 /* request and activate power and reset GPIOs */ 70 70 lda->power = devm_gpiod_get(dev, "power", GPIOD_OUT_HIGH);
+6
drivers/block/Kconfig
··· 408 408 definition isn't finalized yet, and might change according to future 409 409 requirement, so mark is as experimental now. 410 410 411 + Say Y if you want to get better performance because task_work_add() 412 + can be used in IO path for replacing io_uring cmd, which will become 413 + shared between IO tasks and ubq daemon, meantime task_work_add() can 414 + can handle batch more effectively, but task_work_add() isn't exported 415 + for module, so ublk has to be built to kernel. 416 + 411 417 source "drivers/block/rnbd/Kconfig" 412 418 413 419 endif # BLK_DEV
+68 -47
drivers/block/ublk_drv.c
··· 57 57 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD) 58 58 59 59 struct ublk_rq_data { 60 - struct callback_head work; 60 + union { 61 + struct callback_head work; 62 + struct llist_node node; 63 + }; 61 64 }; 62 65 63 66 struct ublk_uring_cmd_pdu { 64 - struct request *req; 67 + struct ublk_queue *ubq; 65 68 }; 66 69 67 70 /* ··· 121 118 unsigned long flags; 122 119 struct task_struct *ubq_daemon; 123 120 char *io_cmd_buf; 121 + 122 + struct llist_head io_cmds; 124 123 125 124 unsigned long io_addr; /* mapped vm address */ 126 125 unsigned int max_io_sz; ··· 769 764 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd) 770 765 { 771 766 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 767 + struct ublk_queue *ubq = pdu->ubq; 768 + struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds); 769 + struct ublk_rq_data *data; 772 770 773 - __ublk_rq_task_work(pdu->req); 771 + llist_for_each_entry(data, io_cmds, node) 772 + __ublk_rq_task_work(blk_mq_rq_from_pdu(data)); 774 773 } 775 774 776 775 static void ublk_rq_task_work_fn(struct callback_head *work) ··· 784 775 struct request *req = blk_mq_rq_from_pdu(data); 785 776 786 777 __ublk_rq_task_work(req); 778 + } 779 + 780 + static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq) 781 + { 782 + struct ublk_io *io = &ubq->ios[rq->tag]; 783 + 784 + /* 785 + * If the check pass, we know that this is a re-issued request aborted 786 + * previously in monitor_work because the ubq_daemon(cmd's task) is 787 + * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore 788 + * because this ioucmd's io_uring context may be freed now if no inflight 789 + * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work. 790 + * 791 + * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing 792 + * the tag). Then the request is re-started(allocating the tag) and we are here. 
793 + * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED 794 + * guarantees that here is a re-issued request aborted previously. 795 + */ 796 + if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) { 797 + struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds); 798 + struct ublk_rq_data *data; 799 + 800 + llist_for_each_entry(data, io_cmds, node) 801 + __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data)); 802 + } else { 803 + struct io_uring_cmd *cmd = io->cmd; 804 + struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 805 + 806 + pdu->ubq = ubq; 807 + io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb); 808 + } 809 + } 810 + 811 + static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq, 812 + bool last) 813 + { 814 + struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq); 815 + 816 + if (ublk_can_use_task_work(ubq)) { 817 + enum task_work_notify_mode notify_mode = last ? 818 + TWA_SIGNAL_NO_IPI : TWA_NONE; 819 + 820 + if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode)) 821 + __ublk_abort_rq(ubq, rq); 822 + } else { 823 + if (llist_add(&data->node, &ubq->io_cmds)) 824 + ublk_submit_cmd(ubq, rq); 825 + } 787 826 } 788 827 789 828 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx, ··· 845 788 res = ublk_setup_iod(ubq, rq); 846 789 if (unlikely(res != BLK_STS_OK)) 847 790 return BLK_STS_IOERR; 791 + 848 792 /* With recovery feature enabled, force_abort is set in 849 793 * ublk_stop_dev() before calling del_gendisk(). We have to 850 794 * abort all requeued and new rqs here to let del_gendisk() ··· 861 803 blk_mq_start_request(bd->rq); 862 804 863 805 if (unlikely(ubq_daemon_is_dying(ubq))) { 864 - fail: 865 806 __ublk_abort_rq(ubq, rq); 866 807 return BLK_STS_OK; 867 808 } 868 809 869 - if (ublk_can_use_task_work(ubq)) { 870 - struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq); 871 - enum task_work_notify_mode notify_mode = bd->last ? 
872 - TWA_SIGNAL_NO_IPI : TWA_NONE; 873 - 874 - if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode)) 875 - goto fail; 876 - } else { 877 - struct ublk_io *io = &ubq->ios[rq->tag]; 878 - struct io_uring_cmd *cmd = io->cmd; 879 - struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 880 - 881 - /* 882 - * If the check pass, we know that this is a re-issued request aborted 883 - * previously in monitor_work because the ubq_daemon(cmd's task) is 884 - * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore 885 - * because this ioucmd's io_uring context may be freed now if no inflight 886 - * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work. 887 - * 888 - * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing 889 - * the tag). Then the request is re-started(allocating the tag) and we are here. 890 - * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED 891 - * guarantees that here is a re-issued request aborted previously. 
892 - */ 893 - if ((io->flags & UBLK_IO_FLAG_ABORTED)) 894 - goto fail; 895 - 896 - pdu->req = rq; 897 - io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb); 898 - } 810 + ublk_queue_cmd(ubq, rq, bd->last); 899 811 900 812 return BLK_STS_OK; 901 813 } ··· 1192 1164 } 1193 1165 1194 1166 static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id, 1195 - int tag, struct io_uring_cmd *cmd) 1167 + int tag) 1196 1168 { 1197 1169 struct ublk_queue *ubq = ublk_get_queue(ub, q_id); 1198 1170 struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag); 1199 1171 1200 - if (ublk_can_use_task_work(ubq)) { 1201 - struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); 1202 - 1203 - /* should not fail since we call it just in ubq->ubq_daemon */ 1204 - task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI); 1205 - } else { 1206 - struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); 1207 - 1208 - pdu->req = req; 1209 - io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb); 1210 - } 1172 + ublk_queue_cmd(ubq, req, true); 1211 1173 } 1212 1174 1213 1175 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) ··· 1285 1267 io->addr = ub_cmd->addr; 1286 1268 io->cmd = cmd; 1287 1269 io->flags |= UBLK_IO_FLAG_ACTIVE; 1288 - ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag, cmd); 1270 + ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag); 1289 1271 break; 1290 1272 default: 1291 1273 goto out; ··· 1675 1657 * (features) to handle. 1676 1658 */ 1677 1659 ub->dev_info.flags &= UBLK_F_ALL; 1660 + 1661 + if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK)) 1662 + ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK; 1678 1663 1679 1664 /* We are not ready to support zero copy */ 1680 1665 ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY;
+1 -1
drivers/bluetooth/virtio_bt.c
··· 219 219 if (!skb) 220 220 return; 221 221 222 - skb->len = len; 222 + skb_put(skb, len); 223 223 virtbt_rx_handle(vbt, skb); 224 224 225 225 if (virtbt_add_inbuf(vbt) < 0)
+62 -3
drivers/clk/clk-renesas-pcie.c
··· 90 90 .n_yes_ranges = ARRAY_SIZE(rs9_writeable_ranges), 91 91 }; 92 92 93 + static int rs9_regmap_i2c_write(void *context, 94 + unsigned int reg, unsigned int val) 95 + { 96 + struct i2c_client *i2c = context; 97 + const u8 data[3] = { reg, 1, val }; 98 + const int count = ARRAY_SIZE(data); 99 + int ret; 100 + 101 + ret = i2c_master_send(i2c, data, count); 102 + if (ret == count) 103 + return 0; 104 + else if (ret < 0) 105 + return ret; 106 + else 107 + return -EIO; 108 + } 109 + 110 + static int rs9_regmap_i2c_read(void *context, 111 + unsigned int reg, unsigned int *val) 112 + { 113 + struct i2c_client *i2c = context; 114 + struct i2c_msg xfer[2]; 115 + u8 txdata = reg; 116 + u8 rxdata[2]; 117 + int ret; 118 + 119 + xfer[0].addr = i2c->addr; 120 + xfer[0].flags = 0; 121 + xfer[0].len = 1; 122 + xfer[0].buf = (void *)&txdata; 123 + 124 + xfer[1].addr = i2c->addr; 125 + xfer[1].flags = I2C_M_RD; 126 + xfer[1].len = 2; 127 + xfer[1].buf = (void *)rxdata; 128 + 129 + ret = i2c_transfer(i2c->adapter, xfer, 2); 130 + if (ret < 0) 131 + return ret; 132 + if (ret != 2) 133 + return -EIO; 134 + 135 + /* 136 + * Byte 0 is transfer length, which is always 1 due 137 + * to BCP register programming to 1 in rs9_probe(), 138 + * ignore it and use data from Byte 1. 
139 + */ 140 + *val = rxdata[1]; 141 + return 0; 142 + } 143 + 93 144 static const struct regmap_config rs9_regmap_config = { 94 145 .reg_bits = 8, 95 146 .val_bits = 8, 96 - .cache_type = REGCACHE_FLAT, 97 - .max_register = 0x8, 147 + .cache_type = REGCACHE_NONE, 148 + .max_register = RS9_REG_BCP, 98 149 .rd_table = &rs9_readable_table, 99 150 .wr_table = &rs9_writeable_table, 151 + .reg_write = rs9_regmap_i2c_write, 152 + .reg_read = rs9_regmap_i2c_read, 100 153 }; 101 154 102 155 static int rs9_get_output_config(struct rs9_driver_data *rs9, int idx) ··· 295 242 return ret; 296 243 } 297 244 298 - rs9->regmap = devm_regmap_init_i2c(client, &rs9_regmap_config); 245 + rs9->regmap = devm_regmap_init(&client->dev, NULL, 246 + client, &rs9_regmap_config); 299 247 if (IS_ERR(rs9->regmap)) 300 248 return dev_err_probe(&client->dev, PTR_ERR(rs9->regmap), 301 249 "Failed to allocate register map\n"); 250 + 251 + /* Always read back 1 Byte via I2C */ 252 + ret = regmap_write(rs9->regmap, RS9_REG_BCP, 1); 253 + if (ret < 0) 254 + return ret; 302 255 303 256 /* Register clock */ 304 257 for (i = 0; i < rs9->chip_info->num_clks; i++) {
+5 -1
drivers/clk/clk.c
··· 1459 1459 { 1460 1460 struct clk_core *parent; 1461 1461 1462 - if (WARN_ON(!core || !req)) 1462 + if (WARN_ON(!req)) 1463 1463 return; 1464 1464 1465 1465 memset(req, 0, sizeof(*req)); 1466 + req->max_rate = ULONG_MAX; 1467 + 1468 + if (!core) 1469 + return; 1466 1470 1467 1471 req->rate = rate; 1468 1472 clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
+3 -1
drivers/clk/mediatek/clk-mt8195-topckgen.c
··· 1270 1270 hw = devm_clk_hw_register_mux(&pdev->dev, "mfg_ck_fast_ref", mfg_fast_parents, 1271 1271 ARRAY_SIZE(mfg_fast_parents), CLK_SET_RATE_PARENT, 1272 1272 (base + 0x250), 8, 1, 0, &mt8195_clk_lock); 1273 - if (IS_ERR(hw)) 1273 + if (IS_ERR(hw)) { 1274 + r = PTR_ERR(hw); 1274 1275 goto unregister_muxes; 1276 + } 1275 1277 top_clk_data->hws[CLK_TOP_MFG_CK_FAST_REF] = hw; 1276 1278 1277 1279 r = clk_mt8195_reg_mfg_mux_notifier(&pdev->dev,
+1
drivers/clk/qcom/gcc-sc7280.c
··· 3467 3467 regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0)); 3468 3468 regmap_update_bits(regmap, 0x28014, BIT(0), BIT(0)); 3469 3469 regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); 3470 + regmap_update_bits(regmap, 0x7100C, BIT(13), BIT(13)); 3470 3471 3471 3472 ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, 3472 3473 ARRAY_SIZE(gcc_dfs_clocks));
+1
drivers/clk/qcom/gpucc-sc7280.c
··· 463 463 */ 464 464 regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0)); 465 465 regmap_update_bits(regmap, 0x1098, BIT(0), BIT(0)); 466 + regmap_update_bits(regmap, 0x1098, BIT(13), BIT(13)); 466 467 467 468 return qcom_cc_really_probe(pdev, &gpu_cc_sc7280_desc, regmap); 468 469 }
+9 -4
drivers/clk/renesas/r8a779g0-cpg-mssr.c
··· 47 47 CLK_S0_VIO, 48 48 CLK_S0_VC, 49 49 CLK_S0_HSC, 50 + CLK_SASYNCPER, 50 51 CLK_SV_VIP, 51 52 CLK_SV_IR, 52 53 CLK_SDSRC, ··· 85 84 DEF_FIXED(".s0_vio", CLK_S0_VIO, CLK_PLL1_DIV2, 2, 1), 86 85 DEF_FIXED(".s0_vc", CLK_S0_VC, CLK_PLL1_DIV2, 2, 1), 87 86 DEF_FIXED(".s0_hsc", CLK_S0_HSC, CLK_PLL1_DIV2, 2, 1), 87 + DEF_FIXED(".sasyncper", CLK_SASYNCPER, CLK_PLL5_DIV4, 3, 1), 88 88 DEF_FIXED(".sv_vip", CLK_SV_VIP, CLK_PLL1, 5, 1), 89 89 DEF_FIXED(".sv_ir", CLK_SV_IR, CLK_PLL1, 5, 1), 90 90 DEF_BASE(".sdsrc", CLK_SDSRC, CLK_TYPE_GEN4_SDSRC, CLK_PLL5), ··· 130 128 DEF_FIXED("s0d4_hsc", R8A779G0_CLK_S0D4_HSC, CLK_S0_HSC, 4, 1), 131 129 DEF_FIXED("cl16m_hsc", R8A779G0_CLK_CL16M_HSC, CLK_S0_HSC, 48, 1), 132 130 DEF_FIXED("s0d2_cc", R8A779G0_CLK_S0D2_CC, CLK_S0, 2, 1), 131 + DEF_FIXED("sasyncperd1",R8A779G0_CLK_SASYNCPERD1, CLK_SASYNCPER,1, 1), 132 + DEF_FIXED("sasyncperd2",R8A779G0_CLK_SASYNCPERD2, CLK_SASYNCPER,2, 1), 133 + DEF_FIXED("sasyncperd4",R8A779G0_CLK_SASYNCPERD4, CLK_SASYNCPER,4, 1), 133 134 DEF_FIXED("svd1_ir", R8A779G0_CLK_SVD1_IR, CLK_SV_IR, 1, 1), 134 135 DEF_FIXED("svd2_ir", R8A779G0_CLK_SVD2_IR, CLK_SV_IR, 2, 1), 135 136 DEF_FIXED("svd1_vip", R8A779G0_CLK_SVD1_VIP, CLK_SV_VIP, 1, 1), ··· 158 153 DEF_MOD("avb0", 211, R8A779G0_CLK_S0D4_HSC), 159 154 DEF_MOD("avb1", 212, R8A779G0_CLK_S0D4_HSC), 160 155 DEF_MOD("avb2", 213, R8A779G0_CLK_S0D4_HSC), 161 - DEF_MOD("hscif0", 514, R8A779G0_CLK_S0D3_PER), 162 - DEF_MOD("hscif1", 515, R8A779G0_CLK_S0D3_PER), 163 - DEF_MOD("hscif2", 516, R8A779G0_CLK_S0D3_PER), 164 - DEF_MOD("hscif3", 517, R8A779G0_CLK_S0D3_PER), 156 + DEF_MOD("hscif0", 514, R8A779G0_CLK_SASYNCPERD1), 157 + DEF_MOD("hscif1", 515, R8A779G0_CLK_SASYNCPERD1), 158 + DEF_MOD("hscif2", 516, R8A779G0_CLK_SASYNCPERD1), 159 + DEF_MOD("hscif3", 517, R8A779G0_CLK_SASYNCPERD1), 165 160 DEF_MOD("i2c0", 518, R8A779G0_CLK_S0D6_PER), 166 161 DEF_MOD("i2c1", 519, R8A779G0_CLK_S0D6_PER), 167 162 DEF_MOD("i2c2", 520, R8A779G0_CLK_S0D6_PER),
+3 -1
drivers/clk/sifive/Kconfig
··· 2 2 3 3 menuconfig CLK_SIFIVE 4 4 bool "SiFive SoC driver support" 5 - depends on RISCV || COMPILE_TEST 5 + depends on SOC_SIFIVE || COMPILE_TEST 6 + default SOC_SIFIVE 6 7 help 7 8 SoC drivers for SiFive Linux-capable SoCs. 8 9 ··· 11 10 12 11 config CLK_SIFIVE_PRCI 13 12 bool "PRCI driver for SiFive SoCs" 13 + default SOC_SIFIVE 14 14 select RESET_CONTROLLER 15 15 select RESET_SIMPLE 16 16 select CLK_ANALOGBITS_WRPLL_CLN28HPC
+1 -1
drivers/cxl/core/mbox.c
··· 174 174 }; 175 175 int rc; 176 176 177 - if (out_size > cxlds->payload_size) 177 + if (in_size > cxlds->payload_size || out_size > cxlds->payload_size) 178 178 return -E2BIG; 179 179 180 180 rc = cxlds->mbox_send(cxlds, &mbox_cmd);
+2
drivers/cxl/core/pmem.c
··· 188 188 { 189 189 struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); 190 190 191 + xa_destroy(&cxl_nvd->pmem_regions); 191 192 kfree(cxl_nvd); 192 193 } 193 194 ··· 231 230 232 231 dev = &cxl_nvd->dev; 233 232 cxl_nvd->cxlmd = cxlmd; 233 + xa_init(&cxl_nvd->pmem_regions); 234 234 device_initialize(dev); 235 235 lockdep_set_class(&dev->mutex, &cxl_nvdimm_key); 236 236 device_set_pm_not_required(dev);
+9 -2
drivers/cxl/core/port.c
··· 811 811 static int add_dport(struct cxl_port *port, struct cxl_dport *new) 812 812 { 813 813 struct cxl_dport *dup; 814 + int rc; 814 815 815 816 device_lock_assert(&port->dev); 816 817 dup = find_dport(port, new->port_id); ··· 822 821 dev_name(dup->dport)); 823 822 return -EBUSY; 824 823 } 825 - return xa_insert(&port->dports, (unsigned long)new->dport, new, 826 - GFP_KERNEL); 824 + 825 + rc = xa_insert(&port->dports, (unsigned long)new->dport, new, 826 + GFP_KERNEL); 827 + if (rc) 828 + return rc; 829 + 830 + port->nr_dports++; 831 + return 0; 827 832 } 828 833 829 834 /*
+85 -28
drivers/cxl/core/region.c
··· 174 174 iter = to_cxl_port(iter->dev.parent)) { 175 175 cxl_rr = cxl_rr_load(iter, cxlr); 176 176 cxld = cxl_rr->decoder; 177 - rc = cxld->commit(cxld); 177 + if (cxld->commit) 178 + rc = cxld->commit(cxld); 178 179 if (rc) 179 180 break; 180 181 } ··· 658 657 xa_for_each(&port->regions, index, iter) { 659 658 struct cxl_region_params *ip = &iter->region->params; 660 659 660 + if (!ip->res) 661 + continue; 662 + 661 663 if (ip->res->start > p->res->start) { 662 664 dev_dbg(&cxlr->dev, 663 665 "%s: HPA order violation %s:%pr vs %pr\n", ··· 690 686 return cxl_rr; 691 687 } 692 688 693 - static void free_region_ref(struct cxl_region_ref *cxl_rr) 689 + static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr) 694 690 { 695 - struct cxl_port *port = cxl_rr->port; 696 691 struct cxl_region *cxlr = cxl_rr->region; 697 692 struct cxl_decoder *cxld = cxl_rr->decoder; 693 + 694 + if (!cxld) 695 + return; 698 696 699 697 dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n"); 700 698 if (cxld->region == cxlr) { 701 699 cxld->region = NULL; 702 700 put_device(&cxlr->dev); 703 701 } 702 + } 704 703 704 + static void free_region_ref(struct cxl_region_ref *cxl_rr) 705 + { 706 + struct cxl_port *port = cxl_rr->port; 707 + struct cxl_region *cxlr = cxl_rr->region; 708 + 709 + cxl_rr_free_decoder(cxl_rr); 705 710 xa_erase(&port->regions, (unsigned long)cxlr); 706 711 xa_destroy(&cxl_rr->endpoints); 707 712 kfree(cxl_rr); ··· 738 725 get_device(&cxlr->dev); 739 726 } 740 727 728 + return 0; 729 + } 730 + 731 + static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr, 732 + struct cxl_endpoint_decoder *cxled, 733 + struct cxl_region_ref *cxl_rr) 734 + { 735 + struct cxl_decoder *cxld; 736 + 737 + if (port == cxled_to_port(cxled)) 738 + cxld = &cxled->cxld; 739 + else 740 + cxld = cxl_region_find_decoder(port, cxlr); 741 + if (!cxld) { 742 + dev_dbg(&cxlr->dev, "%s: no decoder available\n", 743 + dev_name(&port->dev)); 744 + return -EBUSY; 
745 + } 746 + 747 + if (cxld->region) { 748 + dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", 749 + dev_name(&port->dev), dev_name(&cxld->dev), 750 + dev_name(&cxld->region->dev)); 751 + return -EBUSY; 752 + } 753 + 754 + cxl_rr->decoder = cxld; 741 755 return 0; 742 756 } 743 757 ··· 834 794 cxl_rr->nr_targets++; 835 795 nr_targets_inc = true; 836 796 } 837 - 838 - /* 839 - * The decoder for @cxlr was allocated when the region was first 840 - * attached to @port. 841 - */ 842 - cxld = cxl_rr->decoder; 843 797 } else { 844 798 cxl_rr = alloc_region_ref(port, cxlr); 845 799 if (IS_ERR(cxl_rr)) { ··· 844 810 } 845 811 nr_targets_inc = true; 846 812 847 - if (port == cxled_to_port(cxled)) 848 - cxld = &cxled->cxld; 849 - else 850 - cxld = cxl_region_find_decoder(port, cxlr); 851 - if (!cxld) { 852 - dev_dbg(&cxlr->dev, "%s: no decoder available\n", 853 - dev_name(&port->dev)); 813 + rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr); 814 + if (rc) 854 815 goto out_erase; 855 - } 856 - 857 - if (cxld->region) { 858 - dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", 859 - dev_name(&port->dev), dev_name(&cxld->dev), 860 - dev_name(&cxld->region->dev)); 861 - rc = -EBUSY; 862 - goto out_erase; 863 - } 864 - 865 - cxl_rr->decoder = cxld; 866 816 } 817 + cxld = cxl_rr->decoder; 867 818 868 819 rc = cxl_rr_ep_add(cxl_rr, cxled); 869 820 if (rc) { ··· 990 971 if (cxl_rr->nr_targets_set) { 991 972 int i, distance; 992 973 993 - distance = p->nr_targets / cxl_rr->nr_targets; 974 + /* 975 + * Passthrough ports impose no distance requirements between 976 + * peers 977 + */ 978 + if (port->nr_dports == 1) 979 + distance = 0; 980 + else 981 + distance = p->nr_targets / cxl_rr->nr_targets; 994 982 for (i = 0; i < cxl_rr->nr_targets_set; i++) 995 983 if (ep->dport == cxlsd->target[i]) { 996 984 rc = check_last_peer(cxled, ep, cxl_rr, ··· 1534 1508 1535 1509 static void cxl_region_release(struct device *dev) 1536 1510 { 1511 + struct cxl_root_decoder *cxlrd = 
to_cxl_root_decoder(dev->parent); 1537 1512 struct cxl_region *cxlr = to_cxl_region(dev); 1513 + int id = atomic_read(&cxlrd->region_id); 1514 + 1515 + /* 1516 + * Try to reuse the recently idled id rather than the cached 1517 + * next id to prevent the region id space from increasing 1518 + * unnecessarily. 1519 + */ 1520 + if (cxlr->id < id) 1521 + if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) { 1522 + memregion_free(id); 1523 + goto out; 1524 + } 1538 1525 1539 1526 memregion_free(cxlr->id); 1527 + out: 1528 + put_device(dev->parent); 1540 1529 kfree(cxlr); 1541 1530 } 1542 1531 ··· 1579 1538 static void unregister_region(void *dev) 1580 1539 { 1581 1540 struct cxl_region *cxlr = to_cxl_region(dev); 1541 + struct cxl_region_params *p = &cxlr->params; 1542 + int i; 1582 1543 1583 1544 device_del(dev); 1545 + 1546 + /* 1547 + * Now that region sysfs is shutdown, the parameter block is now 1548 + * read-only, so no need to hold the region rwsem to access the 1549 + * region parameters. 1550 + */ 1551 + for (i = 0; i < p->interleave_ways; i++) 1552 + detach_target(cxlr, i); 1553 + 1584 1554 cxl_region_iomem_release(cxlr); 1585 1555 put_device(dev); 1586 1556 } ··· 1613 1561 device_initialize(dev); 1614 1562 lockdep_set_class(&dev->mutex, &cxl_region_key); 1615 1563 dev->parent = &cxlrd->cxlsd.cxld.dev; 1564 + /* 1565 + * Keep root decoder pinned through cxl_region_release to fixup 1566 + * region id allocations 1567 + */ 1568 + get_device(dev->parent); 1616 1569 device_set_pm_not_required(dev); 1617 1570 dev->bus = &cxl_bus_type; 1618 1571 dev->type = &cxl_region_type;
+3 -1
drivers/cxl/cxl.h
··· 423 423 struct device dev; 424 424 struct cxl_memdev *cxlmd; 425 425 struct cxl_nvdimm_bridge *bridge; 426 - struct cxl_pmem_region *region; 426 + struct xarray pmem_regions; 427 427 }; 428 428 429 429 struct cxl_pmem_region_mapping { ··· 457 457 * @regions: cxl_region_ref instances, regions mapped by this port 458 458 * @parent_dport: dport that points to this port in the parent 459 459 * @decoder_ida: allocator for decoder ids 460 + * @nr_dports: number of entries in @dports 460 461 * @hdm_end: track last allocated HDM decoder instance for allocation ordering 461 462 * @commit_end: cursor to track highest committed decoder for commit ordering 462 463 * @component_reg_phys: component register capability base address (optional) ··· 476 475 struct xarray regions; 477 476 struct cxl_dport *parent_dport; 478 477 struct ida decoder_ida; 478 + int nr_dports; 479 479 int hdm_end; 480 480 int commit_end; 481 481 resource_size_t component_reg_phys;
+68 -39
drivers/cxl/pmem.c
··· 30 30 struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); 31 31 struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge; 32 32 struct cxl_pmem_region *cxlr_pmem; 33 + unsigned long index; 33 34 34 35 device_lock(&cxl_nvb->dev); 35 - cxlr_pmem = cxl_nvd->region; 36 36 dev_set_drvdata(&cxl_nvd->dev, NULL); 37 - cxl_nvd->region = NULL; 38 - device_unlock(&cxl_nvb->dev); 37 + xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) { 38 + get_device(&cxlr_pmem->dev); 39 + device_unlock(&cxl_nvb->dev); 39 40 40 - if (cxlr_pmem) { 41 41 device_release_driver(&cxlr_pmem->dev); 42 42 put_device(&cxlr_pmem->dev); 43 + 44 + device_lock(&cxl_nvb->dev); 43 45 } 46 + device_unlock(&cxl_nvb->dev); 44 47 45 48 nvdimm_delete(nvdimm); 46 49 cxl_nvd->bridge = NULL; ··· 110 107 111 108 *cmd = (struct nd_cmd_get_config_size) { 112 109 .config_size = cxlds->lsa_size, 113 - .max_xfer = cxlds->payload_size, 110 + .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa), 114 111 }; 115 112 116 113 return 0; ··· 151 148 return -EINVAL; 152 149 153 150 /* 4-byte status follows the input data in the payload */ 154 - if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len) 151 + if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len) 155 152 return -EINVAL; 156 153 157 154 set_lsa = ··· 369 366 370 367 static void unregister_nvdimm_region(void *nd_region) 371 368 { 372 - struct cxl_nvdimm_bridge *cxl_nvb; 373 - struct cxl_pmem_region *cxlr_pmem; 374 - int i; 369 + nvdimm_region_delete(nd_region); 370 + } 375 371 376 - cxlr_pmem = nd_region_provider_data(nd_region); 377 - cxl_nvb = cxlr_pmem->bridge; 372 + static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd, 373 + struct cxl_pmem_region *cxlr_pmem) 374 + { 375 + int rc; 376 + 377 + rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem, 378 + cxlr_pmem, GFP_KERNEL); 379 + if (rc) 380 + return rc; 381 + 382 + get_device(&cxlr_pmem->dev); 383 + return 0; 384 + } 385 + 386 + static void 
cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd, 387 + struct cxl_pmem_region *cxlr_pmem) 388 + { 389 + /* 390 + * It is possible this is called without a corresponding 391 + * cxl_nvdimm_add_region for @cxlr_pmem 392 + */ 393 + cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem); 394 + if (cxlr_pmem) 395 + put_device(&cxlr_pmem->dev); 396 + } 397 + 398 + static void release_mappings(void *data) 399 + { 400 + int i; 401 + struct cxl_pmem_region *cxlr_pmem = data; 402 + struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge; 403 + 378 404 device_lock(&cxl_nvb->dev); 379 405 for (i = 0; i < cxlr_pmem->nr_mappings; i++) { 380 406 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; 381 407 struct cxl_nvdimm *cxl_nvd = m->cxl_nvd; 382 408 383 - if (cxl_nvd->region) { 384 - put_device(&cxlr_pmem->dev); 385 - cxl_nvd->region = NULL; 386 - } 409 + cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem); 387 410 } 388 411 device_unlock(&cxl_nvb->dev); 389 - 390 - nvdimm_region_delete(nd_region); 391 412 } 392 413 393 414 static void cxlr_pmem_remove_resource(void *res) ··· 449 422 if (!cxl_nvb->nvdimm_bus) { 450 423 dev_dbg(dev, "nvdimm bus not found\n"); 451 424 rc = -ENXIO; 452 - goto err; 425 + goto out_nvb; 453 426 } 454 427 455 428 memset(&mappings, 0, sizeof(mappings)); ··· 458 431 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); 459 432 if (!res) { 460 433 rc = -ENOMEM; 461 - goto err; 434 + goto out_nvb; 462 435 } 463 436 464 437 res->name = "Persistent Memory"; ··· 469 442 470 443 rc = insert_resource(&iomem_resource, res); 471 444 if (rc) 472 - goto err; 445 + goto out_nvb; 473 446 474 447 rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res); 475 448 if (rc) 476 - goto err; 449 + goto out_nvb; 477 450 478 451 ndr_desc.res = res; 479 452 ndr_desc.provider_data = cxlr_pmem; ··· 489 462 nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); 490 463 if (!nd_set) { 491 464 rc = -ENOMEM; 492 - goto err; 465 + goto out_nvb; 493 466 } 494 
467 495 468 ndr_desc.memregion = cxlr->id; ··· 499 472 info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL); 500 473 if (!info) { 501 474 rc = -ENOMEM; 502 - goto err; 475 + goto out_nvb; 503 476 } 477 + 478 + rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem); 479 + if (rc) 480 + goto out_nvd; 504 481 505 482 for (i = 0; i < cxlr_pmem->nr_mappings; i++) { 506 483 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; ··· 517 486 dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i, 518 487 dev_name(&cxlmd->dev)); 519 488 rc = -ENODEV; 520 - goto err; 489 + goto out_nvd; 521 490 } 522 491 523 492 /* safe to drop ref now with bridge lock held */ ··· 529 498 dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i, 530 499 dev_name(&cxlmd->dev)); 531 500 rc = -ENODEV; 532 - goto err; 501 + goto out_nvd; 533 502 } 534 - cxl_nvd->region = cxlr_pmem; 535 - get_device(&cxlr_pmem->dev); 503 + 504 + /* 505 + * Pin the region per nvdimm device as those may be released 506 + * out-of-order with respect to the region, and a single nvdimm 507 + * maybe associated with multiple regions 508 + */ 509 + rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem); 510 + if (rc) 511 + goto out_nvd; 536 512 m->cxl_nvd = cxl_nvd; 537 513 mappings[i] = (struct nd_mapping_desc) { 538 514 .nvdimm = nvdimm, ··· 565 527 nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc); 566 528 if (!cxlr_pmem->nd_region) { 567 529 rc = -ENOMEM; 568 - goto err; 530 + goto out_nvd; 569 531 } 570 532 571 533 rc = devm_add_action_or_reset(dev, unregister_nvdimm_region, 572 534 cxlr_pmem->nd_region); 573 - out: 535 + out_nvd: 574 536 kfree(info); 537 + out_nvb: 575 538 device_unlock(&cxl_nvb->dev); 576 539 put_device(&cxl_nvb->dev); 577 540 578 541 return rc; 579 - 580 - err: 581 - dev_dbg(dev, "failed to create nvdimm region\n"); 582 - for (i--; i >= 0; i--) { 583 - nvdimm = mappings[i].nvdimm; 584 - cxl_nvd = nvdimm_provider_data(nvdimm); 585 - put_device(&cxl_nvd->region->dev); 586 - 
cxl_nvd->region = NULL; 587 - } 588 - goto out; 589 542 } 590 543 591 544 static struct cxl_driver cxl_pmem_region_driver = {
+11
drivers/firmware/arm_scmi/bus.c
··· 216 216 device_unregister(&scmi_dev->dev); 217 217 } 218 218 219 + void scmi_device_link_add(struct device *consumer, struct device *supplier) 220 + { 221 + struct device_link *link; 222 + 223 + link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER); 224 + 225 + WARN_ON(!link); 226 + } 227 + 219 228 void scmi_set_handle(struct scmi_device *scmi_dev) 220 229 { 221 230 scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); 231 + if (scmi_dev->handle) 232 + scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); 222 233 } 223 234 224 235 int scmi_protocol_register(const struct scmi_protocol *proto)
+4 -1
drivers/firmware/arm_scmi/common.h
··· 97 97 struct scmi_revision_info * 98 98 scmi_revision_area_get(const struct scmi_protocol_handle *ph); 99 99 int scmi_handle_put(const struct scmi_handle *handle); 100 + void scmi_device_link_add(struct device *consumer, struct device *supplier); 100 101 struct scmi_handle *scmi_handle_get(struct device *dev); 101 102 void scmi_set_handle(struct scmi_device *scmi_dev); 102 103 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, ··· 118 117 * 119 118 * @dev: Reference to device in the SCMI hierarchy corresponding to this 120 119 * channel 120 + * @rx_timeout_ms: The configured RX timeout in milliseconds. 121 121 * @handle: Pointer to SCMI entity handle 122 122 * @no_completion_irq: Flag to indicate that this channel has no completion 123 123 * interrupt mechanism for synchronous commands. ··· 128 126 */ 129 127 struct scmi_chan_info { 130 128 struct device *dev; 129 + unsigned int rx_timeout_ms; 131 130 struct scmi_handle *handle; 132 131 bool no_completion_irq; 133 132 void *transport_info; ··· 235 232 struct scmi_shared_mem; 236 233 237 234 void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, 238 - struct scmi_xfer *xfer); 235 + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo); 239 236 u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem); 240 237 void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, 241 238 struct scmi_xfer *xfer);
+27 -14
drivers/firmware/arm_scmi/driver.c
··· 2013 2013 return -ENOMEM; 2014 2014 2015 2015 cinfo->dev = dev; 2016 + cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; 2016 2017 2017 2018 ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); 2018 2019 if (ret) ··· 2045 2044 { 2046 2045 int ret = scmi_chan_setup(info, dev, prot_id, true); 2047 2046 2048 - if (!ret) /* Rx is optional, hence no error check */ 2049 - scmi_chan_setup(info, dev, prot_id, false); 2047 + if (!ret) { 2048 + /* Rx is optional, report only memory errors */ 2049 + ret = scmi_chan_setup(info, dev, prot_id, false); 2050 + if (ret && ret != -ENOMEM) 2051 + ret = 0; 2052 + } 2050 2053 2051 2054 return ret; 2052 2055 } ··· 2278 2273 sdev = scmi_get_protocol_device(child, info, 2279 2274 id_table->protocol_id, 2280 2275 id_table->name); 2281 - /* Set handle if not already set: device existed */ 2282 - if (sdev && !sdev->handle) 2283 - sdev->handle = 2284 - scmi_handle_get_from_info_unlocked(info); 2276 + if (sdev) { 2277 + /* Set handle if not already set: device existed */ 2278 + if (!sdev->handle) 2279 + sdev->handle = 2280 + scmi_handle_get_from_info_unlocked(info); 2281 + /* Relink consumer and suppliers */ 2282 + if (sdev->handle) 2283 + scmi_device_link_add(&sdev->dev, 2284 + sdev->handle->dev); 2285 + } 2285 2286 } else { 2286 2287 dev_err(info->dev, 2287 2288 "Failed. 
SCMI protocol %d not active.\n", ··· 2486 2475 2487 2476 static int scmi_remove(struct platform_device *pdev) 2488 2477 { 2489 - int ret = 0, id; 2478 + int ret, id; 2490 2479 struct scmi_info *info = platform_get_drvdata(pdev); 2491 2480 struct device_node *child; 2492 2481 2493 2482 mutex_lock(&scmi_list_mutex); 2494 2483 if (info->users) 2495 - ret = -EBUSY; 2496 - else 2497 - list_del(&info->node); 2484 + dev_warn(&pdev->dev, 2485 + "Still active SCMI users will be forcibly unbound.\n"); 2486 + list_del(&info->node); 2498 2487 mutex_unlock(&scmi_list_mutex); 2499 - 2500 - if (ret) 2501 - return ret; 2502 2488 2503 2489 scmi_notification_exit(&info->handle); 2504 2490 ··· 2508 2500 idr_destroy(&info->active_protocols); 2509 2501 2510 2502 /* Safe to free channels since no more users */ 2511 - return scmi_cleanup_txrx_channels(info); 2503 + ret = scmi_cleanup_txrx_channels(info); 2504 + if (ret) 2505 + dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n"); 2506 + 2507 + return 0; 2512 2508 } 2513 2509 2514 2510 static ssize_t protocol_version_show(struct device *dev, ··· 2583 2571 static struct platform_driver scmi_driver = { 2584 2572 .driver = { 2585 2573 .name = "arm-scmi", 2574 + .suppress_bind_attrs = true, 2586 2575 .of_match_table = scmi_of_match, 2587 2576 .dev_groups = versions_groups, 2588 2577 },
+1 -1
drivers/firmware/arm_scmi/mailbox.c
··· 36 36 { 37 37 struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); 38 38 39 - shmem_tx_prepare(smbox->shmem, m); 39 + shmem_tx_prepare(smbox->shmem, m, smbox->cinfo); 40 40 } 41 41 42 42 static void rx_callback(struct mbox_client *cl, void *m)
+1 -1
drivers/firmware/arm_scmi/optee.c
··· 498 498 msg_tx_prepare(channel->req.msg, xfer); 499 499 ret = invoke_process_msg_channel(channel, msg_command_size(xfer)); 500 500 } else { 501 - shmem_tx_prepare(channel->req.shmem, xfer); 501 + shmem_tx_prepare(channel->req.shmem, xfer, cinfo); 502 502 ret = invoke_process_smt_channel(channel); 503 503 } 504 504
+27 -4
drivers/firmware/arm_scmi/shmem.c
··· 5 5 * Copyright (C) 2019 ARM Ltd. 6 6 */ 7 7 8 + #include <linux/ktime.h> 8 9 #include <linux/io.h> 9 10 #include <linux/processor.h> 10 11 #include <linux/types.h> 12 + 13 + #include <asm-generic/bug.h> 11 14 12 15 #include "common.h" 13 16 ··· 33 30 }; 34 31 35 32 void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, 36 - struct scmi_xfer *xfer) 33 + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo) 37 34 { 35 + ktime_t stop; 36 + 38 37 /* 39 38 * Ideally channel must be free by now unless OS timeout last 40 39 * request and platform continued to process the same, wait 41 40 * until it releases the shared memory, otherwise we may endup 42 - * overwriting its response with new message payload or vice-versa 41 + * overwriting its response with new message payload or vice-versa. 42 + * Giving up anyway after twice the expected channel timeout so as 43 + * not to bail-out on intermittent issues where the platform is 44 + * occasionally a bit slower to answer. 45 + * 46 + * Note that after a timeout is detected we bail-out and carry on but 47 + * the transport functionality is probably permanently compromised: 48 + * this is just to ease debugging and avoid complete hangs on boot 49 + * due to a misbehaving SCMI firmware. 43 50 */ 44 - spin_until_cond(ioread32(&shmem->channel_status) & 45 - SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); 51 + stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms); 52 + spin_until_cond((ioread32(&shmem->channel_status) & 53 + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) || 54 + ktime_after(ktime_get(), stop)); 55 + if (!(ioread32(&shmem->channel_status) & 56 + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) { 57 + WARN_ON_ONCE(1); 58 + dev_err(cinfo->dev, 59 + "Timeout waiting for a free TX channel !\n"); 60 + return; 61 + } 62 + 46 63 /* Mark channel busy + clear error */ 47 64 iowrite32(0x0, &shmem->channel_status); 48 65 iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
+1 -1
drivers/firmware/arm_scmi/smc.c
··· 188 188 */ 189 189 smc_channel_lock_acquire(scmi_info, xfer); 190 190 191 - shmem_tx_prepare(scmi_info->shmem, xfer); 191 + shmem_tx_prepare(scmi_info->shmem, xfer, cinfo); 192 192 193 193 arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); 194 194
+16 -10
drivers/firmware/arm_scmi/virtio.c
··· 148 148 { 149 149 unsigned long flags; 150 150 DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); 151 - void *deferred_wq = NULL; 152 151 153 152 /* 154 153 * Prepare to wait for the last release if not already released ··· 161 162 162 163 vioch->shutdown_done = &vioch_shutdown_done; 163 164 virtio_break_device(vioch->vqueue->vdev); 164 - if (!vioch->is_rx && vioch->deferred_tx_wq) { 165 - deferred_wq = vioch->deferred_tx_wq; 165 + if (!vioch->is_rx && vioch->deferred_tx_wq) 166 166 /* Cannot be kicked anymore after this...*/ 167 167 vioch->deferred_tx_wq = NULL; 168 - } 169 168 spin_unlock_irqrestore(&vioch->lock, flags); 170 - 171 - if (deferred_wq) 172 - destroy_workqueue(deferred_wq); 173 169 174 170 scmi_vio_channel_release(vioch); 175 171 ··· 410 416 return vioch && !vioch->cinfo; 411 417 } 412 418 419 + static void scmi_destroy_tx_workqueue(void *deferred_tx_wq) 420 + { 421 + destroy_workqueue(deferred_tx_wq); 422 + } 423 + 413 424 static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, 414 425 bool tx) 415 426 { ··· 429 430 430 431 /* Setup a deferred worker for polling. 
*/ 431 432 if (tx && !vioch->deferred_tx_wq) { 433 + int ret; 434 + 432 435 vioch->deferred_tx_wq = 433 436 alloc_workqueue(dev_name(&scmi_vdev->dev), 434 437 WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, 435 438 0); 436 439 if (!vioch->deferred_tx_wq) 437 440 return -ENOMEM; 441 + 442 + ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue, 443 + vioch->deferred_tx_wq); 444 + if (ret) 445 + return ret; 438 446 439 447 INIT_WORK(&vioch->deferred_tx_work, 440 448 scmi_vio_deferred_tx_worker); ··· 450 444 for (i = 0; i < vioch->max_msg; i++) { 451 445 struct scmi_vio_msg *msg; 452 446 453 - msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL); 447 + msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); 454 448 if (!msg) 455 449 return -ENOMEM; 456 450 457 451 if (tx) { 458 - msg->request = devm_kzalloc(cinfo->dev, 452 + msg->request = devm_kzalloc(dev, 459 453 VIRTIO_SCMI_MAX_PDU_SIZE, 460 454 GFP_KERNEL); 461 455 if (!msg->request) ··· 464 458 refcount_set(&msg->users, 1); 465 459 } 466 460 467 - msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, 461 + msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, 468 462 GFP_KERNEL); 469 463 if (!msg->input) 470 464 return -ENOMEM;
+1 -1
drivers/firmware/efi/efi.c
··· 611 611 612 612 seed = early_memremap(efi_rng_seed, sizeof(*seed)); 613 613 if (seed != NULL) { 614 - size = READ_ONCE(seed->size); 614 + size = min(seed->size, EFI_RANDOM_SEED_SIZE); 615 615 early_memunmap(seed, sizeof(*seed)); 616 616 } else { 617 617 pr_err("Could not map UEFI random seed!\n");
+6 -1
drivers/firmware/efi/libstub/random.c
··· 75 75 if (status != EFI_SUCCESS) 76 76 return status; 77 77 78 - status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, 78 + /* 79 + * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the 80 + * allocation will survive a kexec reboot (although we refresh the seed 81 + * beforehand) 82 + */ 83 + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, 79 84 sizeof(*seed) + EFI_RANDOM_SEED_SIZE, 80 85 (void **)&seed); 81 86 if (status != EFI_SUCCESS)
+1 -1
drivers/firmware/efi/tpm.c
··· 97 97 goto out_calc; 98 98 } 99 99 100 - memblock_reserve((unsigned long)final_tbl, 100 + memblock_reserve(efi.tpm_final_log, 101 101 tbl_size + sizeof(*final_tbl)); 102 102 efi_tpm_final_log_size = tbl_size; 103 103
+20 -48
drivers/firmware/efi/vars.c
··· 21 21 22 22 static DEFINE_SEMAPHORE(efivars_lock); 23 23 24 - static efi_status_t check_var_size(u32 attributes, unsigned long size) 24 + static efi_status_t check_var_size(bool nonblocking, u32 attributes, 25 + unsigned long size) 25 26 { 26 27 const struct efivar_operations *fops; 28 + efi_status_t status; 27 29 28 30 fops = __efivars->ops; 29 31 30 32 if (!fops->query_variable_store) 33 + status = EFI_UNSUPPORTED; 34 + else 35 + status = fops->query_variable_store(attributes, size, 36 + nonblocking); 37 + if (status == EFI_UNSUPPORTED) 31 38 return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES; 32 - 33 - return fops->query_variable_store(attributes, size, false); 34 - } 35 - 36 - static 37 - efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size) 38 - { 39 - const struct efivar_operations *fops; 40 - 41 - fops = __efivars->ops; 42 - 43 - if (!fops->query_variable_store) 44 - return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES; 45 - 46 - return fops->query_variable_store(attributes, size, true); 39 + return status; 47 40 } 48 41 49 42 /** ··· 189 196 EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR); 190 197 191 198 /* 192 - * efivar_set_variable_blocking() - local helper function for set_variable 193 - * 194 - * Must be called with efivars_lock held. 195 - */ 196 - static efi_status_t 197 - efivar_set_variable_blocking(efi_char16_t *name, efi_guid_t *vendor, 198 - u32 attr, unsigned long data_size, void *data) 199 - { 200 - efi_status_t status; 201 - 202 - if (data_size > 0) { 203 - status = check_var_size(attr, data_size + 204 - ucs2_strsize(name, 1024)); 205 - if (status != EFI_SUCCESS) 206 - return status; 207 - } 208 - return __efivars->ops->set_variable(name, vendor, attr, data_size, data); 209 - } 210 - 211 - /* 212 199 * efivar_set_variable_locked() - set a variable identified by name/vendor 213 200 * 214 201 * Must be called with efivars_lock held. 
If @nonblocking is set, it will use ··· 201 228 efi_set_variable_t *setvar; 202 229 efi_status_t status; 203 230 204 - if (!nonblocking) 205 - return efivar_set_variable_blocking(name, vendor, attr, 206 - data_size, data); 231 + if (data_size > 0) { 232 + status = check_var_size(nonblocking, attr, 233 + data_size + ucs2_strsize(name, 1024)); 234 + if (status != EFI_SUCCESS) 235 + return status; 236 + } 207 237 208 238 /* 209 239 * If no _nonblocking variant exists, the ordinary one 210 240 * is assumed to be non-blocking. 211 241 */ 212 - setvar = __efivars->ops->set_variable_nonblocking ?: 213 - __efivars->ops->set_variable; 242 + setvar = __efivars->ops->set_variable_nonblocking; 243 + if (!setvar || !nonblocking) 244 + setvar = __efivars->ops->set_variable; 214 245 215 - if (data_size > 0) { 216 - status = check_var_size_nonblocking(attr, data_size + 217 - ucs2_strsize(name, 1024)); 218 - if (status != EFI_SUCCESS) 219 - return status; 220 - } 221 246 return setvar(name, vendor, attr, data_size, data); 222 247 } 223 248 EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR); ··· 235 264 if (efivar_lock()) 236 265 return EFI_ABORTED; 237 266 238 - status = efivar_set_variable_blocking(name, vendor, attr, data_size, data); 267 + status = efivar_set_variable_locked(name, vendor, attr, data_size, 268 + data, false); 239 269 efivar_unlock(); 240 270 return status; 241 271 }
+7
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 706 706 707 707 void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle) 708 708 { 709 + /* Temporary workaround to fix issues observed in some 710 + * compute applications when GFXOFF is enabled on GFX11. 711 + */ 712 + if (IP_VERSION_MAJ(adev->ip_versions[GC_HWIP][0]) == 11) { 713 + pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled"); 714 + amdgpu_gfx_off_ctrl(adev, idle); 715 + } 709 716 amdgpu_dpm_switch_power_profile(adev, 710 717 PP_SMC_POWER_PROFILE_COMPUTE, 711 718 !idle);
+10 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 4060 4060 * at suspend time. 4061 4061 * 4062 4062 */ 4063 - static void amdgpu_device_evict_resources(struct amdgpu_device *adev) 4063 + static int amdgpu_device_evict_resources(struct amdgpu_device *adev) 4064 4064 { 4065 + int ret; 4066 + 4065 4067 /* No need to evict vram on APUs for suspend to ram or s2idle */ 4066 4068 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) 4067 - return; 4069 + return 0; 4068 4070 4069 - if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM)) 4071 + ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 4072 + if (ret) 4070 4073 DRM_WARN("evicting device resources failed\n"); 4071 - 4074 + return ret; 4072 4075 } 4073 4076 4074 4077 /* ··· 4121 4118 if (!adev->in_s0ix) 4122 4119 amdgpu_amdkfd_suspend(adev, adev->in_runpm); 4123 4120 4124 - amdgpu_device_evict_resources(adev); 4121 + r = amdgpu_device_evict_resources(adev); 4122 + if (r) 4123 + return r; 4125 4124 4126 4125 amdgpu_fence_driver_hw_fini(adev); 4127 4126
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2201 2201 pm_runtime_forbid(dev->dev); 2202 2202 } 2203 2203 2204 - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) { 2204 + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) && 2205 + !amdgpu_sriov_vf(adev)) { 2205 2206 bool need_to_reset_gpu = false; 2206 2207 2207 2208 if (adev->gmc.xgmi.num_physical_nodes > 1) {
+6 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 337 337 fw_info->feature = adev->psp.cap_feature_version; 338 338 break; 339 339 case AMDGPU_INFO_FW_MES_KIQ: 340 - fw_info->ver = adev->mes.ucode_fw_version[0]; 341 - fw_info->feature = 0; 340 + fw_info->ver = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK; 341 + fw_info->feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK) 342 + >> AMDGPU_MES_FEAT_VERSION_SHIFT; 342 343 break; 343 344 case AMDGPU_INFO_FW_MES: 344 - fw_info->ver = adev->mes.ucode_fw_version[1]; 345 - fw_info->feature = 0; 345 + fw_info->ver = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; 346 + fw_info->feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK) 347 + >> AMDGPU_MES_FEAT_VERSION_SHIFT; 346 348 break; 347 349 case AMDGPU_INFO_FW_IMU: 348 350 fw_info->ver = adev->gfx.imu_fw_version;
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
··· 500 500 501 501 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; 502 502 503 + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; 504 + 503 505 r = amdgpu_display_modeset_create_props(adev); 504 506 if (r) 505 507 return r;
+397 -395
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
··· 2495 2495 0xbf9f0000, 0x00000000, 2496 2496 }; 2497 2497 static const uint32_t cwsr_trap_gfx11_hex[] = { 2498 - 0xbfa00001, 0xbfa0021e, 2498 + 0xbfa00001, 0xbfa00221, 2499 2499 0xb0804006, 0xb8f8f802, 2500 2500 0x9178ff78, 0x00020006, 2501 - 0xb8fbf803, 0xbf0d9f6d, 2502 - 0xbfa20006, 0x8b6eff78, 2503 - 0x00002000, 0xbfa10009, 2504 - 0x8b6eff6d, 0x00ff0000, 2505 - 0xbfa2001e, 0x8b6eff7b, 2506 - 0x00000400, 0xbfa20041, 2507 - 0xbf830010, 0xb8fbf803, 2508 - 0xbfa0fffa, 0x8b6eff7b, 2509 - 0x00000900, 0xbfa20015, 2510 - 0x8b6eff7b, 0x000071ff, 2511 - 0xbfa10008, 0x8b6fff7b, 2512 - 0x00007080, 0xbfa10001, 2513 - 0xbeee1287, 0xb8eff801, 2514 - 0x846e8c6e, 0x8b6e6f6e, 2515 - 0xbfa2000a, 0x8b6eff6d, 2516 - 0x00ff0000, 0xbfa20007, 2517 - 0xb8eef801, 0x8b6eff6e, 2518 - 0x00000800, 0xbfa20003, 2501 + 0xb8fbf803, 0xbf0d9e6d, 2502 + 0xbfa10001, 0xbfbd0000, 2503 + 0xbf0d9f6d, 0xbfa20006, 2504 + 0x8b6eff78, 0x00002000, 2505 + 0xbfa10009, 0x8b6eff6d, 2506 + 0x00ff0000, 0xbfa2001e, 2519 2507 0x8b6eff7b, 0x00000400, 2520 - 0xbfa20026, 0xbefa4d82, 2521 - 0xbf89fc07, 0x84fa887a, 2522 - 0xf4005bbd, 0xf8000010, 2523 - 0xbf89fc07, 0x846e976e, 2524 - 0x9177ff77, 0x00800000, 2525 - 0x8c776e77, 0xf4045bbd, 2526 - 0xf8000000, 0xbf89fc07, 2527 - 0xf4045ebd, 0xf8000008, 2528 - 0xbf89fc07, 0x8bee6e6e, 2529 - 0xbfa10001, 0xbe80486e, 2530 - 0x8b6eff6d, 0x01ff0000, 2531 - 0xbfa20005, 0x8c78ff78, 2532 - 0x00002000, 0x80ec886c, 2533 - 0x82ed806d, 0xbfa00005, 2534 - 0x8b6eff6d, 0x01000000, 2535 - 0xbfa20002, 0x806c846c, 2536 - 0x826d806d, 0x8b6dff6d, 2537 - 0x0000ffff, 0x8bfe7e7e, 2538 - 0x8bea6a6a, 0xb978f802, 2539 - 0xbe804a6c, 0x8b6dff6d, 2540 - 0x0000ffff, 0xbefa0080, 2541 - 0xb97a0283, 0xbeee007e, 2542 - 0xbeef007f, 0xbefe0180, 2543 - 0xbefe4d84, 0xbf89fc07, 2544 - 0x8b7aff7f, 0x04000000, 2545 - 0x847a857a, 0x8c6d7a6d, 2546 - 0xbefa007e, 0x8b7bff7f, 2547 - 0x0000ffff, 0xbefe00c1, 2548 - 0xbeff00c1, 0xdca6c000, 2549 - 0x007a0000, 0x7e000280, 2550 - 0xbefe007a, 0xbeff007b, 2551 - 0xb8fb02dc, 
0x847b997b, 2552 - 0xb8fa3b05, 0x807a817a, 2553 - 0xbf0d997b, 0xbfa20002, 2554 - 0x847a897a, 0xbfa00001, 2555 - 0x847a8a7a, 0xb8fb1e06, 2556 - 0x847b8a7b, 0x807a7b7a, 2508 + 0xbfa20041, 0xbf830010, 2509 + 0xb8fbf803, 0xbfa0fffa, 2510 + 0x8b6eff7b, 0x00000900, 2511 + 0xbfa20015, 0x8b6eff7b, 2512 + 0x000071ff, 0xbfa10008, 2513 + 0x8b6fff7b, 0x00007080, 2514 + 0xbfa10001, 0xbeee1287, 2515 + 0xb8eff801, 0x846e8c6e, 2516 + 0x8b6e6f6e, 0xbfa2000a, 2517 + 0x8b6eff6d, 0x00ff0000, 2518 + 0xbfa20007, 0xb8eef801, 2519 + 0x8b6eff6e, 0x00000800, 2520 + 0xbfa20003, 0x8b6eff7b, 2521 + 0x00000400, 0xbfa20026, 2522 + 0xbefa4d82, 0xbf89fc07, 2523 + 0x84fa887a, 0xf4005bbd, 2524 + 0xf8000010, 0xbf89fc07, 2525 + 0x846e976e, 0x9177ff77, 2526 + 0x00800000, 0x8c776e77, 2527 + 0xf4045bbd, 0xf8000000, 2528 + 0xbf89fc07, 0xf4045ebd, 2529 + 0xf8000008, 0xbf89fc07, 2530 + 0x8bee6e6e, 0xbfa10001, 2531 + 0xbe80486e, 0x8b6eff6d, 2532 + 0x01ff0000, 0xbfa20005, 2533 + 0x8c78ff78, 0x00002000, 2534 + 0x80ec886c, 0x82ed806d, 2535 + 0xbfa00005, 0x8b6eff6d, 2536 + 0x01000000, 0xbfa20002, 2537 + 0x806c846c, 0x826d806d, 2538 + 0x8b6dff6d, 0x0000ffff, 2539 + 0x8bfe7e7e, 0x8bea6a6a, 2540 + 0xb978f802, 0xbe804a6c, 2541 + 0x8b6dff6d, 0x0000ffff, 2542 + 0xbefa0080, 0xb97a0283, 2543 + 0xbeee007e, 0xbeef007f, 2544 + 0xbefe0180, 0xbefe4d84, 2545 + 0xbf89fc07, 0x8b7aff7f, 2546 + 0x04000000, 0x847a857a, 2547 + 0x8c6d7a6d, 0xbefa007e, 2557 2548 0x8b7bff7f, 0x0000ffff, 2558 - 0x807aff7a, 0x00000200, 2559 - 0x807a7e7a, 0x827b807b, 2560 - 0xd7610000, 0x00010870, 2561 - 0xd7610000, 0x00010a71, 2562 - 0xd7610000, 0x00010c72, 2563 - 0xd7610000, 0x00010e73, 2564 - 0xd7610000, 0x00011074, 2565 - 0xd7610000, 0x00011275, 2566 - 0xd7610000, 0x00011476, 2567 - 0xd7610000, 0x00011677, 2568 - 0xd7610000, 0x00011a79, 2569 - 0xd7610000, 0x00011c7e, 2570 - 0xd7610000, 0x00011e7f, 2571 - 0xbefe00ff, 0x00003fff, 2572 - 0xbeff0080, 0xdca6c040, 2573 - 0x007a0000, 0xd760007a, 2574 - 0x00011d00, 0xd760007b, 2575 - 0x00011f00, 0xbefe007a, 
2576 - 0xbeff007b, 0xbef4007e, 2577 - 0x8b75ff7f, 0x0000ffff, 2578 - 0x8c75ff75, 0x00040000, 2579 - 0xbef60080, 0xbef700ff, 2580 - 0x10807fac, 0xbef1007d, 2581 - 0xbef00080, 0xb8f302dc, 2582 - 0x84739973, 0xbefe00c1, 2583 - 0x857d9973, 0x8b7d817d, 2584 - 0xbf06817d, 0xbfa20002, 2585 - 0xbeff0080, 0xbfa00002, 2586 - 0xbeff00c1, 0xbfa00009, 2549 + 0xbefe00c1, 0xbeff00c1, 2550 + 0xdca6c000, 0x007a0000, 2551 + 0x7e000280, 0xbefe007a, 2552 + 0xbeff007b, 0xb8fb02dc, 2553 + 0x847b997b, 0xb8fa3b05, 2554 + 0x807a817a, 0xbf0d997b, 2555 + 0xbfa20002, 0x847a897a, 2556 + 0xbfa00001, 0x847a8a7a, 2557 + 0xb8fb1e06, 0x847b8a7b, 2558 + 0x807a7b7a, 0x8b7bff7f, 2559 + 0x0000ffff, 0x807aff7a, 2560 + 0x00000200, 0x807a7e7a, 2561 + 0x827b807b, 0xd7610000, 2562 + 0x00010870, 0xd7610000, 2563 + 0x00010a71, 0xd7610000, 2564 + 0x00010c72, 0xd7610000, 2565 + 0x00010e73, 0xd7610000, 2566 + 0x00011074, 0xd7610000, 2567 + 0x00011275, 0xd7610000, 2568 + 0x00011476, 0xd7610000, 2569 + 0x00011677, 0xd7610000, 2570 + 0x00011a79, 0xd7610000, 2571 + 0x00011c7e, 0xd7610000, 2572 + 0x00011e7f, 0xbefe00ff, 2573 + 0x00003fff, 0xbeff0080, 2574 + 0xdca6c040, 0x007a0000, 2575 + 0xd760007a, 0x00011d00, 2576 + 0xd760007b, 0x00011f00, 2577 + 0xbefe007a, 0xbeff007b, 2578 + 0xbef4007e, 0x8b75ff7f, 2579 + 0x0000ffff, 0x8c75ff75, 2580 + 0x00040000, 0xbef60080, 2581 + 0xbef700ff, 0x10807fac, 2582 + 0xbef1007d, 0xbef00080, 2583 + 0xb8f302dc, 0x84739973, 2584 + 0xbefe00c1, 0x857d9973, 2585 + 0x8b7d817d, 0xbf06817d, 2586 + 0xbfa20002, 0xbeff0080, 2587 + 0xbfa00002, 0xbeff00c1, 2588 + 0xbfa00009, 0xbef600ff, 2589 + 0x01000000, 0xe0685080, 2590 + 0x701d0100, 0xe0685100, 2591 + 0x701d0200, 0xe0685180, 2592 + 0x701d0300, 0xbfa00008, 2587 2593 0xbef600ff, 0x01000000, 2588 - 0xe0685080, 0x701d0100, 2589 - 0xe0685100, 0x701d0200, 2590 - 0xe0685180, 0x701d0300, 2591 - 0xbfa00008, 0xbef600ff, 2592 - 0x01000000, 0xe0685100, 2593 - 0x701d0100, 0xe0685200, 2594 - 0x701d0200, 0xe0685300, 2595 - 0x701d0300, 0xb8f03b05, 2596 - 
0x80708170, 0xbf0d9973, 2597 - 0xbfa20002, 0x84708970, 2598 - 0xbfa00001, 0x84708a70, 2599 - 0xb8fa1e06, 0x847a8a7a, 2600 - 0x80707a70, 0x8070ff70, 2601 - 0x00000200, 0xbef600ff, 2602 - 0x01000000, 0x7e000280, 2603 - 0x7e020280, 0x7e040280, 2604 - 0xbefd0080, 0xd7610002, 2605 - 0x0000fa71, 0x807d817d, 2606 - 0xd7610002, 0x0000fa6c, 2607 - 0x807d817d, 0x917aff6d, 2608 - 0x80000000, 0xd7610002, 2609 - 0x0000fa7a, 0x807d817d, 2610 - 0xd7610002, 0x0000fa6e, 2611 - 0x807d817d, 0xd7610002, 2612 - 0x0000fa6f, 0x807d817d, 2613 - 0xd7610002, 0x0000fa78, 2614 - 0x807d817d, 0xb8faf803, 2615 - 0xd7610002, 0x0000fa7a, 2616 - 0x807d817d, 0xd7610002, 2617 - 0x0000fa7b, 0x807d817d, 2618 - 0xb8f1f801, 0xd7610002, 2619 - 0x0000fa71, 0x807d817d, 2620 - 0xb8f1f814, 0xd7610002, 2621 - 0x0000fa71, 0x807d817d, 2622 - 0xb8f1f815, 0xd7610002, 2623 - 0x0000fa71, 0x807d817d, 2624 - 0xbefe00ff, 0x0000ffff, 2625 - 0xbeff0080, 0xe0685000, 2626 - 0x701d0200, 0xbefe00c1, 2594 + 0xe0685100, 0x701d0100, 2595 + 0xe0685200, 0x701d0200, 2596 + 0xe0685300, 0x701d0300, 2627 2597 0xb8f03b05, 0x80708170, 2628 2598 0xbf0d9973, 0xbfa20002, 2629 2599 0x84708970, 0xbfa00001, 2630 2600 0x84708a70, 0xb8fa1e06, 2631 2601 0x847a8a7a, 0x80707a70, 2602 + 0x8070ff70, 0x00000200, 2632 2603 0xbef600ff, 0x01000000, 2633 - 0xbef90080, 0xbefd0080, 2634 - 0xbf800000, 0xbe804100, 2635 - 0xbe824102, 0xbe844104, 2636 - 0xbe864106, 0xbe884108, 2637 - 0xbe8a410a, 0xbe8c410c, 2638 - 0xbe8e410e, 0xd7610002, 2639 - 0x0000f200, 0x80798179, 2640 - 0xd7610002, 0x0000f201, 2641 - 0x80798179, 0xd7610002, 2642 - 0x0000f202, 0x80798179, 2643 - 0xd7610002, 0x0000f203, 2644 - 0x80798179, 0xd7610002, 2645 - 0x0000f204, 0x80798179, 2646 - 0xd7610002, 0x0000f205, 2647 - 0x80798179, 0xd7610002, 2648 - 0x0000f206, 0x80798179, 2649 - 0xd7610002, 0x0000f207, 2650 - 0x80798179, 0xd7610002, 2651 - 0x0000f208, 0x80798179, 2652 - 0xd7610002, 0x0000f209, 2653 - 0x80798179, 0xd7610002, 2654 - 0x0000f20a, 0x80798179, 2655 - 0xd7610002, 0x0000f20b, 2656 
- 0x80798179, 0xd7610002, 2657 - 0x0000f20c, 0x80798179, 2658 - 0xd7610002, 0x0000f20d, 2659 - 0x80798179, 0xd7610002, 2660 - 0x0000f20e, 0x80798179, 2661 - 0xd7610002, 0x0000f20f, 2662 - 0x80798179, 0xbf06a079, 2663 - 0xbfa10006, 0xe0685000, 2664 - 0x701d0200, 0x8070ff70, 2665 - 0x00000080, 0xbef90080, 2666 - 0x7e040280, 0x807d907d, 2667 - 0xbf0aff7d, 0x00000060, 2668 - 0xbfa2ffbc, 0xbe804100, 2669 - 0xbe824102, 0xbe844104, 2670 - 0xbe864106, 0xbe884108, 2671 - 0xbe8a410a, 0xd7610002, 2672 - 0x0000f200, 0x80798179, 2673 - 0xd7610002, 0x0000f201, 2674 - 0x80798179, 0xd7610002, 2675 - 0x0000f202, 0x80798179, 2676 - 0xd7610002, 0x0000f203, 2677 - 0x80798179, 0xd7610002, 2678 - 0x0000f204, 0x80798179, 2679 - 0xd7610002, 0x0000f205, 2680 - 0x80798179, 0xd7610002, 2681 - 0x0000f206, 0x80798179, 2682 - 0xd7610002, 0x0000f207, 2683 - 0x80798179, 0xd7610002, 2684 - 0x0000f208, 0x80798179, 2685 - 0xd7610002, 0x0000f209, 2686 - 0x80798179, 0xd7610002, 2687 - 0x0000f20a, 0x80798179, 2688 - 0xd7610002, 0x0000f20b, 2689 - 0x80798179, 0xe0685000, 2690 - 0x701d0200, 0xbefe00c1, 2691 - 0x857d9973, 0x8b7d817d, 2692 - 0xbf06817d, 0xbfa20002, 2693 - 0xbeff0080, 0xbfa00001, 2694 - 0xbeff00c1, 0xb8fb4306, 2695 - 0x8b7bc17b, 0xbfa10044, 2696 - 0xbfbd0000, 0x8b7aff6d, 2697 - 0x80000000, 0xbfa10040, 2698 - 0x847b867b, 0x847b827b, 2699 - 0xbef6007b, 0xb8f03b05, 2604 + 0x7e000280, 0x7e020280, 2605 + 0x7e040280, 0xbefd0080, 2606 + 0xd7610002, 0x0000fa71, 2607 + 0x807d817d, 0xd7610002, 2608 + 0x0000fa6c, 0x807d817d, 2609 + 0x917aff6d, 0x80000000, 2610 + 0xd7610002, 0x0000fa7a, 2611 + 0x807d817d, 0xd7610002, 2612 + 0x0000fa6e, 0x807d817d, 2613 + 0xd7610002, 0x0000fa6f, 2614 + 0x807d817d, 0xd7610002, 2615 + 0x0000fa78, 0x807d817d, 2616 + 0xb8faf803, 0xd7610002, 2617 + 0x0000fa7a, 0x807d817d, 2618 + 0xd7610002, 0x0000fa7b, 2619 + 0x807d817d, 0xb8f1f801, 2620 + 0xd7610002, 0x0000fa71, 2621 + 0x807d817d, 0xb8f1f814, 2622 + 0xd7610002, 0x0000fa71, 2623 + 0x807d817d, 0xb8f1f815, 2624 + 0xd7610002, 
0x0000fa71, 2625 + 0x807d817d, 0xbefe00ff, 2626 + 0x0000ffff, 0xbeff0080, 2627 + 0xe0685000, 0x701d0200, 2628 + 0xbefe00c1, 0xb8f03b05, 2700 2629 0x80708170, 0xbf0d9973, 2701 2630 0xbfa20002, 0x84708970, 2702 2631 0xbfa00001, 0x84708a70, 2703 2632 0xb8fa1e06, 0x847a8a7a, 2704 - 0x80707a70, 0x8070ff70, 2705 - 0x00000200, 0x8070ff70, 2706 - 0x00000080, 0xbef600ff, 2707 - 0x01000000, 0xd71f0000, 2708 - 0x000100c1, 0xd7200000, 2709 - 0x000200c1, 0x16000084, 2710 - 0x857d9973, 0x8b7d817d, 2711 - 0xbf06817d, 0xbefd0080, 2712 - 0xbfa20012, 0xbe8300ff, 2713 - 0x00000080, 0xbf800000, 2714 - 0xbf800000, 0xbf800000, 2715 - 0xd8d80000, 0x01000000, 2716 - 0xbf890000, 0xe0685000, 2717 - 0x701d0100, 0x807d037d, 2718 - 0x80700370, 0xd5250000, 2719 - 0x0001ff00, 0x00000080, 2720 - 0xbf0a7b7d, 0xbfa2fff4, 2721 - 0xbfa00011, 0xbe8300ff, 2722 - 0x00000100, 0xbf800000, 2723 - 0xbf800000, 0xbf800000, 2724 - 0xd8d80000, 0x01000000, 2725 - 0xbf890000, 0xe0685000, 2726 - 0x701d0100, 0x807d037d, 2727 - 0x80700370, 0xd5250000, 2728 - 0x0001ff00, 0x00000100, 2729 - 0xbf0a7b7d, 0xbfa2fff4, 2633 + 0x80707a70, 0xbef600ff, 2634 + 0x01000000, 0xbef90080, 2635 + 0xbefd0080, 0xbf800000, 2636 + 0xbe804100, 0xbe824102, 2637 + 0xbe844104, 0xbe864106, 2638 + 0xbe884108, 0xbe8a410a, 2639 + 0xbe8c410c, 0xbe8e410e, 2640 + 0xd7610002, 0x0000f200, 2641 + 0x80798179, 0xd7610002, 2642 + 0x0000f201, 0x80798179, 2643 + 0xd7610002, 0x0000f202, 2644 + 0x80798179, 0xd7610002, 2645 + 0x0000f203, 0x80798179, 2646 + 0xd7610002, 0x0000f204, 2647 + 0x80798179, 0xd7610002, 2648 + 0x0000f205, 0x80798179, 2649 + 0xd7610002, 0x0000f206, 2650 + 0x80798179, 0xd7610002, 2651 + 0x0000f207, 0x80798179, 2652 + 0xd7610002, 0x0000f208, 2653 + 0x80798179, 0xd7610002, 2654 + 0x0000f209, 0x80798179, 2655 + 0xd7610002, 0x0000f20a, 2656 + 0x80798179, 0xd7610002, 2657 + 0x0000f20b, 0x80798179, 2658 + 0xd7610002, 0x0000f20c, 2659 + 0x80798179, 0xd7610002, 2660 + 0x0000f20d, 0x80798179, 2661 + 0xd7610002, 0x0000f20e, 2662 + 0x80798179, 
0xd7610002, 2663 + 0x0000f20f, 0x80798179, 2664 + 0xbf06a079, 0xbfa10006, 2665 + 0xe0685000, 0x701d0200, 2666 + 0x8070ff70, 0x00000080, 2667 + 0xbef90080, 0x7e040280, 2668 + 0x807d907d, 0xbf0aff7d, 2669 + 0x00000060, 0xbfa2ffbc, 2670 + 0xbe804100, 0xbe824102, 2671 + 0xbe844104, 0xbe864106, 2672 + 0xbe884108, 0xbe8a410a, 2673 + 0xd7610002, 0x0000f200, 2674 + 0x80798179, 0xd7610002, 2675 + 0x0000f201, 0x80798179, 2676 + 0xd7610002, 0x0000f202, 2677 + 0x80798179, 0xd7610002, 2678 + 0x0000f203, 0x80798179, 2679 + 0xd7610002, 0x0000f204, 2680 + 0x80798179, 0xd7610002, 2681 + 0x0000f205, 0x80798179, 2682 + 0xd7610002, 0x0000f206, 2683 + 0x80798179, 0xd7610002, 2684 + 0x0000f207, 0x80798179, 2685 + 0xd7610002, 0x0000f208, 2686 + 0x80798179, 0xd7610002, 2687 + 0x0000f209, 0x80798179, 2688 + 0xd7610002, 0x0000f20a, 2689 + 0x80798179, 0xd7610002, 2690 + 0x0000f20b, 0x80798179, 2691 + 0xe0685000, 0x701d0200, 2730 2692 0xbefe00c1, 0x857d9973, 2731 2693 0x8b7d817d, 0xbf06817d, 2732 - 0xbfa20004, 0xbef000ff, 2733 - 0x00000200, 0xbeff0080, 2734 - 0xbfa00003, 0xbef000ff, 2735 - 0x00000400, 0xbeff00c1, 2736 - 0xb8fb3b05, 0x807b817b, 2737 - 0x847b827b, 0x857d9973, 2694 + 0xbfa20002, 0xbeff0080, 2695 + 0xbfa00001, 0xbeff00c1, 2696 + 0xb8fb4306, 0x8b7bc17b, 2697 + 0xbfa10044, 0xbfbd0000, 2698 + 0x8b7aff6d, 0x80000000, 2699 + 0xbfa10040, 0x847b867b, 2700 + 0x847b827b, 0xbef6007b, 2701 + 0xb8f03b05, 0x80708170, 2702 + 0xbf0d9973, 0xbfa20002, 2703 + 0x84708970, 0xbfa00001, 2704 + 0x84708a70, 0xb8fa1e06, 2705 + 0x847a8a7a, 0x80707a70, 2706 + 0x8070ff70, 0x00000200, 2707 + 0x8070ff70, 0x00000080, 2708 + 0xbef600ff, 0x01000000, 2709 + 0xd71f0000, 0x000100c1, 2710 + 0xd7200000, 0x000200c1, 2711 + 0x16000084, 0x857d9973, 2738 2712 0x8b7d817d, 0xbf06817d, 2739 - 0xbfa20017, 0xbef600ff, 2713 + 0xbefd0080, 0xbfa20012, 2714 + 0xbe8300ff, 0x00000080, 2715 + 0xbf800000, 0xbf800000, 2716 + 0xbf800000, 0xd8d80000, 2717 + 0x01000000, 0xbf890000, 2718 + 0xe0685000, 0x701d0100, 2719 + 0x807d037d, 
0x80700370, 2720 + 0xd5250000, 0x0001ff00, 2721 + 0x00000080, 0xbf0a7b7d, 2722 + 0xbfa2fff4, 0xbfa00011, 2723 + 0xbe8300ff, 0x00000100, 2724 + 0xbf800000, 0xbf800000, 2725 + 0xbf800000, 0xd8d80000, 2726 + 0x01000000, 0xbf890000, 2727 + 0xe0685000, 0x701d0100, 2728 + 0x807d037d, 0x80700370, 2729 + 0xd5250000, 0x0001ff00, 2730 + 0x00000100, 0xbf0a7b7d, 2731 + 0xbfa2fff4, 0xbefe00c1, 2732 + 0x857d9973, 0x8b7d817d, 2733 + 0xbf06817d, 0xbfa20004, 2734 + 0xbef000ff, 0x00000200, 2735 + 0xbeff0080, 0xbfa00003, 2736 + 0xbef000ff, 0x00000400, 2737 + 0xbeff00c1, 0xb8fb3b05, 2738 + 0x807b817b, 0x847b827b, 2739 + 0x857d9973, 0x8b7d817d, 2740 + 0xbf06817d, 0xbfa20017, 2741 + 0xbef600ff, 0x01000000, 2742 + 0xbefd0084, 0xbf0a7b7d, 2743 + 0xbfa10037, 0x7e008700, 2744 + 0x7e028701, 0x7e048702, 2745 + 0x7e068703, 0xe0685000, 2746 + 0x701d0000, 0xe0685080, 2747 + 0x701d0100, 0xe0685100, 2748 + 0x701d0200, 0xe0685180, 2749 + 0x701d0300, 0x807d847d, 2750 + 0x8070ff70, 0x00000200, 2751 + 0xbf0a7b7d, 0xbfa2ffef, 2752 + 0xbfa00025, 0xbef600ff, 2740 2753 0x01000000, 0xbefd0084, 2741 - 0xbf0a7b7d, 0xbfa10037, 2754 + 0xbf0a7b7d, 0xbfa10011, 2742 2755 0x7e008700, 0x7e028701, 2743 2756 0x7e048702, 0x7e068703, 2744 2757 0xe0685000, 0x701d0000, 2745 - 0xe0685080, 0x701d0100, 2746 - 0xe0685100, 0x701d0200, 2747 - 0xe0685180, 0x701d0300, 2758 + 0xe0685100, 0x701d0100, 2759 + 0xe0685200, 0x701d0200, 2760 + 0xe0685300, 0x701d0300, 2748 2761 0x807d847d, 0x8070ff70, 2749 - 0x00000200, 0xbf0a7b7d, 2750 - 0xbfa2ffef, 0xbfa00025, 2751 - 0xbef600ff, 0x01000000, 2752 - 0xbefd0084, 0xbf0a7b7d, 2753 - 0xbfa10011, 0x7e008700, 2754 - 0x7e028701, 0x7e048702, 2755 - 0x7e068703, 0xe0685000, 2756 - 0x701d0000, 0xe0685100, 2757 - 0x701d0100, 0xe0685200, 2758 - 0x701d0200, 0xe0685300, 2759 - 0x701d0300, 0x807d847d, 2760 - 0x8070ff70, 0x00000400, 2761 - 0xbf0a7b7d, 0xbfa2ffef, 2762 - 0xb8fb1e06, 0x8b7bc17b, 2763 - 0xbfa1000c, 0x847b837b, 2764 - 0x807b7d7b, 0xbefe00c1, 2765 - 0xbeff0080, 0x7e008700, 2766 - 0xe0685000, 
0x701d0000, 2767 - 0x807d817d, 0x8070ff70, 2768 - 0x00000080, 0xbf0a7b7d, 2769 - 0xbfa2fff8, 0xbfa00146, 2770 - 0xbef4007e, 0x8b75ff7f, 2771 - 0x0000ffff, 0x8c75ff75, 2772 - 0x00040000, 0xbef60080, 2773 - 0xbef700ff, 0x10807fac, 2774 - 0xb8f202dc, 0x84729972, 2775 - 0x8b6eff7f, 0x04000000, 2776 - 0xbfa1003a, 0xbefe00c1, 2777 - 0x857d9972, 0x8b7d817d, 2778 - 0xbf06817d, 0xbfa20002, 2779 - 0xbeff0080, 0xbfa00001, 2780 - 0xbeff00c1, 0xb8ef4306, 2781 - 0x8b6fc16f, 0xbfa1002f, 2782 - 0x846f866f, 0x846f826f, 2783 - 0xbef6006f, 0xb8f83b05, 2784 - 0x80788178, 0xbf0d9972, 2785 - 0xbfa20002, 0x84788978, 2786 - 0xbfa00001, 0x84788a78, 2787 - 0xb8ee1e06, 0x846e8a6e, 2788 - 0x80786e78, 0x8078ff78, 2789 - 0x00000200, 0x8078ff78, 2790 - 0x00000080, 0xbef600ff, 2791 - 0x01000000, 0x857d9972, 2792 - 0x8b7d817d, 0xbf06817d, 2793 - 0xbefd0080, 0xbfa2000c, 2794 - 0xe0500000, 0x781d0000, 2795 - 0xbf8903f7, 0xdac00000, 2796 - 0x00000000, 0x807dff7d, 2797 - 0x00000080, 0x8078ff78, 2798 - 0x00000080, 0xbf0a6f7d, 2799 - 0xbfa2fff5, 0xbfa0000b, 2800 - 0xe0500000, 0x781d0000, 2801 - 0xbf8903f7, 0xdac00000, 2802 - 0x00000000, 0x807dff7d, 2803 - 0x00000100, 0x8078ff78, 2804 - 0x00000100, 0xbf0a6f7d, 2805 - 0xbfa2fff5, 0xbef80080, 2762 + 0x00000400, 0xbf0a7b7d, 2763 + 0xbfa2ffef, 0xb8fb1e06, 2764 + 0x8b7bc17b, 0xbfa1000c, 2765 + 0x847b837b, 0x807b7d7b, 2766 + 0xbefe00c1, 0xbeff0080, 2767 + 0x7e008700, 0xe0685000, 2768 + 0x701d0000, 0x807d817d, 2769 + 0x8070ff70, 0x00000080, 2770 + 0xbf0a7b7d, 0xbfa2fff8, 2771 + 0xbfa00146, 0xbef4007e, 2772 + 0x8b75ff7f, 0x0000ffff, 2773 + 0x8c75ff75, 0x00040000, 2774 + 0xbef60080, 0xbef700ff, 2775 + 0x10807fac, 0xb8f202dc, 2776 + 0x84729972, 0x8b6eff7f, 2777 + 0x04000000, 0xbfa1003a, 2806 2778 0xbefe00c1, 0x857d9972, 2807 2779 0x8b7d817d, 0xbf06817d, 2808 2780 0xbfa20002, 0xbeff0080, 2809 2781 0xbfa00001, 0xbeff00c1, 2810 - 0xb8ef3b05, 0x806f816f, 2811 - 0x846f826f, 0x857d9972, 2812 - 0x8b7d817d, 0xbf06817d, 2813 - 0xbfa20024, 0xbef600ff, 2814 - 0x01000000, 
0xbeee0078, 2815 - 0x8078ff78, 0x00000200, 2816 - 0xbefd0084, 0xbf0a6f7d, 2817 - 0xbfa10050, 0xe0505000, 2818 - 0x781d0000, 0xe0505080, 2819 - 0x781d0100, 0xe0505100, 2820 - 0x781d0200, 0xe0505180, 2821 - 0x781d0300, 0xbf8903f7, 2822 - 0x7e008500, 0x7e028501, 2823 - 0x7e048502, 0x7e068503, 2824 - 0x807d847d, 0x8078ff78, 2825 - 0x00000200, 0xbf0a6f7d, 2826 - 0xbfa2ffee, 0xe0505000, 2827 - 0x6e1d0000, 0xe0505080, 2828 - 0x6e1d0100, 0xe0505100, 2829 - 0x6e1d0200, 0xe0505180, 2830 - 0x6e1d0300, 0xbf8903f7, 2831 - 0xbfa00034, 0xbef600ff, 2832 - 0x01000000, 0xbeee0078, 2833 - 0x8078ff78, 0x00000400, 2834 - 0xbefd0084, 0xbf0a6f7d, 2835 - 0xbfa10012, 0xe0505000, 2836 - 0x781d0000, 0xe0505100, 2837 - 0x781d0100, 0xe0505200, 2838 - 0x781d0200, 0xe0505300, 2839 - 0x781d0300, 0xbf8903f7, 2840 - 0x7e008500, 0x7e028501, 2841 - 0x7e048502, 0x7e068503, 2842 - 0x807d847d, 0x8078ff78, 2843 - 0x00000400, 0xbf0a6f7d, 2844 - 0xbfa2ffee, 0xb8ef1e06, 2845 - 0x8b6fc16f, 0xbfa1000e, 2846 - 0x846f836f, 0x806f7d6f, 2847 - 0xbefe00c1, 0xbeff0080, 2848 - 0xe0505000, 0x781d0000, 2849 - 0xbf8903f7, 0x7e008500, 2850 - 0x807d817d, 0x8078ff78, 2851 - 0x00000080, 0xbf0a6f7d, 2852 - 0xbfa2fff7, 0xbeff00c1, 2853 - 0xe0505000, 0x6e1d0000, 2854 - 0xe0505100, 0x6e1d0100, 2855 - 0xe0505200, 0x6e1d0200, 2856 - 0xe0505300, 0x6e1d0300, 2857 - 0xbf8903f7, 0xb8f83b05, 2858 - 0x80788178, 0xbf0d9972, 2859 - 0xbfa20002, 0x84788978, 2860 - 0xbfa00001, 0x84788a78, 2861 - 0xb8ee1e06, 0x846e8a6e, 2862 - 0x80786e78, 0x8078ff78, 2863 - 0x00000200, 0x80f8ff78, 2864 - 0x00000050, 0xbef600ff, 2865 - 0x01000000, 0xbefd00ff, 2866 - 0x0000006c, 0x80f89078, 2867 - 0xf428403a, 0xf0000000, 2868 - 0xbf89fc07, 0x80fd847d, 2869 - 0xbf800000, 0xbe804300, 2870 - 0xbe824302, 0x80f8a078, 2871 - 0xf42c403a, 0xf0000000, 2872 - 0xbf89fc07, 0x80fd887d, 2873 - 0xbf800000, 0xbe804300, 2874 - 0xbe824302, 0xbe844304, 2875 - 0xbe864306, 0x80f8c078, 2876 - 0xf430403a, 0xf0000000, 2877 - 0xbf89fc07, 0x80fd907d, 2878 - 0xbf800000, 0xbe804300, 
2879 - 0xbe824302, 0xbe844304, 2880 - 0xbe864306, 0xbe884308, 2881 - 0xbe8a430a, 0xbe8c430c, 2882 - 0xbe8e430e, 0xbf06807d, 2883 - 0xbfa1fff0, 0xb980f801, 2884 - 0x00000000, 0xbfbd0000, 2782 + 0xb8ef4306, 0x8b6fc16f, 2783 + 0xbfa1002f, 0x846f866f, 2784 + 0x846f826f, 0xbef6006f, 2885 2785 0xb8f83b05, 0x80788178, 2886 2786 0xbf0d9972, 0xbfa20002, 2887 2787 0x84788978, 0xbfa00001, 2888 2788 0x84788a78, 0xb8ee1e06, 2889 2789 0x846e8a6e, 0x80786e78, 2890 2790 0x8078ff78, 0x00000200, 2791 + 0x8078ff78, 0x00000080, 2891 2792 0xbef600ff, 0x01000000, 2892 - 0xf4205bfa, 0xf0000000, 2893 - 0x80788478, 0xf4205b3a, 2793 + 0x857d9972, 0x8b7d817d, 2794 + 0xbf06817d, 0xbefd0080, 2795 + 0xbfa2000c, 0xe0500000, 2796 + 0x781d0000, 0xbf8903f7, 2797 + 0xdac00000, 0x00000000, 2798 + 0x807dff7d, 0x00000080, 2799 + 0x8078ff78, 0x00000080, 2800 + 0xbf0a6f7d, 0xbfa2fff5, 2801 + 0xbfa0000b, 0xe0500000, 2802 + 0x781d0000, 0xbf8903f7, 2803 + 0xdac00000, 0x00000000, 2804 + 0x807dff7d, 0x00000100, 2805 + 0x8078ff78, 0x00000100, 2806 + 0xbf0a6f7d, 0xbfa2fff5, 2807 + 0xbef80080, 0xbefe00c1, 2808 + 0x857d9972, 0x8b7d817d, 2809 + 0xbf06817d, 0xbfa20002, 2810 + 0xbeff0080, 0xbfa00001, 2811 + 0xbeff00c1, 0xb8ef3b05, 2812 + 0x806f816f, 0x846f826f, 2813 + 0x857d9972, 0x8b7d817d, 2814 + 0xbf06817d, 0xbfa20024, 2815 + 0xbef600ff, 0x01000000, 2816 + 0xbeee0078, 0x8078ff78, 2817 + 0x00000200, 0xbefd0084, 2818 + 0xbf0a6f7d, 0xbfa10050, 2819 + 0xe0505000, 0x781d0000, 2820 + 0xe0505080, 0x781d0100, 2821 + 0xe0505100, 0x781d0200, 2822 + 0xe0505180, 0x781d0300, 2823 + 0xbf8903f7, 0x7e008500, 2824 + 0x7e028501, 0x7e048502, 2825 + 0x7e068503, 0x807d847d, 2826 + 0x8078ff78, 0x00000200, 2827 + 0xbf0a6f7d, 0xbfa2ffee, 2828 + 0xe0505000, 0x6e1d0000, 2829 + 0xe0505080, 0x6e1d0100, 2830 + 0xe0505100, 0x6e1d0200, 2831 + 0xe0505180, 0x6e1d0300, 2832 + 0xbf8903f7, 0xbfa00034, 2833 + 0xbef600ff, 0x01000000, 2834 + 0xbeee0078, 0x8078ff78, 2835 + 0x00000400, 0xbefd0084, 2836 + 0xbf0a6f7d, 0xbfa10012, 2837 + 0xe0505000, 
0x781d0000, 2838 + 0xe0505100, 0x781d0100, 2839 + 0xe0505200, 0x781d0200, 2840 + 0xe0505300, 0x781d0300, 2841 + 0xbf8903f7, 0x7e008500, 2842 + 0x7e028501, 0x7e048502, 2843 + 0x7e068503, 0x807d847d, 2844 + 0x8078ff78, 0x00000400, 2845 + 0xbf0a6f7d, 0xbfa2ffee, 2846 + 0xb8ef1e06, 0x8b6fc16f, 2847 + 0xbfa1000e, 0x846f836f, 2848 + 0x806f7d6f, 0xbefe00c1, 2849 + 0xbeff0080, 0xe0505000, 2850 + 0x781d0000, 0xbf8903f7, 2851 + 0x7e008500, 0x807d817d, 2852 + 0x8078ff78, 0x00000080, 2853 + 0xbf0a6f7d, 0xbfa2fff7, 2854 + 0xbeff00c1, 0xe0505000, 2855 + 0x6e1d0000, 0xe0505100, 2856 + 0x6e1d0100, 0xe0505200, 2857 + 0x6e1d0200, 0xe0505300, 2858 + 0x6e1d0300, 0xbf8903f7, 2859 + 0xb8f83b05, 0x80788178, 2860 + 0xbf0d9972, 0xbfa20002, 2861 + 0x84788978, 0xbfa00001, 2862 + 0x84788a78, 0xb8ee1e06, 2863 + 0x846e8a6e, 0x80786e78, 2864 + 0x8078ff78, 0x00000200, 2865 + 0x80f8ff78, 0x00000050, 2866 + 0xbef600ff, 0x01000000, 2867 + 0xbefd00ff, 0x0000006c, 2868 + 0x80f89078, 0xf428403a, 2869 + 0xf0000000, 0xbf89fc07, 2870 + 0x80fd847d, 0xbf800000, 2871 + 0xbe804300, 0xbe824302, 2872 + 0x80f8a078, 0xf42c403a, 2873 + 0xf0000000, 0xbf89fc07, 2874 + 0x80fd887d, 0xbf800000, 2875 + 0xbe804300, 0xbe824302, 2876 + 0xbe844304, 0xbe864306, 2877 + 0x80f8c078, 0xf430403a, 2878 + 0xf0000000, 0xbf89fc07, 2879 + 0x80fd907d, 0xbf800000, 2880 + 0xbe804300, 0xbe824302, 2881 + 0xbe844304, 0xbe864306, 2882 + 0xbe884308, 0xbe8a430a, 2883 + 0xbe8c430c, 0xbe8e430e, 2884 + 0xbf06807d, 0xbfa1fff0, 2885 + 0xb980f801, 0x00000000, 2886 + 0xbfbd0000, 0xb8f83b05, 2887 + 0x80788178, 0xbf0d9972, 2888 + 0xbfa20002, 0x84788978, 2889 + 0xbfa00001, 0x84788a78, 2890 + 0xb8ee1e06, 0x846e8a6e, 2891 + 0x80786e78, 0x8078ff78, 2892 + 0x00000200, 0xbef600ff, 2893 + 0x01000000, 0xf4205bfa, 2894 2894 0xf0000000, 0x80788478, 2895 - 0xf4205b7a, 0xf0000000, 2896 - 0x80788478, 0xf4205c3a, 2895 + 0xf4205b3a, 0xf0000000, 2896 + 0x80788478, 0xf4205b7a, 2897 2897 0xf0000000, 0x80788478, 2898 - 0xf4205c7a, 0xf0000000, 2899 - 0x80788478, 
0xf4205eba, 2898 + 0xf4205c3a, 0xf0000000, 2899 + 0x80788478, 0xf4205c7a, 2900 2900 0xf0000000, 0x80788478, 2901 - 0xf4205efa, 0xf0000000, 2902 - 0x80788478, 0xf4205e7a, 2901 + 0xf4205eba, 0xf0000000, 2902 + 0x80788478, 0xf4205efa, 2903 2903 0xf0000000, 0x80788478, 2904 - 0xf4205cfa, 0xf0000000, 2905 - 0x80788478, 0xf4205bba, 2904 + 0xf4205e7a, 0xf0000000, 2905 + 0x80788478, 0xf4205cfa, 2906 2906 0xf0000000, 0x80788478, 2907 - 0xbf89fc07, 0xb96ef814, 2908 2907 0xf4205bba, 0xf0000000, 2909 2908 0x80788478, 0xbf89fc07, 2910 - 0xb96ef815, 0xbefd006f, 2911 - 0xbefe0070, 0xbeff0071, 2912 - 0x8b6f7bff, 0x000003ff, 2913 - 0xb96f4803, 0x8b6f7bff, 2914 - 0xfffff800, 0x856f8b6f, 2915 - 0xb96fa2c3, 0xb973f801, 2916 - 0xb8ee3b05, 0x806e816e, 2917 - 0xbf0d9972, 0xbfa20002, 2918 - 0x846e896e, 0xbfa00001, 2919 - 0x846e8a6e, 0xb8ef1e06, 2920 - 0x846f8a6f, 0x806e6f6e, 2921 - 0x806eff6e, 0x00000200, 2922 - 0x806e746e, 0x826f8075, 2923 - 0x8b6fff6f, 0x0000ffff, 2924 - 0xf4085c37, 0xf8000050, 2925 - 0xf4085d37, 0xf8000060, 2926 - 0xf4005e77, 0xf8000074, 2927 - 0xbf89fc07, 0x8b6dff6d, 2928 - 0x0000ffff, 0x8bfe7e7e, 2929 - 0x8bea6a6a, 0xb8eef802, 2930 - 0xbf0d866e, 0xbfa20002, 2931 - 0xb97af802, 0xbe80486c, 2932 - 0xb97af802, 0xbe804a6c, 2933 - 0xbfb00000, 0xbf9f0000, 2909 + 0xb96ef814, 0xf4205bba, 2910 + 0xf0000000, 0x80788478, 2911 + 0xbf89fc07, 0xb96ef815, 2912 + 0xbefd006f, 0xbefe0070, 2913 + 0xbeff0071, 0x8b6f7bff, 2914 + 0x000003ff, 0xb96f4803, 2915 + 0x8b6f7bff, 0xfffff800, 2916 + 0x856f8b6f, 0xb96fa2c3, 2917 + 0xb973f801, 0xb8ee3b05, 2918 + 0x806e816e, 0xbf0d9972, 2919 + 0xbfa20002, 0x846e896e, 2920 + 0xbfa00001, 0x846e8a6e, 2921 + 0xb8ef1e06, 0x846f8a6f, 2922 + 0x806e6f6e, 0x806eff6e, 2923 + 0x00000200, 0x806e746e, 2924 + 0x826f8075, 0x8b6fff6f, 2925 + 0x0000ffff, 0xf4085c37, 2926 + 0xf8000050, 0xf4085d37, 2927 + 0xf8000060, 0xf4005e77, 2928 + 0xf8000074, 0xbf89fc07, 2929 + 0x8b6dff6d, 0x0000ffff, 2930 + 0x8bfe7e7e, 0x8bea6a6a, 2931 + 0xb8eef802, 0xbf0d866e, 2932 + 0xbfa20002, 
0xb97af802, 2933 + 0xbe80486c, 0xb97af802, 2934 + 0xbe804a6c, 0xbfb00000, 2934 2935 0xbf9f0000, 0xbf9f0000, 2935 2936 0xbf9f0000, 0xbf9f0000, 2937 + 0xbf9f0000, 0x00000000, 2936 2938 };
+6
drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
··· 186 186 s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) 187 187 188 188 #if SW_SA_TRAP 189 + // If ttmp1[30] is set then issue s_barrier to unblock dependent waves. 190 + s_bitcmp1_b32 s_save_pc_hi, 30 191 + s_cbranch_scc0 L_TRAP_NO_BARRIER 192 + s_barrier 193 + 194 + L_TRAP_NO_BARRIER: 189 195 // If ttmp1[31] is set then trap may occur early. 190 196 // Spin wait until SAVECTX exception is raised. 191 197 s_bitcmp1_b32 s_save_pc_hi, 31
+1 -3
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 973 973 out_unlock_svms: 974 974 mutex_unlock(&p->svms.lock); 975 975 out_unref_process: 976 + pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr); 976 977 kfd_unref_process(p); 977 978 out_mmput: 978 979 mmput(mm); 979 - 980 - pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr); 981 - 982 980 return r ? VM_FAULT_SIGBUS : 0; 983 981 } 984 982
+3
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1549 1549 1550 1550 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; 1551 1551 1552 + /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ 1553 + adev->dm.dc->debug.ignore_cable_id = true; 1554 + 1552 1555 r = dm_dmub_hw_init(adev); 1553 1556 if (r) { 1554 1557 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+7 -4
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
··· 157 157 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 158 158 unsigned int num_levels; 159 159 struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk; 160 + unsigned int i; 160 161 161 162 memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks)); 162 163 clk_mgr_base->clks.p_state_change_support = true; ··· 206 205 clk_mgr->dpm_present = true; 207 206 208 207 if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) { 209 - unsigned int i; 210 - 211 208 for (i = 0; i < num_levels; i++) 212 209 if (clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz 213 210 < khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz)) 214 211 clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz 215 212 = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz); 216 213 } 214 + for (i = 0; i < num_levels; i++) 215 + if (clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz > 1950) 216 + clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz = 1950; 217 217 218 218 if (clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz) { 219 - unsigned int i; 220 - 221 219 for (i = 0; i < num_levels; i++) 222 220 if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz 223 221 < khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz)) ··· 668 668 dcn32_init_single_clock(clk_mgr, PPCLK_UCLK, 669 669 &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz, 670 670 &num_entries_per_clk->num_memclk_levels); 671 + 672 + /* memclk must have at least one level */ 673 + num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? num_entries_per_clk->num_memclk_levels : 1; 671 674 672 675 dcn32_init_single_clock(clk_mgr, PPCLK_FCLK, 673 676 &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
+1
drivers/gpu/drm/amd/display/dc/dc.h
··· 852 852 bool enable_double_buffered_dsc_pg_support; 853 853 bool enable_dp_dig_pixel_rate_div_policy; 854 854 enum lttpr_mode lttpr_mode_override; 855 + unsigned int dsc_delay_factor_wa_x1000; 855 856 }; 856 857 857 858 struct gpu_info_soc_bounding_box_v1_0;
+4
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
··· 623 623 hubp->att.size.bits.width = attr->width; 624 624 hubp->att.size.bits.height = attr->height; 625 625 hubp->att.cur_ctl.bits.mode = attr->color_format; 626 + 627 + hubp->cur_rect.w = attr->width; 628 + hubp->cur_rect.h = attr->height; 629 + 626 630 hubp->att.cur_ctl.bits.pitch = hw_pitch; 627 631 hubp->att.cur_ctl.bits.line_per_chunk = lpc; 628 632 hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+1 -1
drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
··· 847 847 .num_ddc = 5, 848 848 .num_vmid = 16, 849 849 .num_mpc_3dlut = 2, 850 - .num_dsc = 3, 850 + .num_dsc = 4, 851 851 }; 852 852 853 853 static const struct dc_plane_cap plane_cap = {
+1
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
··· 1228 1228 pipes[pipe_cnt].pipe.src.dcc = false; 1229 1229 pipes[pipe_cnt].pipe.src.dcc_rate = 1; 1230 1230 pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; 1231 + pipes[pipe_cnt].pipe.dest.synchronize_timings = synchronized_vblank; 1231 1232 pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch; 1232 1233 pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start 1233 1234 - timing->h_addressable
+3 -1
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
··· 2359 2359 2360 2360 if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) 2361 2361 dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; 2362 - 2363 2362 } 2363 + 2364 + /* DML DSC delay factor workaround */ 2365 + dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; 2364 2366 2365 2367 /* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */ 2366 2368 dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+6 -4
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
··· 364 364 for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { 365 365 v->DSCDelay[k] = dml32_DSCDelayRequirement(mode_lib->vba.DSCEnabled[k], 366 366 mode_lib->vba.ODMCombineEnabled[k], mode_lib->vba.DSCInputBitPerComponent[k], 367 - mode_lib->vba.OutputBpp[k], mode_lib->vba.HActive[k], mode_lib->vba.HTotal[k], 367 + mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k], 368 + mode_lib->vba.HActive[k], mode_lib->vba.HTotal[k], 368 369 mode_lib->vba.NumberOfDSCSlices[k], mode_lib->vba.OutputFormat[k], 369 370 mode_lib->vba.Output[k], mode_lib->vba.PixelClock[k], 370 - mode_lib->vba.PixelClockBackEnd[k]); 371 + mode_lib->vba.PixelClockBackEnd[k], mode_lib->vba.ip.dsc_delay_factor_wa); 371 372 } 372 373 373 374 for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) ··· 1628 1627 && !mode_lib->vba.MSOOrODMSplitWithNonDPLink 1629 1628 && !mode_lib->vba.NotEnoughLanesForMSO 1630 1629 && mode_lib->vba.LinkCapacitySupport[i] == true && !mode_lib->vba.P2IWith420 1631 - && !mode_lib->vba.DSCOnlyIfNecessaryWithBPP 1630 + //&& !mode_lib->vba.DSCOnlyIfNecessaryWithBPP 1632 1631 && !mode_lib->vba.DSC422NativeNotSupported 1633 1632 && !mode_lib->vba.MPCCombineMethodIncompatible 1634 1633 && mode_lib->vba.ODMCombine2To1SupportCheckOK[i] == true ··· 2476 2475 mode_lib->vba.OutputBppPerState[i][k], mode_lib->vba.HActive[k], 2477 2476 mode_lib->vba.HTotal[k], mode_lib->vba.NumberOfDSCSlices[k], 2478 2477 mode_lib->vba.OutputFormat[k], mode_lib->vba.Output[k], 2479 - mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k]); 2478 + mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k], 2479 + mode_lib->vba.ip.dsc_delay_factor_wa); 2480 2480 } 2481 2481 2482 2482 for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
+4 -3
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
··· 1726 1726 enum output_format_class OutputFormat, 1727 1727 enum output_encoder_class Output, 1728 1728 double PixelClock, 1729 - double PixelClockBackEnd) 1729 + double PixelClockBackEnd, 1730 + double dsc_delay_factor_wa) 1730 1731 { 1731 1732 unsigned int DSCDelayRequirement_val; 1732 1733 ··· 1747 1746 } 1748 1747 1749 1748 DSCDelayRequirement_val = DSCDelayRequirement_val + (HTotal - HActive) * 1750 - dml_ceil(DSCDelayRequirement_val / HActive, 1); 1749 + dml_ceil((double)DSCDelayRequirement_val / HActive, 1); 1751 1750 1752 1751 DSCDelayRequirement_val = DSCDelayRequirement_val * PixelClock / PixelClockBackEnd; 1753 1752 ··· 1765 1764 dml_print("DML::%s: DSCDelayRequirement_val = %d\n", __func__, DSCDelayRequirement_val); 1766 1765 #endif 1767 1766 1768 - return DSCDelayRequirement_val; 1767 + return dml_ceil(DSCDelayRequirement_val * dsc_delay_factor_wa, 1); 1769 1768 } 1770 1769 1771 1770 void dml32_CalculateSurfaceSizeInMall(
+2 -1
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
··· 327 327 enum output_format_class OutputFormat, 328 328 enum output_encoder_class Output, 329 329 double PixelClock, 330 - double PixelClockBackEnd); 330 + double PixelClockBackEnd, 331 + double dsc_delay_factor_wa); 331 332 332 333 void dml32_CalculateSurfaceSizeInMall( 333 334 unsigned int NumberOfActiveSurfaces,
+2 -2
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
··· 291 291 292 292 dml_print("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, dlg_regs->vready_after_vcount0); 293 293 294 - dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); 295 - dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); 294 + dst_x_after_scaler = dml_ceil(get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx), 1); 295 + dst_y_after_scaler = dml_ceil(get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx), 1); 296 296 297 297 // do some adjustment on the dst_after scaler to account for odm combine mode 298 298 dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n", __func__, dst_x_after_scaler);
+9 -6
drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
··· 29 29 #include "dcn321_fpu.h" 30 30 #include "dcn32/dcn32_resource.h" 31 31 #include "dcn321/dcn321_resource.h" 32 + #include "dml/dcn32/display_mode_vba_util_32.h" 32 33 33 34 #define DCN3_2_DEFAULT_DET_SIZE 256 34 35 ··· 120 119 }, 121 120 }, 122 121 .num_states = 1, 123 - .sr_exit_time_us = 12.36, 124 - .sr_enter_plus_exit_time_us = 16.72, 122 + .sr_exit_time_us = 19.95, 123 + .sr_enter_plus_exit_time_us = 24.36, 125 124 .sr_exit_z8_time_us = 285.0, 126 125 .sr_enter_plus_exit_z8_time_us = 320, 127 126 .writeback_latency_us = 12.0, 128 127 .round_trip_ping_latency_dcfclk_cycles = 263, 129 - .urgent_latency_pixel_data_only_us = 4.0, 130 - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, 131 - .urgent_latency_vm_data_only_us = 4.0, 128 + .urgent_latency_pixel_data_only_us = 9.35, 129 + .urgent_latency_pixel_mixed_with_vm_data_us = 9.35, 130 + .urgent_latency_vm_data_only_us = 9.35, 132 131 .fclk_change_latency_us = 20, 133 132 .usr_retraining_latency_us = 2, 134 133 .smn_latency_us = 2, ··· 539 538 540 539 if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) 541 540 dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; 542 - 543 541 } 542 + 543 + /* DML DSC delay factor workaround */ 544 + dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; 544 545 545 546 /* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */ 546 547 dcn3_21_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+3
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
··· 364 364 unsigned int max_num_dp2p0_outputs; 365 365 unsigned int max_num_dp2p0_streams; 366 366 unsigned int VBlankNomDefaultUS; 367 + 368 + /* DM workarounds */ 369 + double dsc_delay_factor_wa; // TODO: Remove after implementing root cause fix 367 370 }; 368 371 369 372 struct _vcs_dpi_display_xfc_params_st {
+1 -1
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
··· 625 625 mode_lib->vba.skip_dio_check[mode_lib->vba.NumberOfActivePlanes] = 626 626 dout->is_virtual; 627 627 628 - if (!dout->dsc_enable) 628 + if (dout->dsc_enable) 629 629 mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp; 630 630 else 631 631 mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = 0.0;
+47 -19
drivers/gpu/drm/drm_format_helper.c
··· 807 807 return false; 808 808 } 809 809 810 + static const uint32_t conv_from_xrgb8888[] = { 811 + DRM_FORMAT_XRGB8888, 812 + DRM_FORMAT_ARGB8888, 813 + DRM_FORMAT_XRGB2101010, 814 + DRM_FORMAT_ARGB2101010, 815 + DRM_FORMAT_RGB565, 816 + DRM_FORMAT_RGB888, 817 + }; 818 + 819 + static const uint32_t conv_from_rgb565_888[] = { 820 + DRM_FORMAT_XRGB8888, 821 + DRM_FORMAT_ARGB8888, 822 + }; 823 + 824 + static bool is_conversion_supported(uint32_t from, uint32_t to) 825 + { 826 + switch (from) { 827 + case DRM_FORMAT_XRGB8888: 828 + case DRM_FORMAT_ARGB8888: 829 + return is_listed_fourcc(conv_from_xrgb8888, ARRAY_SIZE(conv_from_xrgb8888), to); 830 + case DRM_FORMAT_RGB565: 831 + case DRM_FORMAT_RGB888: 832 + return is_listed_fourcc(conv_from_rgb565_888, ARRAY_SIZE(conv_from_rgb565_888), to); 833 + case DRM_FORMAT_XRGB2101010: 834 + return to == DRM_FORMAT_ARGB2101010; 835 + case DRM_FORMAT_ARGB2101010: 836 + return to == DRM_FORMAT_XRGB2101010; 837 + default: 838 + return false; 839 + } 840 + } 841 + 810 842 /** 811 843 * drm_fb_build_fourcc_list - Filters a list of supported color formats against 812 844 * the device's native formats ··· 859 827 * be handed over to drm_universal_plane_init() et al. Native formats 860 828 * will go before emulated formats. Other heuristics might be applied 861 829 * to optimize the order. Formats near the beginning of the list are 862 - * usually preferred over formats near the end of the list. 830 + * usually preferred over formats near the end of the list. Formats 831 + * without conversion helpers will be skipped. New drivers should only 832 + * pass in XRGB8888 and avoid exposing additional emulated formats. 863 833 * 864 834 * Returns: 865 835 * The number of color-formats 4CC codes returned in @fourccs_out. 
··· 873 839 { 874 840 u32 *fourccs = fourccs_out; 875 841 const u32 *fourccs_end = fourccs_out + nfourccs_out; 876 - bool found_native = false; 842 + uint32_t native_format = 0; 877 843 size_t i; 878 844 879 845 /* ··· 892 858 893 859 drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc); 894 860 895 - if (!found_native) 896 - found_native = is_listed_fourcc(driver_fourccs, driver_nfourccs, fourcc); 861 + /* 862 + * There should only be one native format with the current API. 863 + * This API needs to be refactored to correctly support arbitrary 864 + * sets of native formats, since it needs to report which native 865 + * format to use for each emulated format. 866 + */ 867 + if (!native_format) 868 + native_format = fourcc; 897 869 *fourccs = fourcc; 898 870 ++fourccs; 899 - } 900 - 901 - /* 902 - * The plane's atomic_update helper converts the framebuffer's color format 903 - * to a native format when copying to device memory. 904 - * 905 - * If there is not a single format supported by both, device and 906 - * driver, the native formats are likely not supported by the conversion 907 - * helpers. Therefore *only* support the native formats and add a 908 - * conversion helper ASAP. 909 - */ 910 - if (!found_native) { 911 - drm_warn(dev, "Format conversion helpers required to add extra formats.\n"); 912 - goto out; 913 871 } 914 872 915 873 /* ··· 916 890 } else if (fourccs == fourccs_end) { 917 891 drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc); 918 892 continue; /* end of available output buffer */ 893 + } else if (!is_conversion_supported(fourcc, native_format)) { 894 + drm_dbg_kms(dev, "Unsupported emulated format %p4cc\n", &fourcc); 895 + continue; /* format is not supported for conversion */ 919 896 } 920 897 921 898 drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc); ··· 927 898 ++fourccs; 928 899 } 929 900 930 - out: 931 901 return fourccs - fourccs_out; 932 902 } 933 903 EXPORT_SYMBOL(drm_fb_build_fourcc_list);
+1
drivers/gpu/drm/i915/Makefile
··· 282 282 display/intel_ddi.o \ 283 283 display/intel_ddi_buf_trans.o \ 284 284 display/intel_display_trace.o \ 285 + display/intel_dkl_phy.o \ 285 286 display/intel_dp.o \ 286 287 display/intel_dp_aux.o \ 287 288 display/intel_dp_aux_backlight.o \
+28 -40
drivers/gpu/drm/i915/display/intel_ddi.c
··· 43 43 #include "intel_de.h" 44 44 #include "intel_display_power.h" 45 45 #include "intel_display_types.h" 46 + #include "intel_dkl_phy.h" 46 47 #include "intel_dp.h" 47 48 #include "intel_dp_link_training.h" 48 49 #include "intel_dp_mst.h" ··· 1263 1262 for (ln = 0; ln < 2; ln++) { 1264 1263 int level; 1265 1264 1266 - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 1267 - HIP_INDEX_VAL(tc_port, ln)); 1268 - 1269 - intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0); 1265 + intel_dkl_phy_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), ln, 0); 1270 1266 1271 1267 level = intel_ddi_level(encoder, crtc_state, 2*ln+0); 1272 1268 1273 - intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), 1274 - DKL_TX_PRESHOOT_COEFF_MASK | 1275 - DKL_TX_DE_EMPAHSIS_COEFF_MASK | 1276 - DKL_TX_VSWING_CONTROL_MASK, 1277 - DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | 1278 - DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | 1279 - DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); 1269 + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), ln, 1270 + DKL_TX_PRESHOOT_COEFF_MASK | 1271 + DKL_TX_DE_EMPAHSIS_COEFF_MASK | 1272 + DKL_TX_VSWING_CONTROL_MASK, 1273 + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | 1274 + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | 1275 + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); 1280 1276 1281 1277 level = intel_ddi_level(encoder, crtc_state, 2*ln+1); 1282 1278 1283 - intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), 1284 - DKL_TX_PRESHOOT_COEFF_MASK | 1285 - DKL_TX_DE_EMPAHSIS_COEFF_MASK | 1286 - DKL_TX_VSWING_CONTROL_MASK, 1287 - DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | 1288 - DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | 1289 - DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); 1279 + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), ln, 1280 + DKL_TX_PRESHOOT_COEFF_MASK | 1281 + DKL_TX_DE_EMPAHSIS_COEFF_MASK | 1282 + 
DKL_TX_VSWING_CONTROL_MASK, 1283 + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | 1284 + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | 1285 + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); 1290 1286 1291 - intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), 1292 - DKL_TX_DP20BITMODE, 0); 1287 + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln, 1288 + DKL_TX_DP20BITMODE, 0); 1293 1289 1294 1290 if (IS_ALDERLAKE_P(dev_priv)) { 1295 1291 u32 val; ··· 1304 1306 val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0); 1305 1307 } 1306 1308 1307 - intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), 1308 - DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | 1309 - DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, 1310 - val); 1309 + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln, 1310 + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | 1311 + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, 1312 + val); 1311 1313 } 1312 1314 } 1313 1315 } ··· 2017 2019 return; 2018 2020 2019 2021 if (DISPLAY_VER(dev_priv) >= 12) { 2020 - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 2021 - HIP_INDEX_VAL(tc_port, 0x0)); 2022 - ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port)); 2023 - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 2024 - HIP_INDEX_VAL(tc_port, 0x1)); 2025 - ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port)); 2022 + ln0 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 0); 2023 + ln1 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 1); 2026 2024 } else { 2027 2025 ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port)); 2028 2026 ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port)); ··· 2079 2085 } 2080 2086 2081 2087 if (DISPLAY_VER(dev_priv) >= 12) { 2082 - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 2083 - HIP_INDEX_VAL(tc_port, 0x0)); 2084 - intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0); 2085 - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 2086 - HIP_INDEX_VAL(tc_port, 0x1)); 2087 - intel_de_write(dev_priv, 
DKL_DP_MODE(tc_port), ln1); 2088 + intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 0, ln0); 2089 + intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 1, ln1); 2088 2090 } else { 2089 2091 intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0); 2090 2092 intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1); ··· 3084 3094 enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); 3085 3095 int ln; 3086 3096 3087 - for (ln = 0; ln < 2; ln++) { 3088 - intel_de_write(i915, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); 3089 - intel_de_rmw(i915, DKL_PCS_DW5(tc_port), DKL_PCS_DW5_CORE_SOFTRESET, 0); 3090 - } 3097 + for (ln = 0; ln < 2; ln++) 3098 + intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port), ln, DKL_PCS_DW5_CORE_SOFTRESET, 0); 3091 3099 } 3092 3100 3093 3101 static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
+8
drivers/gpu/drm/i915/display/intel_display_core.h
··· 316 316 } dbuf; 317 317 318 318 struct { 319 + /* 320 + * dkl.phy_lock protects against concurrent access of the 321 + * Dekel TypeC PHYs. 322 + */ 323 + spinlock_t phy_lock; 324 + } dkl; 325 + 326 + struct { 319 327 /* VLV/CHV/BXT/GLK DSI MMIO register base address */ 320 328 u32 mmio_base; 321 329 } dsi;
+3 -4
drivers/gpu/drm/i915/display/intel_display_power_well.c
··· 12 12 #include "intel_de.h" 13 13 #include "intel_display_power_well.h" 14 14 #include "intel_display_types.h" 15 + #include "intel_dkl_phy.h" 15 16 #include "intel_dmc.h" 16 17 #include "intel_dpio_phy.h" 17 18 #include "intel_dpll.h" ··· 530 529 enum tc_port tc_port; 531 530 532 531 tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx); 533 - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 534 - HIP_INDEX_VAL(tc_port, 0x2)); 535 532 536 - if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), 537 - DKL_CMN_UC_DW27_UC_HEALTH, 1)) 533 + if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port), 2) & 534 + DKL_CMN_UC_DW27_UC_HEALTH, 1)) 538 535 drm_warn(&dev_priv->drm, 539 536 "Timeout waiting TC uC health\n"); 540 537 }
+109
drivers/gpu/drm/i915/display/intel_dkl_phy.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2022 Intel Corporation 4 + */ 5 + 6 + #include "i915_drv.h" 7 + #include "i915_reg.h" 8 + 9 + #include "intel_de.h" 10 + #include "intel_display.h" 11 + #include "intel_dkl_phy.h" 12 + 13 + static void 14 + dkl_phy_set_hip_idx(struct drm_i915_private *i915, i915_reg_t reg, int idx) 15 + { 16 + enum tc_port tc_port = DKL_REG_TC_PORT(reg); 17 + 18 + drm_WARN_ON(&i915->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS); 19 + 20 + intel_de_write(i915, 21 + HIP_INDEX_REG(tc_port), 22 + HIP_INDEX_VAL(tc_port, idx)); 23 + } 24 + 25 + /** 26 + * intel_dkl_phy_read - read a Dekel PHY register 27 + * @i915: i915 device instance 28 + * @reg: Dekel PHY register 29 + * @ln: lane instance of @reg 30 + * 31 + * Read the @reg Dekel PHY register. 32 + * 33 + * Returns the read value. 34 + */ 35 + u32 36 + intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln) 37 + { 38 + u32 val; 39 + 40 + spin_lock(&i915->display.dkl.phy_lock); 41 + 42 + dkl_phy_set_hip_idx(i915, reg, ln); 43 + val = intel_de_read(i915, reg); 44 + 45 + spin_unlock(&i915->display.dkl.phy_lock); 46 + 47 + return val; 48 + } 49 + 50 + /** 51 + * intel_dkl_phy_write - write a Dekel PHY register 52 + * @i915: i915 device instance 53 + * @reg: Dekel PHY register 54 + * @ln: lane instance of @reg 55 + * @val: value to write 56 + * 57 + * Write @val to the @reg Dekel PHY register. 
58 + */ 59 + void 60 + intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val) 61 + { 62 + spin_lock(&i915->display.dkl.phy_lock); 63 + 64 + dkl_phy_set_hip_idx(i915, reg, ln); 65 + intel_de_write(i915, reg, val); 66 + 67 + spin_unlock(&i915->display.dkl.phy_lock); 68 + } 69 + 70 + /** 71 + * intel_dkl_phy_rmw - read-modify-write a Dekel PHY register 72 + * @i915: i915 device instance 73 + * @reg: Dekel PHY register 74 + * @ln: lane instance of @reg 75 + * @clear: mask to clear 76 + * @set: mask to set 77 + * 78 + * Read the @reg Dekel PHY register, clearing then setting the @clear/@set bits in it, and writing 79 + * this value back to the register if the value differs from the read one. 80 + */ 81 + void 82 + intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set) 83 + { 84 + spin_lock(&i915->display.dkl.phy_lock); 85 + 86 + dkl_phy_set_hip_idx(i915, reg, ln); 87 + intel_de_rmw(i915, reg, clear, set); 88 + 89 + spin_unlock(&i915->display.dkl.phy_lock); 90 + } 91 + 92 + /** 93 + * intel_dkl_phy_posting_read - do a posting read from a Dekel PHY register 94 + * @i915: i915 device instance 95 + * @reg: Dekel PHY register 96 + * @ln: lane instance of @reg 97 + * 98 + * Read the @reg Dekel PHY register without returning the read value. 99 + */ 100 + void 101 + intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln) 102 + { 103 + spin_lock(&i915->display.dkl.phy_lock); 104 + 105 + dkl_phy_set_hip_idx(i915, reg, ln); 106 + intel_de_posting_read(i915, reg); 107 + 108 + spin_unlock(&i915->display.dkl.phy_lock); 109 + }
+24
drivers/gpu/drm/i915/display/intel_dkl_phy.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2022 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_DKL_PHY_H__ 7 + #define __INTEL_DKL_PHY_H__ 8 + 9 + #include <linux/types.h> 10 + 11 + #include "i915_reg_defs.h" 12 + 13 + struct drm_i915_private; 14 + 15 + u32 16 + intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln); 17 + void 18 + intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val); 19 + void 20 + intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set); 21 + void 22 + intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln); 23 + 24 + #endif /* __INTEL_DKL_PHY_H__ */
+1 -1
drivers/gpu/drm/i915/display/intel_dp.c
··· 5276 5276 encoder->devdata, IS_ERR(edid) ? NULL : edid); 5277 5277 5278 5278 intel_panel_add_edid_fixed_modes(intel_connector, 5279 - intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE, 5279 + intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE || 5280 5280 intel_vrr_is_capable(intel_connector)); 5281 5281 5282 5282 /* MSO requires information from the EDID */
+27 -32
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
··· 25 25 26 26 #include "intel_de.h" 27 27 #include "intel_display_types.h" 28 + #include "intel_dkl_phy.h" 28 29 #include "intel_dpio_phy.h" 29 30 #include "intel_dpll.h" 30 31 #include "intel_dpll_mgr.h" ··· 3509 3508 * All registers read here have the same HIP_INDEX_REG even though 3510 3509 * they are on different building blocks 3511 3510 */ 3512 - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 3513 - HIP_INDEX_VAL(tc_port, 0x2)); 3514 - 3515 - hw_state->mg_refclkin_ctl = intel_de_read(dev_priv, 3516 - DKL_REFCLKIN_CTL(tc_port)); 3511 + hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv, 3512 + DKL_REFCLKIN_CTL(tc_port), 2); 3517 3513 hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; 3518 3514 3519 3515 hw_state->mg_clktop2_hsclkctl = 3520 - intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); 3516 + intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2); 3521 3517 hw_state->mg_clktop2_hsclkctl &= 3522 3518 MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | 3523 3519 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | ··· 3522 3524 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; 3523 3525 3524 3526 hw_state->mg_clktop2_coreclkctl1 = 3525 - intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); 3527 + intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2); 3526 3528 hw_state->mg_clktop2_coreclkctl1 &= 3527 3529 MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; 3528 3530 3529 - hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port)); 3531 + hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port), 2); 3530 3532 val = DKL_PLL_DIV0_MASK; 3531 3533 if (dev_priv->display.vbt.override_afc_startup) 3532 3534 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; 3533 3535 hw_state->mg_pll_div0 &= val; 3534 3536 3535 - hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); 3537 + hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2); 3536 3538 hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK | 3537 
3539 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); 3538 3540 3539 - hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port)); 3541 + hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2); 3540 3542 hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | 3541 3543 DKL_PLL_SSC_STEP_LEN_MASK | 3542 3544 DKL_PLL_SSC_STEP_NUM_MASK | 3543 3545 DKL_PLL_SSC_EN); 3544 3546 3545 - hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port)); 3547 + hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2); 3546 3548 hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H | 3547 3549 DKL_PLL_BIAS_FBDIV_FRAC_MASK); 3548 3550 3549 3551 hw_state->mg_pll_tdc_coldst_bias = 3550 - intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); 3552 + intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2); 3551 3553 hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK | 3552 3554 DKL_PLL_TDC_FEED_FWD_GAIN_MASK); 3553 3555 ··· 3735 3737 * All registers programmed here have the same HIP_INDEX_REG even 3736 3738 * though on different building block 3737 3739 */ 3738 - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 3739 - HIP_INDEX_VAL(tc_port, 0x2)); 3740 - 3741 3740 /* All the registers are RMW */ 3742 - val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port)); 3741 + val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2); 3743 3742 val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; 3744 3743 val |= hw_state->mg_refclkin_ctl; 3745 - intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val); 3744 + intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2, val); 3746 3745 3747 - val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); 3746 + val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2); 3748 3747 val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; 3749 3748 val |= hw_state->mg_clktop2_coreclkctl1; 3750 - intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val); 3749 + 
intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2, val); 3751 3750 3752 - val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); 3751 + val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2); 3753 3752 val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | 3754 3753 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | 3755 3754 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | 3756 3755 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); 3757 3756 val |= hw_state->mg_clktop2_hsclkctl; 3758 - intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val); 3757 + intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2, val); 3759 3758 3760 3759 val = DKL_PLL_DIV0_MASK; 3761 3760 if (dev_priv->display.vbt.override_afc_startup) 3762 3761 val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; 3763 - intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val, 3764 - hw_state->mg_pll_div0); 3762 + intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), 2, val, 3763 + hw_state->mg_pll_div0); 3765 3764 3766 - val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); 3765 + val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2); 3767 3766 val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | 3768 3767 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); 3769 3768 val |= hw_state->mg_pll_div1; 3770 - intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val); 3769 + intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), 2, val); 3771 3770 3772 - val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port)); 3771 + val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2); 3773 3772 val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | 3774 3773 DKL_PLL_SSC_STEP_LEN_MASK | 3775 3774 DKL_PLL_SSC_STEP_NUM_MASK | 3776 3775 DKL_PLL_SSC_EN); 3777 3776 val |= hw_state->mg_pll_ssc; 3778 - intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val); 3777 + intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), 2, val); 3779 3778 3780 - val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port)); 3779 + val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2); 3781 3780 val &= 
~(DKL_PLL_BIAS_FRAC_EN_H | 3782 3781 DKL_PLL_BIAS_FBDIV_FRAC_MASK); 3783 3782 val |= hw_state->mg_pll_bias; 3784 - intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val); 3783 + intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), 2, val); 3785 3784 3786 - val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); 3785 + val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2); 3787 3786 val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK | 3788 3787 DKL_PLL_TDC_FEED_FWD_GAIN_MASK); 3789 3788 val |= hw_state->mg_pll_tdc_coldst_bias; 3790 - intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val); 3789 + intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2, val); 3791 3790 3792 - intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); 3791 + intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2); 3793 3792 } 3794 3793 3795 3794 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
+1 -2
drivers/gpu/drm/i915/display/intel_lvds.c
··· 972 972 973 973 /* Try EDID first */ 974 974 intel_panel_add_edid_fixed_modes(intel_connector, 975 - intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE, 976 - false); 975 + intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE); 977 976 978 977 /* Failed to get EDID, what about VBT? */ 979 978 if (!intel_panel_preferred_fixed_mode(intel_connector))
+2 -2
drivers/gpu/drm/i915/display/intel_panel.c
··· 254 254 } 255 255 256 256 void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, 257 - bool has_drrs, bool has_vrr) 257 + bool use_alt_fixed_modes) 258 258 { 259 259 intel_panel_add_edid_preferred_mode(connector); 260 - if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr)) 260 + if (intel_panel_preferred_fixed_mode(connector) && use_alt_fixed_modes) 261 261 intel_panel_add_edid_alt_fixed_modes(connector); 262 262 intel_panel_destroy_probed_modes(connector); 263 263 }
+1 -1
drivers/gpu/drm/i915/display/intel_panel.h
··· 44 44 int intel_panel_compute_config(struct intel_connector *connector, 45 45 struct drm_display_mode *adjusted_mode); 46 46 void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, 47 - bool has_drrs, bool has_vrr); 47 + bool use_alt_fixed_modes); 48 48 void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector); 49 49 void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector); 50 50 void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
+39 -25
drivers/gpu/drm/i915/display/intel_sdvo.c
··· 2747 2747 if (!intel_sdvo_connector) 2748 2748 return false; 2749 2749 2750 - if (device == 0) { 2751 - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; 2750 + if (device == 0) 2752 2751 intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; 2753 - } else if (device == 1) { 2754 - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; 2752 + else if (device == 1) 2755 2753 intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; 2756 - } 2757 2754 2758 2755 intel_connector = &intel_sdvo_connector->base; 2759 2756 connector = &intel_connector->base; ··· 2805 2808 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2806 2809 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2807 2810 2808 - intel_sdvo->controlled_output |= type; 2809 2811 intel_sdvo_connector->output_flag = type; 2810 2812 2811 2813 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { ··· 2845 2849 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2846 2850 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2847 2851 2848 - if (device == 0) { 2849 - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; 2852 + if (device == 0) 2850 2853 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; 2851 - } else if (device == 1) { 2852 - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; 2854 + else if (device == 1) 2853 2855 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; 2854 - } 2855 2856 2856 2857 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { 2857 2858 kfree(intel_sdvo_connector); ··· 2878 2885 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2879 2886 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2880 2887 2881 - if (device == 0) { 2882 - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; 2888 + if (device == 0) 2883 2889 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; 2884 - } else if (device == 1) { 2885 - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; 2890 + else if (device == 1) 2886 2891 intel_sdvo_connector->output_flag = 
SDVO_OUTPUT_LVDS1; 2887 - } 2888 2892 2889 2893 if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { 2890 2894 kfree(intel_sdvo_connector); ··· 2900 2910 intel_panel_add_vbt_sdvo_fixed_mode(intel_connector); 2901 2911 2902 2912 if (!intel_panel_preferred_fixed_mode(intel_connector)) { 2913 + mutex_lock(&i915->drm.mode_config.mutex); 2914 + 2903 2915 intel_ddc_get_modes(connector, &intel_sdvo->ddc); 2904 - intel_panel_add_edid_fixed_modes(intel_connector, false, false); 2916 + intel_panel_add_edid_fixed_modes(intel_connector, false); 2917 + 2918 + mutex_unlock(&i915->drm.mode_config.mutex); 2905 2919 } 2906 2920 2907 2921 intel_panel_init(intel_connector); ··· 2920 2926 return false; 2921 2927 } 2922 2928 2929 + static u16 intel_sdvo_filter_output_flags(u16 flags) 2930 + { 2931 + flags &= SDVO_OUTPUT_MASK; 2932 + 2933 + /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ 2934 + if (!(flags & SDVO_OUTPUT_TMDS0)) 2935 + flags &= ~SDVO_OUTPUT_TMDS1; 2936 + 2937 + if (!(flags & SDVO_OUTPUT_RGB0)) 2938 + flags &= ~SDVO_OUTPUT_RGB1; 2939 + 2940 + if (!(flags & SDVO_OUTPUT_LVDS0)) 2941 + flags &= ~SDVO_OUTPUT_LVDS1; 2942 + 2943 + return flags; 2944 + } 2945 + 2923 2946 static bool 2924 2947 intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) 2925 2948 { 2926 - /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ 2949 + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); 2950 + 2951 + flags = intel_sdvo_filter_output_flags(flags); 2952 + 2953 + intel_sdvo->controlled_output = flags; 2954 + 2955 + intel_sdvo_select_ddc_bus(i915, intel_sdvo); 2927 2956 2928 2957 if (flags & SDVO_OUTPUT_TMDS0) 2929 2958 if (!intel_sdvo_dvi_init(intel_sdvo, 0)) 2930 2959 return false; 2931 2960 2932 - if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) 2961 + if (flags & SDVO_OUTPUT_TMDS1) 2933 2962 if (!intel_sdvo_dvi_init(intel_sdvo, 1)) 2934 2963 return false; 2935 2964 ··· 2973 2956 if 
(!intel_sdvo_analog_init(intel_sdvo, 0)) 2974 2957 return false; 2975 2958 2976 - if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) 2959 + if (flags & SDVO_OUTPUT_RGB1) 2977 2960 if (!intel_sdvo_analog_init(intel_sdvo, 1)) 2978 2961 return false; 2979 2962 ··· 2981 2964 if (!intel_sdvo_lvds_init(intel_sdvo, 0)) 2982 2965 return false; 2983 2966 2984 - if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) 2967 + if (flags & SDVO_OUTPUT_LVDS1) 2985 2968 if (!intel_sdvo_lvds_init(intel_sdvo, 1)) 2986 2969 return false; 2987 2970 2988 - if ((flags & SDVO_OUTPUT_MASK) == 0) { 2971 + if (flags == 0) { 2989 2972 unsigned char bytes[2]; 2990 2973 2991 - intel_sdvo->controlled_output = 0; 2992 2974 memcpy(bytes, &intel_sdvo->caps.output_flags, 2); 2993 2975 DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", 2994 2976 SDVO_NAME(intel_sdvo), ··· 3398 3382 * cloning for SDVO encoders. 3399 3383 */ 3400 3384 intel_sdvo->base.cloneable = 0; 3401 - 3402 - intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo); 3403 3385 3404 3386 /* Set the input timing to the screen. Assume always input 0. */ 3405 3387 if (!intel_sdvo_set_target_input(intel_sdvo))
+4 -15
drivers/gpu/drm/i915/gem/i915_gem_internal.c
··· 6 6 7 7 #include <linux/scatterlist.h> 8 8 #include <linux/slab.h> 9 - #include <linux/swiotlb.h> 10 9 11 10 #include "i915_drv.h" 12 11 #include "i915_gem.h" ··· 37 38 struct scatterlist *sg; 38 39 unsigned int sg_page_sizes; 39 40 unsigned int npages; 40 - int max_order; 41 + int max_order = MAX_ORDER; 42 + unsigned int max_segment; 41 43 gfp_t gfp; 42 44 43 - max_order = MAX_ORDER; 44 - #ifdef CONFIG_SWIOTLB 45 - if (is_swiotlb_active(obj->base.dev->dev)) { 46 - unsigned int max_segment; 47 - 48 - max_segment = swiotlb_max_segment(); 49 - if (max_segment) { 50 - max_segment = max_t(unsigned int, max_segment, 51 - PAGE_SIZE) >> PAGE_SHIFT; 52 - max_order = min(max_order, ilog2(max_segment)); 53 - } 54 - } 55 - #endif 45 + max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT; 46 + max_order = min(max_order, get_order(max_segment)); 56 47 57 48 gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; 58 49 if (IS_I965GM(i915) || IS_I965G(i915)) {
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
··· 194 194 struct intel_memory_region *mem = obj->mm.region; 195 195 struct address_space *mapping = obj->base.filp->f_mapping; 196 196 const unsigned long page_count = obj->base.size / PAGE_SIZE; 197 - unsigned int max_segment = i915_sg_segment_size(); 197 + unsigned int max_segment = i915_sg_segment_size(i915->drm.dev); 198 198 struct sg_table *st; 199 199 struct sgt_iter sgt_iter; 200 200 struct page *page;
+2 -2
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
··· 189 189 struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); 190 190 struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM]; 191 191 struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); 192 - const unsigned int max_segment = i915_sg_segment_size(); 192 + const unsigned int max_segment = i915_sg_segment_size(i915->drm.dev); 193 193 const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT; 194 194 struct file *filp = i915_tt->filp; 195 195 struct sgt_iter sgt_iter; ··· 538 538 ret = sg_alloc_table_from_pages_segment(st, 539 539 ttm->pages, ttm->num_pages, 540 540 0, (unsigned long)ttm->num_pages << PAGE_SHIFT, 541 - i915_sg_segment_size(), GFP_KERNEL); 541 + i915_sg_segment_size(i915_tt->dev), GFP_KERNEL); 542 542 if (ret) { 543 543 st->sgl = NULL; 544 544 return ERR_PTR(ret);
+1 -1
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
··· 129 129 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) 130 130 { 131 131 const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; 132 - unsigned int max_segment = i915_sg_segment_size(); 132 + unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev); 133 133 struct sg_table *st; 134 134 unsigned int sg_page_sizes; 135 135 struct page **pvec;
+1
drivers/gpu/drm/i915/i915_driver.c
··· 353 353 mutex_init(&dev_priv->display.wm.wm_mutex); 354 354 mutex_init(&dev_priv->display.pps.mutex); 355 355 mutex_init(&dev_priv->display.hdcp.comp_mutex); 356 + spin_lock_init(&dev_priv->display.dkl.phy_lock); 356 357 357 358 i915_memcpy_init_early(dev_priv); 358 359 intel_runtime_pm_init_early(&dev_priv->runtime_pm);
+3
drivers/gpu/drm/i915/i915_reg.h
··· 7420 7420 #define _DKL_PHY5_BASE 0x16C000 7421 7421 #define _DKL_PHY6_BASE 0x16D000 7422 7422 7423 + #define DKL_REG_TC_PORT(__reg) \ 7424 + (TC_PORT_1 + ((__reg).reg - _DKL_PHY1_BASE) / (_DKL_PHY2_BASE - _DKL_PHY1_BASE)) 7425 + 7423 7426 /* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */ 7424 7427 #define _DKL_PCS_DW5 0x14 7425 7428 #define DKL_PCS_DW5(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
+20 -12
drivers/gpu/drm/i915/i915_scatterlist.h
··· 9 9 10 10 #include <linux/pfn.h> 11 11 #include <linux/scatterlist.h> 12 - #include <linux/swiotlb.h> 12 + #include <linux/dma-mapping.h> 13 + #include <xen/xen.h> 13 14 14 15 #include "i915_gem.h" 15 16 ··· 128 127 return page_sizes; 129 128 } 130 129 131 - static inline unsigned int i915_sg_segment_size(void) 130 + static inline unsigned int i915_sg_segment_size(struct device *dev) 132 131 { 133 - unsigned int size = swiotlb_max_segment(); 132 + size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev)); 134 133 135 - if (size == 0) 136 - size = UINT_MAX; 137 - 138 - size = rounddown(size, PAGE_SIZE); 139 - /* swiotlb_max_segment_size can return 1 byte when it means one page. */ 140 - if (size < PAGE_SIZE) 141 - size = PAGE_SIZE; 142 - 143 - return size; 134 + /* 135 + * For Xen PV guests pages aren't contiguous in DMA (machine) address 136 + * space. The DMA API takes care of that both in dma_alloc_* (by 137 + * calling into the hypervisor to make the pages contiguous) and in 138 + * dma_map_* (by bounce buffering). But i915 abuses ignores the 139 + * coherency aspects of the DMA API and thus can't cope with bounce 140 + * buffering actually happening, so add a hack here to force small 141 + * allocations and mappings when running in PV mode on Xen. 142 + * 143 + * Note this will still break if bounce buffering is required for other 144 + * reasons, like confidential computing hypervisors or PCIe root ports 145 + * with addressing limitations. 146 + */ 147 + if (xen_pv_domain()) 148 + max = PAGE_SIZE; 149 + return round_down(max, PAGE_SIZE); 144 150 } 145 151 146 152 bool i915_sg_trim(struct sg_table *orig_st);
-1
drivers/gpu/drm/imx/Kconfig
··· 4 4 select DRM_KMS_HELPER 5 5 select VIDEOMODE_HELPERS 6 6 select DRM_GEM_DMA_HELPER 7 - select DRM_KMS_HELPER 8 7 depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST) 9 8 depends on IMX_IPUV3_CORE 10 9 help
+3 -2
drivers/gpu/drm/imx/imx-tve.c
··· 218 218 return ret; 219 219 } 220 220 221 - static int imx_tve_connector_mode_valid(struct drm_connector *connector, 222 - struct drm_display_mode *mode) 221 + static enum drm_mode_status 222 + imx_tve_connector_mode_valid(struct drm_connector *connector, 223 + struct drm_display_mode *mode) 223 224 { 224 225 struct imx_tve *tve = con_to_tve(connector); 225 226 unsigned long rate;
+19 -7
drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
··· 752 752 static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi,
753 753 int mux)
754 754 {
755 - if (dsi->cdata->lcdsel_grf_reg < 0)
755 + if (dsi->cdata->lcdsel_grf_reg)
756 756 regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
757 757 mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
758 758 }
··· 1051 1051 if (ret) {
1052 1052 DRM_DEV_ERROR(dsi->dev, "Failed to register component: %d\n",
1053 1053 ret);
1054 - return ret;
1054 + goto out;
1055 1055 }
1056 1056
1057 1057 second = dw_mipi_dsi_rockchip_find_second(dsi);
1058 - if (IS_ERR(second))
1059 - return PTR_ERR(second);
1058 + if (IS_ERR(second)) {
1059 + ret = PTR_ERR(second);
1060 + goto out;
1061 + }
1060 1062 if (second) {
1061 1063 ret = component_add(second, &dw_mipi_dsi_rockchip_ops);
1062 1064 if (ret) {
1063 1065 DRM_DEV_ERROR(second,
1064 1066 "Failed to register component: %d\n",
1065 1067 ret);
1066 - return ret;
1068 + goto out;
1067 1069 }
1068 1070 }
1069 1071
1070 1072 return 0;
1073 +
1074 + out:
1075 + mutex_lock(&dsi->usage_mutex);
1076 + dsi->usage_mode = DW_DSI_USAGE_IDLE;
1077 + mutex_unlock(&dsi->usage_mutex);
1078 + return ret;
1071 1079 }
1072 1080
1073 1081 static int dw_mipi_dsi_rockchip_host_detach(void *priv_data,
··· 1643 1635 static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = {
1644 1636 {
1645 1637 .reg = 0xfe060000,
1646 - .lcdsel_grf_reg = -1,
1647 1638 .lanecfg1_grf_reg = RK3568_GRF_VO_CON2,
1648 1639 .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS |
1649 1640 RK3568_DSI0_FORCETXSTOPMODE |
··· 1652 1645 },
1653 1646 {
1654 1647 .reg = 0xfe070000,
1655 - .lcdsel_grf_reg = -1,
1656 1648 .lanecfg1_grf_reg = RK3568_GRF_VO_CON3,
1657 1649 .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS |
1658 1650 RK3568_DSI1_FORCETXSTOPMODE |
··· 1687 1681 .of_match_table = dw_mipi_dsi_rockchip_dt_ids,
1688 1682 .pm = &dw_mipi_dsi_rockchip_pm_ops,
1689 1683 .name = "dw-mipi-dsi-rockchip",
1684 + /*
1685 + * For dual-DSI display, one DSI pokes at the other DSI's
1686 + * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not
1687 + * safe for asynchronous probe.
1688 + */
1689 + .probe_type = PROBE_FORCE_SYNCHRONOUS,
1690 1690 },
1691 1691 };
+2 -1
drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
··· 565 565 566 566 ret = rockchip_hdmi_parse_dt(hdmi); 567 567 if (ret) { 568 - DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n"); 568 + if (ret != -EPROBE_DEFER) 569 + DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n"); 569 570 return ret; 570 571 } 571 572
+4 -1
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
··· 364 364 { 365 365 struct rockchip_gem_object *rk_obj; 366 366 struct drm_gem_object *obj; 367 + bool is_framebuffer; 367 368 int ret; 368 369 369 - rk_obj = rockchip_gem_create_object(drm, size, false); 370 + is_framebuffer = drm->fb_helper && file_priv == drm->fb_helper->client.file; 371 + 372 + rk_obj = rockchip_gem_create_object(drm, size, is_framebuffer); 370 373 if (IS_ERR(rk_obj)) 371 374 return ERR_CAST(rk_obj); 372 375
+8 -2
drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
··· 877 877 { 878 878 struct vop2_video_port *vp = to_vop2_video_port(crtc); 879 879 struct vop2 *vop2 = vp->vop2; 880 + struct drm_crtc_state *old_crtc_state; 880 881 int ret; 881 882 882 883 vop2_lock(vop2); 884 + 885 + old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); 886 + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); 883 887 884 888 drm_crtc_vblank_off(crtc); 885 889 ··· 1000 996 static void vop2_plane_atomic_disable(struct drm_plane *plane, 1001 997 struct drm_atomic_state *state) 1002 998 { 1003 - struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state, plane); 999 + struct drm_plane_state *old_pstate = NULL; 1004 1000 struct vop2_win *win = to_vop2_win(plane); 1005 1001 struct vop2 *vop2 = win->vop2; 1006 1002 1007 1003 drm_dbg(vop2->drm, "%s disable\n", win->data->name); 1008 1004 1009 - if (!old_pstate->crtc) 1005 + if (state) 1006 + old_pstate = drm_atomic_get_old_plane_state(state, plane); 1007 + if (old_pstate && !old_pstate->crtc) 1010 1008 return; 1011 1009 1012 1010 vop2_win_disable(win);
-1
drivers/hwmon/pmbus/pmbus.h
··· 467 467 #define PMBUS_REGULATOR_STEP(_name, _id, _voltages, _step) \ 468 468 [_id] = { \ 469 469 .name = (_name # _id), \ 470 - .supply_name = "vin", \ 471 470 .id = (_id), \ 472 471 .of_match = of_match_ptr(_name # _id), \ 473 472 .regulators_node = of_match_ptr("regulators"), \
+103 -13
drivers/hwmon/scmi-hwmon.c
··· 20 20 const struct scmi_sensor_info **info[hwmon_max]; 21 21 }; 22 22 23 + struct scmi_thermal_sensor { 24 + const struct scmi_protocol_handle *ph; 25 + const struct scmi_sensor_info *info; 26 + }; 27 + 23 28 static inline u64 __pow10(u8 x) 24 29 { 25 30 u64 r = 1; ··· 69 64 return 0; 70 65 } 71 66 72 - static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, 73 - u32 attr, int channel, long *val) 67 + static int scmi_hwmon_read_scaled_value(const struct scmi_protocol_handle *ph, 68 + const struct scmi_sensor_info *sensor, 69 + long *val) 74 70 { 75 71 int ret; 76 72 u64 value; 77 - const struct scmi_sensor_info *sensor; 78 - struct scmi_sensors *scmi_sensors = dev_get_drvdata(dev); 79 73 80 - sensor = *(scmi_sensors->info[type] + channel); 81 - ret = sensor_ops->reading_get(scmi_sensors->ph, sensor->id, &value); 74 + ret = sensor_ops->reading_get(ph, sensor->id, &value); 82 75 if (ret) 83 76 return ret; 84 77 ··· 85 82 *val = value; 86 83 87 84 return ret; 85 + } 86 + 87 + static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, 88 + u32 attr, int channel, long *val) 89 + { 90 + const struct scmi_sensor_info *sensor; 91 + struct scmi_sensors *scmi_sensors = dev_get_drvdata(dev); 92 + 93 + sensor = *(scmi_sensors->info[type] + channel); 94 + 95 + return scmi_hwmon_read_scaled_value(scmi_sensors->ph, sensor, val); 88 96 } 89 97 90 98 static int ··· 136 122 .info = NULL, 137 123 }; 138 124 125 + static int scmi_hwmon_thermal_get_temp(struct thermal_zone_device *tz, 126 + int *temp) 127 + { 128 + int ret; 129 + long value; 130 + struct scmi_thermal_sensor *th_sensor = tz->devdata; 131 + 132 + ret = scmi_hwmon_read_scaled_value(th_sensor->ph, th_sensor->info, 133 + &value); 134 + if (!ret) 135 + *temp = value; 136 + 137 + return ret; 138 + } 139 + 140 + static const struct thermal_zone_device_ops scmi_hwmon_thermal_ops = { 141 + .get_temp = scmi_hwmon_thermal_get_temp, 142 + }; 143 + 139 144 static int 
scmi_hwmon_add_chan_info(struct hwmon_channel_info *scmi_hwmon_chan, 140 145 struct device *dev, int num, 141 146 enum hwmon_sensor_types type, u32 config) ··· 182 149 }; 183 150 184 151 static u32 hwmon_attributes[hwmon_max] = { 185 - [hwmon_chip] = HWMON_C_REGISTER_TZ, 186 152 [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL, 187 153 [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL, 188 154 [hwmon_curr] = HWMON_C_INPUT | HWMON_C_LABEL, 189 155 [hwmon_power] = HWMON_P_INPUT | HWMON_P_LABEL, 190 156 [hwmon_energy] = HWMON_E_INPUT | HWMON_E_LABEL, 191 157 }; 158 + 159 + static int scmi_thermal_sensor_register(struct device *dev, 160 + const struct scmi_protocol_handle *ph, 161 + const struct scmi_sensor_info *sensor) 162 + { 163 + struct scmi_thermal_sensor *th_sensor; 164 + struct thermal_zone_device *tzd; 165 + 166 + th_sensor = devm_kzalloc(dev, sizeof(*th_sensor), GFP_KERNEL); 167 + if (!th_sensor) 168 + return -ENOMEM; 169 + 170 + th_sensor->ph = ph; 171 + th_sensor->info = sensor; 172 + 173 + /* 174 + * Try to register a temperature sensor with the Thermal Framework: 175 + * skip sensors not defined as part of any thermal zone (-ENODEV) but 176 + * report any other errors related to misconfigured zones/sensors. 
177 + */
178 + tzd = devm_thermal_of_zone_register(dev, th_sensor->info->id, th_sensor,
179 + &scmi_hwmon_thermal_ops);
180 + if (IS_ERR(tzd)) {
181 + devm_kfree(dev, th_sensor);
182 +
183 + if (PTR_ERR(tzd) != -ENODEV)
184 + return PTR_ERR(tzd);
185 +
186 + dev_dbg(dev, "Sensor '%s' not attached to any thermal zone.\n",
187 + sensor->name);
188 + } else {
189 + dev_dbg(dev, "Sensor '%s' attached to thermal zone ID:%d\n",
190 + sensor->name, tzd->id);
191 + }
192 +
193 + return 0;
194 + }
192 195
193 196 static int scmi_hwmon_probe(struct scmi_device *sdev)
194 197 {
··· 233 164 enum hwmon_sensor_types type;
234 165 struct scmi_sensors *scmi_sensors;
235 166 const struct scmi_sensor_info *sensor;
236 - int nr_count[hwmon_max] = {0}, nr_types = 0;
167 + int nr_count[hwmon_max] = {0}, nr_types = 0, nr_count_temp = 0;
237 168 const struct hwmon_chip_info *chip_info;
238 169 struct device *hwdev, *dev = &sdev->dev;
239 170 struct hwmon_channel_info *scmi_hwmon_chan;
··· 277 208 }
278 209 }
279 210
280 - if (nr_count[hwmon_temp]) {
281 - nr_count[hwmon_chip]++;
282 - nr_types++;
283 - }
211 + if (nr_count[hwmon_temp])
212 + nr_count_temp = nr_count[hwmon_temp];
284 213
285 214 scmi_hwmon_chan = devm_kcalloc(dev, nr_types, sizeof(*scmi_hwmon_chan),
286 215 GFP_KERNEL);
··· 329 262 hwdev = devm_hwmon_device_register_with_info(dev, "scmi_sensors",
330 263 scmi_sensors, chip_info,
331 264 NULL);
265 + if (IS_ERR(hwdev))
266 + return PTR_ERR(hwdev);
332 267
333 - return PTR_ERR_OR_ZERO(hwdev);
268 + for (i = 0; i < nr_count_temp; i++) {
269 + int ret;
270 +
271 + sensor = *(scmi_sensors->info[hwmon_temp] + i);
272 + if (!sensor)
273 + continue;
274 +
275 + /*
276 + * Warn on any misconfiguration related to thermal zones but
277 + * bail out of probing only on memory errors.
278 + */
279 + ret = scmi_thermal_sensor_register(dev, ph, sensor);
280 + if (ret) {
281 + if (ret == -ENOMEM)
282 + return ret;
283 + dev_warn(dev,
284 + "Thermal zone misconfigured for %s. err=%d\n",
285 + sensor->name, ret);
286 + }
287 + }
288 +
289 + return 0;
334 290 }
335 291
336 292 static const struct scmi_device_id scmi_id_table[] = {
+1
drivers/i2c/busses/i2c-i801.c
··· 1243 1243 */ 1244 1244 { "Latitude 5480", 0x29 }, 1245 1245 { "Vostro V131", 0x1d }, 1246 + { "Vostro 5568", 0x29 }, 1246 1247 }; 1247 1248 1248 1249 static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv)
+1
drivers/i2c/busses/i2c-piix4.c
··· 1080 1080 "", &piix4_main_adapters[0]); 1081 1081 if (retval < 0) 1082 1082 return retval; 1083 + piix4_adapter_count = 1; 1083 1084 } 1084 1085 1085 1086 /* Check for auxiliary SMBus on some AMD chipsets */
+10 -6
drivers/i2c/busses/i2c-tegra.c
··· 284 284 struct dma_chan *tx_dma_chan;
285 285 struct dma_chan *rx_dma_chan;
286 286 unsigned int dma_buf_size;
287 + struct device *dma_dev;
287 288 dma_addr_t dma_phys;
288 289 void *dma_buf;
289 290
··· 421 420 static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev)
422 421 {
423 422 if (i2c_dev->dma_buf) {
424 - dma_free_coherent(i2c_dev->dev, i2c_dev->dma_buf_size,
423 + dma_free_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size,
425 424 i2c_dev->dma_buf, i2c_dev->dma_phys);
426 425 i2c_dev->dma_buf = NULL;
427 426 }
··· 473 472
474 473 i2c_dev->tx_dma_chan = chan;
475 474
475 + WARN_ON(i2c_dev->tx_dma_chan->device != i2c_dev->rx_dma_chan->device);
476 + i2c_dev->dma_dev = chan->device->dev;
477 +
476 478 i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len +
477 479 I2C_PACKET_HEADER_SIZE;
478 480
479 - dma_buf = dma_alloc_coherent(i2c_dev->dev, i2c_dev->dma_buf_size,
481 + dma_buf = dma_alloc_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size,
480 482 &dma_phys, GFP_KERNEL | __GFP_NOWARN);
481 483 if (!dma_buf) {
482 484 dev_err(i2c_dev->dev, "failed to allocate DMA buffer\n");
··· 1276 1272
1277 1273 if (i2c_dev->dma_mode) {
1278 1274 if (i2c_dev->msg_read) {
1279 - dma_sync_single_for_device(i2c_dev->dev,
1275 + dma_sync_single_for_device(i2c_dev->dma_dev,
1280 1276 i2c_dev->dma_phys,
1281 1277 xfer_size, DMA_FROM_DEVICE);
1282 1278
1284 1280 if (err)
1285 1281 return err;
1286 1282 } else {
1287 - dma_sync_single_for_cpu(i2c_dev->dev,
1283 + dma_sync_single_for_cpu(i2c_dev->dma_dev,
1288 1284 i2c_dev->dma_phys,
1289 1285 xfer_size, DMA_TO_DEVICE);
1290 1286 }
··· 1297 1293 memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE,
1298 1294 msg->buf, msg->len);
1299 1295
1300 - dma_sync_single_for_device(i2c_dev->dev,
1296 + dma_sync_single_for_device(i2c_dev->dma_dev,
1301 1297 i2c_dev->dma_phys,
1302 1298 xfer_size, DMA_TO_DEVICE);
1303 1299
··· 1348 1344 }
1349 1345
1350 1346 if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) {
1351 - dma_sync_single_for_cpu(i2c_dev->dev,
1347 + dma_sync_single_for_cpu(i2c_dev->dma_dev,
1352 1348 i2c_dev->dma_phys,
1353 1349 xfer_size, DMA_FROM_DEVICE);
1354 1350
+1 -1
drivers/isdn/hardware/mISDN/netjet.c
··· 956 956 } 957 957 if (card->irq > 0) 958 958 free_irq(card->irq, card); 959 - if (card->isac.dch.dev.dev.class) 959 + if (device_is_registered(&card->isac.dch.dev.dev)) 960 960 mISDN_unregister_device(&card->isac.dch.dev); 961 961 962 962 for (i = 0; i < 2; i++) {
+3 -2
drivers/isdn/mISDN/core.c
··· 233 233 if (debug & DEBUG_CORE) 234 234 printk(KERN_DEBUG "mISDN_register %s %d\n", 235 235 dev_name(&dev->dev), dev->id); 236 + dev->dev.class = &mISDN_class; 237 + 236 238 err = create_stack(dev); 237 239 if (err) 238 240 goto error1; 239 241 240 - dev->dev.class = &mISDN_class; 241 242 dev->dev.platform_data = dev; 242 243 dev->dev.parent = parent; 243 244 dev_set_drvdata(&dev->dev, dev); ··· 250 249 251 250 error3: 252 251 delete_stack(dev); 253 - return err; 254 252 error1: 253 + put_device(&dev->dev); 255 254 return err; 256 255 257 256 }
+18 -7
drivers/net/dsa/dsa_loop.c
··· 376 376 377 377 #define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2) 378 378 379 + static void dsa_loop_phydevs_unregister(void) 380 + { 381 + unsigned int i; 382 + 383 + for (i = 0; i < NUM_FIXED_PHYS; i++) 384 + if (!IS_ERR(phydevs[i])) { 385 + fixed_phy_unregister(phydevs[i]); 386 + phy_device_free(phydevs[i]); 387 + } 388 + } 389 + 379 390 static int __init dsa_loop_init(void) 380 391 { 381 392 struct fixed_phy_status status = { ··· 394 383 .speed = SPEED_100, 395 384 .duplex = DUPLEX_FULL, 396 385 }; 397 - unsigned int i; 386 + unsigned int i, ret; 398 387 399 388 for (i = 0; i < NUM_FIXED_PHYS; i++) 400 389 phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL); 401 390 402 - return mdio_driver_register(&dsa_loop_drv); 391 + ret = mdio_driver_register(&dsa_loop_drv); 392 + if (ret) 393 + dsa_loop_phydevs_unregister(); 394 + 395 + return ret; 403 396 } 404 397 module_init(dsa_loop_init); 405 398 406 399 static void __exit dsa_loop_exit(void) 407 400 { 408 - unsigned int i; 409 - 410 401 mdio_driver_unregister(&dsa_loop_drv); 411 - for (i = 0; i < NUM_FIXED_PHYS; i++) 412 - if (!IS_ERR(phydevs[i])) 413 - fixed_phy_unregister(phydevs[i]); 402 + dsa_loop_phydevs_unregister(); 414 403 } 415 404 module_exit(dsa_loop_exit); 416 405
+29 -9
drivers/net/ethernet/adi/adin1110.c
··· 1512 1512 .notifier_call = adin1110_switchdev_event,
1513 1513 };
1514 1514
1515 - static void adin1110_unregister_notifiers(void *data)
1515 + static void adin1110_unregister_notifiers(void)
1516 1516 {
1517 1517 unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
1518 1518 unregister_switchdev_notifier(&adin1110_switchdev_notifier);
1519 1519 unregister_netdevice_notifier(&adin1110_netdevice_nb);
1520 1520 }
1521 1521
1522 - static int adin1110_setup_notifiers(struct adin1110_priv *priv)
1522 + static int adin1110_setup_notifiers(void)
1523 1523 {
1524 - struct device *dev = &priv->spidev->dev;
1525 1524 int ret;
1526 1525
1527 1526 ret = register_netdevice_notifier(&adin1110_netdevice_nb);
··· 1535 1536 if (ret < 0)
1536 1537 goto err_sdev;
1537 1538
1538 - return devm_add_action_or_reset(dev, adin1110_unregister_notifiers, NULL);
1539 + return 0;
1539 1540
1540 1541 err_sdev:
1541 1542 unregister_switchdev_notifier(&adin1110_switchdev_notifier);
1542 1543
1543 1544 err_netdev:
1544 1545 unregister_netdevice_notifier(&adin1110_netdevice_nb);
1546 +
1545 1547 return ret;
1546 1548 }
1547 1549
··· 1610 1610 adin1110_irq,
1611 1611 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1612 1612 dev_name(dev), priv);
1613 - if (ret < 0)
1614 - return ret;
1615 -
1616 - ret = adin1110_setup_notifiers(priv);
1617 1613 if (ret < 0)
1618 1614 return ret;
1619 1615
··· 1689 1693 .probe = adin1110_probe,
1690 1694 .id_table = adin1110_spi_id,
1691 1695 };
1692 - module_spi_driver(adin1110_driver);
1696 +
1697 + static int __init adin1110_driver_init(void)
1698 + {
1699 + int ret;
1700 +
1701 + ret = adin1110_setup_notifiers();
1702 + if (ret < 0)
1703 + return ret;
1704 +
1705 + ret = spi_register_driver(&adin1110_driver);
1706 + if (ret < 0) {
1707 + adin1110_unregister_notifiers();
1708 + return ret;
1709 + }
1710 +
1711 + return 0;
1712 + }
1713 +
1714 + static void __exit adin1110_exit(void)
1715 + {
1716 + adin1110_unregister_notifiers();
1717 + spi_unregister_driver(&adin1110_driver);
1718 + }
1719 + module_init(adin1110_driver_init);
1720 + module_exit(adin1110_exit);
1693 1721
1694 1722 MODULE_DESCRIPTION("ADIN1110 Network driver");
1695 1723 MODULE_AUTHOR("Alexandru Tachici <alexandru.tachici@analog.com>");
+2 -2
drivers/net/ethernet/freescale/fec_main.c
··· 713 713 dev_kfree_skb_any(skb); 714 714 if (net_ratelimit()) 715 715 netdev_err(ndev, "Tx DMA memory map failed\n"); 716 - return NETDEV_TX_BUSY; 716 + return NETDEV_TX_OK; 717 717 } 718 718 719 719 bdp->cbd_datlen = cpu_to_fec16(size); ··· 775 775 dev_kfree_skb_any(skb); 776 776 if (net_ratelimit()) 777 777 netdev_err(ndev, "Tx DMA memory map failed\n"); 778 - return NETDEV_TX_BUSY; 778 + return NETDEV_TX_OK; 779 779 } 780 780 } 781 781
+8 -8
drivers/net/ethernet/ibm/ibmvnic.c
··· 3007 3007 rwi = get_next_rwi(adapter); 3008 3008 3009 3009 /* 3010 - * If there is another reset queued, free the previous rwi 3011 - * and process the new reset even if previous reset failed 3012 - * (the previous reset could have failed because of a fail 3013 - * over for instance, so process the fail over). 3014 - * 3015 3010 * If there are no resets queued and the previous reset failed, 3016 3011 * the adapter would be in an undefined state. So retry the 3017 3012 * previous reset as a hard reset. 3013 + * 3014 + * Else, free the previous rwi and, if there is another reset 3015 + * queued, process the new reset even if previous reset failed 3016 + * (the previous reset could have failed because of a fail 3017 + * over for instance, so process the fail over). 3018 3018 */ 3019 - if (rwi) 3020 - kfree(tmprwi); 3021 - else if (rc) 3019 + if (!rwi && rc) 3022 3020 rwi = tmprwi; 3021 + else 3022 + kfree(tmprwi); 3023 3023 3024 3024 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || 3025 3025 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
+18 -8
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
··· 414 414 /* Get the received frame and unmap it */
415 415 db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
416 416 page = rx->page[rx->dcb_index][rx->db_index];
417 +
418 + dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr,
419 + FDMA_DCB_STATUS_BLOCKL(db->status),
420 + DMA_FROM_DEVICE);
421 +
417 422 skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
418 423 if (unlikely(!skb))
419 424 goto unmap_page;
420 425
421 - dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr,
422 - FDMA_DCB_STATUS_BLOCKL(db->status),
423 - DMA_FROM_DEVICE);
424 426 skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
425 427
426 428 lan966x_ifh_get_src_port(skb->data, &src_port);
··· 430 428
431 429 if (WARN_ON(src_port >= lan966x->num_phys_ports))
432 430 goto free_skb;
431 +
432 + dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
433 + PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
434 + DMA_ATTR_SKIP_CPU_SYNC);
433 435
434 436 skb->dev = lan966x->ports[src_port]->dev;
435 437 skb_pull(skb, IFH_LEN * sizeof(u32));
··· 460 454 free_skb:
461 455 kfree_skb(skb);
462 456 unmap_page:
463 - dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
464 - FDMA_DCB_STATUS_BLOCKL(db->status),
465 - DMA_FROM_DEVICE);
457 + dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
458 + PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
459 + DMA_ATTR_SKIP_CPU_SYNC);
466 460 __free_pages(page, rx->page_order);
467 461
468 462 return NULL;
··· 674 668 int i;
675 669
676 670 for (i = 0; i < lan966x->num_phys_ports; ++i) {
671 + struct lan966x_port *port;
677 672 int mtu;
678 673
679 - if (!lan966x->ports[i])
674 + port = lan966x->ports[i];
675 + if (!port)
680 676 continue;
681 677
682 - mtu = lan966x->ports[i]->dev->mtu;
678 + mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
683 679 if (mtu > max_mtu)
684 680 max_mtu = mtu;
685 681 }
··· 741 733
742 734 max_mtu = lan966x_fdma_get_max_mtu(lan966x);
743 735 max_mtu += IFH_LEN * sizeof(u32);
736 + max_mtu += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
737 + max_mtu += VLAN_HLEN * 2;
744 738
745 739 if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
746 740 lan966x->rx.page_order)
+2 -2
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
··· 386 386 int old_mtu = dev->mtu; 387 387 int err; 388 388 389 - lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu), 389 + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)), 390 390 lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); 391 391 dev->mtu = new_mtu; 392 392 ··· 395 395 396 396 err = lan966x_fdma_change_mtu(lan966x); 397 397 if (err) { 398 - lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(old_mtu), 398 + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(old_mtu)), 399 399 lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); 400 400 dev->mtu = old_mtu; 401 401 }
+2
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
··· 26 26 #define LAN966X_BUFFER_MEMORY (160 * 1024) 27 27 #define LAN966X_BUFFER_MIN_SZ 60 28 28 29 + #define LAN966X_HW_MTU(mtu) ((mtu) + ETH_HLEN + ETH_FCS_LEN) 30 + 29 31 #define PGID_AGGR 64 30 32 #define PGID_SRC 80 31 33 #define PGID_ENTRIES 89
+15
drivers/net/ethernet/microchip/lan966x/lan966x_regs.h
··· 585 585 #define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ 586 586 FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) 587 587 588 + /* DEV:MAC_CFG_STATUS:MAC_TAGS_CFG */ 589 + #define DEV_MAC_TAGS_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 12, 0, 1, 4) 590 + 591 + #define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1) 592 + #define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(x)\ 593 + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x) 594 + #define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_GET(x)\ 595 + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x) 596 + 597 + #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) 598 + #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\ 599 + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) 600 + #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\ 601 + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) 602 + 588 603 /* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */ 589 604 #define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4) 590 605
+6
drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
··· 169 169 ANA_VLAN_CFG_VLAN_POP_CNT, 170 170 lan966x, ANA_VLAN_CFG(port->chip_port)); 171 171 172 + lan_rmw(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(port->vlan_aware) | 173 + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(port->vlan_aware), 174 + DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | 175 + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, 176 + lan966x, DEV_MAC_TAGS_CFG(port->chip_port)); 177 + 172 178 /* Drop frames with multicast source address */ 173 179 val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1); 174 180 if (port->vlan_aware && !pvid)
+6 -2
drivers/net/ethernet/sfc/efx.c
··· 1059 1059 1060 1060 /* Allocate and initialise a struct net_device */ 1061 1061 net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES); 1062 - if (!net_dev) 1063 - return -ENOMEM; 1062 + if (!net_dev) { 1063 + rc = -ENOMEM; 1064 + goto fail0; 1065 + } 1064 1066 probe_ptr = netdev_priv(net_dev); 1065 1067 *probe_ptr = probe_data; 1066 1068 efx->net_dev = net_dev; ··· 1134 1132 WARN_ON(rc > 0); 1135 1133 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); 1136 1134 free_netdev(net_dev); 1135 + fail0: 1136 + kfree(probe_data); 1137 1137 return rc; 1138 1138 } 1139 1139
+2 -5
drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
··· 51 51 struct stmmac_resources res; 52 52 struct device_node *np; 53 53 int ret, i, phy_mode; 54 - bool mdio = false; 55 54 56 55 np = dev_of_node(&pdev->dev); 57 56 ··· 68 69 if (!plat) 69 70 return -ENOMEM; 70 71 72 + plat->mdio_node = of_get_child_by_name(np, "mdio"); 71 73 if (plat->mdio_node) { 72 - dev_err(&pdev->dev, "Found MDIO subnode\n"); 73 - mdio = true; 74 - } 74 + dev_info(&pdev->dev, "Found MDIO subnode\n"); 75 75 76 - if (mdio) { 77 76 plat->mdio_bus_data = devm_kzalloc(&pdev->dev, 78 77 sizeof(*plat->mdio_bus_data), 79 78 GFP_KERNEL);
+1 -1
drivers/net/ethernet/xilinx/xilinx_emaclite.c
··· 108 108 * @next_tx_buf_to_use: next Tx buffer to write to 109 109 * @next_rx_buf_to_use: next Rx buffer to read from 110 110 * @base_addr: base address of the Emaclite device 111 - * @reset_lock: lock used for synchronization 111 + * @reset_lock: lock to serialize xmit and tx_timeout execution 112 112 * @deferred_skb: holds an skb (for transmission at a later time) when the 113 113 * Tx buffer is not free 114 114 * @phy_dev: pointer to the PHY device
+1 -1
drivers/net/phy/mdio_bus.c
··· 583 583 } 584 584 585 585 for (i = 0; i < PHY_MAX_ADDR; i++) { 586 - if ((bus->phy_mask & (1 << i)) == 0) { 586 + if ((bus->phy_mask & BIT(i)) == 0) { 587 587 struct phy_device *phydev; 588 588 589 589 phydev = mdiobus_scan(bus, i);
+2 -1
drivers/net/tun.c
··· 1459 1459 int err; 1460 1460 int i; 1461 1461 1462 - if (it->nr_segs > MAX_SKB_FRAGS + 1) 1462 + if (it->nr_segs > MAX_SKB_FRAGS + 1 || 1463 + len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) 1463 1464 return ERR_PTR(-EMSGSIZE); 1464 1465 1465 1466 local_bh_disable();
+9 -1
drivers/nfc/fdp/fdp.c
··· 249 249 static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) 250 250 { 251 251 struct fdp_nci_info *info = nci_get_drvdata(ndev); 252 + int ret; 252 253 253 254 if (atomic_dec_and_test(&info->data_pkt_counter)) 254 255 info->data_pkt_counter_cb(ndev); 255 256 256 - return info->phy_ops->write(info->phy, skb); 257 + ret = info->phy_ops->write(info->phy, skb); 258 + if (ret < 0) { 259 + kfree_skb(skb); 260 + return ret; 261 + } 262 + 263 + consume_skb(skb); 264 + return 0; 257 265 } 258 266 259 267 static int fdp_nci_request_firmware(struct nci_dev *ndev)
+7 -2
drivers/nfc/nfcmrvl/i2c.c
··· 132 132 ret = -EREMOTEIO; 133 133 } else 134 134 ret = 0; 135 - kfree_skb(skb); 136 135 } 137 136 138 - return ret; 137 + if (ret) { 138 + kfree_skb(skb); 139 + return ret; 140 + } 141 + 142 + consume_skb(skb); 143 + return 0; 139 144 } 140 145 141 146 static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv,
+5 -2
drivers/nfc/nxp-nci/core.c
··· 80 80 return -EINVAL; 81 81 82 82 r = info->phy_ops->write(info->phy_id, skb); 83 - if (r < 0) 83 + if (r < 0) { 84 84 kfree_skb(skb); 85 + return r; 86 + } 85 87 86 - return r; 88 + consume_skb(skb); 89 + return 0; 87 90 } 88 91 89 92 static int nxp_nci_rf_pll_unlocked_ntf(struct nci_dev *ndev,
+6 -2
drivers/nfc/s3fwrn5/core.c
··· 110 110 } 111 111 112 112 ret = s3fwrn5_write(info, skb); 113 - if (ret < 0) 113 + if (ret < 0) { 114 114 kfree_skb(skb); 115 + mutex_unlock(&info->mutex); 116 + return ret; 117 + } 115 118 119 + consume_skb(skb); 116 120 mutex_unlock(&info->mutex); 117 - return ret; 121 + return 0; 118 122 } 119 123 120 124 static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
+15 -2
drivers/soc/imx/imx93-pd.c
··· 135 135 136 136 ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off); 137 137 if (ret) 138 - return ret; 138 + goto err_clk_unprepare; 139 139 140 140 platform_set_drvdata(pdev, domain); 141 141 142 - return of_genpd_add_provider_simple(np, &domain->genpd); 142 + ret = of_genpd_add_provider_simple(np, &domain->genpd); 143 + if (ret) 144 + goto err_genpd_remove; 145 + 146 + return 0; 147 + 148 + err_genpd_remove: 149 + pm_genpd_remove(&domain->genpd); 150 + 151 + err_clk_unprepare: 152 + if (!domain->init_off) 153 + clk_bulk_disable_unprepare(domain->num_clks, domain->clks); 154 + 155 + return ret; 143 156 } 144 157 145 158 static const struct of_device_id imx93_pd_ids[] = {
+34 -20
fs/btrfs/backref.c
··· 289 289 struct prelim_ref *ref, *next_ref; 290 290 291 291 rbtree_postorder_for_each_entry_safe(ref, next_ref, 292 - &preftree->root.rb_root, rbnode) 292 + &preftree->root.rb_root, rbnode) { 293 + free_inode_elem_list(ref->inode_list); 293 294 free_pref(ref); 295 + } 294 296 295 297 preftree->root = RB_ROOT_CACHED; 296 298 preftree->count = 0; ··· 650 648 return (struct extent_inode_elem *)(uintptr_t)node->aux; 651 649 } 652 650 651 + static void free_leaf_list(struct ulist *ulist) 652 + { 653 + struct ulist_node *node; 654 + struct ulist_iterator uiter; 655 + 656 + ULIST_ITER_INIT(&uiter); 657 + while ((node = ulist_next(ulist, &uiter))) 658 + free_inode_elem_list(unode_aux_to_inode_list(node)); 659 + 660 + ulist_free(ulist); 661 + } 662 + 653 663 /* 654 664 * We maintain three separate rbtrees: one for direct refs, one for 655 665 * indirect refs which have a key, and one for indirect refs which do not ··· 776 762 cond_resched(); 777 763 } 778 764 out: 779 - ulist_free(parents); 765 + /* 766 + * We may have inode lists attached to refs in the parents ulist, so we 767 + * must free them before freeing the ulist and its refs. 768 + */ 769 + free_leaf_list(parents); 780 770 return ret; 781 771 } 782 772 ··· 1386 1368 if (ret < 0) 1387 1369 goto out; 1388 1370 ref->inode_list = eie; 1371 + /* 1372 + * We transferred the list ownership to the ref, 1373 + * so set to NULL to avoid a double free in case 1374 + * an error happens after this. 1375 + */ 1376 + eie = NULL; 1389 1377 } 1390 1378 ret = ulist_add_merge_ptr(refs, ref->parent, 1391 1379 ref->inode_list, ··· 1417 1393 eie->next = ref->inode_list; 1418 1394 } 1419 1395 eie = NULL; 1396 + /* 1397 + * We have transferred the inode list ownership from 1398 + * this ref to the ref we added to the 'refs' ulist. 1399 + * So set this ref's inode list to NULL to avoid 1400 + * use-after-free when our caller uses it or double 1401 + * frees in case an error happens before we return. 
1402 + */ 1403 + ref->inode_list = NULL; 1420 1404 } 1421 1405 cond_resched(); 1422 1406 } ··· 1439 1407 if (ret < 0) 1440 1408 free_inode_elem_list(eie); 1441 1409 return ret; 1442 - } 1443 - 1444 - static void free_leaf_list(struct ulist *blocks) 1445 - { 1446 - struct ulist_node *node = NULL; 1447 - struct extent_inode_elem *eie; 1448 - struct ulist_iterator uiter; 1449 - 1450 - ULIST_ITER_INIT(&uiter); 1451 - while ((node = ulist_next(blocks, &uiter))) { 1452 - if (!node->aux) 1453 - continue; 1454 - eie = unode_aux_to_inode_list(node); 1455 - free_inode_elem_list(eie); 1456 - node->aux = 0; 1457 - } 1458 - 1459 - ulist_free(blocks); 1460 1410 } 1461 1411 1462 1412 /*
+4 -1
fs/btrfs/ctree.h
··· 3462 3462 ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, 3463 3463 const struct btrfs_ioctl_encoded_io_args *encoded); 3464 3464 3465 - ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_before); 3465 + ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, 3466 + size_t done_before); 3467 + struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, 3468 + size_t done_before); 3466 3469 3467 3470 extern const struct dentry_operations btrfs_dentry_operations; 3468 3471
+22 -7
fs/btrfs/file.c
··· 1598 1598 write_bytes); 1599 1599 else 1600 1600 btrfs_check_nocow_unlock(BTRFS_I(inode)); 1601 + 1602 + if (nowait && ret == -ENOSPC) 1603 + ret = -EAGAIN; 1601 1604 break; 1602 1605 } 1603 1606 1604 1607 release_bytes = reserve_bytes; 1605 1608 again: 1606 1609 ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags); 1607 - if (ret) 1610 + if (ret) { 1611 + btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes); 1608 1612 break; 1613 + } 1609 1614 1610 1615 /* 1611 1616 * This is going to setup the pages array with the number of ··· 1770 1765 loff_t endbyte; 1771 1766 ssize_t err; 1772 1767 unsigned int ilock_flags = 0; 1768 + struct iomap_dio *dio; 1773 1769 1774 1770 if (iocb->ki_flags & IOCB_NOWAIT) 1775 1771 ilock_flags |= BTRFS_ILOCK_TRY; ··· 1831 1825 * So here we disable page faults in the iov_iter and then retry if we 1832 1826 * got -EFAULT, faulting in the pages before the retry. 1833 1827 */ 1834 - again: 1835 1828 from->nofault = true; 1836 - err = btrfs_dio_rw(iocb, from, written); 1829 + dio = btrfs_dio_write(iocb, from, written); 1837 1830 from->nofault = false; 1831 + 1832 + /* 1833 + * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync 1834 + * iocb, and that needs to lock the inode. So unlock it before calling 1835 + * iomap_dio_complete() to avoid a deadlock. 1836 + */ 1837 + btrfs_inode_unlock(inode, ilock_flags); 1838 + 1839 + if (IS_ERR_OR_NULL(dio)) 1840 + err = PTR_ERR_OR_ZERO(dio); 1841 + else 1842 + err = iomap_dio_complete(dio); 1838 1843 1839 1844 /* No increment (+=) because iomap returns a cumulative value. 
*/ 1840 1845 if (err > 0) ··· 1872 1855 } else { 1873 1856 fault_in_iov_iter_readable(from, left); 1874 1857 prev_left = left; 1875 - goto again; 1858 + goto relock; 1876 1859 } 1877 1860 } 1878 - 1879 - btrfs_inode_unlock(inode, ilock_flags); 1880 1861 1881 1862 /* 1882 1863 * If 'err' is -ENOTBLK or we have not written all data, then it means ··· 4050 4035 */ 4051 4036 pagefault_disable(); 4052 4037 to->nofault = true; 4053 - ret = btrfs_dio_rw(iocb, to, read); 4038 + ret = btrfs_dio_read(iocb, to, read); 4054 4039 to->nofault = false; 4055 4040 pagefault_enable(); 4056 4041
+12 -4
fs/btrfs/inode.c
··· 7980 7980 */ 7981 7981 status = BLK_STS_RESOURCE; 7982 7982 dip->csums = kcalloc(nr_sectors, fs_info->csum_size, GFP_NOFS); 7983 - if (!dip) 7983 + if (!dip->csums) 7984 7984 goto out_err; 7985 7985 7986 7986 status = btrfs_lookup_bio_sums(inode, dio_bio, dip->csums); ··· 8078 8078 .bio_set = &btrfs_dio_bioset, 8079 8079 }; 8080 8080 8081 - ssize_t btrfs_dio_rw(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) 8081 + ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) 8082 8082 { 8083 8083 struct btrfs_dio_data data; 8084 8084 8085 8085 return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 8086 - IOMAP_DIO_PARTIAL | IOMAP_DIO_NOSYNC, 8087 - &data, done_before); 8086 + IOMAP_DIO_PARTIAL, &data, done_before); 8087 + } 8088 + 8089 + struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, 8090 + size_t done_before) 8091 + { 8092 + struct btrfs_dio_data data; 8093 + 8094 + return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 8095 + IOMAP_DIO_PARTIAL, &data, done_before); 8088 8096 } 8089 8097 8090 8098 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+19 -17
fs/btrfs/tests/qgroup-tests.c
··· 225 225 */ 226 226 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); 227 227 if (ret) { 228 - ulist_free(old_roots); 229 228 test_err("couldn't find old roots: %d", ret); 230 229 return ret; 231 230 } 232 231 233 232 ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, 234 233 BTRFS_FS_TREE_OBJECTID); 235 - if (ret) 234 + if (ret) { 235 + ulist_free(old_roots); 236 236 return ret; 237 + } 237 238 238 239 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); 239 240 if (ret) { 240 241 ulist_free(old_roots); 241 - ulist_free(new_roots); 242 242 test_err("couldn't find old roots: %d", ret); 243 243 return ret; 244 244 } ··· 250 250 return ret; 251 251 } 252 252 253 + /* btrfs_qgroup_account_extent() always frees the ulists passed to it. */ 254 + old_roots = NULL; 255 + new_roots = NULL; 256 + 253 257 if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 254 258 nodesize, nodesize)) { 255 259 test_err("qgroup counts didn't match expected values"); 256 260 return -EINVAL; 257 261 } 258 - old_roots = NULL; 259 - new_roots = NULL; 260 262 261 263 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); 262 264 if (ret) { 263 - ulist_free(old_roots); 264 265 test_err("couldn't find old roots: %d", ret); 265 266 return ret; 266 267 } 267 268 268 269 ret = remove_extent_item(root, nodesize, nodesize); 269 - if (ret) 270 + if (ret) { 271 + ulist_free(old_roots); 270 272 return -EINVAL; 273 + } 271 274 272 275 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); 273 276 if (ret) { 274 277 ulist_free(old_roots); 275 - ulist_free(new_roots); 276 278 test_err("couldn't find old roots: %d", ret); 277 279 return ret; 278 280 } ··· 324 322 325 323 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); 326 324 if (ret) { 327 - ulist_free(old_roots); 328 325 test_err("couldn't find old roots: %d", ret); 329 326 return ret; 330 327 } 331 328 332 329 ret 
= insert_normal_tree_ref(root, nodesize, nodesize, 0, 333 330 BTRFS_FS_TREE_OBJECTID); 334 - if (ret) 331 + if (ret) { 332 + ulist_free(old_roots); 335 333 return ret; 334 + } 336 335 337 336 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); 338 337 if (ret) { 339 338 ulist_free(old_roots); 340 - ulist_free(new_roots); 341 339 test_err("couldn't find old roots: %d", ret); 342 340 return ret; 343 341 } ··· 357 355 358 356 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); 359 357 if (ret) { 360 - ulist_free(old_roots); 361 358 test_err("couldn't find old roots: %d", ret); 362 359 return ret; 363 360 } 364 361 365 362 ret = add_tree_ref(root, nodesize, nodesize, 0, 366 363 BTRFS_FIRST_FREE_OBJECTID); 367 - if (ret) 364 + if (ret) { 365 + ulist_free(old_roots); 368 366 return ret; 367 + } 369 368 370 369 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); 371 370 if (ret) { 372 371 ulist_free(old_roots); 373 - ulist_free(new_roots); 374 372 test_err("couldn't find old roots: %d", ret); 375 373 return ret; 376 374 } ··· 396 394 397 395 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots, false); 398 396 if (ret) { 399 - ulist_free(old_roots); 400 397 test_err("couldn't find old roots: %d", ret); 401 398 return ret; 402 399 } 403 400 404 401 ret = remove_extent_ref(root, nodesize, nodesize, 0, 405 402 BTRFS_FIRST_FREE_OBJECTID); 406 - if (ret) 403 + if (ret) { 404 + ulist_free(old_roots); 407 405 return ret; 406 + } 408 407 409 408 ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots, false); 410 409 if (ret) { 411 410 ulist_free(old_roots); 412 - ulist_free(new_roots); 413 411 test_err("couldn't find old roots: %d", ret); 414 412 return ret; 415 413 }
+25 -1
fs/cifs/cifsfs.c
··· 1143 1143 .fiemap = cifs_fiemap, 1144 1144 }; 1145 1145 1146 + const char *cifs_get_link(struct dentry *dentry, struct inode *inode, 1147 + struct delayed_call *done) 1148 + { 1149 + char *target_path; 1150 + 1151 + target_path = kmalloc(PATH_MAX, GFP_KERNEL); 1152 + if (!target_path) 1153 + return ERR_PTR(-ENOMEM); 1154 + 1155 + spin_lock(&inode->i_lock); 1156 + if (likely(CIFS_I(inode)->symlink_target)) { 1157 + strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX); 1158 + } else { 1159 + kfree(target_path); 1160 + target_path = ERR_PTR(-EOPNOTSUPP); 1161 + } 1162 + spin_unlock(&inode->i_lock); 1163 + 1164 + if (!IS_ERR(target_path)) 1165 + set_delayed_call(done, kfree_link, target_path); 1166 + 1167 + return target_path; 1168 + } 1169 + 1146 1170 const struct inode_operations cifs_symlink_inode_ops = { 1147 - .get_link = simple_get_link, 1171 + .get_link = cifs_get_link, 1148 1172 .permission = cifs_permission, 1149 1173 .listxattr = cifs_listxattr, 1150 1174 };
-5
fs/cifs/inode.c
··· 215 215 kfree(cifs_i->symlink_target); 216 216 cifs_i->symlink_target = fattr->cf_symlink_target; 217 217 fattr->cf_symlink_target = NULL; 218 - 219 - if (unlikely(!cifs_i->symlink_target)) 220 - inode->i_link = ERR_PTR(-EOPNOTSUPP); 221 - else 222 - inode->i_link = cifs_i->symlink_target; 223 218 } 224 219 spin_unlock(&inode->i_lock); 225 220
+5 -1
fs/cifs/misc.c
··· 400 400 { 401 401 struct smb_hdr *buf = (struct smb_hdr *)buffer; 402 402 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; 403 + struct TCP_Server_Info *pserver; 403 404 struct cifs_ses *ses; 404 405 struct cifs_tcon *tcon; 405 406 struct cifsInodeInfo *pCifsInode; ··· 465 464 if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) 466 465 return false; 467 466 467 + /* If server is a channel, select the primary channel */ 468 + pserver = CIFS_SERVER_IS_CHAN(srv) ? srv->primary_server : srv; 469 + 468 470 /* look up tcon based on tid & uid */ 469 471 spin_lock(&cifs_tcp_ses_lock); 470 - list_for_each_entry(ses, &srv->smb_ses_list, smb_ses_list) { 472 + list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 471 473 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 472 474 if (tcon->tid != buf->Tid) 473 475 continue;
+45 -36
fs/cifs/smb2misc.c
··· 135 135 int 136 136 smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server) 137 137 { 138 + struct TCP_Server_Info *pserver; 138 139 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 139 140 struct smb2_pdu *pdu = (struct smb2_pdu *)shdr; 140 141 int hdr_size = sizeof(struct smb2_hdr); ··· 143 142 int command; 144 143 __u32 calc_len; /* calculated length */ 145 144 __u64 mid; 145 + 146 + /* If server is a channel, select the primary channel */ 147 + pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server; 146 148 147 149 /* 148 150 * Add function to do table lookup of StructureSize by command ··· 159 155 160 156 /* decrypt frame now that it is completely read in */ 161 157 spin_lock(&cifs_tcp_ses_lock); 162 - list_for_each_entry(iter, &server->smb_ses_list, smb_ses_list) { 158 + list_for_each_entry(iter, &pserver->smb_ses_list, smb_ses_list) { 163 159 if (iter->Suid == le64_to_cpu(thdr->SessionId)) { 164 160 ses = iter; 165 161 break; ··· 612 608 } 613 609 614 610 static bool 615 - smb2_is_valid_lease_break(char *buffer) 611 + smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server) 616 612 { 617 613 struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer; 618 - struct TCP_Server_Info *server; 614 + struct TCP_Server_Info *pserver; 619 615 struct cifs_ses *ses; 620 616 struct cifs_tcon *tcon; 621 617 struct cifs_pending_open *open; 622 618 623 619 cifs_dbg(FYI, "Checking for lease break\n"); 624 620 621 + /* If server is a channel, select the primary channel */ 622 + pserver = CIFS_SERVER_IS_CHAN(server) ? 
server->primary_server : server; 623 + 625 624 /* look up tcon based on tid & uid */ 626 625 spin_lock(&cifs_tcp_ses_lock); 627 - list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 628 - list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 629 - list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 630 - spin_lock(&tcon->open_file_lock); 631 - cifs_stats_inc( 632 - &tcon->stats.cifs_stats.num_oplock_brks); 633 - if (smb2_tcon_has_lease(tcon, rsp)) { 634 - spin_unlock(&tcon->open_file_lock); 635 - spin_unlock(&cifs_tcp_ses_lock); 636 - return true; 637 - } 638 - open = smb2_tcon_find_pending_open_lease(tcon, 639 - rsp); 640 - if (open) { 641 - __u8 lease_key[SMB2_LEASE_KEY_SIZE]; 642 - struct tcon_link *tlink; 643 - 644 - tlink = cifs_get_tlink(open->tlink); 645 - memcpy(lease_key, open->lease_key, 646 - SMB2_LEASE_KEY_SIZE); 647 - spin_unlock(&tcon->open_file_lock); 648 - spin_unlock(&cifs_tcp_ses_lock); 649 - smb2_queue_pending_open_break(tlink, 650 - lease_key, 651 - rsp->NewLeaseState); 652 - return true; 653 - } 626 + list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 627 + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 628 + spin_lock(&tcon->open_file_lock); 629 + cifs_stats_inc( 630 + &tcon->stats.cifs_stats.num_oplock_brks); 631 + if (smb2_tcon_has_lease(tcon, rsp)) { 654 632 spin_unlock(&tcon->open_file_lock); 633 + spin_unlock(&cifs_tcp_ses_lock); 634 + return true; 635 + } 636 + open = smb2_tcon_find_pending_open_lease(tcon, 637 + rsp); 638 + if (open) { 639 + __u8 lease_key[SMB2_LEASE_KEY_SIZE]; 640 + struct tcon_link *tlink; 655 641 656 - if (cached_dir_lease_break(tcon, rsp->LeaseKey)) { 657 - spin_unlock(&cifs_tcp_ses_lock); 658 - return true; 659 - } 642 + tlink = cifs_get_tlink(open->tlink); 643 + memcpy(lease_key, open->lease_key, 644 + SMB2_LEASE_KEY_SIZE); 645 + spin_unlock(&tcon->open_file_lock); 646 + spin_unlock(&cifs_tcp_ses_lock); 647 + smb2_queue_pending_open_break(tlink, 648 + lease_key, 649 
+ rsp->NewLeaseState); 650 + return true; 651 + } 652 + spin_unlock(&tcon->open_file_lock); 653 + 654 + if (cached_dir_lease_break(tcon, rsp->LeaseKey)) { 655 + spin_unlock(&cifs_tcp_ses_lock); 656 + return true; 660 657 } 661 658 } 662 659 } ··· 676 671 smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) 677 672 { 678 673 struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer; 674 + struct TCP_Server_Info *pserver; 679 675 struct cifs_ses *ses; 680 676 struct cifs_tcon *tcon; 681 677 struct cifsInodeInfo *cinode; ··· 690 684 if (rsp->StructureSize != 691 685 smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { 692 686 if (le16_to_cpu(rsp->StructureSize) == 44) 693 - return smb2_is_valid_lease_break(buffer); 687 + return smb2_is_valid_lease_break(buffer, server); 694 688 else 695 689 return false; 696 690 } 697 691 698 692 cifs_dbg(FYI, "oplock level 0x%x\n", rsp->OplockLevel); 699 693 694 + /* If server is a channel, select the primary channel */ 695 + pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server; 696 + 700 697 /* look up tcon based on tid & uid */ 701 698 spin_lock(&cifs_tcp_ses_lock); 702 - list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 699 + list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 703 700 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 704 701 705 702 spin_lock(&tcon->open_file_lock);
+18 -12
fs/cifs/smb2ops.c
··· 2302 2302 smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server) 2303 2303 { 2304 2304 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 2305 + struct TCP_Server_Info *pserver; 2305 2306 struct cifs_ses *ses; 2306 2307 struct cifs_tcon *tcon; 2307 2308 2308 2309 if (shdr->Status != STATUS_NETWORK_NAME_DELETED) 2309 2310 return; 2310 2311 2312 + /* If server is a channel, select the primary channel */ 2313 + pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server; 2314 + 2311 2315 spin_lock(&cifs_tcp_ses_lock); 2312 - list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 2316 + list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 2313 2317 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 2314 2318 if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) { 2315 2319 spin_lock(&tcon->tc_lock); ··· 4268 4264 static int 4269 4265 smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) 4270 4266 { 4267 + struct TCP_Server_Info *pserver; 4271 4268 struct cifs_ses *ses; 4272 4269 u8 *ses_enc_key; 4273 4270 4271 + /* If server is a channel, select the primary channel */ 4272 + pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server; 4273 + 4274 4274 spin_lock(&cifs_tcp_ses_lock); 4275 - list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 4276 - list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 4277 - if (ses->Suid == ses_id) { 4278 - spin_lock(&ses->ses_lock); 4279 - ses_enc_key = enc ? ses->smb3encryptionkey : 4280 - ses->smb3decryptionkey; 4281 - memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); 4282 - spin_unlock(&ses->ses_lock); 4283 - spin_unlock(&cifs_tcp_ses_lock); 4284 - return 0; 4285 - } 4275 + list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 4276 + if (ses->Suid == ses_id) { 4277 + spin_lock(&ses->ses_lock); 4278 + ses_enc_key = enc ? 
ses->smb3encryptionkey : 4279 + ses->smb3decryptionkey; 4280 + memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); 4281 + spin_unlock(&ses->ses_lock); 4282 + spin_unlock(&cifs_tcp_ses_lock); 4283 + return 0; 4286 4284 } 4287 4285 } 4288 4286 spin_unlock(&cifs_tcp_ses_lock);
+12 -7
fs/cifs/smb2transport.c
··· 77 77 int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key) 78 78 { 79 79 struct cifs_chan *chan; 80 + struct TCP_Server_Info *pserver; 80 81 struct cifs_ses *ses = NULL; 81 - struct TCP_Server_Info *it = NULL; 82 82 int i; 83 83 int rc = 0; 84 84 85 85 spin_lock(&cifs_tcp_ses_lock); 86 86 87 - list_for_each_entry(it, &cifs_tcp_ses_list, tcp_ses_list) { 88 - list_for_each_entry(ses, &it->smb_ses_list, smb_ses_list) { 89 - if (ses->Suid == ses_id) 90 - goto found; 91 - } 87 + /* If server is a channel, select the primary channel */ 88 + pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server; 89 + 90 + list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 91 + if (ses->Suid == ses_id) 92 + goto found; 92 93 } 93 94 cifs_server_dbg(VFS, "%s: Could not find session 0x%llx\n", 94 95 __func__, ses_id); ··· 137 136 static struct cifs_ses * 138 137 smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id) 139 138 { 139 + struct TCP_Server_Info *pserver; 140 140 struct cifs_ses *ses; 141 141 142 - list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 142 + /* If server is a channel, select the primary channel */ 143 + pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server; 144 + 145 + list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 143 146 if (ses->Suid != ses_id) 144 147 continue; 145 148 ++ses->ses_count;
+3 -2
fs/ext4/fast_commit.c
··· 1521 1521 struct ext4_iloc iloc; 1522 1522 int inode_len, ino, ret, tag = tl->fc_tag; 1523 1523 struct ext4_extent_header *eh; 1524 + size_t off_gen = offsetof(struct ext4_inode, i_generation); 1524 1525 1525 1526 memcpy(&fc_inode, val, sizeof(fc_inode)); 1526 1527 ··· 1549 1548 raw_inode = ext4_raw_inode(&iloc); 1550 1549 1551 1550 memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block)); 1552 - memcpy(&raw_inode->i_generation, &raw_fc_inode->i_generation, 1553 - inode_len - offsetof(struct ext4_inode, i_generation)); 1551 + memcpy((u8 *)raw_inode + off_gen, (u8 *)raw_fc_inode + off_gen, 1552 + inode_len - off_gen); 1554 1553 if (le32_to_cpu(raw_inode->i_flags) & EXT4_EXTENTS_FL) { 1555 1554 eh = (struct ext4_extent_header *)(&raw_inode->i_block[0]); 1556 1555 if (eh->eh_magic != EXT4_EXT_MAGIC) {
+1 -2
fs/ext4/ioctl.c
··· 145 145 if (ext4_has_metadata_csum(sb) && 146 146 es->s_checksum != ext4_superblock_csum(sb, es)) { 147 147 ext4_msg(sb, KERN_ERR, "Invalid checksum for backup " 148 - "superblock %llu\n", sb_block); 148 + "superblock %llu", sb_block); 149 149 unlock_buffer(bh); 150 - err = -EFSBADCRC; 151 150 goto out_bh; 152 151 } 153 152 func(es, arg);
+2 -1
fs/ext4/migrate.c
··· 424 424 * already is extent-based, error out. 425 425 */ 426 426 if (!ext4_has_feature_extents(inode->i_sb) || 427 - (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 427 + ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) || 428 + ext4_has_inline_data(inode)) 428 429 return -EINVAL; 429 430 430 431 if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
+9 -1
fs/ext4/namei.c
··· 2259 2259 memset(de, 0, len); /* wipe old data */ 2260 2260 de = (struct ext4_dir_entry_2 *) data2; 2261 2261 top = data2 + len; 2262 - while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) 2262 + while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) { 2263 + if (ext4_check_dir_entry(dir, NULL, de, bh2, data2, len, 2264 + (data2 + (blocksize - csum_size) - 2265 + (char *) de))) { 2266 + brelse(bh2); 2267 + brelse(bh); 2268 + return -EFSCORRUPTED; 2269 + } 2263 2270 de = de2; 2271 + } 2264 2272 de->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) - 2265 2273 (char *) de, blocksize); 2266 2274
+5
fs/ext4/resize.c
··· 1158 1158 while (group < sbi->s_groups_count) { 1159 1159 struct buffer_head *bh; 1160 1160 ext4_fsblk_t backup_block; 1161 + struct ext4_super_block *es; 1161 1162 1162 1163 /* Out of journal space, and can't get more - abort - so sad */ 1163 1164 err = ext4_resize_ensure_credits_batch(handle, 1); ··· 1187 1186 memcpy(bh->b_data, data, size); 1188 1187 if (rest) 1189 1188 memset(bh->b_data + size, 0, rest); 1189 + es = (struct ext4_super_block *) bh->b_data; 1190 + es->s_block_group_nr = cpu_to_le16(group); 1191 + if (ext4_has_metadata_csum(sb)) 1192 + es->s_checksum = ext4_superblock_csum(sb, es); 1190 1193 set_buffer_uptodate(bh); 1191 1194 unlock_buffer(bh); 1192 1195 err = ext4_handle_dirty_metadata(handle, NULL, bh);
+1 -1
fs/ext4/super.c
··· 4881 4881 flush_work(&sbi->s_error_work); 4882 4882 jbd2_journal_destroy(sbi->s_journal); 4883 4883 sbi->s_journal = NULL; 4884 - return err; 4884 + return -EINVAL; 4885 4885 } 4886 4886 4887 4887 static int ext4_journal_data_mode_check(struct super_block *sb)
+4
fs/fuse/file.c
··· 3001 3001 goto out; 3002 3002 } 3003 3003 3004 + err = file_modified(file); 3005 + if (err) 3006 + goto out; 3007 + 3004 3008 if (!(mode & FALLOC_FL_KEEP_SIZE)) 3005 3009 set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); 3006 3010
+9 -1
fs/fuse/readdir.c
··· 77 77 goto unlock; 78 78 79 79 addr = kmap_local_page(page); 80 - if (!offset) 80 + if (!offset) { 81 81 clear_page(addr); 82 + SetPageUptodate(page); 83 + } 82 84 memcpy(addr + offset, dirent, reclen); 83 85 kunmap_local(addr); 84 86 fi->rdc.size = (index << PAGE_SHIFT) + offset + reclen; ··· 518 516 519 517 page = find_get_page_flags(file->f_mapping, index, 520 518 FGP_ACCESSED | FGP_LOCK); 519 + /* Page gone missing, then re-added to cache, but not initialized? */ 520 + if (page && !PageUptodate(page)) { 521 + unlock_page(page); 522 + put_page(page); 523 + page = NULL; 524 + } 521 525 spin_lock(&fi->rdc.lock); 522 526 if (!page) { 523 527 /*
+15
fs/xfs/libxfs/xfs_ag.h
··· 133 133 return true; 134 134 } 135 135 136 + static inline bool 137 + xfs_verify_agbext( 138 + struct xfs_perag *pag, 139 + xfs_agblock_t agbno, 140 + xfs_agblock_t len) 141 + { 142 + if (agbno + len <= agbno) 143 + return false; 144 + 145 + if (!xfs_verify_agbno(pag, agbno)) 146 + return false; 147 + 148 + return xfs_verify_agbno(pag, agbno + len - 1); 149 + } 150 + 136 151 /* 137 152 * Verify that an AG inode number pointer neither points outside the AG 138 153 * nor points at static metadata.
+1 -5
fs/xfs/libxfs/xfs_alloc.c
··· 263 263 goto out_bad_rec; 264 264 265 265 /* check for valid extent range, including overflow */ 266 - if (!xfs_verify_agbno(pag, *bno)) 267 - goto out_bad_rec; 268 - if (*bno > *bno + *len) 269 - goto out_bad_rec; 270 - if (!xfs_verify_agbno(pag, *bno + *len - 1)) 266 + if (!xfs_verify_agbext(pag, *bno, *len)) 271 267 goto out_bad_rec; 272 268 273 269 return 0;
+7 -2
fs/xfs/libxfs/xfs_dir2_leaf.c
··· 146 146 xfs_dir2_leaf_tail_t *ltp; 147 147 int stale; 148 148 int i; 149 + bool isleaf1 = (hdr->magic == XFS_DIR2_LEAF1_MAGIC || 150 + hdr->magic == XFS_DIR3_LEAF1_MAGIC); 149 151 150 152 ltp = xfs_dir2_leaf_tail_p(geo, leaf); 151 153 ··· 160 158 return __this_address; 161 159 162 160 /* Leaves and bests don't overlap in leaf format. */ 163 - if ((hdr->magic == XFS_DIR2_LEAF1_MAGIC || 164 - hdr->magic == XFS_DIR3_LEAF1_MAGIC) && 161 + if (isleaf1 && 165 162 (char *)&hdr->ents[hdr->count] > (char *)xfs_dir2_leaf_bests_p(ltp)) 166 163 return __this_address; 167 164 ··· 176 175 } 177 176 if (hdr->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) 178 177 stale++; 178 + if (isleaf1 && xfs_dir2_dataptr_to_db(geo, 179 + be32_to_cpu(hdr->ents[i].address)) >= 180 + be32_to_cpu(ltp->bestcount)) 181 + return __this_address; 179 182 } 180 183 if (hdr->stale != stale) 181 184 return __this_address;
+1 -21
fs/xfs/libxfs/xfs_format.h
··· 1564 1564 #define RMAPBT_UNUSED_OFFSET_BITLEN 7 1565 1565 #define RMAPBT_OFFSET_BITLEN 54 1566 1566 1567 - #define XFS_RMAP_ATTR_FORK (1 << 0) 1568 - #define XFS_RMAP_BMBT_BLOCK (1 << 1) 1569 - #define XFS_RMAP_UNWRITTEN (1 << 2) 1570 - #define XFS_RMAP_KEY_FLAGS (XFS_RMAP_ATTR_FORK | \ 1571 - XFS_RMAP_BMBT_BLOCK) 1572 - #define XFS_RMAP_REC_FLAGS (XFS_RMAP_UNWRITTEN) 1573 - struct xfs_rmap_irec { 1574 - xfs_agblock_t rm_startblock; /* extent start block */ 1575 - xfs_extlen_t rm_blockcount; /* extent length */ 1576 - uint64_t rm_owner; /* extent owner */ 1577 - uint64_t rm_offset; /* offset within the owner */ 1578 - unsigned int rm_flags; /* state flags */ 1579 - }; 1580 - 1581 1567 /* 1582 1568 * Key structure 1583 1569 * ··· 1612 1626 * on the startblock. This speeds up mount time deletion of stale 1613 1627 * staging extents because they're all at the right side of the tree. 1614 1628 */ 1615 - #define XFS_REFC_COW_START ((xfs_agblock_t)(1U << 31)) 1629 + #define XFS_REFC_COWFLAG (1U << 31) 1616 1630 #define REFCNTBT_COWFLAG_BITLEN 1 1617 1631 #define REFCNTBT_AGBLOCK_BITLEN 31 1618 1632 ··· 1624 1638 1625 1639 struct xfs_refcount_key { 1626 1640 __be32 rc_startblock; /* starting block number */ 1627 - }; 1628 - 1629 - struct xfs_refcount_irec { 1630 - xfs_agblock_t rc_startblock; /* starting block number */ 1631 - xfs_extlen_t rc_blockcount; /* count of free blocks */ 1632 - xfs_nlink_t rc_refcount; /* number of inodes linked here */ 1633 1641 }; 1634 1642 1635 1643 #define MAXREFCOUNT ((xfs_nlink_t)~0U)
+54 -6
fs/xfs/libxfs/xfs_log_format.h
··· 613 613 uint16_t efi_size; /* size of this item */ 614 614 uint32_t efi_nextents; /* # extents to free */ 615 615 uint64_t efi_id; /* efi identifier */ 616 - xfs_extent_t efi_extents[1]; /* array of extents to free */ 616 + xfs_extent_t efi_extents[]; /* array of extents to free */ 617 617 } xfs_efi_log_format_t; 618 + 619 + static inline size_t 620 + xfs_efi_log_format_sizeof( 621 + unsigned int nr) 622 + { 623 + return sizeof(struct xfs_efi_log_format) + 624 + nr * sizeof(struct xfs_extent); 625 + } 618 626 619 627 typedef struct xfs_efi_log_format_32 { 620 628 uint16_t efi_type; /* efi log item type */ 621 629 uint16_t efi_size; /* size of this item */ 622 630 uint32_t efi_nextents; /* # extents to free */ 623 631 uint64_t efi_id; /* efi identifier */ 624 - xfs_extent_32_t efi_extents[1]; /* array of extents to free */ 632 + xfs_extent_32_t efi_extents[]; /* array of extents to free */ 625 633 } __attribute__((packed)) xfs_efi_log_format_32_t; 634 + 635 + static inline size_t 636 + xfs_efi_log_format32_sizeof( 637 + unsigned int nr) 638 + { 639 + return sizeof(struct xfs_efi_log_format_32) + 640 + nr * sizeof(struct xfs_extent_32); 641 + } 626 642 627 643 typedef struct xfs_efi_log_format_64 { 628 644 uint16_t efi_type; /* efi log item type */ 629 645 uint16_t efi_size; /* size of this item */ 630 646 uint32_t efi_nextents; /* # extents to free */ 631 647 uint64_t efi_id; /* efi identifier */ 632 - xfs_extent_64_t efi_extents[1]; /* array of extents to free */ 648 + xfs_extent_64_t efi_extents[]; /* array of extents to free */ 633 649 } xfs_efi_log_format_64_t; 650 + 651 + static inline size_t 652 + xfs_efi_log_format64_sizeof( 653 + unsigned int nr) 654 + { 655 + return sizeof(struct xfs_efi_log_format_64) + 656 + nr * sizeof(struct xfs_extent_64); 657 + } 634 658 635 659 /* 636 660 * This is the structure used to lay out an efd log item in the ··· 666 642 uint16_t efd_size; /* size of this item */ 667 643 uint32_t efd_nextents; /* # of extents freed */ 668 
644 uint64_t efd_efi_id; /* id of corresponding efi */ 669 - xfs_extent_t efd_extents[1]; /* array of extents freed */ 645 + xfs_extent_t efd_extents[]; /* array of extents freed */ 670 646 } xfs_efd_log_format_t; 647 + 648 + static inline size_t 649 + xfs_efd_log_format_sizeof( 650 + unsigned int nr) 651 + { 652 + return sizeof(struct xfs_efd_log_format) + 653 + nr * sizeof(struct xfs_extent); 654 + } 671 655 672 656 typedef struct xfs_efd_log_format_32 { 673 657 uint16_t efd_type; /* efd log item type */ 674 658 uint16_t efd_size; /* size of this item */ 675 659 uint32_t efd_nextents; /* # of extents freed */ 676 660 uint64_t efd_efi_id; /* id of corresponding efi */ 677 - xfs_extent_32_t efd_extents[1]; /* array of extents freed */ 661 + xfs_extent_32_t efd_extents[]; /* array of extents freed */ 678 662 } __attribute__((packed)) xfs_efd_log_format_32_t; 663 + 664 + static inline size_t 665 + xfs_efd_log_format32_sizeof( 666 + unsigned int nr) 667 + { 668 + return sizeof(struct xfs_efd_log_format_32) + 669 + nr * sizeof(struct xfs_extent_32); 670 + } 679 671 680 672 typedef struct xfs_efd_log_format_64 { 681 673 uint16_t efd_type; /* efd log item type */ 682 674 uint16_t efd_size; /* size of this item */ 683 675 uint32_t efd_nextents; /* # of extents freed */ 684 676 uint64_t efd_efi_id; /* id of corresponding efi */ 685 - xfs_extent_64_t efd_extents[1]; /* array of extents freed */ 677 + xfs_extent_64_t efd_extents[]; /* array of extents freed */ 686 678 } xfs_efd_log_format_64_t; 679 + 680 + static inline size_t 681 + xfs_efd_log_format64_sizeof( 682 + unsigned int nr) 683 + { 684 + return sizeof(struct xfs_efd_log_format_64) + 685 + nr * sizeof(struct xfs_extent_64); 686 + } 687 687 688 688 /* 689 689 * RUI/RUD (reverse mapping) log format definitions
+199 -87
fs/xfs/libxfs/xfs_refcount.c
··· 46 46 int 47 47 xfs_refcount_lookup_le( 48 48 struct xfs_btree_cur *cur, 49 + enum xfs_refc_domain domain, 49 50 xfs_agblock_t bno, 50 51 int *stat) 51 52 { 52 - trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno, 53 + trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, 54 + xfs_refcount_encode_startblock(bno, domain), 53 55 XFS_LOOKUP_LE); 54 56 cur->bc_rec.rc.rc_startblock = bno; 55 57 cur->bc_rec.rc.rc_blockcount = 0; 58 + cur->bc_rec.rc.rc_domain = domain; 56 59 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat); 57 60 } 58 61 ··· 66 63 int 67 64 xfs_refcount_lookup_ge( 68 65 struct xfs_btree_cur *cur, 66 + enum xfs_refc_domain domain, 69 67 xfs_agblock_t bno, 70 68 int *stat) 71 69 { 72 - trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno, 70 + trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, 71 + xfs_refcount_encode_startblock(bno, domain), 73 72 XFS_LOOKUP_GE); 74 73 cur->bc_rec.rc.rc_startblock = bno; 75 74 cur->bc_rec.rc.rc_blockcount = 0; 75 + cur->bc_rec.rc.rc_domain = domain; 76 76 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); 77 77 } 78 78 ··· 86 80 int 87 81 xfs_refcount_lookup_eq( 88 82 struct xfs_btree_cur *cur, 83 + enum xfs_refc_domain domain, 89 84 xfs_agblock_t bno, 90 85 int *stat) 91 86 { 92 - trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno, 87 + trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, 88 + xfs_refcount_encode_startblock(bno, domain), 93 89 XFS_LOOKUP_LE); 94 90 cur->bc_rec.rc.rc_startblock = bno; 95 91 cur->bc_rec.rc.rc_blockcount = 0; 92 + cur->bc_rec.rc.rc_domain = domain; 96 93 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); 97 94 } 98 95 ··· 105 96 const union xfs_btree_rec *rec, 106 97 struct xfs_refcount_irec *irec) 107 98 { 108 - irec->rc_startblock = be32_to_cpu(rec->refc.rc_startblock); 99 + uint32_t start; 100 + 101 + start = be32_to_cpu(rec->refc.rc_startblock); 102 + if (start & XFS_REFC_COWFLAG) { 103 + start &= 
~XFS_REFC_COWFLAG; 104 + irec->rc_domain = XFS_REFC_DOMAIN_COW; 105 + } else { 106 + irec->rc_domain = XFS_REFC_DOMAIN_SHARED; 107 + } 108 + 109 + irec->rc_startblock = start; 109 110 irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount); 110 111 irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount); 111 112 } ··· 133 114 struct xfs_perag *pag = cur->bc_ag.pag; 134 115 union xfs_btree_rec *rec; 135 116 int error; 136 - xfs_agblock_t realstart; 137 117 138 118 error = xfs_btree_get_rec(cur, &rec, stat); 139 119 if (error || !*stat) ··· 142 124 if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN) 143 125 goto out_bad_rec; 144 126 145 - /* handle special COW-staging state */ 146 - realstart = irec->rc_startblock; 147 - if (realstart & XFS_REFC_COW_START) { 148 - if (irec->rc_refcount != 1) 149 - goto out_bad_rec; 150 - realstart &= ~XFS_REFC_COW_START; 151 - } else if (irec->rc_refcount < 2) { 127 + if (!xfs_refcount_check_domain(irec)) 152 128 goto out_bad_rec; 153 - } 154 129 155 130 /* check for valid extent range, including overflow */ 156 - if (!xfs_verify_agbno(pag, realstart)) 157 - goto out_bad_rec; 158 - if (realstart > realstart + irec->rc_blockcount) 159 - goto out_bad_rec; 160 - if (!xfs_verify_agbno(pag, realstart + irec->rc_blockcount - 1)) 131 + if (!xfs_verify_agbext(pag, irec->rc_startblock, irec->rc_blockcount)) 161 132 goto out_bad_rec; 162 133 163 134 if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT) ··· 176 169 struct xfs_refcount_irec *irec) 177 170 { 178 171 union xfs_btree_rec rec; 172 + uint32_t start; 179 173 int error; 180 174 181 175 trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec); 182 - rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock); 176 + 177 + start = xfs_refcount_encode_startblock(irec->rc_startblock, 178 + irec->rc_domain); 179 + rec.refc.rc_startblock = cpu_to_be32(start); 183 180 rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount); 184 181 rec.refc.rc_refcount 
= cpu_to_be32(irec->rc_refcount); 182 + 185 183 error = xfs_btree_update(cur, &rec); 186 184 if (error) 187 185 trace_xfs_refcount_update_error(cur->bc_mp, ··· 208 196 int error; 209 197 210 198 trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec); 199 + 211 200 cur->bc_rec.rc.rc_startblock = irec->rc_startblock; 212 201 cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount; 213 202 cur->bc_rec.rc.rc_refcount = irec->rc_refcount; 203 + cur->bc_rec.rc.rc_domain = irec->rc_domain; 204 + 214 205 error = xfs_btree_insert(cur, i); 215 206 if (error) 216 207 goto out_error; ··· 259 244 } 260 245 if (error) 261 246 goto out_error; 262 - error = xfs_refcount_lookup_ge(cur, irec.rc_startblock, &found_rec); 247 + error = xfs_refcount_lookup_ge(cur, irec.rc_domain, irec.rc_startblock, 248 + &found_rec); 263 249 out_error: 264 250 if (error) 265 251 trace_xfs_refcount_delete_error(cur->bc_mp, ··· 359 343 STATIC int 360 344 xfs_refcount_split_extent( 361 345 struct xfs_btree_cur *cur, 346 + enum xfs_refc_domain domain, 362 347 xfs_agblock_t agbno, 363 348 bool *shape_changed) 364 349 { ··· 368 351 int error; 369 352 370 353 *shape_changed = false; 371 - error = xfs_refcount_lookup_le(cur, agbno, &found_rec); 354 + error = xfs_refcount_lookup_le(cur, domain, agbno, &found_rec); 372 355 if (error) 373 356 goto out_error; 374 357 if (!found_rec) ··· 381 364 error = -EFSCORRUPTED; 382 365 goto out_error; 383 366 } 367 + if (rcext.rc_domain != domain) 368 + return 0; 384 369 if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno) 385 370 return 0; 386 371 ··· 434 415 trace_xfs_refcount_merge_center_extents(cur->bc_mp, 435 416 cur->bc_ag.pag->pag_agno, left, center, right); 436 417 418 + ASSERT(left->rc_domain == center->rc_domain); 419 + ASSERT(right->rc_domain == center->rc_domain); 420 + 437 421 /* 438 422 * Make sure the center and right extents are not in the btree. 
439 423 * If the center extent was synthesized, the first delete call ··· 445 423 * call removes the center and the second one removes the right 446 424 * extent. 447 425 */ 448 - error = xfs_refcount_lookup_ge(cur, center->rc_startblock, 449 - &found_rec); 426 + error = xfs_refcount_lookup_ge(cur, center->rc_domain, 427 + center->rc_startblock, &found_rec); 450 428 if (error) 451 429 goto out_error; 452 430 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) { ··· 473 451 } 474 452 475 453 /* Enlarge the left extent. */ 476 - error = xfs_refcount_lookup_le(cur, left->rc_startblock, 477 - &found_rec); 454 + error = xfs_refcount_lookup_le(cur, left->rc_domain, 455 + left->rc_startblock, &found_rec); 478 456 if (error) 479 457 goto out_error; 480 458 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) { ··· 513 491 trace_xfs_refcount_merge_left_extent(cur->bc_mp, 514 492 cur->bc_ag.pag->pag_agno, left, cleft); 515 493 494 + ASSERT(left->rc_domain == cleft->rc_domain); 495 + 516 496 /* If the extent at agbno (cleft) wasn't synthesized, remove it. */ 517 497 if (cleft->rc_refcount > 1) { 518 - error = xfs_refcount_lookup_le(cur, cleft->rc_startblock, 519 - &found_rec); 498 + error = xfs_refcount_lookup_le(cur, cleft->rc_domain, 499 + cleft->rc_startblock, &found_rec); 520 500 if (error) 521 501 goto out_error; 522 502 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) { ··· 536 512 } 537 513 538 514 /* Enlarge the left extent. 
*/ 539 - error = xfs_refcount_lookup_le(cur, left->rc_startblock, 540 - &found_rec); 515 + error = xfs_refcount_lookup_le(cur, left->rc_domain, 516 + left->rc_startblock, &found_rec); 541 517 if (error) 542 518 goto out_error; 543 519 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) { ··· 576 552 trace_xfs_refcount_merge_right_extent(cur->bc_mp, 577 553 cur->bc_ag.pag->pag_agno, cright, right); 578 554 555 + ASSERT(right->rc_domain == cright->rc_domain); 556 + 579 557 /* 580 558 * If the extent ending at agbno+aglen (cright) wasn't synthesized, 581 559 * remove it. 582 560 */ 583 561 if (cright->rc_refcount > 1) { 584 - error = xfs_refcount_lookup_le(cur, cright->rc_startblock, 585 - &found_rec); 562 + error = xfs_refcount_lookup_le(cur, cright->rc_domain, 563 + cright->rc_startblock, &found_rec); 586 564 if (error) 587 565 goto out_error; 588 566 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) { ··· 602 576 } 603 577 604 578 /* Enlarge the right extent. */ 605 - error = xfs_refcount_lookup_le(cur, right->rc_startblock, 606 - &found_rec); 579 + error = xfs_refcount_lookup_le(cur, right->rc_domain, 580 + right->rc_startblock, &found_rec); 607 581 if (error) 608 582 goto out_error; 609 583 if (XFS_IS_CORRUPT(cur->bc_mp, found_rec != 1)) { ··· 626 600 return error; 627 601 } 628 602 629 - #define XFS_FIND_RCEXT_SHARED 1 630 - #define XFS_FIND_RCEXT_COW 2 631 603 /* 632 604 * Find the left extent and the one after it (cleft). This function assumes 633 605 * that we've already split any extent crossing agbno. 
··· 635 611 struct xfs_btree_cur *cur, 636 612 struct xfs_refcount_irec *left, 637 613 struct xfs_refcount_irec *cleft, 614 + enum xfs_refc_domain domain, 638 615 xfs_agblock_t agbno, 639 - xfs_extlen_t aglen, 640 - int flags) 616 + xfs_extlen_t aglen) 641 617 { 642 618 struct xfs_refcount_irec tmp; 643 619 int error; 644 620 int found_rec; 645 621 646 622 left->rc_startblock = cleft->rc_startblock = NULLAGBLOCK; 647 - error = xfs_refcount_lookup_le(cur, agbno - 1, &found_rec); 623 + error = xfs_refcount_lookup_le(cur, domain, agbno - 1, &found_rec); 648 624 if (error) 649 625 goto out_error; 650 626 if (!found_rec) ··· 658 634 goto out_error; 659 635 } 660 636 637 + if (tmp.rc_domain != domain) 638 + return 0; 661 639 if (xfs_refc_next(&tmp) != agbno) 662 - return 0; 663 - if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2) 664 - return 0; 665 - if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1) 666 640 return 0; 667 641 /* We have a left extent; retrieve (or invent) the next right one */ 668 642 *left = tmp; ··· 676 654 error = -EFSCORRUPTED; 677 655 goto out_error; 678 656 } 657 + 658 + if (tmp.rc_domain != domain) 659 + goto not_found; 679 660 680 661 /* if tmp starts at the end of our range, just use that */ 681 662 if (tmp.rc_startblock == agbno) ··· 696 671 cleft->rc_blockcount = min(aglen, 697 672 tmp.rc_startblock - agbno); 698 673 cleft->rc_refcount = 1; 674 + cleft->rc_domain = domain; 699 675 } 700 676 } else { 677 + not_found: 701 678 /* 702 679 * No extents, so pretend that there's one covering the whole 703 680 * range. 
··· 707 680 cleft->rc_startblock = agbno; 708 681 cleft->rc_blockcount = aglen; 709 682 cleft->rc_refcount = 1; 683 + cleft->rc_domain = domain; 710 684 } 711 685 trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno, 712 686 left, cleft, agbno); ··· 728 700 struct xfs_btree_cur *cur, 729 701 struct xfs_refcount_irec *right, 730 702 struct xfs_refcount_irec *cright, 703 + enum xfs_refc_domain domain, 731 704 xfs_agblock_t agbno, 732 - xfs_extlen_t aglen, 733 - int flags) 705 + xfs_extlen_t aglen) 734 706 { 735 707 struct xfs_refcount_irec tmp; 736 708 int error; 737 709 int found_rec; 738 710 739 711 right->rc_startblock = cright->rc_startblock = NULLAGBLOCK; 740 - error = xfs_refcount_lookup_ge(cur, agbno + aglen, &found_rec); 712 + error = xfs_refcount_lookup_ge(cur, domain, agbno + aglen, &found_rec); 741 713 if (error) 742 714 goto out_error; 743 715 if (!found_rec) ··· 751 723 goto out_error; 752 724 } 753 725 726 + if (tmp.rc_domain != domain) 727 + return 0; 754 728 if (tmp.rc_startblock != agbno + aglen) 755 - return 0; 756 - if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2) 757 - return 0; 758 - if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1) 759 729 return 0; 760 730 /* We have a right extent; retrieve (or invent) the next left one */ 761 731 *right = tmp; ··· 769 743 error = -EFSCORRUPTED; 770 744 goto out_error; 771 745 } 746 + 747 + if (tmp.rc_domain != domain) 748 + goto not_found; 772 749 773 750 /* if tmp ends at the end of our range, just use that */ 774 751 if (xfs_refc_next(&tmp) == agbno + aglen) ··· 789 760 cright->rc_blockcount = right->rc_startblock - 790 761 cright->rc_startblock; 791 762 cright->rc_refcount = 1; 763 + cright->rc_domain = domain; 792 764 } 793 765 } else { 766 + not_found: 794 767 /* 795 768 * No extents, so pretend that there's one covering the whole 796 769 * range. 
··· 800 769 cright->rc_startblock = agbno; 801 770 cright->rc_blockcount = aglen; 802 771 cright->rc_refcount = 1; 772 + cright->rc_domain = domain; 803 773 } 804 774 trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_ag.pag->pag_agno, 805 775 cright, right, agbno + aglen); ··· 826 794 STATIC int 827 795 xfs_refcount_merge_extents( 828 796 struct xfs_btree_cur *cur, 797 + enum xfs_refc_domain domain, 829 798 xfs_agblock_t *agbno, 830 799 xfs_extlen_t *aglen, 831 800 enum xfs_refc_adjust_op adjust, 832 - int flags, 833 801 bool *shape_changed) 834 802 { 835 803 struct xfs_refcount_irec left = {0}, cleft = {0}; ··· 844 812 * just below (agbno + aglen) [cright], and just above (agbno + aglen) 845 813 * [right]. 846 814 */ 847 - error = xfs_refcount_find_left_extents(cur, &left, &cleft, *agbno, 848 - *aglen, flags); 815 + error = xfs_refcount_find_left_extents(cur, &left, &cleft, domain, 816 + *agbno, *aglen); 849 817 if (error) 850 818 return error; 851 - error = xfs_refcount_find_right_extents(cur, &right, &cright, *agbno, 852 - *aglen, flags); 819 + error = xfs_refcount_find_right_extents(cur, &right, &cright, domain, 820 + *agbno, *aglen); 853 821 if (error) 854 822 return error; 855 823 ··· 902 870 aglen); 903 871 } 904 872 905 - return error; 873 + return 0; 906 874 } 907 875 908 876 /* ··· 965 933 if (*aglen == 0) 966 934 return 0; 967 935 968 - error = xfs_refcount_lookup_ge(cur, *agbno, &found_rec); 936 + error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_SHARED, *agbno, 937 + &found_rec); 969 938 if (error) 970 939 goto out_error; 971 940 ··· 974 941 error = xfs_refcount_get_rec(cur, &ext, &found_rec); 975 942 if (error) 976 943 goto out_error; 977 - if (!found_rec) { 944 + if (!found_rec || ext.rc_domain != XFS_REFC_DOMAIN_SHARED) { 978 945 ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks; 979 946 ext.rc_blockcount = 0; 980 947 ext.rc_refcount = 0; 948 + ext.rc_domain = XFS_REFC_DOMAIN_SHARED; 981 949 } 982 950 983 951 /* ··· 991 957 tmp.rc_blockcount 
= min(*aglen, 992 958 ext.rc_startblock - *agbno); 993 959 tmp.rc_refcount = 1 + adj; 960 + tmp.rc_domain = XFS_REFC_DOMAIN_SHARED; 961 + 994 962 trace_xfs_refcount_modify_extent(cur->bc_mp, 995 963 cur->bc_ag.pag->pag_agno, &tmp); 996 964 ··· 1022 986 (*agbno) += tmp.rc_blockcount; 1023 987 (*aglen) -= tmp.rc_blockcount; 1024 988 1025 - error = xfs_refcount_lookup_ge(cur, *agbno, 989 + /* Stop if there's nothing left to modify */ 990 + if (*aglen == 0 || !xfs_refcount_still_have_space(cur)) 991 + break; 992 + 993 + /* Move the cursor to the start of ext. */ 994 + error = xfs_refcount_lookup_ge(cur, 995 + XFS_REFC_DOMAIN_SHARED, *agbno, 1026 996 &found_rec); 1027 997 if (error) 1028 998 goto out_error; 1029 999 } 1030 1000 1031 - /* Stop if there's nothing left to modify */ 1032 - if (*aglen == 0 || !xfs_refcount_still_have_space(cur)) 1033 - break; 1001 + /* 1002 + * A previous step trimmed agbno/aglen such that the end of the 1003 + * range would not be in the middle of the record. If this is 1004 + * no longer the case, something is seriously wrong with the 1005 + * btree. Make sure we never feed the synthesized record into 1006 + * the processing loop below. 1007 + */ 1008 + if (XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount == 0) || 1009 + XFS_IS_CORRUPT(cur->bc_mp, ext.rc_blockcount > *aglen)) { 1010 + error = -EFSCORRUPTED; 1011 + goto out_error; 1012 + } 1034 1013 1035 1014 /* 1036 1015 * Adjust the reference count and either update the tree ··· 1121 1070 /* 1122 1071 * Ensure that no rcextents cross the boundary of the adjustment range. 
1123 1072 */ 1124 - error = xfs_refcount_split_extent(cur, agbno, &shape_changed); 1073 + error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED, 1074 + agbno, &shape_changed); 1125 1075 if (error) 1126 1076 goto out_error; 1127 1077 if (shape_changed) 1128 1078 shape_changes++; 1129 1079 1130 - error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed); 1080 + error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED, 1081 + agbno + aglen, &shape_changed); 1131 1082 if (error) 1132 1083 goto out_error; 1133 1084 if (shape_changed) ··· 1138 1085 /* 1139 1086 * Try to merge with the left or right extents of the range. 1140 1087 */ 1141 - error = xfs_refcount_merge_extents(cur, new_agbno, new_aglen, adj, 1142 - XFS_FIND_RCEXT_SHARED, &shape_changed); 1088 + error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_SHARED, 1089 + new_agbno, new_aglen, adj, &shape_changed); 1143 1090 if (error) 1144 1091 goto out_error; 1145 1092 if (shape_changed) ··· 1175 1122 xfs_btree_del_cursor(rcur, error); 1176 1123 if (error) 1177 1124 xfs_trans_brelse(tp, agbp); 1125 + } 1126 + 1127 + /* 1128 + * Set up a continuation a deferred refcount operation by updating the intent. 1129 + * Checks to make sure we're not going to run off the end of the AG. 
1130 + */ 1131 + static inline int 1132 + xfs_refcount_continue_op( 1133 + struct xfs_btree_cur *cur, 1134 + xfs_fsblock_t startblock, 1135 + xfs_agblock_t new_agbno, 1136 + xfs_extlen_t new_len, 1137 + xfs_fsblock_t *new_fsbno) 1138 + { 1139 + struct xfs_mount *mp = cur->bc_mp; 1140 + struct xfs_perag *pag = cur->bc_ag.pag; 1141 + 1142 + if (XFS_IS_CORRUPT(mp, !xfs_verify_agbext(pag, new_agbno, new_len))) 1143 + return -EFSCORRUPTED; 1144 + 1145 + *new_fsbno = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno); 1146 + 1147 + ASSERT(xfs_verify_fsbext(mp, *new_fsbno, new_len)); 1148 + ASSERT(pag->pag_agno == XFS_FSB_TO_AGNO(mp, *new_fsbno)); 1149 + 1150 + return 0; 1178 1151 } 1179 1152 1180 1153 /* ··· 1270 1191 case XFS_REFCOUNT_INCREASE: 1271 1192 error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno, 1272 1193 new_len, XFS_REFCOUNT_ADJUST_INCREASE); 1273 - *new_fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno); 1194 + if (error) 1195 + goto out_drop; 1196 + if (*new_len > 0) 1197 + error = xfs_refcount_continue_op(rcur, startblock, 1198 + new_agbno, *new_len, new_fsb); 1274 1199 break; 1275 1200 case XFS_REFCOUNT_DECREASE: 1276 1201 error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno, 1277 1202 new_len, XFS_REFCOUNT_ADJUST_DECREASE); 1278 - *new_fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno); 1203 + if (error) 1204 + goto out_drop; 1205 + if (*new_len > 0) 1206 + error = xfs_refcount_continue_op(rcur, startblock, 1207 + new_agbno, *new_len, new_fsb); 1279 1208 break; 1280 1209 case XFS_REFCOUNT_ALLOC_COW: 1281 1210 *new_fsb = startblock + blockcount; ··· 1394 1307 *flen = 0; 1395 1308 1396 1309 /* Try to find a refcount extent that crosses the start */ 1397 - error = xfs_refcount_lookup_le(cur, agbno, &have); 1310 + error = xfs_refcount_lookup_le(cur, XFS_REFC_DOMAIN_SHARED, agbno, 1311 + &have); 1398 1312 if (error) 1399 1313 goto out_error; 1400 1314 if (!have) { ··· 1413 1325 error = -EFSCORRUPTED; 1414 1326 goto out_error; 1415 1327 } 1328 + 
if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED) 1329 + goto done; 1416 1330 1417 1331 /* If the extent ends before the start, look at the next one */ 1418 1332 if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) { ··· 1430 1340 error = -EFSCORRUPTED; 1431 1341 goto out_error; 1432 1342 } 1343 + if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED) 1344 + goto done; 1433 1345 } 1434 1346 1435 1347 /* If the extent starts after the range we want, bail out */ ··· 1463 1371 error = -EFSCORRUPTED; 1464 1372 goto out_error; 1465 1373 } 1466 - if (tmp.rc_startblock >= agbno + aglen || 1374 + if (tmp.rc_domain != XFS_REFC_DOMAIN_SHARED || 1375 + tmp.rc_startblock >= agbno + aglen || 1467 1376 tmp.rc_startblock != *fbno + *flen) 1468 1377 break; 1469 1378 *flen = min(*flen + tmp.rc_blockcount, agbno + aglen - *fbno); ··· 1548 1455 return 0; 1549 1456 1550 1457 /* Find any overlapping refcount records */ 1551 - error = xfs_refcount_lookup_ge(cur, agbno, &found_rec); 1458 + error = xfs_refcount_lookup_ge(cur, XFS_REFC_DOMAIN_COW, agbno, 1459 + &found_rec); 1552 1460 if (error) 1553 1461 goto out_error; 1554 1462 error = xfs_refcount_get_rec(cur, &ext, &found_rec); 1555 1463 if (error) 1556 1464 goto out_error; 1465 + if (XFS_IS_CORRUPT(cur->bc_mp, found_rec && 1466 + ext.rc_domain != XFS_REFC_DOMAIN_COW)) { 1467 + error = -EFSCORRUPTED; 1468 + goto out_error; 1469 + } 1557 1470 if (!found_rec) { 1558 - ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks + 1559 - XFS_REFC_COW_START; 1471 + ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks; 1560 1472 ext.rc_blockcount = 0; 1561 1473 ext.rc_refcount = 0; 1474 + ext.rc_domain = XFS_REFC_DOMAIN_COW; 1562 1475 } 1563 1476 1564 1477 switch (adj) { ··· 1579 1480 tmp.rc_startblock = agbno; 1580 1481 tmp.rc_blockcount = aglen; 1581 1482 tmp.rc_refcount = 1; 1483 + tmp.rc_domain = XFS_REFC_DOMAIN_COW; 1484 + 1582 1485 trace_xfs_refcount_modify_extent(cur->bc_mp, 1583 1486 cur->bc_ag.pag->pag_agno, &tmp); 1584 1487 ··· 1643 1542 bool shape_changed; 1644 
1543 int error; 1645 1544 1646 - agbno += XFS_REFC_COW_START; 1647 - 1648 1545 /* 1649 1546 * Ensure that no rcextents cross the boundary of the adjustment range. 1650 1547 */ 1651 - error = xfs_refcount_split_extent(cur, agbno, &shape_changed); 1548 + error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW, 1549 + agbno, &shape_changed); 1652 1550 if (error) 1653 1551 goto out_error; 1654 1552 1655 - error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed); 1553 + error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_COW, 1554 + agbno + aglen, &shape_changed); 1656 1555 if (error) 1657 1556 goto out_error; 1658 1557 1659 1558 /* 1660 1559 * Try to merge with the left or right extents of the range. 1661 1560 */ 1662 - error = xfs_refcount_merge_extents(cur, &agbno, &aglen, adj, 1663 - XFS_FIND_RCEXT_COW, &shape_changed); 1561 + error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_COW, &agbno, 1562 + &aglen, adj, &shape_changed); 1664 1563 if (error) 1665 1564 goto out_error; 1666 1565 ··· 1767 1666 be32_to_cpu(rec->refc.rc_refcount) != 1)) 1768 1667 return -EFSCORRUPTED; 1769 1668 1770 - rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), 0); 1669 + rr = kmalloc(sizeof(struct xfs_refcount_recovery), 1670 + GFP_KERNEL | __GFP_NOFAIL); 1671 + INIT_LIST_HEAD(&rr->rr_list); 1771 1672 xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec); 1772 - list_add_tail(&rr->rr_list, debris); 1773 1673 1674 + if (XFS_IS_CORRUPT(cur->bc_mp, 1675 + rr->rr_rrec.rc_domain != XFS_REFC_DOMAIN_COW)) { 1676 + kfree(rr); 1677 + return -EFSCORRUPTED; 1678 + } 1679 + 1680 + list_add_tail(&rr->rr_list, debris); 1774 1681 return 0; 1775 1682 } 1776 1683 ··· 1796 1687 union xfs_btree_irec low; 1797 1688 union xfs_btree_irec high; 1798 1689 xfs_fsblock_t fsb; 1799 - xfs_agblock_t agbno; 1800 1690 int error; 1801 1691 1802 - if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START) 1692 + /* reflink filesystems mustn't have AGs larger than 2^31-1 blocks */ 1693 + 
BUILD_BUG_ON(XFS_MAX_CRC_AG_BLOCKS >= XFS_REFC_COWFLAG); 1694 + if (mp->m_sb.sb_agblocks > XFS_MAX_CRC_AG_BLOCKS) 1803 1695 return -EOPNOTSUPP; 1804 1696 1805 1697 INIT_LIST_HEAD(&debris); ··· 1827 1717 /* Find all the leftover CoW staging extents. */ 1828 1718 memset(&low, 0, sizeof(low)); 1829 1719 memset(&high, 0, sizeof(high)); 1830 - low.rc.rc_startblock = XFS_REFC_COW_START; 1720 + low.rc.rc_domain = high.rc.rc_domain = XFS_REFC_DOMAIN_COW; 1831 1721 high.rc.rc_startblock = -1U; 1832 1722 error = xfs_btree_query_range(cur, &low, &high, 1833 1723 xfs_refcount_recover_extent, &debris); ··· 1848 1738 &rr->rr_rrec); 1849 1739 1850 1740 /* Free the orphan record */ 1851 - agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START; 1852 - fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, agbno); 1741 + fsb = XFS_AGB_TO_FSB(mp, pag->pag_agno, 1742 + rr->rr_rrec.rc_startblock); 1853 1743 xfs_refcount_free_cow_extent(tp, fsb, 1854 1744 rr->rr_rrec.rc_blockcount); 1855 1745 ··· 1861 1751 goto out_free; 1862 1752 1863 1753 list_del(&rr->rr_list); 1864 - kmem_free(rr); 1754 + kfree(rr); 1865 1755 } 1866 1756 1867 1757 return error; ··· 1871 1761 /* Free the leftover list */ 1872 1762 list_for_each_entry_safe(rr, n, &debris, rr_list) { 1873 1763 list_del(&rr->rr_list); 1874 - kmem_free(rr); 1764 + kfree(rr); 1875 1765 } 1876 1766 return error; 1877 1767 } ··· 1880 1770 int 1881 1771 xfs_refcount_has_record( 1882 1772 struct xfs_btree_cur *cur, 1773 + enum xfs_refc_domain domain, 1883 1774 xfs_agblock_t bno, 1884 1775 xfs_extlen_t len, 1885 1776 bool *exists) ··· 1892 1781 low.rc.rc_startblock = bno; 1893 1782 memset(&high, 0xFF, sizeof(high)); 1894 1783 high.rc.rc_startblock = bno + len - 1; 1784 + low.rc.rc_domain = high.rc.rc_domain = domain; 1895 1785 1896 1786 return xfs_btree_has_record(cur, &low, &high, exists); 1897 1787 }
+36 -4
fs/xfs/libxfs/xfs_refcount.h
··· 14 14 struct xfs_refcount_irec; 15 15 16 16 extern int xfs_refcount_lookup_le(struct xfs_btree_cur *cur, 17 - xfs_agblock_t bno, int *stat); 17 + enum xfs_refc_domain domain, xfs_agblock_t bno, int *stat); 18 18 extern int xfs_refcount_lookup_ge(struct xfs_btree_cur *cur, 19 - xfs_agblock_t bno, int *stat); 19 + enum xfs_refc_domain domain, xfs_agblock_t bno, int *stat); 20 20 extern int xfs_refcount_lookup_eq(struct xfs_btree_cur *cur, 21 - xfs_agblock_t bno, int *stat); 21 + enum xfs_refc_domain domain, xfs_agblock_t bno, int *stat); 22 22 extern int xfs_refcount_get_rec(struct xfs_btree_cur *cur, 23 23 struct xfs_refcount_irec *irec, int *stat); 24 + 25 + static inline uint32_t 26 + xfs_refcount_encode_startblock( 27 + xfs_agblock_t startblock, 28 + enum xfs_refc_domain domain) 29 + { 30 + uint32_t start; 31 + 32 + /* 33 + * low level btree operations need to handle the generic btree range 34 + * query functions (which set rc_domain == -1U), so we check that the 35 + * domain is /not/ shared. 36 + */ 37 + start = startblock & ~XFS_REFC_COWFLAG; 38 + if (domain != XFS_REFC_DOMAIN_SHARED) 39 + start |= XFS_REFC_COWFLAG; 40 + 41 + return start; 42 + } 24 43 25 44 enum xfs_refcount_intent_type { 26 45 XFS_REFCOUNT_INCREASE = 1, ··· 54 35 xfs_extlen_t ri_blockcount; 55 36 xfs_fsblock_t ri_startblock; 56 37 }; 38 + 39 + /* Check that the refcount is appropriate for the record domain. 
*/ 40 + static inline bool 41 + xfs_refcount_check_domain( 42 + const struct xfs_refcount_irec *irec) 43 + { 44 + if (irec->rc_domain == XFS_REFC_DOMAIN_COW && irec->rc_refcount != 1) 45 + return false; 46 + if (irec->rc_domain == XFS_REFC_DOMAIN_SHARED && irec->rc_refcount < 2) 47 + return false; 48 + return true; 49 + } 57 50 58 51 void xfs_refcount_increase_extent(struct xfs_trans *tp, 59 52 struct xfs_bmbt_irec *irec); ··· 110 79 #define XFS_REFCOUNT_ITEM_OVERHEAD 32 111 80 112 81 extern int xfs_refcount_has_record(struct xfs_btree_cur *cur, 113 - xfs_agblock_t bno, xfs_extlen_t len, bool *exists); 82 + enum xfs_refc_domain domain, xfs_agblock_t bno, 83 + xfs_extlen_t len, bool *exists); 114 84 union xfs_btree_rec; 115 85 extern void xfs_refcount_btrec_to_irec(const union xfs_btree_rec *rec, 116 86 struct xfs_refcount_irec *irec);
+12 -3
fs/xfs/libxfs/xfs_refcount_btree.c
··· 13 13 #include "xfs_btree.h" 14 14 #include "xfs_btree_staging.h" 15 15 #include "xfs_refcount_btree.h" 16 + #include "xfs_refcount.h" 16 17 #include "xfs_alloc.h" 17 18 #include "xfs_error.h" 18 19 #include "xfs_trace.h" ··· 161 160 struct xfs_btree_cur *cur, 162 161 union xfs_btree_rec *rec) 163 162 { 164 - rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock); 163 + const struct xfs_refcount_irec *irec = &cur->bc_rec.rc; 164 + uint32_t start; 165 + 166 + start = xfs_refcount_encode_startblock(irec->rc_startblock, 167 + irec->rc_domain); 168 + rec->refc.rc_startblock = cpu_to_be32(start); 165 169 rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount); 166 170 rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount); 167 171 } ··· 188 182 struct xfs_btree_cur *cur, 189 183 const union xfs_btree_key *key) 190 184 { 191 - struct xfs_refcount_irec *rec = &cur->bc_rec.rc; 192 185 const struct xfs_refcount_key *kp = &key->refc; 186 + const struct xfs_refcount_irec *irec = &cur->bc_rec.rc; 187 + uint32_t start; 193 188 194 - return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock; 189 + start = xfs_refcount_encode_startblock(irec->rc_startblock, 190 + irec->rc_domain); 191 + return (int64_t)be32_to_cpu(kp->rc_startblock) - start; 195 192 } 196 193 197 194 STATIC int64_t
+2 -7
fs/xfs/libxfs/xfs_rmap.c
··· 235 235 goto out_bad_rec; 236 236 } else { 237 237 /* check for valid extent range, including overflow */ 238 - if (!xfs_verify_agbno(pag, irec->rm_startblock)) 239 - goto out_bad_rec; 240 - if (irec->rm_startblock > 241 - irec->rm_startblock + irec->rm_blockcount) 242 - goto out_bad_rec; 243 - if (!xfs_verify_agbno(pag, 244 - irec->rm_startblock + irec->rm_blockcount - 1)) 238 + if (!xfs_verify_agbext(pag, irec->rm_startblock, 239 + irec->rm_blockcount)) 245 240 goto out_bad_rec; 246 241 } 247 242
+2 -2
fs/xfs/libxfs/xfs_trans_resv.c
··· 422 422 423 423 /* 424 424 * In renaming a files we can modify: 425 - * the four inodes involved: 4 * inode size 425 + * the five inodes involved: 5 * inode size 426 426 * the two directory btrees: 2 * (max depth + v2) * dir block size 427 427 * the two directory bmap btrees: 2 * max depth * block size 428 428 * And the bmap_finish transaction can free dir and bmap blocks (two sets ··· 437 437 struct xfs_mount *mp) 438 438 { 439 439 return XFS_DQUOT_LOGRES(mp) + 440 - max((xfs_calc_inode_res(mp, 4) + 440 + max((xfs_calc_inode_res(mp, 5) + 441 441 xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp), 442 442 XFS_FSB_TO_B(mp, 1))), 443 443 (xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
+30
fs/xfs/libxfs/xfs_types.h
··· 166 166 xfs_exntst_t br_state; /* extent state */ 167 167 } xfs_bmbt_irec_t; 168 168 169 + enum xfs_refc_domain { 170 + XFS_REFC_DOMAIN_SHARED = 0, 171 + XFS_REFC_DOMAIN_COW, 172 + }; 173 + 174 + #define XFS_REFC_DOMAIN_STRINGS \ 175 + { XFS_REFC_DOMAIN_SHARED, "shared" }, \ 176 + { XFS_REFC_DOMAIN_COW, "cow" } 177 + 178 + struct xfs_refcount_irec { 179 + xfs_agblock_t rc_startblock; /* starting block number */ 180 + xfs_extlen_t rc_blockcount; /* count of free blocks */ 181 + xfs_nlink_t rc_refcount; /* number of inodes linked here */ 182 + enum xfs_refc_domain rc_domain; /* shared or cow staging extent? */ 183 + }; 184 + 185 + #define XFS_RMAP_ATTR_FORK (1 << 0) 186 + #define XFS_RMAP_BMBT_BLOCK (1 << 1) 187 + #define XFS_RMAP_UNWRITTEN (1 << 2) 188 + #define XFS_RMAP_KEY_FLAGS (XFS_RMAP_ATTR_FORK | \ 189 + XFS_RMAP_BMBT_BLOCK) 190 + #define XFS_RMAP_REC_FLAGS (XFS_RMAP_UNWRITTEN) 191 + struct xfs_rmap_irec { 192 + xfs_agblock_t rm_startblock; /* extent start block */ 193 + xfs_extlen_t rm_blockcount; /* extent length */ 194 + uint64_t rm_owner; /* extent owner */ 195 + uint64_t rm_offset; /* offset within the owner */ 196 + unsigned int rm_flags; /* state flags */ 197 + }; 198 + 169 199 /* per-AG block reservation types */ 170 200 enum xfs_ag_resv_type { 171 201 XFS_AG_RESV_NONE = 0,
+1 -3
fs/xfs/scrub/alloc.c
··· 100 100 bno = be32_to_cpu(rec->alloc.ar_startblock); 101 101 len = be32_to_cpu(rec->alloc.ar_blockcount); 102 102 103 - if (bno + len <= bno || 104 - !xfs_verify_agbno(pag, bno) || 105 - !xfs_verify_agbno(pag, bno + len - 1)) 103 + if (!xfs_verify_agbext(pag, bno, len)) 106 104 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); 107 105 108 106 xchk_allocbt_xref(bs->sc, bno, len);
+2 -3
fs/xfs/scrub/ialloc.c
··· 108 108 xfs_agblock_t bno; 109 109 110 110 bno = XFS_AGINO_TO_AGBNO(mp, agino); 111 - if (bno + len <= bno || 112 - !xfs_verify_agbno(pag, bno) || 113 - !xfs_verify_agbno(pag, bno + len - 1)) 111 + 112 + if (!xfs_verify_agbext(pag, bno, len)) 114 113 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); 115 114 116 115 xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);
+30 -42
fs/xfs/scrub/refcount.c
··· 269 269 STATIC void 270 270 xchk_refcountbt_xref_rmap( 271 271 struct xfs_scrub *sc, 272 - xfs_agblock_t bno, 273 - xfs_extlen_t len, 274 - xfs_nlink_t refcount) 272 + const struct xfs_refcount_irec *irec) 275 273 { 276 274 struct xchk_refcnt_check refchk = { 277 - .sc = sc, 278 - .bno = bno, 279 - .len = len, 280 - .refcount = refcount, 275 + .sc = sc, 276 + .bno = irec->rc_startblock, 277 + .len = irec->rc_blockcount, 278 + .refcount = irec->rc_refcount, 281 279 .seen = 0, 282 280 }; 283 281 struct xfs_rmap_irec low; ··· 289 291 290 292 /* Cross-reference with the rmapbt to confirm the refcount. */ 291 293 memset(&low, 0, sizeof(low)); 292 - low.rm_startblock = bno; 294 + low.rm_startblock = irec->rc_startblock; 293 295 memset(&high, 0xFF, sizeof(high)); 294 - high.rm_startblock = bno + len - 1; 296 + high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1; 295 297 296 298 INIT_LIST_HEAD(&refchk.fragments); 297 299 error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high, ··· 300 302 goto out_free; 301 303 302 304 xchk_refcountbt_process_rmap_fragments(&refchk); 303 - if (refcount != refchk.seen) 305 + if (irec->rc_refcount != refchk.seen) 304 306 xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); 305 307 306 308 out_free: ··· 313 315 /* Cross-reference with the other btrees. 
*/ 314 316 STATIC void 315 317 xchk_refcountbt_xref( 316 - struct xfs_scrub *sc, 317 - xfs_agblock_t agbno, 318 - xfs_extlen_t len, 319 - xfs_nlink_t refcount) 318 + struct xfs_scrub *sc, 319 + const struct xfs_refcount_irec *irec) 320 320 { 321 321 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) 322 322 return; 323 323 324 - xchk_xref_is_used_space(sc, agbno, len); 325 - xchk_xref_is_not_inode_chunk(sc, agbno, len); 326 - xchk_refcountbt_xref_rmap(sc, agbno, len, refcount); 324 + xchk_xref_is_used_space(sc, irec->rc_startblock, irec->rc_blockcount); 325 + xchk_xref_is_not_inode_chunk(sc, irec->rc_startblock, 326 + irec->rc_blockcount); 327 + xchk_refcountbt_xref_rmap(sc, irec); 327 328 } 328 329 329 330 /* Scrub a refcountbt record. */ ··· 331 334 struct xchk_btree *bs, 332 335 const union xfs_btree_rec *rec) 333 336 { 337 + struct xfs_refcount_irec irec; 334 338 xfs_agblock_t *cow_blocks = bs->private; 335 339 struct xfs_perag *pag = bs->cur->bc_ag.pag; 336 - xfs_agblock_t bno; 337 - xfs_extlen_t len; 338 - xfs_nlink_t refcount; 339 - bool has_cowflag; 340 340 341 - bno = be32_to_cpu(rec->refc.rc_startblock); 342 - len = be32_to_cpu(rec->refc.rc_blockcount); 343 - refcount = be32_to_cpu(rec->refc.rc_refcount); 341 + xfs_refcount_btrec_to_irec(rec, &irec); 344 342 345 - /* Only CoW records can have refcount == 1. */ 346 - has_cowflag = (bno & XFS_REFC_COW_START); 347 - if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag)) 343 + /* Check the domain and refcount are not incompatible. */ 344 + if (!xfs_refcount_check_domain(&irec)) 348 345 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); 349 - if (has_cowflag) 350 - (*cow_blocks) += len; 346 + 347 + if (irec.rc_domain == XFS_REFC_DOMAIN_COW) 348 + (*cow_blocks) += irec.rc_blockcount; 351 349 352 350 /* Check the extent. 
*/ 353 - bno &= ~XFS_REFC_COW_START; 354 - if (bno + len <= bno || 355 - !xfs_verify_agbno(pag, bno) || 356 - !xfs_verify_agbno(pag, bno + len - 1)) 351 + if (!xfs_verify_agbext(pag, irec.rc_startblock, irec.rc_blockcount)) 357 352 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); 358 353 359 - if (refcount == 0) 354 + if (irec.rc_refcount == 0) 360 355 xchk_btree_set_corrupt(bs->sc, bs->cur, 0); 361 356 362 - xchk_refcountbt_xref(bs->sc, bno, len, refcount); 357 + xchk_refcountbt_xref(bs->sc, &irec); 363 358 364 359 return 0; 365 360 } ··· 415 426 xfs_extlen_t len) 416 427 { 417 428 struct xfs_refcount_irec rc; 418 - bool has_cowflag; 419 429 int has_refcount; 420 430 int error; 421 431 ··· 422 434 return; 423 435 424 436 /* Find the CoW staging extent. */ 425 - error = xfs_refcount_lookup_le(sc->sa.refc_cur, 426 - agbno + XFS_REFC_COW_START, &has_refcount); 437 + error = xfs_refcount_lookup_le(sc->sa.refc_cur, XFS_REFC_DOMAIN_COW, 438 + agbno, &has_refcount); 427 439 if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) 428 440 return; 429 441 if (!has_refcount) { ··· 439 451 return; 440 452 } 441 453 442 - /* CoW flag must be set, refcount must be 1. */ 443 - has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START); 444 - if (!has_cowflag || rc.rc_refcount != 1) 454 + /* CoW lookup returned a shared extent record? */ 455 + if (rc.rc_domain != XFS_REFC_DOMAIN_COW) 445 456 xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); 446 457 447 458 /* Must be at least as long as what was passed in */ ··· 464 477 if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm)) 465 478 return; 466 479 467 - error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared); 480 + error = xfs_refcount_has_record(sc->sa.refc_cur, XFS_REFC_DOMAIN_SHARED, 481 + agbno, len, &shared); 468 482 if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) 469 483 return; 470 484 if (shared)
+33 -34
fs/xfs/xfs_attr_item.c
··· 245 245 return attrip; 246 246 } 247 247 248 - /* 249 - * Copy an attr format buffer from the given buf, and into the destination attr 250 - * format structure. 251 - */ 252 - STATIC int 253 - xfs_attri_copy_format( 254 - struct xfs_log_iovec *buf, 255 - struct xfs_attri_log_format *dst_attr_fmt) 256 - { 257 - struct xfs_attri_log_format *src_attr_fmt = buf->i_addr; 258 - size_t len; 259 - 260 - len = sizeof(struct xfs_attri_log_format); 261 - if (buf->i_len != len) { 262 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); 263 - return -EFSCORRUPTED; 264 - } 265 - 266 - memcpy((char *)dst_attr_fmt, (char *)src_attr_fmt, len); 267 - return 0; 268 - } 269 - 270 248 static inline struct xfs_attrd_log_item *ATTRD_ITEM(struct xfs_log_item *lip) 271 249 { 272 250 return container_of(lip, struct xfs_attrd_log_item, attrd_item); ··· 709 731 struct xfs_attri_log_nameval *nv; 710 732 const void *attr_value = NULL; 711 733 const void *attr_name; 712 - int error; 734 + size_t len; 713 735 714 736 attri_formatp = item->ri_buf[0].i_addr; 715 737 attr_name = item->ri_buf[1].i_addr; 716 738 717 739 /* Validate xfs_attri_log_format before the large memory allocation */ 740 + len = sizeof(struct xfs_attri_log_format); 741 + if (item->ri_buf[0].i_len != len) { 742 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 743 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 744 + return -EFSCORRUPTED; 745 + } 746 + 718 747 if (!xfs_attri_validate(mp, attri_formatp)) { 719 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 748 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 749 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 750 + return -EFSCORRUPTED; 751 + } 752 + 753 + /* Validate the attr name */ 754 + if (item->ri_buf[1].i_len != 755 + xlog_calc_iovec_len(attri_formatp->alfi_name_len)) { 756 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 757 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 720 758 return -EFSCORRUPTED; 721 759 } 722 760 723 761 if 
(!xfs_attr_namecheck(attr_name, attri_formatp->alfi_name_len)) { 724 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 762 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 763 + item->ri_buf[1].i_addr, item->ri_buf[1].i_len); 725 764 return -EFSCORRUPTED; 726 765 } 727 766 728 - if (attri_formatp->alfi_value_len) 767 + /* Validate the attr value, if present */ 768 + if (attri_formatp->alfi_value_len != 0) { 769 + if (item->ri_buf[2].i_len != xlog_calc_iovec_len(attri_formatp->alfi_value_len)) { 770 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 771 + item->ri_buf[0].i_addr, 772 + item->ri_buf[0].i_len); 773 + return -EFSCORRUPTED; 774 + } 775 + 729 776 attr_value = item->ri_buf[2].i_addr; 777 + } 730 778 731 779 /* 732 780 * Memory alloc failure will cause replay to abort. We attach the ··· 764 760 attri_formatp->alfi_value_len); 765 761 766 762 attrip = xfs_attri_init(mp, nv); 767 - error = xfs_attri_copy_format(&item->ri_buf[0], &attrip->attri_format); 768 - if (error) 769 - goto out; 763 + memcpy(&attrip->attri_format, attri_formatp, len); 770 764 771 765 /* 772 766 * The ATTRI has two references. One for the ATTRD and one for ATTRI to ··· 776 774 xfs_attri_release(attrip); 777 775 xfs_attri_log_nameval_put(nv); 778 776 return 0; 779 - out: 780 - xfs_attri_item_free(attrip); 781 - xfs_attri_log_nameval_put(nv); 782 - return error; 783 777 } 784 778 785 779 /* ··· 840 842 841 843 attrd_formatp = item->ri_buf[0].i_addr; 842 844 if (item->ri_buf[0].i_len != sizeof(struct xfs_attrd_log_format)) { 843 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); 845 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, 846 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 844 847 return -EFSCORRUPTED; 845 848 } 846 849
+29 -27
fs/xfs/xfs_bmap_item.c
··· 608 608 .iop_relog = xfs_bui_item_relog, 609 609 }; 610 610 611 - /* 612 - * Copy an BUI format buffer from the given buf, and into the destination 613 - * BUI format structure. The BUI/BUD items were designed not to need any 614 - * special alignment handling. 615 - */ 616 - static int 611 + static inline void 617 612 xfs_bui_copy_format( 618 - struct xfs_log_iovec *buf, 619 - struct xfs_bui_log_format *dst_bui_fmt) 613 + struct xfs_bui_log_format *dst, 614 + const struct xfs_bui_log_format *src) 620 615 { 621 - struct xfs_bui_log_format *src_bui_fmt; 622 - uint len; 616 + unsigned int i; 623 617 624 - src_bui_fmt = buf->i_addr; 625 - len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents); 618 + memcpy(dst, src, offsetof(struct xfs_bui_log_format, bui_extents)); 626 619 627 - if (buf->i_len == len) { 628 - memcpy(dst_bui_fmt, src_bui_fmt, len); 629 - return 0; 630 - } 631 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); 632 - return -EFSCORRUPTED; 620 + for (i = 0; i < src->bui_nextents; i++) 621 + memcpy(&dst->bui_extents[i], &src->bui_extents[i], 622 + sizeof(struct xfs_map_extent)); 633 623 } 634 624 635 625 /* ··· 636 646 struct xlog_recover_item *item, 637 647 xfs_lsn_t lsn) 638 648 { 639 - int error; 640 649 struct xfs_mount *mp = log->l_mp; 641 650 struct xfs_bui_log_item *buip; 642 651 struct xfs_bui_log_format *bui_formatp; 652 + size_t len; 643 653 644 654 bui_formatp = item->ri_buf[0].i_addr; 645 655 646 - if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) { 647 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); 656 + if (item->ri_buf[0].i_len < xfs_bui_log_format_sizeof(0)) { 657 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 658 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 648 659 return -EFSCORRUPTED; 649 660 } 650 - buip = xfs_bui_init(mp); 651 - error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format); 652 - if (error) { 653 - xfs_bui_item_free(buip); 654 - return error; 661 + 662 + if 
(bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) { 663 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 664 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 665 + return -EFSCORRUPTED; 655 666 } 667 + 668 + len = xfs_bui_log_format_sizeof(bui_formatp->bui_nextents); 669 + if (item->ri_buf[0].i_len != len) { 670 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 671 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 672 + return -EFSCORRUPTED; 673 + } 674 + 675 + buip = xfs_bui_init(mp); 676 + xfs_bui_copy_format(&buip->bui_format, bui_formatp); 656 677 atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents); 657 678 /* 658 679 * Insert the intent into the AIL directly and drop one reference so ··· 697 696 698 697 bud_formatp = item->ri_buf[0].i_addr; 699 698 if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) { 700 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); 699 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, 700 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 701 701 return -EFSCORRUPTED; 702 702 } 703 703
+7 -2
fs/xfs/xfs_error.c
··· 234 234 xfs_errortag_init( 235 235 struct xfs_mount *mp) 236 236 { 237 + int ret; 238 + 237 239 mp->m_errortag = kmem_zalloc(sizeof(unsigned int) * XFS_ERRTAG_MAX, 238 240 KM_MAYFAIL); 239 241 if (!mp->m_errortag) 240 242 return -ENOMEM; 241 243 242 - return xfs_sysfs_init(&mp->m_errortag_kobj, &xfs_errortag_ktype, 243 - &mp->m_kobj, "errortag"); 244 + ret = xfs_sysfs_init(&mp->m_errortag_kobj, &xfs_errortag_ktype, 245 + &mp->m_kobj, "errortag"); 246 + if (ret) 247 + kmem_free(mp->m_errortag); 248 + return ret; 244 249 } 245 250 246 251 void
+45 -49
fs/xfs/xfs_extfree_item.c
··· 66 66 xfs_efi_item_free(efip); 67 67 } 68 68 69 - /* 70 - * This returns the number of iovecs needed to log the given efi item. 71 - * We only need 1 iovec for an efi item. It just logs the efi_log_format 72 - * structure. 73 - */ 74 - static inline int 75 - xfs_efi_item_sizeof( 76 - struct xfs_efi_log_item *efip) 77 - { 78 - return sizeof(struct xfs_efi_log_format) + 79 - (efip->efi_format.efi_nextents - 1) * sizeof(xfs_extent_t); 80 - } 81 - 82 69 STATIC void 83 70 xfs_efi_item_size( 84 71 struct xfs_log_item *lip, 85 72 int *nvecs, 86 73 int *nbytes) 87 74 { 75 + struct xfs_efi_log_item *efip = EFI_ITEM(lip); 76 + 88 77 *nvecs += 1; 89 - *nbytes += xfs_efi_item_sizeof(EFI_ITEM(lip)); 78 + *nbytes += xfs_efi_log_format_sizeof(efip->efi_format.efi_nextents); 90 79 } 91 80 92 81 /* ··· 101 112 102 113 xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_EFI_FORMAT, 103 114 &efip->efi_format, 104 - xfs_efi_item_sizeof(efip)); 115 + xfs_efi_log_format_sizeof(efip->efi_format.efi_nextents)); 105 116 } 106 117 107 118 ··· 144 155 145 156 { 146 157 struct xfs_efi_log_item *efip; 147 - uint size; 148 158 149 159 ASSERT(nextents > 0); 150 160 if (nextents > XFS_EFI_MAX_FAST_EXTENTS) { 151 - size = (uint)(sizeof(struct xfs_efi_log_item) + 152 - ((nextents - 1) * sizeof(xfs_extent_t))); 153 - efip = kmem_zalloc(size, 0); 161 + efip = kzalloc(xfs_efi_log_item_sizeof(nextents), 162 + GFP_KERNEL | __GFP_NOFAIL); 154 163 } else { 155 164 efip = kmem_cache_zalloc(xfs_efi_cache, 156 165 GFP_KERNEL | __GFP_NOFAIL); ··· 175 188 { 176 189 xfs_efi_log_format_t *src_efi_fmt = buf->i_addr; 177 190 uint i; 178 - uint len = sizeof(xfs_efi_log_format_t) + 179 - (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_t); 180 - uint len32 = sizeof(xfs_efi_log_format_32_t) + 181 - (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_32_t); 182 - uint len64 = sizeof(xfs_efi_log_format_64_t) + 183 - (src_efi_fmt->efi_nextents - 1) * sizeof(xfs_extent_64_t); 191 + uint len = 
xfs_efi_log_format_sizeof(src_efi_fmt->efi_nextents); 192 + uint len32 = xfs_efi_log_format32_sizeof(src_efi_fmt->efi_nextents); 193 + uint len64 = xfs_efi_log_format64_sizeof(src_efi_fmt->efi_nextents); 184 194 185 195 if (buf->i_len == len) { 186 - memcpy((char *)dst_efi_fmt, (char*)src_efi_fmt, len); 196 + memcpy(dst_efi_fmt, src_efi_fmt, 197 + offsetof(struct xfs_efi_log_format, efi_extents)); 198 + for (i = 0; i < src_efi_fmt->efi_nextents; i++) 199 + memcpy(&dst_efi_fmt->efi_extents[i], 200 + &src_efi_fmt->efi_extents[i], 201 + sizeof(struct xfs_extent)); 187 202 return 0; 188 203 } else if (buf->i_len == len32) { 189 204 xfs_efi_log_format_32_t *src_efi_fmt_32 = buf->i_addr; ··· 216 227 } 217 228 return 0; 218 229 } 219 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); 230 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, NULL, buf->i_addr, 231 + buf->i_len); 220 232 return -EFSCORRUPTED; 221 233 } 222 234 ··· 236 246 kmem_cache_free(xfs_efd_cache, efdp); 237 247 } 238 248 239 - /* 240 - * This returns the number of iovecs needed to log the given efd item. 241 - * We only need 1 iovec for an efd item. It just logs the efd_log_format 242 - * structure. 
243 - */ 244 - static inline int 245 - xfs_efd_item_sizeof( 246 - struct xfs_efd_log_item *efdp) 247 - { 248 - return sizeof(xfs_efd_log_format_t) + 249 - (efdp->efd_format.efd_nextents - 1) * sizeof(xfs_extent_t); 250 - } 251 - 252 249 STATIC void 253 250 xfs_efd_item_size( 254 251 struct xfs_log_item *lip, 255 252 int *nvecs, 256 253 int *nbytes) 257 254 { 255 + struct xfs_efd_log_item *efdp = EFD_ITEM(lip); 256 + 258 257 *nvecs += 1; 259 - *nbytes += xfs_efd_item_sizeof(EFD_ITEM(lip)); 258 + *nbytes += xfs_efd_log_format_sizeof(efdp->efd_format.efd_nextents); 260 259 } 261 260 262 261 /* ··· 270 291 271 292 xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_EFD_FORMAT, 272 293 &efdp->efd_format, 273 - xfs_efd_item_sizeof(efdp)); 294 + xfs_efd_log_format_sizeof(efdp->efd_format.efd_nextents)); 274 295 } 275 296 276 297 /* ··· 319 340 ASSERT(nextents > 0); 320 341 321 342 if (nextents > XFS_EFD_MAX_FAST_EXTENTS) { 322 - efdp = kmem_zalloc(sizeof(struct xfs_efd_log_item) + 323 - (nextents - 1) * sizeof(struct xfs_extent), 324 - 0); 343 + efdp = kzalloc(xfs_efd_log_item_sizeof(nextents), 344 + GFP_KERNEL | __GFP_NOFAIL); 325 345 } else { 326 346 efdp = kmem_cache_zalloc(xfs_efd_cache, 327 347 GFP_KERNEL | __GFP_NOFAIL); ··· 711 733 712 734 efi_formatp = item->ri_buf[0].i_addr; 713 735 736 + if (item->ri_buf[0].i_len < xfs_efi_log_format_sizeof(0)) { 737 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 738 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 739 + return -EFSCORRUPTED; 740 + } 741 + 714 742 efip = xfs_efi_init(mp, efi_formatp->efi_nextents); 715 743 error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format); 716 744 if (error) { ··· 753 769 xfs_lsn_t lsn) 754 770 { 755 771 struct xfs_efd_log_format *efd_formatp; 772 + int buflen = item->ri_buf[0].i_len; 756 773 757 774 efd_formatp = item->ri_buf[0].i_addr; 758 - ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + 759 - ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || 
760 - (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) + 761 - ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t))))); 775 + 776 + if (buflen < sizeof(struct xfs_efd_log_format)) { 777 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, 778 + efd_formatp, buflen); 779 + return -EFSCORRUPTED; 780 + } 781 + 782 + if (item->ri_buf[0].i_len != xfs_efd_log_format32_sizeof( 783 + efd_formatp->efd_nextents) && 784 + item->ri_buf[0].i_len != xfs_efd_log_format64_sizeof( 785 + efd_formatp->efd_nextents)) { 786 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, 787 + efd_formatp, buflen); 788 + return -EFSCORRUPTED; 789 + } 762 790 763 791 xlog_recover_release_intent(log, XFS_LI_EFI, efd_formatp->efd_efi_id); 764 792 return 0;
+16
fs/xfs/xfs_extfree_item.h
··· 52 52 xfs_efi_log_format_t efi_format; 53 53 }; 54 54 55 + static inline size_t 56 + xfs_efi_log_item_sizeof( 57 + unsigned int nr) 58 + { 59 + return offsetof(struct xfs_efi_log_item, efi_format) + 60 + xfs_efi_log_format_sizeof(nr); 61 + } 62 + 55 63 /* 56 64 * This is the "extent free done" log item. It is used to log 57 65 * the fact that some extents earlier mentioned in an efi item ··· 71 63 uint efd_next_extent; 72 64 xfs_efd_log_format_t efd_format; 73 65 }; 66 + 67 + static inline size_t 68 + xfs_efd_log_item_sizeof( 69 + unsigned int nr) 70 + { 71 + return offsetof(struct xfs_efd_log_item, efd_format) + 72 + xfs_efd_log_format_sizeof(nr); 73 + } 74 74 75 75 /* 76 76 * Max number of extents in fast allocation path.
+4 -3
fs/xfs/xfs_file.c
··· 1261 1261 } 1262 1262 1263 1263 #ifdef CONFIG_FS_DAX 1264 - static int 1264 + static inline vm_fault_t 1265 1265 xfs_dax_fault( 1266 1266 struct vm_fault *vmf, 1267 1267 enum page_entry_size pe_size, ··· 1274 1274 &xfs_read_iomap_ops); 1275 1275 } 1276 1276 #else 1277 - static int 1277 + static inline vm_fault_t 1278 1278 xfs_dax_fault( 1279 1279 struct vm_fault *vmf, 1280 1280 enum page_entry_size pe_size, 1281 1281 bool write_fault, 1282 1282 pfn_t *pfn) 1283 1283 { 1284 - return 0; 1284 + ASSERT(0); 1285 + return VM_FAULT_SIGBUS; 1285 1286 } 1286 1287 #endif 1287 1288
+1 -1
fs/xfs/xfs_inode.c
··· 2818 2818 * Lock all the participating inodes. Depending upon whether 2819 2819 * the target_name exists in the target directory, and 2820 2820 * whether the target directory is the same as the source 2821 - * directory, we can lock from 2 to 4 inodes. 2821 + * directory, we can lock from 2 to 5 inodes. 2822 2822 */ 2823 2823 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); 2824 2824
+8 -2
fs/xfs/xfs_log_recover.c
··· 2552 2552 for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); 2553 2553 lip != NULL; 2554 2554 lip = xfs_trans_ail_cursor_next(ailp, &cur)) { 2555 + const struct xfs_item_ops *ops; 2556 + 2555 2557 if (!xlog_item_is_intent(lip)) 2556 2558 break; 2557 2559 ··· 2569 2567 * deferred ops, you /must/ attach them to the capture list in 2570 2568 * the recover routine or else those subsequent intents will be 2571 2569 * replayed in the wrong order! 2570 + * 2571 + * The recovery function can free the log item, so we must not 2572 + * access lip after it returns. 2572 2573 */ 2573 2574 spin_unlock(&ailp->ail_lock); 2574 - error = lip->li_ops->iop_recover(lip, &capture_list); 2575 + ops = lip->li_ops; 2576 + error = ops->iop_recover(lip, &capture_list); 2575 2577 spin_lock(&ailp->ail_lock); 2576 2578 if (error) { 2577 2579 trace_xlog_intent_recovery_failed(log->l_mp, error, 2578 - lip->li_ops->iop_recover); 2580 + ops->iop_recover); 2579 2581 break; 2580 2582 } 2581 2583 }
+19 -4
fs/xfs/xfs_ondisk.h
··· 118 118 /* log structures */ 119 119 XFS_CHECK_STRUCT_SIZE(struct xfs_buf_log_format, 88); 120 120 XFS_CHECK_STRUCT_SIZE(struct xfs_dq_logformat, 24); 121 - XFS_CHECK_STRUCT_SIZE(struct xfs_efd_log_format_32, 28); 122 - XFS_CHECK_STRUCT_SIZE(struct xfs_efd_log_format_64, 32); 123 - XFS_CHECK_STRUCT_SIZE(struct xfs_efi_log_format_32, 28); 124 - XFS_CHECK_STRUCT_SIZE(struct xfs_efi_log_format_64, 32); 121 + XFS_CHECK_STRUCT_SIZE(struct xfs_efd_log_format_32, 16); 122 + XFS_CHECK_STRUCT_SIZE(struct xfs_efd_log_format_64, 16); 123 + XFS_CHECK_STRUCT_SIZE(struct xfs_efi_log_format_32, 16); 124 + XFS_CHECK_STRUCT_SIZE(struct xfs_efi_log_format_64, 16); 125 125 XFS_CHECK_STRUCT_SIZE(struct xfs_extent_32, 12); 126 126 XFS_CHECK_STRUCT_SIZE(struct xfs_extent_64, 16); 127 127 XFS_CHECK_STRUCT_SIZE(struct xfs_log_dinode, 176); ··· 134 134 XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); 135 135 XFS_CHECK_STRUCT_SIZE(struct xfs_attri_log_format, 40); 136 136 XFS_CHECK_STRUCT_SIZE(struct xfs_attrd_log_format, 16); 137 + XFS_CHECK_STRUCT_SIZE(struct xfs_bui_log_format, 16); 138 + XFS_CHECK_STRUCT_SIZE(struct xfs_bud_log_format, 16); 139 + XFS_CHECK_STRUCT_SIZE(struct xfs_cui_log_format, 16); 140 + XFS_CHECK_STRUCT_SIZE(struct xfs_cud_log_format, 16); 141 + XFS_CHECK_STRUCT_SIZE(struct xfs_rui_log_format, 16); 142 + XFS_CHECK_STRUCT_SIZE(struct xfs_rud_log_format, 16); 143 + XFS_CHECK_STRUCT_SIZE(struct xfs_map_extent, 32); 144 + XFS_CHECK_STRUCT_SIZE(struct xfs_phys_extent, 16); 145 + 146 + XFS_CHECK_OFFSET(struct xfs_bui_log_format, bui_extents, 16); 147 + XFS_CHECK_OFFSET(struct xfs_cui_log_format, cui_extents, 16); 148 + XFS_CHECK_OFFSET(struct xfs_rui_log_format, rui_extents, 16); 149 + XFS_CHECK_OFFSET(struct xfs_efi_log_format, efi_extents, 16); 150 + XFS_CHECK_OFFSET(struct xfs_efi_log_format_32, efi_extents, 16); 151 + XFS_CHECK_OFFSET(struct xfs_efi_log_format_64, efi_extents, 16); 137 152 138 153 /* 139 154 * The v5 superblock format extended several v4 header 
structures with
+30 -27
fs/xfs/xfs_refcount_item.c
··· 523 523 type = refc_type; 524 524 break; 525 525 default: 526 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 526 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 527 + &cuip->cui_format, 528 + sizeof(cuip->cui_format)); 527 529 error = -EFSCORRUPTED; 528 530 goto abort_error; 529 531 } ··· 538 536 &new_fsb, &new_len, &rcur); 539 537 if (error == -EFSCORRUPTED) 540 538 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 541 - refc, sizeof(*refc)); 539 + &cuip->cui_format, 540 + sizeof(cuip->cui_format)); 542 541 if (error) 543 542 goto abort_error; 544 543 ··· 625 622 .iop_relog = xfs_cui_item_relog, 626 623 }; 627 624 628 - /* 629 - * Copy an CUI format buffer from the given buf, and into the destination 630 - * CUI format structure. The CUI/CUD items were designed not to need any 631 - * special alignment handling. 632 - */ 633 - static int 625 + static inline void 634 626 xfs_cui_copy_format( 635 - struct xfs_log_iovec *buf, 636 - struct xfs_cui_log_format *dst_cui_fmt) 627 + struct xfs_cui_log_format *dst, 628 + const struct xfs_cui_log_format *src) 637 629 { 638 - struct xfs_cui_log_format *src_cui_fmt; 639 - uint len; 630 + unsigned int i; 640 631 641 - src_cui_fmt = buf->i_addr; 642 - len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents); 632 + memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents)); 643 633 644 - if (buf->i_len == len) { 645 - memcpy(dst_cui_fmt, src_cui_fmt, len); 646 - return 0; 647 - } 648 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); 649 - return -EFSCORRUPTED; 634 + for (i = 0; i < src->cui_nextents; i++) 635 + memcpy(&dst->cui_extents[i], &src->cui_extents[i], 636 + sizeof(struct xfs_phys_extent)); 650 637 } 651 638 652 639 /* ··· 653 660 struct xlog_recover_item *item, 654 661 xfs_lsn_t lsn) 655 662 { 656 - int error; 657 663 struct xfs_mount *mp = log->l_mp; 658 664 struct xfs_cui_log_item *cuip; 659 665 struct xfs_cui_log_format *cui_formatp; 666 + size_t len; 660 667 661 668 cui_formatp = 
item->ri_buf[0].i_addr; 662 669 663 - cuip = xfs_cui_init(mp, cui_formatp->cui_nextents); 664 - error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format); 665 - if (error) { 666 - xfs_cui_item_free(cuip); 667 - return error; 670 + if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) { 671 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 672 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 673 + return -EFSCORRUPTED; 668 674 } 675 + 676 + len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents); 677 + if (item->ri_buf[0].i_len != len) { 678 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 679 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 680 + return -EFSCORRUPTED; 681 + } 682 + 683 + cuip = xfs_cui_init(mp, cui_formatp->cui_nextents); 684 + xfs_cui_copy_format(&cuip->cui_format, cui_formatp); 669 685 atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents); 670 686 /* 671 687 * Insert the intent into the AIL directly and drop one reference so ··· 708 706 709 707 cud_formatp = item->ri_buf[0].i_addr; 710 708 if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) { 711 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); 709 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, 710 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 712 711 return -EFSCORRUPTED; 713 712 } 714 713
+37 -33
fs/xfs/xfs_rmap_item.c
··· 155 155 return ruip; 156 156 } 157 157 158 - /* 159 - * Copy an RUI format buffer from the given buf, and into the destination 160 - * RUI format structure. The RUI/RUD items were designed not to need any 161 - * special alignment handling. 162 - */ 163 - STATIC int 164 - xfs_rui_copy_format( 165 - struct xfs_log_iovec *buf, 166 - struct xfs_rui_log_format *dst_rui_fmt) 167 - { 168 - struct xfs_rui_log_format *src_rui_fmt; 169 - uint len; 170 - 171 - src_rui_fmt = buf->i_addr; 172 - len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents); 173 - 174 - if (buf->i_len != len) { 175 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); 176 - return -EFSCORRUPTED; 177 - } 178 - 179 - memcpy(dst_rui_fmt, src_rui_fmt, len); 180 - return 0; 181 - } 182 - 183 158 static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip) 184 159 { 185 160 return container_of(lip, struct xfs_rud_log_item, rud_item); ··· 557 582 type = XFS_RMAP_FREE; 558 583 break; 559 584 default: 560 - XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL); 585 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 586 + &ruip->rui_format, 587 + sizeof(ruip->rui_format)); 561 588 error = -EFSCORRUPTED; 562 589 goto abort_error; 563 590 } ··· 629 652 .iop_relog = xfs_rui_item_relog, 630 653 }; 631 654 655 + static inline void 656 + xfs_rui_copy_format( 657 + struct xfs_rui_log_format *dst, 658 + const struct xfs_rui_log_format *src) 659 + { 660 + unsigned int i; 661 + 662 + memcpy(dst, src, offsetof(struct xfs_rui_log_format, rui_extents)); 663 + 664 + for (i = 0; i < src->rui_nextents; i++) 665 + memcpy(&dst->rui_extents[i], &src->rui_extents[i], 666 + sizeof(struct xfs_map_extent)); 667 + } 668 + 632 669 /* 633 670 * This routine is called to create an in-core extent rmap update 634 671 * item from the rui format structure which was logged on disk. 
··· 657 666 struct xlog_recover_item *item, 658 667 xfs_lsn_t lsn) 659 668 { 660 - int error; 661 669 struct xfs_mount *mp = log->l_mp; 662 670 struct xfs_rui_log_item *ruip; 663 671 struct xfs_rui_log_format *rui_formatp; 672 + size_t len; 664 673 665 674 rui_formatp = item->ri_buf[0].i_addr; 666 675 667 - ruip = xfs_rui_init(mp, rui_formatp->rui_nextents); 668 - error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format); 669 - if (error) { 670 - xfs_rui_item_free(ruip); 671 - return error; 676 + if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) { 677 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 678 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 679 + return -EFSCORRUPTED; 672 680 } 681 + 682 + len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents); 683 + if (item->ri_buf[0].i_len != len) { 684 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, 685 + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); 686 + return -EFSCORRUPTED; 687 + } 688 + 689 + ruip = xfs_rui_init(mp, rui_formatp->rui_nextents); 690 + xfs_rui_copy_format(&ruip->rui_format, rui_formatp); 673 691 atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents); 674 692 /* 675 693 * Insert the intent into the AIL directly and drop one reference so ··· 711 711 struct xfs_rud_log_format *rud_formatp; 712 712 713 713 rud_formatp = item->ri_buf[0].i_addr; 714 - ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format)); 714 + if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) { 715 + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, 716 + rud_formatp, item->ri_buf[0].i_len); 717 + return -EFSCORRUPTED; 718 + } 715 719 716 720 xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id); 717 721 return 0;
+4 -8
fs/xfs/xfs_super.c
··· 2028 2028 goto out_destroy_trans_cache; 2029 2029 2030 2030 xfs_efd_cache = kmem_cache_create("xfs_efd_item", 2031 - (sizeof(struct xfs_efd_log_item) + 2032 - (XFS_EFD_MAX_FAST_EXTENTS - 1) * 2033 - sizeof(struct xfs_extent)), 2034 - 0, 0, NULL); 2031 + xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS), 2032 + 0, 0, NULL); 2035 2033 if (!xfs_efd_cache) 2036 2034 goto out_destroy_buf_item_cache; 2037 2035 2038 2036 xfs_efi_cache = kmem_cache_create("xfs_efi_item", 2039 - (sizeof(struct xfs_efi_log_item) + 2040 - (XFS_EFI_MAX_FAST_EXTENTS - 1) * 2041 - sizeof(struct xfs_extent)), 2042 - 0, 0, NULL); 2037 + xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS), 2038 + 0, 0, NULL); 2043 2039 if (!xfs_efi_cache) 2044 2040 goto out_destroy_efd_cache; 2045 2041
+6 -1
fs/xfs/xfs_sysfs.h
··· 33 33 const char *name) 34 34 { 35 35 struct kobject *parent; 36 + int err; 36 37 37 38 parent = parent_kobj ? &parent_kobj->kobject : NULL; 38 39 init_completion(&kobj->complete); 39 - return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name); 40 + err = kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name); 41 + if (err) 42 + kobject_put(&kobj->kobject); 43 + 44 + return err; 40 45 } 41 46 42 47 static inline void
+39 -9
fs/xfs/xfs_trace.h
··· 799 799 TRACE_DEFINE_ENUM(PE_SIZE_PMD); 800 800 TRACE_DEFINE_ENUM(PE_SIZE_PUD); 801 801 802 + TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED); 803 + TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW); 804 + 802 805 TRACE_EVENT(xfs_filemap_fault, 803 806 TP_PROTO(struct xfs_inode *ip, enum page_entry_size pe_size, 804 807 bool write_fault), ··· 2928 2925 TP_STRUCT__entry( 2929 2926 __field(dev_t, dev) 2930 2927 __field(xfs_agnumber_t, agno) 2928 + __field(enum xfs_refc_domain, domain) 2931 2929 __field(xfs_agblock_t, startblock) 2932 2930 __field(xfs_extlen_t, blockcount) 2933 2931 __field(xfs_nlink_t, refcount) ··· 2936 2932 TP_fast_assign( 2937 2933 __entry->dev = mp->m_super->s_dev; 2938 2934 __entry->agno = agno; 2935 + __entry->domain = irec->rc_domain; 2939 2936 __entry->startblock = irec->rc_startblock; 2940 2937 __entry->blockcount = irec->rc_blockcount; 2941 2938 __entry->refcount = irec->rc_refcount; 2942 2939 ), 2943 - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x refcount %u", 2940 + TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u", 2944 2941 MAJOR(__entry->dev), MINOR(__entry->dev), 2945 2942 __entry->agno, 2943 + __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS), 2946 2944 __entry->startblock, 2947 2945 __entry->blockcount, 2948 2946 __entry->refcount) ··· 2964 2958 TP_STRUCT__entry( 2965 2959 __field(dev_t, dev) 2966 2960 __field(xfs_agnumber_t, agno) 2961 + __field(enum xfs_refc_domain, domain) 2967 2962 __field(xfs_agblock_t, startblock) 2968 2963 __field(xfs_extlen_t, blockcount) 2969 2964 __field(xfs_nlink_t, refcount) ··· 2973 2966 TP_fast_assign( 2974 2967 __entry->dev = mp->m_super->s_dev; 2975 2968 __entry->agno = agno; 2969 + __entry->domain = irec->rc_domain; 2976 2970 __entry->startblock = irec->rc_startblock; 2977 2971 __entry->blockcount = irec->rc_blockcount; 2978 2972 __entry->refcount = irec->rc_refcount; 2979 2973 __entry->agbno = agbno; 2980 2974 ), 2981 - TP_printk("dev %d:%d agno 0x%x agbno 0x%x 
fsbcount 0x%x refcount %u @ agbno 0x%x", 2975 + TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x", 2982 2976 MAJOR(__entry->dev), MINOR(__entry->dev), 2983 2977 __entry->agno, 2978 + __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS), 2984 2979 __entry->startblock, 2985 2980 __entry->blockcount, 2986 2981 __entry->refcount, ··· 3003 2994 TP_STRUCT__entry( 3004 2995 __field(dev_t, dev) 3005 2996 __field(xfs_agnumber_t, agno) 2997 + __field(enum xfs_refc_domain, i1_domain) 3006 2998 __field(xfs_agblock_t, i1_startblock) 3007 2999 __field(xfs_extlen_t, i1_blockcount) 3008 3000 __field(xfs_nlink_t, i1_refcount) 3001 + __field(enum xfs_refc_domain, i2_domain) 3009 3002 __field(xfs_agblock_t, i2_startblock) 3010 3003 __field(xfs_extlen_t, i2_blockcount) 3011 3004 __field(xfs_nlink_t, i2_refcount) ··· 3015 3004 TP_fast_assign( 3016 3005 __entry->dev = mp->m_super->s_dev; 3017 3006 __entry->agno = agno; 3007 + __entry->i1_domain = i1->rc_domain; 3018 3008 __entry->i1_startblock = i1->rc_startblock; 3019 3009 __entry->i1_blockcount = i1->rc_blockcount; 3020 3010 __entry->i1_refcount = i1->rc_refcount; 3011 + __entry->i2_domain = i2->rc_domain; 3021 3012 __entry->i2_startblock = i2->rc_startblock; 3022 3013 __entry->i2_blockcount = i2->rc_blockcount; 3023 3014 __entry->i2_refcount = i2->rc_refcount; 3024 3015 ), 3025 - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x refcount %u -- " 3026 - "agbno 0x%x fsbcount 0x%x refcount %u", 3016 + TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- " 3017 + "dom %s agbno 0x%x fsbcount 0x%x refcount %u", 3027 3018 MAJOR(__entry->dev), MINOR(__entry->dev), 3028 3019 __entry->agno, 3020 + __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS), 3029 3021 __entry->i1_startblock, 3030 3022 __entry->i1_blockcount, 3031 3023 __entry->i1_refcount, 3024 + __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS), 3032 3025 __entry->i2_startblock, 3033 3026 
__entry->i2_blockcount, 3034 3027 __entry->i2_refcount) ··· 3053 3038 TP_STRUCT__entry( 3054 3039 __field(dev_t, dev) 3055 3040 __field(xfs_agnumber_t, agno) 3041 + __field(enum xfs_refc_domain, i1_domain) 3056 3042 __field(xfs_agblock_t, i1_startblock) 3057 3043 __field(xfs_extlen_t, i1_blockcount) 3058 3044 __field(xfs_nlink_t, i1_refcount) 3045 + __field(enum xfs_refc_domain, i2_domain) 3059 3046 __field(xfs_agblock_t, i2_startblock) 3060 3047 __field(xfs_extlen_t, i2_blockcount) 3061 3048 __field(xfs_nlink_t, i2_refcount) ··· 3066 3049 TP_fast_assign( 3067 3050 __entry->dev = mp->m_super->s_dev; 3068 3051 __entry->agno = agno; 3052 + __entry->i1_domain = i1->rc_domain; 3069 3053 __entry->i1_startblock = i1->rc_startblock; 3070 3054 __entry->i1_blockcount = i1->rc_blockcount; 3071 3055 __entry->i1_refcount = i1->rc_refcount; 3056 + __entry->i2_domain = i2->rc_domain; 3072 3057 __entry->i2_startblock = i2->rc_startblock; 3073 3058 __entry->i2_blockcount = i2->rc_blockcount; 3074 3059 __entry->i2_refcount = i2->rc_refcount; 3075 3060 __entry->agbno = agbno; 3076 3061 ), 3077 - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x refcount %u -- " 3078 - "agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x", 3062 + TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- " 3063 + "dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x", 3079 3064 MAJOR(__entry->dev), MINOR(__entry->dev), 3080 3065 __entry->agno, 3066 + __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS), 3081 3067 __entry->i1_startblock, 3082 3068 __entry->i1_blockcount, 3083 3069 __entry->i1_refcount, 3070 + __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS), 3084 3071 __entry->i2_startblock, 3085 3072 __entry->i2_blockcount, 3086 3073 __entry->i2_refcount, ··· 3107 3086 TP_STRUCT__entry( 3108 3087 __field(dev_t, dev) 3109 3088 __field(xfs_agnumber_t, agno) 3089 + __field(enum xfs_refc_domain, i1_domain) 3110 3090 __field(xfs_agblock_t, i1_startblock) 
3111 3091 __field(xfs_extlen_t, i1_blockcount) 3112 3092 __field(xfs_nlink_t, i1_refcount) 3093 + __field(enum xfs_refc_domain, i2_domain) 3113 3094 __field(xfs_agblock_t, i2_startblock) 3114 3095 __field(xfs_extlen_t, i2_blockcount) 3115 3096 __field(xfs_nlink_t, i2_refcount) 3097 + __field(enum xfs_refc_domain, i3_domain) 3116 3098 __field(xfs_agblock_t, i3_startblock) 3117 3099 __field(xfs_extlen_t, i3_blockcount) 3118 3100 __field(xfs_nlink_t, i3_refcount) ··· 3123 3099 TP_fast_assign( 3124 3100 __entry->dev = mp->m_super->s_dev; 3125 3101 __entry->agno = agno; 3102 + __entry->i1_domain = i1->rc_domain; 3126 3103 __entry->i1_startblock = i1->rc_startblock; 3127 3104 __entry->i1_blockcount = i1->rc_blockcount; 3128 3105 __entry->i1_refcount = i1->rc_refcount; 3106 + __entry->i2_domain = i2->rc_domain; 3129 3107 __entry->i2_startblock = i2->rc_startblock; 3130 3108 __entry->i2_blockcount = i2->rc_blockcount; 3131 3109 __entry->i2_refcount = i2->rc_refcount; 3110 + __entry->i3_domain = i3->rc_domain; 3132 3111 __entry->i3_startblock = i3->rc_startblock; 3133 3112 __entry->i3_blockcount = i3->rc_blockcount; 3134 3113 __entry->i3_refcount = i3->rc_refcount; 3135 3114 ), 3136 - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x refcount %u -- " 3137 - "agbno 0x%x fsbcount 0x%x refcount %u -- " 3138 - "agbno 0x%x fsbcount 0x%x refcount %u", 3115 + TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- " 3116 + "dom %s agbno 0x%x fsbcount 0x%x refcount %u -- " 3117 + "dom %s agbno 0x%x fsbcount 0x%x refcount %u", 3139 3118 MAJOR(__entry->dev), MINOR(__entry->dev), 3140 3119 __entry->agno, 3120 + __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS), 3141 3121 __entry->i1_startblock, 3142 3122 __entry->i1_blockcount, 3143 3123 __entry->i1_refcount, 3124 + __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS), 3144 3125 __entry->i2_startblock, 3145 3126 __entry->i2_blockcount, 3146 3127 __entry->i2_refcount, 3128 + 
__print_symbolic(__entry->i3_domain, XFS_REFC_DOMAIN_STRINGS), 3147 3129 __entry->i3_startblock, 3148 3130 __entry->i3_blockcount, 3149 3131 __entry->i3_refcount)
+1 -2
fs/xfs/xfs_trans_ail.c
··· 730 730 xfs_ail_push_all_sync( 731 731 struct xfs_ail *ailp) 732 732 { 733 - struct xfs_log_item *lip; 734 733 DEFINE_WAIT(wait); 735 734 736 735 spin_lock(&ailp->ail_lock); 737 - while ((lip = xfs_ail_max(ailp)) != NULL) { 736 + while (xfs_ail_max(ailp) != NULL) { 738 737 prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE); 739 738 wake_up_process(ailp->ail_task); 740 739 spin_unlock(&ailp->ail_lock);
+1 -1
include/asm-generic/compat.h
··· 15 15 #endif 16 16 17 17 #ifndef compat_arg_u64 18 - #ifdef CONFIG_CPU_BIG_ENDIAN 18 + #ifndef CONFIG_CPU_BIG_ENDIAN 19 19 #define compat_arg_u64(name) u32 name##_lo, u32 name##_hi 20 20 #define compat_arg_u64_dual(name) u32, name##_lo, u32, name##_hi 21 21 #else
+1 -1
include/linux/efi.h
··· 1222 1222 arch_efi_call_virt_teardown(); \ 1223 1223 }) 1224 1224 1225 - #define EFI_RANDOM_SEED_SIZE 64U 1225 + #define EFI_RANDOM_SEED_SIZE 32U // BLAKE2S_HASH_SIZE 1226 1226 1227 1227 struct linux_efi_random_seed { 1228 1228 u32 size;
+9 -4
include/linux/fortify-string.h
··· 454 454 455 455 #define __fortify_memcpy_chk(p, q, size, p_size, q_size, \ 456 456 p_size_field, q_size_field, op) ({ \ 457 - size_t __fortify_size = (size_t)(size); \ 458 - WARN_ONCE(fortify_memcpy_chk(__fortify_size, p_size, q_size, \ 459 - p_size_field, q_size_field, #op), \ 457 + const size_t __fortify_size = (size_t)(size); \ 458 + const size_t __p_size = (p_size); \ 459 + const size_t __q_size = (q_size); \ 460 + const size_t __p_size_field = (p_size_field); \ 461 + const size_t __q_size_field = (q_size_field); \ 462 + WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \ 463 + __q_size, __p_size_field, \ 464 + __q_size_field, #op), \ 460 465 #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \ 461 466 __fortify_size, \ 462 467 "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \ 463 - p_size_field); \ 468 + __p_size_field); \ 464 469 __underlying_##op(p, q, __fortify_size); \ 465 470 }) 466 471
+27 -21
include/net/netlink.h
··· 181 181 NLA_S64, 182 182 NLA_BITFIELD32, 183 183 NLA_REJECT, 184 + NLA_BE16, 185 + NLA_BE32, 184 186 __NLA_TYPE_MAX, 185 187 }; 186 188 ··· 233 231 * NLA_U32, NLA_U64, 234 232 * NLA_S8, NLA_S16, 235 233 * NLA_S32, NLA_S64, 234 + * NLA_BE16, NLA_BE32, 236 235 * NLA_MSECS Leaving the length field zero will verify the 237 236 * given type fits, using it verifies minimum length 238 237 * just like "All other" ··· 264 261 * NLA_U16, 265 262 * NLA_U32, 266 263 * NLA_U64, 264 + * NLA_BE16, 265 + * NLA_BE32, 267 266 * NLA_S8, 268 267 * NLA_S16, 269 268 * NLA_S32, ··· 322 317 u8 validation_type; 323 318 u16 len; 324 319 union { 325 - const u32 bitfield32_valid; 326 - const u32 mask; 327 - const char *reject_message; 328 - const struct nla_policy *nested_policy; 329 - struct netlink_range_validation *range; 330 - struct netlink_range_validation_signed *range_signed; 331 - struct { 332 - s16 min, max; 333 - u8 network_byte_order:1; 334 - }; 335 - int (*validate)(const struct nlattr *attr, 336 - struct netlink_ext_ack *extack); 337 - /* This entry is special, and used for the attribute at index 0 320 + /** 321 + * @strict_start_type: first attribute to validate strictly 322 + * 323 + * This entry is special, and used for the attribute at index 0 338 324 * only, and specifies special data about the policy, namely it 339 325 * specifies the "boundary type" where strict length validation 340 326 * starts for any attribute types >= this value, also, strict ··· 344 348 * was added to enforce strict validation from thereon. 
345 349 */ 346 350 u16 strict_start_type; 351 + 352 + /* private: use NLA_POLICY_*() to set */ 353 + const u32 bitfield32_valid; 354 + const u32 mask; 355 + const char *reject_message; 356 + const struct nla_policy *nested_policy; 357 + struct netlink_range_validation *range; 358 + struct netlink_range_validation_signed *range_signed; 359 + struct { 360 + s16 min, max; 361 + }; 362 + int (*validate)(const struct nlattr *attr, 363 + struct netlink_ext_ack *extack); 347 364 }; 348 365 }; 349 366 ··· 378 369 (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || tp == NLA_U64) 379 370 #define __NLA_IS_SINT_TYPE(tp) \ 380 371 (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64) 372 + #define __NLA_IS_BEINT_TYPE(tp) \ 373 + (tp == NLA_BE16 || tp == NLA_BE32) 381 374 382 375 #define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition)) 383 376 #define NLA_ENSURE_UINT_TYPE(tp) \ ··· 393 382 #define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \ 394 383 (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \ 395 384 __NLA_IS_SINT_TYPE(tp) || \ 385 + __NLA_IS_BEINT_TYPE(tp) || \ 396 386 tp == NLA_MSECS || \ 397 387 tp == NLA_BINARY) + tp) 398 388 #define NLA_ENSURE_NO_VALIDATION_PTR(tp) \ ··· 401 389 tp != NLA_REJECT && \ 402 390 tp != NLA_NESTED && \ 403 391 tp != NLA_NESTED_ARRAY) + tp) 392 + #define NLA_ENSURE_BEINT_TYPE(tp) \ 393 + (__NLA_ENSURE(__NLA_IS_BEINT_TYPE(tp)) + tp) 404 394 405 395 #define NLA_POLICY_RANGE(tp, _min, _max) { \ 406 396 .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \ ··· 433 419 .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \ 434 420 .validation_type = NLA_VALIDATE_MAX, \ 435 421 .max = _max, \ 436 - .network_byte_order = 0, \ 437 - } 438 - 439 - #define NLA_POLICY_MAX_BE(tp, _max) { \ 440 - .type = NLA_ENSURE_UINT_TYPE(tp), \ 441 - .validation_type = NLA_VALIDATE_MAX, \ 442 - .max = _max, \ 443 - .network_byte_order = 1, \ 444 422 } 445 423 446 424 #define NLA_POLICY_MASK(tp, _mask) { \
+7
include/net/sock.h
··· 1889 1889 void sock_kzfree_s(struct sock *sk, void *mem, int size); 1890 1890 void sk_send_sigurg(struct sock *sk); 1891 1891 1892 + static inline void sock_replace_proto(struct sock *sk, struct proto *proto) 1893 + { 1894 + if (sk->sk_socket) 1895 + clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 1896 + WRITE_ONCE(sk->sk_prot, proto); 1897 + } 1898 + 1892 1899 struct sockcm_cookie { 1893 1900 u64 transmit_time; 1894 1901 u32 mark;
+2 -2
kernel/events/hw_breakpoint_test.c
··· 295 295 { 296 296 /* Most test cases want 2 distinct CPUs. */ 297 297 if (num_online_cpus() < 2) 298 - return -EINVAL; 298 + kunit_skip(test, "not enough cpus"); 299 299 300 300 /* Want the system to not use breakpoints elsewhere. */ 301 301 if (hw_breakpoint_is_used()) 302 - return -EBUSY; 302 + kunit_skip(test, "hw breakpoint already in use"); 303 303 304 304 return 0; 305 305 }
+4 -1
kernel/kprobes.c
··· 2429 2429 if (!kprobes_all_disarmed && kprobe_disabled(p)) { 2430 2430 p->flags &= ~KPROBE_FLAG_DISABLED; 2431 2431 ret = arm_kprobe(p); 2432 - if (ret) 2432 + if (ret) { 2433 2433 p->flags |= KPROBE_FLAG_DISABLED; 2434 + if (p != kp) 2435 + kp->flags |= KPROBE_FLAG_DISABLED; 2436 + } 2434 2437 } 2435 2438 out: 2436 2439 mutex_unlock(&kprobe_mutex);
+4 -1
kernel/trace/fprobe.c
··· 141 141 return -E2BIG; 142 142 143 143 fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler); 144 + if (!fp->rethook) 145 + return -ENOMEM; 144 146 for (i = 0; i < size; i++) { 145 147 struct fprobe_rethook_node *node; 146 148 ··· 303 301 { 304 302 int ret; 305 303 306 - if (!fp || fp->ops.func != fprobe_handler) 304 + if (!fp || (fp->ops.saved_func != fprobe_handler && 305 + fp->ops.saved_func != fprobe_kprobe_handler)) 307 306 return -EINVAL; 308 307 309 308 /*
+3 -13
kernel/trace/ftrace.c
··· 3028 3028 command |= FTRACE_UPDATE_TRACE_FUNC; 3029 3029 } 3030 3030 3031 - if (!command || !ftrace_enabled) { 3032 - /* 3033 - * If these are dynamic or per_cpu ops, they still 3034 - * need their data freed. Since, function tracing is 3035 - * not currently active, we can just free them 3036 - * without synchronizing all CPUs. 3037 - */ 3038 - if (ops->flags & FTRACE_OPS_FL_DYNAMIC) 3039 - goto free_ops; 3040 - 3041 - return 0; 3042 - } 3031 + if (!command || !ftrace_enabled) 3032 + goto out; 3043 3033 3044 3034 /* 3045 3035 * If the ops uses a trampoline, then it needs to be ··· 3066 3076 removed_ops = NULL; 3067 3077 ops->flags &= ~FTRACE_OPS_FL_REMOVING; 3068 3078 3079 + out: 3069 3080 /* 3070 3081 * Dynamic ops may be freed, we must make sure that all 3071 3082 * callers are done before leaving this function. ··· 3094 3103 if (IS_ENABLED(CONFIG_PREEMPTION)) 3095 3104 synchronize_rcu_tasks(); 3096 3105 3097 - free_ops: 3098 3106 ftrace_trampoline_free(ops); 3099 3107 } 3100 3108
+7 -11
kernel/trace/kprobe_event_gen_test.c
··· 100 100 KPROBE_GEN_TEST_FUNC, 101 101 KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1); 102 102 if (ret) 103 - goto free; 103 + goto out; 104 104 105 105 /* Use kprobe_event_add_fields to add the rest of the fields */ 106 106 107 107 ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3); 108 108 if (ret) 109 - goto free; 109 + goto out; 110 110 111 111 /* 112 112 * This actually creates the event. 113 113 */ 114 114 ret = kprobe_event_gen_cmd_end(&cmd); 115 115 if (ret) 116 - goto free; 116 + goto out; 117 117 118 118 /* 119 119 * Now get the gen_kprobe_test event file. We need to prevent ··· 136 136 goto delete; 137 137 } 138 138 out: 139 + kfree(buf); 139 140 return ret; 140 141 delete: 141 142 /* We got an error after creating the event, delete it */ 142 143 ret = kprobe_event_delete("gen_kprobe_test"); 143 - free: 144 - kfree(buf); 145 - 146 144 goto out; 147 145 } 148 146 ··· 168 170 KPROBE_GEN_TEST_FUNC, 169 171 "$retval"); 170 172 if (ret) 171 - goto free; 173 + goto out; 172 174 173 175 /* 174 176 * This actually creates the event. 175 177 */ 176 178 ret = kretprobe_event_gen_cmd_end(&cmd); 177 179 if (ret) 178 - goto free; 180 + goto out; 179 181 180 182 /* 181 183 * Now get the gen_kretprobe_test event file. We need to ··· 199 201 goto delete; 200 202 } 201 203 out: 204 + kfree(buf); 202 205 return ret; 203 206 delete: 204 207 /* We got an error after creating the event, delete it */ 205 208 ret = kprobe_event_delete("gen_kretprobe_test"); 206 - free: 207 - kfree(buf); 208 - 209 209 goto out; 210 210 } 211 211
+11
kernel/trace/ring_buffer.c
··· 937 937 struct ring_buffer_per_cpu *cpu_buffer; 938 938 struct rb_irq_work *rbwork; 939 939 940 + if (!buffer) 941 + return; 942 + 940 943 if (cpu == RING_BUFFER_ALL_CPUS) { 941 944 942 945 /* Wake up individual ones too. One level recursion */ ··· 948 945 949 946 rbwork = &buffer->irq_work; 950 947 } else { 948 + if (WARN_ON_ONCE(!buffer->buffers)) 949 + return; 950 + if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) 951 + return; 952 + 951 953 cpu_buffer = buffer->buffers[cpu]; 954 + /* The CPU buffer may not have been initialized yet */ 955 + if (!cpu_buffer) 956 + return; 952 957 rbwork = &cpu_buffer->irq_work; 953 958 } 954 959
+15 -26
lib/nlattr.c
··· 124 124 range->max = U8_MAX; 125 125 break; 126 126 case NLA_U16: 127 + case NLA_BE16: 127 128 case NLA_BINARY: 128 129 range->max = U16_MAX; 129 130 break; 130 131 case NLA_U32: 132 + case NLA_BE32: 131 133 range->max = U32_MAX; 132 134 break; 133 135 case NLA_U64: ··· 161 159 } 162 160 } 163 161 164 - static u64 nla_get_attr_bo(const struct nla_policy *pt, 165 - const struct nlattr *nla) 166 - { 167 - switch (pt->type) { 168 - case NLA_U16: 169 - if (pt->network_byte_order) 170 - return ntohs(nla_get_be16(nla)); 171 - 172 - return nla_get_u16(nla); 173 - case NLA_U32: 174 - if (pt->network_byte_order) 175 - return ntohl(nla_get_be32(nla)); 176 - 177 - return nla_get_u32(nla); 178 - case NLA_U64: 179 - if (pt->network_byte_order) 180 - return be64_to_cpu(nla_get_be64(nla)); 181 - 182 - return nla_get_u64(nla); 183 - } 184 - 185 - WARN_ON_ONCE(1); 186 - return 0; 187 - } 188 - 189 162 static int nla_validate_range_unsigned(const struct nla_policy *pt, 190 163 const struct nlattr *nla, 191 164 struct netlink_ext_ack *extack, ··· 174 197 value = nla_get_u8(nla); 175 198 break; 176 199 case NLA_U16: 200 + value = nla_get_u16(nla); 201 + break; 177 202 case NLA_U32: 203 + value = nla_get_u32(nla); 204 + break; 178 205 case NLA_U64: 179 - value = nla_get_attr_bo(pt, nla); 206 + value = nla_get_u64(nla); 180 207 break; 181 208 case NLA_MSECS: 182 209 value = nla_get_u64(nla); 183 210 break; 184 211 case NLA_BINARY: 185 212 value = nla_len(nla); 213 + break; 214 + case NLA_BE16: 215 + value = ntohs(nla_get_be16(nla)); 216 + break; 217 + case NLA_BE32: 218 + value = ntohl(nla_get_be32(nla)); 186 219 break; 187 220 default: 188 221 return -EINVAL; ··· 321 334 case NLA_U64: 322 335 case NLA_MSECS: 323 336 case NLA_BINARY: 337 + case NLA_BE16: 338 + case NLA_BE32: 324 339 return nla_validate_range_unsigned(pt, nla, extack, validate); 325 340 case NLA_S8: 326 341 case NLA_S16:
+12 -6
net/bluetooth/hci_conn.c
··· 1067 1067 hdev->acl_cnt += conn->sent; 1068 1068 } else { 1069 1069 struct hci_conn *acl = conn->link; 1070 + 1070 1071 if (acl) { 1071 1072 acl->link = NULL; 1072 1073 hci_conn_drop(acl); 1074 + } 1075 + 1076 + /* Unacked ISO frames */ 1077 + if (conn->type == ISO_LINK) { 1078 + if (hdev->iso_pkts) 1079 + hdev->iso_cnt += conn->sent; 1080 + else if (hdev->le_pkts) 1081 + hdev->le_cnt += conn->sent; 1082 + else 1083 + hdev->acl_cnt += conn->sent; 1073 1084 } 1074 1085 } 1075 1086 ··· 1772 1761 if (!cis) 1773 1762 return ERR_PTR(-ENOMEM); 1774 1763 cis->cleanup = cis_cleanup; 1764 + cis->dst_type = dst_type; 1775 1765 } 1776 1766 1777 1767 if (cis->state == BT_CONNECTED) ··· 2151 2139 { 2152 2140 struct hci_conn *le; 2153 2141 struct hci_conn *cis; 2154 - 2155 - /* Convert from ISO socket address type to HCI address type */ 2156 - if (dst_type == BDADDR_LE_PUBLIC) 2157 - dst_type = ADDR_LE_DEV_PUBLIC; 2158 - else 2159 - dst_type = ADDR_LE_DEV_RANDOM; 2160 2142 2161 2143 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) 2162 2144 le = hci_connect_le(hdev, dst, dst_type, false,
+12 -2
net/bluetooth/iso.c
··· 235 235 return err; 236 236 } 237 237 238 + static inline u8 le_addr_type(u8 bdaddr_type) 239 + { 240 + if (bdaddr_type == BDADDR_LE_PUBLIC) 241 + return ADDR_LE_DEV_PUBLIC; 242 + else 243 + return ADDR_LE_DEV_RANDOM; 244 + } 245 + 238 246 static int iso_connect_bis(struct sock *sk) 239 247 { 240 248 struct iso_conn *conn; ··· 336 328 /* Just bind if DEFER_SETUP has been set */ 337 329 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { 338 330 hcon = hci_bind_cis(hdev, &iso_pi(sk)->dst, 339 - iso_pi(sk)->dst_type, &iso_pi(sk)->qos); 331 + le_addr_type(iso_pi(sk)->dst_type), 332 + &iso_pi(sk)->qos); 340 333 if (IS_ERR(hcon)) { 341 334 err = PTR_ERR(hcon); 342 335 goto done; 343 336 } 344 337 } else { 345 338 hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst, 346 - iso_pi(sk)->dst_type, &iso_pi(sk)->qos); 339 + le_addr_type(iso_pi(sk)->dst_type), 340 + &iso_pi(sk)->qos); 347 341 if (IS_ERR(hcon)) { 348 342 err = PTR_ERR(hcon); 349 343 goto done;
+73 -13
net/bluetooth/l2cap_core.c
··· 1990 1990 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) 1991 1991 continue; 1992 1992 1993 - if (c->psm == psm) { 1993 + if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) { 1994 1994 int src_match, dst_match; 1995 1995 int src_any, dst_any; 1996 1996 ··· 3764 3764 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 3765 3765 sizeof(rfc), (unsigned long) &rfc, endptr - ptr); 3766 3766 3767 - if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3767 + if (remote_efs && 3768 + test_bit(FLAG_EFS_ENABLE, &chan->flags)) { 3768 3769 chan->remote_id = efs.id; 3769 3770 chan->remote_stype = efs.stype; 3770 3771 chan->remote_msdu = le16_to_cpu(efs.msdu); ··· 5814 5813 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), 5815 5814 scid, mtu, mps); 5816 5815 5816 + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A 5817 + * page 1059: 5818 + * 5819 + * Valid range: 0x0001-0x00ff 5820 + * 5821 + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges 5822 + */ 5823 + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { 5824 + result = L2CAP_CR_LE_BAD_PSM; 5825 + chan = NULL; 5826 + goto response; 5827 + } 5828 + 5817 5829 /* Check if we have socket listening on psm */ 5818 5830 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, 5819 5831 &conn->hcon->dst, LE_LINK); ··· 6014 6000 } 6015 6001 6016 6002 psm = req->psm; 6003 + 6004 + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A 6005 + * page 1059: 6006 + * 6007 + * Valid range: 0x0001-0x00ff 6008 + * 6009 + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges 6010 + */ 6011 + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { 6012 + result = L2CAP_CR_LE_BAD_PSM; 6013 + goto response; 6014 + } 6017 6015 6018 6016 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); 6019 6017 ··· 6911 6885 struct l2cap_ctrl *control, 6912 6886 struct sk_buff *skb, u8 event) 6913 6887 { 6888 + struct l2cap_ctrl local_control; 6914 6889 int err = 0; 6915 
6890 bool skb_in_use = false; 6916 6891 ··· 6936 6909 chan->buffer_seq = chan->expected_tx_seq; 6937 6910 skb_in_use = true; 6938 6911 6912 + /* l2cap_reassemble_sdu may free skb, hence invalidate 6913 + * control, so make a copy in advance to use it after 6914 + * l2cap_reassemble_sdu returns and to avoid the race 6915 + * condition, for example: 6916 + * 6917 + * The current thread calls: 6918 + * l2cap_reassemble_sdu 6919 + * chan->ops->recv == l2cap_sock_recv_cb 6920 + * __sock_queue_rcv_skb 6921 + * Another thread calls: 6922 + * bt_sock_recvmsg 6923 + * skb_recv_datagram 6924 + * skb_free_datagram 6925 + * Then the current thread tries to access control, but 6926 + * it was freed by skb_free_datagram. 6927 + */ 6928 + local_control = *control; 6939 6929 err = l2cap_reassemble_sdu(chan, skb, control); 6940 6930 if (err) 6941 6931 break; 6942 6932 6943 - if (control->final) { 6933 + if (local_control.final) { 6944 6934 if (!test_and_clear_bit(CONN_REJ_ACT, 6945 6935 &chan->conn_state)) { 6946 - control->final = 0; 6947 - l2cap_retransmit_all(chan, control); 6936 + local_control.final = 0; 6937 + l2cap_retransmit_all(chan, &local_control); 6948 6938 l2cap_ertm_send(chan); 6949 6939 } 6950 6940 } ··· 7341 7297 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, 7342 7298 struct sk_buff *skb) 7343 7299 { 7300 + /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store 7301 + * the txseq field in advance to use it after l2cap_reassemble_sdu 7302 + * returns and to avoid the race condition, for example: 7303 + * 7304 + * The current thread calls: 7305 + * l2cap_reassemble_sdu 7306 + * chan->ops->recv == l2cap_sock_recv_cb 7307 + * __sock_queue_rcv_skb 7308 + * Another thread calls: 7309 + * bt_sock_recvmsg 7310 + * skb_recv_datagram 7311 + * skb_free_datagram 7312 + * Then the current thread tries to access control, but it was freed by 7313 + * skb_free_datagram. 
7314 + */ 7315 + u16 txseq = control->txseq; 7316 + 7344 7317 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, 7345 7318 chan->rx_state); 7346 7319 7347 - if (l2cap_classify_txseq(chan, control->txseq) == 7348 - L2CAP_TXSEQ_EXPECTED) { 7320 + if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) { 7349 7321 l2cap_pass_to_tx(chan, control); 7350 7322 7351 7323 BT_DBG("buffer_seq %u->%u", chan->buffer_seq, ··· 7384 7324 } 7385 7325 } 7386 7326 7387 - chan->last_acked_seq = control->txseq; 7388 - chan->expected_tx_seq = __next_seq(chan, control->txseq); 7327 + chan->last_acked_seq = txseq; 7328 + chan->expected_tx_seq = __next_seq(chan, txseq); 7389 7329 7390 7330 return 0; 7391 7331 } ··· 7641 7581 return; 7642 7582 } 7643 7583 7584 + l2cap_chan_hold(chan); 7644 7585 l2cap_chan_lock(chan); 7645 7586 } else { 7646 7587 BT_DBG("unknown cid 0x%4.4x", cid); ··· 8487 8426 * expected length. 8488 8427 */ 8489 8428 if (skb->len < L2CAP_LEN_SIZE) { 8490 - if (l2cap_recv_frag(conn, skb, conn->mtu) < 0) 8491 - goto drop; 8492 - return; 8429 + l2cap_recv_frag(conn, skb, conn->mtu); 8430 + break; 8493 8431 } 8494 8432 8495 8433 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE; ··· 8532 8472 8533 8473 /* Header still could not be read just continue */ 8534 8474 if (conn->rx_skb->len < L2CAP_LEN_SIZE) 8535 - return; 8475 + break; 8536 8476 } 8537 8477 8538 8478 if (skb->len > conn->rx_len) {
+1 -1
net/bridge/br_netlink.c
··· 1332 1332 1333 1333 if (data[IFLA_BR_FDB_FLUSH]) { 1334 1334 struct net_bridge_fdb_flush_desc desc = { 1335 - .flags_mask = BR_FDB_STATIC 1335 + .flags_mask = BIT(BR_FDB_STATIC) 1336 1336 }; 1337 1337 1338 1338 br_fdb_flush(br, &desc);
+1 -1
net/bridge/br_sysfs_br.c
··· 345 345 struct netlink_ext_ack *extack) 346 346 { 347 347 struct net_bridge_fdb_flush_desc desc = { 348 - .flags_mask = BR_FDB_STATIC 348 + .flags_mask = BIT(BR_FDB_STATIC) 349 349 }; 350 350 351 351 br_fdb_flush(br, &desc);
+1 -1
net/core/neighbour.c
··· 409 409 write_lock_bh(&tbl->lock); 410 410 neigh_flush_dev(tbl, dev, skip_perm); 411 411 pneigh_ifdown_and_unlock(tbl, dev); 412 - pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev)); 412 + pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL); 413 413 if (skb_queue_empty_lockless(&tbl->proxy_queue)) 414 414 del_timer_sync(&tbl->proxy_timer); 415 415 return 0;
+10 -3
net/dsa/dsa2.c
··· 1409 1409 static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master, 1410 1410 const char *user_protocol) 1411 1411 { 1412 + const struct dsa_device_ops *tag_ops = NULL; 1412 1413 struct dsa_switch *ds = dp->ds; 1413 1414 struct dsa_switch_tree *dst = ds->dst; 1414 - const struct dsa_device_ops *tag_ops; 1415 1415 enum dsa_tag_protocol default_proto; 1416 1416 1417 1417 /* Find out which protocol the switch would prefer. */ ··· 1434 1434 } 1435 1435 1436 1436 tag_ops = dsa_find_tagger_by_name(user_protocol); 1437 - } else { 1438 - tag_ops = dsa_tag_driver_get(default_proto); 1437 + if (IS_ERR(tag_ops)) { 1438 + dev_warn(ds->dev, 1439 + "Failed to find a tagging driver for protocol %s, using default\n", 1440 + user_protocol); 1441 + tag_ops = NULL; 1442 + } 1439 1443 } 1444 + 1445 + if (!tag_ops) 1446 + tag_ops = dsa_tag_driver_get(default_proto); 1440 1447 1441 1448 if (IS_ERR(tag_ops)) { 1442 1449 if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
+2
net/ipv4/af_inet.c
··· 754 754 (TCPF_ESTABLISHED | TCPF_SYN_RECV | 755 755 TCPF_CLOSE_WAIT | TCPF_CLOSE))); 756 756 757 + if (test_bit(SOCK_SUPPORT_ZC, &sock->flags)) 758 + set_bit(SOCK_SUPPORT_ZC, &newsock->flags); 757 759 sock_graft(sk2, newsock); 758 760 759 761 newsock->state = SS_CONNECTED;
+2 -2
net/ipv4/tcp_bpf.c
··· 607 607 } else { 608 608 sk->sk_write_space = psock->saved_write_space; 609 609 /* Pairs with lockless read in sk_clone_lock() */ 610 - WRITE_ONCE(sk->sk_prot, psock->sk_proto); 610 + sock_replace_proto(sk, psock->sk_proto); 611 611 } 612 612 return 0; 613 613 } ··· 620 620 } 621 621 622 622 /* Pairs with lockless read in sk_clone_lock() */ 623 - WRITE_ONCE(sk->sk_prot, &tcp_bpf_prots[family][config]); 623 + sock_replace_proto(sk, &tcp_bpf_prots[family][config]); 624 624 return 0; 625 625 } 626 626 EXPORT_SYMBOL_GPL(tcp_bpf_update_proto);
+3
net/ipv4/tcp_ulp.c
··· 136 136 if (icsk->icsk_ulp_ops) 137 137 goto out_err; 138 138 139 + if (sk->sk_socket) 140 + clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 141 + 139 142 err = ulp_ops->init(sk); 140 143 if (err) 141 144 goto out_err;
+2 -2
net/ipv4/udp_bpf.c
··· 141 141 142 142 if (restore) { 143 143 sk->sk_write_space = psock->saved_write_space; 144 - WRITE_ONCE(sk->sk_prot, psock->sk_proto); 144 + sock_replace_proto(sk, psock->sk_proto); 145 145 return 0; 146 146 } 147 147 148 148 if (sk->sk_family == AF_INET6) 149 149 udp_bpf_check_v6_needs_rebuild(psock->sk_proto); 150 150 151 - WRITE_ONCE(sk->sk_prot, &udp_bpf_prots[family]); 151 + sock_replace_proto(sk, &udp_bpf_prots[family]); 152 152 return 0; 153 153 } 154 154 EXPORT_SYMBOL_GPL(udp_bpf_update_proto);
+10 -4
net/ipv6/route.c
··· 6555 6555 static int __net_init ip6_route_net_init_late(struct net *net) 6556 6556 { 6557 6557 #ifdef CONFIG_PROC_FS 6558 - proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops, 6559 - sizeof(struct ipv6_route_iter)); 6560 - proc_create_net_single("rt6_stats", 0444, net->proc_net, 6561 - rt6_stats_seq_show, NULL); 6558 + if (!proc_create_net("ipv6_route", 0, net->proc_net, 6559 + &ipv6_route_seq_ops, 6560 + sizeof(struct ipv6_route_iter))) 6561 + return -ENOMEM; 6562 + 6563 + if (!proc_create_net_single("rt6_stats", 0444, net->proc_net, 6564 + rt6_stats_seq_show, NULL)) { 6565 + remove_proc_entry("ipv6_route", net->proc_net); 6566 + return -ENOMEM; 6567 + } 6562 6568 #endif 6563 6569 return 0; 6564 6570 }
+1
net/ipv6/udp.c
··· 66 66 { 67 67 skb_queue_head_init(&udp_sk(sk)->reader_queue); 68 68 sk->sk_destruct = udpv6_destruct_sock; 69 + set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 69 70 return 0; 70 71 } 71 72
+6 -24
net/netfilter/ipset/ip_set_hash_gen.h
··· 42 42 #define AHASH_MAX_SIZE (6 * AHASH_INIT_SIZE) 43 43 /* Max muber of elements in the array block when tuned */ 44 44 #define AHASH_MAX_TUNED 64 45 - 46 45 #define AHASH_MAX(h) ((h)->bucketsize) 47 - 48 - /* Max number of elements can be tuned */ 49 - #ifdef IP_SET_HASH_WITH_MULTI 50 - static u8 51 - tune_bucketsize(u8 curr, u32 multi) 52 - { 53 - u32 n; 54 - 55 - if (multi < curr) 56 - return curr; 57 - 58 - n = curr + AHASH_INIT_SIZE; 59 - /* Currently, at listing one hash bucket must fit into a message. 60 - * Therefore we have a hard limit here. 61 - */ 62 - return n > curr && n <= AHASH_MAX_TUNED ? n : curr; 63 - } 64 - #define TUNE_BUCKETSIZE(h, multi) \ 65 - ((h)->bucketsize = tune_bucketsize((h)->bucketsize, multi)) 66 - #else 67 - #define TUNE_BUCKETSIZE(h, multi) 68 - #endif 69 46 70 47 /* A hash bucket */ 71 48 struct hbucket { ··· 913 936 goto set_full; 914 937 /* Create a new slot */ 915 938 if (n->pos >= n->size) { 916 - TUNE_BUCKETSIZE(h, multi); 939 + #ifdef IP_SET_HASH_WITH_MULTI 940 + if (h->bucketsize >= AHASH_MAX_TUNED) 941 + goto set_full; 942 + else if (h->bucketsize < multi) 943 + h->bucketsize += AHASH_INIT_SIZE; 944 + #endif 917 945 if (n->size >= AHASH_MAX(h)) { 918 946 /* Trigger rehashing */ 919 947 mtype_data_next(&h->next, d);
+8 -2
net/netfilter/ipvs/ip_vs_app.c
··· 599 599 int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs) 600 600 { 601 601 INIT_LIST_HEAD(&ipvs->app_list); 602 - proc_create_net("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_seq_ops, 603 - sizeof(struct seq_net_private)); 602 + #ifdef CONFIG_PROC_FS 603 + if (!proc_create_net("ip_vs_app", 0, ipvs->net->proc_net, 604 + &ip_vs_app_seq_ops, 605 + sizeof(struct seq_net_private))) 606 + return -ENOMEM; 607 + #endif 604 608 return 0; 605 609 } 606 610 607 611 void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs) 608 612 { 609 613 unregister_ip_vs_app(ipvs, NULL /* all */); 614 + #ifdef CONFIG_PROC_FS 610 615 remove_proc_entry("ip_vs_app", ipvs->net->proc_net); 616 + #endif 611 617 }
+23 -7
net/netfilter/ipvs/ip_vs_conn.c
··· 1265 1265 * The drop rate array needs tuning for real environments. 1266 1266 * Called from timer bh only => no locking 1267 1267 */ 1268 - static const char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; 1269 - static char todrop_counter[9] = {0}; 1268 + static const signed char todrop_rate[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; 1269 + static signed char todrop_counter[9] = {0}; 1270 1270 int i; 1271 1271 1272 1272 /* if the conn entry hasn't lasted for 60 seconds, don't drop it. ··· 1447 1447 { 1448 1448 atomic_set(&ipvs->conn_count, 0); 1449 1449 1450 - proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net, 1451 - &ip_vs_conn_seq_ops, sizeof(struct ip_vs_iter_state)); 1452 - proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net, 1453 - &ip_vs_conn_sync_seq_ops, 1454 - sizeof(struct ip_vs_iter_state)); 1450 + #ifdef CONFIG_PROC_FS 1451 + if (!proc_create_net("ip_vs_conn", 0, ipvs->net->proc_net, 1452 + &ip_vs_conn_seq_ops, 1453 + sizeof(struct ip_vs_iter_state))) 1454 + goto err_conn; 1455 + 1456 + if (!proc_create_net("ip_vs_conn_sync", 0, ipvs->net->proc_net, 1457 + &ip_vs_conn_sync_seq_ops, 1458 + sizeof(struct ip_vs_iter_state))) 1459 + goto err_conn_sync; 1460 + #endif 1461 + 1455 1462 return 0; 1463 + 1464 + #ifdef CONFIG_PROC_FS 1465 + err_conn_sync: 1466 + remove_proc_entry("ip_vs_conn", ipvs->net->proc_net); 1467 + err_conn: 1468 + return -ENOMEM; 1469 + #endif 1456 1470 } 1457 1471 1458 1472 void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs) 1459 1473 { 1460 1474 /* flush all the connection entries first */ 1461 1475 ip_vs_conn_flush(ipvs); 1476 + #ifdef CONFIG_PROC_FS 1462 1477 remove_proc_entry("ip_vs_conn", ipvs->net->proc_net); 1463 1478 remove_proc_entry("ip_vs_conn_sync", ipvs->net->proc_net); 1479 + #endif 1464 1480 } 1465 1481 1466 1482 int __init ip_vs_conn_init(void)
+10 -1
net/netfilter/nf_nat_core.c
··· 1152 1152 WARN_ON(nf_nat_hook != NULL); 1153 1153 RCU_INIT_POINTER(nf_nat_hook, &nat_hook); 1154 1154 1155 - return register_nf_nat_bpf(); 1155 + ret = register_nf_nat_bpf(); 1156 + if (ret < 0) { 1157 + RCU_INIT_POINTER(nf_nat_hook, NULL); 1158 + nf_ct_helper_expectfn_unregister(&follow_master_nat); 1159 + synchronize_net(); 1160 + unregister_pernet_subsys(&nat_net_ops); 1161 + kvfree(nf_nat_bysource); 1162 + } 1163 + 1164 + return ret; 1156 1165 } 1157 1166 1158 1167 static void __exit nf_nat_cleanup(void)
+5 -3
net/netfilter/nf_tables_api.c
··· 8465 8465 nf_tables_chain_destroy(&trans->ctx); 8466 8466 break; 8467 8467 case NFT_MSG_DELRULE: 8468 - if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) 8469 - nft_flow_rule_destroy(nft_trans_flow_rule(trans)); 8470 - 8471 8468 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); 8472 8469 break; 8473 8470 case NFT_MSG_DELSET: ··· 8970 8973 nft_rule_expr_deactivate(&trans->ctx, 8971 8974 nft_trans_rule(trans), 8972 8975 NFT_TRANS_COMMIT); 8976 + 8977 + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) 8978 + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); 8973 8979 break; 8974 8980 case NFT_MSG_NEWSET: 8975 8981 nft_clear(net, nft_trans_set(trans)); ··· 10030 10030 nft_net = nft_pernet(net); 10031 10031 deleted = 0; 10032 10032 mutex_lock(&nft_net->commit_mutex); 10033 + if (!list_empty(&nf_tables_destroy_list)) 10034 + rcu_barrier(); 10033 10035 again: 10034 10036 list_for_each_entry(table, &nft_net->tables, list) { 10035 10037 if (nft_table_has_owner(table) &&
+3 -3
net/netfilter/nft_payload.c
··· 173 173 [NFTA_PAYLOAD_SREG] = { .type = NLA_U32 }, 174 174 [NFTA_PAYLOAD_DREG] = { .type = NLA_U32 }, 175 175 [NFTA_PAYLOAD_BASE] = { .type = NLA_U32 }, 176 - [NFTA_PAYLOAD_OFFSET] = NLA_POLICY_MAX_BE(NLA_U32, 255), 177 - [NFTA_PAYLOAD_LEN] = NLA_POLICY_MAX_BE(NLA_U32, 255), 176 + [NFTA_PAYLOAD_OFFSET] = NLA_POLICY_MAX(NLA_BE32, 255), 177 + [NFTA_PAYLOAD_LEN] = NLA_POLICY_MAX(NLA_BE32, 255), 178 178 [NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 }, 179 - [NFTA_PAYLOAD_CSUM_OFFSET] = NLA_POLICY_MAX_BE(NLA_U32, 255), 179 + [NFTA_PAYLOAD_CSUM_OFFSET] = NLA_POLICY_MAX(NLA_BE32, 255), 180 180 [NFTA_PAYLOAD_CSUM_FLAGS] = { .type = NLA_U32 }, 181 181 }; 182 182
+1
net/openvswitch/datapath.c
··· 2544 2544 .parallel_ops = true, 2545 2545 .small_ops = dp_vport_genl_ops, 2546 2546 .n_small_ops = ARRAY_SIZE(dp_vport_genl_ops), 2547 + .resv_start_op = OVS_VPORT_CMD_SET + 1, 2547 2548 .mcgrps = &ovs_dp_vport_multicast_group, 2548 2549 .n_mcgrps = 1, 2549 2550 .module = THIS_MODULE,
+3
net/rose/rose_link.c
··· 236 236 unsigned char *dptr; 237 237 int len; 238 238 239 + if (!neigh->dev) 240 + return; 241 + 239 242 len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 3; 240 243 241 244 if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+3 -1
net/sched/sch_red.c
··· 72 72 { 73 73 struct red_sched_data *q = qdisc_priv(sch); 74 74 struct Qdisc *child = q->qdisc; 75 + unsigned int len; 75 76 int ret; 76 77 77 78 q->vars.qavg = red_calc_qavg(&q->parms, ··· 127 126 break; 128 127 } 129 128 129 + len = qdisc_pkt_len(skb); 130 130 ret = qdisc_enqueue(skb, child, to_free); 131 131 if (likely(ret == NET_XMIT_SUCCESS)) { 132 - qdisc_qstats_backlog_inc(sch, skb); 132 + sch->qstats.backlog += len; 133 133 sch->q.qlen++; 134 134 } else if (net_xmit_drop_count(ret)) { 135 135 q->stats.pdrop++;
+4 -2
net/smc/af_smc.c
··· 3380 3380 3381 3381 rc = register_pernet_subsys(&smc_net_stat_ops); 3382 3382 if (rc) 3383 - return rc; 3383 + goto out_pernet_subsys; 3384 3384 3385 3385 smc_ism_init(); 3386 3386 smc_clc_init(); 3387 3387 3388 3388 rc = smc_nl_init(); 3389 3389 if (rc) 3390 - goto out_pernet_subsys; 3390 + goto out_pernet_subsys_stat; 3391 3391 3392 3392 rc = smc_pnet_init(); 3393 3393 if (rc) ··· 3480 3480 smc_pnet_exit(); 3481 3481 out_nl: 3482 3482 smc_nl_exit(); 3483 + out_pernet_subsys_stat: 3484 + unregister_pernet_subsys(&smc_net_stat_ops); 3483 3485 out_pernet_subsys: 3484 3486 unregister_pernet_subsys(&smc_net_ops); 3485 3487
+4 -4
net/unix/unix_bpf.c
··· 145 145 146 146 if (restore) { 147 147 sk->sk_write_space = psock->saved_write_space; 148 - WRITE_ONCE(sk->sk_prot, psock->sk_proto); 148 + sock_replace_proto(sk, psock->sk_proto); 149 149 return 0; 150 150 } 151 151 152 152 unix_dgram_bpf_check_needs_rebuild(psock->sk_proto); 153 - WRITE_ONCE(sk->sk_prot, &unix_dgram_bpf_prot); 153 + sock_replace_proto(sk, &unix_dgram_bpf_prot); 154 154 return 0; 155 155 } 156 156 ··· 158 158 { 159 159 if (restore) { 160 160 sk->sk_write_space = psock->saved_write_space; 161 - WRITE_ONCE(sk->sk_prot, psock->sk_proto); 161 + sock_replace_proto(sk, psock->sk_proto); 162 162 return 0; 163 163 } 164 164 165 165 unix_stream_bpf_check_needs_rebuild(psock->sk_proto); 166 - WRITE_ONCE(sk->sk_prot, &unix_stream_bpf_prot); 166 + sock_replace_proto(sk, &unix_stream_bpf_prot); 167 167 return 0; 168 168 } 169 169
+4 -3
net/vmw_vsock/af_vsock.c
··· 1905 1905 err = 0; 1906 1906 transport = vsk->transport; 1907 1907 1908 - while ((data = vsock_connectible_has_data(vsk)) == 0) { 1908 + while (1) { 1909 1909 prepare_to_wait(sk_sleep(sk), wait, TASK_INTERRUPTIBLE); 1910 + data = vsock_connectible_has_data(vsk); 1911 + if (data != 0) 1912 + break; 1910 1913 1911 1914 if (sk->sk_err != 0 || 1912 1915 (sk->sk_shutdown & RCV_SHUTDOWN) || ··· 2094 2091 struct vsock_sock *vsk; 2095 2092 const struct vsock_transport *transport; 2096 2093 int err; 2097 - 2098 - DEFINE_WAIT(wait); 2099 2094 2100 2095 sk = sock->sk; 2101 2096 vsk = vsock_sk(sk);
+1 -1
scripts/Makefile.modpost
··· 122 122 sed 's/ko$$/o/' $(or $(modorder-if-needed), /dev/null) | $(MODPOST) $(modpost-args) -T - $(vmlinux.o-if-present) 123 123 124 124 targets += $(output-symdump) 125 - $(output-symdump): $(modorder-if-needed) $(vmlinux.o-if-present) $(moudle.symvers-if-present) $(MODPOST) FORCE 125 + $(output-symdump): $(modorder-if-needed) $(vmlinux.o-if-present) $(module.symvers-if-present) $(MODPOST) FORCE 126 126 $(call if_changed,modpost) 127 127 128 128 __modpost: $(output-symdump)
+4 -19
scripts/kconfig/menu.c
··· 722 722 if (!expr_eq(prop->menu->dep, prop->visible.expr)) 723 723 get_dep_str(r, prop->visible.expr, " Visible if: "); 724 724 725 - menu = prop->menu->parent; 726 - for (i = 0; menu && i < 8; menu = menu->parent) { 725 + menu = prop->menu; 726 + for (i = 0; menu != &rootmenu && i < 8; menu = menu->parent) { 727 727 bool accessible = menu_is_visible(menu); 728 728 729 729 submenu[i++] = menu; ··· 733 733 if (head && location) { 734 734 jump = xmalloc(sizeof(struct jump_key)); 735 735 736 - if (menu_is_visible(prop->menu)) { 737 - /* 738 - * There is not enough room to put the hint at the 739 - * beginning of the "Prompt" line. Put the hint on the 740 - * last "Location" line even when it would belong on 741 - * the former. 742 - */ 743 - jump->target = prop->menu; 744 - } else 745 - jump->target = location; 736 + jump->target = location; 746 737 747 738 if (list_empty(head)) 748 739 jump->index = 0; ··· 749 758 menu = submenu[i]; 750 759 if (jump && menu == location) 751 760 jump->offset = strlen(r->s); 752 - 753 - if (menu == &rootmenu) 754 - /* The real rootmenu prompt is ugly */ 755 - str_printf(r, "%*cMain menu", j, ' '); 756 - else 757 - str_printf(r, "%*c-> %s", j, ' ', menu_get_prompt(menu)); 758 - 761 + str_printf(r, "%*c-> %s", j, ' ', menu_get_prompt(menu)); 759 762 if (menu->sym) { 760 763 str_printf(r, " (%s [=%s])", menu->sym->name ? 761 764 menu->sym->name : "<choice>",
+280 -21
tools/testing/cxl/test/cxl.c
··· 12 12 #include "mock.h" 13 13 14 14 #define NR_CXL_HOST_BRIDGES 2 15 + #define NR_CXL_SINGLE_HOST 1 15 16 #define NR_CXL_ROOT_PORTS 2 16 17 #define NR_CXL_SWITCH_PORTS 2 17 18 #define NR_CXL_PORT_DECODERS 8 18 19 19 20 static struct platform_device *cxl_acpi; 20 21 static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES]; 21 - static struct platform_device 22 - *cxl_root_port[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS]; 23 - static struct platform_device 24 - *cxl_switch_uport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS]; 25 - static struct platform_device 26 - *cxl_switch_dport[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * 27 - NR_CXL_SWITCH_PORTS]; 28 - struct platform_device 29 - *cxl_mem[NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS]; 22 + #define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS) 23 + static struct platform_device *cxl_root_port[NR_MULTI_ROOT]; 24 + static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT]; 25 + #define NR_MEM_MULTI \ 26 + (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS) 27 + static struct platform_device *cxl_switch_dport[NR_MEM_MULTI]; 28 + 29 + static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST]; 30 + static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST]; 31 + static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST]; 32 + #define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS) 33 + static struct platform_device *cxl_swd_single[NR_MEM_SINGLE]; 34 + 35 + struct platform_device *cxl_mem[NR_MEM_MULTI]; 36 + struct platform_device *cxl_mem_single[NR_MEM_SINGLE]; 37 + 38 + 39 + static inline bool is_multi_bridge(struct device *dev) 40 + { 41 + int i; 42 + 43 + for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) 44 + if (&cxl_host_bridge[i]->dev == dev) 45 + return true; 46 + return false; 47 + } 48 + 49 + static inline bool is_single_bridge(struct device *dev) 50 + { 51 + int i; 52 + 53 + for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) 54 + if 
(&cxl_hb_single[i]->dev == dev) 55 + return true; 56 + return false; 57 + } 30 58 31 59 static struct acpi_device acpi0017_mock; 32 - static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES] = { 60 + static struct acpi_device host_bridge[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST] = { 33 61 [0] = { 34 62 .handle = &host_bridge[0], 35 63 }, 36 64 [1] = { 37 65 .handle = &host_bridge[1], 38 66 }, 67 + [2] = { 68 + .handle = &host_bridge[2], 69 + }, 70 + 39 71 }; 40 72 41 73 static bool is_mock_dev(struct device *dev) ··· 76 44 77 45 for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) 78 46 if (dev == &cxl_mem[i]->dev) 47 + return true; 48 + for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) 49 + if (dev == &cxl_mem_single[i]->dev) 79 50 return true; 80 51 if (dev == &cxl_acpi->dev) 81 52 return true; ··· 101 66 102 67 static struct { 103 68 struct acpi_table_cedt cedt; 104 - struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES]; 69 + struct acpi_cedt_chbs chbs[NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST]; 105 70 struct { 106 71 struct acpi_cedt_cfmws cfmws; 107 72 u32 target[1]; ··· 118 83 struct acpi_cedt_cfmws cfmws; 119 84 u32 target[2]; 120 85 } cfmws3; 86 + struct { 87 + struct acpi_cedt_cfmws cfmws; 88 + u32 target[1]; 89 + } cfmws4; 121 90 } __packed mock_cedt = { 122 91 .cedt = { 123 92 .header = { ··· 144 105 .length = sizeof(mock_cedt.chbs[0]), 145 106 }, 146 107 .uid = 1, 108 + .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20, 109 + }, 110 + .chbs[2] = { 111 + .header = { 112 + .type = ACPI_CEDT_TYPE_CHBS, 113 + .length = sizeof(mock_cedt.chbs[0]), 114 + }, 115 + .uid = 2, 147 116 .cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20, 148 117 }, 149 118 .cfmws0 = { ··· 214 167 }, 215 168 .target = { 0, 1, }, 216 169 }, 170 + .cfmws4 = { 171 + .cfmws = { 172 + .header = { 173 + .type = ACPI_CEDT_TYPE_CFMWS, 174 + .length = sizeof(mock_cedt.cfmws4), 175 + }, 176 + .interleave_ways = 0, 177 + .granularity = 4, 178 + .restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 | 179 + 
ACPI_CEDT_CFMWS_RESTRICT_PMEM, 180 + .qtg_id = 4, 181 + .window_size = SZ_256M * 4UL, 182 + }, 183 + .target = { 2 }, 184 + }, 217 185 }; 218 186 219 - struct acpi_cedt_cfmws *mock_cfmws[4] = { 187 + struct acpi_cedt_cfmws *mock_cfmws[] = { 220 188 [0] = &mock_cedt.cfmws0.cfmws, 221 189 [1] = &mock_cedt.cfmws1.cfmws, 222 190 [2] = &mock_cedt.cfmws2.cfmws, 223 191 [3] = &mock_cedt.cfmws3.cfmws, 192 + [4] = &mock_cedt.cfmws4.cfmws, 224 193 }; 225 194 226 195 struct cxl_mock_res { ··· 367 304 for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) 368 305 if (dev == &cxl_host_bridge[i]->dev) 369 306 return true; 307 + for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) 308 + if (dev == &cxl_hb_single[i]->dev) 309 + return true; 370 310 return false; 371 311 } 372 312 ··· 390 324 391 325 for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) 392 326 if (dev == &cxl_switch_dport[i]->dev) 327 + return true; 328 + 329 + for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) 330 + if (dev == &cxl_root_single[i]->dev) 331 + return true; 332 + 333 + for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) 334 + if (dev == &cxl_swu_single[i]->dev) 335 + return true; 336 + 337 + for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) 338 + if (dev == &cxl_swd_single[i]->dev) 393 339 return true; 394 340 395 341 if (is_cxl_memdev(dev)) ··· 639 561 int i, array_size; 640 562 641 563 if (port->depth == 1) { 642 - array_size = ARRAY_SIZE(cxl_root_port); 643 - array = cxl_root_port; 564 + if (is_multi_bridge(port->uport)) { 565 + array_size = ARRAY_SIZE(cxl_root_port); 566 + array = cxl_root_port; 567 + } else if (is_single_bridge(port->uport)) { 568 + array_size = ARRAY_SIZE(cxl_root_single); 569 + array = cxl_root_single; 570 + } else { 571 + dev_dbg(&port->dev, "%s: unknown bridge type\n", 572 + dev_name(port->uport)); 573 + return -ENXIO; 574 + } 644 575 } else if (port->depth == 2) { 645 - array_size = ARRAY_SIZE(cxl_switch_dport); 646 - array = cxl_switch_dport; 576 + struct cxl_port *parent = 
to_cxl_port(port->dev.parent); 577 + 578 + if (is_multi_bridge(parent->uport)) { 579 + array_size = ARRAY_SIZE(cxl_switch_dport); 580 + array = cxl_switch_dport; 581 + } else if (is_single_bridge(parent->uport)) { 582 + array_size = ARRAY_SIZE(cxl_swd_single); 583 + array = cxl_swd_single; 584 + } else { 585 + dev_dbg(&port->dev, "%s: unknown bridge type\n", 586 + dev_name(port->uport)); 587 + return -ENXIO; 588 + } 647 589 } else { 648 590 dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n", 649 591 port->depth); ··· 674 576 struct platform_device *pdev = array[i]; 675 577 struct cxl_dport *dport; 676 578 677 - if (pdev->dev.parent != port->uport) 579 + if (pdev->dev.parent != port->uport) { 580 + dev_dbg(&port->dev, "%s: mismatch parent %s\n", 581 + dev_name(port->uport), 582 + dev_name(pdev->dev.parent)); 678 583 continue; 584 + } 679 585 680 586 dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id, 681 587 CXL_RESOURCE_NONE); ··· 728 626 #ifndef SZ_512G 729 627 #define SZ_512G (SZ_64G * 8) 730 628 #endif 629 + 630 + static __init int cxl_single_init(void) 631 + { 632 + int i, rc; 633 + 634 + for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) { 635 + struct acpi_device *adev = 636 + &host_bridge[NR_CXL_HOST_BRIDGES + i]; 637 + struct platform_device *pdev; 638 + 639 + pdev = platform_device_alloc("cxl_host_bridge", 640 + NR_CXL_HOST_BRIDGES + i); 641 + if (!pdev) 642 + goto err_bridge; 643 + 644 + mock_companion(adev, &pdev->dev); 645 + rc = platform_device_add(pdev); 646 + if (rc) { 647 + platform_device_put(pdev); 648 + goto err_bridge; 649 + } 650 + 651 + cxl_hb_single[i] = pdev; 652 + rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj, 653 + "physical_node"); 654 + if (rc) 655 + goto err_bridge; 656 + } 657 + 658 + for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) { 659 + struct platform_device *bridge = 660 + cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)]; 661 + struct platform_device *pdev; 662 + 663 + pdev = platform_device_alloc("cxl_root_port", 664 + 
NR_MULTI_ROOT + i); 665 + if (!pdev) 666 + goto err_port; 667 + pdev->dev.parent = &bridge->dev; 668 + 669 + rc = platform_device_add(pdev); 670 + if (rc) { 671 + platform_device_put(pdev); 672 + goto err_port; 673 + } 674 + cxl_root_single[i] = pdev; 675 + } 676 + 677 + for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) { 678 + struct platform_device *root_port = cxl_root_single[i]; 679 + struct platform_device *pdev; 680 + 681 + pdev = platform_device_alloc("cxl_switch_uport", 682 + NR_MULTI_ROOT + i); 683 + if (!pdev) 684 + goto err_uport; 685 + pdev->dev.parent = &root_port->dev; 686 + 687 + rc = platform_device_add(pdev); 688 + if (rc) { 689 + platform_device_put(pdev); 690 + goto err_uport; 691 + } 692 + cxl_swu_single[i] = pdev; 693 + } 694 + 695 + for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) { 696 + struct platform_device *uport = 697 + cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)]; 698 + struct platform_device *pdev; 699 + 700 + pdev = platform_device_alloc("cxl_switch_dport", 701 + i + NR_MEM_MULTI); 702 + if (!pdev) 703 + goto err_dport; 704 + pdev->dev.parent = &uport->dev; 705 + 706 + rc = platform_device_add(pdev); 707 + if (rc) { 708 + platform_device_put(pdev); 709 + goto err_dport; 710 + } 711 + cxl_swd_single[i] = pdev; 712 + } 713 + 714 + for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) { 715 + struct platform_device *dport = cxl_swd_single[i]; 716 + struct platform_device *pdev; 717 + 718 + pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i); 719 + if (!pdev) 720 + goto err_mem; 721 + pdev->dev.parent = &dport->dev; 722 + set_dev_node(&pdev->dev, i % 2); 723 + 724 + rc = platform_device_add(pdev); 725 + if (rc) { 726 + platform_device_put(pdev); 727 + goto err_mem; 728 + } 729 + cxl_mem_single[i] = pdev; 730 + } 731 + 732 + return 0; 733 + 734 + err_mem: 735 + for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--) 736 + platform_device_unregister(cxl_mem_single[i]); 737 + err_dport: 738 + for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; 
i--) 739 + platform_device_unregister(cxl_swd_single[i]); 740 + err_uport: 741 + for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--) 742 + platform_device_unregister(cxl_swu_single[i]); 743 + err_port: 744 + for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--) 745 + platform_device_unregister(cxl_root_single[i]); 746 + err_bridge: 747 + for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) { 748 + struct platform_device *pdev = cxl_hb_single[i]; 749 + 750 + if (!pdev) 751 + continue; 752 + sysfs_remove_link(&pdev->dev.kobj, "physical_node"); 753 + platform_device_unregister(cxl_hb_single[i]); 754 + } 755 + 756 + return rc; 757 + } 758 + 759 + static void cxl_single_exit(void) 760 + { 761 + int i; 762 + 763 + for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--) 764 + platform_device_unregister(cxl_mem_single[i]); 765 + for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--) 766 + platform_device_unregister(cxl_swd_single[i]); 767 + for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--) 768 + platform_device_unregister(cxl_swu_single[i]); 769 + for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--) 770 + platform_device_unregister(cxl_root_single[i]); 771 + for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) { 772 + struct platform_device *pdev = cxl_hb_single[i]; 773 + 774 + if (!pdev) 775 + continue; 776 + sysfs_remove_link(&pdev->dev.kobj, "physical_node"); 777 + platform_device_unregister(cxl_hb_single[i]); 778 + } 779 + } 731 780 732 781 static __init int cxl_test_init(void) 733 782 { ··· 948 695 949 696 pdev = platform_device_alloc("cxl_switch_uport", i); 950 697 if (!pdev) 951 - goto err_port; 698 + goto err_uport; 952 699 pdev->dev.parent = &root_port->dev; 953 700 954 701 rc = platform_device_add(pdev); ··· 966 713 967 714 pdev = platform_device_alloc("cxl_switch_dport", i); 968 715 if (!pdev) 969 - goto err_port; 716 + goto err_dport; 970 717 pdev->dev.parent = &uport->dev; 971 718 972 719 rc = platform_device_add(pdev); ··· 977 724 cxl_switch_dport[i] = 
pdev; 978 725 } 979 726 980 - BUILD_BUG_ON(ARRAY_SIZE(cxl_mem) != ARRAY_SIZE(cxl_switch_dport)); 981 727 for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) { 982 728 struct platform_device *dport = cxl_switch_dport[i]; 983 729 struct platform_device *pdev; ··· 995 743 cxl_mem[i] = pdev; 996 744 } 997 745 746 + rc = cxl_single_init(); 747 + if (rc) 748 + goto err_mem; 749 + 998 750 cxl_acpi = platform_device_alloc("cxl_acpi", 0); 999 751 if (!cxl_acpi) 1000 - goto err_mem; 752 + goto err_single; 1001 753 1002 754 mock_companion(&acpi0017_mock, &cxl_acpi->dev); 1003 755 acpi0017_mock.dev.bus = &platform_bus_type; ··· 1014 758 1015 759 err_add: 1016 760 platform_device_put(cxl_acpi); 761 + err_single: 762 + cxl_single_exit(); 1017 763 err_mem: 1018 764 for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) 1019 765 platform_device_unregister(cxl_mem[i]); ··· 1051 793 int i; 1052 794 1053 795 platform_device_unregister(cxl_acpi); 796 + cxl_single_exit(); 1054 797 for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--) 1055 798 platform_device_unregister(cxl_mem[i]); 1056 799 for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
+4 -3
tools/testing/selftests/landlock/Makefile
··· 3 3 # First run: make -C ../../../.. headers_install 4 4 5 5 CFLAGS += -Wall -O2 $(KHDR_INCLUDES) 6 - LDLIBS += -lcap 7 6 8 7 LOCAL_HDRS += common.h 9 8 ··· 12 13 13 14 TEST_GEN_PROGS_EXTENDED := true 14 15 15 - # Static linking for short targets: 16 + # Short targets: 17 + $(TEST_GEN_PROGS): LDLIBS += -lcap 16 18 $(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static 17 19 18 20 include ../lib.mk 19 21 20 - # Static linking for targets with $(OUTPUT)/ prefix: 22 + # Targets with $(OUTPUT)/ prefix: 23 + $(TEST_GEN_PROGS): LDLIBS += -lcap 21 24 $(TEST_GEN_PROGS_EXTENDED): LDFLAGS += -static
+1 -1
tools/testing/selftests/pidfd/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - CFLAGS += -g -I../../../../usr/include/ -pthread 2 + CFLAGS += -g -I../../../../usr/include/ -pthread -Wall 3 3 4 4 TEST_GEN_PROGS := pidfd_test pidfd_fdinfo_test pidfd_open_test \ 5 5 pidfd_poll_test pidfd_wait pidfd_getfd_test pidfd_setns_test
+3 -1
tools/testing/selftests/pidfd/pidfd_test.c
··· 413 413 414 414 c = epoll_wait(epoll_fd, events, MAX_EVENTS, 5000); 415 415 if (c != 1 || !(events[0].events & EPOLLIN)) 416 - ksft_exit_fail_msg("%s test: Unexpected epoll_wait result (c=%d, events=%x) ", 416 + ksft_exit_fail_msg("%s test: Unexpected epoll_wait result (c=%d, events=%x) " 417 417 "(errno %d)\n", 418 418 test_name, c, events[0].events, errno); 419 419 ··· 435 435 */ 436 436 while (1) 437 437 sleep(1); 438 + 439 + return 0; 438 440 } 439 441 440 442 static void test_pidfd_poll_exec(int use_waitpid)
+11 -1
tools/testing/selftests/pidfd/pidfd_wait.c
··· 95 95 .flags = CLONE_PIDFD | CLONE_PARENT_SETTID, 96 96 .exit_signal = SIGCHLD, 97 97 }; 98 + int pfd[2]; 98 99 pid_t pid; 99 100 siginfo_t info = { 100 101 .si_signo = 0, 101 102 }; 102 103 104 + ASSERT_EQ(pipe(pfd), 0); 103 105 pid = sys_clone3(&args); 104 106 ASSERT_GE(pid, 0); 105 107 106 108 if (pid == 0) { 109 + char buf[2]; 110 + 111 + close(pfd[1]); 107 112 kill(getpid(), SIGSTOP); 113 + ASSERT_EQ(read(pfd[0], buf, 1), 1); 114 + close(pfd[0]); 108 115 kill(getpid(), SIGSTOP); 109 116 exit(EXIT_SUCCESS); 110 117 } 111 118 119 + close(pfd[0]); 112 120 ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WSTOPPED, NULL), 0); 113 121 ASSERT_EQ(info.si_signo, SIGCHLD); 114 122 ASSERT_EQ(info.si_code, CLD_STOPPED); ··· 125 117 ASSERT_EQ(sys_pidfd_send_signal(pidfd, SIGCONT, NULL, 0), 0); 126 118 127 119 ASSERT_EQ(sys_waitid(P_PIDFD, pidfd, &info, WCONTINUED, NULL), 0); 120 + ASSERT_EQ(write(pfd[1], "C", 1), 1); 121 + close(pfd[1]); 128 122 ASSERT_EQ(info.si_signo, SIGCHLD); 129 123 ASSERT_EQ(info.si_code, CLD_CONTINUED); 130 124 ASSERT_EQ(info.si_pid, parent_tid); ··· 148 138 149 139 TEST(wait_nonblock) 150 140 { 151 - int pidfd, status = 0; 141 + int pidfd; 152 142 unsigned int flags = 0; 153 143 pid_t parent_tid = -1; 154 144 struct clone_args args = {