Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge remote-tracking branch 'torvalds/master' into perf/core

To pick up the fixes in perf/urgent that were just merged into upstream.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

+8878 -5008
+1
CREDITS
··· 971 971 N: Daniel Drake 972 972 E: dsd@gentoo.org 973 973 D: USBAT02 CompactFlash support in usb-storage 974 + D: ZD1211RW wireless driver 974 975 S: UK 975 976 976 977 N: Oleg Drokin
+3 -2
Documentation/core-api/irq/irq-domain.rst
··· 175 175 case the Linux IRQ numbers cannot be dynamically assigned and the legacy 176 176 mapping should be used. 177 177 178 - As the name implies, the *_legacy() functions are deprecated and only 178 + As the name implies, the \*_legacy() functions are deprecated and only 179 179 exist to ease the support of ancient platforms. No new users should be 180 - added. 180 + added. Same goes for the \*_simple() functions when their use results 181 + in the legacy behaviour. 181 182 182 183 The legacy map assumes a contiguous range of IRQ numbers has already 183 184 been allocated for the controller and that the IRQ number can be
-2
Documentation/devicetree/bindings/display/bridge/ti,sn65dsi83.yaml
··· 50 50 data-lanes: 51 51 description: array of physical DSI data lane indexes. 52 52 minItems: 1 53 - maxItems: 4 54 53 items: 55 54 - const: 1 56 55 - const: 2 ··· 70 71 data-lanes: 71 72 description: array of physical DSI data lane indexes. 72 73 minItems: 1 73 - maxItems: 4 74 74 items: 75 75 - const: 1 76 76 - const: 2
+1 -1
Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
··· 18 18 const: ti,sn65dsi86 19 19 20 20 reg: 21 - const: 0x2d 21 + enum: [ 0x2c, 0x2d ] 22 22 23 23 enable-gpios: 24 24 maxItems: 1
+42 -4
Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
··· 31 31 32 32 clocks: 33 33 minItems: 1 34 - maxItems: 3 34 + maxItems: 7 35 35 36 36 clock-names: 37 37 minItems: 1 38 - maxItems: 3 38 + maxItems: 7 39 39 40 40 required: 41 41 - compatible ··· 72 72 contains: 73 73 enum: 74 74 - qcom,sdm660-a2noc 75 + then: 76 + properties: 77 + clocks: 78 + items: 79 + - description: Bus Clock. 80 + - description: Bus A Clock. 81 + - description: IPA Clock. 82 + - description: UFS AXI Clock. 83 + - description: Aggregate2 UFS AXI Clock. 84 + - description: Aggregate2 USB3 AXI Clock. 85 + - description: Config NoC USB2 AXI Clock. 86 + clock-names: 87 + items: 88 + - const: bus 89 + - const: bus_a 90 + - const: ipa 91 + - const: ufs_axi 92 + - const: aggre2_ufs_axi 93 + - const: aggre2_usb3_axi 94 + - const: cfg_noc_usb2_axi 95 + 96 + - if: 97 + properties: 98 + compatible: 99 + contains: 100 + enum: 75 101 - qcom,sdm660-bimc 76 102 - qcom,sdm660-cnoc 77 103 - qcom,sdm660-gnoc ··· 117 91 - | 118 92 #include <dt-bindings/clock/qcom,rpmcc.h> 119 93 #include <dt-bindings/clock/qcom,mmcc-sdm660.h> 94 + #include <dt-bindings/clock/qcom,gcc-sdm660.h> 120 95 121 96 bimc: interconnect@1008000 { 122 97 compatible = "qcom,sdm660-bimc"; ··· 150 123 compatible = "qcom,sdm660-a2noc"; 151 124 reg = <0x01704000 0xc100>; 152 125 #interconnect-cells = <1>; 153 - clock-names = "bus", "bus_a"; 126 + clock-names = "bus", 127 + "bus_a", 128 + "ipa", 129 + "ufs_axi", 130 + "aggre2_ufs_axi", 131 + "aggre2_usb3_axi", 132 + "cfg_noc_usb2_axi"; 154 133 clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>, 155 - <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>; 134 + <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>, 135 + <&rpmcc RPM_SMD_IPA_CLK>, 136 + <&gcc GCC_UFS_AXI_CLK>, 137 + <&gcc GCC_AGGRE2_UFS_AXI_CLK>, 138 + <&gcc GCC_AGGRE2_USB3_AXI_CLK>, 139 + <&gcc GCC_CFG_NOC_USB2_AXI_CLK>; 156 140 }; 157 141 158 142 mnoc: interconnect@1745000 {
+1 -1
Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
··· 31 31 maxItems: 1 32 32 33 33 port: 34 - $ref: /schemas/graph.yaml#/properties/port 34 + $ref: /schemas/graph.yaml#/$defs/port-base 35 35 additionalProperties: false 36 36 37 37 properties:
+1 -1
Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
··· 38 38 39 39 port: 40 40 additionalProperties: false 41 - $ref: /schemas/graph.yaml#/properties/port 41 + $ref: /schemas/graph.yaml#/$defs/port-base 42 42 43 43 properties: 44 44 endpoint:
+1 -1
Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
··· 38 38 39 39 port: 40 40 additionalProperties: false 41 - $ref: /schemas/graph.yaml#/properties/port 41 + $ref: /schemas/graph.yaml#/$defs/port-base 42 42 43 43 properties: 44 44 endpoint:
+1 -1
Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
··· 38 38 39 39 port: 40 40 additionalProperties: false 41 - $ref: /schemas/graph.yaml#/properties/port 41 + $ref: /schemas/graph.yaml#/$defs/port-base 42 42 43 43 properties: 44 44 endpoint:
+1 -3
Documentation/devicetree/bindings/mmc/snps,dwcmshc-sdhci.yaml
··· 20 20 - snps,dwcmshc-sdhci 21 21 22 22 reg: 23 - minItems: 1 24 - items: 25 - - description: Offset and length of the register set for the device 23 + maxItems: 1 26 24 27 25 interrupts: 28 26 maxItems: 1
+1 -1
Documentation/devicetree/bindings/net/dsa/marvell.txt
··· 83 83 #interrupt-cells = <2>; 84 84 85 85 switch0: switch@0 { 86 - compatible = "marvell,mv88e6390"; 86 + compatible = "marvell,mv88e6190"; 87 87 reg = <0>; 88 88 reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>; 89 89
-1
Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml
··· 34 34 35 35 clocks: 36 36 minItems: 3 37 - maxItems: 5 38 37 items: 39 38 - description: MAC host clock 40 39 - description: MAC apb clock
-1
Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
··· 41 41 - description: builtin MSI controller. 42 42 43 43 interrupt-names: 44 - minItems: 1 45 44 items: 46 45 - const: msi 47 46
-17
Documentation/hwmon/k10temp.rst
··· 132 132 Core Complex Die (CCD) temperatures. Up to 8 such temperatures are reported 133 133 as temp{3..10}_input, labeled Tccd{1..8}. Actual support depends on the CPU 134 134 variant. 135 - 136 - Various Family 17h and 18h CPUs report voltage and current telemetry 137 - information. The following attributes may be reported. 138 - 139 - Attribute Label Description 140 - =============== ======= ================ 141 - in0_input Vcore Core voltage 142 - in1_input Vsoc SoC voltage 143 - curr1_input Icore Core current 144 - curr2_input Isoc SoC current 145 - =============== ======= ================ 146 - 147 - Current values are raw (unscaled) as reported by the CPU. Core current is 148 - reported as multiples of 1A / LSB. SoC is reported as multiples of 0.25A 149 - / LSB. The real current is board specific. Reported currents should be seen 150 - as rough guidance, and should be scaled using sensors3.conf as appropriate 151 - for a given board.
+1 -1
Documentation/networking/device_drivers/ethernet/intel/ice.rst
··· 851 851 - 0x88A8 traffic will not be received unless VLAN stripping is disabled with 852 852 the following command:: 853 853 854 - # ethool -K <ethX> rxvlan off 854 + # ethtool -K <ethX> rxvlan off 855 855 856 856 - 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS 857 857 configured on the same port. 0x88a8/0x8100 traffic will not be received if
+35 -38
MAINTAINERS
··· 414 414 F: drivers/acpi/pmic/ 415 415 416 416 ACPI THERMAL DRIVER 417 - M: Zhang Rui <rui.zhang@intel.com> 417 + M: Rafael J. Wysocki <rafael@kernel.org> 418 + R: Zhang Rui <rui.zhang@intel.com> 418 419 L: linux-acpi@vger.kernel.org 419 420 S: Supported 420 421 W: https://01.org/linux-acpi ··· 811 810 F: drivers/dma/altera-msgdma.c 812 811 813 812 ALTERA PIO DRIVER 814 - M: Joyce Ooi <joyce.ooi@intel.com> 813 + M: Mun Yew Tham <mun.yew.tham@intel.com> 815 814 L: linux-gpio@vger.kernel.org 816 815 S: Maintained 817 816 F: drivers/gpio/gpio-altera.c ··· 978 977 S: Maintained 979 978 F: drivers/platform/x86/amd-pmc.* 980 979 981 - AMD POWERPLAY 980 + AMD POWERPLAY AND SWSMU 982 981 M: Evan Quan <evan.quan@amd.com> 983 982 L: amd-gfx@lists.freedesktop.org 984 983 S: Supported 985 984 T: git https://gitlab.freedesktop.org/agd5f/linux.git 986 - F: drivers/gpu/drm/amd/pm/powerplay/ 985 + F: drivers/gpu/drm/amd/pm/ 987 986 988 987 AMD PTDMA DRIVER 989 988 M: Sanjay R Mehta <sanju.mehta@amd.com> ··· 1276 1275 1277 1276 APPLE DART IOMMU DRIVER 1278 1277 M: Sven Peter <sven@svenpeter.dev> 1278 + R: Alyssa Rosenzweig <alyssa@rosenzweig.io> 1279 1279 L: iommu@lists.linux-foundation.org 1280 1280 S: Maintained 1281 1281 F: Documentation/devicetree/bindings/iommu/apple,dart.yaml ··· 1713 1711 1714 1712 ARM/APPLE MACHINE SUPPORT 1715 1713 M: Hector Martin <marcan@marcan.st> 1714 + M: Sven Peter <sven@svenpeter.dev> 1715 + R: Alyssa Rosenzweig <alyssa@rosenzweig.io> 1716 1716 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1717 1717 S: Maintained 1718 1718 W: https://asahilinux.org ··· 2240 2236 2241 2237 ARM/MStar/Sigmastar Armv7 SoC support 2242 2238 M: Daniel Palmer <daniel@thingy.jp> 2239 + M: Romain Perier <romain.perier@gmail.com> 2243 2240 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2244 2241 S: Maintained 2245 2242 W: http://linux-chenxing.org/ ··· 2717 2712 2718 2713 ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE 2719 2714 M: 
Nishanth Menon <nm@ti.com> 2715 + M: Vignesh Raghavendra <vigneshr@ti.com> 2720 2716 M: Tero Kristo <kristo@kernel.org> 2721 2717 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2722 2718 S: Supported ··· 2810 2804 F: arch/arm/mach-pxa/vpac270.c 2811 2805 2812 2806 ARM/VT8500 ARM ARCHITECTURE 2813 - M: Tony Prisk <linux@prisktech.co.nz> 2814 2807 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 2815 - S: Maintained 2808 + S: Orphan 2816 2809 F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt 2817 2810 F: arch/arm/mach-vt8500/ 2818 2811 F: drivers/clocksource/timer-vt8500.c ··· 2967 2962 F: include/linux/async_tx.h 2968 2963 2969 2964 AT24 EEPROM DRIVER 2970 - M: Bartosz Golaszewski <bgolaszewski@baylibre.com> 2965 + M: Bartosz Golaszewski <brgl@bgdev.pl> 2971 2966 L: linux-i2c@vger.kernel.org 2972 2967 S: Maintained 2973 2968 T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git ··· 3390 3385 F: Documentation/userspace-api/ebpf/ 3391 3386 F: arch/*/net/* 3392 3387 F: include/linux/bpf* 3388 + F: include/linux/btf* 3393 3389 F: include/linux/filter.h 3394 3390 F: include/trace/events/xdp.h 3395 3391 F: include/uapi/linux/bpf* 3392 + F: include/uapi/linux/btf* 3396 3393 F: include/uapi/linux/filter.h 3397 3394 F: kernel/bpf/ 3398 3395 F: kernel/trace/bpf_trace.c ··· 3828 3821 3829 3822 BROADCOM NETXTREME-E ROCE DRIVER 3830 3823 M: Selvin Xavier <selvin.xavier@broadcom.com> 3831 - M: Naresh Kumar PBS <nareshkumar.pbs@broadcom.com> 3832 3824 L: linux-rdma@vger.kernel.org 3833 3825 S: Supported 3834 3826 W: http://www.broadcom.com ··· 4662 4656 T: git git://git.samba.org/sfrench/cifs-2.6.git 4663 4657 F: Documentation/admin-guide/cifs/ 4664 4658 F: fs/cifs/ 4665 - F: fs/cifs_common/ 4659 + F: fs/smbfs_common/ 4666 4660 4667 4661 COMPACTPCI HOTPLUG CORE 4668 4662 M: Scott Murray <scott@spiteful.org> ··· 7992 7986 7993 7987 GPIO SUBSYSTEM 7994 7988 M: Linus Walleij <linus.walleij@linaro.org> 7995 - M: Bartosz 
Golaszewski <bgolaszewski@baylibre.com> 7989 + M: Bartosz Golaszewski <brgl@bgdev.pl> 7996 7990 L: linux-gpio@vger.kernel.org 7997 7991 S: Maintained 7998 7992 T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git ··· 8614 8608 F: drivers/iio/humidity/hts221* 8615 8609 8616 8610 HUAWEI ETHERNET DRIVER 8617 - M: Bin Luo <luobin9@huawei.com> 8618 8611 L: netdev@vger.kernel.org 8619 - S: Supported 8612 + S: Orphan 8620 8613 F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst 8621 8614 F: drivers/net/ethernet/huawei/hinic/ 8622 8615 ··· 10199 10194 L: linux-cifs@vger.kernel.org 10200 10195 S: Maintained 10201 10196 T: git git://git.samba.org/ksmbd.git 10202 - F: fs/cifs_common/ 10203 10197 F: fs/ksmbd/ 10198 + F: fs/smbfs_common/ 10204 10199 10205 10200 KERNEL UNIT TESTING FRAMEWORK (KUnit) 10206 10201 M: Brendan Higgins <brendanhiggins@google.com> ··· 11372 11367 F: drivers/iio/proximity/mb1232.c 11373 11368 11374 11369 MAXIM MAX77650 PMIC MFD DRIVER 11375 - M: Bartosz Golaszewski <bgolaszewski@baylibre.com> 11370 + M: Bartosz Golaszewski <brgl@bgdev.pl> 11376 11371 L: linux-kernel@vger.kernel.org 11377 11372 S: Maintained 11378 11373 F: Documentation/devicetree/bindings/*/*max77650.yaml ··· 13260 13255 F: drivers/scsi/nsp32* 13261 13256 13262 13257 NIOS2 ARCHITECTURE 13263 - M: Ley Foon Tan <ley.foon.tan@intel.com> 13258 + M: Dinh Nguyen <dinguyen@kernel.org> 13264 13259 S: Maintained 13265 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git 13260 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git 13266 13261 F: arch/nios2/ 13267 13262 13268 13263 NITRO ENCLAVES (NE) ··· 16656 16651 S: Supported 16657 16652 F: drivers/char/pcmcia/scr24x_cs.c 16658 16653 16659 - SCSI CDROM DRIVER 16660 - M: Jens Axboe <axboe@kernel.dk> 16661 - L: linux-scsi@vger.kernel.org 16662 - S: Maintained 16663 - W: http://www.kernel.dk 16664 - F: drivers/scsi/sr* 16665 - 16666 16654 SCSI RDMA PROTOCOL (SRP) 
INITIATOR 16667 16655 M: Bart Van Assche <bvanassche@acm.org> 16668 16656 L: linux-rdma@vger.kernel.org ··· 16954 16956 16955 16957 SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS 16956 16958 M: Karsten Graul <kgraul@linux.ibm.com> 16957 - M: Guvenc Gulce <guvenc@linux.ibm.com> 16958 16959 L: linux-s390@vger.kernel.org 16959 16960 S: Supported 16960 16961 W: http://www.ibm.com/developerworks/linux/linux390/ ··· 17798 17801 17799 17802 STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON) 17800 17803 M: Jens Frederich <jfrederich@gmail.com> 17801 - M: Daniel Drake <dsd@laptop.org> 17802 17804 M: Jon Nettleton <jon.nettleton@gmail.com> 17803 17805 S: Maintained 17804 17806 W: http://wiki.laptop.org/go/DCON ··· 17888 17892 M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com> 17889 17893 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 17890 17894 S: Maintained 17891 - F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml 17895 + F: Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml 17896 + F: Documentation/devicetree/bindings/sound/st,stm32-*.yaml 17892 17897 F: sound/soc/stm/ 17893 17898 17894 17899 STM32 TIMER/LPTIMER DRIVERS ··· 17966 17969 F: arch/x86/boot/video* 17967 17970 17968 17971 SWIOTLB SUBSYSTEM 17969 - M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 17972 + M: Christoph Hellwig <hch@infradead.org> 17970 17973 L: iommu@lists.linux-foundation.org 17971 17974 S: Supported 17972 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git 17975 + W: http://git.infradead.org/users/hch/dma-mapping.git 17976 + T: git git://git.infradead.org/users/hch/dma-mapping.git 17973 17977 F: arch/*/kernel/pci-swiotlb.c 17974 17978 F: include/linux/swiotlb.h 17975 17979 F: kernel/dma/swiotlb.c ··· 18553 18555 F: drivers/media/radio/radio-raremono.c 18554 18556 18555 18557 THERMAL 18556 - M: Zhang Rui <rui.zhang@intel.com> 18558 + M: Rafael J. 
Wysocki <rafael@kernel.org> 18557 18559 M: Daniel Lezcano <daniel.lezcano@linaro.org> 18558 18560 R: Amit Kucheria <amitk@kernel.org> 18561 + R: Zhang Rui <rui.zhang@intel.com> 18559 18562 L: linux-pm@vger.kernel.org 18560 18563 S: Supported 18561 18564 Q: https://patchwork.kernel.org/project/linux-pm/list/ 18562 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/thermal/linux.git 18565 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal 18563 18566 F: Documentation/devicetree/bindings/thermal/ 18564 18567 F: drivers/thermal/ 18565 18568 F: include/linux/cpu_cooling.h ··· 18689 18690 18690 18691 TI DAVINCI MACHINE SUPPORT 18691 18692 M: Sekhar Nori <nsekhar@ti.com> 18692 - R: Bartosz Golaszewski <bgolaszewski@baylibre.com> 18693 + R: Bartosz Golaszewski <brgl@bgdev.pl> 18693 18694 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 18694 18695 S: Supported 18695 18696 T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git ··· 19288 19289 F: drivers/usb/misc/chaoskey.c 19289 19290 19290 19291 USB CYPRESS C67X00 DRIVER 19291 - M: Peter Korsgaard <jacmet@sunsite.dk> 19292 19292 L: linux-usb@vger.kernel.org 19293 - S: Maintained 19293 + S: Orphan 19294 19294 F: drivers/usb/c67x00/ 19295 19295 19296 19296 USB DAVICOM DM9601 DRIVER 19297 - M: Peter Korsgaard <jacmet@sunsite.dk> 19297 + M: Peter Korsgaard <peter@korsgaard.com> 19298 19298 L: netdev@vger.kernel.org 19299 19299 S: Maintained 19300 19300 W: http://www.linux-usb.org/usbnet ··· 20473 20475 F: tools/lib/bpf/xsk* 20474 20476 20475 20477 XEN BLOCK SUBSYSTEM 20476 - M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 20477 20478 M: Roger Pau Monné <roger.pau@citrix.com> 20478 20479 L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 20479 20480 S: Supported ··· 20520 20523 F: drivers/net/xen-netback/* 20521 20524 20522 20525 XEN PCI SUBSYSTEM 20523 - M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 20526 + M: Juergen Gross 
<jgross@suse.com> 20524 20527 L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 20525 20528 S: Supported 20526 20529 F: arch/x86/pci/*xen* ··· 20543 20546 F: sound/xen/* 20544 20547 20545 20548 XEN SWIOTLB SUBSYSTEM 20546 - M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 20549 + M: Juergen Gross <jgross@suse.com> 20550 + M: Stefano Stabellini <sstabellini@kernel.org> 20547 20551 L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 20548 20552 L: iommu@lists.linux-foundation.org 20549 20553 S: Supported ··· 20703 20705 F: mm/zbud.c 20704 20706 20705 20707 ZD1211RW WIRELESS DRIVER 20706 - M: Daniel Drake <dsd@gentoo.org> 20707 20708 M: Ulrich Kunitz <kune@deine-taler.de> 20708 20709 L: linux-wireless@vger.kernel.org 20709 20710 L: zd1211-devs@lists.sourceforge.net (subscribers-only)
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 15 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc2 5 + EXTRAVERSION = -rc4 6 6 NAME = Opossums on Parade 7 7 8 8 # *DOCUMENTATION*
-1
arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
··· 71 71 isc: isc@f0008000 { 72 72 pinctrl-names = "default"; 73 73 pinctrl-0 = <&pinctrl_isc_base &pinctrl_isc_data_8bit &pinctrl_isc_data_9_10 &pinctrl_isc_data_11_12>; 74 - status = "okay"; 75 74 }; 76 75 77 76 qspi1: spi@f0024000 {
+39 -6
arch/arm/boot/dts/at91-sama7g5ek.dts
··· 196 196 197 197 regulator-state-standby { 198 198 regulator-on-in-suspend; 199 + regulator-suspend-microvolt = <1350000>; 199 200 regulator-mode = <4>; 200 201 }; 201 202 202 203 regulator-state-mem { 203 204 regulator-on-in-suspend; 205 + regulator-suspend-microvolt = <1350000>; 204 206 regulator-mode = <4>; 205 207 }; 206 208 }; ··· 355 353 #address-cells = <1>; 356 354 #size-cells = <0>; 357 355 pinctrl-names = "default"; 358 - pinctrl-0 = <&pinctrl_gmac0_default &pinctrl_gmac0_txck_default &pinctrl_gmac0_phy_irq>; 356 + pinctrl-0 = <&pinctrl_gmac0_default 357 + &pinctrl_gmac0_mdio_default 358 + &pinctrl_gmac0_txck_default 359 + &pinctrl_gmac0_phy_irq>; 359 360 phy-mode = "rgmii-id"; 360 361 status = "okay"; 361 362 ··· 373 368 #address-cells = <1>; 374 369 #size-cells = <0>; 375 370 pinctrl-names = "default"; 376 - pinctrl-0 = <&pinctrl_gmac1_default &pinctrl_gmac1_phy_irq>; 371 + pinctrl-0 = <&pinctrl_gmac1_default 372 + &pinctrl_gmac1_mdio_default 373 + &pinctrl_gmac1_phy_irq>; 377 374 phy-mode = "rmii"; 378 375 status = "okay"; 379 376 ··· 430 423 <PIN_PA15__G0_TXEN>, 431 424 <PIN_PA30__G0_RXCK>, 432 425 <PIN_PA18__G0_RXDV>, 433 - <PIN_PA22__G0_MDC>, 434 - <PIN_PA23__G0_MDIO>, 435 426 <PIN_PA25__G0_125CK>; 427 + slew-rate = <0>; 428 + bias-disable; 429 + }; 430 + 431 + pinctrl_gmac0_mdio_default: gmac0_mdio_default { 432 + pinmux = <PIN_PA22__G0_MDC>, 433 + <PIN_PA23__G0_MDIO>; 436 434 bias-disable; 437 435 }; 438 436 439 437 pinctrl_gmac0_txck_default: gmac0_txck_default { 440 438 pinmux = <PIN_PA24__G0_TXCK>; 439 + slew-rate = <0>; 441 440 bias-pull-up; 442 441 }; 443 442 ··· 460 447 <PIN_PD25__G1_RX0>, 461 448 <PIN_PD26__G1_RX1>, 462 449 <PIN_PD27__G1_RXER>, 463 - <PIN_PD24__G1_RXDV>, 464 - <PIN_PD28__G1_MDC>, 450 + <PIN_PD24__G1_RXDV>; 451 + slew-rate = <0>; 452 + bias-disable; 453 + }; 454 + 455 + pinctrl_gmac1_mdio_default: gmac1_mdio_default { 456 + pinmux = <PIN_PD28__G1_MDC>, 465 457 <PIN_PD29__G1_MDIO>; 466 458 bias-disable; 467 459 }; ··· 558 
540 <PIN_PA8__SDMMC0_DAT5>, 559 541 <PIN_PA9__SDMMC0_DAT6>, 560 542 <PIN_PA10__SDMMC0_DAT7>; 543 + slew-rate = <0>; 561 544 bias-pull-up; 562 545 }; 563 546 ··· 566 547 pinmux = <PIN_PA0__SDMMC0_CK>, 567 548 <PIN_PA2__SDMMC0_RSTN>, 568 549 <PIN_PA11__SDMMC0_DS>; 550 + slew-rate = <0>; 569 551 bias-pull-up; 570 552 }; 571 553 }; ··· 578 558 <PIN_PC0__SDMMC1_DAT1>, 579 559 <PIN_PC1__SDMMC1_DAT2>, 580 560 <PIN_PC2__SDMMC1_DAT3>; 561 + slew-rate = <0>; 581 562 bias-pull-up; 582 563 }; 583 564 ··· 587 566 <PIN_PB28__SDMMC1_RSTN>, 588 567 <PIN_PC5__SDMMC1_1V8SEL>, 589 568 <PIN_PC4__SDMMC1_CD>; 569 + slew-rate = <0>; 590 570 bias-pull-up; 591 571 }; 592 572 }; ··· 599 577 <PIN_PD6__SDMMC2_DAT1>, 600 578 <PIN_PD7__SDMMC2_DAT2>, 601 579 <PIN_PD8__SDMMC2_DAT3>; 580 + slew-rate = <0>; 602 581 bias-pull-up; 603 582 }; 604 583 605 584 ck { 606 585 pinmux = <PIN_PD4__SDMMC2_CK>; 586 + slew-rate = <0>; 607 587 bias-pull-up; 608 588 }; 609 589 }; ··· 656 632 sdhci-caps-mask = <0x0 0x00200000>; 657 633 pinctrl-names = "default"; 658 634 pinctrl-0 = <&pinctrl_sdmmc2_default>; 635 + }; 636 + 637 + &shdwc { 638 + atmel,shdwc-debouncer = <976>; 639 + status = "okay"; 640 + 641 + input@0 { 642 + reg = <0>; 643 + }; 659 644 }; 660 645 661 646 &spdifrx {
+2 -2
arch/arm/boot/dts/imx53-m53menlo.dts
··· 56 56 panel { 57 57 compatible = "edt,etm0700g0dh6"; 58 58 pinctrl-0 = <&pinctrl_display_gpio>; 59 + pinctrl-names = "default"; 59 60 enable-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; 60 61 61 62 port { ··· 77 76 regulator-name = "vbus"; 78 77 regulator-min-microvolt = <5000000>; 79 78 regulator-max-microvolt = <5000000>; 80 - gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>; 81 - enable-active-high; 79 + gpio = <&gpio1 2 0>; 82 80 }; 83 81 }; 84 82
+5
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
··· 5 5 #include <dt-bindings/gpio/gpio.h> 6 6 #include <dt-bindings/interrupt-controller/irq.h> 7 7 #include <dt-bindings/input/input.h> 8 + #include <dt-bindings/leds/common.h> 8 9 #include <dt-bindings/pwm/pwm.h> 9 10 10 11 / { ··· 278 277 led-cur = /bits/ 8 <0x20>; 279 278 max-cur = /bits/ 8 <0x60>; 280 279 reg = <0>; 280 + color = <LED_COLOR_ID_RED>; 281 281 }; 282 282 283 283 chan@1 { ··· 286 284 led-cur = /bits/ 8 <0x20>; 287 285 max-cur = /bits/ 8 <0x60>; 288 286 reg = <1>; 287 + color = <LED_COLOR_ID_GREEN>; 289 288 }; 290 289 291 290 chan@2 { ··· 294 291 led-cur = /bits/ 8 <0x20>; 295 292 max-cur = /bits/ 8 <0x60>; 296 293 reg = <2>; 294 + color = <LED_COLOR_ID_BLUE>; 297 295 }; 298 296 299 297 chan@3 { ··· 302 298 led-cur = /bits/ 8 <0x0>; 303 299 max-cur = /bits/ 8 <0x0>; 304 300 reg = <3>; 301 + color = <LED_COLOR_ID_WHITE>; 305 302 }; 306 303 }; 307 304
+11
arch/arm/boot/dts/imx6qdl-pico.dtsi
··· 176 176 pinctrl-0 = <&pinctrl_enet>; 177 177 phy-mode = "rgmii-id"; 178 178 phy-reset-gpios = <&gpio1 26 GPIO_ACTIVE_LOW>; 179 + phy-handle = <&phy>; 179 180 status = "okay"; 181 + 182 + mdio { 183 + #address-cells = <1>; 184 + #size-cells = <0>; 185 + 186 + phy: ethernet-phy@1 { 187 + reg = <1>; 188 + qca,clk-out-frequency = <125000000>; 189 + }; 190 + }; 180 191 }; 181 192 182 193 &hdmi {
+2 -2
arch/arm/boot/dts/imx6sx-sdb.dts
··· 114 114 compatible = "micron,n25q256a", "jedec,spi-nor"; 115 115 spi-max-frequency = <29000000>; 116 116 spi-rx-bus-width = <4>; 117 - spi-tx-bus-width = <4>; 117 + spi-tx-bus-width = <1>; 118 118 reg = <0>; 119 119 }; 120 120 ··· 124 124 compatible = "micron,n25q256a", "jedec,spi-nor"; 125 125 spi-max-frequency = <29000000>; 126 126 spi-rx-bus-width = <4>; 127 - spi-tx-bus-width = <4>; 127 + spi-tx-bus-width = <1>; 128 128 reg = <2>; 129 129 }; 130 130 };
+1 -1
arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
··· 292 292 compatible = "micron,n25q256a", "jedec,spi-nor"; 293 293 spi-max-frequency = <29000000>; 294 294 spi-rx-bus-width = <4>; 295 - spi-tx-bus-width = <4>; 295 + spi-tx-bus-width = <1>; 296 296 reg = <0>; 297 297 }; 298 298 };
+1 -1
arch/arm/boot/dts/omap3430-sdp.dts
··· 101 101 102 102 nand@1,0 { 103 103 compatible = "ti,omap2-nand"; 104 - reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ 104 + reg = <1 0 4>; /* CS1, offset 0, IO size 4 */ 105 105 interrupt-parent = <&gpmc>; 106 106 interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ 107 107 <1 IRQ_TYPE_NONE>; /* termcount */
+7 -8
arch/arm/boot/dts/qcom-apq8064.dtsi
··· 198 198 clock-frequency = <19200000>; 199 199 }; 200 200 201 - pxo_board { 201 + pxo_board: pxo_board { 202 202 compatible = "fixed-clock"; 203 203 #clock-cells = <0>; 204 204 clock-frequency = <27000000>; ··· 1148 1148 }; 1149 1149 1150 1150 gpu: adreno-3xx@4300000 { 1151 - compatible = "qcom,adreno-3xx"; 1151 + compatible = "qcom,adreno-320.2", "qcom,adreno"; 1152 1152 reg = <0x04300000 0x20000>; 1153 1153 reg-names = "kgsl_3d0_reg_memory"; 1154 1154 interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>; 1155 1155 interrupt-names = "kgsl_3d0_irq"; 1156 1156 clock-names = 1157 - "core_clk", 1158 - "iface_clk", 1159 - "mem_clk", 1160 - "mem_iface_clk"; 1157 + "core", 1158 + "iface", 1159 + "mem", 1160 + "mem_iface"; 1161 1161 clocks = 1162 1162 <&mmcc GFX3D_CLK>, 1163 1163 <&mmcc GFX3D_AHB_CLK>, 1164 1164 <&mmcc GFX3D_AXI_CLK>, 1165 1165 <&mmcc MMSS_IMEM_AHB_CLK>; 1166 - qcom,chipid = <0x03020002>; 1167 1166 1168 1167 iommus = <&gfx3d 0 1169 1168 &gfx3d 1 ··· 1305 1306 reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator"; 1306 1307 clock-names = "iface_clk", "ref"; 1307 1308 clocks = <&mmcc DSI_M_AHB_CLK>, 1308 - <&cxo_board>; 1309 + <&pxo_board>; 1309 1310 }; 1310 1311 1311 1312
+39
arch/arm/boot/dts/sama7g5.dtsi
··· 75 75 #size-cells = <1>; 76 76 ranges; 77 77 78 + securam: securam@e0000000 { 79 + compatible = "microchip,sama7g5-securam", "atmel,sama5d2-securam", "mmio-sram"; 80 + reg = <0xe0000000 0x4000>; 81 + clocks = <&pmc PMC_TYPE_PERIPHERAL 18>; 82 + #address-cells = <1>; 83 + #size-cells = <1>; 84 + ranges = <0 0xe0000000 0x4000>; 85 + no-memory-wc; 86 + status = "okay"; 87 + }; 88 + 78 89 secumod: secumod@e0004000 { 79 90 compatible = "microchip,sama7g5-secumod", "atmel,sama5d2-secumod", "syscon"; 80 91 reg = <0xe0004000 0x4000>; ··· 122 111 clock-names = "td_slck", "md_slck", "main_xtal"; 123 112 }; 124 113 114 + shdwc: shdwc@e001d010 { 115 + compatible = "microchip,sama7g5-shdwc", "syscon"; 116 + reg = <0xe001d010 0x10>; 117 + clocks = <&clk32k 0>; 118 + #address-cells = <1>; 119 + #size-cells = <0>; 120 + atmel,wakeup-rtc-timer; 121 + atmel,wakeup-rtt-timer; 122 + status = "disabled"; 123 + }; 124 + 125 125 rtt: rtt@e001d020 { 126 126 compatible = "microchip,sama7g5-rtt", "microchip,sam9x60-rtt", "atmel,at91sam9260-rtt"; 127 127 reg = <0xe001d020 0x30>; ··· 157 135 reg = <0xe001d180 0x24>; 158 136 interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; 159 137 clocks = <&clk32k 0>; 138 + }; 139 + 140 + chipid@e0020000 { 141 + compatible = "microchip,sama7g5-chipid"; 142 + reg = <0xe0020000 0x8>; 160 143 }; 161 144 162 145 sdmmc0: mmc@e1204000 { ··· 540 513 dma-names = "rx", "tx"; 541 514 status = "disabled"; 542 515 }; 516 + }; 517 + 518 + uddrc: uddrc@e3800000 { 519 + compatible = "microchip,sama7g5-uddrc"; 520 + reg = <0xe3800000 0x4000>; 521 + status = "okay"; 522 + }; 523 + 524 + ddr3phy: ddr3phy@e3804000 { 525 + compatible = "microchip,sama7g5-ddr3phy"; 526 + reg = <0xe3804000 0x1000>; 527 + status = "okay"; 543 528 }; 544 529 545 530 gic: interrupt-controller@e8c11000 {
+60 -7
arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
··· 17 17 * TAKE CARE WHEN MAINTAINING THIS FILE TO PROPAGATE ANY RELEVANT 18 18 * CHANGES TO vexpress-v2m.dtsi! 19 19 */ 20 + #include <dt-bindings/interrupt-controller/arm-gic.h> 20 21 21 22 / { 22 23 v2m_fixed_3v3: fixed-regulator-0 { ··· 102 101 }; 103 102 104 103 bus@8000000 { 105 - motherboard-bus { 106 - model = "V2M-P1"; 104 + compatible = "simple-bus"; 105 + #address-cells = <1>; 106 + #size-cells = <1>; 107 + 108 + #interrupt-cells = <1>; 109 + interrupt-map-mask = <0 63>; 110 + interrupt-map = <0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, 111 + <0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, 112 + <0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, 113 + <0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, 114 + <0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, 115 + <0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, 116 + <0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, 117 + <0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, 118 + <0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, 119 + <0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, 120 + <0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, 121 + <0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, 122 + <0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, 123 + <0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, 124 + <0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, 125 + <0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, 126 + <0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, 127 + <0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, 128 + <0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, 129 + <0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, 130 + <0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, 131 + <0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, 132 + <0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, 133 + <0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, 134 + <0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, 135 + <0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, 136 + <0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, 137 + <0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, 138 + <0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, 139 + <0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, 140 + <0 30 
&gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, 141 + <0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, 142 + <0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, 143 + <0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, 144 + <0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, 145 + <0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, 146 + <0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, 147 + <0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, 148 + <0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, 149 + <0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, 150 + <0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, 151 + <0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, 152 + <0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; 153 + 154 + motherboard-bus@8000000 { 107 155 arm,hbi = <0x190>; 108 156 arm,vexpress,site = <0>; 109 - arm,v2m-memory-map = "rs1"; 110 157 compatible = "arm,vexpress,v2m-p1", "simple-bus"; 111 158 #address-cells = <2>; /* SMB chipselect number and offset */ 112 159 #size-cells = <1>; 113 - #interrupt-cells = <1>; 114 - ranges; 160 + ranges = <0 0 0x08000000 0x04000000>, 161 + <1 0 0x14000000 0x04000000>, 162 + <2 0 0x18000000 0x04000000>, 163 + <3 0 0x1c000000 0x04000000>, 164 + <4 0 0x0c000000 0x04000000>, 165 + <5 0 0x10000000 0x04000000>; 115 166 116 167 nor_flash: flash@0 { 117 168 compatible = "arm,vexpress-flash", "cfi-flash"; ··· 268 215 clock-names = "apb_pclk"; 269 216 }; 270 217 271 - mmci@50000 { 218 + mmc@50000 { 272 219 compatible = "arm,pl180", "arm,primecell"; 273 220 reg = <0x050000 0x1000>; 274 221 interrupts = <9>, <10>; ··· 328 275 clock-names = "uartclk", "apb_pclk"; 329 276 }; 330 277 331 - wdt@f0000 { 278 + watchdog@f0000 { 332 279 compatible = "arm,sp805", "arm,primecell"; 333 280 reg = <0x0f0000 0x1000>; 334 281 interrupts = <0>;
+60 -5
arch/arm/boot/dts/vexpress-v2m.dtsi
··· 17 17 * TAKE CARE WHEN MAINTAINING THIS FILE TO PROPAGATE ANY RELEVANT 18 18 * CHANGES TO vexpress-v2m-rs1.dtsi! 19 19 */ 20 + #include <dt-bindings/interrupt-controller/arm-gic.h> 20 21 21 22 / { 22 - bus@4000000 { 23 - motherboard { 24 - model = "V2M-P1"; 23 + bus@40000000 { 24 + compatible = "simple-bus"; 25 + #address-cells = <1>; 26 + #size-cells = <1>; 27 + ranges = <0x40000000 0x40000000 0x10000000>, 28 + <0x10000000 0x10000000 0x00020000>; 29 + 30 + #interrupt-cells = <1>; 31 + interrupt-map-mask = <0 63>; 32 + interrupt-map = <0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, 33 + <0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, 34 + <0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, 35 + <0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, 36 + <0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, 37 + <0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, 38 + <0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, 39 + <0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, 40 + <0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, 41 + <0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, 42 + <0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, 43 + <0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, 44 + <0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, 45 + <0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, 46 + <0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, 47 + <0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, 48 + <0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, 49 + <0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, 50 + <0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, 51 + <0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, 52 + <0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, 53 + <0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, 54 + <0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, 55 + <0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, 56 + <0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, 57 + <0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, 58 + <0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, 59 + <0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, 60 + <0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, 61 + <0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, 62 + <0 
30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, 63 + <0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, 64 + <0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, 65 + <0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, 66 + <0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, 67 + <0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, 68 + <0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, 69 + <0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, 70 + <0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, 71 + <0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, 72 + <0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, 73 + <0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, 74 + <0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; 75 + 76 + motherboard-bus@40000000 { 25 77 arm,hbi = <0x190>; 26 78 arm,vexpress,site = <0>; 27 79 compatible = "arm,vexpress,v2m-p1", "simple-bus"; 28 80 #address-cells = <2>; /* SMB chipselect number and offset */ 29 81 #size-cells = <1>; 30 - #interrupt-cells = <1>; 31 - ranges; 82 + ranges = <0 0 0x40000000 0x04000000>, 83 + <1 0 0x44000000 0x04000000>, 84 + <2 0 0x48000000 0x04000000>, 85 + <3 0 0x4c000000 0x04000000>, 86 + <7 0 0x10000000 0x00020000>; 32 87 33 88 flash@0,00000000 { 34 89 compatible = "arm,vexpress-flash", "cfi-flash";
+1 -56
arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
··· 237 237 }; 238 238 239 239 bus@8000000 { 240 - compatible = "simple-bus"; 241 - 242 - #address-cells = <2>; 243 - #size-cells = <1>; 244 - ranges = <0 0 0 0x08000000 0x04000000>, 245 - <1 0 0 0x14000000 0x04000000>, 246 - <2 0 0 0x18000000 0x04000000>, 247 - <3 0 0 0x1c000000 0x04000000>, 248 - <4 0 0 0x0c000000 0x04000000>, 249 - <5 0 0 0x10000000 0x04000000>; 250 - 251 - #interrupt-cells = <1>; 252 - interrupt-map-mask = <0 0 63>; 253 - interrupt-map = <0 0 0 &gic 0 0 4>, 254 - <0 0 1 &gic 0 1 4>, 255 - <0 0 2 &gic 0 2 4>, 256 - <0 0 3 &gic 0 3 4>, 257 - <0 0 4 &gic 0 4 4>, 258 - <0 0 5 &gic 0 5 4>, 259 - <0 0 6 &gic 0 6 4>, 260 - <0 0 7 &gic 0 7 4>, 261 - <0 0 8 &gic 0 8 4>, 262 - <0 0 9 &gic 0 9 4>, 263 - <0 0 10 &gic 0 10 4>, 264 - <0 0 11 &gic 0 11 4>, 265 - <0 0 12 &gic 0 12 4>, 266 - <0 0 13 &gic 0 13 4>, 267 - <0 0 14 &gic 0 14 4>, 268 - <0 0 15 &gic 0 15 4>, 269 - <0 0 16 &gic 0 16 4>, 270 - <0 0 17 &gic 0 17 4>, 271 - <0 0 18 &gic 0 18 4>, 272 - <0 0 19 &gic 0 19 4>, 273 - <0 0 20 &gic 0 20 4>, 274 - <0 0 21 &gic 0 21 4>, 275 - <0 0 22 &gic 0 22 4>, 276 - <0 0 23 &gic 0 23 4>, 277 - <0 0 24 &gic 0 24 4>, 278 - <0 0 25 &gic 0 25 4>, 279 - <0 0 26 &gic 0 26 4>, 280 - <0 0 27 &gic 0 27 4>, 281 - <0 0 28 &gic 0 28 4>, 282 - <0 0 29 &gic 0 29 4>, 283 - <0 0 30 &gic 0 30 4>, 284 - <0 0 31 &gic 0 31 4>, 285 - <0 0 32 &gic 0 32 4>, 286 - <0 0 33 &gic 0 33 4>, 287 - <0 0 34 &gic 0 34 4>, 288 - <0 0 35 &gic 0 35 4>, 289 - <0 0 36 &gic 0 36 4>, 290 - <0 0 37 &gic 0 37 4>, 291 - <0 0 38 &gic 0 38 4>, 292 - <0 0 39 &gic 0 39 4>, 293 - <0 0 40 &gic 0 40 4>, 294 - <0 0 41 &gic 0 41 4>, 295 - <0 0 42 &gic 0 42 4>; 240 + ranges = <0x8000000 0 0x8000000 0x18000000>; 296 241 }; 297 242 298 243 site2: hsb@40000000 {
+1 -56
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
··· 609 609 }; 610 610 611 611 smb: bus@8000000 { 612 - compatible = "simple-bus"; 613 - 614 - #address-cells = <2>; 615 - #size-cells = <1>; 616 - ranges = <0 0 0 0x08000000 0x04000000>, 617 - <1 0 0 0x14000000 0x04000000>, 618 - <2 0 0 0x18000000 0x04000000>, 619 - <3 0 0 0x1c000000 0x04000000>, 620 - <4 0 0 0x0c000000 0x04000000>, 621 - <5 0 0 0x10000000 0x04000000>; 622 - 623 - #interrupt-cells = <1>; 624 - interrupt-map-mask = <0 0 63>; 625 - interrupt-map = <0 0 0 &gic 0 0 4>, 626 - <0 0 1 &gic 0 1 4>, 627 - <0 0 2 &gic 0 2 4>, 628 - <0 0 3 &gic 0 3 4>, 629 - <0 0 4 &gic 0 4 4>, 630 - <0 0 5 &gic 0 5 4>, 631 - <0 0 6 &gic 0 6 4>, 632 - <0 0 7 &gic 0 7 4>, 633 - <0 0 8 &gic 0 8 4>, 634 - <0 0 9 &gic 0 9 4>, 635 - <0 0 10 &gic 0 10 4>, 636 - <0 0 11 &gic 0 11 4>, 637 - <0 0 12 &gic 0 12 4>, 638 - <0 0 13 &gic 0 13 4>, 639 - <0 0 14 &gic 0 14 4>, 640 - <0 0 15 &gic 0 15 4>, 641 - <0 0 16 &gic 0 16 4>, 642 - <0 0 17 &gic 0 17 4>, 643 - <0 0 18 &gic 0 18 4>, 644 - <0 0 19 &gic 0 19 4>, 645 - <0 0 20 &gic 0 20 4>, 646 - <0 0 21 &gic 0 21 4>, 647 - <0 0 22 &gic 0 22 4>, 648 - <0 0 23 &gic 0 23 4>, 649 - <0 0 24 &gic 0 24 4>, 650 - <0 0 25 &gic 0 25 4>, 651 - <0 0 26 &gic 0 26 4>, 652 - <0 0 27 &gic 0 27 4>, 653 - <0 0 28 &gic 0 28 4>, 654 - <0 0 29 &gic 0 29 4>, 655 - <0 0 30 &gic 0 30 4>, 656 - <0 0 31 &gic 0 31 4>, 657 - <0 0 32 &gic 0 32 4>, 658 - <0 0 33 &gic 0 33 4>, 659 - <0 0 34 &gic 0 34 4>, 660 - <0 0 35 &gic 0 35 4>, 661 - <0 0 36 &gic 0 36 4>, 662 - <0 0 37 &gic 0 37 4>, 663 - <0 0 38 &gic 0 38 4>, 664 - <0 0 39 &gic 0 39 4>, 665 - <0 0 40 &gic 0 40 4>, 666 - <0 0 41 &gic 0 41 4>, 667 - <0 0 42 &gic 0 42 4>; 612 + ranges = <0x8000000 0 0x8000000 0x18000000>; 668 613 }; 669 614 670 615 site2: hsb@40000000 {
+1 -56
arch/arm/boot/dts/vexpress-v2p-ca5s.dts
··· 207 207 }; 208 208 209 209 smb: bus@8000000 { 210 - compatible = "simple-bus"; 211 - 212 - #address-cells = <2>; 213 - #size-cells = <1>; 214 - ranges = <0 0 0x08000000 0x04000000>, 215 - <1 0 0x14000000 0x04000000>, 216 - <2 0 0x18000000 0x04000000>, 217 - <3 0 0x1c000000 0x04000000>, 218 - <4 0 0x0c000000 0x04000000>, 219 - <5 0 0x10000000 0x04000000>; 220 - 221 - #interrupt-cells = <1>; 222 - interrupt-map-mask = <0 0 63>; 223 - interrupt-map = <0 0 0 &gic 0 0 4>, 224 - <0 0 1 &gic 0 1 4>, 225 - <0 0 2 &gic 0 2 4>, 226 - <0 0 3 &gic 0 3 4>, 227 - <0 0 4 &gic 0 4 4>, 228 - <0 0 5 &gic 0 5 4>, 229 - <0 0 6 &gic 0 6 4>, 230 - <0 0 7 &gic 0 7 4>, 231 - <0 0 8 &gic 0 8 4>, 232 - <0 0 9 &gic 0 9 4>, 233 - <0 0 10 &gic 0 10 4>, 234 - <0 0 11 &gic 0 11 4>, 235 - <0 0 12 &gic 0 12 4>, 236 - <0 0 13 &gic 0 13 4>, 237 - <0 0 14 &gic 0 14 4>, 238 - <0 0 15 &gic 0 15 4>, 239 - <0 0 16 &gic 0 16 4>, 240 - <0 0 17 &gic 0 17 4>, 241 - <0 0 18 &gic 0 18 4>, 242 - <0 0 19 &gic 0 19 4>, 243 - <0 0 20 &gic 0 20 4>, 244 - <0 0 21 &gic 0 21 4>, 245 - <0 0 22 &gic 0 22 4>, 246 - <0 0 23 &gic 0 23 4>, 247 - <0 0 24 &gic 0 24 4>, 248 - <0 0 25 &gic 0 25 4>, 249 - <0 0 26 &gic 0 26 4>, 250 - <0 0 27 &gic 0 27 4>, 251 - <0 0 28 &gic 0 28 4>, 252 - <0 0 29 &gic 0 29 4>, 253 - <0 0 30 &gic 0 30 4>, 254 - <0 0 31 &gic 0 31 4>, 255 - <0 0 32 &gic 0 32 4>, 256 - <0 0 33 &gic 0 33 4>, 257 - <0 0 34 &gic 0 34 4>, 258 - <0 0 35 &gic 0 35 4>, 259 - <0 0 36 &gic 0 36 4>, 260 - <0 0 37 &gic 0 37 4>, 261 - <0 0 38 &gic 0 38 4>, 262 - <0 0 39 &gic 0 39 4>, 263 - <0 0 40 &gic 0 40 4>, 264 - <0 0 41 &gic 0 41 4>, 265 - <0 0 42 &gic 0 42 4>; 210 + ranges = <0 0x8000000 0x18000000>; 266 211 }; 267 212 268 213 site2: hsb@40000000 {
-58
arch/arm/boot/dts/vexpress-v2p-ca9.dts
··· 295 295 }; 296 296 }; 297 297 298 - smb: bus@4000000 { 299 - compatible = "simple-bus"; 300 - 301 - #address-cells = <2>; 302 - #size-cells = <1>; 303 - ranges = <0 0 0x40000000 0x04000000>, 304 - <1 0 0x44000000 0x04000000>, 305 - <2 0 0x48000000 0x04000000>, 306 - <3 0 0x4c000000 0x04000000>, 307 - <7 0 0x10000000 0x00020000>; 308 - 309 - #interrupt-cells = <1>; 310 - interrupt-map-mask = <0 0 63>; 311 - interrupt-map = <0 0 0 &gic 0 0 4>, 312 - <0 0 1 &gic 0 1 4>, 313 - <0 0 2 &gic 0 2 4>, 314 - <0 0 3 &gic 0 3 4>, 315 - <0 0 4 &gic 0 4 4>, 316 - <0 0 5 &gic 0 5 4>, 317 - <0 0 6 &gic 0 6 4>, 318 - <0 0 7 &gic 0 7 4>, 319 - <0 0 8 &gic 0 8 4>, 320 - <0 0 9 &gic 0 9 4>, 321 - <0 0 10 &gic 0 10 4>, 322 - <0 0 11 &gic 0 11 4>, 323 - <0 0 12 &gic 0 12 4>, 324 - <0 0 13 &gic 0 13 4>, 325 - <0 0 14 &gic 0 14 4>, 326 - <0 0 15 &gic 0 15 4>, 327 - <0 0 16 &gic 0 16 4>, 328 - <0 0 17 &gic 0 17 4>, 329 - <0 0 18 &gic 0 18 4>, 330 - <0 0 19 &gic 0 19 4>, 331 - <0 0 20 &gic 0 20 4>, 332 - <0 0 21 &gic 0 21 4>, 333 - <0 0 22 &gic 0 22 4>, 334 - <0 0 23 &gic 0 23 4>, 335 - <0 0 24 &gic 0 24 4>, 336 - <0 0 25 &gic 0 25 4>, 337 - <0 0 26 &gic 0 26 4>, 338 - <0 0 27 &gic 0 27 4>, 339 - <0 0 28 &gic 0 28 4>, 340 - <0 0 29 &gic 0 29 4>, 341 - <0 0 30 &gic 0 30 4>, 342 - <0 0 31 &gic 0 31 4>, 343 - <0 0 32 &gic 0 32 4>, 344 - <0 0 33 &gic 0 33 4>, 345 - <0 0 34 &gic 0 34 4>, 346 - <0 0 35 &gic 0 35 4>, 347 - <0 0 36 &gic 0 36 4>, 348 - <0 0 37 &gic 0 37 4>, 349 - <0 0 38 &gic 0 38 4>, 350 - <0 0 39 &gic 0 39 4>, 351 - <0 0 40 &gic 0 40 4>, 352 - <0 0 41 &gic 0 41 4>, 353 - <0 0 42 &gic 0 42 4>; 354 - }; 355 - 356 298 site2: hsb@e0000000 { 357 299 compatible = "simple-bus"; 358 300 #address-cells = <1>;
+3 -1
arch/arm/common/sharpsl_param.c
··· 40 40 41 41 void sharpsl_save_param(void) 42 42 { 43 - memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info)); 43 + struct sharpsl_param_info *params = param_start(PARAM_BASE); 44 + 45 + memcpy(&sharpsl_param, params, sizeof(*params)); 44 46 45 47 if (sharpsl_param.comadj_keyword != COMADJ_MAGIC) 46 48 sharpsl_param.comadj=-1;
+1
arch/arm/configs/gemini_defconfig
··· 76 76 CONFIG_DRM=y 77 77 CONFIG_DRM_PANEL_ILITEK_IL9322=y 78 78 CONFIG_DRM_TVE200=y 79 + CONFIG_FB=y 79 80 CONFIG_LOGO=y 80 81 CONFIG_USB=y 81 82 CONFIG_USB_MON=y
+1
arch/arm/configs/imx_v6_v7_defconfig
··· 292 292 CONFIG_DRM_IMX_HDMI=y 293 293 CONFIG_DRM_ETNAVIV=y 294 294 CONFIG_DRM_MXSFB=y 295 + CONFIG_FB=y 295 296 CONFIG_FB_MODE_HELPERS=y 296 297 CONFIG_LCD_CLASS_DEVICE=y 297 298 CONFIG_LCD_L4F00242T03=y
+3
arch/arm/configs/multi_v7_defconfig
··· 456 456 CONFIG_PINCTRL_PALMAS=y 457 457 CONFIG_PINCTRL_OWL=y 458 458 CONFIG_PINCTRL_S500=y 459 + CONFIG_PINCTRL_MSM=y 459 460 CONFIG_PINCTRL_APQ8064=y 460 461 CONFIG_PINCTRL_APQ8084=y 461 462 CONFIG_PINCTRL_IPQ8064=y ··· 726 725 CONFIG_DRM_LIMA=m 727 726 CONFIG_DRM_PANFROST=m 728 727 CONFIG_DRM_ASPEED_GFX=m 728 + CONFIG_FB=y 729 729 CONFIG_FB_EFI=y 730 730 CONFIG_FB_WM8505=y 731 731 CONFIG_FB_SH_MOBILE_LCDC=y ··· 1124 1122 CONFIG_OMAP_USB2=y 1125 1123 CONFIG_TI_PIPE3=y 1126 1124 CONFIG_TWL4030_USB=m 1125 + CONFIG_RAS=y 1127 1126 CONFIG_NVMEM_IMX_OCOTP=y 1128 1127 CONFIG_ROCKCHIP_EFUSE=m 1129 1128 CONFIG_NVMEM_SUNXI_SID=y
-1
arch/arm/kernel/signal.c
··· 628 628 uprobe_notify_resume(regs); 629 629 } else { 630 630 tracehook_notify_resume(regs); 631 - rseq_handle_notify_resume(NULL, regs); 632 631 } 633 632 } 634 633 local_irq_disable();
+114 -16
arch/arm/mach-at91/pm.c
··· 47 47 unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION]; 48 48 }; 49 49 50 + /* 51 + * struct at91_pm_sfrbu_offsets: registers mapping for SFRBU 52 + * @pswbu: power switch BU control registers 53 + */ 54 + struct at91_pm_sfrbu_regs { 55 + struct { 56 + u32 key; 57 + u32 ctrl; 58 + u32 state; 59 + u32 softsw; 60 + } pswbu; 61 + }; 62 + 50 63 /** 51 64 * struct at91_soc_pm - AT91 SoC power management data structure 52 65 * @config_shdwc_ws: wakeup sources configuration function for SHDWC 53 66 * @config_pmc_ws: wakeup srouces configuration function for PMC 54 67 * @ws_ids: wakup sources of_device_id array 55 68 * @data: PM data to be used on last phase of suspend 69 + * @sfrbu_regs: SFRBU registers mapping 56 70 * @bu: backup unit mapped data (for backup mode) 57 71 * @memcs: memory chip select 58 72 */ ··· 76 62 const struct of_device_id *ws_ids; 77 63 struct at91_pm_bu *bu; 78 64 struct at91_pm_data data; 65 + struct at91_pm_sfrbu_regs sfrbu_regs; 79 66 void *memcs; 80 67 }; 81 68 ··· 371 356 return 0; 372 357 } 373 358 359 + static void at91_pm_switch_ba_to_vbat(void) 360 + { 361 + unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu); 362 + unsigned int val; 363 + 364 + /* Just for safety. */ 365 + if (!soc_pm.data.sfrbu) 366 + return; 367 + 368 + val = readl(soc_pm.data.sfrbu + offset); 369 + 370 + /* Already on VBAT. */ 371 + if (!(val & soc_pm.sfrbu_regs.pswbu.state)) 372 + return; 373 + 374 + val &= ~soc_pm.sfrbu_regs.pswbu.softsw; 375 + val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl; 376 + writel(val, soc_pm.data.sfrbu + offset); 377 + 378 + /* Wait for update. 
*/ 379 + val = readl(soc_pm.data.sfrbu + offset); 380 + while (val & soc_pm.sfrbu_regs.pswbu.state) 381 + val = readl(soc_pm.data.sfrbu + offset); 382 + } 383 + 374 384 static void at91_pm_suspend(suspend_state_t state) 375 385 { 376 386 if (soc_pm.data.mode == AT91_PM_BACKUP) { 387 + at91_pm_switch_ba_to_vbat(); 388 + 377 389 cpu_suspend(0, at91_suspend_finish); 378 390 379 391 /* The SRAM is lost between suspend cycles */ ··· 631 589 { /* Sentinel. */ }, 632 590 }; 633 591 634 - static __init void at91_dt_ramc(bool phy_mandatory) 592 + static __init int at91_dt_ramc(bool phy_mandatory) 635 593 { 636 594 struct device_node *np; 637 595 const struct of_device_id *of_id; 638 596 int idx = 0; 639 597 void *standby = NULL; 640 598 const struct ramc_info *ramc; 599 + int ret; 641 600 642 601 for_each_matching_node_and_match(np, ramc_ids, &of_id) { 643 602 soc_pm.data.ramc[idx] = of_iomap(np, 0); 644 - if (!soc_pm.data.ramc[idx]) 645 - panic(pr_fmt("unable to map ramc[%d] cpu registers\n"), idx); 603 + if (!soc_pm.data.ramc[idx]) { 604 + pr_err("unable to map ramc[%d] cpu registers\n", idx); 605 + ret = -ENOMEM; 606 + goto unmap_ramc; 607 + } 646 608 647 609 ramc = of_id->data; 648 610 if (ramc) { ··· 658 612 idx++; 659 613 } 660 614 661 - if (!idx) 662 - panic(pr_fmt("unable to find compatible ram controller node in dtb\n")); 615 + if (!idx) { 616 + pr_err("unable to find compatible ram controller node in dtb\n"); 617 + ret = -ENODEV; 618 + goto unmap_ramc; 619 + } 663 620 664 621 /* Lookup for DDR PHY node, if any. 
*/ 665 622 for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) { 666 623 soc_pm.data.ramc_phy = of_iomap(np, 0); 667 - if (!soc_pm.data.ramc_phy) 668 - panic(pr_fmt("unable to map ramc phy cpu registers\n")); 624 + if (!soc_pm.data.ramc_phy) { 625 + pr_err("unable to map ramc phy cpu registers\n"); 626 + ret = -ENOMEM; 627 + goto unmap_ramc; 628 + } 669 629 } 670 630 671 - if (phy_mandatory && !soc_pm.data.ramc_phy) 672 - panic(pr_fmt("DDR PHY is mandatory!\n")); 631 + if (phy_mandatory && !soc_pm.data.ramc_phy) { 632 + pr_err("DDR PHY is mandatory!\n"); 633 + ret = -ENODEV; 634 + goto unmap_ramc; 635 + } 673 636 674 637 if (!standby) { 675 638 pr_warn("ramc no standby function available\n"); 676 - return; 639 + return 0; 677 640 } 678 641 679 642 at91_cpuidle_device.dev.platform_data = standby; 643 + 644 + return 0; 645 + 646 + unmap_ramc: 647 + while (idx) 648 + iounmap(soc_pm.data.ramc[--idx]); 649 + 650 + return ret; 680 651 } 681 652 682 653 static void at91rm9200_idle(void) ··· 1080 1017 1081 1018 void __init at91rm9200_pm_init(void) 1082 1019 { 1020 + int ret; 1021 + 1083 1022 if (!IS_ENABLED(CONFIG_SOC_AT91RM9200)) 1084 1023 return; 1085 1024 ··· 1093 1028 soc_pm.data.standby_mode = AT91_PM_STANDBY; 1094 1029 soc_pm.data.suspend_mode = AT91_PM_ULP0; 1095 1030 1096 - at91_dt_ramc(false); 1031 + ret = at91_dt_ramc(false); 1032 + if (ret) 1033 + return; 1097 1034 1098 1035 /* 1099 1036 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh. 
··· 1113 1046 static const int iomaps[] __initconst = { 1114 1047 [AT91_PM_ULP1] = AT91_PM_IOMAP(SHDWC), 1115 1048 }; 1049 + int ret; 1116 1050 1117 1051 if (!IS_ENABLED(CONFIG_SOC_SAM9X60)) 1118 1052 return; 1119 1053 1120 1054 at91_pm_modes_validate(modes, ARRAY_SIZE(modes)); 1121 1055 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps)); 1122 - at91_dt_ramc(false); 1056 + ret = at91_dt_ramc(false); 1057 + if (ret) 1058 + return; 1059 + 1123 1060 at91_pm_init(NULL); 1124 1061 1125 1062 soc_pm.ws_ids = sam9x60_ws_ids; ··· 1132 1061 1133 1062 void __init at91sam9_pm_init(void) 1134 1063 { 1064 + int ret; 1065 + 1135 1066 if (!IS_ENABLED(CONFIG_SOC_AT91SAM9)) 1136 1067 return; 1137 1068 ··· 1145 1072 soc_pm.data.standby_mode = AT91_PM_STANDBY; 1146 1073 soc_pm.data.suspend_mode = AT91_PM_ULP0; 1147 1074 1148 - at91_dt_ramc(false); 1075 + ret = at91_dt_ramc(false); 1076 + if (ret) 1077 + return; 1078 + 1149 1079 at91_pm_init(at91sam9_idle); 1150 1080 } 1151 1081 ··· 1157 1081 static const int modes[] __initconst = { 1158 1082 AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, 1159 1083 }; 1084 + int ret; 1160 1085 1161 1086 if (!IS_ENABLED(CONFIG_SOC_SAMA5)) 1162 1087 return; 1163 1088 1164 1089 at91_pm_modes_validate(modes, ARRAY_SIZE(modes)); 1165 - at91_dt_ramc(false); 1090 + ret = at91_dt_ramc(false); 1091 + if (ret) 1092 + return; 1093 + 1166 1094 at91_pm_init(NULL); 1167 1095 } 1168 1096 ··· 1181 1101 [AT91_PM_BACKUP] = AT91_PM_IOMAP(SHDWC) | 1182 1102 AT91_PM_IOMAP(SFRBU), 1183 1103 }; 1104 + int ret; 1184 1105 1185 1106 if (!IS_ENABLED(CONFIG_SOC_SAMA5D2)) 1186 1107 return; 1187 1108 1188 1109 at91_pm_modes_validate(modes, ARRAY_SIZE(modes)); 1189 1110 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps)); 1190 - at91_dt_ramc(false); 1111 + ret = at91_dt_ramc(false); 1112 + if (ret) 1113 + return; 1114 + 1191 1115 at91_pm_init(NULL); 1192 1116 1193 1117 soc_pm.ws_ids = sama5d2_ws_ids; 1194 1118 soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws; 1195 1119 
soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws; 1120 + 1121 + soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8); 1122 + soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0); 1123 + soc_pm.sfrbu_regs.pswbu.softsw = BIT(1); 1124 + soc_pm.sfrbu_regs.pswbu.state = BIT(3); 1196 1125 } 1197 1126 1198 1127 void __init sama7_pm_init(void) ··· 1216 1127 [AT91_PM_BACKUP] = AT91_PM_IOMAP(SFRBU) | 1217 1128 AT91_PM_IOMAP(SHDWC), 1218 1129 }; 1130 + int ret; 1219 1131 1220 1132 if (!IS_ENABLED(CONFIG_SOC_SAMA7)) 1221 1133 return; 1222 1134 1223 1135 at91_pm_modes_validate(modes, ARRAY_SIZE(modes)); 1224 1136 1225 - at91_dt_ramc(true); 1137 + ret = at91_dt_ramc(true); 1138 + if (ret) 1139 + return; 1140 + 1226 1141 at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps)); 1227 1142 at91_pm_init(NULL); 1228 1143 1229 1144 soc_pm.ws_ids = sama7g5_ws_ids; 1230 1145 soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws; 1146 + 1147 + soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8); 1148 + soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0); 1149 + soc_pm.sfrbu_regs.pswbu.softsw = BIT(1); 1150 + soc_pm.sfrbu_regs.pswbu.state = BIT(2); 1231 1151 } 1232 1152 1233 1153 static int __init at91_pm_modes_select(char *str)
+37 -13
arch/arm/mach-at91/pm_suspend.S
··· 1014 1014 mov tmp1, #0 1015 1015 mcr p15, 0, tmp1, c7, c10, 4 1016 1016 1017 - ldr tmp1, [r0, #PM_DATA_PMC] 1018 - str tmp1, .pmc_base 1019 - ldr tmp1, [r0, #PM_DATA_RAMC0] 1020 - str tmp1, .sramc_base 1021 - ldr tmp1, [r0, #PM_DATA_RAMC1] 1022 - str tmp1, .sramc1_base 1023 - ldr tmp1, [r0, #PM_DATA_RAMC_PHY] 1024 - str tmp1, .sramc_phy_base 1025 - ldr tmp1, [r0, #PM_DATA_MEMCTRL] 1026 - str tmp1, .memtype 1027 - ldr tmp1, [r0, #PM_DATA_MODE] 1028 - str tmp1, .pm_mode 1017 + /* Flush tlb. */ 1018 + mov r4, #0 1019 + mcr p15, 0, r4, c8, c7, 0 1020 + 1029 1021 ldr tmp1, [r0, #PM_DATA_PMC_MCKR_OFFSET] 1030 1022 str tmp1, .mckr_offset 1031 1023 ldr tmp1, [r0, #PM_DATA_PMC_VERSION] 1032 1024 str tmp1, .pmc_version 1033 - /* Both ldrne below are here to preload their address in the TLB */ 1025 + ldr tmp1, [r0, #PM_DATA_MEMCTRL] 1026 + str tmp1, .memtype 1027 + ldr tmp1, [r0, #PM_DATA_MODE] 1028 + str tmp1, .pm_mode 1029 + 1030 + /* 1031 + * ldrne below are here to preload their address in the TLB as access 1032 + * to RAM may be limited while in self-refresh. 
1033 + */ 1034 + ldr tmp1, [r0, #PM_DATA_PMC] 1035 + str tmp1, .pmc_base 1036 + cmp tmp1, #0 1037 + ldrne tmp2, [tmp1, #0] 1038 + 1039 + ldr tmp1, [r0, #PM_DATA_RAMC0] 1040 + str tmp1, .sramc_base 1041 + cmp tmp1, #0 1042 + ldrne tmp2, [tmp1, #0] 1043 + 1044 + ldr tmp1, [r0, #PM_DATA_RAMC1] 1045 + str tmp1, .sramc1_base 1046 + cmp tmp1, #0 1047 + ldrne tmp2, [tmp1, #0] 1048 + 1049 + #ifndef CONFIG_SOC_SAM_V4_V5 1050 + /* ldrne below are here to preload their address in the TLB */ 1051 + ldr tmp1, [r0, #PM_DATA_RAMC_PHY] 1052 + str tmp1, .sramc_phy_base 1053 + cmp tmp1, #0 1054 + ldrne tmp2, [tmp1, #0] 1055 + 1034 1056 ldr tmp1, [r0, #PM_DATA_SHDWC] 1035 1057 str tmp1, .shdwc 1036 1058 cmp tmp1, #0 1037 1059 ldrne tmp2, [tmp1, #0] 1060 + 1038 1061 ldr tmp1, [r0, #PM_DATA_SFRBU] 1039 1062 str tmp1, .sfrbu 1040 1063 cmp tmp1, #0 1041 1064 ldrne tmp2, [tmp1, #0x10] 1065 + #endif 1042 1066 1043 1067 /* Active the self-refresh mode */ 1044 1068 at91_sramc_self_refresh_ena
+2 -2
arch/arm/mach-dove/include/mach/uncompress.h
··· 11 11 12 12 #define LSR_THRE 0x20 13 13 14 - static void putc(const char c) 14 + static inline void putc(const char c) 15 15 { 16 16 int i; 17 17 ··· 24 24 *UART_THR = c; 25 25 } 26 26 27 - static void flush(void) 27 + static inline void flush(void) 28 28 { 29 29 } 30 30
+3
arch/arm/mach-imx/mach-imx6q.c
··· 172 172 imx_get_soc_revision()); 173 173 174 174 imx6q_enet_phy_init(); 175 + 176 + of_platform_default_populate(NULL, NULL, NULL); 177 + 175 178 imx_anatop_init(); 176 179 cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init(); 177 180 imx6q_1588_init();
+2
arch/arm/mach-imx/pm-imx6.c
··· 10 10 #include <linux/io.h> 11 11 #include <linux/irq.h> 12 12 #include <linux/genalloc.h> 13 + #include <linux/irqchip/arm-gic.h> 13 14 #include <linux/mfd/syscon.h> 14 15 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 15 16 #include <linux/of.h> ··· 620 619 621 620 static void imx6_pm_stby_poweroff(void) 622 621 { 622 + gic_cpu_if_down(0); 623 623 imx6_set_lpm(STOP_POWER_OFF); 624 624 imx6q_suspend_finish(0); 625 625
-12
arch/arm/mach-omap1/include/mach/memory.h
··· 9 9 /* REVISIT: omap1 legacy drivers still rely on this */ 10 10 #include <mach/soc.h> 11 11 12 - /* 13 - * Bus address is physical address, except for OMAP-1510 Local Bus. 14 - * OMAP-1510 bus address is translated into a Local Bus address if the 15 - * OMAP bus type is lbus. We do the address translation based on the 16 - * device overriding the defaults used in the dma-mapping API. 17 - */ 18 - 19 - /* 20 - * OMAP-1510 Local Bus address offset 21 - */ 22 - #define OMAP1510_LB_OFFSET UL(0x30000000) 23 - 24 12 #endif
+82 -34
arch/arm/mach-omap1/usb.c
··· 11 11 #include <linux/platform_device.h> 12 12 #include <linux/dma-map-ops.h> 13 13 #include <linux/io.h> 14 + #include <linux/delay.h> 14 15 15 16 #include <asm/irq.h> 16 17 ··· 207 206 208 207 #endif 209 208 210 - #if IS_ENABLED(CONFIG_USB_OHCI_HCD) 211 - 212 209 /* The dmamask must be set for OHCI to work */ 213 210 static u64 ohci_dmamask = ~(u32)0; 214 211 ··· 235 236 236 237 static inline void ohci_device_init(struct omap_usb_config *pdata) 237 238 { 239 + if (!IS_ENABLED(CONFIG_USB_OHCI_HCD)) 240 + return; 241 + 238 242 if (cpu_is_omap7xx()) 239 243 ohci_resources[1].start = INT_7XX_USB_HHC_1; 240 244 pdata->ohci_device = &ohci_device; 241 245 pdata->ocpi_enable = &ocpi_enable; 242 246 } 243 - 244 - #else 245 - 246 - static inline void ohci_device_init(struct omap_usb_config *pdata) 247 - { 248 - } 249 - 250 - #endif 251 247 252 248 #if defined(CONFIG_USB_OTG) && defined(CONFIG_ARCH_OMAP_OTG) 253 249 ··· 528 534 } 529 535 530 536 #ifdef CONFIG_ARCH_OMAP15XX 537 + /* OMAP-1510 OHCI has its own MMU for DMA */ 538 + #define OMAP1510_LB_MEMSIZE 32 /* Should be same as SDRAM size */ 539 + #define OMAP1510_LB_CLOCK_DIV 0xfffec10c 540 + #define OMAP1510_LB_MMU_CTL 0xfffec208 541 + #define OMAP1510_LB_MMU_LCK 0xfffec224 542 + #define OMAP1510_LB_MMU_LD_TLB 0xfffec228 543 + #define OMAP1510_LB_MMU_CAM_H 0xfffec22c 544 + #define OMAP1510_LB_MMU_CAM_L 0xfffec230 545 + #define OMAP1510_LB_MMU_RAM_H 0xfffec234 546 + #define OMAP1510_LB_MMU_RAM_L 0xfffec238 547 + 548 + /* 549 + * Bus address is physical address, except for OMAP-1510 Local Bus. 550 + * OMAP-1510 bus address is translated into a Local Bus address if the 551 + * OMAP bus type is lbus. 
552 + */ 553 + #define OMAP1510_LB_OFFSET UL(0x30000000) 554 + 555 + /* 556 + * OMAP-1510 specific Local Bus clock on/off 557 + */ 558 + static int omap_1510_local_bus_power(int on) 559 + { 560 + if (on) { 561 + omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL); 562 + udelay(200); 563 + } else { 564 + omap_writel(0, OMAP1510_LB_MMU_CTL); 565 + } 566 + 567 + return 0; 568 + } 569 + 570 + /* 571 + * OMAP-1510 specific Local Bus initialization 572 + * NOTE: This assumes 32MB memory size in OMAP1510LB_MEMSIZE. 573 + * See also arch/mach-omap/memory.h for __virt_to_dma() and 574 + * __dma_to_virt() which need to match with the physical 575 + * Local Bus address below. 576 + */ 577 + static int omap_1510_local_bus_init(void) 578 + { 579 + unsigned int tlb; 580 + unsigned long lbaddr, physaddr; 581 + 582 + omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4, 583 + OMAP1510_LB_CLOCK_DIV); 584 + 585 + /* Configure the Local Bus MMU table */ 586 + for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) { 587 + lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET; 588 + physaddr = tlb * 0x00100000 + PHYS_OFFSET; 589 + omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H); 590 + omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc, 591 + OMAP1510_LB_MMU_CAM_L); 592 + omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H); 593 + omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L); 594 + omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK); 595 + omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB); 596 + } 597 + 598 + /* Enable the walking table */ 599 + omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL); 600 + udelay(200); 601 + 602 + return 0; 603 + } 604 + 605 + static void omap_1510_local_bus_reset(void) 606 + { 607 + omap_1510_local_bus_power(1); 608 + omap_1510_local_bus_init(); 609 + } 531 610 532 611 /* ULPD_DPLL_CTRL */ 533 612 #define DPLL_IOB (1 << 13) ··· 609 542 610 543 /* ULPD_APLL_CTRL */ 611 544 #define APLL_NDPLL_SWITCH (1 << 0) 612 - 
613 - static int omap_1510_usb_ohci_notifier(struct notifier_block *nb, 614 - unsigned long event, void *data) 615 - { 616 - struct device *dev = data; 617 - 618 - if (event != BUS_NOTIFY_ADD_DEVICE) 619 - return NOTIFY_DONE; 620 - 621 - if (strncmp(dev_name(dev), "ohci", 4) == 0 && 622 - dma_direct_set_offset(dev, PHYS_OFFSET, OMAP1510_LB_OFFSET, 623 - (u64)-1)) 624 - WARN_ONCE(1, "failed to set DMA offset\n"); 625 - return NOTIFY_OK; 626 - } 627 - 628 - static struct notifier_block omap_1510_usb_ohci_nb = { 629 - .notifier_call = omap_1510_usb_ohci_notifier, 630 - }; 631 545 632 546 static void __init omap_1510_usb_init(struct omap_usb_config *config) 633 547 { ··· 664 616 } 665 617 #endif 666 618 667 - #if IS_ENABLED(CONFIG_USB_OHCI_HCD) 668 - if (config->register_host) { 619 + if (IS_ENABLED(CONFIG_USB_OHCI_HCD) && config->register_host) { 669 620 int status; 670 621 671 - bus_register_notifier(&platform_bus_type, 672 - &omap_1510_usb_ohci_nb); 673 622 ohci_device.dev.platform_data = config; 623 + dma_direct_set_offset(&ohci_device.dev, PHYS_OFFSET, 624 + OMAP1510_LB_OFFSET, (u64)-1); 674 625 status = platform_device_register(&ohci_device); 675 626 if (status) 676 627 pr_debug("can't register OHCI device, %d\n", status); 677 628 /* hcd explicitly gates 48MHz */ 629 + 630 + config->lb_reset = omap_1510_local_bus_reset; 678 631 } 679 - #endif 680 632 } 681 633 682 634 #else
+2
arch/arm/mach-omap2/omap_hwmod.c
··· 3614 3614 oh->flags |= HWMOD_SWSUP_SIDLE_ACT; 3615 3615 if (data->cfg->quirks & SYSC_QUIRK_SWSUP_MSTANDBY) 3616 3616 oh->flags |= HWMOD_SWSUP_MSTANDBY; 3617 + if (data->cfg->quirks & SYSC_QUIRK_CLKDM_NOAUTO) 3618 + oh->flags |= HWMOD_CLKDM_NOAUTO; 3617 3619 3618 3620 error = omap_hwmod_check_module(dev, oh, data, sysc_fields, 3619 3621 rev_offs, sysc_offs, syss_offs,
+19
arch/arm/net/bpf_jit_32.c
··· 36 36 * +-----+ 37 37 * |RSVD | JIT scratchpad 38 38 * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE) 39 + * | ... | caller-saved registers 40 + * +-----+ 41 + * | ... | arguments passed on stack 42 + * ARM_SP during call => +-----| 39 43 * | | 40 44 * | ... | Function call stack 41 45 * | | ··· 67 63 * 68 64 * When popping registers off the stack at the end of a BPF function, we 69 65 * reference them via the current ARM_FP register. 66 + * 67 + * Some eBPF operations are implemented via a call to a helper function. 68 + * Such calls are "invisible" in the eBPF code, so it is up to the calling 69 + * program to preserve any caller-saved ARM registers during the call. The 70 + * JIT emits code to push and pop those registers onto the stack, immediately 71 + * above the callee stack frame. 70 72 */ 71 73 #define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \ 72 74 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \ 73 75 1 << ARM_FP) 74 76 #define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) 75 77 #define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) 78 + 79 + #define CALLER_MASK (1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3) 76 80 77 81 enum { 78 82 /* Stack layout - these are offsets from (top of stack - 4) */ ··· 476 464 477 465 static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) 478 466 { 467 + const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1); 479 468 const s8 *tmp = bpf2a32[TMP_REG_1]; 480 469 481 470 #if __LINUX_ARM_ARCH__ == 7 ··· 508 495 emit(ARM_MOV_R(ARM_R0, rm), ctx); 509 496 } 510 497 498 + /* Push caller-saved registers on stack */ 499 + emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx); 500 + 511 501 /* Call appropriate function */ 512 502 emit_mov_i(ARM_IP, op == BPF_DIV ? 
513 503 (u32)jit_udiv32 : (u32)jit_mod32, ctx); 514 504 emit_blx_r(ARM_IP, ctx); 505 + 506 + /* Restore caller-saved registers from stack */ 507 + emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx); 515 508 516 509 /* Save return value */ 517 510 if (rd != ARM_R0)
-1
arch/arm64/boot/dts/arm/foundation-v8.dtsi
··· 115 115 116 116 bus@8000000 { 117 117 compatible = "arm,vexpress,v2m-p1", "simple-bus"; 118 - arm,v2m-memory-map = "rs1"; 119 118 #address-cells = <2>; /* SMB chipselect number and offset */ 120 119 #size-cells = <1>; 121 120
-23
arch/arm64/boot/dts/arm/fvp-base-revc.dts
··· 192 192 remote-endpoint = <&clcd_pads>; 193 193 }; 194 194 }; 195 - 196 - panel-timing { 197 - clock-frequency = <63500127>; 198 - hactive = <1024>; 199 - hback-porch = <152>; 200 - hfront-porch = <48>; 201 - hsync-len = <104>; 202 - vactive = <768>; 203 - vback-porch = <23>; 204 - vfront-porch = <3>; 205 - vsync-len = <4>; 206 - }; 207 195 }; 208 196 209 197 bus@8000000 { 210 - compatible = "simple-bus"; 211 - 212 - #address-cells = <2>; 213 - #size-cells = <1>; 214 - ranges = <0 0 0 0x08000000 0x04000000>, 215 - <1 0 0 0x14000000 0x04000000>, 216 - <2 0 0 0x18000000 0x04000000>, 217 - <3 0 0 0x1c000000 0x04000000>, 218 - <4 0 0 0x0c000000 0x04000000>, 219 - <5 0 0 0x10000000 0x04000000>; 220 - 221 198 #interrupt-cells = <1>; 222 199 interrupt-map-mask = <0 0 63>; 223 200 interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
-12
arch/arm64/boot/dts/arm/juno-base.dtsi
··· 27 27 reg = <0x0 0x2b1f0000 0x0 0x1000>; 28 28 interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, 29 29 <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>; 30 - interrupt-names = "mhu_lpri_rx", 31 - "mhu_hpri_rx"; 32 30 #mbox-cells = <1>; 33 31 clocks = <&soc_refclk100mhz>; 34 32 clock-names = "apb_pclk"; ··· 802 804 }; 803 805 804 806 bus@8000000 { 805 - compatible = "simple-bus"; 806 - #address-cells = <2>; 807 - #size-cells = <1>; 808 - ranges = <0 0 0 0x08000000 0x04000000>, 809 - <1 0 0 0x14000000 0x04000000>, 810 - <2 0 0 0x18000000 0x04000000>, 811 - <3 0 0 0x1c000000 0x04000000>, 812 - <4 0 0 0x0c000000 0x04000000>, 813 - <5 0 0 0x10000000 0x04000000>; 814 - 815 807 #interrupt-cells = <1>; 816 808 interrupt-map-mask = <0 0 15>; 817 809 interrupt-map = <0 0 0 &gic 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+14 -7
arch/arm64/boot/dts/arm/juno-motherboard.dtsi
··· 92 92 }; 93 93 94 94 bus@8000000 { 95 - motherboard-bus { 95 + compatible = "simple-bus"; 96 + #address-cells = <2>; 97 + #size-cells = <1>; 98 + ranges = <0 0x8000000 0 0x8000000 0x18000000>; 99 + 100 + motherboard-bus@8000000 { 96 101 compatible = "arm,vexpress,v2p-p1", "simple-bus"; 97 102 #address-cells = <2>; /* SMB chipselect number and offset */ 98 103 #size-cells = <1>; 99 - #interrupt-cells = <1>; 100 - ranges; 101 - model = "V2M-Juno"; 104 + ranges = <0 0 0 0x08000000 0x04000000>, 105 + <1 0 0 0x14000000 0x04000000>, 106 + <2 0 0 0x18000000 0x04000000>, 107 + <3 0 0 0x1c000000 0x04000000>, 108 + <4 0 0 0x0c000000 0x04000000>, 109 + <5 0 0 0x10000000 0x04000000>; 102 110 arm,hbi = <0x252>; 103 111 arm,vexpress,site = <0>; 104 - arm,v2m-memory-map = "rs1"; 105 112 106 113 flash@0 { 107 114 /* 2 * 32MiB NOR Flash memory mounted on CS0 */ ··· 225 218 }; 226 219 }; 227 220 228 - mmci@50000 { 221 + mmc@50000 { 229 222 compatible = "arm,pl180", "arm,primecell"; 230 223 reg = <0x050000 0x1000>; 231 224 interrupts = <5>; ··· 253 246 clock-names = "KMIREFCLK", "apb_pclk"; 254 247 }; 255 248 256 - wdt@f0000 { 249 + watchdog@f0000 { 257 250 compatible = "arm,sp805", "arm,primecell"; 258 251 reg = <0x0f0000 0x10000>; 259 252 interrupts = <7>;
-11
arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
··· 133 133 }; 134 134 135 135 bus@8000000 { 136 - compatible = "simple-bus"; 137 - 138 - #address-cells = <2>; 139 - #size-cells = <1>; 140 - ranges = <0 0 0 0x08000000 0x04000000>, 141 - <1 0 0 0x14000000 0x04000000>, 142 - <2 0 0 0x18000000 0x04000000>, 143 - <3 0 0 0x1c000000 0x04000000>, 144 - <4 0 0 0x0c000000 0x04000000>, 145 - <5 0 0 0x10000000 0x04000000>; 146 - 147 136 #interrupt-cells = <1>; 148 137 interrupt-map-mask = <0 0 63>; 149 138 interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+1 -1
arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi
··· 6 6 */ 7 7 / { 8 8 bus@8000000 { 9 - motherboard-bus { 9 + motherboard-bus@8000000 { 10 10 arm,v2m-memory-map = "rs2"; 11 11 12 12 iofpga-bus@300000000 {
+14 -6
arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
··· 77 77 }; 78 78 79 79 bus@8000000 { 80 - motherboard-bus { 81 - arm,v2m-memory-map = "rs1"; 80 + compatible = "simple-bus"; 81 + #address-cells = <2>; 82 + #size-cells = <1>; 83 + ranges = <0 0x8000000 0 0x8000000 0x18000000>; 84 + 85 + motherboard-bus@8000000 { 82 86 compatible = "arm,vexpress,v2m-p1", "simple-bus"; 83 87 #address-cells = <2>; /* SMB chipselect number and offset */ 84 88 #size-cells = <1>; 85 - #interrupt-cells = <1>; 86 - ranges; 89 + ranges = <0 0 0 0x08000000 0x04000000>, 90 + <1 0 0 0x14000000 0x04000000>, 91 + <2 0 0 0x18000000 0x04000000>, 92 + <3 0 0 0x1c000000 0x04000000>, 93 + <4 0 0 0x0c000000 0x04000000>, 94 + <5 0 0 0x10000000 0x04000000>; 87 95 88 96 flash@0 { 89 97 compatible = "arm,vexpress-flash", "cfi-flash"; ··· 138 130 clock-names = "apb_pclk"; 139 131 }; 140 132 141 - mmci@50000 { 133 + mmc@50000 { 142 134 compatible = "arm,pl180", "arm,primecell"; 143 135 reg = <0x050000 0x1000>; 144 136 interrupts = <9>, <10>; ··· 198 190 clock-names = "uartclk", "apb_pclk"; 199 191 }; 200 192 201 - wdt@f0000 { 193 + watchdog@f0000 { 202 194 compatible = "arm,sp805", "arm,primecell"; 203 195 reg = <0x0f0000 0x1000>; 204 196 interrupts = <0>;
+1 -56
arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
··· 145 145 }; 146 146 147 147 smb: bus@8000000 { 148 - compatible = "simple-bus"; 149 - 150 - #address-cells = <2>; 151 - #size-cells = <1>; 152 - ranges = <0 0 0 0x08000000 0x04000000>, 153 - <1 0 0 0x14000000 0x04000000>, 154 - <2 0 0 0x18000000 0x04000000>, 155 - <3 0 0 0x1c000000 0x04000000>, 156 - <4 0 0 0x0c000000 0x04000000>, 157 - <5 0 0 0x10000000 0x04000000>; 158 - 159 - #interrupt-cells = <1>; 160 - interrupt-map-mask = <0 0 63>; 161 - interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, 162 - <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, 163 - <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, 164 - <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, 165 - <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, 166 - <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, 167 - <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, 168 - <0 0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, 169 - <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, 170 - <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, 171 - <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, 172 - <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, 173 - <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, 174 - <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, 175 - <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, 176 - <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, 177 - <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, 178 - <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, 179 - <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, 180 - <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, 181 - <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, 182 - <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, 183 - <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, 184 - <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, 185 - <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, 186 - <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, 187 - <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, 188 - <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, 189 - <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, 190 - <0 0 29 &gic GIC_SPI 29 
IRQ_TYPE_LEVEL_HIGH>, 191 - <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, 192 - <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, 193 - <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, 194 - <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, 195 - <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, 196 - <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, 197 - <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, 198 - <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, 199 - <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, 200 - <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, 201 - <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, 202 - <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, 203 - <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; 148 + ranges = <0x8000000 0 0x8000000 0x18000000>; 204 149 }; 205 150 };
+2 -2
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
··· 405 405 interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>; 406 406 clock-frequency = <0>; /* fixed up by bootloader */ 407 407 clocks = <&clockgen QORIQ_CLK_HWACCEL 1>; 408 - voltage-ranges = <1800 1800 3300 3300>; 408 + voltage-ranges = <1800 1800>; 409 409 sdhci,auto-cmd12; 410 - broken-cd; 410 + non-removable; 411 411 little-endian; 412 412 bus-width = <4>; 413 413 status = "disabled";
+1 -1
arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
··· 91 91 #size-cells = <1>; 92 92 compatible = "jedec,spi-nor"; 93 93 spi-max-frequency = <80000000>; 94 - spi-tx-bus-width = <4>; 94 + spi-tx-bus-width = <1>; 95 95 spi-rx-bus-width = <4>; 96 96 }; 97 97 };
+1 -1
arch/arm64/boot/dts/freescale/imx8mm-evk.dts
··· 48 48 #size-cells = <1>; 49 49 compatible = "jedec,spi-nor"; 50 50 spi-max-frequency = <80000000>; 51 - spi-tx-bus-width = <4>; 51 + spi-tx-bus-width = <1>; 52 52 spi-rx-bus-width = <4>; 53 53 }; 54 54 };
+1
arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi
··· 102 102 regulator-min-microvolt = <850000>; 103 103 regulator-max-microvolt = <950000>; 104 104 regulator-boot-on; 105 + regulator-always-on; 105 106 regulator-ramp-delay = <3125>; 106 107 nxp,dvs-run-voltage = <950000>; 107 108 nxp,dvs-standby-voltage = <850000>;
+1 -1
arch/arm64/boot/dts/freescale/imx8mm-venice-gw7902.dts
··· 647 647 pinctrl_hog: hoggrp { 648 648 fsl,pins = < 649 649 MX8MM_IOMUXC_NAND_CE0_B_GPIO3_IO1 0x40000159 /* M2_GDIS# */ 650 - MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x40000041 /* M2_RST# */ 650 + MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x40000041 /* M2_RST# */ 651 651 MX8MM_IOMUXC_NAND_DATA01_GPIO3_IO7 0x40000119 /* M2_OFF# */ 652 652 MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x40000159 /* M2_WDIS# */ 653 653 MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x40000041 /* AMP GPIO1 */
+1 -1
arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
··· 101 101 #size-cells = <1>; 102 102 compatible = "jedec,spi-nor"; 103 103 spi-max-frequency = <80000000>; 104 - spi-tx-bus-width = <4>; 104 + spi-tx-bus-width = <1>; 105 105 spi-rx-bus-width = <4>; 106 106 }; 107 107 };
+1 -1
arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts
··· 633 633 pinctrl_hog: hoggrp { 634 634 fsl,pins = < 635 635 MX8MN_IOMUXC_NAND_CE0_B_GPIO3_IO1 0x40000159 /* M2_GDIS# */ 636 - MX8MN_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x40000041 /* M2_RST# */ 636 + MX8MN_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x40000041 /* M2_RST# */ 637 637 MX8MN_IOMUXC_NAND_DATA01_GPIO3_IO7 0x40000119 /* M2_OFF# */ 638 638 MX8MN_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x40000159 /* M2_WDIS# */ 639 639 MX8MN_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x40000041 /* APP GPIO1 */
+1 -1
arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
··· 74 74 compatible = "jedec,spi-nor"; 75 75 reg = <0>; 76 76 spi-max-frequency = <80000000>; 77 - spi-tx-bus-width = <4>; 77 + spi-tx-bus-width = <1>; 78 78 spi-rx-bus-width = <4>; 79 79 }; 80 80 };
+2
arch/arm64/boot/dts/freescale/imx8mq-evk.dts
··· 337 337 #size-cells = <1>; 338 338 compatible = "micron,n25q256a", "jedec,spi-nor"; 339 339 spi-max-frequency = <29000000>; 340 + spi-tx-bus-width = <1>; 341 + spi-rx-bus-width = <4>; 340 342 }; 341 343 }; 342 344
+1 -1
arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
··· 281 281 #address-cells = <1>; 282 282 #size-cells = <1>; 283 283 reg = <0>; 284 - spi-tx-bus-width = <4>; 284 + spi-tx-bus-width = <1>; 285 285 spi-rx-bus-width = <4>; 286 286 m25p,fast-read; 287 287 spi-max-frequency = <50000000>;
-2
arch/arm64/boot/dts/qcom/ipq8074.dtsi
··· 487 487 interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>; 488 488 phys = <&qusb_phy_0>, <&usb0_ssphy>; 489 489 phy-names = "usb2-phy", "usb3-phy"; 490 - tx-fifo-resize; 491 490 snps,is-utmi-l1-suspend; 492 491 snps,hird-threshold = /bits/ 8 <0x0>; 493 492 snps,dis_u2_susphy_quirk; ··· 527 528 interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>; 528 529 phys = <&qusb_phy_1>, <&usb1_ssphy>; 529 530 phy-names = "usb2-phy", "usb3-phy"; 530 - tx-fifo-resize; 531 531 snps,is-utmi-l1-suspend; 532 532 snps,hird-threshold = /bits/ 8 <0x0>; 533 533 snps,dis_u2_susphy_quirk;
+3 -1
arch/arm64/boot/dts/qcom/pm8150.dtsi
··· 48 48 #size-cells = <0>; 49 49 50 50 pon: power-on@800 { 51 - compatible = "qcom,pm8916-pon"; 51 + compatible = "qcom,pm8998-pon"; 52 52 reg = <0x0800>; 53 + mode-bootloader = <0x2>; 54 + mode-recovery = <0x1>; 53 55 54 56 pon_pwrkey: pwrkey { 55 57 compatible = "qcom,pm8941-pwrkey";
+10
arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
··· 804 804 }; 805 805 }; 806 806 807 + &pon_pwrkey { 808 + status = "okay"; 809 + }; 810 + 811 + &pon_resin { 812 + status = "okay"; 813 + 814 + linux,code = <KEY_VOLUMEDOWN>; 815 + }; 816 + 807 817 &qupv3_id_0 { 808 818 status = "okay"; 809 819 };
+4 -5
arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
··· 273 273 "Headphone Jack", "HPOL", 274 274 "Headphone Jack", "HPOR"; 275 275 276 - #sound-dai-cells = <0>; 277 276 #address-cells = <1>; 278 277 #size-cells = <0>; 279 278 ··· 300 301 }; 301 302 }; 302 303 303 - dai-link@2 { 304 + dai-link@5 { 304 305 link-name = "MultiMedia2"; 305 - reg = <2>; 306 + reg = <LPASS_DP_RX>; 306 307 cpu { 307 - sound-dai = <&lpass_cpu 2>; 308 + sound-dai = <&lpass_cpu LPASS_DP_RX>; 308 309 }; 309 310 310 311 codec { ··· 781 782 qcom,playback-sd-lines = <0>; 782 783 }; 783 784 784 - hdmi-primary@0 { 785 + hdmi@5 { 785 786 reg = <LPASS_DP_RX>; 786 787 }; 787 788 };
+3 -3
arch/arm64/boot/dts/qcom/sc7280.dtsi
··· 1850 1850 1851 1851 cpufreq_hw: cpufreq@18591000 { 1852 1852 compatible = "qcom,cpufreq-epss"; 1853 - reg = <0 0x18591100 0 0x900>, 1854 - <0 0x18592100 0 0x900>, 1855 - <0 0x18593100 0 0x900>; 1853 + reg = <0 0x18591000 0 0x1000>, 1854 + <0 0x18592000 0 0x1000>, 1855 + <0 0x18593000 0 0x1000>; 1856 1856 clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>; 1857 1857 clock-names = "xo", "alternate"; 1858 1858 #freq-domain-cells = <1>;
+13 -2
arch/arm64/boot/dts/qcom/sdm630.dtsi
··· 654 654 compatible = "qcom,sdm660-a2noc"; 655 655 reg = <0x01704000 0xc100>; 656 656 #interconnect-cells = <1>; 657 - clock-names = "bus", "bus_a"; 657 + clock-names = "bus", 658 + "bus_a", 659 + "ipa", 660 + "ufs_axi", 661 + "aggre2_ufs_axi", 662 + "aggre2_usb3_axi", 663 + "cfg_noc_usb2_axi"; 658 664 clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>, 659 - <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>; 665 + <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>, 666 + <&rpmcc RPM_SMD_IPA_CLK>, 667 + <&gcc GCC_UFS_AXI_CLK>, 668 + <&gcc GCC_AGGRE2_UFS_AXI_CLK>, 669 + <&gcc GCC_AGGRE2_USB3_AXI_CLK>, 670 + <&gcc GCC_CFG_NOC_USB2_AXI_CLK>; 660 671 }; 661 672 662 673 mnoc: interconnect@1745000 {
+13 -8
arch/arm64/boot/dts/qcom/sdm845.dtsi
··· 128 128 no-map; 129 129 }; 130 130 131 - wlan_msa_mem: memory@8c400000 { 132 - reg = <0 0x8c400000 0 0x100000>; 131 + ipa_fw_mem: memory@8c400000 { 132 + reg = <0 0x8c400000 0 0x10000>; 133 133 no-map; 134 134 }; 135 135 136 - gpu_mem: memory@8c515000 { 137 - reg = <0 0x8c515000 0 0x2000>; 136 + ipa_gsi_mem: memory@8c410000 { 137 + reg = <0 0x8c410000 0 0x5000>; 138 138 no-map; 139 139 }; 140 140 141 - ipa_fw_mem: memory@8c517000 { 142 - reg = <0 0x8c517000 0 0x5a000>; 141 + gpu_mem: memory@8c415000 { 142 + reg = <0 0x8c415000 0 0x2000>; 143 143 no-map; 144 144 }; 145 145 146 - adsp_mem: memory@8c600000 { 147 - reg = <0 0x8c600000 0 0x1a00000>; 146 + adsp_mem: memory@8c500000 { 147 + reg = <0 0x8c500000 0 0x1a00000>; 148 + no-map; 149 + }; 150 + 151 + wlan_msa_mem: memory@8df00000 { 152 + reg = <0 0x8df00000 0 0x100000>; 148 153 no-map; 149 154 }; 150 155
+34
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
··· 16 16 #include "sdm850.dtsi" 17 17 #include "pm8998.dtsi" 18 18 19 + /* 20 + * Update following upstream (sdm845.dtsi) reserved 21 + * memory mappings for firmware loading to succeed 22 + * and enable the IPA device. 23 + */ 24 + /delete-node/ &ipa_fw_mem; 25 + /delete-node/ &ipa_gsi_mem; 26 + /delete-node/ &gpu_mem; 27 + /delete-node/ &adsp_mem; 28 + /delete-node/ &wlan_msa_mem; 29 + 19 30 / { 20 31 model = "Lenovo Yoga C630"; 21 32 compatible = "lenovo,yoga-c630", "qcom,sdm845"; ··· 66 55 remote-endpoint = <&sn65dsi86_out>; 67 56 }; 68 57 }; 58 + }; 59 + }; 60 + 61 + /* Reserved memory changes for IPA */ 62 + reserved-memory { 63 + wlan_msa_mem: memory@8c400000 { 64 + reg = <0 0x8c400000 0 0x100000>; 65 + no-map; 66 + }; 67 + 68 + gpu_mem: memory@8c515000 { 69 + reg = <0 0x8c515000 0 0x2000>; 70 + no-map; 71 + }; 72 + 73 + ipa_fw_mem: memory@8c517000 { 74 + reg = <0 0x8c517000 0 0x5a000>; 75 + no-map; 76 + }; 77 + 78 + adsp_mem: memory@8c600000 { 79 + reg = <0 0x8c600000 0 0x1a00000>; 80 + no-map; 69 81 }; 70 82 }; 71 83
-3
arch/arm64/include/asm/acpi.h
··· 50 50 void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size); 51 51 #define acpi_os_ioremap acpi_os_ioremap 52 52 53 - void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size); 54 - #define acpi_os_memmap acpi_os_memmap 55 - 56 53 typedef u64 phys_cpuid_t; 57 54 #define PHYS_CPUID_INVALID INVALID_HWID 58 55
+5
arch/arm64/include/asm/assembler.h
··· 525 525 #define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name) 526 526 #endif 527 527 528 + #ifdef CONFIG_KASAN_HW_TAGS 529 + #define EXPORT_SYMBOL_NOHWKASAN(name) 530 + #else 531 + #define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name) 532 + #endif 528 533 /* 529 534 * Emit a 64-bit absolute little endian symbol reference in a way that 530 535 * ensures that it will be resolved at build time, even when building a
+6
arch/arm64/include/asm/mte.h
··· 99 99 100 100 static inline void mte_check_tfsr_entry(void) 101 101 { 102 + if (!system_supports_mte()) 103 + return; 104 + 102 105 mte_check_tfsr_el1(); 103 106 } 104 107 105 108 static inline void mte_check_tfsr_exit(void) 106 109 { 110 + if (!system_supports_mte()) 111 + return; 112 + 107 113 /* 108 114 * The asynchronous faults are sync'ed automatically with 109 115 * TFSR_EL1 on kernel entry but for exit an explicit dsb()
+2
arch/arm64/include/asm/string.h
··· 12 12 #define __HAVE_ARCH_STRCHR 13 13 extern char *strchr(const char *, int c); 14 14 15 + #ifndef CONFIG_KASAN_HW_TAGS 15 16 #define __HAVE_ARCH_STRCMP 16 17 extern int strcmp(const char *, const char *); 17 18 18 19 #define __HAVE_ARCH_STRNCMP 19 20 extern int strncmp(const char *, const char *, __kernel_size_t); 21 + #endif 20 22 21 23 #define __HAVE_ARCH_STRLEN 22 24 extern __kernel_size_t strlen(const char *);
+3 -16
arch/arm64/kernel/acpi.c
··· 273 273 return __pgprot(PROT_DEVICE_nGnRnE); 274 274 } 275 275 276 - static void __iomem *__acpi_os_ioremap(acpi_physical_address phys, 277 - acpi_size size, bool memory) 276 + void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) 278 277 { 279 278 efi_memory_desc_t *md, *region = NULL; 280 279 pgprot_t prot; ··· 299 300 * It is fine for AML to remap regions that are not represented in the 300 301 * EFI memory map at all, as it only describes normal memory, and MMIO 301 302 * regions that require a virtual mapping to make them accessible to 302 - * the EFI runtime services. Determine the region default 303 - * attributes by checking the requested memory semantics. 303 + * the EFI runtime services. 304 304 */ 305 - prot = memory ? __pgprot(PROT_NORMAL_NC) : 306 - __pgprot(PROT_DEVICE_nGnRnE); 305 + prot = __pgprot(PROT_DEVICE_nGnRnE); 307 306 if (region) { 308 307 switch (region->type) { 309 308 case EFI_LOADER_CODE: ··· 359 362 } 360 363 } 361 364 return __ioremap(phys, size, prot); 362 - } 363 - 364 - void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) 365 - { 366 - return __acpi_os_ioremap(phys, size, false); 367 - } 368 - 369 - void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size) 370 - { 371 - return __acpi_os_ioremap(phys, size, true); 372 365 } 373 366 374 367 /*
+6 -2
arch/arm64/kernel/cpufeature.c
··· 1526 1526 /* 1527 1527 * For reasons that aren't entirely clear, enabling KPTI on Cavium 1528 1528 * ThunderX leads to apparent I-cache corruption of kernel text, which 1529 - * ends as well as you might imagine. Don't even try. 1529 + * ends as well as you might imagine. Don't even try. We cannot rely 1530 + * on the cpus_have_*cap() helpers here to detect the CPU erratum 1531 + * because cpucap detection order may change. However, since we know 1532 + * affected CPUs are always in a homogeneous configuration, it is 1533 + * safe to rely on this_cpu_has_cap() here. 1530 1534 */ 1531 - if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) { 1535 + if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) { 1532 1536 str = "ARM64_WORKAROUND_CAVIUM_27456"; 1533 1537 __kpti_forced = -1; 1534 1538 }
+4 -6
arch/arm64/kernel/mte.c
··· 142 142 #ifdef CONFIG_KASAN_HW_TAGS 143 143 void mte_check_tfsr_el1(void) 144 144 { 145 - u64 tfsr_el1; 146 - 147 - if (!system_supports_mte()) 148 - return; 149 - 150 - tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1); 145 + u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1); 151 146 152 147 if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) { 153 148 /* ··· 194 199 195 200 void mte_thread_switch(struct task_struct *next) 196 201 { 202 + if (!system_supports_mte()) 203 + return; 204 + 197 205 mte_update_sctlr_user(next); 198 206 199 207 /*
+1 -3
arch/arm64/kernel/signal.c
··· 940 940 if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) 941 941 do_signal(regs); 942 942 943 - if (thread_flags & _TIF_NOTIFY_RESUME) { 943 + if (thread_flags & _TIF_NOTIFY_RESUME) 944 944 tracehook_notify_resume(regs); 945 - rseq_handle_notify_resume(NULL, regs); 946 - } 947 945 948 946 if (thread_flags & _TIF_FOREIGN_FPSTATE) 949 947 fpsimd_restore_current_state();
+1 -1
arch/arm64/kvm/hyp/nvhe/Makefile
··· 54 54 # runtime. Because the hypervisor is part of the kernel binary, relocations 55 55 # produce a kernel VA. We enumerate relocations targeting hyp at build time 56 56 # and convert the kernel VAs at those positions to hyp VAs. 57 - $(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel 57 + $(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel FORCE 58 58 $(call if_changed,hyprel) 59 59 60 60 # 5) Compile hyp-reloc.S and link it into the existing partially linked object.
-3
arch/arm64/kvm/perf.c
··· 50 50 51 51 int kvm_perf_init(void) 52 52 { 53 - if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled()) 54 - static_branch_enable(&kvm_arm_pmu_available); 55 - 56 53 return perf_register_guest_info_callbacks(&kvm_guest_cbs); 57 54 } 58 55
+8 -1
arch/arm64/kvm/pmu-emul.c
··· 740 740 kvm_pmu_create_perf_event(vcpu, select_idx); 741 741 } 742 742 743 - int kvm_pmu_probe_pmuver(void) 743 + void kvm_host_pmu_init(struct arm_pmu *pmu) 744 + { 745 + if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF && 746 + !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled()) 747 + static_branch_enable(&kvm_arm_pmu_available); 748 + } 749 + 750 + static int kvm_pmu_probe_pmuver(void) 744 751 { 745 752 struct perf_event_attr attr = { }; 746 753 struct perf_event *event;
+1 -1
arch/arm64/lib/strcmp.S
··· 173 173 ret 174 174 175 175 SYM_FUNC_END_PI(strcmp) 176 - EXPORT_SYMBOL_NOKASAN(strcmp) 176 + EXPORT_SYMBOL_NOHWKASAN(strcmp)
+1 -1
arch/arm64/lib/strncmp.S
··· 258 258 ret 259 259 260 260 SYM_FUNC_END_PI(strncmp) 261 - EXPORT_SYMBOL_NOKASAN(strncmp) 261 + EXPORT_SYMBOL_NOHWKASAN(strncmp)
+1 -3
arch/csky/kernel/signal.c
··· 260 260 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) 261 261 do_signal(regs); 262 262 263 - if (thread_info_flags & _TIF_NOTIFY_RESUME) { 263 + if (thread_info_flags & _TIF_NOTIFY_RESUME) 264 264 tracehook_notify_resume(regs); 265 - rseq_handle_notify_resume(NULL, regs); 266 - } 267 265 }
-4
arch/m68k/68000/entry.S
··· 15 15 #include <asm/unistd.h> 16 16 #include <asm/errno.h> 17 17 #include <asm/setup.h> 18 - #include <asm/segment.h> 19 18 #include <asm/traps.h> 20 19 #include <asm/asm-offsets.h> 21 20 #include <asm/entry.h> ··· 24 25 .globl system_call 25 26 .globl resume 26 27 .globl ret_from_exception 27 - .globl ret_from_signal 28 28 .globl sys_call_table 29 29 .globl bad_interrupt 30 30 .globl inthandler1 ··· 57 59 subql #4,%sp /* dummy return address */ 58 60 SAVE_SWITCH_STACK 59 61 jbsr syscall_trace_leave 60 - 61 - ret_from_signal: 62 62 RESTORE_SWITCH_STACK 63 63 addql #4,%sp 64 64 jra ret_from_exception
-1
arch/m68k/Kconfig
··· 29 29 select NO_DMA if !MMU && !COLDFIRE 30 30 select OLD_SIGACTION 31 31 select OLD_SIGSUSPEND3 32 - select SET_FS 33 32 select UACCESS_MEMCPY if !MMU 34 33 select VIRT_TO_BUS 35 34 select ZONE_DMA
-4
arch/m68k/coldfire/entry.S
··· 31 31 #include <asm/thread_info.h> 32 32 #include <asm/errno.h> 33 33 #include <asm/setup.h> 34 - #include <asm/segment.h> 35 34 #include <asm/asm-offsets.h> 36 35 #include <asm/entry.h> 37 36 ··· 50 51 .globl system_call 51 52 .globl resume 52 53 .globl ret_from_exception 53 - .globl ret_from_signal 54 54 .globl sys_call_table 55 55 .globl inthandler 56 56 ··· 96 98 subql #4,%sp /* dummy return address */ 97 99 SAVE_SWITCH_STACK 98 100 jbsr syscall_trace_leave 99 - 100 - ret_from_signal: 101 101 RESTORE_SWITCH_STACK 102 102 addql #4,%sp 103 103
+28 -3
arch/m68k/include/asm/processor.h
··· 9 9 #define __ASM_M68K_PROCESSOR_H 10 10 11 11 #include <linux/thread_info.h> 12 - #include <asm/segment.h> 13 12 #include <asm/fpu.h> 14 13 #include <asm/ptrace.h> 15 14 ··· 74 75 #define TASK_UNMAPPED_BASE 0 75 76 #endif 76 77 78 + /* Address spaces (or Function Codes in Motorola lingo) */ 79 + #define USER_DATA 1 80 + #define USER_PROGRAM 2 81 + #define SUPER_DATA 5 82 + #define SUPER_PROGRAM 6 83 + #define CPU_SPACE 7 84 + 85 + #ifdef CONFIG_CPU_HAS_ADDRESS_SPACES 86 + /* 87 + * Set the SFC/DFC registers for special MM operations. For most normal 88 + * operation these remain set to USER_DATA for the uaccess routines. 89 + */ 90 + static inline void set_fc(unsigned long val) 91 + { 92 + WARN_ON_ONCE(in_interrupt()); 93 + 94 + __asm__ __volatile__ ("movec %0,%/sfc\n\t" 95 + "movec %0,%/dfc\n\t" 96 + : /* no outputs */ : "r" (val) : "memory"); 97 + } 98 + #else 99 + static inline void set_fc(unsigned long val) 100 + { 101 + } 102 + #endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */ 103 + 77 104 struct thread_struct { 78 105 unsigned long ksp; /* kernel stack pointer */ 79 106 unsigned long usp; /* user stack pointer */ 80 107 unsigned short sr; /* saved status register */ 81 - unsigned short fs; /* saved fs (sfc, dfc) */ 108 + unsigned short fc; /* saved fc (sfc, dfc) */ 82 109 unsigned long crp[2]; /* cpu root pointer */ 83 110 unsigned long esp0; /* points to SR of stack frame */ 84 111 unsigned long faddr; /* info about last fault */ ··· 117 92 #define INIT_THREAD { \ 118 93 .ksp = sizeof(init_stack) + (unsigned long) init_stack, \ 119 94 .sr = PS_S, \ 120 - .fs = __KERNEL_DS, \ 95 + .fc = USER_DATA, \ 121 96 } 122 97 123 98 /*
-59
arch/m68k/include/asm/segment.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _M68K_SEGMENT_H 3 - #define _M68K_SEGMENT_H 4 - 5 - /* define constants */ 6 - /* Address spaces (FC0-FC2) */ 7 - #define USER_DATA (1) 8 - #ifndef __USER_DS 9 - #define __USER_DS (USER_DATA) 10 - #endif 11 - #define USER_PROGRAM (2) 12 - #define SUPER_DATA (5) 13 - #ifndef __KERNEL_DS 14 - #define __KERNEL_DS (SUPER_DATA) 15 - #endif 16 - #define SUPER_PROGRAM (6) 17 - #define CPU_SPACE (7) 18 - 19 - #ifndef __ASSEMBLY__ 20 - 21 - typedef struct { 22 - unsigned long seg; 23 - } mm_segment_t; 24 - 25 - #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) 26 - 27 - #ifdef CONFIG_CPU_HAS_ADDRESS_SPACES 28 - /* 29 - * Get/set the SFC/DFC registers for MOVES instructions 30 - */ 31 - #define USER_DS MAKE_MM_SEG(__USER_DS) 32 - #define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS) 33 - 34 - static inline mm_segment_t get_fs(void) 35 - { 36 - mm_segment_t _v; 37 - __asm__ ("movec %/dfc,%0":"=r" (_v.seg):); 38 - return _v; 39 - } 40 - 41 - static inline void set_fs(mm_segment_t val) 42 - { 43 - __asm__ __volatile__ ("movec %0,%/sfc\n\t" 44 - "movec %0,%/dfc\n\t" 45 - : /* no outputs */ : "r" (val.seg) : "memory"); 46 - } 47 - 48 - #else 49 - #define USER_DS MAKE_MM_SEG(TASK_SIZE) 50 - #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) 51 - #define get_fs() (current_thread_info()->addr_limit) 52 - #define set_fs(x) (current_thread_info()->addr_limit = (x)) 53 - #endif 54 - 55 - #define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) 56 - 57 - #endif /* __ASSEMBLY__ */ 58 - 59 - #endif /* _M68K_SEGMENT_H */
-3
arch/m68k/include/asm/thread_info.h
··· 4 4 5 5 #include <asm/types.h> 6 6 #include <asm/page.h> 7 - #include <asm/segment.h> 8 7 9 8 /* 10 9 * On machines with 4k pages we default to an 8k thread size, though we ··· 26 27 struct thread_info { 27 28 struct task_struct *task; /* main task structure */ 28 29 unsigned long flags; 29 - mm_segment_t addr_limit; /* thread address space */ 30 30 int preempt_count; /* 0 => preemptable, <0 => BUG */ 31 31 __u32 cpu; /* should always be 0 on m68k */ 32 32 unsigned long tp_value; /* thread pointer */ ··· 35 37 #define INIT_THREAD_INFO(tsk) \ 36 38 { \ 37 39 .task = &tsk, \ 38 - .addr_limit = KERNEL_DS, \ 39 40 .preempt_count = INIT_PREEMPT_COUNT, \ 40 41 } 41 42
+3 -8
arch/m68k/include/asm/tlbflush.h
··· 13 13 if (CPU_IS_COLDFIRE) { 14 14 mmu_write(MMUOR, MMUOR_CNL); 15 15 } else if (CPU_IS_040_OR_060) { 16 - mm_segment_t old_fs = get_fs(); 17 - set_fs(KERNEL_DS); 16 + set_fc(SUPER_DATA); 18 17 __asm__ __volatile__(".chip 68040\n\t" 19 18 "pflush (%0)\n\t" 20 19 ".chip 68k" 21 20 : : "a" (addr)); 22 - set_fs(old_fs); 21 + set_fc(USER_DATA); 23 22 } else if (CPU_IS_020_OR_030) 24 23 __asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr)); 25 24 } ··· 83 84 84 85 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) 85 86 { 86 - if (vma->vm_mm == current->active_mm) { 87 - mm_segment_t old_fs = force_uaccess_begin(); 88 - 87 + if (vma->vm_mm == current->active_mm) 89 88 __flush_tlb_one(addr); 90 - force_uaccess_end(old_fs); 91 - } 92 89 } 93 90 94 91 static inline void flush_tlb_range(struct vm_area_struct *vma,
+4
arch/m68k/include/asm/traps.h
··· 267 267 } un; 268 268 }; 269 269 270 + #ifdef CONFIG_M68040 271 + asmlinkage void berr_040cleanup(struct frame *fp); 272 + #endif 273 + 270 274 #endif /* __ASSEMBLY__ */ 271 275 272 276 #endif /* _M68K_TRAPS_H */
+141 -74
arch/m68k/include/asm/uaccess.h
··· 9 9 */ 10 10 #include <linux/compiler.h> 11 11 #include <linux/types.h> 12 - #include <asm/segment.h> 13 12 #include <asm/extable.h> 14 13 15 14 /* We let the MMU do all checking */ 16 15 static inline int access_ok(const void __user *addr, 17 16 unsigned long size) 18 17 { 18 + /* 19 + * XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check 20 + * for TASK_SIZE! 21 + */ 19 22 return 1; 20 23 } 21 24 ··· 38 35 #define MOVES "move" 39 36 #endif 40 37 41 - extern int __put_user_bad(void); 42 - extern int __get_user_bad(void); 43 - 44 - #define __put_user_asm(res, x, ptr, bwl, reg, err) \ 38 + #define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \ 45 39 asm volatile ("\n" \ 46 - "1: "MOVES"."#bwl" %2,%1\n" \ 40 + "1: "inst"."#bwl" %2,%1\n" \ 47 41 "2:\n" \ 48 42 " .section .fixup,\"ax\"\n" \ 49 43 " .even\n" \ ··· 56 56 : "+d" (res), "=m" (*(ptr)) \ 57 57 : #reg (x), "i" (err)) 58 58 59 + #define __put_user_asm8(inst, res, x, ptr) \ 60 + do { \ 61 + const void *__pu_ptr = (const void __force *)(ptr); \ 62 + \ 63 + asm volatile ("\n" \ 64 + "1: "inst".l %2,(%1)+\n" \ 65 + "2: "inst".l %R2,(%1)\n" \ 66 + "3:\n" \ 67 + " .section .fixup,\"ax\"\n" \ 68 + " .even\n" \ 69 + "10: movel %3,%0\n" \ 70 + " jra 3b\n" \ 71 + " .previous\n" \ 72 + "\n" \ 73 + " .section __ex_table,\"a\"\n" \ 74 + " .align 4\n" \ 75 + " .long 1b,10b\n" \ 76 + " .long 2b,10b\n" \ 77 + " .long 3b,10b\n" \ 78 + " .previous" \ 79 + : "+d" (res), "+a" (__pu_ptr) \ 80 + : "r" (x), "i" (-EFAULT) \ 81 + : "memory"); \ 82 + } while (0) 83 + 59 84 /* 60 85 * These are the main single-value transfer routines. They automatically 61 86 * use the right size if we just have the right pointer type. 
··· 93 68 __chk_user_ptr(ptr); \ 94 69 switch (sizeof (*(ptr))) { \ 95 70 case 1: \ 96 - __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \ 71 + __put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \ 97 72 break; \ 98 73 case 2: \ 99 - __put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \ 74 + __put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \ 100 75 break; \ 101 76 case 4: \ 102 - __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \ 77 + __put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \ 103 78 break; \ 104 79 case 8: \ 105 - { \ 106 - const void __user *__pu_ptr = (ptr); \ 107 - asm volatile ("\n" \ 108 - "1: "MOVES".l %2,(%1)+\n" \ 109 - "2: "MOVES".l %R2,(%1)\n" \ 110 - "3:\n" \ 111 - " .section .fixup,\"ax\"\n" \ 112 - " .even\n" \ 113 - "10: movel %3,%0\n" \ 114 - " jra 3b\n" \ 115 - " .previous\n" \ 116 - "\n" \ 117 - " .section __ex_table,\"a\"\n" \ 118 - " .align 4\n" \ 119 - " .long 1b,10b\n" \ 120 - " .long 2b,10b\n" \ 121 - " .long 3b,10b\n" \ 122 - " .previous" \ 123 - : "+d" (__pu_err), "+a" (__pu_ptr) \ 124 - : "r" (__pu_val), "i" (-EFAULT) \ 125 - : "memory"); \ 80 + __put_user_asm8(MOVES, __pu_err, __pu_val, ptr); \ 126 81 break; \ 127 - } \ 128 82 default: \ 129 - __pu_err = __put_user_bad(); \ 130 - break; \ 83 + BUILD_BUG(); \ 131 84 } \ 132 85 __pu_err; \ 133 86 }) 134 87 #define put_user(x, ptr) __put_user(x, ptr) 135 88 136 89 137 - #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ 90 + #define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({ \ 138 91 type __gu_val; \ 139 92 asm volatile ("\n" \ 140 - "1: "MOVES"."#bwl" %2,%1\n" \ 93 + "1: "inst"."#bwl" %2,%1\n" \ 141 94 "2:\n" \ 142 95 " .section .fixup,\"ax\"\n" \ 143 96 " .even\n" \ ··· 133 130 (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \ 134 131 }) 135 132 133 + #define __get_user_asm8(inst, res, x, ptr) \ 134 + do { \ 135 + const void *__gu_ptr = (const void __force *)(ptr); \ 136 + union { \ 
137 + u64 l; \ 138 + __typeof__(*(ptr)) t; \ 139 + } __gu_val; \ 140 + \ 141 + asm volatile ("\n" \ 142 + "1: "inst".l (%2)+,%1\n" \ 143 + "2: "inst".l (%2),%R1\n" \ 144 + "3:\n" \ 145 + " .section .fixup,\"ax\"\n" \ 146 + " .even\n" \ 147 + "10: move.l %3,%0\n" \ 148 + " sub.l %1,%1\n" \ 149 + " sub.l %R1,%R1\n" \ 150 + " jra 3b\n" \ 151 + " .previous\n" \ 152 + "\n" \ 153 + " .section __ex_table,\"a\"\n" \ 154 + " .align 4\n" \ 155 + " .long 1b,10b\n" \ 156 + " .long 2b,10b\n" \ 157 + " .previous" \ 158 + : "+d" (res), "=&r" (__gu_val.l), \ 159 + "+a" (__gu_ptr) \ 160 + : "i" (-EFAULT) \ 161 + : "memory"); \ 162 + (x) = __gu_val.t; \ 163 + } while (0) 164 + 136 165 #define __get_user(x, ptr) \ 137 166 ({ \ 138 167 int __gu_err = 0; \ 139 168 __chk_user_ptr(ptr); \ 140 169 switch (sizeof(*(ptr))) { \ 141 170 case 1: \ 142 - __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \ 171 + __get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \ 143 172 break; \ 144 173 case 2: \ 145 - __get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT); \ 174 + __get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \ 146 175 break; \ 147 176 case 4: \ 148 - __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \ 177 + __get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \ 149 178 break; \ 150 - case 8: { \ 151 - const void __user *__gu_ptr = (ptr); \ 152 - union { \ 153 - u64 l; \ 154 - __typeof__(*(ptr)) t; \ 155 - } __gu_val; \ 156 - asm volatile ("\n" \ 157 - "1: "MOVES".l (%2)+,%1\n" \ 158 - "2: "MOVES".l (%2),%R1\n" \ 159 - "3:\n" \ 160 - " .section .fixup,\"ax\"\n" \ 161 - " .even\n" \ 162 - "10: move.l %3,%0\n" \ 163 - " sub.l %1,%1\n" \ 164 - " sub.l %R1,%R1\n" \ 165 - " jra 3b\n" \ 166 - " .previous\n" \ 167 - "\n" \ 168 - " .section __ex_table,\"a\"\n" \ 169 - " .align 4\n" \ 170 - " .long 1b,10b\n" \ 171 - " .long 2b,10b\n" \ 172 - " .previous" \ 173 - : "+d" (__gu_err), "=&r" (__gu_val.l), \ 174 - "+a" (__gu_ptr) \ 175 - : "i" (-EFAULT) \ 176 - : 
"memory"); \ 177 - (x) = __gu_val.t; \ 179 + case 8: \ 180 + __get_user_asm8(MOVES, __gu_err, x, ptr); \ 178 181 break; \ 179 - } \ 180 182 default: \ 181 - __gu_err = __get_user_bad(); \ 182 - break; \ 183 + BUILD_BUG(); \ 183 184 } \ 184 185 __gu_err; \ 185 186 }) ··· 329 322 330 323 switch (n) { 331 324 case 1: 332 - __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1); 325 + __put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to, 326 + b, d, 1); 333 327 break; 334 328 case 2: 335 - __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2); 329 + __put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to, 330 + w, r, 2); 336 331 break; 337 332 case 3: 338 333 __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,); 339 334 break; 340 335 case 4: 341 - __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4); 336 + __put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to, 337 + l, r, 4); 342 338 break; 343 339 case 5: 344 340 __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,); ··· 390 380 #define INLINE_COPY_FROM_USER 391 381 #define INLINE_COPY_TO_USER 392 382 393 - #define user_addr_max() \ 394 - (uaccess_kernel() ? 
~0UL : TASK_SIZE) 383 + #define HAVE_GET_KERNEL_NOFAULT 384 + 385 + #define __get_kernel_nofault(dst, src, type, err_label) \ 386 + do { \ 387 + type *__gk_dst = (type *)(dst); \ 388 + type *__gk_src = (type *)(src); \ 389 + int __gk_err = 0; \ 390 + \ 391 + switch (sizeof(type)) { \ 392 + case 1: \ 393 + __get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \ 394 + u8, b, d, -EFAULT); \ 395 + break; \ 396 + case 2: \ 397 + __get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \ 398 + u16, w, r, -EFAULT); \ 399 + break; \ 400 + case 4: \ 401 + __get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \ 402 + u32, l, r, -EFAULT); \ 403 + break; \ 404 + case 8: \ 405 + __get_user_asm8("move", __gk_err, *__gk_dst, __gk_src); \ 406 + break; \ 407 + default: \ 408 + BUILD_BUG(); \ 409 + } \ 410 + if (unlikely(__gk_err)) \ 411 + goto err_label; \ 412 + } while (0) 413 + 414 + #define __put_kernel_nofault(dst, src, type, err_label) \ 415 + do { \ 416 + type __pk_src = *(type *)(src); \ 417 + type *__pk_dst = (type *)(dst); \ 418 + int __pk_err = 0; \ 419 + \ 420 + switch (sizeof(type)) { \ 421 + case 1: \ 422 + __put_user_asm("move", __pk_err, __pk_src, __pk_dst, \ 423 + b, d, -EFAULT); \ 424 + break; \ 425 + case 2: \ 426 + __put_user_asm("move", __pk_err, __pk_src, __pk_dst, \ 427 + w, r, -EFAULT); \ 428 + break; \ 429 + case 4: \ 430 + __put_user_asm("move", __pk_err, __pk_src, __pk_dst, \ 431 + l, r, -EFAULT); \ 432 + break; \ 433 + case 8: \ 434 + __put_user_asm8("move", __pk_err, __pk_src, __pk_dst); \ 435 + break; \ 436 + default: \ 437 + BUILD_BUG(); \ 438 + } \ 439 + if (unlikely(__pk_err)) \ 440 + goto err_label; \ 441 + } while (0) 395 442 396 443 extern long strncpy_from_user(char *dst, const char __user *src, long count); 397 444 extern __must_check long strnlen_user(const char __user *str, long n);
+1 -1
arch/m68k/kernel/asm-offsets.c
··· 31 31 DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); 32 32 DEFINE(THREAD_USP, offsetof(struct thread_struct, usp)); 33 33 DEFINE(THREAD_SR, offsetof(struct thread_struct, sr)); 34 - DEFINE(THREAD_FS, offsetof(struct thread_struct, fs)); 34 + DEFINE(THREAD_FC, offsetof(struct thread_struct, fc)); 35 35 DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp)); 36 36 DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0)); 37 37 DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
+28 -30
arch/m68k/kernel/entry.S
··· 36 36 #include <linux/linkage.h> 37 37 #include <asm/errno.h> 38 38 #include <asm/setup.h> 39 - #include <asm/segment.h> 40 39 #include <asm/traps.h> 41 40 #include <asm/unistd.h> 42 41 #include <asm/asm-offsets.h> ··· 77 78 78 79 ENTRY(sys_sigreturn) 79 80 SAVE_SWITCH_STACK 80 - movel %sp,%sp@- | switch_stack pointer 81 - pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer 81 + movel %sp,%a1 | switch_stack pointer 82 + lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer 83 + lea %sp@(-84),%sp | leave a gap 84 + movel %a1,%sp@- 85 + movel %a0,%sp@- 82 86 jbsr do_sigreturn 83 - addql #8,%sp 84 - RESTORE_SWITCH_STACK 85 - rts 87 + jra 1f | shared with rt_sigreturn() 86 88 87 89 ENTRY(sys_rt_sigreturn) 88 90 SAVE_SWITCH_STACK 89 - movel %sp,%sp@- | switch_stack pointer 90 - pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer 91 + movel %sp,%a1 | switch_stack pointer 92 + lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer 93 + lea %sp@(-84),%sp | leave a gap 94 + movel %a1,%sp@- 95 + movel %a0,%sp@- 96 + | stack contents: 97 + | [original pt_regs address] [original switch_stack address] 98 + | [gap] [switch_stack] [pt_regs] [exception frame] 91 99 jbsr do_rt_sigreturn 92 - addql #8,%sp 100 + 101 + 1: 102 + | stack contents now: 103 + | [original pt_regs address] [original switch_stack address] 104 + | [unused part of the gap] [moved switch_stack] [moved pt_regs] 105 + | [replacement exception frame] 106 + | return value of do_{rt_,}sigreturn() points to moved switch_stack. 
107 + 108 + movel %d0,%sp | discard the leftover junk 93 109 RESTORE_SWITCH_STACK 110 + | stack contents now is just [syscall return address] [pt_regs] [frame] 111 + | return pt_regs.d0 112 + movel %sp@(PT_OFF_D0+4),%d0 94 113 rts 95 114 96 115 ENTRY(buserr) ··· 197 180 jbsr syscall_trace 198 181 RESTORE_SWITCH_STACK 199 182 addql #4,%sp 200 - jra .Lret_from_exception 201 - 202 - ENTRY(ret_from_signal) 203 - movel %curptr@(TASK_STACK),%a1 204 - tstb %a1@(TINFO_FLAGS+2) 205 - jge 1f 206 - jbsr syscall_trace 207 - 1: RESTORE_SWITCH_STACK 208 - addql #4,%sp 209 - /* on 68040 complete pending writebacks if any */ 210 - #ifdef CONFIG_M68040 211 - bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0 212 - subql #7,%d0 | bus error frame ? 213 - jbne 1f 214 - movel %sp,%sp@- 215 - jbsr berr_040cleanup 216 - addql #4,%sp 217 - 1: 218 - #endif 219 183 jra .Lret_from_exception 220 184 221 185 ENTRY(system_call) ··· 336 338 337 339 /* save fs (sfc,%dfc) (may be pointing to kernel memory) */ 338 340 movec %sfc,%d0 339 - movew %d0,%a0@(TASK_THREAD+THREAD_FS) 341 + movew %d0,%a0@(TASK_THREAD+THREAD_FC) 340 342 341 343 /* save usp */ 342 344 /* it is better to use a movel here instead of a movew 8*) */ ··· 422 424 movel %a0,%usp 423 425 424 426 /* restore fs (sfc,%dfc) */ 425 - movew %a1@(TASK_THREAD+THREAD_FS),%a0 427 + movew %a1@(TASK_THREAD+THREAD_FC),%a0 426 428 movec %a0,%sfc 427 429 movec %a0,%dfc 428 430
+2 -2
arch/m68k/kernel/process.c
··· 92 92 93 93 void flush_thread(void) 94 94 { 95 - current->thread.fs = __USER_DS; 95 + current->thread.fc = USER_DATA; 96 96 #ifdef CONFIG_FPU 97 97 if (!FPU_IS_EMU) { 98 98 unsigned long zero = 0; ··· 155 155 * Must save the current SFC/DFC value, NOT the value when 156 156 * the parent was last descheduled - RGH 10-08-96 157 157 */ 158 - p->thread.fs = get_fs().seg; 158 + p->thread.fc = USER_DATA; 159 159 160 160 if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { 161 161 /* kernel thread */
+82 -115
arch/m68k/kernel/signal.c
··· 447 447 448 448 if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { 449 449 fpu_version = sc->sc_fpstate[0]; 450 - if (CPU_IS_020_OR_030 && 450 + if (CPU_IS_020_OR_030 && !regs->stkadj && 451 451 regs->vector >= (VEC_FPBRUC * 4) && 452 452 regs->vector <= (VEC_FPNAN * 4)) { 453 453 /* Clear pending exception in 68882 idle frame */ ··· 510 510 if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) 511 511 context_size = fpstate[1]; 512 512 fpu_version = fpstate[0]; 513 - if (CPU_IS_020_OR_030 && 513 + if (CPU_IS_020_OR_030 && !regs->stkadj && 514 514 regs->vector >= (VEC_FPBRUC * 4) && 515 515 regs->vector <= (VEC_FPNAN * 4)) { 516 516 /* Clear pending exception in 68882 idle frame */ ··· 641 641 static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, 642 642 void __user *fp) 643 643 { 644 - int fsize = frame_extra_sizes(formatvec >> 12); 645 - if (fsize < 0) { 644 + int extra = frame_extra_sizes(formatvec >> 12); 645 + char buf[sizeof_field(struct frame, un)]; 646 + 647 + if (extra < 0) { 646 648 /* 647 649 * user process trying to return with weird frame format 648 650 */ 649 651 pr_debug("user process returning with weird frame format\n"); 650 - return 1; 652 + return -1; 651 653 } 652 - if (!fsize) { 653 - regs->format = formatvec >> 12; 654 - regs->vector = formatvec & 0xfff; 655 - } else { 656 - struct switch_stack *sw = (struct switch_stack *)regs - 1; 657 - /* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */ 658 - unsigned long buf[sizeof_field(struct frame, un) / 2]; 654 + if (extra && copy_from_user(buf, fp, extra)) 655 + return -1; 656 + regs->format = formatvec >> 12; 657 + regs->vector = formatvec & 0xfff; 658 + if (extra) { 659 + void *p = (struct switch_stack *)regs - 1; 660 + struct frame *new = (void *)regs - extra; 661 + int size = sizeof(struct pt_regs)+sizeof(struct switch_stack); 659 662 660 - /* that'll make sure that expansion won't crap over data */ 661 - if (copy_from_user(buf + fsize / 4, fp, fsize)) 662 - return 1; 663 - 664 - /* 
point of no return */ 665 - regs->format = formatvec >> 12; 666 - regs->vector = formatvec & 0xfff; 667 - #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack)) 668 - __asm__ __volatile__ ( 669 - #ifdef CONFIG_COLDFIRE 670 - " movel %0,%/sp\n\t" 671 - " bra ret_from_signal\n" 672 - #else 673 - " movel %0,%/a0\n\t" 674 - " subl %1,%/a0\n\t" /* make room on stack */ 675 - " movel %/a0,%/sp\n\t" /* set stack pointer */ 676 - /* move switch_stack and pt_regs */ 677 - "1: movel %0@+,%/a0@+\n\t" 678 - " dbra %2,1b\n\t" 679 - " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */ 680 - " lsrl #2,%1\n\t" 681 - " subql #1,%1\n\t" 682 - /* copy to the gap we'd made */ 683 - "2: movel %4@+,%/a0@+\n\t" 684 - " dbra %1,2b\n\t" 685 - " bral ret_from_signal\n" 663 + memmove(p - extra, p, size); 664 + memcpy(p - extra + size, buf, extra); 665 + current->thread.esp0 = (unsigned long)&new->ptregs; 666 + #ifdef CONFIG_M68040 667 + /* on 68040 complete pending writebacks if any */ 668 + if (new->ptregs.format == 7) // bus error frame 669 + berr_040cleanup(new); 686 670 #endif 687 - : /* no outputs, it doesn't ever return */ 688 - : "a" (sw), "d" (fsize), "d" (frame_offset/4-1), 689 - "n" (frame_offset), "a" (buf + fsize/4) 690 - : "a0"); 691 - #undef frame_offset 692 671 } 693 - return 0; 672 + return extra; 694 673 } 695 674 696 675 static inline int ··· 677 698 { 678 699 int formatvec; 679 700 struct sigcontext context; 680 - int err = 0; 681 701 682 702 siginfo_build_tests(); 683 703 ··· 685 707 686 708 /* get previous context */ 687 709 if (copy_from_user(&context, usc, sizeof(context))) 688 - goto badframe; 710 + return -1; 689 711 690 712 /* restore passed registers */ 691 713 regs->d0 = context.sc_d0; ··· 698 720 wrusp(context.sc_usp); 699 721 formatvec = context.sc_formatvec; 700 722 701 - err = restore_fpu_state(&context); 723 + if (restore_fpu_state(&context)) 724 + return -1; 702 725 703 - if (err || mangle_kernel_stack(regs, formatvec, fp)) 704 - goto 
badframe; 705 - 706 - return 0; 707 - 708 - badframe: 709 - return 1; 726 + return mangle_kernel_stack(regs, formatvec, fp); 710 727 } 711 728 712 729 static inline int ··· 718 745 719 746 err = __get_user(temp, &uc->uc_mcontext.version); 720 747 if (temp != MCONTEXT_VERSION) 721 - goto badframe; 748 + return -1; 722 749 /* restore passed registers */ 723 750 err |= __get_user(regs->d0, &gregs[0]); 724 751 err |= __get_user(regs->d1, &gregs[1]); ··· 747 774 err |= restore_altstack(&uc->uc_stack); 748 775 749 776 if (err) 750 - goto badframe; 777 + return -1; 751 778 752 - if (mangle_kernel_stack(regs, temp, &uc->uc_extra)) 753 - goto badframe; 754 - 755 - return 0; 756 - 757 - badframe: 758 - return 1; 779 + return mangle_kernel_stack(regs, temp, &uc->uc_extra); 759 780 } 760 781 761 - asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw) 782 + asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw) 762 783 { 763 784 unsigned long usp = rdusp(); 764 785 struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); 765 786 sigset_t set; 787 + int size; 766 788 767 789 if (!access_ok(frame, sizeof(*frame))) 768 790 goto badframe; ··· 769 801 770 802 set_current_blocked(&set); 771 803 772 - if (restore_sigcontext(regs, &frame->sc, frame + 1)) 804 + size = restore_sigcontext(regs, &frame->sc, frame + 1); 805 + if (size < 0) 773 806 goto badframe; 774 - return regs->d0; 807 + return (void *)sw - size; 775 808 776 809 badframe: 777 810 force_sig(SIGSEGV); 778 - return 0; 811 + return sw; 779 812 } 780 813 781 - asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw) 814 + asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw) 782 815 { 783 816 unsigned long usp = rdusp(); 784 817 struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); 785 818 sigset_t set; 819 + int size; 786 820 787 821 if (!access_ok(frame, sizeof(*frame))) 788 822 goto badframe; ··· 793 
823 794 824 set_current_blocked(&set); 795 825 796 - if (rt_restore_ucontext(regs, sw, &frame->uc)) 826 + size = rt_restore_ucontext(regs, sw, &frame->uc); 827 + if (size < 0) 797 828 goto badframe; 798 - return regs->d0; 829 + return (void *)sw - size; 799 830 800 831 badframe: 801 832 force_sig(SIGSEGV); 802 - return 0; 833 + return sw; 834 + } 835 + 836 + static inline struct pt_regs *rte_regs(struct pt_regs *regs) 837 + { 838 + return (void *)regs + regs->stkadj; 803 839 } 804 840 805 841 static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, 806 842 unsigned long mask) 807 843 { 844 + struct pt_regs *tregs = rte_regs(regs); 808 845 sc->sc_mask = mask; 809 846 sc->sc_usp = rdusp(); 810 847 sc->sc_d0 = regs->d0; 811 848 sc->sc_d1 = regs->d1; 812 849 sc->sc_a0 = regs->a0; 813 850 sc->sc_a1 = regs->a1; 814 - sc->sc_sr = regs->sr; 815 - sc->sc_pc = regs->pc; 816 - sc->sc_formatvec = regs->format << 12 | regs->vector; 851 + sc->sc_sr = tregs->sr; 852 + sc->sc_pc = tregs->pc; 853 + sc->sc_formatvec = tregs->format << 12 | tregs->vector; 817 854 save_a5_state(sc, regs); 818 855 save_fpu_state(sc, regs); 819 856 } ··· 828 851 static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) 829 852 { 830 853 struct switch_stack *sw = (struct switch_stack *)regs - 1; 854 + struct pt_regs *tregs = rte_regs(regs); 831 855 greg_t __user *gregs = uc->uc_mcontext.gregs; 832 856 int err = 0; 833 857 ··· 849 871 err |= __put_user(sw->a5, &gregs[13]); 850 872 err |= __put_user(sw->a6, &gregs[14]); 851 873 err |= __put_user(rdusp(), &gregs[15]); 852 - err |= __put_user(regs->pc, &gregs[16]); 853 - err |= __put_user(regs->sr, &gregs[17]); 854 - err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec); 874 + err |= __put_user(tregs->pc, &gregs[16]); 875 + err |= __put_user(tregs->sr, &gregs[17]); 876 + err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec); 855 877 err |= rt_save_fpu_state(uc, regs); 856 
878 return err; 857 879 } ··· 868 890 struct pt_regs *regs) 869 891 { 870 892 struct sigframe __user *frame; 871 - int fsize = frame_extra_sizes(regs->format); 893 + struct pt_regs *tregs = rte_regs(regs); 894 + int fsize = frame_extra_sizes(tregs->format); 872 895 struct sigcontext context; 873 896 int err = 0, sig = ksig->sig; 874 897 875 898 if (fsize < 0) { 876 899 pr_debug("setup_frame: Unknown frame format %#x\n", 877 - regs->format); 900 + tregs->format); 878 901 return -EFAULT; 879 902 } 880 903 ··· 886 907 887 908 err |= __put_user(sig, &frame->sig); 888 909 889 - err |= __put_user(regs->vector, &frame->code); 910 + err |= __put_user(tregs->vector, &frame->code); 890 911 err |= __put_user(&frame->sc, &frame->psc); 891 912 892 913 if (_NSIG_WORDS > 1) ··· 913 934 push_cache ((unsigned long) &frame->retcode); 914 935 915 936 /* 916 - * Set up registers for signal handler. All the state we are about 917 - * to destroy is successfully copied to sigframe. 918 - */ 919 - wrusp ((unsigned long) frame); 920 - regs->pc = (unsigned long) ksig->ka.sa.sa_handler; 921 - adjustformat(regs); 922 - 923 - /* 924 937 * This is subtle; if we build more than one sigframe, all but the 925 938 * first one will see frame format 0 and have fsize == 0, so we won't 926 939 * screw stkadj. 927 940 */ 928 - if (fsize) 941 + if (fsize) { 929 942 regs->stkadj = fsize; 930 - 931 - /* Prepare to skip over the extra stuff in the exception frame. */ 932 - if (regs->stkadj) { 933 - struct pt_regs *tregs = 934 - (struct pt_regs *)((ulong)regs + regs->stkadj); 943 + tregs = rte_regs(regs); 935 944 pr_debug("Performing stackadjust=%04lx\n", regs->stkadj); 936 - /* This must be copied with decreasing addresses to 937 - handle overlaps. */ 938 945 tregs->vector = 0; 939 946 tregs->format = 0; 940 - tregs->pc = regs->pc; 941 947 tregs->sr = regs->sr; 942 948 } 949 + 950 + /* 951 + * Set up registers for signal handler. 
All the state we are about 952 + * to destroy is successfully copied to sigframe. 953 + */ 954 + wrusp ((unsigned long) frame); 955 + tregs->pc = (unsigned long) ksig->ka.sa.sa_handler; 956 + adjustformat(regs); 957 + 943 958 return 0; 944 959 } 945 960 ··· 941 968 struct pt_regs *regs) 942 969 { 943 970 struct rt_sigframe __user *frame; 944 - int fsize = frame_extra_sizes(regs->format); 971 + struct pt_regs *tregs = rte_regs(regs); 972 + int fsize = frame_extra_sizes(tregs->format); 945 973 int err = 0, sig = ksig->sig; 946 974 947 975 if (fsize < 0) { ··· 993 1019 push_cache ((unsigned long) &frame->retcode); 994 1020 995 1021 /* 996 - * Set up registers for signal handler. All the state we are about 997 - * to destroy is successfully copied to sigframe. 998 - */ 999 - wrusp ((unsigned long) frame); 1000 - regs->pc = (unsigned long) ksig->ka.sa.sa_handler; 1001 - adjustformat(regs); 1002 - 1003 - /* 1004 1022 * This is subtle; if we build more than one sigframe, all but the 1005 1023 * first one will see frame format 0 and have fsize == 0, so we won't 1006 1024 * screw stkadj. 1007 1025 */ 1008 - if (fsize) 1026 + if (fsize) { 1009 1027 regs->stkadj = fsize; 1010 - 1011 - /* Prepare to skip over the extra stuff in the exception frame. */ 1012 - if (regs->stkadj) { 1013 - struct pt_regs *tregs = 1014 - (struct pt_regs *)((ulong)regs + regs->stkadj); 1028 + tregs = rte_regs(regs); 1015 1029 pr_debug("Performing stackadjust=%04lx\n", regs->stkadj); 1016 - /* This must be copied with decreasing addresses to 1017 - handle overlaps. */ 1018 1030 tregs->vector = 0; 1019 1031 tregs->format = 0; 1020 - tregs->pc = regs->pc; 1021 1032 tregs->sr = regs->sr; 1022 1033 } 1034 + 1035 + /* 1036 + * Set up registers for signal handler. All the state we are about 1037 + * to destroy is successfully copied to sigframe. 
1038 + */ 1039 + wrusp ((unsigned long) frame); 1040 + tregs->pc = (unsigned long) ksig->ka.sa.sa_handler; 1041 + adjustformat(regs); 1023 1042 return 0; 1024 1043 } 1025 1044
+4 -9
arch/m68k/kernel/traps.c
··· 181 181 static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs) 182 182 { 183 183 unsigned long mmusr; 184 - mm_segment_t old_fs = get_fs(); 185 184 186 - set_fs(MAKE_MM_SEG(wbs)); 185 + set_fc(wbs); 187 186 188 187 if (iswrite) 189 188 asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr)); ··· 191 192 192 193 asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr)); 193 194 194 - set_fs(old_fs); 195 + set_fc(USER_DATA); 195 196 196 197 return mmusr; 197 198 } ··· 200 201 unsigned long wbd) 201 202 { 202 203 int res = 0; 203 - mm_segment_t old_fs = get_fs(); 204 204 205 - /* set_fs can not be moved, otherwise put_user() may oops */ 206 - set_fs(MAKE_MM_SEG(wbs)); 205 + set_fc(wbs); 207 206 208 207 switch (wbs & WBSIZ_040) { 209 208 case BA_SIZE_BYTE: ··· 215 218 break; 216 219 } 217 220 218 - /* set_fs can not be moved, otherwise put_user() may oops */ 219 - set_fs(old_fs); 220 - 221 + set_fc(USER_DATA); 221 222 222 223 pr_debug("do_040writeback1, res=%d\n", res); 223 224
-1
arch/m68k/mac/misc.c
··· 18 18 19 19 #include <linux/uaccess.h> 20 20 #include <asm/io.h> 21 - #include <asm/segment.h> 22 21 #include <asm/setup.h> 23 22 #include <asm/macintosh.h> 24 23 #include <asm/mac_via.h>
+3 -22
arch/m68k/mm/cache.c
··· 49 49 if (mmusr & MMU_R_040) 50 50 return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK); 51 51 } else { 52 - unsigned short mmusr; 53 - unsigned long *descaddr; 54 - 55 - asm volatile ("ptestr %3,%2@,#7,%0\n\t" 56 - "pmove %%psr,%1" 57 - : "=a&" (descaddr), "=m" (mmusr) 58 - : "a" (vaddr), "d" (get_fs().seg)); 59 - if (mmusr & (MMU_I|MMU_B|MMU_L)) 60 - return 0; 61 - descaddr = phys_to_virt((unsigned long)descaddr); 62 - switch (mmusr & MMU_NUM) { 63 - case 1: 64 - return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff); 65 - case 2: 66 - return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff); 67 - case 3: 68 - return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK); 69 - } 52 + WARN_ON_ONCE(!CPU_IS_040_OR_060); 70 53 } 71 54 return 0; 72 55 } ··· 90 107 91 108 void flush_icache_range(unsigned long address, unsigned long endaddr) 92 109 { 93 - mm_segment_t old_fs = get_fs(); 94 - 95 - set_fs(KERNEL_DS); 110 + set_fc(SUPER_DATA); 96 111 flush_icache_user_range(address, endaddr); 97 - set_fs(old_fs); 112 + set_fc(USER_DATA); 98 113 } 99 114 EXPORT_SYMBOL(flush_icache_range); 100 115
-6
arch/m68k/mm/init.c
··· 72 72 if (!empty_zero_page) 73 73 panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 74 74 __func__, PAGE_SIZE, PAGE_SIZE); 75 - 76 - /* 77 - * Set up SFC/DFC registers (user data space). 78 - */ 79 - set_fs (USER_DS); 80 - 81 75 max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT; 82 76 free_area_init(max_zone_pfn); 83 77 }
-1
arch/m68k/mm/kmap.c
··· 17 17 #include <linux/vmalloc.h> 18 18 19 19 #include <asm/setup.h> 20 - #include <asm/segment.h> 21 20 #include <asm/page.h> 22 21 #include <asm/io.h> 23 22 #include <asm/tlbflush.h>
-1
arch/m68k/mm/memory.c
··· 15 15 #include <linux/gfp.h> 16 16 17 17 #include <asm/setup.h> 18 - #include <asm/segment.h> 19 18 #include <asm/page.h> 20 19 #include <asm/traps.h> 21 20 #include <asm/machdep.h>
+1 -1
arch/m68k/mm/motorola.c
··· 467 467 /* 468 468 * Set up SFC/DFC registers 469 469 */ 470 - set_fs(KERNEL_DS); 470 + set_fc(USER_DATA); 471 471 472 472 #ifdef DEBUG 473 473 printk ("before free_area_init\n");
+1 -2
arch/m68k/sun3/config.c
··· 31 31 #include <asm/intersil.h> 32 32 #include <asm/irq.h> 33 33 #include <asm/sections.h> 34 - #include <asm/segment.h> 35 34 #include <asm/sun3ints.h> 36 35 37 36 char sun3_reserved_pmeg[SUN3_PMEGS_NUM]; ··· 88 89 sun3_reserved_pmeg[249] = 1; 89 90 sun3_reserved_pmeg[252] = 1; 90 91 sun3_reserved_pmeg[253] = 1; 91 - set_fs(KERNEL_DS); 92 + set_fc(USER_DATA); 92 93 } 93 94 94 95 /* Without this, Bad Things happen when something calls arch_reset. */
+2 -4
arch/m68k/sun3/mmu_emu.c
··· 23 23 #include <linux/uaccess.h> 24 24 #include <asm/page.h> 25 25 #include <asm/sun3mmu.h> 26 - #include <asm/segment.h> 27 26 #include <asm/oplib.h> 28 27 #include <asm/mmu_context.h> 29 28 #include <asm/dvma.h> ··· 190 191 for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE) 191 192 sun3_put_segmap(seg, SUN3_INVALID_PMEG); 192 193 193 - set_fs(MAKE_MM_SEG(3)); 194 + set_fc(3); 194 195 for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) { 195 196 i = sun3_get_segmap(seg); 196 197 for(j = 1; j < CONTEXTS_NUM; j++) 197 198 (*(romvec->pv_setctxt))(j, (void *)seg, i); 198 199 } 199 - set_fs(KERNEL_DS); 200 - 200 + set_fc(USER_DATA); 201 201 } 202 202 203 203 /* erase the mappings for a dead context. Uses the pg_dir for hints
-1
arch/m68k/sun3/sun3ints.c
··· 11 11 #include <linux/sched.h> 12 12 #include <linux/kernel_stat.h> 13 13 #include <linux/interrupt.h> 14 - #include <asm/segment.h> 15 14 #include <asm/intersil.h> 16 15 #include <asm/oplib.h> 17 16 #include <asm/sun3ints.h>
-1
arch/m68k/sun3x/prom.c
··· 14 14 #include <asm/traps.h> 15 15 #include <asm/sun3xprom.h> 16 16 #include <asm/idprom.h> 17 - #include <asm/segment.h> 18 17 #include <asm/sun3ints.h> 19 18 #include <asm/openprom.h> 20 19 #include <asm/machines.h>
+1 -22
arch/mips/include/asm/mips-cps.h
··· 10 10 #include <linux/io.h> 11 11 #include <linux/types.h> 12 12 13 - #include <asm/mips-boards/launch.h> 14 - 15 13 extern unsigned long __cps_access_bad_size(void) 16 14 __compiletime_error("Bad size for CPS accessor"); 17 15 ··· 165 167 */ 166 168 static inline unsigned int mips_cps_numcores(unsigned int cluster) 167 169 { 168 - unsigned int ncores; 169 - 170 170 if (!mips_cm_present()) 171 171 return 0; 172 172 173 173 /* Add one before masking to handle 0xff indicating no cores */ 174 - ncores = (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; 175 - 176 - if (IS_ENABLED(CONFIG_SOC_MT7621)) { 177 - struct cpulaunch *launch; 178 - 179 - /* 180 - * Ralink MT7621S SoC is single core, but the GCR_CONFIG method 181 - * always reports 2 cores. Check the second core's LAUNCH_FREADY 182 - * flag to detect if the second core is missing. This method 183 - * only works before the core has been started. 184 - */ 185 - launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); 186 - launch += 2; /* MT7621 has 2 VPEs per core */ 187 - if (!(launch->flags & LAUNCH_FREADY)) 188 - ncores = 1; 189 - } 190 - 191 - return ncores; 174 + return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES; 192 175 } 193 176 194 177 /**
+1 -3
arch/mips/kernel/signal.c
··· 906 906 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) 907 907 do_signal(regs); 908 908 909 - if (thread_info_flags & _TIF_NOTIFY_RESUME) { 909 + if (thread_info_flags & _TIF_NOTIFY_RESUME) 910 910 tracehook_notify_resume(regs); 911 - rseq_handle_notify_resume(NULL, regs); 912 - } 913 911 914 912 user_enter(); 915 913 }
+43 -14
arch/mips/net/bpf_jit.c
··· 662 662 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \ 663 663 func##_positive) 664 664 665 + static bool is_bad_offset(int b_off) 666 + { 667 + return b_off > 0x1ffff || b_off < -0x20000; 668 + } 669 + 665 670 static int build_body(struct jit_ctx *ctx) 666 671 { 667 672 const struct bpf_prog *prog = ctx->skf; ··· 733 728 /* Load return register on DS for failures */ 734 729 emit_reg_move(r_ret, r_zero, ctx); 735 730 /* Return with error */ 736 - emit_b(b_imm(prog->len, ctx), ctx); 731 + b_off = b_imm(prog->len, ctx); 732 + if (is_bad_offset(b_off)) 733 + return -E2BIG; 734 + emit_b(b_off, ctx); 737 735 emit_nop(ctx); 738 736 break; 739 737 case BPF_LD | BPF_W | BPF_IND: ··· 783 775 emit_jalr(MIPS_R_RA, r_s0, ctx); 784 776 emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */ 785 777 /* Check the error value */ 786 - emit_bcond(MIPS_COND_NE, r_ret, 0, 787 - b_imm(prog->len, ctx), ctx); 778 + b_off = b_imm(prog->len, ctx); 779 + if (is_bad_offset(b_off)) 780 + return -E2BIG; 781 + emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx); 788 782 emit_reg_move(r_ret, r_zero, ctx); 789 783 /* We are good */ 790 784 /* X <- P[1:K] & 0xf */ ··· 865 855 /* A /= X */ 866 856 ctx->flags |= SEEN_X | SEEN_A; 867 857 /* Check if r_X is zero */ 868 - emit_bcond(MIPS_COND_EQ, r_X, r_zero, 869 - b_imm(prog->len, ctx), ctx); 858 + b_off = b_imm(prog->len, ctx); 859 + if (is_bad_offset(b_off)) 860 + return -E2BIG; 861 + emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx); 870 862 emit_load_imm(r_ret, 0, ctx); /* delay slot */ 871 863 emit_div(r_A, r_X, ctx); 872 864 break; ··· 876 864 /* A %= X */ 877 865 ctx->flags |= SEEN_X | SEEN_A; 878 866 /* Check if r_X is zero */ 879 - emit_bcond(MIPS_COND_EQ, r_X, r_zero, 880 - b_imm(prog->len, ctx), ctx); 867 + b_off = b_imm(prog->len, ctx); 868 + if (is_bad_offset(b_off)) 869 + return -E2BIG; 870 + emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx); 881 871 emit_load_imm(r_ret, 0, ctx); /* delay slot */ 882 872 emit_mod(r_A, 
r_X, ctx); 883 873 break; ··· 940 926 break; 941 927 case BPF_JMP | BPF_JA: 942 928 /* pc += K */ 943 - emit_b(b_imm(i + k + 1, ctx), ctx); 929 + b_off = b_imm(i + k + 1, ctx); 930 + if (is_bad_offset(b_off)) 931 + return -E2BIG; 932 + emit_b(b_off, ctx); 944 933 emit_nop(ctx); 945 934 break; 946 935 case BPF_JMP | BPF_JEQ | BPF_K: ··· 1073 1056 break; 1074 1057 case BPF_RET | BPF_A: 1075 1058 ctx->flags |= SEEN_A; 1076 - if (i != prog->len - 1) 1059 + if (i != prog->len - 1) { 1077 1060 /* 1078 1061 * If this is not the last instruction 1079 1062 * then jump to the epilogue 1080 1063 */ 1081 - emit_b(b_imm(prog->len, ctx), ctx); 1064 + b_off = b_imm(prog->len, ctx); 1065 + if (is_bad_offset(b_off)) 1066 + return -E2BIG; 1067 + emit_b(b_off, ctx); 1068 + } 1082 1069 emit_reg_move(r_ret, r_A, ctx); /* delay slot */ 1083 1070 break; 1084 1071 case BPF_RET | BPF_K: ··· 1096 1075 * If this is not the last instruction 1097 1076 * then jump to the epilogue 1098 1077 */ 1099 - emit_b(b_imm(prog->len, ctx), ctx); 1078 + b_off = b_imm(prog->len, ctx); 1079 + if (is_bad_offset(b_off)) 1080 + return -E2BIG; 1081 + emit_b(b_off, ctx); 1100 1082 emit_nop(ctx); 1101 1083 } 1102 1084 break; ··· 1157 1133 /* Load *dev pointer */ 1158 1134 emit_load_ptr(r_s0, r_skb, off, ctx); 1159 1135 /* error (0) in the delay slot */ 1160 - emit_bcond(MIPS_COND_EQ, r_s0, r_zero, 1161 - b_imm(prog->len, ctx), ctx); 1136 + b_off = b_imm(prog->len, ctx); 1137 + if (is_bad_offset(b_off)) 1138 + return -E2BIG; 1139 + emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx); 1162 1140 emit_reg_move(r_ret, r_zero, ctx); 1163 1141 if (code == (BPF_ANC | SKF_AD_IFINDEX)) { 1164 1142 BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4); ··· 1270 1244 1271 1245 /* Generate the actual JIT code */ 1272 1246 build_prologue(&ctx); 1273 - build_body(&ctx); 1247 + if (build_body(&ctx)) { 1248 + module_memfree(ctx.target); 1249 + goto out; 1250 + } 1274 1251 build_epilogue(&ctx); 1275 1252 1276 1253 /* Update 
the icache */
+2 -1
arch/nios2/Kconfig.debug
··· 3 3 config EARLY_PRINTK 4 4 bool "Activate early kernel debugging" 5 5 default y 6 + depends on TTY 6 7 select SERIAL_CORE_CONSOLE 7 8 help 8 - Enable early printk on console 9 + Enable early printk on console. 9 10 This is useful for kernel debugging when your machine crashes very 10 11 early before the console code is initialized. 11 12 You should normally say N here, unless you want to debug such a crash.
-2
arch/nios2/kernel/setup.c
··· 149 149 150 150 void __init setup_arch(char **cmdline_p) 151 151 { 152 - int dram_start; 153 - 154 152 console_verbose(); 155 153 156 154 memory_start = memblock_start_of_DRAM();
+1 -1
arch/powerpc/boot/dts/fsl/t1023rdb.dts
··· 154 154 155 155 fm1mac3: ethernet@e4000 { 156 156 phy-handle = <&sgmii_aqr_phy3>; 157 - phy-connection-type = "sgmii-2500"; 157 + phy-connection-type = "2500base-x"; 158 158 sleep = <&rcpm 0x20000000>; 159 159 }; 160 160
+1 -3
arch/powerpc/kernel/signal.c
··· 293 293 do_signal(current); 294 294 } 295 295 296 - if (thread_info_flags & _TIF_NOTIFY_RESUME) { 296 + if (thread_info_flags & _TIF_NOTIFY_RESUME) 297 297 tracehook_notify_resume(regs); 298 - rseq_handle_notify_resume(NULL, regs); 299 - } 300 298 } 301 299 302 300 static unsigned long get_tm_stackpointer(struct task_struct *tsk)
+1 -1
arch/s390/include/asm/ccwgroup.h
··· 55 55 int num_devices, const char *buf); 56 56 57 57 extern int ccwgroup_set_online(struct ccwgroup_device *gdev); 58 - extern int ccwgroup_set_offline(struct ccwgroup_device *gdev); 58 + int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv); 59 59 60 60 extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev); 61 61 extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
+2 -2
arch/s390/kvm/interrupt.c
··· 419 419 static void __set_cpu_idle(struct kvm_vcpu *vcpu) 420 420 { 421 421 kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT); 422 - set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); 422 + set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask); 423 423 } 424 424 425 425 static void __unset_cpu_idle(struct kvm_vcpu *vcpu) 426 426 { 427 427 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT); 428 - clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); 428 + clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask); 429 429 } 430 430 431 431 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
+1 -1
arch/s390/kvm/kvm-s390.c
··· 4066 4066 kvm_s390_patch_guest_per_regs(vcpu); 4067 4067 } 4068 4068 4069 - clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask); 4069 + clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); 4070 4070 4071 4071 vcpu->arch.sie_block->icptcode = 0; 4072 4072 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
+1 -1
arch/s390/kvm/kvm-s390.h
··· 79 79 80 80 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu) 81 81 { 82 - return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask); 82 + return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask); 83 83 } 84 84 85 85 static inline int kvm_is_ucontrol(struct kvm *kvm)
+1 -1
arch/sh/include/asm/pgtable-3level.h
··· 34 34 35 35 static inline pmd_t *pud_pgtable(pud_t pud) 36 36 { 37 - return (pmd_t *)pud_val(pud); 37 + return (pmd_t *)(unsigned long)pud_val(pud); 38 38 } 39 39 40 40 /* only used by the stubbed out hugetlb gup code, should never be called */
-1
arch/x86/Kconfig
··· 2610 2610 config PCI_XEN 2611 2611 def_bool y 2612 2612 depends on PCI && XEN 2613 - select SWIOTLB_XEN 2614 2613 2615 2614 config MMCONF_FAM10H 2616 2615 def_bool y
+3 -2
arch/x86/crypto/sm4-aesni-avx-asm_64.S
··· 367 367 * %rdx: src (1..8 blocks) 368 368 * %rcx: num blocks (1..8) 369 369 */ 370 - FRAME_BEGIN 371 - 372 370 cmpq $5, %rcx; 373 371 jb sm4_aesni_avx_crypt4; 372 + 373 + FRAME_BEGIN 374 + 374 375 vmovdqu (0 * 16)(%rdx), RA0; 375 376 vmovdqu (1 * 16)(%rdx), RA1; 376 377 vmovdqu (2 * 16)(%rdx), RA2;
+1
arch/x86/events/core.c
··· 2465 2465 if (err) { 2466 2466 if (event->destroy) 2467 2467 event->destroy(event); 2468 + event->destroy = NULL; 2468 2469 } 2469 2470 2470 2471 if (READ_ONCE(x86_pmu.attr_rdpmc) &&
+1
arch/x86/events/intel/core.c
··· 263 263 INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf), 264 264 INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf), 265 265 INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf), 266 + INTEL_EVENT_CONSTRAINT(0xef, 0xf), 266 267 INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf), 267 268 EVENT_CONSTRAINT_END 268 269 };
+15 -5
arch/x86/hyperv/hv_apic.c
··· 122 122 ipi_arg->reserved = 0; 123 123 ipi_arg->vp_set.valid_bank_mask = 0; 124 124 125 - if (!cpumask_equal(mask, cpu_present_mask)) { 125 + /* 126 + * Use HV_GENERIC_SET_ALL and avoid converting cpumask to VP_SET 127 + * when the IPI is sent to all currently present CPUs. 128 + */ 129 + if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) { 126 130 ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K; 127 131 if (exclude_self) 128 132 nr_bank = cpumask_to_vpset_noself(&(ipi_arg->vp_set), mask); 129 133 else 130 134 nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask); 131 - } 132 - if (nr_bank < 0) 133 - goto ipi_mask_ex_done; 134 - if (!nr_bank) 135 + 136 + /* 137 + * 'nr_bank <= 0' means some CPUs in cpumask can't be 138 + * represented in VP_SET. Return an error and fall back to 139 + * native (architectural) method of sending IPIs. 140 + */ 141 + if (nr_bank <= 0) 142 + goto ipi_mask_ex_done; 143 + } else { 135 144 ipi_arg->vp_set.format = HV_GENERIC_SET_ALL; 145 + } 136 146 137 147 status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank, 138 148 ipi_arg, NULL);
+1 -1
arch/x86/include/asm/kvm_page_track.h
··· 46 46 struct kvm_page_track_notifier_node *node); 47 47 }; 48 48 49 - void kvm_page_track_init(struct kvm *kvm); 49 + int kvm_page_track_init(struct kvm *kvm); 50 50 void kvm_page_track_cleanup(struct kvm *kvm); 51 51 52 52 void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
+14
arch/x86/include/asm/kvmclock.h
··· 2 2 #ifndef _ASM_X86_KVM_CLOCK_H 3 3 #define _ASM_X86_KVM_CLOCK_H 4 4 5 + #include <linux/percpu.h> 6 + 5 7 extern struct clocksource kvm_clock; 8 + 9 + DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu); 10 + 11 + static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void) 12 + { 13 + return &this_cpu_read(hv_clock_per_cpu)->pvti; 14 + } 15 + 16 + static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void) 17 + { 18 + return this_cpu_read(hv_clock_per_cpu); 19 + } 6 20 7 21 #endif /* _ASM_X86_KVM_CLOCK_H */
-2
arch/x86/include/asm/pkeys.h
··· 2 2 #ifndef _ASM_X86_PKEYS_H 3 3 #define _ASM_X86_PKEYS_H 4 4 5 - #define ARCH_DEFAULT_PKEY 0 6 - 7 5 /* 8 6 * If more than 16 keys are ever supported, a thorough audit 9 7 * will be necessary to ensure that the types that store key
+1 -1
arch/x86/include/asm/special_insns.h
··· 275 275 { 276 276 const struct { char _[64]; } *__src = src; 277 277 struct { char _[64]; } __iomem *__dst = dst; 278 - int zf; 278 + bool zf; 279 279 280 280 /* 281 281 * ENQCMDS %(rdx), rax
+1 -5
arch/x86/include/asm/xen/swiotlb-xen.h
··· 3 3 #define _ASM_X86_SWIOTLB_XEN_H 4 4 5 5 #ifdef CONFIG_SWIOTLB_XEN 6 - extern int xen_swiotlb; 7 6 extern int __init pci_xen_swiotlb_detect(void); 8 - extern void __init pci_xen_swiotlb_init(void); 9 7 extern int pci_xen_swiotlb_init_late(void); 10 8 #else 11 - #define xen_swiotlb (0) 12 - static inline int __init pci_xen_swiotlb_detect(void) { return 0; } 13 - static inline void __init pci_xen_swiotlb_init(void) { } 9 + #define pci_xen_swiotlb_detect NULL 14 10 static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; } 15 11 #endif 16 12
+2 -11
arch/x86/kernel/kvmclock.c
··· 49 49 static struct pvclock_vsyscall_time_info 50 50 hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE); 51 51 static struct pvclock_wall_clock wall_clock __bss_decrypted; 52 - static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu); 53 52 static struct pvclock_vsyscall_time_info *hvclock_mem; 54 - 55 - static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void) 56 - { 57 - return &this_cpu_read(hv_clock_per_cpu)->pvti; 58 - } 59 - 60 - static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void) 61 - { 62 - return this_cpu_read(hv_clock_per_cpu); 63 - } 53 + DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu); 54 + EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu); 64 55 65 56 /* 66 57 * The wallclock is the time of day when we booted. Since then, some time may
+14 -12
arch/x86/kernel/setup.c
··· 830 830 831 831 x86_init.oem.arch_setup(); 832 832 833 + /* 834 + * Do some memory reservations *before* memory is added to memblock, so 835 + * memblock allocations won't overwrite it. 836 + * 837 + * After this point, everything still needed from the boot loader or 838 + * firmware or kernel text should be early reserved or marked not RAM in 839 + * e820. All other memory is free game. 840 + * 841 + * This call needs to happen before e820__memory_setup() which calls the 842 + * xen_memory_setup() on Xen dom0 which relies on the fact that those 843 + * early reservations have happened already. 844 + */ 845 + early_reserve_memory(); 846 + 833 847 iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1; 834 848 e820__memory_setup(); 835 849 parse_setup_data(); ··· 889 875 x86_configure_nx(); 890 876 891 877 parse_early_param(); 892 - 893 - /* 894 - * Do some memory reservations *before* memory is added to 895 - * memblock, so memblock allocations won't overwrite it. 896 - * Do it after early param, so we could get (unlikely) panic from 897 - * serial. 898 - * 899 - * After this point everything still needed from the boot loader or 900 - * firmware or kernel text should be early reserved or marked not 901 - * RAM in e820. All other memory is free game. 902 - */ 903 - early_reserve_memory(); 904 878 905 879 #ifdef CONFIG_MEMORY_HOTPLUG 906 880 /*
+2 -2
arch/x86/kvm/cpuid.c
··· 65 65 for (i = 0; i < nent; i++) { 66 66 e = &entries[i]; 67 67 68 - if (e->function == function && (e->index == index || 69 - !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX))) 68 + if (e->function == function && 69 + (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)) 70 70 return e; 71 71 } 72 72
+1 -2
arch/x86/kvm/emulate.c
··· 435 435 __FOP_RET(#op) 436 436 437 437 asm(".pushsection .fixup, \"ax\"\n" 438 - ".global kvm_fastop_exception \n" 439 438 "kvm_fastop_exception: xor %esi, %esi; ret\n" 440 439 ".popsection"); 441 440 ··· 4205 4206 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); 4206 4207 4207 4208 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) 4208 - return emulate_ud(ctxt); 4209 + return emulate_gp(ctxt, 0); 4209 4210 4210 4211 return X86EMUL_CONTINUE; 4211 4212 }
+3 -4
arch/x86/kvm/hyperv.c
··· 939 939 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) 940 940 stimer_init(&hv_vcpu->stimer[i], i); 941 941 942 - hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu); 942 + hv_vcpu->vp_index = vcpu->vcpu_idx; 943 943 944 944 return 0; 945 945 } ··· 1444 1444 switch (msr) { 1445 1445 case HV_X64_MSR_VP_INDEX: { 1446 1446 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); 1447 - int vcpu_idx = kvm_vcpu_get_idx(vcpu); 1448 1447 u32 new_vp_index = (u32)data; 1449 1448 1450 1449 if (!host || new_vp_index >= KVM_MAX_VCPUS) ··· 1458 1459 * VP index is changing, adjust num_mismatched_vp_indexes if 1459 1460 * it now matches or no longer matches vcpu_idx. 1460 1461 */ 1461 - if (hv_vcpu->vp_index == vcpu_idx) 1462 + if (hv_vcpu->vp_index == vcpu->vcpu_idx) 1462 1463 atomic_inc(&hv->num_mismatched_vp_indexes); 1463 - else if (new_vp_index == vcpu_idx) 1464 + else if (new_vp_index == vcpu->vcpu_idx) 1464 1465 atomic_dec(&hv->num_mismatched_vp_indexes); 1465 1466 1466 1467 hv_vcpu->vp_index = new_vp_index;
+1 -1
arch/x86/kvm/hyperv.h
··· 83 83 { 84 84 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); 85 85 86 - return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu); 86 + return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx; 87 87 } 88 88 89 89 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
+5 -5
arch/x86/kvm/ioapic.c
··· 319 319 unsigned index; 320 320 bool mask_before, mask_after; 321 321 union kvm_ioapic_redirect_entry *e; 322 - unsigned long vcpu_bitmap; 323 322 int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode; 323 + DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS); 324 324 325 325 switch (ioapic->ioregsel) { 326 326 case IOAPIC_REG_VERSION: ··· 384 384 irq.shorthand = APIC_DEST_NOSHORT; 385 385 irq.dest_id = e->fields.dest_id; 386 386 irq.msi_redir_hint = false; 387 - bitmap_zero(&vcpu_bitmap, 16); 387 + bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS); 388 388 kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, 389 - &vcpu_bitmap); 389 + vcpu_bitmap); 390 390 if (old_dest_mode != e->fields.dest_mode || 391 391 old_dest_id != e->fields.dest_id) { 392 392 /* ··· 399 399 kvm_lapic_irq_dest_mode( 400 400 !!e->fields.dest_mode); 401 401 kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq, 402 - &vcpu_bitmap); 402 + vcpu_bitmap); 403 403 } 404 404 kvm_make_scan_ioapic_request_mask(ioapic->kvm, 405 - &vcpu_bitmap); 405 + vcpu_bitmap); 406 406 } else { 407 407 kvm_make_scan_ioapic_request(ioapic->kvm); 408 408 }
+10 -7
arch/x86/kvm/mmu/mmu.c
··· 2027 2027 } while (!sp->unsync_children); 2028 2028 } 2029 2029 2030 - static void mmu_sync_children(struct kvm_vcpu *vcpu, 2031 - struct kvm_mmu_page *parent) 2030 + static int mmu_sync_children(struct kvm_vcpu *vcpu, 2031 + struct kvm_mmu_page *parent, bool can_yield) 2032 2032 { 2033 2033 int i; 2034 2034 struct kvm_mmu_page *sp; ··· 2055 2055 } 2056 2056 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) { 2057 2057 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); 2058 + if (!can_yield) { 2059 + kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); 2060 + return -EINTR; 2061 + } 2062 + 2058 2063 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock); 2059 2064 flush = false; 2060 2065 } 2061 2066 } 2062 2067 2063 2068 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); 2069 + return 0; 2064 2070 } 2065 2071 2066 2072 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) ··· 2151 2145 WARN_ON(!list_empty(&invalid_list)); 2152 2146 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 2153 2147 } 2154 - 2155 - if (sp->unsync_children) 2156 - kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); 2157 2148 2158 2149 __clear_sp_write_flooding_count(sp); 2159 2150 ··· 3687 3684 write_lock(&vcpu->kvm->mmu_lock); 3688 3685 kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); 3689 3686 3690 - mmu_sync_children(vcpu, sp); 3687 + mmu_sync_children(vcpu, sp, true); 3691 3688 3692 3689 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); 3693 3690 write_unlock(&vcpu->kvm->mmu_lock); ··· 3703 3700 if (IS_VALID_PAE_ROOT(root)) { 3704 3701 root &= PT64_BASE_ADDR_MASK; 3705 3702 sp = to_shadow_page(root); 3706 - mmu_sync_children(vcpu, sp); 3703 + mmu_sync_children(vcpu, sp, true); 3707 3704 } 3708 3705 } 3709 3706
+2 -2
arch/x86/kvm/mmu/page_track.c
··· 164 164 cleanup_srcu_struct(&head->track_srcu); 165 165 } 166 166 167 - void kvm_page_track_init(struct kvm *kvm) 167 + int kvm_page_track_init(struct kvm *kvm) 168 168 { 169 169 struct kvm_page_track_notifier_head *head; 170 170 171 171 head = &kvm->arch.track_notifier_head; 172 - init_srcu_struct(&head->track_srcu); 173 172 INIT_HLIST_HEAD(&head->track_notifier_list); 173 + return init_srcu_struct(&head->track_srcu); 174 174 } 175 175 176 176 /*
+23 -23
arch/x86/kvm/mmu/paging_tmpl.h
··· 707 707 if (!is_shadow_present_pte(*it.sptep)) { 708 708 table_gfn = gw->table_gfn[it.level - 2]; 709 709 access = gw->pt_access[it.level - 2]; 710 - sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1, 711 - false, access); 710 + sp = kvm_mmu_get_page(vcpu, table_gfn, addr, 711 + it.level-1, false, access); 712 + /* 713 + * We must synchronize the pagetable before linking it 714 + * because the guest doesn't need to flush tlb when 715 + * the gpte is changed from non-present to present. 716 + * Otherwise, the guest may use the wrong mapping. 717 + * 718 + * For PG_LEVEL_4K, kvm_mmu_get_page() has already 719 + * synchronized it transiently via kvm_sync_page(). 720 + * 721 + * For higher level pagetable, we synchronize it via 722 + * the slower mmu_sync_children(). If it needs to 723 + * break, some progress has been made; return 724 + * RET_PF_RETRY and retry on the next #PF. 725 + * KVM_REQ_MMU_SYNC is not necessary but it 726 + * expedites the process. 727 + */ 728 + if (sp->unsync_children && 729 + mmu_sync_children(vcpu, sp, false)) 730 + return RET_PF_RETRY; 712 731 } 713 732 714 733 /* ··· 1066 1047 * Using the cached information from sp->gfns is safe because: 1067 1048 * - The spte has a reference to the struct page, so the pfn for a given gfn 1068 1049 * can't change unless all sptes pointing to it are nuked first. 1069 - * 1070 - * Note: 1071 - * We should flush all tlbs if spte is dropped even though guest is 1072 - * responsible for it. Since if we don't, kvm_mmu_notifier_invalidate_page 1073 - * and kvm_mmu_notifier_invalidate_range_start detect the mapping page isn't 1074 - * used by guest then tlbs are not flushed, so guest is allowed to access the 1075 - * freed pages. 1076 - * And we increase kvm->tlbs_dirty to delay tlbs flush in this case. 
1077 1050 */ 1078 1051 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) 1079 1052 { ··· 1118 1107 return 0; 1119 1108 1120 1109 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { 1121 - /* 1122 - * Update spte before increasing tlbs_dirty to make 1123 - * sure no tlb flush is lost after spte is zapped; see 1124 - * the comments in kvm_flush_remote_tlbs(). 1125 - */ 1126 - smp_wmb(); 1127 - vcpu->kvm->tlbs_dirty++; 1110 + set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; 1128 1111 continue; 1129 1112 } 1130 1113 ··· 1133 1128 1134 1129 if (gfn != sp->gfns[i]) { 1135 1130 drop_spte(vcpu->kvm, &sp->spt[i]); 1136 - /* 1137 - * The same as above where we are doing 1138 - * prefetch_invalid_gpte(). 1139 - */ 1140 - smp_wmb(); 1141 - vcpu->kvm->tlbs_dirty++; 1131 + set_spte_ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; 1142 1132 continue; 1143 1133 } 1144 1134
+6 -4
arch/x86/kvm/svm/nested.c
··· 545 545 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | 546 546 (svm->vmcb01.ptr->control.int_ctl & int_ctl_vmcb01_bits); 547 547 548 - svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext; 549 548 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; 550 549 svm->vmcb->control.int_state = svm->nested.ctl.int_state; 551 550 svm->vmcb->control.event_inj = svm->nested.ctl.event_inj; ··· 578 579 } 579 580 580 581 int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa, 581 - struct vmcb *vmcb12) 582 + struct vmcb *vmcb12, bool from_vmrun) 582 583 { 583 584 struct vcpu_svm *svm = to_svm(vcpu); 584 585 int ret; ··· 608 609 nested_vmcb02_prepare_save(svm, vmcb12); 609 610 610 611 ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3, 611 - nested_npt_enabled(svm), true); 612 + nested_npt_enabled(svm), from_vmrun); 612 613 if (ret) 613 614 return ret; 614 615 615 616 if (!npt_enabled) 616 617 vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested; 618 + 619 + if (!from_vmrun) 620 + kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu); 617 621 618 622 svm_set_gif(svm, true); 619 623 ··· 683 681 684 682 svm->nested.nested_run_pending = 1; 685 683 686 - if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12)) 684 + if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true)) 687 685 goto out_exit_err; 688 686 689 687 if (nested_svm_vmrun_msrpm(svm))
+62 -30
arch/x86/kvm/svm/sev.c
··· 595 595 return 0; 596 596 } 597 597 598 + static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, 599 + int *error) 600 + { 601 + struct sev_data_launch_update_vmsa vmsa; 602 + struct vcpu_svm *svm = to_svm(vcpu); 603 + int ret; 604 + 605 + /* Perform some pre-encryption checks against the VMSA */ 606 + ret = sev_es_sync_vmsa(svm); 607 + if (ret) 608 + return ret; 609 + 610 + /* 611 + * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of 612 + * the VMSA memory content (i.e it will write the same memory region 613 + * with the guest's key), so invalidate it first. 614 + */ 615 + clflush_cache_range(svm->vmsa, PAGE_SIZE); 616 + 617 + vmsa.reserved = 0; 618 + vmsa.handle = to_kvm_svm(kvm)->sev_info.handle; 619 + vmsa.address = __sme_pa(svm->vmsa); 620 + vmsa.len = PAGE_SIZE; 621 + return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error); 622 + } 623 + 598 624 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) 599 625 { 600 - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 601 - struct sev_data_launch_update_vmsa vmsa; 602 626 struct kvm_vcpu *vcpu; 603 627 int i, ret; 604 628 605 629 if (!sev_es_guest(kvm)) 606 630 return -ENOTTY; 607 631 608 - vmsa.reserved = 0; 609 - 610 632 kvm_for_each_vcpu(i, vcpu, kvm) { 611 - struct vcpu_svm *svm = to_svm(vcpu); 612 - 613 - /* Perform some pre-encryption checks against the VMSA */ 614 - ret = sev_es_sync_vmsa(svm); 633 + ret = mutex_lock_killable(&vcpu->mutex); 615 634 if (ret) 616 635 return ret; 617 636 618 - /* 619 - * The LAUNCH_UPDATE_VMSA command will perform in-place 620 - * encryption of the VMSA memory content (i.e it will write 621 - * the same memory region with the guest's key), so invalidate 622 - * it first. 
623 - */ 624 - clflush_cache_range(svm->vmsa, PAGE_SIZE); 637 + ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error); 625 638 626 - vmsa.handle = sev->handle; 627 - vmsa.address = __sme_pa(svm->vmsa); 628 - vmsa.len = PAGE_SIZE; 629 - ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, 630 - &argp->error); 639 + mutex_unlock(&vcpu->mutex); 631 640 if (ret) 632 641 return ret; 633 - 634 - svm->vcpu.arch.guest_state_protected = true; 635 642 } 636 643 637 644 return 0; ··· 1404 1397 1405 1398 /* Bind ASID to this guest */ 1406 1399 ret = sev_bind_asid(kvm, start.handle, error); 1407 - if (ret) 1400 + if (ret) { 1401 + sev_decommission(start.handle); 1408 1402 goto e_free_session; 1403 + } 1409 1404 1410 1405 params.handle = start.handle; 1411 1406 if (copy_to_user((void __user *)(uintptr_t)argp->data, ··· 1473 1464 1474 1465 /* Pin guest memory */ 1475 1466 guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, 1476 - PAGE_SIZE, &n, 0); 1467 + PAGE_SIZE, &n, 1); 1477 1468 if (IS_ERR(guest_page)) { 1478 1469 ret = PTR_ERR(guest_page); 1479 1470 goto e_free_trans; ··· 1510 1501 return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error); 1511 1502 } 1512 1503 1504 + static bool cmd_allowed_from_miror(u32 cmd_id) 1505 + { 1506 + /* 1507 + * Allow mirrors VM to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES 1508 + * active mirror VMs. Also allow the debugging and status commands. 
1509 + */ 1510 + if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA || 1511 + cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT || 1512 + cmd_id == KVM_SEV_DBG_ENCRYPT) 1513 + return true; 1514 + 1515 + return false; 1516 + } 1517 + 1513 1518 int svm_mem_enc_op(struct kvm *kvm, void __user *argp) 1514 1519 { 1515 1520 struct kvm_sev_cmd sev_cmd; ··· 1540 1517 1541 1518 mutex_lock(&kvm->lock); 1542 1519 1543 - /* enc_context_owner handles all memory enc operations */ 1544 - if (is_mirroring_enc_context(kvm)) { 1520 + /* Only the enc_context_owner handles some memory enc operations. */ 1521 + if (is_mirroring_enc_context(kvm) && 1522 + !cmd_allowed_from_miror(sev_cmd.id)) { 1545 1523 r = -EINVAL; 1546 1524 goto out; 1547 1525 } ··· 1739 1715 { 1740 1716 struct file *source_kvm_file; 1741 1717 struct kvm *source_kvm; 1742 - struct kvm_sev_info *mirror_sev; 1743 - unsigned int asid; 1718 + struct kvm_sev_info source_sev, *mirror_sev; 1744 1719 int ret; 1745 1720 1746 1721 source_kvm_file = fget(source_fd); ··· 1762 1739 goto e_source_unlock; 1763 1740 } 1764 1741 1765 - asid = to_kvm_svm(source_kvm)->sev_info.asid; 1742 + memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info, 1743 + sizeof(source_sev)); 1766 1744 1767 1745 /* 1768 1746 * The mirror kvm holds an enc_context_owner ref so its asid can't ··· 1783 1759 /* Set enc_context_owner and copy its encryption context over */ 1784 1760 mirror_sev = &to_kvm_svm(kvm)->sev_info; 1785 1761 mirror_sev->enc_context_owner = source_kvm; 1786 - mirror_sev->asid = asid; 1787 1762 mirror_sev->active = true; 1763 + mirror_sev->asid = source_sev.asid; 1764 + mirror_sev->fd = source_sev.fd; 1765 + mirror_sev->es_active = source_sev.es_active; 1766 + mirror_sev->handle = source_sev.handle; 1767 + /* 1768 + * Do not copy ap_jump_table. Since the mirror does not share the same 1769 + * KVM contexts as the original, and they may have different 1770 + * memory-views. 
1771 + */ 1788 1772 1789 1773 mutex_unlock(&kvm->lock); 1790 1774 return 0;
+74 -63
arch/x86/kvm/svm/svm.c
··· 1566 1566 1567 1567 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & 1568 1568 V_IRQ_INJECTION_BITS_MASK; 1569 + 1570 + svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; 1569 1571 } 1570 1572 1571 1573 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); ··· 2222 2220 2223 2221 /* Both #GP cases have zero error_code */ 2224 2222 if (error_code) 2223 + goto reinject; 2224 + 2225 + /* All SVM instructions expect page aligned RAX */ 2226 + if (svm->vmcb->save.rax & ~PAGE_MASK) 2225 2227 goto reinject; 2226 2228 2227 2229 /* Decode the instruction for usage later */ ··· 4291 4285 struct kvm_host_map map_save; 4292 4286 int ret; 4293 4287 4294 - if (is_guest_mode(vcpu)) { 4295 - /* FED8h - SVM Guest */ 4296 - put_smstate(u64, smstate, 0x7ed8, 1); 4297 - /* FEE0h - SVM Guest VMCB Physical Address */ 4298 - put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); 4288 + if (!is_guest_mode(vcpu)) 4289 + return 0; 4299 4290 4300 - svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; 4301 - svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 4302 - svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; 4291 + /* FED8h - SVM Guest */ 4292 + put_smstate(u64, smstate, 0x7ed8, 1); 4293 + /* FEE0h - SVM Guest VMCB Physical Address */ 4294 + put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); 4303 4295 4304 - ret = nested_svm_vmexit(svm); 4305 - if (ret) 4306 - return ret; 4296 + svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; 4297 + svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 4298 + svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; 4307 4299 4308 - /* 4309 - * KVM uses VMCB01 to store L1 host state while L2 runs but 4310 - * VMCB01 is going to be used during SMM and thus the state will 4311 - * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save 4312 - * area pointed to by MSR_VM_HSAVE_PA. 
APM guarantees that the 4313 - * format of the area is identical to guest save area offsetted 4314 - * by 0x400 (matches the offset of 'struct vmcb_save_area' 4315 - * within 'struct vmcb'). Note: HSAVE area may also be used by 4316 - * L1 hypervisor to save additional host context (e.g. KVM does 4317 - * that, see svm_prepare_guest_switch()) which must be 4318 - * preserved. 4319 - */ 4320 - if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), 4321 - &map_save) == -EINVAL) 4322 - return 1; 4300 + ret = nested_svm_vmexit(svm); 4301 + if (ret) 4302 + return ret; 4323 4303 4324 - BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); 4304 + /* 4305 + * KVM uses VMCB01 to store L1 host state while L2 runs but 4306 + * VMCB01 is going to be used during SMM and thus the state will 4307 + * be lost. Temporary save non-VMLOAD/VMSAVE state to the host save 4308 + * area pointed to by MSR_VM_HSAVE_PA. APM guarantees that the 4309 + * format of the area is identical to guest save area offsetted 4310 + * by 0x400 (matches the offset of 'struct vmcb_save_area' 4311 + * within 'struct vmcb'). Note: HSAVE area may also be used by 4312 + * L1 hypervisor to save additional host context (e.g. KVM does 4313 + * that, see svm_prepare_guest_switch()) which must be 4314 + * preserved. 
4315 + */ 4316 + if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), 4317 + &map_save) == -EINVAL) 4318 + return 1; 4325 4319 4326 - svm_copy_vmrun_state(map_save.hva + 0x400, 4327 - &svm->vmcb01.ptr->save); 4320 + BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); 4328 4321 4329 - kvm_vcpu_unmap(vcpu, &map_save, true); 4330 - } 4322 + svm_copy_vmrun_state(map_save.hva + 0x400, 4323 + &svm->vmcb01.ptr->save); 4324 + 4325 + kvm_vcpu_unmap(vcpu, &map_save, true); 4331 4326 return 0; 4332 4327 } 4333 4328 ··· 4336 4329 { 4337 4330 struct vcpu_svm *svm = to_svm(vcpu); 4338 4331 struct kvm_host_map map, map_save; 4339 - int ret = 0; 4332 + u64 saved_efer, vmcb12_gpa; 4333 + struct vmcb *vmcb12; 4334 + int ret; 4340 4335 4341 - if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) { 4342 - u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); 4343 - u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8); 4344 - u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); 4345 - struct vmcb *vmcb12; 4336 + if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) 4337 + return 0; 4346 4338 4347 - if (guest) { 4348 - if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) 4349 - return 1; 4339 + /* Non-zero if SMI arrived while vCPU was in guest mode. 
*/ 4340 + if (!GET_SMSTATE(u64, smstate, 0x7ed8)) 4341 + return 0; 4350 4342 4351 - if (!(saved_efer & EFER_SVME)) 4352 - return 1; 4343 + if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) 4344 + return 1; 4353 4345 4354 - if (kvm_vcpu_map(vcpu, 4355 - gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) 4356 - return 1; 4346 + saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); 4347 + if (!(saved_efer & EFER_SVME)) 4348 + return 1; 4357 4349 4358 - if (svm_allocate_nested(svm)) 4359 - return 1; 4350 + vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); 4351 + if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) 4352 + return 1; 4360 4353 4361 - vmcb12 = map.hva; 4354 + ret = 1; 4355 + if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL) 4356 + goto unmap_map; 4362 4357 4363 - nested_load_control_from_vmcb12(svm, &vmcb12->control); 4358 + if (svm_allocate_nested(svm)) 4359 + goto unmap_save; 4364 4360 4365 - ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12); 4366 - kvm_vcpu_unmap(vcpu, &map, true); 4361 + /* 4362 + * Restore L1 host state from L1 HSAVE area as VMCB01 was 4363 + * used during SMM (see svm_enter_smm()) 4364 + */ 4367 4365 4368 - /* 4369 - * Restore L1 host state from L1 HSAVE area as VMCB01 was 4370 - * used during SMM (see svm_enter_smm()) 4371 - */ 4372 - if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), 4373 - &map_save) == -EINVAL) 4374 - return 1; 4366 + svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); 4375 4367 4376 - svm_copy_vmrun_state(&svm->vmcb01.ptr->save, 4377 - map_save.hva + 0x400); 4368 + /* 4369 + * Enter the nested guest now 4370 + */ 4378 4371 4379 - kvm_vcpu_unmap(vcpu, &map_save, true); 4380 - } 4381 - } 4372 + vmcb12 = map.hva; 4373 + nested_load_control_from_vmcb12(svm, &vmcb12->control); 4374 + ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false); 4382 4375 4376 + unmap_save: 4377 + kvm_vcpu_unmap(vcpu, &map_save, true); 4378 + unmap_map: 4379 + kvm_vcpu_unmap(vcpu, &map, 
true); 4383 4380 return ret; 4384 4381 } 4385 4382
+2 -1
arch/x86/kvm/svm/svm.h
··· 459 459 return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI); 460 460 } 461 461 462 - int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12); 462 + int enter_svm_guest_mode(struct kvm_vcpu *vcpu, 463 + u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun); 463 464 void svm_leave_nested(struct vcpu_svm *svm); 464 465 void svm_free_nested(struct vcpu_svm *svm); 465 466 int svm_allocate_nested(struct vcpu_svm *svm);
+9 -3
arch/x86/kvm/vmx/evmcs.c
··· 353 353 switch (msr_index) { 354 354 case MSR_IA32_VMX_EXIT_CTLS: 355 355 case MSR_IA32_VMX_TRUE_EXIT_CTLS: 356 - ctl_high &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; 356 + ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; 357 357 break; 358 358 case MSR_IA32_VMX_ENTRY_CTLS: 359 359 case MSR_IA32_VMX_TRUE_ENTRY_CTLS: 360 - ctl_high &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; 360 + ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; 361 361 break; 362 362 case MSR_IA32_VMX_PROCBASED_CTLS2: 363 - ctl_high &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 363 + ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC; 364 + break; 365 + case MSR_IA32_VMX_PINBASED_CTLS: 366 + ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; 367 + break; 368 + case MSR_IA32_VMX_VMFUNC: 369 + ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC; 364 370 break; 365 371 } 366 372
+15 -9
arch/x86/kvm/vmx/nested.c
··· 2583 2583 * Guest state is invalid and unrestricted guest is disabled, 2584 2584 * which means L1 attempted VMEntry to L2 with invalid state. 2585 2585 * Fail the VMEntry. 2586 + * 2587 + * However when force loading the guest state (SMM exit or 2588 + * loading nested state after migration, it is possible to 2589 + * have invalid guest state now, which will be later fixed by 2590 + * restoring L2 register state 2586 2591 */ 2587 - if (CC(!vmx_guest_state_valid(vcpu))) { 2592 + if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) { 2588 2593 *entry_failure_code = ENTRY_FAIL_DEFAULT; 2589 2594 return -EINVAL; 2590 2595 } ··· 4356 4351 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 4357 4352 vmcs12->vm_exit_msr_load_count)) 4358 4353 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 4354 + 4355 + to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); 4359 4356 } 4360 4357 4361 4358 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) ··· 4906 4899 return -ENOMEM; 4907 4900 } 4908 4901 4909 - /* 4910 - * Emulate the VMXON instruction. 4911 - * Currently, we just remember that VMX is active, and do not save or even 4912 - * inspect the argument to VMXON (the so-called "VMXON pointer") because we 4913 - * do not currently need to store anything in that guest-allocated memory 4914 - * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their 4915 - * argument is different from the VMXON pointer (which the spec says they do). 4916 - */ 4902 + /* Emulate the VMXON instruction. */ 4917 4903 static int handle_vmon(struct kvm_vcpu *vcpu) 4918 4904 { 4919 4905 int ret; ··· 5902 5902 return true; 5903 5903 case EXIT_REASON_VMFUNC: 5904 5904 /* VM functions are emulated through L2->L0 vmexits. */ 5905 + return true; 5906 + case EXIT_REASON_BUS_LOCK: 5907 + /* 5908 + * At present, bus lock VM exit is never exposed to L1. 5909 + * Handle L2's bus locks in L0 directly. 
5910 + */ 5905 5911 return true; 5906 5912 default: 5907 5913 break;
+27 -12
arch/x86/kvm/vmx/vmx.c
··· 1323 1323 vmx_prepare_switch_to_host(to_vmx(vcpu)); 1324 1324 } 1325 1325 1326 - static bool emulation_required(struct kvm_vcpu *vcpu) 1326 + bool vmx_emulation_required(struct kvm_vcpu *vcpu) 1327 1327 { 1328 1328 return emulate_invalid_guest_state && !vmx_guest_state_valid(vcpu); 1329 1329 } ··· 1367 1367 vmcs_writel(GUEST_RFLAGS, rflags); 1368 1368 1369 1369 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM) 1370 - vmx->emulation_required = emulation_required(vcpu); 1370 + vmx->emulation_required = vmx_emulation_required(vcpu); 1371 1371 } 1372 1372 1373 1373 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) ··· 1837 1837 &msr_info->data)) 1838 1838 return 1; 1839 1839 /* 1840 - * Enlightened VMCS v1 doesn't have certain fields, but buggy 1841 - * Hyper-V versions are still trying to use corresponding 1842 - * features when they are exposed. Filter out the essential 1843 - * minimum. 1840 + * Enlightened VMCS v1 doesn't have certain VMCS fields but 1841 + * instead of just ignoring the features, different Hyper-V 1842 + * versions are either trying to use them and fail or do some 1843 + * sanity checking and refuse to boot. Filter all unsupported 1844 + * features out. 
1844 1845 */ 1845 1846 if (!msr_info->host_initiated && 1846 1847 vmx->nested.enlightened_vmcs_enabled) ··· 3078 3077 } 3079 3078 3080 3079 /* depends on vcpu->arch.cr0 to be set to a new value */ 3081 - vmx->emulation_required = emulation_required(vcpu); 3080 + vmx->emulation_required = vmx_emulation_required(vcpu); 3082 3081 } 3083 3082 3084 3083 static int vmx_get_max_tdp_level(void) ··· 3331 3330 { 3332 3331 __vmx_set_segment(vcpu, var, seg); 3333 3332 3334 - to_vmx(vcpu)->emulation_required = emulation_required(vcpu); 3333 + to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); 3335 3334 } 3336 3335 3337 3336 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) ··· 6622 6621 vmx->loaded_vmcs->soft_vnmi_blocked)) 6623 6622 vmx->loaded_vmcs->entry_time = ktime_get(); 6624 6623 6625 - /* Don't enter VMX if guest state is invalid, let the exit handler 6626 - start emulation until we arrive back to a valid state */ 6627 - if (vmx->emulation_required) 6624 + /* 6625 + * Don't enter VMX if guest state is invalid, let the exit handler 6626 + * start emulation until we arrive back to a valid state. Synthesize a 6627 + * consistency check VM-Exit due to invalid guest state and bail. 
6628 + */ 6629 + if (unlikely(vmx->emulation_required)) { 6630 + 6631 + /* We don't emulate invalid state of a nested guest */ 6632 + vmx->fail = is_guest_mode(vcpu); 6633 + 6634 + vmx->exit_reason.full = EXIT_REASON_INVALID_STATE; 6635 + vmx->exit_reason.failed_vmentry = 1; 6636 + kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1); 6637 + vmx->exit_qualification = ENTRY_FAIL_DEFAULT; 6638 + kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2); 6639 + vmx->exit_intr_info = 0; 6628 6640 return EXIT_FASTPATH_NONE; 6641 + } 6629 6642 6630 6643 trace_kvm_entry(vcpu); 6631 6644 ··· 6848 6833 */ 6849 6834 tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL); 6850 6835 if (tsx_ctrl) 6851 - vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR; 6836 + tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR; 6852 6837 } 6853 6838 6854 6839 err = alloc_loaded_vmcs(&vmx->vmcs01);
+1 -4
arch/x86/kvm/vmx/vmx.h
··· 248 248 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside 249 249 * of 64-bit mode or if EFER.SCE=1, thus the SYSCALL MSRs don't need to 250 250 * be loaded into hardware if those conditions aren't met. 251 - * nr_active_uret_msrs tracks the number of MSRs that need to be loaded 252 - * into hardware when running the guest. guest_uret_msrs[] is resorted 253 - * whenever the number of "active" uret MSRs is modified. 254 251 */ 255 252 struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS]; 256 - int nr_active_uret_msrs; 257 253 bool guest_uret_msrs_loaded; 258 254 #ifdef CONFIG_X86_64 259 255 u64 msr_host_kernel_gs_base; ··· 355 359 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel, 356 360 unsigned long fs_base, unsigned long gs_base); 357 361 int vmx_get_cpl(struct kvm_vcpu *vcpu); 362 + bool vmx_emulation_required(struct kvm_vcpu *vcpu); 358 363 unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu); 359 364 void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); 360 365 u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
+26 -2
arch/x86/kvm/x86.c
··· 1332 1332 MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13, 1333 1333 MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15, 1334 1334 MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17, 1335 + 1336 + MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3, 1337 + MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3, 1338 + MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2, 1339 + MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5, 1340 + MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2, 1341 + MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5, 1335 1342 }; 1336 1343 1337 1344 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)]; ··· 2976 2969 offsetof(struct compat_vcpu_info, time)); 2977 2970 if (vcpu->xen.vcpu_time_info_set) 2978 2971 kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0); 2979 - if (v == kvm_get_vcpu(v->kvm, 0)) 2972 + if (!v->vcpu_idx) 2980 2973 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); 2981 2974 return 0; 2982 2975 } ··· 7665 7658 7666 7659 /* Process a latched INIT or SMI, if any. 
*/ 7667 7660 kvm_make_request(KVM_REQ_EVENT, vcpu); 7661 + 7662 + /* 7663 + * Even if KVM_SET_SREGS2 loaded PDPTRs out of band, 7664 + * on SMM exit we still need to reload them from 7665 + * guest memory 7666 + */ 7667 + vcpu->arch.pdptrs_from_userspace = false; 7668 7668 } 7669 7669 7670 7670 kvm_mmu_reset_context(vcpu); ··· 10666 10652 int r; 10667 10653 10668 10654 vcpu->arch.last_vmentry_cpu = -1; 10655 + vcpu->arch.regs_avail = ~0; 10656 + vcpu->arch.regs_dirty = ~0; 10669 10657 10670 10658 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) 10671 10659 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; ··· 10908 10892 10909 10893 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); 10910 10894 kvm_rip_write(vcpu, 0xfff0); 10895 + 10896 + vcpu->arch.cr3 = 0; 10897 + kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); 10911 10898 10912 10899 /* 10913 10900 * CR0.CD/NW are set on RESET, preserved on INIT. Note, some versions ··· 11158 11139 11159 11140 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 11160 11141 { 11142 + int ret; 11143 + 11161 11144 if (type) 11162 11145 return -EINVAL; 11146 + 11147 + ret = kvm_page_track_init(kvm); 11148 + if (ret) 11149 + return ret; 11163 11150 11164 11151 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); 11165 11152 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); ··· 11199 11174 11200 11175 kvm_apicv_init(kvm); 11201 11176 kvm_hv_init_vm(kvm); 11202 - kvm_page_track_init(kvm); 11203 11177 kvm_mmu_init_vm(kvm); 11204 11178 kvm_xen_init_vm(kvm); 11205 11179
+2 -2
arch/x86/lib/insn.c
··· 37 37 ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) 38 38 39 39 #define __get_next(t, insn) \ 40 - ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); }) 40 + ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); }) 41 41 42 42 #define __peek_nbyte_next(t, insn, n) \ 43 - ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); }) 43 + ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); }) 44 44 45 45 #define get_next(t, insn) \ 46 46 ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
+18 -8
arch/x86/mm/fault.c
··· 710 710 711 711 static noinline void 712 712 kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code, 713 - unsigned long address, int signal, int si_code) 713 + unsigned long address, int signal, int si_code, 714 + u32 pkey) 714 715 { 715 716 WARN_ON_ONCE(user_mode(regs)); 716 717 ··· 736 735 737 736 set_signal_archinfo(address, error_code); 738 737 739 - /* XXX: hwpoison faults will set the wrong code. */ 740 - force_sig_fault(signal, si_code, (void __user *)address); 738 + if (si_code == SEGV_PKUERR) { 739 + force_sig_pkuerr((void __user *)address, pkey); 740 + } else { 741 + /* XXX: hwpoison faults will set the wrong code. */ 742 + force_sig_fault(signal, si_code, (void __user *)address); 743 + } 741 744 } 742 745 743 746 /* ··· 803 798 struct task_struct *tsk = current; 804 799 805 800 if (!user_mode(regs)) { 806 - kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code); 801 + kernelmode_fixup_or_oops(regs, error_code, address, 802 + SIGSEGV, si_code, pkey); 807 803 return; 808 804 } 809 805 ··· 936 930 { 937 931 /* Kernel mode? Handle exceptions or die: */ 938 932 if (!user_mode(regs)) { 939 - kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR); 933 + kernelmode_fixup_or_oops(regs, error_code, address, 934 + SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY); 940 935 return; 941 936 } 942 937 ··· 1403 1396 */ 1404 1397 if (!user_mode(regs)) 1405 1398 kernelmode_fixup_or_oops(regs, error_code, address, 1406 - SIGBUS, BUS_ADRERR); 1399 + SIGBUS, BUS_ADRERR, 1400 + ARCH_DEFAULT_PKEY); 1407 1401 return; 1408 1402 } 1409 1403 ··· 1424 1416 return; 1425 1417 1426 1418 if (fatal_signal_pending(current) && !user_mode(regs)) { 1427 - kernelmode_fixup_or_oops(regs, error_code, address, 0, 0); 1419 + kernelmode_fixup_or_oops(regs, error_code, address, 1420 + 0, 0, ARCH_DEFAULT_PKEY); 1428 1421 return; 1429 1422 } 1430 1423 ··· 1433 1424 /* Kernel mode? 
Handle exceptions or die: */ 1434 1425 if (!user_mode(regs)) { 1435 1426 kernelmode_fixup_or_oops(regs, error_code, address, 1436 - SIGSEGV, SEGV_MAPERR); 1427 + SIGSEGV, SEGV_MAPERR, 1428 + ARCH_DEFAULT_PKEY); 1437 1429 return; 1438 1430 } 1439 1431
+48 -18
arch/x86/net/bpf_jit_comp.c
··· 1341 1341 if (insn->imm == (BPF_AND | BPF_FETCH) || 1342 1342 insn->imm == (BPF_OR | BPF_FETCH) || 1343 1343 insn->imm == (BPF_XOR | BPF_FETCH)) { 1344 - u8 *branch_target; 1345 1344 bool is64 = BPF_SIZE(insn->code) == BPF_DW; 1346 1345 u32 real_src_reg = src_reg; 1346 + u32 real_dst_reg = dst_reg; 1347 + u8 *branch_target; 1347 1348 1348 1349 /* 1349 1350 * Can't be implemented with a single x86 insn. ··· 1355 1354 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); 1356 1355 if (src_reg == BPF_REG_0) 1357 1356 real_src_reg = BPF_REG_AX; 1357 + if (dst_reg == BPF_REG_0) 1358 + real_dst_reg = BPF_REG_AX; 1358 1359 1359 1360 branch_target = prog; 1360 1361 /* Load old value */ 1361 1362 emit_ldx(&prog, BPF_SIZE(insn->code), 1362 - BPF_REG_0, dst_reg, insn->off); 1363 + BPF_REG_0, real_dst_reg, insn->off); 1363 1364 /* 1364 1365 * Perform the (commutative) operation locally, 1365 1366 * put the result in the AUX_REG. ··· 1372 1369 add_2reg(0xC0, AUX_REG, real_src_reg)); 1373 1370 /* Attempt to swap in new value */ 1374 1371 err = emit_atomic(&prog, BPF_CMPXCHG, 1375 - dst_reg, AUX_REG, insn->off, 1372 + real_dst_reg, AUX_REG, 1373 + insn->off, 1376 1374 BPF_SIZE(insn->code)); 1377 1375 if (WARN_ON(err)) 1378 1376 return err; ··· 1387 1383 /* Restore R0 after clobbering RAX */ 1388 1384 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); 1389 1385 break; 1390 - 1391 1386 } 1392 1387 1393 1388 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg, 1394 - insn->off, BPF_SIZE(insn->code)); 1389 + insn->off, BPF_SIZE(insn->code)); 1395 1390 if (err) 1396 1391 return err; 1397 1392 break; ··· 1747 1744 } 1748 1745 1749 1746 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, 1750 - struct bpf_prog *p, int stack_size, bool mod_ret) 1747 + struct bpf_prog *p, int stack_size, bool save_ret) 1751 1748 { 1752 1749 u8 *prog = *pprog; 1753 1750 u8 *jmp_insn; ··· 1780 1777 if (emit_call(&prog, p->bpf_func, prog)) 1781 1778 return -EINVAL; 1782 1779 1783 - /* 
BPF_TRAMP_MODIFY_RETURN trampolines can modify the return 1780 + /* 1781 + * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return 1784 1782 * of the previous call which is then passed on the stack to 1785 1783 * the next BPF program. 1784 + * 1785 + * BPF_TRAMP_FENTRY trampoline may need to return the return 1786 + * value of BPF_PROG_TYPE_STRUCT_OPS prog. 1786 1787 */ 1787 - if (mod_ret) 1788 + if (save_ret) 1788 1789 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); 1789 1790 1790 1791 /* replace 2 nops with JE insn, since jmp target is known */ ··· 1835 1828 } 1836 1829 1837 1830 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, 1838 - struct bpf_tramp_progs *tp, int stack_size) 1831 + struct bpf_tramp_progs *tp, int stack_size, 1832 + bool save_ret) 1839 1833 { 1840 1834 int i; 1841 1835 u8 *prog = *pprog; 1842 1836 1843 1837 for (i = 0; i < tp->nr_progs; i++) { 1844 - if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false)) 1838 + if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, 1839 + save_ret)) 1845 1840 return -EINVAL; 1846 1841 } 1847 1842 *pprog = prog; ··· 1884 1875 1885 1876 *pprog = prog; 1886 1877 return 0; 1878 + } 1879 + 1880 + static bool is_valid_bpf_tramp_flags(unsigned int flags) 1881 + { 1882 + if ((flags & BPF_TRAMP_F_RESTORE_REGS) && 1883 + (flags & BPF_TRAMP_F_SKIP_FRAME)) 1884 + return false; 1885 + 1886 + /* 1887 + * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops, 1888 + * and it must be used alone. 1889 + */ 1890 + if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) && 1891 + (flags & ~BPF_TRAMP_F_RET_FENTRY_RET)) 1892 + return false; 1893 + 1894 + return true; 1887 1895 } 1888 1896 1889 1897 /* Example: ··· 1975 1949 struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN]; 1976 1950 u8 **branches = NULL; 1977 1951 u8 *prog; 1952 + bool save_ret; 1978 1953 1979 1954 /* x86-64 supports up to 6 arguments. 
7+ can be added in the future */ 1980 1955 if (nr_args > 6) 1981 1956 return -ENOTSUPP; 1982 1957 1983 - if ((flags & BPF_TRAMP_F_RESTORE_REGS) && 1984 - (flags & BPF_TRAMP_F_SKIP_FRAME)) 1958 + if (!is_valid_bpf_tramp_flags(flags)) 1985 1959 return -EINVAL; 1986 1960 1987 - if (flags & BPF_TRAMP_F_CALL_ORIG) 1988 - stack_size += 8; /* room for return value of orig_call */ 1961 + /* room for return value of orig_call or fentry prog */ 1962 + save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); 1963 + if (save_ret) 1964 + stack_size += 8; 1989 1965 1990 1966 if (flags & BPF_TRAMP_F_IP_ARG) 1991 1967 stack_size += 8; /* room for IP address argument */ ··· 2033 2005 } 2034 2006 2035 2007 if (fentry->nr_progs) 2036 - if (invoke_bpf(m, &prog, fentry, stack_size)) 2008 + if (invoke_bpf(m, &prog, fentry, stack_size, 2009 + flags & BPF_TRAMP_F_RET_FENTRY_RET)) 2037 2010 return -EINVAL; 2038 2011 2039 2012 if (fmod_ret->nr_progs) { ··· 2081 2052 } 2082 2053 2083 2054 if (fexit->nr_progs) 2084 - if (invoke_bpf(m, &prog, fexit, stack_size)) { 2055 + if (invoke_bpf(m, &prog, fexit, stack_size, false)) { 2085 2056 ret = -EINVAL; 2086 2057 goto cleanup; 2087 2058 } ··· 2101 2072 ret = -EINVAL; 2102 2073 goto cleanup; 2103 2074 } 2104 - /* restore original return value back into RAX */ 2105 - emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 2106 2075 } 2076 + /* restore return value of orig_call or fentry prog back into RAX */ 2077 + if (save_ret) 2078 + emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); 2107 2079 2108 2080 EMIT1(0x5B); /* pop rbx */ 2109 2081 EMIT1(0xC9); /* leave */
+9 -6
arch/x86/xen/enlighten_pv.c
··· 755 755 preempt_enable(); 756 756 } 757 757 758 - static void xen_convert_trap_info(const struct desc_ptr *desc, 759 - struct trap_info *traps) 758 + static unsigned xen_convert_trap_info(const struct desc_ptr *desc, 759 + struct trap_info *traps, bool full) 760 760 { 761 761 unsigned in, out, count; 762 762 ··· 766 766 for (in = out = 0; in < count; in++) { 767 767 gate_desc *entry = (gate_desc *)(desc->address) + in; 768 768 769 - if (cvt_gate_to_trap(in, entry, &traps[out])) 769 + if (cvt_gate_to_trap(in, entry, &traps[out]) || full) 770 770 out++; 771 771 } 772 - traps[out].address = 0; 772 + 773 + return out; 773 774 } 774 775 775 776 void xen_copy_trap_info(struct trap_info *traps) 776 777 { 777 778 const struct desc_ptr *desc = this_cpu_ptr(&idt_desc); 778 779 779 - xen_convert_trap_info(desc, traps); 780 + xen_convert_trap_info(desc, traps, true); 780 781 } 781 782 782 783 /* Load a new IDT into Xen. In principle this can be per-CPU, so we ··· 787 786 { 788 787 static DEFINE_SPINLOCK(lock); 789 788 static struct trap_info traps[257]; 789 + unsigned out; 790 790 791 791 trace_xen_cpu_load_idt(desc); 792 792 ··· 795 793 796 794 memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); 797 795 798 - xen_convert_trap_info(desc, traps); 796 + out = xen_convert_trap_info(desc, traps, false); 797 + memset(&traps[out], 0, sizeof(traps[0])); 799 798 800 799 xen_mc_flush(); 801 800 if (HYPERVISOR_set_trap_table(traps))
+2 -2
arch/x86/xen/pci-swiotlb-xen.c
··· 18 18 #endif 19 19 #include <linux/export.h> 20 20 21 - int xen_swiotlb __read_mostly; 21 + static int xen_swiotlb __read_mostly; 22 22 23 23 /* 24 24 * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary ··· 56 56 return xen_swiotlb; 57 57 } 58 58 59 - void __init pci_xen_swiotlb_init(void) 59 + static void __init pci_xen_swiotlb_init(void) 60 60 { 61 61 if (xen_swiotlb) { 62 62 xen_swiotlb_init_early();
-4
arch/x86/xen/smp_pv.c
··· 290 290 291 291 gdt = get_cpu_gdt_rw(cpu); 292 292 293 - memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); 294 - 295 293 /* 296 294 * Bring up the CPU in cpu_bringup_and_idle() with the stack 297 295 * pointing just below where pt_regs would be if it were a normal ··· 305 307 ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle); 306 308 307 309 xen_copy_trap_info(ctxt->trap_ctxt); 308 - 309 - ctxt->ldt_ents = 0; 310 310 311 311 BUG_ON((unsigned long)gdt & ~PAGE_MASK); 312 312
+3 -13
block/bfq-iosched.c
··· 2662 2662 * are likely to increase the throughput. 2663 2663 */ 2664 2664 bfqq->new_bfqq = new_bfqq; 2665 - /* 2666 - * The above assignment schedules the following redirections: 2667 - * each time some I/O for bfqq arrives, the process that 2668 - * generated that I/O is disassociated from bfqq and 2669 - * associated with new_bfqq. Here we increases new_bfqq->ref 2670 - * in advance, adding the number of processes that are 2671 - * expected to be associated with new_bfqq as they happen to 2672 - * issue I/O. 2673 - */ 2674 2665 new_bfqq->ref += process_refs; 2675 2666 return new_bfqq; 2676 2667 } ··· 2723 2732 void *io_struct, bool request, struct bfq_io_cq *bic) 2724 2733 { 2725 2734 struct bfq_queue *in_service_bfqq, *new_bfqq; 2726 - 2727 - /* if a merge has already been setup, then proceed with that first */ 2728 - if (bfqq->new_bfqq) 2729 - return bfqq->new_bfqq; 2730 2735 2731 2736 /* 2732 2737 * Check delayed stable merge for rotational or non-queueing ··· 2824 2837 */ 2825 2838 if (bfq_too_late_for_merging(bfqq)) 2826 2839 return NULL; 2840 + 2841 + if (bfqq->new_bfqq) 2842 + return bfqq->new_bfqq; 2827 2843 2828 2844 if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) 2829 2845 return NULL;
+1 -1
block/bio.c
··· 1466 1466 if (!bio_integrity_endio(bio)) 1467 1467 return; 1468 1468 1469 - if (bio->bi_bdev) 1469 + if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED)) 1470 1470 rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio); 1471 1471 1472 1472 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+15 -8
block/bsg.c
··· 165 165 .llseek = default_llseek, 166 166 }; 167 167 168 + static void bsg_device_release(struct device *dev) 169 + { 170 + struct bsg_device *bd = container_of(dev, struct bsg_device, device); 171 + 172 + ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); 173 + kfree(bd); 174 + } 175 + 168 176 void bsg_unregister_queue(struct bsg_device *bd) 169 177 { 170 178 if (bd->queue->kobj.sd) 171 179 sysfs_remove_link(&bd->queue->kobj, "bsg"); 172 180 cdev_device_del(&bd->cdev, &bd->device); 173 - ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); 174 - kfree(bd); 181 + put_device(&bd->device); 175 182 } 176 183 EXPORT_SYMBOL_GPL(bsg_unregister_queue); 177 184 ··· 200 193 if (ret < 0) { 201 194 if (ret == -ENOSPC) 202 195 dev_err(parent, "bsg: too many bsg devices\n"); 203 - goto out_kfree; 196 + kfree(bd); 197 + return ERR_PTR(ret); 204 198 } 205 199 bd->device.devt = MKDEV(bsg_major, ret); 206 200 bd->device.class = bsg_class; 207 201 bd->device.parent = parent; 202 + bd->device.release = bsg_device_release; 208 203 dev_set_name(&bd->device, "%s", name); 209 204 device_initialize(&bd->device); 210 205 ··· 214 205 bd->cdev.owner = THIS_MODULE; 215 206 ret = cdev_device_add(&bd->cdev, &bd->device); 216 207 if (ret) 217 - goto out_ida_remove; 208 + goto out_put_device; 218 209 219 210 if (q->kobj.sd) { 220 211 ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg"); ··· 226 217 227 218 out_device_del: 228 219 cdev_device_del(&bd->cdev, &bd->device); 229 - out_ida_remove: 230 - ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt)); 231 - out_kfree: 232 - kfree(bd); 220 + out_put_device: 221 + put_device(&bd->device); 233 222 return ERR_PTR(ret); 234 223 } 235 224 EXPORT_SYMBOL_GPL(bsg_register_queue);
+10 -11
block/fops.c
··· 14 14 #include <linux/task_io_accounting_ops.h> 15 15 #include <linux/falloc.h> 16 16 #include <linux/suspend.h> 17 + #include <linux/fs.h> 17 18 #include "blk.h" 18 19 19 20 static struct inode *bdev_file_inode(struct file *file) ··· 554 553 static long blkdev_fallocate(struct file *file, int mode, loff_t start, 555 554 loff_t len) 556 555 { 557 - struct block_device *bdev = I_BDEV(bdev_file_inode(file)); 556 + struct inode *inode = bdev_file_inode(file); 557 + struct block_device *bdev = I_BDEV(inode); 558 558 loff_t end = start + len - 1; 559 559 loff_t isize; 560 560 int error; ··· 582 580 if ((start | len) & (bdev_logical_block_size(bdev) - 1)) 583 581 return -EINVAL; 584 582 583 + filemap_invalidate_lock(inode->i_mapping); 584 + 585 585 /* Invalidate the page cache, including dirty pages. */ 586 586 error = truncate_bdev_range(bdev, file->f_mode, start, end); 587 587 if (error) 588 - return error; 588 + goto fail; 589 589 590 590 switch (mode) { 591 591 case FALLOC_FL_ZERO_RANGE: ··· 604 600 GFP_KERNEL, 0); 605 601 break; 606 602 default: 607 - return -EOPNOTSUPP; 603 + error = -EOPNOTSUPP; 608 604 } 609 - if (error) 610 - return error; 611 605 612 - /* 613 - * Invalidate the page cache again; if someone wandered in and dirtied 614 - * a page, we just discard it - userspace has no way of knowing whether 615 - * the write happened before or after discard completing... 616 - */ 617 - return truncate_bdev_range(bdev, file->f_mode, start, end); 606 + fail: 607 + filemap_invalidate_unlock(inode->i_mapping); 608 + return error; 618 609 } 619 610 620 611 const struct file_operations def_blk_fops = {
+12
drivers/acpi/nfit/core.c
··· 3007 3007 ndr_desc->target_node = NUMA_NO_NODE; 3008 3008 } 3009 3009 3010 + /* Fallback to address based numa information if node lookup failed */ 3011 + if (ndr_desc->numa_node == NUMA_NO_NODE) { 3012 + ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address); 3013 + dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]", 3014 + NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); 3015 + } 3016 + if (ndr_desc->target_node == NUMA_NO_NODE) { 3017 + ndr_desc->target_node = phys_to_target_node(spa->address); 3018 + dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]", 3019 + NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end); 3020 + } 3021 + 3010 3022 /* 3011 3023 * Persistence domain bits are hierarchical, if 3012 3024 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
+7 -16
drivers/acpi/osl.c
··· 284 284 #define should_use_kmap(pfn) page_is_ram(pfn) 285 285 #endif 286 286 287 - static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz, 288 - bool memory) 287 + static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) 289 288 { 290 289 unsigned long pfn; 291 290 ··· 294 295 return NULL; 295 296 return (void __iomem __force *)kmap(pfn_to_page(pfn)); 296 297 } else 297 - return memory ? acpi_os_memmap(pg_off, pg_sz) : 298 - acpi_os_ioremap(pg_off, pg_sz); 298 + return acpi_os_ioremap(pg_off, pg_sz); 299 299 } 300 300 301 301 static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) ··· 309 311 } 310 312 311 313 /** 312 - * __acpi_os_map_iomem - Get a virtual address for a given physical address range. 314 + * acpi_os_map_iomem - Get a virtual address for a given physical address range. 313 315 * @phys: Start of the physical address range to map. 314 316 * @size: Size of the physical address range to map. 315 - * @memory: true if remapping memory, false if IO 316 317 * 317 318 * Look up the given physical address range in the list of existing ACPI memory 318 319 * mappings. If found, get a reference to it and return a pointer to it (its ··· 321 324 * During early init (when acpi_permanent_mmap has not been set yet) this 322 325 * routine simply calls __acpi_map_table() to get the job done. 
323 326 */ 324 - static void __iomem __ref 325 - *__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory) 327 + void __iomem __ref 328 + *acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) 326 329 { 327 330 struct acpi_ioremap *map; 328 331 void __iomem *virt; ··· 353 356 354 357 pg_off = round_down(phys, PAGE_SIZE); 355 358 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; 356 - virt = acpi_map(phys, size, memory); 359 + virt = acpi_map(phys, size); 357 360 if (!virt) { 358 361 mutex_unlock(&acpi_ioremap_lock); 359 362 kfree(map); ··· 372 375 mutex_unlock(&acpi_ioremap_lock); 373 376 return map->virt + (phys - map->phys); 374 377 } 375 - 376 - void __iomem *__ref 377 - acpi_os_map_iomem(acpi_physical_address phys, acpi_size size) 378 - { 379 - return __acpi_os_map_iomem(phys, size, false); 380 - } 381 378 EXPORT_SYMBOL_GPL(acpi_os_map_iomem); 382 379 383 380 void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size) 384 381 { 385 - return (void *)__acpi_os_map_iomem(phys, size, true); 382 + return (void *)acpi_os_map_iomem(phys, size); 386 383 } 387 384 EXPORT_SYMBOL_GPL(acpi_os_map_memory); 388 385
+46 -12
drivers/android/binder.c
··· 1852 1852 } 1853 1853 1854 1854 static void binder_transaction_buffer_release(struct binder_proc *proc, 1855 + struct binder_thread *thread, 1855 1856 struct binder_buffer *buffer, 1856 1857 binder_size_t failed_at, 1857 1858 bool is_failure) ··· 2012 2011 &proc->alloc, &fd, buffer, 2013 2012 offset, sizeof(fd)); 2014 2013 WARN_ON(err); 2015 - if (!err) 2014 + if (!err) { 2016 2015 binder_deferred_fd_close(fd); 2016 + /* 2017 + * Need to make sure the thread goes 2018 + * back to userspace to complete the 2019 + * deferred close 2020 + */ 2021 + if (thread) 2022 + thread->looper_need_return = true; 2023 + } 2017 2024 } 2018 2025 } break; 2019 2026 default: ··· 3047 3038 if (reply) { 3048 3039 binder_enqueue_thread_work(thread, tcomplete); 3049 3040 binder_inner_proc_lock(target_proc); 3050 - if (target_thread->is_dead || target_proc->is_frozen) { 3051 - return_error = target_thread->is_dead ? 3052 - BR_DEAD_REPLY : BR_FROZEN_REPLY; 3041 + if (target_thread->is_dead) { 3042 + return_error = BR_DEAD_REPLY; 3053 3043 binder_inner_proc_unlock(target_proc); 3054 3044 goto err_dead_proc_or_thread; 3055 3045 } ··· 3113 3105 err_copy_data_failed: 3114 3106 binder_free_txn_fixups(t); 3115 3107 trace_binder_transaction_failed_buffer_release(t->buffer); 3116 - binder_transaction_buffer_release(target_proc, t->buffer, 3108 + binder_transaction_buffer_release(target_proc, NULL, t->buffer, 3117 3109 buffer_offset, true); 3118 3110 if (target_node) 3119 3111 binder_dec_node_tmpref(target_node); ··· 3192 3184 * Cleanup buffer and free it. 
3193 3185 */ 3194 3186 static void 3195 - binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) 3187 + binder_free_buf(struct binder_proc *proc, 3188 + struct binder_thread *thread, 3189 + struct binder_buffer *buffer) 3196 3190 { 3197 3191 binder_inner_proc_lock(proc); 3198 3192 if (buffer->transaction) { ··· 3222 3212 binder_node_inner_unlock(buf_node); 3223 3213 } 3224 3214 trace_binder_transaction_buffer_release(buffer); 3225 - binder_transaction_buffer_release(proc, buffer, 0, false); 3215 + binder_transaction_buffer_release(proc, thread, buffer, 0, false); 3226 3216 binder_alloc_free_buf(&proc->alloc, buffer); 3227 3217 } 3228 3218 ··· 3424 3414 proc->pid, thread->pid, (u64)data_ptr, 3425 3415 buffer->debug_id, 3426 3416 buffer->transaction ? "active" : "finished"); 3427 - binder_free_buf(proc, buffer); 3417 + binder_free_buf(proc, thread, buffer); 3428 3418 break; 3429 3419 } 3430 3420 ··· 4117 4107 buffer->transaction = NULL; 4118 4108 binder_cleanup_transaction(t, "fd fixups failed", 4119 4109 BR_FAILED_REPLY); 4120 - binder_free_buf(proc, buffer); 4110 + binder_free_buf(proc, thread, buffer); 4121 4111 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 4122 4112 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", 4123 4113 proc->pid, thread->pid, ··· 4658 4648 return 0; 4659 4649 } 4660 4650 4651 + static bool binder_txns_pending_ilocked(struct binder_proc *proc) 4652 + { 4653 + struct rb_node *n; 4654 + struct binder_thread *thread; 4655 + 4656 + if (proc->outstanding_txns > 0) 4657 + return true; 4658 + 4659 + for (n = rb_first(&proc->threads); n; n = rb_next(n)) { 4660 + thread = rb_entry(n, struct binder_thread, rb_node); 4661 + if (thread->transaction_stack) 4662 + return true; 4663 + } 4664 + return false; 4665 + } 4666 + 4661 4667 static int binder_ioctl_freeze(struct binder_freeze_info *info, 4662 4668 struct binder_proc *target_proc) 4663 4669 { ··· 4705 4679 (!target_proc->outstanding_txns), 4706 4680 
msecs_to_jiffies(info->timeout_ms)); 4707 4681 4708 - if (!ret && target_proc->outstanding_txns) 4709 - ret = -EAGAIN; 4682 + /* Check pending transactions that wait for reply */ 4683 + if (ret >= 0) { 4684 + binder_inner_proc_lock(target_proc); 4685 + if (binder_txns_pending_ilocked(target_proc)) 4686 + ret = -EAGAIN; 4687 + binder_inner_proc_unlock(target_proc); 4688 + } 4710 4689 4711 4690 if (ret < 0) { 4712 4691 binder_inner_proc_lock(target_proc); ··· 4727 4696 { 4728 4697 struct binder_proc *target_proc; 4729 4698 bool found = false; 4699 + __u32 txns_pending; 4730 4700 4731 4701 info->sync_recv = 0; 4732 4702 info->async_recv = 0; ··· 4737 4705 if (target_proc->pid == info->pid) { 4738 4706 found = true; 4739 4707 binder_inner_proc_lock(target_proc); 4740 - info->sync_recv |= target_proc->sync_recv; 4708 + txns_pending = binder_txns_pending_ilocked(target_proc); 4709 + info->sync_recv |= target_proc->sync_recv | 4710 + (txns_pending << 1); 4741 4711 info->async_recv |= target_proc->async_recv; 4742 4712 binder_inner_proc_unlock(target_proc); 4743 4713 }
+2
drivers/android/binder_internal.h
··· 378 378 * binder transactions 379 379 * (protected by @inner_lock) 380 380 * @sync_recv: process received sync transactions since last frozen 381 + * bit 0: received sync transaction after being frozen 382 + * bit 1: new pending sync transaction during freezing 381 383 * (protected by @inner_lock) 382 384 * @async_recv: process received async transactions since last frozen 383 385 * (protected by @inner_lock)
+63 -27
drivers/base/core.c
··· 95 95 96 96 list_add(&link->s_hook, &sup->consumers); 97 97 list_add(&link->c_hook, &con->suppliers); 98 + pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n", 99 + con, sup); 98 100 out: 99 101 mutex_unlock(&fwnode_link_lock); 100 102 101 103 return ret; 104 + } 105 + 106 + /** 107 + * __fwnode_link_del - Delete a link between two fwnode_handles. 108 + * @link: the fwnode_link to be deleted 109 + * 110 + * The fwnode_link_lock needs to be held when this function is called. 111 + */ 112 + static void __fwnode_link_del(struct fwnode_link *link) 113 + { 114 + pr_debug("%pfwP Dropping the fwnode link to %pfwP\n", 115 + link->consumer, link->supplier); 116 + list_del(&link->s_hook); 117 + list_del(&link->c_hook); 118 + kfree(link); 102 119 } 103 120 104 121 /** ··· 129 112 struct fwnode_link *link, *tmp; 130 113 131 114 mutex_lock(&fwnode_link_lock); 132 - list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) { 133 - list_del(&link->s_hook); 134 - list_del(&link->c_hook); 135 - kfree(link); 136 - } 115 + list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) 116 + __fwnode_link_del(link); 137 117 mutex_unlock(&fwnode_link_lock); 138 118 } 139 119 ··· 145 131 struct fwnode_link *link, *tmp; 146 132 147 133 mutex_lock(&fwnode_link_lock); 148 - list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) { 149 - list_del(&link->s_hook); 150 - list_del(&link->c_hook); 151 - kfree(link); 152 - } 134 + list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) 135 + __fwnode_link_del(link); 153 136 mutex_unlock(&fwnode_link_lock); 154 137 } 155 138 ··· 986 975 { 987 976 struct device_link *link; 988 977 int ret = 0; 978 + struct fwnode_handle *sup_fw; 989 979 990 980 /* 991 981 * Device waiting for supplier to become available is not allowed to ··· 995 983 mutex_lock(&fwnode_link_lock); 996 984 if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) && 997 985 !fw_devlink_is_permissive()) { 998 - dev_dbg(dev, "probe deferral - wait for 
supplier %pfwP\n", 999 - list_first_entry(&dev->fwnode->suppliers, 1000 - struct fwnode_link, 1001 - c_hook)->supplier); 986 + sup_fw = list_first_entry(&dev->fwnode->suppliers, 987 + struct fwnode_link, 988 + c_hook)->supplier; 989 + dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n", 990 + sup_fw); 1002 991 mutex_unlock(&fwnode_link_lock); 1003 992 return -EPROBE_DEFER; 1004 993 } ··· 1014 1001 if (link->status != DL_STATE_AVAILABLE && 1015 1002 !(link->flags & DL_FLAG_SYNC_STATE_ONLY)) { 1016 1003 device_links_missing_supplier(dev); 1017 - dev_dbg(dev, "probe deferral - supplier %s not ready\n", 1018 - dev_name(link->supplier)); 1004 + dev_err_probe(dev, -EPROBE_DEFER, 1005 + "supplier %s not ready\n", 1006 + dev_name(link->supplier)); 1019 1007 ret = -EPROBE_DEFER; 1020 1008 break; 1021 1009 } ··· 1736 1722 struct device *sup_dev; 1737 1723 int ret = 0; 1738 1724 1725 + /* 1726 + * In some cases, a device P might also be a supplier to its child node 1727 + * C. However, this would defer the probe of C until the probe of P 1728 + * completes successfully. This is perfectly fine in the device driver 1729 + * model. device_add() doesn't guarantee probe completion of the device 1730 + * by the time it returns. 1731 + * 1732 + * However, there are a few drivers that assume C will finish probing 1733 + * as soon as it's added and before P finishes probing. So, we provide 1734 + * a flag to let fw_devlink know not to delay the probe of C until the 1735 + * probe of P completes successfully. 1736 + * 1737 + * When such a flag is set, we can't create device links where P is the 1738 + * supplier of C as that would delay the probe of C. 1739 + */ 1740 + if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD && 1741 + fwnode_is_ancestor_of(sup_handle, con->fwnode)) 1742 + return -EINVAL; 1743 + 1739 1744 sup_dev = get_dev_from_fwnode(sup_handle); 1740 1745 if (sup_dev) { 1741 1746 /* ··· 1805 1772 * be broken by applying logic. 
Check for these types of cycles and 1806 1773 * break them so that devices in the cycle probe properly. 1807 1774 * 1808 - * If the supplier's parent is dependent on the consumer, then 1809 - * the consumer-supplier dependency is a false dependency. So, 1810 - * treat it as an invalid link. 1775 + * If the supplier's parent is dependent on the consumer, then the 1776 + * consumer and supplier have a cyclic dependency. Since fw_devlink 1777 + * can't tell which of the inferred dependencies are incorrect, don't 1778 + * enforce probe ordering between any of the devices in this cyclic 1779 + * dependency. Do this by relaxing all the fw_devlink device links in 1780 + * this cycle and by treating the fwnode link between the consumer and 1781 + * the supplier as an invalid dependency. 1811 1782 */ 1812 1783 sup_dev = fwnode_get_next_parent_dev(sup_handle); 1813 1784 if (sup_dev && device_is_dependent(con, sup_dev)) { 1814 - dev_dbg(con, "Not linking to %pfwP - False link\n", 1815 - sup_handle); 1785 + dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n", 1786 + sup_handle, dev_name(sup_dev)); 1787 + device_links_write_lock(); 1788 + fw_devlink_relax_cycle(con, sup_dev); 1789 + device_links_write_unlock(); 1816 1790 ret = -EINVAL; 1817 1791 } else { 1818 1792 /* ··· 1898 1858 if (!own_link || ret == -EAGAIN) 1899 1859 continue; 1900 1860 1901 - list_del(&link->s_hook); 1902 - list_del(&link->c_hook); 1903 - kfree(link); 1861 + __fwnode_link_del(link); 1904 1862 } 1905 1863 } 1906 1864 ··· 1950 1912 if (!own_link || ret == -EAGAIN) 1951 1913 continue; 1952 1914 1953 - list_del(&link->s_hook); 1954 - list_del(&link->c_hook); 1955 - kfree(link); 1915 + __fwnode_link_del(link); 1956 1916 1957 1917 /* If no device link was created, nothing more to do. */ 1958 1918 if (ret)
+3
drivers/base/swnode.c
··· 1116 1116 to_swnode(fwnode)->managed = true; 1117 1117 set_secondary_fwnode(dev, fwnode); 1118 1118 1119 + if (device_is_registered(dev)) 1120 + software_node_notify(dev); 1121 + 1119 1122 return 0; 1120 1123 } 1121 1124 EXPORT_SYMBOL_GPL(device_create_managed_software_node);
+17 -12
drivers/block/nbd.c
··· 97 97 98 98 atomic_t recv_threads; 99 99 wait_queue_head_t recv_wq; 100 - loff_t blksize; 100 + unsigned int blksize_bits; 101 101 loff_t bytesize; 102 102 #if IS_ENABLED(CONFIG_DEBUG_FS) 103 103 struct dentry *dbg_dir; 104 104 #endif 105 105 }; 106 + 107 + static inline unsigned int nbd_blksize(struct nbd_config *config) 108 + { 109 + return 1u << config->blksize_bits; 110 + } 106 111 107 112 struct nbd_device { 108 113 struct blk_mq_tag_set tag_set; ··· 151 146 152 147 #define NBD_MAGIC 0x68797548 153 148 154 - #define NBD_DEF_BLKSIZE 1024 149 + #define NBD_DEF_BLKSIZE_BITS 10 155 150 156 151 static unsigned int nbds_max = 16; 157 152 static int max_part = 16; ··· 322 317 loff_t blksize) 323 318 { 324 319 if (!blksize) 325 - blksize = NBD_DEF_BLKSIZE; 320 + blksize = 1u << NBD_DEF_BLKSIZE_BITS; 326 321 if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize)) 327 322 return -EINVAL; 328 323 329 324 nbd->config->bytesize = bytesize; 330 - nbd->config->blksize = blksize; 325 + nbd->config->blksize_bits = __ffs(blksize); 331 326 332 327 if (!nbd->task_recv) 333 328 return 0; ··· 1342 1337 args->index = i; 1343 1338 queue_work(nbd->recv_workq, &args->work); 1344 1339 } 1345 - return nbd_set_size(nbd, config->bytesize, config->blksize); 1340 + return nbd_set_size(nbd, config->bytesize, nbd_blksize(config)); 1346 1341 } 1347 1342 1348 1343 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev) ··· 1411 1406 case NBD_SET_BLKSIZE: 1412 1407 return nbd_set_size(nbd, config->bytesize, arg); 1413 1408 case NBD_SET_SIZE: 1414 - return nbd_set_size(nbd, arg, config->blksize); 1409 + return nbd_set_size(nbd, arg, nbd_blksize(config)); 1415 1410 case NBD_SET_SIZE_BLOCKS: 1416 - if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize)) 1411 + if (check_shl_overflow(arg, config->blksize_bits, &bytesize)) 1417 1412 return -EINVAL; 1418 - return nbd_set_size(nbd, bytesize, config->blksize); 1413 + return nbd_set_size(nbd, 
bytesize, nbd_blksize(config)); 1419 1414 case NBD_SET_TIMEOUT: 1420 1415 nbd_set_cmd_timeout(nbd, arg); 1421 1416 return 0; ··· 1481 1476 atomic_set(&config->recv_threads, 0); 1482 1477 init_waitqueue_head(&config->recv_wq); 1483 1478 init_waitqueue_head(&config->conn_wait); 1484 - config->blksize = NBD_DEF_BLKSIZE; 1479 + config->blksize_bits = NBD_DEF_BLKSIZE_BITS; 1485 1480 atomic_set(&config->live_connections, 0); 1486 1481 try_module_get(THIS_MODULE); 1487 1482 return config; ··· 1609 1604 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops); 1610 1605 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize); 1611 1606 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); 1612 - debugfs_create_u64("blocksize", 0444, dir, &config->blksize); 1607 + debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits); 1613 1608 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops); 1614 1609 1615 1610 return 0; ··· 1831 1826 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd) 1832 1827 { 1833 1828 struct nbd_config *config = nbd->config; 1834 - u64 bsize = config->blksize; 1829 + u64 bsize = nbd_blksize(config); 1835 1830 u64 bytes = config->bytesize; 1836 1831 1837 1832 if (info->attrs[NBD_ATTR_SIZE_BYTES]) ··· 1840 1835 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) 1841 1836 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]); 1842 1837 1843 - if (bytes != config->bytesize || bsize != config->blksize) 1838 + if (bytes != config->bytesize || bsize != nbd_blksize(config)) 1844 1839 return nbd_set_size(nbd, bytes, bsize); 1845 1840 return 0; 1846 1841 }
+4
drivers/bus/ti-sysc.c
··· 1464 1464 /* Quirks that need to be set based on detected module */ 1465 1465 SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff, 1466 1466 SYSC_MODULE_QUIRK_AESS), 1467 + /* Errata i893 handling for dra7 dcan1 and 2 */ 1468 + SYSC_QUIRK("dcan", 0x4ae3c000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 1469 + SYSC_QUIRK_CLKDM_NOAUTO), 1467 1470 SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 1468 1471 SYSC_QUIRK_CLKDM_NOAUTO), 1469 1472 SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff, ··· 2957 2954 break; 2958 2955 case SOC_AM3: 2959 2956 sysc_add_disabled(0x48310000); /* rng */ 2957 + break; 2960 2958 default: 2961 2959 break; 2962 2960 }
+1
drivers/comedi/comedi_fops.c
··· 3090 3090 mutex_lock(&dev->mutex); 3091 3091 rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file); 3092 3092 mutex_unlock(&dev->mutex); 3093 + kfree(insns); 3093 3094 return rc; 3094 3095 } 3095 3096
+8 -6
drivers/crypto/ccp/ccp-ops.c
··· 778 778 in_place ? DMA_BIDIRECTIONAL 779 779 : DMA_TO_DEVICE); 780 780 if (ret) 781 - goto e_ctx; 781 + goto e_aad; 782 782 783 783 if (in_place) { 784 784 dst = src; ··· 863 863 op.u.aes.size = 0; 864 864 ret = cmd_q->ccp->vdata->perform->aes(&op); 865 865 if (ret) 866 - goto e_dst; 866 + goto e_final_wa; 867 867 868 868 if (aes->action == CCP_AES_ACTION_ENCRYPT) { 869 869 /* Put the ciphered tag after the ciphertext. */ ··· 873 873 ret = ccp_init_dm_workarea(&tag, cmd_q, authsize, 874 874 DMA_BIDIRECTIONAL); 875 875 if (ret) 876 - goto e_tag; 876 + goto e_final_wa; 877 877 ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize); 878 - if (ret) 879 - goto e_tag; 878 + if (ret) { 879 + ccp_dm_free(&tag); 880 + goto e_final_wa; 881 + } 880 882 881 883 ret = crypto_memneq(tag.address, final_wa.address, 882 884 authsize) ? -EBADMSG : 0; 883 885 ccp_dm_free(&tag); 884 886 } 885 887 886 - e_tag: 888 + e_final_wa: 887 889 ccp_dm_free(&final_wa); 888 890 889 891 e_dst:
+1 -1
drivers/edac/dmc520_edac.c
··· 464 464 dimm->grain = pvt->mem_width_in_bytes; 465 465 dimm->dtype = dt; 466 466 dimm->mtype = mt; 467 - dimm->edac_mode = EDAC_FLAG_SECDED; 467 + dimm->edac_mode = EDAC_SECDED; 468 468 dimm->nr_pages = pages_per_rank / csi->nr_channels; 469 469 } 470 470 }
+1 -1
drivers/edac/synopsys_edac.c
··· 782 782 783 783 for (j = 0; j < csi->nr_channels; j++) { 784 784 dimm = csi->channels[j]->dimm; 785 - dimm->edac_mode = EDAC_FLAG_SECDED; 785 + dimm->edac_mode = EDAC_SECDED; 786 786 dimm->mtype = p_data->get_mtype(priv->baseaddr); 787 787 dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels; 788 788 dimm->grain = SYNPS_EDAC_ERR_GRAIN;
+1 -1
drivers/firmware/Kconfig
··· 204 204 205 205 config QCOM_SCM 206 206 tristate "Qcom SCM driver" 207 - depends on ARM || ARM64 207 + depends on ARCH_QCOM || COMPILE_TEST 208 208 depends on HAVE_ARM_SMCCC 209 209 select RESET_CONTROLLER 210 210
+1 -1
drivers/firmware/arm_scmi/Kconfig
··· 68 68 69 69 config ARM_SCMI_TRANSPORT_VIRTIO 70 70 bool "SCMI transport based on VirtIO" 71 - depends on VIRTIO 71 + depends on VIRTIO=y || VIRTIO=ARM_SCMI_PROTOCOL 72 72 select ARM_SCMI_HAVE_TRANSPORT 73 73 select ARM_SCMI_HAVE_MSG 74 74 help
+31 -13
drivers/firmware/arm_scmi/virtio.c
··· 110 110 if (vioch->is_rx) { 111 111 scmi_vio_feed_vq_rx(vioch, msg); 112 112 } else { 113 - unsigned long flags; 114 - 115 - spin_lock_irqsave(&vioch->lock, flags); 113 + /* Here IRQs are assumed to be already disabled by the caller */ 114 + spin_lock(&vioch->lock); 116 115 list_add(&msg->list, &vioch->free_list); 117 - spin_unlock_irqrestore(&vioch->lock, flags); 116 + spin_unlock(&vioch->lock); 118 117 } 119 118 } 120 119 121 120 static void scmi_vio_complete_cb(struct virtqueue *vqueue) 122 121 { 123 122 unsigned long ready_flags; 124 - unsigned long flags; 125 123 unsigned int length; 126 124 struct scmi_vio_channel *vioch; 127 125 struct scmi_vio_msg *msg; ··· 138 140 goto unlock_ready_out; 139 141 } 140 142 141 - spin_lock_irqsave(&vioch->lock, flags); 143 + /* IRQs already disabled here no need to irqsave */ 144 + spin_lock(&vioch->lock); 142 145 if (cb_enabled) { 143 146 virtqueue_disable_cb(vqueue); 144 147 cb_enabled = false; ··· 150 151 goto unlock_out; 151 152 cb_enabled = true; 152 153 } 153 - spin_unlock_irqrestore(&vioch->lock, flags); 154 + spin_unlock(&vioch->lock); 154 155 155 156 if (msg) { 156 157 msg->rx_len = length; ··· 160 161 scmi_finalize_message(vioch, msg); 161 162 } 162 163 164 + /* 165 + * Release ready_lock and re-enable IRQs between loop iterations 166 + * to allow virtio_chan_free() to possibly kick in and set the 167 + * flag vioch->ready to false even in between processing of 168 + * messages, so as to force outstanding messages to be ignored 169 + * when system is shutting down. 
170 + */ 163 171 spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); 164 172 } 165 173 166 174 unlock_out: 167 - spin_unlock_irqrestore(&vioch->lock, flags); 175 + spin_unlock(&vioch->lock); 168 176 unlock_ready_out: 169 177 spin_unlock_irqrestore(&vioch->ready_lock, ready_flags); 170 178 } ··· 390 384 struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT]; 391 385 392 386 /* Only one SCMI VirtiO device allowed */ 393 - if (scmi_vdev) 394 - return -EINVAL; 387 + if (scmi_vdev) { 388 + dev_err(dev, 389 + "One SCMI Virtio device was already initialized: only one allowed.\n"); 390 + return -EBUSY; 391 + } 395 392 396 393 have_vq_rx = scmi_vio_have_vq_rx(vdev); 397 394 vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1; ··· 437 428 } 438 429 439 430 vdev->priv = channels; 440 - scmi_vdev = vdev; 431 + /* Ensure initialized scmi_vdev is visible */ 432 + smp_store_mb(scmi_vdev, vdev); 441 433 442 434 return 0; 443 435 } 444 436 445 437 static void scmi_vio_remove(struct virtio_device *vdev) 446 438 { 439 + /* 440 + * Once we get here, virtio_chan_free() will have already been called by 441 + * the SCMI core for any existing channel and, as a consequence, all the 442 + * virtio channels will have been already marked NOT ready, causing any 443 + * outstanding message on any vqueue to be ignored by complete_cb: now 444 + * we can just stop processing buffers and destroy the vqueues. 445 + */ 447 446 vdev->config->reset(vdev); 448 447 vdev->config->del_vqs(vdev); 449 - scmi_vdev = NULL; 448 + /* Ensure scmi_vdev is visible as NULL */ 449 + smp_store_mb(scmi_vdev, NULL); 450 450 } 451 451 452 452 static int scmi_vio_validate(struct virtio_device *vdev) ··· 494 476 return register_virtio_driver(&virtio_scmi_driver); 495 477 } 496 478 497 - static void __exit virtio_scmi_exit(void) 479 + static void virtio_scmi_exit(void) 498 480 { 499 481 unregister_virtio_driver(&virtio_scmi_driver); 500 482 }
+8 -6
drivers/fpga/dfl.c
··· 1019 1019 { 1020 1020 unsigned int irq_base, nr_irqs; 1021 1021 struct dfl_feature_info *finfo; 1022 + u8 revision = 0; 1022 1023 int ret; 1023 - u8 revision; 1024 1024 u64 v; 1025 1025 1026 - v = readq(binfo->ioaddr + ofst); 1027 - revision = FIELD_GET(DFH_REVISION, v); 1026 + if (fid != FEATURE_ID_AFU) { 1027 + v = readq(binfo->ioaddr + ofst); 1028 + revision = FIELD_GET(DFH_REVISION, v); 1028 1029 1029 - /* read feature size and id if inputs are invalid */ 1030 - size = size ? size : feature_size(v); 1031 - fid = fid ? fid : feature_id(v); 1030 + /* read feature size and id if inputs are invalid */ 1031 + size = size ? size : feature_size(v); 1032 + fid = fid ? fid : feature_id(v); 1033 + } 1032 1034 1033 1035 if (binfo->len - ofst < size) 1034 1036 return -EINVAL;
+5 -1
drivers/fpga/machxo2-spi.c
··· 225 225 goto fail; 226 226 227 227 get_status(spi, &status); 228 - if (test_bit(FAIL, &status)) 228 + if (test_bit(FAIL, &status)) { 229 + ret = -EINVAL; 229 230 goto fail; 231 + } 230 232 dump_status_reg(&status); 231 233 232 234 spi_message_init(&msg); ··· 315 313 dump_status_reg(&status); 316 314 if (!test_bit(DONE, &status)) { 317 315 machxo2_cleanup(mgr); 316 + ret = -EINVAL; 318 317 goto fail; 319 318 } 320 319 ··· 338 335 break; 339 336 if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) { 340 337 machxo2_cleanup(mgr); 338 + ret = -EINVAL; 341 339 goto fail; 342 340 } 343 341 } while (1);
+1 -1
drivers/gpio/gpio-aspeed-sgpio.c
··· 395 395 reg = ioread32(bank_reg(data, bank, reg_irq_status)); 396 396 397 397 for_each_set_bit(p, &reg, 32) 398 - generic_handle_domain_irq(gc->irq.domain, i * 32 + p); 398 + generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2); 399 399 } 400 400 401 401 chained_irq_exit(ic, desc);
+2 -9
drivers/gpio/gpio-pca953x.c
··· 468 468 mutex_lock(&chip->i2c_lock); 469 469 ret = regmap_read(chip->regmap, inreg, &reg_val); 470 470 mutex_unlock(&chip->i2c_lock); 471 - if (ret < 0) { 472 - /* 473 - * NOTE: 474 - * diagnostic already emitted; that's all we should 475 - * do unless gpio_*_value_cansleep() calls become different 476 - * from their nonsleeping siblings (and report faults). 477 - */ 478 - return 0; 479 - } 471 + if (ret < 0) 472 + return ret; 480 473 481 474 return !!(reg_val & bit); 482 475 }
+24 -2
drivers/gpio/gpio-rockchip.c
··· 141 141 u32 data; 142 142 143 143 data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr); 144 - if (data & BIT(offset)) 144 + if (data) 145 145 return GPIO_LINE_DIRECTION_OUT; 146 146 147 147 return GPIO_LINE_DIRECTION_IN; ··· 195 195 unsigned int cur_div_reg; 196 196 u64 div; 197 197 198 - if (!IS_ERR(bank->db_clk)) { 198 + if (bank->gpio_type == GPIO_TYPE_V2 && !IS_ERR(bank->db_clk)) { 199 199 div_debounce_support = true; 200 200 freq = clk_get_rate(bank->db_clk); 201 201 max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq; ··· 689 689 struct device_node *pctlnp = of_get_parent(np); 690 690 struct pinctrl_dev *pctldev = NULL; 691 691 struct rockchip_pin_bank *bank = NULL; 692 + struct rockchip_pin_output_deferred *cfg; 692 693 static int gpio; 693 694 int id, ret; 694 695 ··· 717 716 if (ret) 718 717 return ret; 719 718 719 + /* 720 + * Prevent clashes with a deferred output setting 721 + * being added right at this moment. 722 + */ 723 + mutex_lock(&bank->deferred_lock); 724 + 720 725 ret = rockchip_gpiolib_register(bank); 721 726 if (ret) { 722 727 clk_disable_unprepare(bank->clk); 728 + mutex_unlock(&bank->deferred_lock); 723 729 return ret; 724 730 } 731 + 732 + while (!list_empty(&bank->deferred_output)) { 733 + cfg = list_first_entry(&bank->deferred_output, 734 + struct rockchip_pin_output_deferred, head); 735 + list_del(&cfg->head); 736 + 737 + ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg); 738 + if (ret) 739 + dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg); 740 + 741 + kfree(cfg); 742 + } 743 + 744 + mutex_unlock(&bank->deferred_lock); 725 745 726 746 platform_set_drvdata(pdev, bank); 727 747 dev_info(dev, "probed %pOF\n", np);
+2 -2
drivers/gpio/gpio-uniphier.c
··· 184 184 185 185 uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0); 186 186 187 - return irq_chip_mask_parent(data); 187 + irq_chip_mask_parent(data); 188 188 } 189 189 190 190 static void uniphier_gpio_irq_unmask(struct irq_data *data) ··· 194 194 195 195 uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask); 196 196 197 - return irq_chip_unmask_parent(data); 197 + irq_chip_unmask_parent(data); 198 198 } 199 199 200 200 static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+4 -2
drivers/gpio/gpiolib-acpi.c
··· 313 313 314 314 ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout); 315 315 if (ret) 316 - gpiochip_free_own_desc(desc); 316 + dev_warn(chip->parent, 317 + "Failed to set debounce-timeout for pin 0x%04X, err %d\n", 318 + pin, ret); 317 319 318 - return ret ? ERR_PTR(ret) : desc; 320 + return desc; 319 321 } 320 322 321 323 static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+31
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 837 837 return 0; 838 838 } 839 839 840 + /* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */ 841 + static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb) 842 + { 843 + u64 micro_tile_mode; 844 + 845 + /* Zero swizzle mode means linear */ 846 + if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0) 847 + return 0; 848 + 849 + micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE); 850 + switch (micro_tile_mode) { 851 + case 0: /* DISPLAY */ 852 + case 3: /* RENDER */ 853 + return 0; 854 + default: 855 + drm_dbg_kms(afb->base.dev, 856 + "Micro tile mode %llu not supported for scanout\n", 857 + micro_tile_mode); 858 + return -EINVAL; 859 + } 860 + } 861 + 840 862 static void get_block_dimensions(unsigned int block_log2, unsigned int cpp, 841 863 unsigned int *width, unsigned int *height) 842 864 { ··· 1125 1103 const struct drm_mode_fb_cmd2 *mode_cmd, 1126 1104 struct drm_gem_object *obj) 1127 1105 { 1106 + struct amdgpu_device *adev = drm_to_adev(dev); 1128 1107 int ret, i; 1129 1108 1130 1109 /* ··· 1144 1121 ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface); 1145 1122 if (ret) 1146 1123 return ret; 1124 + 1125 + if (!dev->mode_config.allow_fb_modifiers) { 1126 + drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, 1127 + "GFX9+ requires FB check based on format modifier\n"); 1128 + ret = check_tiling_flags_gfx6(rfb); 1129 + if (ret) 1130 + return ret; 1131 + } 1147 1132 1148 1133 if (dev->mode_config.allow_fb_modifiers && 1149 1134 !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 3599 3599 3600 3600 /* set static priority for a queue/ring */ 3601 3601 gfx_v9_0_mqd_set_priority(ring, mqd); 3602 - mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM); 3602 + mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM); 3603 3603 3604 3604 /* map_queues packet doesn't need activate the queue, 3605 3605 * so only kiq need set this field.
+2 -1
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 1098 1098 { 1099 1099 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1100 1100 1101 + gmc_v10_0_gart_disable(adev); 1102 + 1101 1103 if (amdgpu_sriov_vf(adev)) { 1102 1104 /* full access mode, so don't touch any GMC register */ 1103 1105 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); ··· 1108 1106 1109 1107 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); 1110 1108 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); 1111 - gmc_v10_0_gart_disable(adev); 1112 1109 1113 1110 return 0; 1114 1111 }
+2 -1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1794 1794 { 1795 1795 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1796 1796 1797 + gmc_v9_0_gart_disable(adev); 1798 + 1797 1799 if (amdgpu_sriov_vf(adev)) { 1798 1800 /* full access mode, so don't touch any GMC register */ 1799 1801 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); ··· 1804 1802 1805 1803 amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); 1806 1804 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); 1807 - gmc_v9_0_gart_disable(adev); 1808 1805 1809 1806 return 0; 1810 1807 }
+8
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
··· 868 868 msleep(1000); 869 869 } 870 870 871 + /* TODO: check whether can submit a doorbell request to raise 872 + * a doorbell fence to exit gfxoff. 873 + */ 874 + if (adev->in_s0ix) 875 + amdgpu_gfx_off_ctrl(adev, false); 876 + 871 877 sdma_v5_2_soft_reset(adev); 872 878 /* unhalt the MEs */ 873 879 sdma_v5_2_enable(adev, true); ··· 882 876 883 877 /* start the gfx rings and rlc compute queues */ 884 878 r = sdma_v5_2_gfx_resume(adev); 879 + if (adev->in_s0ix) 880 + amdgpu_gfx_off_ctrl(adev, true); 885 881 if (r) 886 882 return r; 887 883 r = sdma_v5_2_rlc_resume(adev);
-1
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 971 971 void kgd2kfd_device_exit(struct kfd_dev *kfd) 972 972 { 973 973 if (kfd->init_complete) { 974 - svm_migrate_fini((struct amdgpu_device *)kfd->kgd); 975 974 device_queue_manager_uninit(kfd->dqm); 976 975 kfd_interrupt_exit(kfd); 977 976 kfd_topology_remove_device(kfd);
+7 -9
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 891 891 pgmap->ops = &svm_migrate_pgmap_ops; 892 892 pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev); 893 893 pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; 894 + 895 + /* Device manager releases device-specific resources, memory region and 896 + * pgmap when driver disconnects from device. 897 + */ 894 898 r = devm_memremap_pages(adev->dev, pgmap); 895 899 if (IS_ERR(r)) { 896 900 pr_err("failed to register HMM device memory\n"); 901 + 902 + /* Disable SVM support capability */ 903 + pgmap->type = 0; 897 904 devm_release_mem_region(adev->dev, res->start, 898 905 res->end - res->start + 1); 899 906 return PTR_ERR(r); ··· 914 907 pr_info("HMM registered %ldMB device memory\n", size >> 20); 915 908 916 909 return 0; 917 - } 918 - 919 - void svm_migrate_fini(struct amdgpu_device *adev) 920 - { 921 - struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap; 922 - 923 - devm_memunmap_pages(adev->dev, pgmap); 924 - devm_release_mem_region(adev->dev, pgmap->range.start, 925 - pgmap->range.end - pgmap->range.start + 1); 926 910 }
-5
drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
··· 47 47 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); 48 48 49 49 int svm_migrate_init(struct amdgpu_device *adev); 50 - void svm_migrate_fini(struct amdgpu_device *adev); 51 50 52 51 #else 53 52 54 53 static inline int svm_migrate_init(struct amdgpu_device *adev) 55 54 { 56 55 return 0; 57 - } 58 - static inline void svm_migrate_fini(struct amdgpu_device *adev) 59 - { 60 - /* empty */ 61 56 } 62 57 63 58 #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
+19 -5
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 118 118 mmu_interval_notifier_remove(&prange->notifier); 119 119 } 120 120 121 + static bool 122 + svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr) 123 + { 124 + return dma_addr && !dma_mapping_error(dev, dma_addr) && 125 + !(dma_addr & SVM_RANGE_VRAM_DOMAIN); 126 + } 127 + 121 128 static int 122 129 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, 123 130 unsigned long offset, unsigned long npages, ··· 146 139 147 140 addr += offset; 148 141 for (i = 0; i < npages; i++) { 149 - if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]), 150 - "leaking dma mapping\n")) 142 + if (svm_is_valid_dma_mapping_addr(dev, addr[i])) 151 143 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir); 152 144 153 145 page = hmm_pfn_to_page(hmm_pfns[i]); ··· 215 209 return; 216 210 217 211 for (i = offset; i < offset + npages; i++) { 218 - if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i])) 212 + if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i])) 219 213 continue; 220 214 pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); 221 215 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); ··· 1171 1165 unsigned long last_start; 1172 1166 int last_domain; 1173 1167 int r = 0; 1174 - int64_t i; 1168 + int64_t i, j; 1175 1169 1176 1170 last_start = prange->start + offset; 1177 1171 ··· 1184 1178 for (i = offset; i < offset + npages; i++) { 1185 1179 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN; 1186 1180 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN; 1187 - if ((prange->start + i) < prange->last && 1181 + 1182 + /* Collect all pages in the same address range and memory domain 1183 + * that can be mapped with a single call to update mapping. 
1184 + */ 1185 + if (i < offset + npages - 1 && 1188 1186 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN)) 1189 1187 continue; 1190 1188 ··· 1211 1201 NULL, dma_addr, 1212 1202 &vm->last_update, 1213 1203 &table_freed); 1204 + 1205 + for (j = last_start - prange->start; j <= i; j++) 1206 + dma_addr[j] |= last_domain; 1207 + 1214 1208 if (r) { 1215 1209 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start); 1216 1210 goto out;
+2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 1115 1115 1116 1116 init_data.asic_id.pci_revision_id = adev->pdev->revision; 1117 1117 init_data.asic_id.hw_internal_rev = adev->external_rev_id; 1118 + init_data.asic_id.chip_id = adev->pdev->device; 1118 1119 1119 1120 init_data.asic_id.vram_width = adev->gmc.vram_width; 1120 1121 /* TODO: initialize init_data.asic_id.vram_type here!!!! */ ··· 1720 1719 linear_lut[i] = 0xFFFF * i / 15; 1721 1720 1722 1721 params.set = 0; 1722 + params.backlight_ramping_override = false; 1723 1723 params.backlight_ramping_start = 0xCCCC; 1724 1724 params.backlight_ramping_reduction = 0xCCCCCCCC; 1725 1725 params.backlight_lut_array_size = 16;
+2 -2
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
··· 42 42 #define DC_LOGGER \ 43 43 engine->ctx->logger 44 44 45 - #define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ 45 + #define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0) 46 46 #define IS_DC_I2CAUX_LOGGING_ENABLED() (false) 47 47 #define LOG_FLAG_Error_I2cAux LOG_ERROR 48 48 #define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX ··· 76 76 #define DEFAULT_AUX_ENGINE_MULT 0 77 77 #define DEFAULT_AUX_ENGINE_LENGTH 69 78 78 79 - #define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ 79 + #define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0) 80 80 81 81 static void release_engine( 82 82 struct dce_aux *engine)
+2
drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
··· 6867 6867 si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 6868 6868 si_thermal_start_thermal_controller(adev); 6869 6869 6870 + ni_update_current_ps(adev, boot_ps); 6871 + 6870 6872 return 0; 6871 6873 } 6872 6874
+1 -3
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
··· 793 793 { 794 794 struct device *dev = &pdev->dev; 795 795 struct decon_context *ctx; 796 - struct resource *res; 797 796 int ret; 798 797 int i; 799 798 ··· 817 818 ctx->clks[i] = clk; 818 819 } 819 820 820 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 821 - ctx->addr = devm_ioremap_resource(dev, res); 821 + ctx->addr = devm_platform_ioremap_resource(pdev, 0); 822 822 if (IS_ERR(ctx->addr)) 823 823 return PTR_ERR(ctx->addr); 824 824
+1 -3
drivers/gpu/drm/exynos/exynos_drm_dsi.c
··· 1738 1738 static int exynos_dsi_probe(struct platform_device *pdev) 1739 1739 { 1740 1740 struct device *dev = &pdev->dev; 1741 - struct resource *res; 1742 1741 struct exynos_dsi *dsi; 1743 1742 int ret, i; 1744 1743 ··· 1788 1789 } 1789 1790 } 1790 1791 1791 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1792 - dsi->reg_base = devm_ioremap_resource(dev, res); 1792 + dsi->reg_base = devm_platform_ioremap_resource(pdev, 0); 1793 1793 if (IS_ERR(dsi->reg_base)) 1794 1794 return PTR_ERR(dsi->reg_base); 1795 1795
+1 -4
drivers/gpu/drm/exynos/exynos_drm_fimc.c
··· 85 85 /* 86 86 * A structure of fimc context. 87 87 * 88 - * @regs_res: register resources. 89 88 * @regs: memory mapped io registers. 90 89 * @lock: locking of operations. 91 90 * @clocks: fimc clocks. ··· 102 103 struct exynos_drm_ipp_formats *formats; 103 104 unsigned int num_formats; 104 105 105 - struct resource *regs_res; 106 106 void __iomem *regs; 107 107 spinlock_t lock; 108 108 struct clk *clocks[FIMC_CLKS_MAX]; ··· 1325 1327 ctx->num_formats = num_formats; 1326 1328 1327 1329 /* resource memory */ 1328 - ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1329 - ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); 1330 + ctx->regs = devm_platform_ioremap_resource(pdev, 0); 1330 1331 if (IS_ERR(ctx->regs)) 1331 1332 return PTR_ERR(ctx->regs); 1332 1333
+1 -3
drivers/gpu/drm/exynos/exynos_drm_fimd.c
··· 1202 1202 return PTR_ERR(ctx->lcd_clk); 1203 1203 } 1204 1204 1205 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1206 - 1207 - ctx->regs = devm_ioremap_resource(dev, res); 1205 + ctx->regs = devm_platform_ioremap_resource(pdev, 0); 1208 1206 if (IS_ERR(ctx->regs)) 1209 1207 return PTR_ERR(ctx->regs); 1210 1208
+1 -4
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 1449 1449 static int g2d_probe(struct platform_device *pdev) 1450 1450 { 1451 1451 struct device *dev = &pdev->dev; 1452 - struct resource *res; 1453 1452 struct g2d_data *g2d; 1454 1453 int ret; 1455 1454 ··· 1490 1491 clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); 1491 1492 clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); 1492 1493 1493 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1494 - 1495 - g2d->regs = devm_ioremap_resource(dev, res); 1494 + g2d->regs = devm_platform_ioremap_resource(pdev, 0); 1496 1495 if (IS_ERR(g2d->regs)) { 1497 1496 ret = PTR_ERR(g2d->regs); 1498 1497 goto err_put_clk;
+1 -5
drivers/gpu/drm/exynos/exynos_drm_gsc.c
··· 86 86 /* 87 87 * A structure of gsc context. 88 88 * 89 - * @regs_res: register resources. 90 89 * @regs: memory mapped io registers. 91 90 * @gsc_clk: gsc gate clock. 92 91 * @sc: scaler infomations. ··· 102 103 struct exynos_drm_ipp_formats *formats; 103 104 unsigned int num_formats; 104 105 105 - struct resource *regs_res; 106 106 void __iomem *regs; 107 107 const char **clk_names; 108 108 struct clk *clocks[GSC_MAX_CLOCKS]; ··· 1270 1272 } 1271 1273 } 1272 1274 1273 - /* resource memory */ 1274 - ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1275 - ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); 1275 + ctx->regs = devm_platform_ioremap_resource(pdev, 0); 1276 1276 if (IS_ERR(ctx->regs)) 1277 1277 return PTR_ERR(ctx->regs); 1278 1278
+1 -3
drivers/gpu/drm/exynos/exynos_drm_rotator.c
··· 278 278 static int rotator_probe(struct platform_device *pdev) 279 279 { 280 280 struct device *dev = &pdev->dev; 281 - struct resource *regs_res; 282 281 struct rot_context *rot; 283 282 const struct rot_variant *variant; 284 283 int irq; ··· 291 292 rot->formats = variant->formats; 292 293 rot->num_formats = variant->num_formats; 293 294 rot->dev = dev; 294 - regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 295 - rot->regs = devm_ioremap_resource(dev, regs_res); 295 + rot->regs = devm_platform_ioremap_resource(pdev, 0); 296 296 if (IS_ERR(rot->regs)) 297 297 return PTR_ERR(rot->regs); 298 298
+1 -3
drivers/gpu/drm/exynos/exynos_drm_scaler.c
··· 485 485 static int scaler_probe(struct platform_device *pdev) 486 486 { 487 487 struct device *dev = &pdev->dev; 488 - struct resource *regs_res; 489 488 struct scaler_context *scaler; 490 489 int irq; 491 490 int ret, i; ··· 497 498 (struct scaler_data *)of_device_get_match_data(dev); 498 499 499 500 scaler->dev = dev; 500 - regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 501 - scaler->regs = devm_ioremap_resource(dev, regs_res); 501 + scaler->regs = devm_platform_ioremap_resource(pdev, 0); 502 502 if (IS_ERR(scaler->regs)) 503 503 return PTR_ERR(scaler->regs); 504 504
+1 -3
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 1957 1957 struct hdmi_audio_infoframe *audio_infoframe; 1958 1958 struct device *dev = &pdev->dev; 1959 1959 struct hdmi_context *hdata; 1960 - struct resource *res; 1961 1960 int ret; 1962 1961 1963 1962 hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL); ··· 1978 1979 return ret; 1979 1980 } 1980 1981 1981 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1982 - hdata->regs = devm_ioremap_resource(dev, res); 1982 + hdata->regs = devm_platform_ioremap_resource(pdev, 0); 1983 1983 if (IS_ERR(hdata->regs)) { 1984 1984 ret = PTR_ERR(hdata->regs); 1985 1985 return ret;
+16 -3
drivers/gpu/drm/i915/display/intel_bw.c
··· 222 222 223 223 struct intel_sa_info { 224 224 u16 displayrtids; 225 - u8 deburst, deprogbwlimit; 225 + u8 deburst, deprogbwlimit, derating; 226 226 }; 227 227 228 228 static const struct intel_sa_info icl_sa_info = { 229 229 .deburst = 8, 230 230 .deprogbwlimit = 25, /* GB/s */ 231 231 .displayrtids = 128, 232 + .derating = 10, 232 233 }; 233 234 234 235 static const struct intel_sa_info tgl_sa_info = { 235 236 .deburst = 16, 236 237 .deprogbwlimit = 34, /* GB/s */ 237 238 .displayrtids = 256, 239 + .derating = 10, 238 240 }; 239 241 240 242 static const struct intel_sa_info rkl_sa_info = { 241 243 .deburst = 16, 242 244 .deprogbwlimit = 20, /* GB/s */ 243 245 .displayrtids = 128, 246 + .derating = 10, 244 247 }; 245 248 246 249 static const struct intel_sa_info adls_sa_info = { 247 250 .deburst = 16, 248 251 .deprogbwlimit = 38, /* GB/s */ 249 252 .displayrtids = 256, 253 + .derating = 10, 254 + }; 255 + 256 + static const struct intel_sa_info adlp_sa_info = { 257 + .deburst = 16, 258 + .deprogbwlimit = 38, /* GB/s */ 259 + .displayrtids = 256, 260 + .derating = 20, 250 261 }; 251 262 252 263 static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) ··· 313 302 bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct); 314 303 315 304 bi->deratedbw[j] = min(maxdebw, 316 - bw * 9 / 10); /* 90% */ 305 + bw * (100 - sa->derating) / 100); 317 306 318 307 drm_dbg_kms(&dev_priv->drm, 319 308 "BW%d / QGV %d: num_planes=%d deratedbw=%u\n", ··· 411 400 412 401 if (IS_DG2(dev_priv)) 413 402 dg2_get_bw_info(dev_priv); 414 - else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv)) 403 + else if (IS_ALDERLAKE_P(dev_priv)) 404 + icl_get_bw_info(dev_priv, &adlp_sa_info); 405 + else if (IS_ALDERLAKE_S(dev_priv)) 415 406 icl_get_bw_info(dev_priv, &adls_sa_info); 416 407 else if (IS_ROCKETLAKE(dev_priv)) 417 408 icl_get_bw_info(dev_priv, &rkl_sa_info);
+4 -1
drivers/gpu/drm/i915/display/intel_dmc.c
··· 805 805 */ 806 806 void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv) 807 807 { 808 + int id; 809 + 808 810 if (!HAS_DMC(dev_priv)) 809 811 return; 810 812 811 813 intel_dmc_ucode_suspend(dev_priv); 812 814 drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref); 813 815 814 - kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload); 816 + for (id = 0; id < DMC_FW_MAX; id++) 817 + kfree(dev_priv->dmc.dmc_info[id].payload); 815 818 }
+5 -4
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
··· 356 356 { 357 357 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); 358 358 359 - if (likely(obj)) { 360 - /* This releases all gem object bindings to the backend. */ 359 + if (likely(obj)) 361 360 i915_ttm_free_cached_io_st(obj); 362 - __i915_gem_free_object(obj); 363 - } 364 361 } 365 362 366 363 static struct intel_memory_region * ··· 872 875 { 873 876 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); 874 877 878 + /* This releases all gem object bindings to the backend. */ 879 + __i915_gem_free_object(obj); 880 + 875 881 i915_gem_object_release_memory_region(obj); 876 882 mutex_destroy(&obj->ttm.get_io_page.lock); 883 + 877 884 if (obj->ttm.created) 878 885 call_rcu(&obj->rcu, __i915_gem_free_object_rcu); 879 886 }
+3 -2
drivers/gpu/drm/i915/gt/intel_context.c
··· 362 362 return 0; 363 363 } 364 364 365 - static int sw_fence_dummy_notify(struct i915_sw_fence *sf, 366 - enum i915_sw_fence_notify state) 365 + static int __i915_sw_fence_call 366 + sw_fence_dummy_notify(struct i915_sw_fence *sf, 367 + enum i915_sw_fence_notify state) 367 368 { 368 369 return NOTIFY_DONE; 369 370 }
-2
drivers/gpu/drm/i915/gt/intel_rps.c
··· 882 882 if (!intel_rps_is_enabled(rps)) 883 883 return; 884 884 885 - GEM_BUG_ON(atomic_read(&rps->num_waiters)); 886 - 887 885 if (!intel_rps_clear_active(rps)) 888 886 return; 889 887
+5 -5
drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
··· 102 102 * | +-------+--------------------------------------------------------------+ 103 103 * | | 7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message | 104 104 * +---+-------+--------------------------------------------------------------+ 105 - * | 1 | 31:0 | +--------------------------------------------------------+ | 106 - * +---+-------+ | | | 107 - * |...| | | Embedded `HXG Message`_ | | 108 - * +---+-------+ | | | 109 - * | n | 31:0 | +--------------------------------------------------------+ | 105 + * | 1 | 31:0 | | 106 + * +---+-------+ | 107 + * |...| | [Embedded `HXG Message`_] | 108 + * +---+-------+ | 109 + * | n | 31:0 | | 110 110 * +---+-------+--------------------------------------------------------------+ 111 111 */ 112 112
+5 -5
drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
··· 38 38 * +---+-------+--------------------------------------------------------------+ 39 39 * | | Bits | Description | 40 40 * +===+=======+==============================================================+ 41 - * | 0 | 31:0 | +--------------------------------------------------------+ | 42 - * +---+-------+ | | | 43 - * |...| | | Embedded `HXG Message`_ | | 44 - * +---+-------+ | | | 45 - * | n | 31:0 | +--------------------------------------------------------+ | 41 + * | 0 | 31:0 | | 42 + * +---+-------+ | 43 + * |...| | [Embedded `HXG Message`_] | 44 + * +---+-------+ | 45 + * | n | 31:0 | | 46 46 * +---+-------+--------------------------------------------------------------+ 47 47 */ 48 48
+2 -2
drivers/gpu/drm/i915/gvt/scheduler.c
··· 576 576 577 577 /* No one is going to touch shadow bb from now on. */ 578 578 i915_gem_object_flush_map(bb->obj); 579 - i915_gem_object_unlock(bb->obj); 579 + i915_gem_ww_ctx_fini(&ww); 580 580 } 581 581 } 582 582 return 0; ··· 630 630 return ret; 631 631 } 632 632 633 - i915_gem_object_unlock(wa_ctx->indirect_ctx.obj); 633 + i915_gem_ww_ctx_fini(&ww); 634 634 635 635 /* FIXME: we are not tracking our pinned VMA leaving it 636 636 * up to the core to fix up the stray pin_count upon
+2 -9
drivers/gpu/drm/i915/i915_request.c
··· 829 829 i915_sw_fence_init(&rq->submit, submit_notify); 830 830 i915_sw_fence_init(&rq->semaphore, semaphore_notify); 831 831 832 - dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0); 833 - 834 832 rq->capture_list = NULL; 835 833 836 834 init_llist_head(&rq->execute_cb); ··· 903 905 rq->ring = ce->ring; 904 906 rq->execution_mask = ce->engine->mask; 905 907 906 - kref_init(&rq->fence.refcount); 907 - rq->fence.flags = 0; 908 - rq->fence.error = 0; 909 - INIT_LIST_HEAD(&rq->fence.cb_list); 910 - 911 908 ret = intel_timeline_get_seqno(tl, rq, &seqno); 912 909 if (ret) 913 910 goto err_free; 914 911 915 - rq->fence.context = tl->fence_context; 916 - rq->fence.seqno = seqno; 912 + dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 913 + tl->fence_context, seqno); 917 914 918 915 RCU_INIT_POINTER(rq->timeline, tl); 919 916 rq->hwsp_seqno = tl->hwsp_seqno;
-3
drivers/gpu/drm/tegra/dc.c
··· 1845 1845 bool prepare_bandwidth_transition) 1846 1846 { 1847 1847 const struct tegra_plane_state *old_tegra_state, *new_tegra_state; 1848 - const struct tegra_dc_state *old_dc_state, *new_dc_state; 1849 1848 u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw; 1850 1849 const struct drm_plane_state *old_plane_state; 1851 1850 const struct drm_crtc_state *old_crtc_state; ··· 1857 1858 return; 1858 1859 1859 1860 old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); 1860 - old_dc_state = to_const_dc_state(old_crtc_state); 1861 - new_dc_state = to_const_dc_state(crtc->state); 1862 1861 1863 1862 if (!crtc->state->active) { 1864 1863 if (!old_crtc_state->active)
-6
drivers/gpu/drm/tegra/dc.h
··· 35 35 return NULL; 36 36 } 37 37 38 - static inline const struct tegra_dc_state * 39 - to_const_dc_state(const struct drm_crtc_state *state) 40 - { 41 - return to_dc_state((struct drm_crtc_state *)state); 42 - } 43 - 44 38 struct tegra_dc_stats { 45 39 unsigned long frames; 46 40 unsigned long vblank;
+1 -1
drivers/gpu/drm/tegra/uapi.c
··· 222 222 mapping->iova = sg_dma_address(mapping->sgt->sgl); 223 223 } 224 224 225 - mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size; 225 + mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size; 226 226 227 227 err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX), 228 228 GFP_KERNEL);
+4 -2
drivers/gpu/host1x/fence.c
··· 15 15 #include "intr.h" 16 16 #include "syncpt.h" 17 17 18 - DEFINE_SPINLOCK(lock); 18 + static DEFINE_SPINLOCK(lock); 19 19 20 20 struct host1x_syncpt_fence { 21 21 struct dma_fence base; ··· 152 152 return ERR_PTR(-ENOMEM); 153 153 154 154 fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL); 155 - if (!fence->waiter) 155 + if (!fence->waiter) { 156 + kfree(fence); 156 157 return ERR_PTR(-ENOMEM); 158 + } 157 159 158 160 fence->sp = sp; 159 161 fence->threshold = threshold;
+4 -4
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
··· 255 255 if (!privdata->cl_data) 256 256 return -ENOMEM; 257 257 258 - rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata); 258 + mp2_select_ops(privdata); 259 + 260 + rc = amd_sfh_hid_client_init(privdata); 259 261 if (rc) 260 262 return rc; 261 263 262 - mp2_select_ops(privdata); 263 - 264 - return amd_sfh_hid_client_init(privdata); 264 + return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata); 265 265 } 266 266 267 267 static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
+7
drivers/hid/hid-apple.c
··· 336 336 337 337 /* 338 338 * MacBook JIS keyboard has wrong logical maximum 339 + * Magic Keyboard JIS has wrong logical maximum 339 340 */ 340 341 static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc, 341 342 unsigned int *rsize) 342 343 { 343 344 struct apple_sc *asc = hid_get_drvdata(hdev); 345 + 346 + if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) { 347 + hid_info(hdev, 348 + "fixing up Magic Keyboard JIS report descriptor\n"); 349 + rdesc[64] = rdesc[70] = 0xe7; 350 + } 344 351 345 352 if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 && 346 353 rdesc[53] == 0x65 && rdesc[59] == 0x65) {
+10 -3
drivers/hid/hid-betopff.c
··· 56 56 { 57 57 struct betopff_device *betopff; 58 58 struct hid_report *report; 59 - struct hid_input *hidinput = 60 - list_first_entry(&hid->inputs, struct hid_input, list); 59 + struct hid_input *hidinput; 61 60 struct list_head *report_list = 62 61 &hid->report_enum[HID_OUTPUT_REPORT].report_list; 63 - struct input_dev *dev = hidinput->input; 62 + struct input_dev *dev; 64 63 int field_count = 0; 65 64 int error; 66 65 int i, j; 66 + 67 + if (list_empty(&hid->inputs)) { 68 + hid_err(hid, "no inputs found\n"); 69 + return -ENODEV; 70 + } 71 + 72 + hidinput = list_first_entry(&hid->inputs, struct hid_input, list); 73 + dev = hidinput->input; 67 74 68 75 if (list_empty(report_list)) { 69 76 hid_err(hid, "no output reports found\n");
+3 -1
drivers/hid/hid-u2fzero.c
··· 198 198 } 199 199 200 200 ret = u2fzero_recv(dev, &req, &resp); 201 - if (ret < 0) 201 + 202 + /* ignore errors or packets without data */ 203 + if (ret < offsetof(struct u2f_hid_msg, init.data)) 202 204 return 0; 203 205 204 206 /* only take the minimum amount of data it is safe to take */
+8
drivers/hid/wacom_wac.c
··· 4746 4746 { "Wacom Intuos Pro S", 31920, 19950, 8191, 63, 4747 4747 INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, 4748 4748 .touch_max = 10 }; 4749 + static const struct wacom_features wacom_features_0x3c6 = 4750 + { "Wacom Intuos BT S", 15200, 9500, 4095, 63, 4751 + INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; 4752 + static const struct wacom_features wacom_features_0x3c8 = 4753 + { "Wacom Intuos BT M", 21600, 13500, 4095, 63, 4754 + INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; 4749 4755 4750 4756 static const struct wacom_features wacom_features_HID_ANY_ID = 4751 4757 { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; ··· 4925 4919 { USB_DEVICE_WACOM(0x37A) }, 4926 4920 { USB_DEVICE_WACOM(0x37B) }, 4927 4921 { BT_DEVICE_WACOM(0x393) }, 4922 + { BT_DEVICE_WACOM(0x3c6) }, 4923 + { BT_DEVICE_WACOM(0x3c8) }, 4928 4924 { USB_DEVICE_WACOM(0x4001) }, 4929 4925 { USB_DEVICE_WACOM(0x4004) }, 4930 4926 { USB_DEVICE_WACOM(0x5000) },
-6
drivers/hwmon/k10temp.c
··· 362 362 HWMON_T_INPUT | HWMON_T_LABEL, 363 363 HWMON_T_INPUT | HWMON_T_LABEL, 364 364 HWMON_T_INPUT | HWMON_T_LABEL), 365 - HWMON_CHANNEL_INFO(in, 366 - HWMON_I_INPUT | HWMON_I_LABEL, 367 - HWMON_I_INPUT | HWMON_I_LABEL), 368 - HWMON_CHANNEL_INFO(curr, 369 - HWMON_C_INPUT | HWMON_C_LABEL, 370 - HWMON_C_INPUT | HWMON_C_LABEL), 371 365 NULL 372 366 }; 373 367
+6 -2
drivers/hwmon/ltc2947-core.c
··· 989 989 return ret; 990 990 991 991 /* check external clock presence */ 992 - extclk = devm_clk_get(st->dev, NULL); 993 - if (!IS_ERR(extclk)) { 992 + extclk = devm_clk_get_optional(st->dev, NULL); 993 + if (IS_ERR(extclk)) 994 + return dev_err_probe(st->dev, PTR_ERR(extclk), 995 + "Failed to get external clock\n"); 996 + 997 + if (extclk) { 994 998 unsigned long rate_hz; 995 999 u8 pre = 0, div, tbctl; 996 1000 u64 aux;
+9 -3
drivers/hwmon/mlxreg-fan.c
··· 315 315 { 316 316 struct mlxreg_fan *fan = cdev->devdata; 317 317 unsigned long cur_state; 318 + int i, config = 0; 318 319 u32 regval; 319 - int i; 320 320 int err; 321 321 322 322 /* ··· 329 329 * overwritten. 330 330 */ 331 331 if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) { 332 + /* 333 + * This is configuration change, which is only supported through sysfs. 334 + * For configuration non-zero value is to be returned to avoid thermal 335 + * statistics update. 336 + */ 337 + config = 1; 332 338 state -= MLXREG_FAN_MAX_STATE; 333 339 for (i = 0; i < state; i++) 334 340 fan->cooling_levels[i] = state; ··· 349 343 350 344 cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval); 351 345 if (state < cur_state) 352 - return 0; 346 + return config; 353 347 354 348 state = cur_state; 355 349 } ··· 365 359 dev_err(fan->dev, "Failed to write PWM duty\n"); 366 360 return err; 367 361 } 368 - return 0; 362 + return config; 369 363 } 370 364 371 365 static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+5 -12
drivers/hwmon/occ/common.c
··· 340 340 if (val == OCC_TEMP_SENSOR_FAULT) 341 341 return -EREMOTEIO; 342 342 343 - /* 344 - * VRM doesn't return temperature, only alarm bit. This 345 - * attribute maps to tempX_alarm instead of tempX_input for 346 - * VRM 347 - */ 348 - if (temp->fru_type != OCC_FRU_TYPE_VRM) { 349 - /* sensor not ready */ 350 - if (val == 0) 351 - return -EAGAIN; 343 + /* sensor not ready */ 344 + if (val == 0) 345 + return -EAGAIN; 352 346 353 - val *= 1000; 354 - } 347 + val *= 1000; 355 348 break; 356 349 case 2: 357 350 val = temp->fru_type; ··· 879 886 0, i); 880 887 attr++; 881 888 882 - if (sensors->temp.version > 1 && 889 + if (sensors->temp.version == 2 && 883 890 temp->fru_type == OCC_FRU_TYPE_VRM) { 884 891 snprintf(attr->name, sizeof(attr->name), 885 892 "temp%d_alarm", s);
+8 -2
drivers/hwmon/pmbus/ibm-cffps.c
··· 171 171 cmd = CFFPS_SN_CMD; 172 172 break; 173 173 case CFFPS_DEBUGFS_MAX_POWER_OUT: 174 - rc = i2c_smbus_read_word_swapped(psu->client, 175 - CFFPS_MAX_POWER_OUT_CMD); 174 + if (psu->version == cffps1) { 175 + rc = i2c_smbus_read_word_swapped(psu->client, 176 + CFFPS_MAX_POWER_OUT_CMD); 177 + } else { 178 + rc = i2c_smbus_read_word_data(psu->client, 179 + CFFPS_MAX_POWER_OUT_CMD); 180 + } 181 + 176 182 if (rc < 0) 177 183 return rc; 178 184
+1 -1
drivers/hwmon/pmbus/mp2975.c
··· 54 54 55 55 #define MP2975_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \ 56 56 PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \ 57 - PMBUS_PHASE_VIRTUAL) 57 + PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL) 58 58 59 59 struct mp2975_data { 60 60 struct pmbus_driver_info info;
+40 -33
drivers/hwmon/tmp421.c
··· 100 100 s16 temp[4]; 101 101 }; 102 102 103 - static int temp_from_s16(s16 reg) 103 + static int temp_from_raw(u16 reg, bool extended) 104 104 { 105 105 /* Mask out status bits */ 106 106 int temp = reg & ~0xf; 107 107 108 - return (temp * 1000 + 128) / 256; 108 + if (extended) 109 + temp = temp - 64 * 256; 110 + else 111 + temp = (s16)temp; 112 + 113 + return DIV_ROUND_CLOSEST(temp * 1000, 256); 109 114 } 110 115 111 - static int temp_from_u16(u16 reg) 116 + static int tmp421_update_device(struct tmp421_data *data) 112 117 { 113 - /* Mask out status bits */ 114 - int temp = reg & ~0xf; 115 - 116 - /* Add offset for extended temperature range. */ 117 - temp -= 64 * 256; 118 - 119 - return (temp * 1000 + 128) / 256; 120 - } 121 - 122 - static struct tmp421_data *tmp421_update_device(struct device *dev) 123 - { 124 - struct tmp421_data *data = dev_get_drvdata(dev); 125 118 struct i2c_client *client = data->client; 119 + int ret = 0; 126 120 int i; 127 121 128 122 mutex_lock(&data->update_lock); 129 123 130 124 if (time_after(jiffies, data->last_updated + (HZ / 2)) || 131 125 !data->valid) { 132 - data->config = i2c_smbus_read_byte_data(client, 133 - TMP421_CONFIG_REG_1); 126 + ret = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1); 127 + if (ret < 0) 128 + goto exit; 129 + data->config = ret; 134 130 135 131 for (i = 0; i < data->channels; i++) { 136 - data->temp[i] = i2c_smbus_read_byte_data(client, 137 - TMP421_TEMP_MSB[i]) << 8; 138 - data->temp[i] |= i2c_smbus_read_byte_data(client, 139 - TMP421_TEMP_LSB[i]); 132 + ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]); 133 + if (ret < 0) 134 + goto exit; 135 + data->temp[i] = ret << 8; 136 + 137 + ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]); 138 + if (ret < 0) 139 + goto exit; 140 + data->temp[i] |= ret; 140 141 } 141 142 data->last_updated = jiffies; 142 143 data->valid = 1; 143 144 } 144 145 146 + exit: 145 147 mutex_unlock(&data->update_lock); 146 148 147 - return data; 149 + if 
(ret < 0) { 150 + data->valid = 0; 151 + return ret; 152 + } 153 + 154 + return 0; 148 155 } 149 156 150 157 static int tmp421_read(struct device *dev, enum hwmon_sensor_types type, 151 158 u32 attr, int channel, long *val) 152 159 { 153 - struct tmp421_data *tmp421 = tmp421_update_device(dev); 160 + struct tmp421_data *tmp421 = dev_get_drvdata(dev); 161 + int ret = 0; 162 + 163 + ret = tmp421_update_device(tmp421); 164 + if (ret) 165 + return ret; 154 166 155 167 switch (attr) { 156 168 case hwmon_temp_input: 157 - if (tmp421->config & TMP421_CONFIG_RANGE) 158 - *val = temp_from_u16(tmp421->temp[channel]); 159 - else 160 - *val = temp_from_s16(tmp421->temp[channel]); 169 + *val = temp_from_raw(tmp421->temp[channel], 170 + tmp421->config & TMP421_CONFIG_RANGE); 161 171 return 0; 162 172 case hwmon_temp_fault: 163 173 /* 164 - * The OPEN bit signals a fault. This is bit 0 of the temperature 165 - * register (low byte). 174 + * Any of OPEN or /PVLD bits indicate a hardware mulfunction 175 + * and the conversion result may be incorrect 166 176 */ 167 - *val = tmp421->temp[channel] & 0x01; 177 + *val = !!(tmp421->temp[channel] & 0x03); 168 178 return 0; 169 179 default: 170 180 return -EOPNOTSUPP; ··· 187 177 { 188 178 switch (attr) { 189 179 case hwmon_temp_fault: 190 - if (channel == 0) 191 - return 0; 192 - return 0444; 193 180 case hwmon_temp_input: 194 181 return 0444; 195 182 default:
+11 -18
drivers/hwmon/w83791d.c
··· 273 273 char valid; /* !=0 if following fields are valid */ 274 274 unsigned long last_updated; /* In jiffies */ 275 275 276 - /* array of 2 pointers to subclients */ 277 - struct i2c_client *lm75[2]; 278 - 279 276 /* volts */ 280 277 u8 in[NUMBER_OF_VIN]; /* Register value */ 281 278 u8 in_max[NUMBER_OF_VIN]; /* Register value */ ··· 1254 1257 static int w83791d_detect_subclients(struct i2c_client *client) 1255 1258 { 1256 1259 struct i2c_adapter *adapter = client->adapter; 1257 - struct w83791d_data *data = i2c_get_clientdata(client); 1258 1260 int address = client->addr; 1259 1261 int i, id; 1260 1262 u8 val; ··· 1276 1280 } 1277 1281 1278 1282 val = w83791d_read(client, W83791D_REG_I2C_SUBADDR); 1279 - if (!(val & 0x08)) 1280 - data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter, 1281 - 0x48 + (val & 0x7)); 1282 - if (!(val & 0x80)) { 1283 - if (!IS_ERR(data->lm75[0]) && 1284 - ((val & 0x7) == ((val >> 4) & 0x7))) { 1285 - dev_err(&client->dev, 1286 - "duplicate addresses 0x%x, " 1287 - "use force_subclient\n", 1288 - data->lm75[0]->addr); 1289 - return -ENODEV; 1290 - } 1291 - data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter, 1292 - 0x48 + ((val >> 4) & 0x7)); 1283 + 1284 + if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) { 1285 + dev_err(&client->dev, 1286 + "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7)); 1287 + return -ENODEV; 1293 1288 } 1289 + 1290 + if (!(val & 0x08)) 1291 + devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (val & 0x7)); 1292 + 1293 + if (!(val & 0x80)) 1294 + devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((val >> 4) & 0x7)); 1294 1295 1295 1296 return 0; 1296 1297 }
+11 -17
drivers/hwmon/w83792d.c
··· 264 264 char valid; /* !=0 if following fields are valid */ 265 265 unsigned long last_updated; /* In jiffies */ 266 266 267 - /* array of 2 pointers to subclients */ 268 - struct i2c_client *lm75[2]; 269 - 270 267 u8 in[9]; /* Register value */ 271 268 u8 in_max[9]; /* Register value */ 272 269 u8 in_min[9]; /* Register value */ ··· 924 927 int address = new_client->addr; 925 928 u8 val; 926 929 struct i2c_adapter *adapter = new_client->adapter; 927 - struct w83792d_data *data = i2c_get_clientdata(new_client); 928 930 929 931 id = i2c_adapter_id(adapter); 930 932 if (force_subclients[0] == id && force_subclients[1] == address) { ··· 942 946 } 943 947 944 948 val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR); 945 - if (!(val & 0x08)) 946 - data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter, 947 - 0x48 + (val & 0x7)); 948 - if (!(val & 0x80)) { 949 - if (!IS_ERR(data->lm75[0]) && 950 - ((val & 0x7) == ((val >> 4) & 0x7))) { 951 - dev_err(&new_client->dev, 952 - "duplicate addresses 0x%x, use force_subclient\n", 953 - data->lm75[0]->addr); 954 - return -ENODEV; 955 - } 956 - data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter, 957 - 0x48 + ((val >> 4) & 0x7)); 949 + 950 + if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) { 951 + dev_err(&new_client->dev, 952 + "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7)); 953 + return -ENODEV; 958 954 } 955 + 956 + if (!(val & 0x08)) 957 + devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + (val & 0x7)); 958 + 959 + if (!(val & 0x80)) 960 + devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + ((val >> 4) & 0x7)); 959 961 960 962 return 0; 961 963 }
+11 -15
drivers/hwmon/w83793.c
··· 202 202 } 203 203 204 204 struct w83793_data { 205 - struct i2c_client *lm75[2]; 206 205 struct device *hwmon_dev; 207 206 struct mutex update_lock; 208 207 char valid; /* !=0 if following fields are valid */ ··· 1565 1566 int address = client->addr; 1566 1567 u8 tmp; 1567 1568 struct i2c_adapter *adapter = client->adapter; 1568 - struct w83793_data *data = i2c_get_clientdata(client); 1569 1569 1570 1570 id = i2c_adapter_id(adapter); 1571 1571 if (force_subclients[0] == id && force_subclients[1] == address) { ··· 1584 1586 } 1585 1587 1586 1588 tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR); 1587 - if (!(tmp & 0x08)) 1588 - data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter, 1589 - 0x48 + (tmp & 0x7)); 1590 - if (!(tmp & 0x80)) { 1591 - if (!IS_ERR(data->lm75[0]) 1592 - && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) { 1593 - dev_err(&client->dev, 1594 - "duplicate addresses 0x%x, " 1595 - "use force_subclients\n", data->lm75[0]->addr); 1596 - return -ENODEV; 1597 - } 1598 - data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter, 1599 - 0x48 + ((tmp >> 4) & 0x7)); 1589 + 1590 + if (!(tmp & 0x88) && (tmp & 0x7) == ((tmp >> 4) & 0x7)) { 1591 + dev_err(&client->dev, 1592 + "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (tmp & 0x7)); 1593 + return -ENODEV; 1600 1594 } 1595 + 1596 + if (!(tmp & 0x08)) 1597 + devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (tmp & 0x7)); 1598 + 1599 + if (!(tmp & 0x80)) 1600 + devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((tmp >> 4) & 0x7)); 1601 1601 1602 1602 return 0; 1603 1603 }
+1
drivers/hwtracing/coresight/coresight-syscfg.c
··· 5 5 */ 6 6 7 7 #include <linux/platform_device.h> 8 + #include <linux/slab.h> 8 9 9 10 #include "coresight-config.h" 10 11 #include "coresight-etm-perf.h"
+45 -6
drivers/infiniband/core/cma.c
··· 1746 1746 } 1747 1747 } 1748 1748 1749 - static void cma_cancel_listens(struct rdma_id_private *id_priv) 1749 + static void _cma_cancel_listens(struct rdma_id_private *id_priv) 1750 1750 { 1751 1751 struct rdma_id_private *dev_id_priv; 1752 + 1753 + lockdep_assert_held(&lock); 1752 1754 1753 1755 /* 1754 1756 * Remove from listen_any_list to prevent added devices from spawning 1755 1757 * additional listen requests. 1756 1758 */ 1757 - mutex_lock(&lock); 1758 1759 list_del(&id_priv->list); 1759 1760 1760 1761 while (!list_empty(&id_priv->listen_list)) { ··· 1769 1768 rdma_destroy_id(&dev_id_priv->id); 1770 1769 mutex_lock(&lock); 1771 1770 } 1771 + } 1772 + 1773 + static void cma_cancel_listens(struct rdma_id_private *id_priv) 1774 + { 1775 + mutex_lock(&lock); 1776 + _cma_cancel_listens(id_priv); 1772 1777 mutex_unlock(&lock); 1773 1778 } 1774 1779 ··· 1783 1776 { 1784 1777 switch (state) { 1785 1778 case RDMA_CM_ADDR_QUERY: 1779 + /* 1780 + * We can avoid doing the rdma_addr_cancel() based on state, 1781 + * only RDMA_CM_ADDR_QUERY has a work that could still execute. 1782 + * Notice that the addr_handler work could still be exiting 1783 + * outside this state, however due to the interaction with the 1784 + * handler_mutex the work is guaranteed not to touch id_priv 1785 + * during exit. 
1786 + */ 1786 1787 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 1787 1788 break; 1788 1789 case RDMA_CM_ROUTE_QUERY: ··· 1825 1810 static void destroy_mc(struct rdma_id_private *id_priv, 1826 1811 struct cma_multicast *mc) 1827 1812 { 1813 + bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); 1814 + 1828 1815 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) 1829 1816 ib_sa_free_multicast(mc->sa_mc); 1830 1817 ··· 1843 1826 1844 1827 cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr, 1845 1828 &mgid); 1846 - cma_igmp_send(ndev, &mgid, false); 1829 + 1830 + if (!send_only) 1831 + cma_igmp_send(ndev, &mgid, false); 1832 + 1847 1833 dev_put(ndev); 1848 1834 } 1849 1835 ··· 2594 2574 return 0; 2595 2575 2596 2576 err_listen: 2597 - list_del(&id_priv->list); 2577 + _cma_cancel_listens(id_priv); 2598 2578 mutex_unlock(&lock); 2599 2579 if (to_destroy) 2600 2580 rdma_destroy_id(&to_destroy->id); ··· 3433 3413 if (dst_addr->sa_family == AF_IB) { 3434 3414 ret = cma_resolve_ib_addr(id_priv); 3435 3415 } else { 3416 + /* 3417 + * The FSM can return back to RDMA_CM_ADDR_BOUND after 3418 + * rdma_resolve_ip() is called, eg through the error 3419 + * path in addr_handler(). If this happens the existing 3420 + * request must be canceled before issuing a new one. 3421 + * Since canceling a request is a bit slow and this 3422 + * oddball path is rare, keep track once a request has 3423 + * been issued. The track turns out to be a permanent 3424 + * state since this is the only cancel as it is 3425 + * immediately before rdma_resolve_ip(). 
3426 + */ 3427 + if (id_priv->used_resolve_ip) 3428 + rdma_addr_cancel(&id->route.addr.dev_addr); 3429 + else 3430 + id_priv->used_resolve_ip = 1; 3436 3431 ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr, 3437 3432 &id->route.addr.dev_addr, 3438 3433 timeout_ms, addr_handler, ··· 3806 3771 int ret; 3807 3772 3808 3773 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { 3774 + struct sockaddr_in any_in = { 3775 + .sin_family = AF_INET, 3776 + .sin_addr.s_addr = htonl(INADDR_ANY), 3777 + }; 3778 + 3809 3779 /* For a well behaved ULP state will be RDMA_CM_IDLE */ 3810 - id->route.addr.src_addr.ss_family = AF_INET; 3811 - ret = rdma_bind_addr(id, cma_src_addr(id_priv)); 3780 + ret = rdma_bind_addr(id, (struct sockaddr *)&any_in); 3812 3781 if (ret) 3813 3782 return ret; 3814 3783 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+1
drivers/infiniband/core/cma_priv.h
··· 91 91 u8 afonly; 92 92 u8 timeout; 93 93 u8 min_rnr_timer; 94 + u8 used_resolve_ip; 94 95 enum ib_gid_type gid_type; 95 96 96 97 /*
+4 -4
drivers/infiniband/hw/hfi1/ipoib_tx.c
··· 876 876 struct hfi1_ipoib_txq *txq = &priv->txqs[q]; 877 877 u64 completed = atomic64_read(&txq->complete_txreqs); 878 878 879 - dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n", 880 - (unsigned long long)txq, q, 879 + dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n", 880 + txq, q, 881 881 __netif_subqueue_stopped(dev, txq->q_idx), 882 882 atomic_read(&txq->stops), 883 883 atomic_read(&txq->no_desc), 884 884 atomic_read(&txq->ring_full)); 885 - dd_dev_info(priv->dd, "sde %llx engine %u\n", 886 - (unsigned long long)txq->sde, 885 + dd_dev_info(priv->dd, "sde %p engine %u\n", 886 + txq->sde, 887 887 txq->sde ? txq->sde->this_idx : 0); 888 888 dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int); 889 889 dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
+22 -9
drivers/infiniband/hw/hns/hns_roce_cq.c
··· 326 326 INIT_LIST_HEAD(&hr_cq->rq_list); 327 327 } 328 328 329 - static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata, 330 - struct hns_roce_ib_create_cq *ucmd) 329 + static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata, 330 + struct hns_roce_ib_create_cq *ucmd) 331 331 { 332 332 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); 333 333 334 - if (udata) { 335 - if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) 336 - hr_cq->cqe_size = ucmd->cqe_size; 337 - else 338 - hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE; 339 - } else { 334 + if (!udata) { 340 335 hr_cq->cqe_size = hr_dev->caps.cqe_sz; 336 + return 0; 341 337 } 338 + 339 + if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) { 340 + if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE && 341 + ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) { 342 + ibdev_err(&hr_dev->ib_dev, 343 + "invalid cqe size %u.\n", ucmd->cqe_size); 344 + return -EINVAL; 345 + } 346 + 347 + hr_cq->cqe_size = ucmd->cqe_size; 348 + } else { 349 + hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE; 350 + } 351 + 352 + return 0; 342 353 } 343 354 344 355 int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, ··· 377 366 378 367 set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd); 379 368 380 - set_cqe_size(hr_cq, udata, &ucmd); 369 + ret = set_cqe_size(hr_cq, udata, &ucmd); 370 + if (ret) 371 + return ret; 381 372 382 373 ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr); 383 374 if (ret) {
+7 -6
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
··· 3299 3299 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) & 3300 3300 hr_cq->ib_cq.cqe); 3301 3301 owner_bit = hr_reg_read(dest, CQE_OWNER); 3302 - memcpy(dest, cqe, sizeof(*cqe)); 3302 + memcpy(dest, cqe, hr_cq->cqe_size); 3303 3303 hr_reg_write(dest, CQE_OWNER, owner_bit); 3304 3304 } 3305 3305 } ··· 4397 4397 hr_qp->path_mtu = ib_mtu; 4398 4398 4399 4399 mtu = ib_mtu_enum_to_int(ib_mtu); 4400 - if (WARN_ON(mtu < 0)) 4400 + if (WARN_ON(mtu <= 0)) 4401 + return -EINVAL; 4402 + #define MAX_LP_MSG_LEN 65536 4403 + /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */ 4404 + lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu); 4405 + if (WARN_ON(lp_pktn_ini >= 0xF)) 4401 4406 return -EINVAL; 4402 4407 4403 4408 if (attr_mask & IB_QP_PATH_MTU) { 4404 4409 hr_reg_write(context, QPC_MTU, ib_mtu); 4405 4410 hr_reg_clear(qpc_mask, QPC_MTU); 4406 4411 } 4407 - 4408 - #define MAX_LP_MSG_LEN 65536 4409 - /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */ 4410 - lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu); 4411 4412 4412 4413 hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini); 4413 4414 hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
+2 -2
drivers/infiniband/hw/irdma/cm.c
··· 3496 3496 original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT || 3497 3497 last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE || 3498 3498 last_ae == IRDMA_AE_BAD_CLOSE || 3499 - last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->reset)) { 3499 + last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) { 3500 3500 issue_close = 1; 3501 3501 iwqp->cm_id = NULL; 3502 3502 qp->term_flags = 0; ··· 4250 4250 teardown_entry); 4251 4251 attr.qp_state = IB_QPS_ERR; 4252 4252 irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL); 4253 - if (iwdev->reset) 4253 + if (iwdev->rf->reset) 4254 4254 irdma_cm_disconn(cm_node->iwqp); 4255 4255 irdma_rem_ref_cm_node(cm_node); 4256 4256 }
+11 -3
drivers/infiniband/hw/irdma/hw.c
··· 176 176 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: 177 177 qp->flush_code = FLUSH_GENERAL_ERR; 178 178 break; 179 + case IRDMA_AE_LLP_TOO_MANY_RETRIES: 180 + qp->flush_code = FLUSH_RETRY_EXC_ERR; 181 + break; 182 + case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: 183 + case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: 184 + case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: 185 + qp->flush_code = FLUSH_MW_BIND_ERR; 186 + break; 179 187 default: 180 188 qp->flush_code = FLUSH_FATAL_ERR; 181 189 break; ··· 1497 1489 1498 1490 irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false); 1499 1491 if (irdma_initialize_ieq(iwdev)) { 1500 - iwdev->reset = true; 1492 + iwdev->rf->reset = true; 1501 1493 rf->gen_ops.request_reset(rf); 1502 1494 } 1503 1495 } ··· 1640 1632 case IEQ_CREATED: 1641 1633 if (!iwdev->roce_mode) 1642 1634 irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, 1643 - iwdev->reset); 1635 + iwdev->rf->reset); 1644 1636 fallthrough; 1645 1637 case ILQ_CREATED: 1646 1638 if (!iwdev->roce_mode) 1647 1639 irdma_puda_dele_rsrc(&iwdev->vsi, 1648 1640 IRDMA_PUDA_RSRC_TYPE_ILQ, 1649 - iwdev->reset); 1641 + iwdev->rf->reset); 1650 1642 break; 1651 1643 default: 1652 1644 ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
+1 -1
drivers/infiniband/hw/irdma/i40iw_if.c
··· 55 55 56 56 iwdev = to_iwdev(ibdev); 57 57 if (reset) 58 - iwdev->reset = true; 58 + iwdev->rf->reset = true; 59 59 60 60 iwdev->iw_status = 0; 61 61 irdma_port_ibevent(iwdev);
-1
drivers/infiniband/hw/irdma/main.h
··· 346 346 bool roce_mode:1; 347 347 bool roce_dcqcn_en:1; 348 348 bool dcb:1; 349 - bool reset:1; 350 349 bool iw_ooo:1; 351 350 enum init_completion_state init_state; 352 351
+2
drivers/infiniband/hw/irdma/user.h
··· 102 102 FLUSH_REM_OP_ERR, 103 103 FLUSH_LOC_LEN_ERR, 104 104 FLUSH_FATAL_ERR, 105 + FLUSH_RETRY_EXC_ERR, 106 + FLUSH_MW_BIND_ERR, 105 107 }; 106 108 107 109 enum irdma_cmpl_status {
+1 -1
drivers/infiniband/hw/irdma/utils.c
··· 2507 2507 struct irdma_qp *qp = sc_qp->qp_uk.back_qp; 2508 2508 struct ib_qp_attr attr; 2509 2509 2510 - if (qp->iwdev->reset) 2510 + if (qp->iwdev->rf->reset) 2511 2511 return; 2512 2512 attr.qp_state = IB_QPS_ERR; 2513 2513
+6 -3
drivers/infiniband/hw/irdma/verbs.c
··· 535 535 irdma_qp_rem_ref(&iwqp->ibqp); 536 536 wait_for_completion(&iwqp->free_qp); 537 537 irdma_free_lsmm_rsrc(iwqp); 538 - if (!iwdev->reset) 539 - irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); 538 + irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); 540 539 541 540 if (!iwqp->user_mode) { 542 541 if (iwqp->iwscq) { ··· 2034 2035 /* Kmode allocations */ 2035 2036 int rsize; 2036 2037 2037 - if (entries > rf->max_cqe) { 2038 + if (entries < 1 || entries > rf->max_cqe) { 2038 2039 err_code = -EINVAL; 2039 2040 goto cq_free_rsrc; 2040 2041 } ··· 3352 3353 return IB_WC_LOC_LEN_ERR; 3353 3354 case FLUSH_GENERAL_ERR: 3354 3355 return IB_WC_WR_FLUSH_ERR; 3356 + case FLUSH_RETRY_EXC_ERR: 3357 + return IB_WC_RETRY_EXC_ERR; 3358 + case FLUSH_MW_BIND_ERR: 3359 + return IB_WC_MW_BIND_ERR; 3355 3360 case FLUSH_FATAL_ERR: 3356 3361 default: 3357 3362 return IB_WC_FATAL_ERR;
+1 -1
drivers/infiniband/hw/qib/qib_sysfs.c
··· 403 403 } 404 404 405 405 #define QIB_DIAGC_ATTR(N) \ 406 - static_assert(&((struct qib_ibport *)0)->rvp.n_##N != (u64 *)NULL); \ 406 + static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64)); \ 407 407 static struct qib_diagc_attr qib_diagc_attr_##N = { \ 408 408 .attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \ 409 409 .counter = \
+1 -1
drivers/infiniband/hw/usnic/usnic_ib.h
··· 90 90 91 91 struct usnic_ib_vf { 92 92 struct usnic_ib_dev *pf; 93 - spinlock_t lock; 93 + struct mutex lock; 94 94 struct usnic_vnic *vnic; 95 95 unsigned int qp_grp_ref_cnt; 96 96 struct usnic_ib_pd *pd;
+1 -1
drivers/infiniband/hw/usnic/usnic_ib_main.c
··· 572 572 } 573 573 574 574 vf->pf = pf; 575 - spin_lock_init(&vf->lock); 575 + mutex_init(&vf->lock); 576 576 mutex_lock(&pf->usdev_lock); 577 577 list_add_tail(&vf->link, &pf->vf_dev_list); 578 578 /*
+8 -8
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
··· 196 196 for (i = 0; dev_list[i]; i++) { 197 197 dev = dev_list[i]; 198 198 vf = dev_get_drvdata(dev); 199 - spin_lock(&vf->lock); 199 + mutex_lock(&vf->lock); 200 200 vnic = vf->vnic; 201 201 if (!usnic_vnic_check_room(vnic, res_spec)) { 202 202 usnic_dbg("Found used vnic %s from %s\n", ··· 208 208 vf, pd, res_spec, 209 209 trans_spec); 210 210 211 - spin_unlock(&vf->lock); 211 + mutex_unlock(&vf->lock); 212 212 goto qp_grp_check; 213 213 } 214 - spin_unlock(&vf->lock); 214 + mutex_unlock(&vf->lock); 215 215 216 216 } 217 217 usnic_uiom_free_dev_list(dev_list); ··· 220 220 221 221 /* Try to find resources on an unused vf */ 222 222 list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) { 223 - spin_lock(&vf->lock); 223 + mutex_lock(&vf->lock); 224 224 vnic = vf->vnic; 225 225 if (vf->qp_grp_ref_cnt == 0 && 226 226 usnic_vnic_check_room(vnic, res_spec) == 0) { ··· 228 228 vf, pd, res_spec, 229 229 trans_spec); 230 230 231 - spin_unlock(&vf->lock); 231 + mutex_unlock(&vf->lock); 232 232 goto qp_grp_check; 233 233 } 234 - spin_unlock(&vf->lock); 234 + mutex_unlock(&vf->lock); 235 235 } 236 236 237 237 usnic_info("No free qp grp found on %s\n", ··· 253 253 254 254 WARN_ON(qp_grp->state != IB_QPS_RESET); 255 255 256 - spin_lock(&vf->lock); 256 + mutex_lock(&vf->lock); 257 257 usnic_ib_qp_grp_destroy(qp_grp); 258 - spin_unlock(&vf->lock); 258 + mutex_unlock(&vf->lock); 259 259 } 260 260 261 261 static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
+19 -6
drivers/interconnect/qcom/sdm660.c
··· 44 44 #define NOC_PERM_MODE_BYPASS (1 << NOC_QOS_MODE_BYPASS) 45 45 46 46 #define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + (n * 0x1000)) 47 - #define NOC_QOS_PRIORITY_MASK 0xf 47 + #define NOC_QOS_PRIORITY_P1_MASK 0xc 48 + #define NOC_QOS_PRIORITY_P0_MASK 0x3 48 49 #define NOC_QOS_PRIORITY_P1_SHIFT 0x2 49 - #define NOC_QOS_PRIORITY_P0_SHIFT 0x3 50 50 51 51 #define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000)) 52 52 #define NOC_QOS_MODEn_MASK 0x3 ··· 171 171 { .id = "bus" }, 172 172 { .id = "bus_a" }, 173 173 { .id = "iface" }, 174 + }; 175 + 176 + static const struct clk_bulk_data bus_a2noc_clocks[] = { 177 + { .id = "bus" }, 178 + { .id = "bus_a" }, 179 + { .id = "ipa" }, 180 + { .id = "ufs_axi" }, 181 + { .id = "aggre2_ufs_axi" }, 182 + { .id = "aggre2_usb3_axi" }, 183 + { .id = "cfg_noc_usb2_axi" }, 174 184 }; 175 185 176 186 /** ··· 317 307 DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0); 318 308 DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0); 319 309 DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0); 320 - DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_BLSP_1, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG); 310 + DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_CNOC_MNOC_CFG, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG); 321 311 DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0); 322 312 DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0); 323 313 DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0); ··· 634 624 /* Must be updated one at a time, P1 first, P0 last */ 635 625 val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT; 636 626 rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port), 637 - NOC_QOS_PRIORITY_MASK, val); 627 + NOC_QOS_PRIORITY_P1_MASK, val); 638 628 if (rc) 639 629 return rc; 640 630 641 - val = qos->prio_level << NOC_QOS_PRIORITY_P0_SHIFT; 642 631 return 
regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port), 643 - NOC_QOS_PRIORITY_MASK, val); 632 + NOC_QOS_PRIORITY_P0_MASK, qos->prio_level); 644 633 } 645 634 646 635 static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw) ··· 819 810 qp->bus_clks = devm_kmemdup(dev, bus_mm_clocks, 820 811 sizeof(bus_mm_clocks), GFP_KERNEL); 821 812 qp->num_clks = ARRAY_SIZE(bus_mm_clocks); 813 + } else if (of_device_is_compatible(dev->of_node, "qcom,sdm660-a2noc")) { 814 + qp->bus_clks = devm_kmemdup(dev, bus_a2noc_clocks, 815 + sizeof(bus_a2noc_clocks), GFP_KERNEL); 816 + qp->num_clks = ARRAY_SIZE(bus_a2noc_clocks); 822 817 } else { 823 818 if (of_device_is_compatible(dev->of_node, "qcom,sdm660-bimc")) 824 819 qp->is_bimc_node = true;
+35 -21
drivers/iommu/apple-dart.c
··· 183 183 184 184 static struct platform_driver apple_dart_driver; 185 185 static const struct iommu_ops apple_dart_iommu_ops; 186 - static const struct iommu_flush_ops apple_dart_tlb_ops; 187 186 188 187 static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom) 189 188 { ··· 337 338 apple_dart_domain_flush_tlb(to_dart_domain(domain)); 338 339 } 339 340 340 - static void apple_dart_tlb_flush_all(void *cookie) 341 - { 342 - apple_dart_domain_flush_tlb(cookie); 343 - } 344 - 345 - static void apple_dart_tlb_flush_walk(unsigned long iova, size_t size, 346 - size_t granule, void *cookie) 347 - { 348 - apple_dart_domain_flush_tlb(cookie); 349 - } 350 - 351 - static const struct iommu_flush_ops apple_dart_tlb_ops = { 352 - .tlb_flush_all = apple_dart_tlb_flush_all, 353 - .tlb_flush_walk = apple_dart_tlb_flush_walk, 354 - }; 355 - 356 341 static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain, 357 342 dma_addr_t iova) 358 343 { ··· 418 435 .ias = 32, 419 436 .oas = 36, 420 437 .coherent_walk = 1, 421 - .tlb = &apple_dart_tlb_ops, 422 438 .iommu_dev = dart->dev, 423 439 }; 424 440 ··· 643 661 return -EINVAL; 644 662 } 645 663 664 + static DEFINE_MUTEX(apple_dart_groups_lock); 665 + 666 + static void apple_dart_release_group(void *iommu_data) 667 + { 668 + int i, sid; 669 + struct apple_dart_stream_map *stream_map; 670 + struct apple_dart_master_cfg *group_master_cfg = iommu_data; 671 + 672 + mutex_lock(&apple_dart_groups_lock); 673 + 674 + for_each_stream_map(i, group_master_cfg, stream_map) 675 + for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) 676 + stream_map->dart->sid2group[sid] = NULL; 677 + 678 + kfree(iommu_data); 679 + mutex_unlock(&apple_dart_groups_lock); 680 + } 681 + 646 682 static struct iommu_group *apple_dart_device_group(struct device *dev) 647 683 { 648 - static DEFINE_MUTEX(lock); 649 684 int i, sid; 650 685 struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); 651 686 struct apple_dart_stream_map 
*stream_map; 687 + struct apple_dart_master_cfg *group_master_cfg; 652 688 struct iommu_group *group = NULL; 653 689 struct iommu_group *res = ERR_PTR(-EINVAL); 654 690 655 - mutex_lock(&lock); 691 + mutex_lock(&apple_dart_groups_lock); 656 692 657 693 for_each_stream_map(i, cfg, stream_map) { 658 694 for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) { ··· 698 698 #endif 699 699 group = generic_device_group(dev); 700 700 701 + res = ERR_PTR(-ENOMEM); 702 + if (!group) 703 + goto out; 704 + 705 + group_master_cfg = kzalloc(sizeof(*group_master_cfg), GFP_KERNEL); 706 + if (!group_master_cfg) { 707 + iommu_group_put(group); 708 + goto out; 709 + } 710 + 711 + memcpy(group_master_cfg, cfg, sizeof(*group_master_cfg)); 712 + iommu_group_set_iommudata(group, group_master_cfg, 713 + apple_dart_release_group); 714 + 701 715 for_each_stream_map(i, cfg, stream_map) 702 716 for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) 703 717 stream_map->dart->sid2group[sid] = group; ··· 719 705 res = group; 720 706 721 707 out: 722 - mutex_unlock(&lock); 708 + mutex_unlock(&apple_dart_groups_lock); 723 709 return res; 724 710 } 725 711
+3 -3
drivers/iommu/intel/dmar.c
··· 1942 1942 reason = dmar_get_fault_reason(fault_reason, &fault_type); 1943 1943 1944 1944 if (fault_type == INTR_REMAP) 1945 - pr_err("[INTR-REMAP] Request device [0x%02x:0x%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n", 1945 + pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n", 1946 1946 source_id >> 8, PCI_SLOT(source_id & 0xFF), 1947 1947 PCI_FUNC(source_id & 0xFF), addr >> 48, 1948 1948 fault_reason, reason); 1949 1949 else if (pasid == INVALID_IOASID) 1950 - pr_err("[%s NO_PASID] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n", 1950 + pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n", 1951 1951 type ? "DMA Read" : "DMA Write", 1952 1952 source_id >> 8, PCI_SLOT(source_id & 0xFF), 1953 1953 PCI_FUNC(source_id & 0xFF), addr, 1954 1954 fault_reason, reason); 1955 1955 else 1956 - pr_err("[%s PASID 0x%x] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n", 1956 + pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n", 1957 1957 type ? "DMA Read" : "DMA Write", pasid, 1958 1958 source_id >> 8, PCI_SLOT(source_id & 0xFF), 1959 1959 PCI_FUNC(source_id & 0xFF), addr,
+47 -16
drivers/ipack/devices/ipoctal.c
··· 33 33 unsigned int pointer_read; 34 34 unsigned int pointer_write; 35 35 struct tty_port tty_port; 36 + bool tty_registered; 36 37 union scc2698_channel __iomem *regs; 37 38 union scc2698_block __iomem *block_regs; 38 39 unsigned int board_id; ··· 82 81 return 0; 83 82 } 84 83 85 - static int ipoctal_open(struct tty_struct *tty, struct file *file) 84 + static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty) 86 85 { 87 86 struct ipoctal_channel *channel = dev_get_drvdata(tty->dev); 88 87 struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index); 89 - int err; 90 - 91 - tty->driver_data = channel; 88 + int res; 92 89 93 90 if (!ipack_get_carrier(ipoctal->dev)) 94 91 return -EBUSY; 95 92 96 - err = tty_port_open(&channel->tty_port, tty, file); 97 - if (err) 98 - ipack_put_carrier(ipoctal->dev); 93 + res = tty_standard_install(driver, tty); 94 + if (res) 95 + goto err_put_carrier; 99 96 100 - return err; 97 + tty->driver_data = channel; 98 + 99 + return 0; 100 + 101 + err_put_carrier: 102 + ipack_put_carrier(ipoctal->dev); 103 + 104 + return res; 105 + } 106 + 107 + static int ipoctal_open(struct tty_struct *tty, struct file *file) 108 + { 109 + struct ipoctal_channel *channel = tty->driver_data; 110 + 111 + return tty_port_open(&channel->tty_port, tty, file); 101 112 } 102 113 103 114 static void ipoctal_reset_stats(struct ipoctal_stats *stats) ··· 277 264 int res; 278 265 int i; 279 266 struct tty_driver *tty; 280 - char name[20]; 281 267 struct ipoctal_channel *channel; 282 268 struct ipack_region *region; 283 269 void __iomem *addr; ··· 367 355 /* Fill struct tty_driver with ipoctal data */ 368 356 tty->owner = THIS_MODULE; 369 357 tty->driver_name = KBUILD_MODNAME; 370 - sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot); 371 - tty->name = name; 358 + tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot); 359 + if (!tty->name) { 360 + res = -ENOMEM; 361 + goto err_put_driver; 362 + } 372 363 tty->major = 0; 373 
364 374 365 tty->minor_start = 0; ··· 386 371 res = tty_register_driver(tty); 387 372 if (res) { 388 373 dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n"); 389 - tty_driver_kref_put(tty); 390 - return res; 374 + goto err_free_name; 391 375 } 392 376 393 377 /* Save struct tty_driver for use it when uninstalling the device */ ··· 397 383 398 384 channel = &ipoctal->channel[i]; 399 385 tty_port_init(&channel->tty_port); 400 - tty_port_alloc_xmit_buf(&channel->tty_port); 386 + res = tty_port_alloc_xmit_buf(&channel->tty_port); 387 + if (res) 388 + continue; 401 389 channel->tty_port.ops = &ipoctal_tty_port_ops; 402 390 403 391 ipoctal_reset_stats(&channel->stats); ··· 407 391 spin_lock_init(&channel->lock); 408 392 channel->pointer_read = 0; 409 393 channel->pointer_write = 0; 410 - tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL); 394 + tty_dev = tty_port_register_device_attr(&channel->tty_port, tty, 395 + i, NULL, channel, NULL); 411 396 if (IS_ERR(tty_dev)) { 412 397 dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n"); 398 + tty_port_free_xmit_buf(&channel->tty_port); 413 399 tty_port_destroy(&channel->tty_port); 414 400 continue; 415 401 } 416 - dev_set_drvdata(tty_dev, channel); 402 + channel->tty_registered = true; 417 403 } 418 404 419 405 /* ··· 427 409 ipoctal_irq_handler, ipoctal); 428 410 429 411 return 0; 412 + 413 + err_free_name: 414 + kfree(tty->name); 415 + err_put_driver: 416 + tty_driver_kref_put(tty); 417 + 418 + return res; 430 419 } 431 420 432 421 static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel, ··· 673 648 674 649 static const struct tty_operations ipoctal_fops = { 675 650 .ioctl = NULL, 651 + .install = ipoctal_install, 676 652 .open = ipoctal_open, 677 653 .close = ipoctal_close, 678 654 .write = ipoctal_write_tty, ··· 716 690 717 691 for (i = 0; i < NR_CHANNELS; i++) { 718 692 struct ipoctal_channel *channel = &ipoctal->channel[i]; 693 + 694 + if (!channel->tty_registered) 
695 + continue; 696 + 719 697 tty_unregister_device(ipoctal->tty_drv, i); 720 698 tty_port_free_xmit_buf(&channel->tty_port); 721 699 tty_port_destroy(&channel->tty_port); 722 700 } 723 701 724 702 tty_unregister_driver(ipoctal->tty_drv); 703 + kfree(ipoctal->tty_drv->name); 725 704 tty_driver_kref_put(ipoctal->tty_drv); 726 705 kfree(ipoctal); 727 706 }
+1
drivers/irqchip/Kconfig
··· 409 409 config GOLDFISH_PIC 410 410 bool "Goldfish programmable interrupt controller" 411 411 depends on MIPS && (GOLDFISH || COMPILE_TEST) 412 + select GENERIC_IRQ_CHIP 412 413 select IRQ_DOMAIN 413 414 help 414 415 Say yes here to enable Goldfish interrupt controller driver used
+2 -2
drivers/irqchip/irq-armada-370-xp.c
··· 359 359 ARMADA_370_XP_SW_TRIG_INT_OFFS); 360 360 } 361 361 362 - static void armada_370_xp_ipi_eoi(struct irq_data *d) 362 + static void armada_370_xp_ipi_ack(struct irq_data *d) 363 363 { 364 364 writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); 365 365 } 366 366 367 367 static struct irq_chip ipi_irqchip = { 368 368 .name = "IPI", 369 + .irq_ack = armada_370_xp_ipi_ack, 369 370 .irq_mask = armada_370_xp_ipi_mask, 370 371 .irq_unmask = armada_370_xp_ipi_unmask, 371 - .irq_eoi = armada_370_xp_ipi_eoi, 372 372 .ipi_send_mask = armada_370_xp_ipi_send_mask, 373 373 }; 374 374
+1 -1
drivers/irqchip/irq-gic-v3-its.c
··· 4501 4501 4502 4502 if (err) { 4503 4503 if (i > 0) 4504 - its_vpe_irq_domain_free(domain, virq, i - 1); 4504 + its_vpe_irq_domain_free(domain, virq, i); 4505 4505 4506 4506 its_lpi_free(bitmap, base, nr_ids); 4507 4507 its_free_prop_table(vprop_page);
+51 -1
drivers/irqchip/irq-gic.c
··· 107 107 108 108 #endif 109 109 110 + static DEFINE_STATIC_KEY_FALSE(needs_rmw_access); 111 + 110 112 /* 111 113 * The GIC mapping of CPU interfaces does not necessarily match 112 114 * the logical CPU numbering. Let's use a mapping as returned ··· 776 774 #endif 777 775 778 776 #ifdef CONFIG_SMP 777 + static void rmw_writeb(u8 bval, void __iomem *addr) 778 + { 779 + static DEFINE_RAW_SPINLOCK(rmw_lock); 780 + unsigned long offset = (unsigned long)addr & 3UL; 781 + unsigned long shift = offset * 8; 782 + unsigned long flags; 783 + u32 val; 784 + 785 + raw_spin_lock_irqsave(&rmw_lock, flags); 786 + 787 + addr -= offset; 788 + val = readl_relaxed(addr); 789 + val &= ~GENMASK(shift + 7, shift); 790 + val |= bval << shift; 791 + writel_relaxed(val, addr); 792 + 793 + raw_spin_unlock_irqrestore(&rmw_lock, flags); 794 + } 795 + 779 796 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 780 797 bool force) 781 798 { ··· 809 788 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 810 789 return -EINVAL; 811 790 812 - writeb_relaxed(gic_cpu_map[cpu], reg); 791 + if (static_branch_unlikely(&needs_rmw_access)) 792 + rmw_writeb(gic_cpu_map[cpu], reg); 793 + else 794 + writeb_relaxed(gic_cpu_map[cpu], reg); 813 795 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 814 796 815 797 return IRQ_SET_MASK_OK_DONE; ··· 1399 1375 return true; 1400 1376 } 1401 1377 1378 + static bool gic_enable_rmw_access(void *data) 1379 + { 1380 + /* 1381 + * The EMEV2 class of machines has a broken interconnect, and 1382 + * locks up on accesses that are less than 32bit. So far, only 1383 + * the affinity setting requires it. 
1384 + */ 1385 + if (of_machine_is_compatible("renesas,emev2")) { 1386 + static_branch_enable(&needs_rmw_access); 1387 + return true; 1388 + } 1389 + 1390 + return false; 1391 + } 1392 + 1393 + static const struct gic_quirk gic_quirks[] = { 1394 + { 1395 + .desc = "broken byte access", 1396 + .compatible = "arm,pl390", 1397 + .init = gic_enable_rmw_access, 1398 + }, 1399 + { }, 1400 + }; 1401 + 1402 1402 static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node) 1403 1403 { 1404 1404 if (!gic || !node) ··· 1438 1390 1439 1391 if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset)) 1440 1392 gic->percpu_offset = 0; 1393 + 1394 + gic_enable_of_quirks(node, gic_quirks, gic); 1441 1395 1442 1396 return 0; 1443 1397
+3 -3
drivers/irqchip/irq-mbigen.c
··· 25 25 /* The maximum IRQ pin number of mbigen chip(start from 0) */ 26 26 #define MAXIMUM_IRQ_PIN_NUM 1407 27 27 28 - /** 28 + /* 29 29 * In mbigen vector register 30 30 * bit[21:12]: event id value 31 31 * bit[11:0]: device id ··· 39 39 /* offset of vector register in mbigen node */ 40 40 #define REG_MBIGEN_VEC_OFFSET 0x200 41 41 42 - /** 42 + /* 43 43 * offset of clear register in mbigen node 44 44 * This register is used to clear the status 45 45 * of interrupt 46 46 */ 47 47 #define REG_MBIGEN_CLEAR_OFFSET 0xa000 48 48 49 - /** 49 + /* 50 50 * offset of interrupt type register 51 51 * This register is used to configure interrupt 52 52 * trigger type
+6 -6
drivers/irqchip/irq-renesas-rza1.c
··· 223 223 goto out_put_node; 224 224 } 225 225 226 - priv->chip.name = "rza1-irqc", 227 - priv->chip.irq_mask = irq_chip_mask_parent, 228 - priv->chip.irq_unmask = irq_chip_unmask_parent, 229 - priv->chip.irq_eoi = rza1_irqc_eoi, 230 - priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy, 231 - priv->chip.irq_set_type = rza1_irqc_set_type, 226 + priv->chip.name = "rza1-irqc"; 227 + priv->chip.irq_mask = irq_chip_mask_parent; 228 + priv->chip.irq_unmask = irq_chip_unmask_parent; 229 + priv->chip.irq_eoi = rza1_irqc_eoi; 230 + priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy; 231 + priv->chip.irq_set_type = rza1_irqc_set_type; 232 232 priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE; 233 233 234 234 priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
+6 -6
drivers/mcb/mcb-core.c
··· 275 275 276 276 bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL); 277 277 if (bus_nr < 0) { 278 - rc = bus_nr; 279 - goto err_free; 278 + kfree(bus); 279 + return ERR_PTR(bus_nr); 280 280 } 281 281 282 282 bus->bus_nr = bus_nr; ··· 291 291 dev_set_name(&bus->dev, "mcb:%d", bus_nr); 292 292 rc = device_add(&bus->dev); 293 293 if (rc) 294 - goto err_free; 294 + goto err_put; 295 295 296 296 return bus; 297 - err_free: 298 - put_device(carrier); 299 - kfree(bus); 297 + 298 + err_put: 299 + put_device(&bus->dev); 300 300 return ERR_PTR(rc); 301 301 } 302 302 EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
-5
drivers/md/md.c
··· 5700 5700 disk->flags |= GENHD_FL_EXT_DEVT; 5701 5701 disk->events |= DISK_EVENT_MEDIA_CHANGE; 5702 5702 mddev->gendisk = disk; 5703 - /* As soon as we call add_disk(), another thread could get 5704 - * through to md_open, so make sure it doesn't get too far 5705 - */ 5706 - mutex_lock(&mddev->open_mutex); 5707 5703 add_disk(disk); 5708 5704 5709 5705 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); ··· 5714 5718 if (mddev->kobj.sd && 5715 5719 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 5716 5720 pr_debug("pointless warning\n"); 5717 - mutex_unlock(&mddev->open_mutex); 5718 5721 abort: 5719 5722 mutex_unlock(&disks_mutex); 5720 5723 if (!error && mddev->kobj.sd) {
+9 -9
drivers/media/platform/s5p-jpeg/jpeg-core.c
··· 1140 1140 continue; 1141 1141 length = 0; 1142 1142 switch (c) { 1143 - /* SOF0: baseline JPEG */ 1144 - case SOF0: 1143 + /* JPEG_MARKER_SOF0: baseline JPEG */ 1144 + case JPEG_MARKER_SOF0: 1145 1145 if (get_word_be(&jpeg_buffer, &word)) 1146 1146 break; 1147 1147 length = (long)word - 2; ··· 1172 1172 notfound = 0; 1173 1173 break; 1174 1174 1175 - case DQT: 1175 + case JPEG_MARKER_DQT: 1176 1176 if (get_word_be(&jpeg_buffer, &word)) 1177 1177 break; 1178 1178 length = (long)word - 2; ··· 1185 1185 skip(&jpeg_buffer, length); 1186 1186 break; 1187 1187 1188 - case DHT: 1188 + case JPEG_MARKER_DHT: 1189 1189 if (get_word_be(&jpeg_buffer, &word)) 1190 1190 break; 1191 1191 length = (long)word - 2; ··· 1198 1198 skip(&jpeg_buffer, length); 1199 1199 break; 1200 1200 1201 - case SOS: 1201 + case JPEG_MARKER_SOS: 1202 1202 sos = jpeg_buffer.curr - 2; /* 0xffda */ 1203 1203 break; 1204 1204 1205 1205 /* skip payload-less markers */ 1206 - case RST ... RST + 7: 1207 - case SOI: 1208 - case EOI: 1209 - case TEM: 1206 + case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7: 1207 + case JPEG_MARKER_SOI: 1208 + case JPEG_MARKER_EOI: 1209 + case JPEG_MARKER_TEM: 1210 1210 break; 1211 1211 1212 1212 /* skip uninteresting payload markers */
+14 -14
drivers/media/platform/s5p-jpeg/jpeg-core.h
··· 37 37 #define EXYNOS3250_IRQ_TIMEOUT 0x10000000 38 38 39 39 /* a selection of JPEG markers */ 40 - #define TEM 0x01 41 - #define SOF0 0xc0 42 - #define DHT 0xc4 43 - #define RST 0xd0 44 - #define SOI 0xd8 45 - #define EOI 0xd9 46 - #define SOS 0xda 47 - #define DQT 0xdb 48 - #define DHP 0xde 40 + #define JPEG_MARKER_TEM 0x01 41 + #define JPEG_MARKER_SOF0 0xc0 42 + #define JPEG_MARKER_DHT 0xc4 43 + #define JPEG_MARKER_RST 0xd0 44 + #define JPEG_MARKER_SOI 0xd8 45 + #define JPEG_MARKER_EOI 0xd9 46 + #define JPEG_MARKER_SOS 0xda 47 + #define JPEG_MARKER_DQT 0xdb 48 + #define JPEG_MARKER_DHP 0xde 49 49 50 50 /* Flags that indicate a format can be used for capture/output */ 51 51 #define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0) ··· 187 187 * @fmt: driver-specific format of this queue 188 188 * @w: image width 189 189 * @h: image height 190 - * @sos: SOS marker's position relative to the buffer beginning 191 - * @dht: DHT markers' positions relative to the buffer beginning 192 - * @dqt: DQT markers' positions relative to the buffer beginning 193 - * @sof: SOF0 marker's position relative to the buffer beginning 194 - * @sof_len: SOF0 marker's payload length (without length field itself) 190 + * @sos: JPEG_MARKER_SOS's position relative to the buffer beginning 191 + * @dht: JPEG_MARKER_DHT' positions relative to the buffer beginning 192 + * @dqt: JPEG_MARKER_DQT' positions relative to the buffer beginning 193 + * @sof: JPEG_MARKER_SOF0's position relative to the buffer beginning 194 + * @sof_len: JPEG_MARKER_SOF0's payload length (without length field itself) 195 195 * @size: image buffer size in bytes 196 196 */ 197 197 struct s5p_jpeg_q_data {
+20 -1
drivers/media/rc/ir_toy.c
··· 24 24 // End transmit and repeat reset command so we exit sump mode 25 25 static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 }; 26 26 static const u8 COMMAND_SMODE_ENTER[] = { 's' }; 27 + static const u8 COMMAND_SMODE_EXIT[] = { 0 }; 27 28 static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 }; 28 29 29 30 #define REPLY_XMITCOUNT 't' ··· 310 309 buf[i] = cpu_to_be16(v); 311 310 } 312 311 313 - buf[count] = cpu_to_be16(0xffff); 312 + buf[count] = 0xffff; 314 313 315 314 irtoy->tx_buf = buf; 316 315 irtoy->tx_len = size; 317 316 irtoy->emitted = 0; 317 + 318 + // There is an issue where if the unit is receiving IR while the 319 + // first TXSTART command is sent, the device might end up hanging 320 + // with its led on. It does not respond to any command when this 321 + // happens. To work around this, re-enter sample mode. 322 + err = irtoy_command(irtoy, COMMAND_SMODE_EXIT, 323 + sizeof(COMMAND_SMODE_EXIT), STATE_RESET); 324 + if (err) { 325 + dev_err(irtoy->dev, "exit sample mode: %d\n", err); 326 + return err; 327 + } 328 + 329 + err = irtoy_command(irtoy, COMMAND_SMODE_ENTER, 330 + sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND); 331 + if (err) { 332 + dev_err(irtoy->dev, "enter sample mode: %d\n", err); 333 + return err; 334 + } 318 335 319 336 err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART), 320 337 STATE_TX);
+3 -3
drivers/misc/bcm-vk/bcm_vk_tty.c
··· 267 267 struct device *tty_dev; 268 268 269 269 tty_port_init(&vk->tty[i].port); 270 - tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv, 271 - i, dev); 270 + tty_dev = tty_port_register_device_attr(&vk->tty[i].port, 271 + tty_drv, i, dev, vk, 272 + NULL); 272 273 if (IS_ERR(tty_dev)) { 273 274 err = PTR_ERR(tty_dev); 274 275 goto unwind; 275 276 } 276 - dev_set_drvdata(tty_dev, vk); 277 277 vk->tty[i].is_opened = false; 278 278 } 279 279
+1 -1
drivers/misc/genwqe/card_base.c
··· 1090 1090 1091 1091 /* check for 64-bit DMA address supported (DAC) */ 1092 1092 /* check for 32-bit DMA address supported (SAC) */ 1093 - if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) || 1093 + if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) && 1094 1094 dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) { 1095 1095 dev_err(&pci_dev->dev, 1096 1096 "err: neither DMA32 nor DMA64 supported\n");
+51 -20
drivers/misc/habanalabs/common/command_submission.c
··· 405 405 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) 406 406 { 407 407 bool next_entry_found = false; 408 - struct hl_cs *next; 408 + struct hl_cs *next, *first_cs; 409 409 410 410 if (!cs_needs_timeout(cs)) 411 411 return; ··· 415 415 /* We need to handle tdr only once for the complete staged submission. 416 416 * Hence, we choose the CS that reaches this function first which is 417 417 * the CS marked as 'staged_last'. 418 + * In case single staged cs was submitted which has both first and last 419 + * indications, then "cs_find_first" below will return NULL, since we 420 + * removed the cs node from the list before getting here, 421 + * in such cases just continue with the cs to cancel it's TDR work. 418 422 */ 419 - if (cs->staged_cs && cs->staged_last) 420 - cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); 423 + if (cs->staged_cs && cs->staged_last) { 424 + first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); 425 + if (first_cs) 426 + cs = first_cs; 427 + } 421 428 422 429 spin_unlock(&hdev->cs_mirror_lock); 423 430 ··· 1295 1288 if (rc) 1296 1289 goto free_cs_object; 1297 1290 1291 + /* If this is a staged submission we must return the staged sequence 1292 + * rather than the internal CS sequence 1293 + */ 1294 + if (cs->staged_cs) 1295 + *cs_seq = cs->staged_sequence; 1296 + 1298 1297 /* Validate ALL the CS chunks before submitting the CS */ 1299 1298 for (i = 0 ; i < num_chunks ; i++) { 1300 1299 struct hl_cs_chunk *chunk = &cs_chunk_array[i]; ··· 2001 1988 goto free_cs_chunk_array; 2002 1989 } 2003 1990 1991 + if (!hdev->nic_ports_mask) { 1992 + atomic64_inc(&ctx->cs_counters.validation_drop_cnt); 1993 + atomic64_inc(&cntr->validation_drop_cnt); 1994 + dev_err(hdev->dev, 1995 + "Collective operations not supported when NIC ports are disabled"); 1996 + rc = -EINVAL; 1997 + goto free_cs_chunk_array; 1998 + } 1999 + 2004 2000 collective_engine_id = chunk->collective_engine_id; 2005 2001 } 2006 2002 ··· 2048 2026 
spin_unlock(&ctx->sig_mgr.lock); 2049 2027 2050 2028 if (!handle_found) { 2051 - dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n", 2029 + /* treat as signal CS already finished */ 2030 + dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n", 2052 2031 signal_seq); 2053 - rc = -EINVAL; 2032 + rc = 0; 2054 2033 goto free_cs_chunk_array; 2055 2034 } 2056 2035 ··· 2636 2613 * completed after the poll function. 2637 2614 */ 2638 2615 if (!mcs_data.completion_bitmap) { 2639 - dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n"); 2616 + dev_warn_ratelimited(hdev->dev, 2617 + "Multi-CS got completion on wait but no CS completed\n"); 2640 2618 rc = -EFAULT; 2641 2619 } 2642 2620 } ··· 2764 2740 else 2765 2741 interrupt = &hdev->user_interrupt[interrupt_offset]; 2766 2742 2743 + /* Add pending user interrupt to relevant list for the interrupt 2744 + * handler to monitor 2745 + */ 2746 + spin_lock_irqsave(&interrupt->wait_list_lock, flags); 2747 + list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head); 2748 + spin_unlock_irqrestore(&interrupt->wait_list_lock, flags); 2749 + 2750 + /* We check for completion value as interrupt could have been received 2751 + * before we added the node to the wait list 2752 + */ 2767 2753 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) { 2768 2754 dev_err(hdev->dev, "Failed to copy completion value from user\n"); 2769 2755 rc = -EFAULT; 2770 - goto free_fence; 2756 + goto remove_pending_user_interrupt; 2771 2757 } 2772 2758 2773 2759 if (completion_value >= target_value) ··· 2786 2752 *status = CS_WAIT_STATUS_BUSY; 2787 2753 2788 2754 if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED)) 2789 - goto free_fence; 2790 - 2791 - /* Add pending user interrupt to relevant list for the interrupt 2792 - * handler to monitor 2793 - */ 2794 - spin_lock_irqsave(&interrupt->wait_list_lock, flags); 2795 - list_add_tail(&pend->wait_list_node, 
&interrupt->wait_list_head); 2796 - spin_unlock_irqrestore(&interrupt->wait_list_lock, flags); 2755 + goto remove_pending_user_interrupt; 2797 2756 2798 2757 wait_again: 2799 2758 /* Wait for interrupt handler to signal completion */ ··· 2797 2770 * If comparison fails, keep waiting until timeout expires 2798 2771 */ 2799 2772 if (completion_rc > 0) { 2773 + spin_lock_irqsave(&interrupt->wait_list_lock, flags); 2774 + /* reinit_completion must be called before we check for user 2775 + * completion value, otherwise, if interrupt is received after 2776 + * the comparison and before the next wait_for_completion, 2777 + * we will reach timeout and fail 2778 + */ 2779 + reinit_completion(&pend->fence.completion); 2780 + spin_unlock_irqrestore(&interrupt->wait_list_lock, flags); 2781 + 2800 2782 if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) { 2801 2783 dev_err(hdev->dev, "Failed to copy completion value from user\n"); 2802 2784 rc = -EFAULT; ··· 2816 2780 if (completion_value >= target_value) { 2817 2781 *status = CS_WAIT_STATUS_COMPLETED; 2818 2782 } else { 2819 - spin_lock_irqsave(&interrupt->wait_list_lock, flags); 2820 - reinit_completion(&pend->fence.completion); 2821 2783 timeout = completion_rc; 2822 - 2823 - spin_unlock_irqrestore(&interrupt->wait_list_lock, flags); 2824 2784 goto wait_again; 2825 2785 } 2826 2786 } else if (completion_rc == -ERESTARTSYS) { ··· 2834 2802 list_del(&pend->wait_list_node); 2835 2803 spin_unlock_irqrestore(&interrupt->wait_list_lock, flags); 2836 2804 2837 - free_fence: 2838 2805 kfree(pend); 2839 2806 hl_ctx_put(ctx); 2840 2807
+7 -2
drivers/misc/habanalabs/common/hw_queue.c
··· 437 437 struct hl_cs_compl *cs_cmpl) 438 438 { 439 439 struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl; 440 + u32 offset = 0; 440 441 441 442 cs_cmpl->hw_sob = handle->hw_sob; 442 443 ··· 447 446 * set offset 1 for example he mean to wait only for the first 448 447 * signal only, which will be pre_sob_val, and if he set offset 2 449 448 * then the value required is (pre_sob_val + 1) and so on... 449 + * if user set wait offset to 0, then treat it as legacy wait cs, 450 + * wait for the next signal. 450 451 */ 451 - cs_cmpl->sob_val = handle->pre_sob_val + 452 - (job->encaps_sig_wait_offset - 1); 452 + if (job->encaps_sig_wait_offset) 453 + offset = job->encaps_sig_wait_offset - 1; 454 + 455 + cs_cmpl->sob_val = handle->pre_sob_val + offset; 453 456 } 454 457 455 458 static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
+7 -4
drivers/misc/habanalabs/gaudi/gaudi.c
··· 395 395 396 396 static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = { 397 397 { .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" }, 398 - { .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" }, 398 + { .id = 201, .name = "MON_OBJ_DMA_UP_FEEDBACK_RESET" }, 399 399 { .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" }, 400 400 { .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" }, 401 401 { .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" }, ··· 5802 5802 { 5803 5803 struct gaudi_device *gaudi = hdev->asic_specific; 5804 5804 struct packet_msg_prot *cq_pkt; 5805 + u64 msi_addr; 5805 5806 u32 tmp; 5806 5807 5807 5808 cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2); ··· 5824 5823 cq_pkt->ctl = cpu_to_le32(tmp); 5825 5824 cq_pkt->value = cpu_to_le32(1); 5826 5825 5827 - if (!gaudi->multi_msi_mode) 5828 - msi_vec = 0; 5826 + if (gaudi->multi_msi_mode) 5827 + msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4; 5828 + else 5829 + msi_addr = mmPCIE_CORE_MSI_REQ; 5829 5830 5830 - cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4); 5831 + cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr); 5831 5832 } 5832 5833 5833 5834 static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
+67 -48
drivers/misc/habanalabs/gaudi/gaudi_security.c
··· 8 8 #include "gaudiP.h" 9 9 #include "../include/gaudi/asic_reg/gaudi_regs.h" 10 10 11 - #define GAUDI_NUMBER_OF_RR_REGS 24 12 - #define GAUDI_NUMBER_OF_LBW_RANGES 12 11 + #define GAUDI_NUMBER_OF_LBW_RR_REGS 28 12 + #define GAUDI_NUMBER_OF_HBW_RR_REGS 24 13 + #define GAUDI_NUMBER_OF_LBW_RANGES 10 13 14 14 - static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = { 15 + static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { 16 + mmDMA_IF_W_S_SOB_HIT_WPROT, 15 17 mmDMA_IF_W_S_DMA0_HIT_WPROT, 16 18 mmDMA_IF_W_S_DMA1_HIT_WPROT, 19 + mmDMA_IF_E_S_SOB_HIT_WPROT, 17 20 mmDMA_IF_E_S_DMA0_HIT_WPROT, 18 21 mmDMA_IF_E_S_DMA1_HIT_WPROT, 22 + mmDMA_IF_W_N_SOB_HIT_WPROT, 19 23 mmDMA_IF_W_N_DMA0_HIT_WPROT, 20 24 mmDMA_IF_W_N_DMA1_HIT_WPROT, 25 + mmDMA_IF_E_N_SOB_HIT_WPROT, 21 26 mmDMA_IF_E_N_DMA0_HIT_WPROT, 22 27 mmDMA_IF_E_N_DMA1_HIT_WPROT, 23 28 mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW, ··· 43 38 mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW, 44 39 }; 45 40 46 - static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = { 41 + static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { 42 + mmDMA_IF_W_S_SOB_HIT_RPROT, 47 43 mmDMA_IF_W_S_DMA0_HIT_RPROT, 48 44 mmDMA_IF_W_S_DMA1_HIT_RPROT, 45 + mmDMA_IF_E_S_SOB_HIT_RPROT, 49 46 mmDMA_IF_E_S_DMA0_HIT_RPROT, 50 47 mmDMA_IF_E_S_DMA1_HIT_RPROT, 48 + mmDMA_IF_W_N_SOB_HIT_RPROT, 51 49 mmDMA_IF_W_N_DMA0_HIT_RPROT, 52 50 mmDMA_IF_W_N_DMA1_HIT_RPROT, 51 + mmDMA_IF_E_N_SOB_HIT_RPROT, 53 52 mmDMA_IF_E_N_DMA0_HIT_RPROT, 54 53 mmDMA_IF_E_N_DMA1_HIT_RPROT, 55 54 mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR, ··· 74 65 mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR, 75 66 }; 76 67 77 - static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = { 68 + static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { 69 + mmDMA_IF_W_S_SOB_MIN_WPROT_0, 78 70 mmDMA_IF_W_S_DMA0_MIN_WPROT_0, 79 71 mmDMA_IF_W_S_DMA1_MIN_WPROT_0, 72 + mmDMA_IF_E_S_SOB_MIN_WPROT_0, 80 73 mmDMA_IF_E_S_DMA0_MIN_WPROT_0, 81 74 mmDMA_IF_E_S_DMA1_MIN_WPROT_0, 75 + 
mmDMA_IF_W_N_SOB_MIN_WPROT_0, 82 76 mmDMA_IF_W_N_DMA0_MIN_WPROT_0, 83 77 mmDMA_IF_W_N_DMA1_MIN_WPROT_0, 78 + mmDMA_IF_E_N_SOB_MIN_WPROT_0, 84 79 mmDMA_IF_E_N_DMA0_MIN_WPROT_0, 85 80 mmDMA_IF_E_N_DMA1_MIN_WPROT_0, 86 81 mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0, ··· 105 92 mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0, 106 93 }; 107 94 108 - static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = { 95 + static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { 96 + mmDMA_IF_W_S_SOB_MAX_WPROT_0, 109 97 mmDMA_IF_W_S_DMA0_MAX_WPROT_0, 110 98 mmDMA_IF_W_S_DMA1_MAX_WPROT_0, 99 + mmDMA_IF_E_S_SOB_MAX_WPROT_0, 111 100 mmDMA_IF_E_S_DMA0_MAX_WPROT_0, 112 101 mmDMA_IF_E_S_DMA1_MAX_WPROT_0, 102 + mmDMA_IF_W_N_SOB_MAX_WPROT_0, 113 103 mmDMA_IF_W_N_DMA0_MAX_WPROT_0, 114 104 mmDMA_IF_W_N_DMA1_MAX_WPROT_0, 105 + mmDMA_IF_E_N_SOB_MAX_WPROT_0, 115 106 mmDMA_IF_E_N_DMA0_MAX_WPROT_0, 116 107 mmDMA_IF_E_N_DMA1_MAX_WPROT_0, 117 108 mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0, ··· 136 119 mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0, 137 120 }; 138 121 139 - static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = { 122 + static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { 123 + mmDMA_IF_W_S_SOB_MIN_RPROT_0, 140 124 mmDMA_IF_W_S_DMA0_MIN_RPROT_0, 141 125 mmDMA_IF_W_S_DMA1_MIN_RPROT_0, 126 + mmDMA_IF_E_S_SOB_MIN_RPROT_0, 142 127 mmDMA_IF_E_S_DMA0_MIN_RPROT_0, 143 128 mmDMA_IF_E_S_DMA1_MIN_RPROT_0, 129 + mmDMA_IF_W_N_SOB_MIN_RPROT_0, 144 130 mmDMA_IF_W_N_DMA0_MIN_RPROT_0, 145 131 mmDMA_IF_W_N_DMA1_MIN_RPROT_0, 132 + mmDMA_IF_E_N_SOB_MIN_RPROT_0, 146 133 mmDMA_IF_E_N_DMA0_MIN_RPROT_0, 147 134 mmDMA_IF_E_N_DMA1_MIN_RPROT_0, 148 135 mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0, ··· 167 146 mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0, 168 147 }; 169 148 170 - static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = { 149 + static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = { 150 + mmDMA_IF_W_S_SOB_MAX_RPROT_0, 171 151 mmDMA_IF_W_S_DMA0_MAX_RPROT_0, 172 152 
mmDMA_IF_W_S_DMA1_MAX_RPROT_0, 153 + mmDMA_IF_E_S_SOB_MAX_RPROT_0, 173 154 mmDMA_IF_E_S_DMA0_MAX_RPROT_0, 174 155 mmDMA_IF_E_S_DMA1_MAX_RPROT_0, 156 + mmDMA_IF_W_N_SOB_MAX_RPROT_0, 175 157 mmDMA_IF_W_N_DMA0_MAX_RPROT_0, 176 158 mmDMA_IF_W_N_DMA1_MAX_RPROT_0, 159 + mmDMA_IF_E_N_SOB_MAX_RPROT_0, 177 160 mmDMA_IF_E_N_DMA0_MAX_RPROT_0, 178 161 mmDMA_IF_E_N_DMA1_MAX_RPROT_0, 179 162 mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0, ··· 198 173 mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0, 199 174 }; 200 175 201 - static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = { 176 + static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 202 177 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW, 203 178 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW, 204 179 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW, ··· 225 200 mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW 226 201 }; 227 202 228 - static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = { 203 + static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 229 204 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR, 230 205 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR, 231 206 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR, ··· 252 227 mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR 253 228 }; 254 229 255 - static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = { 230 + static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 256 231 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0, 257 232 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0, 258 233 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0, ··· 279 254 mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0 280 255 }; 281 256 282 - static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = { 257 + static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 283 258 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0, 284 259 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0, 285 260 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0, ··· 306 281 mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0 307 282 }; 308 283 309 - 
static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = { 284 + static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 310 285 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0, 311 286 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0, 312 287 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0, ··· 333 308 mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0 334 309 }; 335 310 336 - static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = { 311 + static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 337 312 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0, 338 313 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0, 339 314 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0, ··· 360 335 mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0 361 336 }; 362 337 363 - static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = { 338 + static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 364 339 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0, 365 340 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0, 366 341 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0, ··· 387 362 mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0 388 363 }; 389 364 390 - static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = { 365 + static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 391 366 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0, 392 367 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0, 393 368 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0, ··· 414 389 mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0 415 390 }; 416 391 417 - static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = { 392 + static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 418 393 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0, 419 394 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0, 420 395 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0, ··· 441 416 mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0 442 417 }; 443 418 444 - static u64 
gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = { 419 + static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = { 445 420 mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0, 446 421 mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0, 447 422 mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0, ··· 12874 12849 u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES]; 12875 12850 int i, j; 12876 12851 12877 - lbw_rng_start[0] = (0xFBFE0000 & 0x3FFFFFF) - 1; 12878 - lbw_rng_end[0] = (0xFBFFF000 & 0x3FFFFFF) + 1; 12852 + lbw_rng_start[0] = (0xFC0E8000 & 0x3FFFFFF) - 1; /* 0x000E7FFF */ 12853 + lbw_rng_end[0] = (0xFC11FFFF & 0x3FFFFFF) + 1; /* 0x00120000 */ 12879 12854 12880 - lbw_rng_start[1] = (0xFC0E8000 & 0x3FFFFFF) - 1; 12881 - lbw_rng_end[1] = (0xFC120000 & 0x3FFFFFF) + 1; 12855 + lbw_rng_start[1] = (0xFC1E8000 & 0x3FFFFFF) - 1; /* 0x001E7FFF */ 12856 + lbw_rng_end[1] = (0xFC48FFFF & 0x3FFFFFF) + 1; /* 0x00490000 */ 12882 12857 12883 - lbw_rng_start[2] = (0xFC1E8000 & 0x3FFFFFF) - 1; 12884 - lbw_rng_end[2] = (0xFC48FFFF & 0x3FFFFFF) + 1; 12858 + lbw_rng_start[2] = (0xFC600000 & 0x3FFFFFF) - 1; /* 0x005FFFFF */ 12859 + lbw_rng_end[2] = (0xFCC48FFF & 0x3FFFFFF) + 1; /* 0x00C49000 */ 12885 12860 12886 - lbw_rng_start[3] = (0xFC600000 & 0x3FFFFFF) - 1; 12887 - lbw_rng_end[3] = (0xFCC48FFF & 0x3FFFFFF) + 1; 12861 + lbw_rng_start[3] = (0xFCC4A000 & 0x3FFFFFF) - 1; /* 0x00C49FFF */ 12862 + lbw_rng_end[3] = (0xFCCDFFFF & 0x3FFFFFF) + 1; /* 0x00CE0000 */ 12888 12863 12889 - lbw_rng_start[4] = (0xFCC4A000 & 0x3FFFFFF) - 1; 12890 - lbw_rng_end[4] = (0xFCCDFFFF & 0x3FFFFFF) + 1; 12864 + lbw_rng_start[4] = (0xFCCE4000 & 0x3FFFFFF) - 1; /* 0x00CE3FFF */ 12865 + lbw_rng_end[4] = (0xFCD1FFFF & 0x3FFFFFF) + 1; /* 0x00D20000 */ 12891 12866 12892 - lbw_rng_start[5] = (0xFCCE4000 & 0x3FFFFFF) - 1; 12893 - lbw_rng_end[5] = (0xFCD1FFFF & 0x3FFFFFF) + 1; 12867 + lbw_rng_start[5] = (0xFCD24000 & 0x3FFFFFF) - 1; /* 0x00D23FFF */ 12868 + lbw_rng_end[5] = (0xFCD5FFFF & 0x3FFFFFF) + 1; /* 
0x00D60000 */ 12894 12869 12895 - lbw_rng_start[6] = (0xFCD24000 & 0x3FFFFFF) - 1; 12896 - lbw_rng_end[6] = (0xFCD5FFFF & 0x3FFFFFF) + 1; 12870 + lbw_rng_start[6] = (0xFCD64000 & 0x3FFFFFF) - 1; /* 0x00D63FFF */ 12871 + lbw_rng_end[6] = (0xFCD9FFFF & 0x3FFFFFF) + 1; /* 0x00DA0000 */ 12897 12872 12898 - lbw_rng_start[7] = (0xFCD64000 & 0x3FFFFFF) - 1; 12899 - lbw_rng_end[7] = (0xFCD9FFFF & 0x3FFFFFF) + 1; 12873 + lbw_rng_start[7] = (0xFCDA4000 & 0x3FFFFFF) - 1; /* 0x00DA3FFF */ 12874 + lbw_rng_end[7] = (0xFCDDFFFF & 0x3FFFFFF) + 1; /* 0x00DE0000 */ 12900 12875 12901 - lbw_rng_start[8] = (0xFCDA4000 & 0x3FFFFFF) - 1; 12902 - lbw_rng_end[8] = (0xFCDDFFFF & 0x3FFFFFF) + 1; 12876 + lbw_rng_start[8] = (0xFCDE4000 & 0x3FFFFFF) - 1; /* 0x00DE3FFF */ 12877 + lbw_rng_end[8] = (0xFCE05FFF & 0x3FFFFFF) + 1; /* 0x00E06000 */ 12903 12878 12904 - lbw_rng_start[9] = (0xFCDE4000 & 0x3FFFFFF) - 1; 12905 - lbw_rng_end[9] = (0xFCE05FFF & 0x3FFFFFF) + 1; 12879 + lbw_rng_start[9] = (0xFCFC9000 & 0x3FFFFFF) - 1; /* 0x00FC8FFF */ 12880 + lbw_rng_end[9] = (0xFFFFFFFE & 0x3FFFFFF) + 1; /* 0x03FFFFFF */ 12906 12881 12907 - lbw_rng_start[10] = (0xFEC43000 & 0x3FFFFFF) - 1; 12908 - lbw_rng_end[10] = (0xFEC43FFF & 0x3FFFFFF) + 1; 12909 - 12910 - lbw_rng_start[11] = (0xFE484000 & 0x3FFFFFF) - 1; 12911 - lbw_rng_end[11] = (0xFE484FFF & 0x3FFFFFF) + 1; 12912 - 12913 - for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) { 12882 + for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) { 12914 12883 WREG32(gaudi_rr_lbw_hit_aw_regs[i], 12915 12884 (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1); 12916 12885 WREG32(gaudi_rr_lbw_hit_ar_regs[i], 12917 12886 (1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1); 12918 12887 } 12919 12888 12920 - for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) 12889 + for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) 12921 12890 for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) { 12922 12891 WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2), 12923 12892 lbw_rng_start[j]); ··· 12958 12939 * 6th range is 
the host 12959 12940 */ 12960 12941 12961 - for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) { 12942 + for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) { 12962 12943 WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F); 12963 12944 WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D); 12964 12945 } 12965 12946 12966 - for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) { 12947 + for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) { 12967 12948 WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo); 12968 12949 WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo); 12969 12950
+2
drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
··· 308 308 #define mmPCIE_AUX_FLR_CTRL 0xC07394 309 309 #define mmPCIE_AUX_DBI 0xC07490 310 310 311 + #define mmPCIE_CORE_MSI_REQ 0xC04100 312 + 311 313 #define mmPSOC_PCI_PLL_NR 0xC72100 312 314 #define mmSRAM_W_PLL_NR 0x4C8100 313 315 #define mmPSOC_HBM_PLL_NR 0xC74100
+12 -3
drivers/mmc/host/dw_mmc.c
··· 1802 1802 1803 1803 spin_lock_irqsave(&host->irq_lock, flags); 1804 1804 1805 - if (!host->data_status) 1805 + /* 1806 + * Only inject an error if we haven't already got an error or data over 1807 + * interrupt. 1808 + */ 1809 + if (!host->data_status) { 1806 1810 host->data_status = SDMMC_INT_DCRC; 1807 - set_bit(EVENT_DATA_ERROR, &host->pending_events); 1808 - tasklet_schedule(&host->tasklet); 1811 + set_bit(EVENT_DATA_ERROR, &host->pending_events); 1812 + tasklet_schedule(&host->tasklet); 1813 + } 1809 1814 1810 1815 spin_unlock_irqrestore(&host->irq_lock, flags); 1811 1816 ··· 2726 2721 } 2727 2722 2728 2723 if (pending & DW_MCI_DATA_ERROR_FLAGS) { 2724 + spin_lock(&host->irq_lock); 2725 + 2729 2726 /* if there is an error report DATA_ERROR */ 2730 2727 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); 2731 2728 host->data_status = pending; 2732 2729 smp_wmb(); /* drain writebuffer */ 2733 2730 set_bit(EVENT_DATA_ERROR, &host->pending_events); 2734 2731 tasklet_schedule(&host->tasklet); 2732 + 2733 + spin_unlock(&host->irq_lock); 2735 2734 } 2736 2735 2737 2736 if (pending & SDMMC_INT_DATA_OVER) {
+2
drivers/mmc/host/renesas_sdhi_core.c
··· 561 561 /* Unknown why but without polling reset status, it will hang */ 562 562 read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100, 563 563 false, priv->rstc); 564 + /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */ 565 + sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); 564 566 priv->needs_adjust_hs400 = false; 565 567 renesas_sdhi_set_clock(host, host->clk_cache); 566 568 } else if (priv->scc_ctl) {
+19 -2
drivers/net/dsa/b53/b53_mdio.c
··· 351 351 static void b53_mdio_remove(struct mdio_device *mdiodev) 352 352 { 353 353 struct b53_device *dev = dev_get_drvdata(&mdiodev->dev); 354 - struct dsa_switch *ds = dev->ds; 355 354 356 - dsa_unregister_switch(ds); 355 + if (!dev) 356 + return; 357 + 358 + b53_switch_remove(dev); 359 + 360 + dev_set_drvdata(&mdiodev->dev, NULL); 361 + } 362 + 363 + static void b53_mdio_shutdown(struct mdio_device *mdiodev) 364 + { 365 + struct b53_device *dev = dev_get_drvdata(&mdiodev->dev); 366 + 367 + if (!dev) 368 + return; 369 + 370 + b53_switch_shutdown(dev); 371 + 372 + dev_set_drvdata(&mdiodev->dev, NULL); 357 373 } 358 374 359 375 static const struct of_device_id b53_of_match[] = { ··· 389 373 static struct mdio_driver b53_mdio_driver = { 390 374 .probe = b53_mdio_probe, 391 375 .remove = b53_mdio_remove, 376 + .shutdown = b53_mdio_shutdown, 392 377 .mdiodrv.driver = { 393 378 .name = "bcm53xx", 394 379 .of_match_table = b53_of_match,
+13
drivers/net/dsa/b53/b53_mmap.c
··· 316 316 if (dev) 317 317 b53_switch_remove(dev); 318 318 319 + platform_set_drvdata(pdev, NULL); 320 + 319 321 return 0; 322 + } 323 + 324 + static void b53_mmap_shutdown(struct platform_device *pdev) 325 + { 326 + struct b53_device *dev = platform_get_drvdata(pdev); 327 + 328 + if (dev) 329 + b53_switch_shutdown(dev); 330 + 331 + platform_set_drvdata(pdev, NULL); 320 332 } 321 333 322 334 static const struct of_device_id b53_mmap_of_table[] = { ··· 343 331 static struct platform_driver b53_mmap_driver = { 344 332 .probe = b53_mmap_probe, 345 333 .remove = b53_mmap_remove, 334 + .shutdown = b53_mmap_shutdown, 346 335 .driver = { 347 336 .name = "b53-switch", 348 337 .of_match_table = b53_mmap_of_table,
+5
drivers/net/dsa/b53/b53_priv.h
··· 228 228 dsa_unregister_switch(dev->ds); 229 229 } 230 230 231 + static inline void b53_switch_shutdown(struct b53_device *dev) 232 + { 233 + dsa_switch_shutdown(dev->ds); 234 + } 235 + 231 236 #define b53_build_op(type_op_size, val_type) \ 232 237 static inline int b53_##type_op_size(struct b53_device *dev, u8 page, \ 233 238 u8 reg, val_type val) \
+13
drivers/net/dsa/b53/b53_spi.c
··· 321 321 if (dev) 322 322 b53_switch_remove(dev); 323 323 324 + spi_set_drvdata(spi, NULL); 325 + 324 326 return 0; 327 + } 328 + 329 + static void b53_spi_shutdown(struct spi_device *spi) 330 + { 331 + struct b53_device *dev = spi_get_drvdata(spi); 332 + 333 + if (dev) 334 + b53_switch_shutdown(dev); 335 + 336 + spi_set_drvdata(spi, NULL); 325 337 } 326 338 327 339 static const struct of_device_id b53_spi_of_match[] = { ··· 356 344 }, 357 345 .probe = b53_spi_probe, 358 346 .remove = b53_spi_remove, 347 + .shutdown = b53_spi_shutdown, 359 348 }; 360 349 361 350 module_spi_driver(b53_spi_driver);
+19 -2
drivers/net/dsa/b53/b53_srab.c
··· 629 629 static int b53_srab_remove(struct platform_device *pdev) 630 630 { 631 631 struct b53_device *dev = platform_get_drvdata(pdev); 632 - struct b53_srab_priv *priv = dev->priv; 633 632 634 - b53_srab_intr_set(priv, false); 633 + if (!dev) 634 + return 0; 635 + 636 + b53_srab_intr_set(dev->priv, false); 635 637 b53_switch_remove(dev); 636 638 639 + platform_set_drvdata(pdev, NULL); 640 + 637 641 return 0; 642 + } 643 + 644 + static void b53_srab_shutdown(struct platform_device *pdev) 645 + { 646 + struct b53_device *dev = platform_get_drvdata(pdev); 647 + 648 + if (!dev) 649 + return; 650 + 651 + b53_switch_shutdown(dev); 652 + 653 + platform_set_drvdata(pdev, NULL); 638 654 } 639 655 640 656 static struct platform_driver b53_srab_driver = { 641 657 .probe = b53_srab_probe, 642 658 .remove = b53_srab_remove, 659 + .shutdown = b53_srab_shutdown, 643 660 .driver = { 644 661 .name = "b53-srab-switch", 645 662 .of_match_table = b53_srab_of_match,
+13 -1
drivers/net/dsa/bcm_sf2.c
··· 68 68 struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); 69 69 unsigned int port, count = 0; 70 70 71 - for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) { 71 + for (port = 0; port < ds->num_ports; port++) { 72 72 if (dsa_is_cpu_port(ds, port)) 73 73 continue; 74 74 if (priv->port_sts[port].enabled) ··· 1512 1512 { 1513 1513 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); 1514 1514 1515 + if (!priv) 1516 + return 0; 1517 + 1515 1518 priv->wol_ports_mask = 0; 1516 1519 /* Disable interrupts */ 1517 1520 bcm_sf2_intr_disable(priv); ··· 1526 1523 if (priv->type == BCM7278_DEVICE_ID) 1527 1524 reset_control_assert(priv->rcdev); 1528 1525 1526 + platform_set_drvdata(pdev, NULL); 1527 + 1529 1528 return 0; 1530 1529 } 1531 1530 1532 1531 static void bcm_sf2_sw_shutdown(struct platform_device *pdev) 1533 1532 { 1534 1533 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); 1534 + 1535 + if (!priv) 1536 + return; 1535 1537 1536 1538 /* For a kernel about to be kexec'd we want to keep the GPHY on for a 1537 1539 * successful MDIO bus scan to occur. If we did turn off the GPHY ··· 1546 1538 */ 1547 1539 if (priv->hw_params.num_gphy == 1) 1548 1540 bcm_sf2_gphy_enable_set(priv->dev->ds, true); 1541 + 1542 + dsa_switch_shutdown(priv->dev->ds); 1543 + 1544 + platform_set_drvdata(pdev, NULL); 1549 1545 } 1550 1546 1551 1547 #ifdef CONFIG_PM_SLEEP
+21 -1
drivers/net/dsa/dsa_loop.c
··· 340 340 static void dsa_loop_drv_remove(struct mdio_device *mdiodev) 341 341 { 342 342 struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); 343 - struct dsa_loop_priv *ps = ds->priv; 343 + struct dsa_loop_priv *ps; 344 + 345 + if (!ds) 346 + return; 347 + 348 + ps = ds->priv; 344 349 345 350 dsa_unregister_switch(ds); 346 351 dev_put(ps->netdev); 352 + 353 + dev_set_drvdata(&mdiodev->dev, NULL); 354 + } 355 + 356 + static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev) 357 + { 358 + struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); 359 + 360 + if (!ds) 361 + return; 362 + 363 + dsa_switch_shutdown(ds); 364 + 365 + dev_set_drvdata(&mdiodev->dev, NULL); 347 366 } 348 367 349 368 static struct mdio_driver dsa_loop_drv = { ··· 371 352 }, 372 353 .probe = dsa_loop_drv_probe, 373 354 .remove = dsa_loop_drv_remove, 355 + .shutdown = dsa_loop_drv_shutdown, 374 356 }; 375 357 376 358 #define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+16
drivers/net/dsa/hirschmann/hellcreek.c
··· 1916 1916 { 1917 1917 struct hellcreek *hellcreek = platform_get_drvdata(pdev); 1918 1918 1919 + if (!hellcreek) 1920 + return 0; 1921 + 1919 1922 hellcreek_hwtstamp_free(hellcreek); 1920 1923 hellcreek_ptp_free(hellcreek); 1921 1924 dsa_unregister_switch(hellcreek->ds); 1922 1925 platform_set_drvdata(pdev, NULL); 1923 1926 1924 1927 return 0; 1928 + } 1929 + 1930 + static void hellcreek_shutdown(struct platform_device *pdev) 1931 + { 1932 + struct hellcreek *hellcreek = platform_get_drvdata(pdev); 1933 + 1934 + if (!hellcreek) 1935 + return; 1936 + 1937 + dsa_switch_shutdown(hellcreek->ds); 1938 + 1939 + platform_set_drvdata(pdev, NULL); 1925 1940 } 1926 1941 1927 1942 static const struct hellcreek_platform_data de1soc_r1_pdata = { ··· 1961 1946 static struct platform_driver hellcreek_driver = { 1962 1947 .probe = hellcreek_probe, 1963 1948 .remove = hellcreek_remove, 1949 + .shutdown = hellcreek_shutdown, 1964 1950 .driver = { 1965 1951 .name = "hellcreek", 1966 1952 .of_match_table = hellcreek_of_match,
+6
drivers/net/dsa/lan9303-core.c
··· 1379 1379 } 1380 1380 EXPORT_SYMBOL(lan9303_remove); 1381 1381 1382 + void lan9303_shutdown(struct lan9303 *chip) 1383 + { 1384 + dsa_switch_shutdown(chip->ds); 1385 + } 1386 + EXPORT_SYMBOL(lan9303_shutdown); 1387 + 1382 1388 MODULE_AUTHOR("Juergen Borleis <kernel@pengutronix.de>"); 1383 1389 MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch"); 1384 1390 MODULE_LICENSE("GPL v2");
+1
drivers/net/dsa/lan9303.h
··· 10 10 11 11 int lan9303_probe(struct lan9303 *chip, struct device_node *np); 12 12 int lan9303_remove(struct lan9303 *chip); 13 + void lan9303_shutdown(struct lan9303 *chip);
+20 -4
drivers/net/dsa/lan9303_i2c.c
··· 67 67 68 68 static int lan9303_i2c_remove(struct i2c_client *client) 69 69 { 70 - struct lan9303_i2c *sw_dev; 70 + struct lan9303_i2c *sw_dev = i2c_get_clientdata(client); 71 71 72 - sw_dev = i2c_get_clientdata(client); 73 72 if (!sw_dev) 74 - return -ENODEV; 73 + return 0; 75 74 76 - return lan9303_remove(&sw_dev->chip); 75 + lan9303_remove(&sw_dev->chip); 76 + 77 + i2c_set_clientdata(client, NULL); 78 + 79 + return 0; 80 + } 81 + 82 + static void lan9303_i2c_shutdown(struct i2c_client *client) 83 + { 84 + struct lan9303_i2c *sw_dev = i2c_get_clientdata(client); 85 + 86 + if (!sw_dev) 87 + return; 88 + 89 + lan9303_shutdown(&sw_dev->chip); 90 + 91 + i2c_set_clientdata(client, NULL); 77 92 } 78 93 79 94 /*-------------------------------------------------------------------------*/ ··· 112 97 }, 113 98 .probe = lan9303_i2c_probe, 114 99 .remove = lan9303_i2c_remove, 100 + .shutdown = lan9303_i2c_shutdown, 115 101 .id_table = lan9303_i2c_id, 116 102 }; 117 103 module_i2c_driver(lan9303_i2c_driver);
+15
drivers/net/dsa/lan9303_mdio.c
··· 138 138 return; 139 139 140 140 lan9303_remove(&sw_dev->chip); 141 + 142 + dev_set_drvdata(&mdiodev->dev, NULL); 143 + } 144 + 145 + static void lan9303_mdio_shutdown(struct mdio_device *mdiodev) 146 + { 147 + struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev); 148 + 149 + if (!sw_dev) 150 + return; 151 + 152 + lan9303_shutdown(&sw_dev->chip); 153 + 154 + dev_set_drvdata(&mdiodev->dev, NULL); 141 155 } 142 156 143 157 /*-------------------------------------------------------------------------*/ ··· 169 155 }, 170 156 .probe = lan9303_mdio_probe, 171 157 .remove = lan9303_mdio_remove, 158 + .shutdown = lan9303_mdio_shutdown, 172 159 }; 173 160 mdio_module_driver(lan9303_mdio_driver); 174 161
+18
drivers/net/dsa/lantiq_gswip.c
··· 2184 2184 struct gswip_priv *priv = platform_get_drvdata(pdev); 2185 2185 int i; 2186 2186 2187 + if (!priv) 2188 + return 0; 2189 + 2187 2190 /* disable the switch */ 2188 2191 gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); 2189 2192 ··· 2200 2197 for (i = 0; i < priv->num_gphy_fw; i++) 2201 2198 gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); 2202 2199 2200 + platform_set_drvdata(pdev, NULL); 2201 + 2203 2202 return 0; 2203 + } 2204 + 2205 + static void gswip_shutdown(struct platform_device *pdev) 2206 + { 2207 + struct gswip_priv *priv = platform_get_drvdata(pdev); 2208 + 2209 + if (!priv) 2210 + return; 2211 + 2212 + dsa_switch_shutdown(priv->ds); 2213 + 2214 + platform_set_drvdata(pdev, NULL); 2204 2215 } 2205 2216 2206 2217 static const struct gswip_hw_info gswip_xrx200 = { ··· 2240 2223 static struct platform_driver gswip_driver = { 2241 2224 .probe = gswip_probe, 2242 2225 .remove = gswip_remove, 2226 + .shutdown = gswip_shutdown, 2243 2227 .driver = { 2244 2228 .name = "gswip", 2245 2229 .of_match_table = gswip_of_match,
+10 -1
drivers/net/dsa/microchip/ksz8795_spi.c
··· 94 94 if (dev) 95 95 ksz_switch_remove(dev); 96 96 97 + spi_set_drvdata(spi, NULL); 98 + 97 99 return 0; 98 100 } 99 101 ··· 103 101 { 104 102 struct ksz_device *dev = spi_get_drvdata(spi); 105 103 106 - if (dev && dev->dev_ops->shutdown) 104 + if (!dev) 105 + return; 106 + 107 + if (dev->dev_ops->shutdown) 107 108 dev->dev_ops->shutdown(dev); 109 + 110 + dsa_switch_shutdown(dev->ds); 111 + 112 + spi_set_drvdata(spi, NULL); 108 113 } 109 114 110 115 static const struct of_device_id ksz8795_dt_ids[] = {
+13
drivers/net/dsa/microchip/ksz8863_smi.c
··· 191 191 192 192 if (dev) 193 193 ksz_switch_remove(dev); 194 + 195 + dev_set_drvdata(&mdiodev->dev, NULL); 196 + } 197 + 198 + static void ksz8863_smi_shutdown(struct mdio_device *mdiodev) 199 + { 200 + struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev); 201 + 202 + if (dev) 203 + dsa_switch_shutdown(dev->ds); 204 + 205 + dev_set_drvdata(&mdiodev->dev, NULL); 194 206 } 195 207 196 208 static const struct of_device_id ksz8863_dt_ids[] = { ··· 215 203 static struct mdio_driver ksz8863_driver = { 216 204 .probe = ksz8863_smi_probe, 217 205 .remove = ksz8863_smi_remove, 206 + .shutdown = ksz8863_smi_shutdown, 218 207 .mdiodrv.driver = { 219 208 .name = "ksz8863-switch", 220 209 .of_match_table = ksz8863_dt_ids,
+12 -2
drivers/net/dsa/microchip/ksz9477_i2c.c
··· 56 56 { 57 57 struct ksz_device *dev = i2c_get_clientdata(i2c); 58 58 59 - ksz_switch_remove(dev); 59 + if (dev) 60 + ksz_switch_remove(dev); 61 + 62 + i2c_set_clientdata(i2c, NULL); 60 63 61 64 return 0; 62 65 } ··· 68 65 { 69 66 struct ksz_device *dev = i2c_get_clientdata(i2c); 70 67 71 - if (dev && dev->dev_ops->shutdown) 68 + if (!dev) 69 + return; 70 + 71 + if (dev->dev_ops->shutdown) 72 72 dev->dev_ops->shutdown(dev); 73 + 74 + dsa_switch_shutdown(dev->ds); 75 + 76 + i2c_set_clientdata(i2c, NULL); 73 77 } 74 78 75 79 static const struct i2c_device_id ksz9477_i2c_id[] = {
+6 -2
drivers/net/dsa/microchip/ksz9477_spi.c
··· 72 72 if (dev) 73 73 ksz_switch_remove(dev); 74 74 75 + spi_set_drvdata(spi, NULL); 76 + 75 77 return 0; 76 78 } 77 79 ··· 81 79 { 82 80 struct ksz_device *dev = spi_get_drvdata(spi); 83 81 84 - if (dev && dev->dev_ops->shutdown) 85 - dev->dev_ops->shutdown(dev); 82 + if (dev) 83 + dsa_switch_shutdown(dev->ds); 84 + 85 + spi_set_drvdata(spi, NULL); 86 86 } 87 87 88 88 static const struct of_device_id ksz9477_dt_ids[] = {
+18
drivers/net/dsa/mt7530.c
··· 3286 3286 struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev); 3287 3287 int ret = 0; 3288 3288 3289 + if (!priv) 3290 + return; 3291 + 3289 3292 ret = regulator_disable(priv->core_pwr); 3290 3293 if (ret < 0) 3291 3294 dev_err(priv->dev, ··· 3304 3301 3305 3302 dsa_unregister_switch(priv->ds); 3306 3303 mutex_destroy(&priv->reg_mutex); 3304 + 3305 + dev_set_drvdata(&mdiodev->dev, NULL); 3306 + } 3307 + 3308 + static void mt7530_shutdown(struct mdio_device *mdiodev) 3309 + { 3310 + struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev); 3311 + 3312 + if (!priv) 3313 + return; 3314 + 3315 + dsa_switch_shutdown(priv->ds); 3316 + 3317 + dev_set_drvdata(&mdiodev->dev, NULL); 3307 3318 } 3308 3319 3309 3320 static struct mdio_driver mt7530_mdio_driver = { 3310 3321 .probe = mt7530_probe, 3311 3322 .remove = mt7530_remove, 3323 + .shutdown = mt7530_shutdown, 3312 3324 .mdiodrv.driver = { 3313 3325 .name = "mt7530", 3314 3326 .of_match_table = mt7530_of_match,
+18
drivers/net/dsa/mv88e6060.c
··· 290 290 { 291 291 struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); 292 292 293 + if (!ds) 294 + return; 295 + 293 296 dsa_unregister_switch(ds); 297 + 298 + dev_set_drvdata(&mdiodev->dev, NULL); 299 + } 300 + 301 + static void mv88e6060_shutdown(struct mdio_device *mdiodev) 302 + { 303 + struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); 304 + 305 + if (!ds) 306 + return; 307 + 308 + dsa_switch_shutdown(ds); 309 + 310 + dev_set_drvdata(&mdiodev->dev, NULL); 294 311 } 295 312 296 313 static const struct of_device_id mv88e6060_of_match[] = { ··· 320 303 static struct mdio_driver mv88e6060_driver = { 321 304 .probe = mv88e6060_probe, 322 305 .remove = mv88e6060_remove, 306 + .shutdown = mv88e6060_shutdown, 323 307 .mdiodrv.driver = { 324 308 .name = "mv88e6060", 325 309 .of_match_table = mv88e6060_of_match,
+45 -10
drivers/net/dsa/mv88e6xxx/chip.c
··· 2834 2834 if (err) 2835 2835 return err; 2836 2836 2837 - /* Port Control 2: don't force a good FCS, set the maximum frame size to 2838 - * 10240 bytes, disable 802.1q tags checking, don't discard tagged or 2837 + /* Port Control 2: don't force a good FCS, set the MTU size to 2838 + * 10222 bytes, disable 802.1q tags checking, don't discard tagged or 2839 2839 * untagged frames on this port, do a destination address lookup on all 2840 2840 * received packets as usual, disable ARP mirroring and don't send a 2841 2841 * copy of all transmitted/received frames on this port to the CPU. ··· 2854 2854 return err; 2855 2855 2856 2856 if (chip->info->ops->port_set_jumbo_size) { 2857 - err = chip->info->ops->port_set_jumbo_size(chip, port, 10240); 2857 + err = chip->info->ops->port_set_jumbo_size(chip, port, 10218); 2858 2858 if (err) 2859 2859 return err; 2860 2860 } ··· 2944 2944 struct mv88e6xxx_chip *chip = ds->priv; 2945 2945 2946 2946 if (chip->info->ops->port_set_jumbo_size) 2947 - return 10240; 2947 + return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; 2948 2948 else if (chip->info->ops->set_max_frame_size) 2949 - return 1632; 2950 - return 1522; 2949 + return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; 2950 + return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; 2951 2951 } 2952 2952 2953 2953 static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) 2954 2954 { 2955 2955 struct mv88e6xxx_chip *chip = ds->priv; 2956 2956 int ret = 0; 2957 + 2958 + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) 2959 + new_mtu += EDSA_HLEN; 2957 2960 2958 2961 mv88e6xxx_reg_lock(chip); 2959 2962 if (chip->info->ops->port_set_jumbo_size) ··· 3074 3071 { 3075 3072 mv88e6xxx_teardown_devlink_params(ds); 3076 3073 dsa_devlink_resources_unregister(ds); 3077 - mv88e6xxx_teardown_devlink_regions(ds); 3074 + mv88e6xxx_teardown_devlink_regions_global(ds); 3078 3075 } 3079 3076 3080 3077 static int mv88e6xxx_setup(struct dsa_switch *ds) ··· 3218 
3215 if (err) 3219 3216 goto out_resources; 3220 3217 3221 - err = mv88e6xxx_setup_devlink_regions(ds); 3218 + err = mv88e6xxx_setup_devlink_regions_global(ds); 3222 3219 if (err) 3223 3220 goto out_params; 3224 3221 ··· 3230 3227 dsa_devlink_resources_unregister(ds); 3231 3228 3232 3229 return err; 3230 + } 3231 + 3232 + static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port) 3233 + { 3234 + return mv88e6xxx_setup_devlink_regions_port(ds, port); 3235 + } 3236 + 3237 + static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port) 3238 + { 3239 + mv88e6xxx_teardown_devlink_regions_port(ds, port); 3233 3240 } 3234 3241 3235 3242 /* prod_id for switch families which do not have a PHY model number */ ··· 3728 3715 .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, 3729 3716 .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, 3730 3717 .port_set_ether_type = mv88e6351_port_set_ether_type, 3731 - .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, 3732 3718 .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, 3733 3719 .port_pause_limit = mv88e6097_port_pause_limit, 3734 3720 .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, ··· 3752 3740 .avb_ops = &mv88e6165_avb_ops, 3753 3741 .ptp_ops = &mv88e6165_ptp_ops, 3754 3742 .phylink_validate = mv88e6185_phylink_validate, 3743 + .set_max_frame_size = mv88e6185_g1_set_max_frame_size, 3755 3744 }; 3756 3745 3757 3746 static const struct mv88e6xxx_ops mv88e6165_ops = { ··· 6129 6116 .change_tag_protocol = mv88e6xxx_change_tag_protocol, 6130 6117 .setup = mv88e6xxx_setup, 6131 6118 .teardown = mv88e6xxx_teardown, 6119 + .port_setup = mv88e6xxx_port_setup, 6120 + .port_teardown = mv88e6xxx_port_teardown, 6132 6121 .phylink_validate = mv88e6xxx_validate, 6133 6122 .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state, 6134 6123 .phylink_mac_config = mv88e6xxx_mac_config, ··· 6404 6389 static void mv88e6xxx_remove(struct mdio_device *mdiodev) 6405 6390 { 6406 6391 struct 
dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); 6407 - struct mv88e6xxx_chip *chip = ds->priv; 6392 + struct mv88e6xxx_chip *chip; 6393 + 6394 + if (!ds) 6395 + return; 6396 + 6397 + chip = ds->priv; 6408 6398 6409 6399 if (chip->info->ptp_support) { 6410 6400 mv88e6xxx_hwtstamp_free(chip); ··· 6430 6410 mv88e6xxx_g1_irq_free(chip); 6431 6411 else 6432 6412 mv88e6xxx_irq_poll_free(chip); 6413 + 6414 + dev_set_drvdata(&mdiodev->dev, NULL); 6415 + } 6416 + 6417 + static void mv88e6xxx_shutdown(struct mdio_device *mdiodev) 6418 + { 6419 + struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); 6420 + 6421 + if (!ds) 6422 + return; 6423 + 6424 + dsa_switch_shutdown(ds); 6425 + 6426 + dev_set_drvdata(&mdiodev->dev, NULL); 6433 6427 } 6434 6428 6435 6429 static const struct of_device_id mv88e6xxx_of_match[] = { ··· 6467 6433 static struct mdio_driver mv88e6xxx_driver = { 6468 6434 .probe = mv88e6xxx_probe, 6469 6435 .remove = mv88e6xxx_remove, 6436 + .shutdown = mv88e6xxx_shutdown, 6470 6437 .mdiodrv.driver = { 6471 6438 .name = "mv88e6085", 6472 6439 .of_match_table = mv88e6xxx_of_match,
+1
drivers/net/dsa/mv88e6xxx/chip.h
··· 18 18 #include <linux/timecounter.h> 19 19 #include <net/dsa.h> 20 20 21 + #define EDSA_HLEN 8 21 22 #define MV88E6XXX_N_FID 4096 22 23 23 24 /* PVT limits for 4-bit port and 5-bit switch */
+9 -64
drivers/net/dsa/mv88e6xxx/devlink.c
··· 647 647 }, 648 648 }; 649 649 650 - static void 651 - mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip) 650 + void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds) 652 651 { 652 + struct mv88e6xxx_chip *chip = ds->priv; 653 653 int i; 654 654 655 655 for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) 656 656 dsa_devlink_region_destroy(chip->regions[i]); 657 657 } 658 658 659 - static void 660 - mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip, 661 - int port) 659 + void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port) 662 660 { 661 + struct mv88e6xxx_chip *chip = ds->priv; 662 + 663 663 dsa_devlink_region_destroy(chip->ports[port].region); 664 664 } 665 665 666 - static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, 667 - struct mv88e6xxx_chip *chip, 668 - int port) 666 + int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port) 669 667 { 668 + struct mv88e6xxx_chip *chip = ds->priv; 670 669 struct devlink_region *region; 671 670 672 671 region = dsa_devlink_port_region_create(ds, ··· 680 681 return 0; 681 682 } 682 683 683 - static void 684 - mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip) 685 - { 686 - int port; 687 - 688 - for (port = 0; port < mv88e6xxx_num_ports(chip); port++) 689 - mv88e6xxx_teardown_devlink_regions_port(chip, port); 690 - } 691 - 692 - static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds, 693 - struct mv88e6xxx_chip *chip) 694 - { 695 - int port; 696 - int err; 697 - 698 - for (port = 0; port < mv88e6xxx_num_ports(chip); port++) { 699 - err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port); 700 - if (err) 701 - goto out; 702 - } 703 - 704 - return 0; 705 - 706 - out: 707 - while (port-- > 0) 708 - mv88e6xxx_teardown_devlink_regions_port(chip, port); 709 - 710 - return err; 711 - } 712 - 713 - static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds, 714 - struct 
mv88e6xxx_chip *chip) 684 + int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds) 715 685 { 716 686 bool (*cond)(struct mv88e6xxx_chip *chip); 687 + struct mv88e6xxx_chip *chip = ds->priv; 717 688 struct devlink_region_ops *ops; 718 689 struct devlink_region *region; 719 690 u64 size; ··· 720 751 dsa_devlink_region_destroy(chip->regions[j]); 721 752 722 753 return PTR_ERR(region); 723 - } 724 - 725 - int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds) 726 - { 727 - struct mv88e6xxx_chip *chip = ds->priv; 728 - int err; 729 - 730 - err = mv88e6xxx_setup_devlink_regions_global(ds, chip); 731 - if (err) 732 - return err; 733 - 734 - err = mv88e6xxx_setup_devlink_regions_ports(ds, chip); 735 - if (err) 736 - mv88e6xxx_teardown_devlink_regions_global(chip); 737 - 738 - return err; 739 - } 740 - 741 - void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds) 742 - { 743 - struct mv88e6xxx_chip *chip = ds->priv; 744 - 745 - mv88e6xxx_teardown_devlink_regions_ports(chip); 746 - mv88e6xxx_teardown_devlink_regions_global(chip); 747 754 } 748 755 749 756 int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+4 -2
drivers/net/dsa/mv88e6xxx/devlink.h
··· 12 12 struct devlink_param_gset_ctx *ctx); 13 13 int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id, 14 14 struct devlink_param_gset_ctx *ctx); 15 - int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds); 16 - void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds); 15 + int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds); 16 + void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds); 17 + int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port); 18 + void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port); 17 19 18 20 int mv88e6xxx_devlink_info_get(struct dsa_switch *ds, 19 21 struct devlink_info_req *req,
+2
drivers/net/dsa/mv88e6xxx/global1.c
··· 232 232 u16 val; 233 233 int err; 234 234 235 + mtu += ETH_HLEN + ETH_FCS_LEN; 236 + 235 237 err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val); 236 238 if (err) 237 239 return err;
+2
drivers/net/dsa/mv88e6xxx/port.c
··· 1277 1277 u16 reg; 1278 1278 int err; 1279 1279 1280 + size += VLAN_ETH_HLEN + ETH_FCS_LEN; 1281 + 1280 1282 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg); 1281 1283 if (err) 1282 1284 return err;
+1 -1
drivers/net/dsa/ocelot/felix.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright 2019-2021 NXP Semiconductors 2 + /* Copyright 2019-2021 NXP 3 3 * 4 4 * This is an umbrella module for all network switches that are 5 5 * register-compatible with Ocelot and that perform I/O to their host CPU
+1 -1
drivers/net/dsa/ocelot/felix.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* Copyright 2019 NXP Semiconductors 2 + /* Copyright 2019 NXP 3 3 */ 4 4 #ifndef _MSCC_FELIX_H 5 5 #define _MSCC_FELIX_H
+19 -3
drivers/net/dsa/ocelot/felix_vsc9959.c
··· 1 1 // SPDX-License-Identifier: (GPL-2.0 OR MIT) 2 2 /* Copyright 2017 Microsemi Corporation 3 - * Copyright 2018-2019 NXP Semiconductors 3 + * Copyright 2018-2019 NXP 4 4 */ 5 5 #include <linux/fsl/enetc_mdio.h> 6 6 #include <soc/mscc/ocelot_qsys.h> ··· 1472 1472 1473 1473 static void felix_pci_remove(struct pci_dev *pdev) 1474 1474 { 1475 - struct felix *felix; 1475 + struct felix *felix = pci_get_drvdata(pdev); 1476 1476 1477 - felix = pci_get_drvdata(pdev); 1477 + if (!felix) 1478 + return; 1478 1479 1479 1480 dsa_unregister_switch(felix->ds); 1480 1481 ··· 1483 1482 kfree(felix); 1484 1483 1485 1484 pci_disable_device(pdev); 1485 + 1486 + pci_set_drvdata(pdev, NULL); 1487 + } 1488 + 1489 + static void felix_pci_shutdown(struct pci_dev *pdev) 1490 + { 1491 + struct felix *felix = pci_get_drvdata(pdev); 1492 + 1493 + if (!felix) 1494 + return; 1495 + 1496 + dsa_switch_shutdown(felix->ds); 1497 + 1498 + pci_set_drvdata(pdev, NULL); 1486 1499 } 1487 1500 1488 1501 static struct pci_device_id felix_ids[] = { ··· 1513 1498 .id_table = felix_ids, 1514 1499 .probe = felix_pci_probe, 1515 1500 .remove = felix_pci_remove, 1501 + .shutdown = felix_pci_shutdown, 1516 1502 }; 1517 1503 module_pci_driver(felix_vsc9959_pci_driver); 1518 1504
+18 -2
drivers/net/dsa/ocelot/seville_vsc9953.c
··· 1245 1245 1246 1246 static int seville_remove(struct platform_device *pdev) 1247 1247 { 1248 - struct felix *felix; 1248 + struct felix *felix = platform_get_drvdata(pdev); 1249 1249 1250 - felix = platform_get_drvdata(pdev); 1250 + if (!felix) 1251 + return 0; 1251 1252 1252 1253 dsa_unregister_switch(felix->ds); 1253 1254 1254 1255 kfree(felix->ds); 1255 1256 kfree(felix); 1256 1257 1258 + platform_set_drvdata(pdev, NULL); 1259 + 1257 1260 return 0; 1261 + } 1262 + 1263 + static void seville_shutdown(struct platform_device *pdev) 1264 + { 1265 + struct felix *felix = platform_get_drvdata(pdev); 1266 + 1267 + if (!felix) 1268 + return; 1269 + 1270 + dsa_switch_shutdown(felix->ds); 1271 + 1272 + platform_set_drvdata(pdev, NULL); 1258 1273 } 1259 1274 1260 1275 static const struct of_device_id seville_of_match[] = { ··· 1281 1266 static struct platform_driver seville_vsc9953_driver = { 1282 1267 .probe = seville_probe, 1283 1268 .remove = seville_remove, 1269 + .shutdown = seville_shutdown, 1284 1270 .driver = { 1285 1271 .name = "mscc_seville", 1286 1272 .of_match_table = of_match_ptr(seville_of_match),
+18
drivers/net/dsa/qca/ar9331.c
··· 1083 1083 struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev); 1084 1084 unsigned int i; 1085 1085 1086 + if (!priv) 1087 + return; 1088 + 1086 1089 for (i = 0; i < ARRAY_SIZE(priv->port); i++) { 1087 1090 struct ar9331_sw_port *port = &priv->port[i]; 1088 1091 ··· 1097 1094 dsa_unregister_switch(&priv->ds); 1098 1095 1099 1096 reset_control_assert(priv->sw_reset); 1097 + 1098 + dev_set_drvdata(&mdiodev->dev, NULL); 1099 + } 1100 + 1101 + static void ar9331_sw_shutdown(struct mdio_device *mdiodev) 1102 + { 1103 + struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev); 1104 + 1105 + if (!priv) 1106 + return; 1107 + 1108 + dsa_switch_shutdown(&priv->ds); 1109 + 1110 + dev_set_drvdata(&mdiodev->dev, NULL); 1100 1111 } 1101 1112 1102 1113 static const struct of_device_id ar9331_sw_of_match[] = { ··· 1121 1104 static struct mdio_driver ar9331_sw_mdio_driver = { 1122 1105 .probe = ar9331_sw_probe, 1123 1106 .remove = ar9331_sw_remove, 1107 + .shutdown = ar9331_sw_shutdown, 1124 1108 .mdiodrv.driver = { 1125 1109 .name = AR9331_SW_NAME, 1126 1110 .of_match_table = ar9331_sw_of_match,
+18
drivers/net/dsa/qca8k.c
··· 1880 1880 struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); 1881 1881 int i; 1882 1882 1883 + if (!priv) 1884 + return; 1885 + 1883 1886 for (i = 0; i < QCA8K_NUM_PORTS; i++) 1884 1887 qca8k_port_set_status(priv, i, 0); 1885 1888 1886 1889 dsa_unregister_switch(priv->ds); 1890 + 1891 + dev_set_drvdata(&mdiodev->dev, NULL); 1892 + } 1893 + 1894 + static void qca8k_sw_shutdown(struct mdio_device *mdiodev) 1895 + { 1896 + struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev); 1897 + 1898 + if (!priv) 1899 + return; 1900 + 1901 + dsa_switch_shutdown(priv->ds); 1902 + 1903 + dev_set_drvdata(&mdiodev->dev, NULL); 1887 1904 } 1888 1905 1889 1906 #ifdef CONFIG_PM_SLEEP ··· 1957 1940 static struct mdio_driver qca8kmdio_driver = { 1958 1941 .probe = qca8k_sw_probe, 1959 1942 .remove = qca8k_sw_remove, 1943 + .shutdown = qca8k_sw_shutdown, 1960 1944 .mdiodrv.driver = { 1961 1945 .name = "qca8k", 1962 1946 .of_match_table = qca8k_of_match,
+20 -2
drivers/net/dsa/realtek-smi-core.c
··· 368 368 smi->slave_mii_bus->parent = smi->dev; 369 369 smi->ds->slave_mii_bus = smi->slave_mii_bus; 370 370 371 - ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np); 371 + ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np); 372 372 if (ret) { 373 373 dev_err(smi->dev, "unable to register MDIO bus %s\n", 374 374 smi->slave_mii_bus->id); ··· 464 464 465 465 static int realtek_smi_remove(struct platform_device *pdev) 466 466 { 467 - struct realtek_smi *smi = dev_get_drvdata(&pdev->dev); 467 + struct realtek_smi *smi = platform_get_drvdata(pdev); 468 + 469 + if (!smi) 470 + return 0; 468 471 469 472 dsa_unregister_switch(smi->ds); 470 473 if (smi->slave_mii_bus) 471 474 of_node_put(smi->slave_mii_bus->dev.of_node); 472 475 gpiod_set_value(smi->reset, 1); 473 476 477 + platform_set_drvdata(pdev, NULL); 478 + 474 479 return 0; 480 + } 481 + 482 + static void realtek_smi_shutdown(struct platform_device *pdev) 483 + { 484 + struct realtek_smi *smi = platform_get_drvdata(pdev); 485 + 486 + if (!smi) 487 + return; 488 + 489 + dsa_switch_shutdown(smi->ds); 490 + 491 + platform_set_drvdata(pdev, NULL); 475 492 } 476 493 477 494 static const struct of_device_id realtek_smi_of_match[] = { ··· 512 495 }, 513 496 .probe = realtek_smi_probe, 514 497 .remove = realtek_smi_remove, 498 + .shutdown = realtek_smi_shutdown, 515 499 }; 516 500 module_platform_driver(realtek_smi_driver); 517 501
+1 -1
drivers/net/dsa/sja1105/sja1105_clocking.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause 2 - /* Copyright (c) 2016-2018, NXP Semiconductors 2 + /* Copyright 2016-2018 NXP 3 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 4 */ 5 5 #include <linux/packing.h>
+1 -1
drivers/net/dsa/sja1105/sja1105_devlink.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 3 - * Copyright 2020 NXP Semiconductors 3 + * Copyright 2020 NXP 4 4 */ 5 5 #include "sja1105.h" 6 6
+1 -1
drivers/net/dsa/sja1105/sja1105_flower.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright 2020, NXP Semiconductors 2 + /* Copyright 2020 NXP 3 3 */ 4 4 #include "sja1105.h" 5 5 #include "sja1105_vl.h"
+19 -2
drivers/net/dsa/sja1105/sja1105_main.c
··· 3335 3335 static int sja1105_remove(struct spi_device *spi) 3336 3336 { 3337 3337 struct sja1105_private *priv = spi_get_drvdata(spi); 3338 - struct dsa_switch *ds = priv->ds; 3339 3338 3340 - dsa_unregister_switch(ds); 3339 + if (!priv) 3340 + return 0; 3341 + 3342 + dsa_unregister_switch(priv->ds); 3343 + 3344 + spi_set_drvdata(spi, NULL); 3341 3345 3342 3346 return 0; 3347 + } 3348 + 3349 + static void sja1105_shutdown(struct spi_device *spi) 3350 + { 3351 + struct sja1105_private *priv = spi_get_drvdata(spi); 3352 + 3353 + if (!priv) 3354 + return; 3355 + 3356 + dsa_switch_shutdown(priv->ds); 3357 + 3358 + spi_set_drvdata(spi, NULL); 3343 3359 } 3344 3360 3345 3361 static const struct of_device_id sja1105_dt_ids[] = { ··· 3381 3365 }, 3382 3366 .probe = sja1105_probe, 3383 3367 .remove = sja1105_remove, 3368 + .shutdown = sja1105_shutdown, 3384 3369 }; 3385 3370 3386 3371 module_spi_driver(sja1105_driver);
+1 -1
drivers/net/dsa/sja1105/sja1105_mdio.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright 2021, NXP Semiconductors 2 + /* Copyright 2021 NXP 3 3 */ 4 4 #include <linux/pcs/pcs-xpcs.h> 5 5 #include <linux/of_mdio.h>
+1 -1
drivers/net/dsa/sja1105/sja1105_spi.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause 2 - /* Copyright (c) 2016-2018, NXP Semiconductors 2 + /* Copyright 2016-2018 NXP 3 3 * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH 4 4 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 5 5 */
+1 -1
drivers/net/dsa/sja1105/sja1105_static_config.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause 2 - /* Copyright (c) 2016-2018, NXP Semiconductors 2 + /* Copyright 2016-2018 NXP 3 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 4 */ 5 5 #include "sja1105_static_config.h"
+1 -1
drivers/net/dsa/sja1105/sja1105_static_config.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 - /* Copyright (c) 2016-2018, NXP Semiconductors 2 + /* Copyright 2016-2018 NXP 3 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 4 */ 5 5 #ifndef _SJA1105_STATIC_CONFIG_H
+1 -1
drivers/net/dsa/sja1105/sja1105_vl.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright 2020, NXP Semiconductors 2 + /* Copyright 2020 NXP 3 3 */ 4 4 #include <net/tc_act/tc_gate.h> 5 5 #include <linux/dsa/8021q.h>
+1 -1
drivers/net/dsa/sja1105/sja1105_vl.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* Copyright 2020, NXP Semiconductors 2 + /* Copyright 2020 NXP 3 3 */ 4 4 #ifndef _SJA1105_VL_H 5 5 #define _SJA1105_VL_H
+6
drivers/net/dsa/vitesse-vsc73xx-core.c
··· 1225 1225 } 1226 1226 EXPORT_SYMBOL(vsc73xx_remove); 1227 1227 1228 + void vsc73xx_shutdown(struct vsc73xx *vsc) 1229 + { 1230 + dsa_switch_shutdown(vsc->ds); 1231 + } 1232 + EXPORT_SYMBOL(vsc73xx_shutdown); 1233 + 1228 1234 MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); 1229 1235 MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver"); 1230 1236 MODULE_LICENSE("GPL v2");
+21 -1
drivers/net/dsa/vitesse-vsc73xx-platform.c
··· 116 116 { 117 117 struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev); 118 118 119 - return vsc73xx_remove(&vsc_platform->vsc); 119 + if (!vsc_platform) 120 + return 0; 121 + 122 + vsc73xx_remove(&vsc_platform->vsc); 123 + 124 + platform_set_drvdata(pdev, NULL); 125 + 126 + return 0; 127 + } 128 + 129 + static void vsc73xx_platform_shutdown(struct platform_device *pdev) 130 + { 131 + struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev); 132 + 133 + if (!vsc_platform) 134 + return; 135 + 136 + vsc73xx_shutdown(&vsc_platform->vsc); 137 + 138 + platform_set_drvdata(pdev, NULL); 120 139 } 121 140 122 141 static const struct vsc73xx_ops vsc73xx_platform_ops = { ··· 163 144 static struct platform_driver vsc73xx_platform_driver = { 164 145 .probe = vsc73xx_platform_probe, 165 146 .remove = vsc73xx_platform_remove, 147 + .shutdown = vsc73xx_platform_shutdown, 166 148 .driver = { 167 149 .name = "vsc73xx-platform", 168 150 .of_match_table = vsc73xx_of_match,
+21 -1
drivers/net/dsa/vitesse-vsc73xx-spi.c
··· 163 163 { 164 164 struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi); 165 165 166 - return vsc73xx_remove(&vsc_spi->vsc); 166 + if (!vsc_spi) 167 + return 0; 168 + 169 + vsc73xx_remove(&vsc_spi->vsc); 170 + 171 + spi_set_drvdata(spi, NULL); 172 + 173 + return 0; 174 + } 175 + 176 + static void vsc73xx_spi_shutdown(struct spi_device *spi) 177 + { 178 + struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi); 179 + 180 + if (!vsc_spi) 181 + return; 182 + 183 + vsc73xx_shutdown(&vsc_spi->vsc); 184 + 185 + spi_set_drvdata(spi, NULL); 167 186 } 168 187 169 188 static const struct vsc73xx_ops vsc73xx_spi_ops = { ··· 210 191 static struct spi_driver vsc73xx_spi_driver = { 211 192 .probe = vsc73xx_spi_probe, 212 193 .remove = vsc73xx_spi_remove, 194 + .shutdown = vsc73xx_spi_shutdown, 213 195 .driver = { 214 196 .name = "vsc73xx-spi", 215 197 .of_match_table = vsc73xx_of_match,
+1
drivers/net/dsa/vitesse-vsc73xx.h
··· 27 27 int vsc73xx_is_addr_valid(u8 block, u8 subblock); 28 28 int vsc73xx_probe(struct vsc73xx *vsc); 29 29 int vsc73xx_remove(struct vsc73xx *vsc); 30 + void vsc73xx_shutdown(struct vsc73xx *vsc);
+6
drivers/net/dsa/xrs700x/xrs700x.c
··· 822 822 } 823 823 EXPORT_SYMBOL(xrs700x_switch_remove); 824 824 825 + void xrs700x_switch_shutdown(struct xrs700x *priv) 826 + { 827 + dsa_switch_shutdown(priv->ds); 828 + } 829 + EXPORT_SYMBOL(xrs700x_switch_shutdown); 830 + 825 831 MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>"); 826 832 MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver"); 827 833 MODULE_LICENSE("GPL v2");
+1
drivers/net/dsa/xrs700x/xrs700x.h
··· 40 40 struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv); 41 41 int xrs700x_switch_register(struct xrs700x *priv); 42 42 void xrs700x_switch_remove(struct xrs700x *priv); 43 + void xrs700x_switch_shutdown(struct xrs700x *priv);
+18
drivers/net/dsa/xrs700x/xrs700x_i2c.c
··· 109 109 { 110 110 struct xrs700x *priv = i2c_get_clientdata(i2c); 111 111 112 + if (!priv) 113 + return 0; 114 + 112 115 xrs700x_switch_remove(priv); 113 116 117 + i2c_set_clientdata(i2c, NULL); 118 + 114 119 return 0; 120 + } 121 + 122 + static void xrs700x_i2c_shutdown(struct i2c_client *i2c) 123 + { 124 + struct xrs700x *priv = i2c_get_clientdata(i2c); 125 + 126 + if (!priv) 127 + return; 128 + 129 + xrs700x_switch_shutdown(priv); 130 + 131 + i2c_set_clientdata(i2c, NULL); 115 132 } 116 133 117 134 static const struct i2c_device_id xrs700x_i2c_id[] = { ··· 154 137 }, 155 138 .probe = xrs700x_i2c_probe, 156 139 .remove = xrs700x_i2c_remove, 140 + .shutdown = xrs700x_i2c_shutdown, 157 141 .id_table = xrs700x_i2c_id, 158 142 }; 159 143
+18
drivers/net/dsa/xrs700x/xrs700x_mdio.c
··· 136 136 { 137 137 struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev); 138 138 139 + if (!priv) 140 + return; 141 + 139 142 xrs700x_switch_remove(priv); 143 + 144 + dev_set_drvdata(&mdiodev->dev, NULL); 145 + } 146 + 147 + static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev) 148 + { 149 + struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev); 150 + 151 + if (!priv) 152 + return; 153 + 154 + xrs700x_switch_shutdown(priv); 155 + 156 + dev_set_drvdata(&mdiodev->dev, NULL); 140 157 } 141 158 142 159 static const struct of_device_id __maybe_unused xrs700x_mdio_dt_ids[] = { ··· 172 155 }, 173 156 .probe = xrs700x_mdio_probe, 174 157 .remove = xrs700x_mdio_remove, 158 + .shutdown = xrs700x_mdio_shutdown, 175 159 }; 176 160 177 161 mdio_module_driver(xrs700x_mdio_driver);
+4 -4
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
··· 413 413 if (deep) { 414 414 /* Reinitialize Nic/Vecs objects */ 415 415 aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); 416 - 417 - ret = aq_nic_init(nic); 418 - if (ret) 419 - goto err_exit; 420 416 } 421 417 422 418 if (netif_running(nic->ndev)) { 419 + ret = aq_nic_init(nic); 420 + if (ret) 421 + goto err_exit; 422 + 423 423 ret = aq_nic_start(nic); 424 424 if (ret) 425 425 goto err_exit;
+2
drivers/net/ethernet/broadcom/bgmac-bcma.c
··· 129 129 bcma_set_drvdata(core, bgmac); 130 130 131 131 err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr); 132 + if (err == -EPROBE_DEFER) 133 + return err; 132 134 133 135 /* If no MAC address assigned via device tree, check SPROM */ 134 136 if (err) {
+3
drivers/net/ethernet/broadcom/bgmac-platform.c
··· 192 192 bgmac->dma_dev = &pdev->dev; 193 193 194 194 ret = of_get_mac_address(np, bgmac->net_dev->dev_addr); 195 + if (ret == -EPROBE_DEFER) 196 + return ret; 197 + 195 198 if (ret) 196 199 dev_warn(&pdev->dev, 197 200 "MAC address not present in device tree\n");
+4 -4
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 391 391 * netif_tx_queue_stopped(). 392 392 */ 393 393 smp_mb(); 394 - if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) { 394 + if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) { 395 395 netif_tx_wake_queue(txq); 396 396 return false; 397 397 } ··· 764 764 smp_mb(); 765 765 766 766 if (unlikely(netif_tx_queue_stopped(txq)) && 767 - bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && 767 + bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh && 768 768 READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING) 769 769 netif_tx_wake_queue(txq); 770 770 } ··· 2416 2416 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { 2417 2417 tx_pkts++; 2418 2418 /* return full budget so NAPI will complete. */ 2419 - if (unlikely(tx_pkts > bp->tx_wake_thresh)) { 2419 + if (unlikely(tx_pkts >= bp->tx_wake_thresh)) { 2420 2420 rx_pkts = budget; 2421 2421 raw_cons = NEXT_RAW_CMP(raw_cons); 2422 2422 if (budget) ··· 3640 3640 u16 i; 3641 3641 3642 3642 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, 3643 - MAX_SKB_FRAGS + 1); 3643 + BNXT_MIN_TX_DESC_CNT); 3644 3644 3645 3645 for (i = 0; i < bp->tx_nr_rings; i++) { 3646 3646 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+5
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 629 629 #define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1) 630 630 #define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1) 631 631 632 + /* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra 633 + * BD because the first TX BD is always a long BD. 634 + */ 635 + #define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2) 636 + 632 637 #define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) 633 638 #define RX_IDX(x) ((x) & (RX_DESC_CNT - 1)) 634 639
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 798 798 799 799 if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) || 800 800 (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || 801 - (ering->tx_pending <= MAX_SKB_FRAGS)) 801 + (ering->tx_pending < BNXT_MIN_TX_DESC_CNT)) 802 802 return -EINVAL; 803 803 804 804 if (netif_running(dev))
+2 -5
drivers/net/ethernet/freescale/enetc/enetc.c
··· 419 419 420 420 static void enetc_rx_net_dim(struct enetc_int_vector *v) 421 421 { 422 - struct dim_sample dim_sample; 422 + struct dim_sample dim_sample = {}; 423 423 424 424 v->comp_cnt++; 425 425 ··· 1879 1879 static int enetc_setup_irqs(struct enetc_ndev_priv *priv) 1880 1880 { 1881 1881 struct pci_dev *pdev = priv->si->pdev; 1882 - cpumask_t cpu_mask; 1883 1882 int i, j, err; 1884 1883 1885 1884 for (i = 0; i < priv->bdr_int_num; i++) { ··· 1907 1908 1908 1909 enetc_wr(hw, ENETC_SIMSITRV(idx), entry); 1909 1910 } 1910 - cpumask_clear(&cpu_mask); 1911 - cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 1912 - irq_set_affinity_hint(irq, &cpu_mask); 1911 + irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus())); 1913 1912 } 1914 1913 1915 1914 return 0;
+1 -1
drivers/net/ethernet/freescale/enetc/enetc_ierb.c
··· 1 1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 - /* Copyright 2021 NXP Semiconductors 2 + /* Copyright 2021 NXP 3 3 * 4 4 * The Integrated Endpoint Register Block (IERB) is configured by pre-boot 5 5 * software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe
+1 -1
drivers/net/ethernet/freescale/enetc/enetc_ierb.h
··· 1 1 /* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ 2 - /* Copyright 2021 NXP Semiconductors */ 2 + /* Copyright 2021 NXP */ 3 3 4 4 #include <linux/pci.h> 5 5 #include <linux/platform_device.h>
+1 -2
drivers/net/ethernet/freescale/enetc/enetc_pf.c
··· 541 541 542 542 if (phy_interface_mode_is_rgmii(phy_mode)) { 543 543 val = enetc_port_rd(hw, ENETC_PM0_IF_MODE); 544 - val &= ~ENETC_PM0_IFM_EN_AUTO; 545 - val &= ENETC_PM0_IFM_IFMODE_MASK; 544 + val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK); 546 545 val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG; 547 546 enetc_port_wr(hw, ENETC_PM0_IF_MODE, val); 548 547 }
-1
drivers/net/ethernet/freescale/fec_main.c
··· 4176 4176 4177 4177 module_platform_driver(fec_driver); 4178 4178 4179 - MODULE_ALIAS("platform:"DRIVER_NAME); 4180 4179 MODULE_LICENSE("GPL");
+1 -1
drivers/net/ethernet/google/gve/gve.h
··· 780 780 gve_num_tx_qpls(priv)); 781 781 782 782 /* we are out of rx qpls */ 783 - if (id == priv->qpl_cfg.qpl_map_size) 783 + if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv)) 784 784 return NULL; 785 785 786 786 set_bit(id, priv->qpl_cfg.qpl_id_map);
+29 -16
drivers/net/ethernet/google/gve/gve_main.c
··· 41 41 { 42 42 struct gve_priv *priv = netdev_priv(dev); 43 43 unsigned int start; 44 + u64 packets, bytes; 44 45 int ring; 45 46 46 47 if (priv->rx) { ··· 49 48 do { 50 49 start = 51 50 u64_stats_fetch_begin(&priv->rx[ring].statss); 52 - s->rx_packets += priv->rx[ring].rpackets; 53 - s->rx_bytes += priv->rx[ring].rbytes; 51 + packets = priv->rx[ring].rpackets; 52 + bytes = priv->rx[ring].rbytes; 54 53 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 55 54 start)); 55 + s->rx_packets += packets; 56 + s->rx_bytes += bytes; 56 57 } 57 58 } 58 59 if (priv->tx) { ··· 62 59 do { 63 60 start = 64 61 u64_stats_fetch_begin(&priv->tx[ring].statss); 65 - s->tx_packets += priv->tx[ring].pkt_done; 66 - s->tx_bytes += priv->tx[ring].bytes_done; 62 + packets = priv->tx[ring].pkt_done; 63 + bytes = priv->tx[ring].bytes_done; 67 64 } while (u64_stats_fetch_retry(&priv->tx[ring].statss, 68 65 start)); 66 + s->tx_packets += packets; 67 + s->tx_bytes += bytes; 69 68 } 70 69 } 71 70 } ··· 87 82 88 83 static void gve_free_counter_array(struct gve_priv *priv) 89 84 { 85 + if (!priv->counter_array) 86 + return; 87 + 90 88 dma_free_coherent(&priv->pdev->dev, 91 89 priv->num_event_counters * 92 90 sizeof(*priv->counter_array), ··· 150 142 151 143 static void gve_free_stats_report(struct gve_priv *priv) 152 144 { 145 + if (!priv->stats_report) 146 + return; 147 + 153 148 del_timer_sync(&priv->stats_report_timer); 154 149 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, 155 150 priv->stats_report, priv->stats_report_bus); ··· 381 370 { 382 371 int i; 383 372 384 - if (priv->msix_vectors) { 385 - /* Free the irqs */ 386 - for (i = 0; i < priv->num_ntfy_blks; i++) { 387 - struct gve_notify_block *block = &priv->ntfy_blocks[i]; 388 - int msix_idx = i; 373 + if (!priv->msix_vectors) 374 + return; 389 375 390 - irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, 391 - NULL); 392 - free_irq(priv->msix_vectors[msix_idx].vector, block); 393 - } 394 - 
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); 376 + /* Free the irqs */ 377 + for (i = 0; i < priv->num_ntfy_blks; i++) { 378 + struct gve_notify_block *block = &priv->ntfy_blocks[i]; 379 + int msix_idx = i; 380 + 381 + irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, 382 + NULL); 383 + free_irq(priv->msix_vectors[msix_idx].vector, block); 395 384 } 385 + free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); 396 386 dma_free_coherent(&priv->pdev->dev, 397 387 priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks), 398 388 priv->ntfy_blocks, priv->ntfy_block_bus); ··· 1197 1185 1198 1186 void gve_handle_report_stats(struct gve_priv *priv) 1199 1187 { 1200 - int idx, stats_idx = 0, tx_bytes; 1201 - unsigned int start = 0; 1202 1188 struct stats *stats = priv->stats_report->stats; 1189 + int idx, stats_idx = 0; 1190 + unsigned int start = 0; 1191 + u64 tx_bytes; 1203 1192 1204 1193 if (!gve_get_report_stats(priv)) 1205 1194 return;
+7 -1
drivers/net/ethernet/google/gve/gve_rx.c
··· 104 104 if (!rx->data.page_info) 105 105 return -ENOMEM; 106 106 107 - if (!rx->data.raw_addressing) 107 + if (!rx->data.raw_addressing) { 108 108 rx->data.qpl = gve_assign_rx_qpl(priv); 109 + if (!rx->data.qpl) { 110 + kvfree(rx->data.page_info); 111 + rx->data.page_info = NULL; 112 + return -ENOMEM; 113 + } 114 + } 109 115 for (i = 0; i < slots; i++) { 110 116 if (!rx->data.raw_addressing) { 111 117 struct page *page = rx->data.qpl->pages[i];
-1
drivers/net/ethernet/hisilicon/hns3/hnae3.h
··· 752 752 u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ 753 753 u16 tqp_count[HNAE3_MAX_TC]; 754 754 u16 tqp_offset[HNAE3_MAX_TC]; 755 - unsigned long tc_en; /* bitmap of TC enabled */ 756 755 u8 num_tc; /* Total number of enabled TCs */ 757 756 bool mqprio_active; 758 757 };
+7 -9
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 623 623 return ret; 624 624 } 625 625 626 - for (i = 0; i < HNAE3_MAX_TC; i++) { 627 - if (!test_bit(i, &tc_info->tc_en)) 628 - continue; 629 - 626 + for (i = 0; i < tc_info->num_tc; i++) 630 627 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], 631 628 tc_info->tqp_offset[i]); 632 - } 633 629 } 634 630 635 631 ret = netif_set_real_num_tx_queues(netdev, queue_size); ··· 774 778 775 779 if (hns3_nic_resetting(netdev)) 776 780 return -EBUSY; 781 + 782 + if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { 783 + netdev_warn(netdev, "net open repeatedly!\n"); 784 + return 0; 785 + } 777 786 778 787 netif_carrier_off(netdev); 779 788 ··· 4866 4865 struct hnae3_tc_info *tc_info = &kinfo->tc_info; 4867 4866 int i; 4868 4867 4869 - for (i = 0; i < HNAE3_MAX_TC; i++) { 4868 + for (i = 0; i < tc_info->num_tc; i++) { 4870 4869 int j; 4871 - 4872 - if (!test_bit(i, &tc_info->tc_en)) 4873 - continue; 4874 4870 4875 4871 for (j = 0; j < tc_info->tqp_count[i]; j++) { 4876 4872 struct hnae3_queue *q;
+4 -2
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
··· 334 334 335 335 #if IS_ENABLED(CONFIG_VLAN_8021Q) 336 336 /* Disable the vlan filter for selftest does not support it */ 337 - if (h->ae_algo->ops->enable_vlan_filter) 337 + if (h->ae_algo->ops->enable_vlan_filter && 338 + ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) 338 339 h->ae_algo->ops->enable_vlan_filter(h, false); 339 340 #endif 340 341 ··· 360 359 h->ae_algo->ops->halt_autoneg(h, false); 361 360 362 361 #if IS_ENABLED(CONFIG_VLAN_8021Q) 363 - if (h->ae_algo->ops->enable_vlan_filter) 362 + if (h->ae_algo->ops->enable_vlan_filter && 363 + ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) 364 364 h->ae_algo->ops->enable_vlan_filter(h, true); 365 365 #endif 366 366
+13 -8
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
··· 467 467 return ret; 468 468 } 469 469 470 - static int hclge_firmware_compat_config(struct hclge_dev *hdev) 470 + static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en) 471 471 { 472 472 struct hclge_firmware_compat_cmd *req; 473 473 struct hclge_desc desc; ··· 475 475 476 476 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false); 477 477 478 - req = (struct hclge_firmware_compat_cmd *)desc.data; 478 + if (en) { 479 + req = (struct hclge_firmware_compat_cmd *)desc.data; 479 480 480 - hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1); 481 - hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1); 482 - if (hnae3_dev_phy_imp_supported(hdev)) 483 - hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1); 484 - req->compat = cpu_to_le32(compat); 481 + hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1); 482 + hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1); 483 + if (hnae3_dev_phy_imp_supported(hdev)) 484 + hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1); 485 + 486 + req->compat = cpu_to_le32(compat); 487 + } 485 488 486 489 return hclge_cmd_send(&hdev->hw, &desc, 1); 487 490 } ··· 541 538 /* ask the firmware to enable some features, driver can work without 542 539 * it. 543 540 */ 544 - ret = hclge_firmware_compat_config(hdev); 541 + ret = hclge_firmware_compat_config(hdev, true); 545 542 if (ret) 546 543 dev_warn(&hdev->pdev->dev, 547 544 "Firmware compatible features not enabled(%d).\n", ··· 571 568 572 569 void hclge_cmd_uninit(struct hclge_dev *hdev) 573 570 { 571 + hclge_firmware_compat_config(hdev, false); 572 + 574 573 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); 575 574 /* wait to ensure that the firmware completes the possible left 576 575 * over commands.
+16 -13
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
··· 247 247 } 248 248 249 249 hclge_tm_schd_info_update(hdev, num_tc); 250 + if (num_tc > 1) 251 + hdev->flag |= HCLGE_FLAG_DCB_ENABLE; 252 + else 253 + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; 250 254 251 255 ret = hclge_ieee_ets_to_tm_info(hdev, ets); 252 256 if (ret) ··· 310 306 u8 i, j, pfc_map, *prio_tc; 311 307 int ret; 312 308 313 - if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || 314 - hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) 309 + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) 315 310 return -EINVAL; 316 311 317 312 if (pfc->pfc_en == hdev->tm_info.pfc_en) ··· 444 441 static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info, 445 442 struct tc_mqprio_qopt_offload *mqprio_qopt) 446 443 { 447 - int i; 448 - 449 444 memset(tc_info, 0, sizeof(*tc_info)); 450 445 tc_info->num_tc = mqprio_qopt->qopt.num_tc; 451 446 memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map, ··· 452 451 sizeof_field(struct hnae3_tc_info, tqp_count)); 453 452 memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset, 454 453 sizeof_field(struct hnae3_tc_info, tqp_offset)); 455 - 456 - for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 457 - set_bit(tc_info->prio_tc[i], &tc_info->tc_en); 458 454 } 459 455 460 456 static int hclge_config_tc(struct hclge_dev *hdev, ··· 517 519 return hclge_notify_init_up(hdev); 518 520 519 521 err_out: 520 - /* roll-back */ 521 - memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info)); 522 - if (hclge_config_tc(hdev, &kinfo->tc_info)) 523 - dev_err(&hdev->pdev->dev, 524 - "failed to roll back tc configuration\n"); 525 - 522 + if (!tc) { 523 + dev_warn(&hdev->pdev->dev, 524 + "failed to destroy mqprio, will active after reset, ret = %d\n", 525 + ret); 526 + } else { 527 + /* roll-back */ 528 + memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info)); 529 + if (hclge_config_tc(hdev, &kinfo->tc_info)) 530 + dev_err(&hdev->pdev->dev, 531 + "failed to roll back tc configuration\n"); 532 + } 526 533 hclge_notify_init_up(hdev); 527 534 528 535 return ret;
+24 -4
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
··· 719 719 sprintf(result[(*index)++], "%6u", para->rate); 720 720 } 721 721 722 - static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len) 722 + static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str, 723 + char *buf, int len) 723 724 { 724 - char data_str[ARRAY_SIZE(tm_pg_items)][HCLGE_DBG_DATA_STR_LEN]; 725 725 struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; 726 726 char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str; 727 727 u8 pg_id, sch_mode, weight, pri_bit_map, i, j; ··· 729 729 int pos = 0; 730 730 int ret; 731 731 732 - for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) 733 - result[i] = &data_str[i][0]; 732 + for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) { 733 + result[i] = data_str; 734 + data_str += HCLGE_DBG_DATA_STR_LEN; 735 + } 734 736 735 737 hclge_dbg_fill_content(content, sizeof(content), tm_pg_items, 736 738 NULL, ARRAY_SIZE(tm_pg_items)); ··· 781 779 } 782 780 783 781 return 0; 782 + } 783 + 784 + static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len) 785 + { 786 + char *data_str; 787 + int ret; 788 + 789 + data_str = kcalloc(ARRAY_SIZE(tm_pg_items), 790 + HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL); 791 + 792 + if (!data_str) 793 + return -ENOMEM; 794 + 795 + ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len); 796 + 797 + kfree(data_str); 798 + 799 + return ret; 784 800 } 785 801 786 802 static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
+4 -4
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
··· 2445 2445 return; 2446 2446 } 2447 2447 2448 - dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n", 2448 + dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n", 2449 2449 vf_id, q_id); 2450 2450 2451 2451 if (vf_id) { 2452 2452 if (vf_id >= hdev->num_alloc_vport) { 2453 - dev_err(dev, "invalid vf id(%u)\n", vf_id); 2453 + dev_err(dev, "invalid vport(%u)\n", vf_id); 2454 2454 return; 2455 2455 } 2456 2456 ··· 2463 2463 2464 2464 ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]); 2465 2465 if (ret) 2466 - dev_err(dev, "inform reset to vf(%u) failed %d!\n", 2467 - hdev->vport->vport_id, ret); 2466 + dev_err(dev, "inform reset to vport(%u) failed %d!\n", 2467 + vf_id, ret); 2468 2468 } else { 2469 2469 set_bit(HNAE3_FUNC_RESET, reset_requests); 2470 2470 }
+70 -37
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 3661 3661 if (ret) { 3662 3662 dev_err(&hdev->pdev->dev, 3663 3663 "set vf(%u) rst failed %d!\n", 3664 - vport->vport_id, ret); 3664 + vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3665 + ret); 3665 3666 return ret; 3666 3667 } 3667 3668 ··· 3677 3676 if (ret) 3678 3677 dev_warn(&hdev->pdev->dev, 3679 3678 "inform reset to vf(%u) failed %d!\n", 3680 - vport->vport_id, ret); 3679 + vport->vport_id - HCLGE_VF_VPORT_START_NUM, 3680 + ret); 3681 3681 } 3682 3682 3683 3683 return 0; ··· 4743 4741 return 0; 4744 4742 } 4745 4743 4744 + static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc, 4745 + u8 *hash_algo) 4746 + { 4747 + switch (hfunc) { 4748 + case ETH_RSS_HASH_TOP: 4749 + *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 4750 + return 0; 4751 + case ETH_RSS_HASH_XOR: 4752 + *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; 4753 + return 0; 4754 + case ETH_RSS_HASH_NO_CHANGE: 4755 + *hash_algo = vport->rss_algo; 4756 + return 0; 4757 + default: 4758 + return -EINVAL; 4759 + } 4760 + } 4761 + 4746 4762 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, 4747 4763 const u8 *key, const u8 hfunc) 4748 4764 { ··· 4770 4750 u8 hash_algo; 4771 4751 int ret, i; 4772 4752 4753 + ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo); 4754 + if (ret) { 4755 + dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); 4756 + return ret; 4757 + } 4758 + 4773 4759 /* Set the RSS Hash Key if specififed by the user */ 4774 4760 if (key) { 4775 - switch (hfunc) { 4776 - case ETH_RSS_HASH_TOP: 4777 - hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; 4778 - break; 4779 - case ETH_RSS_HASH_XOR: 4780 - hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; 4781 - break; 4782 - case ETH_RSS_HASH_NO_CHANGE: 4783 - hash_algo = vport->rss_algo; 4784 - break; 4785 - default: 4786 - return -EINVAL; 4787 - } 4788 - 4789 4761 ret = hclge_set_rss_algo_key(hdev, hash_algo, key); 4790 4762 if (ret) 4791 4763 return ret; 4792 4764 4793 4765 /* Update the shadow RSS key with user specified 
qids */ 4794 4766 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); 4795 - vport->rss_algo = hash_algo; 4767 + } else { 4768 + ret = hclge_set_rss_algo_key(hdev, hash_algo, 4769 + vport->rss_hash_key); 4770 + if (ret) 4771 + return ret; 4796 4772 } 4773 + vport->rss_algo = hash_algo; 4797 4774 4798 4775 /* Update the shadow RSS table with user specified qids */ 4799 4776 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) ··· 6644 6627 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie); 6645 6628 u16 tqps; 6646 6629 6630 + /* To keep consistent with user's configuration, minus 1 when 6631 + * printing 'vf', because vf id from ethtool is added 1 for vf. 6632 + */ 6647 6633 if (vf > hdev->num_req_vfs) { 6648 6634 dev_err(&hdev->pdev->dev, 6649 - "Error: vf id (%u) > max vf num (%u)\n", 6650 - vf, hdev->num_req_vfs); 6635 + "Error: vf id (%u) should be less than %u\n", 6636 + vf - 1, hdev->num_req_vfs); 6651 6637 return -EINVAL; 6652 6638 } 6653 6639 ··· 8708 8688 } 8709 8689 8710 8690 /* check if we just hit the duplicate */ 8711 - if (!ret) { 8712 - dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n", 8713 - vport->vport_id, addr); 8714 - return 0; 8715 - } 8716 - 8717 - dev_err(&hdev->pdev->dev, 8718 - "PF failed to add unicast entry(%pM) in the MAC table\n", 8719 - addr); 8691 + if (!ret) 8692 + return -EEXIST; 8720 8693 8721 8694 return ret; 8722 8695 } ··· 8861 8848 } else { 8862 8849 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, 8863 8850 &vport->state); 8864 - break; 8851 + 8852 + /* If one unicast mac address is existing in hardware, 8853 + * we need to try whether other unicast mac addresses 8854 + * are new addresses that can be added. 
8855 + */ 8856 + if (ret != -EEXIST) 8857 + break; 8865 8858 } 8866 8859 } 8867 8860 } ··· 9816 9797 if (is_kill && !vlan_id) 9817 9798 return 0; 9818 9799 9800 + if (vlan_id >= VLAN_N_VID) 9801 + return -EINVAL; 9802 + 9819 9803 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); 9820 9804 if (ret) { 9821 9805 dev_err(&hdev->pdev->dev, ··· 10725 10703 return 0; 10726 10704 } 10727 10705 10728 - static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) 10706 + static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, 10707 + u8 *reset_status) 10729 10708 { 10730 10709 struct hclge_reset_tqp_queue_cmd *req; 10731 10710 struct hclge_desc desc; ··· 10744 10721 return ret; 10745 10722 } 10746 10723 10747 - return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 10724 + *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); 10725 + 10726 + return 0; 10748 10727 } 10749 10728 10750 10729 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) ··· 10765 10740 struct hclge_vport *vport = hclge_get_vport(handle); 10766 10741 struct hclge_dev *hdev = vport->back; 10767 10742 u16 reset_try_times = 0; 10768 - int reset_status; 10743 + u8 reset_status; 10769 10744 u16 queue_gid; 10770 10745 int ret; 10771 10746 u16 i; ··· 10781 10756 } 10782 10757 10783 10758 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { 10784 - reset_status = hclge_get_reset_status(hdev, queue_gid); 10759 + ret = hclge_get_reset_status(hdev, queue_gid, 10760 + &reset_status); 10761 + if (ret) 10762 + return ret; 10763 + 10785 10764 if (reset_status) 10786 10765 break; 10787 10766 ··· 11478 11449 struct hclge_vport *vport = &hdev->vport[i]; 11479 11450 int ret; 11480 11451 11481 - /* Send cmd to clear VF's FUNC_RST_ING */ 11452 + /* Send cmd to clear vport's FUNC_RST_ING */ 11482 11453 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); 11483 11454 if (ret) 11484 11455 dev_warn(&hdev->pdev->dev, 11485 - "clear 
vf(%u) rst failed %d!\n", 11456 + "clear vport(%u) rst failed %d!\n", 11486 11457 vport->vport_id, ret); 11487 11458 } 11488 11459 } ··· 12796 12767 continue; 12797 12768 12798 12769 if (vport->vf_info.trusted) { 12799 - uc_en = vport->vf_info.request_uc_en > 0; 12800 - mc_en = vport->vf_info.request_mc_en > 0; 12770 + uc_en = vport->vf_info.request_uc_en > 0 || 12771 + vport->overflow_promisc_flags & 12772 + HNAE3_OVERFLOW_UPE; 12773 + mc_en = vport->vf_info.request_mc_en > 0 || 12774 + vport->overflow_promisc_flags & 12775 + HNAE3_OVERFLOW_MPE; 12801 12776 } 12802 12777 bc_en = vport->vf_info.request_bc_en > 0; 12803 12778
+9 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
··· 566 566 struct hclge_dev *hdev = vport->back; 567 567 568 568 dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!", 569 - vport->vport_id); 569 + vport->vport_id - HCLGE_VF_VPORT_START_NUM); 570 570 571 571 return hclge_func_reset_cmd(hdev, vport->vport_id); 572 572 } ··· 590 590 struct hclge_mbx_vf_to_pf_cmd *mbx_req, 591 591 struct hclge_respond_to_vf_msg *resp_msg) 592 592 { 593 + struct hnae3_handle *handle = &vport->nic; 594 + struct hclge_dev *hdev = vport->back; 593 595 u16 queue_id, qid_in_pf; 594 596 595 597 memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id)); 598 + if (queue_id >= handle->kinfo.num_tqps) { 599 + dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n", 600 + queue_id, mbx_req->mbx_src_vfid); 601 + return; 602 + } 603 + 596 604 qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); 597 605 memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf)); 598 606 resp_msg->len = sizeof(qid_in_pf);
+6 -29
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
··· 581 581 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 582 582 if (ret) { 583 583 dev_err(&hdev->pdev->dev, 584 - "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n", 584 + "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n", 585 585 vport->vport_id, shap_cfg_cmd->qs_id, 586 586 max_tx_rate, ret); 587 587 return ret; ··· 687 687 688 688 for (i = 0; i < HNAE3_MAX_TC; i++) { 689 689 if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) { 690 - set_bit(i, &kinfo->tc_info.tc_en); 691 690 kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size; 692 691 kinfo->tc_info.tqp_count[i] = kinfo->rss_size; 693 692 } else { 694 693 /* Set to default queue if TC is disable */ 695 - clear_bit(i, &kinfo->tc_info.tc_en); 696 694 kinfo->tc_info.tqp_offset[i] = 0; 697 695 kinfo->tc_info.tqp_count[i] = 1; 698 696 } ··· 727 729 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) 728 730 hdev->tm_info.prio_tc[i] = 729 731 (i >= hdev->tm_info.num_tc) ? 0 : i; 730 - 731 - /* DCB is enabled if we have more than 1 TC or pfc_en is 732 - * non-zero. 
733 - */ 734 - if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en) 735 - hdev->flag |= HCLGE_FLAG_DCB_ENABLE; 736 - else 737 - hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; 738 732 } 739 733 740 734 static void hclge_tm_pg_info_init(struct hclge_dev *hdev) ··· 757 767 758 768 static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev) 759 769 { 760 - if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) { 770 + if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) { 761 771 if (hdev->fc_mode_last_time == HCLGE_FC_PFC) 762 772 dev_warn(&hdev->pdev->dev, 763 - "DCB is disable, but last mode is FC_PFC\n"); 773 + "Only 1 tc used, but last mode is FC_PFC\n"); 764 774 765 775 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; 766 776 } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { ··· 786 796 } 787 797 } 788 798 789 - static void hclge_pfc_info_init(struct hclge_dev *hdev) 799 + void hclge_tm_pfc_info_update(struct hclge_dev *hdev) 790 800 { 791 801 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) 792 802 hclge_update_fc_mode(hdev); ··· 802 812 803 813 hclge_tm_vport_info_update(hdev); 804 814 805 - hclge_pfc_info_init(hdev); 815 + hclge_tm_pfc_info_update(hdev); 806 816 } 807 817 808 818 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) ··· 1548 1558 hclge_tm_schd_info_init(hdev); 1549 1559 } 1550 1560 1551 - void hclge_tm_pfc_info_update(struct hclge_dev *hdev) 1552 - { 1553 - /* DCB is enabled if we have more than 1 TC or pfc_en is 1554 - * non-zero. 
1555 - */ 1556 - if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en) 1557 - hdev->flag |= HCLGE_FLAG_DCB_ENABLE; 1558 - else 1559 - hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; 1560 - 1561 - hclge_pfc_info_init(hdev); 1562 - } 1563 - 1564 1561 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init) 1565 1562 { 1566 1563 int ret; ··· 1593 1616 if (ret) 1594 1617 return ret; 1595 1618 1596 - if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) 1619 + if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) 1597 1620 return 0; 1598 1621 1599 1622 return hclge_tm_bp_setup(hdev);
+34 -18
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 816 816 return 0; 817 817 } 818 818 819 + static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc, 820 + u8 *hash_algo) 821 + { 822 + switch (hfunc) { 823 + case ETH_RSS_HASH_TOP: 824 + *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 825 + return 0; 826 + case ETH_RSS_HASH_XOR: 827 + *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; 828 + return 0; 829 + case ETH_RSS_HASH_NO_CHANGE: 830 + *hash_algo = hdev->rss_cfg.hash_algo; 831 + return 0; 832 + default: 833 + return -EINVAL; 834 + } 835 + } 836 + 819 837 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 820 838 const u8 *key, const u8 hfunc) 821 839 { 822 840 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 823 841 struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; 842 + u8 hash_algo; 824 843 int ret, i; 825 844 826 845 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 846 + ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo); 847 + if (ret) 848 + return ret; 849 + 827 850 /* Set the RSS Hash Key if specififed by the user */ 828 851 if (key) { 829 - switch (hfunc) { 830 - case ETH_RSS_HASH_TOP: 831 - rss_cfg->hash_algo = 832 - HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; 833 - break; 834 - case ETH_RSS_HASH_XOR: 835 - rss_cfg->hash_algo = 836 - HCLGEVF_RSS_HASH_ALGO_SIMPLE; 837 - break; 838 - case ETH_RSS_HASH_NO_CHANGE: 839 - break; 840 - default: 841 - return -EINVAL; 842 - } 843 - 844 - ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, 845 - key); 846 - if (ret) 852 + ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key); 853 + if (ret) { 854 + dev_err(&hdev->pdev->dev, 855 + "invalid hfunc type %u\n", hfunc); 847 856 return ret; 857 + } 848 858 849 859 /* Update the shadow RSS key with user specified qids */ 850 860 memcpy(rss_cfg->rss_hash_key, key, 851 861 HCLGEVF_RSS_KEY_SIZE); 862 + } else { 863 + ret = hclgevf_set_rss_algo_key(hdev, hash_algo, 864 + rss_cfg->rss_hash_key); 865 + if (ret) 866 + return ret; 852 867 } 868 + rss_cfg->hash_algo = hash_algo; 
853 869 } 854 870 855 871 /* update the shadow RSS table with user specified qids */
+1 -1
drivers/net/ethernet/hisilicon/hns_mdio.c
··· 354 354 355 355 if (dev_of_node(bus->parent)) { 356 356 if (!mdio_dev->subctrl_vbase) { 357 - dev_err(&bus->dev, "mdio sys ctl reg has not maped\n"); 357 + dev_err(&bus->dev, "mdio sys ctl reg has not mapped\n"); 358 358 return -ENODEV; 359 359 } 360 360
-8
drivers/net/ethernet/ibm/ibmvnic.c
··· 4708 4708 return 0; 4709 4709 } 4710 4710 4711 - if (adapter->failover_pending) { 4712 - adapter->init_done_rc = -EAGAIN; 4713 - netdev_dbg(netdev, "Failover pending, ignoring login response\n"); 4714 - complete(&adapter->init_done); 4715 - /* login response buffer will be released on reset */ 4716 - return 0; 4717 - } 4718 - 4719 4711 netdev->mtu = adapter->req_mtu - ETH_HLEN; 4720 4712 4721 4713 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
+1
drivers/net/ethernet/intel/Kconfig
··· 335 335 tristate "Intel(R) Ethernet Controller I225-LM/I225-V support" 336 336 default n 337 337 depends on PCI 338 + depends on PTP_1588_CLOCK_OPTIONAL 338 339 help 339 340 This driver supports Intel(R) Ethernet Controller I225-LM/I225-V 340 341 family of adapters.
+15 -7
drivers/net/ethernet/intel/e100.c
··· 2437 2437 sizeof(info->bus_info)); 2438 2438 } 2439 2439 2440 - #define E100_PHY_REGS 0x1C 2440 + #define E100_PHY_REGS 0x1D 2441 2441 static int e100_get_regs_len(struct net_device *netdev) 2442 2442 { 2443 2443 struct nic *nic = netdev_priv(netdev); 2444 - return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf); 2444 + 2445 + /* We know the number of registers, and the size of the dump buffer. 2446 + * Calculate the total size in bytes. 2447 + */ 2448 + return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf); 2445 2449 } 2446 2450 2447 2451 static void e100_get_regs(struct net_device *netdev, ··· 2459 2455 buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 | 2460 2456 ioread8(&nic->csr->scb.cmd_lo) << 16 | 2461 2457 ioread16(&nic->csr->scb.status); 2462 - for (i = E100_PHY_REGS; i >= 0; i--) 2463 - buff[1 + E100_PHY_REGS - i] = 2464 - mdio_read(netdev, nic->mii.phy_id, i); 2458 + for (i = 0; i < E100_PHY_REGS; i++) 2459 + /* Note that we read the registers in reverse order. This 2460 + * ordering is the ABI apparently used by ethtool and other 2461 + * applications. 2462 + */ 2463 + buff[1 + i] = mdio_read(netdev, nic->mii.phy_id, 2464 + E100_PHY_REGS - 1 - i); 2465 2465 memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf)); 2466 2466 e100_exec_cb(nic, NULL, e100_dump); 2467 2467 msleep(10); 2468 - memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf, 2469 - sizeof(nic->mem->dump_buf)); 2468 + memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf, 2469 + sizeof(nic->mem->dump_buf)); 2470 2470 } 2471 2471 2472 2472 static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+3 -2
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 4871 4871 { 4872 4872 int i; 4873 4873 4874 - i40e_free_misc_vector(pf); 4874 + if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) 4875 + i40e_free_misc_vector(pf); 4875 4876 4876 4877 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, 4877 4878 I40E_IWARP_IRQ_PILE_ID); ··· 10114 10113 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { 10115 10114 /* retry with a larger buffer */ 10116 10115 buf_len = data_size; 10117 - } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { 10116 + } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) { 10118 10117 dev_info(&pf->pdev->dev, 10119 10118 "capability discovery failed, err %s aq_err %s\n", 10120 10119 i40e_stat_str(&pf->hw, err),
-1
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 1965 1965 } 1966 1966 adapter->aq_required = 0; 1967 1967 adapter->current_op = VIRTCHNL_OP_UNKNOWN; 1968 - mutex_unlock(&adapter->crit_lock); 1969 1968 queue_delayed_work(iavf_wq, 1970 1969 &adapter->watchdog_task, 1971 1970 msecs_to_jiffies(10));
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
··· 3208 3208 max_combined = ixgbe_max_rss_indices(adapter); 3209 3209 } 3210 3210 3211 - return max_combined; 3211 + return min_t(int, max_combined, num_online_cpus()); 3212 3212 } 3213 3213 3214 3214 static void ixgbe_get_channels(struct net_device *dev,
+6 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 10112 10112 struct ixgbe_adapter *adapter = netdev_priv(dev); 10113 10113 struct bpf_prog *old_prog; 10114 10114 bool need_reset; 10115 + int num_queues; 10115 10116 10116 10117 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 10117 10118 return -EINVAL; ··· 10162 10161 /* Kick start the NAPI context if there is an AF_XDP socket open 10163 10162 * on that queue id. This so that receiving will start. 10164 10163 */ 10165 - if (need_reset && prog) 10166 - for (i = 0; i < adapter->num_rx_queues; i++) 10164 + if (need_reset && prog) { 10165 + num_queues = min_t(int, adapter->num_rx_queues, 10166 + adapter->num_xdp_queues); 10167 + for (i = 0; i < num_queues; i++) 10167 10168 if (adapter->xdp_ring[i]->xsk_pool) 10168 10169 (void)ixgbe_xsk_wakeup(adapter->netdev, i, 10169 10170 XDP_WAKEUP_RX); 10171 + } 10170 10172 10171 10173 return 0; 10172 10174 }
+3
drivers/net/ethernet/mediatek/mtk_ppe_offload.c
··· 186 186 int hash; 187 187 int i; 188 188 189 + if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params)) 190 + return -EEXIST; 191 + 189 192 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { 190 193 struct flow_match_meta match; 191 194
+32 -18
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 372 372 int nhoff = skb_network_offset(skb); 373 373 int ret = 0; 374 374 375 + if (skb->encapsulation) 376 + return -EPROTONOSUPPORT; 377 + 375 378 if (skb->protocol != htons(ETH_P_IP)) 376 379 return -EPROTONOSUPPORT; 377 380 ··· 1272 1269 if (!netif_carrier_ok(dev)) { 1273 1270 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 1274 1271 if (priv->port_state.link_state) { 1275 - priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 1276 1272 netif_carrier_on(dev); 1277 1273 en_dbg(LINK, priv, "Link Up\n"); 1278 1274 } ··· 1559 1557 mutex_unlock(&mdev->state_lock); 1560 1558 } 1561 1559 1562 - static void mlx4_en_linkstate(struct work_struct *work) 1560 + static void mlx4_en_linkstate(struct mlx4_en_priv *priv) 1561 + { 1562 + struct mlx4_en_port_state *port_state = &priv->port_state; 1563 + struct mlx4_en_dev *mdev = priv->mdev; 1564 + struct net_device *dev = priv->dev; 1565 + bool up; 1566 + 1567 + if (mlx4_en_QUERY_PORT(mdev, priv->port)) 1568 + port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN; 1569 + 1570 + up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP; 1571 + if (up == netif_carrier_ok(dev)) 1572 + netif_carrier_event(dev); 1573 + if (!up) { 1574 + en_info(priv, "Link Down\n"); 1575 + netif_carrier_off(dev); 1576 + } else { 1577 + en_info(priv, "Link Up\n"); 1578 + netif_carrier_on(dev); 1579 + } 1580 + } 1581 + 1582 + static void mlx4_en_linkstate_work(struct work_struct *work) 1563 1583 { 1564 1584 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1565 1585 linkstate_task); 1566 1586 struct mlx4_en_dev *mdev = priv->mdev; 1567 - int linkstate = priv->link_state; 1568 1587 1569 1588 mutex_lock(&mdev->state_lock); 1570 - /* If observable port state changed set carrier state and 1571 - * report to system log */ 1572 - if (priv->last_link_state != linkstate) { 1573 - if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1574 - en_info(priv, "Link Down\n"); 1575 - netif_carrier_off(priv->dev); 1576 - } else { 1577 - 
en_info(priv, "Link Up\n"); 1578 - netif_carrier_on(priv->dev); 1579 - } 1580 - } 1581 - priv->last_link_state = linkstate; 1589 + mlx4_en_linkstate(priv); 1582 1590 mutex_unlock(&mdev->state_lock); 1583 1591 } 1584 1592 ··· 2091 2079 mlx4_en_clear_stats(dev); 2092 2080 2093 2081 err = mlx4_en_start_port(dev); 2094 - if (err) 2082 + if (err) { 2095 2083 en_err(priv, "Failed starting port:%d\n", priv->port); 2096 - 2084 + goto out; 2085 + } 2086 + mlx4_en_linkstate(priv); 2097 2087 out: 2098 2088 mutex_unlock(&mdev->state_lock); 2099 2089 return err; ··· 3182 3168 spin_lock_init(&priv->stats_lock); 3183 3169 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 3184 3170 INIT_WORK(&priv->restart_task, mlx4_en_restart); 3185 - INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 3171 + INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work); 3186 3172 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 3187 3173 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 3188 3174 #ifdef CONFIG_RFS_ACCEL
-1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 552 552 553 553 struct mlx4_hwq_resources res; 554 554 int link_state; 555 - int last_link_state; 556 555 bool port_up; 557 556 int port; 558 557 int registered;
+5 -7
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 252 252 struct { 253 253 u16 mode; 254 254 u8 num_tc; 255 + struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 255 256 } mqprio; 256 257 bool rx_cqe_compress_def; 257 258 bool tunneled_offload_en; ··· 846 845 struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; 847 846 struct mlx5e_channel_stats trap_stats; 848 847 struct mlx5e_ptp_stats ptp_stats; 848 + u16 stats_nch; 849 849 u16 max_nch; 850 850 u8 max_opened_tc; 851 851 bool tx_ptp_opened; ··· 1102 1100 struct ethtool_pauseparam *pauseparam); 1103 1101 1104 1102 /* mlx5e generic netdev management API */ 1105 - static inline unsigned int 1106 - mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile) 1107 - { 1108 - return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1); 1109 - } 1110 - 1111 1103 static inline bool 1112 1104 mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev) 1113 1105 { ··· 1110 1114 } 1111 1115 1112 1116 int mlx5e_priv_init(struct mlx5e_priv *priv, 1117 + const struct mlx5e_profile *profile, 1113 1118 struct net_device *netdev, 1114 1119 struct mlx5_core_dev *mdev); 1115 1120 void mlx5e_priv_cleanup(struct mlx5e_priv *priv); 1116 1121 struct net_device * 1117 - mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs); 1122 + mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, 1123 + unsigned int txqs, unsigned int rxqs); 1118 1124 int mlx5e_attach_netdev(struct mlx5e_priv *priv); 1119 1125 void mlx5e_detach_netdev(struct mlx5e_priv *priv); 1120 1126 void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
··· 35 35 { 36 36 int ch, i = 0; 37 37 38 - for (ch = 0; ch < priv->max_nch; ch++) { 38 + for (ch = 0; ch < priv->stats_nch; ch++) { 39 39 void *buf = data + i; 40 40 41 41 if (WARN_ON_ONCE(buf + ··· 51 51 static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv) 52 52 { 53 53 return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) * 54 - priv->max_nch); 54 + priv->stats_nch); 55 55 } 56 56 57 57 static void mlx5e_hv_vhca_stats_work(struct work_struct *work) ··· 100 100 sagent = &priv->stats_agent; 101 101 102 102 block->version = MLX5_HV_VHCA_STATS_VERSION; 103 - block->rings = priv->max_nch; 103 + block->rings = priv->stats_nch; 104 104 105 105 if (!block->command) { 106 106 cancel_delayed_work_sync(&priv->stats_agent.work);
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
··· 13 13 bool valid; 14 14 }; 15 15 16 - #define MLX5E_PTP_CHANNEL_IX 0 17 - 18 16 struct mlx5e_ptp_params { 19 17 struct mlx5e_params params; 20 18 struct mlx5e_sq_param txq_sq_param; ··· 507 509 rq->mdev = mdev; 508 510 rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); 509 511 rq->stats = &c->priv->ptp_stats.rq; 512 + rq->ix = MLX5E_PTP_CHANNEL_IX; 510 513 rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); 511 514 err = mlx5e_rq_set_handlers(rq, params, false); 512 515 if (err)
+2
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
··· 8 8 #include "en_stats.h" 9 9 #include <linux/ptp_classify.h> 10 10 11 + #define MLX5E_PTP_CHANNEL_IX 0 12 + 11 13 struct mlx5e_ptpsq { 12 14 struct mlx5e_txqsq txqsq; 13 15 struct mlx5e_cq ts_cq;
+11
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 2036 2036 } 2037 2037 2038 2038 new_params = priv->channels.params; 2039 + /* Don't allow enabling TX-port-TS if MQPRIO mode channel offload is 2040 + * active, since it defines explicitly which TC accepts the packet. 2041 + * This conflicts with TX-port-TS hijacking the PTP traffic to a specific 2042 + * HW TX-queue. 2043 + */ 2044 + if (enable && new_params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) { 2045 + netdev_err(priv->netdev, 2046 + "%s: MQPRIO mode channel offload is active, cannot set the TX-port-TS\n", 2047 + __func__); 2048 + return -EINVAL; 2049 + } 2039 2050 MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_PORT_TS, enable); 2040 2051 /* No need to verify SQ stop room as 2041 2052 * ptpsq.txqsq.stop_room <= generic_sq->stop_room, and both
+130 -48
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 2264 2264 } 2265 2265 2266 2266 static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc, 2267 - struct tc_mqprio_qopt_offload *mqprio) 2267 + struct netdev_tc_txq *tc_to_txq) 2268 2268 { 2269 2269 int tc, err; 2270 2270 ··· 2282 2282 for (tc = 0; tc < ntc; tc++) { 2283 2283 u16 count, offset; 2284 2284 2285 - /* For DCB mode, map netdev TCs to offset 0 2286 - * We have our own UP to TXQ mapping for QoS 2287 - */ 2288 - count = mqprio ? mqprio->qopt.count[tc] : nch; 2289 - offset = mqprio ? mqprio->qopt.offset[tc] : 0; 2285 + count = tc_to_txq[tc].count; 2286 + offset = tc_to_txq[tc].offset; 2290 2287 netdev_set_tc_queue(netdev, tc, count, offset); 2291 2288 } 2292 2289 ··· 2312 2315 2313 2316 static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) 2314 2317 { 2318 + struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq; 2315 2319 struct net_device *netdev = priv->netdev; 2316 2320 int old_num_txqs, old_ntc; 2317 2321 int num_rxqs, nch, ntc; 2318 2322 int err; 2323 + int i; 2319 2324 2320 2325 old_num_txqs = netdev->real_num_tx_queues; 2321 2326 old_ntc = netdev->num_tc ? 
: 1; 2327 + for (i = 0; i < ARRAY_SIZE(old_tc_to_txq); i++) 2328 + old_tc_to_txq[i] = netdev->tc_to_txq[i]; 2322 2329 2323 2330 nch = priv->channels.params.num_channels; 2324 - ntc = mlx5e_get_dcb_num_tc(&priv->channels.params); 2331 + ntc = priv->channels.params.mqprio.num_tc; 2325 2332 num_rxqs = nch * priv->profile->rq_groups; 2333 + tc_to_txq = priv->channels.params.mqprio.tc_to_txq; 2326 2334 2327 - err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL); 2335 + err = mlx5e_netdev_set_tcs(netdev, nch, ntc, tc_to_txq); 2328 2336 if (err) 2329 2337 goto err_out; 2330 2338 err = mlx5e_update_tx_netdev_queues(priv); ··· 2352 2350 WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); 2353 2351 2354 2352 err_tcs: 2355 - mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL); 2353 + WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, 2354 + old_tc_to_txq)); 2356 2355 err_out: 2357 2356 return err; 2358 2357 } 2358 + 2359 + static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues); 2359 2360 2360 2361 static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, 2361 2362 struct mlx5e_params *params) ··· 2866 2861 return 0; 2867 2862 } 2868 2863 2864 + static void mlx5e_mqprio_build_default_tc_to_txq(struct netdev_tc_txq *tc_to_txq, 2865 + int ntc, int nch) 2866 + { 2867 + int tc; 2868 + 2869 + memset(tc_to_txq, 0, sizeof(*tc_to_txq) * TC_MAX_QUEUE); 2870 + 2871 + /* Map netdev TCs to offset 0. 
2872 + * We have our own UP to TXQ mapping for DCB mode of QoS 2873 + */ 2874 + for (tc = 0; tc < ntc; tc++) { 2875 + tc_to_txq[tc] = (struct netdev_tc_txq) { 2876 + .count = nch, 2877 + .offset = 0, 2878 + }; 2879 + } 2880 + } 2881 + 2882 + static void mlx5e_mqprio_build_tc_to_txq(struct netdev_tc_txq *tc_to_txq, 2883 + struct tc_mqprio_qopt *qopt) 2884 + { 2885 + int tc; 2886 + 2887 + for (tc = 0; tc < TC_MAX_QUEUE; tc++) { 2888 + tc_to_txq[tc] = (struct netdev_tc_txq) { 2889 + .count = qopt->count[tc], 2890 + .offset = qopt->offset[tc], 2891 + }; 2892 + } 2893 + } 2894 + 2895 + static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc) 2896 + { 2897 + params->mqprio.mode = TC_MQPRIO_MODE_DCB; 2898 + params->mqprio.num_tc = num_tc; 2899 + mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc, 2900 + params->num_channels); 2901 + } 2902 + 2903 + static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params, 2904 + struct tc_mqprio_qopt *qopt) 2905 + { 2906 + params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL; 2907 + params->mqprio.num_tc = qopt->num_tc; 2908 + mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt); 2909 + } 2910 + 2911 + static void mlx5e_params_mqprio_reset(struct mlx5e_params *params) 2912 + { 2913 + mlx5e_params_mqprio_dcb_set(params, 1); 2914 + } 2915 + 2869 2916 static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv, 2870 2917 struct tc_mqprio_qopt *mqprio) 2871 2918 { ··· 2931 2874 return -EINVAL; 2932 2875 2933 2876 new_params = priv->channels.params; 2934 - new_params.mqprio.mode = TC_MQPRIO_MODE_DCB; 2935 - new_params.mqprio.num_tc = tc ? tc : 1; 2877 + mlx5e_params_mqprio_dcb_set(&new_params, tc ? 
tc : 1); 2936 2878 2937 2879 err = mlx5e_safe_switch_params(priv, &new_params, 2938 2880 mlx5e_num_channels_changed_ctx, NULL, true); ··· 2945 2889 struct tc_mqprio_qopt_offload *mqprio) 2946 2890 { 2947 2891 struct net_device *netdev = priv->netdev; 2892 + struct mlx5e_ptp *ptp_channel; 2948 2893 int agg_count = 0; 2949 2894 int i; 2895 + 2896 + ptp_channel = priv->channels.ptp; 2897 + if (ptp_channel && test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state)) { 2898 + netdev_err(netdev, 2899 + "Cannot activate MQPRIO mode channel since it conflicts with TX port TS\n"); 2900 + return -EINVAL; 2901 + } 2950 2902 2951 2903 if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 || 2952 2904 mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC) ··· 2990 2926 return 0; 2991 2927 } 2992 2928 2993 - static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx) 2994 - { 2995 - struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx; 2996 - struct net_device *netdev = priv->netdev; 2997 - u8 num_tc; 2998 - 2999 - if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL) 3000 - return -EINVAL; 3001 - 3002 - num_tc = priv->channels.params.mqprio.num_tc; 3003 - mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio); 3004 - 3005 - return 0; 3006 - } 3007 - 3008 2929 static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, 3009 2930 struct tc_mqprio_qopt_offload *mqprio) 3010 2931 { 2932 + mlx5e_fp_preactivate preactivate; 3011 2933 struct mlx5e_params new_params; 2934 + bool nch_changed; 3012 2935 int err; 3013 2936 3014 2937 err = mlx5e_mqprio_channel_validate(priv, mqprio); ··· 3003 2952 return err; 3004 2953 3005 2954 new_params = priv->channels.params; 3006 - new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL; 3007 - new_params.mqprio.num_tc = mqprio->qopt.num_tc; 3008 - err = mlx5e_safe_switch_params(priv, &new_params, 3009 - mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true); 2955 + mlx5e_params_mqprio_channel_set(&new_params, 
&mqprio->qopt); 3010 2956 3011 - return err; 2957 + nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1; 2958 + preactivate = nch_changed ? mlx5e_num_channels_changed_ctx : 2959 + mlx5e_update_netdev_queues_ctx; 2960 + return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); 3012 2961 } 3013 2962 3014 2963 static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, ··· 3116 3065 { 3117 3066 int i; 3118 3067 3119 - for (i = 0; i < priv->max_nch; i++) { 3068 + for (i = 0; i < priv->stats_nch; i++) { 3120 3069 struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; 3121 3070 struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq; 3122 3071 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; ··· 4237 4186 struct mlx5_core_dev *mdev = priv->mdev; 4238 4187 u8 rx_cq_period_mode; 4239 4188 4240 - priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile); 4241 - 4242 4189 params->sw_mtu = mtu; 4243 4190 params->hard_mtu = MLX5E_ETH_HARD_MTU; 4244 4191 params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, 4245 4192 priv->max_nch); 4246 - params->mqprio.num_tc = 1; 4193 + mlx5e_params_mqprio_reset(params); 4247 4194 4248 4195 /* Set an initial non-zero value, so that mlx5e_select_queue won't 4249 4196 * divide by zero if called before first activating channels. 
··· 4731 4682 .rx_ptp_support = true, 4732 4683 }; 4733 4684 4685 + static unsigned int 4686 + mlx5e_calc_max_nch(struct mlx5_core_dev *mdev, struct net_device *netdev, 4687 + const struct mlx5e_profile *profile) 4688 + 4689 + { 4690 + unsigned int max_nch, tmp; 4691 + 4692 + /* core resources */ 4693 + max_nch = mlx5e_get_max_num_channels(mdev); 4694 + 4695 + /* netdev rx queues */ 4696 + tmp = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1); 4697 + max_nch = min_t(unsigned int, max_nch, tmp); 4698 + 4699 + /* netdev tx queues */ 4700 + tmp = netdev->num_tx_queues; 4701 + if (mlx5_qos_is_supported(mdev)) 4702 + tmp -= mlx5e_qos_max_leaf_nodes(mdev); 4703 + if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn)) 4704 + tmp -= profile->max_tc; 4705 + tmp = tmp / profile->max_tc; 4706 + max_nch = min_t(unsigned int, max_nch, tmp); 4707 + 4708 + return max_nch; 4709 + } 4710 + 4734 4711 /* mlx5e generic netdev management API (move to en_common.c) */ 4735 4712 int mlx5e_priv_init(struct mlx5e_priv *priv, 4713 + const struct mlx5e_profile *profile, 4736 4714 struct net_device *netdev, 4737 4715 struct mlx5_core_dev *mdev) 4738 4716 { ··· 4767 4691 priv->mdev = mdev; 4768 4692 priv->netdev = netdev; 4769 4693 priv->msglevel = MLX5E_MSG_LEVEL; 4694 + priv->max_nch = mlx5e_calc_max_nch(mdev, netdev, profile); 4695 + priv->stats_nch = priv->max_nch; 4770 4696 priv->max_opened_tc = 1; 4771 4697 4772 4698 if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL)) ··· 4812 4734 } 4813 4735 4814 4736 struct net_device * 4815 - mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs) 4737 + mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, 4738 + unsigned int txqs, unsigned int rxqs) 4816 4739 { 4817 4740 struct net_device *netdev; 4818 4741 int err; ··· 4824 4745 return NULL; 4825 4746 } 4826 4747 4827 - err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev); 4748 + err = mlx5e_priv_init(netdev_priv(netdev), 
profile, netdev, mdev); 4828 4749 if (err) { 4829 4750 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err); 4830 4751 goto err_free_netdev; ··· 4866 4787 clear_bit(MLX5E_STATE_DESTROYING, &priv->state); 4867 4788 4868 4789 /* max number of channels may have changed */ 4869 - max_nch = mlx5e_get_max_num_channels(priv->mdev); 4790 + max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile); 4870 4791 if (priv->channels.params.num_channels > max_nch) { 4871 4792 mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); 4872 4793 /* Reducing the number of channels - RXFH has to be reset, and ··· 4874 4795 */ 4875 4796 priv->netdev->priv_flags &= ~IFF_RXFH_CONFIGURED; 4876 4797 priv->channels.params.num_channels = max_nch; 4798 + if (priv->channels.params.mqprio.mode == TC_MQPRIO_MODE_CHANNEL) { 4799 + mlx5_core_warn(priv->mdev, "MLX5E: Disabling MQPRIO channel mode\n"); 4800 + mlx5e_params_mqprio_reset(&priv->channels.params); 4801 + } 4877 4802 } 4803 + if (max_nch != priv->max_nch) { 4804 + mlx5_core_warn(priv->mdev, 4805 + "MLX5E: Updating max number of channels from %u to %u\n", 4806 + priv->max_nch, max_nch); 4807 + priv->max_nch = max_nch; 4808 + } 4809 + 4878 4810 /* 1. Set the real number of queues in the kernel the first time. 4879 4811 * 2. Set our default XPS cpumask. 4880 4812 * 3. Build the RQT. 
··· 4950 4860 struct mlx5e_priv *priv = netdev_priv(netdev); 4951 4861 int err; 4952 4862 4953 - err = mlx5e_priv_init(priv, netdev, mdev); 4863 + err = mlx5e_priv_init(priv, new_profile, netdev, mdev); 4954 4864 if (err) { 4955 4865 mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err); 4956 4866 return err; ··· 4976 4886 int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, 4977 4887 const struct mlx5e_profile *new_profile, void *new_ppriv) 4978 4888 { 4979 - unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile); 4980 4889 const struct mlx5e_profile *orig_profile = priv->profile; 4981 4890 struct net_device *netdev = priv->netdev; 4982 4891 struct mlx5_core_dev *mdev = priv->mdev; 4983 4892 void *orig_ppriv = priv->ppriv; 4984 4893 int err, rollback_err; 4985 - 4986 - /* sanity */ 4987 - if (new_max_nch != priv->max_nch) { 4988 - netdev_warn(netdev, "%s: Replacing profile with different max channels\n", 4989 - __func__); 4990 - return -EINVAL; 4991 - } 4992 4894 4993 4895 /* cleanup old profile */ 4994 4896 mlx5e_detach_netdev(priv); ··· 5077 4995 nch = mlx5e_get_max_num_channels(mdev); 5078 4996 txqs = nch * profile->max_tc + ptp_txqs + qos_sqs; 5079 4997 rxqs = nch * profile->rq_groups; 5080 - netdev = mlx5e_create_netdev(mdev, txqs, rxqs); 4998 + netdev = mlx5e_create_netdev(mdev, profile, txqs, rxqs); 5081 4999 if (!netdev) { 5082 5000 mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); 5083 5001 return -ENOMEM;
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 596 596 MLX5_CQ_PERIOD_MODE_START_FROM_CQE : 597 597 MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 598 598 599 - priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile); 600 599 params = &priv->channels.params; 601 600 602 601 params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS; ··· 1168 1169 nch = mlx5e_get_max_num_channels(dev); 1169 1170 txqs = nch * profile->max_tc; 1170 1171 rxqs = nch * profile->rq_groups; 1171 - netdev = mlx5e_create_netdev(dev, txqs, rxqs); 1172 + netdev = mlx5e_create_netdev(dev, profile, txqs, rxqs); 1172 1173 if (!netdev) { 1173 1174 mlx5_core_warn(dev, 1174 1175 "Failed to create representor netdev for vport %d\n",
+1 -6
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 1001 1001 goto csum_unnecessary; 1002 1002 1003 1003 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { 1004 - u8 ipproto = get_ip_proto(skb, network_depth, proto); 1005 - 1006 - if (unlikely(ipproto == IPPROTO_SCTP)) 1004 + if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) 1007 1005 goto csum_unnecessary; 1008 - 1009 - if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) 1010 - goto csum_none; 1011 1006 1012 1007 stats->csum_complete++; 1013 1008 skb->ip_summed = CHECKSUM_COMPLETE;
+6 -5
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
··· 34 34 #include "en.h" 35 35 #include "en_accel/tls.h" 36 36 #include "en_accel/en_accel.h" 37 + #include "en/ptp.h" 37 38 38 39 static unsigned int stats_grps_num(struct mlx5e_priv *priv) 39 40 { ··· 451 450 452 451 memset(s, 0, sizeof(*s)); 453 452 454 - for (i = 0; i < priv->max_nch; i++) { 453 + for (i = 0; i < priv->stats_nch; i++) { 455 454 struct mlx5e_channel_stats *channel_stats = 456 455 &priv->channel_stats[i]; 457 456 int j; ··· 2077 2076 if (priv->rx_ptp_opened) { 2078 2077 for (i = 0; i < NUM_PTP_RQ_STATS; i++) 2079 2078 sprintf(data + (idx++) * ETH_GSTRING_LEN, 2080 - ptp_rq_stats_desc[i].format); 2079 + ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX); 2081 2080 } 2082 2081 return idx; 2083 2082 } ··· 2120 2119 2121 2120 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels) 2122 2121 { 2123 - int max_nch = priv->max_nch; 2122 + int max_nch = priv->stats_nch; 2124 2123 2125 2124 return (NUM_RQ_STATS * max_nch) + 2126 2125 (NUM_CH_STATS * max_nch) + ··· 2134 2133 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels) 2135 2134 { 2136 2135 bool is_xsk = priv->xsk.ever_used; 2137 - int max_nch = priv->max_nch; 2136 + int max_nch = priv->stats_nch; 2138 2137 int i, j, tc; 2139 2138 2140 2139 for (i = 0; i < max_nch; i++) ··· 2176 2175 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels) 2177 2176 { 2178 2177 bool is_xsk = priv->xsk.ever_used; 2179 - int max_nch = priv->max_nch; 2178 + int max_nch = priv->stats_nch; 2180 2179 int i, j, tc; 2181 2180 2182 2181 for (i = 0; i < max_nch; i++)
+8 -4
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
··· 79 79 int dest_num = 0; 80 80 int err = 0; 81 81 82 - if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) { 82 + if (vport->egress.legacy.drop_counter) { 83 + drop_counter = vport->egress.legacy.drop_counter; 84 + } else if (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) { 83 85 drop_counter = mlx5_fc_create(esw->dev, false); 84 - if (IS_ERR(drop_counter)) 86 + if (IS_ERR(drop_counter)) { 85 87 esw_warn(esw->dev, 86 88 "vport[%d] configure egress drop rule counter err(%ld)\n", 87 89 vport->vport, PTR_ERR(drop_counter)); 90 + drop_counter = NULL; 91 + } 88 92 vport->egress.legacy.drop_counter = drop_counter; 89 93 } 90 94 ··· 127 123 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; 128 124 129 125 /* Attach egress drop flow counter */ 130 - if (!IS_ERR_OR_NULL(drop_counter)) { 126 + if (drop_counter) { 131 127 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; 132 128 drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 133 129 drop_ctr_dst.counter_id = mlx5_fc_id(drop_counter); ··· 166 162 esw_acl_egress_table_destroy(vport); 167 163 168 164 clean_drop_counter: 169 - if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter)) { 165 + if (vport->egress.legacy.drop_counter) { 170 166 mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter); 171 167 vport->egress.legacy.drop_counter = NULL; 172 168 }
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
··· 160 160 161 161 esw_acl_ingress_lgcy_rules_destroy(vport); 162 162 163 - if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) { 163 + if (vport->ingress.legacy.drop_counter) { 164 + counter = vport->ingress.legacy.drop_counter; 165 + } else if (MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) { 164 166 counter = mlx5_fc_create(esw->dev, false); 165 167 if (IS_ERR(counter)) { 166 168 esw_warn(esw->dev,
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 113 113 struct mlx5e_sw_stats s = { 0 }; 114 114 int i, j; 115 115 116 - for (i = 0; i < priv->max_nch; i++) { 116 + for (i = 0; i < priv->stats_nch; i++) { 117 117 struct mlx5e_channel_stats *channel_stats; 118 118 struct mlx5e_rq_stats *rq_stats; 119 119 ··· 711 711 goto destroy_ht; 712 712 } 713 713 714 - err = mlx5e_priv_init(epriv, netdev, mdev); 714 + err = mlx5e_priv_init(epriv, prof, netdev, mdev); 715 715 if (err) 716 716 goto destroy_mdev_resources; 717 717
+16 -21
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
··· 448 448 return cycles_now + cycles_delta; 449 449 } 450 450 451 - static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, 452 - s64 sec, u32 nsec) 451 + static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec) 453 452 { 454 - struct timespec64 ts; 453 + struct timespec64 ts = {}; 455 454 s64 target_ns; 456 455 457 456 ts.tv_sec = sec; 458 - ts.tv_nsec = nsec; 459 457 target_ns = timespec64_to_ns(&ts); 460 458 461 459 return find_target_cycles(mdev, target_ns); 462 460 } 463 461 464 - static u64 perout_conf_real_time(s64 sec, u32 nsec) 462 + static u64 perout_conf_real_time(s64 sec) 465 463 { 466 - return (u64)nsec | (u64)sec << 32; 464 + return (u64)sec << 32; 467 465 } 468 466 469 467 static int mlx5_perout_configure(struct ptp_clock_info *ptp, ··· 472 474 container_of(ptp, struct mlx5_clock, ptp_info); 473 475 struct mlx5_core_dev *mdev = 474 476 container_of(clock, struct mlx5_core_dev, clock); 477 + bool rt_mode = mlx5_real_time_mode(mdev); 475 478 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; 476 479 struct timespec64 ts; 477 480 u32 field_select = 0; ··· 500 501 501 502 if (on) { 502 503 bool rt_mode = mlx5_real_time_mode(mdev); 503 - u32 nsec; 504 - s64 sec; 504 + s64 sec = rq->perout.start.sec; 505 + 506 + if (rq->perout.start.nsec) 507 + return -EINVAL; 505 508 506 509 pin_mode = MLX5_PIN_MODE_OUT; 507 510 pattern = MLX5_OUT_PATTERN_PERIODIC; ··· 514 513 if ((ns >> 1) != 500000000LL) 515 514 return -EINVAL; 516 515 517 - nsec = rq->perout.start.nsec; 518 - sec = rq->perout.start.sec; 519 - 520 516 if (rt_mode && sec > U32_MAX) 521 517 return -EINVAL; 522 518 523 - time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) : 524 - perout_conf_internal_timer(mdev, sec, nsec); 519 + time_stamp = rt_mode ? 
perout_conf_real_time(sec) : 520 + perout_conf_internal_timer(mdev, sec); 525 521 526 522 field_select |= MLX5_MTPPS_FS_PIN_MODE | 527 523 MLX5_MTPPS_FS_PATTERN | ··· 535 537 err = mlx5_set_mtpps(mdev, in, sizeof(in)); 536 538 if (err) 537 539 return err; 540 + 541 + if (rt_mode) 542 + return 0; 538 543 539 544 return mlx5_set_mtppse(mdev, pin, 0, 540 545 MLX5_EVENT_MODE_REPETETIVE & on); ··· 706 705 static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev, 707 706 struct mlx5_clock *clock) 708 707 { 709 - bool rt_mode = mlx5_real_time_mode(mdev); 710 708 struct timespec64 ts; 711 709 s64 target_ns; 712 710 713 - if (rt_mode) 714 - ts = mlx5_ptp_gettimex_real_time(mdev, NULL); 715 - else 716 - mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL); 717 - 711 + mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL); 718 712 ts_next_sec(&ts); 719 713 target_ns = timespec64_to_ns(&ts); 720 714 721 - return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) : 722 - find_target_cycles(mdev, target_ns); 715 + return find_target_cycles(mdev, target_ns); 723 716 } 724 717 725 718 static int mlx5_pps_event(struct notifier_block *nb,
+5 -4
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
··· 13 13 #endif 14 14 15 15 #define MLX5_MAX_IRQ_NAME (32) 16 - /* max irq_index is 255. three chars */ 17 - #define MLX5_MAX_IRQ_IDX_CHARS (3) 16 + /* max irq_index is 2047, so four chars */ 17 + #define MLX5_MAX_IRQ_IDX_CHARS (4) 18 18 19 19 #define MLX5_SFS_PER_CTRL_IRQ 64 20 20 #define MLX5_IRQ_CTRL_SF_MAX 8 ··· 633 633 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table) 634 634 { 635 635 if (table->sf_comp_pool) 636 - return table->sf_comp_pool->xa_num_irqs.max - 637 - table->sf_comp_pool->xa_num_irqs.min + 1; 636 + return min_t(int, num_online_cpus(), 637 + table->sf_comp_pool->xa_num_irqs.max - 638 + table->sf_comp_pool->xa_num_irqs.min + 1); 638 639 else 639 640 return mlx5_irq_table_get_num_comp(table); 640 641 }
+2 -4
drivers/net/ethernet/micrel/Makefile
··· 4 4 # 5 5 6 6 obj-$(CONFIG_KS8842) += ks8842.o 7 - obj-$(CONFIG_KS8851) += ks8851.o 8 - ks8851-objs = ks8851_common.o ks8851_spi.o 9 - obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o 10 - ks8851_mll-objs = ks8851_common.o ks8851_par.o 7 + obj-$(CONFIG_KS8851) += ks8851_common.o ks8851_spi.o 8 + obj-$(CONFIG_KS8851_MLL) += ks8851_common.o ks8851_par.o 11 9 obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
+8
drivers/net/ethernet/micrel/ks8851_common.c
··· 1057 1057 1058 1058 return 0; 1059 1059 } 1060 + EXPORT_SYMBOL_GPL(ks8851_suspend); 1060 1061 1061 1062 int ks8851_resume(struct device *dev) 1062 1063 { ··· 1071 1070 1072 1071 return 0; 1073 1072 } 1073 + EXPORT_SYMBOL_GPL(ks8851_resume); 1074 1074 #endif 1075 1075 1076 1076 static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev) ··· 1245 1243 err_reg_io: 1246 1244 return ret; 1247 1245 } 1246 + EXPORT_SYMBOL_GPL(ks8851_probe_common); 1248 1247 1249 1248 int ks8851_remove_common(struct device *dev) 1250 1249 { ··· 1264 1261 1265 1262 return 0; 1266 1263 } 1264 + EXPORT_SYMBOL_GPL(ks8851_remove_common); 1265 + 1266 + MODULE_DESCRIPTION("KS8851 Network driver"); 1267 + MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); 1268 + MODULE_LICENSE("GPL");
+8 -13
drivers/net/ethernet/mscc/ocelot.c
··· 563 563 ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | 564 564 DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); 565 565 566 - /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of 567 - * reset 568 - */ 569 - ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed), 570 - DEV_CLOCK_CFG); 571 - 572 - /* No PFC */ 573 - ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed), 574 - ANA_PFC_PFC_CFG, port); 575 - 576 566 /* Core: Enable port for frame transfer */ 577 567 ocelot_fields_write(ocelot, port, 578 568 QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); ··· 1293 1303 return mask; 1294 1304 } 1295 1305 1296 - static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, 1306 + static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port, 1297 1307 struct net_device *bridge) 1298 1308 { 1309 + struct ocelot_port *ocelot_port = ocelot->ports[src_port]; 1299 1310 u32 mask = 0; 1300 1311 int port; 1301 1312 1313 + if (!ocelot_port || ocelot_port->bridge != bridge || 1314 + ocelot_port->stp_state != BR_STATE_FORWARDING) 1315 + return 0; 1316 + 1302 1317 for (port = 0; port < ocelot->num_phys_ports; port++) { 1303 - struct ocelot_port *ocelot_port = ocelot->ports[port]; 1318 + ocelot_port = ocelot->ports[port]; 1304 1319 1305 1320 if (!ocelot_port) 1306 1321 continue; ··· 1371 1376 struct net_device *bridge = ocelot_port->bridge; 1372 1377 struct net_device *bond = ocelot_port->bond; 1373 1378 1374 - mask = ocelot_get_bridge_fwd_mask(ocelot, bridge); 1379 + mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge); 1375 1380 mask |= cpu_fwd_mask; 1376 1381 mask &= ~BIT(port); 1377 1382 if (bond) {
+1 -1
drivers/net/ethernet/mscc/ocelot_devlink.c
··· 1 1 // SPDX-License-Identifier: (GPL-2.0 OR MIT) 2 - /* Copyright 2020-2021 NXP Semiconductors 2 + /* Copyright 2020-2021 NXP 3 3 */ 4 4 #include <net/devlink.h> 5 5 #include "ocelot.h"
+1 -1
drivers/net/ethernet/mscc/ocelot_mrp.c
··· 2 2 /* Microsemi Ocelot Switch driver 3 3 * 4 4 * Copyright (c) 2017, 2019 Microsemi Corporation 5 - * Copyright 2020-2021 NXP Semiconductors 5 + * Copyright 2020-2021 NXP 6 6 */ 7 7 8 8 #include <linux/if_bridge.h>
+1 -1
drivers/net/ethernet/mscc/ocelot_net.c
··· 5 5 * mscc_ocelot_switch_lib. 6 6 * 7 7 * Copyright (c) 2017, 2019 Microsemi Corporation 8 - * Copyright 2020-2021 NXP Semiconductors 8 + * Copyright 2020-2021 NXP 9 9 */ 10 10 11 11 #include <linux/if_bridge.h>
+2 -2
drivers/net/ethernet/mscc/ocelot_vcap.c
··· 998 998 } 999 999 1000 1000 struct ocelot_vcap_filter * 1001 - ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie, 1002 - bool tc_offload) 1001 + ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, 1002 + unsigned long cookie, bool tc_offload) 1003 1003 { 1004 1004 struct ocelot_vcap_filter *filter; 1005 1005
+3 -1
drivers/net/ethernet/pensando/ionic/ionic_lif.c
··· 1292 1292 if (err && err != -EEXIST) { 1293 1293 /* set the state back to NEW so we can try again later */ 1294 1294 f = ionic_rx_filter_by_addr(lif, addr); 1295 - if (f && f->state == IONIC_FILTER_STATE_SYNCED) 1295 + if (f && f->state == IONIC_FILTER_STATE_SYNCED) { 1296 1296 f->state = IONIC_FILTER_STATE_NEW; 1297 + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); 1298 + } 1297 1299 1298 1300 spin_unlock_bh(&lif->rx_filters.lock); 1299 1301
-3
drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
··· 349 349 list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) { 350 350 (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr); 351 351 352 - if (sync_item->f.state != IONIC_FILTER_STATE_SYNCED) 353 - set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); 354 - 355 352 list_del(&sync_item->list); 356 353 devm_kfree(dev, sync_item); 357 354 }
-9
drivers/net/ethernet/pensando/ionic/ionic_stats.c
··· 380 380 &ionic_dbg_intr_stats_desc[i]); 381 381 (*buf)++; 382 382 } 383 - for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) { 384 - **buf = IONIC_READ_STAT64(&txqcq->napi_stats, 385 - &ionic_dbg_napi_stats_desc[i]); 386 - (*buf)++; 387 - } 388 - for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) { 389 - **buf = txqcq->napi_stats.work_done_cntr[i]; 390 - (*buf)++; 391 - } 392 383 for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) { 393 384 **buf = txstats->sg_cntr[i]; 394 385 (*buf)++;
+8
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
··· 1297 1297 prev_weight = weight; 1298 1298 1299 1299 while (weight) { 1300 + /* If the HW device is during recovery, all resources are 1301 + * immediately reset without receiving a per-cid indication 1302 + * from HW. In this case we don't expect the cid_map to be 1303 + * cleared. 1304 + */ 1305 + if (p_hwfn->cdev->recov_in_prog) 1306 + return 0; 1307 + 1300 1308 msleep(QED_IWARP_MAX_CID_CLEAN_TIME); 1301 1309 1302 1310 weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+8
drivers/net/ethernet/qlogic/qed/qed_roce.c
··· 77 77 * Beyond the added delay we clear the bitmap anyway. 78 78 */ 79 79 while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) { 80 + /* If the HW device is during recovery, all resources are 81 + * immediately reset without receiving a per-cid indication 82 + * from HW. In this case we don't expect the cid bitmap to be 83 + * cleared. 84 + */ 85 + if (p_hwfn->cdev->recov_in_prog) 86 + return; 87 + 80 88 msleep(100); 81 89 if (wait_count++ > 20) { 82 90 DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+5
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
··· 21 21 #include <linux/delay.h> 22 22 #include <linux/mfd/syscon.h> 23 23 #include <linux/regmap.h> 24 + #include <linux/pm_runtime.h> 24 25 25 26 #include "stmmac_platform.h" 26 27 ··· 1529 1528 return ret; 1530 1529 } 1531 1530 1531 + pm_runtime_get_sync(dev); 1532 + 1532 1533 if (bsp_priv->integrated_phy) 1533 1534 rk_gmac_integrated_phy_powerup(bsp_priv); 1534 1535 ··· 1541 1538 { 1542 1539 if (gmac->integrated_phy) 1543 1540 rk_gmac_integrated_phy_powerdown(gmac); 1541 + 1542 + pm_runtime_put_sync(&gmac->pdev->dev); 1544 1543 1545 1544 phy_power_on(gmac, false); 1546 1545 gmac_clk_enable(gmac, false);
+9 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 477 477 stmmac_lpi_entry_timer_config(priv, 0); 478 478 del_timer_sync(&priv->eee_ctrl_timer); 479 479 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); 480 + if (priv->hw->xpcs) 481 + xpcs_config_eee(priv->hw->xpcs, 482 + priv->plat->mult_fact_100ns, 483 + false); 480 484 } 481 485 mutex_unlock(&priv->lock); 482 486 return false; ··· 490 486 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); 491 487 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, 492 488 eee_tw_timer); 489 + if (priv->hw->xpcs) 490 + xpcs_config_eee(priv->hw->xpcs, 491 + priv->plat->mult_fact_100ns, 492 + true); 493 493 } 494 494 495 495 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { ··· 1042 1034 stmmac_mac_set(priv, priv->ioaddr, false); 1043 1035 priv->eee_active = false; 1044 1036 priv->tx_lpi_enabled = false; 1045 - stmmac_eee_init(priv); 1037 + priv->eee_enabled = stmmac_eee_init(priv); 1046 1038 stmmac_set_eee_pls(priv, priv->hw, false); 1047 1039 1048 1040 if (priv->dma_cap.fpesel)
+1
drivers/net/ethernet/sun/Kconfig
··· 73 73 config SUNVNET_COMMON 74 74 tristate "Common routines to support Sun Virtual Networking" 75 75 depends on SUN_LDOMS 76 + depends on INET 76 77 default m 77 78 78 79 config SUNVNET
+1
drivers/net/hamradio/Kconfig
··· 48 48 config DMASCC 49 49 tristate "High-speed (DMA) SCC driver for AX.25" 50 50 depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API 51 + depends on VIRT_TO_BUS 51 52 help 52 53 This is a driver for high-speed SCC boards, i.e. those supporting 53 54 DMA on one port. You usually use those boards to connect your
+5 -1
drivers/net/mdio/mdio-ipq4019.c
··· 207 207 { 208 208 struct ipq4019_mdio_data *priv; 209 209 struct mii_bus *bus; 210 + struct resource *res; 210 211 int ret; 211 212 212 213 bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv)); ··· 225 224 return PTR_ERR(priv->mdio_clk); 226 225 227 226 /* The platform resource is provided on the chipset IPQ5018 */ 228 - priv->eth_ldo_rdy = devm_platform_ioremap_resource(pdev, 1); 227 + /* This resource is optional */ 228 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 229 + if (res) 230 + priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res); 229 231 230 232 bus->name = "ipq4019_mdio"; 231 233 bus->read = ipq4019_mdio_read;
+10 -5
drivers/net/mdio/mdio-mscc-miim.c
··· 134 134 135 135 static int mscc_miim_probe(struct platform_device *pdev) 136 136 { 137 - struct mii_bus *bus; 138 137 struct mscc_miim_dev *dev; 138 + struct resource *res; 139 + struct mii_bus *bus; 139 140 int ret; 140 141 141 142 bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev)); ··· 157 156 return PTR_ERR(dev->regs); 158 157 } 159 158 160 - dev->phy_regs = devm_platform_ioremap_resource(pdev, 1); 161 - if (IS_ERR(dev->phy_regs)) { 162 - dev_err(&pdev->dev, "Unable to map internal phy registers\n"); 163 - return PTR_ERR(dev->phy_regs); 159 + /* This resource is optional */ 160 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 161 + if (res) { 162 + dev->phy_regs = devm_ioremap_resource(&pdev->dev, res); 163 + if (IS_ERR(dev->phy_regs)) { 164 + dev_err(&pdev->dev, "Unable to map internal phy registers\n"); 165 + return PTR_ERR(dev->phy_regs); 166 + } 164 167 } 165 168 166 169 ret = of_mdiobus_register(bus, pdev->dev.of_node);
+1 -5
drivers/net/mhi_net.c
··· 321 321 /* Start MHI channels */ 322 322 err = mhi_prepare_for_transfer(mhi_dev); 323 323 if (err) 324 - goto out_err; 324 + return err; 325 325 326 326 /* Number of transfer descriptors determines size of the queue */ 327 327 mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); ··· 331 331 return err; 332 332 333 333 return 0; 334 - 335 - out_err: 336 - free_netdev(ndev); 337 - return err; 338 334 } 339 335 340 336 static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
+1 -1
drivers/net/pcs/pcs-xpcs-nxp.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright 2021 NXP Semiconductors 2 + /* Copyright 2021 NXP 3 3 */ 4 4 #include <linux/pcs/pcs-xpcs.h> 5 5 #include "pcs-xpcs.h"
+36 -9
drivers/net/pcs/pcs-xpcs.c
··· 666 666 { 667 667 int ret; 668 668 669 + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0); 670 + if (ret < 0) 671 + return ret; 672 + 669 673 if (enable) { 670 674 /* Enable EEE */ 671 675 ret = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | ··· 677 673 DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | 678 674 mult_fact_100ns << DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT; 679 675 } else { 680 - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0); 681 - if (ret < 0) 682 - return ret; 683 676 ret &= ~(DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | 684 677 DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | 685 678 DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | ··· 691 690 if (ret < 0) 692 691 return ret; 693 692 694 - ret |= DW_VR_MII_EEE_TRN_LPI; 693 + if (enable) 694 + ret |= DW_VR_MII_EEE_TRN_LPI; 695 + else 696 + ret &= ~DW_VR_MII_EEE_TRN_LPI; 697 + 695 698 return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, ret); 696 699 } 697 700 EXPORT_SYMBOL_GPL(xpcs_config_eee); 698 701 699 702 static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode) 700 703 { 701 - int ret; 704 + int ret, mdio_ctrl; 702 705 703 706 /* For AN for C37 SGMII mode, the settings are :- 704 - * 1) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN) 705 - * 2) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII) 707 + * 1) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 0b (Disable SGMII AN in case 708 + it is already enabled) 709 + * 2) VR_MII_AN_CTRL Bit(2:1)[PCS_MODE] = 10b (SGMII AN) 710 + * 3) VR_MII_AN_CTRL Bit(3) [TX_CONFIG] = 0b (MAC side SGMII) 706 711 * DW xPCS used with DW EQoS MAC is always MAC side SGMII. 
707 - * 3) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic 712 + * 4) VR_MII_DIG_CTRL1 Bit(9) [MAC_AUTO_SW] = 1b (Automatic 708 713 * speed/duplex mode change by HW after SGMII AN complete) 714 + * 5) VR_MII_MMD_CTRL Bit(12) [AN_ENABLE] = 1b (Enable SGMII AN) 709 715 * 710 716 * Note: Since it is MAC side SGMII, there is no need to set 711 717 * SR_MII_AN_ADV. MAC side SGMII receives AN Tx Config from ··· 720 712 * between PHY and Link Partner. There is also no need to 721 713 * trigger AN restart for MAC-side SGMII. 722 714 */ 715 + mdio_ctrl = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL); 716 + if (mdio_ctrl < 0) 717 + return mdio_ctrl; 718 + 719 + if (mdio_ctrl & AN_CL37_EN) { 720 + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, 721 + mdio_ctrl & ~AN_CL37_EN); 722 + if (ret < 0) 723 + return ret; 724 + } 725 + 723 726 ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_AN_CTRL); 724 727 if (ret < 0) 725 728 return ret; ··· 755 736 else 756 737 ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; 757 738 758 - return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); 739 + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); 740 + if (ret < 0) 741 + return ret; 742 + 743 + if (phylink_autoneg_inband(mode)) 744 + ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, 745 + mdio_ctrl | AN_CL37_EN); 746 + 747 + return ret; 759 748 } 760 749 761 750 static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
+110 -4
drivers/net/phy/bcm7xxx.c
··· 27 27 #define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe 28 28 #define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf 29 29 #define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a 30 + #define MII_BCM7XXX_SHD_3_PCS_CTRL 0x0 31 + #define MII_BCM7XXX_SHD_3_PCS_STATUS 0x1 32 + #define MII_BCM7XXX_SHD_3_EEE_CAP 0x2 30 33 #define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3 34 + #define MII_BCM7XXX_SHD_3_EEE_LP 0x4 35 + #define MII_BCM7XXX_SHD_3_EEE_WK_ERR 0x5 31 36 #define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6 32 37 #define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400 33 38 #define MII_BCM7XXX_SHD_3_AN_STAT 0xb ··· 221 216 return genphy_config_aneg(phydev); 222 217 } 223 218 224 - static int phy_set_clr_bits(struct phy_device *dev, int location, 225 - int set_mask, int clr_mask) 219 + static int __phy_set_clr_bits(struct phy_device *dev, int location, 220 + int set_mask, int clr_mask) 226 221 { 227 222 int v, ret; 228 223 229 - v = phy_read(dev, location); 224 + v = __phy_read(dev, location); 230 225 if (v < 0) 231 226 return v; 232 227 233 228 v &= ~clr_mask; 234 229 v |= set_mask; 235 230 236 - ret = phy_write(dev, location, v); 231 + ret = __phy_write(dev, location, v); 237 232 if (ret < 0) 238 233 return ret; 239 234 240 235 return v; 236 + } 237 + 238 + static int phy_set_clr_bits(struct phy_device *dev, int location, 239 + int set_mask, int clr_mask) 240 + { 241 + int ret; 242 + 243 + mutex_lock(&dev->mdio.bus->mdio_lock); 244 + ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask); 245 + mutex_unlock(&dev->mdio.bus->mdio_lock); 246 + 247 + return ret; 241 248 } 242 249 243 250 static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev) ··· 413 396 return ret; 414 397 415 398 return bcm7xxx_28nm_ephy_apd_enable(phydev); 399 + } 400 + 401 + #define MII_BCM7XXX_REG_INVALID 0xff 402 + 403 + static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum) 404 + { 405 + switch (regnum) { 406 + case MDIO_CTRL1: 407 + return MII_BCM7XXX_SHD_3_PCS_CTRL; 408 + case MDIO_STAT1: 409 + return MII_BCM7XXX_SHD_3_PCS_STATUS; 410 + 
case MDIO_PCS_EEE_ABLE: 411 + return MII_BCM7XXX_SHD_3_EEE_CAP; 412 + case MDIO_AN_EEE_ADV: 413 + return MII_BCM7XXX_SHD_3_AN_EEE_ADV; 414 + case MDIO_AN_EEE_LPABLE: 415 + return MII_BCM7XXX_SHD_3_EEE_LP; 416 + case MDIO_PCS_EEE_WK_ERR: 417 + return MII_BCM7XXX_SHD_3_EEE_WK_ERR; 418 + default: 419 + return MII_BCM7XXX_REG_INVALID; 420 + } 421 + } 422 + 423 + static bool bcm7xxx_28nm_ephy_dev_valid(int devnum) 424 + { 425 + return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS; 426 + } 427 + 428 + static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev, 429 + int devnum, u16 regnum) 430 + { 431 + u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum); 432 + int ret; 433 + 434 + if (!bcm7xxx_28nm_ephy_dev_valid(devnum) || 435 + shd == MII_BCM7XXX_REG_INVALID) 436 + return -EOPNOTSUPP; 437 + 438 + /* set shadow mode 2 */ 439 + ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 440 + MII_BCM7XXX_SHD_MODE_2, 0); 441 + if (ret < 0) 442 + return ret; 443 + 444 + /* Access the desired shadow register address */ 445 + ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd); 446 + if (ret < 0) 447 + goto reset_shadow_mode; 448 + 449 + ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT); 450 + 451 + reset_shadow_mode: 452 + /* reset shadow mode 2 */ 453 + __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, 454 + MII_BCM7XXX_SHD_MODE_2); 455 + return ret; 456 + } 457 + 458 + static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev, 459 + int devnum, u16 regnum, u16 val) 460 + { 461 + u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum); 462 + int ret; 463 + 464 + if (!bcm7xxx_28nm_ephy_dev_valid(devnum) || 465 + shd == MII_BCM7XXX_REG_INVALID) 466 + return -EOPNOTSUPP; 467 + 468 + /* set shadow mode 2 */ 469 + ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 470 + MII_BCM7XXX_SHD_MODE_2, 0); 471 + if (ret < 0) 472 + return ret; 473 + 474 + /* Access the desired shadow register address */ 475 + ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd); 476 + if 
(ret < 0) 477 + goto reset_shadow_mode; 478 + 479 + /* Write the desired value in the shadow register */ 480 + __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val); 481 + 482 + reset_shadow_mode: 483 + /* reset shadow mode 2 */ 484 + return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, 485 + MII_BCM7XXX_SHD_MODE_2); 416 486 } 417 487 418 488 static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev) ··· 699 595 .get_stats = bcm7xxx_28nm_get_phy_stats, \ 700 596 .probe = bcm7xxx_28nm_probe, \ 701 597 .remove = bcm7xxx_28nm_remove, \ 598 + .read_mmd = bcm7xxx_28nm_ephy_read_mmd, \ 599 + .write_mmd = bcm7xxx_28nm_ephy_write_mmd, \ 702 600 } 703 601 704 602 #define BCM7XXX_40NM_EPHY(_oui, _name) \
+11
drivers/net/phy/mdio_bus.c
··· 525 525 NULL == bus->read || NULL == bus->write) 526 526 return -EINVAL; 527 527 528 + if (bus->parent && bus->parent->of_node) 529 + bus->parent->of_node->fwnode.flags |= 530 + FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD; 531 + 528 532 BUG_ON(bus->state != MDIOBUS_ALLOCATED && 529 533 bus->state != MDIOBUS_UNREGISTERED); 530 534 ··· 537 533 bus->dev.class = &mdio_bus_class; 538 534 bus->dev.groups = NULL; 539 535 dev_set_name(&bus->dev, "%s", bus->id); 536 + 537 + /* We need to set state to MDIOBUS_UNREGISTERED to correctly release 538 + * the device in mdiobus_free() 539 + * 540 + * State will be updated later in this function in case of success 541 + */ 542 + bus->state = MDIOBUS_UNREGISTERED; 540 543 541 544 err = device_register(&bus->dev); 542 545 if (err) {
+11
drivers/net/phy/mdio_device.c
··· 179 179 return 0; 180 180 } 181 181 182 + static void mdio_shutdown(struct device *dev) 183 + { 184 + struct mdio_device *mdiodev = to_mdio_device(dev); 185 + struct device_driver *drv = mdiodev->dev.driver; 186 + struct mdio_driver *mdiodrv = to_mdio_driver(drv); 187 + 188 + if (mdiodrv->shutdown) 189 + mdiodrv->shutdown(mdiodev); 190 + } 191 + 182 192 /** 183 193 * mdio_driver_register - register an mdio_driver with the MDIO layer 184 194 * @drv: new mdio_driver to register ··· 203 193 mdiodrv->driver.bus = &mdio_bus_type; 204 194 mdiodrv->driver.probe = mdio_probe; 205 195 mdiodrv->driver.remove = mdio_remove; 196 + mdiodrv->driver.shutdown = mdio_shutdown; 206 197 207 198 retval = driver_register(&mdiodrv->driver); 208 199 if (retval) {
+21 -2
drivers/net/phy/mxl-gpy.c
··· 493 493 return ret; 494 494 } 495 495 496 + static int gpy115_loopback(struct phy_device *phydev, bool enable) 497 + { 498 + int ret; 499 + int fw_minor; 500 + 501 + if (enable) 502 + return gpy_loopback(phydev, enable); 503 + 504 + ret = phy_read(phydev, PHY_FWV); 505 + if (ret < 0) 506 + return ret; 507 + 508 + fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, ret); 509 + if (fw_minor > 0x0076) 510 + return gpy_loopback(phydev, 0); 511 + 512 + return genphy_soft_reset(phydev); 513 + } 514 + 496 515 static struct phy_driver gpy_drivers[] = { 497 516 { 498 517 PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx), ··· 546 527 .handle_interrupt = gpy_handle_interrupt, 547 528 .set_wol = gpy_set_wol, 548 529 .get_wol = gpy_get_wol, 549 - .set_loopback = gpy_loopback, 530 + .set_loopback = gpy115_loopback, 550 531 }, 551 532 { 552 533 PHY_ID_MATCH_MODEL(PHY_ID_GPY115C), ··· 563 544 .handle_interrupt = gpy_handle_interrupt, 564 545 .set_wol = gpy_set_wol, 565 546 .get_wol = gpy_get_wol, 566 - .set_loopback = gpy_loopback, 547 + .set_loopback = gpy115_loopback, 567 548 }, 568 549 { 569 550 .phy_id = PHY_ID_GPY211B,
+1 -1
drivers/net/phy/sfp.c
··· 134 134 [SFP_S_LINK_UP] = "link_up", 135 135 [SFP_S_TX_FAULT] = "tx_fault", 136 136 [SFP_S_REINIT] = "reinit", 137 - [SFP_S_TX_DISABLE] = "rx_disable", 137 + [SFP_S_TX_DISABLE] = "tx_disable", 138 138 }; 139 139 140 140 static const char *sm_state_to_str(unsigned short sm_state)
+5 -7
drivers/net/usb/hso.c
··· 2719 2719 2720 2720 serial = kzalloc(sizeof(*serial), GFP_KERNEL); 2721 2721 if (!serial) 2722 - goto exit; 2722 + goto err_free_dev; 2723 2723 2724 2724 hso_dev->port_data.dev_serial = serial; 2725 2725 serial->parent = hso_dev; 2726 2726 2727 2727 if (hso_serial_common_create 2728 2728 (serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE)) 2729 - goto exit; 2729 + goto err_free_serial; 2730 2730 2731 2731 serial->tx_data_length--; 2732 2732 serial->write_data = hso_mux_serial_write_data; ··· 2742 2742 /* done, return it */ 2743 2743 return hso_dev; 2744 2744 2745 - exit: 2746 - if (serial) { 2747 - tty_unregister_device(tty_drv, serial->minor); 2748 - kfree(serial); 2749 - } 2745 + err_free_serial: 2746 + kfree(serial); 2747 + err_free_dev: 2750 2748 kfree(hso_dev); 2751 2749 return NULL; 2752 2750
+15 -1
drivers/net/usb/r8152.c
··· 767 767 PHY_RESET, 768 768 SCHEDULE_TASKLET, 769 769 GREEN_ETHERNET, 770 + RX_EPROTO, 770 771 }; 771 772 772 773 #define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082 ··· 1771 1770 rtl_set_unplug(tp); 1772 1771 netif_device_detach(tp->netdev); 1773 1772 return; 1773 + case -EPROTO: 1774 + urb->actual_length = 0; 1775 + spin_lock_irqsave(&tp->rx_lock, flags); 1776 + list_add_tail(&agg->list, &tp->rx_done); 1777 + spin_unlock_irqrestore(&tp->rx_lock, flags); 1778 + set_bit(RX_EPROTO, &tp->flags); 1779 + schedule_delayed_work(&tp->schedule, 1); 1780 + return; 1774 1781 case -ENOENT: 1775 1782 return; /* the urb is in unlink state */ 1776 1783 case -ETIME: ··· 2434 2425 if (list_empty(&tp->rx_done)) 2435 2426 goto out1; 2436 2427 2428 + clear_bit(RX_EPROTO, &tp->flags); 2437 2429 INIT_LIST_HEAD(&rx_queue); 2438 2430 spin_lock_irqsave(&tp->rx_lock, flags); 2439 2431 list_splice_init(&tp->rx_done, &rx_queue); ··· 2451 2441 2452 2442 agg = list_entry(cursor, struct rx_agg, list); 2453 2443 urb = agg->urb; 2454 - if (urb->actual_length < ETH_ZLEN) 2444 + if (urb->status != 0 || urb->actual_length < ETH_ZLEN) 2455 2445 goto submit; 2456 2446 2457 2447 agg_free = rtl_get_free_rx(tp, GFP_ATOMIC); ··· 6652 6642 if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) && 6653 6643 netif_carrier_ok(tp->netdev)) 6654 6644 tasklet_schedule(&tp->tx_tl); 6645 + 6646 + if (test_and_clear_bit(RX_EPROTO, &tp->flags) && 6647 + !list_empty(&tp->rx_done)) 6648 + napi_schedule(&tp->napi); 6655 6649 6656 6650 mutex_unlock(&tp->control); 6657 6651
+3
drivers/net/usb/smsc95xx.c
··· 1178 1178 1179 1179 static void smsc95xx_handle_link_change(struct net_device *net) 1180 1180 { 1181 + struct usbnet *dev = netdev_priv(net); 1182 + 1181 1183 phy_print_status(net->phydev); 1184 + usbnet_defer_kevent(dev, EVENT_LINK_CHANGE); 1182 1185 } 1183 1186 1184 1187 static int smsc95xx_start_phy(struct usbnet *dev)
+4
drivers/net/virtio_net.c
··· 423 423 424 424 skb_reserve(skb, p - buf); 425 425 skb_put(skb, len); 426 + 427 + page = (struct page *)page->private; 428 + if (page) 429 + give_pages(rq, page); 426 430 goto ok; 427 431 } 428 432
+1 -1
drivers/net/vxlan.c
··· 4756 4756 LIST_HEAD(list); 4757 4757 unsigned int h; 4758 4758 4759 - rtnl_lock(); 4760 4759 list_for_each_entry(net, net_list, exit_list) { 4761 4760 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 4762 4761 4763 4762 unregister_nexthop_notifier(net, &vn->nexthop_notifier_block); 4764 4763 } 4764 + rtnl_lock(); 4765 4765 list_for_each_entry(net, net_list, exit_list) 4766 4766 vxlan_destroy_tunnels(net, &list); 4767 4767
+1 -3
drivers/net/wireless/ath/ath5k/Kconfig
··· 3 3 tristate "Atheros 5xxx wireless cards support" 4 4 depends on (PCI || ATH25) && MAC80211 5 5 select ATH_COMMON 6 - select MAC80211_LEDS 7 - select LEDS_CLASS 8 - select NEW_LEDS 6 + select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211 9 7 select ATH5K_AHB if ATH25 10 8 select ATH5K_PCI if !ATH25 11 9 help
+6 -4
drivers/net/wireless/ath/ath5k/led.c
··· 89 89 90 90 void ath5k_led_enable(struct ath5k_hw *ah) 91 91 { 92 - if (test_bit(ATH_STAT_LEDSOFT, ah->status)) { 92 + if (IS_ENABLED(CONFIG_MAC80211_LEDS) && 93 + test_bit(ATH_STAT_LEDSOFT, ah->status)) { 93 94 ath5k_hw_set_gpio_output(ah, ah->led_pin); 94 95 ath5k_led_off(ah); 95 96 } ··· 105 104 106 105 void ath5k_led_off(struct ath5k_hw *ah) 107 106 { 108 - if (!test_bit(ATH_STAT_LEDSOFT, ah->status)) 107 + if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || 108 + !test_bit(ATH_STAT_LEDSOFT, ah->status)) 109 109 return; 110 110 ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on); 111 111 } ··· 148 146 static void 149 147 ath5k_unregister_led(struct ath5k_led *led) 150 148 { 151 - if (!led->ah) 149 + if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !led->ah) 152 150 return; 153 151 led_classdev_unregister(&led->led_dev); 154 152 ath5k_led_off(led->ah); ··· 171 169 char name[ATH5K_LED_MAX_NAME_LEN + 1]; 172 170 const struct pci_device_id *match; 173 171 174 - if (!ah->pdev) 172 + if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !ah->pdev) 175 173 return 0; 176 174 177 175 #ifdef CONFIG_ATH5K_AHB
+6 -11
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 7463 7463 s32 found_index; 7464 7464 int i; 7465 7465 7466 + country_codes = drvr->settings->country_codes; 7467 + if (!country_codes) { 7468 + brcmf_dbg(TRACE, "No country codes configured for device\n"); 7469 + return -EINVAL; 7470 + } 7471 + 7466 7472 if ((alpha2[0] == ccreq->country_abbrev[0]) && 7467 7473 (alpha2[1] == ccreq->country_abbrev[1])) { 7468 7474 brcmf_dbg(TRACE, "Country code already set\n"); 7469 7475 return -EAGAIN; 7470 - } 7471 - 7472 - country_codes = drvr->settings->country_codes; 7473 - if (!country_codes) { 7474 - brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n"); 7475 - memset(ccreq, 0, sizeof(*ccreq)); 7476 - ccreq->country_abbrev[0] = alpha2[0]; 7477 - ccreq->country_abbrev[1] = alpha2[1]; 7478 - ccreq->ccode[0] = alpha2[0]; 7479 - ccreq->ccode[1] = alpha2[1]; 7480 - return 0; 7481 7476 } 7482 7477 7483 7478 found_index = -1;
+3 -2
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 160 160 mvm->ptk_icvlen = key->icv_len; 161 161 mvm->gtk_ivlen = key->iv_len; 162 162 mvm->gtk_icvlen = key->icv_len; 163 + mutex_unlock(&mvm->mutex); 163 164 164 165 /* don't upload key again */ 165 166 return; ··· 361 360 if (sta) { 362 361 rsc = data->rsc->ucast_rsc; 363 362 } else { 364 - if (WARN_ON(data->gtks > ARRAY_SIZE(data->gtk_ids))) 363 + if (WARN_ON(data->gtks >= ARRAY_SIZE(data->gtk_ids))) 365 364 return; 366 365 data->gtk_ids[data->gtks] = key->keyidx; 367 366 rsc = data->rsc->mcast_rsc[data->gtks % 2]; 368 - if (WARN_ON(key->keyidx > 367 + if (WARN_ON(key->keyidx >= 369 368 ARRAY_SIZE(data->rsc->mcast_key_id_map))) 370 369 return; 371 370 data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2;
+2 -1
drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
··· 662 662 u32 *uid) 663 663 { 664 664 u32 id; 665 - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 665 + struct iwl_mvm_vif *mvmvif; 666 666 enum nl80211_iftype iftype; 667 667 668 668 if (!te_data->vif) 669 669 return false; 670 670 671 + mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); 671 672 iftype = te_data->vif->type; 672 673 673 674 /*
+2
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 547 547 IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL), 548 548 IWL_DEV_INFO(0x43F0, 0x0078, iwl_ax201_cfg_qu_hr, NULL), 549 549 IWL_DEV_INFO(0x43F0, 0x007C, iwl_ax201_cfg_qu_hr, NULL), 550 + IWL_DEV_INFO(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650s_name), 551 + IWL_DEV_INFO(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, iwl_ax201_killer_1650i_name), 550 552 IWL_DEV_INFO(0x43F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL), 551 553 IWL_DEV_INFO(0x43F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL), 552 554 IWL_DEV_INFO(0xA0F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
+2 -2
drivers/net/wireless/mac80211_hwsim.c
··· 1867 1867 bcn_int -= data->bcn_delta; 1868 1868 data->bcn_delta = 0; 1869 1869 } 1870 - hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer), 1871 - ns_to_ktime(bcn_int * NSEC_PER_USEC)); 1870 + hrtimer_forward_now(&data->beacon_timer, 1871 + ns_to_ktime(bcn_int * NSEC_PER_USEC)); 1872 1872 return HRTIMER_RESTART; 1873 1873 } 1874 1874
+2 -2
drivers/net/wireless/marvell/mwifiex/sta_tx.c
··· 62 62 63 63 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0; 64 64 65 - pad = ((void *)skb->data - (sizeof(*local_tx_pd) + hroom)- 66 - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1); 65 + pad = ((uintptr_t)skb->data - (sizeof(*local_tx_pd) + hroom)) & 66 + (MWIFIEX_DMA_ALIGN_SZ - 1); 67 67 skb_push(skb, sizeof(*local_tx_pd) + pad); 68 68 69 69 local_tx_pd = (struct txpd *) skb->data;
+2 -2
drivers/net/wireless/marvell/mwifiex/uap_txrx.c
··· 475 475 476 476 pkt_type = mwifiex_is_skb_mgmt_frame(skb) ? PKT_TYPE_MGMT : 0; 477 477 478 - pad = ((void *)skb->data - (sizeof(*txpd) + hroom) - NULL) & 479 - (MWIFIEX_DMA_ALIGN_SZ - 1); 478 + pad = ((uintptr_t)skb->data - (sizeof(*txpd) + hroom)) & 479 + (MWIFIEX_DMA_ALIGN_SZ - 1); 480 480 481 481 skb_push(skb, sizeof(*txpd) + pad); 482 482
+1 -1
drivers/net/xen-netback/netback.c
··· 499 499 * the header's copy failed, and they are 500 500 * sharing a slot, send an error 501 501 */ 502 - if (i == 0 && sharedslot) 502 + if (i == 0 && !first_shinfo && sharedslot) 503 503 xenvif_idx_release(queue, pending_idx, 504 504 XEN_NETIF_RSP_ERROR); 505 505 else
+1
drivers/nfc/st-nci/spi.c
··· 278 278 279 279 static struct spi_device_id st_nci_spi_id_table[] = { 280 280 {ST_NCI_SPI_DRIVER_NAME, 0}, 281 + {"st21nfcb-spi", 0}, 281 282 {} 282 283 }; 283 284 MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
+1 -4
drivers/nvdimm/pmem.c
··· 380 380 struct nd_pfn_sb *pfn_sb; 381 381 struct pmem_device *pmem; 382 382 struct request_queue *q; 383 - struct device *gendev; 384 383 struct gendisk *disk; 385 384 void *addr; 386 385 int rc; ··· 488 489 } 489 490 dax_write_cache(dax_dev, nvdimm_has_cache(nd_region)); 490 491 pmem->dax_dev = dax_dev; 491 - gendev = disk_to_dev(disk); 492 - gendev->groups = pmem_attribute_groups; 493 492 494 - device_add_disk(dev, disk, NULL); 493 + device_add_disk(dev, disk, pmem_attribute_groups); 495 494 if (devm_add_action_or_reset(dev, pmem_release_disk, pmem)) 496 495 return -ENOMEM; 497 496
+20 -17
drivers/nvme/host/core.c
··· 13 13 #include <linux/kernel.h> 14 14 #include <linux/module.h> 15 15 #include <linux/backing-dev.h> 16 - #include <linux/list_sort.h> 17 16 #include <linux/slab.h> 18 17 #include <linux/types.h> 19 18 #include <linux/pr.h> ··· 978 979 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req) 979 980 { 980 981 struct nvme_command *cmd = nvme_req(req)->cmd; 982 + struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; 981 983 blk_status_t ret = BLK_STS_OK; 982 984 983 985 if (!(req->rq_flags & RQF_DONTPREP)) { ··· 1027 1027 return BLK_STS_IOERR; 1028 1028 } 1029 1029 1030 - nvme_req(req)->genctr++; 1030 + if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN)) 1031 + nvme_req(req)->genctr++; 1031 1032 cmd->common.command_id = nvme_cid(req); 1032 1033 trace_nvme_setup_cmd(req, cmd); 1033 1034 return ret; ··· 3717 3716 return ret; 3718 3717 } 3719 3718 3720 - static int ns_cmp(void *priv, const struct list_head *a, 3721 - const struct list_head *b) 3722 - { 3723 - struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); 3724 - struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); 3725 - 3726 - return nsa->head->ns_id - nsb->head->ns_id; 3727 - } 3728 - 3729 3719 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) 3730 3720 { 3731 3721 struct nvme_ns *ns, *ret = NULL; ··· 3736 3744 return ret; 3737 3745 } 3738 3746 EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); 3747 + 3748 + /* 3749 + * Add the namespace to the controller list while keeping the list ordered. 
3750 + */ 3751 + static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) 3752 + { 3753 + struct nvme_ns *tmp; 3754 + 3755 + list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) { 3756 + if (tmp->head->ns_id < ns->head->ns_id) { 3757 + list_add(&ns->list, &tmp->list); 3758 + return; 3759 + } 3760 + } 3761 + list_add(&ns->list, &ns->ctrl->namespaces); 3762 + } 3739 3763 3740 3764 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid, 3741 3765 struct nvme_ns_ids *ids) ··· 3803 3795 goto out_unlink_ns; 3804 3796 3805 3797 down_write(&ctrl->namespaces_rwsem); 3806 - list_add_tail(&ns->list, &ctrl->namespaces); 3798 + nvme_ns_add_to_ctrl_list(ns); 3807 3799 up_write(&ctrl->namespaces_rwsem); 3808 - 3809 3800 nvme_get_ctrl(ctrl); 3810 3801 3811 3802 if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups)) ··· 4087 4080 if (nvme_scan_ns_list(ctrl) != 0) 4088 4081 nvme_scan_ns_sequential(ctrl); 4089 4082 mutex_unlock(&ctrl->scan_lock); 4090 - 4091 - down_write(&ctrl->namespaces_rwsem); 4092 - list_sort(NULL, &ctrl->namespaces, ns_cmp); 4093 - up_write(&ctrl->namespaces_rwsem); 4094 4083 } 4095 4084 4096 4085 /*
+9 -9
drivers/nvme/host/fc.c
··· 2487 2487 */ 2488 2488 if (ctrl->ctrl.queue_count > 1) { 2489 2489 nvme_stop_queues(&ctrl->ctrl); 2490 + nvme_sync_io_queues(&ctrl->ctrl); 2490 2491 blk_mq_tagset_busy_iter(&ctrl->tag_set, 2491 2492 nvme_fc_terminate_exchange, &ctrl->ctrl); 2492 2493 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); ··· 2511 2510 * clean up the admin queue. Same thing as above. 2512 2511 */ 2513 2512 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); 2513 + blk_sync_queue(ctrl->ctrl.admin_q); 2514 2514 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 2515 2515 nvme_fc_terminate_exchange, &ctrl->ctrl); 2516 2516 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); ··· 2953 2951 if (ctrl->ctrl.queue_count == 1) 2954 2952 return 0; 2955 2953 2954 + if (prior_ioq_cnt != nr_io_queues) { 2955 + dev_info(ctrl->ctrl.device, 2956 + "reconnect: revising io queue count from %d to %d\n", 2957 + prior_ioq_cnt, nr_io_queues); 2958 + blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); 2959 + } 2960 + 2956 2961 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2957 2962 if (ret) 2958 2963 goto out_free_io_queues; ··· 2967 2958 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); 2968 2959 if (ret) 2969 2960 goto out_delete_hw_queues; 2970 - 2971 - if (prior_ioq_cnt != nr_io_queues) { 2972 - dev_info(ctrl->ctrl.device, 2973 - "reconnect: revising io queue count from %d to %d\n", 2974 - prior_ioq_cnt, nr_io_queues); 2975 - nvme_wait_freeze(&ctrl->ctrl); 2976 - blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); 2977 - nvme_unfreeze(&ctrl->ctrl); 2978 - } 2979 2961 2980 2962 return 0; 2981 2963
+6
drivers/nvme/host/nvme.h
··· 138 138 * 48 bits. 139 139 */ 140 140 NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16), 141 + 142 + /* 143 + * The controller requires the command_id value be be limited, so skip 144 + * encoding the generation sequence number. 145 + */ 146 + NVME_QUIRK_SKIP_CID_GEN = (1 << 17), 141 147 }; 142 148 143 149 /*
+2 -1
drivers/nvme/host/pci.c
··· 3369 3369 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), 3370 3370 .driver_data = NVME_QUIRK_SINGLE_VECTOR | 3371 3371 NVME_QUIRK_128_BYTES_SQES | 3372 - NVME_QUIRK_SHARED_TAGS }, 3372 + NVME_QUIRK_SHARED_TAGS | 3373 + NVME_QUIRK_SKIP_CID_GEN }, 3373 3374 3374 3375 { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 3375 3376 { 0, }
+10 -3
drivers/nvme/host/tcp.c
··· 620 620 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); 621 621 data->ttag = pdu->ttag; 622 622 data->command_id = nvme_cid(rq); 623 - data->data_offset = cpu_to_le32(req->data_sent); 623 + data->data_offset = pdu->r2t_offset; 624 624 data->data_length = cpu_to_le32(req->pdu_len); 625 625 return 0; 626 626 } ··· 953 953 nvme_tcp_ddgst_update(queue->snd_hash, page, 954 954 offset, ret); 955 955 956 - /* fully successful last write*/ 956 + /* 957 + * update the request iterator except for the last payload send 958 + * in the request where we don't want to modify it as we may 959 + * compete with the RX path completing the request. 960 + */ 961 + if (req->data_sent + ret < req->data_len) 962 + nvme_tcp_advance_req(req, ret); 963 + 964 + /* fully successful last send in current PDU */ 957 965 if (last && ret == len) { 958 966 if (queue->data_digest) { 959 967 nvme_tcp_ddgst_final(queue->snd_hash, ··· 973 965 } 974 966 return 1; 975 967 } 976 - nvme_tcp_advance_req(req, ret); 977 968 } 978 969 return -EAGAIN; 979 970 }
+1
drivers/nvmem/Kconfig
··· 109 109 110 110 config NVMEM_NINTENDO_OTP 111 111 tristate "Nintendo Wii and Wii U OTP Support" 112 + depends on WII || COMPILE_TEST 112 113 help 113 114 This is a driver exposing the OTP of a Nintendo Wii or Wii U console. 114 115
+1 -1
drivers/pci/Kconfig
··· 110 110 111 111 config XEN_PCIDEV_FRONTEND 112 112 tristate "Xen PCI Frontend" 113 - depends on X86 && XEN 113 + depends on XEN_PV 114 114 select PCI_XEN 115 115 select XEN_XENBUS_FRONTEND 116 116 default y
+10 -3
drivers/pci/controller/pci-hyperv.c
··· 3301 3301 return 0; 3302 3302 3303 3303 if (!keep_devs) { 3304 - /* Delete any children which might still exist. */ 3304 + struct list_head removed; 3305 + 3306 + /* Move all present children to the list on stack */ 3307 + INIT_LIST_HEAD(&removed); 3305 3308 spin_lock_irqsave(&hbus->device_list_lock, flags); 3306 - list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) { 3309 + list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry) 3310 + list_move_tail(&hpdev->list_entry, &removed); 3311 + spin_unlock_irqrestore(&hbus->device_list_lock, flags); 3312 + 3313 + /* Remove all children in the list */ 3314 + list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) { 3307 3315 list_del(&hpdev->list_entry); 3308 3316 if (hpdev->pci_slot) 3309 3317 pci_destroy_slot(hpdev->pci_slot); ··· 3319 3311 put_pcichild(hpdev); 3320 3312 put_pcichild(hpdev); 3321 3313 } 3322 - spin_unlock_irqrestore(&hbus->device_list_lock, flags); 3323 3314 } 3324 3315 3325 3316 ret = hv_send_resources_released(hdev);
+2
drivers/perf/arm_pmu.c
··· 952 952 pmu->name, pmu->num_events, 953 953 has_nmi ? ", using NMIs" : ""); 954 954 955 + kvm_host_pmu_init(pmu); 956 + 955 957 return 0; 956 958 957 959 out_destroy:
+1 -1
drivers/pinctrl/core.c
··· 2306 2306 2307 2307 /** 2308 2308 * devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister(). 2309 - * @dev: device for which which resource was allocated 2309 + * @dev: device for which resource was allocated 2310 2310 * @pctldev: the pinctrl device to unregister. 2311 2311 */ 2312 2312 void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev)
+14 -5
drivers/pinctrl/pinctrl-amd.c
··· 445 445 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 446 446 struct amd_gpio *gpio_dev = gpiochip_get_data(gc); 447 447 u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3); 448 + int err; 448 449 449 450 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 450 451 pin_reg = readl(gpio_dev->base + (d->hwirq)*4); ··· 457 456 458 457 writel(pin_reg, gpio_dev->base + (d->hwirq)*4); 459 458 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 459 + 460 + if (on) 461 + err = enable_irq_wake(gpio_dev->irq); 462 + else 463 + err = disable_irq_wake(gpio_dev->irq); 464 + 465 + if (err) 466 + dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n", 467 + on ? "enable" : "disable"); 460 468 461 469 return 0; 462 470 } ··· 912 902 static int amd_gpio_probe(struct platform_device *pdev) 913 903 { 914 904 int ret = 0; 915 - int irq_base; 916 905 struct resource *res; 917 906 struct amd_gpio *gpio_dev; 918 907 struct gpio_irq_chip *girq; ··· 934 925 if (!gpio_dev->base) 935 926 return -ENOMEM; 936 927 937 - irq_base = platform_get_irq(pdev, 0); 938 - if (irq_base < 0) 939 - return irq_base; 928 + gpio_dev->irq = platform_get_irq(pdev, 0); 929 + if (gpio_dev->irq < 0) 930 + return gpio_dev->irq; 940 931 941 932 #ifdef CONFIG_PM_SLEEP 942 933 gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins, ··· 996 987 goto out2; 997 988 } 998 989 999 - ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 990 + ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler, 1000 991 IRQF_SHARED, KBUILD_MODNAME, gpio_dev); 1001 992 if (ret) 1002 993 goto out2;
+1
drivers/pinctrl/pinctrl-amd.h
··· 98 98 struct resource *res; 99 99 struct platform_device *pdev; 100 100 u32 *saved_regs; 101 + int irq; 101 102 }; 102 103 103 104 /* KERNCZ configuration*/
+67
drivers/pinctrl/pinctrl-rockchip.c
··· 2092 2092 return false; 2093 2093 } 2094 2094 2095 + static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank, 2096 + unsigned int pin, u32 arg) 2097 + { 2098 + struct rockchip_pin_output_deferred *cfg; 2099 + 2100 + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); 2101 + if (!cfg) 2102 + return -ENOMEM; 2103 + 2104 + cfg->pin = pin; 2105 + cfg->arg = arg; 2106 + 2107 + list_add_tail(&cfg->head, &bank->deferred_output); 2108 + 2109 + return 0; 2110 + } 2111 + 2095 2112 /* set the pin config settings for a specified pin */ 2096 2113 static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, 2097 2114 unsigned long *configs, unsigned num_configs) ··· 2152 2135 RK_FUNC_GPIO); 2153 2136 if (rc != RK_FUNC_GPIO) 2154 2137 return -EINVAL; 2138 + 2139 + /* 2140 + * Check for gpio driver not being probed yet. 2141 + * The lock makes sure that either gpio-probe has completed 2142 + * or the gpio driver hasn't probed yet. 2143 + */ 2144 + mutex_lock(&bank->deferred_lock); 2145 + if (!gpio || !gpio->direction_output) { 2146 + rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg); 2147 + mutex_unlock(&bank->deferred_lock); 2148 + if (rc) 2149 + return rc; 2150 + 2151 + break; 2152 + } 2153 + mutex_unlock(&bank->deferred_lock); 2155 2154 2156 2155 rc = gpio->direction_output(gpio, pin - bank->pin_base, 2157 2156 arg); ··· 2236 2203 rc = rockchip_get_mux(bank, pin - bank->pin_base); 2237 2204 if (rc != RK_FUNC_GPIO) 2238 2205 return -EINVAL; 2206 + 2207 + if (!gpio || !gpio->get) { 2208 + arg = 0; 2209 + break; 2210 + } 2239 2211 2240 2212 rc = gpio->get(gpio, pin - bank->pin_base); 2241 2213 if (rc < 0) ··· 2488 2450 pin_bank->name, pin); 2489 2451 pdesc++; 2490 2452 } 2453 + 2454 + INIT_LIST_HEAD(&pin_bank->deferred_output); 2455 + mutex_init(&pin_bank->deferred_lock); 2491 2456 } 2492 2457 2493 2458 ret = rockchip_pinctrl_parse_dt(pdev, info); ··· 2752 2711 if (ret) { 2753 2712 dev_err(&pdev->dev, "failed to register gpio 
device\n"); 2754 2713 return ret; 2714 + } 2715 + 2716 + return 0; 2717 + } 2718 + 2719 + static int rockchip_pinctrl_remove(struct platform_device *pdev) 2720 + { 2721 + struct rockchip_pinctrl *info = platform_get_drvdata(pdev); 2722 + struct rockchip_pin_bank *bank; 2723 + struct rockchip_pin_output_deferred *cfg; 2724 + int i; 2725 + 2726 + of_platform_depopulate(&pdev->dev); 2727 + 2728 + for (i = 0; i < info->ctrl->nr_banks; i++) { 2729 + bank = &info->ctrl->pin_banks[i]; 2730 + 2731 + mutex_lock(&bank->deferred_lock); 2732 + while (!list_empty(&bank->deferred_output)) { 2733 + cfg = list_first_entry(&bank->deferred_output, 2734 + struct rockchip_pin_output_deferred, head); 2735 + list_del(&cfg->head); 2736 + kfree(cfg); 2737 + } 2738 + mutex_unlock(&bank->deferred_lock); 2755 2739 } 2756 2740 2757 2741 return 0; ··· 3241 3175 3242 3176 static struct platform_driver rockchip_pinctrl_driver = { 3243 3177 .probe = rockchip_pinctrl_probe, 3178 + .remove = rockchip_pinctrl_remove, 3244 3179 .driver = { 3245 3180 .name = "rockchip-pinctrl", 3246 3181 .pm = &rockchip_pinctrl_dev_pm_ops,
+10
drivers/pinctrl/pinctrl-rockchip.h
··· 141 141 * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode 142 142 * @recalced_mask: bit mask to indicate a need to recalulate the mask 143 143 * @route_mask: bits describing the routing pins of per bank 144 + * @deferred_output: gpio output settings to be done after gpio bank probed 145 + * @deferred_lock: mutex for the deferred_output shared btw gpio and pinctrl 144 146 */ 145 147 struct rockchip_pin_bank { 146 148 struct device *dev; ··· 171 169 u32 toggle_edge_mode; 172 170 u32 recalced_mask; 173 171 u32 route_mask; 172 + struct list_head deferred_output; 173 + struct mutex deferred_lock; 174 174 }; 175 175 176 176 /** ··· 245 241 unsigned int func; 246 242 unsigned long *configs; 247 243 unsigned int nconfigs; 244 + }; 245 + 246 + struct rockchip_pin_output_deferred { 247 + struct list_head head; 248 + unsigned int pin; 249 + u32 arg; 248 250 }; 249 251 250 252 /**
+1
drivers/pinctrl/qcom/pinctrl-sc7280.c
··· 1496 1496 static struct platform_driver sc7280_pinctrl_driver = { 1497 1497 .driver = { 1498 1498 .name = "sc7280-pinctrl", 1499 + .pm = &msm_pinctrl_dev_pm_ops, 1499 1500 .of_match_table = sc7280_pinctrl_of_match, 1500 1501 }, 1501 1502 .probe = sc7280_pinctrl_probe,
+34 -3
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 2 /* 3 - * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved. 4 4 */ 5 5 6 6 #include <linux/gpio/driver.h> ··· 14 14 #include <linux/platform_device.h> 15 15 #include <linux/regmap.h> 16 16 #include <linux/slab.h> 17 + #include <linux/spmi.h> 17 18 #include <linux/types.h> 18 19 19 20 #include <dt-bindings/pinctrl/qcom,pmic-gpio.h> ··· 172 171 struct pinctrl_dev *ctrl; 173 172 struct gpio_chip chip; 174 173 struct irq_chip irq; 174 + u8 usid; 175 + u8 pid_base; 175 176 }; 176 177 177 178 static const struct pinconf_generic_params pmic_gpio_bindings[] = { ··· 952 949 unsigned int *parent_hwirq, 953 950 unsigned int *parent_type) 954 951 { 955 - *parent_hwirq = child_hwirq + 0xc0; 952 + struct pmic_gpio_state *state = gpiochip_get_data(chip); 953 + 954 + *parent_hwirq = child_hwirq + state->pid_base; 956 955 *parent_type = child_type; 957 956 958 957 return 0; 958 + } 959 + 960 + static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip, 961 + unsigned int parent_hwirq, 962 + unsigned int parent_type) 963 + { 964 + struct pmic_gpio_state *state = gpiochip_get_data(chip); 965 + struct irq_fwspec *fwspec; 966 + 967 + fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL); 968 + if (!fwspec) 969 + return NULL; 970 + 971 + fwspec->fwnode = chip->irq.parent_domain->fwnode; 972 + 973 + fwspec->param_count = 4; 974 + fwspec->param[0] = state->usid; 975 + fwspec->param[1] = parent_hwirq; 976 + /* param[2] must be left as 0 */ 977 + fwspec->param[3] = parent_type; 978 + 979 + return fwspec; 959 980 } 960 981 961 982 static int pmic_gpio_probe(struct platform_device *pdev) ··· 992 965 struct pmic_gpio_pad *pad, *pads; 993 966 struct pmic_gpio_state *state; 994 967 struct gpio_irq_chip *girq; 968 + const struct spmi_device *parent_spmi_dev; 995 969 int ret, npins, i; 996 970 u32 reg; 997 971 ··· 1012 984 1013 985 state->dev = 
&pdev->dev; 1014 986 state->map = dev_get_regmap(dev->parent, NULL); 987 + parent_spmi_dev = to_spmi_device(dev->parent); 988 + state->usid = parent_spmi_dev->usid; 989 + state->pid_base = reg >> 8; 1015 990 1016 991 pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL); 1017 992 if (!pindesc) ··· 1090 1059 girq->fwnode = of_node_to_fwnode(state->dev->of_node); 1091 1060 girq->parent_domain = parent_domain; 1092 1061 girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq; 1093 - girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell; 1062 + girq->populate_parent_alloc_arg = pmic_gpio_populate_parent_fwspec; 1094 1063 girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq; 1095 1064 girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate; 1096 1065
+1 -1
drivers/platform/x86/amd-pmc.c
··· 71 71 #define AMD_CPU_ID_YC 0x14B5 72 72 73 73 #define PMC_MSG_DELAY_MIN_US 100 74 - #define RESPONSE_REGISTER_LOOP_MAX 200 74 + #define RESPONSE_REGISTER_LOOP_MAX 20000 75 75 76 76 #define SOC_SUBSYSTEM_IP_MAX 12 77 77 #define DELAY_MIN_US 2000
+1 -2
drivers/platform/x86/dell/Kconfig
··· 166 166 167 167 config DELL_WMI_PRIVACY 168 168 bool "Dell WMI Hardware Privacy Support" 169 - depends on DELL_WMI 170 - depends on LEDS_TRIGGER_AUDIO 169 + depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO 171 170 help 172 171 This option adds integration with the "Dell Hardware Privacy" 173 172 feature of Dell laptops to the dell-wmi driver.
+1
drivers/platform/x86/gigabyte-wmi.c
··· 144 144 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"), 145 145 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"), 146 146 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"), 147 + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"), 147 148 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"), 148 149 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"), 149 150 DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+22 -5
drivers/platform/x86/intel/hid.c
··· 118 118 { } 119 119 }; 120 120 121 + /* 122 + * Some devices, even non convertible ones, can send incorrect SW_TABLET_MODE 123 + * reports. Accept such reports only from devices in this list. 124 + */ 125 + static const struct dmi_system_id dmi_auto_add_switch[] = { 126 + { 127 + .matches = { 128 + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */), 129 + }, 130 + }, 131 + { 132 + .matches = { 133 + DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */), 134 + }, 135 + }, 136 + {} /* Array terminator */ 137 + }; 138 + 121 139 struct intel_hid_priv { 122 140 struct input_dev *input_dev; 123 141 struct input_dev *array; 124 142 struct input_dev *switches; 125 143 bool wakeup_mode; 126 - bool dual_accel; 144 + bool auto_add_switch; 127 145 }; 128 146 129 147 #define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054" ··· 470 452 * Some convertible have unreliable VGBS return which could cause incorrect 471 453 * SW_TABLET_MODE report, in these cases we enable support when receiving 472 454 * the first event instead of during driver setup. 473 - * 474 - * See dual_accel_detect.h for more info on the dual_accel check. 475 455 */ 476 - if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) { 456 + if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) { 477 457 dev_info(&device->dev, "switch event received, enable switches supports\n"); 478 458 err = intel_hid_switches_setup(device); 479 459 if (err) ··· 612 596 return -ENOMEM; 613 597 dev_set_drvdata(&device->dev, priv); 614 598 615 - priv->dual_accel = dual_accel_detect(); 599 + /* See dual_accel_detect.h for more info on the dual_accel check. */ 600 + priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect(); 616 601 617 602 err = intel_hid_input_setup(device); 618 603 if (err) {
+1 -2
drivers/platform/x86/intel/punit_ipc.c
··· 8 8 * which provide mailbox interface for power management usage. 9 9 */ 10 10 11 - #include <linux/acpi.h> 12 11 #include <linux/bitops.h> 13 12 #include <linux/delay.h> 14 13 #include <linux/device.h> ··· 318 319 .remove = intel_punit_ipc_remove, 319 320 .driver = { 320 321 .name = "intel_punit_ipc", 321 - .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids), 322 + .acpi_match_table = punit_ipc_acpi_ids, 322 323 }, 323 324 }; 324 325
+1 -1
drivers/platform/x86/lg-laptop.c
··· 655 655 goto out_platform_registered; 656 656 } 657 657 product = dmi_get_system_info(DMI_PRODUCT_NAME); 658 - if (strlen(product) > 4) 658 + if (product && strlen(product) > 4) 659 659 switch (product[4]) { 660 660 case '5': 661 661 case '6':
+50 -4
drivers/platform/x86/touchscreen_dmi.c
··· 100 100 }; 101 101 102 102 static const struct property_entry chuwi_hi10_plus_props[] = { 103 - PROPERTY_ENTRY_U32("touchscreen-min-x", 0), 104 - PROPERTY_ENTRY_U32("touchscreen-min-y", 5), 105 - PROPERTY_ENTRY_U32("touchscreen-size-x", 1914), 106 - PROPERTY_ENTRY_U32("touchscreen-size-y", 1283), 103 + PROPERTY_ENTRY_U32("touchscreen-min-x", 12), 104 + PROPERTY_ENTRY_U32("touchscreen-min-y", 10), 105 + PROPERTY_ENTRY_U32("touchscreen-size-x", 1908), 106 + PROPERTY_ENTRY_U32("touchscreen-size-y", 1270), 107 107 PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"), 108 108 PROPERTY_ENTRY_U32("silead,max-fingers", 10), 109 109 PROPERTY_ENTRY_BOOL("silead,home-button"), ··· 111 111 }; 112 112 113 113 static const struct ts_dmi_data chuwi_hi10_plus_data = { 114 + .embedded_fw = { 115 + .name = "silead/gsl1680-chuwi-hi10plus.fw", 116 + .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 }, 117 + .length = 34056, 118 + .sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e, 119 + 0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38, 120 + 0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5, 121 + 0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 }, 122 + }, 114 123 .acpi_name = "MSSL0017:00", 115 124 .properties = chuwi_hi10_plus_props, 116 125 }; ··· 148 139 }, 149 140 .acpi_name = "MSSL1680:00", 150 141 .properties = chuwi_hi10_pro_props, 142 + }; 143 + 144 + static const struct property_entry chuwi_hibook_props[] = { 145 + PROPERTY_ENTRY_U32("touchscreen-min-x", 30), 146 + PROPERTY_ENTRY_U32("touchscreen-min-y", 4), 147 + PROPERTY_ENTRY_U32("touchscreen-size-x", 1892), 148 + PROPERTY_ENTRY_U32("touchscreen-size-y", 1276), 149 + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), 150 + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), 151 + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"), 152 + PROPERTY_ENTRY_U32("silead,max-fingers", 10), 153 + PROPERTY_ENTRY_BOOL("silead,home-button"), 154 + { } 155 + }; 156 + 157 + static const struct 
ts_dmi_data chuwi_hibook_data = { 158 + .embedded_fw = { 159 + .name = "silead/gsl1680-chuwi-hibook.fw", 160 + .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 }, 161 + .length = 40392, 162 + .sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d, 163 + 0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a, 164 + 0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8, 165 + 0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 }, 166 + }, 167 + .acpi_name = "MSSL0017:00", 168 + .properties = chuwi_hibook_props, 151 169 }; 152 170 153 171 static const struct property_entry chuwi_vi8_props[] = { ··· 1013 977 DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), 1014 978 DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"), 1015 979 DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), 980 + }, 981 + }, 982 + { 983 + /* Chuwi HiBook (CWI514) */ 984 + .driver_data = (void *)&chuwi_hibook_data, 985 + .matches = { 986 + DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), 987 + DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), 988 + /* Above matches are too generic, add bios-date match */ 989 + DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"), 1016 990 }, 1017 991 }, 1018 992 {
+1
drivers/ptp/Kconfig
··· 174 174 depends on I2C && MTD 175 175 depends on SERIAL_8250 176 176 depends on !S390 177 + depends on COMMON_CLK 177 178 select NET_DEVLINK 178 179 help 179 180 This driver adds support for an OpenCompute time card.
+2 -7
drivers/ptp/ptp_kvm_x86.c
··· 15 15 #include <linux/ptp_clock_kernel.h> 16 16 #include <linux/ptp_kvm.h> 17 17 18 - struct pvclock_vsyscall_time_info *hv_clock; 19 - 20 18 static phys_addr_t clock_pair_gpa; 21 19 static struct kvm_clock_pairing clock_pair; 22 20 ··· 26 28 return -ENODEV; 27 29 28 30 clock_pair_gpa = slow_virt_to_phys(&clock_pair); 29 - hv_clock = pvclock_get_pvti_cpu0_va(); 30 - if (!hv_clock) 31 + if (!pvclock_get_pvti_cpu0_va()) 31 32 return -ENODEV; 32 33 33 34 ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa, ··· 61 64 struct pvclock_vcpu_time_info *src; 62 65 unsigned int version; 63 66 long ret; 64 - int cpu; 65 67 66 - cpu = smp_processor_id(); 67 - src = &hv_clock[cpu].pvti; 68 + src = this_cpu_pvti(); 68 69 69 70 do { 70 71 /*
+1
drivers/ptp/ptp_pch.c
··· 644 644 }, 645 645 {0} 646 646 }; 647 + MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id); 647 648 648 649 static SIMPLE_DEV_PM_OPS(pch_pm_ops, pch_suspend, pch_resume); 649 650
+5 -3
drivers/s390/cio/blacklist.c
··· 262 262 263 263 if (strcmp("free", parm) == 0) { 264 264 rc = blacklist_parse_parameters(buf, free, 0); 265 - /* There could be subchannels without proper devices connected. 266 - * evaluate all the entries 265 + /* 266 + * Evaluate the subchannels without an online device. This way, 267 + * no path-verification will be triggered on those subchannels 268 + * and it avoids unnecessary delays. 267 269 */ 268 - css_schedule_eval_all(); 270 + css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0); 269 271 } else if (strcmp("add", parm) == 0) 270 272 rc = blacklist_parse_parameters(buf, add, 0); 271 273 else if (strcmp("purge", parm) == 0)
+8 -2
drivers/s390/cio/ccwgroup.c
··· 77 77 /** 78 78 * ccwgroup_set_offline() - disable a ccwgroup device 79 79 * @gdev: target ccwgroup device 80 + * @call_gdrv: Call the registered gdrv set_offline function 80 81 * 81 82 * This function attempts to put the ccwgroup device into the offline state. 82 83 * Returns: 83 84 * %0 on success and a negative error value on failure. 84 85 */ 85 - int ccwgroup_set_offline(struct ccwgroup_device *gdev) 86 + int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv) 86 87 { 87 88 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); 88 89 int ret = -EINVAL; ··· 92 91 return -EAGAIN; 93 92 if (gdev->state == CCWGROUP_OFFLINE) 94 93 goto out; 94 + if (!call_gdrv) { 95 + ret = 0; 96 + goto offline; 97 + } 95 98 if (gdrv->set_offline) 96 99 ret = gdrv->set_offline(gdev); 97 100 if (ret) 98 101 goto out; 99 102 103 + offline: 100 104 gdev->state = CCWGROUP_OFFLINE; 101 105 out: 102 106 atomic_set(&gdev->onoff, 0); ··· 130 124 if (value == 1) 131 125 ret = ccwgroup_set_online(gdev); 132 126 else if (value == 0) 133 - ret = ccwgroup_set_offline(gdev); 127 + ret = ccwgroup_set_offline(gdev, true); 134 128 else 135 129 ret = -EINVAL; 136 130 out:
+31 -9
drivers/s390/cio/css.c
··· 788 788 return 0; 789 789 } 790 790 791 - void css_schedule_eval_all_unreg(unsigned long delay) 791 + static int __unset_online(struct device *dev, void *data) 792 + { 793 + struct idset *set = data; 794 + struct subchannel *sch = to_subchannel(dev); 795 + struct ccw_device *cdev = sch_get_cdev(sch); 796 + 797 + if (cdev && cdev->online) 798 + idset_sch_del(set, sch->schid); 799 + 800 + return 0; 801 + } 802 + 803 + void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay) 792 804 { 793 805 unsigned long flags; 794 - struct idset *unreg_set; 806 + struct idset *set; 795 807 796 808 /* Find unregistered subchannels. */ 797 - unreg_set = idset_sch_new(); 798 - if (!unreg_set) { 809 + set = idset_sch_new(); 810 + if (!set) { 799 811 /* Fallback. */ 800 812 css_schedule_eval_all(); 801 813 return; 802 814 } 803 - idset_fill(unreg_set); 804 - bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered); 815 + idset_fill(set); 816 + switch (cond) { 817 + case CSS_EVAL_UNREG: 818 + bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered); 819 + break; 820 + case CSS_EVAL_NOT_ONLINE: 821 + bus_for_each_dev(&css_bus_type, NULL, set, __unset_online); 822 + break; 823 + default: 824 + break; 825 + } 826 + 805 827 /* Apply to slow_subchannel_set. */ 806 828 spin_lock_irqsave(&slow_subchannel_lock, flags); 807 - idset_add_set(slow_subchannel_set, unreg_set); 829 + idset_add_set(slow_subchannel_set, set); 808 830 atomic_set(&css_eval_scheduled, 1); 809 831 queue_delayed_work(cio_work_q, &slow_path_work, delay); 810 832 spin_unlock_irqrestore(&slow_subchannel_lock, flags); 811 - idset_free(unreg_set); 833 + idset_free(set); 812 834 } 813 835 814 836 void css_wait_for_slow_path(void) ··· 842 820 void css_schedule_reprobe(void) 843 821 { 844 822 /* Schedule with a delay to allow merging of subsequent calls. 
*/ 845 - css_schedule_eval_all_unreg(1 * HZ); 823 + css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ); 846 824 } 847 825 EXPORT_SYMBOL_GPL(css_schedule_reprobe); 848 826
+9 -1
drivers/s390/cio/css.h
··· 34 34 #define SNID_STATE3_MULTI_PATH 1 35 35 #define SNID_STATE3_SINGLE_PATH 0 36 36 37 + /* 38 + * Conditions used to specify which subchannels need evaluation 39 + */ 40 + enum css_eval_cond { 41 + CSS_EVAL_UNREG, /* unregistered subchannels */ 42 + CSS_EVAL_NOT_ONLINE /* sch without an online-device */ 43 + }; 44 + 37 45 struct path_state { 38 46 __u8 state1 : 2; /* path state value 1 */ 39 47 __u8 state2 : 2; /* path state value 2 */ ··· 144 136 /* Helper functions to build lists for the slow path. */ 145 137 void css_schedule_eval(struct subchannel_id schid); 146 138 void css_schedule_eval_all(void); 147 - void css_schedule_eval_all_unreg(unsigned long delay); 139 + void css_schedule_eval_cond(enum css_eval_cond, unsigned long delay); 148 140 int css_complete_work(void); 149 141 150 142 int sch_is_pseudo_sch(struct subchannel *);
+3 -1
drivers/s390/crypto/vfio_ap_ops.c
··· 361 361 mutex_lock(&matrix_dev->lock); 362 362 list_del(&matrix_mdev->node); 363 363 mutex_unlock(&matrix_dev->lock); 364 + vfio_uninit_group_dev(&matrix_mdev->vdev); 364 365 kfree(matrix_mdev); 365 366 err_dec_available: 366 367 atomic_inc(&matrix_dev->available_instances); ··· 377 376 mutex_lock(&matrix_dev->lock); 378 377 vfio_ap_mdev_reset_queues(matrix_mdev); 379 378 list_del(&matrix_mdev->node); 379 + mutex_unlock(&matrix_dev->lock); 380 + vfio_uninit_group_dev(&matrix_mdev->vdev); 380 381 kfree(matrix_mdev); 381 382 atomic_inc(&matrix_dev->available_instances); 382 - mutex_unlock(&matrix_dev->lock); 383 383 } 384 384 385 385 static ssize_t name_show(struct mdev_type *mtype,
-1
drivers/s390/net/qeth_core.h
··· 858 858 struct napi_struct napi; 859 859 struct qeth_rx rx; 860 860 struct delayed_work buffer_reclaim_work; 861 - struct work_struct close_dev_work; 862 861 }; 863 862 864 863 static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
+9 -13
drivers/s390/net/qeth_core_main.c
··· 70 70 static int qeth_qdio_establish(struct qeth_card *); 71 71 static void qeth_free_qdio_queues(struct qeth_card *card); 72 72 73 - static void qeth_close_dev_handler(struct work_struct *work) 74 - { 75 - struct qeth_card *card; 76 - 77 - card = container_of(work, struct qeth_card, close_dev_work); 78 - QETH_CARD_TEXT(card, 2, "cldevhdl"); 79 - ccwgroup_set_offline(card->gdev); 80 - } 81 - 82 73 static const char *qeth_get_cardname(struct qeth_card *card) 83 74 { 84 75 if (IS_VM_NIC(card)) { ··· 192 201 list_for_each_entry_safe(pool_entry, tmp, 193 202 &card->qdio.in_buf_pool.entry_list, list) 194 203 list_del(&pool_entry->list); 204 + 205 + if (!queue) 206 + return; 195 207 196 208 for (i = 0; i < ARRAY_SIZE(queue->bufs); i++) 197 209 queue->bufs[i].pool_entry = NULL; ··· 786 792 case IPA_CMD_STOPLAN: 787 793 if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { 788 794 dev_err(&card->gdev->dev, 789 - "Interface %s is down because the adjacent port is no longer in reflective relay mode\n", 795 + "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n", 790 796 netdev_name(card->dev)); 791 - schedule_work(&card->close_dev_work); 797 + /* Set offline, then probably fail to set online: */ 798 + qeth_schedule_recovery(card); 792 799 } else { 800 + /* stay online for subsequent STARTLAN */ 793 801 dev_warn(&card->gdev->dev, 794 802 "The link for interface %s on CHPID 0x%X failed\n", 795 803 netdev_name(card->dev), card->info.chpid); ··· 1533 1537 INIT_LIST_HEAD(&card->ipato.entries); 1534 1538 qeth_init_qdio_info(card); 1535 1539 INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); 1536 - INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); 1537 1540 hash_init(card->rx_mode_addrs); 1538 1541 hash_init(card->local_addrs4); 1539 1542 hash_init(card->local_addrs6); ··· 5514 5519 dev_info(&card->gdev->dev, 5515 5520 "Device successfully recovered!\n"); 5516 5521 } else { 5517 - 
ccwgroup_set_offline(card->gdev); 5522 + qeth_set_offline(card, disc, true); 5523 + ccwgroup_set_offline(card->gdev, false); 5518 5524 dev_warn(&card->gdev->dev, 5519 5525 "The qeth device driver failed to recover an error on the device\n"); 5520 5526 }
-1
drivers/s390/net/qeth_l2_main.c
··· 2307 2307 if (gdev->state == CCWGROUP_ONLINE) 2308 2308 qeth_set_offline(card, card->discipline, false); 2309 2309 2310 - cancel_work_sync(&card->close_dev_work); 2311 2310 if (card->dev->reg_state == NETREG_REGISTERED) { 2312 2311 priv = netdev_priv(card->dev); 2313 2312 if (priv->brport_features & BR_LEARNING_SYNC) {
-1
drivers/s390/net/qeth_l3_main.c
··· 1969 1969 if (cgdev->state == CCWGROUP_ONLINE) 1970 1970 qeth_set_offline(card, card->discipline, false); 1971 1971 1972 - cancel_work_sync(&card->close_dev_work); 1973 1972 if (card->dev->reg_state == NETREG_REGISTERED) 1974 1973 unregister_netdev(card->dev); 1975 1974
-11
drivers/scsi/arm/Kconfig
··· 10 10 This enables support for the Acorn SCSI card (aka30). If you have an 11 11 Acorn system with one of these, say Y. If unsure, say N. 12 12 13 - config SCSI_ACORNSCSI_TAGGED_QUEUE 14 - bool "Support SCSI 2 Tagged queueing" 15 - depends on SCSI_ACORNSCSI_3 16 - help 17 - Say Y here to enable tagged queuing support on the Acorn SCSI card. 18 - 19 - This is a feature of SCSI-2 which improves performance: the host 20 - adapter can send several SCSI commands to a device's queue even if 21 - previous commands haven't finished yet. Some SCSI devices don't 22 - implement this properly, so the safe answer is N. 23 - 24 13 config SCSI_ACORNSCSI_SYNC 25 14 bool "Support SCSI 2 Synchronous Transfers" 26 15 depends on SCSI_ACORNSCSI_3
+22 -81
drivers/scsi/arm/acornscsi.c
··· 52 52 * You can tell if you have a device that supports tagged queueing my 53 53 * cating (eg) /proc/scsi/acornscsi/0 and see if the SCSI revision is reported 54 54 * as '2 TAG'. 55 - * 56 - * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config 57 - * scripts, but disabled here. Once debugged, remove the #undef, otherwise to debug, 58 - * comment out the undef. 59 55 */ 60 - #undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 56 + 61 57 /* 62 58 * SCSI-II Synchronous transfer support. 63 59 * ··· 167 171 unsigned int result); 168 172 static int acornscsi_reconnect_finish(AS_Host *host); 169 173 static void acornscsi_dma_cleanup(AS_Host *host); 170 - static void acornscsi_abortcmd(AS_Host *host, unsigned char tag); 174 + static void acornscsi_abortcmd(AS_Host *host); 171 175 172 176 /* ==================================================================================== 173 177 * Miscellaneous ··· 737 741 #endif 738 742 739 743 if (from_queue) { 740 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 741 - /* 742 - * tagged queueing - allocate a new tag to this command 743 - */ 744 - if (SCpnt->device->simple_tags) { 745 - SCpnt->device->current_tag += 1; 746 - if (SCpnt->device->current_tag == 0) 747 - SCpnt->device->current_tag = 1; 748 - SCpnt->tag = SCpnt->device->current_tag; 749 - } else 750 - #endif 751 744 set_bit(SCpnt->device->id * 8 + 752 745 (u8)(SCpnt->device->lun & 0x07), host->busyluns); 753 746 ··· 1177 1192 * the device recognises the attention. 
1178 1193 */ 1179 1194 if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) { 1180 - acornscsi_abortcmd(host, host->SCpnt->tag); 1195 + acornscsi_abortcmd(host); 1181 1196 1182 1197 dmac_write(host, DMAC_TXCNTLO, 0); 1183 1198 dmac_write(host, DMAC_TXCNTHI, 0); ··· 1545 1560 acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); 1546 1561 1547 1562 switch (host->scsi.last_message) { 1548 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 1549 - case HEAD_OF_QUEUE_TAG: 1550 - case ORDERED_QUEUE_TAG: 1551 - case SIMPLE_QUEUE_TAG: 1552 - /* 1553 - * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17) 1554 - * If a target does not implement tagged queuing and a queue tag 1555 - * message is received, it shall respond with a MESSAGE REJECT 1556 - * message and accept the I/O process as if it were untagged. 1557 - */ 1558 - printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n", 1559 - host->host->host_no, acornscsi_target(host)); 1560 - host->SCpnt->device->simple_tags = 0; 1561 - set_bit(host->SCpnt->device->id * 8 + 1562 - (u8)(host->SCpnt->device->lun & 0x7), host->busyluns); 1563 - break; 1564 - #endif 1565 1563 case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8): 1566 1564 /* 1567 1565 * Target can't handle synchronous transfers ··· 1655 1687 #if 0 1656 1688 /* does the device need the current command aborted */ 1657 1689 if (cmd_aborted) { 1658 - acornscsi_abortcmd(host->SCpnt->tag); 1690 + acornscsi_abortcmd(host); 1659 1691 return; 1660 1692 } 1661 1693 #endif 1662 1694 1663 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 1664 - if (host->SCpnt->tag) { 1665 - unsigned int tag_type; 1666 - 1667 - if (host->SCpnt->cmnd[0] == REQUEST_SENSE || 1668 - host->SCpnt->cmnd[0] == TEST_UNIT_READY || 1669 - host->SCpnt->cmnd[0] == INQUIRY) 1670 - tag_type = HEAD_OF_QUEUE_TAG; 1671 - else 1672 - tag_type = SIMPLE_QUEUE_TAG; 1673 - msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag); 1674 - } 1675 - #endif 1676 1695 1677 1696 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC 1678 1697 if 
(host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) { ··· 1753 1798 "to reconnect with\n", 1754 1799 host->host->host_no, '0' + target); 1755 1800 acornscsi_dumplog(host, target); 1756 - acornscsi_abortcmd(host, 0); 1801 + acornscsi_abortcmd(host); 1757 1802 if (host->SCpnt) { 1758 1803 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); 1759 1804 host->SCpnt = NULL; ··· 1776 1821 host->scsi.disconnectable = 0; 1777 1822 if (host->SCpnt->device->id == host->scsi.reconnected.target && 1778 1823 host->SCpnt->device->lun == host->scsi.reconnected.lun && 1779 - host->SCpnt->tag == host->scsi.reconnected.tag) { 1824 + scsi_cmd_to_tag(host->SCpnt) == host->scsi.reconnected.tag) { 1780 1825 #if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON)) 1781 1826 DBG(host->SCpnt, printk("scsi%d.%c: reconnected", 1782 1827 host->host->host_no, acornscsi_target(host))); ··· 1803 1848 } 1804 1849 1805 1850 if (!host->SCpnt) 1806 - acornscsi_abortcmd(host, host->scsi.reconnected.tag); 1851 + acornscsi_abortcmd(host); 1807 1852 else { 1808 1853 /* 1809 1854 * Restore data pointer from SAVED pointers. 
··· 1844 1889 * Function: void acornscsi_abortcmd(AS_host *host, unsigned char tag) 1845 1890 * Purpose : abort a currently executing command 1846 1891 * Params : host - host with connected command to abort 1847 - * tag - tag to abort 1848 1892 */ 1849 1893 static 1850 - void acornscsi_abortcmd(AS_Host *host, unsigned char tag) 1894 + void acornscsi_abortcmd(AS_Host *host) 1851 1895 { 1852 1896 host->scsi.phase = PHASE_ABORTED; 1853 1897 sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN); 1854 1898 1855 1899 msgqueue_flush(&host->scsi.msgs); 1856 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 1857 - if (tag) 1858 - msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag); 1859 - else 1860 - #endif 1861 - msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); 1900 + msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); 1862 1901 } 1863 1902 1864 1903 /* ========================================================================================== ··· 1942 1993 printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n", 1943 1994 host->host->host_no, acornscsi_target(host), ssr); 1944 1995 acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8); 1945 - acornscsi_abortcmd(host, host->SCpnt->tag); 1996 + acornscsi_abortcmd(host); 1946 1997 } 1947 1998 return INTR_PROCESSING; 1948 1999 ··· 1978 2029 printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n", 1979 2030 host->host->host_no, acornscsi_target(host), ssr); 1980 2031 acornscsi_dumplog(host, host->SCpnt ? 
host->SCpnt->device->id : 8); 1981 - acornscsi_abortcmd(host, host->SCpnt->tag); 2032 + acornscsi_abortcmd(host); 1982 2033 } 1983 2034 return INTR_PROCESSING; 1984 2035 ··· 2024 2075 case 0x18: /* -> PHASE_DATAOUT */ 2025 2076 /* COMMAND -> DATA OUT */ 2026 2077 if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) 2027 - acornscsi_abortcmd(host, host->SCpnt->tag); 2078 + acornscsi_abortcmd(host); 2028 2079 acornscsi_dma_setup(host, DMA_OUT); 2029 2080 if (!acornscsi_starttransfer(host)) 2030 - acornscsi_abortcmd(host, host->SCpnt->tag); 2081 + acornscsi_abortcmd(host); 2031 2082 host->scsi.phase = PHASE_DATAOUT; 2032 2083 return INTR_IDLE; 2033 2084 2034 2085 case 0x19: /* -> PHASE_DATAIN */ 2035 2086 /* COMMAND -> DATA IN */ 2036 2087 if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len) 2037 - acornscsi_abortcmd(host, host->SCpnt->tag); 2088 + acornscsi_abortcmd(host); 2038 2089 acornscsi_dma_setup(host, DMA_IN); 2039 2090 if (!acornscsi_starttransfer(host)) 2040 - acornscsi_abortcmd(host, host->SCpnt->tag); 2091 + acornscsi_abortcmd(host); 2041 2092 host->scsi.phase = PHASE_DATAIN; 2042 2093 return INTR_IDLE; 2043 2094 ··· 2105 2156 /* MESSAGE IN -> DATA OUT */ 2106 2157 acornscsi_dma_setup(host, DMA_OUT); 2107 2158 if (!acornscsi_starttransfer(host)) 2108 - acornscsi_abortcmd(host, host->SCpnt->tag); 2159 + acornscsi_abortcmd(host); 2109 2160 host->scsi.phase = PHASE_DATAOUT; 2110 2161 return INTR_IDLE; 2111 2162 ··· 2114 2165 /* MESSAGE IN -> DATA IN */ 2115 2166 acornscsi_dma_setup(host, DMA_IN); 2116 2167 if (!acornscsi_starttransfer(host)) 2117 - acornscsi_abortcmd(host, host->SCpnt->tag); 2168 + acornscsi_abortcmd(host); 2118 2169 host->scsi.phase = PHASE_DATAIN; 2119 2170 return INTR_IDLE; 2120 2171 ··· 2155 2206 switch (ssr) { 2156 2207 case 0x19: /* -> PHASE_DATAIN */ 2157 2208 case 0x89: /* -> PHASE_DATAIN */ 2158 - acornscsi_abortcmd(host, host->SCpnt->tag); 2209 + acornscsi_abortcmd(host); 2159 2210 return INTR_IDLE; 2160 2211 2161 2212 case 
0x1b: /* -> PHASE_STATUSIN */ ··· 2204 2255 switch (ssr) { 2205 2256 case 0x18: /* -> PHASE_DATAOUT */ 2206 2257 case 0x88: /* -> PHASE_DATAOUT */ 2207 - acornscsi_abortcmd(host, host->SCpnt->tag); 2258 + acornscsi_abortcmd(host); 2208 2259 return INTR_IDLE; 2209 2260 2210 2261 case 0x1b: /* -> PHASE_STATUSIN */ ··· 2431 2482 SCpnt->scsi_done = done; 2432 2483 SCpnt->host_scribble = NULL; 2433 2484 SCpnt->result = 0; 2434 - SCpnt->tag = 0; 2435 2485 SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]); 2436 2486 SCpnt->SCp.sent_command = 0; 2437 2487 SCpnt->SCp.scsi_xferred = 0; ··· 2529 2581 break; 2530 2582 2531 2583 default: 2532 - acornscsi_abortcmd(host, host->SCpnt->tag); 2584 + acornscsi_abortcmd(host); 2533 2585 res = res_snooze; 2534 2586 } 2535 2587 local_irq_restore(flags); ··· 2695 2747 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC 2696 2748 " SYNC" 2697 2749 #endif 2698 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 2699 - " TAG" 2700 - #endif 2701 2750 #if (DEBUG & DEBUG_NO_WRITE) 2702 2751 " NOWRITE (" __stringify(NO_WRITE) ")" 2703 2752 #endif ··· 2714 2769 seq_printf(m, "AcornSCSI driver v%d.%d.%d" 2715 2770 #ifdef CONFIG_SCSI_ACORNSCSI_SYNC 2716 2771 " SYNC" 2717 - #endif 2718 - #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE 2719 - " TAG" 2720 2772 #endif 2721 2773 #if (DEBUG & DEBUG_NO_WRITE) 2722 2774 " NOWRITE (" __stringify(NO_WRITE) ")" ··· 2769 2827 seq_printf(m, "Device/Lun TaggedQ Sync\n"); 2770 2828 seq_printf(m, " %d/%llu ", scd->id, scd->lun); 2771 2829 if (scd->tagged_supported) 2772 - seq_printf(m, "%3sabled(%3d) ", 2773 - scd->simple_tags ? "en" : "dis", 2774 - scd->current_tag); 2830 + seq_printf(m, "%3sabled ", 2831 + scd->simple_tags ? "en" : "dis"); 2775 2832 else 2776 2833 seq_printf(m, "unsupported "); 2777 2834
+8 -23
drivers/scsi/arm/fas216.c
··· 77 77 * I was thinking that this was a good chip until I found this restriction ;( 78 78 */ 79 79 #define SCSI2_SYNC 80 - #undef SCSI2_TAG 81 80 82 81 #undef DEBUG_CONNECT 83 82 #undef DEBUG_MESSAGES ··· 989 990 info->scsi.disconnectable = 0; 990 991 if (info->SCpnt->device->id == target && 991 992 info->SCpnt->device->lun == lun && 992 - info->SCpnt->tag == tag) { 993 + scsi_cmd_to_rq(info->SCpnt)->tag == tag) { 993 994 fas216_log(info, LOG_CONNECT, "reconnected previously executing command"); 994 995 } else { 995 996 queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); ··· 1790 1791 /* 1791 1792 * add tag message if required 1792 1793 */ 1793 - if (SCpnt->tag) 1794 - msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag); 1794 + if (SCpnt->device->simple_tags) 1795 + msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, 1796 + scsi_cmd_to_rq(SCpnt)->tag); 1795 1797 1796 1798 do { 1797 1799 #ifdef SCSI2_SYNC ··· 1815 1815 1816 1816 static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt) 1817 1817 { 1818 - #ifdef SCSI2_TAG 1819 - /* 1820 - * tagged queuing - allocate a new tag to this command 1821 - */ 1822 - if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE && 1823 - SCpnt->cmnd[0] != INQUIRY) { 1824 - SCpnt->device->current_tag += 1; 1825 - if (SCpnt->device->current_tag == 0) 1826 - SCpnt->device->current_tag = 1; 1827 - SCpnt->tag = SCpnt->device->current_tag; 1828 - } else 1829 - #endif 1830 - set_bit(SCpnt->device->id * 8 + 1831 - (u8)(SCpnt->device->lun & 0x7), info->busyluns); 1818 + set_bit(SCpnt->device->id * 8 + 1819 + (u8)(SCpnt->device->lun & 0x7), info->busyluns); 1832 1820 1833 1821 info->stats.removes += 1; 1834 1822 switch (SCpnt->cmnd[0]) { ··· 2105 2117 init_SCp(SCpnt); 2106 2118 SCpnt->SCp.Message = 0; 2107 2119 SCpnt->SCp.Status = 0; 2108 - SCpnt->tag = 0; 2109 2120 SCpnt->host_scribble = (void *)fas216_rq_sns_done; 2110 2121 2111 2122 /* ··· 2210 2223 init_SCp(SCpnt); 2211 2224 2212 
2225 info->stats.queues += 1; 2213 - SCpnt->tag = 0; 2214 2226 2215 2227 spin_lock(&info->host_lock); 2216 2228 ··· 2989 3003 dev = &info->device[scd->id]; 2990 3004 seq_printf(m, " %d/%llu ", scd->id, scd->lun); 2991 3005 if (scd->tagged_supported) 2992 - seq_printf(m, "%3sabled(%3d) ", 2993 - scd->simple_tags ? "en" : "dis", 2994 - scd->current_tag); 3006 + seq_printf(m, "%3sabled ", 3007 + scd->simple_tags ? "en" : "dis"); 2995 3008 else 2996 3009 seq_puts(m, "unsupported "); 2997 3010
+1 -1
drivers/scsi/arm/queue.c
··· 214 214 list_for_each(l, &queue->head) { 215 215 QE_t *q = list_entry(l, QE_t, list); 216 216 if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun && 217 - q->SCpnt->tag == tag) { 217 + scsi_cmd_to_rq(q->SCpnt)->tag == tag) { 218 218 SCpnt = __queue_remove(queue, l); 219 219 break; 220 220 }
+1
drivers/scsi/csiostor/csio_init.c
··· 1254 1254 MODULE_VERSION(CSIO_DRV_VERSION); 1255 1255 MODULE_FIRMWARE(FW_FNAME_T5); 1256 1256 MODULE_FIRMWARE(FW_FNAME_T6); 1257 + MODULE_SOFTDEP("pre: cxgb4");
+2 -2
drivers/scsi/elx/efct/efct_lio.c
··· 880 880 struct efct *efct = lio_vport->efct; 881 881 unsigned long flags = 0; 882 882 883 - spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags); 884 - 885 883 if (lio_vport->fc_vport) 886 884 fc_vport_terminate(lio_vport->fc_vport); 885 + 886 + spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags); 887 887 888 888 list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list, 889 889 list_entry) {
+3 -4
drivers/scsi/elx/libefc/efc_device.c
··· 928 928 break; 929 929 930 930 case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: { 931 - enum efc_nport_topology topology = 932 - (enum efc_nport_topology)arg; 931 + enum efc_nport_topology *topology = arg; 933 932 934 933 WARN_ON(node->nport->domain->attached); 935 934 936 935 WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI); 937 936 938 937 node_printf(node, "topology notification, topology=%d\n", 939 - topology); 938 + *topology); 940 939 941 940 /* At the time the PLOGI was received, the topology was unknown, 942 941 * so we didn't know which node would perform the domain attach: 943 942 * 1. The node from which the PLOGI was sent (p2p) or 944 943 * 2. The node to which the FLOGI was sent (fabric). 945 944 */ 946 - if (topology == EFC_NPORT_TOPO_P2P) { 945 + if (*topology == EFC_NPORT_TOPO_P2P) { 947 946 /* if this is p2p, need to attach to the domain using 948 947 * the d_id from the PLOGI received 949 948 */
+1 -2
drivers/scsi/elx/libefc/efc_fabric.c
··· 107 107 efc_fabric_notify_topology(struct efc_node *node) 108 108 { 109 109 struct efc_node *tmp_node; 110 - enum efc_nport_topology topology = node->nport->topology; 111 110 unsigned long index; 112 111 113 112 /* ··· 117 118 if (tmp_node != node) { 118 119 efc_node_post_event(tmp_node, 119 120 EFC_EVT_NPORT_TOPOLOGY_NOTIFY, 120 - (void *)topology); 121 + &node->nport->topology); 121 122 } 122 123 } 123 124 }
+4 -6
drivers/scsi/lpfc/lpfc_attr.c
··· 285 285 "6312 Catching potential buffer " 286 286 "overflow > PAGE_SIZE = %lu bytes\n", 287 287 PAGE_SIZE); 288 - strscpy(buf + PAGE_SIZE - 1 - 289 - strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1), 290 - LPFC_INFO_MORE_STR, 291 - strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1) 292 - + 1); 288 + strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR), 289 + LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1); 293 290 } 294 291 return len; 295 292 } ··· 6201 6204 len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n", 6202 6205 phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt); 6203 6206 6204 - len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n", 6207 + len += scnprintf(buf + len, PAGE_SIZE - len, 6208 + "Cfg: %d SCSI: %d NVME: %d\n", 6205 6209 phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt, 6206 6210 phba->cfg_nvme_seg_cnt); 6207 6211 return len;
+5 -5
drivers/scsi/lpfc/lpfc_els.c
··· 4015 4015 be32_to_cpu(pcgd->desc_tag), 4016 4016 be32_to_cpu(pcgd->desc_len), 4017 4017 be32_to_cpu(pcgd->xmt_signal_capability), 4018 - be32_to_cpu(pcgd->xmt_signal_frequency.count), 4019 - be32_to_cpu(pcgd->xmt_signal_frequency.units), 4018 + be16_to_cpu(pcgd->xmt_signal_frequency.count), 4019 + be16_to_cpu(pcgd->xmt_signal_frequency.units), 4020 4020 be32_to_cpu(pcgd->rcv_signal_capability), 4021 - be32_to_cpu(pcgd->rcv_signal_frequency.count), 4022 - be32_to_cpu(pcgd->rcv_signal_frequency.units)); 4021 + be16_to_cpu(pcgd->rcv_signal_frequency.count), 4022 + be16_to_cpu(pcgd->rcv_signal_frequency.units)); 4023 4023 4024 4024 /* Compare driver and Fport capabilities and choose 4025 4025 * least common. ··· 9387 9387 /* Extract the next WWPN from the payload */ 9388 9388 wwn = *wwnlist++; 9389 9389 wwpn = be64_to_cpu(wwn); 9390 - len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ, 9390 + len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len, 9391 9391 " %016llx", wwpn); 9392 9392 9393 9393 /* Log a message if we are on the last WWPN
+1 -1
drivers/scsi/lpfc/lpfc_hw4.h
··· 1167 1167 #define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF 1168 1168 #define lpfc_mbx_rd_object_rlen_WORD word0 1169 1169 uint32_t rd_object_offset; 1170 - uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; 1170 + __le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW]; 1171 1171 #define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */ 1172 1172 uint32_t rd_object_cnt; 1173 1173 struct lpfc_mbx_host_buf rd_object_hbuf[4];
+10 -10
drivers/scsi/lpfc/lpfc_init.c
··· 5518 5518 if (phba->cgn_fpin_frequency && 5519 5519 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5520 5520 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5521 - cp->cgn_stat_npm = cpu_to_le32(value); 5521 + cp->cgn_stat_npm = value; 5522 5522 } 5523 5523 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, 5524 5524 LPFC_CGN_CRC32_SEED); ··· 5547 5547 uint32_t mbps; 5548 5548 uint32_t dvalue, wvalue, lvalue, avalue; 5549 5549 uint64_t latsum; 5550 - uint16_t *ptr; 5551 - uint32_t *lptr; 5552 - uint16_t *mptr; 5550 + __le16 *ptr; 5551 + __le32 *lptr; 5552 + __le16 *mptr; 5553 5553 5554 5554 /* Make sure we have a congestion info buffer */ 5555 5555 if (!phba->cgn_i) ··· 5570 5570 if (phba->cgn_fpin_frequency && 5571 5571 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { 5572 5572 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; 5573 - cp->cgn_stat_npm = cpu_to_le32(value); 5573 + cp->cgn_stat_npm = value; 5574 5574 } 5575 5575 5576 5576 /* Read and clear the latency counters for this minute */ ··· 5753 5753 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]); 5754 5754 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]); 5755 5755 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]); 5756 - mbps += le32_to_cpu(cp->cgn_bw_hr[i]); 5756 + mbps += le16_to_cpu(cp->cgn_bw_hr[i]); 5757 5757 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]); 5758 5758 } 5759 5759 if (lvalue) /* Avg of latency averages */ ··· 8277 8277 return 0; 8278 8278 8279 8279 out_free_hba_hdwq_info: 8280 - free_percpu(phba->sli4_hba.c_stat); 8281 8280 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 8281 + free_percpu(phba->sli4_hba.c_stat); 8282 8282 out_free_hba_idle_stat: 8283 - kfree(phba->sli4_hba.idle_stat); 8284 8283 #endif 8284 + kfree(phba->sli4_hba.idle_stat); 8285 8285 out_free_hba_eq_info: 8286 8286 free_percpu(phba->sli4_hba.eq_info); 8287 8287 out_free_hba_cpu_map: ··· 13411 13411 13412 13412 /* last used Index initialized to 0xff already */ 13413 13413 13414 - cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ; 13415 - 
cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ; 13414 + cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13415 + cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ); 13416 13416 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); 13417 13417 cp->cgn_info_crc = cpu_to_le32(crc); 13418 13418
-2
drivers/scsi/lpfc/lpfc_nvme.c
··· 1489 1489 struct lpfc_nvme_qhandle *lpfc_queue_info; 1490 1490 struct lpfc_nvme_fcpreq_priv *freqpriv; 1491 1491 struct nvme_common_command *sqe; 1492 - #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1493 1492 uint64_t start = 0; 1494 - #endif 1495 1493 1496 1494 /* Validate pointers. LLDD fault handling with transport does 1497 1495 * have timing races.
+2 -7
drivers/scsi/lpfc/lpfc_scsi.c
··· 1495 1495 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1496 1496 uint8_t *txop, uint8_t *rxop) 1497 1497 { 1498 - uint8_t ret = 0; 1499 1498 1500 1499 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) { 1501 1500 switch (scsi_get_prot_op(sc)) { ··· 1547 1548 } 1548 1549 } 1549 1550 1550 - return ret; 1551 + return 0; 1551 1552 } 1552 1553 #endif 1553 1554 ··· 5577 5578 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); 5578 5579 int err, idx; 5579 5580 u8 *uuid = NULL; 5580 - #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 5581 - uint64_t start = 0L; 5581 + uint64_t start; 5582 5582 5583 - if (phba->ktime_on) 5584 - start = ktime_get_ns(); 5585 - #endif 5586 5583 start = ktime_get_ns(); 5587 5584 rdata = lpfc_rport_data_from_scsi_device(cmnd->device); 5588 5585
+3 -2
drivers/scsi/lpfc/lpfc_sli.c
··· 22090 22090 uint32_t shdr_status, shdr_add_status; 22091 22091 union lpfc_sli4_cfg_shdr *shdr; 22092 22092 struct lpfc_dmabuf *pcmd; 22093 + u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0}; 22093 22094 22094 22095 /* sanity check on queue memory */ 22095 22096 if (!datap) ··· 22114 22113 22115 22114 memset((void *)read_object->u.request.rd_object_name, 0, 22116 22115 LPFC_OBJ_NAME_SZ); 22117 - sprintf((uint8_t *)read_object->u.request.rd_object_name, rdobject); 22116 + scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject); 22118 22117 for (j = 0; j < strlen(rdobject); j++) 22119 22118 read_object->u.request.rd_object_name[j] = 22120 - cpu_to_le32(read_object->u.request.rd_object_name[j]); 22119 + cpu_to_le32(rd_object_name[j]); 22121 22120 22122 22121 pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL); 22123 22122 if (pcmd)
+3 -4
drivers/scsi/megaraid/megaraid_sas_base.c
··· 1916 1916 raid = MR_LdRaidGet(ld, local_map_ptr); 1917 1917 1918 1918 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) 1919 - blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1919 + blk_queue_update_dma_alignment(sdev->request_queue, 0x7); 1920 1920 1921 1921 mr_device_priv_data->is_tm_capable = 1922 1922 raid->capability.tmCapable; ··· 8033 8033 8034 8034 if (instance->adapter_type != MFI_SERIES) { 8035 8035 megasas_release_fusion(instance); 8036 - pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 8036 + pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + 8037 8037 (sizeof(struct MR_PD_CFG_SEQ) * 8038 8038 (MAX_PHYSICAL_DEVICES - 1)); 8039 8039 for (i = 0; i < 2 ; i++) { ··· 8773 8773 8774 8774 if (event_type & SCAN_VD_CHANNEL) { 8775 8775 if (!instance->requestorId || 8776 - (instance->requestorId && 8777 - megasas_get_ld_vf_affiliation(instance, 0))) { 8776 + megasas_get_ld_vf_affiliation(instance, 0)) { 8778 8777 dcmd_ret = megasas_ld_list_query(instance, 8779 8778 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); 8780 8779 if (dcmd_ret != DCMD_SUCCESS)
+3 -1
drivers/scsi/mpt3sas/mpt3sas_base.c
··· 1582 1582 * wait for current poll to complete. 1583 1583 */ 1584 1584 for (qid = 0; qid < iopoll_q_count; qid++) { 1585 - while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) 1585 + while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) { 1586 + cpu_relax(); 1586 1587 udelay(500); 1588 + } 1587 1589 } 1588 1590 } 1589 1591
+1 -1
drivers/scsi/mpt3sas/mpt3sas_ctl.c
··· 2178 2178 mpt3sas_check_cmd_timeout(ioc, 2179 2179 ioc->ctl_cmds.status, mpi_request, 2180 2180 sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed); 2181 - *issue_reset = reset_needed; 2181 + *issue_reset = reset_needed; 2182 2182 rc = -EFAULT; 2183 2183 goto out; 2184 2184 }
+1 -2
drivers/scsi/mpt3sas/mpt3sas_scsih.c
··· 10749 10749 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 10750 10750 _scsih_pcie_topology_change_event(ioc, fw_event); 10751 10751 ioc->current_event = NULL; 10752 - return; 10753 - break; 10752 + return; 10754 10753 } 10755 10754 out: 10756 10755 fw_event_work_put(fw_event);
-23
drivers/scsi/ncr53c8xx.c
··· 1939 1939 static void ncr_put_start_queue(struct ncb *np, struct ccb *cp); 1940 1940 1941 1941 static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd); 1942 - static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd); 1943 1942 static void process_waiting_list(struct ncb *np, int sts); 1944 1943 1945 - #define remove_from_waiting_list(np, cmd) \ 1946 - retrieve_from_waiting_list(1, (np), (cmd)) 1947 1944 #define requeue_waiting_list(np) process_waiting_list((np), DID_OK) 1948 1945 #define reset_waiting_list(np) process_waiting_list((np), DID_RESET) 1949 1946 ··· 7992 7995 wcmd = (struct scsi_cmnd *) wcmd->next_wcmd; 7993 7996 wcmd->next_wcmd = (char *) cmd; 7994 7997 } 7995 - } 7996 - 7997 - static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd) 7998 - { 7999 - struct scsi_cmnd **pcmd = &np->waiting_list; 8000 - 8001 - while (*pcmd) { 8002 - if (cmd == *pcmd) { 8003 - if (to_remove) { 8004 - *pcmd = (struct scsi_cmnd *) cmd->next_wcmd; 8005 - cmd->next_wcmd = NULL; 8006 - } 8007 - #ifdef DEBUG_WAITING_LIST 8008 - printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd); 8009 - #endif 8010 - return cmd; 8011 - } 8012 - pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd; 8013 - } 8014 - return NULL; 8015 7998 } 8016 7999 8017 8000 static void process_waiting_list(struct ncb *np, int sts)
+2 -1
drivers/scsi/qla2xxx/qla_init.c
··· 7169 7169 return 0; 7170 7170 break; 7171 7171 case QLA2XXX_INI_MODE_DUAL: 7172 - if (!qla_dual_mode_enabled(vha)) 7172 + if (!qla_dual_mode_enabled(vha) && 7173 + !qla_ini_mode_enabled(vha)) 7173 7174 return 0; 7174 7175 break; 7175 7176 case QLA2XXX_INI_MODE_ENABLED:
+2 -2
drivers/scsi/qla2xxx/qla_isr.c
··· 2634 2634 } 2635 2635 2636 2636 if (unlikely(logit)) 2637 - ql_log(ql_log_warn, fcport->vha, 0x5060, 2637 + ql_log(ql_dbg_io, fcport->vha, 0x5060, 2638 2638 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", 2639 2639 sp->name, sp->handle, comp_status, 2640 2640 fd->transferred_length, le32_to_cpu(sts->residual_len), ··· 3491 3491 3492 3492 out: 3493 3493 if (logit) 3494 - ql_log(ql_log_warn, fcport->vha, 0x3022, 3494 + ql_log(ql_dbg_io, fcport->vha, 0x3022, 3495 3495 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3496 3496 comp_status, scsi_status, res, vha->host_no, 3497 3497 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+4 -4
drivers/scsi/scsi_transport_iscsi.c
··· 441 441 struct iscsi_transport *t = iface->transport; 442 442 int param = -1; 443 443 444 - if (attr == &dev_attr_iface_enabled.attr) 445 - param = ISCSI_NET_PARAM_IFACE_ENABLE; 446 - else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) 444 + if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) 447 445 param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; 448 446 else if (attr == &dev_attr_iface_header_digest.attr) 449 447 param = ISCSI_IFACE_PARAM_HDRDGST_EN; ··· 481 483 if (param != -1) 482 484 return t->attr_is_visible(ISCSI_IFACE_PARAM, param); 483 485 484 - if (attr == &dev_attr_iface_vlan_id.attr) 486 + if (attr == &dev_attr_iface_enabled.attr) 487 + param = ISCSI_NET_PARAM_IFACE_ENABLE; 488 + else if (attr == &dev_attr_iface_vlan_id.attr) 485 489 param = ISCSI_NET_PARAM_VLAN_ID; 486 490 else if (attr == &dev_attr_iface_vlan_priority.attr) 487 491 param = ISCSI_NET_PARAM_VLAN_PRIORITY;
+9 -5
drivers/scsi/sd.c
··· 2124 2124 retries = 0; 2125 2125 2126 2126 do { 2127 + bool media_was_present = sdkp->media_present; 2128 + 2127 2129 cmd[0] = TEST_UNIT_READY; 2128 2130 memset((void *) &cmd[1], 0, 9); 2129 2131 ··· 2140 2138 * with any more polling. 2141 2139 */ 2142 2140 if (media_not_present(sdkp, &sshdr)) { 2143 - sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); 2141 + if (media_was_present) 2142 + sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n"); 2144 2143 return; 2145 2144 } 2146 2145 ··· 3404 3401 } 3405 3402 3406 3403 device_initialize(&sdkp->dev); 3407 - sdkp->dev.parent = dev; 3404 + sdkp->dev.parent = get_device(dev); 3408 3405 sdkp->dev.class = &sd_disk_class; 3409 3406 dev_set_name(&sdkp->dev, "%s", dev_name(dev)); 3410 3407 3411 3408 error = device_add(&sdkp->dev); 3412 - if (error) 3413 - goto out_free_index; 3409 + if (error) { 3410 + put_device(&sdkp->dev); 3411 + goto out; 3412 + } 3414 3413 3415 - get_device(dev); 3416 3414 dev_set_drvdata(dev, sdkp); 3417 3415 3418 3416 gd->major = sd_major((index & 0xf0) >> 4);
+4 -4
drivers/scsi/sd_zbc.c
··· 154 154 155 155 /* 156 156 * Report zone buffer size should be at most 64B times the number of 157 - * zones requested plus the 64B reply header, but should be at least 158 - * SECTOR_SIZE for ATA devices. 157 + * zones requested plus the 64B reply header, but should be aligned 158 + * to SECTOR_SIZE for ATA devices. 159 159 * Make sure that this size does not exceed the hardware capabilities. 160 160 * Furthermore, since the report zone command cannot be split, make 161 161 * sure that the allocated buffer can always be mapped by limiting the ··· 174 174 *buflen = bufsize; 175 175 return buf; 176 176 } 177 - bufsize >>= 1; 177 + bufsize = rounddown(bufsize >> 1, SECTOR_SIZE); 178 178 } 179 179 180 180 return NULL; ··· 280 280 { 281 281 struct scsi_disk *sdkp; 282 282 unsigned long flags; 283 - unsigned int zno; 283 + sector_t zno; 284 284 int ret; 285 285 286 286 sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
+19 -5
drivers/scsi/ses.c
··· 87 87 0 88 88 }; 89 89 unsigned char recv_page_code; 90 + unsigned int retries = SES_RETRIES; 91 + struct scsi_sense_hdr sshdr; 90 92 91 - ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 92 - NULL, SES_TIMEOUT, SES_RETRIES, NULL); 93 + do { 94 + ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 95 + &sshdr, SES_TIMEOUT, 1, NULL); 96 + } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) && 97 + (sshdr.sense_key == NOT_READY || 98 + (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29))); 99 + 93 100 if (unlikely(ret)) 94 101 return ret; 95 102 ··· 118 111 static int ses_send_diag(struct scsi_device *sdev, int page_code, 119 112 void *buf, int bufflen) 120 113 { 121 - u32 result; 114 + int result; 122 115 123 116 unsigned char cmd[] = { 124 117 SEND_DIAGNOSTIC, ··· 128 121 bufflen & 0xff, 129 122 0 130 123 }; 124 + struct scsi_sense_hdr sshdr; 125 + unsigned int retries = SES_RETRIES; 131 126 132 - result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen, 133 - NULL, SES_TIMEOUT, SES_RETRIES, NULL); 127 + do { 128 + result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen, 129 + &sshdr, SES_TIMEOUT, 1, NULL); 130 + } while (result > 0 && --retries && scsi_sense_valid(&sshdr) && 131 + (sshdr.sense_key == NOT_READY || 132 + (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29))); 133 + 134 134 if (result) 135 135 sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n", 136 136 result);
+1 -1
drivers/scsi/sr_ioctl.c
··· 523 523 return rc; 524 524 cd->readcd_known = 0; 525 525 sr_printk(KERN_INFO, cd, 526 - "CDROM does'nt support READ CD (0xbe) command\n"); 526 + "CDROM doesn't support READ CD (0xbe) command\n"); 527 527 /* fall & retry the other way */ 528 528 } 529 529 /* ... if this fails, we switch the blocksize using MODE SELECT */
+1
drivers/scsi/st.c
··· 3823 3823 case CDROM_SEND_PACKET: 3824 3824 if (!capable(CAP_SYS_RAWIO)) 3825 3825 return -EPERM; 3826 + break; 3826 3827 default: 3827 3828 break; 3828 3829 }
+78
drivers/scsi/ufs/ufshcd-pci.c
··· 128 128 return err; 129 129 } 130 130 131 + static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes) 132 + { 133 + struct ufs_pa_layer_attr pwr_info = hba->pwr_info; 134 + int ret; 135 + 136 + pwr_info.lane_rx = lanes; 137 + pwr_info.lane_tx = lanes; 138 + ret = ufshcd_config_pwr_mode(hba, &pwr_info); 139 + if (ret) 140 + dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n", 141 + __func__, lanes, ret); 142 + return ret; 143 + } 144 + 145 + static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba, 146 + enum ufs_notify_change_status status, 147 + struct ufs_pa_layer_attr *dev_max_params, 148 + struct ufs_pa_layer_attr *dev_req_params) 149 + { 150 + int err = 0; 151 + 152 + switch (status) { 153 + case PRE_CHANGE: 154 + if (ufshcd_is_hs_mode(dev_max_params) && 155 + (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2)) 156 + ufs_intel_set_lanes(hba, 2); 157 + memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params)); 158 + break; 159 + case POST_CHANGE: 160 + if (ufshcd_is_hs_mode(dev_req_params)) { 161 + u32 peer_granularity; 162 + 163 + usleep_range(1000, 1250); 164 + err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), 165 + &peer_granularity); 166 + } 167 + break; 168 + default: 169 + break; 170 + } 171 + 172 + return err; 173 + } 174 + 175 + static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba) 176 + { 177 + u32 granularity, peer_granularity; 178 + u32 pa_tactivate, peer_pa_tactivate; 179 + int ret; 180 + 181 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity); 182 + if (ret) 183 + goto out; 184 + 185 + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity); 186 + if (ret) 187 + goto out; 188 + 189 + ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); 190 + if (ret) 191 + goto out; 192 + 193 + ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate); 194 + if (ret) 195 + goto out; 196 + 197 + if (granularity == peer_granularity) { 198 + 
u32 new_peer_pa_tactivate = pa_tactivate + 2; 199 + 200 + ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate); 201 + } 202 + out: 203 + return ret; 204 + } 205 + 131 206 #define INTEL_ACTIVELTR 0x804 132 207 #define INTEL_IDLELTR 0x808 133 208 ··· 426 351 struct ufs_host *ufs_host; 427 352 int err; 428 353 354 + hba->nop_out_timeout = 200; 429 355 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; 430 356 hba->caps |= UFSHCD_CAP_CRYPTO; 431 357 err = ufs_intel_common_init(hba); ··· 457 381 .exit = ufs_intel_common_exit, 458 382 .hce_enable_notify = ufs_intel_hce_enable_notify, 459 383 .link_startup_notify = ufs_intel_link_startup_notify, 384 + .pwr_change_notify = ufs_intel_lkf_pwr_change_notify, 385 + .apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks, 460 386 .resume = ufs_intel_resume, 461 387 .device_reset = ufs_intel_device_reset, 462 388 };
+58 -61
drivers/scsi/ufs/ufshcd.c
··· 17 17 #include <linux/blk-pm.h> 18 18 #include <linux/blkdev.h> 19 19 #include <scsi/scsi_driver.h> 20 - #include <scsi/scsi_transport.h> 21 - #include "../scsi_transport_api.h" 22 20 #include "ufshcd.h" 23 21 #include "ufs_quirks.h" 24 22 #include "unipro.h" ··· 235 237 static irqreturn_t ufshcd_intr(int irq, void *__hba); 236 238 static int ufshcd_change_power_mode(struct ufs_hba *hba, 237 239 struct ufs_pa_layer_attr *pwr_mode); 240 + static void ufshcd_schedule_eh_work(struct ufs_hba *hba); 238 241 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); 239 242 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); 240 243 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, ··· 318 319 static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag, 319 320 enum ufs_trace_str_t str_t) 320 321 { 321 - int off = (int)tag - hba->nutrs; 322 - struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off]; 322 + struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag]; 323 323 324 324 if (!trace_ufshcd_upiu_enabled()) 325 325 return; ··· 2757 2759 out: 2758 2760 up_read(&hba->clk_scaling_lock); 2759 2761 2760 - if (ufs_trigger_eh()) 2761 - scsi_schedule_eh(hba->host); 2762 + if (ufs_trigger_eh()) { 2763 + unsigned long flags; 2764 + 2765 + spin_lock_irqsave(hba->host->host_lock, flags); 2766 + ufshcd_schedule_eh_work(hba); 2767 + spin_unlock_irqrestore(hba->host->host_lock, flags); 2768 + } 2762 2769 2763 2770 return err; 2764 2771 } ··· 3922 3919 } 3923 3920 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); 3924 3921 3925 - static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) 3926 - { 3927 - lockdep_assert_held(hba->host->host_lock); 3928 - 3929 - return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || 3930 - (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); 3931 - } 3932 - 3933 - static void ufshcd_schedule_eh(struct ufs_hba *hba) 3934 - { 3935 - bool schedule_eh = false; 3936 - unsigned long flags; 
3937 - 3938 - spin_lock_irqsave(hba->host->host_lock, flags); 3939 - /* handle fatal errors only when link is not in error state */ 3940 - if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { 3941 - if (hba->force_reset || ufshcd_is_link_broken(hba) || 3942 - ufshcd_is_saved_err_fatal(hba)) 3943 - hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; 3944 - else 3945 - hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; 3946 - schedule_eh = true; 3947 - } 3948 - spin_unlock_irqrestore(hba->host->host_lock, flags); 3949 - 3950 - if (schedule_eh) 3951 - scsi_schedule_eh(hba->host); 3952 - } 3953 - 3954 3922 /** 3955 3923 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power 3956 3924 * state) and waits for it to take effect. ··· 3942 3968 { 3943 3969 DECLARE_COMPLETION_ONSTACK(uic_async_done); 3944 3970 unsigned long flags; 3945 - bool schedule_eh = false; 3946 3971 u8 status; 3947 3972 int ret; 3948 3973 bool reenable_intr = false; ··· 4011 4038 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); 4012 4039 if (ret) { 4013 4040 ufshcd_set_link_broken(hba); 4014 - schedule_eh = true; 4041 + ufshcd_schedule_eh_work(hba); 4015 4042 } 4016 - 4017 4043 out_unlock: 4018 4044 spin_unlock_irqrestore(hba->host->host_lock, flags); 4019 - 4020 - if (schedule_eh) 4021 - ufshcd_schedule_eh(hba); 4022 4045 mutex_unlock(&hba->uic_cmd_mutex); 4023 4046 4024 4047 return ret; ··· 4745 4776 mutex_lock(&hba->dev_cmd.lock); 4746 4777 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { 4747 4778 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, 4748 - NOP_OUT_TIMEOUT); 4779 + hba->nop_out_timeout); 4749 4780 4750 4781 if (!err || err == -ETIMEDOUT) 4751 4782 break; ··· 5880 5911 return err_handling; 5881 5912 } 5882 5913 5914 + /* host lock must be held before calling this func */ 5915 + static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) 5916 + { 5917 + return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || 5918 + (hba->saved_err & (INT_FATAL_ERRORS | 
UFSHCD_UIC_HIBERN8_MASK)); 5919 + } 5920 + 5921 + /* host lock must be held before calling this func */ 5922 + static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba) 5923 + { 5924 + /* handle fatal errors only when link is not in error state */ 5925 + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { 5926 + if (hba->force_reset || ufshcd_is_link_broken(hba) || 5927 + ufshcd_is_saved_err_fatal(hba)) 5928 + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; 5929 + else 5930 + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; 5931 + queue_work(hba->eh_wq, &hba->eh_work); 5932 + } 5933 + } 5934 + 5883 5935 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) 5884 5936 { 5885 5937 down_write(&hba->clk_scaling_lock); ··· 6034 6044 6035 6045 /** 6036 6046 * ufshcd_err_handler - handle UFS errors that require s/w attention 6037 - * @host: SCSI host pointer 6047 + * @work: pointer to work structure 6038 6048 */ 6039 - static void ufshcd_err_handler(struct Scsi_Host *host) 6049 + static void ufshcd_err_handler(struct work_struct *work) 6040 6050 { 6041 - struct ufs_hba *hba = shost_priv(host); 6051 + struct ufs_hba *hba; 6042 6052 unsigned long flags; 6043 6053 bool err_xfer = false; 6044 6054 bool err_tm = false; ··· 6046 6056 int tag; 6047 6057 bool needs_reset = false, needs_restore = false; 6048 6058 6059 + hba = container_of(work, struct ufs_hba, eh_work); 6060 + 6049 6061 down(&hba->host_sem); 6050 6062 spin_lock_irqsave(hba->host->host_lock, flags); 6051 - hba->host->host_eh_scheduled = 0; 6052 6063 if (ufshcd_err_handling_should_stop(hba)) { 6053 6064 if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 6054 6065 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; ··· 6362 6371 "host_regs: "); 6363 6372 ufshcd_print_pwr_info(hba); 6364 6373 } 6374 + ufshcd_schedule_eh_work(hba); 6365 6375 retval |= IRQ_HANDLED; 6366 6376 } 6367 6377 /* ··· 6374 6382 hba->errors = 0; 6375 6383 hba->uic_error = 0; 6376 6384 spin_unlock(hba->host->host_lock); 6377 - 
6378 - if (queue_eh_work) 6379 - ufshcd_schedule_eh(hba); 6380 - 6381 6385 return retval; 6382 6386 } 6383 6387 ··· 6864 6876 err = ufshcd_clear_cmd(hba, pos); 6865 6877 if (err) 6866 6878 break; 6867 - __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true); 6879 + __ufshcd_transfer_req_compl(hba, 1U << pos, false); 6868 6880 } 6869 6881 } 6870 6882 ··· 7036 7048 * will be to send LU reset which, again, is a spec violation. 7037 7049 * To avoid these unnecessary/illegal steps, first we clean up 7038 7050 * the lrb taken by this cmd and re-set it in outstanding_reqs, 7039 - * then queue the error handler and bail. 7051 + * then queue the eh_work and bail. 7040 7052 */ 7041 7053 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { 7042 7054 ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); 7043 7055 7044 7056 spin_lock_irqsave(host->host_lock, flags); 7045 7057 hba->force_reset = true; 7058 + ufshcd_schedule_eh_work(hba); 7046 7059 spin_unlock_irqrestore(host->host_lock, flags); 7047 - 7048 - ufshcd_schedule_eh(hba); 7049 - 7050 7060 goto release; 7051 7061 } 7052 7062 ··· 7177 7191 7178 7192 spin_lock_irqsave(hba->host->host_lock, flags); 7179 7193 hba->force_reset = true; 7194 + ufshcd_schedule_eh_work(hba); 7180 7195 dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); 7181 7196 spin_unlock_irqrestore(hba->host->host_lock, flags); 7182 7197 7183 - ufshcd_err_handler(hba->host); 7198 + flush_work(&hba->eh_work); 7184 7199 7185 7200 spin_lock_irqsave(hba->host->host_lock, flags); 7186 7201 if (hba->ufshcd_state == UFSHCD_STATE_ERROR) ··· 8591 8604 if (hba->is_powered) { 8592 8605 ufshcd_exit_clk_scaling(hba); 8593 8606 ufshcd_exit_clk_gating(hba); 8607 + if (hba->eh_wq) 8608 + destroy_workqueue(hba->eh_wq); 8594 8609 ufs_debugfs_hba_exit(hba); 8595 8610 ufshcd_variant_hba_exit(hba); 8596 8611 ufshcd_setup_vreg(hba, false); ··· 9437 9448 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); 9438 9449 } 9439 9450 9440 - static struct 
scsi_transport_template ufshcd_transport_template = { 9441 - .eh_strategy_handler = ufshcd_err_handler, 9442 - }; 9443 - 9444 9451 /** 9445 9452 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) 9446 9453 * @dev: pointer to device handle ··· 9463 9478 err = -ENOMEM; 9464 9479 goto out_error; 9465 9480 } 9466 - host->transportt = &ufshcd_transport_template; 9467 9481 hba = shost_priv(host); 9468 9482 hba->host = host; 9469 9483 hba->dev = dev; 9470 9484 hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; 9485 + hba->nop_out_timeout = NOP_OUT_TIMEOUT; 9471 9486 INIT_LIST_HEAD(&hba->clk_list_head); 9472 9487 spin_lock_init(&hba->outstanding_lock); 9473 9488 ··· 9502 9517 int err; 9503 9518 struct Scsi_Host *host = hba->host; 9504 9519 struct device *dev = hba->dev; 9520 + char eh_wq_name[sizeof("ufs_eh_wq_00")]; 9505 9521 9506 9522 if (!mmio_base) { 9507 9523 dev_err(hba->dev, ··· 9556 9570 9557 9571 hba->max_pwr_info.is_valid = false; 9558 9572 9573 + /* Initialize work queues */ 9574 + snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", 9575 + hba->host->host_no); 9576 + hba->eh_wq = create_singlethread_workqueue(eh_wq_name); 9577 + if (!hba->eh_wq) { 9578 + dev_err(hba->dev, "%s: failed to create eh workqueue\n", 9579 + __func__); 9580 + err = -ENOMEM; 9581 + goto out_disable; 9582 + } 9583 + INIT_WORK(&hba->eh_work, ufshcd_err_handler); 9559 9584 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); 9560 9585 9561 9586 sema_init(&hba->host_sem, 1);
+5
drivers/scsi/ufs/ufshcd.h
··· 741 741 * @is_powered: flag to check if HBA is powered 742 742 * @shutting_down: flag to check if shutdown has been invoked 743 743 * @host_sem: semaphore used to serialize concurrent contexts 744 + * @eh_wq: Workqueue that eh_work works on 745 + * @eh_work: Worker to handle UFS errors that require s/w attention 744 746 * @eeh_work: Worker to handle exception events 745 747 * @errors: HBA errors 746 748 * @uic_error: UFS interconnect layer error status ··· 845 843 struct semaphore host_sem; 846 844 847 845 /* Work Queues */ 846 + struct workqueue_struct *eh_wq; 847 + struct work_struct eh_work; 848 848 struct work_struct eeh_work; 849 849 850 850 /* HBA Errors */ ··· 862 858 /* Device management request data */ 863 859 struct ufs_dev_cmd dev_cmd; 864 860 ktime_t last_dme_cmd_tstamp; 861 + int nop_out_timeout; 865 862 866 863 /* Keeps information of the UFS device connected to this host */ 867 864 struct ufs_dev_info dev_info;
+3 -5
drivers/scsi/ufs/ufshpb.c
··· 333 333 } 334 334 335 335 static void 336 - ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb, 337 - struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn, 338 - u8 transfer_len, int read_id) 336 + ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, 337 + __be64 ppn, u8 transfer_len, int read_id) 339 338 { 340 339 unsigned char *cdb = lrbp->cmd->cmnd; 341 340 __be64 ppn_tmp = ppn; ··· 702 703 } 703 704 } 704 705 705 - ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len, 706 - read_id); 706 + ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id); 707 707 708 708 hpb->stats.hit_cnt++; 709 709 return 0;
+2 -2
drivers/scsi/virtio_scsi.c
··· 300 300 } 301 301 break; 302 302 default: 303 - pr_info("Unsupport virtio scsi event reason %x\n", event->reason); 303 + pr_info("Unsupported virtio scsi event reason %x\n", event->reason); 304 304 } 305 305 } 306 306 ··· 392 392 virtscsi_handle_param_change(vscsi, event); 393 393 break; 394 394 default: 395 - pr_err("Unsupport virtio scsi event %x\n", event->event); 395 + pr_err("Unsupported virtio scsi event %x\n", event->event); 396 396 } 397 397 virtscsi_kick_event(vscsi, event_node); 398 398 }
+1 -1
drivers/soc/qcom/mdt_loader.c
··· 98 98 if (ehdr->e_phnum < 2) 99 99 return ERR_PTR(-EINVAL); 100 100 101 - if (phdrs[0].p_type == PT_LOAD || phdrs[1].p_type == PT_LOAD) 101 + if (phdrs[0].p_type == PT_LOAD) 102 102 return ERR_PTR(-EINVAL); 103 103 104 104 if ((phdrs[1].p_flags & QCOM_MDT_TYPE_MASK) != QCOM_MDT_TYPE_HASH)
+1 -1
drivers/soc/qcom/socinfo.c
··· 628 628 /* Feed the soc specific unique data into entropy pool */ 629 629 add_device_randomness(info, item_size); 630 630 631 - platform_set_drvdata(pdev, qs->soc_dev); 631 + platform_set_drvdata(pdev, qs); 632 632 633 633 return 0; 634 634 }
+16 -13
drivers/soc/ti/omap_prm.c
··· 825 825 writel_relaxed(v, reset->prm->base + reset->prm->data->rstctrl); 826 826 spin_unlock_irqrestore(&reset->lock, flags); 827 827 828 - if (!has_rstst) 829 - goto exit; 830 - 831 - /* wait for the status to be set */ 828 + /* wait for the reset bit to clear */ 832 829 ret = readl_relaxed_poll_timeout_atomic(reset->prm->base + 833 - reset->prm->data->rstst, 834 - v, v & BIT(st_bit), 1, 835 - OMAP_RESET_MAX_WAIT); 830 + reset->prm->data->rstctrl, 831 + v, !(v & BIT(id)), 1, 832 + OMAP_RESET_MAX_WAIT); 836 833 if (ret) 837 834 pr_err("%s: timedout waiting for %s:%lu\n", __func__, 838 835 reset->prm->data->name, id); 839 836 840 - exit: 841 - if (reset->clkdm) { 842 - /* At least dra7 iva needs a delay before clkdm idle */ 843 - if (has_rstst) 844 - udelay(1); 845 - pdata->clkdm_allow_idle(reset->clkdm); 837 + /* wait for the status to be set */ 838 + if (has_rstst) { 839 + ret = readl_relaxed_poll_timeout_atomic(reset->prm->base + 840 + reset->prm->data->rstst, 841 + v, v & BIT(st_bit), 1, 842 + OMAP_RESET_MAX_WAIT); 843 + if (ret) 844 + pr_err("%s: timedout waiting for %s:%lu\n", __func__, 845 + reset->prm->data->name, id); 846 846 } 847 + 848 + if (reset->clkdm) 849 + pdata->clkdm_allow_idle(reset->clkdm); 847 850 848 851 return ret; 849 852 }
-8
drivers/spi/spi.c
··· 58 58 const struct spi_device *spi = to_spi_device(dev); 59 59 int len; 60 60 61 - len = of_device_modalias(dev, buf, PAGE_SIZE); 62 - if (len != -ENODEV) 63 - return len; 64 - 65 61 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); 66 62 if (len != -ENODEV) 67 63 return len; ··· 362 366 { 363 367 const struct spi_device *spi = to_spi_device(dev); 364 368 int rc; 365 - 366 - rc = of_device_uevent_modalias(dev, env); 367 - if (rc != -ENODEV) 368 - return rc; 369 369 370 370 rc = acpi_device_uevent_modalias(dev, env); 371 371 if (rc != -ENODEV)
+32 -30
drivers/staging/greybus/uart.c
··· 761 761 gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev); 762 762 } 763 763 764 + static void gb_tty_port_destruct(struct tty_port *port) 765 + { 766 + struct gb_tty *gb_tty = container_of(port, struct gb_tty, port); 767 + 768 + if (gb_tty->minor != GB_NUM_MINORS) 769 + release_minor(gb_tty); 770 + kfifo_free(&gb_tty->write_fifo); 771 + kfree(gb_tty->buffer); 772 + kfree(gb_tty); 773 + } 774 + 764 775 static const struct tty_operations gb_ops = { 765 776 .install = gb_tty_install, 766 777 .open = gb_tty_open, ··· 797 786 .dtr_rts = gb_tty_dtr_rts, 798 787 .activate = gb_tty_port_activate, 799 788 .shutdown = gb_tty_port_shutdown, 789 + .destruct = gb_tty_port_destruct, 800 790 }; 801 791 802 792 static int gb_uart_probe(struct gbphy_device *gbphy_dev, ··· 810 798 int retval; 811 799 int minor; 812 800 813 - gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL); 814 - if (!gb_tty) 815 - return -ENOMEM; 816 - 817 801 connection = gb_connection_create(gbphy_dev->bundle, 818 802 le16_to_cpu(gbphy_dev->cport_desc->id), 819 803 gb_uart_request_handler); 820 - if (IS_ERR(connection)) { 821 - retval = PTR_ERR(connection); 822 - goto exit_tty_free; 823 - } 804 + if (IS_ERR(connection)) 805 + return PTR_ERR(connection); 824 806 825 807 max_payload = gb_operation_get_payload_size_max(connection); 826 808 if (max_payload < sizeof(struct gb_uart_send_data_request)) { ··· 822 816 goto exit_connection_destroy; 823 817 } 824 818 819 + gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL); 820 + if (!gb_tty) { 821 + retval = -ENOMEM; 822 + goto exit_connection_destroy; 823 + } 824 + 825 + tty_port_init(&gb_tty->port); 826 + gb_tty->port.ops = &gb_port_ops; 827 + gb_tty->minor = GB_NUM_MINORS; 828 + 825 829 gb_tty->buffer_payload_max = max_payload - 826 830 sizeof(struct gb_uart_send_data_request); 827 831 828 832 gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL); 829 833 if (!gb_tty->buffer) { 830 834 retval = -ENOMEM; 831 - goto exit_connection_destroy; 835 + goto 
exit_put_port; 832 836 } 833 837 834 838 INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work); ··· 846 830 retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE, 847 831 GFP_KERNEL); 848 832 if (retval) 849 - goto exit_buf_free; 833 + goto exit_put_port; 850 834 851 835 gb_tty->credits = GB_UART_FIRMWARE_CREDITS; 852 836 init_completion(&gb_tty->credits_complete); ··· 860 844 } else { 861 845 retval = minor; 862 846 } 863 - goto exit_kfifo_free; 847 + goto exit_put_port; 864 848 } 865 849 866 850 gb_tty->minor = minor; ··· 869 853 init_waitqueue_head(&gb_tty->wioctl); 870 854 mutex_init(&gb_tty->mutex); 871 855 872 - tty_port_init(&gb_tty->port); 873 - gb_tty->port.ops = &gb_port_ops; 874 - 875 856 gb_tty->connection = connection; 876 857 gb_tty->gbphy_dev = gbphy_dev; 877 858 gb_connection_set_data(connection, gb_tty); ··· 876 863 877 864 retval = gb_connection_enable_tx(connection); 878 865 if (retval) 879 - goto exit_release_minor; 866 + goto exit_put_port; 880 867 881 868 send_control(gb_tty, gb_tty->ctrlout); 882 869 ··· 903 890 904 891 exit_connection_disable: 905 892 gb_connection_disable(connection); 906 - exit_release_minor: 907 - release_minor(gb_tty); 908 - exit_kfifo_free: 909 - kfifo_free(&gb_tty->write_fifo); 910 - exit_buf_free: 911 - kfree(gb_tty->buffer); 893 + exit_put_port: 894 + tty_port_put(&gb_tty->port); 912 895 exit_connection_destroy: 913 896 gb_connection_destroy(connection); 914 - exit_tty_free: 915 - kfree(gb_tty); 916 897 917 898 return retval; 918 899 } ··· 937 930 gb_connection_disable_rx(connection); 938 931 tty_unregister_device(gb_tty_driver, gb_tty->minor); 939 932 940 - /* FIXME - free transmit / receive buffers */ 941 - 942 933 gb_connection_disable(connection); 943 - tty_port_destroy(&gb_tty->port); 944 934 gb_connection_destroy(connection); 945 - release_minor(gb_tty); 946 - kfifo_free(&gb_tty->write_fifo); 947 - kfree(gb_tty->buffer); 948 - kfree(gb_tty); 935 + 936 + tty_port_put(&gb_tty->port); 949 937 } 950 938 
951 939 static int gb_tty_init(void)
+2
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
··· 1646 1646 default: 1647 1647 return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED; 1648 1648 } 1649 + 1650 + return INPUT_SYSTEM_ERR_NO_ERROR; 1649 1651 } 1650 1652 1651 1653 // Test flags and set structure.
+1 -1
drivers/staging/media/hantro/hantro_drv.c
··· 919 919 if (!vpu->variant->irqs[i].handler) 920 920 continue; 921 921 922 - if (vpu->variant->num_clocks > 1) { 922 + if (vpu->variant->num_irqs > 1) { 923 923 irq_name = vpu->variant->irqs[i].name; 924 924 irq = platform_get_irq_byname(vpu->pdev, irq_name); 925 925 } else {
+1 -1
drivers/staging/media/sunxi/cedrus/cedrus_video.c
··· 135 135 sizeimage = bytesperline * height; 136 136 137 137 /* Chroma plane size. */ 138 - sizeimage += bytesperline * height / 2; 138 + sizeimage += bytesperline * ALIGN(height, 64) / 2; 139 139 140 140 break; 141 141
+4 -4
drivers/staging/r8188eu/os_dep/ioctl_linux.c
··· 5372 5372 5373 5373 pnext++; 5374 5374 if (*pnext != '\0') { 5375 - strtout = simple_strtoul(pnext, &ptmp, 16); 5376 - sprintf(extra, "%s %d", extra, strtout); 5375 + strtout = simple_strtoul(pnext, &ptmp, 16); 5376 + sprintf(extra + strlen(extra), " %d", strtout); 5377 5377 } else { 5378 5378 break; 5379 5379 } ··· 5405 5405 pnext++; 5406 5406 if (*pnext != '\0') { 5407 5407 strtout = simple_strtoul(pnext, &ptmp, 16); 5408 - sprintf(extra, "%s %d", extra, strtout); 5408 + sprintf(extra + strlen(extra), " %d", strtout); 5409 5409 } else { 5410 5410 break; 5411 5411 } ··· 5512 5512 pnext++; 5513 5513 if (*pnext != '\0') { 5514 5514 strtou = simple_strtoul(pnext, &ptmp, 16); 5515 - sprintf(extra, "%s %d", extra, strtou); 5515 + sprintf(extra + strlen(extra), " %d", strtou); 5516 5516 } else { 5517 5517 break; 5518 5518 }
+20 -12
drivers/target/target_core_configfs.c
··· 1110 1110 { 1111 1111 struct se_dev_attrib *da = to_attrib(item); 1112 1112 struct se_device *dev = da->da_dev; 1113 - bool flag; 1113 + bool flag, oldflag; 1114 1114 int ret; 1115 + 1116 + ret = strtobool(page, &flag); 1117 + if (ret < 0) 1118 + return ret; 1119 + 1120 + oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA); 1121 + if (flag == oldflag) 1122 + return count; 1115 1123 1116 1124 if (!(dev->transport->transport_flags_changeable & 1117 1125 TRANSPORT_FLAG_PASSTHROUGH_ALUA)) { 1118 1126 pr_err("dev[%p]: Unable to change SE Device alua_support:" 1119 1127 " alua_support has fixed value\n", dev); 1120 - return -EINVAL; 1128 + return -ENOSYS; 1121 1129 } 1122 - 1123 - ret = strtobool(page, &flag); 1124 - if (ret < 0) 1125 - return ret; 1126 1130 1127 1131 if (flag) 1128 1132 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA; ··· 1149 1145 { 1150 1146 struct se_dev_attrib *da = to_attrib(item); 1151 1147 struct se_device *dev = da->da_dev; 1152 - bool flag; 1148 + bool flag, oldflag; 1153 1149 int ret; 1150 + 1151 + ret = strtobool(page, &flag); 1152 + if (ret < 0) 1153 + return ret; 1154 + 1155 + oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR); 1156 + if (flag == oldflag) 1157 + return count; 1154 1158 1155 1159 if (!(dev->transport->transport_flags_changeable & 1156 1160 TRANSPORT_FLAG_PASSTHROUGH_PGR)) { 1157 1161 pr_err("dev[%p]: Unable to change SE Device pgr_support:" 1158 1162 " pgr_support has fixed value\n", dev); 1159 - return -EINVAL; 1163 + return -ENOSYS; 1160 1164 } 1161 - 1162 - ret = strtobool(page, &flag); 1163 - if (ret < 0) 1164 - return ret; 1165 1165 1166 1166 if (flag) 1167 1167 dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
+1 -1
drivers/target/target_core_pr.c
··· 269 269 spin_lock(&dev->dev_reservation_lock); 270 270 if (dev->reservation_holder && 271 271 dev->reservation_holder->se_node_acl != sess->se_node_acl) { 272 - pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n", 272 + pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n", 273 273 tpg->se_tpg_tfo->fabric_name); 274 274 pr_err("Original reserver LUN: %llu %s\n", 275 275 cmd->se_lun->unpacked_lun,
+1 -1
drivers/tee/optee/shm_pool.c
··· 35 35 unsigned int nr_pages = 1 << order, i; 36 36 struct page **pages; 37 37 38 - pages = kcalloc(nr_pages, sizeof(pages), GFP_KERNEL); 38 + pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL); 39 39 if (!pages) { 40 40 rc = -ENOMEM; 41 41 goto err;
+3 -2
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
··· 107 107 return 0; 108 108 } 109 109 110 - static unsigned int tcc_offset_save; 110 + static int tcc_offset_save = -1; 111 111 112 112 static ssize_t tcc_offset_degree_celsius_store(struct device *dev, 113 113 struct device_attribute *attr, const char *buf, ··· 352 352 proc_dev = dev_get_drvdata(dev); 353 353 proc_thermal_read_ppcc(proc_dev); 354 354 355 - tcc_offset_update(tcc_offset_save); 355 + if (tcc_offset_save >= 0) 356 + tcc_offset_update(tcc_offset_save); 356 357 357 358 return 0; 358 359 }
+2 -2
drivers/thermal/qcom/tsens.c
··· 417 417 const struct tsens_sensor *s = &priv->sensor[i]; 418 418 u32 hw_id = s->hw_id; 419 419 420 - if (IS_ERR(s->tzd)) 420 + if (!s->tzd) 421 421 continue; 422 422 if (!tsens_threshold_violated(priv, hw_id, &d)) 423 423 continue; ··· 467 467 const struct tsens_sensor *s = &priv->sensor[i]; 468 468 u32 hw_id = s->hw_id; 469 469 470 - if (IS_ERR(s->tzd)) 470 + if (!s->tzd) 471 471 continue; 472 472 if (!tsens_threshold_violated(priv, hw_id, &d)) 473 473 continue;
+3 -4
drivers/thermal/thermal_core.c
··· 222 222 { 223 223 struct thermal_governor *pos; 224 224 ssize_t count = 0; 225 - ssize_t size = PAGE_SIZE; 226 225 227 226 mutex_lock(&thermal_governor_lock); 228 227 229 228 list_for_each_entry(pos, &thermal_governor_list, governor_list) { 230 - size = PAGE_SIZE - count; 231 - count += scnprintf(buf + count, size, "%s ", pos->name); 229 + count += scnprintf(buf + count, PAGE_SIZE - count, "%s ", 230 + pos->name); 232 231 } 233 - count += scnprintf(buf + count, size, "\n"); 232 + count += scnprintf(buf + count, PAGE_SIZE - count, "\n"); 234 233 235 234 mutex_unlock(&thermal_governor_lock); 236 235
+1 -1
drivers/tty/serial/8250/8250_omap.c
··· 106 106 #define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6) 107 107 108 108 /* RX FIFO occupancy indicator */ 109 - #define UART_OMAP_RX_LVL 0x64 109 + #define UART_OMAP_RX_LVL 0x19 110 110 111 111 struct omap8250_priv { 112 112 int line;
+1 -1
drivers/tty/serial/mvebu-uart.c
··· 163 163 st = readl(port->membase + UART_STAT); 164 164 spin_unlock_irqrestore(&port->lock, flags); 165 165 166 - return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0; 166 + return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0; 167 167 } 168 168 169 169 static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
-1
drivers/tty/tty_ldisc.c
··· 812 812 813 813 tty_ldisc_debug(tty, "released\n"); 814 814 } 815 - EXPORT_SYMBOL_GPL(tty_ldisc_release); 816 815 817 816 /** 818 817 * tty_ldisc_init - ldisc setup for new tty
+14
drivers/usb/cdns3/cdns3-gadget.c
··· 1100 1100 return 0; 1101 1101 } 1102 1102 1103 + static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep) 1104 + { 1105 + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; 1106 + 1107 + if (priv_dev->dev_ver < DEV_VER_V3) 1108 + return; 1109 + 1110 + if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) { 1111 + writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts); 1112 + writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); 1113 + } 1114 + } 1115 + 1103 1116 /** 1104 1117 * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware 1105 1118 * @priv_ep: endpoint object ··· 1364 1351 /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/ 1365 1352 writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts); 1366 1353 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); 1354 + cdns3_rearm_drdy_if_needed(priv_ep); 1367 1355 trace_cdns3_doorbell_epx(priv_ep->name, 1368 1356 readl(&priv_dev->regs->ep_traddr)); 1369 1357 }
+5 -2
drivers/usb/class/cdc-acm.c
··· 726 726 { 727 727 struct acm *acm = container_of(port, struct acm, port); 728 728 729 - acm_release_minor(acm); 729 + if (acm->minor != ACM_MINOR_INVALID) 730 + acm_release_minor(acm); 730 731 usb_put_intf(acm->control); 731 732 kfree(acm->country_codes); 732 733 kfree(acm); ··· 1324 1323 usb_get_intf(acm->control); /* undone in destruct() */ 1325 1324 1326 1325 minor = acm_alloc_minor(acm); 1327 - if (minor < 0) 1326 + if (minor < 0) { 1327 + acm->minor = ACM_MINOR_INVALID; 1328 1328 goto err_put_port; 1329 + } 1329 1330 1330 1331 acm->minor = minor; 1331 1332 acm->dev = usb_dev;
+2
drivers/usb/class/cdc-acm.h
··· 22 22 #define ACM_TTY_MAJOR 166 23 23 #define ACM_TTY_MINORS 256 24 24 25 + #define ACM_MINOR_INVALID ACM_TTY_MINORS 26 + 25 27 /* 26 28 * Requests. 27 29 */
+46 -24
drivers/usb/core/hcd.c
··· 2761 2761 } 2762 2762 2763 2763 /** 2764 + * usb_stop_hcd - Halt the HCD 2765 + * @hcd: the usb_hcd that has to be halted 2766 + * 2767 + * Stop the root-hub polling timer and invoke the HCD's ->stop callback. 2768 + */ 2769 + static void usb_stop_hcd(struct usb_hcd *hcd) 2770 + { 2771 + hcd->rh_pollable = 0; 2772 + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2773 + del_timer_sync(&hcd->rh_timer); 2774 + 2775 + hcd->driver->stop(hcd); 2776 + hcd->state = HC_STATE_HALT; 2777 + 2778 + /* In case the HCD restarted the timer, stop it again. */ 2779 + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2780 + del_timer_sync(&hcd->rh_timer); 2781 + } 2782 + 2783 + /** 2764 2784 * usb_add_hcd - finish generic HCD structure initialization and register 2765 2785 * @hcd: the usb_hcd structure to initialize 2766 2786 * @irqnum: Interrupt line to allocate ··· 2795 2775 { 2796 2776 int retval; 2797 2777 struct usb_device *rhdev; 2778 + struct usb_hcd *shared_hcd; 2798 2779 2799 2780 if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) { 2800 2781 hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev); ··· 2956 2935 goto err_hcd_driver_start; 2957 2936 } 2958 2937 2959 - /* starting here, usbcore will pay attention to this root hub */ 2960 - retval = register_root_hub(hcd); 2961 - if (retval != 0) 2962 - goto err_register_root_hub; 2938 + /* starting here, usbcore will pay attention to the shared HCD roothub */ 2939 + shared_hcd = hcd->shared_hcd; 2940 + if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) { 2941 + retval = register_root_hub(shared_hcd); 2942 + if (retval != 0) 2943 + goto err_register_root_hub; 2963 2944 2964 - if (hcd->uses_new_polling && HCD_POLL_RH(hcd)) 2965 - usb_hcd_poll_rh_status(hcd); 2945 + if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd)) 2946 + usb_hcd_poll_rh_status(shared_hcd); 2947 + } 2948 + 2949 + /* starting here, usbcore will pay attention to this root hub */ 2950 + if 
(!HCD_DEFER_RH_REGISTER(hcd)) { 2951 + retval = register_root_hub(hcd); 2952 + if (retval != 0) 2953 + goto err_register_root_hub; 2954 + 2955 + if (hcd->uses_new_polling && HCD_POLL_RH(hcd)) 2956 + usb_hcd_poll_rh_status(hcd); 2957 + } 2966 2958 2967 2959 return retval; 2968 2960 2969 2961 err_register_root_hub: 2970 - hcd->rh_pollable = 0; 2971 - clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2972 - del_timer_sync(&hcd->rh_timer); 2973 - hcd->driver->stop(hcd); 2974 - hcd->state = HC_STATE_HALT; 2975 - clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 2976 - del_timer_sync(&hcd->rh_timer); 2962 + usb_stop_hcd(hcd); 2977 2963 err_hcd_driver_start: 2978 2964 if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0) 2979 2965 free_irq(irqnum, hcd); ··· 3013 2985 void usb_remove_hcd(struct usb_hcd *hcd) 3014 2986 { 3015 2987 struct usb_device *rhdev = hcd->self.root_hub; 2988 + bool rh_registered; 3016 2989 3017 2990 dev_info(hcd->self.controller, "remove, state %x\n", hcd->state); 3018 2991 ··· 3024 2995 3025 2996 dev_dbg(hcd->self.controller, "roothub graceful disconnect\n"); 3026 2997 spin_lock_irq (&hcd_root_hub_lock); 2998 + rh_registered = hcd->rh_registered; 3027 2999 hcd->rh_registered = 0; 3028 3000 spin_unlock_irq (&hcd_root_hub_lock); 3029 3001 ··· 3034 3004 cancel_work_sync(&hcd->died_work); 3035 3005 3036 3006 mutex_lock(&usb_bus_idr_lock); 3037 - usb_disconnect(&rhdev); /* Sets rhdev to NULL */ 3007 + if (rh_registered) 3008 + usb_disconnect(&rhdev); /* Sets rhdev to NULL */ 3038 3009 mutex_unlock(&usb_bus_idr_lock); 3039 3010 3040 3011 /* ··· 3053 3022 * interrupt occurs), but usb_hcd_poll_rh_status() won't invoke 3054 3023 * the hub_status_data() callback. 3055 3024 */ 3056 - hcd->rh_pollable = 0; 3057 - clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 3058 - del_timer_sync(&hcd->rh_timer); 3059 - 3060 - hcd->driver->stop(hcd); 3061 - hcd->state = HC_STATE_HALT; 3062 - 3063 - /* In case the HCD restarted the timer, stop it again. 
*/ 3064 - clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 3065 - del_timer_sync(&hcd->rh_timer); 3025 + usb_stop_hcd(hcd); 3066 3026 3067 3027 if (usb_hcd_is_primary_hcd(hcd)) { 3068 3028 if (hcd->irq > 0)
+108 -87
drivers/usb/dwc2/gadget.c
··· 115 115 */ 116 116 static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep) 117 117 { 118 + struct dwc2_hsotg *hsotg = hs_ep->parent; 119 + u16 limit = DSTS_SOFFN_LIMIT; 120 + 121 + if (hsotg->gadget.speed != USB_SPEED_HIGH) 122 + limit >>= 3; 123 + 118 124 hs_ep->target_frame += hs_ep->interval; 119 - if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) { 125 + if (hs_ep->target_frame > limit) { 120 126 hs_ep->frame_overrun = true; 121 - hs_ep->target_frame &= DSTS_SOFFN_LIMIT; 127 + hs_ep->target_frame &= limit; 122 128 } else { 123 129 hs_ep->frame_overrun = false; 124 130 } ··· 142 136 */ 143 137 static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep) 144 138 { 139 + struct dwc2_hsotg *hsotg = hs_ep->parent; 140 + u16 limit = DSTS_SOFFN_LIMIT; 141 + 142 + if (hsotg->gadget.speed != USB_SPEED_HIGH) 143 + limit >>= 3; 144 + 145 145 if (hs_ep->target_frame) 146 146 hs_ep->target_frame -= 1; 147 147 else 148 - hs_ep->target_frame = DSTS_SOFFN_LIMIT; 148 + hs_ep->target_frame = limit; 149 149 } 150 150 151 151 /** ··· 1030 1018 dwc2_writel(hsotg, ctrl, depctl); 1031 1019 } 1032 1020 1021 + static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep); 1022 + static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg, 1023 + struct dwc2_hsotg_ep *hs_ep, 1024 + struct dwc2_hsotg_req *hs_req, 1025 + int result); 1026 + 1033 1027 /** 1034 1028 * dwc2_hsotg_start_req - start a USB request from an endpoint's queue 1035 1029 * @hsotg: The controller state. 
··· 1188 1170 } 1189 1171 } 1190 1172 1191 - if (hs_ep->isochronous && hs_ep->interval == 1) { 1192 - hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg); 1193 - dwc2_gadget_incr_frame_num(hs_ep); 1194 - 1195 - if (hs_ep->target_frame & 0x1) 1196 - ctrl |= DXEPCTL_SETODDFR; 1197 - else 1198 - ctrl |= DXEPCTL_SETEVENFR; 1173 + if (hs_ep->isochronous) { 1174 + if (!dwc2_gadget_target_frame_elapsed(hs_ep)) { 1175 + if (hs_ep->interval == 1) { 1176 + if (hs_ep->target_frame & 0x1) 1177 + ctrl |= DXEPCTL_SETODDFR; 1178 + else 1179 + ctrl |= DXEPCTL_SETEVENFR; 1180 + } 1181 + ctrl |= DXEPCTL_CNAK; 1182 + } else { 1183 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); 1184 + return; 1185 + } 1199 1186 } 1200 1187 1201 1188 ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */ ··· 1348 1325 u32 target_frame = hs_ep->target_frame; 1349 1326 u32 current_frame = hsotg->frame_number; 1350 1327 bool frame_overrun = hs_ep->frame_overrun; 1328 + u16 limit = DSTS_SOFFN_LIMIT; 1329 + 1330 + if (hsotg->gadget.speed != USB_SPEED_HIGH) 1331 + limit >>= 3; 1351 1332 1352 1333 if (!frame_overrun && current_frame >= target_frame) 1353 1334 return true; 1354 1335 1355 1336 if (frame_overrun && current_frame >= target_frame && 1356 - ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2)) 1337 + ((current_frame - target_frame) < limit / 2)) 1357 1338 return true; 1358 1339 1359 1340 return false; ··· 1740 1713 */ 1741 1714 static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep) 1742 1715 { 1743 - u32 mask; 1744 1716 struct dwc2_hsotg *hsotg = hs_ep->parent; 1745 1717 int dir_in = hs_ep->dir_in; 1746 1718 struct dwc2_hsotg_req *hs_req; 1747 - u32 epmsk_reg = dir_in ? 
DIEPMSK : DOEPMSK; 1748 1719 1749 1720 if (!list_empty(&hs_ep->queue)) { 1750 1721 hs_req = get_ep_head(hs_ep); ··· 1758 1733 } else { 1759 1734 dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n", 1760 1735 __func__); 1761 - mask = dwc2_readl(hsotg, epmsk_reg); 1762 - mask |= DOEPMSK_OUTTKNEPDISMSK; 1763 - dwc2_writel(hsotg, mask, epmsk_reg); 1764 1736 } 1765 1737 } 1766 1738 ··· 2328 2306 dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]); 2329 2307 } 2330 2308 2331 - static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, 2332 - u32 epctl_reg) 2333 - { 2334 - u32 ctrl; 2335 - 2336 - ctrl = dwc2_readl(hsotg, epctl_reg); 2337 - if (ctrl & DXEPCTL_EOFRNUM) 2338 - ctrl |= DXEPCTL_SETEVENFR; 2339 - else 2340 - ctrl |= DXEPCTL_SETODDFR; 2341 - dwc2_writel(hsotg, ctrl, epctl_reg); 2342 - } 2343 - 2344 2309 /* 2345 2310 * dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc 2346 2311 * @hs_ep - The endpoint on which transfer went ··· 2448 2439 dwc2_hsotg_ep0_zlp(hsotg, true); 2449 2440 } 2450 2441 2451 - /* 2452 - * Slave mode OUT transfers do not go through XferComplete so 2453 - * adjust the ISOC parity here. 
2454 - */ 2455 - if (!using_dma(hsotg)) { 2456 - if (hs_ep->isochronous && hs_ep->interval == 1) 2457 - dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum)); 2458 - else if (hs_ep->isochronous && hs_ep->interval > 1) 2459 - dwc2_gadget_incr_frame_num(hs_ep); 2460 - } 2461 - 2462 2442 /* Set actual frame number for completed transfers */ 2463 - if (!using_desc_dma(hsotg) && hs_ep->isochronous) 2464 - req->frame_number = hsotg->frame_number; 2443 + if (!using_desc_dma(hsotg) && hs_ep->isochronous) { 2444 + req->frame_number = hs_ep->target_frame; 2445 + dwc2_gadget_incr_frame_num(hs_ep); 2446 + } 2465 2447 2466 2448 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result); 2467 2449 } ··· 2766 2766 return; 2767 2767 } 2768 2768 2769 + /* Set actual frame number for completed transfers */ 2770 + if (!using_desc_dma(hsotg) && hs_ep->isochronous) { 2771 + hs_req->req.frame_number = hs_ep->target_frame; 2772 + dwc2_gadget_incr_frame_num(hs_ep); 2773 + } 2774 + 2769 2775 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0); 2770 2776 } 2771 2777 ··· 2832 2826 2833 2827 dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index); 2834 2828 2835 - if (hs_ep->isochronous) { 2836 - dwc2_hsotg_complete_in(hsotg, hs_ep); 2837 - return; 2838 - } 2839 - 2840 2829 if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) { 2841 2830 int dctl = dwc2_readl(hsotg, DCTL); 2842 2831 2843 2832 dctl |= DCTL_CGNPINNAK; 2844 2833 dwc2_writel(hsotg, dctl, DCTL); 2845 2834 } 2846 - return; 2847 - } 2835 + } else { 2848 2836 2849 - if (dctl & DCTL_GOUTNAKSTS) { 2850 - dctl |= DCTL_CGOUTNAK; 2851 - dwc2_writel(hsotg, dctl, DCTL); 2837 + if (dctl & DCTL_GOUTNAKSTS) { 2838 + dctl |= DCTL_CGOUTNAK; 2839 + dwc2_writel(hsotg, dctl, DCTL); 2840 + } 2852 2841 } 2853 2842 2854 2843 if (!hs_ep->isochronous) ··· 2864 2863 /* Update current frame number value. 
*/ 2865 2864 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); 2866 2865 } while (dwc2_gadget_target_frame_elapsed(hs_ep)); 2867 - 2868 - dwc2_gadget_start_next_request(hs_ep); 2869 2866 } 2870 2867 2871 2868 /** ··· 2880 2881 static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep) 2881 2882 { 2882 2883 struct dwc2_hsotg *hsotg = ep->parent; 2884 + struct dwc2_hsotg_req *hs_req; 2883 2885 int dir_in = ep->dir_in; 2884 - u32 doepmsk; 2885 2886 2886 2887 if (dir_in || !ep->isochronous) 2887 2888 return; ··· 2895 2896 return; 2896 2897 } 2897 2898 2898 - if (ep->interval > 1 && 2899 - ep->target_frame == TARGET_FRAME_INITIAL) { 2899 + if (ep->target_frame == TARGET_FRAME_INITIAL) { 2900 2900 u32 ctrl; 2901 2901 2902 2902 ep->target_frame = hsotg->frame_number; 2903 - dwc2_gadget_incr_frame_num(ep); 2903 + if (ep->interval > 1) { 2904 + ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index)); 2905 + if (ep->target_frame & 0x1) 2906 + ctrl |= DXEPCTL_SETODDFR; 2907 + else 2908 + ctrl |= DXEPCTL_SETEVENFR; 2904 2909 2905 - ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index)); 2906 - if (ep->target_frame & 0x1) 2907 - ctrl |= DXEPCTL_SETODDFR; 2908 - else 2909 - ctrl |= DXEPCTL_SETEVENFR; 2910 - 2911 - dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index)); 2910 + dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index)); 2911 + } 2912 2912 } 2913 2913 2914 - dwc2_gadget_start_next_request(ep); 2915 - doepmsk = dwc2_readl(hsotg, DOEPMSK); 2916 - doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK; 2917 - dwc2_writel(hsotg, doepmsk, DOEPMSK); 2914 + while (dwc2_gadget_target_frame_elapsed(ep)) { 2915 + hs_req = get_ep_head(ep); 2916 + if (hs_req) 2917 + dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA); 2918 + 2919 + dwc2_gadget_incr_frame_num(ep); 2920 + /* Update current frame number value. 
*/ 2921 + hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); 2922 + } 2923 + 2924 + if (!ep->req) 2925 + dwc2_gadget_start_next_request(ep); 2926 + 2918 2927 } 2928 + 2929 + static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg, 2930 + struct dwc2_hsotg_ep *hs_ep); 2919 2931 2920 2932 /** 2921 2933 * dwc2_gadget_handle_nak - handle NAK interrupt ··· 2945 2935 static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) 2946 2936 { 2947 2937 struct dwc2_hsotg *hsotg = hs_ep->parent; 2938 + struct dwc2_hsotg_req *hs_req; 2948 2939 int dir_in = hs_ep->dir_in; 2940 + u32 ctrl; 2949 2941 2950 2942 if (!dir_in || !hs_ep->isochronous) 2951 2943 return; ··· 2989 2977 2990 2978 dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index)); 2991 2979 } 2992 - 2993 - dwc2_hsotg_complete_request(hsotg, hs_ep, 2994 - get_ep_head(hs_ep), 0); 2995 2980 } 2996 2981 2997 - if (!using_desc_dma(hsotg)) 2982 + if (using_desc_dma(hsotg)) 2983 + return; 2984 + 2985 + ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index)); 2986 + if (ctrl & DXEPCTL_EPENA) 2987 + dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep); 2988 + else 2989 + dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index); 2990 + 2991 + while (dwc2_gadget_target_frame_elapsed(hs_ep)) { 2992 + hs_req = get_ep_head(hs_ep); 2993 + if (hs_req) 2994 + dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA); 2995 + 2998 2996 dwc2_gadget_incr_frame_num(hs_ep); 2997 + /* Update current frame number value. 
*/ 2998 + hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg); 2999 + } 3000 + 3001 + if (!hs_ep->req) 3002 + dwc2_gadget_start_next_request(hs_ep); 2999 3003 } 3000 3004 3001 3005 /** ··· 3067 3039 3068 3040 /* In DDMA handle isochronous requests separately */ 3069 3041 if (using_desc_dma(hsotg) && hs_ep->isochronous) { 3070 - /* XferCompl set along with BNA */ 3071 - if (!(ints & DXEPINT_BNAINTR)) 3072 - dwc2_gadget_complete_isoc_request_ddma(hs_ep); 3042 + dwc2_gadget_complete_isoc_request_ddma(hs_ep); 3073 3043 } else if (dir_in) { 3074 3044 /* 3075 3045 * We get OutDone from the FIFO, so we only 3076 3046 * need to look at completing IN requests here 3077 3047 * if operating slave mode 3078 3048 */ 3079 - if (hs_ep->isochronous && hs_ep->interval > 1) 3080 - dwc2_gadget_incr_frame_num(hs_ep); 3081 - 3082 - dwc2_hsotg_complete_in(hsotg, hs_ep); 3083 - if (ints & DXEPINT_NAKINTRPT) 3084 - ints &= ~DXEPINT_NAKINTRPT; 3049 + if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT)) 3050 + dwc2_hsotg_complete_in(hsotg, hs_ep); 3085 3051 3086 3052 if (idx == 0 && !hs_ep->req) 3087 3053 dwc2_hsotg_enqueue_setup(hsotg); ··· 3084 3062 * We're using DMA, we need to fire an OutDone here 3085 3063 * as we ignore the RXFIFO. 3086 3064 */ 3087 - if (hs_ep->isochronous && hs_ep->interval > 1) 3088 - dwc2_gadget_incr_frame_num(hs_ep); 3089 - 3090 - dwc2_hsotg_handle_outdone(hsotg, idx); 3065 + if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS)) 3066 + dwc2_hsotg_handle_outdone(hsotg, idx); 3091 3067 } 3092 3068 } 3093 3069 ··· 4105 4085 mask |= DIEPMSK_NAKMSK; 4106 4086 dwc2_writel(hsotg, mask, DIEPMSK); 4107 4087 } else { 4088 + epctrl |= DXEPCTL_SNAK; 4108 4089 mask = dwc2_readl(hsotg, DOEPMSK); 4109 4090 mask |= DOEPMSK_OUTTKNEPDISMSK; 4110 4091 dwc2_writel(hsotg, mask, DOEPMSK);
+4
drivers/usb/dwc2/hcd.c
··· 5191 5191 hcd->has_tt = 1; 5192 5192 5193 5193 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 5194 + if (!res) { 5195 + retval = -EINVAL; 5196 + goto error1; 5197 + } 5194 5198 hcd->rsrc_start = res->start; 5195 5199 hcd->rsrc_len = resource_size(res); 5196 5200
+13 -17
drivers/usb/dwc3/core.c
··· 264 264 { 265 265 u32 reg; 266 266 int retries = 1000; 267 - int ret; 268 - 269 - usb_phy_init(dwc->usb2_phy); 270 - usb_phy_init(dwc->usb3_phy); 271 - ret = phy_init(dwc->usb2_generic_phy); 272 - if (ret < 0) 273 - return ret; 274 - 275 - ret = phy_init(dwc->usb3_generic_phy); 276 - if (ret < 0) { 277 - phy_exit(dwc->usb2_generic_phy); 278 - return ret; 279 - } 280 267 281 268 /* 282 269 * We're resetting only the device side because, if we're in host mode, ··· 296 309 else 297 310 udelay(1); 298 311 } while (--retries); 299 - 300 - phy_exit(dwc->usb3_generic_phy); 301 - phy_exit(dwc->usb2_generic_phy); 302 312 303 313 return -ETIMEDOUT; 304 314 ··· 966 982 dwc->phys_ready = true; 967 983 } 968 984 985 + usb_phy_init(dwc->usb2_phy); 986 + usb_phy_init(dwc->usb3_phy); 987 + ret = phy_init(dwc->usb2_generic_phy); 988 + if (ret < 0) 989 + goto err0a; 990 + 991 + ret = phy_init(dwc->usb3_generic_phy); 992 + if (ret < 0) { 993 + phy_exit(dwc->usb2_generic_phy); 994 + goto err0a; 995 + } 996 + 969 997 ret = dwc3_core_soft_reset(dwc); 970 998 if (ret) 971 - goto err0a; 999 + goto err1; 972 1000 973 1001 if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && 974 1002 !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
+18 -1
drivers/usb/gadget/function/f_uac2.c
··· 406 406 .bInterval = 4, 407 407 }; 408 408 409 + static struct usb_ss_ep_comp_descriptor ss_epin_fback_desc_comp = { 410 + .bLength = sizeof(ss_epin_fback_desc_comp), 411 + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, 412 + .bMaxBurst = 0, 413 + .bmAttributes = 0, 414 + .wBytesPerInterval = cpu_to_le16(4), 415 + }; 416 + 409 417 410 418 /* Audio Streaming IN Interface - Alt0 */ 411 419 static struct usb_interface_descriptor std_as_in_if0_desc = { ··· 605 597 (struct usb_descriptor_header *)&ss_epout_desc_comp, 606 598 (struct usb_descriptor_header *)&as_iso_out_desc, 607 599 (struct usb_descriptor_header *)&ss_epin_fback_desc, 600 + (struct usb_descriptor_header *)&ss_epin_fback_desc_comp, 608 601 609 602 (struct usb_descriptor_header *)&std_as_in_if0_desc, 610 603 (struct usb_descriptor_header *)&std_as_in_if1_desc, ··· 714 705 { 715 706 struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL; 716 707 struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL; 708 + struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL; 717 709 struct usb_endpoint_descriptor *epout_desc; 718 710 struct usb_endpoint_descriptor *epin_desc; 719 711 struct usb_endpoint_descriptor *epin_fback_desc; ··· 740 730 epout_desc_comp = &ss_epout_desc_comp; 741 731 epin_desc_comp = &ss_epin_desc_comp; 742 732 epin_fback_desc = &ss_epin_fback_desc; 733 + epin_fback_desc_comp = &ss_epin_fback_desc_comp; 743 734 ep_int_desc = &ss_ep_int_desc; 744 735 } 745 736 ··· 784 773 785 774 headers[i++] = USBDHDR(&as_iso_out_desc); 786 775 787 - if (EPOUT_FBACK_IN_EN(opts)) 776 + if (EPOUT_FBACK_IN_EN(opts)) { 788 777 headers[i++] = USBDHDR(epin_fback_desc); 778 + if (epin_fback_desc_comp) 779 + headers[i++] = USBDHDR(epin_fback_desc_comp); 780 + } 789 781 } 790 782 791 783 if (EPIN_EN(opts)) { ··· 1177 1163 le16_to_cpu(ss_epin_desc.wMaxPacketSize)); 1178 1164 agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize, 1179 1165 le16_to_cpu(ss_epout_desc.wMaxPacketSize)); 1166 + 1167 + 
ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize; 1168 + ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize; 1180 1169 1181 1170 // HS and SS endpoint addresses are copied from autoconfigured FS descriptors 1182 1171 hs_ep_int_desc.bEndpointAddress = fs_ep_int_desc.bEndpointAddress;
+10 -3
drivers/usb/gadget/function/u_audio.c
··· 96 96 }; 97 97 98 98 static void u_audio_set_fback_frequency(enum usb_device_speed speed, 99 + struct usb_ep *out_ep, 99 100 unsigned long long freq, 100 101 unsigned int pitch, 101 102 void *buf) 102 103 { 103 104 u32 ff = 0; 105 + const struct usb_endpoint_descriptor *ep_desc; 104 106 105 107 /* 106 108 * Because the pitch base is 1000000, the final divider here ··· 130 128 * byte fromat (that is Q16.16) 131 129 * 132 130 * ff = (freq << 16) / 8000 131 + * 132 + * Win10 and OSX UAC2 drivers require number of samples per packet 133 + * in order to honor the feedback value. 134 + * Linux snd-usb-audio detects the applied bit-shift automatically. 133 135 */ 134 - freq <<= 4; 136 + ep_desc = out_ep->desc; 137 + freq <<= 4 + (ep_desc->bInterval - 1); 135 138 } 136 139 137 140 ff = DIV_ROUND_CLOSEST_ULL((freq * pitch), 1953125); ··· 274 267 pr_debug("%s: iso_complete status(%d) %d/%d\n", 275 268 __func__, status, req->actual, req->length); 276 269 277 - u_audio_set_fback_frequency(audio_dev->gadget->speed, 270 + u_audio_set_fback_frequency(audio_dev->gadget->speed, audio_dev->out_ep, 278 271 params->c_srate, prm->pitch, 279 272 req->buf); 280 273 ··· 533 526 * be meauserd at start of playback 534 527 */ 535 528 prm->pitch = 1000000; 536 - u_audio_set_fback_frequency(audio_dev->gadget->speed, 529 + u_audio_set_fback_frequency(audio_dev->gadget->speed, ep, 537 530 params->c_srate, prm->pitch, 538 531 req_fback->buf); 539 532
+1 -1
drivers/usb/gadget/udc/r8a66597-udc.c
··· 1250 1250 do { 1251 1251 tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ; 1252 1252 udelay(1); 1253 - } while (tmp != CS_IDST || timeout-- > 0); 1253 + } while (tmp != CS_IDST && timeout-- > 0); 1254 1254 1255 1255 if (tmp == CS_IDST) 1256 1256 r8a66597_bset(r8a66597,
+1 -4
drivers/usb/host/bcma-hcd.c
··· 406 406 return -ENOMEM; 407 407 usb_dev->core = core; 408 408 409 - if (core->dev.of_node) { 409 + if (core->dev.of_node) 410 410 usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc", 411 411 GPIOD_OUT_HIGH); 412 - if (IS_ERR(usb_dev->gpio_desc)) 413 - return PTR_ERR(usb_dev->gpio_desc); 414 - } 415 412 416 413 switch (core->id.id) { 417 414 case BCMA_CORE_USB20_HOST:
+27 -48
drivers/usb/host/ehci-hcd.c
··· 26 26 #include <linux/moduleparam.h> 27 27 #include <linux/dma-mapping.h> 28 28 #include <linux/debugfs.h> 29 + #include <linux/platform_device.h> 29 30 #include <linux/slab.h> 30 31 31 32 #include <asm/byteorder.h> ··· 1279 1278 1280 1279 #ifdef CONFIG_USB_EHCI_SH 1281 1280 #include "ehci-sh.c" 1282 - #define PLATFORM_DRIVER ehci_hcd_sh_driver 1283 1281 #endif 1284 1282 1285 1283 #ifdef CONFIG_PPC_PS3 1286 1284 #include "ehci-ps3.c" 1287 - #define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver 1288 1285 #endif 1289 1286 1290 1287 #ifdef CONFIG_USB_EHCI_HCD_PPC_OF 1291 1288 #include "ehci-ppc-of.c" 1292 - #define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver 1293 1289 #endif 1294 1290 1295 1291 #ifdef CONFIG_XPS_USB_HCD_XILINX 1296 1292 #include "ehci-xilinx-of.c" 1297 - #define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver 1298 1293 #endif 1299 1294 1300 1295 #ifdef CONFIG_SPARC_LEON 1301 1296 #include "ehci-grlib.c" 1302 - #define PLATFORM_DRIVER ehci_grlib_driver 1303 1297 #endif 1298 + 1299 + static struct platform_driver * const platform_drivers[] = { 1300 + #ifdef CONFIG_USB_EHCI_SH 1301 + &ehci_hcd_sh_driver, 1302 + #endif 1303 + #ifdef CONFIG_USB_EHCI_HCD_PPC_OF 1304 + &ehci_hcd_ppc_of_driver, 1305 + #endif 1306 + #ifdef CONFIG_XPS_USB_HCD_XILINX 1307 + &ehci_hcd_xilinx_of_driver, 1308 + #endif 1309 + #ifdef CONFIG_SPARC_LEON 1310 + &ehci_grlib_driver, 1311 + #endif 1312 + }; 1304 1313 1305 1314 static int __init ehci_hcd_init(void) 1306 1315 { ··· 1335 1324 ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root); 1336 1325 #endif 1337 1326 1338 - #ifdef PLATFORM_DRIVER 1339 - retval = platform_driver_register(&PLATFORM_DRIVER); 1327 + retval = platform_register_drivers(platform_drivers, ARRAY_SIZE(platform_drivers)); 1340 1328 if (retval < 0) 1341 1329 goto clean0; 1342 - #endif 1343 1330 1344 - #ifdef PS3_SYSTEM_BUS_DRIVER 1345 - retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER); 1331 + #ifdef CONFIG_PPC_PS3 1332 + retval = 
ps3_ehci_driver_register(&ps3_ehci_driver); 1346 1333 if (retval < 0) 1347 - goto clean2; 1334 + goto clean1; 1348 1335 #endif 1349 1336 1350 - #ifdef OF_PLATFORM_DRIVER 1351 - retval = platform_driver_register(&OF_PLATFORM_DRIVER); 1352 - if (retval < 0) 1353 - goto clean3; 1354 - #endif 1337 + return 0; 1355 1338 1356 - #ifdef XILINX_OF_PLATFORM_DRIVER 1357 - retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER); 1358 - if (retval < 0) 1359 - goto clean4; 1339 + #ifdef CONFIG_PPC_PS3 1340 + clean1: 1360 1341 #endif 1361 - return retval; 1362 - 1363 - #ifdef XILINX_OF_PLATFORM_DRIVER 1364 - /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */ 1365 - clean4: 1366 - #endif 1367 - #ifdef OF_PLATFORM_DRIVER 1368 - platform_driver_unregister(&OF_PLATFORM_DRIVER); 1369 - clean3: 1370 - #endif 1371 - #ifdef PS3_SYSTEM_BUS_DRIVER 1372 - ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); 1373 - clean2: 1374 - #endif 1375 - #ifdef PLATFORM_DRIVER 1376 - platform_driver_unregister(&PLATFORM_DRIVER); 1342 + platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers)); 1377 1343 clean0: 1378 - #endif 1379 1344 #ifdef CONFIG_DYNAMIC_DEBUG 1380 1345 debugfs_remove(ehci_debug_root); 1381 1346 ehci_debug_root = NULL; ··· 1363 1376 1364 1377 static void __exit ehci_hcd_cleanup(void) 1365 1378 { 1366 - #ifdef XILINX_OF_PLATFORM_DRIVER 1367 - platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); 1379 + #ifdef CONFIG_PPC_PS3 1380 + ps3_ehci_driver_unregister(&ps3_ehci_driver); 1368 1381 #endif 1369 - #ifdef OF_PLATFORM_DRIVER 1370 - platform_driver_unregister(&OF_PLATFORM_DRIVER); 1371 - #endif 1372 - #ifdef PLATFORM_DRIVER 1373 - platform_driver_unregister(&PLATFORM_DRIVER); 1374 - #endif 1375 - #ifdef PS3_SYSTEM_BUS_DRIVER 1376 - ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); 1377 - #endif 1382 + platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers)); 1378 1383 #ifdef CONFIG_DYNAMIC_DEBUG 1379 1384 
debugfs_remove(ehci_debug_root); 1380 1385 #endif
+2 -70
drivers/usb/host/ohci-omap.c
··· 40 40 #include <mach/usb.h> 41 41 42 42 43 - /* OMAP-1510 OHCI has its own MMU for DMA */ 44 - #define OMAP1510_LB_MEMSIZE 32 /* Should be same as SDRAM size */ 45 - #define OMAP1510_LB_CLOCK_DIV 0xfffec10c 46 - #define OMAP1510_LB_MMU_CTL 0xfffec208 47 - #define OMAP1510_LB_MMU_LCK 0xfffec224 48 - #define OMAP1510_LB_MMU_LD_TLB 0xfffec228 49 - #define OMAP1510_LB_MMU_CAM_H 0xfffec22c 50 - #define OMAP1510_LB_MMU_CAM_L 0xfffec230 51 - #define OMAP1510_LB_MMU_RAM_H 0xfffec234 52 - #define OMAP1510_LB_MMU_RAM_L 0xfffec238 53 - 54 43 #define DRIVER_DESC "OHCI OMAP driver" 55 44 56 45 struct ohci_omap_priv { ··· 92 103 93 104 return 0; 94 105 } 95 - 96 - #ifdef CONFIG_ARCH_OMAP15XX 97 - /* 98 - * OMAP-1510 specific Local Bus clock on/off 99 - */ 100 - static int omap_1510_local_bus_power(int on) 101 - { 102 - if (on) { 103 - omap_writel((1 << 1) | (1 << 0), OMAP1510_LB_MMU_CTL); 104 - udelay(200); 105 - } else { 106 - omap_writel(0, OMAP1510_LB_MMU_CTL); 107 - } 108 - 109 - return 0; 110 - } 111 - 112 - /* 113 - * OMAP-1510 specific Local Bus initialization 114 - * NOTE: This assumes 32MB memory size in OMAP1510LB_MEMSIZE. 115 - * See also arch/mach-omap/memory.h for __virt_to_dma() and 116 - * __dma_to_virt() which need to match with the physical 117 - * Local Bus address below. 
118 - */ 119 - static int omap_1510_local_bus_init(void) 120 - { 121 - unsigned int tlb; 122 - unsigned long lbaddr, physaddr; 123 - 124 - omap_writel((omap_readl(OMAP1510_LB_CLOCK_DIV) & 0xfffffff8) | 0x4, 125 - OMAP1510_LB_CLOCK_DIV); 126 - 127 - /* Configure the Local Bus MMU table */ 128 - for (tlb = 0; tlb < OMAP1510_LB_MEMSIZE; tlb++) { 129 - lbaddr = tlb * 0x00100000 + OMAP1510_LB_OFFSET; 130 - physaddr = tlb * 0x00100000 + PHYS_OFFSET; 131 - omap_writel((lbaddr & 0x0fffffff) >> 22, OMAP1510_LB_MMU_CAM_H); 132 - omap_writel(((lbaddr & 0x003ffc00) >> 6) | 0xc, 133 - OMAP1510_LB_MMU_CAM_L); 134 - omap_writel(physaddr >> 16, OMAP1510_LB_MMU_RAM_H); 135 - omap_writel((physaddr & 0x0000fc00) | 0x300, OMAP1510_LB_MMU_RAM_L); 136 - omap_writel(tlb << 4, OMAP1510_LB_MMU_LCK); 137 - omap_writel(0x1, OMAP1510_LB_MMU_LD_TLB); 138 - } 139 - 140 - /* Enable the walking table */ 141 - omap_writel(omap_readl(OMAP1510_LB_MMU_CTL) | (1 << 3), OMAP1510_LB_MMU_CTL); 142 - udelay(200); 143 - 144 - return 0; 145 - } 146 - #else 147 - #define omap_1510_local_bus_power(x) {} 148 - #define omap_1510_local_bus_init() {} 149 - #endif 150 106 151 107 #ifdef CONFIG_USB_OTG 152 108 ··· 163 229 164 230 omap_ohci_clock_power(priv, 1); 165 231 166 - if (cpu_is_omap15xx()) { 167 - omap_1510_local_bus_power(1); 168 - omap_1510_local_bus_init(); 169 - } 232 + if (config->lb_reset) 233 + config->lb_reset(); 170 234 171 235 ret = ohci_setup(hcd); 172 236 if (ret < 0)
+1
drivers/usb/host/xhci.c
··· 692 692 if (ret) 693 693 xhci_free_command(xhci, command); 694 694 } 695 + set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags); 695 696 xhci_dbg_trace(xhci, trace_xhci_dbg_init, 696 697 "Finished xhci_run for USB2 roothub"); 697 698
+1
drivers/usb/musb/tusb6010.c
··· 190 190 } 191 191 if (len > 0) { 192 192 /* Write the rest 1 - 3 bytes to FIFO */ 193 + val = 0; 193 194 memcpy(&val, buf, len); 194 195 musb_writel(fifo, 0, val); 195 196 }
+38
drivers/usb/serial/cp210x.c
··· 233 233 { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */ 234 234 { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */ 235 235 { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */ 236 + { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */ 236 237 { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */ 237 238 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ 238 239 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ ··· 259 258 speed_t max_speed; 260 259 bool use_actual_rate; 261 260 bool no_flow_control; 261 + bool no_event_mode; 262 262 }; 263 263 264 264 enum cp210x_event_state { ··· 1115 1113 1116 1114 static void cp210x_enable_event_mode(struct usb_serial_port *port) 1117 1115 { 1116 + struct cp210x_serial_private *priv = usb_get_serial_data(port->serial); 1118 1117 struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); 1119 1118 int ret; 1120 1119 1121 1120 if (port_priv->event_mode) 1121 + return; 1122 + 1123 + if (priv->no_event_mode) 1122 1124 return; 1123 1125 1124 1126 port_priv->event_state = ES_DATA; ··· 2080 2074 priv->use_actual_rate = use_actual_rate; 2081 2075 } 2082 2076 2077 + static void cp2102_determine_quirks(struct usb_serial *serial) 2078 + { 2079 + struct cp210x_serial_private *priv = usb_get_serial_data(serial); 2080 + u8 *buf; 2081 + int ret; 2082 + 2083 + buf = kmalloc(2, GFP_KERNEL); 2084 + if (!buf) 2085 + return; 2086 + /* 2087 + * Some (possibly counterfeit) CP2102 do not support event-insertion 2088 + * mode and respond differently to malformed vendor requests. 2089 + * Specifically, they return one instead of two bytes when sent a 2090 + * two-byte part-number request. 
2091 + */ 2092 + ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 2093 + CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST, 2094 + CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT); 2095 + if (ret == 1) { 2096 + dev_dbg(&serial->interface->dev, 2097 + "device does not support event-insertion mode\n"); 2098 + priv->no_event_mode = true; 2099 + } 2100 + 2101 + kfree(buf); 2102 + } 2103 + 2083 2104 static int cp210x_get_fw_version(struct usb_serial *serial, u16 value) 2084 2105 { 2085 2106 struct cp210x_serial_private *priv = usb_get_serial_data(serial); ··· 2141 2108 return; 2142 2109 } 2143 2110 2111 + dev_dbg(&serial->interface->dev, "partnum = 0x%02x\n", priv->partnum); 2112 + 2144 2113 switch (priv->partnum) { 2114 + case CP210X_PARTNUM_CP2102: 2115 + cp2102_determine_quirks(serial); 2116 + break; 2145 2117 case CP210X_PARTNUM_CP2105: 2146 2118 case CP210X_PARTNUM_CP2108: 2147 2119 cp210x_get_fw_version(serial, CP210X_GET_FW_VER);
-2
drivers/usb/serial/mos7840.c
··· 107 107 #define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02 108 108 #define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 109 109 #define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03 110 - #define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 111 110 112 111 /* Interrupt Routine Defines */ 113 112 ··· 185 186 { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) }, 186 187 { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) }, 187 188 { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) }, 188 - { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) }, 189 189 {} /* terminating entry */ 190 190 }; 191 191 MODULE_DEVICE_TABLE(usb, id_table);
+10 -1
drivers/usb/serial/option.c
··· 1205 1205 .driver_info = NCTRL(0) | RSVD(1) }, 1206 1206 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */ 1207 1207 .driver_info = NCTRL(2) | RSVD(3) }, 1208 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */ 1209 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, 1210 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */ 1211 + .driver_info = NCTRL(0) | RSVD(1) }, 1212 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff), /* Telit LN920 (RNDIS) */ 1213 + .driver_info = NCTRL(2) | RSVD(3) }, 1214 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */ 1215 + .driver_info = NCTRL(0) | RSVD(1) }, 1208 1216 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), 1209 1217 .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, 1210 1218 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), ··· 1658 1650 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, 1659 1651 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, 1660 1652 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, 1661 - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, 1662 1653 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff), 1663 1654 .driver_info = RSVD(1) }, 1664 1655 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff), ··· 2075 2068 .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, 2076 2069 { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ 2077 2070 .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, 2071 + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */ 2072 + .driver_info = RSVD(3) }, 2078 2073 { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ 2079 2074 .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, 2080 2075 { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
+8 -1
drivers/usb/storage/unusual_devs.h
··· 416 416 USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN), 417 417 418 418 /* 419 - * Reported by Ondrej Zary <linux@rainbow-software.org> 419 + * Reported by Ondrej Zary <linux@zary.sk> 420 420 * The device reports one sector more and breaks when that sector is accessed 421 + * Firmwares older than 2.6c (the latest one and the only that claims Linux 422 + * support) have also broken tag handling 421 423 */ 424 + UNUSUAL_DEV( 0x04ce, 0x0002, 0x0000, 0x026b, 425 + "ScanLogic", 426 + "SL11R-IDE", 427 + USB_SC_DEVICE, USB_PR_DEVICE, NULL, 428 + US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG), 422 429 UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c, 423 430 "ScanLogic", 424 431 "SL11R-IDE",
+1 -1
drivers/usb/storage/unusual_uas.h
··· 50 50 "LaCie", 51 51 "Rugged USB3-FW", 52 52 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 53 - US_FL_IGNORE_UAS), 53 + US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), 54 54 55 55 /* 56 56 * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+5
drivers/vdpa/mlx5/net/mlx5_vnet.c
··· 1714 1714 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); 1715 1715 struct mlx5_vdpa_virtqueue *mvq; 1716 1716 1717 + if (!mvdev->actual_features) 1718 + return; 1719 + 1717 1720 if (!is_index_valid(mvdev, idx)) 1718 1721 return; 1719 1722 ··· 2148 2145 2149 2146 for (i = 0; i < ndev->mvdev.max_vqs; i++) 2150 2147 ndev->vqs[i].ready = false; 2148 + 2149 + ndev->mvdev.cvq.ready = false; 2151 2150 } 2152 2151 2153 2152 static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
+5 -5
drivers/vdpa/vdpa_user/vduse_dev.c
··· 665 665 static int vduse_vdpa_reset(struct vdpa_device *vdpa) 666 666 { 667 667 struct vduse_dev *dev = vdpa_to_vduse(vdpa); 668 - 669 - if (vduse_dev_set_status(dev, 0)) 670 - return -EIO; 668 + int ret = vduse_dev_set_status(dev, 0); 671 669 672 670 vduse_dev_reset(dev); 673 671 674 - return 0; 672 + return ret; 675 673 } 676 674 677 675 static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa) ··· 1591 1593 1592 1594 vduse_irq_wq = alloc_workqueue("vduse-irq", 1593 1595 WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0); 1594 - if (!vduse_irq_wq) 1596 + if (!vduse_irq_wq) { 1597 + ret = -ENOMEM; 1595 1598 goto err_wq; 1599 + } 1596 1600 1597 1601 ret = vduse_domain_init(); 1598 1602 if (ret)
+1 -1
drivers/vfio/pci/vfio_pci_core.c
··· 565 565 } 566 566 567 567 struct vfio_pci_walk_info { 568 - int (*fn)(struct pci_dev *, void *data); 568 + int (*fn)(struct pci_dev *pdev, void *data); 569 569 void *data; 570 570 struct pci_dev *pdev; 571 571 bool slot;
+1 -1
drivers/vhost/vdpa.c
··· 640 640 u64 offset, map_size, map_iova = iova; 641 641 struct vdpa_map_file *map_file; 642 642 struct vm_area_struct *vma; 643 - int ret; 643 + int ret = 0; 644 644 645 645 mmap_read_lock(dev->mm); 646 646
+6 -1
drivers/virtio/virtio.c
··· 345 345 ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device); 346 346 BUG_ON(ret >= sizeof(compat)); 347 347 348 + /* 349 + * On powerpc/pseries virtio devices are PCI devices so PCI 350 + * vendor/device ids play the role of the "compatible" property. 351 + * Simply don't init of_node in this case. 352 + */ 348 353 if (!of_device_is_compatible(np, compat)) { 349 - ret = -EINVAL; 354 + ret = 0; 350 355 goto out; 351 356 } 352 357
+1 -1
drivers/watchdog/Kconfig
··· 1666 1666 1667 1667 config SIBYTE_WDOG 1668 1668 tristate "Sibyte SoC hardware watchdog" 1669 - depends on CPU_SB1 || (MIPS && COMPILE_TEST) 1669 + depends on CPU_SB1 1670 1670 help 1671 1671 Watchdog driver for the built in watchdog hardware in Sibyte 1672 1672 SoC processors. There are apparently two watchdog timers
+1
drivers/xen/Kconfig
··· 177 177 178 178 config SWIOTLB_XEN 179 179 def_bool y 180 + depends on XEN_PV || ARM || ARM64 180 181 select DMA_OPS 181 182 select SWIOTLB 182 183
+2 -2
drivers/xen/balloon.c
··· 522 522 timeout = 3600 * HZ; 523 523 credit = current_credit(); 524 524 525 - wait_event_interruptible_timeout(balloon_thread_wq, 526 - balloon_thread_cond(state, credit), timeout); 525 + wait_event_freezable_timeout(balloon_thread_wq, 526 + balloon_thread_cond(state, credit), timeout); 527 527 528 528 if (kthread_should_stop()) 529 529 return 0;
+8
drivers/xen/gntdev.c
··· 381 381 map->unmap_ops[offset+i].handle, 382 382 map->unmap_ops[offset+i].status); 383 383 map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; 384 + if (use_ptemod) { 385 + if (map->kunmap_ops[offset+i].status) 386 + err = -EINVAL; 387 + pr_debug("kunmap handle=%u st=%d\n", 388 + map->kunmap_ops[offset+i].handle, 389 + map->kunmap_ops[offset+i].status); 390 + map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; 391 + } 384 392 } 385 393 return err; 386 394 }
+4 -3
drivers/xen/swiotlb-xen.c
··· 230 230 /* 231 231 * Get IO TLB memory from any location. 232 232 */ 233 - start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE); 233 + start = memblock_alloc(PAGE_ALIGN(bytes), 234 + IO_TLB_SEGSIZE << IO_TLB_SHIFT); 234 235 if (!start) 235 - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", 236 - __func__, PAGE_ALIGN(bytes), PAGE_SIZE); 236 + panic("%s: Failed to allocate %lu bytes\n", 237 + __func__, PAGE_ALIGN(bytes)); 237 238 238 239 /* 239 240 * And replace that memory with pages under 4GB.
+4 -4
fs/9p/cache.c
··· 23 23 .version = 0, 24 24 }; 25 25 26 - /** 26 + /* 27 27 * v9fs_random_cachetag - Generate a random tag to be associated 28 28 * with a new cache session. 29 29 * ··· 233 233 unlock_page(page); 234 234 } 235 235 236 - /** 236 + /* 237 237 * __v9fs_readpage_from_fscache - read a page from cache 238 238 * 239 239 * Returns 0 if the pages are in cache and a BIO is submitted, ··· 268 268 } 269 269 } 270 270 271 - /** 271 + /* 272 272 * __v9fs_readpages_from_fscache - read multiple pages from cache 273 273 * 274 274 * Returns 0 if the pages are in cache and a BIO is submitted, ··· 308 308 } 309 309 } 310 310 311 - /** 311 + /* 312 312 * __v9fs_readpage_to_fscache - write a page to the cache 313 313 * 314 314 */
+7 -7
fs/9p/fid.c
··· 19 19 #include "v9fs_vfs.h" 20 20 #include "fid.h" 21 21 22 + static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid) 23 + { 24 + hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata); 25 + } 26 + 27 + 22 28 /** 23 29 * v9fs_fid_add - add a fid to a dentry 24 30 * @dentry: dentry that the fid is being added to 25 31 * @fid: fid to add 26 32 * 27 33 */ 28 - 29 - static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid) 30 - { 31 - hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata); 32 - } 33 - 34 34 void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid) 35 35 { 36 36 spin_lock(&dentry->d_lock); ··· 67 67 68 68 /** 69 69 * v9fs_open_fid_add - add an open fid to an inode 70 - * @dentry: inode that the fid is being added to 70 + * @inode: inode that the fid is being added to 71 71 * @fid: fid to add 72 72 * 73 73 */
+3 -5
fs/9p/v9fs.c
··· 155 155 /** 156 156 * v9fs_parse_options - parse mount options into session structure 157 157 * @v9ses: existing v9fs session information 158 + * @opts: The mount option string 158 159 * 159 160 * Return 0 upon success, -ERRNO upon failure. 160 161 */ ··· 543 542 static struct kobject *v9fs_kobj; 544 543 545 544 #ifdef CONFIG_9P_FSCACHE 546 - /** 547 - * caches_show - list caches associated with a session 548 - * 549 - * Returns the size of buffer written. 545 + /* 546 + * List caches associated with a session 550 547 */ 551 - 552 548 static ssize_t caches_show(struct kobject *kobj, 553 549 struct kobj_attribute *attr, 554 550 char *buf)
+9 -5
fs/9p/vfs_addr.c
··· 30 30 31 31 /** 32 32 * v9fs_fid_readpage - read an entire page in from 9P 33 - * 34 - * @fid: fid being read 33 + * @data: Opaque pointer to the fid being read 35 34 * @page: structure to page 36 35 * 37 36 */ ··· 115 116 116 117 /** 117 118 * v9fs_release_page - release the private state associated with a page 119 + * @page: The page to be released 120 + * @gfp: The caller's allocation restrictions 118 121 * 119 122 * Returns 1 if the page can be released, false otherwise. 120 123 */ ··· 130 129 131 130 /** 132 131 * v9fs_invalidate_page - Invalidate a page completely or partially 133 - * 134 - * @page: structure to page 135 - * @offset: offset in the page 132 + * @page: The page to be invalidated 133 + * @offset: offset of the invalidated region 134 + * @length: length of the invalidated region 136 135 */ 137 136 138 137 static void v9fs_invalidate_page(struct page *page, unsigned int offset, ··· 200 199 201 200 /** 202 201 * v9fs_launder_page - Writeback a dirty page 202 + * @page: The page to be cleaned up 203 + * 203 204 * Returns 0 on success. 204 205 */ 205 206 ··· 222 219 /** 223 220 * v9fs_direct_IO - 9P address space operation for direct I/O 224 221 * @iocb: target I/O control block 222 + * @iter: The data/buffer to use 225 223 * 226 224 * The presence of v9fs_direct_IO() in the address space ops vector 227 225 * allowes open() O_DIRECT flags which would have failed otherwise.
+12 -21
fs/9p/vfs_file.c
··· 359 359 } 360 360 361 361 /** 362 - * v9fs_file_read - read from a file 363 - * @filp: file pointer to read 364 - * @udata: user data buffer to read data into 365 - * @count: size of buffer 366 - * @offset: offset at which to read data 362 + * v9fs_file_read_iter - read from a file 363 + * @iocb: The operation parameters 364 + * @to: The buffer to read into 367 365 * 368 366 */ 369 - 370 367 static ssize_t 371 368 v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 372 369 { ··· 385 388 } 386 389 387 390 /** 388 - * v9fs_file_write - write to a file 389 - * @filp: file pointer to write 390 - * @data: data buffer to write data from 391 - * @count: size of buffer 392 - * @offset: offset at which to write data 391 + * v9fs_file_write_iter - write to a file 392 + * @iocb: The operation parameters 393 + * @from: The data to write 393 394 * 394 395 */ 395 396 static ssize_t ··· 556 561 } 557 562 558 563 /** 559 - * v9fs_mmap_file_read - read from a file 560 - * @filp: file pointer to read 561 - * @data: user data buffer to read data into 562 - * @count: size of buffer 563 - * @offset: offset at which to read data 564 + * v9fs_mmap_file_read_iter - read from a file 565 + * @iocb: The operation parameters 566 + * @to: The buffer to read into 564 567 * 565 568 */ 566 569 static ssize_t ··· 569 576 } 570 577 571 578 /** 572 - * v9fs_mmap_file_write - write to a file 573 - * @filp: file pointer to write 574 - * @data: data buffer to write data from 575 - * @count: size of buffer 576 - * @offset: offset at which to write data 579 + * v9fs_mmap_file_write_iter - write to a file 580 + * @iocb: The operation parameters 581 + * @from: The data to write 577 582 * 578 583 */ 579 584 static ssize_t
+16 -8
fs/9p/vfs_inode.c
··· 218 218 219 219 /** 220 220 * v9fs_alloc_inode - helper function to allocate an inode 221 - * 221 + * @sb: The superblock to allocate the inode from 222 222 */ 223 223 struct inode *v9fs_alloc_inode(struct super_block *sb) 224 224 { ··· 238 238 239 239 /** 240 240 * v9fs_free_inode - destroy an inode 241 - * 241 + * @inode: The inode to be freed 242 242 */ 243 243 244 244 void v9fs_free_inode(struct inode *inode) ··· 343 343 * v9fs_get_inode - helper function to setup an inode 344 344 * @sb: superblock 345 345 * @mode: mode to setup inode with 346 - * 346 + * @rdev: The device numbers to set 347 347 */ 348 348 349 349 struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev) ··· 369 369 } 370 370 371 371 /** 372 - * v9fs_clear_inode - release an inode 372 + * v9fs_evict_inode - Remove an inode from the inode cache 373 373 * @inode: inode to release 374 374 * 375 375 */ ··· 665 665 666 666 /** 667 667 * v9fs_vfs_create - VFS hook to create a regular file 668 + * @mnt_userns: The user namespace of the mount 669 + * @dir: The parent directory 670 + * @dentry: The name of file to be created 671 + * @mode: The UNIX file mode to set 672 + * @excl: True if the file must not yet exist 668 673 * 669 674 * open(.., O_CREAT) is handled in v9fs_vfs_atomic_open(). This is only called 670 675 * for mknod(2). 
671 - * 672 - * @dir: directory inode that is being created 673 - * @dentry: dentry that is being deleted 674 - * @mode: create permissions 675 676 * 676 677 */ 677 678 ··· 697 696 698 697 /** 699 698 * v9fs_vfs_mkdir - VFS mkdir hook to create a directory 699 + * @mnt_userns: The user namespace of the mount 700 700 * @dir: inode that is being unlinked 701 701 * @dentry: dentry that is being unlinked 702 702 * @mode: mode for new directory ··· 902 900 903 901 /** 904 902 * v9fs_vfs_rename - VFS hook to rename an inode 903 + * @mnt_userns: The user namespace of the mount 905 904 * @old_dir: old dir inode 906 905 * @old_dentry: old dentry 907 906 * @new_dir: new dir inode 908 907 * @new_dentry: new dentry 908 + * @flags: RENAME_* flags 909 909 * 910 910 */ 911 911 ··· 1013 1009 1014 1010 /** 1015 1011 * v9fs_vfs_getattr - retrieve file metadata 1012 + * @mnt_userns: The user namespace of the mount 1016 1013 * @path: Object to query 1017 1014 * @stat: metadata structure to populate 1018 1015 * @request_mask: Mask of STATX_xxx flags indicating the caller's interests ··· 1055 1050 1056 1051 /** 1057 1052 * v9fs_vfs_setattr - set file metadata 1053 + * @mnt_userns: The user namespace of the mount 1058 1054 * @dentry: file whose metadata to set 1059 1055 * @iattr: metadata assignment structure 1060 1056 * ··· 1291 1285 1292 1286 /** 1293 1287 * v9fs_vfs_symlink - helper function to create symlinks 1288 + * @mnt_userns: The user namespace of the mount 1294 1289 * @dir: directory inode containing symlink 1295 1290 * @dentry: dentry for symlink 1296 1291 * @symname: symlink data ··· 1347 1340 1348 1341 /** 1349 1342 * v9fs_vfs_mknod - create a special file 1343 + * @mnt_userns: The user namespace of the mount 1350 1344 * @dir: inode destination for new link 1351 1345 * @dentry: dentry for file 1352 1346 * @mode: mode for creation
+9 -2
fs/9p/vfs_inode_dotl.c
··· 37 37 struct dentry *dentry, umode_t omode, dev_t rdev); 38 38 39 39 /** 40 - * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a 40 + * v9fs_get_fsgid_for_create - Helper function to get the gid for a new object 41 + * @dir_inode: The directory inode 42 + * 43 + * Helper function to get the gid for creating a 41 44 * new file system object. This checks the S_ISGID to determine the owning 42 45 * group of the new file system object. 43 46 */ ··· 214 211 215 212 /** 216 213 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. 214 + * @mnt_userns: The user namespace of the mount 217 215 * @dir: directory inode that is being created 218 216 * @dentry: dentry that is being deleted 219 217 * @omode: create permissions 218 + * @excl: True if the file must not yet exist 220 219 * 221 220 */ 222 - 223 221 static int 224 222 v9fs_vfs_create_dotl(struct user_namespace *mnt_userns, struct inode *dir, 225 223 struct dentry *dentry, umode_t omode, bool excl) ··· 365 361 366 362 /** 367 363 * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory 364 + * @mnt_userns: The user namespace of the mount 368 365 * @dir: inode that is being unlinked 369 366 * @dentry: dentry that is being unlinked 370 367 * @omode: mode for new directory ··· 542 537 543 538 /** 544 539 * v9fs_vfs_setattr_dotl - set file metadata 540 + * @mnt_userns: The user namespace of the mount 545 541 * @dentry: file whose metadata to set 546 542 * @iattr: metadata assignment structure 547 543 * ··· 822 816 823 817 /** 824 818 * v9fs_vfs_mknod_dotl - create a special file 819 + * @mnt_userns: The user namespace of the mount 825 820 * @dir: inode destination for new link 826 821 * @dentry: dentry for file 827 822 * @omode: mode for creation
+2 -2
fs/afs/dir_silly.c
··· 86 86 return afs_do_sync_operation(op); 87 87 } 88 88 89 - /** 90 - * afs_sillyrename - Perform a silly-rename of a dentry 89 + /* 90 + * Perform silly-rename of a dentry. 91 91 * 92 92 * AFS is stateless and the server doesn't know when the client is holding a 93 93 * file open. To prevent application problems when a file is unlinked while
+1 -2
fs/afs/write.c
··· 974 974 iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len); 975 975 976 976 trace_afs_page_dirty(vnode, tracepoint_string("launder"), page); 977 - ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE, 978 - true); 977 + ret = afs_store_data(vnode, &iter, page_offset(page) + f, true); 979 978 } 980 979 981 980 trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
+1 -1
fs/binfmt_elf.c
··· 630 630 631 631 vaddr = eppnt->p_vaddr; 632 632 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) 633 - elf_type |= MAP_FIXED_NOREPLACE; 633 + elf_type |= MAP_FIXED; 634 634 else if (no_base && interp_elf_ex->e_type == ET_DYN) 635 635 load_addr = -vaddr; 636 636
+12 -1
fs/btrfs/file-item.c
··· 665 665 666 666 if (!ordered) { 667 667 ordered = btrfs_lookup_ordered_extent(inode, offset); 668 - BUG_ON(!ordered); /* Logic error */ 668 + /* 669 + * The bio range is not covered by any ordered extent, 670 + * must be a code logic error. 671 + */ 672 + if (unlikely(!ordered)) { 673 + WARN(1, KERN_WARNING 674 + "no ordered extent for root %llu ino %llu offset %llu\n", 675 + inode->root->root_key.objectid, 676 + btrfs_ino(inode), offset); 677 + kvfree(sums); 678 + return BLK_STS_IOERR; 679 + } 669 680 } 670 681 671 682 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
+3 -2
fs/btrfs/space-info.c
··· 414 414 { 415 415 lockdep_assert_held(&info->lock); 416 416 417 - btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull", 417 + /* The free space could be negative in case of overcommit */ 418 + btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull", 418 419 info->flags, 419 - info->total_bytes - btrfs_space_info_used(info, true), 420 + (s64)(info->total_bytes - btrfs_space_info_used(info, true)), 420 421 info->full ? "" : "not "); 421 422 btrfs_info(fs_info, 422 423 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
+4 -2
fs/btrfs/verity.c
··· 451 451 */ 452 452 static int rollback_verity(struct btrfs_inode *inode) 453 453 { 454 - struct btrfs_trans_handle *trans; 454 + struct btrfs_trans_handle *trans = NULL; 455 455 struct btrfs_root *root = inode->root; 456 456 int ret; 457 457 ··· 473 473 trans = btrfs_start_transaction(root, 2); 474 474 if (IS_ERR(trans)) { 475 475 ret = PTR_ERR(trans); 476 + trans = NULL; 476 477 btrfs_handle_fs_error(root->fs_info, ret, 477 478 "failed to start transaction in verity rollback %llu", 478 479 (u64)inode->vfs_inode.i_ino); ··· 491 490 btrfs_abort_transaction(trans, ret); 492 491 goto out; 493 492 } 494 - btrfs_end_transaction(trans); 495 493 out: 494 + if (trans) 495 + btrfs_end_transaction(trans); 496 496 return ret; 497 497 } 498 498
+13
fs/btrfs/volumes.c
··· 1137 1137 atomic_set(&device->dev_stats_ccnt, 0); 1138 1138 extent_io_tree_release(&device->alloc_state); 1139 1139 1140 + /* 1141 + * Reset the flush error record. We might have a transient flush error 1142 + * in this mount, and if so we aborted the current transaction and set 1143 + * the fs to an error state, guaranteeing no super blocks can be further 1144 + * committed. However that error might be transient and if we unmount the 1145 + * filesystem and mount it again, we should allow the mount to succeed 1146 + * (btrfs_check_rw_degradable() should not fail) - if after mounting the 1147 + * filesystem again we still get flush errors, then we will again abort 1148 + * any transaction and set the error state, guaranteeing no commits of 1149 + * unsafe super blocks. 1150 + */ 1151 + device->last_flush_error = 0; 1152 + 1140 1153 /* Verify the device is back in a pristine state */ 1141 1154 ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)); 1142 1155 ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
+6 -2
fs/buffer.c
··· 1425 1425 } 1426 1426 EXPORT_SYMBOL_GPL(invalidate_bh_lrus); 1427 1427 1428 - void invalidate_bh_lrus_cpu(int cpu) 1428 + /* 1429 + * It's called from workqueue context so we need a bh_lru_lock to close 1430 + * the race with preemption/irq. 1431 + */ 1432 + void invalidate_bh_lrus_cpu(void) 1429 1433 { 1430 1434 struct bh_lru *b; 1431 1435 1432 1436 bh_lru_lock(); 1433 - b = per_cpu_ptr(&bh_lrus, cpu); 1437 + b = this_cpu_ptr(&bh_lrus); 1434 1438 __invalidate_bh_lrus(b); 1435 1439 bh_lru_unlock(); 1436 1440 }
+2 -2
fs/ceph/caps.c
··· 2263 2263 list_for_each_entry(req, &ci->i_unsafe_dirops, 2264 2264 r_unsafe_dir_item) { 2265 2265 s = req->r_session; 2266 - if (unlikely(s->s_mds > max)) { 2266 + if (unlikely(s->s_mds >= max)) { 2267 2267 spin_unlock(&ci->i_unsafe_lock); 2268 2268 goto retry; 2269 2269 } ··· 2277 2277 list_for_each_entry(req, &ci->i_unsafe_iops, 2278 2278 r_unsafe_target_item) { 2279 2279 s = req->r_session; 2280 - if (unlikely(s->s_mds > max)) { 2280 + if (unlikely(s->s_mds >= max)) { 2281 2281 spin_unlock(&ci->i_unsafe_lock); 2282 2282 goto retry; 2283 2283 }
+3 -2
fs/cifs/connect.c
··· 2389 2389 spin_lock(&cifs_tcp_ses_lock); 2390 2390 cifs_sb = CIFS_SB(sb); 2391 2391 tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); 2392 - if (IS_ERR(tlink)) { 2392 + if (tlink == NULL) { 2393 + /* can not match superblock if tlink were ever null */ 2393 2394 spin_unlock(&cifs_tcp_ses_lock); 2394 - return rc; 2395 + return 0; 2395 2396 } 2396 2397 tcon = tlink_tcon(tlink); 2397 2398 ses = tcon->ses;
+2 -2
fs/cifs/file.c
··· 884 884 cinode->lease_granted && 885 885 !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) && 886 886 dclose) { 887 - if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) { 887 + if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) { 888 888 inode->i_ctime = inode->i_mtime = current_time(inode); 889 889 cifs_fscache_update_inode_cookie(inode); 890 890 } ··· 3113 3113 struct cifs_tcon *tcon; 3114 3114 struct cifs_sb_info *cifs_sb; 3115 3115 struct dentry *dentry = ctx->cfile->dentry; 3116 - int rc; 3116 + ssize_t rc; 3117 3117 3118 3118 tcon = tlink_tcon(ctx->cfile->tlink); 3119 3119 cifs_sb = CIFS_SB(dentry->d_sb);
+14 -3
fs/cifs/misc.c
··· 264 264 265 265 /* Uid is not converted */ 266 266 buffer->Uid = treeCon->ses->Suid; 267 - buffer->Mid = get_next_mid(treeCon->ses->server); 267 + if (treeCon->ses->server) 268 + buffer->Mid = get_next_mid(treeCon->ses->server); 268 269 } 269 270 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) 270 271 buffer->Flags2 |= SMBFLG2_DFS; ··· 591 590 592 591 /** 593 592 * cifs_queue_oplock_break - queue the oplock break handler for cfile 593 + * @cfile: The file to break the oplock on 594 594 * 595 595 * This function is called from the demultiplex thread when it 596 596 * receives an oplock break for @cfile. ··· 1067 1065 1068 1066 /** 1069 1067 * cifs_alloc_hash - allocate hash and hash context together 1068 + * @name: The name of the crypto hash algo 1069 + * @shash: Where to put the pointer to the hash algo 1070 + * @sdesc: Where to put the pointer to the hash descriptor 1070 1071 * 1071 1072 * The caller has to make sure @sdesc is initialized to either NULL or 1072 1073 * a valid context. Both can be freed via cifs_free_hash(). ··· 1108 1103 1109 1104 /** 1110 1105 * cifs_free_hash - free hash and hash context together 1106 + * @shash: Where to find the pointer to the hash algo 1107 + * @sdesc: Where to find the pointer to the hash descriptor 1111 1108 * 1112 1109 * Freeing a NULL hash or context is safe. 
1113 1110 */ ··· 1125 1118 1126 1119 /** 1127 1120 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst 1128 - * Input: rqst - a smb_rqst, page - a page index for rqst 1129 - * Output: *len - the length for this page, *offset - the offset for this page 1121 + * @rqst: The request descriptor 1122 + * @page: The index of the page to query 1123 + * @len: Where to store the length for this page: 1124 + * @offset: Where to store the offset for this page 1130 1125 */ 1131 1126 void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page, 1132 1127 unsigned int *len, unsigned int *offset) ··· 1161 1152 1162 1153 /** 1163 1154 * copy_path_name - copy src path to dst, possibly truncating 1155 + * @dst: The destination buffer 1156 + * @src: The source name 1164 1157 * 1165 1158 * returns number of bytes written (including trailing nul) 1166 1159 */
+2 -2
fs/cifs/smb2pdu.c
··· 2397 2397 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2398 2398 /* Ship the ACL for now. we will copy it into buf later. */ 2399 2399 aclptr = ptr; 2400 - ptr += sizeof(struct cifs_acl); 2400 + ptr += sizeof(struct smb3_acl); 2401 2401 2402 2402 /* create one ACE to hold the mode embedded in reserved special SID */ 2403 2403 acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode); ··· 2422 2422 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */ 2423 2423 acl.AclSize = cpu_to_le16(acl_size); 2424 2424 acl.AceCount = cpu_to_le16(ace_count); 2425 - memcpy(aclptr, &acl, sizeof(struct cifs_acl)); 2425 + memcpy(aclptr, &acl, sizeof(struct smb3_acl)); 2426 2426 2427 2427 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd); 2428 2428 *len = roundup(ptr - (__u8 *)buf, 8);
+1 -1
fs/debugfs/inode.c
··· 528 528 { 529 529 struct dentry *de = debugfs_create_file(name, mode, parent, data, fops); 530 530 531 - if (de) 531 + if (!IS_ERR(de)) 532 532 d_inode(de)->i_size = file_size; 533 533 } 534 534 EXPORT_SYMBOL_GPL(debugfs_create_file_size);
+1 -1
fs/erofs/inode.c
··· 176 176 } 177 177 178 178 if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { 179 - if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_ALL)) { 179 + if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) { 180 180 erofs_err(inode->i_sb, 181 181 "unsupported chunk format %x of nid %llu", 182 182 vi->chunkformat, vi->nid);
+2 -1
fs/erofs/zmap.c
··· 369 369 if (compacted_4b_initial == 32 / 4) 370 370 compacted_4b_initial = 0; 371 371 372 - if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) 372 + if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) && 373 + compacted_4b_initial < totalidx) 373 374 compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); 374 375 else 375 376 compacted_2b = 0;
+6 -8
fs/ext2/balloc.c
··· 48 48 struct ext2_sb_info *sbi = EXT2_SB(sb); 49 49 50 50 if (block_group >= sbi->s_groups_count) { 51 - ext2_error (sb, "ext2_get_group_desc", 52 - "block_group >= groups_count - " 53 - "block_group = %d, groups_count = %lu", 54 - block_group, sbi->s_groups_count); 51 + WARN(1, "block_group >= groups_count - " 52 + "block_group = %d, groups_count = %lu", 53 + block_group, sbi->s_groups_count); 55 54 56 55 return NULL; 57 56 } ··· 58 59 group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(sb); 59 60 offset = block_group & (EXT2_DESC_PER_BLOCK(sb) - 1); 60 61 if (!sbi->s_group_desc[group_desc]) { 61 - ext2_error (sb, "ext2_get_group_desc", 62 - "Group descriptor not loaded - " 63 - "block_group = %d, group_desc = %lu, desc = %lu", 64 - block_group, group_desc, offset); 62 + WARN(1, "Group descriptor not loaded - " 63 + "block_group = %d, group_desc = %lu, desc = %lu", 64 + block_group, group_desc, offset); 65 65 return NULL; 66 66 } 67 67
+3 -3
fs/ext4/dir.c
··· 551 551 struct dir_private_info *info = file->private_data; 552 552 struct inode *inode = file_inode(file); 553 553 struct fname *fname; 554 - int ret; 554 + int ret = 0; 555 555 556 556 if (!info) { 557 557 info = ext4_htree_create_dir_info(file, ctx->pos); ··· 599 599 info->curr_minor_hash, 600 600 &info->next_hash); 601 601 if (ret < 0) 602 - return ret; 602 + goto finished; 603 603 if (ret == 0) { 604 604 ctx->pos = ext4_get_htree_eof(file); 605 605 break; ··· 630 630 } 631 631 finished: 632 632 info->last_pos = ctx->pos; 633 - return 0; 633 + return ret < 0 ? ret : 0; 634 634 } 635 635 636 636 static int ext4_release_dir(struct inode *inode, struct file *filp)
-3
fs/ext4/ext4.h
··· 3593 3593 unsigned flags, 3594 3594 struct page **pagep, 3595 3595 void **fsdata); 3596 - extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, 3597 - unsigned len, unsigned copied, 3598 - struct page *page); 3599 3596 extern int ext4_try_add_inline_entry(handle_t *handle, 3600 3597 struct ext4_filename *fname, 3601 3598 struct inode *dir, struct inode *inode);
+14 -5
fs/ext4/extents.c
··· 5916 5916 } 5917 5917 5918 5918 /* Check if *cur is a hole and if it is, skip it */ 5919 - static void skip_hole(struct inode *inode, ext4_lblk_t *cur) 5919 + static int skip_hole(struct inode *inode, ext4_lblk_t *cur) 5920 5920 { 5921 5921 int ret; 5922 5922 struct ext4_map_blocks map; ··· 5925 5925 map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur; 5926 5926 5927 5927 ret = ext4_map_blocks(NULL, inode, &map, 0); 5928 + if (ret < 0) 5929 + return ret; 5928 5930 if (ret != 0) 5929 - return; 5931 + return 0; 5930 5932 *cur = *cur + map.m_len; 5933 + return 0; 5931 5934 } 5932 5935 5933 5936 /* Count number of blocks used by this inode and update i_blocks */ ··· 5979 5976 * iblocks by total number of differences found. 5980 5977 */ 5981 5978 cur = 0; 5982 - skip_hole(inode, &cur); 5979 + ret = skip_hole(inode, &cur); 5980 + if (ret < 0) 5981 + goto out; 5983 5982 path = ext4_find_extent(inode, cur, NULL, 0); 5984 5983 if (IS_ERR(path)) 5985 5984 goto out; ··· 6000 5995 } 6001 5996 cur = max(cur + 1, le32_to_cpu(ex->ee_block) + 6002 5997 ext4_ext_get_actual_len(ex)); 6003 - skip_hole(inode, &cur); 6004 - 5998 + ret = skip_hole(inode, &cur); 5999 + if (ret < 0) { 6000 + ext4_ext_drop_refs(path); 6001 + kfree(path); 6002 + break; 6003 + } 6005 6004 path2 = ext4_find_extent(inode, cur, NULL, 0); 6006 6005 if (IS_ERR(path2)) { 6007 6006 ext4_ext_drop_refs(path);
+6
fs/ext4/fast_commit.c
··· 892 892 sizeof(lrange), (u8 *)&lrange, crc)) 893 893 return -ENOSPC; 894 894 } else { 895 + unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ? 896 + EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN; 897 + 898 + /* Limit the number of blocks in one extent */ 899 + map.m_len = min(max, map.m_len); 900 + 895 901 fc_ext.fc_ino = cpu_to_le32(inode->i_ino); 896 902 ex = (struct ext4_extent *)&fc_ext.fc_ex; 897 903 ex->ee_block = cpu_to_le32(map.m_lblk);
+85 -65
fs/ext4/inline.c
··· 7 7 #include <linux/iomap.h> 8 8 #include <linux/fiemap.h> 9 9 #include <linux/iversion.h> 10 + #include <linux/backing-dev.h> 10 11 11 12 #include "ext4_jbd2.h" 12 13 #include "ext4.h" ··· 734 733 int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, 735 734 unsigned copied, struct page *page) 736 735 { 737 - int ret, no_expand; 736 + handle_t *handle = ext4_journal_current_handle(); 737 + int no_expand; 738 738 void *kaddr; 739 739 struct ext4_iloc iloc; 740 + int ret = 0, ret2; 740 741 741 - if (unlikely(copied < len)) { 742 - if (!PageUptodate(page)) { 743 - copied = 0; 742 + if (unlikely(copied < len) && !PageUptodate(page)) 743 + copied = 0; 744 + 745 + if (likely(copied)) { 746 + ret = ext4_get_inode_loc(inode, &iloc); 747 + if (ret) { 748 + unlock_page(page); 749 + put_page(page); 750 + ext4_std_error(inode->i_sb, ret); 744 751 goto out; 745 752 } 746 - } 753 + ext4_write_lock_xattr(inode, &no_expand); 754 + BUG_ON(!ext4_has_inline_data(inode)); 747 755 748 - ret = ext4_get_inode_loc(inode, &iloc); 749 - if (ret) { 750 - ext4_std_error(inode->i_sb, ret); 751 - copied = 0; 752 - goto out; 753 - } 756 + /* 757 + * ei->i_inline_off may have changed since 758 + * ext4_write_begin() called 759 + * ext4_try_to_write_inline_data() 760 + */ 761 + (void) ext4_find_inline_data_nolock(inode); 754 762 755 - ext4_write_lock_xattr(inode, &no_expand); 756 - BUG_ON(!ext4_has_inline_data(inode)); 763 + kaddr = kmap_atomic(page); 764 + ext4_write_inline_data(inode, &iloc, kaddr, pos, copied); 765 + kunmap_atomic(kaddr); 766 + SetPageUptodate(page); 767 + /* clear page dirty so that writepages wouldn't work for us. */ 768 + ClearPageDirty(page); 769 + 770 + ext4_write_unlock_xattr(inode, &no_expand); 771 + brelse(iloc.bh); 772 + 773 + /* 774 + * It's important to update i_size while still holding page 775 + * lock: page writeout could otherwise come in and zero 776 + * beyond i_size. 
777 + */ 778 + ext4_update_inode_size(inode, pos + copied); 779 + } 780 + unlock_page(page); 781 + put_page(page); 757 782 758 783 /* 759 - * ei->i_inline_off may have changed since ext4_write_begin() 760 - * called ext4_try_to_write_inline_data() 784 + * Don't mark the inode dirty under page lock. First, it unnecessarily 785 + * makes the holding time of page lock longer. Second, it forces lock 786 + * ordering of page lock and transaction start for journaling 787 + * filesystems. 761 788 */ 762 - (void) ext4_find_inline_data_nolock(inode); 763 - 764 - kaddr = kmap_atomic(page); 765 - ext4_write_inline_data(inode, &iloc, kaddr, pos, len); 766 - kunmap_atomic(kaddr); 767 - SetPageUptodate(page); 768 - /* clear page dirty so that writepages wouldn't work for us. */ 769 - ClearPageDirty(page); 770 - 771 - ext4_write_unlock_xattr(inode, &no_expand); 772 - brelse(iloc.bh); 773 - mark_inode_dirty(inode); 789 + if (likely(copied)) 790 + mark_inode_dirty(inode); 774 791 out: 775 - return copied; 792 + /* 793 + * If we didn't copy as much data as expected, we need to trim back 794 + * size of xattr containing inline data. 795 + */ 796 + if (pos + len > inode->i_size && ext4_can_truncate(inode)) 797 + ext4_orphan_add(handle, inode); 798 + 799 + ret2 = ext4_journal_stop(handle); 800 + if (!ret) 801 + ret = ret2; 802 + if (pos + len > inode->i_size) { 803 + ext4_truncate_failed_write(inode); 804 + /* 805 + * If truncate failed early the inode might still be 806 + * on the orphan list; we need to make sure the inode 807 + * is removed from the orphan list in that case. 808 + */ 809 + if (inode->i_nlink) 810 + ext4_orphan_del(NULL, inode); 811 + } 812 + return ret ? 
ret : copied; 776 813 } 777 814 778 815 struct buffer_head * ··· 990 951 out: 991 952 brelse(iloc.bh); 992 953 return ret; 993 - } 994 - 995 - int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, 996 - unsigned len, unsigned copied, 997 - struct page *page) 998 - { 999 - int ret; 1000 - 1001 - ret = ext4_write_inline_data_end(inode, pos, len, copied, page); 1002 - if (ret < 0) { 1003 - unlock_page(page); 1004 - put_page(page); 1005 - return ret; 1006 - } 1007 - copied = ret; 1008 - 1009 - /* 1010 - * No need to use i_size_read() here, the i_size 1011 - * cannot change under us because we hold i_mutex. 1012 - * 1013 - * But it's important to update i_size while still holding page lock: 1014 - * page writeout could otherwise come in and zero beyond i_size. 1015 - */ 1016 - if (pos+copied > inode->i_size) 1017 - i_size_write(inode, pos+copied); 1018 - unlock_page(page); 1019 - put_page(page); 1020 - 1021 - /* 1022 - * Don't mark the inode dirty under page lock. First, it unnecessarily 1023 - * makes the holding time of page lock longer. Second, it forces lock 1024 - * ordering of page lock and transaction start for journaling 1025 - * filesystems. 
1026 - */ 1027 - mark_inode_dirty(inode); 1028 - 1029 - return copied; 1030 954 } 1031 955 1032 956 #ifdef INLINE_DIR_DEBUG ··· 1919 1917 EXT4_I(inode)->i_disksize = i_size; 1920 1918 1921 1919 if (i_size < inline_size) { 1920 + /* 1921 + * if there's inline data to truncate and this file was 1922 + * converted to extents after that inline data was written, 1923 + * the extent status cache must be cleared to avoid leaving 1924 + * behind stale delayed allocated extent entries 1925 + */ 1926 + if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 1927 + retry: 1928 + err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); 1929 + if (err == -ENOMEM) { 1930 + cond_resched(); 1931 + congestion_wait(BLK_RW_ASYNC, HZ/50); 1932 + goto retry; 1933 + } 1934 + if (err) 1935 + goto out_error; 1936 + } 1937 + 1922 1938 /* Clear the content in the xattr space. */ 1923 1939 if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) { 1924 1940 if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
+60 -118
fs/ext4/inode.c
··· 1284 1284 loff_t old_size = inode->i_size; 1285 1285 int ret = 0, ret2; 1286 1286 int i_size_changed = 0; 1287 - int inline_data = ext4_has_inline_data(inode); 1288 1287 bool verity = ext4_verity_in_progress(inode); 1289 1288 1290 1289 trace_ext4_write_end(inode, pos, len, copied); 1291 - if (inline_data) { 1292 - ret = ext4_write_inline_data_end(inode, pos, len, 1293 - copied, page); 1294 - if (ret < 0) { 1295 - unlock_page(page); 1296 - put_page(page); 1297 - goto errout; 1298 - } 1299 - copied = ret; 1300 - } else 1301 - copied = block_write_end(file, mapping, pos, 1302 - len, copied, page, fsdata); 1290 + 1291 + if (ext4_has_inline_data(inode)) 1292 + return ext4_write_inline_data_end(inode, pos, len, copied, page); 1293 + 1294 + copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 1303 1295 /* 1304 1296 * it's important to update i_size while still holding page lock: 1305 1297 * page writeout could otherwise come in and zero beyond i_size. ··· 1312 1320 * ordering of page lock and transaction start for journaling 1313 1321 * filesystems. 1314 1322 */ 1315 - if (i_size_changed || inline_data) 1323 + if (i_size_changed) 1316 1324 ret = ext4_mark_inode_dirty(handle, inode); 1317 1325 1318 1326 if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode)) ··· 1321 1329 * inode->i_size. 
So truncate them 1322 1330 */ 1323 1331 ext4_orphan_add(handle, inode); 1324 - errout: 1332 + 1325 1333 ret2 = ext4_journal_stop(handle); 1326 1334 if (!ret) 1327 1335 ret = ret2; ··· 1387 1395 int partial = 0; 1388 1396 unsigned from, to; 1389 1397 int size_changed = 0; 1390 - int inline_data = ext4_has_inline_data(inode); 1391 1398 bool verity = ext4_verity_in_progress(inode); 1392 1399 1393 1400 trace_ext4_journalled_write_end(inode, pos, len, copied); ··· 1395 1404 1396 1405 BUG_ON(!ext4_handle_valid(handle)); 1397 1406 1398 - if (inline_data) { 1399 - ret = ext4_write_inline_data_end(inode, pos, len, 1400 - copied, page); 1401 - if (ret < 0) { 1402 - unlock_page(page); 1403 - put_page(page); 1404 - goto errout; 1405 - } 1406 - copied = ret; 1407 - } else if (unlikely(copied < len) && !PageUptodate(page)) { 1407 + if (ext4_has_inline_data(inode)) 1408 + return ext4_write_inline_data_end(inode, pos, len, copied, page); 1409 + 1410 + if (unlikely(copied < len) && !PageUptodate(page)) { 1408 1411 copied = 0; 1409 1412 ext4_journalled_zero_new_buffers(handle, inode, page, from, to); 1410 1413 } else { ··· 1421 1436 if (old_size < pos && !verity) 1422 1437 pagecache_isize_extended(inode, old_size, pos); 1423 1438 1424 - if (size_changed || inline_data) { 1439 + if (size_changed) { 1425 1440 ret2 = ext4_mark_inode_dirty(handle, inode); 1426 1441 if (!ret) 1427 1442 ret = ret2; ··· 1434 1449 */ 1435 1450 ext4_orphan_add(handle, inode); 1436 1451 1437 - errout: 1438 1452 ret2 = ext4_journal_stop(handle); 1439 1453 if (!ret) 1440 1454 ret = ret2; ··· 1628 1644 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1629 1645 int ret; 1630 1646 bool allocated = false; 1647 + bool reserved = false; 1631 1648 1632 1649 /* 1633 1650 * If the cluster containing lblk is shared with a delayed, ··· 1645 1660 ret = ext4_da_reserve_space(inode); 1646 1661 if (ret != 0) /* ENOSPC */ 1647 1662 goto errout; 1663 + reserved = true; 1648 1664 } else { /* bigalloc */ 1649 1665 if 
(!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) { 1650 1666 if (!ext4_es_scan_clu(inode, ··· 1658 1672 ret = ext4_da_reserve_space(inode); 1659 1673 if (ret != 0) /* ENOSPC */ 1660 1674 goto errout; 1675 + reserved = true; 1661 1676 } else { 1662 1677 allocated = true; 1663 1678 } ··· 1669 1682 } 1670 1683 1671 1684 ret = ext4_es_insert_delayed_block(inode, lblk, allocated); 1685 + if (ret && reserved) 1686 + ext4_da_release_space(inode, 1); 1672 1687 1673 1688 errout: 1674 1689 return ret; ··· 1711 1722 } 1712 1723 1713 1724 /* 1714 - * Delayed extent could be allocated by fallocate. 1715 - * So we need to check it. 1725 + * the buffer head associated with a delayed and not unwritten 1726 + * block found in the extent status cache must contain an 1727 + * invalid block number and have its BH_New and BH_Delay bits 1728 + * set, reflecting the state assigned when the block was 1729 + * initially delayed allocated 1716 1730 */ 1717 - if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) { 1718 - map_bh(bh, inode->i_sb, invalid_block); 1719 - set_buffer_new(bh); 1720 - set_buffer_delay(bh); 1731 + if (ext4_es_is_delonly(&es)) { 1732 + BUG_ON(bh->b_blocknr != invalid_block); 1733 + BUG_ON(!buffer_new(bh)); 1734 + BUG_ON(!buffer_delay(bh)); 1721 1735 return 0; 1722 1736 } 1723 1737 ··· 2924 2932 return 0; 2925 2933 } 2926 2934 2927 - /* We always reserve for an inode update; the superblock could be there too */ 2928 - static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len) 2929 - { 2930 - if (likely(ext4_has_feature_large_file(inode->i_sb))) 2931 - return 1; 2932 - 2933 - if (pos + len <= 0x7fffffffULL) 2934 - return 1; 2935 - 2936 - /* We might need to update the superblock to set LARGE_FILE */ 2937 - return 2; 2938 - } 2939 - 2940 2935 static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 2941 2936 loff_t pos, unsigned len, unsigned flags, 2942 2937 struct page **pagep, void **fsdata) ··· 2932 2953 struct 
page *page; 2933 2954 pgoff_t index; 2934 2955 struct inode *inode = mapping->host; 2935 - handle_t *handle; 2936 2956 2937 2957 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) 2938 2958 return -EIO; ··· 2957 2979 return 0; 2958 2980 } 2959 2981 2960 - /* 2961 - * grab_cache_page_write_begin() can take a long time if the 2962 - * system is thrashing due to memory pressure, or if the page 2963 - * is being written back. So grab it first before we start 2964 - * the transaction handle. This also allows us to allocate 2965 - * the page (if needed) without using GFP_NOFS. 2966 - */ 2967 - retry_grab: 2982 + retry: 2968 2983 page = grab_cache_page_write_begin(mapping, index, flags); 2969 2984 if (!page) 2970 2985 return -ENOMEM; 2971 - unlock_page(page); 2972 2986 2973 - /* 2974 - * With delayed allocation, we don't log the i_disksize update 2975 - * if there is delayed block allocation. But we still need 2976 - * to journalling the i_disksize update if writes to the end 2977 - * of file which has an already mapped buffer. 2978 - */ 2979 - retry_journal: 2980 - handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 2981 - ext4_da_write_credits(inode, pos, len)); 2982 - if (IS_ERR(handle)) { 2983 - put_page(page); 2984 - return PTR_ERR(handle); 2985 - } 2986 - 2987 - lock_page(page); 2988 - if (page->mapping != mapping) { 2989 - /* The page got truncated from under us */ 2990 - unlock_page(page); 2991 - put_page(page); 2992 - ext4_journal_stop(handle); 2993 - goto retry_grab; 2994 - } 2995 2987 /* In case writeback began while the page was unlocked */ 2996 2988 wait_for_stable_page(page); 2997 2989 ··· 2973 3025 #endif 2974 3026 if (ret < 0) { 2975 3027 unlock_page(page); 2976 - ext4_journal_stop(handle); 3028 + put_page(page); 2977 3029 /* 2978 3030 * block_write_begin may have instantiated a few blocks 2979 3031 * outside i_size. Trim these off again. Don't need 2980 - * i_size_read because we hold i_mutex. 3032 + * i_size_read because we hold inode lock. 
2981 3033 */ 2982 3034 if (pos + len > inode->i_size) 2983 3035 ext4_truncate_failed_write(inode); 2984 3036 2985 3037 if (ret == -ENOSPC && 2986 3038 ext4_should_retry_alloc(inode->i_sb, &retries)) 2987 - goto retry_journal; 2988 - 2989 - put_page(page); 3039 + goto retry; 2990 3040 return ret; 2991 3041 } 2992 3042 ··· 3021 3075 struct page *page, void *fsdata) 3022 3076 { 3023 3077 struct inode *inode = mapping->host; 3024 - int ret = 0, ret2; 3025 - handle_t *handle = ext4_journal_current_handle(); 3026 3078 loff_t new_i_size; 3027 3079 unsigned long start, end; 3028 3080 int write_mode = (int)(unsigned long)fsdata; ··· 3030 3086 len, copied, page, fsdata); 3031 3087 3032 3088 trace_ext4_da_write_end(inode, pos, len, copied); 3033 - start = pos & (PAGE_SIZE - 1); 3034 - end = start + copied - 1; 3035 - 3036 - /* 3037 - * generic_write_end() will run mark_inode_dirty() if i_size 3038 - * changes. So let's piggyback the i_disksize mark_inode_dirty 3039 - * into that. 3040 - */ 3041 - new_i_size = pos + copied; 3042 - if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 3043 - if (ext4_has_inline_data(inode) || 3044 - ext4_da_should_update_i_disksize(page, end)) { 3045 - ext4_update_i_disksize(inode, new_i_size); 3046 - /* We need to mark inode dirty even if 3047 - * new_i_size is less that inode->i_size 3048 - * bu greater than i_disksize.(hint delalloc) 3049 - */ 3050 - ret = ext4_mark_inode_dirty(handle, inode); 3051 - } 3052 - } 3053 3089 3054 3090 if (write_mode != CONVERT_INLINE_DATA && 3055 3091 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) && 3056 3092 ext4_has_inline_data(inode)) 3057 - ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied, 3058 - page); 3059 - else 3060 - ret2 = generic_write_end(file, mapping, pos, len, copied, 3061 - page, fsdata); 3093 + return ext4_write_inline_data_end(inode, pos, len, copied, page); 3062 3094 3063 - copied = ret2; 3064 - if (ret2 < 0) 3065 - ret = ret2; 3066 - ret2 = ext4_journal_stop(handle); 
3067 - if (unlikely(ret2 && !ret)) 3068 - ret = ret2; 3095 + start = pos & (PAGE_SIZE - 1); 3096 + end = start + copied - 1; 3069 3097 3070 - return ret ? ret : copied; 3098 + /* 3099 + * Since we are holding inode lock, we are sure i_disksize <= 3100 + * i_size. We also know that if i_disksize < i_size, there are 3101 + * delalloc writes pending in the range upto i_size. If the end of 3102 + * the current write is <= i_size, there's no need to touch 3103 + * i_disksize since writeback will push i_disksize upto i_size 3104 + * eventually. If the end of the current write is > i_size and 3105 + * inside an allocated block (ext4_da_should_update_i_disksize() 3106 + * check), we need to update i_disksize here as neither 3107 + * ext4_writepage() nor certain ext4_writepages() paths not 3108 + * allocating blocks update i_disksize. 3109 + * 3110 + * Note that we defer inode dirtying to generic_write_end() / 3111 + * ext4_da_write_inline_data_end(). 3112 + */ 3113 + new_i_size = pos + copied; 3114 + if (copied && new_i_size > inode->i_size && 3115 + ext4_da_should_update_i_disksize(page, end)) 3116 + ext4_update_i_disksize(inode, new_i_size); 3117 + 3118 + return generic_write_end(file, mapping, pos, len, copied, page, fsdata); 3071 3119 } 3072 3120 3073 3121 /* ··· 4276 4340 goto has_buffer; 4277 4341 4278 4342 lock_buffer(bh); 4343 + if (ext4_buffer_uptodate(bh)) { 4344 + /* Someone brought it uptodate while we waited */ 4345 + unlock_buffer(bh); 4346 + goto has_buffer; 4347 + } 4348 + 4279 4349 /* 4280 4350 * If we have all information of the inode in memory and this 4281 4351 * is the only valid inode in the block, we need not read the
+15 -6
fs/ext4/super.c
··· 658 658 * constraints, it may not be safe to do it right here so we 659 659 * defer superblock flushing to a workqueue. 660 660 */ 661 - if (continue_fs) 661 + if (continue_fs && journal) 662 662 schedule_work(&EXT4_SB(sb)->s_error_work); 663 663 else 664 664 ext4_commit_super(sb); ··· 1350 1350 true); 1351 1351 dump_stack(); 1352 1352 } 1353 + 1354 + if (EXT4_I(inode)->i_reserved_data_blocks) 1355 + ext4_msg(inode->i_sb, KERN_ERR, 1356 + "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!", 1357 + inode->i_ino, EXT4_I(inode), 1358 + EXT4_I(inode)->i_reserved_data_blocks); 1353 1359 } 1354 1360 1355 1361 static void init_once(void *foo) ··· 3027 3021 */ 3028 3022 static loff_t ext4_max_bitmap_size(int bits, int has_huge_files) 3029 3023 { 3030 - loff_t res = EXT4_NDIR_BLOCKS; 3024 + unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS; 3031 3025 int meta_blocks; 3032 - loff_t upper_limit; 3033 - /* This is calculated to be the largest file size for a dense, block 3026 + 3027 + /* 3028 + * This is calculated to be the largest file size for a dense, block 3034 3029 * mapped file such that the file's total number of 512-byte sectors, 3035 3030 * including data and all indirect blocks, does not exceed (2^48 - 1). 3036 3031 * 3037 3032 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total 3038 3033 * number of 512-byte sectors of the file. 3039 3034 */ 3040 - 3041 3035 if (!has_huge_files) { 3042 3036 /* 3043 3037 * !has_huge_files or implies that the inode i_block field ··· 3080 3074 if (res > MAX_LFS_FILESIZE) 3081 3075 res = MAX_LFS_FILESIZE; 3082 3076 3083 - return res; 3077 + return (loff_t)res; 3084 3078 } 3085 3079 3086 3080 static ext4_fsblk_t descriptor_loc(struct super_block *sb, ··· 5048 5042 sbi->s_ea_block_cache = NULL; 5049 5043 5050 5044 if (sbi->s_journal) { 5045 + /* flush s_error_work before journal destroy. 
*/ 5046 + flush_work(&sbi->s_error_work); 5051 5047 jbd2_journal_destroy(sbi->s_journal); 5052 5048 sbi->s_journal = NULL; 5053 5049 } 5054 5050 failed_mount3a: 5055 5051 ext4_es_unregister_shrinker(sbi); 5056 5052 failed_mount3: 5053 + /* flush s_error_work before sbi destroy */ 5057 5054 flush_work(&sbi->s_error_work); 5058 5055 del_timer_sync(&sbi->s_err_report); 5059 5056 ext4_stop_mmpd(sbi);
+1 -1
fs/fscache/object.c
··· 77 77 static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready); 78 78 static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation); 79 79 static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object); 80 - static WORK_STATE(CREATE_OBJECT, "CRTO", fscache_look_up_object); 81 80 static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available); 82 81 static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents); 83 82 ··· 906 907 * @object: The object to ask about 907 908 * @data: The auxiliary data for the object 908 909 * @datalen: The size of the auxiliary data 910 + * @object_size: The size of the object according to the server. 909 911 * 910 912 * This function consults the netfs about the coherency state of an object. 911 913 * The caller must be holding a ref on cookie->n_active (held by
+3
fs/fscache/operation.c
··· 22 22 23 23 /** 24 24 * fscache_operation_init - Do basic initialisation of an operation 25 + * @cookie: The cookie to operate on 25 26 * @op: The operation to initialise 27 + * @processor: The function to perform the operation 28 + * @cancel: A function to handle operation cancellation 26 29 * @release: The release function to assign 27 30 * 28 31 * Do basic initialisation of an operation. The caller must still set flags,
+4 -2
fs/inode.c
··· 190 190 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); 191 191 mapping->private_data = NULL; 192 192 mapping->writeback_index = 0; 193 - __init_rwsem(&mapping->invalidate_lock, "mapping.invalidate_lock", 194 - &sb->s_type->invalidate_lock_key); 193 + init_rwsem(&mapping->invalidate_lock); 194 + lockdep_set_class_and_name(&mapping->invalidate_lock, 195 + &sb->s_type->invalidate_lock_key, 196 + "mapping.invalidate_lock"); 195 197 inode->i_private = NULL; 196 198 inode->i_mapping = mapping; 197 199 INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
+1 -3
fs/io-wq.c
··· 584 584 585 585 if (!get_signal(&ksig)) 586 586 continue; 587 - if (fatal_signal_pending(current)) 588 - break; 589 - continue; 587 + break; 590 588 } 591 589 last_timeout = !ret; 592 590 }
+72 -30
fs/io_uring.c
··· 403 403 struct wait_queue_head cq_wait; 404 404 unsigned cq_extra; 405 405 atomic_t cq_timeouts; 406 - struct fasync_struct *cq_fasync; 407 406 unsigned cq_last_tm_flush; 408 407 } ____cacheline_aligned_in_smp; 409 408 ··· 501 502 struct io_close { 502 503 struct file *file; 503 504 int fd; 505 + u32 file_slot; 504 506 }; 505 507 506 508 struct io_timeout_data { ··· 1098 1098 1099 1099 static int io_install_fixed_file(struct io_kiocb *req, struct file *file, 1100 1100 unsigned int issue_flags, u32 slot_index); 1101 + static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags); 1102 + 1101 1103 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer); 1102 1104 1103 1105 static struct kmem_cache *req_cachep; ··· 1613 1611 wake_up(&ctx->sq_data->wait); 1614 1612 if (io_should_trigger_evfd(ctx)) 1615 1613 eventfd_signal(ctx->cq_ev_fd, 1); 1616 - if (waitqueue_active(&ctx->poll_wait)) { 1614 + if (waitqueue_active(&ctx->poll_wait)) 1617 1615 wake_up_interruptible(&ctx->poll_wait); 1618 - kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN); 1619 - } 1620 1616 } 1621 1617 1622 1618 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx) ··· 1628 1628 } 1629 1629 if (io_should_trigger_evfd(ctx)) 1630 1630 eventfd_signal(ctx->cq_ev_fd, 1); 1631 - if (waitqueue_active(&ctx->poll_wait)) { 1631 + if (waitqueue_active(&ctx->poll_wait)) 1632 1632 wake_up_interruptible(&ctx->poll_wait); 1633 - kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN); 1634 - } 1635 1633 } 1636 1634 1637 1635 /* Returns true if there are no backlogged entries after the flush */ ··· 3603 3605 iov_iter_save_state(iter, state); 3604 3606 } 3605 3607 req->result = iov_iter_count(iter); 3606 - ret2 = 0; 3607 3608 3608 3609 /* Ensure we clear previously set non-block flag */ 3609 3610 if (!force_nonblock) ··· 3667 3670 } else { 3668 3671 copy_iov: 3669 3672 iov_iter_restore(iter, state); 3670 - if (ret2 > 0) 3671 - iov_iter_advance(iter, ret2); 3672 3673 ret = io_setup_async_rw(req, 
iovec, inline_vecs, iter, false); 3673 3674 return ret ?: -EAGAIN; 3674 3675 } ··· 4382 4387 int i, bid = pbuf->bid; 4383 4388 4384 4389 for (i = 0; i < pbuf->nbufs; i++) { 4385 - buf = kmalloc(sizeof(*buf), GFP_KERNEL); 4390 + buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT); 4386 4391 if (!buf) 4387 4392 break; 4388 4393 ··· 4589 4594 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) 4590 4595 return -EINVAL; 4591 4596 if (sqe->ioprio || sqe->off || sqe->addr || sqe->len || 4592 - sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in) 4597 + sqe->rw_flags || sqe->buf_index) 4593 4598 return -EINVAL; 4594 4599 if (req->flags & REQ_F_FIXED_FILE) 4595 4600 return -EBADF; 4596 4601 4597 4602 req->close.fd = READ_ONCE(sqe->fd); 4603 + req->close.file_slot = READ_ONCE(sqe->file_index); 4604 + if (req->close.file_slot && req->close.fd) 4605 + return -EINVAL; 4606 + 4598 4607 return 0; 4599 4608 } 4600 4609 ··· 4609 4610 struct fdtable *fdt; 4610 4611 struct file *file = NULL; 4611 4612 int ret = -EBADF; 4613 + 4614 + if (req->close.file_slot) { 4615 + ret = io_close_fixed(req, issue_flags); 4616 + goto err; 4617 + } 4612 4618 4613 4619 spin_lock(&files->file_lock); 4614 4620 fdt = files_fdtable(files); ··· 5342 5338 if (req->poll.events & EPOLLONESHOT) 5343 5339 flags = 0; 5344 5340 if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) { 5345 - req->poll.done = true; 5341 + req->poll.events |= EPOLLONESHOT; 5346 5342 flags = 0; 5347 5343 } 5348 5344 if (flags & IORING_CQE_F_MORE) ··· 5371 5367 } else { 5372 5368 bool done; 5373 5369 5370 + if (req->poll.done) { 5371 + spin_unlock(&ctx->completion_lock); 5372 + return; 5373 + } 5374 5374 done = __io_poll_complete(req, req->result); 5375 5375 if (done) { 5376 5376 io_poll_remove_double(req); 5377 5377 hash_del(&req->hash_node); 5378 + req->poll.done = true; 5378 5379 } else { 5379 5380 req->result = 0; 5380 5381 add_wait_queue(req->poll.head, &req->poll.wait); ··· 5517 5508 5518 5509 hash_del(&req->hash_node); 
5519 5510 io_poll_remove_double(req); 5511 + apoll->poll.done = true; 5520 5512 spin_unlock(&ctx->completion_lock); 5521 5513 5522 5514 if (!READ_ONCE(apoll->poll.canceled)) ··· 5838 5828 struct io_ring_ctx *ctx = req->ctx; 5839 5829 struct io_poll_table ipt; 5840 5830 __poll_t mask; 5831 + bool done; 5841 5832 5842 5833 ipt.pt._qproc = io_poll_queue_proc; 5843 5834 ··· 5847 5836 5848 5837 if (mask) { /* no async, we'd stolen it */ 5849 5838 ipt.error = 0; 5850 - io_poll_complete(req, mask); 5839 + done = io_poll_complete(req, mask); 5851 5840 } 5852 5841 spin_unlock(&ctx->completion_lock); 5853 5842 5854 5843 if (mask) { 5855 5844 io_cqring_ev_posted(ctx); 5856 - if (poll->events & EPOLLONESHOT) 5845 + if (done) 5857 5846 io_put_req(req); 5858 5847 } 5859 5848 return ipt.error; ··· 6344 6333 struct io_uring_rsrc_update2 up; 6345 6334 int ret; 6346 6335 6347 - if (issue_flags & IO_URING_F_NONBLOCK) 6348 - return -EAGAIN; 6349 - 6350 6336 up.offset = req->rsrc_update.offset; 6351 6337 up.data = req->rsrc_update.arg; 6352 6338 up.nr = 0; 6353 6339 up.tags = 0; 6354 6340 up.resv = 0; 6355 6341 6356 - mutex_lock(&ctx->uring_lock); 6342 + io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); 6357 6343 ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE, 6358 6344 &up, req->rsrc_update.nr_args); 6359 - mutex_unlock(&ctx->uring_lock); 6345 + io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); 6360 6346 6361 6347 if (ret < 0) 6362 6348 req_set_fail(req); ··· 8408 8400 return ret; 8409 8401 } 8410 8402 8403 + static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) 8404 + { 8405 + unsigned int offset = req->close.file_slot - 1; 8406 + struct io_ring_ctx *ctx = req->ctx; 8407 + struct io_fixed_file *file_slot; 8408 + struct file *file; 8409 + int ret, i; 8410 + 8411 + io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); 8412 + ret = -ENXIO; 8413 + if (unlikely(!ctx->file_data)) 8414 + goto out; 8415 + ret = -EINVAL; 8416 
+ if (offset >= ctx->nr_user_files) 8417 + goto out; 8418 + ret = io_rsrc_node_switch_start(ctx); 8419 + if (ret) 8420 + goto out; 8421 + 8422 + i = array_index_nospec(offset, ctx->nr_user_files); 8423 + file_slot = io_fixed_file_slot(&ctx->file_table, i); 8424 + ret = -EBADF; 8425 + if (!file_slot->file_ptr) 8426 + goto out; 8427 + 8428 + file = (struct file *)(file_slot->file_ptr & FFS_MASK); 8429 + ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file); 8430 + if (ret) 8431 + goto out; 8432 + 8433 + file_slot->file_ptr = 0; 8434 + io_rsrc_node_switch(ctx, ctx->file_data); 8435 + ret = 0; 8436 + out: 8437 + io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK)); 8438 + return ret; 8439 + } 8440 + 8411 8441 static int __io_sqe_files_update(struct io_ring_ctx *ctx, 8412 8442 struct io_uring_rsrc_update2 *up, 8413 8443 unsigned nr_args) ··· 9212 9166 struct io_buffer *buf; 9213 9167 unsigned long index; 9214 9168 9215 - xa_for_each(&ctx->io_buffers, index, buf) 9169 + xa_for_each(&ctx->io_buffers, index, buf) { 9216 9170 __io_remove_buffers(ctx, buf, index, -1U); 9171 + cond_resched(); 9172 + } 9217 9173 } 9218 9174 9219 9175 static void io_req_cache_free(struct list_head *list) ··· 9338 9290 mask |= EPOLLIN | EPOLLRDNORM; 9339 9291 9340 9292 return mask; 9341 - } 9342 - 9343 - static int io_uring_fasync(int fd, struct file *file, int on) 9344 - { 9345 - struct io_ring_ctx *ctx = file->private_data; 9346 - 9347 - return fasync_helper(fd, file, on, &ctx->cq_fasync); 9348 9293 } 9349 9294 9350 9295 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id) ··· 9706 9665 struct io_tctx_node *node; 9707 9666 unsigned long index; 9708 9667 9709 - xa_for_each(&tctx->xa, index, node) 9668 + xa_for_each(&tctx->xa, index, node) { 9710 9669 io_uring_del_tctx_node(index); 9670 + cond_resched(); 9671 + } 9711 9672 if (wq) { 9712 9673 /* 9713 9674 * Must be after io_uring_del_task_file() (removes nodes under ··· 10133 10090 
.mmap_capabilities = io_uring_nommu_mmap_capabilities, 10134 10091 #endif 10135 10092 .poll = io_uring_poll, 10136 - .fasync = io_uring_fasync, 10137 10093 #ifdef CONFIG_PROC_FS 10138 10094 .show_fdinfo = io_uring_show_fdinfo, 10139 10095 #endif
+7 -2
fs/kernfs/dir.c
··· 1116 1116 if (!inode) 1117 1117 inode = ERR_PTR(-ENOMEM); 1118 1118 } 1119 - /* Needed only for negative dentry validation */ 1120 - if (!inode) 1119 + /* 1120 + * Needed for negative dentry validation. 1121 + * The negative dentry can be created in kernfs_iop_lookup() 1122 + * or transforms from positive dentry in dentry_unlink_inode() 1123 + * called from vfs_rmdir(). 1124 + */ 1125 + if (!IS_ERR(inode)) 1121 1126 kernfs_set_rev(parent, dentry); 1122 1127 up_read(&kernfs_rwsem); 1123 1128
-205
fs/ksmbd/auth.c
··· 68 68 memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH); 69 69 } 70 70 71 - static void 72 - str_to_key(unsigned char *str, unsigned char *key) 73 - { 74 - int i; 75 - 76 - key[0] = str[0] >> 1; 77 - key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2); 78 - key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3); 79 - key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4); 80 - key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5); 81 - key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6); 82 - key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7); 83 - key[7] = str[6] & 0x7F; 84 - for (i = 0; i < 8; i++) 85 - key[i] = (key[i] << 1); 86 - } 87 - 88 - static int 89 - smbhash(unsigned char *out, const unsigned char *in, unsigned char *key) 90 - { 91 - unsigned char key2[8]; 92 - struct des_ctx ctx; 93 - 94 - if (fips_enabled) { 95 - ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n"); 96 - return -ENOENT; 97 - } 98 - 99 - str_to_key(key, key2); 100 - des_expand_key(&ctx, key2, DES_KEY_SIZE); 101 - des_encrypt(&ctx, out, in); 102 - memzero_explicit(&ctx, sizeof(ctx)); 103 - return 0; 104 - } 105 - 106 - static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24) 107 - { 108 - int rc; 109 - 110 - rc = smbhash(p24, c8, p21); 111 - if (rc) 112 - return rc; 113 - rc = smbhash(p24 + 8, c8, p21 + 7); 114 - if (rc) 115 - return rc; 116 - return smbhash(p24 + 16, c8, p21 + 14); 117 - } 118 - 119 - /* produce a md4 message digest from data of length n bytes */ 120 - static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str, 121 - int link_len) 122 - { 123 - int rc; 124 - struct ksmbd_crypto_ctx *ctx; 125 - 126 - ctx = ksmbd_crypto_ctx_find_md4(); 127 - if (!ctx) { 128 - ksmbd_debug(AUTH, "Crypto md4 allocation error\n"); 129 - return -ENOMEM; 130 - } 131 - 132 - rc = crypto_shash_init(CRYPTO_MD4(ctx)); 133 - if (rc) { 134 - ksmbd_debug(AUTH, "Could not init md4 shash\n"); 135 - goto out; 136 - } 137 - 138 - rc = crypto_shash_update(CRYPTO_MD4(ctx), 
link_str, link_len); 139 - if (rc) { 140 - ksmbd_debug(AUTH, "Could not update with link_str\n"); 141 - goto out; 142 - } 143 - 144 - rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash); 145 - if (rc) 146 - ksmbd_debug(AUTH, "Could not generate md4 hash\n"); 147 - out: 148 - ksmbd_release_crypto_ctx(ctx); 149 - return rc; 150 - } 151 - 152 - static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce, 153 - char *server_challenge, int len) 154 - { 155 - int rc; 156 - struct ksmbd_crypto_ctx *ctx; 157 - 158 - ctx = ksmbd_crypto_ctx_find_md5(); 159 - if (!ctx) { 160 - ksmbd_debug(AUTH, "Crypto md5 allocation error\n"); 161 - return -ENOMEM; 162 - } 163 - 164 - rc = crypto_shash_init(CRYPTO_MD5(ctx)); 165 - if (rc) { 166 - ksmbd_debug(AUTH, "Could not init md5 shash\n"); 167 - goto out; 168 - } 169 - 170 - rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len); 171 - if (rc) { 172 - ksmbd_debug(AUTH, "Could not update with challenge\n"); 173 - goto out; 174 - } 175 - 176 - rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len); 177 - if (rc) { 178 - ksmbd_debug(AUTH, "Could not update with nonce\n"); 179 - goto out; 180 - } 181 - 182 - rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash); 183 - if (rc) 184 - ksmbd_debug(AUTH, "Could not generate md5 hash\n"); 185 - out: 186 - ksmbd_release_crypto_ctx(ctx); 187 - return rc; 188 - } 189 - 190 71 /** 191 72 * ksmbd_gen_sess_key() - function to generate session key 192 73 * @sess: session of connection ··· 206 325 } 207 326 208 327 /** 209 - * ksmbd_auth_ntlm() - NTLM authentication handler 210 - * @sess: session of connection 211 - * @pw_buf: NTLM challenge response 212 - * @passkey: user password 213 - * 214 - * Return: 0 on success, error number on error 215 - */ 216 - int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf) 217 - { 218 - int rc; 219 - unsigned char p21[21]; 220 - char key[CIFS_AUTH_RESP_SIZE]; 221 - 222 - memset(p21, '\0', 21); 223 - memcpy(p21, user_passkey(sess->user), 
CIFS_NTHASH_SIZE); 224 - rc = ksmbd_enc_p24(p21, sess->ntlmssp.cryptkey, key); 225 - if (rc) { 226 - pr_err("password processing failed\n"); 227 - return rc; 228 - } 229 - 230 - ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user), 231 - CIFS_SMB1_SESSKEY_SIZE); 232 - memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key, 233 - CIFS_AUTH_RESP_SIZE); 234 - sess->sequence_number = 1; 235 - 236 - if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) { 237 - ksmbd_debug(AUTH, "ntlmv1 authentication failed\n"); 238 - return -EINVAL; 239 - } 240 - 241 - ksmbd_debug(AUTH, "ntlmv1 authentication pass\n"); 242 - return 0; 243 - } 244 - 245 - /** 246 328 * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler 247 329 * @sess: session of connection 248 330 * @ntlmv2: NTLMv2 challenge response ··· 286 442 } 287 443 288 444 /** 289 - * __ksmbd_auth_ntlmv2() - NTLM2(extended security) authentication handler 290 - * @sess: session of connection 291 - * @client_nonce: client nonce from LM response. 292 - * @ntlm_resp: ntlm response data from client. 
293 - * 294 - * Return: 0 on success, error number on error 295 - */ 296 - static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess, char *client_nonce, 297 - char *ntlm_resp) 298 - { 299 - char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0}; 300 - int rc; 301 - unsigned char p21[21]; 302 - char key[CIFS_AUTH_RESP_SIZE]; 303 - 304 - rc = ksmbd_enc_update_sess_key(sess_key, 305 - client_nonce, 306 - (char *)sess->ntlmssp.cryptkey, 8); 307 - if (rc) { 308 - pr_err("password processing failed\n"); 309 - goto out; 310 - } 311 - 312 - memset(p21, '\0', 21); 313 - memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE); 314 - rc = ksmbd_enc_p24(p21, sess_key, key); 315 - if (rc) { 316 - pr_err("password processing failed\n"); 317 - goto out; 318 - } 319 - 320 - if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 0) 321 - rc = -EINVAL; 322 - out: 323 - return rc; 324 - } 325 - 326 - /** 327 445 * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct 328 446 * authenticate blob 329 447 * @authblob: authenticate blob source pointer ··· 317 511 lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset); 318 512 nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset); 319 513 nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length); 320 - 321 - /* process NTLM authentication */ 322 - if (nt_len == CIFS_AUTH_RESP_SIZE) { 323 - if (le32_to_cpu(authblob->NegotiateFlags) & 324 - NTLMSSP_NEGOTIATE_EXTENDED_SEC) 325 - return __ksmbd_auth_ntlmv2(sess, (char *)authblob + 326 - lm_off, (char *)authblob + nt_off); 327 - else 328 - return ksmbd_auth_ntlm(sess, (char *)authblob + 329 - nt_off); 330 - } 331 514 332 515 /* TODO : use domain name that imported from configuration file */ 333 516 domain_name = smb_strndup_from_utf16((const char *)authblob +
-16
fs/ksmbd/crypto_ctx.c
··· 81 81 case CRYPTO_SHASH_SHA512: 82 82 tfm = crypto_alloc_shash("sha512", 0, 0); 83 83 break; 84 - case CRYPTO_SHASH_MD4: 85 - tfm = crypto_alloc_shash("md4", 0, 0); 86 - break; 87 - case CRYPTO_SHASH_MD5: 88 - tfm = crypto_alloc_shash("md5", 0, 0); 89 - break; 90 84 default: 91 85 return NULL; 92 86 } ··· 206 212 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void) 207 213 { 208 214 return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512); 209 - } 210 - 211 - struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void) 212 - { 213 - return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD4); 214 - } 215 - 216 - struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void) 217 - { 218 - return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD5); 219 215 } 220 216 221 217 static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
-8
fs/ksmbd/crypto_ctx.h
··· 15 15 CRYPTO_SHASH_CMACAES, 16 16 CRYPTO_SHASH_SHA256, 17 17 CRYPTO_SHASH_SHA512, 18 - CRYPTO_SHASH_MD4, 19 - CRYPTO_SHASH_MD5, 20 18 CRYPTO_SHASH_MAX, 21 19 }; 22 20 ··· 41 43 #define CRYPTO_CMACAES(c) ((c)->desc[CRYPTO_SHASH_CMACAES]) 42 44 #define CRYPTO_SHA256(c) ((c)->desc[CRYPTO_SHASH_SHA256]) 43 45 #define CRYPTO_SHA512(c) ((c)->desc[CRYPTO_SHASH_SHA512]) 44 - #define CRYPTO_MD4(c) ((c)->desc[CRYPTO_SHASH_MD4]) 45 - #define CRYPTO_MD5(c) ((c)->desc[CRYPTO_SHASH_MD5]) 46 46 47 47 #define CRYPTO_HMACMD5_TFM(c) ((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm) 48 48 #define CRYPTO_HMACSHA256_TFM(c)\ ··· 48 52 #define CRYPTO_CMACAES_TFM(c) ((c)->desc[CRYPTO_SHASH_CMACAES]->tfm) 49 53 #define CRYPTO_SHA256_TFM(c) ((c)->desc[CRYPTO_SHASH_SHA256]->tfm) 50 54 #define CRYPTO_SHA512_TFM(c) ((c)->desc[CRYPTO_SHASH_SHA512]->tfm) 51 - #define CRYPTO_MD4_TFM(c) ((c)->desc[CRYPTO_SHASH_MD4]->tfm) 52 - #define CRYPTO_MD5_TFM(c) ((c)->desc[CRYPTO_SHASH_MD5]->tfm) 53 55 54 56 #define CRYPTO_GCM(c) ((c)->ccmaes[CRYPTO_AEAD_AES_GCM]) 55 57 #define CRYPTO_CCM(c) ((c)->ccmaes[CRYPTO_AEAD_AES_CCM]) ··· 58 64 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void); 59 65 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void); 60 66 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void); 61 - struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void); 62 - struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void); 63 67 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void); 64 68 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void); 65 69 void ksmbd_crypto_destroy(void);
+16 -81
fs/ksmbd/misc.c
··· 158 158 * Return : windows path string or error 159 159 */ 160 160 161 - char *convert_to_nt_pathname(char *filename, char *sharepath) 161 + char *convert_to_nt_pathname(char *filename) 162 162 { 163 163 char *ab_pathname; 164 - int len, name_len; 165 164 166 - name_len = strlen(filename); 167 - ab_pathname = kmalloc(name_len, GFP_KERNEL); 165 + if (strlen(filename) == 0) 166 + filename = "\\"; 167 + 168 + ab_pathname = kstrdup(filename, GFP_KERNEL); 168 169 if (!ab_pathname) 169 170 return NULL; 170 171 171 - ab_pathname[0] = '\\'; 172 - ab_pathname[1] = '\0'; 173 - 174 - len = strlen(sharepath); 175 - if (!strncmp(filename, sharepath, len) && name_len != len) { 176 - strscpy(ab_pathname, &filename[len], name_len); 177 - ksmbd_conv_path_to_windows(ab_pathname); 178 - } 179 - 172 + ksmbd_conv_path_to_windows(ab_pathname); 180 173 return ab_pathname; 181 174 } 182 175 ··· 184 191 return nlink; 185 192 } 186 193 187 - char *ksmbd_conv_path_to_unix(char *path) 194 + void ksmbd_conv_path_to_unix(char *path) 188 195 { 189 - size_t path_len, remain_path_len, out_path_len; 190 - char *out_path, *out_next; 191 - int i, pre_dotdot_cnt = 0, slash_cnt = 0; 192 - bool is_last; 193 - 194 196 strreplace(path, '\\', '/'); 195 - path_len = strlen(path); 196 - remain_path_len = path_len; 197 - if (path_len == 0) 198 - return ERR_PTR(-EINVAL); 197 + } 199 198 200 - out_path = kzalloc(path_len + 2, GFP_KERNEL); 201 - if (!out_path) 202 - return ERR_PTR(-ENOMEM); 203 - out_path_len = 0; 204 - out_next = out_path; 199 + void ksmbd_strip_last_slash(char *path) 200 + { 201 + int len = strlen(path); 205 202 206 - do { 207 - char *name = path + path_len - remain_path_len; 208 - char *next = strchrnul(name, '/'); 209 - size_t name_len = next - name; 210 - 211 - is_last = !next[0]; 212 - if (name_len == 2 && name[0] == '.' && name[1] == '.') { 213 - pre_dotdot_cnt++; 214 - /* handle the case that path ends with "/.." 
*/ 215 - if (is_last) 216 - goto follow_dotdot; 217 - } else { 218 - if (pre_dotdot_cnt) { 219 - follow_dotdot: 220 - slash_cnt = 0; 221 - for (i = out_path_len - 1; i >= 0; i--) { 222 - if (out_path[i] == '/' && 223 - ++slash_cnt == pre_dotdot_cnt + 1) 224 - break; 225 - } 226 - 227 - if (i < 0 && 228 - slash_cnt != pre_dotdot_cnt) { 229 - kfree(out_path); 230 - return ERR_PTR(-EINVAL); 231 - } 232 - 233 - out_next = &out_path[i+1]; 234 - *out_next = '\0'; 235 - out_path_len = i + 1; 236 - 237 - } 238 - 239 - if (name_len != 0 && 240 - !(name_len == 1 && name[0] == '.') && 241 - !(name_len == 2 && name[0] == '.' && name[1] == '.')) { 242 - next[0] = '\0'; 243 - sprintf(out_next, "%s/", name); 244 - out_next += name_len + 1; 245 - out_path_len += name_len + 1; 246 - next[0] = '/'; 247 - } 248 - pre_dotdot_cnt = 0; 249 - } 250 - 251 - remain_path_len -= name_len + 1; 252 - } while (!is_last); 253 - 254 - if (out_path_len > 0) 255 - out_path[out_path_len-1] = '\0'; 256 - path[path_len] = '\0'; 257 - return out_path; 203 + while (len && path[len - 1] == '/') { 204 + path[len - 1] = '\0'; 205 + len--; 206 + } 258 207 } 259 208 260 209 void ksmbd_conv_path_to_windows(char *path) ··· 233 298 * 234 299 * Return: converted name on success, otherwise NULL 235 300 */ 236 - char *convert_to_unix_name(struct ksmbd_share_config *share, char *name) 301 + char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name) 237 302 { 238 303 int no_slash = 0, name_len, path_len; 239 304 char *new_name;
+4 -3
fs/ksmbd/misc.h
··· 14 14 int match_pattern(const char *str, size_t len, const char *pattern); 15 15 int ksmbd_validate_filename(char *filename); 16 16 int parse_stream_name(char *filename, char **stream_name, int *s_type); 17 - char *convert_to_nt_pathname(char *filename, char *sharepath); 17 + char *convert_to_nt_pathname(char *filename); 18 18 int get_nlink(struct kstat *st); 19 - char *ksmbd_conv_path_to_unix(char *path); 19 + void ksmbd_conv_path_to_unix(char *path); 20 + void ksmbd_strip_last_slash(char *path); 20 21 void ksmbd_conv_path_to_windows(char *path); 21 22 char *ksmbd_extract_sharename(char *treename); 22 - char *convert_to_unix_name(struct ksmbd_share_config *share, char *name); 23 + char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name); 23 24 24 25 #define KSMBD_DIR_INFO_ALIGNMENT 8 25 26 struct ksmbd_dir_info;
+31 -10
fs/ksmbd/oplock.c
··· 1451 1451 */ 1452 1452 struct create_context *smb2_find_context_vals(void *open_req, const char *tag) 1453 1453 { 1454 - char *data_offset; 1455 1454 struct create_context *cc; 1456 1455 unsigned int next = 0; 1457 1456 char *name; 1458 1457 struct smb2_create_req *req = (struct smb2_create_req *)open_req; 1458 + unsigned int remain_len, name_off, name_len, value_off, value_len, 1459 + cc_len; 1459 1460 1460 - data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset); 1461 - cc = (struct create_context *)data_offset; 1461 + /* 1462 + * CreateContextsOffset and CreateContextsLength are guaranteed to 1463 + * be valid because of ksmbd_smb2_check_message(). 1464 + */ 1465 + cc = (struct create_context *)((char *)req + 4 + 1466 + le32_to_cpu(req->CreateContextsOffset)); 1467 + remain_len = le32_to_cpu(req->CreateContextsLength); 1462 1468 do { 1463 - int val; 1464 - 1465 1469 cc = (struct create_context *)((char *)cc + next); 1466 - name = le16_to_cpu(cc->NameOffset) + (char *)cc; 1467 - val = le16_to_cpu(cc->NameLength); 1468 - if (val < 4) 1470 + if (remain_len < offsetof(struct create_context, Buffer)) 1469 1471 return ERR_PTR(-EINVAL); 1470 1472 1471 - if (memcmp(name, tag, val) == 0) 1472 - return cc; 1473 1473 next = le32_to_cpu(cc->Next); 1474 + name_off = le16_to_cpu(cc->NameOffset); 1475 + name_len = le16_to_cpu(cc->NameLength); 1476 + value_off = le16_to_cpu(cc->DataOffset); 1477 + value_len = le32_to_cpu(cc->DataLength); 1478 + cc_len = next ? 
next : remain_len; 1479 + 1480 + if ((next & 0x7) != 0 || 1481 + next > remain_len || 1482 + name_off != offsetof(struct create_context, Buffer) || 1483 + name_len < 4 || 1484 + name_off + name_len > cc_len || 1485 + (value_off & 0x7) != 0 || 1486 + (value_off && (value_off < name_off + name_len)) || 1487 + ((u64)value_off + value_len > cc_len)) 1488 + return ERR_PTR(-EINVAL); 1489 + 1490 + name = (char *)cc + name_off; 1491 + if (memcmp(name, tag, name_len) == 0) 1492 + return cc; 1493 + 1494 + remain_len -= next; 1474 1495 } while (next != 0); 1475 1496 1476 1497 return NULL;
+3
fs/ksmbd/server.c
··· 584 584 ret = ksmbd_workqueue_init(); 585 585 if (ret) 586 586 goto err_crypto_destroy; 587 + 588 + pr_warn_once("The ksmbd server is experimental, use at your own risk.\n"); 589 + 587 590 return 0; 588 591 589 592 err_crypto_destroy:
+235 -142
fs/ksmbd/smb2pdu.c
··· 433 433 work->compound_pfid = KSMBD_NO_FID; 434 434 } 435 435 memset((char *)rsp_hdr + 4, 0, sizeof(struct smb2_hdr) + 2); 436 - rsp_hdr->ProtocolId = rcv_hdr->ProtocolId; 436 + rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER; 437 437 rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE; 438 438 rsp_hdr->Command = rcv_hdr->Command; 439 439 ··· 459 459 bool is_chained_smb2_message(struct ksmbd_work *work) 460 460 { 461 461 struct smb2_hdr *hdr = work->request_buf; 462 - unsigned int len; 462 + unsigned int len, next_cmd; 463 463 464 464 if (hdr->ProtocolId != SMB2_PROTO_NUMBER) 465 465 return false; 466 466 467 467 hdr = ksmbd_req_buf_next(work); 468 - if (le32_to_cpu(hdr->NextCommand) > 0) { 468 + next_cmd = le32_to_cpu(hdr->NextCommand); 469 + if (next_cmd > 0) { 470 + if ((u64)work->next_smb2_rcv_hdr_off + next_cmd + 471 + __SMB2_HEADER_STRUCTURE_SIZE > 472 + get_rfc1002_len(work->request_buf)) { 473 + pr_err("next command(%u) offset exceeds smb msg size\n", 474 + next_cmd); 475 + return false; 476 + } 477 + 469 478 ksmbd_debug(SMB, "got SMB2 chained command\n"); 470 479 init_chained_smb2_rsp(work); 471 480 return true; ··· 643 634 smb2_get_name(struct ksmbd_share_config *share, const char *src, 644 635 const int maxlen, struct nls_table *local_nls) 645 636 { 646 - char *name, *norm_name, *unixname; 637 + char *name; 647 638 648 639 name = smb_strndup_from_utf16(src, maxlen, 1, local_nls); 649 640 if (IS_ERR(name)) { ··· 651 642 return name; 652 643 } 653 644 654 - /* change it to absolute unix name */ 655 - norm_name = ksmbd_conv_path_to_unix(name); 656 - if (IS_ERR(norm_name)) { 657 - kfree(name); 658 - return norm_name; 659 - } 660 - kfree(name); 661 - 662 - unixname = convert_to_unix_name(share, norm_name); 663 - kfree(norm_name); 664 - if (!unixname) { 665 - pr_err("can not convert absolute name\n"); 666 - return ERR_PTR(-ENOMEM); 667 - } 668 - 669 - ksmbd_debug(SMB, "absolute name = %s\n", unixname); 670 - return unixname; 645 + ksmbd_conv_path_to_unix(name); 
646 + ksmbd_strip_last_slash(name); 647 + return name; 671 648 } 672 649 673 650 int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg) ··· 1067 1072 struct smb2_negotiate_req *req = work->request_buf; 1068 1073 struct smb2_negotiate_rsp *rsp = work->response_buf; 1069 1074 int rc = 0; 1075 + unsigned int smb2_buf_len, smb2_neg_size; 1070 1076 __le32 status; 1071 1077 1072 1078 ksmbd_debug(SMB, "Received negotiate request\n"); ··· 1083 1087 rsp->hdr.Status = STATUS_INVALID_PARAMETER; 1084 1088 rc = -EINVAL; 1085 1089 goto err_out; 1090 + } 1091 + 1092 + smb2_buf_len = get_rfc1002_len(work->request_buf); 1093 + smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects) - 4; 1094 + if (smb2_neg_size > smb2_buf_len) { 1095 + rsp->hdr.Status = STATUS_INVALID_PARAMETER; 1096 + rc = -EINVAL; 1097 + goto err_out; 1098 + } 1099 + 1100 + if (conn->dialect == SMB311_PROT_ID) { 1101 + unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset); 1102 + 1103 + if (smb2_buf_len < nego_ctxt_off) { 1104 + rsp->hdr.Status = STATUS_INVALID_PARAMETER; 1105 + rc = -EINVAL; 1106 + goto err_out; 1107 + } 1108 + 1109 + if (smb2_neg_size > nego_ctxt_off) { 1110 + rsp->hdr.Status = STATUS_INVALID_PARAMETER; 1111 + rc = -EINVAL; 1112 + goto err_out; 1113 + } 1114 + 1115 + if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) > 1116 + nego_ctxt_off) { 1117 + rsp->hdr.Status = STATUS_INVALID_PARAMETER; 1118 + rc = -EINVAL; 1119 + goto err_out; 1120 + } 1121 + } else { 1122 + if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) > 1123 + smb2_buf_len) { 1124 + rsp->hdr.Status = STATUS_INVALID_PARAMETER; 1125 + rc = -EINVAL; 1126 + goto err_out; 1127 + } 1086 1128 } 1087 1129 1088 1130 conn->cli_cap = le32_to_cpu(req->Capabilities); ··· 2141 2107 * smb2_set_ea() - handler for setting extended attributes using set 2142 2108 * info command 2143 2109 * @eabuf: set info command buffer 2110 + * @buf_len: set info command buffer length 
2144 2111 * @path: dentry path for get ea 2145 2112 * 2146 2113 * Return: 0 on success, otherwise error 2147 2114 */ 2148 - static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path) 2115 + static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len, 2116 + struct path *path) 2149 2117 { 2150 2118 struct user_namespace *user_ns = mnt_user_ns(path->mnt); 2151 2119 char *attr_name = NULL, *value; 2152 2120 int rc = 0; 2153 - int next = 0; 2121 + unsigned int next = 0; 2122 + 2123 + if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + 2124 + le16_to_cpu(eabuf->EaValueLength)) 2125 + return -EINVAL; 2154 2126 2155 2127 attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL); 2156 2128 if (!attr_name) ··· 2221 2181 2222 2182 next: 2223 2183 next = le32_to_cpu(eabuf->NextEntryOffset); 2184 + if (next == 0 || buf_len < next) 2185 + break; 2186 + buf_len -= next; 2224 2187 eabuf = (struct smb2_ea_info *)((char *)eabuf + next); 2188 + if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength)) 2189 + break; 2190 + 2225 2191 } while (next != 0); 2226 2192 2227 2193 kfree(attr_name); ··· 2398 2352 return rc; 2399 2353 } 2400 2354 2401 - rc = ksmbd_vfs_kern_path(name, 0, path, 0); 2355 + rc = ksmbd_vfs_kern_path(work, name, 0, path, 0); 2402 2356 if (rc) { 2403 2357 pr_err("cannot get linux path (%s), err = %d\n", 2404 2358 name, rc); ··· 2427 2381 ksmbd_debug(SMB, 2428 2382 "Set ACLs using SMB2_CREATE_SD_BUFFER context\n"); 2429 2383 sd_buf = (struct create_sd_buf_req *)context; 2384 + if (le16_to_cpu(context->DataOffset) + 2385 + le32_to_cpu(context->DataLength) < 2386 + sizeof(struct create_sd_buf_req)) 2387 + return -EINVAL; 2430 2388 return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd, 2431 2389 le32_to_cpu(sd_buf->ccontext.DataLength), true); 2432 2390 } ··· 2477 2427 struct oplock_info *opinfo; 2478 2428 __le32 *next_ptr = NULL; 2479 2429 int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0; 2480 - 
int rc = 0, len = 0; 2430 + int rc = 0; 2481 2431 int contxt_cnt = 0, query_disk_id = 0; 2482 2432 int maximal_access_ctxt = 0, posix_ctxt = 0; 2483 2433 int s_type = 0; ··· 2549 2499 goto err_out1; 2550 2500 } 2551 2501 } else { 2552 - len = strlen(share->path); 2553 - ksmbd_debug(SMB, "share path len %d\n", len); 2554 - name = kmalloc(len + 1, GFP_KERNEL); 2502 + name = kstrdup("", GFP_KERNEL); 2555 2503 if (!name) { 2556 - rsp->hdr.Status = STATUS_NO_MEMORY; 2557 2504 rc = -ENOMEM; 2558 2505 goto err_out1; 2559 2506 } 2560 - 2561 - memcpy(name, share->path, len); 2562 - *(name + len) = '\0'; 2563 2507 } 2564 2508 2565 2509 req_op_level = req->RequestedOplockLevel; ··· 2625 2581 goto err_out1; 2626 2582 } else if (context) { 2627 2583 ea_buf = (struct create_ea_buf_req *)context; 2584 + if (le16_to_cpu(context->DataOffset) + 2585 + le32_to_cpu(context->DataLength) < 2586 + sizeof(struct create_ea_buf_req)) { 2587 + rc = -EINVAL; 2588 + goto err_out1; 2589 + } 2628 2590 if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) { 2629 2591 rsp->hdr.Status = STATUS_ACCESS_DENIED; 2630 2592 rc = -EACCES; ··· 2669 2619 } else if (context) { 2670 2620 struct create_posix *posix = 2671 2621 (struct create_posix *)context; 2622 + if (le16_to_cpu(context->DataOffset) + 2623 + le32_to_cpu(context->DataLength) < 2624 + sizeof(struct create_posix)) { 2625 + rc = -EINVAL; 2626 + goto err_out1; 2627 + } 2672 2628 ksmbd_debug(SMB, "get posix context\n"); 2673 2629 2674 2630 posix_mode = le32_to_cpu(posix->Mode); ··· 2688 2632 goto err_out1; 2689 2633 } 2690 2634 2691 - if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) { 2692 - /* 2693 - * On delete request, instead of following up, need to 2694 - * look the current entity 2695 - */ 2696 - rc = ksmbd_vfs_kern_path(name, 0, &path, 1); 2697 - if (!rc) { 2635 + rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1); 2636 + if (!rc) { 2637 + if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) { 2698 2638 /* 2699 2639 * If file 
exists with under flags, return access 2700 2640 * denied error. ··· 2709 2657 path_put(&path); 2710 2658 goto err_out; 2711 2659 } 2712 - } 2713 - } else { 2714 - if (test_share_config_flag(work->tcon->share_conf, 2715 - KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) { 2716 - /* 2717 - * Use LOOKUP_FOLLOW to follow the path of 2718 - * symlink in path buildup 2719 - */ 2720 - rc = ksmbd_vfs_kern_path(name, LOOKUP_FOLLOW, &path, 1); 2721 - if (rc) { /* Case for broken link ?*/ 2722 - rc = ksmbd_vfs_kern_path(name, 0, &path, 1); 2723 - } 2724 - } else { 2725 - rc = ksmbd_vfs_kern_path(name, 0, &path, 1); 2726 - if (!rc && d_is_symlink(path.dentry)) { 2727 - rc = -EACCES; 2728 - path_put(&path); 2729 - goto err_out; 2730 - } 2660 + } else if (d_is_symlink(path.dentry)) { 2661 + rc = -EACCES; 2662 + path_put(&path); 2663 + goto err_out; 2731 2664 } 2732 2665 } 2733 2666 2734 2667 if (rc) { 2735 - if (rc == -EACCES) { 2736 - ksmbd_debug(SMB, 2737 - "User does not have right permission\n"); 2668 + if (rc != -ENOENT) 2738 2669 goto err_out; 2739 - } 2740 2670 ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n", 2741 2671 name, rc); 2742 2672 rc = 0; ··· 2824 2790 created = true; 2825 2791 user_ns = mnt_user_ns(path.mnt); 2826 2792 if (ea_buf) { 2827 - rc = smb2_set_ea(&ea_buf->ea, &path); 2793 + if (le32_to_cpu(ea_buf->ccontext.DataLength) < 2794 + sizeof(struct smb2_ea_info)) { 2795 + rc = -EINVAL; 2796 + goto err_out; 2797 + } 2798 + 2799 + rc = smb2_set_ea(&ea_buf->ea, 2800 + le32_to_cpu(ea_buf->ccontext.DataLength), 2801 + &path); 2828 2802 if (rc == -EOPNOTSUPP) 2829 2803 rc = 0; 2830 2804 else if (rc) ··· 3065 3023 rc = PTR_ERR(az_req); 3066 3024 goto err_out; 3067 3025 } else if (az_req) { 3068 - loff_t alloc_size = le64_to_cpu(az_req->AllocationSize); 3026 + loff_t alloc_size; 3069 3027 int err; 3070 3028 3029 + if (le16_to_cpu(az_req->ccontext.DataOffset) + 3030 + le32_to_cpu(az_req->ccontext.DataLength) < 3031 + sizeof(struct create_alloc_size_req)) { 3032 + rc = 
-EINVAL; 3033 + goto err_out; 3034 + } 3035 + alloc_size = le64_to_cpu(az_req->AllocationSize); 3071 3036 ksmbd_debug(SMB, 3072 3037 "request smb2 create allocate size : %llu\n", 3073 3038 alloc_size); ··· 3229 3180 rsp->hdr.Status = STATUS_INVALID_PARAMETER; 3230 3181 else if (rc == -EOPNOTSUPP) 3231 3182 rsp->hdr.Status = STATUS_NOT_SUPPORTED; 3232 - else if (rc == -EACCES || rc == -ESTALE) 3183 + else if (rc == -EACCES || rc == -ESTALE || rc == -EXDEV) 3233 3184 rsp->hdr.Status = STATUS_ACCESS_DENIED; 3234 3185 else if (rc == -ENOENT) 3235 3186 rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID; ··· 4243 4194 static int get_file_basic_info(struct smb2_query_info_rsp *rsp, 4244 4195 struct ksmbd_file *fp, void *rsp_org) 4245 4196 { 4246 - struct smb2_file_all_info *basic_info; 4197 + struct smb2_file_basic_info *basic_info; 4247 4198 struct kstat stat; 4248 4199 u64 time; 4249 4200 ··· 4253 4204 return -EACCES; 4254 4205 } 4255 4206 4256 - basic_info = (struct smb2_file_all_info *)rsp->Buffer; 4207 + basic_info = (struct smb2_file_basic_info *)rsp->Buffer; 4257 4208 generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp), 4258 4209 &stat); 4259 4210 basic_info->CreationTime = cpu_to_le64(fp->create_time); ··· 4266 4217 basic_info->Attributes = fp->f_ci->m_fattr; 4267 4218 basic_info->Pad1 = 0; 4268 4219 rsp->OutputBufferLength = 4269 - cpu_to_le32(offsetof(struct smb2_file_all_info, AllocationSize)); 4270 - inc_rfc1001_len(rsp_org, offsetof(struct smb2_file_all_info, 4271 - AllocationSize)); 4220 + cpu_to_le32(sizeof(struct smb2_file_basic_info)); 4221 + inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info)); 4272 4222 return 0; 4273 4223 } 4274 4224 ··· 4344 4296 return -EACCES; 4345 4297 } 4346 4298 4347 - filename = convert_to_nt_pathname(fp->filename, 4348 - work->tcon->share_conf->path); 4299 + filename = convert_to_nt_pathname(fp->filename); 4349 4300 if (!filename) 4350 4301 return -ENOMEM; 4351 4302 ··· 4475 4428 file_info->NextEntryOffset = 
cpu_to_le32(next); 4476 4429 } 4477 4430 4478 - if (nbytes) { 4431 + if (!S_ISDIR(stat.mode)) { 4479 4432 file_info = (struct smb2_file_stream_info *) 4480 4433 &rsp->Buffer[nbytes]; 4481 4434 streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName, 4482 4435 "::$DATA", 7, conn->local_nls, 0); 4483 4436 streamlen *= 2; 4484 4437 file_info->StreamNameLength = cpu_to_le32(streamlen); 4485 - file_info->StreamSize = S_ISDIR(stat.mode) ? 0 : 4486 - cpu_to_le64(stat.size); 4487 - file_info->StreamAllocationSize = S_ISDIR(stat.mode) ? 0 : 4488 - cpu_to_le64(stat.size); 4438 + file_info->StreamSize = 0; 4439 + file_info->StreamAllocationSize = 0; 4489 4440 nbytes += sizeof(struct smb2_file_stream_info) + streamlen; 4490 4441 } 4491 4442 ··· 4798 4753 struct path path; 4799 4754 int rc = 0, len; 4800 4755 int fs_infoclass_size = 0; 4801 - int lookup_flags = 0; 4802 4756 4803 - if (test_share_config_flag(share, KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) 4804 - lookup_flags = LOOKUP_FOLLOW; 4805 - 4806 - rc = ksmbd_vfs_kern_path(share->path, lookup_flags, &path, 0); 4757 + rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path); 4807 4758 if (rc) { 4808 4759 pr_err("cannot create vfs path\n"); 4809 4760 return -EIO; ··· 5348 5307 goto out; 5349 5308 5350 5309 len = strlen(new_name); 5351 - if (new_name[len - 1] != '/') { 5310 + if (len > 0 && new_name[len - 1] != '/') { 5352 5311 pr_err("not allow base filename in rename\n"); 5353 5312 rc = -ESHARE; 5354 5313 goto out; ··· 5376 5335 } 5377 5336 5378 5337 ksmbd_debug(SMB, "new name %s\n", new_name); 5379 - rc = ksmbd_vfs_kern_path(new_name, 0, &path, 1); 5380 - if (rc) 5338 + rc = ksmbd_vfs_kern_path(work, new_name, LOOKUP_NO_SYMLINKS, &path, 1); 5339 + if (rc) { 5340 + if (rc != -ENOENT) 5341 + goto out; 5381 5342 file_present = false; 5382 - else 5343 + } else { 5383 5344 path_put(&path); 5345 + } 5384 5346 5385 5347 if (ksmbd_share_veto_filename(share, new_name)) { 5386 5348 rc = -ENOENT; ··· 5423 5379 static int 
smb2_create_link(struct ksmbd_work *work, 5424 5380 struct ksmbd_share_config *share, 5425 5381 struct smb2_file_link_info *file_info, 5426 - struct file *filp, 5382 + unsigned int buf_len, struct file *filp, 5427 5383 struct nls_table *local_nls) 5428 5384 { 5429 5385 char *link_name = NULL, *target_name = NULL, *pathname = NULL; 5430 5386 struct path path; 5431 5387 bool file_present = true; 5432 5388 int rc; 5389 + 5390 + if (buf_len < (u64)sizeof(struct smb2_file_link_info) + 5391 + le32_to_cpu(file_info->FileNameLength)) 5392 + return -EINVAL; 5433 5393 5434 5394 ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n"); 5435 5395 pathname = kmalloc(PATH_MAX, GFP_KERNEL); ··· 5457 5409 } 5458 5410 5459 5411 ksmbd_debug(SMB, "target name is %s\n", target_name); 5460 - rc = ksmbd_vfs_kern_path(link_name, 0, &path, 0); 5461 - if (rc) 5412 + rc = ksmbd_vfs_kern_path(work, link_name, LOOKUP_NO_SYMLINKS, &path, 0); 5413 + if (rc) { 5414 + if (rc != -ENOENT) 5415 + goto out; 5462 5416 file_present = false; 5463 - else 5417 + } else { 5464 5418 path_put(&path); 5419 + } 5465 5420 5466 5421 if (file_info->ReplaceIfExists) { 5467 5422 if (file_present) { ··· 5494 5443 return rc; 5495 5444 } 5496 5445 5497 - static int set_file_basic_info(struct ksmbd_file *fp, char *buf, 5446 + static int set_file_basic_info(struct ksmbd_file *fp, 5447 + struct smb2_file_basic_info *file_info, 5498 5448 struct ksmbd_share_config *share) 5499 5449 { 5500 - struct smb2_file_all_info *file_info; 5501 5450 struct iattr attrs; 5502 5451 struct timespec64 ctime; 5503 5452 struct file *filp; ··· 5508 5457 if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE)) 5509 5458 return -EACCES; 5510 5459 5511 - file_info = (struct smb2_file_all_info *)buf; 5512 5460 attrs.ia_valid = 0; 5513 5461 filp = fp->filp; 5514 5462 inode = file_inode(filp); ··· 5584 5534 } 5585 5535 5586 5536 static int set_file_allocation_info(struct ksmbd_work *work, 5587 - struct ksmbd_file *fp, char *buf) 5537 + struct ksmbd_file *fp, 5538 
+ struct smb2_file_alloc_info *file_alloc_info) 5588 5539 { 5589 5540 /* 5590 5541 * TODO : It's working fine only when store dos attributes ··· 5593 5542 * properly with any smb.conf option 5594 5543 */ 5595 5544 5596 - struct smb2_file_alloc_info *file_alloc_info; 5597 5545 loff_t alloc_blks; 5598 5546 struct inode *inode; 5599 5547 int rc; ··· 5600 5550 if (!(fp->daccess & FILE_WRITE_DATA_LE)) 5601 5551 return -EACCES; 5602 5552 5603 - file_alloc_info = (struct smb2_file_alloc_info *)buf; 5604 5553 alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9; 5605 5554 inode = file_inode(fp->filp); 5606 5555 ··· 5622 5573 * inode size is retained by backup inode size. 5623 5574 */ 5624 5575 size = i_size_read(inode); 5625 - rc = ksmbd_vfs_truncate(work, NULL, fp, alloc_blks * 512); 5576 + rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512); 5626 5577 if (rc) { 5627 5578 pr_err("truncate failed! filename : %s, err %d\n", 5628 5579 fp->filename, rc); ··· 5635 5586 } 5636 5587 5637 5588 static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp, 5638 - char *buf) 5589 + struct smb2_file_eof_info *file_eof_info) 5639 5590 { 5640 - struct smb2_file_eof_info *file_eof_info; 5641 5591 loff_t newsize; 5642 5592 struct inode *inode; 5643 5593 int rc; ··· 5644 5596 if (!(fp->daccess & FILE_WRITE_DATA_LE)) 5645 5597 return -EACCES; 5646 5598 5647 - file_eof_info = (struct smb2_file_eof_info *)buf; 5648 5599 newsize = le64_to_cpu(file_eof_info->EndOfFile); 5649 5600 inode = file_inode(fp->filp); 5650 5601 ··· 5657 5610 if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) { 5658 5611 ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n", 5659 5612 fp->filename, newsize); 5660 - rc = ksmbd_vfs_truncate(work, NULL, fp, newsize); 5613 + rc = ksmbd_vfs_truncate(work, fp, newsize); 5661 5614 if (rc) { 5662 5615 ksmbd_debug(SMB, "truncate failed! 
filename : %s err %d\n", 5663 5616 fp->filename, rc); ··· 5670 5623 } 5671 5624 5672 5625 static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp, 5673 - char *buf) 5626 + struct smb2_file_rename_info *rename_info, 5627 + unsigned int buf_len) 5674 5628 { 5675 5629 struct user_namespace *user_ns; 5676 5630 struct ksmbd_file *parent_fp; ··· 5683 5635 pr_err("no right to delete : 0x%x\n", fp->daccess); 5684 5636 return -EACCES; 5685 5637 } 5638 + 5639 + if (buf_len < (u64)sizeof(struct smb2_file_rename_info) + 5640 + le32_to_cpu(rename_info->FileNameLength)) 5641 + return -EINVAL; 5686 5642 5687 5643 user_ns = file_mnt_user_ns(fp->filp); 5688 5644 if (ksmbd_stream_fd(fp)) ··· 5710 5658 } 5711 5659 } 5712 5660 next: 5713 - return smb2_rename(work, fp, user_ns, 5714 - (struct smb2_file_rename_info *)buf, 5661 + return smb2_rename(work, fp, user_ns, rename_info, 5715 5662 work->sess->conn->local_nls); 5716 5663 } 5717 5664 5718 - static int set_file_disposition_info(struct ksmbd_file *fp, char *buf) 5665 + static int set_file_disposition_info(struct ksmbd_file *fp, 5666 + struct smb2_file_disposition_info *file_info) 5719 5667 { 5720 - struct smb2_file_disposition_info *file_info; 5721 5668 struct inode *inode; 5722 5669 5723 5670 if (!(fp->daccess & FILE_DELETE_LE)) { ··· 5725 5674 } 5726 5675 5727 5676 inode = file_inode(fp->filp); 5728 - file_info = (struct smb2_file_disposition_info *)buf; 5729 5677 if (file_info->DeletePending) { 5730 5678 if (S_ISDIR(inode->i_mode) && 5731 5679 ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY) ··· 5736 5686 return 0; 5737 5687 } 5738 5688 5739 - static int set_file_position_info(struct ksmbd_file *fp, char *buf) 5689 + static int set_file_position_info(struct ksmbd_file *fp, 5690 + struct smb2_file_pos_info *file_info) 5740 5691 { 5741 - struct smb2_file_pos_info *file_info; 5742 5692 loff_t current_byte_offset; 5743 5693 unsigned long sector_size; 5744 5694 struct inode *inode; 5745 5695 5746 5696 inode = 
file_inode(fp->filp); 5747 - file_info = (struct smb2_file_pos_info *)buf; 5748 5697 current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset); 5749 5698 sector_size = inode->i_sb->s_blocksize; 5750 5699 ··· 5759 5710 return 0; 5760 5711 } 5761 5712 5762 - static int set_file_mode_info(struct ksmbd_file *fp, char *buf) 5713 + static int set_file_mode_info(struct ksmbd_file *fp, 5714 + struct smb2_file_mode_info *file_info) 5763 5715 { 5764 - struct smb2_file_mode_info *file_info; 5765 5716 __le32 mode; 5766 5717 5767 - file_info = (struct smb2_file_mode_info *)buf; 5768 5718 mode = file_info->Mode; 5769 5719 5770 5720 if ((mode & ~FILE_MODE_INFO_MASK) || ··· 5793 5745 * TODO: need to implement an error handling for STATUS_INFO_LENGTH_MISMATCH 5794 5746 */ 5795 5747 static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, 5796 - int info_class, char *buf, 5748 + struct smb2_set_info_req *req, 5797 5749 struct ksmbd_share_config *share) 5798 5750 { 5799 - switch (info_class) { 5751 + unsigned int buf_len = le32_to_cpu(req->BufferLength); 5752 + 5753 + switch (req->FileInfoClass) { 5800 5754 case FILE_BASIC_INFORMATION: 5801 - return set_file_basic_info(fp, buf, share); 5755 + { 5756 + if (buf_len < sizeof(struct smb2_file_basic_info)) 5757 + return -EINVAL; 5802 5758 5759 + return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share); 5760 + } 5803 5761 case FILE_ALLOCATION_INFORMATION: 5804 - return set_file_allocation_info(work, fp, buf); 5762 + { 5763 + if (buf_len < sizeof(struct smb2_file_alloc_info)) 5764 + return -EINVAL; 5805 5765 5766 + return set_file_allocation_info(work, fp, 5767 + (struct smb2_file_alloc_info *)req->Buffer); 5768 + } 5806 5769 case FILE_END_OF_FILE_INFORMATION: 5807 - return set_end_of_file_info(work, fp, buf); 5770 + { 5771 + if (buf_len < sizeof(struct smb2_file_eof_info)) 5772 + return -EINVAL; 5808 5773 5774 + return set_end_of_file_info(work, fp, 5775 + (struct smb2_file_eof_info 
*)req->Buffer); 5776 + } 5809 5777 case FILE_RENAME_INFORMATION: 5778 + { 5810 5779 if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) { 5811 5780 ksmbd_debug(SMB, 5812 5781 "User does not have write permission\n"); 5813 5782 return -EACCES; 5814 5783 } 5815 - return set_rename_info(work, fp, buf); 5816 5784 5785 + if (buf_len < sizeof(struct smb2_file_rename_info)) 5786 + return -EINVAL; 5787 + 5788 + return set_rename_info(work, fp, 5789 + (struct smb2_file_rename_info *)req->Buffer, 5790 + buf_len); 5791 + } 5817 5792 case FILE_LINK_INFORMATION: 5818 - return smb2_create_link(work, work->tcon->share_conf, 5819 - (struct smb2_file_link_info *)buf, fp->filp, 5820 - work->sess->conn->local_nls); 5793 + { 5794 + if (buf_len < sizeof(struct smb2_file_link_info)) 5795 + return -EINVAL; 5821 5796 5797 + return smb2_create_link(work, work->tcon->share_conf, 5798 + (struct smb2_file_link_info *)req->Buffer, 5799 + buf_len, fp->filp, 5800 + work->sess->conn->local_nls); 5801 + } 5822 5802 case FILE_DISPOSITION_INFORMATION: 5803 + { 5823 5804 if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) { 5824 5805 ksmbd_debug(SMB, 5825 5806 "User does not have write permission\n"); 5826 5807 return -EACCES; 5827 5808 } 5828 - return set_file_disposition_info(fp, buf); 5829 5809 5810 + if (buf_len < sizeof(struct smb2_file_disposition_info)) 5811 + return -EINVAL; 5812 + 5813 + return set_file_disposition_info(fp, 5814 + (struct smb2_file_disposition_info *)req->Buffer); 5815 + } 5830 5816 case FILE_FULL_EA_INFORMATION: 5831 5817 { 5832 5818 if (!(fp->daccess & FILE_WRITE_EA_LE)) { ··· 5869 5787 return -EACCES; 5870 5788 } 5871 5789 5872 - return smb2_set_ea((struct smb2_ea_info *)buf, 5873 - &fp->filp->f_path); 5874 - } 5790 + if (buf_len < sizeof(struct smb2_ea_info)) 5791 + return -EINVAL; 5875 5792 5793 + return smb2_set_ea((struct smb2_ea_info *)req->Buffer, 5794 + buf_len, &fp->filp->f_path); 5795 + } 5876 5796 case FILE_POSITION_INFORMATION: 
5877 - return set_file_position_info(fp, buf); 5797 + { 5798 + if (buf_len < sizeof(struct smb2_file_pos_info)) 5799 + return -EINVAL; 5878 5800 5801 + return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer); 5802 + } 5879 5803 case FILE_MODE_INFORMATION: 5880 - return set_file_mode_info(fp, buf); 5804 + { 5805 + if (buf_len < sizeof(struct smb2_file_mode_info)) 5806 + return -EINVAL; 5807 + 5808 + return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer); 5809 + } 5881 5810 } 5882 5811 5883 - pr_err("Unimplemented Fileinfoclass :%d\n", info_class); 5812 + pr_err("Unimplemented Fileinfoclass :%d\n", req->FileInfoClass); 5884 5813 return -EOPNOTSUPP; 5885 5814 } 5886 5815 ··· 5952 5859 switch (req->InfoType) { 5953 5860 case SMB2_O_INFO_FILE: 5954 5861 ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n"); 5955 - rc = smb2_set_info_file(work, fp, req->FileInfoClass, 5956 - req->Buffer, work->tcon->share_conf); 5862 + rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf); 5957 5863 break; 5958 5864 case SMB2_O_INFO_SECURITY: 5959 5865 ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n"); ··· 5979 5887 return 0; 5980 5888 5981 5889 err_out: 5982 - if (rc == -EACCES || rc == -EPERM) 5890 + if (rc == -EACCES || rc == -EPERM || rc == -EXDEV) 5983 5891 rsp->hdr.Status = STATUS_ACCESS_DENIED; 5984 5892 else if (rc == -EINVAL) 5985 5893 rsp->hdr.Status = STATUS_INVALID_PARAMETER; ··· 8306 8214 8307 8215 WORK_BUFFERS(work, req, rsp); 8308 8216 8309 - if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE) 8217 + if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE && 8218 + conn->preauth_info) 8310 8219 ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp, 8311 8220 conn->preauth_info->Preauth_HashValue); 8312 8221 ··· 8414 8321 unsigned int buf_data_size = pdu_length + 4 - 8415 8322 sizeof(struct smb2_transform_hdr); 8416 8323 struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf; 8417 - unsigned int orig_len = 
le32_to_cpu(tr_hdr->OriginalMessageSize); 8418 8324 int rc = 0; 8419 - 8420 - sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId)); 8421 - if (!sess) { 8422 - pr_err("invalid session id(%llx) in transform header\n", 8423 - le64_to_cpu(tr_hdr->SessionId)); 8424 - return -ECONNABORTED; 8425 - } 8426 8325 8427 8326 if (pdu_length + 4 < 8428 8327 sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) { ··· 8423 8338 return -ECONNABORTED; 8424 8339 } 8425 8340 8426 - if (pdu_length + 4 < orig_len + sizeof(struct smb2_transform_hdr)) { 8341 + if (pdu_length + 4 < 8342 + le32_to_cpu(tr_hdr->OriginalMessageSize) + sizeof(struct smb2_transform_hdr)) { 8427 8343 pr_err("Transform message is broken\n"); 8344 + return -ECONNABORTED; 8345 + } 8346 + 8347 + sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId)); 8348 + if (!sess) { 8349 + pr_err("invalid session id(%llx) in transform header\n", 8350 + le64_to_cpu(tr_hdr->SessionId)); 8428 8351 return -ECONNABORTED; 8429 8352 } 8430 8353
+9
fs/ksmbd/smb2pdu.h
··· 1464 1464 char FileName[1]; 1465 1465 } __packed; /* level 18 Query */ 1466 1466 1467 + struct smb2_file_basic_info { /* data block encoding of response to level 18 */ 1468 + __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */ 1469 + __le64 LastAccessTime; 1470 + __le64 LastWriteTime; 1471 + __le64 ChangeTime; 1472 + __le32 Attributes; 1473 + __u32 Pad1; /* End of FILE_BASIC_INFO_INFO equivalent */ 1474 + } __packed; 1475 + 1467 1476 struct smb2_file_alt_name_info { 1468 1477 __le32 FileNameLength; 1469 1478 char FileName[0];
+37 -23
fs/ksmbd/smb_common.c
··· 129 129 * 130 130 * check for valid smb signature and packet direction(request/response) 131 131 * 132 - * Return: 0 on success, otherwise 1 132 + * Return: 0 on success, otherwise -EINVAL 133 133 */ 134 134 int ksmbd_verify_smb_message(struct ksmbd_work *work) 135 135 { 136 - struct smb2_hdr *smb2_hdr = work->request_buf; 136 + struct smb2_hdr *smb2_hdr = work->request_buf + work->next_smb2_rcv_hdr_off; 137 + struct smb_hdr *hdr; 137 138 138 139 if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER) 139 140 return ksmbd_smb2_check_message(work); 140 141 141 - return 0; 142 + hdr = work->request_buf; 143 + if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER && 144 + hdr->Command == SMB_COM_NEGOTIATE) 145 + return 0; 146 + 147 + return -EINVAL; 142 148 } 143 149 144 150 /** ··· 155 149 */ 156 150 bool ksmbd_smb_request(struct ksmbd_conn *conn) 157 151 { 158 - int type = *(char *)conn->request_buf; 159 - 160 - switch (type) { 161 - case RFC1002_SESSION_MESSAGE: 162 - /* Regular SMB request */ 163 - return true; 164 - case RFC1002_SESSION_KEEP_ALIVE: 165 - ksmbd_debug(SMB, "RFC 1002 session keep alive\n"); 166 - break; 167 - default: 168 - ksmbd_debug(SMB, "RFC 1002 unknown request type 0x%x\n", type); 169 - } 170 - 171 - return false; 152 + return conn->request_buf[0] == 0; 172 153 } 173 154 174 155 static bool supported_protocol(int idx) ··· 169 176 idx <= server_conf.max_protocol); 170 177 } 171 178 172 - static char *next_dialect(char *dialect, int *next_off) 179 + static char *next_dialect(char *dialect, int *next_off, int bcount) 173 180 { 174 181 dialect = dialect + *next_off; 175 - *next_off = strlen(dialect); 182 + *next_off = strnlen(dialect, bcount); 183 + if (dialect[*next_off] != '\0') 184 + return NULL; 176 185 return dialect; 177 186 } 178 187 ··· 189 194 dialect = cli_dialects; 190 195 bcount = le16_to_cpu(byte_count); 191 196 do { 192 - dialect = next_dialect(dialect, &next); 197 + dialect = next_dialect(dialect, &next, bcount); 198 + if (!dialect) 199 + 
break; 193 200 ksmbd_debug(SMB, "client requested dialect %s\n", 194 201 dialect); 195 202 if (!strcmp(dialect, smb1_protos[i].name)) { ··· 239 242 240 243 static int ksmbd_negotiate_smb_dialect(void *buf) 241 244 { 242 - __le32 proto; 245 + int smb_buf_length = get_rfc1002_len(buf); 246 + __le32 proto = ((struct smb2_hdr *)buf)->ProtocolId; 243 247 244 - proto = ((struct smb2_hdr *)buf)->ProtocolId; 245 248 if (proto == SMB2_PROTO_NUMBER) { 246 249 struct smb2_negotiate_req *req; 250 + int smb2_neg_size = 251 + offsetof(struct smb2_negotiate_req, Dialects) - 4; 247 252 248 253 req = (struct smb2_negotiate_req *)buf; 254 + if (smb2_neg_size > smb_buf_length) 255 + goto err_out; 256 + 257 + if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) > 258 + smb_buf_length) 259 + goto err_out; 260 + 249 261 return ksmbd_lookup_dialect_by_id(req->Dialects, 250 262 req->DialectCount); 251 263 } ··· 264 258 struct smb_negotiate_req *req; 265 259 266 260 req = (struct smb_negotiate_req *)buf; 261 + if (le16_to_cpu(req->ByteCount) < 2) 262 + goto err_out; 263 + 264 + if (offsetof(struct smb_negotiate_req, DialectsArray) - 4 + 265 + le16_to_cpu(req->ByteCount) > smb_buf_length) { 266 + goto err_out; 267 + } 268 + 267 269 return ksmbd_lookup_dialect_by_name(req->DialectsArray, 268 270 req->ByteCount); 269 271 } 270 272 273 + err_out: 271 274 return BAD_PROT_ID; 272 275 } 273 276 274 - #define SMB_COM_NEGOTIATE 0x72 275 277 int ksmbd_init_smb_server(struct ksmbd_work *work) 276 278 { 277 279 struct ksmbd_conn *conn = work->conn;
+1 -8
fs/ksmbd/smb_common.h
··· 48 48 #define CIFS_DEFAULT_IOSIZE (64 * 1024) 49 49 #define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */ 50 50 51 - /* RFC 1002 session packet types */ 52 - #define RFC1002_SESSION_MESSAGE 0x00 53 - #define RFC1002_SESSION_REQUEST 0x81 54 - #define RFC1002_POSITIVE_SESSION_RESPONSE 0x82 55 - #define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83 56 - #define RFC1002_RETARGET_SESSION_RESPONSE 0x84 57 - #define RFC1002_SESSION_KEEP_ALIVE 0x85 58 - 59 51 /* Responses when opening a file. */ 60 52 #define F_SUPERSEDED 0 61 53 #define F_OPENED 1 ··· 202 210 FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES) 203 211 204 212 #define SMB1_PROTO_NUMBER cpu_to_le32(0x424d53ff) 213 + #define SMB_COM_NEGOTIATE 0x72 205 214 206 215 #define SMB1_CLIENT_GUID_SIZE (16) 207 216 struct smb_hdr {
+19 -2
fs/ksmbd/smbacl.c
··· 380 380 { 381 381 int i, ret; 382 382 int num_aces = 0; 383 - int acl_size; 383 + unsigned int acl_size; 384 384 char *acl_base; 385 385 struct smb_ace **ppace; 386 386 struct posix_acl_entry *cf_pace, *cf_pdace; ··· 392 392 return; 393 393 394 394 /* validate that we do not go past end of acl */ 395 - if (end_of_acl <= (char *)pdacl || 395 + if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) || 396 396 end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { 397 397 pr_err("ACL too small to parse DACL\n"); 398 398 return; ··· 431 431 * user/group/other have no permissions 432 432 */ 433 433 for (i = 0; i < num_aces; ++i) { 434 + if (end_of_acl - acl_base < acl_size) 435 + break; 436 + 434 437 ppace[i] = (struct smb_ace *)(acl_base + acl_size); 435 438 acl_base = (char *)ppace[i]; 439 + acl_size = offsetof(struct smb_ace, sid) + 440 + offsetof(struct smb_sid, sub_auth); 441 + 442 + if (end_of_acl - acl_base < acl_size || 443 + ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES || 444 + (end_of_acl - acl_base < 445 + acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) || 446 + (le16_to_cpu(ppace[i]->size) < 447 + acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth)) 448 + break; 449 + 436 450 acl_size = le16_to_cpu(ppace[i]->size); 437 451 ppace[i]->access_req = 438 452 smb_map_generic_desired_access(ppace[i]->access_req); ··· 820 806 821 807 if (!pntsd) 822 808 return -EIO; 809 + 810 + if (acl_len < sizeof(struct smb_ntsd)) 811 + return -EINVAL; 823 812 824 813 owner_sid_ptr = (struct smb_sid *)((char *)pntsd + 825 814 le32_to_cpu(pntsd->osidoffset));
+2 -2
fs/ksmbd/transport_tcp.c
··· 215 215 * ksmbd_kthread_fn() - listen to new SMB connections and callback server 216 216 * @p: arguments to forker thread 217 217 * 218 - * Return: Returns a task_struct or ERR_PTR 218 + * Return: 0 on success, error number otherwise 219 219 */ 220 220 static int ksmbd_kthread_fn(void *p) 221 221 { ··· 387 387 /** 388 388 * create_socket - create socket for ksmbd/0 389 389 * 390 - * Return: Returns a task_struct or ERR_PTR 390 + * Return: 0 on success, error number otherwise 391 391 */ 392 392 static int create_socket(struct interface *iface) 393 393 {
+88 -94
fs/ksmbd/vfs.c
··· 19 19 #include <linux/sched/xacct.h> 20 20 #include <linux/crc32c.h> 21 21 22 + #include "../internal.h" /* for vfs_path_lookup */ 23 + 22 24 #include "glob.h" 23 25 #include "oplock.h" 24 26 #include "connection.h" ··· 46 44 p++; 47 45 } else { 48 46 p = NULL; 49 - pr_err("Invalid path %s\n", path); 50 47 } 51 48 return p; 52 49 } ··· 156 155 /** 157 156 * ksmbd_vfs_create() - vfs helper for smb create file 158 157 * @work: work 159 - * @name: file name 158 + * @name: file name that is relative to share 160 159 * @mode: file create mode 161 160 * 162 161 * Return: 0 on success, otherwise error ··· 167 166 struct dentry *dentry; 168 167 int err; 169 168 170 - dentry = kern_path_create(AT_FDCWD, name, &path, 0); 169 + dentry = ksmbd_vfs_kern_path_create(work, name, 170 + LOOKUP_NO_SYMLINKS, &path); 171 171 if (IS_ERR(dentry)) { 172 172 err = PTR_ERR(dentry); 173 173 if (err != -ENOENT) ··· 193 191 /** 194 192 * ksmbd_vfs_mkdir() - vfs helper for smb create directory 195 193 * @work: work 196 - * @name: directory name 194 + * @name: directory name that is relative to share 197 195 * @mode: directory create mode 198 196 * 199 197 * Return: 0 on success, otherwise error ··· 205 203 struct dentry *dentry; 206 204 int err; 207 205 208 - dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY); 206 + dentry = ksmbd_vfs_kern_path_create(work, name, 207 + LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY, 208 + &path); 209 209 if (IS_ERR(dentry)) { 210 210 err = PTR_ERR(dentry); 211 211 if (err != -EEXIST) ··· 582 578 583 579 /** 584 580 * ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink 585 - * @name: absolute directory or file name 581 + * @name: directory or file name that is relative to share 586 582 * 587 583 * Return: 0 on success, otherwise error 588 584 */ ··· 592 588 struct path path; 593 589 struct dentry *parent; 594 590 int err; 595 - int flags = 0; 596 591 597 592 if (ksmbd_override_fsids(work)) 598 593 return -ENOMEM; 599 594 600 - if 
(test_share_config_flag(work->tcon->share_conf, 601 - KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) 602 - flags = LOOKUP_FOLLOW; 603 - 604 - err = kern_path(name, flags, &path); 595 + err = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, false); 605 596 if (err) { 606 597 ksmbd_debug(VFS, "can't get %s, err %d\n", name, err); 607 598 ksmbd_revert_fsids(work); ··· 641 642 /** 642 643 * ksmbd_vfs_link() - vfs helper for creating smb hardlink 643 644 * @oldname: source file name 644 - * @newname: hardlink name 645 + * @newname: hardlink name that is relative to share 645 646 * 646 647 * Return: 0 on success, otherwise error 647 648 */ ··· 651 652 struct path oldpath, newpath; 652 653 struct dentry *dentry; 653 654 int err; 654 - int flags = 0; 655 655 656 656 if (ksmbd_override_fsids(work)) 657 657 return -ENOMEM; 658 658 659 - if (test_share_config_flag(work->tcon->share_conf, 660 - KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) 661 - flags = LOOKUP_FOLLOW; 662 - 663 - err = kern_path(oldname, flags, &oldpath); 659 + err = kern_path(oldname, LOOKUP_NO_SYMLINKS, &oldpath); 664 660 if (err) { 665 661 pr_err("cannot get linux path for %s, err = %d\n", 666 662 oldname, err); 667 663 goto out1; 668 664 } 669 665 670 - dentry = kern_path_create(AT_FDCWD, newname, &newpath, 671 - flags | LOOKUP_REVAL); 666 + dentry = ksmbd_vfs_kern_path_create(work, newname, 667 + LOOKUP_NO_SYMLINKS | LOOKUP_REVAL, 668 + &newpath); 672 669 if (IS_ERR(dentry)) { 673 670 err = PTR_ERR(dentry); 674 671 pr_err("path create err for %s, err %d\n", newname, err); ··· 783 788 struct dentry *src_dent, *trap_dent, *src_child; 784 789 char *dst_name; 785 790 int err; 786 - int flags; 787 791 788 792 dst_name = extract_last_component(newname); 789 - if (!dst_name) 790 - return -EINVAL; 793 + if (!dst_name) { 794 + dst_name = newname; 795 + newname = ""; 796 + } 791 797 792 798 src_dent_parent = dget_parent(fp->filp->f_path.dentry); 793 799 src_dent = fp->filp->f_path.dentry; 794 800 795 - flags = LOOKUP_DIRECTORY; 
796 - if (test_share_config_flag(work->tcon->share_conf, 797 - KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) 798 - flags |= LOOKUP_FOLLOW; 799 - 800 - err = kern_path(newname, flags, &dst_path); 801 + err = ksmbd_vfs_kern_path(work, newname, 802 + LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY, 803 + &dst_path, false); 801 804 if (err) { 802 805 ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err); 803 806 goto out; ··· 841 848 /** 842 849 * ksmbd_vfs_truncate() - vfs helper for smb file truncate 843 850 * @work: work 844 - * @name: old filename 845 851 * @fid: file id of old file 846 852 * @size: truncate to given size 847 853 * 848 854 * Return: 0 on success, otherwise error 849 855 */ 850 - int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name, 856 + int ksmbd_vfs_truncate(struct ksmbd_work *work, 851 857 struct ksmbd_file *fp, loff_t size) 852 858 { 853 - struct path path; 854 859 int err = 0; 860 + struct file *filp; 855 861 856 - if (name) { 857 - err = kern_path(name, 0, &path); 862 + filp = fp->filp; 863 + 864 + /* Do we need to break any of a levelII oplock? */ 865 + smb_break_all_levII_oplock(work, fp, 1); 866 + 867 + if (!work->tcon->posix_extensions) { 868 + struct inode *inode = file_inode(filp); 869 + 870 + if (size < inode->i_size) { 871 + err = check_lock_range(filp, size, 872 + inode->i_size - 1, WRITE); 873 + } else { 874 + err = check_lock_range(filp, inode->i_size, 875 + size - 1, WRITE); 876 + } 877 + 858 878 if (err) { 859 - pr_err("cannot get linux path for %s, err %d\n", 860 - name, err); 861 - return err; 879 + pr_err("failed due to lock\n"); 880 + return -EAGAIN; 862 881 } 863 - err = vfs_truncate(&path, size); 864 - if (err) 865 - pr_err("truncate failed for %s err %d\n", 866 - name, err); 867 - path_put(&path); 868 - } else { 869 - struct file *filp; 870 - 871 - filp = fp->filp; 872 - 873 - /* Do we need to break any of a levelII oplock? 
*/ 874 - smb_break_all_levII_oplock(work, fp, 1); 875 - 876 - if (!work->tcon->posix_extensions) { 877 - struct inode *inode = file_inode(filp); 878 - 879 - if (size < inode->i_size) { 880 - err = check_lock_range(filp, size, 881 - inode->i_size - 1, WRITE); 882 - } else { 883 - err = check_lock_range(filp, inode->i_size, 884 - size - 1, WRITE); 885 - } 886 - 887 - if (err) { 888 - pr_err("failed due to lock\n"); 889 - return -EAGAIN; 890 - } 891 - } 892 - 893 - err = vfs_truncate(&filp->f_path, size); 894 - if (err) 895 - pr_err("truncate failed for filename : %s err %d\n", 896 - fp->filename, err); 897 882 } 898 883 884 + err = vfs_truncate(&filp->f_path, size); 885 + if (err) 886 + pr_err("truncate failed for filename : %s err %d\n", 887 + fp->filename, err); 899 888 return err; 900 889 } 901 890 ··· 1195 1220 1196 1221 /** 1197 1222 * ksmbd_vfs_kern_path() - lookup a file and get path info 1198 - * @name: name of file for lookup 1223 + * @name: file path that is relative to share 1199 1224 * @flags: lookup flags 1200 1225 * @path: if lookup succeed, return path info 1201 1226 * @caseless: caseless filename lookup 1202 1227 * 1203 1228 * Return: 0 on success, otherwise error 1204 1229 */ 1205 - int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path, 1206 - bool caseless) 1230 + int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name, 1231 + unsigned int flags, struct path *path, bool caseless) 1207 1232 { 1233 + struct ksmbd_share_config *share_conf = work->tcon->share_conf; 1208 1234 int err; 1209 1235 1210 - if (name[0] != '/') 1211 - return -EINVAL; 1212 - 1213 - err = kern_path(name, flags, path); 1236 + flags |= LOOKUP_BENEATH; 1237 + err = vfs_path_lookup(share_conf->vfs_path.dentry, 1238 + share_conf->vfs_path.mnt, 1239 + name, 1240 + flags, 1241 + path); 1214 1242 if (!err) 1215 1243 return 0; 1216 1244 ··· 1227 1249 return -ENOMEM; 1228 1250 1229 1251 path_len = strlen(filepath); 1230 - remain_len = path_len - 1; 1252 + remain_len 
= path_len; 1231 1253 1232 - err = kern_path("/", flags, &parent); 1233 - if (err) 1234 - goto out; 1254 + parent = share_conf->vfs_path; 1255 + path_get(&parent); 1235 1256 1236 1257 while (d_can_lookup(parent.dentry)) { 1237 1258 char *filename = filepath + path_len - remain_len; ··· 1243 1266 1244 1267 err = ksmbd_vfs_lookup_in_dir(&parent, filename, 1245 1268 filename_len); 1246 - if (err) { 1247 - path_put(&parent); 1248 - goto out; 1249 - } 1250 - 1251 1269 path_put(&parent); 1252 - next[0] = '\0'; 1253 - 1254 - err = kern_path(filepath, flags, &parent); 1255 1270 if (err) 1256 1271 goto out; 1257 1272 1258 - if (is_last) { 1259 - path->mnt = parent.mnt; 1260 - path->dentry = parent.dentry; 1273 + next[0] = '\0'; 1274 + 1275 + err = vfs_path_lookup(share_conf->vfs_path.dentry, 1276 + share_conf->vfs_path.mnt, 1277 + filepath, 1278 + flags, 1279 + &parent); 1280 + if (err) 1281 + goto out; 1282 + else if (is_last) { 1283 + *path = parent; 1261 1284 goto out; 1262 1285 } 1263 1286 ··· 1271 1294 kfree(filepath); 1272 1295 } 1273 1296 return err; 1297 + } 1298 + 1299 + struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work, 1300 + const char *name, 1301 + unsigned int flags, 1302 + struct path *path) 1303 + { 1304 + char *abs_name; 1305 + struct dentry *dent; 1306 + 1307 + abs_name = convert_to_unix_name(work->tcon->share_conf, name); 1308 + if (!abs_name) 1309 + return ERR_PTR(-ENOMEM); 1310 + 1311 + dent = kern_path_create(AT_FDCWD, abs_name, path, flags); 1312 + kfree(abs_name); 1313 + return dent; 1274 1314 } 1275 1315 1276 1316 int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
+7 -2
fs/ksmbd/vfs.h
··· 126 126 int ksmbd_vfs_getattr(struct path *path, struct kstat *stat); 127 127 int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp, 128 128 char *newname); 129 - int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name, 129 + int ksmbd_vfs_truncate(struct ksmbd_work *work, 130 130 struct ksmbd_file *fp, loff_t size); 131 131 struct srv_copychunk; 132 132 int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work, ··· 152 152 size_t *xattr_stream_name_size, int s_type); 153 153 int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns, 154 154 struct dentry *dentry, char *attr_name); 155 - int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path, 155 + int ksmbd_vfs_kern_path(struct ksmbd_work *work, 156 + char *name, unsigned int flags, struct path *path, 156 157 bool caseless); 158 + struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work, 159 + const char *name, 160 + unsigned int flags, 161 + struct path *path); 157 162 int ksmbd_vfs_empty_dir(struct ksmbd_file *fp); 158 163 void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option); 159 164 int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
+2 -11
fs/lockd/svcxdr.h
··· 134 134 static inline bool 135 135 svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj) 136 136 { 137 - unsigned int quadlen = XDR_QUADLEN(obj->len); 138 - __be32 *p; 139 - 140 - if (xdr_stream_encode_u32(xdr, obj->len) < 0) 137 + if (obj->len > XDR_MAX_NETOBJ) 141 138 return false; 142 - p = xdr_reserve_space(xdr, obj->len); 143 - if (!p) 144 - return false; 145 - p[quadlen - 1] = 0; /* XDR pad */ 146 - memcpy(p, obj->data, obj->len); 147 - 148 - return true; 139 + return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0; 149 140 } 150 141 151 142 #endif /* _LOCKD_SVCXDR_H_ */
+1 -1
fs/netfs/read_helper.c
··· 150 150 { 151 151 struct iov_iter iter; 152 152 153 - iov_iter_xarray(&iter, WRITE, &subreq->rreq->mapping->i_pages, 153 + iov_iter_xarray(&iter, READ, &subreq->rreq->mapping->i_pages, 154 154 subreq->start + subreq->transferred, 155 155 subreq->len - subreq->transferred); 156 156 iov_iter_zero(iov_iter_count(&iter), &iter);
-1
fs/nfs_common/grace.c
··· 42 42 43 43 /** 44 44 * locks_end_grace 45 - * @net: net namespace that this lock manager belongs to 46 45 * @lm: who this grace period is for 47 46 * 48 47 * Call this function to state that the given lock manager is ready to
+1 -1
fs/nfsd/filecache.c
··· 542 542 } 543 543 544 544 /** 545 - * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file 545 + * nfsd_file_close_inode - attempt a delayed close of a nfsd_file 546 546 * @inode: inode of the file to attempt to remove 547 547 * 548 548 * Walk the whole hash bucket, looking for any files that correspond to "inode".
+13 -3
fs/nfsd/nfs4state.c
··· 3570 3570 } 3571 3571 3572 3572 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst, 3573 - struct nfsd4_session *session, u32 req) 3573 + struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn) 3574 3574 { 3575 3575 struct nfs4_client *clp = session->se_client; 3576 3576 struct svc_xprt *xpt = rqst->rq_xprt; ··· 3593 3593 else 3594 3594 status = nfserr_inval; 3595 3595 spin_unlock(&clp->cl_lock); 3596 + if (status == nfs_ok && conn) 3597 + *conn = c; 3596 3598 return status; 3597 3599 } 3598 3600 ··· 3619 3617 status = nfserr_wrong_cred; 3620 3618 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) 3621 3619 goto out; 3622 - status = nfsd4_match_existing_connection(rqstp, session, bcts->dir); 3623 - if (status == nfs_ok || status == nfserr_inval) 3620 + status = nfsd4_match_existing_connection(rqstp, session, 3621 + bcts->dir, &conn); 3622 + if (status == nfs_ok) { 3623 + if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || 3624 + bcts->dir == NFS4_CDFC4_BACK) 3625 + conn->cn_flags |= NFS4_CDFC4_BACK; 3626 + nfsd4_probe_callback(session->se_client); 3627 + goto out; 3628 + } 3629 + if (status == nfserr_inval) 3624 3630 goto out; 3625 3631 status = nfsd4_map_bcts_dir(&bcts->dir); 3626 3632 if (status)
+11 -8
fs/nfsd/nfs4xdr.c
··· 3544 3544 goto fail; 3545 3545 cd->rd_maxcount -= entry_bytes; 3546 3546 /* 3547 - * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so 3548 - * let's always let through the first entry, at least: 3547 + * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and 3548 + * notes that it could be zero. If it is zero, then the server 3549 + * should enforce only the rd_maxcount value. 3549 3550 */ 3550 - if (!cd->rd_dircount) 3551 - goto fail; 3552 - name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8; 3553 - if (name_and_cookie > cd->rd_dircount && cd->cookie_offset) 3554 - goto fail; 3555 - cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie); 3551 + if (cd->rd_dircount) { 3552 + name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8; 3553 + if (name_and_cookie > cd->rd_dircount && cd->cookie_offset) 3554 + goto fail; 3555 + cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie); 3556 + if (!cd->rd_dircount) 3557 + cd->rd_maxcount = 0; 3558 + } 3556 3559 3557 3560 cd->cookie_offset = cookie_offset; 3558 3561 skip_entry:
+5 -2
fs/nfsd/nfsctl.c
··· 793 793 svc_xprt_put(xprt); 794 794 } 795 795 out_err: 796 - nfsd_destroy(net); 796 + if (!list_empty(&nn->nfsd_serv->sv_permsocks)) 797 + nn->nfsd_serv->sv_nrthreads--; 798 + else 799 + nfsd_destroy(net); 797 800 return err; 798 801 } 799 802 ··· 1548 1545 goto out_free_all; 1549 1546 return 0; 1550 1547 out_free_all: 1551 - unregister_pernet_subsys(&nfsd_net_ops); 1548 + unregister_filesystem(&nfsd_fs_type); 1552 1549 out_free_exports: 1553 1550 remove_proc_entry("fs/nfs/exports", NULL); 1554 1551 remove_proc_entry("fs/nfs", NULL);
+2 -1
fs/ocfs2/dlmglue.c
··· 3951 3951 oi = OCFS2_I(inode); 3952 3952 oi->ip_dir_lock_gen++; 3953 3953 mlog(0, "generation: %u\n", oi->ip_dir_lock_gen); 3954 - goto out; 3954 + goto out_forget; 3955 3955 } 3956 3956 3957 3957 if (!S_ISREG(inode->i_mode)) ··· 3982 3982 filemap_fdatawait(mapping); 3983 3983 } 3984 3984 3985 + out_forget: 3985 3986 forget_all_cached_acls(inode); 3986 3987 3987 3988 out:
+7 -3
fs/overlayfs/dir.c
··· 1219 1219 goto out_dput; 1220 1220 } 1221 1221 } else { 1222 - if (!d_is_negative(newdentry) && 1223 - (!new_opaque || !ovl_is_whiteout(newdentry))) 1224 - goto out_dput; 1222 + if (!d_is_negative(newdentry)) { 1223 + if (!new_opaque || !ovl_is_whiteout(newdentry)) 1224 + goto out_dput; 1225 + } else { 1226 + if (flags & RENAME_EXCHANGE) 1227 + goto out_dput; 1228 + } 1225 1229 } 1226 1230 1227 1231 if (olddentry == trap)
+14 -1
fs/overlayfs/file.c
··· 296 296 if (ret) 297 297 return ret; 298 298 299 + ret = -EINVAL; 300 + if (iocb->ki_flags & IOCB_DIRECT && 301 + (!real.file->f_mapping->a_ops || 302 + !real.file->f_mapping->a_ops->direct_IO)) 303 + goto out_fdput; 304 + 299 305 old_cred = ovl_override_creds(file_inode(file)->i_sb); 300 306 if (is_sync_kiocb(iocb)) { 301 307 ret = vfs_iter_read(real.file, iter, &iocb->ki_pos, ··· 326 320 out: 327 321 revert_creds(old_cred); 328 322 ovl_file_accessed(file); 329 - 323 + out_fdput: 330 324 fdput(real); 331 325 332 326 return ret; ··· 354 348 ret = ovl_real_fdget(file, &real); 355 349 if (ret) 356 350 goto out_unlock; 351 + 352 + ret = -EINVAL; 353 + if (iocb->ki_flags & IOCB_DIRECT && 354 + (!real.file->f_mapping->a_ops || 355 + !real.file->f_mapping->a_ops->direct_IO)) 356 + goto out_fdput; 357 357 358 358 if (!ovl_should_sync(OVL_FS(inode->i_sb))) 359 359 ifl &= ~(IOCB_DSYNC | IOCB_SYNC); ··· 396 384 } 397 385 out: 398 386 revert_creds(old_cred); 387 + out_fdput: 399 388 fdput(real); 400 389 401 390 out_unlock:
+2 -10
fs/vboxsf/super.c
··· 21 21 22 22 #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */ 23 23 24 - #define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000') 25 - #define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377') 26 - #define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376') 27 - #define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375') 24 + static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375"; 28 25 29 26 static int follow_symlinks; 30 27 module_param(follow_symlinks, int, 0444); ··· 383 386 384 387 static int vboxsf_parse_monolithic(struct fs_context *fc, void *data) 385 388 { 386 - unsigned char *options = data; 387 - 388 - if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 && 389 - options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 && 390 - options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 && 391 - options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) { 389 + if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) { 392 390 vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n"); 393 391 return -EINVAL; 394 392 }
+1 -1
fs/verity/enable.c
··· 177 177 * (level 0) and ascending to the root node (level 'num_levels - 1'). 178 178 * Then at the end (level 'num_levels'), calculate the root hash. 179 179 */ 180 - blocks = (inode->i_size + params->block_size - 1) >> 180 + blocks = ((u64)inode->i_size + params->block_size - 1) >> 181 181 params->log_blocksize; 182 182 for (level = 0; level <= params->num_levels; level++) { 183 183 err = build_merkle_tree_level(filp, level, blocks, params,
+1 -1
fs/verity/open.c
··· 89 89 */ 90 90 91 91 /* Compute number of levels and the number of blocks in each level */ 92 - blocks = (inode->i_size + params->block_size - 1) >> log_blocksize; 92 + blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize; 93 93 pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks); 94 94 while (blocks > 1) { 95 95 if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
-8
include/acpi/acpi_io.h
··· 14 14 } 15 15 #endif 16 16 17 - #ifndef acpi_os_memmap 18 - static inline void __iomem *acpi_os_memmap(acpi_physical_address phys, 19 - acpi_size size) 20 - { 21 - return ioremap_cache(phys, size); 22 - } 23 - #endif 24 - 25 17 extern bool acpi_permanent_mmap; 26 18 27 19 void __iomem __ref
-3
include/kvm/arm_pmu.h
··· 61 61 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, 62 62 struct kvm_device_attr *attr); 63 63 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu); 64 - int kvm_pmu_probe_pmuver(void); 65 64 #else 66 65 struct kvm_pmu { 67 66 }; ··· 116 117 { 117 118 return 0; 118 119 } 119 - 120 - static inline int kvm_pmu_probe_pmuver(void) { return 0xf; } 121 120 122 121 #endif 123 122
+2 -1
include/linux/bpf.h
··· 578 578 * programs only. Should not be used with normal calls and indirect calls. 579 579 */ 580 580 #define BPF_TRAMP_F_SKIP_FRAME BIT(2) 581 - 582 581 /* Store IP address of the caller on the trampoline stack, 583 582 * so it's available for trampoline's programs. 584 583 */ 585 584 #define BPF_TRAMP_F_IP_ARG BIT(3) 585 + /* Return the return value of fentry prog. Only used by bpf_struct_ops. */ 586 + #define BPF_TRAMP_F_RET_FENTRY_RET BIT(4) 586 587 587 588 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 588 589 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
+2 -2
include/linux/buffer_head.h
··· 194 194 struct buffer_head *__bread_gfp(struct block_device *, 195 195 sector_t block, unsigned size, gfp_t gfp); 196 196 void invalidate_bh_lrus(void); 197 - void invalidate_bh_lrus_cpu(int cpu); 197 + void invalidate_bh_lrus_cpu(void); 198 198 bool has_bh_in_lru(int cpu, void *dummy); 199 199 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); 200 200 void free_buffer_head(struct buffer_head * bh); ··· 408 408 static inline void invalidate_inode_buffers(struct inode *inode) {} 409 409 static inline int remove_inode_buffers(struct inode *inode) { return 1; } 410 410 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } 411 - static inline void invalidate_bh_lrus_cpu(int cpu) {} 411 + static inline void invalidate_bh_lrus_cpu(void) {} 412 412 static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; } 413 413 #define buffer_heads_over_limit 0 414 414
+4 -3
include/linux/cpumask.h
··· 996 996 * cpumask; Typically used by bin_attribute to export cpumask bitmask 997 997 * ABI. 998 998 * 999 - * Returns the length of how many bytes have been copied. 999 + * Returns the length of how many bytes have been copied, excluding 1000 + * terminating '\0'. 1000 1001 */ 1001 1002 static inline ssize_t 1002 1003 cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, 1003 1004 loff_t off, size_t count) 1004 1005 { 1005 1006 return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask), 1006 - nr_cpu_ids, off, count); 1007 + nr_cpu_ids, off, count) - 1; 1007 1008 } 1008 1009 1009 1010 /** ··· 1019 1018 loff_t off, size_t count) 1020 1019 { 1021 1020 return bitmap_print_list_to_buf(buf, cpumask_bits(mask), 1022 - nr_cpu_ids, off, count); 1021 + nr_cpu_ids, off, count) - 1; 1023 1022 } 1024 1023 1025 1024 #if NR_CPUS <= BITS_PER_LONG
+1 -1
include/linux/dsa/ocelot.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 2 - * Copyright 2019-2021 NXP Semiconductors 2 + * Copyright 2019-2021 NXP 3 3 */ 4 4 5 5 #ifndef _NET_DSA_TAG_OCELOT_H
+1 -1
include/linux/etherdevice.h
··· 308 308 */ 309 309 static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) 310 310 { 311 - ether_addr_copy(dev->dev_addr, addr); 311 + __dev_addr_set(dev, addr, ETH_ALEN); 312 312 } 313 313 314 314 /**
+8 -3
include/linux/fwnode.h
··· 22 22 * LINKS_ADDED: The fwnode has already be parsed to add fwnode links. 23 23 * NOT_DEVICE: The fwnode will never be populated as a struct device. 24 24 * INITIALIZED: The hardware corresponding to fwnode has been initialized. 25 + * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its 26 + * driver needs its child devices to be bound with 27 + * their respective drivers as soon as they are 28 + * added. 25 29 */ 26 - #define FWNODE_FLAG_LINKS_ADDED BIT(0) 27 - #define FWNODE_FLAG_NOT_DEVICE BIT(1) 28 - #define FWNODE_FLAG_INITIALIZED BIT(2) 30 + #define FWNODE_FLAG_LINKS_ADDED BIT(0) 31 + #define FWNODE_FLAG_NOT_DEVICE BIT(1) 32 + #define FWNODE_FLAG_INITIALIZED BIT(2) 33 + #define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3) 29 34 30 35 struct fwnode_handle { 31 36 struct fwnode_handle *secondary;
+1 -1
include/linux/irqdomain.h
··· 251 251 } 252 252 253 253 void irq_domain_free_fwnode(struct fwnode_handle *fwnode); 254 - struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, 254 + struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size, 255 255 irq_hw_number_t hwirq_max, int direct_max, 256 256 const struct irq_domain_ops *ops, 257 257 void *host_data);
-6
include/linux/kvm_host.h
··· 608 608 unsigned long mmu_notifier_range_start; 609 609 unsigned long mmu_notifier_range_end; 610 610 #endif 611 - long tlbs_dirty; 612 611 struct list_head devices; 613 612 u64 manual_dirty_log_protect; 614 613 struct dentry *debugfs_dentry; ··· 718 719 if (vcpu->vcpu_id == id) 719 720 return vcpu; 720 721 return NULL; 721 - } 722 - 723 - static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) 724 - { 725 - return vcpu->vcpu_idx; 726 722 } 727 723 728 724 #define kvm_for_each_memslot(memslot, slots) \
+3
include/linux/mdio.h
··· 80 80 81 81 /* Clears up any memory if needed */ 82 82 void (*remove)(struct mdio_device *mdiodev); 83 + 84 + /* Quiesces the device on system shutdown, turns off interrupts etc */ 85 + void (*shutdown)(struct mdio_device *mdiodev); 83 86 }; 84 87 85 88 static inline struct mdio_driver *
+5 -1
include/linux/migrate.h
··· 19 19 */ 20 20 #define MIGRATEPAGE_SUCCESS 0 21 21 22 + /* 23 + * Keep sync with: 24 + * - macro MIGRATE_REASON in include/trace/events/migrate.h 25 + * - migrate_reason_names[MR_TYPES] in mm/debug.c 26 + */ 22 27 enum migrate_reason { 23 28 MR_COMPACTION, 24 29 MR_MEMORY_FAILURE, ··· 37 32 MR_TYPES 38 33 }; 39 34 40 - /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ 41 35 extern const char *migrate_reason_names[MR_TYPES]; 42 36 43 37 #ifdef CONFIG_MIGRATION
+14
include/linux/nvmem-consumer.h
··· 150 150 return -EOPNOTSUPP; 151 151 } 152 152 153 + static inline int nvmem_cell_read_variable_le_u32(struct device *dev, 154 + const char *cell_id, 155 + u32 *val) 156 + { 157 + return -EOPNOTSUPP; 158 + } 159 + 160 + static inline int nvmem_cell_read_variable_le_u64(struct device *dev, 161 + const char *cell_id, 162 + u64 *val) 163 + { 164 + return -EOPNOTSUPP; 165 + } 166 + 153 167 static inline struct nvmem_device *nvmem_device_get(struct device *dev, 154 168 const char *name) 155 169 {
+1 -1
include/linux/packing.h
··· 1 1 /* SPDX-License-Identifier: BSD-3-Clause 2 - * Copyright (c) 2016-2018, NXP Semiconductors 2 + * Copyright 2016-2018 NXP 3 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 4 */ 5 5 #ifndef _LINUX_PACKING_H
+6
include/linux/perf/arm_pmu.h
··· 163 163 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; } 164 164 #endif 165 165 166 + #ifdef CONFIG_KVM 167 + void kvm_host_pmu_init(struct arm_pmu *pmu); 168 + #else 169 + #define kvm_host_pmu_init(x) do { } while(0) 170 + #endif 171 + 166 172 /* Internal functions only for core arm_pmu code */ 167 173 struct arm_pmu *armpmu_alloc(void); 168 174 struct arm_pmu *armpmu_alloc_atomic(void);
+3 -1
include/linux/perf_event.h
··· 683 683 /* 684 684 * timestamp shadows the actual context timing but it can 685 685 * be safely used in NMI interrupt context. It reflects the 686 - * context time as it was when the event was last scheduled in. 686 + * context time as it was when the event was last scheduled in, 687 + * or when ctx_sched_in failed to schedule the event because we 688 + * run out of PMC. 687 689 * 688 690 * ctx_time already accounts for ctx->timestamp. Therefore to 689 691 * compute ctx_time for a sample, simply add perf_clock().
+2
include/linux/pkeys.h
··· 4 4 5 5 #include <linux/mm.h> 6 6 7 + #define ARCH_DEFAULT_PKEY 0 8 + 7 9 #ifdef CONFIG_ARCH_HAS_PKEYS 8 10 #include <asm/pkeys.h> 9 11 #else /* ! CONFIG_ARCH_HAS_PKEYS */
+2
include/linux/platform_data/usb-omap1.h
··· 48 48 u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup); 49 49 50 50 int (*ocpi_enable)(void); 51 + 52 + void (*lb_reset)(void); 51 53 }; 52 54 53 55 #endif /* __LINUX_USB_OMAP1_H */
+1 -1
include/linux/sched.h
··· 1720 1720 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1721 1721 #define used_math() tsk_used_math(current) 1722 1722 1723 - static inline bool is_percpu_thread(void) 1723 + static __always_inline bool is_percpu_thread(void) 1724 1724 { 1725 1725 #ifdef CONFIG_SMP 1726 1726 return (current->flags & PF_NO_SETAFFINITY) &&
+2
include/linux/tracehook.h
··· 197 197 198 198 mem_cgroup_handle_over_high(); 199 199 blkcg_maybe_throttle_current(); 200 + 201 + rseq_handle_notify_resume(NULL, regs); 200 202 } 201 203 202 204 /*
+2
include/linux/usb/hcd.h
··· 124 124 #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ 125 125 #define HCD_FLAG_DEAD 6 /* controller has died? */ 126 126 #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ 127 + #define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */ 127 128 128 129 /* The flags can be tested using these macros; they are likely to 129 130 * be slightly faster than test_bit(). ··· 135 134 #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) 136 135 #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) 137 136 #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) 137 + #define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER)) 138 138 139 139 /* 140 140 * Specifies if interfaces are authorized by default
+9
include/net/dsa.h
··· 585 585 int (*change_tag_protocol)(struct dsa_switch *ds, int port, 586 586 enum dsa_tag_protocol proto); 587 587 588 + /* Optional switch-wide initialization and destruction methods */ 588 589 int (*setup)(struct dsa_switch *ds); 589 590 void (*teardown)(struct dsa_switch *ds); 591 + 592 + /* Per-port initialization and destruction methods. Mandatory if the 593 + * driver registers devlink port regions, optional otherwise. 594 + */ 595 + int (*port_setup)(struct dsa_switch *ds, int port); 596 + void (*port_teardown)(struct dsa_switch *ds, int port); 597 + 590 598 u32 (*get_phy_flags)(struct dsa_switch *ds, int port); 591 599 592 600 /* ··· 1054 1046 1055 1047 void dsa_unregister_switch(struct dsa_switch *ds); 1056 1048 int dsa_register_switch(struct dsa_switch *ds); 1049 + void dsa_switch_shutdown(struct dsa_switch *ds); 1057 1050 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index); 1058 1051 #ifdef CONFIG_PM_SLEEP 1059 1052 int dsa_switch_suspend(struct dsa_switch *ds);
+1 -1
include/net/ip_fib.h
··· 597 597 int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh, 598 598 u8 rt_family, unsigned char *flags, bool skip_oif); 599 599 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh, 600 - int nh_weight, u8 rt_family); 600 + int nh_weight, u8 rt_family, u32 nh_tclassid); 601 601 #endif /* _NET_FIB_H */
+4 -4
include/net/mac80211.h
··· 2818 2818 * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag 2819 2819 * when they are able to replace in-use PTK keys according to the following 2820 2820 * requirements: 2821 - * 1) They do not hand over frames decrypted with the old key to 2822 - mac80211 once the call to set_key() with command %DISABLE_KEY has been 2823 - completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key, 2821 + * 1) They do not hand over frames decrypted with the old key to mac80211 2822 + once the call to set_key() with command %DISABLE_KEY has been completed, 2824 2823 2) either drop or continue to use the old key for any outgoing frames queued 2825 2824 at the time of the key deletion (including re-transmits), 2826 2825 3) never send out a frame queued prior to the set_key() %SET_KEY command 2827 - encrypted with the new key and 2826 + encrypted with the new key when also needing 2827 + @IEEE80211_KEY_FLAG_GENERATE_IV and 2828 2828 4) never send out a frame unencrypted when it should be encrypted. 2829 2829 Mac80211 will not queue any new frames for a deleted key to the driver. 2830 2830 */
-1
include/net/netfilter/ipv6/nf_defrag_ipv6.h
··· 17 17 struct nft_ct_frag6_pernet { 18 18 struct ctl_table_header *nf_frag_frags_hdr; 19 19 struct fqdir *fqdir; 20 - unsigned int users; 21 20 }; 22 21 23 22 #endif /* _NF_DEFRAG_IPV6_H */
+1 -1
include/net/netfilter/nf_tables.h
··· 1202 1202 1203 1203 void nft_obj_notify(struct net *net, const struct nft_table *table, 1204 1204 struct nft_object *obj, u32 portid, u32 seq, 1205 - int event, int family, int report, gfp_t gfp); 1205 + int event, u16 flags, int family, int report, gfp_t gfp); 1206 1206 1207 1207 /** 1208 1208 * struct nft_object_type - stateful object type
+6
include/net/netns/netfilter.h
··· 27 27 #if IS_ENABLED(CONFIG_DECNET) 28 28 struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS]; 29 29 #endif 30 + #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) 31 + unsigned int defrag_ipv4_users; 32 + #endif 33 + #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 34 + unsigned int defrag_ipv6_users; 35 + #endif 30 36 }; 31 37 #endif
+1 -1
include/net/nexthop.h
··· 325 325 struct fib_nh_common *nhc = &nhi->fib_nhc; 326 326 int weight = nhg->nh_entries[i].weight; 327 327 328 - if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0) 328 + if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0) 329 329 return -EMSGSIZE; 330 330 } 331 331
+1
include/net/pkt_sched.h
··· 11 11 #include <uapi/linux/pkt_sched.h> 12 12 13 13 #define DEFAULT_TX_QUEUE_LEN 1000 14 + #define STAB_SIZE_LOG_MAX 30 14 15 15 16 struct qdisc_walker { 16 17 int stop;
+34 -1
include/net/sock.h
··· 307 307 * @sk_priority: %SO_PRIORITY setting 308 308 * @sk_type: socket type (%SOCK_STREAM, etc) 309 309 * @sk_protocol: which protocol this socket belongs in this network family 310 + * @sk_peer_lock: lock protecting @sk_peer_pid and @sk_peer_cred 310 311 * @sk_peer_pid: &struct pid for this socket's peer 311 312 * @sk_peer_cred: %SO_PEERCRED setting 312 313 * @sk_rcvlowat: %SO_RCVLOWAT setting ··· 489 488 u8 sk_prefer_busy_poll; 490 489 u16 sk_busy_poll_budget; 491 490 #endif 491 + spinlock_t sk_peer_lock; 492 492 struct pid *sk_peer_pid; 493 493 const struct cred *sk_peer_cred; 494 + 494 495 long sk_rcvtimeo; 495 496 ktime_t sk_stamp; 496 497 #if BITS_PER_LONG==32 ··· 1626 1623 SINGLE_DEPTH_NESTING) 1627 1624 #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) 1628 1625 1629 - bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock); 1626 + bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock); 1627 + 1628 + /** 1629 + * lock_sock_fast - fast version of lock_sock 1630 + * @sk: socket 1631 + * 1632 + * This version should be used for very small section, where process wont block 1633 + * return false if fast path is taken: 1634 + * 1635 + * sk_lock.slock locked, owned = 0, BH disabled 1636 + * 1637 + * return true if slow path is taken: 1638 + * 1639 + * sk_lock.slock unlocked, owned = 1, BH enabled 1640 + */ 1641 + static inline bool lock_sock_fast(struct sock *sk) 1642 + { 1643 + /* The sk_lock has mutex_lock() semantics here. 
*/ 1644 + mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); 1645 + 1646 + return __lock_sock_fast(sk); 1647 + } 1648 + 1649 + /* fast socket lock variant for caller already holding a [different] socket lock */ 1650 + static inline bool lock_sock_fast_nested(struct sock *sk) 1651 + { 1652 + mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_); 1653 + 1654 + return __lock_sock_fast(sk); 1655 + } 1630 1656 1631 1657 /** 1632 1658 * unlock_sock_fast - complement of lock_sock_fast ··· 1672 1640 release_sock(sk); 1673 1641 __release(&sk->sk_lock.slock); 1674 1642 } else { 1643 + mutex_release(&sk->sk_lock.dep_map, _RET_IP_); 1675 1644 spin_unlock_bh(&sk->sk_lock.slock); 1676 1645 } 1677 1646 }
-1
include/scsi/scsi_device.h
··· 146 146 struct scsi_vpd __rcu *vpd_pg83; 147 147 struct scsi_vpd __rcu *vpd_pg80; 148 148 struct scsi_vpd __rcu *vpd_pg89; 149 - unsigned char current_tag; /* current tag */ 150 149 struct scsi_target *sdev_target; 151 150 152 151 blist_flags_t sdev_bflags; /* black/white flags as also found in
+2 -2
include/soc/mscc/ocelot_vcap.h
··· 694 694 int ocelot_vcap_filter_del(struct ocelot *ocelot, 695 695 struct ocelot_vcap_filter *rule); 696 696 struct ocelot_vcap_filter * 697 - ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id, 698 - bool tc_offload); 697 + ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, 698 + unsigned long cookie, bool tc_offload); 699 699 700 700 #endif /* _OCELOT_VCAP_H_ */
+1
include/sound/rawmidi.h
··· 98 98 struct snd_rawmidi *rmidi; 99 99 struct snd_rawmidi_substream *input; 100 100 struct snd_rawmidi_substream *output; 101 + unsigned int user_pversion; /* supported protocol version */ 101 102 }; 102 103 103 104 struct snd_rawmidi_str {
+3 -3
include/trace/events/cachefiles.h
··· 178 178 ), 179 179 180 180 TP_fast_assign( 181 - __entry->obj = obj->fscache.debug_id; 181 + __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX; 182 182 __entry->de = de; 183 183 __entry->why = why; 184 184 ), ··· 205 205 ), 206 206 207 207 TP_fast_assign( 208 - __entry->obj = obj->fscache.debug_id; 208 + __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX; 209 209 __entry->de = de; 210 210 __entry->to = to; 211 211 __entry->why = why; ··· 305 305 ), 306 306 307 307 TP_fast_assign( 308 - __entry->obj = obj->fscache.debug_id; 308 + __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX; 309 309 __entry->de = de; 310 310 __entry->why = why; 311 311 ),
+3 -3
include/trace/events/erofs.h
··· 35 35 TP_STRUCT__entry( 36 36 __field(dev_t, dev ) 37 37 __field(erofs_nid_t, nid ) 38 - __field(const char *, name ) 38 + __string(name, dentry->d_name.name ) 39 39 __field(unsigned int, flags ) 40 40 ), 41 41 42 42 TP_fast_assign( 43 43 __entry->dev = dir->i_sb->s_dev; 44 44 __entry->nid = EROFS_I(dir)->nid; 45 - __entry->name = dentry->d_name.name; 45 + __assign_str(name, dentry->d_name.name); 46 46 __entry->flags = flags; 47 47 ), 48 48 49 49 TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x", 50 50 show_dev_nid(__entry), 51 - __entry->name, 51 + __get_str(name), 52 52 __entry->flags) 53 53 ); 54 54
+7
include/uapi/linux/android/binder.h
··· 225 225 226 226 struct binder_frozen_status_info { 227 227 __u32 pid; 228 + 229 + /* process received sync transactions since last frozen 230 + * bit 0: received sync transaction after being frozen 231 + * bit 1: new pending sync transaction during freezing 232 + */ 228 233 __u32 sync_recv; 234 + 235 + /* process received async transactions since last frozen */ 229 236 __u32 async_recv; 230 237 }; 231 238
+1 -1
include/uapi/linux/hyperv.h
··· 26 26 #ifndef _UAPI_HYPERV_H 27 27 #define _UAPI_HYPERV_H 28 28 29 - #include <linux/uuid.h> 29 + #include <linux/types.h> 30 30 31 31 /* 32 32 * Framework version for util services.
+9 -6
include/uapi/linux/xfrm.h
··· 213 213 XFRM_MSG_GETSPDINFO, 214 214 #define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO 215 215 216 + XFRM_MSG_MAPPING, 217 + #define XFRM_MSG_MAPPING XFRM_MSG_MAPPING 218 + 216 219 XFRM_MSG_SETDEFAULT, 217 220 #define XFRM_MSG_SETDEFAULT XFRM_MSG_SETDEFAULT 218 221 XFRM_MSG_GETDEFAULT, 219 222 #define XFRM_MSG_GETDEFAULT XFRM_MSG_GETDEFAULT 220 - 221 - XFRM_MSG_MAPPING, 222 - #define XFRM_MSG_MAPPING XFRM_MSG_MAPPING 223 223 __XFRM_MSG_MAX 224 224 }; 225 225 #define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1) ··· 514 514 #define XFRM_OFFLOAD_INBOUND 2 515 515 516 516 struct xfrm_userpolicy_default { 517 - #define XFRM_USERPOLICY_DIRMASK_MAX (sizeof(__u8) * 8) 518 - __u8 dirmask; 519 - __u8 action; 517 + #define XFRM_USERPOLICY_UNSPEC 0 518 + #define XFRM_USERPOLICY_BLOCK 1 519 + #define XFRM_USERPOLICY_ACCEPT 2 520 + __u8 in; 521 + __u8 fwd; 522 + __u8 out; 520 523 }; 521 524 522 525 #ifndef __KERNEL__
+1
include/uapi/sound/asound.h
··· 784 784 785 785 #define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int) 786 786 #define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info) 787 + #define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int) 787 788 #define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params) 788 789 #define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status) 789 790 #define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
-12
include/xen/xen-ops.h
··· 46 46 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, 47 47 unsigned int address_bits, 48 48 dma_addr_t *dma_handle); 49 - 50 49 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); 51 - #else 52 - static inline int xen_create_contiguous_region(phys_addr_t pstart, 53 - unsigned int order, 54 - unsigned int address_bits, 55 - dma_addr_t *dma_handle) 56 - { 57 - return 0; 58 - } 59 - 60 - static inline void xen_destroy_contiguous_region(phys_addr_t pstart, 61 - unsigned int order) { } 62 50 #endif 63 51 64 52 #if defined(CONFIG_XEN_PV)
+16 -14
init/do_mounts.c
··· 338 338 __setup("rootfstype=", fs_names_setup); 339 339 __setup("rootdelay=", root_delay_setup); 340 340 341 - static int __init split_fs_names(char *page, char *names) 341 + /* This can return zero length strings. Caller should check */ 342 + static int __init split_fs_names(char *page, size_t size, char *names) 342 343 { 343 - int count = 0; 344 + int count = 1; 344 345 char *p = page; 345 346 346 - strcpy(p, root_fs_names); 347 + strlcpy(p, root_fs_names, size); 347 348 while (*p++) { 348 - if (p[-1] == ',') 349 + if (p[-1] == ',') { 349 350 p[-1] = '\0'; 351 + count++; 352 + } 350 353 } 351 - *p = '\0'; 352 - 353 - for (p = page; *p; p += strlen(p)+1) 354 - count++; 355 354 356 355 return count; 357 356 } ··· 403 404 scnprintf(b, BDEVNAME_SIZE, "unknown-block(%u,%u)", 404 405 MAJOR(ROOT_DEV), MINOR(ROOT_DEV)); 405 406 if (root_fs_names) 406 - num_fs = split_fs_names(fs_names, root_fs_names); 407 + num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names); 407 408 else 408 409 num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE); 409 410 retry: 410 411 for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1) { 411 - int err = do_mount_root(name, p, flags, root_mount_data); 412 + int err; 413 + 414 + if (!*p) 415 + continue; 416 + err = do_mount_root(name, p, flags, root_mount_data); 412 417 switch (err) { 413 418 case 0: 414 419 goto out; ··· 546 543 fs_names = (void *)__get_free_page(GFP_KERNEL); 547 544 if (!fs_names) 548 545 return -EINVAL; 549 - num_fs = split_fs_names(fs_names, root_fs_names); 546 + num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names); 550 547 551 548 for (i = 0, fstype = fs_names; i < num_fs; 552 549 i++, fstype += strlen(fstype) + 1) { 550 + if (!*fstype) 551 + continue; 553 552 if (!fs_is_nodev(fstype)) 554 553 continue; 555 554 err = do_mount_root(root_device_name, fstype, root_mountflags, 556 555 root_mount_data); 557 556 if (!err) 558 557 break; 559 - if (err != -EACCES && err != -EINVAL) 560 - panic("VFS: Unable to mount 
root \"%s\" (%s), err=%d\n", 561 - root_device_name, fstype, err); 562 558 } 563 559 564 560 free_page((unsigned long)fs_names);
+3 -3
init/main.c
··· 1242 1242 { 1243 1243 ktime_t *calltime = (ktime_t *)data; 1244 1244 1245 - printk(KERN_DEBUG "calling %pS @ %i irqs_disabled() %d\n", fn, task_pid_nr(current), irqs_disabled()); 1245 + printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current)); 1246 1246 *calltime = ktime_get(); 1247 1247 } 1248 1248 ··· 1256 1256 rettime = ktime_get(); 1257 1257 delta = ktime_sub(rettime, *calltime); 1258 1258 duration = (unsigned long long) ktime_to_ns(delta) >> 10; 1259 - printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs, irqs_disabled() %d\n", 1260 - fn, ret, duration, irqs_disabled()); 1259 + printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n", 1260 + fn, ret, duration); 1261 1261 } 1262 1262 1263 1263 static ktime_t initcall_calltime;
+5 -2
kernel/bpf/bpf_struct_ops.c
··· 368 368 const struct btf_type *mtype, *ptype; 369 369 struct bpf_prog *prog; 370 370 u32 moff; 371 + u32 flags; 371 372 372 373 moff = btf_member_bit_offset(t, member) / 8; 373 374 ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL); ··· 432 431 433 432 tprogs[BPF_TRAMP_FENTRY].progs[0] = prog; 434 433 tprogs[BPF_TRAMP_FENTRY].nr_progs = 1; 434 + flags = st_ops->func_models[i].ret_size > 0 ? 435 + BPF_TRAMP_F_RET_FENTRY_RET : 0; 435 436 err = arch_prepare_bpf_trampoline(NULL, image, 436 437 st_map->image + PAGE_SIZE, 437 - &st_ops->func_models[i], 0, 438 - tprogs, NULL); 438 + &st_ops->func_models[i], 439 + flags, tprogs, NULL); 439 440 if (err < 0) 440 441 goto reset_unlock; 441 442
+1 -1
kernel/bpf/core.c
··· 827 827 { 828 828 if (atomic_long_add_return(pages, &bpf_jit_current) > 829 829 (bpf_jit_limit >> PAGE_SHIFT)) { 830 - if (!capable(CAP_SYS_ADMIN)) { 830 + if (!bpf_capable()) { 831 831 atomic_long_sub(pages, &bpf_jit_current); 832 832 return -EPERM; 833 833 }
+2 -1
kernel/bpf/stackmap.c
··· 63 63 64 64 static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) 65 65 { 66 - u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; 66 + u64 elem_size = sizeof(struct stack_map_bucket) + 67 + (u64)smap->map.value_size; 67 68 int err; 68 69 69 70 smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
+12 -5
kernel/cgroup/cgroup.c
··· 6574 6574 6575 6575 void cgroup_sk_alloc(struct sock_cgroup_data *skcd) 6576 6576 { 6577 - /* Don't associate the sock with unrelated interrupted task's cgroup. */ 6578 - if (in_interrupt()) 6579 - return; 6577 + struct cgroup *cgroup; 6580 6578 6581 6579 rcu_read_lock(); 6580 + /* Don't associate the sock with unrelated interrupted task's cgroup. */ 6581 + if (in_interrupt()) { 6582 + cgroup = &cgrp_dfl_root.cgrp; 6583 + cgroup_get(cgroup); 6584 + goto out; 6585 + } 6586 + 6582 6587 while (true) { 6583 6588 struct css_set *cset; 6584 6589 6585 6590 cset = task_css_set(current); 6586 6591 if (likely(cgroup_tryget(cset->dfl_cgrp))) { 6587 - skcd->cgroup = cset->dfl_cgrp; 6588 - cgroup_bpf_get(cset->dfl_cgrp); 6592 + cgroup = cset->dfl_cgrp; 6589 6593 break; 6590 6594 } 6591 6595 cpu_relax(); 6592 6596 } 6597 + out: 6598 + skcd->cgroup = cgroup; 6599 + cgroup_bpf_get(cgroup); 6593 6600 rcu_read_unlock(); 6594 6601 } 6595 6602
+1 -3
kernel/entry/common.c
··· 171 171 if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) 172 172 handle_signal_work(regs, ti_work); 173 173 174 - if (ti_work & _TIF_NOTIFY_RESUME) { 174 + if (ti_work & _TIF_NOTIFY_RESUME) 175 175 tracehook_notify_resume(regs); 176 - rseq_handle_notify_resume(NULL, regs); 177 - } 178 176 179 177 /* Architecture specific TIF work */ 180 178 arch_exit_to_user_mode_work(regs, ti_work);
+30 -4
kernel/events/core.c
··· 3707 3707 return 0; 3708 3708 } 3709 3709 3710 + static inline bool event_update_userpage(struct perf_event *event) 3711 + { 3712 + if (likely(!atomic_read(&event->mmap_count))) 3713 + return false; 3714 + 3715 + perf_event_update_time(event); 3716 + perf_set_shadow_time(event, event->ctx); 3717 + perf_event_update_userpage(event); 3718 + 3719 + return true; 3720 + } 3721 + 3722 + static inline void group_update_userpage(struct perf_event *group_event) 3723 + { 3724 + struct perf_event *event; 3725 + 3726 + if (!event_update_userpage(group_event)) 3727 + return; 3728 + 3729 + for_each_sibling_event(event, group_event) 3730 + event_update_userpage(event); 3731 + } 3732 + 3710 3733 static int merge_sched_in(struct perf_event *event, void *data) 3711 3734 { 3712 3735 struct perf_event_context *ctx = event->ctx; ··· 3748 3725 } 3749 3726 3750 3727 if (event->state == PERF_EVENT_STATE_INACTIVE) { 3728 + *can_add_hw = 0; 3751 3729 if (event->attr.pinned) { 3752 3730 perf_cgroup_event_disable(event, ctx); 3753 3731 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); 3732 + } else { 3733 + ctx->rotate_necessary = 1; 3734 + perf_mux_hrtimer_restart(cpuctx); 3735 + group_update_userpage(event); 3754 3736 } 3755 - 3756 - *can_add_hw = 0; 3757 - ctx->rotate_necessary = 1; 3758 - perf_mux_hrtimer_restart(cpuctx); 3759 3737 } 3760 3738 3761 3739 return 0; ··· 6348 6324 6349 6325 ring_buffer_attach(event, rb); 6350 6326 6327 + perf_event_update_time(event); 6328 + perf_set_shadow_time(event, event->ctx); 6351 6329 perf_event_init_userpage(event); 6352 6330 perf_event_update_userpage(event); 6353 6331 } else {
+1 -1
kernel/irq/irqdomain.c
··· 136 136 * Allocates and initializes an irq_domain structure. 137 137 * Returns pointer to IRQ domain, or NULL on failure. 138 138 */ 139 - struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, 139 + struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size, 140 140 irq_hw_number_t hwirq_max, int direct_max, 141 141 const struct irq_domain_ops *ops, 142 142 void *host_data)
+11 -3
kernel/rseq.c
··· 282 282 283 283 if (unlikely(t->flags & PF_EXITING)) 284 284 return; 285 - ret = rseq_ip_fixup(regs); 286 - if (unlikely(ret < 0)) 287 - goto error; 285 + 286 + /* 287 + * regs is NULL if and only if the caller is in a syscall path. Skip 288 + * fixup and leave rseq_cs as is so that rseq_sycall() will detect and 289 + * kill a misbehaving userspace on debug kernels. 290 + */ 291 + if (regs) { 292 + ret = rseq_ip_fixup(regs); 293 + if (unlikely(ret < 0)) 294 + goto error; 295 + } 288 296 if (unlikely(rseq_update_cpu_id(t))) 289 297 goto error; 290 298 return;
+7 -1
kernel/sched/debug.c
··· 173 173 size_t cnt, loff_t *ppos) 174 174 { 175 175 char buf[16]; 176 + unsigned int scaling; 176 177 177 178 if (cnt > 15) 178 179 cnt = 15; 179 180 180 181 if (copy_from_user(&buf, ubuf, cnt)) 181 182 return -EFAULT; 183 + buf[cnt] = '\0'; 182 184 183 - if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling)) 185 + if (kstrtouint(buf, 10, &scaling)) 184 186 return -EINVAL; 185 187 188 + if (scaling >= SCHED_TUNABLESCALING_END) 189 + return -EINVAL; 190 + 191 + sysctl_sched_tunable_scaling = scaling; 186 192 if (sched_update_scaling()) 187 193 return -EINVAL; 188 194
+5 -1
kernel/sched/fair.c
··· 4936 4936 /* update hierarchical throttle state */ 4937 4937 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); 4938 4938 4939 - if (!cfs_rq->load.weight) 4939 + /* Nothing to run but something to decay (on_list)? Complete the branch */ 4940 + if (!cfs_rq->load.weight) { 4941 + if (cfs_rq->on_list) 4942 + goto unthrottle_throttle; 4940 4943 return; 4944 + } 4941 4945 4942 4946 task_delta = cfs_rq->h_nr_running; 4943 4947 idle_task_delta = cfs_rq->idle_h_nr_running;
+2 -1
kernel/time/posix-cpu-timers.c
··· 1404 1404 } 1405 1405 } 1406 1406 1407 - *newval += now; 1407 + if (*newval) 1408 + *newval += now; 1408 1409 } 1409 1410 1410 1411 /*
+8
kernel/trace/blktrace.c
··· 1605 1605 if (bt == NULL) 1606 1606 return -EINVAL; 1607 1607 1608 + if (bt->trace_state == Blktrace_running) { 1609 + bt->trace_state = Blktrace_stopped; 1610 + spin_lock_irq(&running_trace_lock); 1611 + list_del_init(&bt->running_list); 1612 + spin_unlock_irq(&running_trace_lock); 1613 + relay_flush(bt->rchan); 1614 + } 1615 + 1608 1616 put_probe_ref(); 1609 1617 synchronize_rcu(); 1610 1618 blk_trace_free(bt);
+1 -1
lib/Kconfig.debug
··· 346 346 int "Warn for stack frames larger than" 347 347 range 0 8192 348 348 default 2048 if GCC_PLUGIN_LATENT_ENTROPY 349 - default 1536 if (!64BIT && PARISC) 349 + default 1536 if (!64BIT && (PARISC || XTENSA)) 350 350 default 1024 if (!64BIT && !PARISC) 351 351 default 2048 if 64BIT 352 352 help
+2
lib/Kconfig.kasan
··· 66 66 config KASAN_GENERIC 67 67 bool "Generic mode" 68 68 depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC 69 + depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS 69 70 select SLUB_DEBUG if SLUB 70 71 select CONSTRUCTORS 71 72 help ··· 87 86 config KASAN_SW_TAGS 88 87 bool "Software tag-based mode" 89 88 depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS 89 + depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS 90 90 select SLUB_DEBUG if SLUB 91 91 select CONSTRUCTORS 92 92 help
+1 -1
lib/packing.c
··· 1 1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 2 - /* Copyright (c) 2016-2018, NXP Semiconductors 2 + /* Copyright 2016-2018 NXP 3 3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com> 4 4 */ 5 5 #include <linux/packing.h>
+6 -7
lib/zlib_inflate/inffast.c
··· 253 253 254 254 sfrom = (unsigned short *)(from); 255 255 loops = len >> 1; 256 - do 257 - #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 258 - *sout++ = *sfrom++; 259 - #else 260 - *sout++ = get_unaligned16(sfrom++); 261 - #endif 262 - while (--loops); 256 + do { 257 + if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 258 + *sout++ = *sfrom++; 259 + else 260 + *sout++ = get_unaligned16(sfrom++); 261 + } while (--loops); 263 262 out = (unsigned char *)sout; 264 263 from = (unsigned char *)sfrom; 265 264 } else { /* dist == 1 or dist == 2 */
+8 -8
mm/damon/dbgfs-test.h
··· 20 20 ssize_t nr_integers = 0, i; 21 21 22 22 question = "123"; 23 - answers = str_to_target_ids(question, strnlen(question, 128), 23 + answers = str_to_target_ids(question, strlen(question), 24 24 &nr_integers); 25 25 KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); 26 26 KUNIT_EXPECT_EQ(test, 123ul, answers[0]); 27 27 kfree(answers); 28 28 29 29 question = "123abc"; 30 - answers = str_to_target_ids(question, strnlen(question, 128), 30 + answers = str_to_target_ids(question, strlen(question), 31 31 &nr_integers); 32 32 KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); 33 33 KUNIT_EXPECT_EQ(test, 123ul, answers[0]); 34 34 kfree(answers); 35 35 36 36 question = "a123"; 37 - answers = str_to_target_ids(question, strnlen(question, 128), 37 + answers = str_to_target_ids(question, strlen(question), 38 38 &nr_integers); 39 39 KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); 40 40 kfree(answers); 41 41 42 42 question = "12 35"; 43 - answers = str_to_target_ids(question, strnlen(question, 128), 43 + answers = str_to_target_ids(question, strlen(question), 44 44 &nr_integers); 45 45 KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers); 46 46 for (i = 0; i < nr_integers; i++) ··· 48 48 kfree(answers); 49 49 50 50 question = "12 35 46"; 51 - answers = str_to_target_ids(question, strnlen(question, 128), 51 + answers = str_to_target_ids(question, strlen(question), 52 52 &nr_integers); 53 53 KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers); 54 54 for (i = 0; i < nr_integers; i++) ··· 56 56 kfree(answers); 57 57 58 58 question = "12 35 abc 46"; 59 - answers = str_to_target_ids(question, strnlen(question, 128), 59 + answers = str_to_target_ids(question, strlen(question), 60 60 &nr_integers); 61 61 KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers); 62 62 for (i = 0; i < 2; i++) ··· 64 64 kfree(answers); 65 65 66 66 question = ""; 67 - answers = str_to_target_ids(question, strnlen(question, 128), 67 + answers = str_to_target_ids(question, strlen(question), 68 68 &nr_integers); 69 69 
KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); 70 70 kfree(answers); 71 71 72 72 question = "\n"; 73 - answers = str_to_target_ids(question, strnlen(question, 128), 73 + answers = str_to_target_ids(question, strlen(question), 74 74 &nr_integers); 75 75 KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); 76 76 kfree(answers);
+3 -1
mm/debug.c
··· 24 24 "syscall_or_cpuset", 25 25 "mempolicy_mbind", 26 26 "numa_misplaced", 27 - "cma", 27 + "contig_range", 28 + "longterm_pin", 29 + "demotion", 28 30 }; 29 31 30 32 const struct trace_print_flags pageflag_names[] = {
-10
mm/memcontrol.c
··· 106 106 /* memcg and lruvec stats flushing */ 107 107 static void flush_memcg_stats_dwork(struct work_struct *w); 108 108 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork); 109 - static void flush_memcg_stats_work(struct work_struct *w); 110 - static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work); 111 - static DEFINE_PER_CPU(unsigned int, stats_flush_threshold); 112 109 static DEFINE_SPINLOCK(stats_flush_lock); 113 110 114 111 #define THRESHOLDS_EVENTS_TARGET 128 ··· 679 682 680 683 /* Update lruvec */ 681 684 __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val); 682 - if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH)) 683 - queue_work(system_unbound_wq, &stats_flush_work); 684 685 } 685 686 686 687 /** ··· 5354 5359 { 5355 5360 mem_cgroup_flush_stats(); 5356 5361 queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ); 5357 - } 5358 - 5359 - static void flush_memcg_stats_work(struct work_struct *w) 5360 - { 5361 - mem_cgroup_flush_stats(); 5362 5362 } 5363 5363 5364 5364 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
+6 -6
mm/memory-failure.c
··· 306 306 struct vm_area_struct *vma) 307 307 { 308 308 unsigned long address = vma_address(page, vma); 309 + unsigned long ret = 0; 309 310 pgd_t *pgd; 310 311 p4d_t *p4d; 311 312 pud_t *pud; ··· 330 329 if (pmd_devmap(*pmd)) 331 330 return PMD_SHIFT; 332 331 pte = pte_offset_map(pmd, address); 333 - if (!pte_present(*pte)) 334 - return 0; 335 - if (pte_devmap(*pte)) 336 - return PAGE_SHIFT; 337 - return 0; 332 + if (pte_present(*pte) && pte_devmap(*pte)) 333 + ret = PAGE_SHIFT; 334 + pte_unmap(pte); 335 + return ret; 338 336 } 339 337 340 338 /* ··· 1126 1126 */ 1127 1127 static inline bool HWPoisonHandlable(struct page *page) 1128 1128 { 1129 - return PageLRU(page) || __PageMovable(page); 1129 + return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page); 1130 1130 } 1131 1131 1132 1132 static int __get_hwpoison_page(struct page *page)
+2 -2
mm/shmem.c
··· 490 490 case SHMEM_HUGE_ALWAYS: 491 491 return true; 492 492 case SHMEM_HUGE_WITHIN_SIZE: 493 - index = round_up(index, HPAGE_PMD_NR); 493 + index = round_up(index + 1, HPAGE_PMD_NR); 494 494 i_size = round_up(i_size_read(inode), PAGE_SIZE); 495 - if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index) 495 + if (i_size >> PAGE_SHIFT >= index) 496 496 return true; 497 497 fallthrough; 498 498 case SHMEM_HUGE_ADVISE:
+16 -3
mm/swap.c
··· 620 620 pagevec_lru_move_fn(pvec, lru_lazyfree_fn); 621 621 622 622 activate_page_drain(cpu); 623 - invalidate_bh_lrus_cpu(cpu); 624 623 } 625 624 626 625 /** ··· 702 703 local_unlock(&lru_pvecs.lock); 703 704 } 704 705 706 + /* 707 + * It's called from per-cpu workqueue context in SMP case so 708 + * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on 709 + * the same cpu. It shouldn't be a problem in !SMP case since 710 + * the core is only one and the locks will disable preemption. 711 + */ 712 + static void lru_add_and_bh_lrus_drain(void) 713 + { 714 + local_lock(&lru_pvecs.lock); 715 + lru_add_drain_cpu(smp_processor_id()); 716 + local_unlock(&lru_pvecs.lock); 717 + invalidate_bh_lrus_cpu(); 718 + } 719 + 705 720 void lru_add_drain_cpu_zone(struct zone *zone) 706 721 { 707 722 local_lock(&lru_pvecs.lock); ··· 730 717 731 718 static void lru_add_drain_per_cpu(struct work_struct *dummy) 732 719 { 733 - lru_add_drain(); 720 + lru_add_and_bh_lrus_drain(); 734 721 } 735 722 736 723 /* ··· 871 858 */ 872 859 __lru_add_drain_all(true); 873 860 #else 874 - lru_add_drain(); 861 + lru_add_and_bh_lrus_drain(); 875 862 #endif 876 863 } 877 864
+2 -2
mm/util.c
··· 787 787 size_t *lenp, loff_t *ppos) 788 788 { 789 789 struct ctl_table t; 790 - int new_policy; 790 + int new_policy = -1; 791 791 int ret; 792 792 793 793 /* ··· 805 805 t = *table; 806 806 t.data = &new_policy; 807 807 ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); 808 - if (ret) 808 + if (ret || new_policy == -1) 809 809 return ret; 810 810 811 811 mm_compute_batch(new_policy);
+1
mm/workingset.c
··· 352 352 353 353 inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file); 354 354 355 + mem_cgroup_flush_stats(); 355 356 /* 356 357 * Compare the distance to the existing workingset size. We 357 358 * don't activate pages that couldn't stay resident even if
+9 -5
net/bpf/test_run.c
··· 552 552 __skb->gso_segs = skb_shinfo(skb)->gso_segs; 553 553 } 554 554 555 + static struct proto bpf_dummy_proto = { 556 + .name = "bpf_dummy", 557 + .owner = THIS_MODULE, 558 + .obj_size = sizeof(struct sock), 559 + }; 560 + 555 561 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, 556 562 union bpf_attr __user *uattr) 557 563 { ··· 602 596 break; 603 597 } 604 598 605 - sk = kzalloc(sizeof(struct sock), GFP_USER); 599 + sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1); 606 600 if (!sk) { 607 601 kfree(data); 608 602 kfree(ctx); 609 603 return -ENOMEM; 610 604 } 611 - sock_net_set(sk, net); 612 605 sock_init_data(NULL, sk); 613 606 614 607 skb = build_skb(data, 0); 615 608 if (!skb) { 616 609 kfree(data); 617 610 kfree(ctx); 618 - kfree(sk); 611 + sk_free(sk); 619 612 return -ENOMEM; 620 613 } 621 614 skb->sk = sk; ··· 687 682 if (dev && dev != net->loopback_dev) 688 683 dev_put(dev); 689 684 kfree_skb(skb); 690 - bpf_sk_storage_free(sk); 691 - kfree(sk); 685 + sk_free(sk); 692 686 kfree(ctx); 693 687 return ret; 694 688 }
+2 -4
net/bridge/br_multicast.c
··· 1677 1677 int ifindex, 1678 1678 struct br_ip *saddr) 1679 1679 { 1680 - lockdep_assert_held_once(&brmctx->br->multicast_lock); 1681 - 1682 1680 write_seqcount_begin(&querier->seq); 1683 1681 querier->port_ifidx = ifindex; 1684 1682 memcpy(&querier->addr, saddr, sizeof(*saddr)); ··· 3865 3867 3866 3868 brmctx->ip4_other_query.delay_time = 0; 3867 3869 brmctx->ip4_querier.port_ifidx = 0; 3868 - seqcount_init(&brmctx->ip4_querier.seq); 3870 + seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock); 3869 3871 brmctx->multicast_igmp_version = 2; 3870 3872 #if IS_ENABLED(CONFIG_IPV6) 3871 3873 brmctx->multicast_mld_version = 1; 3872 3874 brmctx->ip6_other_query.delay_time = 0; 3873 3875 brmctx->ip6_querier.port_ifidx = 0; 3874 - seqcount_init(&brmctx->ip6_querier.seq); 3876 + seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock); 3875 3877 #endif 3876 3878 3877 3879 timer_setup(&brmctx->ip4_mc_router_timer,
+2 -1
net/bridge/br_netlink.c
··· 1666 1666 } 1667 1667 1668 1668 return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) + 1669 - nla_total_size(sizeof(struct br_mcast_stats)) + 1669 + nla_total_size_64bit(sizeof(struct br_mcast_stats)) + 1670 + (p ? nla_total_size_64bit(sizeof(p->stp_xstats)) : 0) + 1670 1671 nla_total_size(0); 1671 1672 } 1672 1673
+1 -1
net/bridge/br_private.h
··· 82 82 struct bridge_mcast_querier { 83 83 struct br_ip addr; 84 84 int port_ifidx; 85 - seqcount_t seq; 85 + seqcount_spinlock_t seq; 86 86 }; 87 87 88 88 /* IGMP/MLD statistics */
+10 -6
net/core/dev.c
··· 6923 6923 */ 6924 6924 void napi_enable(struct napi_struct *n) 6925 6925 { 6926 - BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 6927 - smp_mb__before_atomic(); 6928 - clear_bit(NAPI_STATE_SCHED, &n->state); 6929 - clear_bit(NAPI_STATE_NPSVC, &n->state); 6930 - if (n->dev->threaded && n->thread) 6931 - set_bit(NAPI_STATE_THREADED, &n->state); 6926 + unsigned long val, new; 6927 + 6928 + do { 6929 + val = READ_ONCE(n->state); 6930 + BUG_ON(!test_bit(NAPI_STATE_SCHED, &val)); 6931 + 6932 + new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); 6933 + if (n->dev->threaded && n->thread) 6934 + new |= NAPIF_STATE_THREADED; 6935 + } while (cmpxchg(&n->state, val, new) != val); 6932 6936 } 6933 6937 EXPORT_SYMBOL(napi_enable); 6934 6938
+6
net/core/dev_addr_lists.c
··· 50 50 if (addr_len > MAX_ADDR_LEN) 51 51 return -EINVAL; 52 52 53 + ha = list_first_entry(&list->list, struct netdev_hw_addr, list); 54 + if (ha && !memcmp(addr, ha->addr, addr_len) && 55 + (!addr_type || addr_type == ha->type)) 56 + goto found_it; 57 + 53 58 while (*ins_point) { 54 59 int diff; 55 60 ··· 69 64 } else if (diff > 0) { 70 65 ins_point = &parent->rb_right; 71 66 } else { 67 + found_it: 72 68 if (exclusive) 73 69 return -EEXIST; 74 70 if (global) {
+1 -1
net/core/rtnetlink.c
··· 5262 5262 static size_t if_nlmsg_stats_size(const struct net_device *dev, 5263 5263 u32 filter_mask) 5264 5264 { 5265 - size_t size = 0; 5265 + size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); 5266 5266 5267 5267 if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) 5268 5268 size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64));
+48 -35
net/core/sock.c
··· 1376 1376 } 1377 1377 EXPORT_SYMBOL(sock_setsockopt); 1378 1378 1379 + static const struct cred *sk_get_peer_cred(struct sock *sk) 1380 + { 1381 + const struct cred *cred; 1382 + 1383 + spin_lock(&sk->sk_peer_lock); 1384 + cred = get_cred(sk->sk_peer_cred); 1385 + spin_unlock(&sk->sk_peer_lock); 1386 + 1387 + return cred; 1388 + } 1379 1389 1380 1390 static void cred_to_ucred(struct pid *pid, const struct cred *cred, 1381 1391 struct ucred *ucred) ··· 1562 1552 struct ucred peercred; 1563 1553 if (len > sizeof(peercred)) 1564 1554 len = sizeof(peercred); 1555 + 1556 + spin_lock(&sk->sk_peer_lock); 1565 1557 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); 1558 + spin_unlock(&sk->sk_peer_lock); 1559 + 1566 1560 if (copy_to_user(optval, &peercred, len)) 1567 1561 return -EFAULT; 1568 1562 goto lenout; ··· 1574 1560 1575 1561 case SO_PEERGROUPS: 1576 1562 { 1563 + const struct cred *cred; 1577 1564 int ret, n; 1578 1565 1579 - if (!sk->sk_peer_cred) 1566 + cred = sk_get_peer_cred(sk); 1567 + if (!cred) 1580 1568 return -ENODATA; 1581 1569 1582 - n = sk->sk_peer_cred->group_info->ngroups; 1570 + n = cred->group_info->ngroups; 1583 1571 if (len < n * sizeof(gid_t)) { 1584 1572 len = n * sizeof(gid_t); 1573 + put_cred(cred); 1585 1574 return put_user(len, optlen) ? -EFAULT : -ERANGE; 1586 1575 } 1587 1576 len = n * sizeof(gid_t); 1588 1577 1589 - ret = groups_to_user((gid_t __user *)optval, 1590 - sk->sk_peer_cred->group_info); 1578 + ret = groups_to_user((gid_t __user *)optval, cred->group_info); 1579 + put_cred(cred); 1591 1580 if (ret) 1592 1581 return ret; 1593 1582 goto lenout; ··· 1952 1935 sk->sk_frag.page = NULL; 1953 1936 } 1954 1937 1955 - if (sk->sk_peer_cred) 1956 - put_cred(sk->sk_peer_cred); 1938 + /* We do not need to acquire sk->sk_peer_lock, we are the last user. 
*/ 1939 + put_cred(sk->sk_peer_cred); 1957 1940 put_pid(sk->sk_peer_pid); 1941 + 1958 1942 if (likely(sk->sk_net_refcnt)) 1959 1943 put_net(sock_net(sk)); 1960 1944 sk_prot_free(sk->sk_prot_creator, sk); ··· 3163 3145 3164 3146 sk->sk_peer_pid = NULL; 3165 3147 sk->sk_peer_cred = NULL; 3148 + spin_lock_init(&sk->sk_peer_lock); 3149 + 3166 3150 sk->sk_write_pending = 0; 3167 3151 sk->sk_rcvlowat = 1; 3168 3152 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; ··· 3199 3179 3200 3180 void lock_sock_nested(struct sock *sk, int subclass) 3201 3181 { 3182 + /* The sk_lock has mutex_lock() semantics here. */ 3183 + mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); 3184 + 3202 3185 might_sleep(); 3203 3186 spin_lock_bh(&sk->sk_lock.slock); 3204 3187 if (sk->sk_lock.owned) 3205 3188 __lock_sock(sk); 3206 3189 sk->sk_lock.owned = 1; 3207 - spin_unlock(&sk->sk_lock.slock); 3208 - /* 3209 - * The sk_lock has mutex_lock() semantics here: 3210 - */ 3211 - mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); 3212 - local_bh_enable(); 3190 + spin_unlock_bh(&sk->sk_lock.slock); 3213 3191 } 3214 3192 EXPORT_SYMBOL(lock_sock_nested); 3215 3193 ··· 3230 3212 } 3231 3213 EXPORT_SYMBOL(release_sock); 3232 3214 3233 - /** 3234 - * lock_sock_fast - fast version of lock_sock 3235 - * @sk: socket 3236 - * 3237 - * This version should be used for very small section, where process wont block 3238 - * return false if fast path is taken: 3239 - * 3240 - * sk_lock.slock locked, owned = 0, BH disabled 3241 - * 3242 - * return true if slow path is taken: 3243 - * 3244 - * sk_lock.slock unlocked, owned = 1, BH enabled 3245 - */ 3246 - bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) 3215 + bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) 3247 3216 { 3248 3217 might_sleep(); 3249 3218 spin_lock_bh(&sk->sk_lock.slock); 3250 3219 3251 - if (!sk->sk_lock.owned) 3220 + if (!sk->sk_lock.owned) { 3252 3221 /* 3253 - * Note : We must disable BH 3222 + * Fast 
path return with bottom halves disabled and 3223 + * sock::sk_lock.slock held. 3224 + * 3225 + * The 'mutex' is not contended and holding 3226 + * sock::sk_lock.slock prevents all other lockers to 3227 + * proceed so the corresponding unlock_sock_fast() can 3228 + * avoid the slow path of release_sock() completely and 3229 + * just release slock. 3230 + * 3231 + * From a semantical POV this is equivalent to 'acquiring' 3232 + * the 'mutex', hence the corresponding lockdep 3233 + * mutex_release() has to happen in the fast path of 3234 + * unlock_sock_fast(). 3254 3235 */ 3255 3236 return false; 3237 + } 3256 3238 3257 3239 __lock_sock(sk); 3258 3240 sk->sk_lock.owned = 1; 3259 - spin_unlock(&sk->sk_lock.slock); 3260 - /* 3261 - * The sk_lock has mutex_lock() semantics here: 3262 - */ 3263 - mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); 3264 3241 __acquire(&sk->sk_lock.slock); 3265 - local_bh_enable(); 3242 + spin_unlock_bh(&sk->sk_lock.slock); 3266 3243 return true; 3267 3244 } 3268 - EXPORT_SYMBOL(lock_sock_fast); 3245 + EXPORT_SYMBOL(__lock_sock_fast); 3269 3246 3270 3247 int sock_gettstamp(struct socket *sock, void __user *userstamp, 3271 3248 bool timeval, bool time32)
+106 -8
net/dsa/dsa2.c
··· 429 429 { 430 430 struct devlink_port *dlp = &dp->devlink_port; 431 431 bool dsa_port_link_registered = false; 432 + struct dsa_switch *ds = dp->ds; 432 433 bool dsa_port_enabled = false; 433 434 int err = 0; 434 435 ··· 438 437 439 438 INIT_LIST_HEAD(&dp->fdbs); 440 439 INIT_LIST_HEAD(&dp->mdbs); 440 + 441 + if (ds->ops->port_setup) { 442 + err = ds->ops->port_setup(ds, dp->index); 443 + if (err) 444 + return err; 445 + } 441 446 442 447 switch (dp->type) { 443 448 case DSA_PORT_TYPE_UNUSED: ··· 487 480 dsa_port_disable(dp); 488 481 if (err && dsa_port_link_registered) 489 482 dsa_port_link_unregister_of(dp); 490 - if (err) 483 + if (err) { 484 + if (ds->ops->port_teardown) 485 + ds->ops->port_teardown(ds, dp->index); 491 486 return err; 487 + } 492 488 493 489 dp->setup = true; 494 490 ··· 543 533 static void dsa_port_teardown(struct dsa_port *dp) 544 534 { 545 535 struct devlink_port *dlp = &dp->devlink_port; 536 + struct dsa_switch *ds = dp->ds; 546 537 struct dsa_mac_addr *a, *tmp; 547 538 548 539 if (!dp->setup) 549 540 return; 541 + 542 + if (ds->ops->port_teardown) 543 + ds->ops->port_teardown(ds, dp->index); 550 544 551 545 devlink_port_type_clear(dlp); 552 546 ··· 593 579 if (dp->devlink_port_setup) 594 580 devlink_port_unregister(dlp); 595 581 dp->devlink_port_setup = false; 582 + } 583 + 584 + /* Destroy the current devlink port, and create a new one which has the UNUSED 585 + * flavour. At this point, any call to ds->ops->port_setup has been already 586 + * balanced out by a call to ds->ops->port_teardown, so we know that any 587 + * devlink port regions the driver had are now unregistered. We then call its 588 + * ds->ops->port_setup again, in order for the driver to re-create them on the 589 + * new devlink port. 
590 + */ 591 + static int dsa_port_reinit_as_unused(struct dsa_port *dp) 592 + { 593 + struct dsa_switch *ds = dp->ds; 594 + int err; 595 + 596 + dsa_port_devlink_teardown(dp); 597 + dp->type = DSA_PORT_TYPE_UNUSED; 598 + err = dsa_port_devlink_setup(dp); 599 + if (err) 600 + return err; 601 + 602 + if (ds->ops->port_setup) { 603 + /* On error, leave the devlink port registered, 604 + * dsa_switch_teardown will clean it up later. 605 + */ 606 + err = ds->ops->port_setup(ds, dp->index); 607 + if (err) 608 + return err; 609 + } 610 + 611 + return 0; 596 612 } 597 613 598 614 static int dsa_devlink_info_get(struct devlink *dl, ··· 880 836 devlink_params_publish(ds->devlink); 881 837 882 838 if (!ds->slave_mii_bus && ds->ops->phy_read) { 883 - ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev); 839 + ds->slave_mii_bus = mdiobus_alloc(); 884 840 if (!ds->slave_mii_bus) { 885 841 err = -ENOMEM; 886 842 goto teardown; ··· 890 846 891 847 err = mdiobus_register(ds->slave_mii_bus); 892 848 if (err < 0) 893 - goto teardown; 849 + goto free_slave_mii_bus; 894 850 } 895 851 896 852 ds->setup = true; 897 853 898 854 return 0; 899 855 856 + free_slave_mii_bus: 857 + if (ds->slave_mii_bus && ds->ops->phy_read) 858 + mdiobus_free(ds->slave_mii_bus); 900 859 teardown: 901 860 if (ds->ops->teardown) 902 861 ds->ops->teardown(ds); ··· 924 877 if (!ds->setup) 925 878 return; 926 879 927 - if (ds->slave_mii_bus && ds->ops->phy_read) 880 + if (ds->slave_mii_bus && ds->ops->phy_read) { 928 881 mdiobus_unregister(ds->slave_mii_bus); 882 + mdiobus_free(ds->slave_mii_bus); 883 + ds->slave_mii_bus = NULL; 884 + } 929 885 930 886 dsa_switch_unregister_notifier(ds); 931 887 ··· 988 938 list_for_each_entry(dp, &dst->ports, list) { 989 939 err = dsa_port_setup(dp); 990 940 if (err) { 991 - dsa_port_devlink_teardown(dp); 992 - dp->type = DSA_PORT_TYPE_UNUSED; 993 - err = dsa_port_devlink_setup(dp); 941 + err = dsa_port_reinit_as_unused(dp); 994 942 if (err) 995 943 goto teardown; 996 - continue; 997 
944 } 998 945 } 999 946 ··· 1095 1048 teardown_master: 1096 1049 dsa_tree_teardown_master(dst); 1097 1050 teardown_switches: 1051 + dsa_tree_teardown_ports(dst); 1098 1052 dsa_tree_teardown_switches(dst); 1099 1053 teardown_cpu_ports: 1100 1054 dsa_tree_teardown_cpu_ports(dst); ··· 1610 1562 mutex_unlock(&dsa2_mutex); 1611 1563 } 1612 1564 EXPORT_SYMBOL_GPL(dsa_unregister_switch); 1565 + 1566 + /* If the DSA master chooses to unregister its net_device on .shutdown, DSA is 1567 + * blocking that operation from completion, due to the dev_hold taken inside 1568 + * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of 1569 + * the DSA master, so that the system can reboot successfully. 1570 + */ 1571 + void dsa_switch_shutdown(struct dsa_switch *ds) 1572 + { 1573 + struct net_device *master, *slave_dev; 1574 + LIST_HEAD(unregister_list); 1575 + struct dsa_port *dp; 1576 + 1577 + mutex_lock(&dsa2_mutex); 1578 + rtnl_lock(); 1579 + 1580 + list_for_each_entry(dp, &ds->dst->ports, list) { 1581 + if (dp->ds != ds) 1582 + continue; 1583 + 1584 + if (!dsa_port_is_user(dp)) 1585 + continue; 1586 + 1587 + master = dp->cpu_dp->master; 1588 + slave_dev = dp->slave; 1589 + 1590 + netdev_upper_dev_unlink(master, slave_dev); 1591 + /* Just unlinking ourselves as uppers of the master is not 1592 + * sufficient. When the master net device unregisters, that will 1593 + * also call dev_close, which we will catch as NETDEV_GOING_DOWN 1594 + * and trigger a dev_close on our own devices (dsa_slave_close). 1595 + * In turn, that will call dev_mc_unsync on the master's net 1596 + * device. If the master is also a DSA switch port, this will 1597 + * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on 1598 + * its own master. 
Lockdep will complain about the fact that 1599 + * all cascaded masters have the same dsa_master_addr_list_lock_key, 1600 + * which it normally would not do if the cascaded masters would 1601 + * be in a proper upper/lower relationship, which we've just 1602 + * destroyed. 1603 + * To suppress the lockdep warnings, let's actually unregister 1604 + * the DSA slave interfaces too, to avoid the nonsensical 1605 + * multicast address list synchronization on shutdown. 1606 + */ 1607 + unregister_netdevice_queue(slave_dev, &unregister_list); 1608 + } 1609 + unregister_netdevice_many(&unregister_list); 1610 + 1611 + rtnl_unlock(); 1612 + mutex_unlock(&dsa2_mutex); 1613 + } 1614 + EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
+1 -1
net/dsa/tag_dsa.c
··· 210 210 cmd = dsa_header[0] >> 6; 211 211 switch (cmd) { 212 212 case DSA_CMD_FORWARD: 213 - trunk = !!(dsa_header[1] & 7); 213 + trunk = !!(dsa_header[1] & 4); 214 214 break; 215 215 216 216 case DSA_CMD_TO_CPU:
+1 -1
net/dsa/tag_ocelot.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright 2019 NXP Semiconductors 2 + /* Copyright 2019 NXP 3 3 */ 4 4 #include <linux/dsa/ocelot.h> 5 5 #include <soc/mscc/ocelot.h>
+1 -1
net/dsa/tag_ocelot_8021q.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright 2020-2021 NXP Semiconductors 2 + /* Copyright 2020-2021 NXP 3 3 * 4 4 * An implementation of the software-defined tag_8021q.c tagger format, which 5 5 * also preserves full functionality under a vlan_filtering bridge. It does
+9 -7
net/ipv4/fib_semantics.c
··· 1661 1661 1662 1662 #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6) 1663 1663 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc, 1664 - int nh_weight, u8 rt_family) 1664 + int nh_weight, u8 rt_family, u32 nh_tclassid) 1665 1665 { 1666 1666 const struct net_device *dev = nhc->nhc_dev; 1667 1667 struct rtnexthop *rtnh; ··· 1678 1678 goto nla_put_failure; 1679 1679 1680 1680 rtnh->rtnh_flags = flags; 1681 + 1682 + if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid)) 1683 + goto nla_put_failure; 1681 1684 1682 1685 /* length of rtnetlink header + attributes */ 1683 1686 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh; ··· 1709 1706 } 1710 1707 1711 1708 for_nexthops(fi) { 1712 - if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight, 1713 - AF_INET) < 0) 1714 - goto nla_put_failure; 1709 + u32 nh_tclassid = 0; 1715 1710 #ifdef CONFIG_IP_ROUTE_CLASSID 1716 - if (nh->nh_tclassid && 1717 - nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid)) 1718 - goto nla_put_failure; 1711 + nh_tclassid = nh->nh_tclassid; 1719 1712 #endif 1713 + if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight, 1714 + AF_INET, nh_tclassid) < 0) 1715 + goto nla_put_failure; 1720 1716 } endfor_nexthops(fi); 1721 1717 1722 1718 mp_end:
+3 -1
net/ipv4/inet_hashtables.c
··· 242 242 243 243 if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif)) 244 244 return -1; 245 + score = sk->sk_bound_dev_if ? 2 : 1; 245 246 246 - score = sk->sk_family == PF_INET ? 2 : 1; 247 + if (sk->sk_family == PF_INET) 248 + score++; 247 249 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) 248 250 score++; 249 251 }
+1 -1
net/ipv4/netfilter/iptable_raw.c
··· 42 42 43 43 static struct nf_hook_ops *rawtable_ops __read_mostly; 44 44 45 - static int __net_init iptable_raw_table_init(struct net *net) 45 + static int iptable_raw_table_init(struct net *net) 46 46 { 47 47 struct ipt_replace *repl; 48 48 const struct xt_table *table = &packet_raw;
+9 -21
net/ipv4/netfilter/nf_defrag_ipv4.c
··· 20 20 #endif 21 21 #include <net/netfilter/nf_conntrack_zones.h> 22 22 23 - static unsigned int defrag4_pernet_id __read_mostly; 24 23 static DEFINE_MUTEX(defrag4_mutex); 25 - 26 - struct defrag4_pernet { 27 - unsigned int users; 28 - }; 29 24 30 25 static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb, 31 26 u_int32_t user) ··· 106 111 107 112 static void __net_exit defrag4_net_exit(struct net *net) 108 113 { 109 - struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id); 110 - 111 - if (nf_defrag->users) { 114 + if (net->nf.defrag_ipv4_users) { 112 115 nf_unregister_net_hooks(net, ipv4_defrag_ops, 113 116 ARRAY_SIZE(ipv4_defrag_ops)); 114 - nf_defrag->users = 0; 117 + net->nf.defrag_ipv4_users = 0; 115 118 } 116 119 } 117 120 118 121 static struct pernet_operations defrag4_net_ops = { 119 122 .exit = defrag4_net_exit, 120 - .id = &defrag4_pernet_id, 121 - .size = sizeof(struct defrag4_pernet), 122 123 }; 123 124 124 125 static int __init nf_defrag_init(void) ··· 129 138 130 139 int nf_defrag_ipv4_enable(struct net *net) 131 140 { 132 - struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id); 133 141 int err = 0; 134 142 135 143 mutex_lock(&defrag4_mutex); 136 - if (nf_defrag->users == UINT_MAX) { 144 + if (net->nf.defrag_ipv4_users == UINT_MAX) { 137 145 err = -EOVERFLOW; 138 146 goto out_unlock; 139 147 } 140 148 141 - if (nf_defrag->users) { 142 - nf_defrag->users++; 149 + if (net->nf.defrag_ipv4_users) { 150 + net->nf.defrag_ipv4_users++; 143 151 goto out_unlock; 144 152 } 145 153 146 154 err = nf_register_net_hooks(net, ipv4_defrag_ops, 147 155 ARRAY_SIZE(ipv4_defrag_ops)); 148 156 if (err == 0) 149 - nf_defrag->users = 1; 157 + net->nf.defrag_ipv4_users = 1; 150 158 151 159 out_unlock: 152 160 mutex_unlock(&defrag4_mutex); ··· 155 165 156 166 void nf_defrag_ipv4_disable(struct net *net) 157 167 { 158 - struct defrag4_pernet *nf_defrag = net_generic(net, defrag4_pernet_id); 159 - 160 168 
mutex_lock(&defrag4_mutex); 161 - if (nf_defrag->users) { 162 - nf_defrag->users--; 163 - if (nf_defrag->users == 0) 169 + if (net->nf.defrag_ipv4_users) { 170 + net->nf.defrag_ipv4_users--; 171 + if (net->nf.defrag_ipv4_users == 0) 164 172 nf_unregister_net_hooks(net, ipv4_defrag_ops, 165 173 ARRAY_SIZE(ipv4_defrag_ops)); 166 174 }
+16 -5
net/ipv4/nexthop.c
··· 1982 1982 rcu_assign_pointer(old->nh_grp, newg); 1983 1983 1984 1984 if (newg->resilient) { 1985 + /* Make sure concurrent readers are not using 'oldg' anymore. */ 1986 + synchronize_net(); 1985 1987 rcu_assign_pointer(oldg->res_table, tmp_table); 1986 1988 rcu_assign_pointer(oldg->spare->res_table, tmp_table); 1987 1989 } ··· 3567 3565 }; 3568 3566 3569 3567 static int nexthops_dump(struct net *net, struct notifier_block *nb, 3568 + enum nexthop_event_type event_type, 3570 3569 struct netlink_ext_ack *extack) 3571 3570 { 3572 3571 struct rb_root *root = &net->nexthop.rb_root; ··· 3578 3575 struct nexthop *nh; 3579 3576 3580 3577 nh = rb_entry(node, struct nexthop, rb_node); 3581 - err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh, 3582 - extack); 3578 + err = call_nexthop_notifier(nb, net, event_type, nh, extack); 3583 3579 if (err) 3584 3580 break; 3585 3581 } ··· 3592 3590 int err; 3593 3591 3594 3592 rtnl_lock(); 3595 - err = nexthops_dump(net, nb, extack); 3593 + err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack); 3596 3594 if (err) 3597 3595 goto unlock; 3598 3596 err = blocking_notifier_chain_register(&net->nexthop.notifier_chain, ··· 3605 3603 3606 3604 int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb) 3607 3605 { 3608 - return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain, 3609 - nb); 3606 + int err; 3607 + 3608 + rtnl_lock(); 3609 + err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain, 3610 + nb); 3611 + if (err) 3612 + goto unlock; 3613 + nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL); 3614 + unlock: 3615 + rtnl_unlock(); 3616 + return err; 3610 3617 } 3611 3618 EXPORT_SYMBOL(unregister_nexthop_notifier); 3612 3619
+7 -6
net/ipv4/udp.c
··· 390 390 dif, sdif); 391 391 if (!dev_match) 392 392 return -1; 393 - score += 4; 393 + if (sk->sk_bound_dev_if) 394 + score += 4; 394 395 395 396 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) 396 397 score++; ··· 1054 1053 __be16 dport; 1055 1054 u8 tos; 1056 1055 int err, is_udplite = IS_UDPLITE(sk); 1057 - int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 1056 + int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE; 1058 1057 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 1059 1058 struct sk_buff *skb; 1060 1059 struct ip_options_data opt_copy; ··· 1362 1361 } 1363 1362 1364 1363 up->len += size; 1365 - if (!(up->corkflag || (flags&MSG_MORE))) 1364 + if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE))) 1366 1365 ret = udp_push_pending_frames(sk); 1367 1366 if (!ret) 1368 1367 ret = size; ··· 2663 2662 switch (optname) { 2664 2663 case UDP_CORK: 2665 2664 if (val != 0) { 2666 - up->corkflag = 1; 2665 + WRITE_ONCE(up->corkflag, 1); 2667 2666 } else { 2668 - up->corkflag = 0; 2667 + WRITE_ONCE(up->corkflag, 0); 2669 2668 lock_sock(sk); 2670 2669 push_pending_frames(sk); 2671 2670 release_sock(sk); ··· 2788 2787 2789 2788 switch (optname) { 2790 2789 case UDP_CORK: 2791 - val = up->corkflag; 2790 + val = READ_ONCE(up->corkflag); 2792 2791 break; 2793 2792 2794 2793 case UDP_ENCAP:
+1 -1
net/ipv6/inet6_hashtables.c
··· 106 106 if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif)) 107 107 return -1; 108 108 109 - score = 1; 109 + score = sk->sk_bound_dev_if ? 2 : 1; 110 110 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) 111 111 score++; 112 112 }
+1
net/ipv6/netfilter/ip6_tables.c
··· 273 273 * things we don't know, ie. tcp syn flag or ports). If the 274 274 * rule is also a fragment-specific rule, non-fragments won't 275 275 * match it. */ 276 + acpar.fragoff = 0; 276 277 acpar.hotdrop = false; 277 278 acpar.state = state; 278 279
+1 -1
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 33 33 34 34 static const char nf_frags_cache_name[] = "nf-frags"; 35 35 36 - unsigned int nf_frag_pernet_id __read_mostly; 36 + static unsigned int nf_frag_pernet_id __read_mostly; 37 37 static struct inet_frags nf_frags; 38 38 39 39 static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net)
+9 -16
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
··· 25 25 #include <net/netfilter/nf_conntrack_zones.h> 26 26 #include <net/netfilter/ipv6/nf_defrag_ipv6.h> 27 27 28 - extern unsigned int nf_frag_pernet_id; 29 - 30 28 static DEFINE_MUTEX(defrag6_mutex); 31 29 32 30 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum, ··· 89 91 90 92 static void __net_exit defrag6_net_exit(struct net *net) 91 93 { 92 - struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id); 93 - 94 - if (nf_frag->users) { 94 + if (net->nf.defrag_ipv6_users) { 95 95 nf_unregister_net_hooks(net, ipv6_defrag_ops, 96 96 ARRAY_SIZE(ipv6_defrag_ops)); 97 - nf_frag->users = 0; 97 + net->nf.defrag_ipv6_users = 0; 98 98 } 99 99 } 100 100 ··· 130 134 131 135 int nf_defrag_ipv6_enable(struct net *net) 132 136 { 133 - struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id); 134 137 int err = 0; 135 138 136 139 mutex_lock(&defrag6_mutex); 137 - if (nf_frag->users == UINT_MAX) { 140 + if (net->nf.defrag_ipv6_users == UINT_MAX) { 138 141 err = -EOVERFLOW; 139 142 goto out_unlock; 140 143 } 141 144 142 - if (nf_frag->users) { 143 - nf_frag->users++; 145 + if (net->nf.defrag_ipv6_users) { 146 + net->nf.defrag_ipv6_users++; 144 147 goto out_unlock; 145 148 } 146 149 147 150 err = nf_register_net_hooks(net, ipv6_defrag_ops, 148 151 ARRAY_SIZE(ipv6_defrag_ops)); 149 152 if (err == 0) 150 - nf_frag->users = 1; 153 + net->nf.defrag_ipv6_users = 1; 151 154 152 155 out_unlock: 153 156 mutex_unlock(&defrag6_mutex); ··· 156 161 157 162 void nf_defrag_ipv6_disable(struct net *net) 158 163 { 159 - struct nft_ct_frag6_pernet *nf_frag = net_generic(net, nf_frag_pernet_id); 160 - 161 164 mutex_lock(&defrag6_mutex); 162 - if (nf_frag->users) { 163 - nf_frag->users--; 164 - if (nf_frag->users == 0) 165 + if (net->nf.defrag_ipv6_users) { 166 + net->nf.defrag_ipv6_users--; 167 + if (net->nf.defrag_ipv6_users == 0) 165 168 nf_unregister_net_hooks(net, ipv6_defrag_ops, 166 169 ARRAY_SIZE(ipv6_defrag_ops)); 167 170 }
+3 -2
net/ipv6/route.c
··· 5681 5681 goto nla_put_failure; 5682 5682 5683 5683 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common, 5684 - rt->fib6_nh->fib_nh_weight, AF_INET6) < 0) 5684 + rt->fib6_nh->fib_nh_weight, AF_INET6, 5685 + 0) < 0) 5685 5686 goto nla_put_failure; 5686 5687 5687 5688 list_for_each_entry_safe(sibling, next_sibling, 5688 5689 &rt->fib6_siblings, fib6_siblings) { 5689 5690 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common, 5690 5691 sibling->fib6_nh->fib_nh_weight, 5691 - AF_INET6) < 0) 5692 + AF_INET6, 0) < 0) 5692 5693 goto nla_put_failure; 5693 5694 } 5694 5695
+3 -2
net/ipv6/udp.c
··· 133 133 dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif); 134 134 if (!dev_match) 135 135 return -1; 136 - score++; 136 + if (sk->sk_bound_dev_if) 137 + score++; 137 138 138 139 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) 139 140 score++; ··· 1304 1303 int addr_len = msg->msg_namelen; 1305 1304 bool connected = false; 1306 1305 int ulen = len; 1307 - int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 1306 + int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE; 1308 1307 int err; 1309 1308 int is_udplite = IS_UDPLITE(sk); 1310 1309 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
+4 -1
net/mac80211/mesh_pathtbl.c
··· 60 60 atomic_set(&newtbl->entries, 0); 61 61 spin_lock_init(&newtbl->gates_lock); 62 62 spin_lock_init(&newtbl->walk_lock); 63 - rhashtable_init(&newtbl->rhead, &mesh_rht_params); 63 + if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) { 64 + kfree(newtbl); 65 + return NULL; 66 + } 64 67 65 68 return newtbl; 66 69 }
+2 -1
net/mac80211/mesh_ps.c
··· 2 2 /* 3 3 * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de> 4 4 * Copyright 2012-2013, cozybit Inc. 5 + * Copyright (C) 2021 Intel Corporation 5 6 */ 6 7 7 8 #include "mesh.h" ··· 589 588 590 589 /* only transmit to PS STA with announced, non-zero awake window */ 591 590 if (test_sta_flag(sta, WLAN_STA_PS_STA) && 592 - (!elems->awake_window || !le16_to_cpu(*elems->awake_window))) 591 + (!elems->awake_window || !get_unaligned_le16(elems->awake_window))) 593 592 return; 594 593 595 594 if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
-4
net/mac80211/rate.c
··· 392 392 int mcast_rate; 393 393 bool use_basicrate = false; 394 394 395 - if (ieee80211_is_tx_data(txrc->skb) && 396 - info->flags & IEEE80211_TX_CTL_NO_ACK) 397 - return false; 398 - 399 395 if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) { 400 396 __rate_control_send_low(txrc->hw, sband, pubsta, info, 401 397 txrc->rate_idx_mask);
+2 -1
net/mac80211/rx.c
··· 4131 4131 if (!bssid) 4132 4132 return false; 4133 4133 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 4134 - ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 4134 + ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) || 4135 + !is_valid_ether_addr(hdr->addr2)) 4135 4136 return false; 4136 4137 if (ieee80211_is_beacon(hdr->frame_control)) 4137 4138 return true;
+12
net/mac80211/tx.c
··· 2209 2209 } 2210 2210 2211 2211 vht_mcs = iterator.this_arg[4] >> 4; 2212 + if (vht_mcs > 11) 2213 + vht_mcs = 0; 2212 2214 vht_nss = iterator.this_arg[4] & 0xF; 2215 + if (!vht_nss || vht_nss > 8) 2216 + vht_nss = 1; 2213 2217 break; 2214 2218 2215 2219 /* ··· 3383 3379 3384 3380 if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) 3385 3381 goto out; 3382 + 3383 + /* If n == 2, the "while (*frag_tail)" loop above didn't execute 3384 + * and frag_tail should be &skb_shinfo(head)->frag_list. 3385 + * However, ieee80211_amsdu_prepare_head() can reallocate it. 3386 + * Reload frag_tail to have it pointing to the correct place. 3387 + */ 3388 + if (n == 2) 3389 + frag_tail = &skb_shinfo(head)->frag_list; 3386 3390 3387 3391 /* 3388 3392 * Pad out the previous subframe to a multiple of 4 by adding the
+6
net/mac80211/wpa.c
··· 520 520 return RX_DROP_UNUSABLE; 521 521 } 522 522 523 + /* reload hdr - skb might have been reallocated */ 524 + hdr = (void *)rx->skb->data; 525 + 523 526 data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len; 524 527 if (!rx->sta || data_len < 0) 525 528 return RX_DROP_UNUSABLE; ··· 751 748 if (skb_linearize(rx->skb)) 752 749 return RX_DROP_UNUSABLE; 753 750 } 751 + 752 + /* reload hdr - skb might have been reallocated */ 753 + hdr = (void *)rx->skb->data; 754 754 755 755 data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len; 756 756 if (!rx->sta || data_len < 0)
+1 -1
net/mptcp/mptcp_diag.c
··· 36 36 struct sock *sk; 37 37 38 38 net = sock_net(in_skb->sk); 39 - msk = mptcp_token_get_sock(req->id.idiag_cookie[0]); 39 + msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]); 40 40 if (!msk) 41 41 goto out_nosk; 42 42
+1 -3
net/mptcp/pm_netlink.c
··· 1718 1718 1719 1719 list_for_each_entry(entry, &pernet->local_addr_list, list) { 1720 1720 if (addresses_equal(&entry->addr, &addr.addr, true)) { 1721 - ret = mptcp_nl_addr_backup(net, &entry->addr, bkup); 1722 - if (ret) 1723 - return ret; 1721 + mptcp_nl_addr_backup(net, &entry->addr, bkup); 1724 1722 1725 1723 if (bkup) 1726 1724 entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
+3 -3
net/mptcp/protocol.c
··· 1316 1316 goto alloc_skb; 1317 1317 } 1318 1318 1319 - must_collapse = (info->size_goal - skb->len > 0) && 1319 + must_collapse = (info->size_goal > skb->len) && 1320 1320 (skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags); 1321 1321 if (must_collapse) { 1322 1322 size_bias = skb->len; ··· 1325 1325 } 1326 1326 1327 1327 alloc_skb: 1328 - if (!must_collapse && !ssk->sk_tx_skb_cache && 1328 + if (!must_collapse && 1329 1329 !mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held)) 1330 1330 return 0; 1331 1331 ··· 2735 2735 inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32; 2736 2736 mptcp_for_each_subflow(mptcp_sk(sk), subflow) { 2737 2737 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); 2738 - bool slow = lock_sock_fast(ssk); 2738 + bool slow = lock_sock_fast_nested(ssk); 2739 2739 2740 2740 sock_orphan(ssk); 2741 2741 unlock_sock_fast(ssk, slow);
+1 -1
net/mptcp/protocol.h
··· 709 709 void mptcp_token_accept(struct mptcp_subflow_request_sock *r, 710 710 struct mptcp_sock *msk); 711 711 bool mptcp_token_exists(u32 token); 712 - struct mptcp_sock *mptcp_token_get_sock(u32 token); 712 + struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token); 713 713 struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot, 714 714 long *s_num); 715 715 void mptcp_token_destroy(struct mptcp_sock *msk);
+1 -1
net/mptcp/subflow.c
··· 86 86 struct mptcp_sock *msk; 87 87 int local_id; 88 88 89 - msk = mptcp_token_get_sock(subflow_req->token); 89 + msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token); 90 90 if (!msk) { 91 91 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN); 92 92 return NULL;
+1 -12
net/mptcp/syncookies.c
··· 108 108 109 109 e->valid = 0; 110 110 111 - msk = mptcp_token_get_sock(e->token); 111 + msk = mptcp_token_get_sock(net, e->token); 112 112 if (!msk) { 113 113 spin_unlock_bh(&join_entry_locks[i]); 114 114 return false; 115 115 } 116 - 117 - /* If this fails, the token got re-used in the mean time by another 118 - * mptcp socket in a different netns, i.e. entry is outdated. 119 - */ 120 - if (!net_eq(sock_net((struct sock *)msk), net)) 121 - goto err_put; 122 116 123 117 subflow_req->remote_nonce = e->remote_nonce; 124 118 subflow_req->local_nonce = e->local_nonce; ··· 122 128 subflow_req->msk = msk; 123 129 spin_unlock_bh(&join_entry_locks[i]); 124 130 return true; 125 - 126 - err_put: 127 - spin_unlock_bh(&join_entry_locks[i]); 128 - sock_put((struct sock *)msk); 129 - return false; 130 131 } 131 132 132 133 void __init mptcp_join_cookie_init(void)
+8 -3
net/mptcp/token.c
··· 231 231 232 232 /** 233 233 * mptcp_token_get_sock - retrieve mptcp connection sock using its token 234 + * @net: restrict to this namespace 234 235 * @token: token of the mptcp connection to retrieve 235 236 * 236 237 * This function returns the mptcp connection structure with the given token. ··· 239 238 * 240 239 * returns NULL if no connection with the given token value exists. 241 240 */ 242 - struct mptcp_sock *mptcp_token_get_sock(u32 token) 241 + struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token) 243 242 { 244 243 struct hlist_nulls_node *pos; 245 244 struct token_bucket *bucket; ··· 252 251 again: 253 252 sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) { 254 253 msk = mptcp_sk(sk); 255 - if (READ_ONCE(msk->token) != token) 254 + if (READ_ONCE(msk->token) != token || 255 + !net_eq(sock_net(sk), net)) 256 256 continue; 257 + 257 258 if (!refcount_inc_not_zero(&sk->sk_refcnt)) 258 259 goto not_found; 259 - if (READ_ONCE(msk->token) != token) { 260 + 261 + if (READ_ONCE(msk->token) != token || 262 + !net_eq(sock_net(sk), net)) { 260 263 sock_put(sk); 261 264 goto again; 262 265 }
+8 -6
net/mptcp/token_test.c
··· 11 11 GFP_USER); 12 12 KUNIT_EXPECT_NOT_ERR_OR_NULL(test, req); 13 13 mptcp_token_init_request((struct request_sock *)req); 14 + sock_net_set((struct sock *)req, &init_net); 14 15 return req; 15 16 } 16 17 ··· 23 22 KUNIT_ASSERT_EQ(test, 0, 24 23 mptcp_token_new_request((struct request_sock *)req)); 25 24 KUNIT_EXPECT_NE(test, 0, (int)req->token); 26 - KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(req->token)); 25 + KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, req->token)); 27 26 28 27 /* cleanup */ 29 28 mptcp_token_destroy_request((struct request_sock *)req); ··· 56 55 msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER); 57 56 KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk); 58 57 refcount_set(&((struct sock *)msk)->sk_refcnt, 1); 58 + sock_net_set((struct sock *)msk, &init_net); 59 59 return msk; 60 60 } 61 61 ··· 76 74 mptcp_token_new_connect((struct sock *)icsk)); 77 75 KUNIT_EXPECT_NE(test, 0, (int)ctx->token); 78 76 KUNIT_EXPECT_EQ(test, ctx->token, msk->token); 79 - KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(ctx->token)); 77 + KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, ctx->token)); 80 78 KUNIT_EXPECT_EQ(test, 2, (int)refcount_read(&sk->sk_refcnt)); 81 79 82 80 mptcp_token_destroy(msk); 83 - KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(ctx->token)); 81 + KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, ctx->token)); 84 82 } 85 83 86 84 static void mptcp_token_test_accept(struct kunit *test) ··· 92 90 mptcp_token_new_request((struct request_sock *)req)); 93 91 msk->token = req->token; 94 92 mptcp_token_accept(req, msk); 95 - KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token)); 93 + KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token)); 96 94 97 95 /* this is now a no-op */ 98 96 mptcp_token_destroy_request((struct request_sock *)req); 99 - KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token)); 97 + 
KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token)); 100 98 101 99 /* cleanup */ 102 100 mptcp_token_destroy(msk); ··· 118 116 119 117 /* simulate race on removal */ 120 118 refcount_set(&sk->sk_refcnt, 0); 121 - KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(msk->token)); 119 + KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, msk->token)); 122 120 123 121 /* cleanup */ 124 122 mptcp_token_destroy(msk);
+2 -2
net/netfilter/ipset/ip_set_hash_gen.h
··· 130 130 { 131 131 size_t hsize; 132 132 133 - /* We must fit both into u32 in jhash and size_t */ 133 + /* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */ 134 134 if (hbits > 31) 135 135 return 0; 136 136 hsize = jhash_size(hbits); 137 - if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *) 137 + if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *) 138 138 < hsize) 139 139 return 0; 140 140
+4
net/netfilter/ipvs/ip_vs_conn.c
··· 1468 1468 int idx; 1469 1469 1470 1470 /* Compute size and mask */ 1471 + if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) { 1472 + pr_info("conn_tab_bits not in [8, 20]. Using default value\n"); 1473 + ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS; 1474 + } 1471 1475 ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; 1472 1476 ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1; 1473 1477
+100 -54
net/netfilter/nf_conntrack_core.c
··· 74 74 static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); 75 75 static __read_mostly bool nf_conntrack_locks_all; 76 76 77 + /* serialize hash resizes and nf_ct_iterate_cleanup */ 78 + static DEFINE_MUTEX(nf_conntrack_mutex); 79 + 77 80 #define GC_SCAN_INTERVAL (120u * HZ) 78 81 #define GC_SCAN_MAX_DURATION msecs_to_jiffies(10) 79 82 80 - #define MAX_CHAINLEN 64u 83 + #define MIN_CHAINLEN 8u 84 + #define MAX_CHAINLEN (32u - MIN_CHAINLEN) 81 85 82 86 static struct conntrack_gc_work conntrack_gc_work; 83 87 ··· 192 188 static siphash_key_t nf_conntrack_hash_rnd __read_mostly; 193 189 194 190 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, 191 + unsigned int zoneid, 195 192 const struct net *net) 196 193 { 197 194 struct { 198 195 struct nf_conntrack_man src; 199 196 union nf_inet_addr dst_addr; 197 + unsigned int zone; 200 198 u32 net_mix; 201 199 u16 dport; 202 200 u16 proto; ··· 211 205 /* The direction must be ignored, so handle usable members manually. */ 212 206 combined.src = tuple->src; 213 207 combined.dst_addr = tuple->dst.u3; 208 + combined.zone = zoneid; 214 209 combined.net_mix = net_hash_mix(net); 215 210 combined.dport = (__force __u16)tuple->dst.u.all; 216 211 combined.proto = tuple->dst.protonum; ··· 226 219 227 220 static u32 __hash_conntrack(const struct net *net, 228 221 const struct nf_conntrack_tuple *tuple, 222 + unsigned int zoneid, 229 223 unsigned int size) 230 224 { 231 - return reciprocal_scale(hash_conntrack_raw(tuple, net), size); 225 + return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size); 232 226 } 233 227 234 228 static u32 hash_conntrack(const struct net *net, 235 - const struct nf_conntrack_tuple *tuple) 229 + const struct nf_conntrack_tuple *tuple, 230 + unsigned int zoneid) 236 231 { 237 - return scale_hash(hash_conntrack_raw(tuple, net)); 232 + return scale_hash(hash_conntrack_raw(tuple, zoneid, net)); 238 233 } 239 234 240 235 static bool nf_ct_get_tuple_ports(const struct sk_buff *skb, 
··· 659 650 do { 660 651 sequence = read_seqcount_begin(&nf_conntrack_generation); 661 652 hash = hash_conntrack(net, 662 - &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 653 + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 654 + nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL)); 663 655 reply_hash = hash_conntrack(net, 664 - &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 656 + &ct->tuplehash[IP_CT_DIR_REPLY].tuple, 657 + nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY)); 665 658 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); 666 659 667 660 clean_from_lists(ct); ··· 830 819 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone, 831 820 const struct nf_conntrack_tuple *tuple) 832 821 { 833 - return __nf_conntrack_find_get(net, zone, tuple, 834 - hash_conntrack_raw(tuple, net)); 822 + unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL); 823 + struct nf_conntrack_tuple_hash *thash; 824 + 825 + thash = __nf_conntrack_find_get(net, zone, tuple, 826 + hash_conntrack_raw(tuple, zone_id, net)); 827 + 828 + if (thash) 829 + return thash; 830 + 831 + rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY); 832 + if (rid != zone_id) 833 + return __nf_conntrack_find_get(net, zone, tuple, 834 + hash_conntrack_raw(tuple, rid, net)); 835 + return thash; 835 836 } 836 837 EXPORT_SYMBOL_GPL(nf_conntrack_find_get); 837 838 ··· 865 842 unsigned int hash, reply_hash; 866 843 struct nf_conntrack_tuple_hash *h; 867 844 struct hlist_nulls_node *n; 845 + unsigned int max_chainlen; 868 846 unsigned int chainlen = 0; 869 847 unsigned int sequence; 870 848 int err = -EEXIST; ··· 876 852 do { 877 853 sequence = read_seqcount_begin(&nf_conntrack_generation); 878 854 hash = hash_conntrack(net, 879 - &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 855 + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 856 + nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL)); 880 857 reply_hash = hash_conntrack(net, 881 - &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 858 + 
&ct->tuplehash[IP_CT_DIR_REPLY].tuple, 859 + nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY)); 882 860 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); 861 + 862 + max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN); 883 863 884 864 /* See if there's one in the list already, including reverse */ 885 865 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) { ··· 891 863 zone, net)) 892 864 goto out; 893 865 894 - if (chainlen++ > MAX_CHAINLEN) 866 + if (chainlen++ > max_chainlen) 895 867 goto chaintoolong; 896 868 } 897 869 ··· 901 873 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, 902 874 zone, net)) 903 875 goto out; 904 - if (chainlen++ > MAX_CHAINLEN) 876 + if (chainlen++ > max_chainlen) 905 877 goto chaintoolong; 906 878 } 907 879 ··· 1131 1103 int 1132 1104 __nf_conntrack_confirm(struct sk_buff *skb) 1133 1105 { 1106 + unsigned int chainlen = 0, sequence, max_chainlen; 1134 1107 const struct nf_conntrack_zone *zone; 1135 - unsigned int chainlen = 0, sequence; 1136 1108 unsigned int hash, reply_hash; 1137 1109 struct nf_conntrack_tuple_hash *h; 1138 1110 struct nf_conn *ct; ··· 1161 1133 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev; 1162 1134 hash = scale_hash(hash); 1163 1135 reply_hash = hash_conntrack(net, 1164 - &ct->tuplehash[IP_CT_DIR_REPLY].tuple); 1165 - 1136 + &ct->tuplehash[IP_CT_DIR_REPLY].tuple, 1137 + nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY)); 1166 1138 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence)); 1167 1139 1168 1140 /* We're not in hash table, and we refuse to set up related ··· 1196 1168 goto dying; 1197 1169 } 1198 1170 1171 + max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN); 1199 1172 /* See if there's one in the list already, including reverse: 1200 1173 NAT could have grabbed it without realizing, since we're 1201 1174 not in the hash. If there is, we lost race. 
*/ ··· 1204 1175 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 1205 1176 zone, net)) 1206 1177 goto out; 1207 - if (chainlen++ > MAX_CHAINLEN) 1178 + if (chainlen++ > max_chainlen) 1208 1179 goto chaintoolong; 1209 1180 } 1210 1181 ··· 1213 1184 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, 1214 1185 zone, net)) 1215 1186 goto out; 1216 - if (chainlen++ > MAX_CHAINLEN) { 1187 + if (chainlen++ > max_chainlen) { 1217 1188 chaintoolong: 1218 1189 nf_ct_add_to_dying_list(ct); 1219 1190 NF_CT_STAT_INC(net, chaintoolong); ··· 1275 1246 rcu_read_lock(); 1276 1247 begin: 1277 1248 nf_conntrack_get_ht(&ct_hash, &hsize); 1278 - hash = __hash_conntrack(net, tuple, hsize); 1249 + hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize); 1279 1250 1280 1251 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) { 1281 1252 ct = nf_ct_tuplehash_to_ctrack(h); ··· 1716 1687 struct nf_conntrack_tuple_hash *h; 1717 1688 enum ip_conntrack_info ctinfo; 1718 1689 struct nf_conntrack_zone tmp; 1690 + u32 hash, zone_id, rid; 1719 1691 struct nf_conn *ct; 1720 - u32 hash; 1721 1692 1722 1693 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), 1723 1694 dataoff, state->pf, protonum, state->net, ··· 1728 1699 1729 1700 /* look for tuple match */ 1730 1701 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); 1731 - hash = hash_conntrack_raw(&tuple, state->net); 1702 + 1703 + zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL); 1704 + hash = hash_conntrack_raw(&tuple, zone_id, state->net); 1732 1705 h = __nf_conntrack_find_get(state->net, zone, &tuple, hash); 1706 + 1707 + if (!h) { 1708 + rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY); 1709 + if (zone_id != rid) { 1710 + u32 tmp = hash_conntrack_raw(&tuple, rid, state->net); 1711 + 1712 + h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp); 1713 + } 1714 + } 1715 + 1733 1716 if (!h) { 1734 1717 h = init_conntrack(state->net, tmpl, &tuple, 1735 1718 skb, dataoff, hash); ··· 2266 2225 
spinlock_t *lockp; 2267 2226 2268 2227 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { 2228 + struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket]; 2229 + 2230 + if (hlist_nulls_empty(hslot)) 2231 + continue; 2232 + 2269 2233 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; 2270 2234 local_bh_disable(); 2271 2235 nf_conntrack_lock(lockp); 2272 - if (*bucket < nf_conntrack_htable_size) { 2273 - hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) { 2274 - if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY) 2275 - continue; 2276 - /* All nf_conn objects are added to hash table twice, one 2277 - * for original direction tuple, once for the reply tuple. 2278 - * 2279 - * Exception: In the IPS_NAT_CLASH case, only the reply 2280 - * tuple is added (the original tuple already existed for 2281 - * a different object). 2282 - * 2283 - * We only need to call the iterator once for each 2284 - * conntrack, so we just use the 'reply' direction 2285 - * tuple while iterating. 2286 - */ 2287 - ct = nf_ct_tuplehash_to_ctrack(h); 2288 - if (iter(ct, data)) 2289 - goto found; 2290 - } 2236 + hlist_nulls_for_each_entry(h, n, hslot, hnnode) { 2237 + if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY) 2238 + continue; 2239 + /* All nf_conn objects are added to hash table twice, one 2240 + * for original direction tuple, once for the reply tuple. 2241 + * 2242 + * Exception: In the IPS_NAT_CLASH case, only the reply 2243 + * tuple is added (the original tuple already existed for 2244 + * a different object). 2245 + * 2246 + * We only need to call the iterator once for each 2247 + * conntrack, so we just use the 'reply' direction 2248 + * tuple while iterating. 
2249 + */ 2250 + ct = nf_ct_tuplehash_to_ctrack(h); 2251 + if (iter(ct, data)) 2252 + goto found; 2291 2253 } 2292 2254 spin_unlock(lockp); 2293 2255 local_bh_enable(); ··· 2308 2264 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), 2309 2265 void *data, u32 portid, int report) 2310 2266 { 2311 - unsigned int bucket = 0, sequence; 2267 + unsigned int bucket = 0; 2312 2268 struct nf_conn *ct; 2313 2269 2314 2270 might_sleep(); 2315 2271 2316 - for (;;) { 2317 - sequence = read_seqcount_begin(&nf_conntrack_generation); 2272 + mutex_lock(&nf_conntrack_mutex); 2273 + while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) { 2274 + /* Time to push up daises... */ 2318 2275 2319 - while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) { 2320 - /* Time to push up daises... */ 2321 - 2322 - nf_ct_delete(ct, portid, report); 2323 - nf_ct_put(ct); 2324 - cond_resched(); 2325 - } 2326 - 2327 - if (!read_seqcount_retry(&nf_conntrack_generation, sequence)) 2328 - break; 2329 - bucket = 0; 2276 + nf_ct_delete(ct, portid, report); 2277 + nf_ct_put(ct); 2278 + cond_resched(); 2330 2279 } 2280 + mutex_unlock(&nf_conntrack_mutex); 2331 2281 } 2332 2282 2333 2283 struct iter_data { ··· 2557 2519 if (!hash) 2558 2520 return -ENOMEM; 2559 2521 2522 + mutex_lock(&nf_conntrack_mutex); 2560 2523 old_size = nf_conntrack_htable_size; 2561 2524 if (old_size == hashsize) { 2525 + mutex_unlock(&nf_conntrack_mutex); 2562 2526 kvfree(hash); 2563 2527 return 0; 2564 2528 } ··· 2577 2537 2578 2538 for (i = 0; i < nf_conntrack_htable_size; i++) { 2579 2539 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) { 2540 + unsigned int zone_id; 2541 + 2580 2542 h = hlist_nulls_entry(nf_conntrack_hash[i].first, 2581 2543 struct nf_conntrack_tuple_hash, hnnode); 2582 2544 ct = nf_ct_tuplehash_to_ctrack(h); 2583 2545 hlist_nulls_del_rcu(&h->hnnode); 2546 + 2547 + zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h)); 2584 2548 bucket = __hash_conntrack(nf_ct_net(ct), 
2585 - &h->tuple, hashsize); 2549 + &h->tuple, zone_id, hashsize); 2586 2550 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 2587 2551 } 2588 2552 } ··· 2599 2555 write_seqcount_end(&nf_conntrack_generation); 2600 2556 nf_conntrack_all_unlock(); 2601 2557 local_bh_enable(); 2558 + 2559 + mutex_unlock(&nf_conntrack_mutex); 2602 2560 2603 2561 synchronize_net(); 2604 2562 kvfree(old_hash);
+12 -5
net/netfilter/nf_nat_core.c
··· 150 150 151 151 /* We keep an extra hash for each conntrack, for fast searching. */ 152 152 static unsigned int 153 - hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple) 153 + hash_by_src(const struct net *net, 154 + const struct nf_conntrack_zone *zone, 155 + const struct nf_conntrack_tuple *tuple) 154 156 { 155 157 unsigned int hash; 156 158 struct { 157 159 struct nf_conntrack_man src; 158 160 u32 net_mix; 159 161 u32 protonum; 162 + u32 zone; 160 163 } __aligned(SIPHASH_ALIGNMENT) combined; 161 164 162 165 get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd)); ··· 168 165 169 166 /* Original src, to ensure we map it consistently if poss. */ 170 167 combined.src = tuple->src; 171 - combined.net_mix = net_hash_mix(n); 168 + combined.net_mix = net_hash_mix(net); 172 169 combined.protonum = tuple->dst.protonum; 170 + 171 + /* Zone ID can be used provided its valid for both directions */ 172 + if (zone->dir == NF_CT_DEFAULT_ZONE_DIR) 173 + combined.zone = zone->id; 173 174 174 175 hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd); 175 176 ··· 279 272 struct nf_conntrack_tuple *result, 280 273 const struct nf_nat_range2 *range) 281 274 { 282 - unsigned int h = hash_by_src(net, tuple); 275 + unsigned int h = hash_by_src(net, zone, tuple); 283 276 const struct nf_conn *ct; 284 277 285 278 hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) { ··· 626 619 unsigned int srchash; 627 620 spinlock_t *lock; 628 621 629 - srchash = hash_by_src(net, 622 + srchash = hash_by_src(net, nf_ct_zone(ct), 630 623 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 631 624 lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS]; 632 625 spin_lock_bh(lock); ··· 795 788 { 796 789 unsigned int h; 797 790 798 - h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 791 + h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); 799 792 spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]); 800 793 
hlist_del_rcu(&ct->nat_bysource); 801 794 spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
+97 -71
net/netfilter/nf_nat_masquerade.c
··· 9 9 10 10 #include <net/netfilter/nf_nat_masquerade.h> 11 11 12 + struct masq_dev_work { 13 + struct work_struct work; 14 + struct net *net; 15 + union nf_inet_addr addr; 16 + int ifindex; 17 + int (*iter)(struct nf_conn *i, void *data); 18 + }; 19 + 20 + #define MAX_MASQ_WORKER_COUNT 16 21 + 12 22 static DEFINE_MUTEX(masq_mutex); 13 23 static unsigned int masq_refcnt __read_mostly; 24 + static atomic_t masq_worker_count __read_mostly; 14 25 15 26 unsigned int 16 27 nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, ··· 74 63 } 75 64 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4); 76 65 77 - static int device_cmp(struct nf_conn *i, void *ifindex) 66 + static void iterate_cleanup_work(struct work_struct *work) 67 + { 68 + struct masq_dev_work *w; 69 + 70 + w = container_of(work, struct masq_dev_work, work); 71 + 72 + nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0); 73 + 74 + put_net(w->net); 75 + kfree(w); 76 + atomic_dec(&masq_worker_count); 77 + module_put(THIS_MODULE); 78 + } 79 + 80 + /* Iterate conntrack table in the background and remove conntrack entries 81 + * that use the device/address being removed. 82 + * 83 + * In case too many work items have been queued already or memory allocation 84 + * fails iteration is skipped, conntrack entries will time out eventually. 
85 + */ 86 + static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr, 87 + int ifindex, 88 + int (*iter)(struct nf_conn *i, void *data), 89 + gfp_t gfp_flags) 90 + { 91 + struct masq_dev_work *w; 92 + 93 + if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT) 94 + return; 95 + 96 + net = maybe_get_net(net); 97 + if (!net) 98 + return; 99 + 100 + if (!try_module_get(THIS_MODULE)) 101 + goto err_module; 102 + 103 + w = kzalloc(sizeof(*w), gfp_flags); 104 + if (w) { 105 + /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */ 106 + atomic_inc(&masq_worker_count); 107 + 108 + INIT_WORK(&w->work, iterate_cleanup_work); 109 + w->ifindex = ifindex; 110 + w->net = net; 111 + w->iter = iter; 112 + if (addr) 113 + w->addr = *addr; 114 + schedule_work(&w->work); 115 + return; 116 + } 117 + 118 + module_put(THIS_MODULE); 119 + err_module: 120 + put_net(net); 121 + } 122 + 123 + static int device_cmp(struct nf_conn *i, void *arg) 78 124 { 79 125 const struct nf_conn_nat *nat = nfct_nat(i); 126 + const struct masq_dev_work *w = arg; 80 127 81 128 if (!nat) 82 129 return 0; 83 - return nat->masq_index == (int)(long)ifindex; 130 + return nat->masq_index == w->ifindex; 84 131 } 85 132 86 133 static int masq_device_event(struct notifier_block *this, ··· 154 85 * and forget them. 
155 86 */ 156 87 157 - nf_ct_iterate_cleanup_net(net, device_cmp, 158 - (void *)(long)dev->ifindex, 0, 0); 88 + nf_nat_masq_schedule(net, NULL, dev->ifindex, 89 + device_cmp, GFP_KERNEL); 159 90 } 160 91 161 92 return NOTIFY_DONE; ··· 163 94 164 95 static int inet_cmp(struct nf_conn *ct, void *ptr) 165 96 { 166 - struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 167 - struct net_device *dev = ifa->ifa_dev->dev; 168 97 struct nf_conntrack_tuple *tuple; 98 + struct masq_dev_work *w = ptr; 169 99 170 - if (!device_cmp(ct, (void *)(long)dev->ifindex)) 100 + if (!device_cmp(ct, ptr)) 171 101 return 0; 172 102 173 103 tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; 174 104 175 - return ifa->ifa_address == tuple->dst.u3.ip; 105 + return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3); 176 106 } 177 107 178 108 static int masq_inet_event(struct notifier_block *this, 179 109 unsigned long event, 180 110 void *ptr) 181 111 { 182 - struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev; 183 - struct net *net = dev_net(idev->dev); 112 + const struct in_ifaddr *ifa = ptr; 113 + const struct in_device *idev; 114 + const struct net_device *dev; 115 + union nf_inet_addr addr; 116 + 117 + if (event != NETDEV_DOWN) 118 + return NOTIFY_DONE; 184 119 185 120 /* The masq_dev_notifier will catch the case of the device going 186 121 * down. So if the inetdev is dead and being destroyed we have 187 122 * no work to do. Otherwise this is an individual address removal 188 123 * and we have to perform the flush. 
189 124 */ 125 + idev = ifa->ifa_dev; 190 126 if (idev->dead) 191 127 return NOTIFY_DONE; 192 128 193 - if (event == NETDEV_DOWN) 194 - nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0); 129 + memset(&addr, 0, sizeof(addr)); 130 + 131 + addr.ip = ifa->ifa_address; 132 + 133 + dev = idev->dev; 134 + nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex, 135 + inet_cmp, GFP_KERNEL); 195 136 196 137 return NOTIFY_DONE; 197 138 } ··· 215 136 }; 216 137 217 138 #if IS_ENABLED(CONFIG_IPV6) 218 - static atomic_t v6_worker_count __read_mostly; 219 - 220 139 static int 221 140 nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev, 222 141 const struct in6_addr *daddr, unsigned int srcprefs, ··· 264 187 } 265 188 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6); 266 189 267 - struct masq_dev_work { 268 - struct work_struct work; 269 - struct net *net; 270 - struct in6_addr addr; 271 - int ifindex; 272 - }; 273 - 274 - static int inet6_cmp(struct nf_conn *ct, void *work) 275 - { 276 - struct masq_dev_work *w = (struct masq_dev_work *)work; 277 - struct nf_conntrack_tuple *tuple; 278 - 279 - if (!device_cmp(ct, (void *)(long)w->ifindex)) 280 - return 0; 281 - 282 - tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; 283 - 284 - return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6); 285 - } 286 - 287 - static void iterate_cleanup_work(struct work_struct *work) 288 - { 289 - struct masq_dev_work *w; 290 - 291 - w = container_of(work, struct masq_dev_work, work); 292 - 293 - nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0); 294 - 295 - put_net(w->net); 296 - kfree(w); 297 - atomic_dec(&v6_worker_count); 298 - module_put(THIS_MODULE); 299 - } 300 - 301 190 /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep). 302 191 * 303 192 * Defer it to the system workqueue. 
··· 276 233 { 277 234 struct inet6_ifaddr *ifa = ptr; 278 235 const struct net_device *dev; 279 - struct masq_dev_work *w; 280 - struct net *net; 236 + union nf_inet_addr addr; 281 237 282 - if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16) 238 + if (event != NETDEV_DOWN) 283 239 return NOTIFY_DONE; 284 240 285 241 dev = ifa->idev->dev; 286 - net = maybe_get_net(dev_net(dev)); 287 - if (!net) 288 - return NOTIFY_DONE; 289 242 290 - if (!try_module_get(THIS_MODULE)) 291 - goto err_module; 243 + memset(&addr, 0, sizeof(addr)); 292 244 293 - w = kmalloc(sizeof(*w), GFP_ATOMIC); 294 - if (w) { 295 - atomic_inc(&v6_worker_count); 245 + addr.in6 = ifa->addr; 296 246 297 - INIT_WORK(&w->work, iterate_cleanup_work); 298 - w->ifindex = dev->ifindex; 299 - w->net = net; 300 - w->addr = ifa->addr; 301 - schedule_work(&w->work); 302 - 303 - return NOTIFY_DONE; 304 - } 305 - 306 - module_put(THIS_MODULE); 307 - err_module: 308 - put_net(net); 247 + nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp, 248 + GFP_ATOMIC); 309 249 return NOTIFY_DONE; 310 250 } 311 251
+83 -38
net/netfilter/nf_tables_api.c
··· 780 780 { 781 781 struct nftables_pernet *nft_net; 782 782 struct sk_buff *skb; 783 + u16 flags = 0; 783 784 int err; 784 785 785 786 if (!ctx->report && ··· 791 790 if (skb == NULL) 792 791 goto err; 793 792 793 + if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL)) 794 + flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL); 795 + 794 796 err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq, 795 - event, 0, ctx->family, ctx->table); 797 + event, flags, ctx->family, ctx->table); 796 798 if (err < 0) { 797 799 kfree_skb(skb); 798 800 goto err; ··· 1567 1563 { 1568 1564 struct nftables_pernet *nft_net; 1569 1565 struct sk_buff *skb; 1566 + u16 flags = 0; 1570 1567 int err; 1571 1568 1572 1569 if (!ctx->report && ··· 1578 1573 if (skb == NULL) 1579 1574 goto err; 1580 1575 1576 + if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL)) 1577 + flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL); 1578 + 1581 1579 err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq, 1582 - event, 0, ctx->family, ctx->table, 1580 + event, flags, ctx->family, ctx->table, 1583 1581 ctx->chain); 1584 1582 if (err < 0) { 1585 1583 kfree_skb(skb); ··· 2874 2866 u32 flags, int family, 2875 2867 const struct nft_table *table, 2876 2868 const struct nft_chain *chain, 2877 - const struct nft_rule *rule, 2878 - const struct nft_rule *prule) 2869 + const struct nft_rule *rule, u64 handle) 2879 2870 { 2880 2871 struct nlmsghdr *nlh; 2881 2872 const struct nft_expr *expr, *next; ··· 2894 2887 NFTA_RULE_PAD)) 2895 2888 goto nla_put_failure; 2896 2889 2897 - if (event != NFT_MSG_DELRULE && prule) { 2898 - if (nla_put_be64(skb, NFTA_RULE_POSITION, 2899 - cpu_to_be64(prule->handle), 2890 + if (event != NFT_MSG_DELRULE && handle) { 2891 + if (nla_put_be64(skb, NFTA_RULE_POSITION, cpu_to_be64(handle), 2900 2892 NFTA_RULE_PAD)) 2901 2893 goto nla_put_failure; 2902 2894 } ··· 2931 2925 const struct nft_rule *rule, int event) 2932 2926 { 2933 2927 struct nftables_pernet *nft_net = 
nft_pernet(ctx->net); 2928 + const struct nft_rule *prule; 2934 2929 struct sk_buff *skb; 2930 + u64 handle = 0; 2931 + u16 flags = 0; 2935 2932 int err; 2936 2933 2937 2934 if (!ctx->report && ··· 2945 2936 if (skb == NULL) 2946 2937 goto err; 2947 2938 2939 + if (event == NFT_MSG_NEWRULE && 2940 + !list_is_first(&rule->list, &ctx->chain->rules) && 2941 + !list_is_last(&rule->list, &ctx->chain->rules)) { 2942 + prule = list_prev_entry(rule, list); 2943 + handle = prule->handle; 2944 + } 2945 + if (ctx->flags & (NLM_F_APPEND | NLM_F_REPLACE)) 2946 + flags |= NLM_F_APPEND; 2947 + if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL)) 2948 + flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL); 2949 + 2948 2950 err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq, 2949 - event, 0, ctx->family, ctx->table, 2950 - ctx->chain, rule, NULL); 2951 + event, flags, ctx->family, ctx->table, 2952 + ctx->chain, rule, handle); 2951 2953 if (err < 0) { 2952 2954 kfree_skb(skb); 2953 2955 goto err; ··· 2984 2964 struct net *net = sock_net(skb->sk); 2985 2965 const struct nft_rule *rule, *prule; 2986 2966 unsigned int s_idx = cb->args[0]; 2967 + u64 handle; 2987 2968 2988 2969 prule = NULL; 2989 2970 list_for_each_entry_rcu(rule, &chain->rules, list) { ··· 2996 2975 memset(&cb->args[1], 0, 2997 2976 sizeof(cb->args) - sizeof(cb->args[0])); 2998 2977 } 2978 + if (prule) 2979 + handle = prule->handle; 2980 + else 2981 + handle = 0; 2982 + 2999 2983 if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid, 3000 2984 cb->nlh->nlmsg_seq, 3001 2985 NFT_MSG_NEWRULE, 3002 2986 NLM_F_MULTI | NLM_F_APPEND, 3003 2987 table->family, 3004 - table, chain, rule, prule) < 0) 2988 + table, chain, rule, handle) < 0) 3005 2989 return 1; 3006 2990 3007 2991 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); ··· 3169 3143 3170 3144 err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid, 3171 3145 info->nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0, 3172 - family, table, chain, rule, NULL); 3146 + 
family, table, chain, rule, 0); 3173 3147 if (err < 0) 3174 3148 goto err_fill_rule_info; 3175 3149 ··· 3429 3403 } 3430 3404 3431 3405 if (info->nlh->nlmsg_flags & NLM_F_REPLACE) { 3406 + err = nft_delrule(&ctx, old_rule); 3407 + if (err < 0) 3408 + goto err_destroy_flow_rule; 3409 + 3432 3410 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule); 3433 3411 if (trans == NULL) { 3434 3412 err = -ENOMEM; 3435 3413 goto err_destroy_flow_rule; 3436 3414 } 3437 - err = nft_delrule(&ctx, old_rule); 3438 - if (err < 0) { 3439 - nft_trans_destroy(trans); 3440 - goto err_destroy_flow_rule; 3441 - } 3442 - 3443 3415 list_add_tail_rcu(&rule->list, &old_rule->list); 3444 3416 } else { 3445 3417 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule); ··· 3967 3943 gfp_t gfp_flags) 3968 3944 { 3969 3945 struct nftables_pernet *nft_net = nft_pernet(ctx->net); 3970 - struct sk_buff *skb; 3971 3946 u32 portid = ctx->portid; 3947 + struct sk_buff *skb; 3948 + u16 flags = 0; 3972 3949 int err; 3973 3950 3974 3951 if (!ctx->report && ··· 3980 3955 if (skb == NULL) 3981 3956 goto err; 3982 3957 3983 - err = nf_tables_fill_set(skb, ctx, set, event, 0); 3958 + if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL)) 3959 + flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL); 3960 + 3961 + err = nf_tables_fill_set(skb, ctx, set, event, flags); 3984 3962 if (err < 0) { 3985 3963 kfree_skb(skb); 3986 3964 goto err; ··· 4364 4336 if (ops->privsize != NULL) 4365 4337 size = ops->privsize(nla, &desc); 4366 4338 alloc_size = sizeof(*set) + size + udlen; 4367 - if (alloc_size < size) 4339 + if (alloc_size < size || alloc_size > INT_MAX) 4368 4340 return -ENOMEM; 4369 4341 set = kvzalloc(alloc_size, GFP_KERNEL); 4370 4342 if (!set) ··· 5259 5231 static void nf_tables_setelem_notify(const struct nft_ctx *ctx, 5260 5232 const struct nft_set *set, 5261 5233 const struct nft_set_elem *elem, 5262 - int event, u16 flags) 5234 + int event) 5263 5235 { 5264 5236 struct nftables_pernet *nft_net; 5265 5237 struct 
net *net = ctx->net; 5266 5238 u32 portid = ctx->portid; 5267 5239 struct sk_buff *skb; 5240 + u16 flags = 0; 5268 5241 int err; 5269 5242 5270 5243 if (!ctx->report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) ··· 5274 5245 skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 5275 5246 if (skb == NULL) 5276 5247 goto err; 5248 + 5249 + if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL)) 5250 + flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL); 5277 5251 5278 5252 err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags, 5279 5253 set, elem); ··· 6953 6921 6954 6922 void nft_obj_notify(struct net *net, const struct nft_table *table, 6955 6923 struct nft_object *obj, u32 portid, u32 seq, int event, 6956 - int family, int report, gfp_t gfp) 6924 + u16 flags, int family, int report, gfp_t gfp) 6957 6925 { 6958 6926 struct nftables_pernet *nft_net = nft_pernet(net); 6959 6927 struct sk_buff *skb; ··· 6978 6946 if (skb == NULL) 6979 6947 goto err; 6980 6948 6981 - err = nf_tables_fill_obj_info(skb, net, portid, seq, event, 0, family, 6982 - table, obj, false); 6949 + err = nf_tables_fill_obj_info(skb, net, portid, seq, event, 6950 + flags & (NLM_F_CREATE | NLM_F_EXCL), 6951 + family, table, obj, false); 6983 6952 if (err < 0) { 6984 6953 kfree_skb(skb); 6985 6954 goto err; ··· 6997 6964 struct nft_object *obj, int event) 6998 6965 { 6999 6966 nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event, 7000 - ctx->family, ctx->report, GFP_KERNEL); 6967 + ctx->flags, ctx->family, ctx->report, GFP_KERNEL); 7001 6968 } 7002 6969 7003 6970 /* ··· 7778 7745 { 7779 7746 struct nftables_pernet *nft_net = nft_pernet(ctx->net); 7780 7747 struct sk_buff *skb; 7748 + u16 flags = 0; 7781 7749 int err; 7782 7750 7783 7751 if (!ctx->report && ··· 7789 7755 if (skb == NULL) 7790 7756 goto err; 7791 7757 7758 + if (ctx->flags & (NLM_F_CREATE | NLM_F_EXCL)) 7759 + flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL); 7760 + 7792 7761 err = nf_tables_fill_flowtable_info(skb, 
ctx->net, ctx->portid, 7793 - ctx->seq, event, 0, 7762 + ctx->seq, event, flags, 7794 7763 ctx->family, flowtable, hook_list); 7795 7764 if (err < 0) { 7796 7765 kfree_skb(skb); ··· 8671 8634 nft_setelem_activate(net, te->set, &te->elem); 8672 8635 nf_tables_setelem_notify(&trans->ctx, te->set, 8673 8636 &te->elem, 8674 - NFT_MSG_NEWSETELEM, 0); 8637 + NFT_MSG_NEWSETELEM); 8675 8638 nft_trans_destroy(trans); 8676 8639 break; 8677 8640 case NFT_MSG_DELSETELEM: ··· 8679 8642 8680 8643 nf_tables_setelem_notify(&trans->ctx, te->set, 8681 8644 &te->elem, 8682 - NFT_MSG_DELSETELEM, 0); 8645 + NFT_MSG_DELSETELEM); 8683 8646 nft_setelem_remove(net, te->set, &te->elem); 8684 8647 if (!nft_setelem_is_catchall(te->set, &te->elem)) { 8685 8648 atomic_dec(&te->set->nelems); ··· 9636 9599 table->use--; 9637 9600 nf_tables_chain_destroy(&ctx); 9638 9601 } 9639 - list_del(&table->list); 9640 9602 nf_tables_table_destroy(&ctx); 9641 9603 } 9642 9604 ··· 9648 9612 if (nft_table_has_owner(table)) 9649 9613 continue; 9650 9614 9615 + list_del(&table->list); 9616 + 9651 9617 __nft_release_table(net, table); 9652 9618 } 9653 9619 } ··· 9657 9619 static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, 9658 9620 void *ptr) 9659 9621 { 9622 + struct nft_table *table, *to_delete[8]; 9660 9623 struct nftables_pernet *nft_net; 9661 9624 struct netlink_notify *n = ptr; 9662 - struct nft_table *table, *nt; 9663 9625 struct net *net = n->net; 9664 - bool release = false; 9626 + unsigned int deleted; 9627 + bool restart = false; 9665 9628 9666 9629 if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER) 9667 9630 return NOTIFY_DONE; 9668 9631 9669 9632 nft_net = nft_pernet(net); 9633 + deleted = 0; 9670 9634 mutex_lock(&nft_net->commit_mutex); 9635 + again: 9671 9636 list_for_each_entry(table, &nft_net->tables, list) { 9672 9637 if (nft_table_has_owner(table) && 9673 9638 n->portid == table->nlpid) { 9674 9639 __nft_release_hook(net, table); 9675 - release = true; 
9640 + list_del_rcu(&table->list); 9641 + to_delete[deleted++] = table; 9642 + if (deleted >= ARRAY_SIZE(to_delete)) 9643 + break; 9676 9644 } 9677 9645 } 9678 - if (release) { 9646 + if (deleted) { 9647 + restart = deleted >= ARRAY_SIZE(to_delete); 9679 9648 synchronize_rcu(); 9680 - list_for_each_entry_safe(table, nt, &nft_net->tables, list) { 9681 - if (nft_table_has_owner(table) && 9682 - n->portid == table->nlpid) 9683 - __nft_release_table(net, table); 9684 - } 9649 + while (deleted) 9650 + __nft_release_table(net, to_delete[--deleted]); 9651 + 9652 + if (restart) 9653 + goto again; 9685 9654 } 9686 9655 mutex_unlock(&nft_net->commit_mutex); 9687 9656
+16 -1
net/netfilter/nft_compat.c
··· 19 19 #include <linux/netfilter_bridge/ebtables.h> 20 20 #include <linux/netfilter_arp/arp_tables.h> 21 21 #include <net/netfilter/nf_tables.h> 22 + #include <net/netfilter/nf_log.h> 22 23 23 24 /* Used for matches where *info is larger than X byte */ 24 25 #define NFT_MATCH_LARGE_THRESH 192 ··· 258 257 nft_compat_wait_for_destructors(); 259 258 260 259 ret = xt_check_target(&par, size, proto, inv); 261 - if (ret < 0) 260 + if (ret < 0) { 261 + if (ret == -ENOENT) { 262 + const char *modname = NULL; 263 + 264 + if (strcmp(target->name, "LOG") == 0) 265 + modname = "nf_log_syslog"; 266 + else if (strcmp(target->name, "NFLOG") == 0) 267 + modname = "nfnetlink_log"; 268 + 269 + if (modname && 270 + nft_request_module(ctx->net, "%s", modname) == -EAGAIN) 271 + return -EAGAIN; 272 + } 273 + 262 274 return ret; 275 + } 263 276 264 277 /* The standard target cannot be used */ 265 278 if (!target->target)
+1 -1
net/netfilter/nft_quota.c
··· 60 60 if (overquota && 61 61 !test_and_set_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags)) 62 62 nft_obj_notify(nft_net(pkt), obj->key.table, obj, 0, 0, 63 - NFT_MSG_NEWOBJ, nft_pf(pkt), 0, GFP_ATOMIC); 63 + NFT_MSG_NEWOBJ, 0, nft_pf(pkt), 0, GFP_ATOMIC); 64 64 } 65 65 66 66 static int nft_quota_do_init(const struct nlattr * const tb[],
+9 -1
net/netfilter/xt_LOG.c
··· 44 44 static int log_tg_check(const struct xt_tgchk_param *par) 45 45 { 46 46 const struct xt_log_info *loginfo = par->targinfo; 47 + int ret; 47 48 48 49 if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6) 49 50 return -EINVAL; ··· 59 58 return -EINVAL; 60 59 } 61 60 62 - return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG); 61 + ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG); 62 + if (ret != 0 && !par->nft_compat) { 63 + request_module("%s", "nf_log_syslog"); 64 + 65 + ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG); 66 + } 67 + 68 + return ret; 63 69 } 64 70 65 71 static void log_tg_destroy(const struct xt_tgdtor_param *par)
+9 -1
net/netfilter/xt_NFLOG.c
··· 42 42 static int nflog_tg_check(const struct xt_tgchk_param *par) 43 43 { 44 44 const struct xt_nflog_info *info = par->targinfo; 45 + int ret; 45 46 46 47 if (info->flags & ~XT_NFLOG_MASK) 47 48 return -EINVAL; 48 49 if (info->prefix[sizeof(info->prefix) - 1] != '\0') 49 50 return -EINVAL; 50 51 51 - return nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG); 52 + ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG); 53 + if (ret != 0 && !par->nft_compat) { 54 + request_module("%s", "nfnetlink_log"); 55 + 56 + ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG); 57 + } 58 + 59 + return ret; 52 60 } 53 61 54 62 static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
+10 -4
net/netlink/af_netlink.c
··· 594 594 595 595 /* We need to ensure that the socket is hashed and visible. */ 596 596 smp_wmb(); 597 - nlk_sk(sk)->bound = portid; 597 + /* Paired with lockless reads from netlink_bind(), 598 + * netlink_connect() and netlink_sendmsg(). 599 + */ 600 + WRITE_ONCE(nlk_sk(sk)->bound, portid); 598 601 599 602 err: 600 603 release_sock(sk); ··· 1015 1012 if (nlk->ngroups < BITS_PER_LONG) 1016 1013 groups &= (1UL << nlk->ngroups) - 1; 1017 1014 1018 - bound = nlk->bound; 1015 + /* Paired with WRITE_ONCE() in netlink_insert() */ 1016 + bound = READ_ONCE(nlk->bound); 1019 1017 if (bound) { 1020 1018 /* Ensure nlk->portid is up-to-date. */ 1021 1019 smp_rmb(); ··· 1102 1098 1103 1099 /* No need for barriers here as we return to user-space without 1104 1100 * using any of the bound attributes. 1101 + * Paired with WRITE_ONCE() in netlink_insert(). 1105 1102 */ 1106 - if (!nlk->bound) 1103 + if (!READ_ONCE(nlk->bound)) 1107 1104 err = netlink_autobind(sock); 1108 1105 1109 1106 if (err == 0) { ··· 1893 1888 dst_group = nlk->dst_group; 1894 1889 } 1895 1890 1896 - if (!nlk->bound) { 1891 + /* Paired with WRITE_ONCE() in netlink_insert() */ 1892 + if (!READ_ONCE(nlk->bound)) { 1897 1893 err = netlink_autobind(sock); 1898 1894 if (err) 1899 1895 goto out;
+6
net/sched/cls_flower.c
··· 2188 2188 2189 2189 arg->count = arg->skip; 2190 2190 2191 + rcu_read_lock(); 2191 2192 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { 2192 2193 /* don't return filters that are being deleted */ 2193 2194 if (!refcount_inc_not_zero(&f->refcnt)) 2194 2195 continue; 2196 + rcu_read_unlock(); 2197 + 2195 2198 if (arg->fn(tp, f, arg) < 0) { 2196 2199 __fl_put(f); 2197 2200 arg->stop = 1; 2201 + rcu_read_lock(); 2198 2202 break; 2199 2203 } 2200 2204 __fl_put(f); 2201 2205 arg->count++; 2206 + rcu_read_lock(); 2202 2207 } 2208 + rcu_read_unlock(); 2203 2209 arg->cookie = id; 2204 2210 } 2205 2211
+6
net/sched/sch_api.c
··· 513 513 return stab; 514 514 } 515 515 516 + if (s->size_log > STAB_SIZE_LOG_MAX || 517 + s->cell_log > STAB_SIZE_LOG_MAX) { 518 + NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table"); 519 + return ERR_PTR(-EINVAL); 520 + } 521 + 516 522 stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); 517 523 if (!stab) 518 524 return ERR_PTR(-ENOMEM);
+3
net/sched/sch_fifo.c
··· 233 233 if (strncmp(q->ops->id + 1, "fifo", 4) != 0) 234 234 return 0; 235 235 236 + if (!q->ops->change) 237 + return 0; 238 + 236 239 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); 237 240 if (nla) { 238 241 nla->nla_type = RTM_NEWQDISC;
+4
net/sched/sch_taprio.c
··· 1641 1641 list_del(&q->taprio_list); 1642 1642 spin_unlock(&taprio_list_lock); 1643 1643 1644 + /* Note that taprio_reset() might not be called if an error 1645 + * happens in qdisc_create(), after taprio_init() has been called. 1646 + */ 1647 + hrtimer_cancel(&q->advance_timer); 1644 1648 1645 1649 taprio_disable_offload(dev, q, NULL); 1646 1650
+1 -1
net/sctp/input.c
··· 702 702 ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch); 703 703 704 704 /* Break out if chunk length is less then minimal. */ 705 - if (ntohs(ch->length) < sizeof(_ch)) 705 + if (!ch || ntohs(ch->length) < sizeof(_ch)) 706 706 break; 707 707 708 708 ch_end = offset + SCTP_PAD4(ntohs(ch->length));
+2 -1
net/smc/smc_clc.c
··· 230 230 goto out_rel; 231 231 } 232 232 /* get address to which the internal TCP socket is bound */ 233 - kernel_getsockname(clcsock, (struct sockaddr *)&addrs); 233 + if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0) 234 + goto out_rel; 234 235 /* analyze IP specific data of net_device belonging to TCP socket */ 235 236 addr6 = (struct sockaddr_in6 *)&addrs; 236 237 rcu_read_lock();
+2
net/smc/smc_core.c
··· 1474 1474 abort_work); 1475 1475 struct smc_sock *smc = container_of(conn, struct smc_sock, conn); 1476 1476 1477 + lock_sock(&smc->sk); 1477 1478 smc_conn_kill(conn, true); 1479 + release_sock(&smc->sk); 1478 1480 sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */ 1479 1481 } 1480 1482
+1 -1
net/sunrpc/auth_gss/svcauth_gss.c
··· 645 645 } 646 646 __set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win); 647 647 goto ok; 648 - } else if (seq_num <= sd->sd_max - GSS_SEQ_WIN) { 648 + } else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) { 649 649 goto toolow; 650 650 } 651 651 if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
+65 -27
net/unix/af_unix.c
··· 608 608 609 609 static void init_peercred(struct sock *sk) 610 610 { 611 - put_pid(sk->sk_peer_pid); 612 - if (sk->sk_peer_cred) 613 - put_cred(sk->sk_peer_cred); 611 + const struct cred *old_cred; 612 + struct pid *old_pid; 613 + 614 + spin_lock(&sk->sk_peer_lock); 615 + old_pid = sk->sk_peer_pid; 616 + old_cred = sk->sk_peer_cred; 614 617 sk->sk_peer_pid = get_pid(task_tgid(current)); 615 618 sk->sk_peer_cred = get_current_cred(); 619 + spin_unlock(&sk->sk_peer_lock); 620 + 621 + put_pid(old_pid); 622 + put_cred(old_cred); 616 623 } 617 624 618 625 static void copy_peercred(struct sock *sk, struct sock *peersk) 619 626 { 620 - put_pid(sk->sk_peer_pid); 621 - if (sk->sk_peer_cred) 622 - put_cred(sk->sk_peer_cred); 627 + const struct cred *old_cred; 628 + struct pid *old_pid; 629 + 630 + if (sk < peersk) { 631 + spin_lock(&sk->sk_peer_lock); 632 + spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING); 633 + } else { 634 + spin_lock(&peersk->sk_peer_lock); 635 + spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING); 636 + } 637 + old_pid = sk->sk_peer_pid; 638 + old_cred = sk->sk_peer_cred; 623 639 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); 624 640 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); 641 + 642 + spin_unlock(&sk->sk_peer_lock); 643 + spin_unlock(&peersk->sk_peer_lock); 644 + 645 + put_pid(old_pid); 646 + put_cred(old_cred); 625 647 } 626 648 627 649 static int unix_listen(struct socket *sock, int backlog) ··· 850 828 851 829 static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type) 852 830 { 853 - struct sock *sk = NULL; 854 831 struct unix_sock *u; 832 + struct sock *sk; 833 + int err; 855 834 856 835 atomic_long_inc(&unix_nr_socks); 857 - if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) 858 - goto out; 836 + if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) { 837 + err = -ENFILE; 838 + goto err; 839 + } 859 840 860 841 if (type == SOCK_STREAM) 861 842 sk = sk_alloc(net, 
PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern); 862 843 else /*dgram and seqpacket */ 863 844 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern); 864 845 865 - if (!sk) 866 - goto out; 846 + if (!sk) { 847 + err = -ENOMEM; 848 + goto err; 849 + } 867 850 868 851 sock_init_data(sock, sk); 869 852 ··· 888 861 init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); 889 862 memset(&u->scm_stat, 0, sizeof(struct scm_stat)); 890 863 unix_insert_socket(unix_sockets_unbound(sk), sk); 891 - out: 892 - if (sk == NULL) 893 - atomic_long_dec(&unix_nr_socks); 894 - else { 895 - local_bh_disable(); 896 - sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 897 - local_bh_enable(); 898 - } 864 + 865 + local_bh_disable(); 866 + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 867 + local_bh_enable(); 868 + 899 869 return sk; 870 + 871 + err: 872 + atomic_long_dec(&unix_nr_socks); 873 + return ERR_PTR(err); 900 874 } 901 875 902 876 static int unix_create(struct net *net, struct socket *sock, int protocol, 903 877 int kern) 904 878 { 879 + struct sock *sk; 880 + 905 881 if (protocol && protocol != PF_UNIX) 906 882 return -EPROTONOSUPPORT; 907 883 ··· 931 901 return -ESOCKTNOSUPPORT; 932 902 } 933 903 934 - return unix_create1(net, sock, kern, sock->type) ? 0 : -ENOMEM; 904 + sk = unix_create1(net, sock, kern, sock->type); 905 + if (IS_ERR(sk)) 906 + return PTR_ERR(sk); 907 + 908 + return 0; 935 909 } 936 910 937 911 static int unix_release(struct socket *sock) ··· 1348 1314 we will have to recheck all again in any case. 
1349 1315 */ 1350 1316 1351 - err = -ENOMEM; 1352 - 1353 1317 /* create new sock for complete connection */ 1354 1318 newsk = unix_create1(sock_net(sk), NULL, 0, sock->type); 1355 - if (newsk == NULL) 1319 + if (IS_ERR(newsk)) { 1320 + err = PTR_ERR(newsk); 1321 + newsk = NULL; 1356 1322 goto out; 1323 + } 1324 + 1325 + err = -ENOMEM; 1357 1326 1358 1327 /* Allocate skb for sending to listening sock */ 1359 1328 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL); ··· 2882 2845 2883 2846 unix_state_lock(sk); 2884 2847 sk->sk_shutdown |= mode; 2848 + if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && 2849 + mode == SHUTDOWN_MASK) 2850 + sk->sk_state = TCP_CLOSE; 2885 2851 other = unix_peer(sk); 2886 2852 if (other) 2887 2853 sock_hold(other); ··· 2907 2867 other->sk_shutdown |= peer_mode; 2908 2868 unix_state_unlock(other); 2909 2869 other->sk_state_change(other); 2910 - if (peer_mode == SHUTDOWN_MASK) { 2870 + if (peer_mode == SHUTDOWN_MASK) 2911 2871 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP); 2912 - other->sk_state = TCP_CLOSE; 2913 - } else if (peer_mode & RCV_SHUTDOWN) { 2872 + else if (peer_mode & RCV_SHUTDOWN) 2914 2873 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN); 2915 - } 2916 2874 } 2917 2875 if (other) 2918 2876 sock_put(other);
+55 -12
net/xfrm/xfrm_user.c
··· 1961 1961 return skb; 1962 1962 } 1963 1963 1964 + static int xfrm_notify_userpolicy(struct net *net) 1965 + { 1966 + struct xfrm_userpolicy_default *up; 1967 + int len = NLMSG_ALIGN(sizeof(*up)); 1968 + struct nlmsghdr *nlh; 1969 + struct sk_buff *skb; 1970 + int err; 1971 + 1972 + skb = nlmsg_new(len, GFP_ATOMIC); 1973 + if (skb == NULL) 1974 + return -ENOMEM; 1975 + 1976 + nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0); 1977 + if (nlh == NULL) { 1978 + kfree_skb(skb); 1979 + return -EMSGSIZE; 1980 + } 1981 + 1982 + up = nlmsg_data(nlh); 1983 + up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ? 1984 + XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT; 1985 + up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ? 1986 + XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT; 1987 + up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ? 1988 + XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT; 1989 + 1990 + nlmsg_end(skb, nlh); 1991 + 1992 + rcu_read_lock(); 1993 + err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); 1994 + rcu_read_unlock(); 1995 + 1996 + return err; 1997 + } 1998 + 1964 1999 static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh, 1965 2000 struct nlattr **attrs) 1966 2001 { 1967 2002 struct net *net = sock_net(skb->sk); 1968 2003 struct xfrm_userpolicy_default *up = nlmsg_data(nlh); 1969 - u8 dirmask; 1970 - u8 old_default = net->xfrm.policy_default; 1971 2004 1972 - if (up->dirmask >= XFRM_USERPOLICY_DIRMASK_MAX) 1973 - return -EINVAL; 2005 + if (up->in == XFRM_USERPOLICY_BLOCK) 2006 + net->xfrm.policy_default |= XFRM_POL_DEFAULT_IN; 2007 + else if (up->in == XFRM_USERPOLICY_ACCEPT) 2008 + net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_IN; 1974 2009 1975 - dirmask = (1 << up->dirmask) & XFRM_POL_DEFAULT_MASK; 2010 + if (up->fwd == XFRM_USERPOLICY_BLOCK) 2011 + net->xfrm.policy_default |= XFRM_POL_DEFAULT_FWD; 2012 + else if (up->fwd == XFRM_USERPOLICY_ACCEPT) 2013 + net->xfrm.policy_default &= 
~XFRM_POL_DEFAULT_FWD; 1976 2014 1977 - net->xfrm.policy_default = (old_default & (0xff ^ dirmask)) 1978 - | (up->action << up->dirmask); 2015 + if (up->out == XFRM_USERPOLICY_BLOCK) 2016 + net->xfrm.policy_default |= XFRM_POL_DEFAULT_OUT; 2017 + else if (up->out == XFRM_USERPOLICY_ACCEPT) 2018 + net->xfrm.policy_default &= ~XFRM_POL_DEFAULT_OUT; 1979 2019 1980 2020 rt_genid_bump_all(net); 1981 2021 2022 + xfrm_notify_userpolicy(net); 1982 2023 return 0; 1983 2024 } 1984 2025 ··· 2029 1988 struct sk_buff *r_skb; 2030 1989 struct nlmsghdr *r_nlh; 2031 1990 struct net *net = sock_net(skb->sk); 2032 - struct xfrm_userpolicy_default *r_up, *up; 1991 + struct xfrm_userpolicy_default *r_up; 2033 1992 int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default)); 2034 1993 u32 portid = NETLINK_CB(skb).portid; 2035 1994 u32 seq = nlh->nlmsg_seq; 2036 - 2037 - up = nlmsg_data(nlh); 2038 1995 2039 1996 r_skb = nlmsg_new(len, GFP_ATOMIC); 2040 1997 if (!r_skb) ··· 2046 2007 2047 2008 r_up = nlmsg_data(r_nlh); 2048 2009 2049 - r_up->action = ((net->xfrm.policy_default & (1 << up->dirmask)) >> up->dirmask); 2050 - r_up->dirmask = up->dirmask; 2010 + r_up->in = net->xfrm.policy_default & XFRM_POL_DEFAULT_IN ? 2011 + XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT; 2012 + r_up->fwd = net->xfrm.policy_default & XFRM_POL_DEFAULT_FWD ? 2013 + XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT; 2014 + r_up->out = net->xfrm.policy_default & XFRM_POL_DEFAULT_OUT ? 2015 + XFRM_USERPOLICY_BLOCK : XFRM_USERPOLICY_ACCEPT; 2051 2016 nlmsg_end(r_skb, r_nlh); 2052 2017 2053 2018 return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
+8 -9
samples/bpf/Makefile
··· 322 322 323 323 -include $(BPF_SAMPLES_PATH)/Makefile.target 324 324 325 - VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ 326 - $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ 327 - ../../../../vmlinux \ 328 - /sys/kernel/btf/vmlinux \ 329 - /boot/vmlinux-$(shell uname -r) 325 + VMLINUX_BTF_PATHS ?= $(abspath $(if $(O),$(O)/vmlinux)) \ 326 + $(abspath $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)) \ 327 + $(abspath ./vmlinux) 330 328 VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) 331 - 332 - ifeq ($(VMLINUX_BTF),) 333 - $(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)") 334 - endif 335 329 336 330 $(obj)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) 337 331 ifeq ($(VMLINUX_H),) 338 332 $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@ 339 333 else 340 334 $(Q)cp "$(VMLINUX_H)" $@ 335 + endif 336 + 337 + ifeq ($(VMLINUX_BTF),) 338 + $(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)",\ 339 + build the kernel or set VMLINUX_BTF variable) 341 340 endif 342 341 343 342 clean-files += vmlinux.h
+1 -1
samples/bpf/bpf_insn.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 2 /* eBPF instruction mini library */ 3 3 #ifndef __BPF_INSN_H 4 4 #define __BPF_INSN_H
-5
samples/bpf/xdp_redirect_map_multi.bpf.c
··· 5 5 #include "xdp_sample.bpf.h" 6 6 #include "xdp_sample_shared.h" 7 7 8 - enum { 9 - BPF_F_BROADCAST = (1ULL << 3), 10 - BPF_F_EXCLUDE_INGRESS = (1ULL << 4), 11 - }; 12 - 13 8 struct { 14 9 __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); 15 10 __uint(key_size, sizeof(int));
+2 -1
scripts/Makefile.kasan
··· 33 33 CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \ 34 34 $(call cc-param,asan-globals=1) \ 35 35 $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \ 36 - $(call cc-param,asan-stack=$(stack_enable)) \ 37 36 $(call cc-param,asan-instrument-allocas=1) 38 37 endif 38 + 39 + CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable)) 39 40 40 41 endif # CONFIG_KASAN_GENERIC 41 42
+4
scripts/sorttable.c
··· 54 54 #define EM_ARCV2 195 55 55 #endif 56 56 57 + #ifndef EM_RISCV 58 + #define EM_RISCV 243 59 + #endif 60 + 57 61 static uint32_t (*r)(const uint32_t *); 58 62 static uint16_t (*r2)(const uint16_t *); 59 63 static uint64_t (*r8)(const uint64_t *);
+2 -2
security/selinux/hooks.c
··· 2157 2157 static int selinux_ptrace_traceme(struct task_struct *parent) 2158 2158 { 2159 2159 return avc_has_perm(&selinux_state, 2160 - task_sid_subj(parent), task_sid_obj(current), 2160 + task_sid_obj(parent), task_sid_obj(current), 2161 2161 SECCLASS_PROCESS, PROCESS__PTRACE, NULL); 2162 2162 } 2163 2163 ··· 6222 6222 struct ipc_security_struct *isec; 6223 6223 struct msg_security_struct *msec; 6224 6224 struct common_audit_data ad; 6225 - u32 sid = task_sid_subj(target); 6225 + u32 sid = task_sid_obj(target); 6226 6226 int rc; 6227 6227 6228 6228 isec = selinux_ipc(msq);
+3 -1
security/selinux/nlmsgtab.c
··· 126 126 { XFRM_MSG_NEWSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_WRITE }, 127 127 { XFRM_MSG_GETSPDINFO, NETLINK_XFRM_SOCKET__NLMSG_READ }, 128 128 { XFRM_MSG_MAPPING, NETLINK_XFRM_SOCKET__NLMSG_READ }, 129 + { XFRM_MSG_SETDEFAULT, NETLINK_XFRM_SOCKET__NLMSG_WRITE }, 130 + { XFRM_MSG_GETDEFAULT, NETLINK_XFRM_SOCKET__NLMSG_READ }, 129 131 }; 130 132 131 133 static const struct nlmsg_perm nlmsg_audit_perms[] = ··· 191 189 * structures at the top of this file with the new mappings 192 190 * before updating the BUILD_BUG_ON() macro! 193 191 */ 194 - BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING); 192 + BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_GETDEFAULT); 195 193 err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms, 196 194 sizeof(nlmsg_xfrm_perms)); 197 195 break;
+2 -2
security/smack/smack_lsm.c
··· 2016 2016 const char *caller) 2017 2017 { 2018 2018 struct smk_audit_info ad; 2019 - struct smack_known *skp = smk_of_task_struct_subj(p); 2019 + struct smack_known *skp = smk_of_task_struct_obj(p); 2020 2020 int rc; 2021 2021 2022 2022 smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK); ··· 3480 3480 */ 3481 3481 static int smack_getprocattr(struct task_struct *p, char *name, char **value) 3482 3482 { 3483 - struct smack_known *skp = smk_of_task_struct_subj(p); 3483 + struct smack_known *skp = smk_of_task_struct_obj(p); 3484 3484 char *cp; 3485 3485 int slen; 3486 3486
+9
sound/core/rawmidi.c
··· 873 873 return -EINVAL; 874 874 } 875 875 } 876 + case SNDRV_RAWMIDI_IOCTL_USER_PVERSION: 877 + if (get_user(rfile->user_pversion, (unsigned int __user *)arg)) 878 + return -EFAULT; 879 + return 0; 880 + 876 881 case SNDRV_RAWMIDI_IOCTL_PARAMS: 877 882 { 878 883 struct snd_rawmidi_params params; 879 884 880 885 if (copy_from_user(&params, argp, sizeof(struct snd_rawmidi_params))) 881 886 return -EFAULT; 887 + if (rfile->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 2)) { 888 + params.mode = 0; 889 + memset(params.reserved, 0, sizeof(params.reserved)); 890 + } 882 891 switch (params.stream) { 883 892 case SNDRV_RAWMIDI_STREAM_OUTPUT: 884 893 if (rfile->output == NULL)
+1 -1
sound/drivers/pcsp/pcsp_lib.c
··· 143 143 if (pointer_update) 144 144 pcsp_pointer_update(chip); 145 145 146 - hrtimer_forward(handle, hrtimer_get_expires(handle), ns_to_ktime(ns)); 146 + hrtimer_forward_now(handle, ns_to_ktime(ns)); 147 147 148 148 return HRTIMER_RESTART; 149 149 }
+4 -3
sound/firewire/motu/amdtp-motu.c
··· 276 276 277 277 /* This is just for v2/v3 protocol. */ 278 278 for (i = 0; i < data_blocks; ++i) { 279 - *frames = (be32_to_cpu(buffer[1]) << 16) | 280 - (be32_to_cpu(buffer[2]) >> 16); 279 + *frames = be32_to_cpu(buffer[1]); 280 + *frames <<= 16; 281 + *frames |= be32_to_cpu(buffer[2]) >> 16; 282 + ++frames; 281 283 buffer += data_block_quadlets; 282 - frames++; 283 284 } 284 285 } 285 286
+8 -5
sound/firewire/oxfw/oxfw.c
··· 184 184 model = val; 185 185 } 186 186 187 - /* 188 - * Mackie Onyx Satellite with base station has a quirk to report a wrong 189 - * value in 'dbs' field of CIP header against its format information. 190 - */ 191 - if (vendor == VENDOR_LOUD && model == MODEL_SATELLITE) 187 + if (vendor == VENDOR_LOUD) { 188 + // Mackie Onyx Satellite with base station has a quirk to report a wrong 189 + // value in 'dbs' field of CIP header against its format information. 192 190 oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS; 191 + 192 + // OXFW971-based models may transfer events by blocking method. 193 + if (!(oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD)) 194 + oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION; 195 + } 193 196 194 197 return 0; 195 198 }
+9 -3
sound/pci/hda/hda_intel.c
··· 883 883 return azx_get_pos_posbuf(chip, azx_dev); 884 884 } 885 885 886 - static void azx_shutdown_chip(struct azx *chip) 886 + static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset) 887 887 { 888 888 azx_stop_chip(chip); 889 - azx_enter_link_reset(chip); 889 + if (!skip_link_reset) 890 + azx_enter_link_reset(chip); 890 891 azx_clear_irq_pending(chip); 891 892 display_power(chip, false); 892 893 } ··· 895 894 #ifdef CONFIG_PM 896 895 static DEFINE_MUTEX(card_list_lock); 897 896 static LIST_HEAD(card_list); 897 + 898 + static void azx_shutdown_chip(struct azx *chip) 899 + { 900 + __azx_shutdown_chip(chip, false); 901 + } 898 902 899 903 static void azx_add_card_list(struct azx *chip) 900 904 { ··· 2363 2357 return; 2364 2358 chip = card->private_data; 2365 2359 if (chip && chip->running) 2366 - azx_shutdown_chip(chip); 2360 + __azx_shutdown_chip(chip, true); 2367 2361 } 2368 2362 2369 2363 /* PCI IDs */
+3
sound/pci/hda/patch_cs8409.c
··· 1207 1207 snd_hda_jack_add_kctl(codec, DOLPHIN_LO_PIN_NID, "Line Out", true, 1208 1208 SND_JACK_HEADPHONE, NULL); 1209 1209 1210 + snd_hda_jack_add_kctl(codec, DOLPHIN_AMIC_PIN_NID, "Microphone", true, 1211 + SND_JACK_MICROPHONE, NULL); 1212 + 1210 1213 cs8409_fix_caps(codec, DOLPHIN_HP_PIN_NID); 1211 1214 cs8409_fix_caps(codec, DOLPHIN_LO_PIN_NID); 1212 1215 cs8409_fix_caps(codec, DOLPHIN_AMIC_PIN_NID);
+129
sound/pci/hda/patch_realtek.c
··· 6429 6429 hda_fixup_thinkpad_acpi(codec, fix, action); 6430 6430 } 6431 6431 6432 + /* Fixup for Lenovo Legion 15IMHg05 speaker output on headset removal. */ 6433 + static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec, 6434 + const struct hda_fixup *fix, 6435 + int action) 6436 + { 6437 + struct alc_spec *spec = codec->spec; 6438 + 6439 + switch (action) { 6440 + case HDA_FIXUP_ACT_PRE_PROBE: 6441 + spec->gen.suppress_auto_mute = 1; 6442 + break; 6443 + } 6444 + } 6445 + 6432 6446 /* for alc295_fixup_hp_top_speakers */ 6433 6447 #include "hp_x360_helper.c" 6434 6448 ··· 6660 6646 ALC623_FIXUP_LENOVO_THINKSTATION_P340, 6661 6647 ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, 6662 6648 ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST, 6649 + ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS, 6650 + ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE, 6651 + ALC287_FIXUP_YOGA7_14ITL_SPEAKERS, 6652 + ALC287_FIXUP_13S_GEN2_SPEAKERS 6663 6653 }; 6664 6654 6665 6655 static const struct hda_fixup alc269_fixups[] = { ··· 8254 8236 .chained = true, 8255 8237 .chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF, 8256 8238 }, 8239 + [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = { 8240 + .type = HDA_FIXUP_VERBS, 8241 + //.v.verbs = legion_15imhg05_coefs, 8242 + .v.verbs = (const struct hda_verb[]) { 8243 + // set left speaker Legion 7i. 8244 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 }, 8245 + { 0x20, AC_VERB_SET_PROC_COEF, 0x41 }, 8246 + 8247 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 }, 8248 + { 0x20, AC_VERB_SET_PROC_COEF, 0xc }, 8249 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8250 + { 0x20, AC_VERB_SET_PROC_COEF, 0x1a }, 8251 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8252 + 8253 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 }, 8254 + { 0x20, AC_VERB_SET_PROC_COEF, 0x2 }, 8255 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8256 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8257 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8258 + 8259 + // set right speaker Legion 7i. 
8260 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 }, 8261 + { 0x20, AC_VERB_SET_PROC_COEF, 0x42 }, 8262 + 8263 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 }, 8264 + { 0x20, AC_VERB_SET_PROC_COEF, 0xc }, 8265 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8266 + { 0x20, AC_VERB_SET_PROC_COEF, 0x2a }, 8267 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8268 + 8269 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 }, 8270 + { 0x20, AC_VERB_SET_PROC_COEF, 0x2 }, 8271 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8272 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8273 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8274 + {} 8275 + }, 8276 + .chained = true, 8277 + .chain_id = ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE, 8278 + }, 8279 + [ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE] = { 8280 + .type = HDA_FIXUP_FUNC, 8281 + .v.func = alc287_fixup_legion_15imhg05_speakers, 8282 + .chained = true, 8283 + .chain_id = ALC269_FIXUP_HEADSET_MODE, 8284 + }, 8285 + [ALC287_FIXUP_YOGA7_14ITL_SPEAKERS] = { 8286 + .type = HDA_FIXUP_VERBS, 8287 + .v.verbs = (const struct hda_verb[]) { 8288 + // set left speaker Yoga 7i. 8289 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 }, 8290 + { 0x20, AC_VERB_SET_PROC_COEF, 0x41 }, 8291 + 8292 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 }, 8293 + { 0x20, AC_VERB_SET_PROC_COEF, 0xc }, 8294 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8295 + { 0x20, AC_VERB_SET_PROC_COEF, 0x1a }, 8296 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8297 + 8298 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 }, 8299 + { 0x20, AC_VERB_SET_PROC_COEF, 0x2 }, 8300 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8301 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8302 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8303 + 8304 + // set right speaker Yoga 7i. 
8305 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 }, 8306 + { 0x20, AC_VERB_SET_PROC_COEF, 0x46 }, 8307 + 8308 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 }, 8309 + { 0x20, AC_VERB_SET_PROC_COEF, 0xc }, 8310 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8311 + { 0x20, AC_VERB_SET_PROC_COEF, 0x2a }, 8312 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8313 + 8314 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 }, 8315 + { 0x20, AC_VERB_SET_PROC_COEF, 0x2 }, 8316 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8317 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8318 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8319 + {} 8320 + }, 8321 + .chained = true, 8322 + .chain_id = ALC269_FIXUP_HEADSET_MODE, 8323 + }, 8324 + [ALC287_FIXUP_13S_GEN2_SPEAKERS] = { 8325 + .type = HDA_FIXUP_VERBS, 8326 + .v.verbs = (const struct hda_verb[]) { 8327 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 }, 8328 + { 0x20, AC_VERB_SET_PROC_COEF, 0x41 }, 8329 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8330 + { 0x20, AC_VERB_SET_PROC_COEF, 0x2 }, 8331 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8332 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8333 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8334 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 }, 8335 + { 0x20, AC_VERB_SET_PROC_COEF, 0x42 }, 8336 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 }, 8337 + { 0x20, AC_VERB_SET_PROC_COEF, 0x2 }, 8338 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8339 + { 0x20, AC_VERB_SET_PROC_COEF, 0x0 }, 8340 + { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 }, 8341 + {} 8342 + }, 8343 + .chained = true, 8344 + .chain_id = ALC269_FIXUP_HEADSET_MODE, 8345 + }, 8257 8346 }; 8258 8347 8259 8348 static const struct snd_pci_quirk alc269_fixup_tbl[] = { ··· 8755 8630 SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), 8756 8631 SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), 8757 8632 SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP), 8633 + SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), 8634 
+ SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), 8635 + SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), 8636 + SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS), 8758 8637 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 8759 8638 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), 8760 8639 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+1 -1
sound/pci/pcxhr/pcxhr_core.c
··· 52 52 #define PCXHR_DSP 2 53 53 54 54 #if (PCXHR_DSP_OFFSET_MAX > PCXHR_PLX_OFFSET_MIN) 55 - #undef PCXHR_REG_TO_PORT(x) 55 + #error PCXHR_REG_TO_PORT(x) 56 56 #else 57 57 #define PCXHR_REG_TO_PORT(x) ((x)>PCXHR_DSP_OFFSET_MAX ? PCXHR_PLX : PCXHR_DSP) 58 58 #endif
+10 -6
sound/soc/fsl/fsl_esai.c
··· 1073 1073 if (ret < 0) 1074 1074 goto err_pm_get_sync; 1075 1075 1076 + /* 1077 + * Register platform component before registering cpu dai for there 1078 + * is not defer probe for platform component in snd_soc_add_pcm_runtime(). 1079 + */ 1080 + ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE); 1081 + if (ret) { 1082 + dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret); 1083 + goto err_pm_get_sync; 1084 + } 1085 + 1076 1086 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component, 1077 1087 &fsl_esai_dai, 1); 1078 1088 if (ret) { ··· 1091 1081 } 1092 1082 1093 1083 INIT_WORK(&esai_priv->work, fsl_esai_hw_reset); 1094 - 1095 - ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE); 1096 - if (ret) { 1097 - dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret); 1098 - goto err_pm_get_sync; 1099 - } 1100 1084 1101 1085 return ret; 1102 1086
+10 -5
sound/soc/fsl/fsl_micfil.c
··· 737 737 pm_runtime_enable(&pdev->dev); 738 738 regcache_cache_only(micfil->regmap, true); 739 739 740 + /* 741 + * Register platform component before registering cpu dai for there 742 + * is not defer probe for platform component in snd_soc_add_pcm_runtime(). 743 + */ 744 + ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); 745 + if (ret) { 746 + dev_err(&pdev->dev, "failed to pcm register\n"); 747 + return ret; 748 + } 749 + 740 750 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component, 741 751 &fsl_micfil_dai, 1); 742 752 if (ret) { 743 753 dev_err(&pdev->dev, "failed to register component %s\n", 744 754 fsl_micfil_component.name); 745 - return ret; 746 755 } 747 - 748 - ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0); 749 - if (ret) 750 - dev_err(&pdev->dev, "failed to pcm register\n"); 751 756 752 757 return ret; 753 758 }
+9 -5
sound/soc/fsl/fsl_sai.c
··· 1152 1152 if (ret < 0) 1153 1153 goto err_pm_get_sync; 1154 1154 1155 - ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component, 1156 - &sai->cpu_dai_drv, 1); 1157 - if (ret) 1158 - goto err_pm_get_sync; 1159 - 1155 + /* 1156 + * Register platform component before registering cpu dai for there 1157 + * is not defer probe for platform component in snd_soc_add_pcm_runtime(). 1158 + */ 1160 1159 if (sai->soc_data->use_imx_pcm) { 1161 1160 ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE); 1162 1161 if (ret) ··· 1165 1166 if (ret) 1166 1167 goto err_pm_get_sync; 1167 1168 } 1169 + 1170 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component, 1171 + &sai->cpu_dai_drv, 1); 1172 + if (ret) 1173 + goto err_pm_get_sync; 1168 1174 1169 1175 return ret; 1170 1176
+10 -6
sound/soc/fsl/fsl_spdif.c
··· 1434 1434 pm_runtime_enable(&pdev->dev); 1435 1435 regcache_cache_only(spdif_priv->regmap, true); 1436 1436 1437 + /* 1438 + * Register platform component before registering cpu dai for there 1439 + * is not defer probe for platform component in snd_soc_add_pcm_runtime(). 1440 + */ 1441 + ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE); 1442 + if (ret) { 1443 + dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n"); 1444 + goto err_pm_disable; 1445 + } 1446 + 1437 1447 ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component, 1438 1448 &spdif_priv->cpu_dai_drv, 1); 1439 1449 if (ret) { 1440 1450 dev_err(&pdev->dev, "failed to register DAI: %d\n", ret); 1441 - goto err_pm_disable; 1442 - } 1443 - 1444 - ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE); 1445 - if (ret) { 1446 - dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n"); 1447 1451 goto err_pm_disable; 1448 1452 } 1449 1453
+10 -5
sound/soc/fsl/fsl_xcvr.c
··· 1215 1215 pm_runtime_enable(dev); 1216 1216 regcache_cache_only(xcvr->regmap, true); 1217 1217 1218 + /* 1219 + * Register platform component before registering cpu dai for there 1220 + * is not defer probe for platform component in snd_soc_add_pcm_runtime(). 1221 + */ 1222 + ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0); 1223 + if (ret) { 1224 + dev_err(dev, "failed to pcm register\n"); 1225 + return ret; 1226 + } 1227 + 1218 1228 ret = devm_snd_soc_register_component(dev, &fsl_xcvr_comp, 1219 1229 &fsl_xcvr_dai, 1); 1220 1230 if (ret) { 1221 1231 dev_err(dev, "failed to register component %s\n", 1222 1232 fsl_xcvr_comp.name); 1223 - return ret; 1224 1233 } 1225 - 1226 - ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0); 1227 - if (ret) 1228 - dev_err(dev, "failed to pcm register\n"); 1229 1234 1230 1235 return ret; 1231 1236 }
+5
sound/soc/intel/boards/sof_sdw.c
··· 929 929 cpus + *cpu_id, cpu_dai_num, 930 930 codecs, codec_num, 931 931 NULL, &sdw_ops); 932 + /* 933 + * SoundWire DAILINKs use 'stream' functions and Bank Switch operations 934 + * based on wait_for_completion(), tag them as 'nonatomic'. 935 + */ 936 + dai_links[*be_index].nonatomic = true; 932 937 933 938 ret = set_codec_init_func(card, link, dai_links + (*be_index)++, 934 939 playback, group_id);
+3
sound/soc/mediatek/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 config SND_SOC_MEDIATEK 3 3 tristate 4 + select REGMAP_MMIO 4 5 5 6 config SND_SOC_MT2701 6 7 tristate "ASoC support for Mediatek MT2701 chip" ··· 189 188 config SND_SOC_MT8195 190 189 tristate "ASoC support for Mediatek MT8195 chip" 191 190 depends on ARCH_MEDIATEK || COMPILE_TEST 191 + depends on COMMON_CLK 192 192 select SND_SOC_MEDIATEK 193 + select MFD_SYSCON if SND_SOC_MT6359 193 194 help 194 195 This adds ASoC platform driver support for Mediatek MT8195 chip 195 196 that can be used with other codecs.
+11 -8
sound/soc/mediatek/common/mtk-afe-fe-dai.c
··· 334 334 devm_kcalloc(dev, afe->reg_back_up_list_num, 335 335 sizeof(unsigned int), GFP_KERNEL); 336 336 337 - for (i = 0; i < afe->reg_back_up_list_num; i++) 338 - regmap_read(regmap, afe->reg_back_up_list[i], 339 - &afe->reg_back_up[i]); 337 + if (afe->reg_back_up) { 338 + for (i = 0; i < afe->reg_back_up_list_num; i++) 339 + regmap_read(regmap, afe->reg_back_up_list[i], 340 + &afe->reg_back_up[i]); 341 + } 340 342 341 343 afe->suspended = true; 342 344 afe->runtime_suspend(dev); ··· 358 356 359 357 afe->runtime_resume(dev); 360 358 361 - if (!afe->reg_back_up) 359 + if (!afe->reg_back_up) { 362 360 dev_dbg(dev, "%s no reg_backup\n", __func__); 363 - 364 - for (i = 0; i < afe->reg_back_up_list_num; i++) 365 - mtk_regmap_write(regmap, afe->reg_back_up_list[i], 366 - afe->reg_back_up[i]); 361 + } else { 362 + for (i = 0; i < afe->reg_back_up_list_num; i++) 363 + mtk_regmap_write(regmap, afe->reg_back_up_list[i], 364 + afe->reg_back_up[i]); 365 + } 367 366 368 367 afe->suspended = false; 369 368 return 0;
+3 -4
sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
··· 424 424 return snd_soc_component_set_jack(cmpnt_codec, &priv->hdmi_jack, NULL); 425 425 } 426 426 427 - static int mt8195_hdmitx_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, 428 - struct snd_pcm_hw_params *params) 427 + static int mt8195_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, 428 + struct snd_pcm_hw_params *params) 429 429 430 430 { 431 431 /* fix BE i2s format to 32bit, clean param mask first */ ··· 902 902 .no_pcm = 1, 903 903 .dpcm_playback = 1, 904 904 .ops = &mt8195_dptx_ops, 905 - .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup, 905 + .be_hw_params_fixup = mt8195_dptx_hw_params_fixup, 906 906 SND_SOC_DAILINK_REG(DPTX_BE), 907 907 }, 908 908 [DAI_LINK_ETDM1_IN_BE] = { ··· 953 953 SND_SOC_DAIFMT_NB_NF | 954 954 SND_SOC_DAIFMT_CBS_CFS, 955 955 .dpcm_playback = 1, 956 - .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup, 957 956 SND_SOC_DAILINK_REG(ETDM3_OUT_BE), 958 957 }, 959 958 [DAI_LINK_PCM1_BE] = {
+1 -3
sound/soc/sof/core.c
··· 371 371 dev_warn(dev, "error: %d failed to prepare DSP for device removal", 372 372 ret); 373 373 374 - snd_sof_fw_unload(sdev); 375 374 snd_sof_ipc_free(sdev); 376 375 snd_sof_free_debug(sdev); 377 376 snd_sof_free_trace(sdev); ··· 393 394 snd_sof_remove(sdev); 394 395 395 396 /* release firmware */ 396 - release_firmware(pdata->fw); 397 - pdata->fw = NULL; 397 + snd_sof_fw_unload(sdev); 398 398 399 399 return 0; 400 400 }
+8 -1
sound/soc/sof/imx/imx8.c
··· 365 365 /* on i.MX8 there is 1 to 1 match between type and BAR idx */ 366 366 static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type) 367 367 { 368 - return type; 368 + /* Only IRAM and SRAM bars are valid */ 369 + switch (type) { 370 + case SOF_FW_BLK_TYPE_IRAM: 371 + case SOF_FW_BLK_TYPE_SRAM: 372 + return type; 373 + default: 374 + return -EINVAL; 375 + } 369 376 } 370 377 371 378 static void imx8_ipc_msg_data(struct snd_sof_dev *sdev,
+8 -1
sound/soc/sof/imx/imx8m.c
··· 228 228 /* on i.MX8 there is 1 to 1 match between type and BAR idx */ 229 229 static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type) 230 230 { 231 - return type; 231 + /* Only IRAM and SRAM bars are valid */ 232 + switch (type) { 233 + case SOF_FW_BLK_TYPE_IRAM: 234 + case SOF_FW_BLK_TYPE_SRAM: 235 + return type; 236 + default: 237 + return -EINVAL; 238 + } 232 239 } 233 240 234 241 static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
+5 -3
sound/soc/sof/loader.c
··· 729 729 ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev); 730 730 731 731 if (ret < 0) { 732 - dev_err(sdev->dev, "error: request firmware %s failed err: %d\n", 733 - fw_filename, ret); 734 732 dev_err(sdev->dev, 735 - "you may need to download the firmware from https://github.com/thesofproject/sof-bin/\n"); 733 + "error: sof firmware file is missing, you might need to\n"); 734 + dev_err(sdev->dev, 735 + " download it from https://github.com/thesofproject/sof-bin/\n"); 736 736 goto err; 737 737 } else { 738 738 dev_dbg(sdev->dev, "request_firmware %s successful\n", ··· 880 880 void snd_sof_fw_unload(struct snd_sof_dev *sdev) 881 881 { 882 882 /* TODO: support module unloading at runtime */ 883 + release_firmware(sdev->pdata->fw); 884 + sdev->pdata->fw = NULL; 883 885 } 884 886 EXPORT_SYMBOL(snd_sof_fw_unload);
-1
sound/soc/sof/trace.c
··· 530 530 return; 531 531 532 532 if (sdev->dtrace_is_enabled) { 533 - dev_err(sdev->dev, "error: waking up any trace sleepers\n"); 534 533 sdev->dtrace_error = true; 535 534 wake_up(&sdev->trace_sleep); 536 535 }
+2 -2
sound/soc/sof/xtensa/core.c
··· 122 122 * 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63 123 123 */ 124 124 for (i = 0; i < stack_words; i += 4) { 125 - hex_dump_to_buffer(stack + i * 4, 16, 16, 4, 125 + hex_dump_to_buffer(stack + i, 16, 16, 4, 126 126 buf, sizeof(buf), false); 127 - dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i, buf); 127 + dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf); 128 128 } 129 129 } 130 130
+4 -14
sound/usb/card.c
··· 1054 1054 return 0; 1055 1055 } 1056 1056 1057 - static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) 1057 + static int usb_audio_resume(struct usb_interface *intf) 1058 1058 { 1059 1059 struct snd_usb_audio *chip = usb_get_intfdata(intf); 1060 1060 struct snd_usb_stream *as; ··· 1080 1080 * we just notify and restart the mixers 1081 1081 */ 1082 1082 list_for_each_entry(mixer, &chip->mixer_list, list) { 1083 - err = snd_usb_mixer_resume(mixer, reset_resume); 1083 + err = snd_usb_mixer_resume(mixer); 1084 1084 if (err < 0) 1085 1085 goto err_out; 1086 1086 } ··· 1100 1100 atomic_dec(&chip->active); /* allow autopm after this point */ 1101 1101 return err; 1102 1102 } 1103 - 1104 - static int usb_audio_resume(struct usb_interface *intf) 1105 - { 1106 - return __usb_audio_resume(intf, false); 1107 - } 1108 - 1109 - static int usb_audio_reset_resume(struct usb_interface *intf) 1110 - { 1111 - return __usb_audio_resume(intf, true); 1112 - } 1113 1103 #else 1114 1104 #define usb_audio_suspend NULL 1115 1105 #define usb_audio_resume NULL 1116 - #define usb_audio_reset_resume NULL 1106 + #define usb_audio_resume NULL 1117 1107 #endif /* CONFIG_PM */ 1118 1108 1119 1109 static const struct usb_device_id usb_audio_ids [] = { ··· 1125 1135 .disconnect = usb_audio_disconnect, 1126 1136 .suspend = usb_audio_suspend, 1127 1137 .resume = usb_audio_resume, 1128 - .reset_resume = usb_audio_reset_resume, 1138 + .reset_resume = usb_audio_resume, 1129 1139 .id_table = usb_audio_ids, 1130 1140 .supports_autosuspend = 1, 1131 1141 };
+4 -22
sound/usb/mixer.c
··· 3653 3653 return 0; 3654 3654 } 3655 3655 3656 - static int default_mixer_reset_resume(struct usb_mixer_elem_list *list) 3657 - { 3658 - int err; 3659 - 3660 - if (list->resume) { 3661 - err = list->resume(list); 3662 - if (err < 0) 3663 - return err; 3664 - } 3665 - return restore_mixer_value(list); 3666 - } 3667 - 3668 - int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume) 3656 + int snd_usb_mixer_resume(struct usb_mixer_interface *mixer) 3669 3657 { 3670 3658 struct usb_mixer_elem_list *list; 3671 - usb_mixer_elem_resume_func_t f; 3672 3659 int id, err; 3673 3660 3674 3661 /* restore cached mixer values */ 3675 3662 for (id = 0; id < MAX_ID_ELEMS; id++) { 3676 3663 for_each_mixer_elem(list, mixer, id) { 3677 - if (reset_resume) 3678 - f = list->reset_resume; 3679 - else 3680 - f = list->resume; 3681 - if (f) { 3682 - err = f(list); 3664 + if (list->resume) { 3665 + err = list->resume(list); 3683 3666 if (err < 0) 3684 3667 return err; 3685 3668 } ··· 3683 3700 list->id = unitid; 3684 3701 list->dump = snd_usb_mixer_dump_cval; 3685 3702 #ifdef CONFIG_PM 3686 - list->resume = NULL; 3687 - list->reset_resume = default_mixer_reset_resume; 3703 + list->resume = restore_mixer_value; 3688 3704 #endif 3689 3705 }
+1 -2
sound/usb/mixer.h
··· 70 70 bool is_std_info; 71 71 usb_mixer_elem_dump_func_t dump; 72 72 usb_mixer_elem_resume_func_t resume; 73 - usb_mixer_elem_resume_func_t reset_resume; 74 73 }; 75 74 76 75 /* iterate over mixer element list of the given unit id */ ··· 120 121 121 122 #ifdef CONFIG_PM 122 123 int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer); 123 - int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume); 124 + int snd_usb_mixer_resume(struct usb_mixer_interface *mixer); 124 125 #endif 125 126 126 127 int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
+1 -1
sound/usb/mixer_quirks.c
··· 151 151 *listp = list; 152 152 list->mixer = mixer; 153 153 list->id = id; 154 - list->reset_resume = resume; 154 + list->resume = resume; 155 155 kctl = snd_ctl_new1(knew, list); 156 156 if (!kctl) { 157 157 kfree(list);
tools/arch/x86/include/asm/unistd_32.h tools/arch/x86/include/uapi/asm/unistd_32.h
-3
tools/arch/x86/include/asm/unistd_64.h tools/arch/x86/include/uapi/asm/unistd_64.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef __NR_userfaultfd 3 - #define __NR_userfaultfd 282 4 - #endif 5 2 #ifndef __NR_perf_event_open 6 3 # define __NR_perf_event_open 298 7 4 #endif
+2 -2
tools/arch/x86/lib/insn.c
··· 37 37 ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) 38 38 39 39 #define __get_next(t, insn) \ 40 - ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); }) 40 + ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); }) 41 41 42 42 #define __peek_nbyte_next(t, insn, n) \ 43 - ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); }) 43 + ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); }) 44 44 45 45 #define get_next(t, insn) \ 46 46 ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
+1
tools/include/uapi/sound/asound.h
··· 784 784 785 785 #define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int) 786 786 #define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info) 787 + #define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int) 787 788 #define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params) 788 789 #define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status) 789 790 #define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
+2 -1
tools/lib/bpf/libbpf.c
··· 6894 6894 6895 6895 if (obj->gen_loader) { 6896 6896 /* reset FDs */ 6897 - btf__set_fd(obj->btf, -1); 6897 + if (obj->btf) 6898 + btf__set_fd(obj->btf, -1); 6898 6899 for (i = 0; i < obj->nr_maps; i++) 6899 6900 obj->maps[i].fd = -1; 6900 6901 if (!err)
+7 -1
tools/lib/bpf/linker.c
··· 1649 1649 static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name, 1650 1650 int *out_btf_sec_id, int *out_btf_id) 1651 1651 { 1652 - int i, j, n = btf__get_nr_types(obj->btf), m, btf_id = 0; 1652 + int i, j, n, m, btf_id = 0; 1653 1653 const struct btf_type *t; 1654 1654 const struct btf_var_secinfo *vi; 1655 1655 const char *name; 1656 1656 1657 + if (!obj->btf) { 1658 + pr_warn("failed to find BTF info for object '%s'\n", obj->filename); 1659 + return -EINVAL; 1660 + } 1661 + 1662 + n = btf__get_nr_types(obj->btf); 1657 1663 for (i = 1; i <= n; i++) { 1658 1664 t = btf__type_by_id(obj->btf, i); 1659 1665
+1
tools/lib/bpf/strset.c
··· 88 88 89 89 hashmap__free(set->strs_hash); 90 90 free(set->strs_data); 91 + free(set); 91 92 } 92 93 93 94 size_t strset__data_size(const struct strset *set)
+30 -8
tools/objtool/special.c
··· 58 58 { 59 59 } 60 60 61 + static bool reloc2sec_off(struct reloc *reloc, struct section **sec, unsigned long *off) 62 + { 63 + switch (reloc->sym->type) { 64 + case STT_FUNC: 65 + *sec = reloc->sym->sec; 66 + *off = reloc->sym->offset + reloc->addend; 67 + return true; 68 + 69 + case STT_SECTION: 70 + *sec = reloc->sym->sec; 71 + *off = reloc->addend; 72 + return true; 73 + 74 + default: 75 + return false; 76 + } 77 + } 78 + 61 79 static int get_alt_entry(struct elf *elf, struct special_entry *entry, 62 80 struct section *sec, int idx, 63 81 struct special_alt *alt) ··· 109 91 WARN_FUNC("can't find orig reloc", sec, offset + entry->orig); 110 92 return -1; 111 93 } 112 - if (orig_reloc->sym->type != STT_SECTION) { 113 - WARN_FUNC("don't know how to handle non-section reloc symbol %s", 114 - sec, offset + entry->orig, orig_reloc->sym->name); 94 + if (!reloc2sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off)) { 95 + WARN_FUNC("don't know how to handle reloc symbol type %d: %s", 96 + sec, offset + entry->orig, 97 + orig_reloc->sym->type, 98 + orig_reloc->sym->name); 115 99 return -1; 116 100 } 117 - 118 - alt->orig_sec = orig_reloc->sym->sec; 119 - alt->orig_off = orig_reloc->addend; 120 101 121 102 if (!entry->group || alt->new_len) { 122 103 new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new); ··· 133 116 if (arch_is_retpoline(new_reloc->sym)) 134 117 return 1; 135 118 136 - alt->new_sec = new_reloc->sym->sec; 137 - alt->new_off = (unsigned int)new_reloc->addend; 119 + if (!reloc2sec_off(new_reloc, &alt->new_sec, &alt->new_off)) { 120 + WARN_FUNC("don't know how to handle reloc symbol type %d: %s", 121 + sec, offset + entry->new, 122 + new_reloc->sym->type, 123 + new_reloc->sym->name); 124 + return -1; 125 + } 138 126 139 127 /* _ASM_EXTABLE_EX hack */ 140 128 if (alt->new_off >= 0x7ffffff0)
+1 -1
tools/perf/Documentation/jitdump-specification.txt
··· 164 164 The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html 165 165 166 166 167 - The EH Frame follows the LSB specicfication as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html 167 + The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html 168 168 169 169 170 170 NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
+1 -1
tools/perf/Documentation/perf-c2c.txt
··· 261 261 User can specify how to sort offsets for cacheline. 262 262 263 263 Following fields are available and governs the final 264 - output fields set for caheline offsets output: 264 + output fields set for cacheline offsets output: 265 265 266 266 tid - coalesced by process TIDs 267 267 pid - coalesced by process PIDs
+1 -1
tools/perf/Documentation/perf-intel-pt.txt
··· 883 883 884 884 "Transactions" events correspond to the start or end of transactions. The 885 885 'flags' field can be used in perf script to determine whether the event is a 886 - tranasaction start, commit or abort. 886 + transaction start, commit or abort. 887 887 888 888 Note that "instructions", "branches" and "transactions" events depend on code 889 889 flow packets which can be disabled by using the config term "branch=0". Refer
+1 -1
tools/perf/Documentation/perf-lock.txt
··· 44 44 45 45 -f:: 46 46 --force:: 47 - Don't complan, do it. 47 + Don't complain, do it. 48 48 49 49 REPORT OPTIONS 50 50 --------------
+1 -1
tools/perf/Documentation/perf-script-perl.txt
··· 54 54 Traces meant to be processed using a script should be recorded with 55 55 the above option: -a to enable system-wide collection. 56 56 57 - The format file for the sched_wakep event defines the following fields 57 + The format file for the sched_wakeup event defines the following fields 58 58 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format): 59 59 60 60 ----
+1 -1
tools/perf/Documentation/perf-script-python.txt
··· 448 448 Traces meant to be processed using a script should be recorded with 449 449 the above option: -a to enable system-wide collection. 450 450 451 - The format file for the sched_wakep event defines the following fields 451 + The format file for the sched_wakeup event defines the following fields 452 452 (see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format): 453 453 454 454 ----
+1 -1
tools/perf/Documentation/perf-stat.txt
··· 385 385 Print metrics or metricgroups specified in a comma separated list. 386 386 For a group all metrics from the group are added. 387 387 The events from the metrics are automatically measured. 388 - See perf list output for the possble metrics and metricgroups. 388 + See perf list output for the possible metrics and metricgroups. 389 389 390 390 -A:: 391 391 --no-aggr::
+1 -1
tools/perf/Documentation/topdown.txt
··· 2 2 ----------------------------------- 3 3 4 4 Intel CPUs (since Sandy Bridge and Silvermont) support a TopDown 5 - methology to break down CPU pipeline execution into 4 bottlenecks: 5 + methodology to break down CPU pipeline execution into 4 bottlenecks: 6 6 frontend bound, backend bound, bad speculation, retiring. 7 7 8 8 For more details on Topdown see [1][5]
+1 -1
tools/perf/Makefile.config
··· 143 143 ifdef CSINCLUDES 144 144 LIBOPENCSD_CFLAGS := -I$(CSINCLUDES) 145 145 endif 146 - OPENCSDLIBS := -lopencsd_c_api -lopencsd 146 + OPENCSDLIBS := -lopencsd_c_api -lopencsd -lstdc++ 147 147 ifdef CSLIBS 148 148 LIBOPENCSD_LDFLAGS := -L$(CSLIBS) 149 149 endif
+1 -1
tools/perf/Makefile.perf
··· 804 804 805 805 $(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h) 806 806 807 - LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)' 807 + LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(filter-out -static,$(LDFLAGS))' 808 808 809 809 $(LIBTRACEEVENT): FORCE 810 810 $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
+4 -4
tools/perf/arch/arm/util/auxtrace.c
··· 8 8 #include <linux/coresight-pmu.h> 9 9 #include <linux/zalloc.h> 10 10 11 - #include "../../util/auxtrace.h" 12 - #include "../../util/debug.h" 13 - #include "../../util/evlist.h" 14 - #include "../../util/pmu.h" 11 + #include "../../../util/auxtrace.h" 12 + #include "../../../util/debug.h" 13 + #include "../../../util/evlist.h" 14 + #include "../../../util/pmu.h" 15 15 #include "cs-etm.h" 16 16 #include "arm-spe.h" 17 17
+12 -12
tools/perf/arch/arm/util/cs-etm.c
··· 16 16 #include <linux/zalloc.h> 17 17 18 18 #include "cs-etm.h" 19 - #include "../../util/debug.h" 20 - #include "../../util/record.h" 21 - #include "../../util/auxtrace.h" 22 - #include "../../util/cpumap.h" 23 - #include "../../util/event.h" 24 - #include "../../util/evlist.h" 25 - #include "../../util/evsel.h" 26 - #include "../../util/perf_api_probe.h" 27 - #include "../../util/evsel_config.h" 28 - #include "../../util/pmu.h" 29 - #include "../../util/cs-etm.h" 19 + #include "../../../util/debug.h" 20 + #include "../../../util/record.h" 21 + #include "../../../util/auxtrace.h" 22 + #include "../../../util/cpumap.h" 23 + #include "../../../util/event.h" 24 + #include "../../../util/evlist.h" 25 + #include "../../../util/evsel.h" 26 + #include "../../../util/perf_api_probe.h" 27 + #include "../../../util/evsel_config.h" 28 + #include "../../../util/pmu.h" 29 + #include "../../../util/cs-etm.h" 30 30 #include <internal/lib.h> // page_size 31 - #include "../../util/session.h" 31 + #include "../../../util/session.h" 32 32 33 33 #include <errno.h> 34 34 #include <stdlib.h>
+1 -1
tools/perf/arch/arm/util/perf_regs.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #include "../../util/perf_regs.h" 2 + #include "../../../util/perf_regs.h" 3 3 4 4 const struct sample_reg sample_reg_masks[] = { 5 5 SMPL_REG_END
+1 -1
tools/perf/arch/arm/util/pmu.c
··· 10 10 #include <linux/string.h> 11 11 12 12 #include "arm-spe.h" 13 - #include "../../util/pmu.h" 13 + #include "../../../util/pmu.h" 14 14 15 15 struct perf_event_attr 16 16 *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+3 -3
tools/perf/arch/arm/util/unwind-libdw.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <elfutils/libdwfl.h> 3 - #include "../../util/unwind-libdw.h" 4 - #include "../../util/perf_regs.h" 5 - #include "../../util/event.h" 3 + #include "../../../util/unwind-libdw.h" 4 + #include "../../../util/perf_regs.h" 5 + #include "../../../util/event.h" 6 6 7 7 bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) 8 8 {
+2 -2
tools/perf/arch/arm/util/unwind-libunwind.c
··· 3 3 #include <errno.h> 4 4 #include <libunwind.h> 5 5 #include "perf_regs.h" 6 - #include "../../util/unwind.h" 7 - #include "../../util/debug.h" 6 + #include "../../../util/unwind.h" 7 + #include "../../../util/debug.h" 8 8 9 9 int libunwind__arch_reg_id(int regnum) 10 10 {
+1 -1
tools/perf/arch/x86/util/iostat.c
··· 432 432 u8 die = ((struct iio_root_port *)evsel->priv)->die; 433 433 struct perf_counts_values *count = perf_counts(evsel->counts, die, 0); 434 434 435 - if (count->run && count->ena) { 435 + if (count && count->run && count->ena) { 436 436 if (evsel->prev_raw_counts && !out->force_header) { 437 437 struct perf_counts_values *prev_count = 438 438 perf_counts(evsel->prev_raw_counts, die, 0);
+2
tools/perf/builtin-stat.c
··· 2408 2408 goto out; 2409 2409 } else if (verbose) 2410 2410 iostat_list(evsel_list, &stat_config); 2411 + if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target)) 2412 + target.system_wide = true; 2411 2413 } 2412 2414 2413 2415 if (add_default_attributes())
+1 -1
tools/perf/pmu-events/arch/powerpc/power8/other.json
··· 1046 1046 { 1047 1047 "EventCode": "0x4e010", 1048 1048 "EventName": "PM_GCT_NOSLOT_IC_L3MISS", 1049 - "BriefDescription": "Gct empty for this thread due to icach l3 miss", 1049 + "BriefDescription": "Gct empty for this thread due to icache l3 miss", 1050 1050 "PublicDescription": "" 1051 1051 }, 1052 1052 {
+2
tools/perf/pmu-events/jevents.c
··· 1297 1297 } 1298 1298 1299 1299 free_arch_std_events(); 1300 + free_sys_event_tables(); 1300 1301 free(mapfile); 1301 1302 return 0; 1302 1303 ··· 1319 1318 create_empty_mapping(output_file); 1320 1319 err_out: 1321 1320 free_arch_std_events(); 1321 + free_sys_event_tables(); 1322 1322 free(mapfile); 1323 1323 return ret; 1324 1324 }
+97
tools/perf/tests/attr/test-stat-default
··· 68 68 type=0 69 69 config=5 70 70 optional=1 71 + 72 + # PERF_TYPE_RAW / slots (0x400) 73 + [event11:base-stat] 74 + fd=11 75 + group_fd=-1 76 + type=4 77 + config=1024 78 + read_format=15 79 + optional=1 80 + 81 + # PERF_TYPE_RAW / topdown-retiring (0x8000) 82 + [event12:base-stat] 83 + fd=12 84 + group_fd=11 85 + type=4 86 + config=32768 87 + disabled=0 88 + enable_on_exec=0 89 + read_format=15 90 + optional=1 91 + 92 + # PERF_TYPE_RAW / topdown-bad-spec (0x8100) 93 + [event13:base-stat] 94 + fd=13 95 + group_fd=11 96 + type=4 97 + config=33024 98 + disabled=0 99 + enable_on_exec=0 100 + read_format=15 101 + optional=1 102 + 103 + # PERF_TYPE_RAW / topdown-fe-bound (0x8200) 104 + [event14:base-stat] 105 + fd=14 106 + group_fd=11 107 + type=4 108 + config=33280 109 + disabled=0 110 + enable_on_exec=0 111 + read_format=15 112 + optional=1 113 + 114 + # PERF_TYPE_RAW / topdown-be-bound (0x8300) 115 + [event15:base-stat] 116 + fd=15 117 + group_fd=11 118 + type=4 119 + config=33536 120 + disabled=0 121 + enable_on_exec=0 122 + read_format=15 123 + optional=1 124 + 125 + # PERF_TYPE_RAW / topdown-heavy-ops (0x8400) 126 + [event16:base-stat] 127 + fd=16 128 + group_fd=11 129 + type=4 130 + config=33792 131 + disabled=0 132 + enable_on_exec=0 133 + read_format=15 134 + optional=1 135 + 136 + # PERF_TYPE_RAW / topdown-br-mispredict (0x8500) 137 + [event17:base-stat] 138 + fd=17 139 + group_fd=11 140 + type=4 141 + config=34048 142 + disabled=0 143 + enable_on_exec=0 144 + read_format=15 145 + optional=1 146 + 147 + # PERF_TYPE_RAW / topdown-fetch-lat (0x8600) 148 + [event18:base-stat] 149 + fd=18 150 + group_fd=11 151 + type=4 152 + config=34304 153 + disabled=0 154 + enable_on_exec=0 155 + read_format=15 156 + optional=1 157 + 158 + # PERF_TYPE_RAW / topdown-mem-bound (0x8700) 159 + [event19:base-stat] 160 + fd=19 161 + group_fd=11 162 + type=4 163 + config=34560 164 + disabled=0 165 + enable_on_exec=0 166 + read_format=15 167 + optional=1
+105 -8
tools/perf/tests/attr/test-stat-detailed-1
··· 70 70 config=5 71 71 optional=1 72 72 73 + # PERF_TYPE_RAW / slots (0x400) 74 + [event11:base-stat] 75 + fd=11 76 + group_fd=-1 77 + type=4 78 + config=1024 79 + read_format=15 80 + optional=1 81 + 82 + # PERF_TYPE_RAW / topdown-retiring (0x8000) 83 + [event12:base-stat] 84 + fd=12 85 + group_fd=11 86 + type=4 87 + config=32768 88 + disabled=0 89 + enable_on_exec=0 90 + read_format=15 91 + optional=1 92 + 93 + # PERF_TYPE_RAW / topdown-bad-spec (0x8100) 94 + [event13:base-stat] 95 + fd=13 96 + group_fd=11 97 + type=4 98 + config=33024 99 + disabled=0 100 + enable_on_exec=0 101 + read_format=15 102 + optional=1 103 + 104 + # PERF_TYPE_RAW / topdown-fe-bound (0x8200) 105 + [event14:base-stat] 106 + fd=14 107 + group_fd=11 108 + type=4 109 + config=33280 110 + disabled=0 111 + enable_on_exec=0 112 + read_format=15 113 + optional=1 114 + 115 + # PERF_TYPE_RAW / topdown-be-bound (0x8300) 116 + [event15:base-stat] 117 + fd=15 118 + group_fd=11 119 + type=4 120 + config=33536 121 + disabled=0 122 + enable_on_exec=0 123 + read_format=15 124 + optional=1 125 + 126 + # PERF_TYPE_RAW / topdown-heavy-ops (0x8400) 127 + [event16:base-stat] 128 + fd=16 129 + group_fd=11 130 + type=4 131 + config=33792 132 + disabled=0 133 + enable_on_exec=0 134 + read_format=15 135 + optional=1 136 + 137 + # PERF_TYPE_RAW / topdown-br-mispredict (0x8500) 138 + [event17:base-stat] 139 + fd=17 140 + group_fd=11 141 + type=4 142 + config=34048 143 + disabled=0 144 + enable_on_exec=0 145 + read_format=15 146 + optional=1 147 + 148 + # PERF_TYPE_RAW / topdown-fetch-lat (0x8600) 149 + [event18:base-stat] 150 + fd=18 151 + group_fd=11 152 + type=4 153 + config=34304 154 + disabled=0 155 + enable_on_exec=0 156 + read_format=15 157 + optional=1 158 + 159 + # PERF_TYPE_RAW / topdown-mem-bound (0x8700) 160 + [event19:base-stat] 161 + fd=19 162 + group_fd=11 163 + type=4 164 + config=34560 165 + disabled=0 166 + enable_on_exec=0 167 + read_format=15 168 + optional=1 169 + 73 170 # PERF_TYPE_HW_CACHE / 
74 171 # PERF_COUNT_HW_CACHE_L1D << 0 | 75 172 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 76 173 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 77 - [event11:base-stat] 78 - fd=11 174 + [event20:base-stat] 175 + fd=20 79 176 type=3 80 177 config=0 81 178 optional=1 ··· 181 84 # PERF_COUNT_HW_CACHE_L1D << 0 | 182 85 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 183 86 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 184 - [event12:base-stat] 185 - fd=12 87 + [event21:base-stat] 88 + fd=21 186 89 type=3 187 90 config=65536 188 91 optional=1 ··· 191 94 # PERF_COUNT_HW_CACHE_LL << 0 | 192 95 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 193 96 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 194 - [event13:base-stat] 195 - fd=13 97 + [event22:base-stat] 98 + fd=22 196 99 type=3 197 100 config=2 198 101 optional=1 ··· 201 104 # PERF_COUNT_HW_CACHE_LL << 0 | 202 105 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 203 106 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 204 - [event14:base-stat] 205 - fd=14 107 + [event23:base-stat] 108 + fd=23 206 109 type=3 207 110 config=65538 208 111 optional=1
+117 -20
tools/perf/tests/attr/test-stat-detailed-2
··· 70 70 config=5 71 71 optional=1 72 72 73 + # PERF_TYPE_RAW / slots (0x400) 74 + [event11:base-stat] 75 + fd=11 76 + group_fd=-1 77 + type=4 78 + config=1024 79 + read_format=15 80 + optional=1 81 + 82 + # PERF_TYPE_RAW / topdown-retiring (0x8000) 83 + [event12:base-stat] 84 + fd=12 85 + group_fd=11 86 + type=4 87 + config=32768 88 + disabled=0 89 + enable_on_exec=0 90 + read_format=15 91 + optional=1 92 + 93 + # PERF_TYPE_RAW / topdown-bad-spec (0x8100) 94 + [event13:base-stat] 95 + fd=13 96 + group_fd=11 97 + type=4 98 + config=33024 99 + disabled=0 100 + enable_on_exec=0 101 + read_format=15 102 + optional=1 103 + 104 + # PERF_TYPE_RAW / topdown-fe-bound (0x8200) 105 + [event14:base-stat] 106 + fd=14 107 + group_fd=11 108 + type=4 109 + config=33280 110 + disabled=0 111 + enable_on_exec=0 112 + read_format=15 113 + optional=1 114 + 115 + # PERF_TYPE_RAW / topdown-be-bound (0x8300) 116 + [event15:base-stat] 117 + fd=15 118 + group_fd=11 119 + type=4 120 + config=33536 121 + disabled=0 122 + enable_on_exec=0 123 + read_format=15 124 + optional=1 125 + 126 + # PERF_TYPE_RAW / topdown-heavy-ops (0x8400) 127 + [event16:base-stat] 128 + fd=16 129 + group_fd=11 130 + type=4 131 + config=33792 132 + disabled=0 133 + enable_on_exec=0 134 + read_format=15 135 + optional=1 136 + 137 + # PERF_TYPE_RAW / topdown-br-mispredict (0x8500) 138 + [event17:base-stat] 139 + fd=17 140 + group_fd=11 141 + type=4 142 + config=34048 143 + disabled=0 144 + enable_on_exec=0 145 + read_format=15 146 + optional=1 147 + 148 + # PERF_TYPE_RAW / topdown-fetch-lat (0x8600) 149 + [event18:base-stat] 150 + fd=18 151 + group_fd=11 152 + type=4 153 + config=34304 154 + disabled=0 155 + enable_on_exec=0 156 + read_format=15 157 + optional=1 158 + 159 + # PERF_TYPE_RAW / topdown-mem-bound (0x8700) 160 + [event19:base-stat] 161 + fd=19 162 + group_fd=11 163 + type=4 164 + config=34560 165 + disabled=0 166 + enable_on_exec=0 167 + read_format=15 168 + optional=1 169 + 73 170 # PERF_TYPE_HW_CACHE / 
74 171 # PERF_COUNT_HW_CACHE_L1D << 0 | 75 172 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 76 173 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 77 - [event11:base-stat] 78 - fd=11 174 + [event20:base-stat] 175 + fd=20 79 176 type=3 80 177 config=0 81 178 optional=1 ··· 181 84 # PERF_COUNT_HW_CACHE_L1D << 0 | 182 85 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 183 86 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 184 - [event12:base-stat] 185 - fd=12 87 + [event21:base-stat] 88 + fd=21 186 89 type=3 187 90 config=65536 188 91 optional=1 ··· 191 94 # PERF_COUNT_HW_CACHE_LL << 0 | 192 95 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 193 96 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 194 - [event13:base-stat] 195 - fd=13 97 + [event22:base-stat] 98 + fd=22 196 99 type=3 197 100 config=2 198 101 optional=1 ··· 201 104 # PERF_COUNT_HW_CACHE_LL << 0 | 202 105 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 203 106 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 204 - [event14:base-stat] 205 - fd=14 107 + [event23:base-stat] 108 + fd=23 206 109 type=3 207 110 config=65538 208 111 optional=1 ··· 211 114 # PERF_COUNT_HW_CACHE_L1I << 0 | 212 115 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 213 116 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 214 - [event15:base-stat] 215 - fd=15 117 + [event24:base-stat] 118 + fd=24 216 119 type=3 217 120 config=1 218 121 optional=1 ··· 221 124 # PERF_COUNT_HW_CACHE_L1I << 0 | 222 125 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 223 126 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 224 - [event16:base-stat] 225 - fd=16 127 + [event25:base-stat] 128 + fd=25 226 129 type=3 227 130 config=65537 228 131 optional=1 ··· 231 134 # PERF_COUNT_HW_CACHE_DTLB << 0 | 232 135 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 233 136 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 234 - [event17:base-stat] 235 - fd=17 137 + [event26:base-stat] 138 + fd=26 236 139 type=3 237 140 config=3 238 141 optional=1 ··· 241 144 # PERF_COUNT_HW_CACHE_DTLB << 0 | 242 145 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 243 146 # 
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 244 - [event18:base-stat] 245 - fd=18 147 + [event27:base-stat] 148 + fd=27 246 149 type=3 247 150 config=65539 248 151 optional=1 ··· 251 154 # PERF_COUNT_HW_CACHE_ITLB << 0 | 252 155 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 253 156 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 254 - [event19:base-stat] 255 - fd=19 157 + [event28:base-stat] 158 + fd=28 256 159 type=3 257 160 config=4 258 161 optional=1 ··· 261 164 # PERF_COUNT_HW_CACHE_ITLB << 0 | 262 165 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 263 166 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 264 - [event20:base-stat] 265 - fd=20 167 + [event29:base-stat] 168 + fd=29 266 169 type=3 267 170 config=65540 268 171 optional=1
+121 -24
tools/perf/tests/attr/test-stat-detailed-3
··· 70 70 config=5 71 71 optional=1 72 72 73 + # PERF_TYPE_RAW / slots (0x400) 74 + [event11:base-stat] 75 + fd=11 76 + group_fd=-1 77 + type=4 78 + config=1024 79 + read_format=15 80 + optional=1 81 + 82 + # PERF_TYPE_RAW / topdown-retiring (0x8000) 83 + [event12:base-stat] 84 + fd=12 85 + group_fd=11 86 + type=4 87 + config=32768 88 + disabled=0 89 + enable_on_exec=0 90 + read_format=15 91 + optional=1 92 + 93 + # PERF_TYPE_RAW / topdown-bad-spec (0x8100) 94 + [event13:base-stat] 95 + fd=13 96 + group_fd=11 97 + type=4 98 + config=33024 99 + disabled=0 100 + enable_on_exec=0 101 + read_format=15 102 + optional=1 103 + 104 + # PERF_TYPE_RAW / topdown-fe-bound (0x8200) 105 + [event14:base-stat] 106 + fd=14 107 + group_fd=11 108 + type=4 109 + config=33280 110 + disabled=0 111 + enable_on_exec=0 112 + read_format=15 113 + optional=1 114 + 115 + # PERF_TYPE_RAW / topdown-be-bound (0x8300) 116 + [event15:base-stat] 117 + fd=15 118 + group_fd=11 119 + type=4 120 + config=33536 121 + disabled=0 122 + enable_on_exec=0 123 + read_format=15 124 + optional=1 125 + 126 + # PERF_TYPE_RAW / topdown-heavy-ops (0x8400) 127 + [event16:base-stat] 128 + fd=16 129 + group_fd=11 130 + type=4 131 + config=33792 132 + disabled=0 133 + enable_on_exec=0 134 + read_format=15 135 + optional=1 136 + 137 + # PERF_TYPE_RAW / topdown-br-mispredict (0x8500) 138 + [event17:base-stat] 139 + fd=17 140 + group_fd=11 141 + type=4 142 + config=34048 143 + disabled=0 144 + enable_on_exec=0 145 + read_format=15 146 + optional=1 147 + 148 + # PERF_TYPE_RAW / topdown-fetch-lat (0x8600) 149 + [event18:base-stat] 150 + fd=18 151 + group_fd=11 152 + type=4 153 + config=34304 154 + disabled=0 155 + enable_on_exec=0 156 + read_format=15 157 + optional=1 158 + 159 + # PERF_TYPE_RAW / topdown-mem-bound (0x8700) 160 + [event19:base-stat] 161 + fd=19 162 + group_fd=11 163 + type=4 164 + config=34560 165 + disabled=0 166 + enable_on_exec=0 167 + read_format=15 168 + optional=1 169 + 73 170 # PERF_TYPE_HW_CACHE / 
74 171 # PERF_COUNT_HW_CACHE_L1D << 0 | 75 172 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 76 173 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 77 - [event11:base-stat] 78 - fd=11 174 + [event20:base-stat] 175 + fd=20 79 176 type=3 80 177 config=0 81 178 optional=1 ··· 181 84 # PERF_COUNT_HW_CACHE_L1D << 0 | 182 85 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 183 86 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 184 - [event12:base-stat] 185 - fd=12 87 + [event21:base-stat] 88 + fd=21 186 89 type=3 187 90 config=65536 188 91 optional=1 ··· 191 94 # PERF_COUNT_HW_CACHE_LL << 0 | 192 95 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 193 96 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 194 - [event13:base-stat] 195 - fd=13 97 + [event22:base-stat] 98 + fd=22 196 99 type=3 197 100 config=2 198 101 optional=1 ··· 201 104 # PERF_COUNT_HW_CACHE_LL << 0 | 202 105 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 203 106 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 204 - [event14:base-stat] 205 - fd=14 107 + [event23:base-stat] 108 + fd=23 206 109 type=3 207 110 config=65538 208 111 optional=1 ··· 211 114 # PERF_COUNT_HW_CACHE_L1I << 0 | 212 115 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 213 116 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 214 - [event15:base-stat] 215 - fd=15 117 + [event24:base-stat] 118 + fd=24 216 119 type=3 217 120 config=1 218 121 optional=1 ··· 221 124 # PERF_COUNT_HW_CACHE_L1I << 0 | 222 125 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 223 126 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 224 - [event16:base-stat] 225 - fd=16 127 + [event25:base-stat] 128 + fd=25 226 129 type=3 227 130 config=65537 228 131 optional=1 ··· 231 134 # PERF_COUNT_HW_CACHE_DTLB << 0 | 232 135 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 233 136 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 234 - [event17:base-stat] 235 - fd=17 137 + [event26:base-stat] 138 + fd=26 236 139 type=3 237 140 config=3 238 141 optional=1 ··· 241 144 # PERF_COUNT_HW_CACHE_DTLB << 0 | 242 145 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 243 146 # 
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 244 - [event18:base-stat] 245 - fd=18 147 + [event27:base-stat] 148 + fd=27 246 149 type=3 247 150 config=65539 248 151 optional=1 ··· 251 154 # PERF_COUNT_HW_CACHE_ITLB << 0 | 252 155 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 253 156 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 254 - [event19:base-stat] 255 - fd=19 157 + [event28:base-stat] 158 + fd=28 256 159 type=3 257 160 config=4 258 161 optional=1 ··· 261 164 # PERF_COUNT_HW_CACHE_ITLB << 0 | 262 165 # (PERF_COUNT_HW_CACHE_OP_READ << 8) | 263 166 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 264 - [event20:base-stat] 265 - fd=20 167 + [event29:base-stat] 168 + fd=29 266 169 type=3 267 170 config=65540 268 171 optional=1 ··· 271 174 # PERF_COUNT_HW_CACHE_L1D << 0 | 272 175 # (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 273 176 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) 274 - [event21:base-stat] 275 - fd=21 177 + [event30:base-stat] 178 + fd=30 276 179 type=3 277 180 config=512 278 181 optional=1 ··· 281 184 # PERF_COUNT_HW_CACHE_L1D << 0 | 282 185 # (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | 283 186 # (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) 284 - [event22:base-stat] 285 - fd=22 187 + [event31:base-stat] 188 + fd=31 286 189 type=3 287 190 config=66048 288 191 optional=1
+2 -2
tools/perf/tests/code-reading.c
··· 229 229 struct thread *thread, struct state *state) 230 230 { 231 231 struct addr_location al; 232 - unsigned char buf1[BUFSZ]; 233 - unsigned char buf2[BUFSZ]; 232 + unsigned char buf1[BUFSZ] = {0}; 233 + unsigned char buf2[BUFSZ] = {0}; 234 234 size_t ret_len; 235 235 u64 objdump_addr; 236 236 const char *objdump_name;
+32 -7
tools/perf/tests/dwarf-unwind.c
··· 20 20 /* For bsearch. We try to unwind functions in shared object. */ 21 21 #include <stdlib.h> 22 22 23 + /* 24 + * The test will assert frames are on the stack but tail call optimizations lose 25 + * the frame of the caller. Clang can disable this optimization on a called 26 + * function but GCC currently (11/2020) lacks this attribute. The barrier is 27 + * used to inhibit tail calls in these cases. 28 + */ 29 + #ifdef __has_attribute 30 + #if __has_attribute(disable_tail_calls) 31 + #define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls)) 32 + #define NO_TAIL_CALL_BARRIER 33 + #endif 34 + #endif 35 + #ifndef NO_TAIL_CALL_ATTRIBUTE 36 + #define NO_TAIL_CALL_ATTRIBUTE 37 + #define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory"); 38 + #endif 39 + 23 40 static int mmap_handler(struct perf_tool *tool __maybe_unused, 24 41 union perf_event *event, 25 42 struct perf_sample *sample, ··· 108 91 return strcmp((const char *) symbol, funcs[idx]); 109 92 } 110 93 111 - noinline int test_dwarf_unwind__thread(struct thread *thread) 94 + NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread) 112 95 { 113 96 struct perf_sample sample; 114 97 unsigned long cnt = 0; ··· 139 122 140 123 static int global_unwind_retval = -INT_MAX; 141 124 142 - noinline int test_dwarf_unwind__compare(void *p1, void *p2) 125 + NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2) 143 126 { 144 127 /* Any possible value should be 'thread' */ 145 128 struct thread *thread = *(struct thread **)p1; ··· 158 141 return p1 - p2; 159 142 } 160 143 161 - noinline int test_dwarf_unwind__krava_3(struct thread *thread) 144 + NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread) 162 145 { 163 146 struct thread *array[2] = {thread, thread}; 164 147 void *fp = &bsearch; ··· 177 160 return global_unwind_retval; 178 161 } 179 162 180 - noinline int test_dwarf_unwind__krava_2(struct thread *thread) 
163 + NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread) 181 164 { 182 - return test_dwarf_unwind__krava_3(thread); 165 + int ret; 166 + 167 + ret = test_dwarf_unwind__krava_3(thread); 168 + NO_TAIL_CALL_BARRIER; 169 + return ret; 183 170 } 184 171 185 - noinline int test_dwarf_unwind__krava_1(struct thread *thread) 172 + NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread) 186 173 { 187 - return test_dwarf_unwind__krava_2(thread); 174 + int ret; 175 + 176 + ret = test_dwarf_unwind__krava_2(thread); 177 + NO_TAIL_CALL_BARRIER; 178 + return ret; 188 179 } 189 180 190 181 int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
+1 -1
tools/perf/util/config.c
··· 801 801 section->name, item->name); 802 802 ret = fn(key, value, data); 803 803 if (ret < 0) { 804 - pr_err("Error: wrong config key-value pair %s=%s\n", 804 + pr_err("Error in the given config file: wrong config key-value pair %s=%s\n", 805 805 key, value); 806 806 /* 807 807 * Can't be just a 'break', as perf_config_set__for_each_entry()
+5 -2
tools/testing/selftests/arm64/signal/test_signals_utils.c
··· 266 266 td->feats_supported |= FEAT_SSBS; 267 267 if (getauxval(AT_HWCAP) & HWCAP_SVE) 268 268 td->feats_supported |= FEAT_SVE; 269 - if (feats_ok(td)) 269 + if (feats_ok(td)) { 270 270 fprintf(stderr, 271 271 "Required Features: [%s] supported\n", 272 272 feats_to_string(td->feats_required & 273 273 td->feats_supported)); 274 - else 274 + } else { 275 275 fprintf(stderr, 276 276 "Required Features: [%s] NOT supported\n", 277 277 feats_to_string(td->feats_required & 278 278 ~td->feats_supported)); 279 + td->result = KSFT_SKIP; 280 + return 0; 281 + } 279 282 } 280 283 281 284 /* Perform test specific additional initialization */
+2 -1
tools/testing/selftests/bpf/Makefile
··· 375 375 $(TRUNNER_BPF_PROGS_DIR)/%.c \ 376 376 $(TRUNNER_BPF_PROGS_DIR)/*.h \ 377 377 $$(INCLUDE_DIR)/vmlinux.h \ 378 - $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT) 378 + $(wildcard $(BPFDIR)/bpf_*.h) \ 379 + | $(TRUNNER_OUTPUT) $$(BPFOBJ) 379 380 $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \ 380 381 $(TRUNNER_BPF_CFLAGS)) 381 382
+8 -5
tools/testing/selftests/bpf/test_lwt_ip_encap.sh
··· 112 112 ip netns add "${NS2}" 113 113 ip netns add "${NS3}" 114 114 115 + # rp_filter gets confused by what these tests are doing, so disable it 116 + ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0 117 + ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0 118 + ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0 119 + ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0 120 + ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0 121 + ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0 122 + 115 123 ip link add veth1 type veth peer name veth2 116 124 ip link add veth3 type veth peer name veth4 117 125 ip link add veth5 type veth peer name veth6 ··· 243 235 ip -netns ${NS3} -6 addr add ${IPv6_GRE} nodad dev gre6_dev 244 236 ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF} 245 237 ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF} 246 - 247 - # rp_filter gets confused by what these tests are doing, so disable it 248 - ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0 249 - ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0 250 - ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0 251 238 252 239 TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX) 253 240
+4 -1
tools/testing/selftests/drivers/dma-buf/udmabuf.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #define _GNU_SOURCE 3 + #define __EXPORTED_HEADERS__ 4 + 2 5 #include <stdio.h> 3 6 #include <stdlib.h> 4 7 #include <unistd.h> 5 8 #include <string.h> 6 9 #include <errno.h> 7 - #include <linux/fcntl.h> 10 + #include <fcntl.h> 8 11 #include <malloc.h> 9 12 10 13 #include <sys/ioctl.h>
+1 -1
tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
··· 1 1 #!/bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 - # Copyright 2020 NXP Semiconductors 3 + # Copyright 2020 NXP 4 4 5 5 WAIT_TIME=1 6 6 NUM_NETIFS=4
+2
tools/testing/selftests/kvm/.gitignore
··· 24 24 /x86_64/smm_test 25 25 /x86_64/state_test 26 26 /x86_64/svm_vmcall_test 27 + /x86_64/svm_int_ctl_test 27 28 /x86_64/sync_regs_test 28 29 /x86_64/tsc_msrs_test 29 30 /x86_64/userspace_msr_exit_test ··· 49 48 /kvm_page_table_test 50 49 /memslot_modification_stress_test 51 50 /memslot_perf_test 51 + /rseq_test 52 52 /set_memory_region_test 53 53 /steal_time 54 54 /kvm_binary_stats_test
+4
tools/testing/selftests/kvm/Makefile
··· 56 56 TEST_GEN_PROGS_x86_64 += x86_64/state_test 57 57 TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test 58 58 TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test 59 + TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test 59 60 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test 60 61 TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test 61 62 TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test ··· 81 80 TEST_GEN_PROGS_x86_64 += kvm_page_table_test 82 81 TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test 83 82 TEST_GEN_PROGS_x86_64 += memslot_perf_test 83 + TEST_GEN_PROGS_x86_64 += rseq_test 84 84 TEST_GEN_PROGS_x86_64 += set_memory_region_test 85 85 TEST_GEN_PROGS_x86_64 += steal_time 86 86 TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test ··· 95 93 TEST_GEN_PROGS_aarch64 += dirty_log_perf_test 96 94 TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus 97 95 TEST_GEN_PROGS_aarch64 += kvm_page_table_test 96 + TEST_GEN_PROGS_aarch64 += rseq_test 98 97 TEST_GEN_PROGS_aarch64 += set_memory_region_test 99 98 TEST_GEN_PROGS_aarch64 += steal_time 100 99 TEST_GEN_PROGS_aarch64 += kvm_binary_stats_test ··· 107 104 TEST_GEN_PROGS_s390x += dirty_log_test 108 105 TEST_GEN_PROGS_s390x += kvm_create_max_vcpus 109 106 TEST_GEN_PROGS_s390x += kvm_page_table_test 107 + TEST_GEN_PROGS_s390x += rseq_test 110 108 TEST_GEN_PROGS_s390x += set_memory_region_test 111 109 TEST_GEN_PROGS_s390x += kvm_binary_stats_test 112 110
+2 -4
tools/testing/selftests/kvm/access_tracking_perf_test.c
··· 371 371 printf(" -v: specify the number of vCPUs to run.\n"); 372 372 printf(" -o: Overlap guest memory accesses instead of partitioning\n" 373 373 " them into a separate region of memory for each vCPU.\n"); 374 - printf(" -s: specify the type of memory that should be used to\n" 375 - " back the guest data region.\n\n"); 376 - backing_src_help(); 374 + backing_src_help("-s"); 377 375 puts(""); 378 376 exit(0); 379 377 } ··· 379 381 int main(int argc, char *argv[]) 380 382 { 381 383 struct test_params params = { 382 - .backing_src = VM_MEM_SRC_ANONYMOUS, 384 + .backing_src = DEFAULT_VM_MEM_SRC, 383 385 .vcpu_memory_bytes = DEFAULT_PER_VCPU_MEM_SIZE, 384 386 .vcpus = 1, 385 387 };
+7 -8
tools/testing/selftests/kvm/demand_paging_test.c
··· 179 179 return NULL; 180 180 } 181 181 182 - if (!pollfd[0].revents & POLLIN) 182 + if (!(pollfd[0].revents & POLLIN)) 183 183 continue; 184 184 185 185 r = read(uffd, &msg, sizeof(msg)); ··· 416 416 { 417 417 puts(""); 418 418 printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n" 419 - " [-b memory] [-t type] [-v vcpus] [-o]\n", name); 419 + " [-b memory] [-s type] [-v vcpus] [-o]\n", name); 420 420 guest_modes_help(); 421 421 printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n" 422 422 " UFFD registration mode: 'MISSING' or 'MINOR'.\n"); ··· 426 426 printf(" -b: specify the size of the memory region which should be\n" 427 427 " demand paged by each vCPU. e.g. 10M or 3G.\n" 428 428 " Default: 1G\n"); 429 - printf(" -t: The type of backing memory to use. Default: anonymous\n"); 430 - backing_src_help(); 429 + backing_src_help("-s"); 431 430 printf(" -v: specify the number of vCPUs to run.\n"); 432 431 printf(" -o: Overlap guest memory accesses instead of partitioning\n" 433 432 " them into a separate region of memory for each vCPU.\n"); ··· 438 439 { 439 440 int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); 440 441 struct test_params p = { 441 - .src_type = VM_MEM_SRC_ANONYMOUS, 442 + .src_type = DEFAULT_VM_MEM_SRC, 442 443 .partition_vcpu_memory_access = true, 443 444 }; 444 445 int opt; 445 446 446 447 guest_modes_append_default(); 447 448 448 - while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) { 449 + while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) { 449 450 switch (opt) { 450 451 case 'm': 451 452 guest_modes_cmdline(optarg); ··· 464 465 case 'b': 465 466 guest_percpu_mem_size = parse_size(optarg); 466 467 break; 467 - case 't': 468 + case 's': 468 469 p.src_type = parse_backing_src_type(optarg); 469 470 break; 470 471 case 'v': ··· 484 485 485 486 if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR && 486 487 !backing_src_is_shared(p.src_type)) { 487 - TEST_FAIL("userfaultfd MINOR mode requires shared 
memory; pick a different -t"); 488 + TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s"); 488 489 } 489 490 490 491 for_each_guest_mode(run_test, &p);
+42 -20
tools/testing/selftests/kvm/dirty_log_perf_test.c
··· 118 118 toggle_dirty_logging(vm, slots, false); 119 119 } 120 120 121 - static void get_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap, 122 - uint64_t nr_pages) 121 + static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots) 123 122 { 124 - uint64_t slot_pages = nr_pages / slots; 125 123 int i; 126 124 127 125 for (i = 0; i < slots; i++) { 128 126 int slot = PERF_TEST_MEM_SLOT_INDEX + i; 129 - unsigned long *slot_bitmap = bitmap + i * slot_pages; 130 127 131 - kvm_vm_get_dirty_log(vm, slot, slot_bitmap); 128 + kvm_vm_get_dirty_log(vm, slot, bitmaps[i]); 132 129 } 133 130 } 134 131 135 - static void clear_dirty_log(struct kvm_vm *vm, int slots, unsigned long *bitmap, 136 - uint64_t nr_pages) 132 + static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], 133 + int slots, uint64_t pages_per_slot) 137 134 { 138 - uint64_t slot_pages = nr_pages / slots; 139 135 int i; 140 136 141 137 for (i = 0; i < slots; i++) { 142 138 int slot = PERF_TEST_MEM_SLOT_INDEX + i; 143 - unsigned long *slot_bitmap = bitmap + i * slot_pages; 144 139 145 - kvm_vm_clear_dirty_log(vm, slot, slot_bitmap, 0, slot_pages); 140 + kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot); 146 141 } 142 + } 143 + 144 + static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot) 145 + { 146 + unsigned long **bitmaps; 147 + int i; 148 + 149 + bitmaps = malloc(slots * sizeof(bitmaps[0])); 150 + TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array."); 151 + 152 + for (i = 0; i < slots; i++) { 153 + bitmaps[i] = bitmap_zalloc(pages_per_slot); 154 + TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap."); 155 + } 156 + 157 + return bitmaps; 158 + } 159 + 160 + static void free_bitmaps(unsigned long *bitmaps[], int slots) 161 + { 162 + int i; 163 + 164 + for (i = 0; i < slots; i++) 165 + free(bitmaps[i]); 166 + 167 + free(bitmaps); 147 168 } 148 169 149 170 static void run_test(enum vm_guest_mode mode, void *arg) ··· 172 151 
struct test_params *p = arg; 173 152 pthread_t *vcpu_threads; 174 153 struct kvm_vm *vm; 175 - unsigned long *bmap; 154 + unsigned long **bitmaps; 176 155 uint64_t guest_num_pages; 177 156 uint64_t host_num_pages; 157 + uint64_t pages_per_slot; 178 158 int vcpu_id; 179 159 struct timespec start; 180 160 struct timespec ts_diff; ··· 193 171 guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm); 194 172 guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages); 195 173 host_num_pages = vm_num_host_pages(mode, guest_num_pages); 196 - bmap = bitmap_zalloc(host_num_pages); 174 + pages_per_slot = host_num_pages / p->slots; 175 + 176 + bitmaps = alloc_bitmaps(p->slots, pages_per_slot); 197 177 198 178 if (dirty_log_manual_caps) { 199 179 cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2; ··· 263 239 iteration, ts_diff.tv_sec, ts_diff.tv_nsec); 264 240 265 241 clock_gettime(CLOCK_MONOTONIC, &start); 266 - get_dirty_log(vm, p->slots, bmap, host_num_pages); 242 + get_dirty_log(vm, bitmaps, p->slots); 267 243 ts_diff = timespec_elapsed(start); 268 244 get_dirty_log_total = timespec_add(get_dirty_log_total, 269 245 ts_diff); ··· 272 248 273 249 if (dirty_log_manual_caps) { 274 250 clock_gettime(CLOCK_MONOTONIC, &start); 275 - clear_dirty_log(vm, p->slots, bmap, host_num_pages); 251 + clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot); 276 252 ts_diff = timespec_elapsed(start); 277 253 clear_dirty_log_total = timespec_add(clear_dirty_log_total, 278 254 ts_diff); ··· 305 281 clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec); 306 282 } 307 283 308 - free(bmap); 284 + free_bitmaps(bitmaps, p->slots); 309 285 free(vcpu_threads); 310 286 perf_test_destroy_vm(vm); 311 287 } ··· 332 308 printf(" -v: specify the number of vCPUs to run.\n"); 333 309 printf(" -o: Overlap guest memory accesses instead of partitioning\n" 334 310 " them into a separate region of memory for each vCPU.\n"); 335 - printf(" -s: specify the type of memory that should be used 
to\n" 336 - " back the guest data region.\n\n"); 311 + backing_src_help("-s"); 337 312 printf(" -x: Split the memory region into this number of memslots.\n" 338 - " (default: 1)"); 339 - backing_src_help(); 313 + " (default: 1)\n"); 340 314 puts(""); 341 315 exit(0); 342 316 } ··· 346 324 .iterations = TEST_HOST_LOOP_N, 347 325 .wr_fract = 1, 348 326 .partition_vcpu_memory_access = true, 349 - .backing_src = VM_MEM_SRC_ANONYMOUS, 327 + .backing_src = DEFAULT_VM_MEM_SRC, 350 328 .slots = 1, 351 329 }; 352 330 int opt;
+6 -1
tools/testing/selftests/kvm/include/test_util.h
··· 90 90 NUM_SRC_TYPES, 91 91 }; 92 92 93 + #define DEFAULT_VM_MEM_SRC VM_MEM_SRC_ANONYMOUS 94 + 93 95 struct vm_mem_backing_src_alias { 94 96 const char *name; 95 97 uint32_t flag; 96 98 }; 99 + 100 + #define MIN_RUN_DELAY_NS 200000UL 97 101 98 102 bool thp_configured(void); 99 103 size_t get_trans_hugepagesz(void); 100 104 size_t get_def_hugetlb_pagesz(void); 101 105 const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i); 102 106 size_t get_backing_src_pagesz(uint32_t i); 103 - void backing_src_help(void); 107 + void backing_src_help(const char *flag); 104 108 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name); 109 + long get_run_delay(void); 105 110 106 111 /* 107 112 * Whether or not the given source type is shared memory (as opposed to
+17 -17
tools/testing/selftests/kvm/include/x86_64/processor.h
··· 312 312 } 313 313 } 314 314 315 - typedef unsigned long v1di __attribute__ ((vector_size (8))); 315 + #define GET_XMM(__xmm) \ 316 + ({ \ 317 + unsigned long __val; \ 318 + asm volatile("movq %%"#__xmm", %0" : "=r"(__val)); \ 319 + __val; \ 320 + }) 321 + 316 322 static inline unsigned long get_xmm(int n) 317 323 { 318 324 assert(n >= 0 && n <= 7); 319 325 320 - register v1di xmm0 __asm__("%xmm0"); 321 - register v1di xmm1 __asm__("%xmm1"); 322 - register v1di xmm2 __asm__("%xmm2"); 323 - register v1di xmm3 __asm__("%xmm3"); 324 - register v1di xmm4 __asm__("%xmm4"); 325 - register v1di xmm5 __asm__("%xmm5"); 326 - register v1di xmm6 __asm__("%xmm6"); 327 - register v1di xmm7 __asm__("%xmm7"); 328 326 switch (n) { 329 327 case 0: 330 - return (unsigned long)xmm0; 328 + return GET_XMM(xmm0); 331 329 case 1: 332 - return (unsigned long)xmm1; 330 + return GET_XMM(xmm1); 333 331 case 2: 334 - return (unsigned long)xmm2; 332 + return GET_XMM(xmm2); 335 333 case 3: 336 - return (unsigned long)xmm3; 334 + return GET_XMM(xmm3); 337 335 case 4: 338 - return (unsigned long)xmm4; 336 + return GET_XMM(xmm4); 339 337 case 5: 340 - return (unsigned long)xmm5; 338 + return GET_XMM(xmm5); 341 339 case 6: 342 - return (unsigned long)xmm6; 340 + return GET_XMM(xmm6); 343 341 case 7: 344 - return (unsigned long)xmm7; 342 + return GET_XMM(xmm7); 345 343 } 344 + 345 + /* never reached */ 346 346 return 0; 347 347 } 348 348
+2 -5
tools/testing/selftests/kvm/kvm_page_table_test.c
··· 456 456 " (default: 1G)\n"); 457 457 printf(" -v: specify the number of vCPUs to run\n" 458 458 " (default: 1)\n"); 459 - printf(" -s: specify the type of memory that should be used to\n" 460 - " back the guest data region.\n" 461 - " (default: anonymous)\n\n"); 462 - backing_src_help(); 459 + backing_src_help("-s"); 463 460 puts(""); 464 461 } 465 462 ··· 465 468 int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); 466 469 struct test_params p = { 467 470 .test_mem_size = DEFAULT_TEST_MEM_SIZE, 468 - .src_type = VM_MEM_SRC_ANONYMOUS, 471 + .src_type = DEFAULT_VM_MEM_SRC, 469 472 }; 470 473 int opt; 471 474
+34 -5
tools/testing/selftests/kvm/lib/test_util.c
··· 11 11 #include <stdlib.h> 12 12 #include <time.h> 13 13 #include <sys/stat.h> 14 + #include <sys/syscall.h> 14 15 #include <linux/mman.h> 15 16 #include "linux/kernel.h" 16 17 ··· 130 129 { 131 130 size_t size; 132 131 FILE *f; 132 + int ret; 133 133 134 134 TEST_ASSERT(thp_configured(), "THP is not configured in host kernel"); 135 135 136 136 f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r"); 137 137 TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size"); 138 138 139 - fscanf(f, "%ld", &size); 139 + ret = fscanf(f, "%ld", &size); 140 + ret = fscanf(f, "%ld", &size); 141 + TEST_ASSERT(ret < 1, "Error reading transparent_hugepage/hpage_pmd_size"); 140 142 fclose(f); 141 143 142 144 return size; ··· 283 279 } 284 280 } 285 281 286 - void backing_src_help(void) 282 + static void print_available_backing_src_types(const char *prefix) 287 283 { 288 284 int i; 289 285 290 - printf("Available backing src types:\n"); 286 + printf("%sAvailable backing src types:\n", prefix); 287 + 291 288 for (i = 0; i < NUM_SRC_TYPES; i++) 292 - printf("\t%s\n", vm_mem_backing_src_alias(i)->name); 289 + printf("%s %s\n", prefix, vm_mem_backing_src_alias(i)->name); 290 + } 291 + 292 + void backing_src_help(const char *flag) 293 + { 294 + printf(" %s: specify the type of memory that should be used to\n" 295 + " back the guest data region. 
(default: %s)\n", 296 + flag, vm_mem_backing_src_alias(DEFAULT_VM_MEM_SRC)->name); 297 + print_available_backing_src_types(" "); 293 298 } 294 299 295 300 enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name) ··· 309 296 if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name)) 310 297 return i; 311 298 312 - backing_src_help(); 299 + print_available_backing_src_types(""); 313 300 TEST_FAIL("Unknown backing src type: %s", type_name); 314 301 return -1; 302 + } 303 + 304 + long get_run_delay(void) 305 + { 306 + char path[64]; 307 + long val[2]; 308 + FILE *fp; 309 + 310 + sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid)); 311 + fp = fopen(path, "r"); 312 + /* Return MIN_RUN_DELAY_NS upon failure just to be safe */ 313 + if (fscanf(fp, "%ld %ld ", &val[0], &val[1]) < 2) 314 + val[1] = MIN_RUN_DELAY_NS; 315 + fclose(fp); 316 + 317 + return val[1]; 315 318 }
+286
tools/testing/selftests/kvm/rseq_test.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + #define _GNU_SOURCE /* for program_invocation_short_name */ 3 + #include <errno.h> 4 + #include <fcntl.h> 5 + #include <pthread.h> 6 + #include <sched.h> 7 + #include <stdio.h> 8 + #include <stdlib.h> 9 + #include <string.h> 10 + #include <signal.h> 11 + #include <syscall.h> 12 + #include <sys/ioctl.h> 13 + #include <sys/sysinfo.h> 14 + #include <asm/barrier.h> 15 + #include <linux/atomic.h> 16 + #include <linux/rseq.h> 17 + #include <linux/unistd.h> 18 + 19 + #include "kvm_util.h" 20 + #include "processor.h" 21 + #include "test_util.h" 22 + 23 + #define VCPU_ID 0 24 + 25 + static __thread volatile struct rseq __rseq = { 26 + .cpu_id = RSEQ_CPU_ID_UNINITIALIZED, 27 + }; 28 + 29 + /* 30 + * Use an arbitrary, bogus signature for configuring rseq, this test does not 31 + * actually enter an rseq critical section. 32 + */ 33 + #define RSEQ_SIG 0xdeadbeef 34 + 35 + /* 36 + * Any bug related to task migration is likely to be timing-dependent; perform 37 + * a large number of migrations to reduce the odds of a false negative. 38 + */ 39 + #define NR_TASK_MIGRATIONS 100000 40 + 41 + static pthread_t migration_thread; 42 + static cpu_set_t possible_mask; 43 + static int min_cpu, max_cpu; 44 + static bool done; 45 + 46 + static atomic_t seq_cnt; 47 + 48 + static void guest_code(void) 49 + { 50 + for (;;) 51 + GUEST_SYNC(0); 52 + } 53 + 54 + static void sys_rseq(int flags) 55 + { 56 + int r; 57 + 58 + r = syscall(__NR_rseq, &__rseq, sizeof(__rseq), flags, RSEQ_SIG); 59 + TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno)); 60 + } 61 + 62 + static int next_cpu(int cpu) 63 + { 64 + /* 65 + * Advance to the next CPU, skipping those that weren't in the original 66 + * affinity set. Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's 67 + * data storage is considered as opaque. Note, if this task is pinned 68 + * to a small set of discontigous CPUs, e.g. 
2 and 1023, this loop will 69 + * burn a lot cycles and the test will take longer than normal to 70 + * complete. 71 + */ 72 + do { 73 + cpu++; 74 + if (cpu > max_cpu) { 75 + cpu = min_cpu; 76 + TEST_ASSERT(CPU_ISSET(cpu, &possible_mask), 77 + "Min CPU = %d must always be usable", cpu); 78 + break; 79 + } 80 + } while (!CPU_ISSET(cpu, &possible_mask)); 81 + 82 + return cpu; 83 + } 84 + 85 + static void *migration_worker(void *ign) 86 + { 87 + cpu_set_t allowed_mask; 88 + int r, i, cpu; 89 + 90 + CPU_ZERO(&allowed_mask); 91 + 92 + for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) { 93 + CPU_SET(cpu, &allowed_mask); 94 + 95 + /* 96 + * Bump the sequence count twice to allow the reader to detect 97 + * that a migration may have occurred in between rseq and sched 98 + * CPU ID reads. An odd sequence count indicates a migration 99 + * is in-progress, while a completely different count indicates 100 + * a migration occurred since the count was last read. 101 + */ 102 + atomic_inc(&seq_cnt); 103 + 104 + /* 105 + * Ensure the odd count is visible while sched_getcpu() isn't 106 + * stable, i.e. while changing affinity is in-progress. 107 + */ 108 + smp_wmb(); 109 + r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask); 110 + TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)", 111 + errno, strerror(errno)); 112 + smp_wmb(); 113 + atomic_inc(&seq_cnt); 114 + 115 + CPU_CLR(cpu, &allowed_mask); 116 + 117 + /* 118 + * Wait 1-10us before proceeding to the next iteration and more 119 + * specifically, before bumping seq_cnt again. A delay is 120 + * needed on three fronts: 121 + * 122 + * 1. To allow sched_setaffinity() to prompt migration before 123 + * ioctl(KVM_RUN) enters the guest so that TIF_NOTIFY_RESUME 124 + * (or TIF_NEED_RESCHED, which indirectly leads to handling 125 + * NOTIFY_RESUME) is handled in KVM context. 
126 + * 127 + * If NOTIFY_RESUME/NEED_RESCHED is set after KVM enters 128 + * the guest, the guest will trigger a IO/MMIO exit all the 129 + * way to userspace and the TIF flags will be handled by 130 + * the generic "exit to userspace" logic, not by KVM. The 131 + * exit to userspace is necessary to give the test a chance 132 + * to check the rseq CPU ID (see #2). 133 + * 134 + * Alternatively, guest_code() could include an instruction 135 + * to trigger an exit that is handled by KVM, but any such 136 + * exit requires architecture specific code. 137 + * 138 + * 2. To let ioctl(KVM_RUN) make its way back to the test 139 + * before the next round of migration. The test's check on 140 + * the rseq CPU ID must wait for migration to complete in 141 + * order to avoid false positive, thus any kernel rseq bug 142 + * will be missed if the next migration starts before the 143 + * check completes. 144 + * 145 + * 3. To ensure the read-side makes efficient forward progress, 146 + * e.g. if sched_getcpu() involves a syscall. Stalling the 147 + * read-side means the test will spend more time waiting for 148 + * sched_getcpu() to stabilize and less time trying to hit 149 + * the timing-dependent bug. 150 + * 151 + * Because any bug in this area is likely to be timing-dependent, 152 + * run with a range of delays at 1us intervals from 1us to 10us 153 + * as a best effort to avoid tuning the test to the point where 154 + * it can hit _only_ the original bug and not detect future 155 + * regressions. 156 + * 157 + * The original bug can reproduce with a delay up to ~500us on 158 + * x86-64, but starts to require more iterations to reproduce 159 + * as the delay creeps above ~10us, and the average runtime of 160 + * each iteration obviously increases as well. Cap the delay 161 + * at 10us to keep test runtime reasonable while minimizing 162 + * potential coverage loss. 163 + * 164 + * The lower bound for reproducing the bug is likely below 1us, 165 + * e.g. 
failures occur on x86-64 with nanosleep(0), but at that 166 + * point the overhead of the syscall likely dominates the delay. 167 + * Use usleep() for simplicity and to avoid unnecessary kernel 168 + * dependencies. 169 + */ 170 + usleep((i % 10) + 1); 171 + } 172 + done = true; 173 + return NULL; 174 + } 175 + 176 + static int calc_min_max_cpu(void) 177 + { 178 + int i, cnt, nproc; 179 + 180 + if (CPU_COUNT(&possible_mask) < 2) 181 + return -EINVAL; 182 + 183 + /* 184 + * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that 185 + * this task is affined to in order to reduce the time spent querying 186 + * unusable CPUs, e.g. if this task is pinned to a small percentage of 187 + * total CPUs. 188 + */ 189 + nproc = get_nprocs_conf(); 190 + min_cpu = -1; 191 + max_cpu = -1; 192 + cnt = 0; 193 + 194 + for (i = 0; i < nproc; i++) { 195 + if (!CPU_ISSET(i, &possible_mask)) 196 + continue; 197 + if (min_cpu == -1) 198 + min_cpu = i; 199 + max_cpu = i; 200 + cnt++; 201 + } 202 + 203 + return (cnt < 2) ? -EINVAL : 0; 204 + } 205 + 206 + int main(int argc, char *argv[]) 207 + { 208 + int r, i, snapshot; 209 + struct kvm_vm *vm; 210 + u32 cpu, rseq_cpu; 211 + 212 + /* Tell stdout not to buffer its content */ 213 + setbuf(stdout, NULL); 214 + 215 + r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask); 216 + TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno, 217 + strerror(errno)); 218 + 219 + if (calc_min_max_cpu()) { 220 + print_skip("Only one usable CPU, task migration not possible"); 221 + exit(KSFT_SKIP); 222 + } 223 + 224 + sys_rseq(0); 225 + 226 + /* 227 + * Create and run a dummy VM that immediately exits to userspace via 228 + * GUEST_SYNC, while concurrently migrating the process by setting its 229 + * CPU affinity. 
230 + */ 231 + vm = vm_create_default(VCPU_ID, 0, guest_code); 232 + ucall_init(vm, NULL); 233 + 234 + pthread_create(&migration_thread, NULL, migration_worker, 0); 235 + 236 + for (i = 0; !done; i++) { 237 + vcpu_run(vm, VCPU_ID); 238 + TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC, 239 + "Guest failed?"); 240 + 241 + /* 242 + * Verify rseq's CPU matches sched's CPU. Ensure migration 243 + * doesn't occur between sched_getcpu() and reading the rseq 244 + * cpu_id by rereading both if the sequence count changes, or 245 + * if the count is odd (migration in-progress). 246 + */ 247 + do { 248 + /* 249 + * Drop bit 0 to force a mismatch if the count is odd, 250 + * i.e. if a migration is in-progress. 251 + */ 252 + snapshot = atomic_read(&seq_cnt) & ~1; 253 + 254 + /* 255 + * Ensure reading sched_getcpu() and rseq.cpu_id 256 + * complete in a single "no migration" window, i.e. are 257 + * not reordered across the seq_cnt reads. 258 + */ 259 + smp_rmb(); 260 + cpu = sched_getcpu(); 261 + rseq_cpu = READ_ONCE(__rseq.cpu_id); 262 + smp_rmb(); 263 + } while (snapshot != atomic_read(&seq_cnt)); 264 + 265 + TEST_ASSERT(rseq_cpu == cpu, 266 + "rseq CPU = %d, sched CPU = %d\n", rseq_cpu, cpu); 267 + } 268 + 269 + /* 270 + * Sanity check that the test was able to enter the guest a reasonable 271 + * number of times, e.g. didn't get stalled too often/long waiting for 272 + * sched_getcpu() to stabilize. A 2:1 migration:KVM_RUN ratio is a 273 + * fairly conservative ratio on x86-64, which can do _more_ KVM_RUNs 274 + * than migrations given the 1us+ delay in the migration task. 275 + */ 276 + TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2), 277 + "Only performed %d KVM_RUNs, task stalled too much?\n", i); 278 + 279 + pthread_join(migration_thread, NULL); 280 + 281 + kvm_vm_free(vm); 282 + 283 + sys_rseq(RSEQ_FLAG_UNREGISTER); 284 + 285 + return 0; 286 + }
+2 -18
tools/testing/selftests/kvm/steal_time.c
··· 10 10 #include <sched.h> 11 11 #include <pthread.h> 12 12 #include <linux/kernel.h> 13 - #include <sys/syscall.h> 14 13 #include <asm/kvm.h> 15 14 #include <asm/kvm_para.h> 16 15 ··· 19 20 20 21 #define NR_VCPUS 4 21 22 #define ST_GPA_BASE (1 << 30) 22 - #define MIN_RUN_DELAY_NS 200000UL 23 23 24 24 static void *st_gva[NR_VCPUS]; 25 25 static uint64_t guest_stolen_time[NR_VCPUS]; ··· 116 118 uint64_t st_time; 117 119 }; 118 120 119 - static int64_t smccc(uint32_t func, uint32_t arg) 121 + static int64_t smccc(uint32_t func, uint64_t arg) 120 122 { 121 123 unsigned long ret; 122 124 123 125 asm volatile( 124 - "mov x0, %1\n" 126 + "mov w0, %w1\n" 125 127 "mov x1, %2\n" 126 128 "hvc #0\n" 127 129 "mov %0, x0\n" ··· 214 216 } 215 217 216 218 #endif 217 - 218 - static long get_run_delay(void) 219 - { 220 - char path[64]; 221 - long val[2]; 222 - FILE *fp; 223 - 224 - sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid)); 225 - fp = fopen(path, "r"); 226 - fscanf(fp, "%ld %ld ", &val[0], &val[1]); 227 - fclose(fp); 228 - 229 - return val[1]; 230 - } 231 219 232 220 static void *do_steal_time(void *arg) 233 221 {
+2 -1
tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
··· 82 82 FILE *f; 83 83 84 84 f = popen("dmesg | grep \"WARNING:\" | wc -l", "r"); 85 - fscanf(f, "%d", &warnings); 85 + if (fscanf(f, "%d", &warnings) < 1) 86 + warnings = 0; 86 87 fclose(f); 87 88 88 89 return warnings;
+128
tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * svm_int_ctl_test 4 + * 5 + * Copyright (C) 2021, Red Hat, Inc. 6 + * 7 + * Nested SVM testing: test simultaneous use of V_IRQ from L1 and L0. 8 + */ 9 + 10 + #include "test_util.h" 11 + #include "kvm_util.h" 12 + #include "processor.h" 13 + #include "svm_util.h" 14 + #include "apic.h" 15 + 16 + #define VCPU_ID 0 17 + 18 + static struct kvm_vm *vm; 19 + 20 + bool vintr_irq_called; 21 + bool intr_irq_called; 22 + 23 + #define VINTR_IRQ_NUMBER 0x20 24 + #define INTR_IRQ_NUMBER 0x30 25 + 26 + static void vintr_irq_handler(struct ex_regs *regs) 27 + { 28 + vintr_irq_called = true; 29 + } 30 + 31 + static void intr_irq_handler(struct ex_regs *regs) 32 + { 33 + x2apic_write_reg(APIC_EOI, 0x00); 34 + intr_irq_called = true; 35 + } 36 + 37 + static void l2_guest_code(struct svm_test_data *svm) 38 + { 39 + /* This code raises interrupt INTR_IRQ_NUMBER in the L1's LAPIC, 40 + * and since L1 didn't enable virtual interrupt masking, 41 + * L2 should receive it and not L1. 42 + * 43 + * L2 also has virtual interrupt 'VINTR_IRQ_NUMBER' pending in V_IRQ 44 + * so it should also receive it after the following 'sti'. 45 + */ 46 + x2apic_write_reg(APIC_ICR, 47 + APIC_DEST_SELF | APIC_INT_ASSERT | INTR_IRQ_NUMBER); 48 + 49 + __asm__ __volatile__( 50 + "sti\n" 51 + "nop\n" 52 + ); 53 + 54 + GUEST_ASSERT(vintr_irq_called); 55 + GUEST_ASSERT(intr_irq_called); 56 + 57 + __asm__ __volatile__( 58 + "vmcall\n" 59 + ); 60 + } 61 + 62 + static void l1_guest_code(struct svm_test_data *svm) 63 + { 64 + #define L2_GUEST_STACK_SIZE 64 65 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; 66 + struct vmcb *vmcb = svm->vmcb; 67 + 68 + x2apic_enable(); 69 + 70 + /* Prepare for L2 execution. 
*/ 71 + generic_svm_setup(svm, l2_guest_code, 72 + &l2_guest_stack[L2_GUEST_STACK_SIZE]); 73 + 74 + /* No virtual interrupt masking */ 75 + vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 76 + 77 + /* No intercepts for real and virtual interrupts */ 78 + vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR); 79 + 80 + /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */ 81 + vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT); 82 + vmcb->control.int_vector = VINTR_IRQ_NUMBER; 83 + 84 + run_guest(vmcb, svm->vmcb_gpa); 85 + GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL); 86 + GUEST_DONE(); 87 + } 88 + 89 + int main(int argc, char *argv[]) 90 + { 91 + vm_vaddr_t svm_gva; 92 + 93 + nested_svm_check_supported(); 94 + 95 + vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); 96 + 97 + vm_init_descriptor_tables(vm); 98 + vcpu_init_descriptor_tables(vm, VCPU_ID); 99 + 100 + vm_install_exception_handler(vm, VINTR_IRQ_NUMBER, vintr_irq_handler); 101 + vm_install_exception_handler(vm, INTR_IRQ_NUMBER, intr_irq_handler); 102 + 103 + vcpu_alloc_svm(vm, &svm_gva); 104 + vcpu_args_set(vm, VCPU_ID, 1, svm_gva); 105 + 106 + struct kvm_run *run = vcpu_state(vm, VCPU_ID); 107 + struct ucall uc; 108 + 109 + vcpu_run(vm, VCPU_ID); 110 + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, 111 + "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", 112 + run->exit_reason, 113 + exit_reason_str(run->exit_reason)); 114 + 115 + switch (get_ucall(vm, VCPU_ID, &uc)) { 116 + case UCALL_ABORT: 117 + TEST_FAIL("%s", (const char *)uc.args[0]); 118 + break; 119 + /* NOT REACHED */ 120 + case UCALL_DONE: 121 + goto done; 122 + default: 123 + TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd); 124 + } 125 + done: 126 + kvm_vm_free(vm); 127 + return 0; 128 + }
-15
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
··· 14 14 #include <stdint.h> 15 15 #include <time.h> 16 16 #include <sched.h> 17 - #include <sys/syscall.h> 18 17 19 18 #define VCPU_ID 5 20 19 ··· 95 96 GUEST_ASSERT(rs->time[RUNSTATE_runnable] >= MIN_STEAL_TIME); 96 97 97 98 GUEST_DONE(); 98 - } 99 - 100 - static long get_run_delay(void) 101 - { 102 - char path[64]; 103 - long val[2]; 104 - FILE *fp; 105 - 106 - sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid)); 107 - fp = fopen(path, "r"); 108 - fscanf(fp, "%ld %ld ", &val[0], &val[1]); 109 - fclose(fp); 110 - 111 - return val[1]; 112 99 } 113 100 114 101 static int cmp_timespec(struct timespec *a, struct timespec *b)
+1
tools/testing/selftests/lib.mk
··· 48 48 # When local build is done, headers are installed in the default 49 49 # INSTALL_HDR_PATH usr/include. 50 50 .PHONY: khdr 51 + .NOTPARALLEL: 51 52 khdr: 52 53 ifndef KSFT_KHDR_INSTALL_DONE 53 54 ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
+1 -4
tools/testing/selftests/net/af_unix/Makefile
··· 1 - ##TEST_GEN_FILES := test_unix_oob 2 - TEST_PROGS := test_unix_oob 1 + TEST_GEN_PROGS := test_unix_oob 3 2 include ../../lib.mk 4 - 5 - all: $(TEST_PROGS)
+3 -2
tools/testing/selftests/net/af_unix/test_unix_oob.c
··· 271 271 read_oob(pfd, &oob); 272 272 273 273 if (!signal_recvd || len != 127 || oob != '%' || atmark != 1) { 274 - fprintf(stderr, "Test 3 failed, sigurg %d len %d OOB %c ", 275 - "atmark %d\n", signal_recvd, len, oob, atmark); 274 + fprintf(stderr, 275 + "Test 3 failed, sigurg %d len %d OOB %c atmark %d\n", 276 + signal_recvd, len, oob, atmark); 276 277 die(1); 277 278 } 278 279
+309
tools/testing/selftests/netfilter/nft_nat_zones.sh
··· 1 + #!/bin/bash 2 + # 3 + # Test connection tracking zone and NAT source port reallocation support. 4 + # 5 + 6 + # Kselftest framework requirement - SKIP code is 4. 7 + ksft_skip=4 8 + 9 + # Don't increase too much, 2000 clients should work 10 + # just fine but script can then take several minutes with 11 + # KASAN/debug builds. 12 + maxclients=100 13 + 14 + have_iperf=1 15 + ret=0 16 + 17 + # client1---. 18 + # veth1-. 19 + # | 20 + # NAT Gateway --veth0--> Server 21 + # | | 22 + # veth2-' | 23 + # client2---' | 24 + # .... | 25 + # clientX----vethX---' 26 + 27 + # All clients share identical IP address. 28 + # NAT Gateway uses policy routing and conntrack zones to isolate client 29 + # namespaces. Each client connects to Server, each with colliding tuples: 30 + # clientsaddr:10000 -> serveraddr:dport 31 + # NAT Gateway is supposed to do port reallocation for each of the 32 + # connections. 33 + 34 + sfx=$(mktemp -u "XXXXXXXX") 35 + gw="ns-gw-$sfx" 36 + cl1="ns-cl1-$sfx" 37 + cl2="ns-cl2-$sfx" 38 + srv="ns-srv-$sfx" 39 + 40 + v4gc1=$(sysctl -n net.ipv4.neigh.default.gc_thresh1 2>/dev/null) 41 + v4gc2=$(sysctl -n net.ipv4.neigh.default.gc_thresh2 2>/dev/null) 42 + v4gc3=$(sysctl -n net.ipv4.neigh.default.gc_thresh3 2>/dev/null) 43 + v6gc1=$(sysctl -n net.ipv6.neigh.default.gc_thresh1 2>/dev/null) 44 + v6gc2=$(sysctl -n net.ipv6.neigh.default.gc_thresh2 2>/dev/null) 45 + v6gc3=$(sysctl -n net.ipv6.neigh.default.gc_thresh3 2>/dev/null) 46 + 47 + cleanup() 48 + { 49 + ip netns del $gw 50 + ip netns del $srv 51 + for i in $(seq 1 $maxclients); do 52 + ip netns del ns-cl$i-$sfx 2>/dev/null 53 + done 54 + 55 + sysctl -q net.ipv4.neigh.default.gc_thresh1=$v4gc1 2>/dev/null 56 + sysctl -q net.ipv4.neigh.default.gc_thresh2=$v4gc2 2>/dev/null 57 + sysctl -q net.ipv4.neigh.default.gc_thresh3=$v4gc3 2>/dev/null 58 + sysctl -q net.ipv6.neigh.default.gc_thresh1=$v6gc1 2>/dev/null 59 + sysctl -q net.ipv6.neigh.default.gc_thresh2=$v6gc2 2>/dev/null 60 + sysctl -q 
net.ipv6.neigh.default.gc_thresh3=$v6gc3 2>/dev/null 61 + } 62 + 63 + nft --version > /dev/null 2>&1 64 + if [ $? -ne 0 ];then 65 + echo "SKIP: Could not run test without nft tool" 66 + exit $ksft_skip 67 + fi 68 + 69 + ip -Version > /dev/null 2>&1 70 + if [ $? -ne 0 ];then 71 + echo "SKIP: Could not run test without ip tool" 72 + exit $ksft_skip 73 + fi 74 + 75 + conntrack -V > /dev/null 2>&1 76 + if [ $? -ne 0 ];then 77 + echo "SKIP: Could not run test without conntrack tool" 78 + exit $ksft_skip 79 + fi 80 + 81 + iperf3 -v >/dev/null 2>&1 82 + if [ $? -ne 0 ];then 83 + have_iperf=0 84 + fi 85 + 86 + ip netns add "$gw" 87 + if [ $? -ne 0 ];then 88 + echo "SKIP: Could not create net namespace $gw" 89 + exit $ksft_skip 90 + fi 91 + ip -net "$gw" link set lo up 92 + 93 + trap cleanup EXIT 94 + 95 + ip netns add "$srv" 96 + if [ $? -ne 0 ];then 97 + echo "SKIP: Could not create server netns $srv" 98 + exit $ksft_skip 99 + fi 100 + 101 + ip link add veth0 netns "$gw" type veth peer name eth0 netns "$srv" 102 + ip -net "$gw" link set veth0 up 103 + ip -net "$srv" link set lo up 104 + ip -net "$srv" link set eth0 up 105 + 106 + sysctl -q net.ipv6.neigh.default.gc_thresh1=512 2>/dev/null 107 + sysctl -q net.ipv6.neigh.default.gc_thresh2=1024 2>/dev/null 108 + sysctl -q net.ipv6.neigh.default.gc_thresh3=4096 2>/dev/null 109 + sysctl -q net.ipv4.neigh.default.gc_thresh1=512 2>/dev/null 110 + sysctl -q net.ipv4.neigh.default.gc_thresh2=1024 2>/dev/null 111 + sysctl -q net.ipv4.neigh.default.gc_thresh3=4096 2>/dev/null 112 + 113 + for i in $(seq 1 $maxclients);do 114 + cl="ns-cl$i-$sfx" 115 + 116 + ip netns add "$cl" 117 + if [ $? -ne 0 ];then 118 + echo "SKIP: Could not create client netns $cl" 119 + exit $ksft_skip 120 + fi 121 + ip link add veth$i netns "$gw" type veth peer name eth0 netns "$cl" > /dev/null 2>&1 122 + if [ $? 
-ne 0 ];then 123 + echo "SKIP: No virtual ethernet pair device support in kernel" 124 + exit $ksft_skip 125 + fi 126 + done 127 + 128 + for i in $(seq 1 $maxclients);do 129 + cl="ns-cl$i-$sfx" 130 + echo netns exec "$cl" ip link set lo up 131 + echo netns exec "$cl" ip link set eth0 up 132 + echo netns exec "$cl" sysctl -q net.ipv4.tcp_syn_retries=2 133 + echo netns exec "$gw" ip link set veth$i up 134 + echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.arp_ignore=2 135 + echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.rp_filter=0 136 + 137 + # clients have same IP addresses. 138 + echo netns exec "$cl" ip addr add 10.1.0.3/24 dev eth0 139 + echo netns exec "$cl" ip addr add dead:1::3/64 dev eth0 140 + echo netns exec "$cl" ip route add default via 10.1.0.2 dev eth0 141 + echo netns exec "$cl" ip route add default via dead:1::2 dev eth0 142 + 143 + # NB: same addresses on client-facing interfaces. 144 + echo netns exec "$gw" ip addr add 10.1.0.2/24 dev veth$i 145 + echo netns exec "$gw" ip addr add dead:1::2/64 dev veth$i 146 + 147 + # gw: policy routing 148 + echo netns exec "$gw" ip route add 10.1.0.0/24 dev veth$i table $((1000+i)) 149 + echo netns exec "$gw" ip route add dead:1::0/64 dev veth$i table $((1000+i)) 150 + echo netns exec "$gw" ip route add 10.3.0.0/24 dev veth0 table $((1000+i)) 151 + echo netns exec "$gw" ip route add dead:3::0/64 dev veth0 table $((1000+i)) 152 + echo netns exec "$gw" ip rule add fwmark $i lookup $((1000+i)) 153 + done | ip -batch /dev/stdin 154 + 155 + ip -net "$gw" addr add 10.3.0.1/24 dev veth0 156 + ip -net "$gw" addr add dead:3::1/64 dev veth0 157 + 158 + ip -net "$srv" addr add 10.3.0.99/24 dev eth0 159 + ip -net "$srv" addr add dead:3::99/64 dev eth0 160 + 161 + ip netns exec $gw nft -f /dev/stdin<<EOF 162 + table inet raw { 163 + map iiftomark { 164 + type ifname : mark 165 + } 166 + 167 + map iiftozone { 168 + typeof iifname : ct zone 169 + } 170 + 171 + set inicmp { 172 + flags dynamic 173 + type ipv4_addr . 
ifname . ipv4_addr 174 + } 175 + set inflows { 176 + flags dynamic 177 + type ipv4_addr . inet_service . ifname . ipv4_addr . inet_service 178 + } 179 + 180 + set inflows6 { 181 + flags dynamic 182 + type ipv6_addr . inet_service . ifname . ipv6_addr . inet_service 183 + } 184 + 185 + chain prerouting { 186 + type filter hook prerouting priority -64000; policy accept; 187 + ct original zone set meta iifname map @iiftozone 188 + meta mark set meta iifname map @iiftomark 189 + 190 + tcp flags & (syn|ack) == ack add @inflows { ip saddr . tcp sport . meta iifname . ip daddr . tcp dport counter } 191 + add @inflows6 { ip6 saddr . tcp sport . meta iifname . ip6 daddr . tcp dport counter } 192 + ip protocol icmp add @inicmp { ip saddr . meta iifname . ip daddr counter } 193 + } 194 + 195 + chain nat_postrouting { 196 + type nat hook postrouting priority 0; policy accept; 197 + ct mark set meta mark meta oifname veth0 masquerade 198 + } 199 + 200 + chain mangle_prerouting { 201 + type filter hook prerouting priority -100; policy accept; 202 + ct direction reply meta mark set ct mark 203 + } 204 + } 205 + EOF 206 + 207 + ( echo add element inet raw iiftomark \{ 208 + for i in $(seq 1 $((maxclients-1))); do 209 + echo \"veth$i\" : $i, 210 + done 211 + echo \"veth$maxclients\" : $maxclients \} 212 + echo add element inet raw iiftozone \{ 213 + for i in $(seq 1 $((maxclients-1))); do 214 + echo \"veth$i\" : $i, 215 + done 216 + echo \"veth$maxclients\" : $maxclients \} 217 + ) | ip netns exec $gw nft -f /dev/stdin 218 + 219 + ip netns exec "$gw" sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null 220 + ip netns exec "$gw" sysctl -q net.ipv6.conf.all.forwarding=1 > /dev/null 221 + ip netns exec "$gw" sysctl -q net.ipv4.conf.all.rp_filter=0 >/dev/null 222 + 223 + # useful for debugging: allows to use 'ping' from clients to gateway. 
224 + ip netns exec "$gw" sysctl -q net.ipv4.fwmark_reflect=1 > /dev/null 225 + ip netns exec "$gw" sysctl -q net.ipv6.fwmark_reflect=1 > /dev/null 226 + 227 + for i in $(seq 1 $maxclients); do 228 + cl="ns-cl$i-$sfx" 229 + ip netns exec $cl ping -i 0.5 -q -c 3 10.3.0.99 > /dev/null 2>&1 & 230 + if [ $? -ne 0 ]; then 231 + echo FAIL: Ping failure from $cl 1>&2 232 + ret=1 233 + break 234 + fi 235 + done 236 + 237 + wait 238 + 239 + for i in $(seq 1 $maxclients); do 240 + ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" | grep -q "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 counter packets 3 bytes 252 }" 241 + if [ $? -ne 0 ];then 242 + ret=1 243 + echo "FAIL: counter icmp mismatch for veth$i" 1>&2 244 + ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" 1>&2 245 + break 246 + fi 247 + done 248 + 249 + ip netns exec $gw nft get element inet raw inicmp "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 }" | grep -q "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }" 250 + if [ $? -ne 0 ];then 251 + ret=1 252 + echo "FAIL: counter icmp mismatch for veth0: { 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }" 253 + ip netns exec $gw nft get element inet raw inicmp "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 }" 1>&2 254 + fi 255 + 256 + if [ $ret -eq 0 ]; then 257 + echo "PASS: ping test from all $maxclients namespaces" 258 + fi 259 + 260 + if [ $have_iperf -eq 0 ];then 261 + echo "SKIP: iperf3 not installed" 262 + if [ $ret -ne 0 ];then 263 + exit $ret 264 + fi 265 + exit $ksft_skip 266 + fi 267 + 268 + ip netns exec $srv iperf3 -s > /dev/null 2>&1 & 269 + iperfpid=$! 270 + sleep 1 271 + 272 + for i in $(seq 1 $maxclients); do 273 + if [ $ret -ne 0 ]; then 274 + break 275 + fi 276 + cl="ns-cl$i-$sfx" 277 + ip netns exec $cl iperf3 -c 10.3.0.99 --cport 10000 -n 1 > /dev/null 278 + if [ $?
-ne 0 ]; then 279 + echo FAIL: Failure to connect for $cl 1>&2 280 + ip netns exec $gw conntrack -S 1>&2 281 + ret=1 282 + fi 283 + done 284 + if [ $ret -eq 0 ];then 285 + echo "PASS: iperf3 connections for all $maxclients net namespaces" 286 + fi 287 + 288 + kill $iperfpid 289 + wait 290 + 291 + for i in $(seq 1 $maxclients); do 292 + ip netns exec $gw nft get element inet raw inflows "{ 10.1.0.3 . 10000 . \"veth$i\" . 10.3.0.99 . 5201 }" > /dev/null 293 + if [ $? -ne 0 ];then 294 + ret=1 295 + echo "FAIL: can't find expected tcp entry for veth$i" 1>&2 296 + break 297 + fi 298 + done 299 + if [ $ret -eq 0 ];then 300 + echo "PASS: Found client connection for all $maxclients net namespaces" 301 + fi 302 + 303 + ip netns exec $gw nft get element inet raw inflows "{ 10.3.0.99 . 5201 . \"veth0\" . 10.3.0.1 . 10000 }" > /dev/null 304 + if [ $? -ne 0 ];then 305 + ret=1 306 + echo "FAIL: cannot find return entry on veth0" 1>&2 307 + fi 308 + 309 + exit $ret
+156
tools/testing/selftests/netfilter/nft_zones_many.sh
··· 1 + #!/bin/bash 2 + 3 + # Test insertion speed for packets with identical addresses/ports 4 + # that are all placed in distinct conntrack zones. 5 + 6 + sfx=$(mktemp -u "XXXXXXXX") 7 + ns="ns-$sfx" 8 + 9 + # Kselftest framework requirement - SKIP code is 4. 10 + ksft_skip=4 11 + 12 + zones=20000 13 + have_ct_tool=0 14 + ret=0 15 + 16 + cleanup() 17 + { 18 + ip netns del $ns 19 + } 20 + 21 + ip netns add $ns 22 + if [ $? -ne 0 ];then 23 + echo "SKIP: Could not create net namespace $ns" 24 + exit $ksft_skip 25 + fi 26 + 27 + trap cleanup EXIT 28 + 29 + conntrack -V > /dev/null 2>&1 30 + if [ $? -eq 0 ];then 31 + have_ct_tool=1 32 + fi 33 + 34 + ip -net "$ns" link set lo up 35 + 36 + test_zones() { 37 + local max_zones=$1 38 + 39 + ip netns exec $ns sysctl -q net.netfilter.nf_conntrack_udp_timeout=3600 40 + ip netns exec $ns nft -f /dev/stdin<<EOF 41 + flush ruleset 42 + table inet raw { 43 + map rndzone { 44 + typeof numgen inc mod $max_zones : ct zone 45 + } 46 + 47 + chain output { 48 + type filter hook output priority -64000; policy accept; 49 + udp dport 12345 ct zone set numgen inc mod 65536 map @rndzone 50 + } 51 + } 52 + EOF 53 + ( 54 + echo "add element inet raw rndzone {" 55 + for i in $(seq 1 $max_zones);do 56 + echo -n "$i : $i" 57 + if [ $i -lt $max_zones ]; then 58 + echo "," 59 + else 60 + echo "}" 61 + fi 62 + done 63 + ) | ip netns exec $ns nft -f /dev/stdin 64 + 65 + local i=0 66 + local j=0 67 + local outerstart=$(date +%s%3N) 68 + local stop=$outerstart 69 + 70 + while [ $i -lt $max_zones ]; do 71 + local start=$(date +%s%3N) 72 + i=$((i + 10000)) 73 + j=$((j + 1)) 74 + dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null 75 + if [ $?
-ne 0 ] ;then 76 + ret=1 77 + break 78 + fi 79 + 80 + stop=$(date +%s%3N) 81 + local duration=$((stop-start)) 82 + echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)" 83 + done 84 + 85 + if [ $have_ct_tool -eq 1 ]; then 86 + local count=$(ip netns exec "$ns" conntrack -C) 87 + local duration=$((stop-outerstart)) 88 + 89 + if [ $count -eq $max_zones ]; then 90 + echo "PASS: inserted $count entries from packet path in $duration ms total" 91 + else 92 + ip netns exec $ns conntrack -S 1>&2 93 + echo "FAIL: inserted $count entries from packet path in $duration ms total, expected $max_zones entries" 94 + ret=1 95 + fi 96 + fi 97 + 98 + if [ $ret -ne 0 ];then 99 + echo "FAIL: insert $max_zones entries from packet path" 1>&2 100 + fi 101 + } 102 + 103 + test_conntrack_tool() { 104 + local max_zones=$1 105 + 106 + ip netns exec $ns conntrack -F >/dev/null 2>/dev/null 107 + 108 + local outerstart=$(date +%s%3N) 109 + local start=$(date +%s%3N) 110 + local stop=$start 111 + local i=0 112 + while [ $i -lt $max_zones ]; do 113 + i=$((i + 1)) 114 + ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \ 115 + --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i >/dev/null 2>&1 116 + if [ $? 
-ne 0 ];then 117 + ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \ 118 + --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i > /dev/null 119 + echo "FAIL: conntrack -I returned an error" 120 + ret=1 121 + break 122 + fi 123 + 124 + if [ $((i%10000)) -eq 0 ];then 125 + stop=$(date +%s%3N) 126 + 127 + local duration=$((stop-start)) 128 + echo "PASS: added 10000 entries in $duration ms (now $i total)" 129 + start=$stop 130 + fi 131 + done 132 + 133 + local count=$(ip netns exec "$ns" conntrack -C) 134 + local duration=$((stop-outerstart)) 135 + 136 + if [ $count -eq $max_zones ]; then 137 + echo "PASS: inserted $count entries via ctnetlink in $duration ms" 138 + else 139 + ip netns exec $ns conntrack -S 1>&2 140 + echo "FAIL: inserted $count entries via ctnetlink in $duration ms, expected $max_zones entries ($duration ms)" 141 + ret=1 142 + fi 143 + } 144 + 145 + test_zones $zones 146 + 147 + if [ $have_ct_tool -eq 1 ];then 148 + test_conntrack_tool $zones 149 + else 150 + echo "SKIP: Could not run ctnetlink insertion test without conntrack tool" 151 + if [ $ret -eq 0 ];then 152 + exit $ksft_skip 153 + fi 154 + fi 155 + 156 + exit $ret
+8 -6
tools/usb/testusb.c
··· 265 265 } 266 266 267 267 entry->ifnum = ifnum; 268 - 269 - /* FIXME update USBDEVFS_CONNECTINFO so it tells about high speed etc */ 270 - 271 - fprintf(stderr, "%s speed\t%s\t%u\n", 272 - speed(entry->speed), entry->name, entry->ifnum); 273 - 274 268 entry->next = testdevs; 275 269 testdevs = entry; 276 270 return 0; ··· 292 298 perror ("can't open dev file r/w"); 293 299 return 0; 294 300 } 301 + 302 + status = ioctl(fd, USBDEVFS_GET_SPEED, NULL); 303 + if (status < 0) 304 + fprintf(stderr, "USBDEVFS_GET_SPEED failed %d\n", status); 305 + else 306 + dev->speed = status; 307 + fprintf(stderr, "%s speed\t%s\t%u\n", 308 + speed(dev->speed), dev->name, dev->ifnum); 295 309 296 310 restart: 297 311 for (i = 0; i < TEST_CASES; i++) {
+1 -1
tools/vm/page-types.c
··· 1331 1331 if (opt_list && opt_list_mapcnt) 1332 1332 kpagecount_fd = checked_open(PROC_KPAGECOUNT, O_RDONLY); 1333 1333 1334 - if (opt_mark_idle && opt_file) 1334 + if (opt_mark_idle) 1335 1335 page_idle_fd = checked_open(SYS_KERNEL_MM_PAGE_IDLE, O_RDWR); 1336 1336 1337 1337 if (opt_list && opt_pid)
+49 -19
virt/kvm/kvm_main.c
··· 235 235 { 236 236 } 237 237 238 - static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait) 238 + static inline bool kvm_kick_many_cpus(cpumask_var_t tmp, bool wait) 239 239 { 240 - if (unlikely(!cpus)) 240 + const struct cpumask *cpus; 241 + 242 + if (likely(cpumask_available(tmp))) 243 + cpus = tmp; 244 + else 241 245 cpus = cpu_online_mask; 242 246 243 247 if (cpumask_empty(cpus)) ··· 267 263 continue; 268 264 269 265 kvm_make_request(req, vcpu); 270 - cpu = vcpu->cpu; 271 266 272 267 if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu)) 273 268 continue; 274 269 275 - if (tmp != NULL && cpu != -1 && cpu != me && 276 - kvm_request_needs_ipi(vcpu, req)) 277 - __cpumask_set_cpu(cpu, tmp); 270 + /* 271 + * tmp can be "unavailable" if cpumasks are allocated off stack 272 + * as allocation of the mask is deliberately not fatal and is 273 + * handled by falling back to kicking all online CPUs. 274 + */ 275 + if (!cpumask_available(tmp)) 276 + continue; 277 + 278 + /* 279 + * Note, the vCPU could get migrated to a different pCPU at any 280 + * point after kvm_request_needs_ipi(), which could result in 281 + * sending an IPI to the previous pCPU. But, that's ok because 282 + * the purpose of the IPI is to ensure the vCPU returns to 283 + * OUTSIDE_GUEST_MODE, which is satisfied if the vCPU migrates. 284 + * Entering READING_SHADOW_PAGE_TABLES after this point is also 285 + * ok, as the requirement is only that KVM wait for vCPUs that 286 + * were reading SPTEs _before_ any changes were finalized. See 287 + * kvm_vcpu_kick() for more details on handling requests. 
288 + */ 289 + if (kvm_request_needs_ipi(vcpu, req)) { 290 + cpu = READ_ONCE(vcpu->cpu); 291 + if (cpu != -1 && cpu != me) 292 + __cpumask_set_cpu(cpu, tmp); 293 + } 278 294 } 279 295 280 296 called = kvm_kick_many_cpus(tmp, !!(req & KVM_REQUEST_WAIT)); ··· 326 302 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL 327 303 void kvm_flush_remote_tlbs(struct kvm *kvm) 328 304 { 329 - /* 330 - * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in 331 - * kvm_make_all_cpus_request. 332 - */ 333 - long dirty_count = smp_load_acquire(&kvm->tlbs_dirty); 334 - 335 305 ++kvm->stat.generic.remote_tlb_flush_requests; 306 + 336 307 /* 337 308 * We want to publish modifications to the page tables before reading 338 309 * mode. Pairs with a memory barrier in arch-specific code. ··· 342 323 if (!kvm_arch_flush_remote_tlb(kvm) 343 324 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) 344 325 ++kvm->stat.generic.remote_tlb_flush; 345 - cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); 346 326 } 347 327 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs); 348 328 #endif ··· 546 528 } 547 529 } 548 530 549 - if (range->flush_on_ret && (ret || kvm->tlbs_dirty)) 531 + if (range->flush_on_ret && ret) 550 532 kvm_flush_remote_tlbs(kvm); 551 533 552 534 if (locked) ··· 3152 3134 3153 3135 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu) 3154 3136 { 3155 - unsigned int old, val, shrink; 3137 + unsigned int old, val, shrink, grow_start; 3156 3138 3157 3139 old = val = vcpu->halt_poll_ns; 3158 3140 shrink = READ_ONCE(halt_poll_ns_shrink); 3141 + grow_start = READ_ONCE(halt_poll_ns_grow_start); 3159 3142 if (shrink == 0) 3160 3143 val = 0; 3161 3144 else 3162 3145 val /= shrink; 3146 + 3147 + if (val < grow_start) 3148 + val = 0; 3163 3149 3164 3150 vcpu->halt_poll_ns = val; 3165 3151 trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old); ··· 3312 3290 */ 3313 3291 void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 3314 3292 { 3315 - int me; 3316 - int cpu = vcpu->cpu; 3293 + int me, cpu; 3317 3294 3318 3295 if 
(kvm_vcpu_wake_up(vcpu)) 3319 3296 return; 3320 3297 3298 + /* 3299 + * Note, the vCPU could get migrated to a different pCPU at any point 3300 + * after kvm_arch_vcpu_should_kick(), which could result in sending an 3301 + * IPI to the previous pCPU. But, that's ok because the purpose of the 3302 + * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the 3303 + * vCPU also requires it to leave IN_GUEST_MODE. 3304 + */ 3321 3305 me = get_cpu(); 3322 - if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3323 - if (kvm_arch_vcpu_should_kick(vcpu)) 3306 + if (kvm_arch_vcpu_should_kick(vcpu)) { 3307 + cpu = READ_ONCE(vcpu->cpu); 3308 + if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) 3324 3309 smp_send_reschedule(cpu); 3310 + } 3325 3311 put_cpu(); 3326 3312 } 3327 3313 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);