Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linus' into locking/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+3149 -1752
+9 -1
Documentation/bpf/bpf_devel_QA.txt
··· 557 557 pulls in some header files containing file scope host assembly codes. 558 558 - You can add "-fno-jump-tables" to work around the switch table issue. 559 559 560 - Otherwise, you can use bpf target. 560 + Otherwise, you can use bpf target. Additionally, you _must_ use bpf target 561 + when: 562 + 563 + - Your program uses data structures with pointer or long / unsigned long 564 + types that interface with BPF helpers or context data structures. Access 565 + into these structures is verified by the BPF verifier and may result 566 + in verification failures if the native architecture is not aligned with 567 + the BPF architecture, e.g. 64-bit. An example of this is 568 + BPF_PROG_TYPE_SK_MSG require '-target bpf' 561 569 562 570 Happy BPF hacking!
+7
Documentation/devicetree/bindings/input/atmel,maxtouch.txt
··· 4 4 - compatible: 5 5 atmel,maxtouch 6 6 7 + The following compatibles have been used in various products but are 8 + deprecated: 9 + atmel,qt602240_ts 10 + atmel,atmel_mxt_ts 11 + atmel,atmel_mxt_tp 12 + atmel,mXT224 13 + 7 14 - reg: The I2C address of the device 8 15 9 16 - interrupts: The sink for the touchpad's IRQ output
+1 -1
Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
··· 21 21 - interrupts : identifier to the device interrupt 22 22 - clocks : a list of phandle + clock-specifier pairs, one for each 23 23 entry in clock names. 24 - - clocks-names : 24 + - clock-names : 25 25 * "xtal" for external xtal clock identifier 26 26 * "pclk" for the bus core clock, either the clk81 clock or the gate clock 27 27 * "baud" for the source of the baudrate generator, can be either the xtal
+1 -1
Documentation/devicetree/bindings/serial/mvebu-uart.txt
··· 24 24 - Must contain two elements for the extended variant of the IP 25 25 (marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx", 26 26 respectively the UART TX interrupt and the UART RX interrupt. A 27 - corresponding interrupts-names property must be defined. 27 + corresponding interrupt-names property must be defined. 28 28 - For backward compatibility reasons, a single element interrupts 29 29 property is also supported for the standard variant of the IP, 30 30 containing only the UART sum interrupt. This form is deprecated
+2
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
··· 17 17 - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART. 18 18 - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART. 19 19 - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART. 20 + - "renesas,scif-r8a77470" for R8A77470 (RZ/G1C) SCIF compatible UART. 21 + - "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART. 20 22 - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART. 21 23 - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART. 22 24 - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
+4 -1
Documentation/devicetree/bindings/usb/usb-xhci.txt
··· 28 28 - interrupts: one XHCI interrupt should be described here. 29 29 30 30 Optional properties: 31 - - clocks: reference to a clock 31 + - clocks: reference to the clocks 32 + - clock-names: mandatory if there is a second clock, in this case 33 + the name must be "core" for the first clock and "reg" for the 34 + second one 32 35 - usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM 33 36 - usb3-lpm-capable: determines if platform is USB3 LPM capable 34 37 - quirk-broken-port-ped: set if the controller has broken port disable mechanism
+8 -8
Documentation/driver-api/firmware/request_firmware.rst
··· 17 17 18 18 request_firmware 19 19 ---------------- 20 - .. kernel-doc:: drivers/base/firmware_class.c 20 + .. kernel-doc:: drivers/base/firmware_loader/main.c 21 21 :functions: request_firmware 22 22 23 23 request_firmware_direct 24 24 ----------------------- 25 - .. kernel-doc:: drivers/base/firmware_class.c 25 + .. kernel-doc:: drivers/base/firmware_loader/main.c 26 26 :functions: request_firmware_direct 27 27 28 28 request_firmware_into_buf 29 29 ------------------------- 30 - .. kernel-doc:: drivers/base/firmware_class.c 30 + .. kernel-doc:: drivers/base/firmware_loader/main.c 31 31 :functions: request_firmware_into_buf 32 32 33 33 Asynchronous firmware requests ··· 41 41 42 42 request_firmware_nowait 43 43 ----------------------- 44 - .. kernel-doc:: drivers/base/firmware_class.c 44 + .. kernel-doc:: drivers/base/firmware_loader/main.c 45 45 :functions: request_firmware_nowait 46 46 47 47 Special optimizations on reboot ··· 50 50 Some devices have an optimization in place to enable the firmware to be 51 51 retained during system reboot. When such optimizations are used the driver 52 52 author must ensure the firmware is still available on resume from suspend, 53 - this can be done with firmware_request_cache() insted of requesting for the 54 - firmare to be loaded. 53 + this can be done with firmware_request_cache() instead of requesting for the 54 + firmware to be loaded. 55 55 56 56 firmware_request_cache() 57 - ----------------------- 58 - .. kernel-doc:: drivers/base/firmware_class.c 57 + ------------------------ 58 + .. kernel-doc:: drivers/base/firmware_loader/main.c 59 59 :functions: firmware_request_cache 60 60 61 61 request firmware API expected driver use
+1 -1
Documentation/driver-api/infrastructure.rst
··· 28 28 .. kernel-doc:: drivers/base/node.c 29 29 :internal: 30 30 31 - .. kernel-doc:: drivers/base/firmware_class.c 31 + .. kernel-doc:: drivers/base/firmware_loader/main.c 32 32 :export: 33 33 34 34 .. kernel-doc:: drivers/base/transport_class.c
+1 -1
Documentation/driver-api/usb/typec.rst
··· 210 210 role. USB Type-C Connector Class does not supply separate API for them. The 211 211 port drivers can use USB Role Class API with those. 212 212 213 - Illustration of the muxes behind a connector that supports an alternate mode: 213 + Illustration of the muxes behind a connector that supports an alternate mode:: 214 214 215 215 ------------------------ 216 216 | Connector |
+14 -18
Documentation/i2c/dev-interface
··· 9 9 the i2c-tools package. 10 10 11 11 I2C device files are character device files with major device number 89 12 - and a minor device number corresponding to the number assigned as 13 - explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ..., 12 + and a minor device number corresponding to the number assigned as 13 + explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ..., 14 14 i2c-10, ...). All 256 minor device numbers are reserved for i2c. 15 15 16 16 ··· 23 23 #include <linux/i2c-dev.h> 24 24 #include <i2c/smbus.h> 25 25 26 - (Please note that there are two files named "i2c-dev.h" out there. One is 27 - distributed with the Linux kernel and the other one is included in the 28 - source tree of i2c-tools. They used to be different in content but since 2012 29 - they're identical. You should use "linux/i2c-dev.h"). 30 - 31 26 Now, you have to decide which adapter you want to access. You should 32 27 inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this. 33 28 Adapter numbers are assigned somewhat dynamically, so you can not ··· 33 38 int file; 34 39 int adapter_nr = 2; /* probably dynamically determined */ 35 40 char filename[20]; 36 - 41 + 37 42 snprintf(filename, 19, "/dev/i2c-%d", adapter_nr); 38 43 file = open(filename, O_RDWR); 39 44 if (file < 0) { ··· 67 72 /* res contains the read word */ 68 73 } 69 74 70 - /* Using I2C Write, equivalent of 71 - i2c_smbus_write_word_data(file, reg, 0x6543) */ 75 + /* 76 + * Using I2C Write, equivalent of 77 + * i2c_smbus_write_word_data(file, reg, 0x6543) 78 + */ 72 79 buf[0] = reg; 73 80 buf[1] = 0x43; 74 81 buf[2] = 0x65; ··· 137 140 set in each message, overriding the values set with the above ioctl's. 138 141 139 142 ioctl(file, I2C_SMBUS, struct i2c_smbus_ioctl_data *args) 140 - Not meant to be called directly; instead, use the access functions 141 - below. 143 + If possible, use the provided i2c_smbus_* methods described below instead 144 + of issuing direct ioctls. 
142 145 143 146 You can do plain i2c transactions by using read(2) and write(2) calls. 144 147 You do not need to pass the address byte; instead, set it through 145 148 ioctl I2C_SLAVE before you try to access the device. 146 149 147 - You can do SMBus level transactions (see documentation file smbus-protocol 150 + You can do SMBus level transactions (see documentation file smbus-protocol 148 151 for details) through the following functions: 149 152 __s32 i2c_smbus_write_quick(int file, __u8 value); 150 153 __s32 i2c_smbus_read_byte(int file); ··· 155 158 __s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value); 156 159 __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value); 157 160 __s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values); 158 - __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length, 161 + __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length, 159 162 __u8 *values); 160 163 All these transactions return -1 on failure; you can read errno to see 161 164 what happened. The 'write' transactions return 0 on success; the ··· 163 166 returns the number of values read. The block buffers need not be longer 164 167 than 32 bytes. 165 168 166 - The above functions are all inline functions, that resolve to calls to 167 - the i2c_smbus_access function, that on its turn calls a specific ioctl 168 - with the data in a specific format. Read the source code if you 169 - want to know what happens behind the screens. 169 + The above functions are made available by linking against the libi2c library, 170 + which is provided by the i2c-tools project. See: 171 + https://git.kernel.org/pub/scm/utils/i2c-tools/i2c-tools.git/. 170 172 171 173 172 174 Implementation details
-2
Documentation/ioctl/ioctl-number.txt
··· 217 217 'd' 02-40 pcmcia/ds.h conflict! 218 218 'd' F0-FF linux/digi1.h 219 219 'e' all linux/digi1.h conflict! 220 - 'e' 00-1F drivers/net/irda/irtty-sir.h conflict! 221 220 'f' 00-1F linux/ext2_fs.h conflict! 222 221 'f' 00-1F linux/ext3_fs.h conflict! 223 222 'f' 00-0F fs/jfs/jfs_dinode.h conflict! ··· 246 247 'm' all linux/synclink.h conflict! 247 248 'm' 00-19 drivers/message/fusion/mptctl.h conflict! 248 249 'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict! 249 - 'm' 00-1F net/irda/irmod.h conflict! 250 250 'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c 251 251 'n' 80-8F uapi/linux/nilfs2_api.h NILFS2 252 252 'n' E0-FF linux/matroxfb.h matroxfb
-15
Documentation/networking/ip-sysctl.txt
··· 2126 2126 2127 2127 Default: 10 2128 2128 2129 - 2130 - UNDOCUMENTED: 2131 - 2132 - /proc/sys/net/irda/* 2133 - fast_poll_increase FIXME 2134 - warn_noreply_time FIXME 2135 - discovery_slots FIXME 2136 - slot_timeout FIXME 2137 - max_baud_rate FIXME 2138 - discovery_timeout FIXME 2139 - lap_keepalive_time FIXME 2140 - max_noreply_time FIXME 2141 - max_tx_data_size FIXME 2142 - max_tx_window FIXME 2143 - min_tx_turn_time FIXME
+1 -1
Documentation/power/suspend-and-cpuhotplug.txt
··· 168 168 169 169 [Please bear in mind that the kernel requests the microcode images from 170 170 userspace, using the request_firmware() function defined in 171 - drivers/base/firmware_class.c] 171 + drivers/base/firmware_loader/main.c] 172 172 173 173 174 174 a. When all the CPUs are identical:
-3
Documentation/process/magic-number.rst
··· 157 157 OSS sound drivers have their magic numbers constructed from the soundcard PCI 158 158 ID - these are not listed here as well. 159 159 160 - IrDA subsystem also uses large number of own magic numbers, see 161 - ``include/net/irda/irda.h`` for a complete list of them. 162 - 163 160 HFS is another larger user of magic numbers - you can find them in 164 161 ``fs/hfs/hfs.h``.
+11 -3
Documentation/trace/ftrace.rst
··· 461 461 and ticks at the same rate as the hardware clocksource. 462 462 463 463 boot: 464 - Same as mono. Used to be a separate clock which accounted 465 - for the time spent in suspend while CLOCK_MONOTONIC did 466 - not. 464 + This is the boot clock (CLOCK_BOOTTIME) and is based on the 465 + fast monotonic clock, but also accounts for time spent in 466 + suspend. Since the clock access is designed for use in 467 + tracing in the suspend path, some side effects are possible 468 + if clock is accessed after the suspend time is accounted before 469 + the fast mono clock is updated. In this case, the clock update 470 + appears to happen slightly sooner than it normally would have. 471 + Also on 32-bit systems, it's possible that the 64-bit boot offset 472 + sees a partial update. These effects are rare and post 473 + processing should be able to handle them. See comments in the 474 + ktime_get_boot_fast_ns() function for more information. 467 475 468 476 To set a clock, simply echo the clock name into this file:: 469 477
+8 -1
Documentation/virtual/kvm/api.txt
··· 1960 1960 ARM 64-bit FP registers have the following id bit patterns: 1961 1961 0x4030 0000 0012 0 <regno:12> 1962 1962 1963 + ARM firmware pseudo-registers have the following bit pattern: 1964 + 0x4030 0000 0014 <regno:16> 1965 + 1963 1966 1964 1967 arm64 registers are mapped using the lower 32 bits. The upper 16 of 1965 1968 that is the register group type, or coprocessor number: ··· 1978 1975 1979 1976 arm64 system registers have the following id bit patterns: 1980 1977 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3> 1978 + 1979 + arm64 firmware pseudo-registers have the following bit pattern: 1980 + 0x6030 0000 0014 <regno:16> 1981 1981 1982 1982 1983 1983 MIPS registers are mapped using the lower 32 bits. The upper 16 of that is ··· 2516 2510 and execute guest code when KVM_RUN is called. 2517 2511 - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode. 2518 2512 Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only). 2519 - - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU. 2513 + - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision 2514 + backward compatible with v0.2) for the CPU. 2520 2515 Depends on KVM_CAP_ARM_PSCI_0_2. 2521 2516 - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU. 2522 2517 Depends on KVM_CAP_ARM_PMU_V3.
+30
Documentation/virtual/kvm/arm/psci.txt
··· 1 + KVM implements the PSCI (Power State Coordination Interface) 2 + specification in order to provide services such as CPU on/off, reset 3 + and power-off to the guest. 4 + 5 + The PSCI specification is regularly updated to provide new features, 6 + and KVM implements these updates if they make sense from a virtualization 7 + point of view. 8 + 9 + This means that a guest booted on two different versions of KVM can 10 + observe two different "firmware" revisions. This could cause issues if 11 + a given guest is tied to a particular PSCI revision (unlikely), or if 12 + a migration causes a different PSCI version to be exposed out of the 13 + blue to an unsuspecting guest. 14 + 15 + In order to remedy this situation, KVM exposes a set of "firmware 16 + pseudo-registers" that can be manipulated using the GET/SET_ONE_REG 17 + interface. These registers can be saved/restored by userspace, and set 18 + to a convenient value if required. 19 + 20 + The following register is defined: 21 + 22 + * KVM_REG_ARM_PSCI_VERSION: 23 + 24 + - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set 25 + (and thus has already been initialized) 26 + - Returns the current PSCI version on GET_ONE_REG (defaulting to the 27 + highest PSCI version implemented by KVM and compatible with v0.2) 28 + - Allows any PSCI version implemented by KVM and compatible with 29 + v0.2 to be set with SET_ONE_REG 30 + - Affects the whole VM (even if the register view is per-vcpu)
+10 -18
MAINTAINERS
··· 564 564 F: drivers/media/dvb-frontends/af9033* 565 565 566 566 AFFS FILE SYSTEM 567 + M: David Sterba <dsterba@suse.com> 567 568 L: linux-fsdevel@vger.kernel.org 568 - S: Orphan 569 + S: Odd Fixes 569 570 F: Documentation/filesystems/affs.txt 570 571 F: fs/affs/ 571 572 ··· 906 905 M: Laura Abbott <labbott@redhat.com> 907 906 M: Sumit Semwal <sumit.semwal@linaro.org> 908 907 L: devel@driverdev.osuosl.org 908 + L: dri-devel@lists.freedesktop.org 909 + L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers) 909 910 S: Supported 910 911 F: drivers/staging/android/ion 911 912 F: drivers/staging/android/uapi/ion.h ··· 1211 1208 ARM/ARTPEC MACHINE SUPPORT 1212 1209 M: Jesper Nilsson <jesper.nilsson@axis.com> 1213 1210 M: Lars Persson <lars.persson@axis.com> 1214 - M: Niklas Cassel <niklas.cassel@axis.com> 1215 1211 S: Maintained 1216 1212 L: linux-arm-kernel@axis.com 1217 1213 F: arch/arm/mach-artpec ··· 7413 7411 F: include/uapi/linux/ipx.h 7414 7412 F: drivers/staging/ipx/ 7415 7413 7416 - IRDA SUBSYSTEM 7417 - M: Samuel Ortiz <samuel@sortiz.org> 7418 - L: irda-users@lists.sourceforge.net (subscribers-only) 7419 - L: netdev@vger.kernel.org 7420 - W: http://irda.sourceforge.net/ 7421 - S: Obsolete 7422 - T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git 7423 - F: Documentation/networking/irda.txt 7424 - F: drivers/staging/irda/ 7425 - 7426 7414 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) 7427 7415 M: Marc Zyngier <marc.zyngier@arm.com> 7428 7416 S: Maintained ··· 7745 7753 F: arch/x86/kvm/svm.c 7746 7754 7747 7755 KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm) 7748 - M: Christoffer Dall <christoffer.dall@linaro.org> 7756 + M: Christoffer Dall <christoffer.dall@arm.com> 7749 7757 M: Marc Zyngier <marc.zyngier@arm.com> 7750 7758 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 7751 7759 L: kvmarm@lists.cs.columbia.edu ··· 7759 7767 F: include/kvm/arm_* 7760 7768 7761 7769 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) 7762 - 
M: Christoffer Dall <christoffer.dall@linaro.org> 7770 + M: Christoffer Dall <christoffer.dall@arm.com> 7763 7771 M: Marc Zyngier <marc.zyngier@arm.com> 7764 7772 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 7765 7773 L: kvmarm@lists.cs.columbia.edu ··· 9726 9734 F: net/core/drop_monitor.c 9727 9735 9728 9736 NETWORKING DRIVERS 9737 + M: "David S. Miller" <davem@davemloft.net> 9729 9738 L: netdev@vger.kernel.org 9730 9739 W: http://www.linuxfoundation.org/en/Net 9731 9740 Q: http://patchwork.ozlabs.org/project/netdev/list/ ··· 10903 10910 F: drivers/pci/dwc/ 10904 10911 10905 10912 PCIE DRIVER FOR AXIS ARTPEC 10906 - M: Niklas Cassel <niklas.cassel@axis.com> 10907 10913 M: Jesper Nilsson <jesper.nilsson@axis.com> 10908 10914 L: linux-arm-kernel@axis.com 10909 10915 L: linux-pci@vger.kernel.org ··· 12500 12508 SCTP PROTOCOL 12501 12509 M: Vlad Yasevich <vyasevich@gmail.com> 12502 12510 M: Neil Horman <nhorman@tuxdriver.com> 12511 + M: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> 12503 12512 L: linux-sctp@vger.kernel.org 12504 12513 W: http://lksctp.sourceforge.net 12505 12514 S: Maintained ··· 13856 13863 F: drivers/iommu/tegra* 13857 13864 13858 13865 TEGRA KBC DRIVER 13859 - M: Rakesh Iyer <riyer@nvidia.com> 13860 13866 M: Laxman Dewangan <ldewangan@nvidia.com> 13861 13867 S: Supported 13862 13868 F: drivers/input/keyboard/tegra-kbc.c ··· 13958 13966 M: Andreas Noever <andreas.noever@gmail.com> 13959 13967 M: Michael Jamet <michael.jamet@intel.com> 13960 13968 M: Mika Westerberg <mika.westerberg@linux.intel.com> 13961 - M: Yehezkel Bernat <yehezkel.bernat@intel.com> 13969 + M: Yehezkel Bernat <YehezkelShB@gmail.com> 13962 13970 T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git 13963 13971 S: Maintained 13964 13972 F: Documentation/admin-guide/thunderbolt.rst ··· 13968 13976 THUNDERBOLT NETWORK DRIVER 13969 13977 M: Michael Jamet <michael.jamet@intel.com> 13970 13978 M: Mika Westerberg 
<mika.westerberg@linux.intel.com> 13971 - M: Yehezkel Bernat <yehezkel.bernat@intel.com> 13979 + M: Yehezkel Bernat <YehezkelShB@gmail.com> 13972 13980 L: netdev@vger.kernel.org 13973 13981 S: Maintained 13974 13982 F: drivers/net/thunderbolt.c
+1 -1
Makefile
··· 2 2 VERSION = 4 3 3 PATCHLEVEL = 17 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc2 5 + EXTRAVERSION = -rc3 6 6 NAME = Fearless Coyote 7 7 8 8 # *DOCUMENTATION*
+14 -14
arch/arm/boot/dts/gemini-nas4220b.dts
··· 134 134 function = "gmii"; 135 135 groups = "gmii_gmac0_grp"; 136 136 }; 137 - /* Settings come from OpenWRT */ 137 + /* Settings come from OpenWRT, pins on SL3516 */ 138 138 conf0 { 139 - pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV"; 139 + pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV"; 140 140 skew-delay = <0>; 141 141 }; 142 142 conf1 { 143 - pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC"; 143 + pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC"; 144 144 skew-delay = <15>; 145 145 }; 146 146 conf2 { 147 - pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN"; 147 + pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN"; 148 148 skew-delay = <7>; 149 149 }; 150 150 conf3 { 151 - pins = "V7 GMAC0 TXC"; 151 + pins = "U8 GMAC0 TXC"; 152 152 skew-delay = <11>; 153 153 }; 154 154 conf4 { 155 - pins = "P10 GMAC1 TXC"; 155 + pins = "V11 GMAC1 TXC"; 156 156 skew-delay = <10>; 157 157 }; 158 158 conf5 { 159 159 /* The data lines all have default skew */ 160 - pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1", 161 - "P9 GMAC0 RXD2", "R9 GMAC0 RXD3", 162 - "U7 GMAC0 TXD0", "T7 GMAC0 TXD1", 163 - "R7 GMAC0 TXD2", "P7 GMAC0 TXD3", 164 - "R11 GMAC1 RXD0", "P11 GMAC1 RXD1", 165 - "V12 GMAC1 RXD2", "U12 GMAC1 RXD3", 166 - "R10 GMAC1 TXD0", "T10 GMAC1 TXD1", 167 - "U10 GMAC1 TXD2", "V10 GMAC1 TXD3"; 160 + pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1", 161 + "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3", 162 + "T7 GMAC0 TXD0", "U6 GMAC0 TXD1", 163 + "V7 GMAC0 TXD2", "U7 GMAC0 TXD3", 164 + "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1", 165 + "T11 GMAC1 RXD2", "W12 GMAC1 RXD3", 166 + "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1", 167 + "W10 GMAC1 TXD2", "T9 GMAC1 TXD3"; 168 168 skew-delay = <7>; 169 169 }; 170 170 /* Set up drive strength on GMAC0 to 16 mA */
+4 -4
arch/arm/boot/dts/omap4.dtsi
··· 163 163 164 164 cm2: cm2@8000 { 165 165 compatible = "ti,omap4-cm2", "simple-bus"; 166 - reg = <0x8000 0x3000>; 166 + reg = <0x8000 0x2000>; 167 167 #address-cells = <1>; 168 168 #size-cells = <1>; 169 - ranges = <0 0x8000 0x3000>; 169 + ranges = <0 0x8000 0x2000>; 170 170 171 171 cm2_clocks: clocks { 172 172 #address-cells = <1>; ··· 250 250 251 251 prm: prm@6000 { 252 252 compatible = "ti,omap4-prm"; 253 - reg = <0x6000 0x3000>; 253 + reg = <0x6000 0x2000>; 254 254 interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>; 255 255 #address-cells = <1>; 256 256 #size-cells = <1>; 257 - ranges = <0 0x6000 0x3000>; 257 + ranges = <0 0x6000 0x2000>; 258 258 259 259 prm_clocks: clocks { 260 260 #address-cells = <1>;
+25 -2
arch/arm/configs/gemini_defconfig
··· 1 1 # CONFIG_LOCALVERSION_AUTO is not set 2 2 CONFIG_SYSVIPC=y 3 3 CONFIG_NO_HZ_IDLE=y 4 + CONFIG_HIGH_RES_TIMERS=y 4 5 CONFIG_BSD_PROCESS_ACCT=y 5 6 CONFIG_USER_NS=y 6 7 CONFIG_RELAY=y ··· 13 12 CONFIG_PCI=y 14 13 CONFIG_PREEMPT=y 15 14 CONFIG_AEABI=y 15 + CONFIG_HIGHMEM=y 16 + CONFIG_CMA=y 16 17 CONFIG_CMDLINE="console=ttyS0,115200n8" 17 18 CONFIG_KEXEC=y 18 19 CONFIG_BINFMT_MISC=y 19 20 CONFIG_PM=y 21 + CONFIG_NET=y 22 + CONFIG_UNIX=y 23 + CONFIG_INET=y 20 24 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 21 25 CONFIG_DEVTMPFS=y 22 26 CONFIG_MTD=y 23 27 CONFIG_MTD_BLOCK=y 24 28 CONFIG_MTD_CFI=y 29 + CONFIG_MTD_JEDECPROBE=y 25 30 CONFIG_MTD_CFI_INTELEXT=y 26 31 CONFIG_MTD_CFI_AMDSTD=y 27 32 CONFIG_MTD_CFI_STAA=y ··· 40 33 # CONFIG_SCSI_LOWLEVEL is not set 41 34 CONFIG_ATA=y 42 35 CONFIG_PATA_FTIDE010=y 36 + CONFIG_NETDEVICES=y 37 + CONFIG_GEMINI_ETHERNET=y 38 + CONFIG_MDIO_BITBANG=y 39 + CONFIG_MDIO_GPIO=y 40 + CONFIG_REALTEK_PHY=y 43 41 CONFIG_INPUT_EVDEV=y 44 42 CONFIG_KEYBOARD_GPIO=y 45 43 # CONFIG_INPUT_MOUSE is not set ··· 55 43 CONFIG_SERIAL_8250_RUNTIME_UARTS=1 56 44 CONFIG_SERIAL_OF_PLATFORM=y 57 45 # CONFIG_HW_RANDOM is not set 58 - # CONFIG_HWMON is not set 46 + CONFIG_I2C_GPIO=y 47 + CONFIG_SPI=y 48 + CONFIG_SPI_GPIO=y 49 + CONFIG_SENSORS_GPIO_FAN=y 50 + CONFIG_SENSORS_LM75=y 51 + CONFIG_THERMAL=y 59 52 CONFIG_WATCHDOG=y 60 - CONFIG_GEMINI_WATCHDOG=y 53 + CONFIG_REGULATOR=y 54 + CONFIG_REGULATOR_FIXED_VOLTAGE=y 55 + CONFIG_DRM=y 56 + CONFIG_DRM_PANEL_ILITEK_IL9322=y 57 + CONFIG_DRM_TVE200=y 58 + CONFIG_LOGO=y 61 59 CONFIG_USB=y 62 60 CONFIG_USB_MON=y 63 61 CONFIG_USB_FOTG210_HCD=y ··· 76 54 CONFIG_LEDS_CLASS=y 77 55 CONFIG_LEDS_GPIO=y 78 56 CONFIG_LEDS_TRIGGERS=y 57 + CONFIG_LEDS_TRIGGER_DISK=y 79 58 CONFIG_LEDS_TRIGGER_HEARTBEAT=y 80 59 CONFIG_RTC_CLASS=y 81 60 CONFIG_DMADEVICES=y
+1
arch/arm/configs/socfpga_defconfig
··· 57 57 CONFIG_MTD_NAND=y 58 58 CONFIG_MTD_NAND_DENALI_DT=y 59 59 CONFIG_MTD_SPI_NOR=y 60 + # CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set 60 61 CONFIG_SPI_CADENCE_QUADSPI=y 61 62 CONFIG_OF_OVERLAY=y 62 63 CONFIG_OF_CONFIGFS=y
+3
arch/arm/include/asm/kvm_host.h
··· 77 77 /* Interrupt controller */ 78 78 struct vgic_dist vgic; 79 79 int max_vcpus; 80 + 81 + /* Mandated version of PSCI */ 82 + u32 psci_version; 80 83 }; 81 84 82 85 #define KVM_NR_MEM_OBJS 40
+6
arch/arm/include/uapi/asm/kvm.h
··· 195 195 #define KVM_REG_ARM_VFP_FPINST 0x1009 196 196 #define KVM_REG_ARM_VFP_FPINST2 0x100A 197 197 198 + /* KVM-as-firmware specific pseudo-registers */ 199 + #define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) 200 + #define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ 201 + KVM_REG_ARM_FW | ((r) & 0xffff)) 202 + #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) 203 + 198 204 /* Device Control API: ARM VGIC */ 199 205 #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 200 206 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+13
arch/arm/kvm/guest.c
··· 22 22 #include <linux/module.h> 23 23 #include <linux/vmalloc.h> 24 24 #include <linux/fs.h> 25 + #include <kvm/arm_psci.h> 25 26 #include <asm/cputype.h> 26 27 #include <linux/uaccess.h> 27 28 #include <asm/kvm.h> ··· 177 176 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 178 177 { 179 178 return num_core_regs() + kvm_arm_num_coproc_regs(vcpu) 179 + + kvm_arm_get_fw_num_regs(vcpu) 180 180 + NUM_TIMER_REGS; 181 181 } 182 182 ··· 198 196 uindices++; 199 197 } 200 198 199 + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); 200 + if (ret) 201 + return ret; 202 + uindices += kvm_arm_get_fw_num_regs(vcpu); 203 + 201 204 ret = copy_timer_indices(vcpu, uindices); 202 205 if (ret) 203 206 return ret; ··· 221 214 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 222 215 return get_core_reg(vcpu, reg); 223 216 217 + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) 218 + return kvm_arm_get_fw_reg(vcpu, reg); 219 + 224 220 if (is_timer_reg(reg->id)) 225 221 return get_timer_reg(vcpu, reg); 226 222 ··· 239 229 /* Register group 16 means we set a core register. */ 240 230 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 241 231 return set_core_reg(vcpu, reg); 232 + 233 + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) 234 + return kvm_arm_set_fw_reg(vcpu, reg); 242 235 243 236 if (is_timer_reg(reg->id)) 244 237 return set_timer_reg(vcpu, reg);
+1 -5
arch/arm/mach-omap2/Makefile
··· 243 243 include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE 244 244 $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__) 245 245 246 - # For rule to generate ti-emif-asm-offsets.h dependency 247 - include drivers/memory/Makefile.asm-offsets 248 - 249 - arch/arm/mach-omap2/sleep33xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h 250 - arch/arm/mach-omap2/sleep43xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h 246 + $(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h
+3
arch/arm/mach-omap2/pm-asm-offsets.c
··· 7 7 8 8 #include <linux/kbuild.h> 9 9 #include <linux/platform_data/pm33xx.h> 10 + #include <linux/ti-emif-sram.h> 10 11 11 12 int main(void) 12 13 { 14 + ti_emif_asm_offsets(); 15 + 13 16 DEFINE(AMX3_PM_WFI_FLAGS_OFFSET, 14 17 offsetof(struct am33xx_pm_sram_data, wfi_flags)); 15 18 DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET,
-1
arch/arm/mach-omap2/sleep33xx.S
··· 6 6 * Dave Gerlach, Vaibhav Bedia 7 7 */ 8 8 9 - #include <generated/ti-emif-asm-offsets.h> 10 9 #include <generated/ti-pm-asm-offsets.h> 11 10 #include <linux/linkage.h> 12 11 #include <linux/ti-emif-sram.h>
-1
arch/arm/mach-omap2/sleep43xx.S
··· 6 6 * Dave Gerlach, Vaibhav Bedia 7 7 */ 8 8 9 - #include <generated/ti-emif-asm-offsets.h> 10 9 #include <generated/ti-pm-asm-offsets.h> 11 10 #include <linux/linkage.h> 12 11 #include <linux/ti-emif-sram.h>
+2 -2
arch/arm/mach-s3c24xx/mach-jive.c
··· 427 427 .dev_id = "spi_gpio", 428 428 .table = { 429 429 GPIO_LOOKUP("GPIOB", 4, 430 - "gpio-sck", GPIO_ACTIVE_HIGH), 430 + "sck", GPIO_ACTIVE_HIGH), 431 431 GPIO_LOOKUP("GPIOB", 9, 432 - "gpio-mosi", GPIO_ACTIVE_HIGH), 432 + "mosi", GPIO_ACTIVE_HIGH), 433 433 GPIO_LOOKUP("GPIOH", 10, 434 434 "cs", GPIO_ACTIVE_HIGH), 435 435 { },
+4
arch/arm64/Makefile
··· 56 56 KBUILD_CFLAGS += $(call cc-option,-mabi=lp64) 57 57 KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) 58 58 59 + ifeq ($(cc-name),clang) 60 + KBUILD_CFLAGS += -DCONFIG_ARCH_SUPPORTS_INT128 61 + else 59 62 KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128) 63 + endif 60 64 61 65 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) 62 66 KBUILD_CPPFLAGS += -mbig-endian
+4
arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
··· 212 212 pinctrl-0 = <&uart_ao_a_pins>; 213 213 pinctrl-names = "default"; 214 214 }; 215 + 216 + &usb0 { 217 + status = "okay"; 218 + };
+12
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
··· 271 271 pinctrl-0 = <&uart_ao_a_pins>; 272 272 pinctrl-names = "default"; 273 273 }; 274 + 275 + &usb0 { 276 + status = "okay"; 277 + }; 278 + 279 + &usb2_phy0 { 280 + /* 281 + * even though the schematics don't show it: 282 + * HDMI_5V is also used as supply for the USB VBUS. 283 + */ 284 + phy-supply = <&hdmi_5v>; 285 + };
+4
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
··· 215 215 pinctrl-0 = <&uart_ao_a_pins>; 216 216 pinctrl-names = "default"; 217 217 }; 218 + 219 + &usb0 { 220 + status = "okay"; 221 + };
+4
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
··· 185 185 pinctrl-0 = <&uart_ao_a_pins>; 186 186 pinctrl-names = "default"; 187 187 }; 188 + 189 + &usb0 { 190 + status = "okay"; 191 + };
+61
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
··· 20 20 no-map; 21 21 }; 22 22 }; 23 + 24 + soc { 25 + usb0: usb@c9000000 { 26 + status = "disabled"; 27 + compatible = "amlogic,meson-gxl-dwc3"; 28 + #address-cells = <2>; 29 + #size-cells = <2>; 30 + ranges; 31 + 32 + clocks = <&clkc CLKID_USB>; 33 + clock-names = "usb_general"; 34 + resets = <&reset RESET_USB_OTG>; 35 + reset-names = "usb_otg"; 36 + 37 + dwc3: dwc3@c9000000 { 38 + compatible = "snps,dwc3"; 39 + reg = <0x0 0xc9000000 0x0 0x100000>; 40 + interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>; 41 + dr_mode = "host"; 42 + maximum-speed = "high-speed"; 43 + snps,dis_u2_susphy_quirk; 44 + phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>; 45 + }; 46 + }; 47 + }; 48 + }; 49 + 50 + &apb { 51 + usb2_phy0: phy@78000 { 52 + compatible = "amlogic,meson-gxl-usb2-phy"; 53 + #phy-cells = <0>; 54 + reg = <0x0 0x78000 0x0 0x20>; 55 + clocks = <&clkc CLKID_USB>; 56 + clock-names = "phy"; 57 + resets = <&reset RESET_USB_OTG>; 58 + reset-names = "phy"; 59 + status = "okay"; 60 + }; 61 + 62 + usb2_phy1: phy@78020 { 63 + compatible = "amlogic,meson-gxl-usb2-phy"; 64 + #phy-cells = <0>; 65 + reg = <0x0 0x78020 0x0 0x20>; 66 + clocks = <&clkc CLKID_USB>; 67 + clock-names = "phy"; 68 + resets = <&reset RESET_USB_OTG>; 69 + reset-names = "phy"; 70 + status = "okay"; 71 + }; 72 + 73 + usb3_phy: phy@78080 { 74 + compatible = "amlogic,meson-gxl-usb3-phy"; 75 + #phy-cells = <0>; 76 + reg = <0x0 0x78080 0x0 0x20>; 77 + interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; 78 + clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>; 79 + clock-names = "phy", "peripheral"; 80 + resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>; 81 + reset-names = "phy", "peripheral"; 82 + status = "okay"; 83 + }; 23 84 }; 24 85 25 86 &ethmac {
+4
arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
··· 406 406 status = "okay"; 407 407 vref-supply = <&vddio_ao18>; 408 408 }; 409 + 410 + &usb0 { 411 + status = "okay"; 412 + };
+17
arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
··· 80 80 }; 81 81 }; 82 82 83 + &apb { 84 + usb2_phy2: phy@78040 { 85 + compatible = "amlogic,meson-gxl-usb2-phy"; 86 + #phy-cells = <0>; 87 + reg = <0x0 0x78040 0x0 0x20>; 88 + clocks = <&clkc CLKID_USB>; 89 + clock-names = "phy"; 90 + resets = <&reset RESET_USB_OTG>; 91 + reset-names = "phy"; 92 + status = "okay"; 93 + }; 94 + }; 95 + 83 96 &clkc_AO { 84 97 compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc"; 85 98 }; ··· 112 99 113 100 &hdmi_tx { 114 101 compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi"; 102 + }; 103 + 104 + &dwc3 { 105 + phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>; 115 106 };
-2
arch/arm64/boot/dts/arm/juno-motherboard.dtsi
··· 56 56 57 57 gpio_keys { 58 58 compatible = "gpio-keys"; 59 - #address-cells = <1>; 60 - #size-cells = <0>; 61 59 62 60 power-button { 63 61 debounce_interval = <50>;
+40 -40
arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
··· 36 36 #size-cells = <1>; 37 37 ranges = <0x0 0x0 0x67d00000 0x00800000>; 38 38 39 - sata0: ahci@210000 { 39 + sata0: ahci@0 { 40 40 compatible = "brcm,iproc-ahci", "generic-ahci"; 41 - reg = <0x00210000 0x1000>; 41 + reg = <0x00000000 0x1000>; 42 42 reg-names = "ahci"; 43 - interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>; 43 + interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>; 44 44 #address-cells = <1>; 45 45 #size-cells = <0>; 46 46 status = "disabled"; ··· 52 52 }; 53 53 }; 54 54 55 - sata_phy0: sata_phy@212100 { 55 + sata_phy0: sata_phy@2100 { 56 56 compatible = "brcm,iproc-sr-sata-phy"; 57 - reg = <0x00212100 0x1000>; 57 + reg = <0x00002100 0x1000>; 58 58 reg-names = "phy"; 59 59 #address-cells = <1>; 60 60 #size-cells = <0>; ··· 66 66 }; 67 67 }; 68 68 69 - sata1: ahci@310000 { 69 + sata1: ahci@10000 { 70 70 compatible = "brcm,iproc-ahci", "generic-ahci"; 71 - reg = <0x00310000 0x1000>; 71 + reg = <0x00010000 0x1000>; 72 72 reg-names = "ahci"; 73 - interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>; 73 + interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>; 74 74 #address-cells = <1>; 75 75 #size-cells = <0>; 76 76 status = "disabled"; ··· 82 82 }; 83 83 }; 84 84 85 - sata_phy1: sata_phy@312100 { 85 + sata_phy1: sata_phy@12100 { 86 86 compatible = "brcm,iproc-sr-sata-phy"; 87 - reg = <0x00312100 0x1000>; 87 + reg = <0x00012100 0x1000>; 88 88 reg-names = "phy"; 89 89 #address-cells = <1>; 90 90 #size-cells = <0>; ··· 96 96 }; 97 97 }; 98 98 99 - sata2: ahci@120000 { 99 + sata2: ahci@20000 { 100 100 compatible = "brcm,iproc-ahci", "generic-ahci"; 101 - reg = <0x00120000 0x1000>; 101 + reg = <0x00020000 0x1000>; 102 102 reg-names = "ahci"; 103 - interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>; 103 + interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>; 104 104 #address-cells = <1>; 105 105 #size-cells = <0>; 106 106 status = "disabled"; ··· 112 112 }; 113 113 }; 114 114 115 - sata_phy2: sata_phy@122100 { 115 + sata_phy2: sata_phy@22100 { 116 116 compatible = 
"brcm,iproc-sr-sata-phy"; 117 - reg = <0x00122100 0x1000>; 117 + reg = <0x00022100 0x1000>; 118 118 reg-names = "phy"; 119 119 #address-cells = <1>; 120 120 #size-cells = <0>; ··· 126 126 }; 127 127 }; 128 128 129 - sata3: ahci@130000 { 129 + sata3: ahci@30000 { 130 130 compatible = "brcm,iproc-ahci", "generic-ahci"; 131 - reg = <0x00130000 0x1000>; 131 + reg = <0x00030000 0x1000>; 132 132 reg-names = "ahci"; 133 - interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>; 133 + interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>; 134 134 #address-cells = <1>; 135 135 #size-cells = <0>; 136 136 status = "disabled"; ··· 142 142 }; 143 143 }; 144 144 145 - sata_phy3: sata_phy@132100 { 145 + sata_phy3: sata_phy@32100 { 146 146 compatible = "brcm,iproc-sr-sata-phy"; 147 - reg = <0x00132100 0x1000>; 147 + reg = <0x00032100 0x1000>; 148 148 reg-names = "phy"; 149 149 #address-cells = <1>; 150 150 #size-cells = <0>; ··· 156 156 }; 157 157 }; 158 158 159 - sata4: ahci@330000 { 159 + sata4: ahci@100000 { 160 160 compatible = "brcm,iproc-ahci", "generic-ahci"; 161 - reg = <0x00330000 0x1000>; 161 + reg = <0x00100000 0x1000>; 162 162 reg-names = "ahci"; 163 - interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>; 163 + interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>; 164 164 #address-cells = <1>; 165 165 #size-cells = <0>; 166 166 status = "disabled"; ··· 172 172 }; 173 173 }; 174 174 175 - sata_phy4: sata_phy@332100 { 175 + sata_phy4: sata_phy@102100 { 176 176 compatible = "brcm,iproc-sr-sata-phy"; 177 - reg = <0x00332100 0x1000>; 177 + reg = <0x00102100 0x1000>; 178 178 reg-names = "phy"; 179 179 #address-cells = <1>; 180 180 #size-cells = <0>; ··· 186 186 }; 187 187 }; 188 188 189 - sata5: ahci@400000 { 189 + sata5: ahci@110000 { 190 190 compatible = "brcm,iproc-ahci", "generic-ahci"; 191 - reg = <0x00400000 0x1000>; 191 + reg = <0x00110000 0x1000>; 192 192 reg-names = "ahci"; 193 - interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>; 193 + interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>; 194 194 
#address-cells = <1>; 195 195 #size-cells = <0>; 196 196 status = "disabled"; ··· 202 202 }; 203 203 }; 204 204 205 - sata_phy5: sata_phy@402100 { 205 + sata_phy5: sata_phy@112100 { 206 206 compatible = "brcm,iproc-sr-sata-phy"; 207 - reg = <0x00402100 0x1000>; 207 + reg = <0x00112100 0x1000>; 208 208 reg-names = "phy"; 209 209 #address-cells = <1>; 210 210 #size-cells = <0>; ··· 216 216 }; 217 217 }; 218 218 219 - sata6: ahci@410000 { 219 + sata6: ahci@120000 { 220 220 compatible = "brcm,iproc-ahci", "generic-ahci"; 221 - reg = <0x00410000 0x1000>; 221 + reg = <0x00120000 0x1000>; 222 222 reg-names = "ahci"; 223 - interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>; 223 + interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>; 224 224 #address-cells = <1>; 225 225 #size-cells = <0>; 226 226 status = "disabled"; ··· 232 232 }; 233 233 }; 234 234 235 - sata_phy6: sata_phy@412100 { 235 + sata_phy6: sata_phy@122100 { 236 236 compatible = "brcm,iproc-sr-sata-phy"; 237 - reg = <0x00412100 0x1000>; 237 + reg = <0x00122100 0x1000>; 238 238 reg-names = "phy"; 239 239 #address-cells = <1>; 240 240 #size-cells = <0>; ··· 246 246 }; 247 247 }; 248 248 249 - sata7: ahci@420000 { 249 + sata7: ahci@130000 { 250 250 compatible = "brcm,iproc-ahci", "generic-ahci"; 251 - reg = <0x00420000 0x1000>; 251 + reg = <0x00130000 0x1000>; 252 252 reg-names = "ahci"; 253 - interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>; 253 + interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>; 254 254 #address-cells = <1>; 255 255 #size-cells = <0>; 256 256 status = "disabled"; ··· 262 262 }; 263 263 }; 264 264 265 - sata_phy7: sata_phy@422100 { 265 + sata_phy7: sata_phy@132100 { 266 266 compatible = "brcm,iproc-sr-sata-phy"; 267 - reg = <0x00422100 0x1000>; 267 + reg = <0x00132100 0x1000>; 268 268 reg-names = "phy"; 269 269 #address-cells = <1>; 270 270 #size-cells = <0>;
+3
arch/arm64/include/asm/kvm_host.h
··· 75 75 76 76 /* Interrupt controller */ 77 77 struct vgic_dist vgic; 78 + 79 + /* Mandated version of PSCI */ 80 + u32 psci_version; 78 81 }; 79 82 80 83 #define KVM_NR_MEM_OBJS 40
+1 -1
arch/arm64/include/asm/module.h
··· 39 39 u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela, 40 40 Elf64_Sym *sym); 41 41 42 - u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val); 42 + u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val); 43 43 44 44 #ifdef CONFIG_RANDOMIZE_BASE 45 45 extern u64 module_alloc_base;
+2 -2
arch/arm64/include/asm/pgtable.h
··· 230 230 } 231 231 } 232 232 233 - extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); 233 + extern void __sync_icache_dcache(pte_t pteval); 234 234 235 235 /* 236 236 * PTE bits configuration in the presence of hardware Dirty Bit Management ··· 253 253 pte_t old_pte; 254 254 255 255 if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) 256 - __sync_icache_dcache(pte, addr); 256 + __sync_icache_dcache(pte); 257 257 258 258 /* 259 259 * If the existing pte is valid, check for potential race with
+6
arch/arm64/include/uapi/asm/kvm.h
··· 206 206 #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) 207 207 #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) 208 208 209 + /* KVM-as-firmware specific pseudo-registers */ 210 + #define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) 211 + #define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ 212 + KVM_REG_ARM_FW | ((r) & 0xffff)) 213 + #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) 214 + 209 215 /* Device Control API: ARM VGIC */ 210 216 #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 211 217 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+1
arch/arm64/kernel/cpufeature.c
··· 868 868 static const struct midr_range kpti_safe_list[] = { 869 869 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), 870 870 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), 871 + { /* sentinel */ } 871 872 }; 872 873 char const *str = "command line option"; 873 874
+1 -1
arch/arm64/kernel/module-plts.c
··· 43 43 } 44 44 45 45 #ifdef CONFIG_ARM64_ERRATUM_843419 46 - u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val) 46 + u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val) 47 47 { 48 48 struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : 49 49 &mod->arch.init;
+1 -1
arch/arm64/kernel/module.c
··· 215 215 insn &= ~BIT(31); 216 216 } else { 217 217 /* out of range for ADR -> emit a veneer */ 218 - val = module_emit_adrp_veneer(mod, place, val & ~0xfff); 218 + val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff); 219 219 if (!val) 220 220 return -ENOEXEC; 221 221 insn = aarch64_insn_gen_branch_imm((u64)place, val,
+10 -10
arch/arm64/kernel/ptrace.c
··· 25 25 #include <linux/sched/signal.h> 26 26 #include <linux/sched/task_stack.h> 27 27 #include <linux/mm.h> 28 + #include <linux/nospec.h> 28 29 #include <linux/smp.h> 29 30 #include <linux/ptrace.h> 30 31 #include <linux/user.h> ··· 250 249 251 250 switch (note_type) { 252 251 case NT_ARM_HW_BREAK: 253 - if (idx < ARM_MAX_BRP) 254 - bp = tsk->thread.debug.hbp_break[idx]; 252 + if (idx >= ARM_MAX_BRP) 253 + goto out; 254 + idx = array_index_nospec(idx, ARM_MAX_BRP); 255 + bp = tsk->thread.debug.hbp_break[idx]; 255 256 break; 256 257 case NT_ARM_HW_WATCH: 257 - if (idx < ARM_MAX_WRP) 258 - bp = tsk->thread.debug.hbp_watch[idx]; 258 + if (idx >= ARM_MAX_WRP) 259 + goto out; 260 + idx = array_index_nospec(idx, ARM_MAX_WRP); 261 + bp = tsk->thread.debug.hbp_watch[idx]; 259 262 break; 260 263 } 261 264 265 + out: 262 266 return bp; 263 267 } 264 268 ··· 1464 1458 { 1465 1459 int ret; 1466 1460 u32 kdata; 1467 - mm_segment_t old_fs = get_fs(); 1468 1461 1469 - set_fs(KERNEL_DS); 1470 1462 /* Watchpoint */ 1471 1463 if (num < 0) { 1472 1464 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata); ··· 1475 1471 } else { 1476 1472 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata); 1477 1473 } 1478 - set_fs(old_fs); 1479 1474 1480 1475 if (!ret) 1481 1476 ret = put_user(kdata, data); ··· 1487 1484 { 1488 1485 int ret; 1489 1486 u32 kdata = 0; 1490 - mm_segment_t old_fs = get_fs(); 1491 1487 1492 1488 if (num == 0) 1493 1489 return 0; ··· 1495 1493 if (ret) 1496 1494 return ret; 1497 1495 1498 - set_fs(KERNEL_DS); 1499 1496 if (num < 0) 1500 1497 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata); 1501 1498 else 1502 1499 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata); 1503 - set_fs(old_fs); 1504 1500 1505 1501 return ret; 1506 1502 }
+2 -1
arch/arm64/kernel/traps.c
··· 277 277 * If we were single stepping, we want to get the step exception after 278 278 * we return from the trap. 279 279 */ 280 - user_fastforward_single_step(current); 280 + if (user_mode(regs)) 281 + user_fastforward_single_step(current); 281 282 } 282 283 283 284 static LIST_HEAD(undef_hook);
+13 -1
arch/arm64/kvm/guest.c
··· 25 25 #include <linux/module.h> 26 26 #include <linux/vmalloc.h> 27 27 #include <linux/fs.h> 28 + #include <kvm/arm_psci.h> 28 29 #include <asm/cputype.h> 29 30 #include <linux/uaccess.h> 30 31 #include <asm/kvm.h> ··· 206 205 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 207 206 { 208 207 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu) 209 - + NUM_TIMER_REGS; 208 + + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS; 210 209 } 211 210 212 211 /** ··· 226 225 uindices++; 227 226 } 228 227 228 + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); 229 + if (ret) 230 + return ret; 231 + uindices += kvm_arm_get_fw_num_regs(vcpu); 232 + 229 233 ret = copy_timer_indices(vcpu, uindices); 230 234 if (ret) 231 235 return ret; ··· 249 243 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 250 244 return get_core_reg(vcpu, reg); 251 245 246 + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) 247 + return kvm_arm_get_fw_reg(vcpu, reg); 248 + 252 249 if (is_timer_reg(reg->id)) 253 250 return get_timer_reg(vcpu, reg); 254 251 ··· 267 258 /* Register group 16 means we set a core register. */ 268 259 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 269 260 return set_core_reg(vcpu, reg); 261 + 262 + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) 263 + return kvm_arm_set_fw_reg(vcpu, reg); 270 264 271 265 if (is_timer_reg(reg->id)) 272 266 return set_timer_reg(vcpu, reg);
+2 -4
arch/arm64/kvm/sys_regs.c
··· 996 996 997 997 if (id == SYS_ID_AA64PFR0_EL1) { 998 998 if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) 999 - pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n", 1000 - task_pid_nr(current)); 999 + kvm_debug("SVE unsupported for guests, suppressing\n"); 1001 1000 1002 1001 val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); 1003 1002 } else if (id == SYS_ID_AA64MMFR1_EL1) { 1004 1003 if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) 1005 - pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n", 1006 - task_pid_nr(current)); 1004 + kvm_debug("LORegions unsupported for guests, suppressing\n"); 1007 1005 1008 1006 val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); 1009 1007 }
+4
arch/arm64/lib/Makefile
··· 19 19 -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ 20 20 -fcall-saved-x18 -fomit-frame-pointer 21 21 CFLAGS_REMOVE_atomic_ll_sc.o := -pg 22 + GCOV_PROFILE_atomic_ll_sc.o := n 23 + KASAN_SANITIZE_atomic_ll_sc.o := n 24 + KCOV_INSTRUMENT_atomic_ll_sc.o := n 25 + UBSAN_SANITIZE_atomic_ll_sc.o := n 22 26 23 27 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
+1 -1
arch/arm64/mm/flush.c
··· 58 58 flush_ptrace_access(vma, page, uaddr, dst, len); 59 59 } 60 60 61 - void __sync_icache_dcache(pte_t pte, unsigned long addr) 61 + void __sync_icache_dcache(pte_t pte) 62 62 { 63 63 struct page *page = pte_page(pte); 64 64
+6
arch/hexagon/include/asm/io.h
··· 216 216 memcpy((void *) dst, src, count); 217 217 } 218 218 219 + static inline void memset_io(volatile void __iomem *addr, int value, 220 + size_t size) 221 + { 222 + memset((void __force *)addr, value, size); 223 + } 224 + 219 225 #define PCI_IO_ADDR (volatile void __iomem *) 220 226 221 227 /*
+1
arch/hexagon/lib/checksum.c
··· 199 199 memcpy(dst, src, len); 200 200 return csum_partial(dst, len, sum); 201 201 } 202 + EXPORT_SYMBOL(csum_partial_copy_nocheck);
+3
arch/parisc/Makefile
··· 123 123 124 124 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS) 125 125 126 + # Default kernel to build 127 + all: bzImage 128 + 126 129 zImage: vmlinuz 127 130 Image: vmlinux 128 131
+4 -3
arch/parisc/kernel/drivers.c
··· 448 448 * Checks all the children of @parent for a matching @id. If none 449 449 * found, it allocates a new device and returns it. 450 450 */ 451 - static struct parisc_device * alloc_tree_node(struct device *parent, char id) 451 + static struct parisc_device * __init alloc_tree_node( 452 + struct device *parent, char id) 452 453 { 453 454 struct match_id_data d = { 454 455 .id = id, ··· 826 825 * devices which are not physically connected (such as extra serial & 827 826 * keyboard ports). This problem is not yet solved. 828 827 */ 829 - static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, 830 - struct device *parent) 828 + static void __init walk_native_bus(unsigned long io_io_low, 829 + unsigned long io_io_high, struct device *parent) 831 830 { 832 831 int i, devices_found = 0; 833 832 unsigned long hpa = io_io_low;
+1 -1
arch/parisc/kernel/pci.c
··· 174 174 * pcibios_init_bridge() initializes cache line and default latency 175 175 * for pci controllers and pci-pci bridges 176 176 */ 177 - void __init pcibios_init_bridge(struct pci_dev *dev) 177 + void __ref pcibios_init_bridge(struct pci_dev *dev) 178 178 { 179 179 unsigned short bridge_ctl, bridge_ctl_new; 180 180
+1 -1
arch/parisc/kernel/time.c
··· 205 205 device_initcall(rtc_init); 206 206 #endif 207 207 208 - void read_persistent_clock(struct timespec *ts) 208 + void read_persistent_clock64(struct timespec64 *ts) 209 209 { 210 210 static struct pdc_tod tod_data; 211 211 if (pdc_tod_read(&tod_data) == 0) {
+11
arch/parisc/kernel/traps.c
··· 837 837 if (pdc_instr(&instr) == PDC_OK) 838 838 ivap[0] = instr; 839 839 840 + /* 841 + * Rules for the checksum of the HPMC handler: 842 + * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed 843 + * its own IVA). 844 + * 2. The word at IVA + 32 is nonzero. 845 + * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and 846 + * Address (IVA + 56) are word-aligned. 847 + * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of 848 + * the Length/4 words starting at Address is zero. 849 + */ 850 + 840 851 /* Compute Checksum for HPMC handler */ 841 852 length = os_hpmc_size; 842 853 ivap[7] = length;
+1 -1
arch/parisc/mm/init.c
··· 516 516 } 517 517 } 518 518 519 - void free_initmem(void) 519 + void __ref free_initmem(void) 520 520 { 521 521 unsigned long init_begin = (unsigned long)__init_begin; 522 522 unsigned long init_end = (unsigned long)__init_end;
+1 -1
arch/powerpc/include/asm/powernv.h
··· 15 15 extern void powernv_set_nmmu_ptcr(unsigned long ptcr); 16 16 extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, 17 17 unsigned long flags, 18 - struct npu_context *(*cb)(struct npu_context *, void *), 18 + void (*cb)(struct npu_context *, void *), 19 19 void *priv); 20 20 extern void pnv_npu2_destroy_context(struct npu_context *context, 21 21 struct pci_dev *gpdev);
+2 -5
arch/powerpc/kernel/mce_power.c
··· 441 441 if (pfn != ULONG_MAX) { 442 442 *phys_addr = 443 443 (pfn << PAGE_SHIFT); 444 - handled = 1; 445 444 } 446 445 } 447 446 } ··· 531 532 * kernel/exception-64s.h 532 533 */ 533 534 if (get_paca()->in_mce < MAX_MCE_DEPTH) 534 - if (!mce_find_instr_ea_and_pfn(regs, addr, 535 - phys_addr)) 536 - handled = 1; 535 + mce_find_instr_ea_and_pfn(regs, addr, phys_addr); 537 536 } 538 537 found = 1; 539 538 } ··· 569 572 const struct mce_ierror_table itable[]) 570 573 { 571 574 struct mce_error_info mce_err = { 0 }; 572 - uint64_t addr, phys_addr; 575 + uint64_t addr, phys_addr = ULONG_MAX; 573 576 uint64_t srr1 = regs->msr; 574 577 long handled; 575 578
+42 -7
arch/powerpc/kernel/smp.c
··· 566 566 #endif 567 567 568 568 #ifdef CONFIG_NMI_IPI 569 - static void stop_this_cpu(struct pt_regs *regs) 570 - #else 569 + static void nmi_stop_this_cpu(struct pt_regs *regs) 570 + { 571 + /* 572 + * This is a special case because it never returns, so the NMI IPI 573 + * handling would never mark it as done, which makes any later 574 + * smp_send_nmi_ipi() call spin forever. Mark it done now. 575 + * 576 + * IRQs are already hard disabled by the smp_handle_nmi_ipi. 577 + */ 578 + nmi_ipi_lock(); 579 + nmi_ipi_busy_count--; 580 + nmi_ipi_unlock(); 581 + 582 + /* Remove this CPU */ 583 + set_cpu_online(smp_processor_id(), false); 584 + 585 + spin_begin(); 586 + while (1) 587 + spin_cpu_relax(); 588 + } 589 + 590 + void smp_send_stop(void) 591 + { 592 + smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000); 593 + } 594 + 595 + #else /* CONFIG_NMI_IPI */ 596 + 571 597 static void stop_this_cpu(void *dummy) 572 - #endif 573 598 { 574 599 /* Remove this CPU */ 575 600 set_cpu_online(smp_processor_id(), false); ··· 607 582 608 583 void smp_send_stop(void) 609 584 { 610 - #ifdef CONFIG_NMI_IPI 611 - smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000); 612 - #else 585 + static bool stopped = false; 586 + 587 + /* 588 + * Prevent waiting on csd lock from a previous smp_send_stop. 589 + * This is racy, but in general callers try to do the right 590 + * thing and only fire off one smp_send_stop (e.g., see 591 + * kernel/panic.c) 592 + */ 593 + if (stopped) 594 + return; 595 + 596 + stopped = true; 597 + 613 598 smp_call_function(stop_this_cpu, NULL, 0); 614 - #endif 615 599 } 600 + #endif /* CONFIG_NMI_IPI */ 616 601 617 602 struct thread_info *current_set[NR_CPUS]; 618 603
+7
arch/powerpc/kvm/booke.c
··· 305 305 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); 306 306 } 307 307 308 + #ifdef CONFIG_ALTIVEC 309 + void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu) 310 + { 311 + kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL); 312 + } 313 + #endif 314 + 308 315 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) 309 316 { 310 317 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
+2
arch/powerpc/mm/mem.c
··· 133 133 start, start + size, rc); 134 134 return -EFAULT; 135 135 } 136 + flush_inval_dcache_range(start, start + size); 136 137 137 138 return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); 138 139 } ··· 160 159 161 160 /* Remove htab bolted mappings for this section of memory */ 162 161 start = (unsigned long)__va(start); 162 + flush_inval_dcache_range(start, start + size); 163 163 ret = remove_section_mapping(start, start + size); 164 164 165 165 /* Ensure all vmalloc mappings are flushed in case they also
-17
arch/powerpc/platforms/powernv/memtrace.c
··· 82 82 .open = simple_open, 83 83 }; 84 84 85 - static void flush_memory_region(u64 base, u64 size) 86 - { 87 - unsigned long line_size = ppc64_caches.l1d.size; 88 - u64 end = base + size; 89 - u64 addr; 90 - 91 - base = round_down(base, line_size); 92 - end = round_up(end, line_size); 93 - 94 - for (addr = base; addr < end; addr += line_size) 95 - asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory"); 96 - } 97 - 98 85 static int check_memblock_online(struct memory_block *mem, void *arg) 99 86 { 100 87 if (mem->state != MEM_ONLINE) ··· 118 131 119 132 walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE, 120 133 change_memblock_state); 121 - 122 - /* RCU grace period? */ 123 - flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT), 124 - nr_pages << PAGE_SHIFT); 125 134 126 135 lock_device_hotplug(); 127 136 remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
+73 -15
arch/powerpc/platforms/powernv/npu-dma.c
··· 34 34 #define npu_to_phb(x) container_of(x, struct pnv_phb, npu) 35 35 36 36 /* 37 + * spinlock to protect initialisation of an npu_context for a particular 38 + * mm_struct. 39 + */ 40 + static DEFINE_SPINLOCK(npu_context_lock); 41 + 42 + /* 43 + * When an address shootdown range exceeds this threshold we invalidate the 44 + * entire TLB on the GPU for the given PID rather than each specific address in 45 + * the range. 46 + */ 47 + #define ATSD_THRESHOLD (2*1024*1024) 48 + 49 + /* 37 50 * Other types of TCE cache invalidation are not functional in the 38 51 * hardware. 39 52 */ ··· 414 401 bool nmmu_flush; 415 402 416 403 /* Callback to stop translation requests on a given GPU */ 417 - struct npu_context *(*release_cb)(struct npu_context *, void *); 404 + void (*release_cb)(struct npu_context *context, void *priv); 418 405 419 406 /* 420 407 * Private pointer passed to the above callback for usage by ··· 684 671 struct npu_context *npu_context = mn_to_npu_context(mn); 685 672 unsigned long address; 686 673 687 - for (address = start; address < end; address += PAGE_SIZE) 688 - mmio_invalidate(npu_context, 1, address, false); 674 + if (end - start > ATSD_THRESHOLD) { 675 + /* 676 + * Just invalidate the entire PID if the address range is too 677 + * large. 678 + */ 679 + mmio_invalidate(npu_context, 0, 0, true); 680 + } else { 681 + for (address = start; address < end; address += PAGE_SIZE) 682 + mmio_invalidate(npu_context, 1, address, false); 689 683 690 - /* Do the flush only on the final addess == end */ 691 - mmio_invalidate(npu_context, 1, address, true); 684 + /* Do the flush only on the final addess == end */ 685 + mmio_invalidate(npu_context, 1, address, true); 686 + } 692 687 } 693 688 694 689 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { ··· 717 696 * Returns an error if there no contexts are currently available or a 718 697 * npu_context which should be passed to pnv_npu2_handle_fault(). 
719 698 * 720 - * mmap_sem must be held in write mode. 699 + * mmap_sem must be held in write mode and must not be called from interrupt 700 + * context. 721 701 */ 722 702 struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev, 723 703 unsigned long flags, 724 - struct npu_context *(*cb)(struct npu_context *, void *), 704 + void (*cb)(struct npu_context *, void *), 725 705 void *priv) 726 706 { 727 707 int rc; ··· 765 743 /* 766 744 * Setup the NPU context table for a particular GPU. These need to be 767 745 * per-GPU as we need the tables to filter ATSDs when there are no 768 - * active contexts on a particular GPU. 746 + * active contexts on a particular GPU. It is safe for these to be 747 + * called concurrently with destroy as the OPAL call takes appropriate 748 + * locks and refcounts on init/destroy. 769 749 */ 770 750 rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags, 771 751 PCI_DEVID(gpdev->bus->number, gpdev->devfn)); ··· 778 754 * We store the npu pci device so we can more easily get at the 779 755 * associated npus. 780 756 */ 757 + spin_lock(&npu_context_lock); 781 758 npu_context = mm->context.npu_context; 759 + if (npu_context) { 760 + if (npu_context->release_cb != cb || 761 + npu_context->priv != priv) { 762 + spin_unlock(&npu_context_lock); 763 + opal_npu_destroy_context(nphb->opal_id, mm->context.id, 764 + PCI_DEVID(gpdev->bus->number, 765 + gpdev->devfn)); 766 + return ERR_PTR(-EINVAL); 767 + } 768 + 769 + WARN_ON(!kref_get_unless_zero(&npu_context->kref)); 770 + } 771 + spin_unlock(&npu_context_lock); 772 + 782 773 if (!npu_context) { 774 + /* 775 + * We can set up these fields without holding the 776 + * npu_context_lock as the npu_context hasn't been returned to 777 + * the caller meaning it can't be destroyed. Parallel allocation 778 + * is protected against by mmap_sem. 
779 + */ 783 780 rc = -ENOMEM; 784 781 npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL); 785 782 if (npu_context) { ··· 819 774 } 820 775 821 776 mm->context.npu_context = npu_context; 822 - } else { 823 - WARN_ON(!kref_get_unless_zero(&npu_context->kref)); 824 777 } 825 778 826 779 npu_context->release_cb = cb; ··· 857 814 mm_context_remove_copro(npu_context->mm); 858 815 859 816 npu_context->mm->context.npu_context = NULL; 860 - mmu_notifier_unregister(&npu_context->mn, 861 - npu_context->mm); 862 - 863 - kfree(npu_context); 864 817 } 865 818 819 + /* 820 + * Destroy a context on the given GPU. May free the npu_context if it is no 821 + * longer active on any GPUs. Must not be called from interrupt context. 822 + */ 866 823 void pnv_npu2_destroy_context(struct npu_context *npu_context, 867 824 struct pci_dev *gpdev) 868 825 { 826 + int removed; 869 827 struct pnv_phb *nphb; 870 828 struct npu *npu; 871 829 struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0); ··· 888 844 WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL); 889 845 opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id, 890 846 PCI_DEVID(gpdev->bus->number, gpdev->devfn)); 891 - kref_put(&npu_context->kref, pnv_npu2_release_context); 847 + spin_lock(&npu_context_lock); 848 + removed = kref_put(&npu_context->kref, pnv_npu2_release_context); 849 + spin_unlock(&npu_context_lock); 850 + 851 + /* 852 + * We need to do this outside of pnv_npu2_release_context so that it is 853 + * outside the spinlock as mmu_notifier_destroy uses SRCU. 854 + */ 855 + if (removed) { 856 + mmu_notifier_unregister(&npu_context->mn, 857 + npu_context->mm); 858 + 859 + kfree(npu_context); 860 + } 861 + 892 862 } 893 863 EXPORT_SYMBOL(pnv_npu2_destroy_context); 894 864
+5 -3
arch/powerpc/platforms/powernv/opal-rtc.c
··· 48 48 49 49 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 50 50 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); 51 - if (rc == OPAL_BUSY_EVENT) 51 + if (rc == OPAL_BUSY_EVENT) { 52 + mdelay(OPAL_BUSY_DELAY_MS); 52 53 opal_poll_events(NULL); 53 - else if (rc == OPAL_BUSY) 54 - mdelay(10); 54 + } else if (rc == OPAL_BUSY) { 55 + mdelay(OPAL_BUSY_DELAY_MS); 56 + } 55 57 } 56 58 if (rc != OPAL_SUCCESS) 57 59 return 0;
+1 -1
arch/sparc/include/uapi/asm/oradax.h
··· 3 3 * 4 4 * This program is free software: you can redistribute it and/or modify 5 5 * it under the terms of the GNU General Public License as published by 6 - * the Free Software Foundation, either version 3 of the License, or 6 + * the Free Software Foundation, either version 2 of the License, or 7 7 * (at your option) any later version. 8 8 * 9 9 * This program is distributed in the hope that it will be useful,
+1 -1
arch/sparc/kernel/vio.c
··· 403 403 if (err) { 404 404 printk(KERN_ERR "VIO: Could not register device %s, err=%d\n", 405 405 dev_name(&vdev->dev), err); 406 - kfree(vdev); 406 + put_device(&vdev->dev); 407 407 return NULL; 408 408 } 409 409 if (vdev->dp)
+4
arch/x86/Kconfig
··· 52 52 select ARCH_HAS_DEVMEM_IS_ALLOWED 53 53 select ARCH_HAS_ELF_RANDOMIZE 54 54 select ARCH_HAS_FAST_MULTIPLIER 55 + select ARCH_HAS_FILTER_PGPROT 55 56 select ARCH_HAS_FORTIFY_SOURCE 56 57 select ARCH_HAS_GCOV_PROFILE_ALL 57 58 select ARCH_HAS_KCOV if X86_64 ··· 272 271 def_bool y 273 272 274 273 config ARCH_HAS_CACHE_LINE_SIZE 274 + def_bool y 275 + 276 + config ARCH_HAS_FILTER_PGPROT 275 277 def_bool y 276 278 277 279 config HAVE_SETUP_PER_CPU_AREA
+4 -4
arch/x86/entry/entry_64_compat.S
··· 84 84 pushq %rdx /* pt_regs->dx */ 85 85 pushq %rcx /* pt_regs->cx */ 86 86 pushq $-ENOSYS /* pt_regs->ax */ 87 - pushq $0 /* pt_regs->r8 = 0 */ 87 + pushq %r8 /* pt_regs->r8 */ 88 88 xorl %r8d, %r8d /* nospec r8 */ 89 - pushq $0 /* pt_regs->r9 = 0 */ 89 + pushq %r9 /* pt_regs->r9 */ 90 90 xorl %r9d, %r9d /* nospec r9 */ 91 - pushq $0 /* pt_regs->r10 = 0 */ 91 + pushq %r10 /* pt_regs->r10 */ 92 92 xorl %r10d, %r10d /* nospec r10 */ 93 - pushq $0 /* pt_regs->r11 = 0 */ 93 + pushq %r11 /* pt_regs->r11 */ 94 94 xorl %r11d, %r11d /* nospec r11 */ 95 95 pushq %rbx /* pt_regs->rbx */ 96 96 xorl %ebx, %ebx /* nospec rbx */
+6 -3
arch/x86/events/intel/core.c
··· 3339 3339 3340 3340 cpuc->lbr_sel = NULL; 3341 3341 3342 - flip_smm_bit(&x86_pmu.attr_freeze_on_smi); 3342 + if (x86_pmu.version > 1) 3343 + flip_smm_bit(&x86_pmu.attr_freeze_on_smi); 3343 3344 3344 3345 if (!cpuc->shared_regs) 3345 3346 return; ··· 3503 3502 .cpu_dying = intel_pmu_cpu_dying, 3504 3503 }; 3505 3504 3505 + static struct attribute *intel_pmu_attrs[]; 3506 + 3506 3507 static __initconst const struct x86_pmu intel_pmu = { 3507 3508 .name = "Intel", 3508 3509 .handle_irq = intel_pmu_handle_irq, ··· 3535 3532 3536 3533 .format_attrs = intel_arch3_formats_attr, 3537 3534 .events_sysfs_show = intel_event_sysfs_show, 3535 + 3536 + .attrs = intel_pmu_attrs, 3538 3537 3539 3538 .cpu_prepare = intel_pmu_cpu_prepare, 3540 3539 .cpu_starting = intel_pmu_cpu_starting, ··· 3916 3911 3917 3912 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters); 3918 3913 3919 - 3920 - x86_pmu.attrs = intel_pmu_attrs; 3921 3914 /* 3922 3915 * Quirk: v2 perfmon does not report fixed-purpose events, so 3923 3916 * assume at least 3 events, when not running in a hypervisor:
+1
arch/x86/include/asm/cpufeatures.h
··· 320 320 #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ 321 321 #define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ 322 322 #define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ 323 + #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ 323 324 324 325 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ 325 326 #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
-7
arch/x86/include/asm/irq_vectors.h
··· 34 34 * (0x80 is the syscall vector, 0x30-0x3f are for ISA) 35 35 */ 36 36 #define FIRST_EXTERNAL_VECTOR 0x20 37 - /* 38 - * We start allocating at 0x21 to spread out vectors evenly between 39 - * priority levels. (0x80 is the syscall vector) 40 - */ 41 - #define VECTOR_OFFSET_START 1 42 37 43 38 /* 44 39 * Reserve the lowest usable vector (and hence lowest priority) 0x20 for ··· 113 118 #else 114 119 #define FIRST_SYSTEM_VECTOR NR_VECTORS 115 120 #endif 116 - 117 - #define FPU_IRQ 13 118 121 119 122 /* 120 123 * Size the maximum number of interrupts.
+1 -1
arch/x86/include/asm/jailhouse_para.h
··· 1 - /* SPDX-License-Identifier: GPL2.0 */ 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 2 3 3 /* 4 4 * Jailhouse paravirt detection
+5
arch/x86/include/asm/pgtable.h
··· 601 601 602 602 #define canon_pgprot(p) __pgprot(massage_pgprot(p)) 603 603 604 + static inline pgprot_t arch_filter_pgprot(pgprot_t prot) 605 + { 606 + return canon_pgprot(prot); 607 + } 608 + 604 609 static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, 605 610 enum page_cache_mode pcm, 606 611 enum page_cache_mode new_pcm)
+4 -4
arch/x86/include/asm/pgtable_64_types.h
··· 105 105 #define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) 106 106 #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) 107 107 108 - #define __VMALLOC_BASE_L4 0xffffc90000000000 109 - #define __VMALLOC_BASE_L5 0xffa0000000000000 108 + #define __VMALLOC_BASE_L4 0xffffc90000000000UL 109 + #define __VMALLOC_BASE_L5 0xffa0000000000000UL 110 110 111 111 #define VMALLOC_SIZE_TB_L4 32UL 112 112 #define VMALLOC_SIZE_TB_L5 12800UL 113 113 114 - #define __VMEMMAP_BASE_L4 0xffffea0000000000 115 - #define __VMEMMAP_BASE_L5 0xffd4000000000000 114 + #define __VMEMMAP_BASE_L4 0xffffea0000000000UL 115 + #define __VMEMMAP_BASE_L5 0xffd4000000000000UL 116 116 117 117 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT 118 118 # define VMALLOC_START vmalloc_base
+31
arch/x86/include/uapi/asm/msgbuf.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + #ifndef __ASM_X64_MSGBUF_H 3 + #define __ASM_X64_MSGBUF_H 4 + 5 + #if !defined(__x86_64__) || !defined(__ILP32__) 1 6 #include <asm-generic/msgbuf.h> 7 + #else 8 + /* 9 + * The msqid64_ds structure for x86 architecture with x32 ABI. 10 + * 11 + * On x86-32 and x86-64 we can just use the generic definition, but 12 + * x32 uses the same binary layout as x86_64, which is different 13 + * from other 32-bit architectures. 14 + */ 15 + 16 + struct msqid64_ds { 17 + struct ipc64_perm msg_perm; 18 + __kernel_time_t msg_stime; /* last msgsnd time */ 19 + __kernel_time_t msg_rtime; /* last msgrcv time */ 20 + __kernel_time_t msg_ctime; /* last change time */ 21 + __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */ 22 + __kernel_ulong_t msg_qnum; /* number of messages in queue */ 23 + __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */ 24 + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ 25 + __kernel_pid_t msg_lrpid; /* last receive pid */ 26 + __kernel_ulong_t __unused4; 27 + __kernel_ulong_t __unused5; 28 + }; 29 + 30 + #endif 31 + 32 + #endif /* __ASM_X64_MSGBUF_H */
+42
arch/x86/include/uapi/asm/shmbuf.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 + #ifndef __ASM_X86_SHMBUF_H 3 + #define __ASM_X86_SHMBUF_H 4 + 5 + #if !defined(__x86_64__) || !defined(__ILP32__) 1 6 #include <asm-generic/shmbuf.h> 7 + #else 8 + /* 9 + * The shmid64_ds structure for x86 architecture with x32 ABI. 10 + * 11 + * On x86-32 and x86-64 we can just use the generic definition, but 12 + * x32 uses the same binary layout as x86_64, which is different 13 + * from other 32-bit architectures. 14 + */ 15 + 16 + struct shmid64_ds { 17 + struct ipc64_perm shm_perm; /* operation perms */ 18 + size_t shm_segsz; /* size of segment (bytes) */ 19 + __kernel_time_t shm_atime; /* last attach time */ 20 + __kernel_time_t shm_dtime; /* last detach time */ 21 + __kernel_time_t shm_ctime; /* last change time */ 22 + __kernel_pid_t shm_cpid; /* pid of creator */ 23 + __kernel_pid_t shm_lpid; /* pid of last operator */ 24 + __kernel_ulong_t shm_nattch; /* no. of current attaches */ 25 + __kernel_ulong_t __unused4; 26 + __kernel_ulong_t __unused5; 27 + }; 28 + 29 + struct shminfo64 { 30 + __kernel_ulong_t shmmax; 31 + __kernel_ulong_t shmmin; 32 + __kernel_ulong_t shmmni; 33 + __kernel_ulong_t shmseg; 34 + __kernel_ulong_t shmall; 35 + __kernel_ulong_t __unused1; 36 + __kernel_ulong_t __unused2; 37 + __kernel_ulong_t __unused3; 38 + __kernel_ulong_t __unused4; 39 + }; 40 + 41 + #endif 42 + 43 + #endif /* __ASM_X86_SHMBUF_H */
+3
arch/x86/kernel/cpu/intel.c
··· 835 835 { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, 836 836 { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, 837 837 { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, 838 + { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" }, 839 + { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" }, 840 + { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" }, 838 841 { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, 839 842 { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, 840 843 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
+2 -4
arch/x86/kernel/cpu/microcode/core.c
··· 564 564 apply_microcode_local(&err); 565 565 spin_unlock(&update_lock); 566 566 567 + /* siblings return UCODE_OK because their engine got updated already */ 567 568 if (err > UCODE_NFOUND) { 568 569 pr_warn("Error reloading microcode on CPU %d\n", cpu); 569 - return -1; 570 - /* siblings return UCODE_OK because their engine got updated already */ 570 + ret = -1; 571 571 } else if (err == UCODE_UPDATED || err == UCODE_OK) { 572 572 ret = 1; 573 - } else { 574 - return ret; 575 573 } 576 574 577 575 /*
-2
arch/x86/kernel/cpu/microcode/intel.c
··· 485 485 */ 486 486 static void save_mc_for_early(u8 *mc, unsigned int size) 487 487 { 488 - #ifdef CONFIG_HOTPLUG_CPU 489 488 /* Synchronization during CPU hotplug. */ 490 489 static DEFINE_MUTEX(x86_cpu_microcode_mutex); 491 490 ··· 494 495 show_saved_mc(); 495 496 496 497 mutex_unlock(&x86_cpu_microcode_mutex); 497 - #endif 498 498 } 499 499 500 500 static bool load_builtin_intel_microcode(struct cpio_data *cp)
+1 -1
arch/x86/kernel/jailhouse.c
··· 1 - // SPDX-License-Identifier: GPL2.0 1 + // SPDX-License-Identifier: GPL-2.0 2 2 /* 3 3 * Jailhouse paravirt_ops implementation 4 4 *
+6
arch/x86/kernel/setup.c
··· 50 50 #include <linux/init_ohci1394_dma.h> 51 51 #include <linux/kvm_para.h> 52 52 #include <linux/dma-contiguous.h> 53 + #include <xen/xen.h> 53 54 54 55 #include <linux/errno.h> 55 56 #include <linux/kernel.h> ··· 533 532 if (ret != 0 || crash_size <= 0) 534 533 return; 535 534 high = true; 535 + } 536 + 537 + if (xen_pv_domain()) { 538 + pr_info("Ignoring crashkernel for a Xen PV domain\n"); 539 + return; 536 540 } 537 541 538 542 /* 0 means: find the address automatically */
+2
arch/x86/kernel/smpboot.c
··· 1571 1571 void *mwait_ptr; 1572 1572 int i; 1573 1573 1574 + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) 1575 + return; 1574 1576 if (!this_cpu_has(X86_FEATURE_MWAIT)) 1575 1577 return; 1576 1578 if (!this_cpu_has(X86_FEATURE_CLFLUSH))
+4 -10
arch/x86/kvm/vmx.c
··· 4544 4544 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa); 4545 4545 } 4546 4546 4547 - static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) 4548 - { 4549 - if (enable_ept) 4550 - vmx_flush_tlb(vcpu, true); 4551 - } 4552 - 4553 4547 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) 4554 4548 { 4555 4549 ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; ··· 9272 9278 } else { 9273 9279 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; 9274 9280 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; 9275 - vmx_flush_tlb_ept_only(vcpu); 9281 + vmx_flush_tlb(vcpu, true); 9276 9282 } 9277 9283 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); 9278 9284 ··· 9300 9306 !nested_cpu_has2(get_vmcs12(&vmx->vcpu), 9301 9307 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 9302 9308 vmcs_write64(APIC_ACCESS_ADDR, hpa); 9303 - vmx_flush_tlb_ept_only(vcpu); 9309 + vmx_flush_tlb(vcpu, true); 9304 9310 } 9305 9311 } 9306 9312 ··· 11214 11220 } 11215 11221 } else if (nested_cpu_has2(vmcs12, 11216 11222 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 11217 - vmx_flush_tlb_ept_only(vcpu); 11223 + vmx_flush_tlb(vcpu, true); 11218 11224 } 11219 11225 11220 11226 /* ··· 12067 12073 } else if (!nested_cpu_has_ept(vmcs12) && 12068 12074 nested_cpu_has2(vmcs12, 12069 12075 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { 12070 - vmx_flush_tlb_ept_only(vcpu); 12076 + vmx_flush_tlb(vcpu, true); 12071 12077 } 12072 12078 12073 12079 /* This is needed for same reason as it was needed in prepare_vmcs02 */
-7
arch/x86/kvm/x86.h
··· 302 302 __rem; \ 303 303 }) 304 304 305 - #define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0) 306 - #define KVM_X86_DISABLE_EXITS_HTL (1 << 1) 307 - #define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) 308 - #define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \ 309 - KVM_X86_DISABLE_EXITS_HTL | \ 310 - KVM_X86_DISABLE_EXITS_PAUSE) 311 - 312 305 static inline bool kvm_mwait_in_guest(struct kvm *kvm) 313 306 { 314 307 return kvm->arch.mwait_in_guest;
+36 -16
arch/x86/mm/pageattr.c
··· 93 93 static inline void split_page_count(int level) { } 94 94 #endif 95 95 96 + static inline int 97 + within(unsigned long addr, unsigned long start, unsigned long end) 98 + { 99 + return addr >= start && addr < end; 100 + } 101 + 102 + static inline int 103 + within_inclusive(unsigned long addr, unsigned long start, unsigned long end) 104 + { 105 + return addr >= start && addr <= end; 106 + } 107 + 96 108 #ifdef CONFIG_X86_64 97 109 98 110 static inline unsigned long highmap_start_pfn(void) ··· 118 106 return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT; 119 107 } 120 108 109 + static bool __cpa_pfn_in_highmap(unsigned long pfn) 110 + { 111 + /* 112 + * Kernel text has an alias mapping at a high address, known 113 + * here as "highmap". 114 + */ 115 + return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn()); 116 + } 117 + 118 + #else 119 + 120 + static bool __cpa_pfn_in_highmap(unsigned long pfn) 121 + { 122 + /* There is no highmap on 32-bit */ 123 + return false; 124 + } 125 + 121 126 #endif 122 - 123 - static inline int 124 - within(unsigned long addr, unsigned long start, unsigned long end) 125 - { 126 - return addr >= start && addr < end; 127 - } 128 - 129 - static inline int 130 - within_inclusive(unsigned long addr, unsigned long start, unsigned long end) 131 - { 132 - return addr >= start && addr <= end; 133 - } 134 127 135 128 /* 136 129 * Flushing functions ··· 189 172 190 173 static void cpa_flush_all(unsigned long cache) 191 174 { 192 - BUG_ON(irqs_disabled()); 175 + BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); 193 176 194 177 on_each_cpu(__cpa_flush_all, (void *) cache, 1); 195 178 } ··· 253 236 unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ 254 237 #endif 255 238 256 - BUG_ON(irqs_disabled()); 239 + BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); 257 240 258 241 on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1); 259 242 ··· 1200 1183 cpa->numpages = 1; 1201 1184 cpa->pfn = 
__pa(vaddr) >> PAGE_SHIFT; 1202 1185 return 0; 1186 + 1187 + } else if (__cpa_pfn_in_highmap(cpa->pfn)) { 1188 + /* Faults in the highmap are OK, so do not warn: */ 1189 + return -EFAULT; 1203 1190 } else { 1204 1191 WARN(1, KERN_WARNING "CPA: called for zero pte. " 1205 1192 "vaddr = %lx cpa->vaddr = %lx\n", vaddr, ··· 1356 1335 * to touch the high mapped kernel as well: 1357 1336 */ 1358 1337 if (!within(vaddr, (unsigned long)_text, _brk_end) && 1359 - within_inclusive(cpa->pfn, highmap_start_pfn(), 1360 - highmap_end_pfn())) { 1338 + __cpa_pfn_in_highmap(cpa->pfn)) { 1361 1339 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + 1362 1340 __START_KERNEL_map - phys_base; 1363 1341 alias_cpa = *cpa;
+23 -3
arch/x86/mm/pti.c
··· 421 421 if (boot_cpu_has(X86_FEATURE_K8)) 422 422 return false; 423 423 424 + /* 425 + * RANDSTRUCT derives its hardening benefits from the 426 + * attacker's lack of knowledge about the layout of kernel 427 + * data structures. Keep the kernel image non-global in 428 + * cases where RANDSTRUCT is in use to help keep the layout a 429 + * secret. 430 + */ 431 + if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT)) 432 + return false; 433 + 424 434 return true; 425 435 } 426 436 ··· 440 430 */ 441 431 void pti_clone_kernel_text(void) 442 432 { 433 + /* 434 + * rodata is part of the kernel image and is normally 435 + * readable on the filesystem or on the web. But, do not 436 + * clone the areas past rodata, they might contain secrets. 437 + */ 443 438 unsigned long start = PFN_ALIGN(_text); 444 - unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE); 439 + unsigned long end = (unsigned long)__end_rodata_hpage_align; 445 440 446 441 if (!pti_kernel_image_global_ok()) 447 442 return; 448 443 444 + pr_debug("mapping partial kernel image into user address space\n"); 445 + 446 + /* 447 + * Note that this will undo _some_ of the work that 448 + * pti_set_kernel_image_nonglobal() did to clear the 449 + * global bit. 450 + */ 449 451 pti_clone_pmds(start, end, _PAGE_RW); 450 452 } 451 453 ··· 479 457 480 458 if (pti_kernel_image_global_ok()) 481 459 return; 482 - 483 - pr_debug("set kernel image non-global\n"); 484 460 485 461 set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT); 486 462 }
+14 -4
arch/x86/net/bpf_jit_comp.c
··· 1027 1027 break; 1028 1028 1029 1029 case BPF_JMP | BPF_JA: 1030 - jmp_offset = addrs[i + insn->off] - addrs[i]; 1030 + if (insn->off == -1) 1031 + /* -1 jmp instructions will always jump 1032 + * backwards two bytes. Explicitly handling 1033 + * this case avoids wasting too many passes 1034 + * when there are long sequences of replaced 1035 + * dead code. 1036 + */ 1037 + jmp_offset = -2; 1038 + else 1039 + jmp_offset = addrs[i + insn->off] - addrs[i]; 1040 + 1031 1041 if (!jmp_offset) 1032 1042 /* optimize out nop jumps */ 1033 1043 break; ··· 1236 1226 for (pass = 0; pass < 20 || image; pass++) { 1237 1227 proglen = do_jit(prog, addrs, image, oldproglen, &ctx); 1238 1228 if (proglen <= 0) { 1229 + out_image: 1239 1230 image = NULL; 1240 1231 if (header) 1241 1232 bpf_jit_binary_free(header); ··· 1247 1236 if (proglen != oldproglen) { 1248 1237 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 1249 1238 proglen, oldproglen); 1250 - prog = orig_prog; 1251 - goto out_addrs; 1239 + goto out_image; 1252 1240 } 1253 1241 break; 1254 1242 } ··· 1283 1273 prog = orig_prog; 1284 1274 } 1285 1275 1286 - if (!prog->is_func || extra_pass) { 1276 + if (!image || !prog->is_func || extra_pass) { 1287 1277 out_addrs: 1288 1278 kfree(addrs); 1289 1279 kfree(jit_data);
+8 -3
crypto/api.c
··· 204 204 205 205 down_read(&crypto_alg_sem); 206 206 alg = __crypto_alg_lookup(name, type | test, mask | test); 207 - if (!alg && test) 208 - alg = __crypto_alg_lookup(name, type, mask) ? 209 - ERR_PTR(-ELIBBAD) : NULL; 207 + if (!alg && test) { 208 + alg = __crypto_alg_lookup(name, type, mask); 209 + if (alg && !crypto_is_larval(alg)) { 210 + /* Test failed */ 211 + crypto_mod_put(alg); 212 + alg = ERR_PTR(-ELIBBAD); 213 + } 214 + } 210 215 up_read(&crypto_alg_sem); 211 216 212 217 return alg;
+2
crypto/drbg.c
··· 1134 1134 if (!drbg) 1135 1135 return; 1136 1136 kzfree(drbg->Vbuf); 1137 + drbg->Vbuf = NULL; 1137 1138 drbg->V = NULL; 1138 1139 kzfree(drbg->Cbuf); 1140 + drbg->Cbuf = NULL; 1139 1141 drbg->C = NULL; 1140 1142 kzfree(drbg->scratchpadbuf); 1141 1143 drbg->scratchpadbuf = NULL;
+11 -6
drivers/amba/bus.c
··· 69 69 struct device_attribute *attr, char *buf) 70 70 { 71 71 struct amba_device *dev = to_amba_device(_dev); 72 + ssize_t len; 72 73 73 - if (!dev->driver_override) 74 - return 0; 75 - 76 - return sprintf(buf, "%s\n", dev->driver_override); 74 + device_lock(_dev); 75 + len = sprintf(buf, "%s\n", dev->driver_override); 76 + device_unlock(_dev); 77 + return len; 77 78 } 78 79 79 80 static ssize_t driver_override_store(struct device *_dev, ··· 82 81 const char *buf, size_t count) 83 82 { 84 83 struct amba_device *dev = to_amba_device(_dev); 85 - char *driver_override, *old = dev->driver_override, *cp; 84 + char *driver_override, *old, *cp; 86 85 87 - if (count > PATH_MAX) 86 + /* We need to keep extra room for a newline */ 87 + if (count >= (PAGE_SIZE - 1)) 88 88 return -EINVAL; 89 89 90 90 driver_override = kstrndup(buf, count, GFP_KERNEL); ··· 96 94 if (cp) 97 95 *cp = '\0'; 98 96 97 + device_lock(_dev); 98 + old = dev->driver_override; 99 99 if (strlen(driver_override)) { 100 100 dev->driver_override = driver_override; 101 101 } else { 102 102 kfree(driver_override); 103 103 dev->driver_override = NULL; 104 104 } 105 + device_unlock(_dev); 105 106 106 107 kfree(old); 107 108
+8
drivers/android/binder.c
··· 2839 2839 else 2840 2840 return_error = BR_DEAD_REPLY; 2841 2841 mutex_unlock(&context->context_mgr_node_lock); 2842 + if (target_node && target_proc == proc) { 2843 + binder_user_error("%d:%d got transaction to context manager from process owning it\n", 2844 + proc->pid, thread->pid); 2845 + return_error = BR_FAILED_REPLY; 2846 + return_error_param = -EINVAL; 2847 + return_error_line = __LINE__; 2848 + goto err_invalid_target_handle; 2849 + } 2842 2850 } 2843 2851 if (!target_node) { 2844 2852 /*
+2 -2
drivers/base/firmware_loader/fallback.c
··· 537 537 } 538 538 539 539 /** 540 - * fw_load_sysfs_fallback - load a firmware via the syfs fallback mechanism 541 - * @fw_sysfs: firmware syfs information for the firmware to load 540 + * fw_load_sysfs_fallback - load a firmware via the sysfs fallback mechanism 541 + * @fw_sysfs: firmware sysfs information for the firmware to load 542 542 * @opt_flags: flags of options, FW_OPT_* 543 543 * @timeout: timeout to wait for the load 544 544 *
+1 -1
drivers/base/firmware_loader/fallback.h
··· 6 6 #include <linux/device.h> 7 7 8 8 /** 9 - * struct firmware_fallback_config - firmware fallback configuratioon settings 9 + * struct firmware_fallback_config - firmware fallback configuration settings 10 10 * 11 11 * Helps describe and fine tune the fallback mechanism. 12 12 *
+1
drivers/bus/Kconfig
··· 33 33 bool "Support for ISA I/O space on HiSilicon Hip06/7" 34 34 depends on ARM64 && (ARCH_HISI || COMPILE_TEST) 35 35 select INDIRECT_PIO 36 + select MFD_CORE if ACPI 36 37 help 37 38 Driver to enable I/O access to devices attached to the Low Pin 38 39 Count bus on the HiSilicon Hip06/7 SoC.
+11 -3
drivers/cpufreq/powernv-cpufreq.c
··· 679 679 680 680 if (!spin_trylock(&gpstates->gpstate_lock)) 681 681 return; 682 + /* 683 + * If the timer has migrated to the different cpu then bring 684 + * it back to one of the policy->cpus 685 + */ 686 + if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) { 687 + gpstates->timer.expires = jiffies + msecs_to_jiffies(1); 688 + add_timer_on(&gpstates->timer, cpumask_first(policy->cpus)); 689 + spin_unlock(&gpstates->gpstate_lock); 690 + return; 691 + } 682 692 683 693 /* 684 694 * If PMCR was last updated was using fast_swtich then ··· 728 718 if (gpstate_idx != gpstates->last_lpstate_idx) 729 719 queue_gpstate_timer(gpstates); 730 720 721 + set_pstate(&freq_data); 731 722 spin_unlock(&gpstates->gpstate_lock); 732 - 733 - /* Timer may get migrated to a different cpu on cpu hot unplug */ 734 - smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); 735 723 } 736 724 737 725 /*
+1 -1
drivers/firmware/arm_scmi/clock.c
··· 284 284 struct clock_info *ci = handle->clk_priv; 285 285 struct scmi_clock_info *clk = ci->clk + clk_id; 286 286 287 - if (!clk->name || !clk->name[0]) 287 + if (!clk->name[0]) 288 288 return NULL; 289 289 290 290 return clk;
+1 -1
drivers/fpga/altera-ps-spi.c
··· 249 249 250 250 conf->data = of_id->data; 251 251 conf->spi = spi; 252 - conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH); 252 + conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW); 253 253 if (IS_ERR(conf->config)) { 254 254 dev_err(&spi->dev, "Failed to get config gpio: %ld\n", 255 255 PTR_ERR(conf->config));
+5 -2
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 1459 1459 static const u32 vgpr_init_regs[] = 1460 1460 { 1461 1461 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, 1462 - mmCOMPUTE_RESOURCE_LIMITS, 0, 1462 + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ 1463 1463 mmCOMPUTE_NUM_THREAD_X, 256*4, 1464 1464 mmCOMPUTE_NUM_THREAD_Y, 1, 1465 1465 mmCOMPUTE_NUM_THREAD_Z, 1, 1466 + mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */ 1466 1467 mmCOMPUTE_PGM_RSRC2, 20, 1467 1468 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1468 1469 mmCOMPUTE_USER_DATA_1, 0xedcedc01, ··· 1480 1479 static const u32 sgpr1_init_regs[] = 1481 1480 { 1482 1481 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, 1483 - mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, 1482 + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ 1484 1483 mmCOMPUTE_NUM_THREAD_X, 256*5, 1485 1484 mmCOMPUTE_NUM_THREAD_Y, 1, 1486 1485 mmCOMPUTE_NUM_THREAD_Z, 1, 1486 + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ 1487 1487 mmCOMPUTE_PGM_RSRC2, 20, 1488 1488 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1489 1489 mmCOMPUTE_USER_DATA_1, 0xedcedc01, ··· 1505 1503 mmCOMPUTE_NUM_THREAD_X, 256*5, 1506 1504 mmCOMPUTE_NUM_THREAD_Y, 1, 1507 1505 mmCOMPUTE_NUM_THREAD_Z, 1, 1506 + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ 1508 1507 mmCOMPUTE_PGM_RSRC2, 20, 1509 1508 mmCOMPUTE_USER_DATA_0, 0xedcedc00, 1510 1509 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
+1
drivers/gpu/drm/amd/amdkfd/Kconfig
··· 6 6 tristate "HSA kernel driver for AMD GPU devices" 7 7 depends on DRM_AMDGPU && X86_64 8 8 imply AMD_IOMMU_V2 9 + select MMU_NOTIFIER 9 10 help 10 11 Enable this if you want to use HSA features on AMD GPU devices.
+9 -8
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 749 749 struct timespec64 time; 750 750 751 751 dev = kfd_device_by_id(args->gpu_id); 752 - if (dev == NULL) 753 - return -EINVAL; 754 - 755 - /* Reading GPU clock counter from KGD */ 756 - args->gpu_clock_counter = 757 - dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); 752 + if (dev) 753 + /* Reading GPU clock counter from KGD */ 754 + args->gpu_clock_counter = 755 + dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); 756 + else 757 + /* Node without GPU resource */ 758 + args->gpu_clock_counter = 0; 758 759 759 760 /* No access to rdtsc. Using raw monotonic time */ 760 761 getrawmonotonic64(&time); ··· 1148 1147 return ret; 1149 1148 } 1150 1149 1151 - bool kfd_dev_is_large_bar(struct kfd_dev *dev) 1150 + static bool kfd_dev_is_large_bar(struct kfd_dev *dev) 1152 1151 { 1153 1152 struct kfd_local_mem_info mem_info; 1154 1153 ··· 1422 1421 1423 1422 pdd = kfd_get_process_device_data(dev, p); 1424 1423 if (!pdd) { 1425 - err = PTR_ERR(pdd); 1424 + err = -EINVAL; 1426 1425 goto bind_process_to_device_failed; 1427 1426 } 1428 1427
+9 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 4557 4557 struct amdgpu_dm_connector *aconnector = NULL; 4558 4558 struct drm_connector_state *new_con_state = NULL; 4559 4559 struct dm_connector_state *dm_conn_state = NULL; 4560 + struct drm_plane_state *new_plane_state = NULL; 4560 4561 4561 4562 new_stream = NULL; 4562 4563 4563 4564 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 4564 4565 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 4565 4566 acrtc = to_amdgpu_crtc(crtc); 4567 + 4568 + new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); 4569 + 4570 + if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { 4571 + ret = -EINVAL; 4572 + goto fail; 4573 + } 4566 4574 4567 4575 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); 4568 4576 ··· 4768 4760 if (!dm_old_crtc_state->stream) 4769 4761 continue; 4770 4762 4771 - DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n", 4763 + DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", 4772 4764 plane->base.id, old_plane_crtc->base.id); 4773 4765 4774 4766 if (!dc_remove_plane_from_context(
+3 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
··· 329 329 { 330 330 int src; 331 331 struct irq_list_head *lh; 332 + unsigned long irq_table_flags; 332 333 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); 333 - 334 334 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { 335 - 335 + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 336 336 /* The handler was removed from the table, 337 337 * it means it is safe to flush all the 'work' 338 338 * (because no code can schedule a new one). */ 339 339 lh = &adev->dm.irq_handler_list_low_tab[src]; 340 + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); 340 341 flush_work(&lh->work); 341 342 } 342 343 }
+22 -32
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 161 161 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); 162 162 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; 163 163 164 + if (amdgpu_dm_connector->edid) { 165 + kfree(amdgpu_dm_connector->edid); 166 + amdgpu_dm_connector->edid = NULL; 167 + } 168 + 164 169 drm_encoder_cleanup(&amdgpu_encoder->base); 165 170 kfree(amdgpu_encoder); 166 171 drm_connector_cleanup(connector); ··· 186 181 void dm_dp_mst_dc_sink_create(struct drm_connector *connector) 187 182 { 188 183 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); 189 - struct edid *edid; 190 184 struct dc_sink *dc_sink; 191 185 struct dc_sink_init_data init_params = { 192 186 .link = aconnector->dc_link, 193 187 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; 188 + 189 + /* FIXME none of this is safe. we shouldn't touch aconnector here in 190 + * atomic_check 191 + */ 194 192 195 193 /* 196 194 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists ··· 201 193 if (!aconnector->port || !aconnector->port->aux.ddc.algo) 202 194 return; 203 195 204 - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 205 - 206 - if (!edid) { 207 - drm_mode_connector_update_edid_property( 208 - &aconnector->base, 209 - NULL); 210 - return; 211 - } 212 - 213 - aconnector->edid = edid; 196 + ASSERT(aconnector->edid); 214 197 215 198 dc_sink = dc_link_add_remote_sink( 216 199 aconnector->dc_link, ··· 214 215 215 216 amdgpu_dm_add_sink_to_freesync_module( 216 217 connector, aconnector->edid); 217 - 218 - drm_mode_connector_update_edid_property( 219 - &aconnector->base, aconnector->edid); 220 218 } 221 219 222 220 static int dm_dp_mst_get_modes(struct drm_connector *connector) ··· 226 230 227 231 if (!aconnector->edid) { 228 232 struct edid *edid; 229 - struct dc_sink *dc_sink; 230 - struct dc_sink_init_data init_params = { 231 - .link = aconnector->dc_link, 232 - .sink_signal = 
SIGNAL_TYPE_DISPLAY_PORT_MST }; 233 233 edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); 234 234 235 235 if (!edid) { ··· 236 244 } 237 245 238 246 aconnector->edid = edid; 247 + } 239 248 249 + if (!aconnector->dc_sink) { 250 + struct dc_sink *dc_sink; 251 + struct dc_sink_init_data init_params = { 252 + .link = aconnector->dc_link, 253 + .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; 240 254 dc_sink = dc_link_add_remote_sink( 241 255 aconnector->dc_link, 242 - (uint8_t *)edid, 243 - (edid->extensions + 1) * EDID_LENGTH, 256 + (uint8_t *)aconnector->edid, 257 + (aconnector->edid->extensions + 1) * EDID_LENGTH, 244 258 &init_params); 245 259 246 260 dc_sink->priv = aconnector; ··· 254 256 255 257 if (aconnector->dc_sink) 256 258 amdgpu_dm_add_sink_to_freesync_module( 257 - connector, edid); 258 - 259 - drm_mode_connector_update_edid_property( 260 - &aconnector->base, edid); 259 + connector, aconnector->edid); 261 260 } 261 + 262 + drm_mode_connector_update_edid_property( 263 + &aconnector->base, aconnector->edid); 262 264 263 265 ret = drm_add_edid_modes(connector, aconnector->edid); 264 266 ··· 422 424 dc_sink_release(aconnector->dc_sink); 423 425 aconnector->dc_sink = NULL; 424 426 } 425 - if (aconnector->edid) { 426 - kfree(aconnector->edid); 427 - aconnector->edid = NULL; 428 - } 429 - 430 - drm_mode_connector_update_edid_property( 431 - &aconnector->base, 432 - NULL); 433 427 434 428 aconnector->mst_connected = false; 435 429 }
+3 -8
drivers/gpu/drm/drm_edid.c
··· 4451 4451 info->max_tmds_clock = 0; 4452 4452 info->dvi_dual = false; 4453 4453 info->has_hdmi_infoframe = false; 4454 + memset(&info->hdmi, 0, sizeof(info->hdmi)); 4454 4455 4455 4456 info->non_desktop = 0; 4456 4457 } ··· 4463 4462 4464 4463 u32 quirks = edid_get_quirks(edid); 4465 4464 4465 + drm_reset_display_info(connector); 4466 + 4466 4467 info->width_mm = edid->width_cm * 10; 4467 4468 info->height_mm = edid->height_cm * 10; 4468 - 4469 - /* driver figures it out in this case */ 4470 - info->bpc = 0; 4471 - info->color_formats = 0; 4472 - info->cea_rev = 0; 4473 - info->max_tmds_clock = 0; 4474 - info->dvi_dual = false; 4475 - info->has_hdmi_infoframe = false; 4476 4469 4477 4470 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); 4478 4471
+14 -2
drivers/gpu/drm/i915/intel_cdclk.c
··· 2140 2140 } 2141 2141 } 2142 2142 2143 - /* According to BSpec, "The CD clock frequency must be at least twice 2143 + /* 2144 + * According to BSpec, "The CD clock frequency must be at least twice 2144 2145 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. 2146 + * 2147 + * FIXME: Check the actual, not default, BCLK being used. 2148 + * 2149 + * FIXME: This does not depend on ->has_audio because the higher CDCLK 2150 + * is required for audio probe, also when there are no audio capable 2151 + * displays connected at probe time. This leads to unnecessarily high 2152 + * CDCLK when audio is not required. 2153 + * 2154 + * FIXME: This limit is only applied when there are displays connected 2155 + * at probe time. If we probe without displays, we'll still end up using 2156 + * the platform minimum CDCLK, failing audio probe. 2145 2157 */ 2146 - if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) 2158 + if (INTEL_GEN(dev_priv) >= 9) 2147 2159 min_cdclk = max(2 * 96000, min_cdclk); 2148 2160 2149 2161 /*
+2 -2
drivers/gpu/drm/i915/intel_drv.h
··· 49 49 * check the condition before the timeout. 50 50 */ 51 51 #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \ 52 - unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ 52 + const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \ 53 53 long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \ 54 54 int ret__; \ 55 55 might_sleep(); \ 56 56 for (;;) { \ 57 - bool expired__ = time_after(jiffies, timeout__); \ 57 + const bool expired__ = ktime_after(ktime_get_raw(), end__); \ 58 58 OP; \ 59 59 if (COND) { \ 60 60 ret__ = 0; \
+1 -1
drivers/gpu/drm/i915/intel_fbdev.c
··· 806 806 return; 807 807 808 808 intel_fbdev_sync(ifbdev); 809 - if (ifbdev->vma) 809 + if (ifbdev->vma || ifbdev->helper.deferred_setup) 810 810 drm_fb_helper_hotplug_event(&ifbdev->helper); 811 811 } 812 812
+5 -6
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 641 641 642 642 DRM_DEBUG_KMS("Enabling DC6\n"); 643 643 644 - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 644 + /* Wa Display #1183: skl,kbl,cfl */ 645 + if (IS_GEN9_BC(dev_priv)) 646 + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | 647 + SKL_SELECT_ALTERNATE_DC_EXIT); 645 648 649 + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 646 650 } 647 651 648 652 void skl_disable_dc6(struct drm_i915_private *dev_priv) 649 653 { 650 654 DRM_DEBUG_KMS("Disabling DC6\n"); 651 - 652 - /* Wa Display #1183: skl,kbl,cfl */ 653 - if (IS_GEN9_BC(dev_priv)) 654 - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | 655 - SKL_SELECT_ALTERNATE_DC_EXIT); 656 655 657 656 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 658 657 }
+1
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
··· 351 351 352 352 spin_lock_irqsave(&dev->event_lock, flags); 353 353 mdp4_crtc->event = crtc->state->event; 354 + crtc->state->event = NULL; 354 355 spin_unlock_irqrestore(&dev->event_lock, flags); 355 356 356 357 blend_setup(crtc);
+1
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
··· 708 708 709 709 spin_lock_irqsave(&dev->event_lock, flags); 710 710 mdp5_crtc->event = crtc->state->event; 711 + crtc->state->event = NULL; 711 712 spin_unlock_irqrestore(&dev->event_lock, flags); 712 713 713 714 /*
+2 -1
drivers/gpu/drm/msm/disp/mdp_format.c
··· 171 171 return i; 172 172 } 173 173 174 - const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format) 174 + const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, 175 + uint64_t modifier) 175 176 { 176 177 int i; 177 178 for (i = 0; i < ARRAY_SIZE(formats); i++) {
+1 -1
drivers/gpu/drm/msm/disp/mdp_kms.h
··· 98 98 #define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv) 99 99 100 100 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only); 101 - const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); 101 + const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier); 102 102 103 103 /* MDP capabilities */ 104 104 #define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
+12 -4
drivers/gpu/drm/msm/dsi/dsi_host.c
··· 173 173 174 174 bool registered; 175 175 bool power_on; 176 + bool enabled; 176 177 int irq; 177 178 }; 178 179 ··· 776 775 switch (mipi_fmt) { 777 776 case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888; 778 777 case MIPI_DSI_FMT_RGB666_PACKED: 779 - case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666; 778 + case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666; 780 779 case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565; 781 780 default: return CMD_DST_FORMAT_RGB888; 782 781 } ··· 987 986 988 987 static void dsi_wait4video_done(struct msm_dsi_host *msm_host) 989 988 { 989 + u32 ret = 0; 990 + struct device *dev = &msm_host->pdev->dev; 991 + 990 992 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1); 991 993 992 994 reinit_completion(&msm_host->video_comp); 993 995 994 - wait_for_completion_timeout(&msm_host->video_comp, 996 + ret = wait_for_completion_timeout(&msm_host->video_comp, 995 997 msecs_to_jiffies(70)); 998 + 999 + if (ret <= 0) 1000 + dev_err(dev, "wait for video done timed out\n"); 996 1001 997 1002 dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0); 998 1003 } ··· 1008 1001 if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO)) 1009 1002 return; 1010 1003 1011 - if (msm_host->power_on) { 1004 + if (msm_host->power_on && msm_host->enabled) { 1012 1005 dsi_wait4video_done(msm_host); 1013 1006 /* delay 4 ms to skip BLLP */ 1014 1007 usleep_range(2000, 4000); ··· 2210 2203 * pm_runtime_put_autosuspend(&msm_host->pdev->dev); 2211 2204 * } 2212 2205 */ 2213 - 2206 + msm_host->enabled = true; 2214 2207 return 0; 2215 2208 } 2216 2209 ··· 2218 2211 { 2219 2212 struct msm_dsi_host *msm_host = to_msm_dsi_host(host); 2220 2213 2214 + msm_host->enabled = false; 2221 2215 dsi_op_mode_config(msm_host, 2222 2216 !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false); 2223 2217
+109
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
··· 265 265 return 0; 266 266 } 267 267 268 + int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, 269 + struct msm_dsi_phy_clk_request *clk_req) 270 + { 271 + const unsigned long bit_rate = clk_req->bitclk_rate; 272 + const unsigned long esc_rate = clk_req->escclk_rate; 273 + s32 ui, ui_x8, lpx; 274 + s32 tmax, tmin; 275 + s32 pcnt0 = 50; 276 + s32 pcnt1 = 50; 277 + s32 pcnt2 = 10; 278 + s32 pcnt3 = 30; 279 + s32 pcnt4 = 10; 280 + s32 pcnt5 = 2; 281 + s32 coeff = 1000; /* Precision, should avoid overflow */ 282 + s32 hb_en, hb_en_ckln; 283 + s32 temp; 284 + 285 + if (!bit_rate || !esc_rate) 286 + return -EINVAL; 287 + 288 + timing->hs_halfbyte_en = 0; 289 + hb_en = 0; 290 + timing->hs_halfbyte_en_ckln = 0; 291 + hb_en_ckln = 0; 292 + 293 + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); 294 + ui_x8 = ui << 3; 295 + lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); 296 + 297 + temp = S_DIV_ROUND_UP(38 * coeff, ui_x8); 298 + tmin = max_t(s32, temp, 0); 299 + temp = (95 * coeff) / ui_x8; 300 + tmax = max_t(s32, temp, 0); 301 + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false); 302 + 303 + temp = 300 * coeff - (timing->clk_prepare << 3) * ui; 304 + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; 305 + tmax = (tmin > 255) ? 
511 : 255; 306 + timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false); 307 + 308 + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); 309 + temp = 105 * coeff + 12 * ui - 20 * coeff; 310 + tmax = (temp + 3 * ui) / ui_x8; 311 + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false); 312 + 313 + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8); 314 + tmin = max_t(s32, temp, 0); 315 + temp = (85 * coeff + 6 * ui) / ui_x8; 316 + tmax = max_t(s32, temp, 0); 317 + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false); 318 + 319 + temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui; 320 + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; 321 + tmax = 255; 322 + timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false); 323 + 324 + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1; 325 + temp = 105 * coeff + 12 * ui - 20 * coeff; 326 + tmax = (temp / ui_x8) - 1; 327 + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false); 328 + 329 + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; 330 + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); 331 + 332 + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; 333 + tmax = 255; 334 + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false); 335 + 336 + temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui; 337 + timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8); 338 + 339 + temp = 60 * coeff + 52 * ui - 43 * ui; 340 + tmin = DIV_ROUND_UP(temp, ui_x8) - 1; 341 + tmax = 63; 342 + timing->shared_timings.clk_post = 343 + linear_inter(tmax, tmin, pcnt2, 0, false); 344 + 345 + temp = 8 * ui + (timing->clk_prepare << 3) * ui; 346 + temp += (((timing->clk_zero + 3) << 3) + 11) * ui; 347 + temp += hb_en_ckln ? 
(((timing->hs_rqst_ckln << 3) + 4) * ui) : 348 + (((timing->hs_rqst_ckln << 3) + 8) * ui); 349 + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; 350 + tmax = 63; 351 + if (tmin > tmax) { 352 + temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false); 353 + timing->shared_timings.clk_pre = temp >> 1; 354 + timing->shared_timings.clk_pre_inc_by_2 = 1; 355 + } else { 356 + timing->shared_timings.clk_pre = 357 + linear_inter(tmax, tmin, pcnt2, 0, false); 358 + timing->shared_timings.clk_pre_inc_by_2 = 0; 359 + } 360 + 361 + timing->ta_go = 3; 362 + timing->ta_sure = 0; 363 + timing->ta_get = 4; 364 + 365 + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", 366 + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, 367 + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, 368 + timing->clk_trail, timing->clk_prepare, timing->hs_exit, 369 + timing->hs_zero, timing->hs_prepare, timing->hs_trail, 370 + timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en, 371 + timing->hs_halfbyte_en_ckln, timing->hs_prep_dly, 372 + timing->hs_prep_dly_ckln); 373 + 374 + return 0; 375 + } 376 + 268 377 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, 269 378 u32 bit_mask) 270 379 {
+2
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
··· 101 101 struct msm_dsi_phy_clk_request *clk_req); 102 102 int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, 103 103 struct msm_dsi_phy_clk_request *clk_req); 104 + int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, 105 + struct msm_dsi_phy_clk_request *clk_req); 104 106 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, 105 107 u32 bit_mask); 106 108 int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
-28
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
··· 79 79 dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04); 80 80 } 81 81 82 - static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing, 83 - struct msm_dsi_phy_clk_request *clk_req) 84 - { 85 - /* 86 - * TODO: These params need to be computed, they're currently hardcoded 87 - * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a 88 - * default escape clock of 19.2 Mhz. 89 - */ 90 - 91 - timing->hs_halfbyte_en = 0; 92 - timing->clk_zero = 0x1c; 93 - timing->clk_prepare = 0x07; 94 - timing->clk_trail = 0x07; 95 - timing->hs_exit = 0x23; 96 - timing->hs_zero = 0x21; 97 - timing->hs_prepare = 0x07; 98 - timing->hs_trail = 0x07; 99 - timing->hs_rqst = 0x05; 100 - timing->ta_sure = 0x00; 101 - timing->ta_go = 0x03; 102 - timing->ta_get = 0x04; 103 - 104 - timing->shared_timings.clk_pre = 0x2d; 105 - timing->shared_timings.clk_post = 0x0d; 106 - 107 - return 0; 108 - } 109 - 110 82 static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, 111 83 struct msm_dsi_phy_clk_request *clk_req) 112 84 {
+2 -1
drivers/gpu/drm/msm/msm_fb.c
··· 183 183 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format); 184 184 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format); 185 185 186 - format = kms->funcs->get_format(kms, mode_cmd->pixel_format); 186 + format = kms->funcs->get_format(kms, mode_cmd->pixel_format, 187 + mode_cmd->modifier[0]); 187 188 if (!format) { 188 189 dev_err(dev->dev, "unsupported pixel format: %4.4s\n", 189 190 (char *)&mode_cmd->pixel_format);
+2 -9
drivers/gpu/drm/msm/msm_fbdev.c
··· 92 92 93 93 if (IS_ERR(fb)) { 94 94 dev_err(dev->dev, "failed to allocate fb\n"); 95 - ret = PTR_ERR(fb); 96 - goto fail; 95 + return PTR_ERR(fb); 97 96 } 98 97 99 98 bo = msm_framebuffer_bo(fb, 0); ··· 150 151 151 152 fail_unlock: 152 153 mutex_unlock(&dev->struct_mutex); 153 - fail: 154 - 155 - if (ret) { 156 - if (fb) 157 - drm_framebuffer_remove(fb); 158 - } 159 - 154 + drm_framebuffer_remove(fb); 160 155 return ret; 161 156 } 162 157
+11 -9
drivers/gpu/drm/msm/msm_gem.c
··· 132 132 struct msm_gem_object *msm_obj = to_msm_bo(obj); 133 133 134 134 if (msm_obj->pages) { 135 - /* For non-cached buffers, ensure the new pages are clean 136 - * because display controller, GPU, etc. are not coherent: 137 - */ 138 - if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 139 - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, 140 - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); 135 + if (msm_obj->sgt) { 136 + /* For non-cached buffers, ensure the new 137 + * pages are clean because display controller, 138 + * GPU, etc. are not coherent: 139 + */ 140 + if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) 141 + dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, 142 + msm_obj->sgt->nents, 143 + DMA_BIDIRECTIONAL); 141 144 142 - if (msm_obj->sgt) 143 145 sg_free_table(msm_obj->sgt); 144 - 145 - kfree(msm_obj->sgt); 146 + kfree(msm_obj->sgt); 147 + } 146 148 147 149 if (use_pages(obj)) 148 150 drm_gem_put_pages(obj, msm_obj->pages, true, false);
+4 -1
drivers/gpu/drm/msm/msm_kms.h
··· 48 48 /* functions to wait for atomic commit completed on each CRTC */ 49 49 void (*wait_for_crtc_commit_done)(struct msm_kms *kms, 50 50 struct drm_crtc *crtc); 51 + /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */ 52 + const struct msm_format *(*get_format)(struct msm_kms *kms, 53 + const uint32_t format, 54 + const uint64_t modifiers); 51 55 /* misc: */ 52 - const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); 53 56 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, 54 57 struct drm_encoder *encoder); 55 58 int (*set_split_display)(struct msm_kms *kms,
+2 -4
drivers/gpu/drm/qxl/qxl_cmd.c
··· 179 179 uint32_t type, bool interruptible) 180 180 { 181 181 struct qxl_command cmd; 182 - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 183 182 184 183 cmd.type = type; 185 - cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 184 + cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); 186 185 187 186 return qxl_ring_push(qdev->command_ring, &cmd, interruptible); 188 187 } ··· 191 192 uint32_t type, bool interruptible) 192 193 { 193 194 struct qxl_command cmd; 194 - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); 195 195 196 196 cmd.type = type; 197 - cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); 197 + cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); 198 198 199 199 return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); 200 200 }
+1
drivers/gpu/drm/qxl/qxl_drv.h
··· 167 167 168 168 int id; 169 169 int type; 170 + struct qxl_bo *release_bo; 170 171 uint32_t release_offset; 171 172 uint32_t surface_release_id; 172 173 struct ww_acquire_ctx ticket;
+2 -2
drivers/gpu/drm/qxl/qxl_ioctl.c
··· 182 182 goto out_free_reloc; 183 183 184 184 /* TODO copy slow path code from i915 */ 185 - fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 185 + fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK)); 186 186 unwritten = __copy_from_user_inatomic_nocache 187 - (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), 187 + (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK), 188 188 u64_to_user_ptr(cmd->command), cmd->command_size); 189 189 190 190 {
+9 -9
drivers/gpu/drm/qxl/qxl_release.c
··· 173 173 list_del(&entry->tv.head);
 174 174 kfree(entry);
 175 175 }
 176 + release->release_bo = NULL;
 176 177 }
 177 178 
 178 179 void
 ··· 297 296 {
 298 297 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
 299 298 int idr_ret;
 300 - struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
 301 299 struct qxl_bo *bo;
 302 300 union qxl_release_info *info;
 ··· 304 304 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
 305 305 if (idr_ret < 0)
 306 306 return idr_ret;
 307 - bo = to_qxl_bo(entry->tv.bo);
 307 + bo = create_rel->release_bo;
 308 308 
 309 + (*release)->release_bo = bo;
 309 310 (*release)->release_offset = create_rel->release_offset + 64;
 310 311 
 311 312 qxl_release_list_add(*release, bo);
 ··· 366 365 
 367 366 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
 368 367 
 368 + (*release)->release_bo = bo;
 369 369 (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
 370 370 qdev->current_release_bo_offset[cur_idx]++;
 371 371 
 ··· 410 408 {
 411 409 void *ptr;
 412 410 union qxl_release_info *info;
 413 - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
 414 - struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
 411 + struct qxl_bo *bo = release->release_bo;
 415 412 
 416 - ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
 413 + ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
 417 414 if (!ptr)
 418 415 return NULL;
 419 - info = ptr + (release->release_offset & ~PAGE_SIZE);
 416 + info = ptr + (release->release_offset & ~PAGE_MASK);
 420 417 return info;
 421 418 }
 422 419 
 ··· 423 422 struct qxl_release *release,
 424 423 union qxl_release_info *info)
 425 424 {
 426 - struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
 427 425 struct qxl_bo *bo = release->release_bo;
 428 426 void *ptr;
 429 427 
 430 - ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
 428 + ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
 431 429 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
 432 430 }
 433 431 
-55
drivers/gpu/drm/sun4i/sun4i_lvds.c
··· 94 94 } 95 95 } 96 96 97 - static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc, 98 - const struct drm_display_mode *mode) 99 - { 100 - struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc); 101 - struct sun4i_tcon *tcon = lvds->tcon; 102 - u32 hsync = mode->hsync_end - mode->hsync_start; 103 - u32 vsync = mode->vsync_end - mode->vsync_start; 104 - unsigned long rate = mode->clock * 1000; 105 - long rounded_rate; 106 - 107 - DRM_DEBUG_DRIVER("Validating modes...\n"); 108 - 109 - if (hsync < 1) 110 - return MODE_HSYNC_NARROW; 111 - 112 - if (hsync > 0x3ff) 113 - return MODE_HSYNC_WIDE; 114 - 115 - if ((mode->hdisplay < 1) || (mode->htotal < 1)) 116 - return MODE_H_ILLEGAL; 117 - 118 - if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff)) 119 - return MODE_BAD_HVALUE; 120 - 121 - DRM_DEBUG_DRIVER("Horizontal parameters OK\n"); 122 - 123 - if (vsync < 1) 124 - return MODE_VSYNC_NARROW; 125 - 126 - if (vsync > 0x3ff) 127 - return MODE_VSYNC_WIDE; 128 - 129 - if ((mode->vdisplay < 1) || (mode->vtotal < 1)) 130 - return MODE_V_ILLEGAL; 131 - 132 - if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff)) 133 - return MODE_BAD_VVALUE; 134 - 135 - DRM_DEBUG_DRIVER("Vertical parameters OK\n"); 136 - 137 - tcon->dclk_min_div = 7; 138 - tcon->dclk_max_div = 7; 139 - rounded_rate = clk_round_rate(tcon->dclk, rate); 140 - if (rounded_rate < rate) 141 - return MODE_CLOCK_LOW; 142 - 143 - if (rounded_rate > rate) 144 - return MODE_CLOCK_HIGH; 145 - 146 - DRM_DEBUG_DRIVER("Clock rate OK\n"); 147 - 148 - return MODE_OK; 149 - } 150 - 151 97 static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = { 152 98 .disable = sun4i_lvds_encoder_disable, 153 99 .enable = sun4i_lvds_encoder_enable, 154 - .mode_valid = sun4i_lvds_encoder_mode_valid, 155 100 }; 156 101 157 102 static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
+2 -2
drivers/gpu/drm/virtio/virtgpu_vq.c
··· 293 293 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); 294 294 if (ret == -ENOSPC) { 295 295 spin_unlock(&vgdev->ctrlq.qlock); 296 - wait_event(vgdev->ctrlq.ack_queue, vq->num_free); 296 + wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt); 297 297 spin_lock(&vgdev->ctrlq.qlock); 298 298 goto retry; 299 299 } else { ··· 368 368 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); 369 369 if (ret == -ENOSPC) { 370 370 spin_unlock(&vgdev->cursorq.qlock); 371 - wait_event(vgdev->cursorq.ack_queue, vq->num_free); 371 + wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt); 372 372 spin_lock(&vgdev->cursorq.qlock); 373 373 goto retry; 374 374 } else {
-3
drivers/i2c/busses/Kconfig
··· 707 707 config I2C_MT65XX 708 708 tristate "MediaTek I2C adapter" 709 709 depends on ARCH_MEDIATEK || COMPILE_TEST 710 - depends on HAS_DMA 711 710 help 712 711 This selects the MediaTek(R) Integrated Inter Circuit bus driver 713 712 for MT65xx and MT81xx. ··· 884 885 885 886 config I2C_SH_MOBILE 886 887 tristate "SuperH Mobile I2C Controller" 887 - depends on HAS_DMA 888 888 depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST 889 889 help 890 890 If you say yes to this option, support will be included for the ··· 1096 1098 1097 1099 config I2C_RCAR 1098 1100 tristate "Renesas R-Car I2C Controller" 1099 - depends on HAS_DMA 1100 1101 depends on ARCH_RENESAS || COMPILE_TEST 1101 1102 select I2C_SLAVE 1102 1103 help
+18 -4
drivers/i2c/busses/i2c-sprd.c
··· 86 86 u32 count;
 87 87 int irq;
 88 88 int err;
 89 + bool is_suspended;
 89 90 };
 90 91 
 91 92 static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count)
 ··· 284 283 struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
 285 284 int im, ret;
 286 285 
 286 + if (i2c_dev->is_suspended)
 287 + return -EBUSY;
 288 + 
 287 289 ret = pm_runtime_get_sync(i2c_dev->dev);
 288 290 if (ret < 0)
 289 291 return ret;
 ··· 368 364 struct sprd_i2c *i2c_dev = dev_id;
 369 365 struct i2c_msg *msg = i2c_dev->msg;
 370 366 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
 371 - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
 372 367 u32 i2c_tran;
 373 368 
 374 369 if (msg->flags & I2C_M_RD)
 375 370 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
 376 371 else
 377 - i2c_tran = i2c_count;
 372 + i2c_tran = i2c_dev->count;
 378 373 
 379 374 /*
 380 375 * If we got one ACK from slave when writing data, and we did not
 ··· 411 408 {
 412 409 struct sprd_i2c *i2c_dev = dev_id;
 413 410 struct i2c_msg *msg = i2c_dev->msg;
 414 - u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
 415 411 bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
 416 412 u32 i2c_tran;
 417 413 
 418 414 if (msg->flags & I2C_M_RD)
 419 415 i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
 420 416 else
 421 - i2c_tran = i2c_count;
 417 + i2c_tran = i2c_dev->count;
 422 418 
 423 419 /*
 424 420 * If we did not get one ACK from slave when writing data, then we
 ··· 588 586 
 589 587 static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
 590 588 {
 589 + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
 590 + 
 591 + i2c_lock_adapter(&i2c_dev->adap);
 592 + i2c_dev->is_suspended = true;
 593 + i2c_unlock_adapter(&i2c_dev->adap);
 594 + 
 591 595 return pm_runtime_force_suspend(pdev);
 592 596 }
 593 597 
 594 598 static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
 595 599 {
 600 + struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
 601 + 
 602 + i2c_lock_adapter(&i2c_dev->adap);
 603 + i2c_dev->is_suspended = false;
 604 + i2c_unlock_adapter(&i2c_dev->adap);
 605 + 
 596 606 return pm_runtime_force_resume(pdev);
 597 607 }
 598 608 
+1 -1
drivers/i2c/i2c-dev.c
··· 280 280 */ 281 281 if (msgs[i].flags & I2C_M_RECV_LEN) { 282 282 if (!(msgs[i].flags & I2C_M_RD) || 283 - msgs[i].buf[0] < 1 || 283 + msgs[i].len < 1 || msgs[i].buf[0] < 1 || 284 284 msgs[i].len < msgs[i].buf[0] + 285 285 I2C_SMBUS_BLOCK_MAX) { 286 286 res = -EINVAL;
+1 -1
drivers/infiniband/hw/mlx5/main.c
··· 4757 4757 { 4758 4758 struct mlx5_ib_dev *dev = to_mdev(ibdev); 4759 4759 4760 - return mlx5_get_vector_affinity(dev->mdev, comp_vector); 4760 + return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector); 4761 4761 } 4762 4762 4763 4763 /* The mlx5_ib_multiport_mutex should be held when calling this function */
+6 -1
drivers/input/evdev.c
··· 31 31 enum evdev_clock_type { 32 32 EV_CLK_REAL = 0, 33 33 EV_CLK_MONO, 34 + EV_CLK_BOOT, 34 35 EV_CLK_MAX 35 36 }; 36 37 ··· 198 197 case CLOCK_REALTIME: 199 198 clk_type = EV_CLK_REAL; 200 199 break; 201 - case CLOCK_BOOTTIME: 202 200 case CLOCK_MONOTONIC: 203 201 clk_type = EV_CLK_MONO; 202 + break; 203 + case CLOCK_BOOTTIME: 204 + clk_type = EV_CLK_BOOT; 204 205 break; 205 206 default: 206 207 return -EINVAL; ··· 314 311 315 312 ev_time[EV_CLK_MONO] = ktime_get(); 316 313 ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]); 314 + ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO], 315 + TK_OFFS_BOOT); 317 316 318 317 rcu_read_lock(); 319 318
+5 -5
drivers/input/input-leds.c
··· 88 88 const struct input_device_id *id) 89 89 { 90 90 struct input_leds *leds; 91 + struct input_led *led; 91 92 unsigned int num_leds; 92 93 unsigned int led_code; 93 94 int led_no; ··· 120 119 121 120 led_no = 0; 122 121 for_each_set_bit(led_code, dev->ledbit, LED_CNT) { 123 - struct input_led *led = &leds->leds[led_no]; 124 - 125 - led->handle = &leds->handle; 126 - led->code = led_code; 127 - 128 122 if (!input_led_info[led_code].name) 129 123 continue; 124 + 125 + led = &leds->leds[led_no]; 126 + led->handle = &leds->handle; 127 + led->code = led_code; 130 128 131 129 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", 132 130 dev_name(&dev->dev),
+1 -1
drivers/input/mouse/alps.c
··· 583 583 584 584 x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f)); 585 585 y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f)); 586 - z = packet[4] & 0x7c; 586 + z = packet[4] & 0x7f; 587 587 588 588 /* 589 589 * The x and y values tend to be quite large, and when used
+5 -2
drivers/input/rmi4/rmi_spi.c
··· 147 147 if (len > RMI_SPI_XFER_SIZE_LIMIT) 148 148 return -EINVAL; 149 149 150 - if (rmi_spi->xfer_buf_size < len) 151 - rmi_spi_manage_pools(rmi_spi, len); 150 + if (rmi_spi->xfer_buf_size < len) { 151 + ret = rmi_spi_manage_pools(rmi_spi, len); 152 + if (ret < 0) 153 + return ret; 154 + } 152 155 153 156 if (addr == 0) 154 157 /*
+1 -1
drivers/input/touchscreen/Kconfig
··· 362 362 363 363 If unsure, say N. 364 364 365 - To compile this driver as a moudle, choose M here : the 365 + To compile this driver as a module, choose M here : the 366 366 module will be called hideep_ts. 367 367 368 368 config TOUCHSCREEN_ILI210X
+124 -76
drivers/input/touchscreen/atmel_mxt_ts.c
··· 280 280 struct input_dev *input_dev; 281 281 char phys[64]; /* device physical location */ 282 282 struct mxt_object *object_table; 283 - struct mxt_info info; 283 + struct mxt_info *info; 284 + void *raw_info_block; 284 285 unsigned int irq; 285 286 unsigned int max_x; 286 287 unsigned int max_y; ··· 461 460 { 462 461 u8 appmode = data->client->addr; 463 462 u8 bootloader; 463 + u8 family_id = data->info ? data->info->family_id : 0; 464 464 465 465 switch (appmode) { 466 466 case 0x4a: 467 467 case 0x4b: 468 468 /* Chips after 1664S use different scheme */ 469 - if (retry || data->info.family_id >= 0xa2) { 469 + if (retry || family_id >= 0xa2) { 470 470 bootloader = appmode - 0x24; 471 471 break; 472 472 } ··· 694 692 struct mxt_object *object; 695 693 int i; 696 694 697 - for (i = 0; i < data->info.object_num; i++) { 695 + for (i = 0; i < data->info->object_num; i++) { 698 696 object = data->object_table + i; 699 697 if (object->type == type) 700 698 return object; ··· 1464 1462 data_pos += offset; 1465 1463 } 1466 1464 1467 - if (cfg_info.family_id != data->info.family_id) { 1465 + if (cfg_info.family_id != data->info->family_id) { 1468 1466 dev_err(dev, "Family ID mismatch!\n"); 1469 1467 return -EINVAL; 1470 1468 } 1471 1469 1472 - if (cfg_info.variant_id != data->info.variant_id) { 1470 + if (cfg_info.variant_id != data->info->variant_id) { 1473 1471 dev_err(dev, "Variant ID mismatch!\n"); 1474 1472 return -EINVAL; 1475 1473 } ··· 1514 1512 1515 1513 /* Malloc memory to store configuration */ 1516 1514 cfg_start_ofs = MXT_OBJECT_START + 1517 - data->info.object_num * sizeof(struct mxt_object) + 1515 + data->info->object_num * sizeof(struct mxt_object) + 1518 1516 MXT_INFO_CHECKSUM_SIZE; 1519 1517 config_mem_size = data->mem_size - cfg_start_ofs; 1520 1518 config_mem = kzalloc(config_mem_size, GFP_KERNEL); ··· 1565 1563 return ret; 1566 1564 } 1567 1565 1568 - static int mxt_get_info(struct mxt_data *data) 1569 - { 1570 - struct i2c_client *client = 
data->client; 1571 - struct mxt_info *info = &data->info; 1572 - int error; 1573 - 1574 - /* Read 7-byte info block starting at address 0 */ 1575 - error = __mxt_read_reg(client, 0, sizeof(*info), info); 1576 - if (error) 1577 - return error; 1578 - 1579 - return 0; 1580 - } 1581 - 1582 1566 static void mxt_free_input_device(struct mxt_data *data) 1583 1567 { 1584 1568 if (data->input_dev) { ··· 1579 1591 video_unregister_device(&data->dbg.vdev); 1580 1592 v4l2_device_unregister(&data->dbg.v4l2); 1581 1593 #endif 1582 - 1583 - kfree(data->object_table); 1584 1594 data->object_table = NULL; 1595 + data->info = NULL; 1596 + kfree(data->raw_info_block); 1597 + data->raw_info_block = NULL; 1585 1598 kfree(data->msg_buf); 1586 1599 data->msg_buf = NULL; 1587 1600 data->T5_address = 0; ··· 1598 1609 data->max_reportid = 0; 1599 1610 } 1600 1611 1601 - static int mxt_get_object_table(struct mxt_data *data) 1612 + static int mxt_parse_object_table(struct mxt_data *data, 1613 + struct mxt_object *object_table) 1602 1614 { 1603 1615 struct i2c_client *client = data->client; 1604 - size_t table_size; 1605 - struct mxt_object *object_table; 1606 - int error; 1607 1616 int i; 1608 1617 u8 reportid; 1609 1618 u16 end_address; 1610 1619 1611 - table_size = data->info.object_num * sizeof(struct mxt_object); 1612 - object_table = kzalloc(table_size, GFP_KERNEL); 1613 - if (!object_table) { 1614 - dev_err(&data->client->dev, "Failed to allocate memory\n"); 1615 - return -ENOMEM; 1616 - } 1617 - 1618 - error = __mxt_read_reg(client, MXT_OBJECT_START, table_size, 1619 - object_table); 1620 - if (error) { 1621 - kfree(object_table); 1622 - return error; 1623 - } 1624 - 1625 1620 /* Valid Report IDs start counting from 1 */ 1626 1621 reportid = 1; 1627 1622 data->mem_size = 0; 1628 - for (i = 0; i < data->info.object_num; i++) { 1623 + for (i = 0; i < data->info->object_num; i++) { 1629 1624 struct mxt_object *object = object_table + i; 1630 1625 u8 min_id, max_id; 1631 1626 ··· 1633 
1660 1634 1661 switch (object->type) { 1635 1662 case MXT_GEN_MESSAGE_T5: 1636 - if (data->info.family_id == 0x80 && 1637 - data->info.version < 0x20) { 1663 + if (data->info->family_id == 0x80 && 1664 + data->info->version < 0x20) { 1638 1665 /* 1639 1666 * On mXT224 firmware versions prior to V2.0 1640 1667 * read and discard unused CRC byte otherwise ··· 1689 1716 /* If T44 exists, T5 position has to be directly after */ 1690 1717 if (data->T44_address && (data->T5_address != data->T44_address + 1)) { 1691 1718 dev_err(&client->dev, "Invalid T44 position\n"); 1692 - error = -EINVAL; 1693 - goto free_object_table; 1719 + return -EINVAL; 1694 1720 } 1695 1721 1696 1722 data->msg_buf = kcalloc(data->max_reportid, 1697 1723 data->T5_msg_size, GFP_KERNEL); 1698 - if (!data->msg_buf) { 1699 - dev_err(&client->dev, "Failed to allocate message buffer\n"); 1724 + if (!data->msg_buf) 1725 + return -ENOMEM; 1726 + 1727 + return 0; 1728 + } 1729 + 1730 + static int mxt_read_info_block(struct mxt_data *data) 1731 + { 1732 + struct i2c_client *client = data->client; 1733 + int error; 1734 + size_t size; 1735 + void *id_buf, *buf; 1736 + uint8_t num_objects; 1737 + u32 calculated_crc; 1738 + u8 *crc_ptr; 1739 + 1740 + /* If info block already allocated, free it */ 1741 + if (data->raw_info_block) 1742 + mxt_free_object_table(data); 1743 + 1744 + /* Read 7-byte ID information block starting at address 0 */ 1745 + size = sizeof(struct mxt_info); 1746 + id_buf = kzalloc(size, GFP_KERNEL); 1747 + if (!id_buf) 1748 + return -ENOMEM; 1749 + 1750 + error = __mxt_read_reg(client, 0, size, id_buf); 1751 + if (error) 1752 + goto err_free_mem; 1753 + 1754 + /* Resize buffer to give space for rest of info block */ 1755 + num_objects = ((struct mxt_info *)id_buf)->object_num; 1756 + size += (num_objects * sizeof(struct mxt_object)) 1757 + + MXT_INFO_CHECKSUM_SIZE; 1758 + 1759 + buf = krealloc(id_buf, size, GFP_KERNEL); 1760 + if (!buf) { 1700 1761 error = -ENOMEM; 1701 - goto 
free_object_table; 1762 + goto err_free_mem; 1763 + } 1764 + id_buf = buf; 1765 + 1766 + /* Read rest of info block */ 1767 + error = __mxt_read_reg(client, MXT_OBJECT_START, 1768 + size - MXT_OBJECT_START, 1769 + id_buf + MXT_OBJECT_START); 1770 + if (error) 1771 + goto err_free_mem; 1772 + 1773 + /* Extract & calculate checksum */ 1774 + crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE; 1775 + data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16); 1776 + 1777 + calculated_crc = mxt_calculate_crc(id_buf, 0, 1778 + size - MXT_INFO_CHECKSUM_SIZE); 1779 + 1780 + /* 1781 + * CRC mismatch can be caused by data corruption due to I2C comms 1782 + * issue or else device is not using Object Based Protocol (eg i2c-hid) 1783 + */ 1784 + if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) { 1785 + dev_err(&client->dev, 1786 + "Info Block CRC error calculated=0x%06X read=0x%06X\n", 1787 + calculated_crc, data->info_crc); 1788 + error = -EIO; 1789 + goto err_free_mem; 1702 1790 } 1703 1791 1704 - data->object_table = object_table; 1792 + data->raw_info_block = id_buf; 1793 + data->info = (struct mxt_info *)id_buf; 1794 + 1795 + dev_info(&client->dev, 1796 + "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", 1797 + data->info->family_id, data->info->variant_id, 1798 + data->info->version >> 4, data->info->version & 0xf, 1799 + data->info->build, data->info->object_num); 1800 + 1801 + /* Parse object table information */ 1802 + error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START); 1803 + if (error) { 1804 + dev_err(&client->dev, "Error %d parsing object table\n", error); 1805 + mxt_free_object_table(data); 1806 + goto err_free_mem; 1807 + } 1808 + 1809 + data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START); 1705 1810 1706 1811 return 0; 1707 1812 1708 - free_object_table: 1709 - mxt_free_object_table(data); 1813 + err_free_mem: 1814 + kfree(id_buf); 1710 1815 return error; 1711 1816 } 1712 1817 ··· 2097 2046 int 
error; 2098 2047 2099 2048 while (1) { 2100 - error = mxt_get_info(data); 2049 + error = mxt_read_info_block(data); 2101 2050 if (!error) 2102 2051 break; 2103 2052 ··· 2128 2077 msleep(MXT_FW_RESET_TIME); 2129 2078 } 2130 2079 2131 - /* Get object table information */ 2132 - error = mxt_get_object_table(data); 2133 - if (error) { 2134 - dev_err(&client->dev, "Error %d reading object table\n", error); 2135 - return error; 2136 - } 2137 - 2138 2080 error = mxt_acquire_irq(data); 2139 2081 if (error) 2140 - goto err_free_object_table; 2082 + return error; 2141 2083 2142 2084 error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME, 2143 2085 &client->dev, GFP_KERNEL, data, ··· 2138 2094 if (error) { 2139 2095 dev_err(&client->dev, "Failed to invoke firmware loader: %d\n", 2140 2096 error); 2141 - goto err_free_object_table; 2097 + return error; 2142 2098 } 2143 2099 2144 2100 return 0; 2145 - 2146 - err_free_object_table: 2147 - mxt_free_object_table(data); 2148 - return error; 2149 2101 } 2150 2102 2151 2103 static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep) ··· 2202 2162 static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x, 2203 2163 unsigned int y) 2204 2164 { 2205 - struct mxt_info *info = &data->info; 2165 + struct mxt_info *info = data->info; 2206 2166 struct mxt_dbg *dbg = &data->dbg; 2207 2167 unsigned int ofs, page; 2208 2168 unsigned int col = 0; ··· 2530 2490 2531 2491 static void mxt_debug_init(struct mxt_data *data) 2532 2492 { 2533 - struct mxt_info *info = &data->info; 2493 + struct mxt_info *info = data->info; 2534 2494 struct mxt_dbg *dbg = &data->dbg; 2535 2495 struct mxt_object *object; 2536 2496 int error; ··· 2616 2576 const struct firmware *cfg) 2617 2577 { 2618 2578 struct device *dev = &data->client->dev; 2619 - struct mxt_info *info = &data->info; 2620 2579 int error; 2621 2580 2622 2581 error = mxt_init_t7_power_cfg(data); ··· 2640 2601 2641 2602 mxt_debug_init(data); 2642 2603 2643 - dev_info(dev, 2644 - 
"Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n", 2645 - info->family_id, info->variant_id, info->version >> 4, 2646 - info->version & 0xf, info->build, info->object_num); 2647 - 2648 2604 return 0; 2649 2605 } 2650 2606 ··· 2648 2614 struct device_attribute *attr, char *buf) 2649 2615 { 2650 2616 struct mxt_data *data = dev_get_drvdata(dev); 2651 - struct mxt_info *info = &data->info; 2617 + struct mxt_info *info = data->info; 2652 2618 return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n", 2653 2619 info->version >> 4, info->version & 0xf, info->build); 2654 2620 } ··· 2658 2624 struct device_attribute *attr, char *buf) 2659 2625 { 2660 2626 struct mxt_data *data = dev_get_drvdata(dev); 2661 - struct mxt_info *info = &data->info; 2627 + struct mxt_info *info = data->info; 2662 2628 return scnprintf(buf, PAGE_SIZE, "%u.%u\n", 2663 2629 info->family_id, info->variant_id); 2664 2630 } ··· 2697 2663 return -ENOMEM; 2698 2664 2699 2665 error = 0; 2700 - for (i = 0; i < data->info.object_num; i++) { 2666 + for (i = 0; i < data->info->object_num; i++) { 2701 2667 object = data->object_table + i; 2702 2668 2703 2669 if (!mxt_object_readable(object->type)) ··· 3069 3035 .driver_data = samus_platform_data, 3070 3036 }, 3071 3037 { 3038 + /* Samsung Chromebook Pro */ 3039 + .ident = "Samsung Chromebook Pro", 3040 + .matches = { 3041 + DMI_MATCH(DMI_SYS_VENDOR, "Google"), 3042 + DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"), 3043 + }, 3044 + .driver_data = samus_platform_data, 3045 + }, 3046 + { 3072 3047 /* Other Google Chromebooks */ 3073 3048 .ident = "Chromebook", 3074 3049 .matches = { ··· 3297 3254 3298 3255 static const struct of_device_id mxt_of_match[] = { 3299 3256 { .compatible = "atmel,maxtouch", }, 3257 + /* Compatibles listed below are deprecated */ 3258 + { .compatible = "atmel,qt602240_ts", }, 3259 + { .compatible = "atmel,atmel_mxt_ts", }, 3260 + { .compatible = "atmel,atmel_mxt_tp", }, 3261 + { .compatible = "atmel,mXT224", }, 3300 3262 {}, 3301 3263 }; 3302 
3264 MODULE_DEVICE_TABLE(of, mxt_of_match);
+1 -71
drivers/memory/emif-asm-offsets.c
··· 16 16 17 17 int main(void) 18 18 { 19 - DEFINE(EMIF_SDCFG_VAL_OFFSET, 20 - offsetof(struct emif_regs_amx3, emif_sdcfg_val)); 21 - DEFINE(EMIF_TIMING1_VAL_OFFSET, 22 - offsetof(struct emif_regs_amx3, emif_timing1_val)); 23 - DEFINE(EMIF_TIMING2_VAL_OFFSET, 24 - offsetof(struct emif_regs_amx3, emif_timing2_val)); 25 - DEFINE(EMIF_TIMING3_VAL_OFFSET, 26 - offsetof(struct emif_regs_amx3, emif_timing3_val)); 27 - DEFINE(EMIF_REF_CTRL_VAL_OFFSET, 28 - offsetof(struct emif_regs_amx3, emif_ref_ctrl_val)); 29 - DEFINE(EMIF_ZQCFG_VAL_OFFSET, 30 - offsetof(struct emif_regs_amx3, emif_zqcfg_val)); 31 - DEFINE(EMIF_PMCR_VAL_OFFSET, 32 - offsetof(struct emif_regs_amx3, emif_pmcr_val)); 33 - DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET, 34 - offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val)); 35 - DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET, 36 - offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl)); 37 - DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET, 38 - offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh)); 39 - DEFINE(EMIF_COS_CONFIG_OFFSET, 40 - offsetof(struct emif_regs_amx3, emif_cos_config)); 41 - DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET, 42 - offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping)); 43 - DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET, 44 - offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map)); 45 - DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET, 46 - offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map)); 47 - DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET, 48 - offsetof(struct emif_regs_amx3, emif_ocp_config_val)); 49 - DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET, 50 - offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim)); 51 - DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET, 52 - offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw)); 53 - DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET, 54 - offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val)); 55 - DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET, 56 - offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw)); 57 - 
DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET, 58 - offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1)); 59 - DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET, 60 - offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals)); 61 - DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3)); 62 - 63 - BLANK(); 64 - 65 - DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET, 66 - offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt)); 67 - DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET, 68 - offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys)); 69 - DEFINE(EMIF_PM_CONFIG_OFFSET, 70 - offsetof(struct ti_emif_pm_data, ti_emif_sram_config)); 71 - DEFINE(EMIF_PM_REGS_VIRT_OFFSET, 72 - offsetof(struct ti_emif_pm_data, regs_virt)); 73 - DEFINE(EMIF_PM_REGS_PHYS_OFFSET, 74 - offsetof(struct ti_emif_pm_data, regs_phys)); 75 - DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data)); 76 - 77 - BLANK(); 78 - 79 - DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET, 80 - offsetof(struct ti_emif_pm_functions, save_context)); 81 - DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET, 82 - offsetof(struct ti_emif_pm_functions, restore_context)); 83 - DEFINE(EMIF_PM_ENTER_SR_OFFSET, 84 - offsetof(struct ti_emif_pm_functions, enter_sr)); 85 - DEFINE(EMIF_PM_EXIT_SR_OFFSET, 86 - offsetof(struct ti_emif_pm_functions, exit_sr)); 87 - DEFINE(EMIF_PM_ABORT_SR_OFFSET, 88 - offsetof(struct ti_emif_pm_functions, abort_sr)); 89 - DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions)); 19 + ti_emif_asm_offsets(); 90 20 91 21 return 0; 92 22 }
+28 -5
drivers/mtd/chips/cfi_cmdset_0001.c
··· 45 45 #define I82802AB 0x00ad 46 46 #define I82802AC 0x00ac 47 47 #define PF38F4476 0x881c 48 + #define M28F00AP30 0x8963 48 49 /* STMicroelectronics chips */ 49 50 #define M50LPW080 0x002F 50 51 #define M50FLW080A 0x0080 ··· 374 373 if (cfi->mfr == CFI_MFR_INTEL && 375 374 cfi->id == PF38F4476 && extp->MinorVersion == '3') 376 375 extp->MinorVersion = '1'; 376 + } 377 + 378 + static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip) 379 + { 380 + /* 381 + * Micron(was Numonyx) 1Gbit bottom boot are buggy w.r.t 382 + * Erase Supend for their small Erase Blocks(0x8000) 383 + */ 384 + if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30) 385 + return 1; 386 + return 0; 377 387 } 378 388 379 389 static inline struct cfi_pri_intelext * ··· 843 831 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1)))) 844 832 goto sleep; 845 833 834 + /* Do not allow suspend iff read/write to EB address */ 835 + if ((adr & chip->in_progress_block_mask) == 836 + chip->in_progress_block_addr) 837 + goto sleep; 838 + 839 + /* do not suspend small EBs, buggy Micron Chips */ 840 + if (cfi_is_micron_28F00AP30(cfi, chip) && 841 + (chip->in_progress_block_mask == ~(0x8000-1))) 842 + goto sleep; 846 843 847 844 /* Erase suspend */ 848 - map_write(map, CMD(0xB0), adr); 845 + map_write(map, CMD(0xB0), chip->in_progress_block_addr); 849 846 850 847 /* If the flash has finished erasing, then 'erase suspend' 851 848 * appears to make some (28F320) flash devices switch to 852 849 * 'read' mode. Make sure that we switch to 'read status' 853 850 * mode so we get the right data. 
--rmk 854 851 */ 855 - map_write(map, CMD(0x70), adr); 852 + map_write(map, CMD(0x70), chip->in_progress_block_addr); 856 853 chip->oldstate = FL_ERASING; 857 854 chip->state = FL_ERASE_SUSPENDING; 858 855 chip->erase_suspended = 1; 859 856 for (;;) { 860 - status = map_read(map, adr); 857 + status = map_read(map, chip->in_progress_block_addr); 861 858 if (map_word_andequal(map, status, status_OK, status_OK)) 862 859 break; 863 860 ··· 1062 1041 sending the 0x70 (Read Status) command to an erasing 1063 1042 chip and expecting it to be ignored, that's what we 1064 1043 do. */ 1065 - map_write(map, CMD(0xd0), adr); 1066 - map_write(map, CMD(0x70), adr); 1044 + map_write(map, CMD(0xd0), chip->in_progress_block_addr); 1045 + map_write(map, CMD(0x70), chip->in_progress_block_addr); 1067 1046 chip->oldstate = FL_READY; 1068 1047 chip->state = FL_ERASING; 1069 1048 break; ··· 1954 1933 map_write(map, CMD(0xD0), adr); 1955 1934 chip->state = FL_ERASING; 1956 1935 chip->erase_suspended = 0; 1936 + chip->in_progress_block_addr = adr; 1937 + chip->in_progress_block_mask = ~(len - 1); 1957 1938 1958 1939 ret = INVAL_CACHE_AND_WAIT(map, chip, adr, 1959 1940 adr, len,
+6 -3
drivers/mtd/chips/cfi_cmdset_0002.c
··· 816 816 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) 817 817 goto sleep; 818 818 819 - /* We could check to see if we're trying to access the sector 820 - * that is currently being erased. However, no user will try 821 - * anything like that so we just wait for the timeout. */ 819 + /* Do not allow suspend iff read/write to EB address */ 820 + if ((adr & chip->in_progress_block_mask) == 821 + chip->in_progress_block_addr) 822 + goto sleep; 822 823 823 824 /* Erase suspend */ 824 825 /* It's harmless to issue the Erase-Suspend and Erase-Resume ··· 2268 2267 chip->state = FL_ERASING; 2269 2268 chip->erase_suspended = 0; 2270 2269 chip->in_progress_block_addr = adr; 2270 + chip->in_progress_block_mask = ~(map->size - 1); 2271 2271 2272 2272 INVALIDATE_CACHE_UDELAY(map, chip, 2273 2273 adr, map->size, ··· 2358 2356 chip->state = FL_ERASING; 2359 2357 chip->erase_suspended = 0; 2360 2358 chip->in_progress_block_addr = adr; 2359 + chip->in_progress_block_mask = ~(len - 1); 2361 2360 2362 2361 INVALIDATE_CACHE_UDELAY(map, chip, 2363 2362 adr, len,
-3
drivers/mtd/nand/core.c
··· 162 162 ret = nanddev_erase(nand, &pos); 163 163 if (ret) { 164 164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos); 165 - einfo->state = MTD_ERASE_FAILED; 166 165 167 166 return ret; 168 167 } 169 168 170 169 nanddev_pos_next_eraseblock(nand, &pos); 171 170 } 172 - 173 - einfo->state = MTD_ERASE_DONE; 174 171 175 172 return 0; 176 173 }
+8 -17
drivers/mtd/nand/raw/marvell_nand.c
··· 2299 2299 /* 2300 2300 * The legacy "num-cs" property indicates the number of CS on the only 2301 2301 * chip connected to the controller (legacy bindings does not support 2302 - * more than one chip). CS are only incremented one by one while the RB 2303 - * pin is always the #0. 2302 + * more than one chip). The CS and RB pins are always the #0. 2304 2303 * 2305 2304 * When not using legacy bindings, a couple of "reg" and "nand-rb" 2306 2305 * properties must be filled. For each chip, expressed as a subnode, 2307 2306 * "reg" points to the CS lines and "nand-rb" to the RB line. 2308 2307 */ 2309 - if (pdata) { 2308 + if (pdata || nfc->caps->legacy_of_bindings) { 2310 2309 nsels = 1; 2311 - } else if (nfc->caps->legacy_of_bindings && 2312 - !of_get_property(np, "num-cs", &nsels)) { 2313 - dev_err(dev, "missing num-cs property\n"); 2314 - return -EINVAL; 2315 - } else if (!of_get_property(np, "reg", &nsels)) { 2316 - dev_err(dev, "missing reg property\n"); 2317 - return -EINVAL; 2318 - } 2319 - 2320 - if (!pdata) 2321 - nsels /= sizeof(u32); 2322 - if (!nsels) { 2323 - dev_err(dev, "invalid reg property size\n"); 2324 - return -EINVAL; 2310 + } else { 2311 + nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32)); 2312 + if (nsels <= 0) { 2313 + dev_err(dev, "missing/invalid reg property\n"); 2314 + return -EINVAL; 2315 + } 2325 2316 } 2326 2317 2327 2318 /* Alloc the nand chip structure */
+1 -1
drivers/mtd/nand/raw/tango_nand.c
··· 645 645 646 646 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE); 647 647 648 - clk = clk_get(&pdev->dev, NULL); 648 + clk = devm_clk_get(&pdev->dev, NULL); 649 649 if (IS_ERR(clk)) 650 650 return PTR_ERR(clk); 651 651
+17 -2
drivers/mtd/spi-nor/cadence-quadspi.c
··· 501 501 void __iomem *reg_base = cqspi->iobase; 502 502 void __iomem *ahb_base = cqspi->ahb_base; 503 503 unsigned int remaining = n_rx; 504 + unsigned int mod_bytes = n_rx % 4; 504 505 unsigned int bytes_to_read = 0; 506 + u8 *rxbuf_end = rxbuf + n_rx; 505 507 int ret = 0; 506 508 507 509 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); ··· 532 530 } 533 531 534 532 while (bytes_to_read != 0) { 533 + unsigned int word_remain = round_down(remaining, 4); 534 + 535 535 bytes_to_read *= cqspi->fifo_width; 536 536 bytes_to_read = bytes_to_read > remaining ? 537 537 remaining : bytes_to_read; 538 - ioread32_rep(ahb_base, rxbuf, 539 - DIV_ROUND_UP(bytes_to_read, 4)); 538 + bytes_to_read = round_down(bytes_to_read, 4); 539 + /* Read 4 byte word chunks then single bytes */ 540 + if (bytes_to_read) { 541 + ioread32_rep(ahb_base, rxbuf, 542 + (bytes_to_read / 4)); 543 + } else if (!word_remain && mod_bytes) { 544 + unsigned int temp = ioread32(ahb_base); 545 + 546 + bytes_to_read = mod_bytes; 547 + memcpy(rxbuf, &temp, min((unsigned int) 548 + (rxbuf_end - rxbuf), 549 + bytes_to_read)); 550 + } 540 551 rxbuf += bytes_to_read; 541 552 remaining -= bytes_to_read; 542 553 bytes_to_read = cqspi_get_rd_sram_level(cqspi);
+13 -5
drivers/net/ethernet/broadcom/bcmsysport.c
··· 2144 2144 .ndo_select_queue = bcm_sysport_select_queue, 2145 2145 }; 2146 2146 2147 - static int bcm_sysport_map_queues(struct net_device *dev, 2147 + static int bcm_sysport_map_queues(struct notifier_block *nb, 2148 2148 struct dsa_notifier_register_info *info) 2149 2149 { 2150 - struct bcm_sysport_priv *priv = netdev_priv(dev); 2151 2150 struct bcm_sysport_tx_ring *ring; 2151 + struct bcm_sysport_priv *priv; 2152 2152 struct net_device *slave_dev; 2153 2153 unsigned int num_tx_queues; 2154 2154 unsigned int q, start, port; 2155 + struct net_device *dev; 2156 + 2157 + priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); 2158 + if (priv->netdev != info->master) 2159 + return 0; 2160 + 2161 + dev = info->master; 2155 2162 2156 2163 /* We can't be setting up queue inspection for non directly attached 2157 2164 * switches ··· 2181 2174 if (priv->is_lite) 2182 2175 netif_set_real_num_tx_queues(slave_dev, 2183 2176 slave_dev->num_tx_queues / 2); 2177 + 2184 2178 num_tx_queues = slave_dev->real_num_tx_queues; 2185 2179 2186 2180 if (priv->per_port_num_tx_queues && 2187 2181 priv->per_port_num_tx_queues != num_tx_queues) 2188 - netdev_warn(slave_dev, "asymetric number of per-port queues\n"); 2182 + netdev_warn(slave_dev, "asymmetric number of per-port queues\n"); 2189 2183 2190 2184 priv->per_port_num_tx_queues = num_tx_queues; 2191 2185 ··· 2209 2201 return 0; 2210 2202 } 2211 2203 2212 - static int bcm_sysport_dsa_notifier(struct notifier_block *unused, 2204 + static int bcm_sysport_dsa_notifier(struct notifier_block *nb, 2213 2205 unsigned long event, void *ptr) 2214 2206 { 2215 2207 struct dsa_notifier_register_info *info; ··· 2219 2211 2220 2212 info = ptr; 2221 2213 2222 - return notifier_from_errno(bcm_sysport_map_queues(info->master, info)); 2214 + return notifier_from_errno(bcm_sysport_map_queues(nb, info)); 2223 2215 } 2224 2216 2225 2217 #define REV_FMT "v%2x.%02x"
+1 -1
drivers/net/ethernet/freescale/ucc_geth_ethtool.c
··· 61 61 static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { 62 62 "tx-single-collision", 63 63 "tx-multiple-collision", 64 - "tx-late-collsion", 64 + "tx-late-collision", 65 65 "tx-aborted-frames", 66 66 "tx-lost-frames", 67 67 "tx-carrier-sense-errors",
+23 -7
drivers/net/ethernet/marvell/mvpp2.c
··· 942 942 struct clk *pp_clk; 943 943 struct clk *gop_clk; 944 944 struct clk *mg_clk; 945 + struct clk *mg_core_clk; 945 946 struct clk *axi_clk; 946 947 947 948 /* List of pointers to port structures */ ··· 8769 8768 err = clk_prepare_enable(priv->mg_clk); 8770 8769 if (err < 0) 8771 8770 goto err_gop_clk; 8771 + 8772 + priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); 8773 + if (IS_ERR(priv->mg_core_clk)) { 8774 + priv->mg_core_clk = NULL; 8775 + } else { 8776 + err = clk_prepare_enable(priv->mg_core_clk); 8777 + if (err < 0) 8778 + goto err_mg_clk; 8779 + } 8772 8780 } 8773 8781 8774 8782 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); 8775 8783 if (IS_ERR(priv->axi_clk)) { 8776 8784 err = PTR_ERR(priv->axi_clk); 8777 8785 if (err == -EPROBE_DEFER) 8778 - goto err_gop_clk; 8786 + goto err_mg_core_clk; 8779 8787 priv->axi_clk = NULL; 8780 8788 } else { 8781 8789 err = clk_prepare_enable(priv->axi_clk); 8782 8790 if (err < 0) 8783 - goto err_gop_clk; 8791 + goto err_mg_core_clk; 8784 8792 } 8785 8793 8786 8794 /* Get system's tclk rate */ ··· 8803 8793 if (priv->hw_version == MVPP22) { 8804 8794 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); 8805 8795 if (err) 8806 - goto err_mg_clk; 8796 + goto err_axi_clk; 8807 8797 /* Sadly, the BM pools all share the same register to 8808 8798 * store the high 32 bits of their address. 
So they 8809 8799 * must all have the same high 32 bits, which forces ··· 8811 8801 */ 8812 8802 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 8813 8803 if (err) 8814 - goto err_mg_clk; 8804 + goto err_axi_clk; 8815 8805 } 8816 8806 8817 8807 /* Initialize network controller */ 8818 8808 err = mvpp2_init(pdev, priv); 8819 8809 if (err < 0) { 8820 8810 dev_err(&pdev->dev, "failed to initialize controller\n"); 8821 - goto err_mg_clk; 8811 + goto err_axi_clk; 8822 8812 } 8823 8813 8824 8814 /* Initialize ports */ ··· 8831 8821 if (priv->port_count == 0) { 8832 8822 dev_err(&pdev->dev, "no ports enabled\n"); 8833 8823 err = -ENODEV; 8834 - goto err_mg_clk; 8824 + goto err_axi_clk; 8835 8825 } 8836 8826 8837 8827 /* Statistics must be gathered regularly because some of them (like ··· 8859 8849 mvpp2_port_remove(priv->port_list[i]); 8860 8850 i++; 8861 8851 } 8862 - err_mg_clk: 8852 + err_axi_clk: 8863 8853 clk_disable_unprepare(priv->axi_clk); 8854 + 8855 + err_mg_core_clk: 8856 + if (priv->hw_version == MVPP22) 8857 + clk_disable_unprepare(priv->mg_core_clk); 8858 + err_mg_clk: 8864 8859 if (priv->hw_version == MVPP22) 8865 8860 clk_disable_unprepare(priv->mg_clk); 8866 8861 err_gop_clk: ··· 8912 8897 return 0; 8913 8898 8914 8899 clk_disable_unprepare(priv->axi_clk); 8900 + clk_disable_unprepare(priv->mg_core_clk); 8915 8901 clk_disable_unprepare(priv->mg_clk); 8916 8902 clk_disable_unprepare(priv->pp_clk); 8917 8903 clk_disable_unprepare(priv->gop_clk);
+1 -1
drivers/net/ethernet/mellanox/mlx4/main.c
··· 1317 1317 1318 1318 ret = mlx4_unbond_fs_rules(dev); 1319 1319 if (ret) 1320 - mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret); 1320 + mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret); 1321 1321 ret1 = mlx4_unbond_mac_table(dev); 1322 1322 if (ret1) { 1323 1323 mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
+5 -3
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 1007 1007 1008 1008 mutex_lock(&priv->state_lock); 1009 1009 1010 - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 1011 - goto out; 1012 - 1013 1010 new_channels.params = priv->channels.params; 1014 1011 mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params); 1012 + 1013 + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 1014 + priv->channels.params = new_channels.params; 1015 + goto out; 1016 + } 1015 1017 1016 1018 /* Skip if tx_min_inline is the same */ 1017 1019 if (new_channels.params.tx_min_inline_mode ==
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 877 877 }; 878 878 879 879 static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, 880 - struct mlx5e_params *params) 880 + struct mlx5e_params *params, u16 mtu) 881 881 { 882 882 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 883 883 MLX5_CQ_PERIOD_MODE_START_FROM_CQE : 884 884 MLX5_CQ_PERIOD_MODE_START_FROM_EQE; 885 885 886 886 params->hard_mtu = MLX5E_ETH_HARD_MTU; 887 + params->sw_mtu = mtu; 887 888 params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; 888 889 params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; 889 890 params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; ··· 932 931 933 932 priv->channels.params.num_channels = profile->max_nch(mdev); 934 933 935 - mlx5e_build_rep_params(mdev, &priv->channels.params); 934 + mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu); 936 935 mlx5e_build_rep_netdev(netdev); 937 936 938 937 mlx5e_timestamp_init(priv);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
··· 290 290 291 291 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { 292 292 netdev_err(priv->netdev, 293 - "\tCan't perform loobpack test while device is down\n"); 293 + "\tCan't perform loopback test while device is down\n"); 294 294 return -ENODEV; 295 295 } 296 296
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1864 1864 } 1865 1865 1866 1866 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); 1867 - if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { 1867 + if (modify_ip_header && ip_proto != IPPROTO_TCP && 1868 + ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { 1868 1869 pr_info("can't offload re-write of ip proto %d\n", ip_proto); 1869 1870 return false; 1870 1871 }
+10 -10
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 255 255 dma_addr = dma_map_single(sq->pdev, skb_data, headlen, 256 256 DMA_TO_DEVICE); 257 257 if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) 258 - return -ENOMEM; 258 + goto dma_unmap_wqe_err; 259 259 260 260 dseg->addr = cpu_to_be64(dma_addr); 261 261 dseg->lkey = sq->mkey_be; ··· 273 273 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz, 274 274 DMA_TO_DEVICE); 275 275 if (unlikely(dma_mapping_error(sq->pdev, dma_addr))) 276 - return -ENOMEM; 276 + goto dma_unmap_wqe_err; 277 277 278 278 dseg->addr = cpu_to_be64(dma_addr); 279 279 dseg->lkey = sq->mkey_be; ··· 285 285 } 286 286 287 287 return num_dma; 288 + 289 + dma_unmap_wqe_err: 290 + mlx5e_dma_unmap_wqe_err(sq, num_dma); 291 + return -ENOMEM; 288 292 } 289 293 290 294 static inline void ··· 384 380 num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, 385 381 (struct mlx5_wqe_data_seg *)cseg + ds_cnt); 386 382 if (unlikely(num_dma < 0)) 387 - goto dma_unmap_wqe_err; 383 + goto err_drop; 388 384 389 385 mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, 390 386 num_bytes, num_dma, wi, cseg); 391 387 392 388 return NETDEV_TX_OK; 393 389 394 - dma_unmap_wqe_err: 390 + err_drop: 395 391 sq->stats.dropped++; 396 - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); 397 - 398 392 dev_kfree_skb_any(skb); 399 393 400 394 return NETDEV_TX_OK; ··· 647 645 num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, 648 646 (struct mlx5_wqe_data_seg *)cseg + ds_cnt); 649 647 if (unlikely(num_dma < 0)) 650 - goto dma_unmap_wqe_err; 648 + goto err_drop; 651 649 652 650 mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, 653 651 num_bytes, num_dma, wi, cseg); 654 652 655 653 return NETDEV_TX_OK; 656 654 657 - dma_unmap_wqe_err: 655 + err_drop: 658 656 sq->stats.dropped++; 659 - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma); 660 - 661 657 dev_kfree_skb_any(skb); 662 658 663 659 return NETDEV_TX_OK;
+16 -10
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 187 187 static void del_sw_hw_rule(struct fs_node *node); 188 188 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, 189 189 struct mlx5_flow_destination *d2); 190 + static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns); 190 191 static struct mlx5_flow_rule * 191 192 find_flow_rule(struct fs_fte *fte, 192 193 struct mlx5_flow_destination *dest); ··· 482 481 483 482 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && 484 483 --fte->dests_size) { 485 - modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); 484 + modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | 485 + BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); 486 486 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; 487 487 update_fte = true; 488 488 goto out; ··· 2353 2351 2354 2352 static int init_root_ns(struct mlx5_flow_steering *steering) 2355 2353 { 2354 + int err; 2355 + 2356 2356 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); 2357 2357 if (!steering->root_ns) 2358 - goto cleanup; 2358 + return -ENOMEM; 2359 2359 2360 - if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) 2361 - goto cleanup; 2360 + err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node); 2361 + if (err) 2362 + goto out_err; 2362 2363 2363 2364 set_prio_attrs(steering->root_ns); 2364 - 2365 - if (create_anchor_flow_table(steering)) 2366 - goto cleanup; 2365 + err = create_anchor_flow_table(steering); 2366 + if (err) 2367 + goto out_err; 2367 2368 2368 2369 return 0; 2369 2370 2370 - cleanup: 2371 - mlx5_cleanup_fs(steering->dev); 2372 - return -ENOMEM; 2371 + out_err: 2372 + cleanup_root_ns(steering->root_ns); 2373 + steering->root_ns = NULL; 2374 + return err; 2373 2375 } 2374 2376 2375 2377 static void clean_tree(struct fs_node *node)
+5 -7
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 1718 1718 struct net_device *dev = mlxsw_sp_port->dev; 1719 1719 int err; 1720 1720 1721 - if (bridge_port->bridge_device->multicast_enabled) { 1722 - if (bridge_port->bridge_device->multicast_enabled) { 1723 - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, 1724 - false); 1725 - if (err) 1726 - netdev_err(dev, "Unable to remove port from SMID\n"); 1727 - } 1721 + if (bridge_port->bridge_device->multicast_enabled && 1722 + !bridge_port->mrouter) { 1723 + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); 1724 + if (err) 1725 + netdev_err(dev, "Unable to remove port from SMID\n"); 1728 1726 } 1729 1727 1730 1728 err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
+8 -2
drivers/net/ethernet/netronome/nfp/flower/action.c
··· 183 183 nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun, 184 184 const struct tc_action *action, 185 185 struct nfp_fl_pre_tunnel *pre_tun, 186 - enum nfp_flower_tun_type tun_type) 186 + enum nfp_flower_tun_type tun_type, 187 + struct net_device *netdev) 187 188 { 188 189 size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); 189 190 struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); 190 191 u32 tmp_set_ip_tun_type_index = 0; 191 192 /* Currently support one pre-tunnel so index is always 0. */ 192 193 int pretun_idx = 0; 194 + struct net *net; 193 195 194 196 if (ip_tun->options_len) 195 197 return -EOPNOTSUPP; 198 + 199 + net = dev_net(netdev); 196 200 197 201 set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; 198 202 set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; ··· 208 204 209 205 set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); 210 206 set_tun->tun_id = ip_tun->key.tun_id; 207 + set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; 211 208 212 209 /* Complete pre_tunnel action. */ 213 210 pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; ··· 516 511 *a_len += sizeof(struct nfp_fl_pre_tunnel); 517 512 518 513 set_tun = (void *)&nfp_fl->action_data[*a_len]; 519 - err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type); 514 + err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type, 515 + netdev); 520 516 if (err) 521 517 return err; 522 518 *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
+4 -1
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
··· 190 190 __be16 reserved; 191 191 __be64 tun_id __packed; 192 192 __be32 tun_type_index; 193 - __be32 extra[3]; 193 + __be16 reserved2; 194 + u8 ttl; 195 + u8 reserved3; 196 + __be32 extra[2]; 194 197 }; 195 198 196 199 /* Metadata with L2 (1W/4B)
+1 -1
drivers/net/ethernet/netronome/nfp/flower/main.c
··· 360 360 } 361 361 362 362 SET_NETDEV_DEV(repr, &priv->nn->pdev->dev); 363 - nfp_net_get_mac_addr(app->pf, port); 363 + nfp_net_get_mac_addr(app->pf, repr, port); 364 364 365 365 cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port); 366 366 err = nfp_repr_init(app, repr,
+1 -1
drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
··· 69 69 if (err) 70 70 return err < 0 ? err : 0; 71 71 72 - nfp_net_get_mac_addr(app->pf, nn->port); 72 + nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port); 73 73 74 74 return 0; 75 75 }
+3 -1
drivers/net/ethernet/netronome/nfp/nfp_main.h
··· 171 171 int nfp_hwmon_register(struct nfp_pf *pf); 172 172 void nfp_hwmon_unregister(struct nfp_pf *pf); 173 173 174 - void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port); 174 + void 175 + nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, 176 + struct nfp_port *port); 175 177 176 178 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); 177 179
+18 -13
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
··· 67 67 /** 68 68 * nfp_net_get_mac_addr() - Get the MAC address. 69 69 * @pf: NFP PF handle 70 + * @netdev: net_device to set MAC address on 70 71 * @port: NFP port structure 71 72 * 72 73 * First try to get the MAC address from NSP ETH table. If that 73 74 * fails generate a random address. 74 75 */ 75 - void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port) 76 + void 77 + nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, 78 + struct nfp_port *port) 76 79 { 77 80 struct nfp_eth_table_port *eth_port; 78 81 79 82 eth_port = __nfp_port_get_eth_port(port); 80 83 if (!eth_port) { 81 - eth_hw_addr_random(port->netdev); 84 + eth_hw_addr_random(netdev); 82 85 return; 83 86 } 84 87 85 - ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr); 86 - ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr); 88 + ether_addr_copy(netdev->dev_addr, eth_port->mac_addr); 89 + ether_addr_copy(netdev->perm_addr, eth_port->mac_addr); 87 90 } 88 91 89 92 static struct nfp_eth_table_port * ··· 514 511 return PTR_ERR(mem); 515 512 } 516 513 517 - min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1); 518 - pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats", 519 - "net.macstats", min_size, 520 - &pf->mac_stats_bar); 521 - if (IS_ERR(pf->mac_stats_mem)) { 522 - if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) { 523 - err = PTR_ERR(pf->mac_stats_mem); 524 - goto err_unmap_ctrl; 514 + if (pf->eth_tbl) { 515 + min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1); 516 + pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats", 517 + "net.macstats", min_size, 518 + &pf->mac_stats_bar); 519 + if (IS_ERR(pf->mac_stats_mem)) { 520 + if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) { 521 + err = PTR_ERR(pf->mac_stats_mem); 522 + goto err_unmap_ctrl; 523 + } 524 + pf->mac_stats_mem = NULL; 525 525 } 526 - pf->mac_stats_mem = NULL; 527 526 } 528 527 529 528 pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
+1 -1
drivers/net/ethernet/qlogic/qed/qed_ll2.c
··· 2370 2370 u8 flags = 0; 2371 2371 2372 2372 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { 2373 - DP_INFO(cdev, "Cannot transmit a checksumed packet\n"); 2373 + DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); 2374 2374 return -EINVAL; 2375 2375 } 2376 2376
+1 -1
drivers/net/ethernet/qlogic/qed/qed_roce.c
··· 848 848 849 849 if (!(qp->resp_offloaded)) { 850 850 DP_NOTICE(p_hwfn, 851 - "The responder's qp should be offloded before requester's\n"); 851 + "The responder's qp should be offloaded before requester's\n"); 852 852 return -EINVAL; 853 853 } 854 854
+1 -1
drivers/net/ethernet/realtek/8139too.c
··· 2224 2224 struct rtl8139_private *tp = netdev_priv(dev); 2225 2225 const int irq = tp->pci_dev->irq; 2226 2226 2227 - disable_irq(irq); 2227 + disable_irq_nosync(irq); 2228 2228 rtl8139_interrupt(irq, dev); 2229 2229 enable_irq(irq); 2230 2230 }
+3 -2
drivers/net/ethernet/sfc/ef10.c
··· 4784 4784 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that 4785 4785 * the rule is not removed by efx_rps_hash_del() below. 4786 4786 */ 4787 - ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, 4788 - filter_idx, true) == 0; 4787 + if (ret) 4788 + ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, 4789 + filter_idx, true) == 0; 4789 4790 /* While we can't safely dereference rule (we dropped the lock), we can 4790 4791 * still test it for NULL. 4791 4792 */
+2
drivers/net/ethernet/sfc/rx.c
··· 839 839 int rc; 840 840 841 841 rc = efx->type->filter_insert(efx, &req->spec, true); 842 + if (rc >= 0) 843 + rc %= efx->type->max_rx_ip_filters; 842 844 if (efx->rps_hash_table) { 843 845 spin_lock_bh(&efx->rps_hash_lock); 844 846 rule = efx_rps_hash_find(efx, &req->spec);
+2
drivers/net/ethernet/ti/cpsw.c
··· 1340 1340 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, 1341 1341 HOST_PORT_NUM, ALE_VLAN | 1342 1342 ALE_SECURE, slave->port_vlan); 1343 + cpsw_ale_control_set(cpsw->ale, slave_port, 1344 + ALE_PORT_DROP_UNKNOWN_VLAN, 1); 1343 1345 } 1344 1346 1345 1347 static void soft_reset_slave(struct cpsw_slave *slave)
+10 -1
drivers/net/phy/phy_device.c
··· 535 535 536 536 /* Grab the bits from PHYIR1, and put them in the upper half */ 537 537 phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); 538 - if (phy_reg < 0) 538 + if (phy_reg < 0) { 539 + /* if there is no device, return without an error so scanning 540 + * the bus works properly 541 + */ 542 + if (phy_reg == -EIO || phy_reg == -ENODEV) { 543 + *phy_id = 0xffffffff; 544 + return 0; 545 + } 546 + 539 547 return -EIO; 548 + } 540 549 541 550 *phy_id = (phy_reg & 0xffff) << 16; 542 551
+13
drivers/net/usb/qmi_wwan.c
··· 1098 1098 {QMI_FIXED_INTF(0x05c6, 0x9080, 8)}, 1099 1099 {QMI_FIXED_INTF(0x05c6, 0x9083, 3)}, 1100 1100 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 1101 + {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */ 1101 1102 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 1102 1103 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 1103 1104 {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ ··· 1342 1341 if (!id->driver_info) { 1343 1342 dev_dbg(&intf->dev, "setting defaults for dynamic device id\n"); 1344 1343 id->driver_info = (unsigned long)&qmi_wwan_info; 1344 + } 1345 + 1346 + /* There are devices where the same interface number can be 1347 + * configured as different functions. We should only bind to 1348 + * vendor specific functions when matching on interface number 1349 + */ 1350 + if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER && 1351 + desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) { 1352 + dev_dbg(&intf->dev, 1353 + "Rejecting interface number match for class %02x\n", 1354 + desc->bInterfaceClass); 1355 + return -ENODEV; 1345 1356 } 1346 1357 1347 1358 /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
+20 -16
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
··· 459 459 kfree(req); 460 460 } 461 461 462 - static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) 462 + static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) 463 463 { 464 464 struct brcmf_fw *fwctx = ctx; 465 465 struct brcmf_fw_item *cur; ··· 498 498 brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length); 499 499 cur->nv_data.data = nvram; 500 500 cur->nv_data.len = nvram_length; 501 - return; 501 + return 0; 502 502 503 503 fail: 504 - brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); 505 - fwctx->done(fwctx->dev, -ENOENT, NULL); 506 - brcmf_fw_free_request(fwctx->req); 507 - kfree(fwctx); 504 + return -ENOENT; 508 505 } 509 506 510 507 static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async) ··· 550 553 brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path, 551 554 fw ? "" : "not "); 552 555 553 - if (fw) { 554 - if (cur->type == BRCMF_FW_TYPE_BINARY) 555 - cur->binary = fw; 556 - else if (cur->type == BRCMF_FW_TYPE_NVRAM) 557 - brcmf_fw_request_nvram_done(fw, fwctx); 558 - else 559 - release_firmware(fw); 560 - } else if (cur->type == BRCMF_FW_TYPE_NVRAM) { 561 - brcmf_fw_request_nvram_done(NULL, fwctx); 562 - } else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL)) { 556 + if (!fw) 563 557 ret = -ENOENT; 558 + 559 + switch (cur->type) { 560 + case BRCMF_FW_TYPE_NVRAM: 561 + ret = brcmf_fw_request_nvram_done(fw, fwctx); 562 + break; 563 + case BRCMF_FW_TYPE_BINARY: 564 + cur->binary = fw; 565 + break; 566 + default: 567 + /* something fishy here so bail out early */ 568 + brcmf_err("unknown fw type: %d\n", cur->type); 569 + release_firmware(fw); 570 + ret = -EINVAL; 564 571 goto fail; 565 572 } 573 + 574 + if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL)) 575 + goto fail; 566 576 567 577 do { 568 578 if (++fwctx->curpos == fwctx->req->n_items) {
+5 -8
drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
··· 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 11 + * Copyright(c) 2018 Intel Corporation 11 12 * 12 13 * This program is free software; you can redistribute it and/or modify 13 14 * it under the terms of version 2 of the GNU General Public License as ··· 31 30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 32 31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 33 32 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 34 - * Copyright(c) 2018 Intel Corporation 33 + * Copyright(c) 2018 Intel Corporation 35 34 * All rights reserved. 36 35 * 37 36 * Redistribution and use in source and binary forms, with or without ··· 750 749 } __packed; 751 750 752 751 #define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac) 753 - #define IWL_SCAN_REQ_UMAC_SIZE_V7 (sizeof(struct iwl_scan_req_umac) - \ 754 - 4 * sizeof(u8)) 755 - #define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ 756 - 2 * sizeof(u8) - sizeof(__le16)) 757 - #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ 758 - 2 * sizeof(__le32) - 2 * sizeof(u8) - \ 759 - sizeof(__le16)) 752 + #define IWL_SCAN_REQ_UMAC_SIZE_V7 48 753 + #define IWL_SCAN_REQ_UMAC_SIZE_V6 44 754 + #define IWL_SCAN_REQ_UMAC_SIZE_V1 36 760 755 761 756 /** 762 757 * struct iwl_umac_scan_abort
+95 -16
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
··· 76 76 #include "iwl-io.h" 77 77 #include "iwl-csr.h" 78 78 #include "fw/acpi.h" 79 + #include "fw/api/nvm-reg.h" 79 80 80 81 /* NVM offsets (in words) definitions */ 81 82 enum nvm_offsets { ··· 147 146 149, 153, 157, 161, 165, 169, 173, 177, 181 148 147 }; 149 148 150 - #define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) 151 - #define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) 149 + #define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels) 150 + #define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels) 152 151 #define NUM_2GHZ_CHANNELS 14 153 152 #define NUM_2GHZ_CHANNELS_EXT 14 154 153 #define FIRST_2GHZ_HT_MINUS 5 ··· 302 301 const u8 *nvm_chan; 303 302 304 303 if (cfg->nvm_type != IWL_NVM_EXT) { 305 - num_of_ch = IWL_NUM_CHANNELS; 304 + num_of_ch = IWL_NVM_NUM_CHANNELS; 306 305 nvm_chan = &iwl_nvm_channels[0]; 307 306 num_2ghz_channels = NUM_2GHZ_CHANNELS; 308 307 } else { 309 - num_of_ch = IWL_NUM_CHANNELS_EXT; 308 + num_of_ch = IWL_NVM_NUM_CHANNELS_EXT; 310 309 nvm_chan = &iwl_ext_nvm_channels[0]; 311 310 num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT; 312 311 } ··· 721 720 if (cfg->nvm_type != IWL_NVM_EXT) 722 721 data = kzalloc(sizeof(*data) + 723 722 sizeof(struct ieee80211_channel) * 724 - IWL_NUM_CHANNELS, 723 + IWL_NVM_NUM_CHANNELS, 725 724 GFP_KERNEL); 726 725 else 727 726 data = kzalloc(sizeof(*data) + 728 727 sizeof(struct ieee80211_channel) * 729 - IWL_NUM_CHANNELS_EXT, 728 + IWL_NVM_NUM_CHANNELS_EXT, 730 729 GFP_KERNEL); 731 730 if (!data) 732 731 return NULL; ··· 843 842 return flags; 844 843 } 845 844 845 + struct regdb_ptrs { 846 + struct ieee80211_wmm_rule *rule; 847 + u32 token; 848 + }; 849 + 846 850 struct ieee80211_regdomain * 847 851 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, 848 - int num_of_ch, __le32 *channels, u16 fw_mcc) 852 + int num_of_ch, __le32 *channels, u16 fw_mcc, 853 + u16 geo_info) 849 854 { 850 855 int ch_idx; 851 856 u16 ch_flags; 852 857 u32 reg_rule_flags, prev_reg_rule_flags 
= 0; 853 858 const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? 854 859 iwl_ext_nvm_channels : iwl_nvm_channels; 855 - struct ieee80211_regdomain *regd; 856 - int size_of_regd; 860 + struct ieee80211_regdomain *regd, *copy_rd; 861 + int size_of_regd, regd_to_copy, wmms_to_copy; 862 + int size_of_wmms = 0; 857 863 struct ieee80211_reg_rule *rule; 864 + struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm; 865 + struct regdb_ptrs *regdb_ptrs; 858 866 enum nl80211_band band; 859 867 int center_freq, prev_center_freq = 0; 860 - int valid_rules = 0; 868 + int valid_rules = 0, n_wmms = 0; 869 + int i; 861 870 bool new_rule; 862 871 int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? 863 - IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; 872 + IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; 864 873 865 874 if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) 866 875 return ERR_PTR(-EINVAL); ··· 886 875 sizeof(struct ieee80211_regdomain) + 887 876 num_of_ch * sizeof(struct ieee80211_reg_rule); 888 877 889 - regd = kzalloc(size_of_regd, GFP_KERNEL); 878 + if (geo_info & GEO_WMM_ETSI_5GHZ_INFO) 879 + size_of_wmms = 880 + num_of_ch * sizeof(struct ieee80211_wmm_rule); 881 + 882 + regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL); 890 883 if (!regd) 891 884 return ERR_PTR(-ENOMEM); 885 + 886 + regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL); 887 + if (!regdb_ptrs) { 888 + copy_rd = ERR_PTR(-ENOMEM); 889 + goto out; 890 + } 891 + 892 + /* set alpha2 from FW. 
*/ 893 + regd->alpha2[0] = fw_mcc >> 8; 894 + regd->alpha2[1] = fw_mcc & 0xff; 895 + 896 + wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); 892 897 893 898 for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { 894 899 ch_flags = (u16)__le32_to_cpup(channels + ch_idx); ··· 954 927 955 928 iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, 956 929 nvm_chan[ch_idx], ch_flags); 930 + 931 + if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) || 932 + band == NL80211_BAND_2GHZ) 933 + continue; 934 + 935 + if (!reg_query_regdb_wmm(regd->alpha2, center_freq, 936 + &regdb_ptrs[n_wmms].token, wmm_rule)) { 937 + /* Add only new rules */ 938 + for (i = 0; i < n_wmms; i++) { 939 + if (regdb_ptrs[i].token == 940 + regdb_ptrs[n_wmms].token) { 941 + rule->wmm_rule = regdb_ptrs[i].rule; 942 + break; 943 + } 944 + } 945 + if (i == n_wmms) { 946 + rule->wmm_rule = wmm_rule; 947 + regdb_ptrs[n_wmms++].rule = wmm_rule; 948 + wmm_rule++; 949 + } 950 + } 957 951 } 958 952 959 953 regd->n_reg_rules = valid_rules; 954 + regd->n_wmm_rules = n_wmms; 960 955 961 - /* set alpha2 from FW. */ 962 - regd->alpha2[0] = fw_mcc >> 8; 963 - regd->alpha2[1] = fw_mcc & 0xff; 956 + /* 957 + * Narrow down regdom for unused regulatory rules to prevent hole 958 + * between reg rules to wmm rules. 
959 + */ 960 + regd_to_copy = sizeof(struct ieee80211_regdomain) + 961 + valid_rules * sizeof(struct ieee80211_reg_rule); 964 962 965 - return regd; 963 + wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms; 964 + 965 + copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL); 966 + if (!copy_rd) { 967 + copy_rd = ERR_PTR(-ENOMEM); 968 + goto out; 969 + } 970 + 971 + memcpy(copy_rd, regd, regd_to_copy); 972 + memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd, 973 + wmms_to_copy); 974 + 975 + d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy); 976 + s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd); 977 + 978 + for (i = 0; i < regd->n_reg_rules; i++) { 979 + if (!regd->reg_rules[i].wmm_rule) 980 + continue; 981 + 982 + copy_rd->reg_rules[i].wmm_rule = d_wmm + 983 + (regd->reg_rules[i].wmm_rule - s_wmm) / 984 + sizeof(struct ieee80211_wmm_rule); 985 + } 986 + 987 + out: 988 + kfree(regdb_ptrs); 989 + kfree(regd); 990 + return copy_rd; 966 991 } 967 992 IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
+4 -2
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
··· 101 101 * 102 102 * This function parses the regulatory channel data received as a 103 103 * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, 104 - * to be fed into the regulatory core. An ERR_PTR is returned on error. 104 + * to be fed into the regulatory core. In case the geo_info is set handle 105 + * accordingly. An ERR_PTR is returned on error. 105 106 * If not given to the regulatory core, the user is responsible for freeing 106 107 * the regdomain returned here with kfree. 107 108 */ 108 109 struct ieee80211_regdomain * 109 110 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, 110 - int num_of_ch, __le32 *channels, u16 fw_mcc); 111 + int num_of_ch, __le32 *channels, u16 fw_mcc, 112 + u16 geo_info); 111 113 112 114 #endif /* __iwl_nvm_parse_h__ */
+2 -1
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 311 311 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 312 312 __le32_to_cpu(resp->n_channels), 313 313 resp->channels, 314 - __le16_to_cpu(resp->mcc)); 314 + __le16_to_cpu(resp->mcc), 315 + __le16_to_cpu(resp->geo_info)); 315 316 /* Store the return source id */ 316 317 src_id = resp->source_id; 317 318 kfree(resp);
-15
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
··· 158 158 159 159 static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv) 160 160 { 161 - struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; 162 - 163 - /* override ant_num / ant_path */ 164 - if (mod_params->ant_sel) { 165 - rtlpriv->btcoexist.btc_info.ant_num = 166 - (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); 167 - 168 - rtlpriv->btcoexist.btc_info.single_ant_path = 169 - (mod_params->ant_sel == 1 ? 0 : 1); 170 - } 171 161 return rtlpriv->btcoexist.btc_info.single_ant_path; 172 162 } 173 163 ··· 168 178 169 179 static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv) 170 180 { 171 - struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params; 172 181 u8 num; 173 182 174 183 if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2) 175 184 num = 2; 176 185 else 177 186 num = 1; 178 - 179 - /* override ant_num / ant_path */ 180 - if (mod_params->ant_sel) 181 - num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1; 182 187 183 188 return num; 184 189 }
+7 -4
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
··· 848 848 return false; 849 849 } 850 850 851 + if (rtlpriv->cfg->ops->get_btc_status()) 852 + rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv); 853 + 851 854 bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL); 852 855 rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3)); 853 856 ··· 2699 2696 rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; 2700 2697 rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1); 2701 2698 rtlpriv->btcoexist.btc_info.single_ant_path = 2702 - (value & 0x40); /*0xc3[6]*/ 2699 + (value & 0x40 ? ANT_AUX : ANT_MAIN); /*0xc3[6]*/ 2703 2700 } else { 2704 2701 rtlpriv->btcoexist.btc_info.btcoexist = 0; 2705 2702 rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B; 2706 2703 rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; 2707 - rtlpriv->btcoexist.btc_info.single_ant_path = 0; 2704 + rtlpriv->btcoexist.btc_info.single_ant_path = ANT_MAIN; 2708 2705 } 2709 2706 2710 2707 /* override ant_num / ant_path */ 2711 2708 if (mod_params->ant_sel) { 2712 2709 rtlpriv->btcoexist.btc_info.ant_num = 2713 - (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1); 2710 + (mod_params->ant_sel == 1 ? ANT_X1 : ANT_X2); 2714 2711 2715 2712 rtlpriv->btcoexist.btc_info.single_ant_path = 2716 - (mod_params->ant_sel == 1 ? 0 : 1); 2713 + (mod_params->ant_sel == 1 ? ANT_AUX : ANT_MAIN); 2717 2714 } 2718 2715 } 2719 2716
+5
drivers/net/wireless/realtek/rtlwifi/wifi.h
··· 2823 2823 ANT_X1 = 1, 2824 2824 }; 2825 2825 2826 + enum bt_ant_path { 2827 + ANT_MAIN = 0, 2828 + ANT_AUX = 1, 2829 + }; 2830 + 2826 2831 enum bt_co_type { 2827 2832 BT_2WIRE = 0, 2828 2833 BT_ISSC_3WIRE = 1,
+5 -2
drivers/of/fdt.c
··· 942 942 int offset; 943 943 const char *p, *q, *options = NULL; 944 944 int l; 945 - const struct earlycon_id *match; 945 + const struct earlycon_id **p_match; 946 946 const void *fdt = initial_boot_params; 947 947 948 948 offset = fdt_path_offset(fdt, "/chosen"); ··· 969 969 return 0; 970 970 } 971 971 972 - for (match = __earlycon_table; match < __earlycon_table_end; match++) { 972 + for (p_match = __earlycon_table; p_match < __earlycon_table_end; 973 + p_match++) { 974 + const struct earlycon_id *match = *p_match; 975 + 973 976 if (!match->compatible[0]) 974 977 continue; 975 978
+1 -1
drivers/parisc/ccio-dma.c
··· 1263 1263 * I/O Page Directory, the resource map, and initalizing the 1264 1264 * U2/Uturn chip into virtual mode. 1265 1265 */ 1266 - static void 1266 + static void __init 1267 1267 ccio_ioc_init(struct ioc *ioc) 1268 1268 { 1269 1269 int i;
+23 -14
drivers/rtc/rtc-opal.c
··· 57 57 58 58 static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm) 59 59 { 60 - long rc = OPAL_BUSY; 60 + s64 rc = OPAL_BUSY; 61 61 int retries = 10; 62 62 u32 y_m_d; 63 63 u64 h_m_s_ms; ··· 66 66 67 67 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 68 68 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms); 69 - if (rc == OPAL_BUSY_EVENT) 69 + if (rc == OPAL_BUSY_EVENT) { 70 + msleep(OPAL_BUSY_DELAY_MS); 70 71 opal_poll_events(NULL); 71 - else if (retries-- && (rc == OPAL_HARDWARE 72 - || rc == OPAL_INTERNAL_ERROR)) 73 - msleep(10); 74 - else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) 75 - break; 72 + } else if (rc == OPAL_BUSY) { 73 + msleep(OPAL_BUSY_DELAY_MS); 74 + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) { 75 + if (retries--) { 76 + msleep(10); /* Wait 10ms before retry */ 77 + rc = OPAL_BUSY; /* go around again */ 78 + } 79 + } 76 80 } 77 81 78 82 if (rc != OPAL_SUCCESS) ··· 91 87 92 88 static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm) 93 89 { 94 - long rc = OPAL_BUSY; 90 + s64 rc = OPAL_BUSY; 95 91 int retries = 10; 96 92 u32 y_m_d = 0; 97 93 u64 h_m_s_ms = 0; 98 94 99 95 tm_to_opal(tm, &y_m_d, &h_m_s_ms); 96 + 100 97 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { 101 98 rc = opal_rtc_write(y_m_d, h_m_s_ms); 102 - if (rc == OPAL_BUSY_EVENT) 99 + if (rc == OPAL_BUSY_EVENT) { 100 + msleep(OPAL_BUSY_DELAY_MS); 103 101 opal_poll_events(NULL); 104 - else if (retries-- && (rc == OPAL_HARDWARE 105 - || rc == OPAL_INTERNAL_ERROR)) 106 - msleep(10); 107 - else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT) 108 - break; 102 + } else if (rc == OPAL_BUSY) { 103 + msleep(OPAL_BUSY_DELAY_MS); 104 + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) { 105 + if (retries--) { 106 + msleep(10); /* Wait 10ms before retry */ 107 + rc = OPAL_BUSY; /* go around again */ 108 + } 109 + } 109 110 } 110 111 111 112 return rc == OPAL_SUCCESS ? 0 : -EIO;
+1 -1
drivers/sbus/char/oradax.c
··· 3 3 * 4 4 * This program is free software: you can redistribute it and/or modify 5 5 * it under the terms of the GNU General Public License as published by 6 - * the Free Software Foundation, either version 3 of the License, or 6 + * the Free Software Foundation, either version 2 of the License, or 7 7 * (at your option) any later version. 8 8 * 9 9 * This program is distributed in the hope that it will be useful,
+1 -2
drivers/scsi/isci/port_config.c
··· 291 291 * Note: We have not moved the current phy_index so we will actually 292 292 * compare the startting phy with itself. 293 293 * This is expected and required to add the phy to the port. */ 294 - while (phy_index < SCI_MAX_PHYS) { 294 + for (; phy_index < SCI_MAX_PHYS; phy_index++) { 295 295 if ((phy_mask & (1 << phy_index)) == 0) 296 296 continue; 297 297 sci_phy_get_sas_address(&ihost->phys[phy_index], ··· 311 311 &ihost->phys[phy_index]); 312 312 313 313 assigned_phy_mask |= (1 << phy_index); 314 - phy_index++; 315 314 } 316 315 317 316 }
+5 -2
drivers/scsi/storvsc_drv.c
··· 1722 1722 max_targets = STORVSC_MAX_TARGETS; 1723 1723 max_channels = STORVSC_MAX_CHANNELS; 1724 1724 /* 1725 - * On Windows8 and above, we support sub-channels for storage. 1725 + * On Windows8 and above, we support sub-channels for storage 1726 + * on SCSI and FC controllers. 1726 1727 * The number of sub-channels offerred is based on the number of 1727 1728 * VCPUs in the guest. 1728 1729 */ 1729 - max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel); 1730 + if (!dev_is_ide) 1731 + max_sub_channels = 1732 + (num_cpus - 1) / storvsc_vcpus_per_sub_channel; 1730 1733 } 1731 1734 1732 1735 scsi_driver.can_queue = (max_outstanding_req_per_channel *
+1 -1
drivers/slimbus/messaging.c
··· 183 183 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7 184 184 }; 185 185 186 - clamp(code, 1, (int)ARRAY_SIZE(sizetocode)); 186 + code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode)); 187 187 188 188 return sizetocode[code - 1]; 189 189 }
+1 -1
drivers/soc/bcm/raspberrypi-power.c
··· 45 45 struct rpi_power_domain_packet { 46 46 u32 domain; 47 47 u32 on; 48 - } __packet; 48 + }; 49 49 50 50 /* 51 51 * Asks the firmware to enable or disable power on a specific power
+1 -1
drivers/staging/wilc1000/host_interface.c
··· 1390 1390 } 1391 1391 1392 1392 if (hif_drv->usr_conn_req.ies) { 1393 - conn_info.req_ies = kmemdup(conn_info.req_ies, 1393 + conn_info.req_ies = kmemdup(hif_drv->usr_conn_req.ies, 1394 1394 hif_drv->usr_conn_req.ies_len, 1395 1395 GFP_KERNEL); 1396 1396 if (conn_info.req_ies)
+4 -4
drivers/target/target_core_iblock.c
··· 427 427 { 428 428 struct se_device *dev = cmd->se_dev; 429 429 struct scatterlist *sg = &cmd->t_data_sg[0]; 430 - unsigned char *buf, zero = 0x00, *p = &zero; 431 - int rc, ret; 430 + unsigned char *buf, *not_zero; 431 + int ret; 432 432 433 433 buf = kmap(sg_page(sg)) + sg->offset; 434 434 if (!buf) ··· 437 437 * Fall back to block_execute_write_same() slow-path if 438 438 * incoming WRITE_SAME payload does not contain zeros. 439 439 */ 440 - rc = memcmp(buf, p, cmd->data_length); 440 + not_zero = memchr_inv(buf, 0x00, cmd->data_length); 441 441 kunmap(sg_page(sg)); 442 442 443 - if (rc) 443 + if (not_zero) 444 444 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 445 445 446 446 ret = blkdev_issue_zeroout(bdev,
+22 -1
drivers/tty/n_gsm.c
··· 121 121 struct mutex mutex; 122 122 123 123 /* Link layer */ 124 + int mode; 125 + #define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */ 126 + #define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */ 124 127 spinlock_t lock; /* Protects the internal state */ 125 128 struct timer_list t1; /* Retransmit timer for SABM and UA */ 126 129 int retries; ··· 1367 1364 ctrl->data = data; 1368 1365 ctrl->len = clen; 1369 1366 gsm->pending_cmd = ctrl; 1370 - gsm->cretries = gsm->n2; 1367 + 1368 + /* If DLCI0 is in ADM mode skip retries, it won't respond */ 1369 + if (gsm->dlci[0]->mode == DLCI_MODE_ADM) 1370 + gsm->cretries = 1; 1371 + else 1372 + gsm->cretries = gsm->n2; 1373 + 1371 1374 mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); 1372 1375 gsm_control_transmit(gsm, ctrl); 1373 1376 spin_unlock_irqrestore(&gsm->control_lock, flags); ··· 1481 1472 if (debug & 8) 1482 1473 pr_info("DLCI %d opening in ADM mode.\n", 1483 1474 dlci->addr); 1475 + dlci->mode = DLCI_MODE_ADM; 1484 1476 gsm_dlci_open(dlci); 1485 1477 } else { 1486 1478 gsm_dlci_close(dlci); ··· 2871 2861 static int gsm_carrier_raised(struct tty_port *port) 2872 2862 { 2873 2863 struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); 2864 + struct gsm_mux *gsm = dlci->gsm; 2865 + 2874 2866 /* Not yet open so no carrier info */ 2875 2867 if (dlci->state != DLCI_OPEN) 2876 2868 return 0; 2877 2869 if (debug & 2) 2878 2870 return 1; 2871 + 2872 + /* 2873 + * Basic mode with control channel in ADM mode may not respond 2874 + * to CMD_MSC at all and modem_rx is empty. 2875 + */ 2876 + if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM && 2877 + !dlci->modem_rx) 2878 + return 1; 2879 + 2879 2880 return dlci->modem_rx & TIOCM_CD; 2880 2881 } 2881 2882
+4 -2
drivers/tty/serial/earlycon.c
··· 169 169 */ 170 170 int __init setup_earlycon(char *buf) 171 171 { 172 - const struct earlycon_id *match; 172 + const struct earlycon_id **p_match; 173 173 174 174 if (!buf || !buf[0]) 175 175 return -EINVAL; ··· 177 177 if (early_con.flags & CON_ENABLED) 178 178 return -EALREADY; 179 179 180 - for (match = __earlycon_table; match < __earlycon_table_end; match++) { 180 + for (p_match = __earlycon_table; p_match < __earlycon_table_end; 181 + p_match++) { 182 + const struct earlycon_id *match = *p_match; 181 183 size_t len = strlen(match->name); 182 184 183 185 if (strncmp(buf, match->name, len))
+18 -1
drivers/tty/serial/imx.c
··· 316 316 * differ from the value that was last written. As it only 317 317 * clears after being set, reread conditionally. 318 318 */ 319 - if (sport->ucr2 & UCR2_SRST) 319 + if (!(sport->ucr2 & UCR2_SRST)) 320 320 sport->ucr2 = readl(sport->port.membase + offset); 321 321 return sport->ucr2; 322 322 break; ··· 1833 1833 rs485conf->flags &= ~SER_RS485_ENABLED; 1834 1834 1835 1835 if (rs485conf->flags & SER_RS485_ENABLED) { 1836 + /* Enable receiver if low-active RTS signal is requested */ 1837 + if (sport->have_rtscts && !sport->have_rtsgpio && 1838 + !(rs485conf->flags & SER_RS485_RTS_ON_SEND)) 1839 + rs485conf->flags |= SER_RS485_RX_DURING_TX; 1840 + 1836 1841 /* disable transmitter */ 1837 1842 ucr2 = imx_uart_readl(sport, UCR2); 1838 1843 if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) ··· 2269 2264 if (sport->port.rs485.flags & SER_RS485_ENABLED && 2270 2265 (!sport->have_rtscts && !sport->have_rtsgpio)) 2271 2266 dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); 2267 + 2268 + /* 2269 + * If using the i.MX UART RTS/CTS control then the RTS (CTS_B) 2270 + * signal cannot be set low during transmission in case the 2271 + * receiver is off (limitation of the i.MX UART IP). 2272 + */ 2273 + if (sport->port.rs485.flags & SER_RS485_ENABLED && 2274 + sport->have_rtscts && !sport->have_rtsgpio && 2275 + (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) && 2276 + !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX))) 2277 + dev_err(&pdev->dev, 2278 + "low-active RTS not possible when receiver is off, enabling receiver\n"); 2272 2279 2273 2280 imx_uart_rs485_config(&sport->port, &sport->port.rs485); 2274 2281
-1
drivers/tty/serial/mvebu-uart.c
··· 495 495 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); 496 496 termios->c_cflag &= CREAD | CBAUD; 497 497 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); 498 - termios->c_lflag = old->c_lflag; 499 498 } 500 499 501 500 spin_unlock_irqrestore(&port->lock, flags);
+6 -4
drivers/tty/serial/qcom_geni_serial.c
··· 1022 1022 struct qcom_geni_serial_port *port; 1023 1023 struct uart_port *uport; 1024 1024 struct resource *res; 1025 + int irq; 1025 1026 1026 1027 if (pdev->dev.of_node) 1027 1028 line = of_alias_get_id(pdev->dev.of_node, "serial"); ··· 1062 1061 port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS; 1063 1062 port->tx_fifo_width = DEF_FIFO_WIDTH_BITS; 1064 1063 1065 - uport->irq = platform_get_irq(pdev, 0); 1066 - if (uport->irq < 0) { 1067 - dev_err(&pdev->dev, "Failed to get IRQ %d\n", uport->irq); 1068 - return uport->irq; 1064 + irq = platform_get_irq(pdev, 0); 1065 + if (irq < 0) { 1066 + dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq); 1067 + return irq; 1069 1068 } 1069 + uport->irq = irq; 1070 1070 1071 1071 uport->private_data = &qcom_geni_console_driver; 1072 1072 platform_set_drvdata(pdev, port);
+1 -1
drivers/tty/serial/xilinx_uartps.c
··· 1181 1181 /* only set baud if specified on command line - otherwise 1182 1182 * assume it has been initialized by a boot loader. 1183 1183 */ 1184 - if (device->baud) { 1184 + if (port->uartclk && device->baud) { 1185 1185 u32 cd = 0, bdiv = 0; 1186 1186 u32 mr; 1187 1187 int div8;
+4 -1
drivers/tty/tty_io.c
··· 2816 2816 2817 2817 kref_init(&tty->kref); 2818 2818 tty->magic = TTY_MAGIC; 2819 - tty_ldisc_init(tty); 2819 + if (tty_ldisc_init(tty)) { 2820 + kfree(tty); 2821 + return NULL; 2822 + } 2820 2823 tty->session = NULL; 2821 2824 tty->pgrp = NULL; 2822 2825 mutex_init(&tty->legacy_mutex);
+13 -16
drivers/tty/tty_ldisc.c
··· 176 176 return ERR_CAST(ldops); 177 177 } 178 178 179 - ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL); 180 - if (ld == NULL) { 181 - put_ldops(ldops); 182 - return ERR_PTR(-ENOMEM); 183 - } 184 - 179 + /* 180 + * There is no way to handle allocation failure of only 16 bytes. 181 + * Let's simplify error handling and save more memory. 182 + */ 183 + ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL); 185 184 ld->ops = ldops; 186 185 ld->tty = tty; 187 186 ··· 526 527 static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) 527 528 { 528 529 /* There is an outstanding reference here so this is safe */ 529 - old = tty_ldisc_get(tty, old->ops->num); 530 - WARN_ON(IS_ERR(old)); 531 - tty->ldisc = old; 532 - tty_set_termios_ldisc(tty, old->ops->num); 533 - if (tty_ldisc_open(tty, old) < 0) { 534 - tty_ldisc_put(old); 530 + if (tty_ldisc_failto(tty, old->ops->num) < 0) { 531 + const char *name = tty_name(tty); 532 + 533 + pr_warn("Falling back ldisc for %s.\n", name); 535 534 /* The traditional behaviour is to fall back to N_TTY, we 536 535 want to avoid falling back to N_NULL unless we have no 537 536 choice to avoid the risk of breaking anything */ 538 537 if (tty_ldisc_failto(tty, N_TTY) < 0 && 539 538 tty_ldisc_failto(tty, N_NULL) < 0) 540 - panic("Couldn't open N_NULL ldisc for %s.", 541 - tty_name(tty)); 539 + panic("Couldn't open N_NULL ldisc for %s.", name); 542 540 } 543 541 } 544 542 ··· 820 824 * the tty structure is not completely set up when this call is made. 821 825 */ 822 826 823 - void tty_ldisc_init(struct tty_struct *tty) 827 + int tty_ldisc_init(struct tty_struct *tty) 824 828 { 825 829 struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY); 826 830 if (IS_ERR(ld)) 827 - panic("n_tty: init_tty"); 831 + return PTR_ERR(ld); 828 832 tty->ldisc = ld; 833 + return 0; 829 834 } 830 835 831 836 /**
+23 -49
drivers/uio/uio_hv_generic.c
··· 19 19 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \ 20 20 * > /sys/bus/vmbus/drivers/uio_hv_generic/bind 21 21 */ 22 - 22 + #define DEBUG 1 23 23 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 24 24 25 25 #include <linux/device.h> ··· 94 94 */ 95 95 static void hv_uio_channel_cb(void *context) 96 96 { 97 - struct hv_uio_private_data *pdata = context; 98 - struct hv_device *dev = pdata->device; 97 + struct vmbus_channel *chan = context; 98 + struct hv_device *hv_dev = chan->device_obj; 99 + struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); 99 100 100 - dev->channel->inbound.ring_buffer->interrupt_mask = 1; 101 + chan->inbound.ring_buffer->interrupt_mask = 1; 101 102 virt_mb(); 102 103 103 104 uio_event_notify(&pdata->info); ··· 122 121 uio_event_notify(&pdata->info); 123 122 } 124 123 125 - /* 126 - * Handle fault when looking for sub channel ring buffer 127 - * Subchannel ring buffer is same as resource 0 which is main ring buffer 128 - * This is derived from uio_vma_fault 124 + /* Sysfs API to allow mmap of the ring buffers 125 + * The ring buffer is allocated as contiguous memory by vmbus_open 129 126 */ 130 - static int hv_uio_vma_fault(struct vm_fault *vmf) 131 - { 132 - struct vm_area_struct *vma = vmf->vma; 133 - void *ring_buffer = vma->vm_private_data; 134 - struct page *page; 135 - void *addr; 136 - 137 - addr = ring_buffer + (vmf->pgoff << PAGE_SHIFT); 138 - page = virt_to_page(addr); 139 - get_page(page); 140 - vmf->page = page; 141 - return 0; 142 - } 143 - 144 - static const struct vm_operations_struct hv_uio_vm_ops = { 145 - .fault = hv_uio_vma_fault, 146 - }; 147 - 148 - /* Sysfs API to allow mmap of the ring buffers */ 149 127 static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj, 150 128 struct bin_attribute *attr, 151 129 struct vm_area_struct *vma) 152 130 { 153 131 struct vmbus_channel *channel 154 132 = container_of(kobj, struct vmbus_channel, kobj); 155 - unsigned long requested_pages, actual_pages; 133 + struct 
hv_device *dev = channel->primary_channel->device_obj; 134 + u16 q_idx = channel->offermsg.offer.sub_channel_index; 156 135 157 - if (vma->vm_end < vma->vm_start) 158 - return -EINVAL; 136 + dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n", 137 + q_idx, vma_pages(vma), vma->vm_pgoff); 159 138 160 - /* only allow 0 for now */ 161 - if (vma->vm_pgoff > 0) 162 - return -EINVAL; 163 - 164 - requested_pages = vma_pages(vma); 165 - actual_pages = 2 * HV_RING_SIZE; 166 - if (requested_pages > actual_pages) 167 - return -EINVAL; 168 - 169 - vma->vm_private_data = channel->ringbuffer_pages; 170 - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 171 - vma->vm_ops = &hv_uio_vm_ops; 172 - return 0; 139 + return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages), 140 + channel->ringbuffer_pagecount << PAGE_SHIFT); 173 141 } 174 142 175 - static struct bin_attribute ring_buffer_bin_attr __ro_after_init = { 143 + static const struct bin_attribute ring_buffer_bin_attr = { 176 144 .attr = { 177 145 .name = "ring", 178 146 .mode = 0600, 179 - /* size is set at init time */ 180 147 }, 148 + .size = 2 * HV_RING_SIZE * PAGE_SIZE, 181 149 .mmap = hv_uio_ring_mmap, 182 150 }; 183 151 184 - /* Callback from VMBUS subystem when new channel created. */ 152 + /* Callback from VMBUS subsystem when new channel created. 
*/ 185 153 static void 186 154 hv_uio_new_channel(struct vmbus_channel *new_sc) 187 155 { 188 156 struct hv_device *hv_dev = new_sc->primary_channel->device_obj; 189 157 struct device *device = &hv_dev->device; 190 - struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev); 191 158 const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE; 192 159 int ret; 193 160 194 161 /* Create host communication ring */ 195 162 ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0, 196 - hv_uio_channel_cb, pdata); 163 + hv_uio_channel_cb, new_sc); 197 164 if (ret) { 198 165 dev_err(device, "vmbus_open subchannel failed: %d\n", ret); 199 166 return; ··· 203 234 204 235 ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE, 205 236 HV_RING_SIZE * PAGE_SIZE, NULL, 0, 206 - hv_uio_channel_cb, pdata); 237 + hv_uio_channel_cb, dev->channel); 207 238 if (ret) 208 239 goto fail; 209 240 ··· 294 325 295 326 vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind); 296 327 vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel); 328 + 329 + ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr); 330 + if (ret) 331 + dev_notice(&dev->device, 332 + "sysfs create ring bin file failed; %d\n", ret); 297 333 298 334 hv_set_drvdata(dev, pdata); 299 335
+1
drivers/usb/Kconfig
··· 207 207 208 208 config USB_ROLE_SWITCH 209 209 tristate 210 + select USB_COMMON 210 211 211 212 endif # USB_SUPPORT
+13 -6
drivers/usb/core/hcd.c
··· 2262 2262 hcd->state = HC_STATE_SUSPENDED; 2263 2263 2264 2264 if (!PMSG_IS_AUTO(msg)) 2265 - usb_phy_roothub_power_off(hcd->phy_roothub); 2265 + usb_phy_roothub_suspend(hcd->self.sysdev, 2266 + hcd->phy_roothub); 2266 2267 2267 2268 /* Did we race with a root-hub wakeup event? */ 2268 2269 if (rhdev->do_remote_wakeup) { ··· 2303 2302 } 2304 2303 2305 2304 if (!PMSG_IS_AUTO(msg)) { 2306 - status = usb_phy_roothub_power_on(hcd->phy_roothub); 2305 + status = usb_phy_roothub_resume(hcd->self.sysdev, 2306 + hcd->phy_roothub); 2307 2307 if (status) 2308 2308 return status; 2309 2309 } ··· 2346 2344 } 2347 2345 } else { 2348 2346 hcd->state = old_state; 2349 - usb_phy_roothub_power_off(hcd->phy_roothub); 2347 + usb_phy_roothub_suspend(hcd->self.sysdev, hcd->phy_roothub); 2350 2348 dev_dbg(&rhdev->dev, "bus %s fail, err %d\n", 2351 2349 "resume", status); 2352 2350 if (status != -ESHUTDOWN) ··· 2379 2377 2380 2378 spin_lock_irqsave (&hcd_root_hub_lock, flags); 2381 2379 if (hcd->rh_registered) { 2380 + pm_wakeup_event(&hcd->self.root_hub->dev, 0); 2382 2381 set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags); 2383 2382 queue_work(pm_wq, &hcd->wakeup_work); 2384 2383 } ··· 2761 2758 } 2762 2759 2763 2760 if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) { 2764 - hcd->phy_roothub = usb_phy_roothub_init(hcd->self.sysdev); 2761 + hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev); 2765 2762 if (IS_ERR(hcd->phy_roothub)) { 2766 2763 retval = PTR_ERR(hcd->phy_roothub); 2767 - goto err_phy_roothub_init; 2764 + goto err_phy_roothub_alloc; 2768 2765 } 2766 + 2767 + retval = usb_phy_roothub_init(hcd->phy_roothub); 2768 + if (retval) 2769 + goto err_phy_roothub_alloc; 2769 2770 2770 2771 retval = usb_phy_roothub_power_on(hcd->phy_roothub); 2771 2772 if (retval) ··· 2943 2936 usb_phy_roothub_power_off(hcd->phy_roothub); 2944 2937 err_usb_phy_roothub_power_on: 2945 2938 usb_phy_roothub_exit(hcd->phy_roothub); 2946 - err_phy_roothub_init: 2939 + 
err_phy_roothub_alloc: 2947 2940 if (hcd->remove_phy && hcd->usb_phy) { 2948 2941 usb_phy_shutdown(hcd->usb_phy); 2949 2942 usb_put_phy(hcd->usb_phy);
+9 -1
drivers/usb/core/hub.c
··· 653 653 unsigned int portnum) 654 654 { 655 655 struct usb_hub *hub; 656 + struct usb_port *port_dev; 656 657 657 658 if (!hdev) 658 659 return; 659 660 660 661 hub = usb_hub_to_struct_hub(hdev); 661 662 if (hub) { 663 + port_dev = hub->ports[portnum - 1]; 664 + if (port_dev && port_dev->child) 665 + pm_wakeup_event(&port_dev->child->dev, 0); 666 + 662 667 set_bit(portnum, hub->wakeup_bits); 663 668 kick_hub_wq(hub); 664 669 } ··· 3439 3434 3440 3435 /* Skip the initial Clear-Suspend step for a remote wakeup */ 3441 3436 status = hub_port_status(hub, port1, &portstatus, &portchange); 3442 - if (status == 0 && !port_is_suspended(hub, portstatus)) 3437 + if (status == 0 && !port_is_suspended(hub, portstatus)) { 3438 + if (portchange & USB_PORT_STAT_C_SUSPEND) 3439 + pm_wakeup_event(&udev->dev, 0); 3443 3440 goto SuspendCleared; 3441 + } 3444 3442 3445 3443 /* see 7.1.7.7; affects power usage, but not budgeting */ 3446 3444 if (hub_is_superspeed(hub->hdev))
+66 -27
drivers/usb/core/phy.c
··· 19 19 struct list_head list; 20 20 }; 21 21 22 - static struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev) 23 - { 24 - struct usb_phy_roothub *roothub_entry; 25 - 26 - roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL); 27 - if (!roothub_entry) 28 - return ERR_PTR(-ENOMEM); 29 - 30 - INIT_LIST_HEAD(&roothub_entry->list); 31 - 32 - return roothub_entry; 33 - } 34 - 35 22 static int usb_phy_roothub_add_phy(struct device *dev, int index, 36 23 struct list_head *list) 37 24 { ··· 32 45 return PTR_ERR(phy); 33 46 } 34 47 35 - roothub_entry = usb_phy_roothub_alloc(dev); 36 - if (IS_ERR(roothub_entry)) 37 - return PTR_ERR(roothub_entry); 48 + roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL); 49 + if (!roothub_entry) 50 + return -ENOMEM; 51 + 52 + INIT_LIST_HEAD(&roothub_entry->list); 38 53 39 54 roothub_entry->phy = phy; 40 55 ··· 45 56 return 0; 46 57 } 47 58 48 - struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev) 59 + struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev) 49 60 { 50 61 struct usb_phy_roothub *phy_roothub; 51 - struct usb_phy_roothub *roothub_entry; 52 - struct list_head *head; 53 62 int i, num_phys, err; 63 + 64 + if (!IS_ENABLED(CONFIG_GENERIC_PHY)) 65 + return NULL; 54 66 55 67 num_phys = of_count_phandle_with_args(dev->of_node, "phys", 56 68 "#phy-cells"); 57 69 if (num_phys <= 0) 58 70 return NULL; 59 71 60 - phy_roothub = usb_phy_roothub_alloc(dev); 61 - if (IS_ERR(phy_roothub)) 62 - return phy_roothub; 72 + phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL); 73 + if (!phy_roothub) 74 + return ERR_PTR(-ENOMEM); 75 + 76 + INIT_LIST_HEAD(&phy_roothub->list); 63 77 64 78 for (i = 0; i < num_phys; i++) { 65 79 err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list); 66 80 if (err) 67 - goto err_out; 81 + return ERR_PTR(err); 68 82 } 83 + 84 + return phy_roothub; 85 + } 86 + EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc); 87 + 88 + int 
usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub) 89 + { 90 + struct usb_phy_roothub *roothub_entry; 91 + struct list_head *head; 92 + int err; 93 + 94 + if (!phy_roothub) 95 + return 0; 69 96 70 97 head = &phy_roothub->list; 71 98 ··· 91 86 goto err_exit_phys; 92 87 } 93 88 94 - return phy_roothub; 89 + return 0; 95 90 96 91 err_exit_phys: 97 92 list_for_each_entry_continue_reverse(roothub_entry, head, list) 98 93 phy_exit(roothub_entry->phy); 99 94 100 - err_out: 101 - return ERR_PTR(err); 95 + return err; 102 96 } 103 97 EXPORT_SYMBOL_GPL(usb_phy_roothub_init); 104 98 ··· 115 111 list_for_each_entry(roothub_entry, head, list) { 116 112 err = phy_exit(roothub_entry->phy); 117 113 if (err) 118 - ret = ret; 114 + ret = err; 119 115 } 120 116 121 117 return ret; ··· 160 156 phy_power_off(roothub_entry->phy); 161 157 } 162 158 EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off); 159 + 160 + int usb_phy_roothub_suspend(struct device *controller_dev, 161 + struct usb_phy_roothub *phy_roothub) 162 + { 163 + usb_phy_roothub_power_off(phy_roothub); 164 + 165 + /* keep the PHYs initialized so the device can wake up the system */ 166 + if (device_may_wakeup(controller_dev)) 167 + return 0; 168 + 169 + return usb_phy_roothub_exit(phy_roothub); 170 + } 171 + EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend); 172 + 173 + int usb_phy_roothub_resume(struct device *controller_dev, 174 + struct usb_phy_roothub *phy_roothub) 175 + { 176 + int err; 177 + 178 + /* if the device can't wake up the system _exit was called */ 179 + if (!device_may_wakeup(controller_dev)) { 180 + err = usb_phy_roothub_init(phy_roothub); 181 + if (err) 182 + return err; 183 + } 184 + 185 + err = usb_phy_roothub_power_on(phy_roothub); 186 + 187 + /* undo _init if _power_on failed */ 188 + if (err && !device_may_wakeup(controller_dev)) 189 + usb_phy_roothub_exit(phy_roothub); 190 + 191 + return err; 192 + } 193 + EXPORT_SYMBOL_GPL(usb_phy_roothub_resume);
+21 -1
drivers/usb/core/phy.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + /* 3 + * USB roothub wrapper 4 + * 5 + * Copyright (C) 2018 Martin Blumenstingl <martin.blumenstingl@googlemail.com> 6 + */ 7 + 8 + #ifndef __USB_CORE_PHY_H_ 9 + #define __USB_CORE_PHY_H_ 10 + 11 + struct device; 1 12 struct usb_phy_roothub; 2 13 3 - struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev); 14 + struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev); 15 + 16 + int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub); 4 17 int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub); 5 18 6 19 int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub); 7 20 void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub); 21 + 22 + int usb_phy_roothub_suspend(struct device *controller_dev, 23 + struct usb_phy_roothub *phy_roothub); 24 + int usb_phy_roothub_resume(struct device *controller_dev, 25 + struct usb_phy_roothub *phy_roothub); 26 + 27 + #endif /* __USB_CORE_PHY_H_ */
+3
drivers/usb/core/quirks.c
··· 186 186 { USB_DEVICE(0x03f0, 0x0701), .driver_info = 187 187 USB_QUIRK_STRING_FETCH_255 }, 188 188 189 + /* HP v222w 16GB Mini USB Drive */ 190 + { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT }, 191 + 189 192 /* Creative SB Audigy 2 NX */ 190 193 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 191 194
+5 -3
drivers/usb/host/xhci-dbgtty.c
··· 320 320 321 321 void xhci_dbc_tty_unregister_driver(void) 322 322 { 323 - tty_unregister_driver(dbc_tty_driver); 324 - put_tty_driver(dbc_tty_driver); 325 - dbc_tty_driver = NULL; 323 + if (dbc_tty_driver) { 324 + tty_unregister_driver(dbc_tty_driver); 325 + put_tty_driver(dbc_tty_driver); 326 + dbc_tty_driver = NULL; 327 + } 326 328 } 327 329 328 330 static void dbc_rx_push(unsigned long _port)
+4 -1
drivers/usb/host/xhci-pci.c
··· 126 126 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) 127 127 xhci->quirks |= XHCI_AMD_PLL_FIX; 128 128 129 - if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb) 129 + if (pdev->vendor == PCI_VENDOR_ID_AMD && 130 + (pdev->device == 0x15e0 || 131 + pdev->device == 0x15e1 || 132 + pdev->device == 0x43bb)) 130 133 xhci->quirks |= XHCI_SUSPEND_DELAY; 131 134 132 135 if (pdev->vendor == PCI_VENDOR_ID_AMD)
+23 -9
drivers/usb/host/xhci-plat.c
··· 157 157 struct resource *res; 158 158 struct usb_hcd *hcd; 159 159 struct clk *clk; 160 + struct clk *reg_clk; 160 161 int ret; 161 162 int irq; 162 163 ··· 227 226 hcd->rsrc_len = resource_size(res); 228 227 229 228 /* 230 - * Not all platforms have a clk so it is not an error if the 231 - * clock does not exists. 229 + * Not all platforms have clks so it is not an error if the 230 + * clock do not exist. 232 231 */ 232 + reg_clk = devm_clk_get(&pdev->dev, "reg"); 233 + if (!IS_ERR(reg_clk)) { 234 + ret = clk_prepare_enable(reg_clk); 235 + if (ret) 236 + goto put_hcd; 237 + } else if (PTR_ERR(reg_clk) == -EPROBE_DEFER) { 238 + ret = -EPROBE_DEFER; 239 + goto put_hcd; 240 + } 241 + 233 242 clk = devm_clk_get(&pdev->dev, NULL); 234 243 if (!IS_ERR(clk)) { 235 244 ret = clk_prepare_enable(clk); 236 245 if (ret) 237 - goto put_hcd; 246 + goto disable_reg_clk; 238 247 } else if (PTR_ERR(clk) == -EPROBE_DEFER) { 239 248 ret = -EPROBE_DEFER; 240 - goto put_hcd; 249 + goto disable_reg_clk; 241 250 } 242 251 243 252 xhci = hcd_to_xhci(hcd); ··· 263 252 device_wakeup_enable(hcd->self.controller); 264 253 265 254 xhci->clk = clk; 255 + xhci->reg_clk = reg_clk; 266 256 xhci->main_hcd = hcd; 267 257 xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, 268 258 dev_name(&pdev->dev), hcd); ··· 332 320 usb_put_hcd(xhci->shared_hcd); 333 321 334 322 disable_clk: 335 - if (!IS_ERR(clk)) 336 - clk_disable_unprepare(clk); 323 + clk_disable_unprepare(clk); 324 + 325 + disable_reg_clk: 326 + clk_disable_unprepare(reg_clk); 337 327 338 328 put_hcd: 339 329 usb_put_hcd(hcd); ··· 352 338 struct usb_hcd *hcd = platform_get_drvdata(dev); 353 339 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 354 340 struct clk *clk = xhci->clk; 341 + struct clk *reg_clk = xhci->reg_clk; 355 342 356 343 xhci->xhc_state |= XHCI_STATE_REMOVING; 357 344 ··· 362 347 usb_remove_hcd(hcd); 363 348 usb_put_hcd(xhci->shared_hcd); 364 349 365 - if (!IS_ERR(clk)) 366 - clk_disable_unprepare(clk); 350 + 
clk_disable_unprepare(clk); 351 + clk_disable_unprepare(reg_clk); 367 352 usb_put_hcd(hcd); 368 353 369 354 pm_runtime_set_suspended(&dev->dev); ··· 435 420 static struct platform_driver usb_xhci_driver = { 436 421 .probe = xhci_plat_probe, 437 422 .remove = xhci_plat_remove, 438 - .shutdown = usb_hcd_platform_shutdown, 439 423 .driver = { 440 424 .name = "xhci-hcd", 441 425 .pm = &xhci_plat_pm_ops,
+2 -1
drivers/usb/host/xhci.h
··· 1729 1729 int page_shift; 1730 1730 /* msi-x vectors */ 1731 1731 int msix_count; 1732 - /* optional clock */ 1732 + /* optional clocks */ 1733 1733 struct clk *clk; 1734 + struct clk *reg_clk; 1734 1735 /* data structures */ 1735 1736 struct xhci_device_context_array *dcbaa; 1736 1737 struct xhci_ring *cmd_ring;
-2
drivers/usb/musb/musb_dsps.c
··· 451 451 if (!rev) 452 452 return -ENODEV; 453 453 454 - usb_phy_init(musb->xceiv); 455 454 if (IS_ERR(musb->phy)) { 456 455 musb->phy = NULL; 457 456 } else { ··· 500 501 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 501 502 502 503 del_timer_sync(&musb->dev_timer); 503 - usb_phy_shutdown(musb->xceiv); 504 504 phy_power_off(musb->phy); 505 505 phy_exit(musb->phy); 506 506 debugfs_remove_recursive(glue->dbgfs_root);
+1
drivers/usb/musb/musb_host.c
··· 2754 2754 hcd->self.otg_port = 1; 2755 2755 musb->xceiv->otg->host = &hcd->self; 2756 2756 hcd->power_budget = 2 * (power_budget ? : 250); 2757 + hcd->skip_phy_initialization = 1; 2757 2758 2758 2759 ret = usb_add_hcd(hcd, 0, 0); 2759 2760 if (ret < 0)
+1
drivers/usb/serial/Kconfig
··· 62 62 - Fundamental Software dongle. 63 63 - Google USB serial devices 64 64 - HP4x calculators 65 + - Libtransistor USB console 65 66 - a number of Motorola phones 66 67 - Motorola Tetra devices 67 68 - Novatel Wireless GPS receivers
+1
drivers/usb/serial/cp210x.c
··· 214 214 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ 215 215 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */ 216 216 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */ 217 + { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */ 217 218 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ 218 219 { } /* Terminating Entry */ 219 220 };
+2 -1
drivers/usb/serial/ftdi_sio.c
··· 1898 1898 return ftdi_jtag_probe(serial); 1899 1899 1900 1900 if (udev->product && 1901 - (!strcmp(udev->product, "BeagleBone/XDS100V2") || 1901 + (!strcmp(udev->product, "Arrow USB Blaster") || 1902 + !strcmp(udev->product, "BeagleBone/XDS100V2") || 1902 1903 !strcmp(udev->product, "SNAP Connect E10"))) 1903 1904 return ftdi_jtag_probe(serial); 1904 1905
+7
drivers/usb/serial/usb-serial-simple.c
··· 63 63 0x01) } 64 64 DEVICE(google, GOOGLE_IDS); 65 65 66 + /* Libtransistor USB console */ 67 + #define LIBTRANSISTOR_IDS() \ 68 + { USB_DEVICE(0x1209, 0x8b00) } 69 + DEVICE(libtransistor, LIBTRANSISTOR_IDS); 70 + 66 71 /* ViVOpay USB Serial Driver */ 67 72 #define VIVOPAY_IDS() \ 68 73 { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ ··· 115 110 &funsoft_device, 116 111 &flashloader_device, 117 112 &google_device, 113 + &libtransistor_device, 118 114 &vivopay_device, 119 115 &moto_modem_device, 120 116 &motorola_tetra_device, ··· 132 126 FUNSOFT_IDS(), 133 127 FLASHLOADER_IDS(), 134 128 GOOGLE_IDS(), 129 + LIBTRANSISTOR_IDS(), 135 130 VIVOPAY_IDS(), 136 131 MOTO_IDS(), 137 132 MOTOROLA_TETRA_IDS(),
+1 -1
drivers/usb/typec/ucsi/Makefile
··· 5 5 6 6 typec_ucsi-y := ucsi.o 7 7 8 - typec_ucsi-$(CONFIG_FTRACE) += trace.o 8 + typec_ucsi-$(CONFIG_TRACING) += trace.o 9 9 10 10 obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
+1 -1
drivers/usb/typec/ucsi/ucsi.c
··· 28 28 * difficult to estimate the time it takes for the system to process the command 29 29 * before it is actually passed to the PPM. 30 30 */ 31 - #define UCSI_TIMEOUT_MS 1000 31 + #define UCSI_TIMEOUT_MS 5000 32 32 33 33 /* 34 34 * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
+5
drivers/usb/usbip/stub_main.c
··· 186 186 if (!bid) 187 187 return -ENODEV; 188 188 189 + /* device_attach() callers should hold parent lock for USB */ 190 + if (bid->udev->dev.parent) 191 + device_lock(bid->udev->dev.parent); 189 192 ret = device_attach(&bid->udev->dev); 193 + if (bid->udev->dev.parent) 194 + device_unlock(bid->udev->dev.parent); 190 195 if (ret < 0) { 191 196 dev_err(&bid->udev->dev, "rebind failed\n"); 192 197 return ret;
+1 -1
drivers/usb/usbip/usbip_common.h
··· 243 243 #define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 244 244 #define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE) 245 245 246 - #define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE) 246 + #define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE) 247 247 #define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) 248 248 #define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET) 249 249 #define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
-4
drivers/usb/usbip/usbip_event.c
··· 91 91 unset_event(ud, USBIP_EH_UNUSABLE); 92 92 } 93 93 94 - /* Stop the error handler. */ 95 - if (ud->event & USBIP_EH_BYE) 96 - usbip_dbg_eh("removed %p\n", ud); 97 - 98 94 wake_up(&ud->eh_waitq); 99 95 } 100 96 }
+13
drivers/usb/usbip/vhci_hcd.c
··· 354 354 usbip_dbg_vhci_rh(" ClearHubFeature\n"); 355 355 break; 356 356 case ClearPortFeature: 357 + if (rhport < 0) 358 + goto error; 357 359 switch (wValue) { 358 360 case USB_PORT_FEAT_SUSPEND: 359 361 if (hcd->speed == HCD_USB3) { ··· 513 511 goto error; 514 512 } 515 513 514 + if (rhport < 0) 515 + goto error; 516 + 516 517 vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND; 517 518 break; 518 519 case USB_PORT_FEAT_POWER: 519 520 usbip_dbg_vhci_rh( 520 521 " SetPortFeature: USB_PORT_FEAT_POWER\n"); 522 + if (rhport < 0) 523 + goto error; 521 524 if (hcd->speed == HCD_USB3) 522 525 vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER; 523 526 else ··· 531 524 case USB_PORT_FEAT_BH_PORT_RESET: 532 525 usbip_dbg_vhci_rh( 533 526 " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n"); 527 + if (rhport < 0) 528 + goto error; 534 529 /* Applicable only for USB3.0 hub */ 535 530 if (hcd->speed != HCD_USB3) { 536 531 pr_err("USB_PORT_FEAT_BH_PORT_RESET req not " ··· 543 534 case USB_PORT_FEAT_RESET: 544 535 usbip_dbg_vhci_rh( 545 536 " SetPortFeature: USB_PORT_FEAT_RESET\n"); 537 + if (rhport < 0) 538 + goto error; 546 539 /* if it's already enabled, disable */ 547 540 if (hcd->speed == HCD_USB3) { 548 541 vhci_hcd->port_status[rhport] = 0; ··· 565 554 default: 566 555 usbip_dbg_vhci_rh(" SetPortFeature: default %d\n", 567 556 wValue); 557 + if (rhport < 0) 558 + goto error; 568 559 if (hcd->speed == HCD_USB3) { 569 560 if ((vhci_hcd->port_status[rhport] & 570 561 USB_SS_PORT_STAT_POWER) != 0) {
+38 -32
drivers/virt/vboxguest/vboxguest_core.c
··· 114 114 } 115 115 116 116 out: 117 - kfree(req); 117 + vbg_req_free(req, sizeof(*req)); 118 118 kfree(pages); 119 119 } 120 120 ··· 144 144 145 145 rc = vbg_req_perform(gdev, req); 146 146 147 - kfree(req); 147 + vbg_req_free(req, sizeof(*req)); 148 148 149 149 if (rc < 0) { 150 150 vbg_err("%s error: %d\n", __func__, rc); ··· 214 214 ret = vbg_status_code_to_errno(rc); 215 215 216 216 out_free: 217 - kfree(req2); 218 - kfree(req1); 217 + vbg_req_free(req2, sizeof(*req2)); 218 + vbg_req_free(req1, sizeof(*req1)); 219 219 return ret; 220 220 } 221 221 ··· 245 245 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */ 246 246 rc = VINF_SUCCESS; 247 247 248 - kfree(req); 248 + vbg_req_free(req, sizeof(*req)); 249 249 250 250 return vbg_status_code_to_errno(rc); 251 251 } ··· 431 431 rc = vbg_req_perform(gdev, req); 432 432 do_div(req->interval_ns, 1000000); /* ns -> ms */ 433 433 gdev->heartbeat_interval_ms = req->interval_ns; 434 - kfree(req); 434 + vbg_req_free(req, sizeof(*req)); 435 435 436 436 return vbg_status_code_to_errno(rc); 437 437 } ··· 454 454 if (ret < 0) 455 455 return ret; 456 456 457 - /* 458 - * Preallocate the request to use it from the timer callback because: 459 - * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL 460 - * and the timer callback runs at DISPATCH_LEVEL; 461 - * 2) avoid repeated allocations. 
462 - */ 463 457 gdev->guest_heartbeat_req = vbg_req_alloc( 464 458 sizeof(*gdev->guest_heartbeat_req), 465 459 VMMDEVREQ_GUEST_HEARTBEAT); ··· 475 481 { 476 482 del_timer_sync(&gdev->heartbeat_timer); 477 483 vbg_heartbeat_host_config(gdev, false); 478 - kfree(gdev->guest_heartbeat_req); 479 - 484 + vbg_req_free(gdev->guest_heartbeat_req, 485 + sizeof(*gdev->guest_heartbeat_req)); 480 486 } 481 487 482 488 /** ··· 537 543 if (rc < 0) 538 544 vbg_err("%s error, rc: %d\n", __func__, rc); 539 545 540 - kfree(req); 546 + vbg_req_free(req, sizeof(*req)); 541 547 return vbg_status_code_to_errno(rc); 542 548 } 543 549 ··· 611 617 612 618 out: 613 619 mutex_unlock(&gdev->session_mutex); 614 - kfree(req); 620 + vbg_req_free(req, sizeof(*req)); 615 621 616 622 return ret; 617 623 } ··· 636 642 if (rc < 0) 637 643 vbg_err("%s error, rc: %d\n", __func__, rc); 638 644 639 - kfree(req); 645 + vbg_req_free(req, sizeof(*req)); 640 646 return vbg_status_code_to_errno(rc); 641 647 } 642 648 ··· 706 712 707 713 out: 708 714 mutex_unlock(&gdev->session_mutex); 709 - kfree(req); 715 + vbg_req_free(req, sizeof(*req)); 710 716 711 717 return ret; 712 718 } ··· 727 733 728 734 rc = vbg_req_perform(gdev, req); 729 735 ret = vbg_status_code_to_errno(rc); 730 - if (ret) 736 + if (ret) { 737 + vbg_err("%s error: %d\n", __func__, rc); 731 738 goto out; 739 + } 732 740 733 741 snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u", 734 742 req->major, req->minor, req->build, req->revision); ··· 745 749 } 746 750 747 751 out: 748 - kfree(req); 752 + vbg_req_free(req, sizeof(*req)); 749 753 return ret; 750 754 } 751 755 ··· 843 847 return 0; 844 848 845 849 err_free_reqs: 846 - kfree(gdev->mouse_status_req); 847 - kfree(gdev->ack_events_req); 848 - kfree(gdev->cancel_req); 849 - kfree(gdev->mem_balloon.change_req); 850 - kfree(gdev->mem_balloon.get_req); 850 + vbg_req_free(gdev->mouse_status_req, 851 + sizeof(*gdev->mouse_status_req)); 852 + vbg_req_free(gdev->ack_events_req, 853 
+ sizeof(*gdev->ack_events_req)); 854 + vbg_req_free(gdev->cancel_req, 855 + sizeof(*gdev->cancel_req)); 856 + vbg_req_free(gdev->mem_balloon.change_req, 857 + sizeof(*gdev->mem_balloon.change_req)); 858 + vbg_req_free(gdev->mem_balloon.get_req, 859 + sizeof(*gdev->mem_balloon.get_req)); 851 860 return ret; 852 861 } 853 862 ··· 873 872 vbg_reset_host_capabilities(gdev); 874 873 vbg_core_set_mouse_status(gdev, 0); 875 874 876 - kfree(gdev->mouse_status_req); 877 - kfree(gdev->ack_events_req); 878 - kfree(gdev->cancel_req); 879 - kfree(gdev->mem_balloon.change_req); 880 - kfree(gdev->mem_balloon.get_req); 875 + vbg_req_free(gdev->mouse_status_req, 876 + sizeof(*gdev->mouse_status_req)); 877 + vbg_req_free(gdev->ack_events_req, 878 + sizeof(*gdev->ack_events_req)); 879 + vbg_req_free(gdev->cancel_req, 880 + sizeof(*gdev->cancel_req)); 881 + vbg_req_free(gdev->mem_balloon.change_req, 882 + sizeof(*gdev->mem_balloon.change_req)); 883 + vbg_req_free(gdev->mem_balloon.get_req, 884 + sizeof(*gdev->mem_balloon.get_req)); 881 885 } 882 886 883 887 /** ··· 1421 1415 req->flags = dump->u.in.flags; 1422 1416 dump->hdr.rc = vbg_req_perform(gdev, req); 1423 1417 1424 - kfree(req); 1418 + vbg_req_free(req, sizeof(*req)); 1425 1419 return 0; 1426 1420 } 1427 1421 ··· 1519 1513 if (rc < 0) 1520 1514 vbg_err("%s error, rc: %d\n", __func__, rc); 1521 1515 1522 - kfree(req); 1516 + vbg_req_free(req, sizeof(*req)); 1523 1517 return vbg_status_code_to_errno(rc); 1524 1518 } 1525 1519
+9
drivers/virt/vboxguest/vboxguest_core.h
··· 171 171 172 172 void vbg_linux_mouse_event(struct vbg_dev *gdev); 173 173 174 + /* Private (non exported) functions form vboxguest_utils.c */ 175 + void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); 176 + void vbg_req_free(void *req, size_t len); 177 + int vbg_req_perform(struct vbg_dev *gdev, void *req); 178 + int vbg_hgcm_call32( 179 + struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, 180 + struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, 181 + int *vbox_status); 182 + 174 183 #endif
+16 -3
drivers/virt/vboxguest/vboxguest_linux.c
··· 87 87 struct vbg_session *session = filp->private_data; 88 88 size_t returned_size, size; 89 89 struct vbg_ioctl_hdr hdr; 90 + bool is_vmmdev_req; 90 91 int ret = 0; 91 92 void *buf; 92 93 ··· 107 106 if (size > SZ_16M) 108 107 return -E2BIG; 109 108 110 - /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */ 111 - buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32); 109 + /* 110 + * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid 111 + * the need for a bounce-buffer and another copy later on. 112 + */ 113 + is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || 114 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG; 115 + 116 + if (is_vmmdev_req) 117 + buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); 118 + else 119 + buf = kmalloc(size, GFP_KERNEL); 112 120 if (!buf) 113 121 return -ENOMEM; 114 122 ··· 142 132 ret = -EFAULT; 143 133 144 134 out: 145 - kfree(buf); 135 + if (is_vmmdev_req) 136 + vbg_req_free(buf, size); 137 + else 138 + kfree(buf); 146 139 147 140 return ret; 148 141 }
+13 -4
drivers/virt/vboxguest/vboxguest_utils.c
··· 65 65 void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) 66 66 { 67 67 struct vmmdev_request_header *req; 68 + int order = get_order(PAGE_ALIGN(len)); 68 69 69 - req = kmalloc(len, GFP_KERNEL | __GFP_DMA32); 70 + req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order); 70 71 if (!req) 71 72 return NULL; 72 73 ··· 81 80 req->reserved2 = 0; 82 81 83 82 return req; 83 + } 84 + 85 + void vbg_req_free(void *req, size_t len) 86 + { 87 + if (!req) 88 + return; 89 + 90 + free_pages((unsigned long)req, get_order(PAGE_ALIGN(len))); 84 91 } 85 92 86 93 /* Note this function returns a VBox status code, not a negative errno!! */ ··· 146 137 rc = hgcm_connect->header.result; 147 138 } 148 139 149 - kfree(hgcm_connect); 140 + vbg_req_free(hgcm_connect, sizeof(*hgcm_connect)); 150 141 151 142 *vbox_status = rc; 152 143 return 0; ··· 175 166 if (rc >= 0) 176 167 rc = hgcm_disconnect->header.result; 177 168 178 - kfree(hgcm_disconnect); 169 + vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect)); 179 170 180 171 *vbox_status = rc; 181 172 return 0; ··· 632 623 } 633 624 634 625 if (!leak_it) 635 - kfree(call); 626 + vbg_req_free(call, size); 636 627 637 628 free_bounce_bufs: 638 629 if (bounce_bufs) {
+25 -3
fs/ceph/xattr.c
··· 228 228 229 229 static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci) 230 230 { 231 - return (ci->i_max_files || ci->i_max_bytes); 231 + bool ret = false; 232 + spin_lock(&ci->i_ceph_lock); 233 + if ((ci->i_max_files || ci->i_max_bytes) && 234 + ci->i_vino.snap == CEPH_NOSNAP && 235 + ci->i_snap_realm && 236 + ci->i_snap_realm->ino == ci->i_vino.ino) 237 + ret = true; 238 + spin_unlock(&ci->i_ceph_lock); 239 + return ret; 232 240 } 233 241 234 242 static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val, ··· 1016 1008 char *newval = NULL; 1017 1009 struct ceph_inode_xattr *xattr = NULL; 1018 1010 int required_blob_size; 1011 + bool check_realm = false; 1019 1012 bool lock_snap_rwsem = false; 1020 1013 1021 1014 if (ceph_snap(inode) != CEPH_NOSNAP) 1022 1015 return -EROFS; 1023 1016 1024 1017 vxattr = ceph_match_vxattr(inode, name); 1025 - if (vxattr && vxattr->readonly) 1026 - return -EOPNOTSUPP; 1018 + if (vxattr) { 1019 + if (vxattr->readonly) 1020 + return -EOPNOTSUPP; 1021 + if (value && !strncmp(vxattr->name, "ceph.quota", 10)) 1022 + check_realm = true; 1023 + } 1027 1024 1028 1025 /* pass any unhandled ceph.* xattrs through to the MDS */ 1029 1026 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) ··· 1122 1109 err = -EBUSY; 1123 1110 } else { 1124 1111 err = ceph_sync_setxattr(inode, name, value, size, flags); 1112 + if (err >= 0 && check_realm) { 1113 + /* check if snaprealm was created for quota inode */ 1114 + spin_lock(&ci->i_ceph_lock); 1115 + if ((ci->i_max_files || ci->i_max_bytes) && 1116 + !(ci->i_snap_realm && 1117 + ci->i_snap_realm->ino == ci->i_vino.ino)) 1118 + err = -EOPNOTSUPP; 1119 + spin_unlock(&ci->i_ceph_lock); 1120 + } 1125 1121 } 1126 1122 out: 1127 1123 ceph_free_cap_flush(prealloc_cf);
+3
fs/cifs/cifssmb.c
··· 455 455 server->sign = true; 456 456 } 457 457 458 + if (cifs_rdma_enabled(server) && server->sign) 459 + cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled"); 460 + 458 461 return 0; 459 462 } 460 463
+16 -16
fs/cifs/connect.c
··· 2959 2959 } 2960 2960 } 2961 2961 2962 + if (volume_info->seal) { 2963 + if (ses->server->vals->protocol_id == 0) { 2964 + cifs_dbg(VFS, 2965 + "SMB3 or later required for encryption\n"); 2966 + rc = -EOPNOTSUPP; 2967 + goto out_fail; 2968 + } else if (tcon->ses->server->capabilities & 2969 + SMB2_GLOBAL_CAP_ENCRYPTION) 2970 + tcon->seal = true; 2971 + else { 2972 + cifs_dbg(VFS, "Encryption is not supported on share\n"); 2973 + rc = -EOPNOTSUPP; 2974 + goto out_fail; 2975 + } 2976 + } 2977 + 2962 2978 /* 2963 2979 * BB Do we need to wrap session_mutex around this TCon call and Unix 2964 2980 * SetFS as we do on SessSetup and reconnect? ··· 3021 3005 goto out_fail; 3022 3006 } 3023 3007 tcon->use_resilient = true; 3024 - } 3025 - 3026 - if (volume_info->seal) { 3027 - if (ses->server->vals->protocol_id == 0) { 3028 - cifs_dbg(VFS, 3029 - "SMB3 or later required for encryption\n"); 3030 - rc = -EOPNOTSUPP; 3031 - goto out_fail; 3032 - } else if (tcon->ses->server->capabilities & 3033 - SMB2_GLOBAL_CAP_ENCRYPTION) 3034 - tcon->seal = true; 3035 - else { 3036 - cifs_dbg(VFS, "Encryption is not supported on share\n"); 3037 - rc = -EOPNOTSUPP; 3038 - goto out_fail; 3039 - } 3040 3008 } 3041 3009 3042 3010 /*
+14 -4
fs/cifs/smb2ops.c
··· 252 252 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE; 253 253 wsize = min_t(unsigned int, wsize, server->max_write); 254 254 #ifdef CONFIG_CIFS_SMB_DIRECT 255 - if (server->rdma) 256 - wsize = min_t(unsigned int, 255 + if (server->rdma) { 256 + if (server->sign) 257 + wsize = min_t(unsigned int, 258 + wsize, server->smbd_conn->max_fragmented_send_size); 259 + else 260 + wsize = min_t(unsigned int, 257 261 wsize, server->smbd_conn->max_readwrite_size); 262 + } 258 263 #endif 259 264 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU)) 260 265 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE); ··· 277 272 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE; 278 273 rsize = min_t(unsigned int, rsize, server->max_read); 279 274 #ifdef CONFIG_CIFS_SMB_DIRECT 280 - if (server->rdma) 281 - rsize = min_t(unsigned int, 275 + if (server->rdma) { 276 + if (server->sign) 277 + rsize = min_t(unsigned int, 278 + rsize, server->smbd_conn->max_fragmented_recv_size); 279 + else 280 + rsize = min_t(unsigned int, 282 281 rsize, server->smbd_conn->max_readwrite_size); 282 + } 283 283 #endif 284 284 285 285 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+7 -6
fs/cifs/smb2pdu.c
··· 383 383 build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt) 384 384 { 385 385 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES; 386 - pneg_ctxt->DataLength = cpu_to_le16(6); 387 - pneg_ctxt->CipherCount = cpu_to_le16(2); 388 - pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM; 389 - pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM; 386 + pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + le16 cipher */ 387 + pneg_ctxt->CipherCount = cpu_to_le16(1); 388 + /* pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;*/ /* not supported yet */ 389 + pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_CCM; 390 390 } 391 391 392 392 static void ··· 444 444 return -EINVAL; 445 445 } 446 446 server->cipher_type = ctxt->Ciphers[0]; 447 + server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION; 447 448 return 0; 448 449 } 449 450 ··· 2591 2590 * If we want to do a RDMA write, fill in and append 2592 2591 * smbd_buffer_descriptor_v1 to the end of read request 2593 2592 */ 2594 - if (server->rdma && rdata && 2593 + if (server->rdma && rdata && !server->sign && 2595 2594 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) { 2596 2595 2597 2596 struct smbd_buffer_descriptor_v1 *v1; ··· 2969 2968 * If we want to do a server RDMA read, fill in and append 2970 2969 * smbd_buffer_descriptor_v1 to the end of write request 2971 2970 */ 2972 - if (server->rdma && wdata->bytes >= 2971 + if (server->rdma && !server->sign && wdata->bytes >= 2973 2972 server->smbd_conn->rdma_readwrite_threshold) { 2974 2973 2975 2974 struct smbd_buffer_descriptor_v1 *v1;
+1 -1
fs/cifs/smb2pdu.h
··· 297 297 __le16 DataLength; 298 298 __le32 Reserved; 299 299 __le16 CipherCount; /* AES-128-GCM and AES-128-CCM */ 300 - __le16 Ciphers[2]; /* Ciphers[0] since only one used now */ 300 + __le16 Ciphers[1]; /* Ciphers[0] since only one used now */ 301 301 } __packed; 302 302 303 303 struct smb2_negotiate_rsp {
+12 -24
fs/cifs/smbdirect.c
··· 2086 2086 int start, i, j; 2087 2087 int max_iov_size = 2088 2088 info->max_send_size - sizeof(struct smbd_data_transfer); 2089 - struct kvec iov[SMBDIRECT_MAX_SGE]; 2089 + struct kvec *iov; 2090 2090 int rc; 2091 2091 2092 2092 info->smbd_send_pending++; ··· 2096 2096 } 2097 2097 2098 2098 /* 2099 - * This usually means a configuration error 2100 - * We use RDMA read/write for packet size > rdma_readwrite_threshold 2101 - * as long as it's properly configured we should never get into this 2102 - * situation 2103 - */ 2104 - if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) { 2105 - log_write(ERR, "maximum send segment %x exceeding %x\n", 2106 - rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE); 2107 - rc = -EINVAL; 2108 - goto done; 2109 - } 2110 - 2111 - /* 2112 - * Remove the RFC1002 length defined in MS-SMB2 section 2.1 2113 - * It is used only for TCP transport 2099 + * Skip the RFC1002 length defined in MS-SMB2 section 2.1 2100 + * It is used only for TCP transport in the iov[0] 2114 2101 * In future we may want to add a transport layer under protocol 2115 2102 * layer so this will only be issued to TCP transport 2116 2103 */ 2117 - iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4; 2118 - iov[0].iov_len = rqst->rq_iov[0].iov_len - 4; 2119 - buflen += iov[0].iov_len; 2104 + 2105 + if (rqst->rq_iov[0].iov_len != 4) { 2106 + log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len); 2107 + return -EINVAL; 2108 + } 2109 + iov = &rqst->rq_iov[1]; 2120 2110 2121 2111 /* total up iov array first */ 2122 - for (i = 1; i < rqst->rq_nvec; i++) { 2123 - iov[i].iov_base = rqst->rq_iov[i].iov_base; 2124 - iov[i].iov_len = rqst->rq_iov[i].iov_len; 2112 + for (i = 0; i < rqst->rq_nvec-1; i++) { 2125 2113 buflen += iov[i].iov_len; 2126 2114 } 2127 2115 ··· 2186 2198 goto done; 2187 2199 } 2188 2200 i++; 2189 - if (i == rqst->rq_nvec) 2201 + if (i == rqst->rq_nvec-1) 2190 2202 break; 2191 2203 } 2192 2204 start = i; 2193 
2205 buflen = 0; 2194 2206 } else { 2195 2207 i++; 2196 - if (i == rqst->rq_nvec) { 2208 + if (i == rqst->rq_nvec-1) { 2197 2209 /* send out all remaining vecs */ 2198 2210 remaining_data_length -= buflen; 2199 2211 log_write(INFO,
+6 -3
fs/cifs/transport.c
··· 753 753 goto out; 754 754 755 755 #ifdef CONFIG_CIFS_SMB311 756 - if (ses->status == CifsNew) 756 + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) 757 757 smb311_update_preauth_hash(ses, rqst->rq_iov+1, 758 758 rqst->rq_nvec-1); 759 759 #endif ··· 798 798 *resp_buf_type = CIFS_SMALL_BUFFER; 799 799 800 800 #ifdef CONFIG_CIFS_SMB311 801 - if (ses->status == CifsNew) { 801 + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) { 802 802 struct kvec iov = { 803 803 .iov_base = buf + 4, 804 804 .iov_len = get_rfc1002_length(buf) ··· 834 834 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) { 835 835 new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), 836 836 GFP_KERNEL); 837 - if (!new_iov) 837 + if (!new_iov) { 838 + /* otherwise cifs_send_recv below sets resp_buf_type */ 839 + *resp_buf_type = CIFS_NO_BUFFER; 838 840 return -ENOMEM; 841 + } 839 842 } else 840 843 new_iov = s_iov; 841 844
+5 -4
fs/ext4/balloc.c
··· 321 321 struct ext4_sb_info *sbi = EXT4_SB(sb); 322 322 ext4_grpblk_t offset; 323 323 ext4_grpblk_t next_zero_bit; 324 + ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb); 324 325 ext4_fsblk_t blk; 325 326 ext4_fsblk_t group_first_block; 326 327 ··· 339 338 /* check whether block bitmap block number is set */ 340 339 blk = ext4_block_bitmap(sb, desc); 341 340 offset = blk - group_first_block; 342 - if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || 341 + if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || 343 342 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 344 343 /* bad block bitmap */ 345 344 return blk; ··· 347 346 /* check whether the inode bitmap block number is set */ 348 347 blk = ext4_inode_bitmap(sb, desc); 349 348 offset = blk - group_first_block; 350 - if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || 349 + if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || 351 350 !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data)) 352 351 /* bad block bitmap */ 353 352 return blk; ··· 355 354 /* check whether the inode table block number is set */ 356 355 blk = ext4_inode_table(sb, desc); 357 356 offset = blk - group_first_block; 358 - if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize || 359 - EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= sb->s_blocksize) 357 + if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit || 358 + EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit) 360 359 return blk; 361 360 next_zero_bit = ext4_find_next_zero_bit(bh->b_data, 362 361 EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
+11 -5
fs/ext4/extents.c
··· 5329 5329 stop = le32_to_cpu(extent->ee_block); 5330 5330 5331 5331 /* 5332 - * In case of left shift, Don't start shifting extents until we make 5333 - * sure the hole is big enough to accommodate the shift. 5332 + * For left shifts, make sure the hole on the left is big enough to 5333 + * accommodate the shift. For right shifts, make sure the last extent 5334 + * won't be shifted beyond EXT_MAX_BLOCKS. 5334 5335 */ 5335 5336 if (SHIFT == SHIFT_LEFT) { 5336 5337 path = ext4_find_extent(inode, start - 1, &path, ··· 5351 5350 5352 5351 if ((start == ex_start && shift > ex_start) || 5353 5352 (shift > start - ex_end)) { 5354 - ext4_ext_drop_refs(path); 5355 - kfree(path); 5356 - return -EINVAL; 5353 + ret = -EINVAL; 5354 + goto out; 5355 + } 5356 + } else { 5357 + if (shift > EXT_MAX_BLOCKS - 5358 + (stop + ext4_ext_get_actual_len(extent))) { 5359 + ret = -EINVAL; 5360 + goto out; 5357 5361 } 5358 5362 } 5359 5363
+1
fs/ext4/super.c
··· 5886 5886 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); 5887 5887 MODULE_DESCRIPTION("Fourth Extended Filesystem"); 5888 5888 MODULE_LICENSE("GPL"); 5889 + MODULE_SOFTDEP("pre: crc32c"); 5889 5890 module_init(ext4_init_fs) 5890 5891 module_exit(ext4_exit_fs)
+1
fs/jbd2/transaction.c
··· 532 532 */ 533 533 ret = start_this_handle(journal, handle, GFP_NOFS); 534 534 if (ret < 0) { 535 + handle->h_journal = journal; 535 536 jbd2_journal_free_reserved(handle); 536 537 return ret; 537 538 }
+8 -1
fs/xfs/libxfs/xfs_attr.c
··· 511 511 if (args->flags & ATTR_CREATE) 512 512 return retval; 513 513 retval = xfs_attr_shortform_remove(args); 514 - ASSERT(retval == 0); 514 + if (retval) 515 + return retval; 516 + /* 517 + * Since we have removed the old attr, clear ATTR_REPLACE so 518 + * that the leaf format add routine won't trip over the attr 519 + * not being around. 520 + */ 521 + args->flags &= ~ATTR_REPLACE; 515 522 } 516 523 517 524 if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
+4
fs/xfs/libxfs/xfs_bmap.c
··· 725 725 *logflagsp = 0; 726 726 if ((error = xfs_alloc_vextent(&args))) { 727 727 xfs_iroot_realloc(ip, -1, whichfork); 728 + ASSERT(ifp->if_broot == NULL); 729 + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 728 730 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 729 731 return error; 730 732 } 731 733 732 734 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) { 733 735 xfs_iroot_realloc(ip, -1, whichfork); 736 + ASSERT(ifp->if_broot == NULL); 737 + XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS); 734 738 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 735 739 return -ENOSPC; 736 740 }
+21
fs/xfs/libxfs/xfs_inode_buf.c
··· 466 466 return __this_address; 467 467 if (di_size > XFS_DFORK_DSIZE(dip, mp)) 468 468 return __this_address; 469 + if (dip->di_nextents) 470 + return __this_address; 469 471 /* fall through */ 470 472 case XFS_DINODE_FMT_EXTENTS: 471 473 case XFS_DINODE_FMT_BTREE: ··· 486 484 if (XFS_DFORK_Q(dip)) { 487 485 switch (dip->di_aformat) { 488 486 case XFS_DINODE_FMT_LOCAL: 487 + if (dip->di_anextents) 488 + return __this_address; 489 + /* fall through */ 489 490 case XFS_DINODE_FMT_EXTENTS: 490 491 case XFS_DINODE_FMT_BTREE: 491 492 break; 492 493 default: 493 494 return __this_address; 494 495 } 496 + } else { 497 + /* 498 + * If there is no fork offset, this may be a freshly-made inode 499 + * in a new disk cluster, in which case di_aformat is zeroed. 500 + * Otherwise, such an inode must be in EXTENTS format; this goes 501 + * for freed inodes as well. 502 + */ 503 + switch (dip->di_aformat) { 504 + case 0: 505 + case XFS_DINODE_FMT_EXTENTS: 506 + break; 507 + default: 508 + return __this_address; 509 + } 510 + if (dip->di_anextents) 511 + return __this_address; 495 512 } 496 513 497 514 /* only version 3 or greater inodes are extensively verified here */
+9 -5
fs/xfs/xfs_file.c
··· 778 778 if (error) 779 779 goto out_unlock; 780 780 } else if (mode & FALLOC_FL_INSERT_RANGE) { 781 - unsigned int blksize_mask = i_blocksize(inode) - 1; 781 + unsigned int blksize_mask = i_blocksize(inode) - 1; 782 + loff_t isize = i_size_read(inode); 782 783 783 - new_size = i_size_read(inode) + len; 784 784 if (offset & blksize_mask || len & blksize_mask) { 785 785 error = -EINVAL; 786 786 goto out_unlock; 787 787 } 788 788 789 - /* check the new inode size does not wrap through zero */ 790 - if (new_size > inode->i_sb->s_maxbytes) { 789 + /* 790 + * New inode size must not exceed ->s_maxbytes, accounting for 791 + * possible signed overflow. 792 + */ 793 + if (inode->i_sb->s_maxbytes - isize < len) { 791 794 error = -EFBIG; 792 795 goto out_unlock; 793 796 } 797 + new_size = isize + len; 794 798 795 799 /* Offset should be less than i_size */ 796 - if (offset >= i_size_read(inode)) { 800 + if (offset >= isize) { 797 801 error = -EINVAL; 798 802 goto out_unlock; 799 803 }
+1 -1
include/asm-generic/vmlinux.lds.h
··· 188 188 #endif 189 189 190 190 #ifdef CONFIG_SERIAL_EARLYCON 191 - #define EARLYCON_TABLE() STRUCT_ALIGN(); \ 191 + #define EARLYCON_TABLE() . = ALIGN(8); \ 192 192 VMLINUX_SYMBOL(__earlycon_table) = .; \ 193 193 KEEP(*(__earlycon_table)) \ 194 194 VMLINUX_SYMBOL(__earlycon_table_end) = .;
+14 -2
include/kvm/arm_psci.h
··· 37 37 * Our PSCI implementation stays the same across versions from 38 38 * v0.2 onward, only adding the few mandatory functions (such 39 39 * as FEATURES with 1.0) that are required by newer 40 - * revisions. It is thus safe to return the latest. 40 + * revisions. It is thus safe to return the latest, unless 41 + * userspace has instructed us otherwise. 41 42 */ 42 - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) 43 + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) { 44 + if (vcpu->kvm->arch.psci_version) 45 + return vcpu->kvm->arch.psci_version; 46 + 43 47 return KVM_ARM_PSCI_LATEST; 48 + } 44 49 45 50 return KVM_ARM_PSCI_0_1; 46 51 } 47 52 48 53 49 54 int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); 55 + 56 + struct kvm_one_reg; 57 + 58 + int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu); 59 + int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); 60 + int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); 61 + int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); 50 62 51 63 #endif /* __KVM_ARM_PSCI_H__ */
+3 -1
include/linux/bpf.h
··· 31 31 void (*map_release)(struct bpf_map *map, struct file *map_file); 32 32 void (*map_free)(struct bpf_map *map); 33 33 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); 34 + void (*map_release_uref)(struct bpf_map *map); 34 35 35 36 /* funcs callable from userspace and from eBPF programs */ 36 37 void *(*map_lookup_elem)(struct bpf_map *map, void *key); ··· 352 351 struct bpf_prog **_prog, *__prog; \ 353 352 struct bpf_prog_array *_array; \ 354 353 u32 _ret = 1; \ 354 + preempt_disable(); \ 355 355 rcu_read_lock(); \ 356 356 _array = rcu_dereference(array); \ 357 357 if (unlikely(check_non_null && !_array))\ ··· 364 362 } \ 365 363 _out: \ 366 364 rcu_read_unlock(); \ 365 + preempt_enable_no_resched(); \ 367 366 _ret; \ 368 367 }) 369 368 ··· 437 434 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 438 435 void *key, void *value, u64 map_flags); 439 436 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); 440 - void bpf_fd_array_map_clear(struct bpf_map *map); 441 437 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, 442 438 void *key, void *value, u64 map_flags); 443 439 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
+4 -2
include/linux/device.h
··· 256 256 * automatically. 257 257 * @pm: Power management operations of the device which matched 258 258 * this driver. 259 - * @coredump: Called through sysfs to initiate a device coredump. 259 + * @coredump: Called when sysfs entry is written to. The device driver 260 + * is expected to call the dev_coredump API resulting in a 261 + * uevent. 260 262 * @p: Driver core's private data, no one other than the driver 261 263 * core can touch this. 262 264 * ··· 290 288 const struct attribute_group **groups; 291 289 292 290 const struct dev_pm_ops *pm; 293 - int (*coredump) (struct device *dev); 291 + void (*coredump) (struct device *dev); 294 292 295 293 struct driver_private *p; 296 294 };
+2
include/linux/hrtimer.h
··· 161 161 enum hrtimer_base_type { 162 162 HRTIMER_BASE_MONOTONIC, 163 163 HRTIMER_BASE_REALTIME, 164 + HRTIMER_BASE_BOOTTIME, 164 165 HRTIMER_BASE_TAI, 165 166 HRTIMER_BASE_MONOTONIC_SOFT, 166 167 HRTIMER_BASE_REALTIME_SOFT, 168 + HRTIMER_BASE_BOOTTIME_SOFT, 167 169 HRTIMER_BASE_TAI_SOFT, 168 170 HRTIMER_MAX_CLOCK_BASES, 169 171 };
+3 -9
include/linux/mlx5/driver.h
··· 1284 1284 }; 1285 1285 1286 1286 static inline const struct cpumask * 1287 - mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) 1287 + mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector) 1288 1288 { 1289 - const struct cpumask *mask; 1290 1289 struct irq_desc *desc; 1291 1290 unsigned int irq; 1292 1291 int eqn; 1293 1292 int err; 1294 1293 1295 - err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); 1294 + err = mlx5_vector2eqn(dev, vector, &eqn, &irq); 1296 1295 if (err) 1297 1296 return NULL; 1298 1297 1299 1298 desc = irq_to_desc(irq); 1300 - #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 1301 - mask = irq_data_get_effective_affinity_mask(&desc->irq_data); 1302 - #else 1303 - mask = desc->irq_common_data.affinity; 1304 - #endif 1305 - return mask; 1299 + return desc->affinity_hint; 1306 1300 } 1307 1301 1308 1302 #endif /* MLX5_DRIVER_H */
+1
include/linux/mtd/flashchip.h
··· 85 85 unsigned int write_suspended:1; 86 86 unsigned int erase_suspended:1; 87 87 unsigned long in_progress_block_addr; 88 + unsigned long in_progress_block_mask; 88 89 89 90 struct mutex mutex; 90 91 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
+14 -7
include/linux/serial_core.h
··· 351 351 char name[16]; 352 352 char compatible[128]; 353 353 int (*setup)(struct earlycon_device *, const char *options); 354 - } __aligned(32); 354 + }; 355 355 356 - extern const struct earlycon_id __earlycon_table[]; 357 - extern const struct earlycon_id __earlycon_table_end[]; 356 + extern const struct earlycon_id *__earlycon_table[]; 357 + extern const struct earlycon_id *__earlycon_table_end[]; 358 358 359 359 #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE) 360 360 #define EARLYCON_USED_OR_UNUSED __used ··· 362 362 #define EARLYCON_USED_OR_UNUSED __maybe_unused 363 363 #endif 364 364 365 - #define OF_EARLYCON_DECLARE(_name, compat, fn) \ 366 - static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \ 367 - EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \ 365 + #define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \ 366 + static const struct earlycon_id unique_id \ 367 + EARLYCON_USED_OR_UNUSED __initconst \ 368 368 = { .name = __stringify(_name), \ 369 369 .compatible = compat, \ 370 - .setup = fn } 370 + .setup = fn }; \ 371 + static const struct earlycon_id EARLYCON_USED_OR_UNUSED \ 372 + __section(__earlycon_table) \ 373 + * const __PASTE(__p, unique_id) = &unique_id 374 + 375 + #define OF_EARLYCON_DECLARE(_name, compat, fn) \ 376 + _OF_EARLYCON_DECLARE(_name, compat, fn, \ 377 + __UNIQUE_ID(__earlycon_##_name)) 371 378 372 379 #define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn) 373 380
+2 -2
include/linux/stringhash.h
··· 50 50 * losing bits). This also has the property (wanted by the dcache) 51 51 * that the msbits make a good hash table index. 52 52 */ 53 - static inline unsigned long end_name_hash(unsigned long hash) 53 + static inline unsigned int end_name_hash(unsigned long hash) 54 54 { 55 - return __hash_32((unsigned int)hash); 55 + return hash_long(hash, 32); 56 56 } 57 57 58 58 /*
+75
include/linux/ti-emif-sram.h
··· 60 60 u32 abort_sr; 61 61 } __packed __aligned(8); 62 62 63 + static inline void ti_emif_asm_offsets(void) 64 + { 65 + DEFINE(EMIF_SDCFG_VAL_OFFSET, 66 + offsetof(struct emif_regs_amx3, emif_sdcfg_val)); 67 + DEFINE(EMIF_TIMING1_VAL_OFFSET, 68 + offsetof(struct emif_regs_amx3, emif_timing1_val)); 69 + DEFINE(EMIF_TIMING2_VAL_OFFSET, 70 + offsetof(struct emif_regs_amx3, emif_timing2_val)); 71 + DEFINE(EMIF_TIMING3_VAL_OFFSET, 72 + offsetof(struct emif_regs_amx3, emif_timing3_val)); 73 + DEFINE(EMIF_REF_CTRL_VAL_OFFSET, 74 + offsetof(struct emif_regs_amx3, emif_ref_ctrl_val)); 75 + DEFINE(EMIF_ZQCFG_VAL_OFFSET, 76 + offsetof(struct emif_regs_amx3, emif_zqcfg_val)); 77 + DEFINE(EMIF_PMCR_VAL_OFFSET, 78 + offsetof(struct emif_regs_amx3, emif_pmcr_val)); 79 + DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET, 80 + offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val)); 81 + DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET, 82 + offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl)); 83 + DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET, 84 + offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh)); 85 + DEFINE(EMIF_COS_CONFIG_OFFSET, 86 + offsetof(struct emif_regs_amx3, emif_cos_config)); 87 + DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET, 88 + offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping)); 89 + DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET, 90 + offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map)); 91 + DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET, 92 + offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map)); 93 + DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET, 94 + offsetof(struct emif_regs_amx3, emif_ocp_config_val)); 95 + DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET, 96 + offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim)); 97 + DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET, 98 + offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw)); 99 + DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET, 100 + offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val)); 101 + DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET, 102 + 
offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw)); 103 + DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET, 104 + offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1)); 105 + DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET, 106 + offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals)); 107 + DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3)); 108 + 109 + BLANK(); 110 + 111 + DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET, 112 + offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt)); 113 + DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET, 114 + offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys)); 115 + DEFINE(EMIF_PM_CONFIG_OFFSET, 116 + offsetof(struct ti_emif_pm_data, ti_emif_sram_config)); 117 + DEFINE(EMIF_PM_REGS_VIRT_OFFSET, 118 + offsetof(struct ti_emif_pm_data, regs_virt)); 119 + DEFINE(EMIF_PM_REGS_PHYS_OFFSET, 120 + offsetof(struct ti_emif_pm_data, regs_phys)); 121 + DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data)); 122 + 123 + BLANK(); 124 + 125 + DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET, 126 + offsetof(struct ti_emif_pm_functions, save_context)); 127 + DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET, 128 + offsetof(struct ti_emif_pm_functions, restore_context)); 129 + DEFINE(EMIF_PM_ENTER_SR_OFFSET, 130 + offsetof(struct ti_emif_pm_functions, enter_sr)); 131 + DEFINE(EMIF_PM_EXIT_SR_OFFSET, 132 + offsetof(struct ti_emif_pm_functions, exit_sr)); 133 + DEFINE(EMIF_PM_ABORT_SR_OFFSET, 134 + offsetof(struct ti_emif_pm_functions, abort_sr)); 135 + DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions)); 136 + } 137 + 63 138 struct gen_pool; 64 139 65 140 int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst);
-2
include/linux/timekeeper_internal.h
··· 52 52 * @offs_real: Offset clock monotonic -> clock realtime 53 53 * @offs_boot: Offset clock monotonic -> clock boottime 54 54 * @offs_tai: Offset clock monotonic -> clock tai 55 - * @time_suspended: Accumulated suspend time 56 55 * @tai_offset: The current UTC to TAI offset in seconds 57 56 * @clock_was_set_seq: The sequence number of clock was set events 58 57 * @cs_was_changed_seq: The sequence number of clocksource change events ··· 94 95 ktime_t offs_real; 95 96 ktime_t offs_boot; 96 97 ktime_t offs_tai; 97 - ktime_t time_suspended; 98 98 s32 tai_offset; 99 99 unsigned int clock_was_set_seq; 100 100 u8 cs_was_changed_seq;
+25 -12
include/linux/timekeeping.h
··· 33 33 extern time64_t ktime_get_seconds(void); 34 34 extern time64_t __ktime_get_real_seconds(void); 35 35 extern time64_t ktime_get_real_seconds(void); 36 - extern void ktime_get_active_ts64(struct timespec64 *ts); 37 36 38 37 extern int __getnstimeofday64(struct timespec64 *tv); 39 38 extern void getnstimeofday64(struct timespec64 *tv); 40 39 extern void getboottime64(struct timespec64 *ts); 41 40 42 - #define ktime_get_real_ts64(ts) getnstimeofday64(ts) 43 - 44 - /* Clock BOOTTIME compatibility wrappers */ 45 - static inline void get_monotonic_boottime64(struct timespec64 *ts) 46 - { 47 - ktime_get_ts64(ts); 48 - } 41 + #define ktime_get_real_ts64(ts) getnstimeofday64(ts) 49 42 50 43 /* 51 44 * ktime_t based interfaces 52 45 */ 46 + 53 47 enum tk_offsets { 54 48 TK_OFFS_REAL, 49 + TK_OFFS_BOOT, 55 50 TK_OFFS_TAI, 56 51 TK_OFFS_MAX, 57 52 }; ··· 57 62 extern ktime_t ktime_get_raw(void); 58 63 extern u32 ktime_get_resolution_ns(void); 59 64 60 - /* Clock BOOTTIME compatibility wrappers */ 61 - static inline ktime_t ktime_get_boottime(void) { return ktime_get(); } 62 - static inline u64 ktime_get_boot_ns(void) { return ktime_get(); } 63 - 64 65 /** 65 66 * ktime_get_real - get the real (wall-) time in ktime_t format 66 67 */ 67 68 static inline ktime_t ktime_get_real(void) 68 69 { 69 70 return ktime_get_with_offset(TK_OFFS_REAL); 71 + } 72 + 73 + /** 74 + * ktime_get_boottime - Returns monotonic time since boot in ktime_t format 75 + * 76 + * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the 77 + * time spent in suspend. 
78 + */ 79 + static inline ktime_t ktime_get_boottime(void) 80 + { 81 + return ktime_get_with_offset(TK_OFFS_BOOT); 70 82 } 71 83 72 84 /** ··· 102 100 return ktime_to_ns(ktime_get_real()); 103 101 } 104 102 103 + static inline u64 ktime_get_boot_ns(void) 104 + { 105 + return ktime_to_ns(ktime_get_boottime()); 106 + } 107 + 105 108 static inline u64 ktime_get_tai_ns(void) 106 109 { 107 110 return ktime_to_ns(ktime_get_clocktai()); ··· 119 112 120 113 extern u64 ktime_get_mono_fast_ns(void); 121 114 extern u64 ktime_get_raw_fast_ns(void); 115 + extern u64 ktime_get_boot_fast_ns(void); 122 116 extern u64 ktime_get_real_fast_ns(void); 123 117 124 118 /* 125 119 * timespec64 interfaces utilizing the ktime based ones 126 120 */ 121 + static inline void get_monotonic_boottime64(struct timespec64 *ts) 122 + { 123 + *ts = ktime_to_timespec64(ktime_get_boottime()); 124 + } 125 + 127 126 static inline void timekeeping_clocktai64(struct timespec64 *ts) 128 127 { 129 128 *ts = ktime_to_timespec64(ktime_get_clocktai());
+1 -1
include/linux/tty.h
··· 701 701 extern int tty_set_ldisc(struct tty_struct *tty, int disc); 702 702 extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); 703 703 extern void tty_ldisc_release(struct tty_struct *tty); 704 - extern void tty_ldisc_init(struct tty_struct *tty); 704 + extern int __must_check tty_ldisc_init(struct tty_struct *tty); 705 705 extern void tty_ldisc_deinit(struct tty_struct *tty); 706 706 extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, 707 707 char *f, int count);
-23
include/linux/vbox_utils.h
··· 24 24 #define vbg_debug pr_debug 25 25 #endif 26 26 27 - /** 28 - * Allocate memory for generic request and initialize the request header. 29 - * 30 - * Return: the allocated memory 31 - * @len: Size of memory block required for the request. 32 - * @req_type: The generic request type. 33 - */ 34 - void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); 35 - 36 - /** 37 - * Perform a generic request. 38 - * 39 - * Return: VBox status code 40 - * @gdev: The Guest extension device. 41 - * @req: Pointer to the request structure. 42 - */ 43 - int vbg_req_perform(struct vbg_dev *gdev, void *req); 44 - 45 27 int vbg_hgcm_connect(struct vbg_dev *gdev, 46 28 struct vmmdev_hgcm_service_location *loc, 47 29 u32 *client_id, int *vbox_status); ··· 33 51 int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, 34 52 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, 35 53 u32 parm_count, int *vbox_status); 36 - 37 - int vbg_hgcm_call32( 38 - struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, 39 - struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, 40 - int *vbox_status); 41 54 42 55 /** 43 56 * Convert a VirtualBox status code to a standard Linux kernel return value.
+1
include/net/tls.h
··· 148 148 struct scatterlist *partially_sent_record; 149 149 u16 partially_sent_offset; 150 150 unsigned long flags; 151 + bool in_tcp_sendpages; 151 152 152 153 u16 pending_open_record_frags; 153 154 int (*push_pending_record)(struct sock *sk, int flags);
+2 -2
include/soc/bcm2835/raspberrypi-firmware.h
··· 143 143 static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, 144 144 void *data, size_t len) 145 145 { 146 - return 0; 146 + return -ENOSYS; 147 147 } 148 148 149 149 static inline int rpi_firmware_property_list(struct rpi_firmware *fw, 150 150 void *data, size_t tag_size) 151 151 { 152 - return 0; 152 + return -ENOSYS; 153 153 } 154 154 155 155 static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
+5 -2
include/sound/control.h
··· 23 23 */ 24 24 25 25 #include <linux/wait.h> 26 + #include <linux/nospec.h> 26 27 #include <sound/asound.h> 27 28 28 29 #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data) ··· 149 148 150 149 static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 151 150 { 152 - return id->numid - kctl->id.numid; 151 + unsigned int ioff = id->numid - kctl->id.numid; 152 + return array_index_nospec(ioff, kctl->count); 153 153 } 154 154 155 155 static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id) 156 156 { 157 - return id->index - kctl->id.index; 157 + unsigned int ioff = id->index - kctl->id.index; 158 + return array_index_nospec(ioff, kctl->count); 158 159 } 159 160 160 161 static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
+11 -3
include/trace/events/initcall.h
··· 31 31 TP_ARGS(func), 32 32 33 33 TP_STRUCT__entry( 34 - __field(initcall_t, func) 34 + /* 35 + * Use field_struct to avoid is_signed_type() 36 + * comparison of a function pointer 37 + */ 38 + __field_struct(initcall_t, func) 35 39 ), 36 40 37 41 TP_fast_assign( ··· 52 48 TP_ARGS(func, ret), 53 49 54 50 TP_STRUCT__entry( 55 - __field(initcall_t, func) 56 - __field(int, ret) 51 + /* 52 + * Use field_struct to avoid is_signed_type() 53 + * comparison of a function pointer 54 + */ 55 + __field_struct(initcall_t, func) 56 + __field(int, ret) 57 57 ), 58 58 59 59 TP_fast_assign(
+7
include/uapi/linux/kvm.h
··· 676 676 __u8 pad[36]; 677 677 }; 678 678 679 + #define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0) 680 + #define KVM_X86_DISABLE_EXITS_HTL (1 << 1) 681 + #define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) 682 + #define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \ 683 + KVM_X86_DISABLE_EXITS_HTL | \ 684 + KVM_X86_DISABLE_EXITS_PAUSE) 685 + 679 686 /* for KVM_ENABLE_CAP */ 680 687 struct kvm_enable_cap { 681 688 /* in */
-18
include/uapi/linux/sysctl.h
··· 780 780 NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5, 781 781 }; 782 782 783 - /* proc/sys/net/irda */ 784 - enum { 785 - NET_IRDA_DISCOVERY=1, 786 - NET_IRDA_DEVNAME=2, 787 - NET_IRDA_DEBUG=3, 788 - NET_IRDA_FAST_POLL=4, 789 - NET_IRDA_DISCOVERY_SLOTS=5, 790 - NET_IRDA_DISCOVERY_TIMEOUT=6, 791 - NET_IRDA_SLOT_TIMEOUT=7, 792 - NET_IRDA_MAX_BAUD_RATE=8, 793 - NET_IRDA_MIN_TX_TURN_TIME=9, 794 - NET_IRDA_MAX_TX_DATA_SIZE=10, 795 - NET_IRDA_MAX_TX_WINDOW=11, 796 - NET_IRDA_MAX_NOREPLY_TIME=12, 797 - NET_IRDA_WARN_NOREPLY_TIME=13, 798 - NET_IRDA_LAP_KEEPALIVE_TIME=14, 799 - }; 800 - 801 783 802 784 /* CTL_FS names: */ 803 785 enum
-1
include/uapi/linux/time.h
··· 73 73 */ 74 74 #define CLOCK_SGI_CYCLE 10 75 75 #define CLOCK_TAI 11 76 - #define CLOCK_MONOTONIC_ACTIVE 12 77 76 78 77 #define MAX_CLOCKS 16 79 78 #define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC)
+2 -1
kernel/bpf/arraymap.c
··· 476 476 } 477 477 478 478 /* decrement refcnt of all bpf_progs that are stored in this map */ 479 - void bpf_fd_array_map_clear(struct bpf_map *map) 479 + static void bpf_fd_array_map_clear(struct bpf_map *map) 480 480 { 481 481 struct bpf_array *array = container_of(map, struct bpf_array, map); 482 482 int i; ··· 495 495 .map_fd_get_ptr = prog_fd_array_get_ptr, 496 496 .map_fd_put_ptr = prog_fd_array_put_ptr, 497 497 .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, 498 + .map_release_uref = bpf_fd_array_map_clear, 498 499 }; 499 500 500 501 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
+73 -26
kernel/bpf/sockmap.c
··· 43 43 #include <net/tcp.h> 44 44 #include <linux/ptr_ring.h> 45 45 #include <net/inet_common.h> 46 + #include <linux/sched/signal.h> 46 47 47 48 #define SOCK_CREATE_FLAG_MASK \ 48 49 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) ··· 326 325 if (ret > 0) { 327 326 if (apply) 328 327 apply_bytes -= ret; 328 + 329 + sg->offset += ret; 330 + sg->length -= ret; 329 331 size -= ret; 330 332 offset += ret; 331 333 if (uncharge) ··· 336 332 goto retry; 337 333 } 338 334 339 - sg->length = size; 340 - sg->offset = offset; 341 335 return ret; 342 336 } 343 337 ··· 393 391 } while (i != md->sg_end); 394 392 } 395 393 396 - static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md) 394 + static void free_bytes_sg(struct sock *sk, int bytes, 395 + struct sk_msg_buff *md, bool charge) 397 396 { 398 397 struct scatterlist *sg = md->sg_data; 399 398 int i = md->sg_start, free; ··· 404 401 if (bytes < free) { 405 402 sg[i].length -= bytes; 406 403 sg[i].offset += bytes; 407 - sk_mem_uncharge(sk, bytes); 404 + if (charge) 405 + sk_mem_uncharge(sk, bytes); 408 406 break; 409 407 } 410 408 411 - sk_mem_uncharge(sk, sg[i].length); 409 + if (charge) 410 + sk_mem_uncharge(sk, sg[i].length); 412 411 put_page(sg_page(&sg[i])); 413 412 bytes -= sg[i].length; 414 413 sg[i].length = 0; ··· 421 416 if (i == MAX_SKB_FRAGS) 422 417 i = 0; 423 418 } 419 + md->sg_start = i; 424 420 } 425 421 426 422 static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md) ··· 529 523 i = md->sg_start; 530 524 531 525 do { 532 - r->sg_data[i] = md->sg_data[i]; 533 - 534 526 size = (apply && apply_bytes < md->sg_data[i].length) ? 
535 527 apply_bytes : md->sg_data[i].length; 536 528 ··· 539 535 } 540 536 541 537 sk_mem_charge(sk, size); 538 + r->sg_data[i] = md->sg_data[i]; 542 539 r->sg_data[i].length = size; 543 540 md->sg_data[i].length -= size; 544 541 md->sg_data[i].offset += size; ··· 580 575 struct sk_msg_buff *md, 581 576 int flags) 582 577 { 578 + bool ingress = !!(md->flags & BPF_F_INGRESS); 583 579 struct smap_psock *psock; 584 580 struct scatterlist *sg; 585 - int i, err, free = 0; 586 - bool ingress = !!(md->flags & BPF_F_INGRESS); 581 + int err = 0; 587 582 588 583 sg = md->sg_data; 589 584 ··· 611 606 out_rcu: 612 607 rcu_read_unlock(); 613 608 out: 614 - i = md->sg_start; 615 - while (sg[i].length) { 616 - free += sg[i].length; 617 - put_page(sg_page(&sg[i])); 618 - sg[i].length = 0; 619 - i++; 620 - if (i == MAX_SKB_FRAGS) 621 - i = 0; 622 - } 623 - return free; 609 + free_bytes_sg(NULL, send, md, false); 610 + return err; 624 611 } 625 612 626 613 static inline void bpf_md_init(struct smap_psock *psock) ··· 697 700 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags); 698 701 lock_sock(sk); 699 702 703 + if (unlikely(err < 0)) { 704 + free_start_sg(sk, m); 705 + psock->sg_size = 0; 706 + if (!cork) 707 + *copied -= send; 708 + } else { 709 + psock->sg_size -= send; 710 + } 711 + 700 712 if (cork) { 701 713 free_start_sg(sk, m); 714 + psock->sg_size = 0; 702 715 kfree(m); 703 716 m = NULL; 717 + err = 0; 704 718 } 705 - if (unlikely(err)) 706 - *copied -= err; 707 - else 708 - psock->sg_size -= send; 709 719 break; 710 720 case __SK_DROP: 711 721 default: 712 - free_bytes_sg(sk, send, m); 722 + free_bytes_sg(sk, send, m, true); 713 723 apply_bytes_dec(psock, send); 714 724 *copied -= send; 715 725 psock->sg_size -= send; ··· 734 730 735 731 out_err: 736 732 return err; 733 + } 734 + 735 + static int bpf_wait_data(struct sock *sk, 736 + struct smap_psock *psk, int flags, 737 + long timeo, int *err) 738 + { 739 + int rc; 740 + 741 + DEFINE_WAIT_FUNC(wait, 
woken_wake_function); 742 + 743 + add_wait_queue(sk_sleep(sk), &wait); 744 + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 745 + rc = sk_wait_event(sk, &timeo, 746 + !list_empty(&psk->ingress) || 747 + !skb_queue_empty(&sk->sk_receive_queue), 748 + &wait); 749 + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 750 + remove_wait_queue(sk_sleep(sk), &wait); 751 + 752 + return rc; 737 753 } 738 754 739 755 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, ··· 779 755 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); 780 756 781 757 lock_sock(sk); 758 + bytes_ready: 782 759 while (copied != len) { 783 760 struct scatterlist *sg; 784 761 struct sk_msg_buff *md; ··· 832 807 consume_skb(md->skb); 833 808 kfree(md); 834 809 } 810 + } 811 + 812 + if (!copied) { 813 + long timeo; 814 + int data; 815 + int err = 0; 816 + 817 + timeo = sock_rcvtimeo(sk, nonblock); 818 + data = bpf_wait_data(sk, psock, flags, timeo, &err); 819 + 820 + if (data) { 821 + if (!skb_queue_empty(&sk->sk_receive_queue)) { 822 + release_sock(sk); 823 + smap_release_sock(psock, sk); 824 + copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len); 825 + return copied; 826 + } 827 + goto bytes_ready; 828 + } 829 + 830 + if (err) 831 + copied = err; 835 832 } 836 833 837 834 release_sock(sk); ··· 1878 1831 return err; 1879 1832 } 1880 1833 1881 - static void sock_map_release(struct bpf_map *map, struct file *map_file) 1834 + static void sock_map_release(struct bpf_map *map) 1882 1835 { 1883 1836 struct bpf_stab *stab = container_of(map, struct bpf_stab, map); 1884 1837 struct bpf_prog *orig; ··· 1902 1855 .map_get_next_key = sock_map_get_next_key, 1903 1856 .map_update_elem = sock_map_update_elem, 1904 1857 .map_delete_elem = sock_map_delete_elem, 1905 - .map_release = sock_map_release, 1858 + .map_release_uref = sock_map_release, 1906 1859 }; 1907 1860 1908 1861 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
+2 -2
kernel/bpf/syscall.c
··· 257 257 static void bpf_map_put_uref(struct bpf_map *map) 258 258 { 259 259 if (atomic_dec_and_test(&map->usercnt)) { 260 - if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) 261 - bpf_fd_array_map_clear(map); 260 + if (map->ops->map_release_uref) 261 + map->ops->map_release_uref(map); 262 262 } 263 263 } 264 264
+3 -4
kernel/events/uprobes.c
··· 491 491 if (!uprobe) 492 492 return NULL; 493 493 494 - uprobe->inode = igrab(inode); 494 + uprobe->inode = inode; 495 495 uprobe->offset = offset; 496 496 init_rwsem(&uprobe->register_rwsem); 497 497 init_rwsem(&uprobe->consumer_rwsem); ··· 502 502 if (cur_uprobe) { 503 503 kfree(uprobe); 504 504 uprobe = cur_uprobe; 505 - iput(inode); 506 505 } 507 506 508 507 return uprobe; ··· 700 701 rb_erase(&uprobe->rb_node, &uprobes_tree); 701 702 spin_unlock(&uprobes_treelock); 702 703 RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */ 703 - iput(uprobe->inode); 704 704 put_uprobe(uprobe); 705 705 } 706 706 ··· 871 873 * tuple). Creation refcount stops uprobe_unregister from freeing the 872 874 * @uprobe even before the register operation is complete. Creation 873 875 * refcount is released when the last @uc for the @uprobe 874 - * unregisters. 876 + * unregisters. Caller of uprobe_register() is required to keep @inode 877 + * (and the containing mount) referenced. 875 878 * 876 879 * Return errno if it cannot successully install probes 877 880 * else return 0 (success)
+2 -1
kernel/module.c
··· 1472 1472 { 1473 1473 struct module_sect_attr *sattr = 1474 1474 container_of(mattr, struct module_sect_attr, mattr); 1475 - return sprintf(buf, "0x%pK\n", (void *)sattr->address); 1475 + return sprintf(buf, "0x%px\n", kptr_restrict < 2 ? 1476 + (void *)sattr->address : NULL); 1476 1477 } 1477 1478 1478 1479 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
+1 -19
kernel/sysctl_binary.c
··· 704 704 {} 705 705 }; 706 706 707 - static const struct bin_table bin_net_irda_table[] = { 708 - { CTL_INT, NET_IRDA_DISCOVERY, "discovery" }, 709 - { CTL_STR, NET_IRDA_DEVNAME, "devname" }, 710 - { CTL_INT, NET_IRDA_DEBUG, "debug" }, 711 - { CTL_INT, NET_IRDA_FAST_POLL, "fast_poll_increase" }, 712 - { CTL_INT, NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" }, 713 - { CTL_INT, NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" }, 714 - { CTL_INT, NET_IRDA_SLOT_TIMEOUT, "slot_timeout" }, 715 - { CTL_INT, NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" }, 716 - { CTL_INT, NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" }, 717 - { CTL_INT, NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" }, 718 - { CTL_INT, NET_IRDA_MAX_TX_WINDOW, "max_tx_window" }, 719 - { CTL_INT, NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" }, 720 - { CTL_INT, NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" }, 721 - { CTL_INT, NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" }, 722 - {} 723 - }; 724 - 725 707 static const struct bin_table bin_net_table[] = { 726 708 { CTL_DIR, NET_CORE, "core", bin_net_core_table }, 727 709 /* NET_ETHER not used */ ··· 725 743 { CTL_DIR, NET_LLC, "llc", bin_net_llc_table }, 726 744 { CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table }, 727 745 /* NET_DCCP "dccp" no longer used */ 728 - { CTL_DIR, NET_IRDA, "irda", bin_net_irda_table }, 746 + /* NET_IRDA "irda" no longer used */ 729 747 { CTL_INT, 2089, "nf_conntrack_max" }, 730 748 {} 731 749 };
+14 -2
kernel/time/hrtimer.c
··· 91 91 .get_time = &ktime_get_real, 92 92 }, 93 93 { 94 + .index = HRTIMER_BASE_BOOTTIME, 95 + .clockid = CLOCK_BOOTTIME, 96 + .get_time = &ktime_get_boottime, 97 + }, 98 + { 94 99 .index = HRTIMER_BASE_TAI, 95 100 .clockid = CLOCK_TAI, 96 101 .get_time = &ktime_get_clocktai, ··· 111 106 .get_time = &ktime_get_real, 112 107 }, 113 108 { 109 + .index = HRTIMER_BASE_BOOTTIME_SOFT, 110 + .clockid = CLOCK_BOOTTIME, 111 + .get_time = &ktime_get_boottime, 112 + }, 113 + { 114 114 .index = HRTIMER_BASE_TAI_SOFT, 115 115 .clockid = CLOCK_TAI, 116 116 .get_time = &ktime_get_clocktai, ··· 129 119 130 120 [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME, 131 121 [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC, 132 - [CLOCK_BOOTTIME] = HRTIMER_BASE_MONOTONIC, 122 + [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME, 133 123 [CLOCK_TAI] = HRTIMER_BASE_TAI, 134 124 }; 135 125 ··· 581 571 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base) 582 572 { 583 573 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset; 574 + ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset; 584 575 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset; 585 576 586 577 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq, 587 - offs_real, offs_tai); 578 + offs_real, offs_boot, offs_tai); 588 579 589 580 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real; 581 + base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot; 590 582 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai; 591 583 592 584 return now;
-2
kernel/time/posix-stubs.c
··· 83 83 case CLOCK_BOOTTIME: 84 84 get_monotonic_boottime64(tp); 85 85 break; 86 - case CLOCK_MONOTONIC_ACTIVE: 87 - ktime_get_active_ts64(tp); 88 86 default: 89 87 return -EINVAL; 90 88 }
+17 -9
kernel/time/posix-timers.c
··· 252 252 return 0; 253 253 } 254 254 255 - static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) 255 + static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp) 256 256 { 257 - timekeeping_clocktai64(tp); 257 + get_monotonic_boottime64(tp); 258 258 return 0; 259 259 } 260 260 261 - static int posix_get_monotonic_active(clockid_t which_clock, 262 - struct timespec64 *tp) 261 + static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp) 263 262 { 264 - ktime_get_active_ts64(tp); 263 + timekeeping_clocktai64(tp); 265 264 return 0; 266 265 } 267 266 ··· 1316 1317 .timer_arm = common_hrtimer_arm, 1317 1318 }; 1318 1319 1319 - static const struct k_clock clock_monotonic_active = { 1320 + static const struct k_clock clock_boottime = { 1320 1321 .clock_getres = posix_get_hrtimer_res, 1321 - .clock_get = posix_get_monotonic_active, 1322 + .clock_get = posix_get_boottime, 1323 + .nsleep = common_nsleep, 1324 + .timer_create = common_timer_create, 1325 + .timer_set = common_timer_set, 1326 + .timer_get = common_timer_get, 1327 + .timer_del = common_timer_del, 1328 + .timer_rearm = common_hrtimer_rearm, 1329 + .timer_forward = common_hrtimer_forward, 1330 + .timer_remaining = common_hrtimer_remaining, 1331 + .timer_try_to_cancel = common_hrtimer_try_to_cancel, 1332 + .timer_arm = common_hrtimer_arm, 1322 1333 }; 1323 1334 1324 1335 static const struct k_clock * const posix_clocks[] = { ··· 1339 1330 [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw, 1340 1331 [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse, 1341 1332 [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse, 1342 - [CLOCK_BOOTTIME] = &clock_monotonic, 1333 + [CLOCK_BOOTTIME] = &clock_boottime, 1343 1334 [CLOCK_REALTIME_ALARM] = &alarm_clock, 1344 1335 [CLOCK_BOOTTIME_ALARM] = &alarm_clock, 1345 1336 [CLOCK_TAI] = &clock_tai, 1346 - [CLOCK_MONOTONIC_ACTIVE] = &clock_monotonic_active, 1347 1337 }; 1348 1338 1349 1339 static const struct k_clock *clockid_to_kclock(const 
clockid_t id)
-15
kernel/time/tick-common.c
··· 419 419 clockevents_shutdown(td->evtdev); 420 420 } 421 421 422 - static void tick_forward_next_period(void) 423 - { 424 - ktime_t delta, now = ktime_get(); 425 - u64 n; 426 - 427 - delta = ktime_sub(now, tick_next_period); 428 - n = ktime_divns(delta, tick_period); 429 - tick_next_period += n * tick_period; 430 - if (tick_next_period < now) 431 - tick_next_period += tick_period; 432 - tick_sched_forward_next_period(); 433 - } 434 - 435 422 /** 436 423 * tick_resume_local - Resume the local tick device 437 424 * ··· 430 443 { 431 444 struct tick_device *td = this_cpu_ptr(&tick_cpu_device); 432 445 bool broadcast = tick_resume_check_broadcast(); 433 - 434 - tick_forward_next_period(); 435 446 436 447 clockevents_tick_resume(td->evtdev); 437 448 if (!broadcast) {
-6
kernel/time/tick-internal.h
··· 141 141 static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); } 142 142 #endif /* !(BROADCAST && ONESHOT) */ 143 143 144 - #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS) 145 - extern void tick_sched_forward_next_period(void); 146 - #else 147 - static inline void tick_sched_forward_next_period(void) { } 148 - #endif 149 - 150 144 /* NO_HZ_FULL internal */ 151 145 #ifdef CONFIG_NO_HZ_FULL 152 146 extern void tick_nohz_init(void);
+5 -14
kernel/time/tick-sched.c
··· 52 52 static ktime_t last_jiffies_update; 53 53 54 54 /* 55 - * Called after resume. Make sure that jiffies are not fast forwarded due to 56 - * clock monotonic being forwarded by the suspended time. 57 - */ 58 - void tick_sched_forward_next_period(void) 59 - { 60 - last_jiffies_update = tick_next_period; 61 - } 62 - 63 - /* 64 55 * Must be called with interrupts disabled ! 65 56 */ 66 57 static void tick_do_update_jiffies64(ktime_t now) ··· 795 804 return; 796 805 } 797 806 798 - hrtimer_set_expires(&ts->sched_timer, tick); 799 - 800 - if (ts->nohz_mode == NOHZ_MODE_HIGHRES) 801 - hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED); 802 - else 807 + if (ts->nohz_mode == NOHZ_MODE_HIGHRES) { 808 + hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED); 809 + } else { 810 + hrtimer_set_expires(&ts->sched_timer, tick); 803 811 tick_program_event(tick, 1); 812 + } 804 813 } 805 814 806 815 static void tick_nohz_retain_tick(struct tick_sched *ts)
+37 -41
kernel/time/timekeeping.c
··· 138 138 139 139 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) 140 140 { 141 - /* Update both bases so mono and raw stay coupled. */ 142 - tk->tkr_mono.base += delta; 143 - tk->tkr_raw.base += delta; 144 - 145 - /* Accumulate time spent in suspend */ 146 - tk->time_suspended += delta; 141 + tk->offs_boot = ktime_add(tk->offs_boot, delta); 147 142 } 148 143 149 144 /* ··· 468 473 } 469 474 EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns); 470 475 476 + /** 477 + * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock. 478 + * 479 + * To keep it NMI safe since we're accessing from tracing, we're not using a 480 + * separate timekeeper with updates to monotonic clock and boot offset 481 + * protected with seqlocks. This has the following minor side effects: 482 + * 483 + * (1) Its possible that a timestamp be taken after the boot offset is updated 484 + * but before the timekeeper is updated. If this happens, the new boot offset 485 + * is added to the old timekeeping making the clock appear to update slightly 486 + * earlier: 487 + * CPU 0 CPU 1 488 + * timekeeping_inject_sleeptime64() 489 + * __timekeeping_inject_sleeptime(tk, delta); 490 + * timestamp(); 491 + * timekeeping_update(tk, TK_CLEAR_NTP...); 492 + * 493 + * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be 494 + * partially updated. Since the tk->offs_boot update is a rare event, this 495 + * should be a rare occurrence which postprocessing should be able to handle. 496 + */ 497 + u64 notrace ktime_get_boot_fast_ns(void) 498 + { 499 + struct timekeeper *tk = &tk_core.timekeeper; 500 + 501 + return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot)); 502 + } 503 + EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns); 504 + 505 + 471 506 /* 472 507 * See comment for __ktime_get_fast_ns() vs. 
timestamp ordering 473 508 */ ··· 789 764 790 765 static ktime_t *offsets[TK_OFFS_MAX] = { 791 766 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real, 767 + [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot, 792 768 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai, 793 769 }; 794 770 ··· 885 859 timespec64_add_ns(ts, nsec + tomono.tv_nsec); 886 860 } 887 861 EXPORT_SYMBOL_GPL(ktime_get_ts64); 888 - 889 - /** 890 - * ktime_get_active_ts64 - Get the active non-suspended monotonic clock 891 - * @ts: pointer to timespec variable 892 - * 893 - * The function calculates the monotonic clock from the realtime clock and 894 - * the wall_to_monotonic offset, subtracts the accumulated suspend time and 895 - * stores the result in normalized timespec64 format in the variable 896 - * pointed to by @ts. 897 - */ 898 - void ktime_get_active_ts64(struct timespec64 *ts) 899 - { 900 - struct timekeeper *tk = &tk_core.timekeeper; 901 - struct timespec64 tomono, tsusp; 902 - u64 nsec, nssusp; 903 - unsigned int seq; 904 - 905 - WARN_ON(timekeeping_suspended); 906 - 907 - do { 908 - seq = read_seqcount_begin(&tk_core.seq); 909 - ts->tv_sec = tk->xtime_sec; 910 - nsec = timekeeping_get_ns(&tk->tkr_mono); 911 - tomono = tk->wall_to_monotonic; 912 - nssusp = tk->time_suspended; 913 - } while (read_seqcount_retry(&tk_core.seq, seq)); 914 - 915 - ts->tv_sec += tomono.tv_sec; 916 - ts->tv_nsec = 0; 917 - timespec64_add_ns(ts, nsec + tomono.tv_nsec); 918 - tsusp = ns_to_timespec64(nssusp); 919 - *ts = timespec64_sub(*ts, tsusp); 920 - } 921 862 922 863 /** 923 864 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC ··· 1586 1593 return; 1587 1594 } 1588 1595 tk_xtime_add(tk, delta); 1596 + tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta)); 1589 1597 tk_update_sleep_time(tk, timespec64_to_ktime(*delta)); 1590 1598 tk_debug_account_sleep_time(delta); 1591 1599 } ··· 2119 2125 void getboottime64(struct timespec64 *ts) 2120 2126 { 2121 2127 struct timekeeper *tk = 
&tk_core.timekeeper; 2122 - ktime_t t = ktime_sub(tk->offs_real, tk->time_suspended); 2128 + ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); 2123 2129 2124 2130 *ts = ktime_to_timespec64(t); 2125 2131 } ··· 2182 2188 * ktime_get_update_offsets_now - hrtimer helper 2183 2189 * @cwsseq: pointer to check and store the clock was set sequence number 2184 2190 * @offs_real: pointer to storage for monotonic -> realtime offset 2191 + * @offs_boot: pointer to storage for monotonic -> boottime offset 2185 2192 * @offs_tai: pointer to storage for monotonic -> clock tai offset 2186 2193 * 2187 2194 * Returns current monotonic time and updates the offsets if the ··· 2192 2197 * Called from hrtimer_interrupt() or retrigger_next_event() 2193 2198 */ 2194 2199 ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real, 2195 - ktime_t *offs_tai) 2200 + ktime_t *offs_boot, ktime_t *offs_tai) 2196 2201 { 2197 2202 struct timekeeper *tk = &tk_core.timekeeper; 2198 2203 unsigned int seq; ··· 2209 2214 if (*cwsseq != tk->clock_was_set_seq) { 2210 2215 *cwsseq = tk->clock_was_set_seq; 2211 2216 *offs_real = tk->offs_real; 2217 + *offs_boot = tk->offs_boot; 2212 2218 *offs_tai = tk->offs_tai; 2213 2219 } 2214 2220
+1
kernel/time/timekeeping.h
··· 6 6 */ 7 7 extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, 8 8 ktime_t *offs_real, 9 + ktime_t *offs_boot, 9 10 ktime_t *offs_tai); 10 11 11 12 extern int timekeeping_valid_for_hres(void);
+1 -1
kernel/trace/trace.c
··· 1165 1165 { trace_clock, "perf", 1 }, 1166 1166 { ktime_get_mono_fast_ns, "mono", 1 }, 1167 1167 { ktime_get_raw_fast_ns, "mono_raw", 1 }, 1168 - { ktime_get_mono_fast_ns, "boot", 1 }, 1168 + { ktime_get_boot_fast_ns, "boot", 1 }, 1169 1169 ARCH_TRACE_CLOCKS 1170 1170 }; 1171 1171
+12
kernel/trace/trace_events_hist.c
··· 2466 2466 else if (strcmp(modifier, "usecs") == 0) 2467 2467 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS; 2468 2468 else { 2469 + hist_err("Invalid field modifier: ", modifier); 2469 2470 field = ERR_PTR(-EINVAL); 2470 2471 goto out; 2471 2472 } ··· 2482 2481 else { 2483 2482 field = trace_find_event_field(file->event_call, field_name); 2484 2483 if (!field || !field->size) { 2484 + hist_err("Couldn't find field: ", field_name); 2485 2485 field = ERR_PTR(-EINVAL); 2486 2486 goto out; 2487 2487 } ··· 4915 4913 seq_printf(m, "%s", field_name); 4916 4914 } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP) 4917 4915 seq_puts(m, "common_timestamp"); 4916 + 4917 + if (hist_field->flags) { 4918 + if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) && 4919 + !(hist_field->flags & HIST_FIELD_FL_EXPR)) { 4920 + const char *flags = get_hist_field_flags(hist_field); 4921 + 4922 + if (flags) 4923 + seq_printf(m, ".%s", flags); 4924 + } 4925 + } 4918 4926 } 4919 4927 4920 4928 static int event_hist_trigger_print(struct seq_file *m,
+14 -21
kernel/trace/trace_uprobe.c
··· 55 55 struct list_head list; 56 56 struct trace_uprobe_filter filter; 57 57 struct uprobe_consumer consumer; 58 + struct path path; 58 59 struct inode *inode; 59 60 char *filename; 60 61 unsigned long offset; ··· 290 289 for (i = 0; i < tu->tp.nr_args; i++) 291 290 traceprobe_free_probe_arg(&tu->tp.args[i]); 292 291 293 - iput(tu->inode); 292 + path_put(&tu->path); 294 293 kfree(tu->tp.call.class->system); 295 294 kfree(tu->tp.call.name); 296 295 kfree(tu->filename); ··· 364 363 static int create_trace_uprobe(int argc, char **argv) 365 364 { 366 365 struct trace_uprobe *tu; 367 - struct inode *inode; 368 366 char *arg, *event, *group, *filename; 369 367 char buf[MAX_EVENT_NAME_LEN]; 370 368 struct path path; ··· 371 371 bool is_delete, is_return; 372 372 int i, ret; 373 373 374 - inode = NULL; 375 374 ret = 0; 376 375 is_delete = false; 377 376 is_return = false; ··· 436 437 } 437 438 /* Find the last occurrence, in case the path contains ':' too. */ 438 439 arg = strrchr(argv[1], ':'); 439 - if (!arg) { 440 - ret = -EINVAL; 441 - goto fail_address_parse; 442 - } 440 + if (!arg) 441 + return -EINVAL; 443 442 444 443 *arg++ = '\0'; 445 444 filename = argv[1]; 446 445 ret = kern_path(filename, LOOKUP_FOLLOW, &path); 447 446 if (ret) 448 - goto fail_address_parse; 447 + return ret; 449 448 450 - inode = igrab(d_real_inode(path.dentry)); 451 - path_put(&path); 452 - 453 - if (!inode || !S_ISREG(inode->i_mode)) { 449 + if (!d_is_reg(path.dentry)) { 454 450 ret = -EINVAL; 455 451 goto fail_address_parse; 456 452 } ··· 484 490 goto fail_address_parse; 485 491 } 486 492 tu->offset = offset; 487 - tu->inode = inode; 493 + tu->path = path; 488 494 tu->filename = kstrdup(filename, GFP_KERNEL); 489 495 490 496 if (!tu->filename) { ··· 552 558 return ret; 553 559 554 560 fail_address_parse: 555 - iput(inode); 561 + path_put(&path); 556 562 557 563 pr_info("Failed to parse address or file.\n"); 558 564 ··· 916 922 goto err_flags; 917 923 918 924 tu->consumer.filter = filter; 
925 + tu->inode = d_real_inode(tu->path.dentry); 919 926 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); 920 927 if (ret) 921 928 goto err_buffer; ··· 962 967 WARN_ON(!uprobe_filter_is_empty(&tu->filter)); 963 968 964 969 uprobe_unregister(tu->inode, tu->offset, &tu->consumer); 970 + tu->inode = NULL; 965 971 tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE; 966 972 967 973 uprobe_buffer_disable(); ··· 1333 1337 create_local_trace_uprobe(char *name, unsigned long offs, bool is_return) 1334 1338 { 1335 1339 struct trace_uprobe *tu; 1336 - struct inode *inode; 1337 1340 struct path path; 1338 1341 int ret; 1339 1342 ··· 1340 1345 if (ret) 1341 1346 return ERR_PTR(ret); 1342 1347 1343 - inode = igrab(d_inode(path.dentry)); 1344 - path_put(&path); 1345 - 1346 - if (!inode || !S_ISREG(inode->i_mode)) { 1347 - iput(inode); 1348 + if (!d_is_reg(path.dentry)) { 1349 + path_put(&path); 1348 1350 return ERR_PTR(-EINVAL); 1349 1351 } 1350 1352 ··· 1356 1364 if (IS_ERR(tu)) { 1357 1365 pr_info("Failed to allocate trace_uprobe.(%d)\n", 1358 1366 (int)PTR_ERR(tu)); 1367 + path_put(&path); 1359 1368 return ERR_CAST(tu); 1360 1369 } 1361 1370 1362 1371 tu->offset = offs; 1363 - tu->inode = inode; 1372 + tu->path = path; 1364 1373 tu->filename = kstrdup(name, GFP_KERNEL); 1365 1374 init_trace_event_call(tu, &tu->tp.call); 1366 1375
+2 -2
kernel/tracepoint.c
··· 207 207 lockdep_is_held(&tracepoints_mutex)); 208 208 old = func_add(&tp_funcs, func, prio); 209 209 if (IS_ERR(old)) { 210 - WARN_ON_ONCE(1); 210 + WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM); 211 211 return PTR_ERR(old); 212 212 } 213 213 ··· 239 239 lockdep_is_held(&tracepoints_mutex)); 240 240 old = func_remove(&tp_funcs, func); 241 241 if (IS_ERR(old)) { 242 - WARN_ON_ONCE(1); 242 + WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM); 243 243 return PTR_ERR(old); 244 244 } 245 245
+9 -14
lib/errseq.c
··· 111 111 * errseq_sample() - Grab current errseq_t value. 112 112 * @eseq: Pointer to errseq_t to be sampled. 113 113 * 114 - * This function allows callers to sample an errseq_t value, marking it as 115 - * "seen" if required. 114 + * This function allows callers to initialise their errseq_t variable. 115 + * If the error has been "seen", new callers will not see an old error. 116 + * If there is an unseen error in @eseq, the caller of this function will 117 + * see it the next time it checks for an error. 116 118 * 119 + * Context: Any context. 117 120 * Return: The current errseq value. 118 121 */ 119 122 errseq_t errseq_sample(errseq_t *eseq) 120 123 { 121 124 errseq_t old = READ_ONCE(*eseq); 122 - errseq_t new = old; 123 125 124 - /* 125 - * For the common case of no errors ever having been set, we can skip 126 - * marking the SEEN bit. Once an error has been set, the value will 127 - * never go back to zero. 128 - */ 129 - if (old != 0) { 130 - new |= ERRSEQ_SEEN; 131 - if (old != new) 132 - cmpxchg(eseq, old, new); 133 - } 134 - return new; 126 + /* If nobody has seen this error yet, then we can be the first. */ 127 + if (!(old & ERRSEQ_SEEN)) 128 + old = 0; 129 + return old; 135 130 } 136 131 EXPORT_SYMBOL(errseq_sample); 137 132
+5 -6
lib/kobject.c
··· 233 233 234 234 /* be noisy on error issues */ 235 235 if (error == -EEXIST) 236 - WARN(1, 237 - "%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n", 238 - __func__, kobject_name(kobj)); 236 + pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n", 237 + __func__, kobject_name(kobj)); 239 238 else 240 - WARN(1, "%s failed for %s (error: %d parent: %s)\n", 241 - __func__, kobject_name(kobj), error, 242 - parent ? kobject_name(parent) : "'none'"); 239 + pr_err("%s failed for %s (error: %d parent: %s)\n", 240 + __func__, kobject_name(kobj), error, 241 + parent ? kobject_name(parent) : "'none'"); 243 242 } else 244 243 kobj->state_in_sysfs = 1; 245 244
+1 -1
lib/swiotlb.c
··· 737 737 swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, 738 738 DMA_ATTR_SKIP_CPU_SYNC); 739 739 out_warn: 740 - if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { 740 + if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { 741 741 dev_warn(dev, 742 742 "swiotlb: coherent allocation failed, size=%zu\n", 743 743 size);
+10 -1
mm/mmap.c
··· 100 100 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 101 101 }; 102 102 103 + #ifndef CONFIG_ARCH_HAS_FILTER_PGPROT 104 + static inline pgprot_t arch_filter_pgprot(pgprot_t prot) 105 + { 106 + return prot; 107 + } 108 + #endif 109 + 103 110 pgprot_t vm_get_page_prot(unsigned long vm_flags) 104 111 { 105 - return __pgprot(pgprot_val(protection_map[vm_flags & 112 + pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags & 106 113 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | 107 114 pgprot_val(arch_vm_get_page_prot(vm_flags))); 115 + 116 + return arch_filter_pgprot(ret); 108 117 } 109 118 EXPORT_SYMBOL(vm_get_page_prot); 110 119
+2 -2
net/bridge/br_if.c
··· 518 518 return -ELOOP; 519 519 } 520 520 521 - /* Device is already being bridged */ 522 - if (br_port_exists(dev)) 521 + /* Device has master upper dev */ 522 + if (netdev_master_upper_dev_get(dev)) 523 523 return -EBUSY; 524 524 525 525 /* No bridging devices that dislike that (e.g. wireless) */
+7
net/ceph/messenger.c
··· 2569 2569 int ret = 1; 2570 2570 2571 2571 dout("try_write start %p state %lu\n", con, con->state); 2572 + if (con->state != CON_STATE_PREOPEN && 2573 + con->state != CON_STATE_CONNECTING && 2574 + con->state != CON_STATE_NEGOTIATING && 2575 + con->state != CON_STATE_OPEN) 2576 + return 0; 2572 2577 2573 2578 more: 2574 2579 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); ··· 2599 2594 } 2600 2595 2601 2596 more_kvec: 2597 + BUG_ON(!con->sock); 2598 + 2602 2599 /* kvec data queued? */ 2603 2600 if (con->out_kvec_left) { 2604 2601 ret = write_partial_kvec(con);
+11 -3
net/ceph/mon_client.c
··· 209 209 __open_session(monc); 210 210 } 211 211 212 + static void un_backoff(struct ceph_mon_client *monc) 213 + { 214 + monc->hunt_mult /= 2; /* reduce by 50% */ 215 + if (monc->hunt_mult < 1) 216 + monc->hunt_mult = 1; 217 + dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult); 218 + } 219 + 212 220 /* 213 221 * Reschedule delayed work timer. 214 222 */ ··· 971 963 if (!monc->hunting) { 972 964 ceph_con_keepalive(&monc->con); 973 965 __validate_auth(monc); 966 + un_backoff(monc); 974 967 } 975 968 976 969 if (is_auth && ··· 1132 1123 dout("%s found mon%d\n", __func__, monc->cur_mon); 1133 1124 monc->hunting = false; 1134 1125 monc->had_a_connection = true; 1135 - monc->hunt_mult /= 2; /* reduce by 50% */ 1136 - if (monc->hunt_mult < 1) 1137 - monc->hunt_mult = 1; 1126 + un_backoff(monc); 1127 + __schedule_delayed(monc); 1138 1128 } 1139 1129 } 1140 1130
+4 -2
net/compat.c
··· 377 377 optname == SO_ATTACH_REUSEPORT_CBPF) 378 378 return do_set_attach_filter(sock, level, optname, 379 379 optval, optlen); 380 - if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) 380 + if (!COMPAT_USE_64BIT_TIME && 381 + (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)) 381 382 return do_set_sock_timeout(sock, level, optname, optval, optlen); 382 383 383 384 return sock_setsockopt(sock, level, optname, optval, optlen); ··· 449 448 static int compat_sock_getsockopt(struct socket *sock, int level, int optname, 450 449 char __user *optval, int __user *optlen) 451 450 { 452 - if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) 451 + if (!COMPAT_USE_64BIT_TIME && 452 + (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)) 453 453 return do_get_sock_timeout(sock, level, optname, optval, optlen); 454 454 return sock_getsockopt(sock, level, optname, optval, optlen); 455 455 }
+5
net/core/ethtool.c
··· 1032 1032 info_size = sizeof(info); 1033 1033 if (copy_from_user(&info, useraddr, info_size)) 1034 1034 return -EFAULT; 1035 + /* Since malicious users may modify the original data, 1036 + * we need to check whether FLOW_RSS is still requested. 1037 + */ 1038 + if (!(info.flow_type & FLOW_RSS)) 1039 + return -EINVAL; 1035 1040 } 1036 1041 1037 1042 if (info.cmd == ETHTOOL_GRXCLSRLALL) {
+1
net/core/filter.c
··· 3240 3240 skb_dst_set(skb, (struct dst_entry *) md); 3241 3241 3242 3242 info = &md->u.tun_info; 3243 + memset(info, 0, sizeof(*info)); 3243 3244 info->mode = IP_TUNNEL_INFO_TX; 3244 3245 3245 3246 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
+12 -2
net/dccp/ccids/ccid2.c
··· 126 126 DCCPF_SEQ_WMAX)); 127 127 } 128 128 129 + static void dccp_tasklet_schedule(struct sock *sk) 130 + { 131 + struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet; 132 + 133 + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { 134 + sock_hold(sk); 135 + __tasklet_schedule(t); 136 + } 137 + } 138 + 129 139 static void ccid2_hc_tx_rto_expire(struct timer_list *t) 130 140 { 131 141 struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer); ··· 176 166 177 167 /* if we were blocked before, we may now send cwnd=1 packet */ 178 168 if (sender_was_blocked) 179 - tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); 169 + dccp_tasklet_schedule(sk); 180 170 /* restart backed-off timer */ 181 171 sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); 182 172 out: ··· 716 706 done: 717 707 /* check if incoming Acks allow pending packets to be sent */ 718 708 if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) 719 - tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); 709 + dccp_tasklet_schedule(sk); 720 710 dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks); 721 711 } 722 712
+1 -1
net/dccp/timer.c
··· 232 232 else 233 233 dccp_write_xmit(sk); 234 234 bh_unlock_sock(sk); 235 + sock_put(sk); 235 236 } 236 237 237 238 static void dccp_write_xmit_timer(struct timer_list *t) ··· 241 240 struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk; 242 241 243 242 dccp_write_xmitlet((unsigned long)sk); 244 - sock_put(sk); 245 243 } 246 244 247 245 void dccp_init_xmit_timers(struct sock *sk)
+53 -65
net/ipv4/route.c
··· 709 709 fnhe->fnhe_gw = gw; 710 710 fnhe->fnhe_pmtu = pmtu; 711 711 fnhe->fnhe_mtu_locked = lock; 712 - fnhe->fnhe_expires = expires; 712 + fnhe->fnhe_expires = max(1UL, expires); 713 713 714 714 /* Exception created; mark the cached routes for the nexthop 715 715 * stale, so anyone caching it rechecks if this exception ··· 1297 1297 return mtu - lwtunnel_headroom(dst->lwtstate, mtu); 1298 1298 } 1299 1299 1300 + static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) 1301 + { 1302 + struct fnhe_hash_bucket *hash; 1303 + struct fib_nh_exception *fnhe, __rcu **fnhe_p; 1304 + u32 hval = fnhe_hashfun(daddr); 1305 + 1306 + spin_lock_bh(&fnhe_lock); 1307 + 1308 + hash = rcu_dereference_protected(nh->nh_exceptions, 1309 + lockdep_is_held(&fnhe_lock)); 1310 + hash += hval; 1311 + 1312 + fnhe_p = &hash->chain; 1313 + fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); 1314 + while (fnhe) { 1315 + if (fnhe->fnhe_daddr == daddr) { 1316 + rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( 1317 + fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); 1318 + fnhe_flush_routes(fnhe); 1319 + kfree_rcu(fnhe, rcu); 1320 + break; 1321 + } 1322 + fnhe_p = &fnhe->fnhe_next; 1323 + fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1324 + lockdep_is_held(&fnhe_lock)); 1325 + } 1326 + 1327 + spin_unlock_bh(&fnhe_lock); 1328 + } 1329 + 1300 1330 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr) 1301 1331 { 1302 1332 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions); ··· 1340 1310 1341 1311 for (fnhe = rcu_dereference(hash[hval].chain); fnhe; 1342 1312 fnhe = rcu_dereference(fnhe->fnhe_next)) { 1343 - if (fnhe->fnhe_daddr == daddr) 1313 + if (fnhe->fnhe_daddr == daddr) { 1314 + if (fnhe->fnhe_expires && 1315 + time_after(jiffies, fnhe->fnhe_expires)) { 1316 + ip_del_fnhe(nh, daddr); 1317 + break; 1318 + } 1344 1319 return fnhe; 1320 + } 1345 1321 } 1346 1322 return NULL; 1347 1323 } ··· 1672 1636 #endif 1673 1637 } 1674 
1638 1675 - static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr) 1676 - { 1677 - struct fnhe_hash_bucket *hash; 1678 - struct fib_nh_exception *fnhe, __rcu **fnhe_p; 1679 - u32 hval = fnhe_hashfun(daddr); 1680 - 1681 - spin_lock_bh(&fnhe_lock); 1682 - 1683 - hash = rcu_dereference_protected(nh->nh_exceptions, 1684 - lockdep_is_held(&fnhe_lock)); 1685 - hash += hval; 1686 - 1687 - fnhe_p = &hash->chain; 1688 - fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock)); 1689 - while (fnhe) { 1690 - if (fnhe->fnhe_daddr == daddr) { 1691 - rcu_assign_pointer(*fnhe_p, rcu_dereference_protected( 1692 - fnhe->fnhe_next, lockdep_is_held(&fnhe_lock))); 1693 - fnhe_flush_routes(fnhe); 1694 - kfree_rcu(fnhe, rcu); 1695 - break; 1696 - } 1697 - fnhe_p = &fnhe->fnhe_next; 1698 - fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1699 - lockdep_is_held(&fnhe_lock)); 1700 - } 1701 - 1702 - spin_unlock_bh(&fnhe_lock); 1703 - } 1704 - 1705 1639 /* called in rcu_read_lock() section */ 1706 1640 static int __mkroute_input(struct sk_buff *skb, 1707 1641 const struct fib_result *res, ··· 1725 1719 1726 1720 fnhe = find_exception(&FIB_RES_NH(*res), daddr); 1727 1721 if (do_cache) { 1728 - if (fnhe) { 1722 + if (fnhe) 1729 1723 rth = rcu_dereference(fnhe->fnhe_rth_input); 1730 - if (rth && rth->dst.expires && 1731 - time_after(jiffies, rth->dst.expires)) { 1732 - ip_del_fnhe(&FIB_RES_NH(*res), daddr); 1733 - fnhe = NULL; 1734 - } else { 1735 - goto rt_cache; 1736 - } 1737 - } 1738 - 1739 - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 1740 - 1741 - rt_cache: 1724 + else 1725 + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input); 1742 1726 if (rt_cache_valid(rth)) { 1743 1727 skb_dst_set_noref(skb, &rth->dst); 1744 1728 goto out; ··· 2212 2216 * the loopback interface and the IP_PKTINFO ipi_ifindex will 2213 2217 * be set to the loopback interface as well. 
2214 2218 */ 2215 - fi = NULL; 2219 + do_cache = false; 2216 2220 } 2217 2221 2218 2222 fnhe = NULL; 2219 2223 do_cache &= fi != NULL; 2220 - if (do_cache) { 2224 + if (fi) { 2221 2225 struct rtable __rcu **prth; 2222 2226 struct fib_nh *nh = &FIB_RES_NH(*res); 2223 2227 2224 2228 fnhe = find_exception(nh, fl4->daddr); 2229 + if (!do_cache) 2230 + goto add; 2225 2231 if (fnhe) { 2226 2232 prth = &fnhe->fnhe_rth_output; 2227 - rth = rcu_dereference(*prth); 2228 - if (rth && rth->dst.expires && 2229 - time_after(jiffies, rth->dst.expires)) { 2230 - ip_del_fnhe(nh, fl4->daddr); 2231 - fnhe = NULL; 2232 - } else { 2233 - goto rt_cache; 2233 + } else { 2234 + if (unlikely(fl4->flowi4_flags & 2235 + FLOWI_FLAG_KNOWN_NH && 2236 + !(nh->nh_gw && 2237 + nh->nh_scope == RT_SCOPE_LINK))) { 2238 + do_cache = false; 2239 + goto add; 2234 2240 } 2241 + prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); 2235 2242 } 2236 - 2237 - if (unlikely(fl4->flowi4_flags & 2238 - FLOWI_FLAG_KNOWN_NH && 2239 - !(nh->nh_gw && 2240 - nh->nh_scope == RT_SCOPE_LINK))) { 2241 - do_cache = false; 2242 - goto add; 2243 - } 2244 - prth = raw_cpu_ptr(nh->nh_pcpu_rth_output); 2245 2243 rth = rcu_dereference(*prth); 2246 - 2247 - rt_cache: 2248 2244 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst)) 2249 2245 return rth; 2250 2246 }
+4 -3
net/ipv4/tcp.c
··· 697 697 { 698 698 return skb->len < size_goal && 699 699 sock_net(sk)->ipv4.sysctl_tcp_autocorking && 700 - skb != tcp_write_queue_head(sk) && 700 + !tcp_rtx_queue_empty(sk) && 701 701 refcount_read(&sk->sk_wmem_alloc) > skb->truesize; 702 702 } 703 703 ··· 1204 1204 uarg->zerocopy = 0; 1205 1205 } 1206 1206 1207 - if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) { 1207 + if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) && 1208 + !tp->repair) { 1208 1209 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); 1209 1210 if (err == -EINPROGRESS && copied_syn > 0) 1210 1211 goto out; ··· 2674 2673 case TCP_REPAIR_QUEUE: 2675 2674 if (!tp->repair) 2676 2675 err = -EPERM; 2677 - else if (val < TCP_QUEUES_NR) 2676 + else if ((unsigned int)val < TCP_QUEUES_NR) 2678 2677 tp->repair_queue = val; 2679 2678 else 2680 2679 err = -EINVAL;
+3 -1
net/ipv4/tcp_bbr.c
··· 806 806 } 807 807 } 808 808 } 809 - bbr->idle_restart = 0; 809 + /* Restart after idle ends only once we process a new S/ACK for data */ 810 + if (rs->delivered > 0) 811 + bbr->idle_restart = 0; 810 812 } 811 813 812 814 static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
+6 -1
net/ipv6/route.c
··· 1835 1835 const struct ipv6hdr *inner_iph; 1836 1836 const struct icmp6hdr *icmph; 1837 1837 struct ipv6hdr _inner_iph; 1838 + struct icmp6hdr _icmph; 1838 1839 1839 1840 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6)) 1840 1841 goto out; 1841 1842 1842 - icmph = icmp6_hdr(skb); 1843 + icmph = skb_header_pointer(skb, skb_transport_offset(skb), 1844 + sizeof(_icmph), &_icmph); 1845 + if (!icmph) 1846 + goto out; 1847 + 1843 1848 if (icmph->icmp6_type != ICMPV6_DEST_UNREACH && 1844 1849 icmph->icmp6_type != ICMPV6_PKT_TOOBIG && 1845 1850 icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
+2 -1
net/rds/ib_cm.c
··· 547 547 rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, 548 548 ic->i_send_cq, ic->i_recv_cq); 549 549 550 - return ret; 550 + goto out; 551 551 552 552 sends_out: 553 553 vfree(ic->i_sends); ··· 572 572 ic->i_send_cq = NULL; 573 573 rds_ibdev_out: 574 574 rds_ib_remove_conn(rds_ibdev, conn); 575 + out: 575 576 rds_ib_dev_put(rds_ibdev); 576 577 577 578 return ret;
+1
net/rds/recv.c
··· 558 558 struct rds_cmsg_rx_trace t; 559 559 int i, j; 560 560 561 + memset(&t, 0, sizeof(t)); 561 562 inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock(); 562 563 t.rx_traces = rs->rs_rx_traces; 563 564 for (i = 0; i < rs->rs_rx_traces; i++) {
+25 -12
net/sched/sch_fq.c
··· 128 128 return f->next == &detached; 129 129 } 130 130 131 + static bool fq_flow_is_throttled(const struct fq_flow *f) 132 + { 133 + return f->next == &throttled; 134 + } 135 + 136 + static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) 137 + { 138 + if (head->first) 139 + head->last->next = flow; 140 + else 141 + head->first = flow; 142 + head->last = flow; 143 + flow->next = NULL; 144 + } 145 + 146 + static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f) 147 + { 148 + rb_erase(&f->rate_node, &q->delayed); 149 + q->throttled_flows--; 150 + fq_flow_add_tail(&q->old_flows, f); 151 + } 152 + 131 153 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) 132 154 { 133 155 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; ··· 177 155 178 156 static struct kmem_cache *fq_flow_cachep __read_mostly; 179 157 180 - static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow) 181 - { 182 - if (head->first) 183 - head->last->next = flow; 184 - else 185 - head->first = flow; 186 - head->last = flow; 187 - flow->next = NULL; 188 - } 189 158 190 159 /* limit number of collected flows per round */ 191 160 #define FQ_GC_MAX 8 ··· 280 267 f->socket_hash != sk->sk_hash)) { 281 268 f->credit = q->initial_quantum; 282 269 f->socket_hash = sk->sk_hash; 270 + if (fq_flow_is_throttled(f)) 271 + fq_flow_unset_throttled(q, f); 283 272 f->time_next_packet = 0ULL; 284 273 } 285 274 return f; ··· 453 438 q->time_next_delayed_flow = f->time_next_packet; 454 439 break; 455 440 } 456 - rb_erase(p, &q->delayed); 457 - q->throttled_flows--; 458 - fq_flow_add_tail(&q->old_flows, f); 441 + fq_flow_unset_throttled(q, f); 459 442 } 460 443 } 461 444
+1 -1
net/sctp/inqueue.c
··· 217 217 skb_pull(chunk->skb, sizeof(*ch)); 218 218 chunk->subh.v = NULL; /* Subheader is no longer valid. */ 219 219 220 - if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) { 220 + if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) { 221 221 /* This is not a singleton */ 222 222 chunk->singleton = 0; 223 223 } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+3
net/sctp/ipv6.c
··· 895 895 if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) 896 896 return 1; 897 897 898 + if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET) 899 + return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr; 900 + 898 901 return __sctp_v6_cmp_addr(addr1, addr2); 899 902 } 900 903
+7 -1
net/sctp/sm_statefuns.c
··· 1794 1794 GFP_ATOMIC)) 1795 1795 goto nomem; 1796 1796 1797 + if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) 1798 + goto nomem; 1799 + 1797 1800 /* Make sure no new addresses are being added during the 1798 1801 * restart. Though this is a pretty complicated attack 1799 1802 * since you'd have to get inside the cookie. ··· 1907 1904 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 1908 1905 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init, 1909 1906 GFP_ATOMIC)) 1907 + goto nomem; 1908 + 1909 + if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC)) 1910 1910 goto nomem; 1911 1911 1912 1912 /* Update the content of current association. */ ··· 2056 2050 } 2057 2051 } 2058 2052 2059 - repl = sctp_make_cookie_ack(new_asoc, chunk); 2053 + repl = sctp_make_cookie_ack(asoc, chunk); 2060 2054 if (!repl) 2061 2055 goto nomem; 2062 2056
+2
net/sctp/stream.c
··· 240 240 241 241 new->out = NULL; 242 242 new->in = NULL; 243 + new->outcnt = 0; 244 + new->incnt = 0; 243 245 } 244 246 245 247 static int sctp_send_reconf(struct sctp_association *asoc,
+29 -32
net/smc/af_smc.c
··· 292 292 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); 293 293 } 294 294 295 + /* register a new rmb */ 296 + static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc) 297 + { 298 + /* register memory region for new rmb */ 299 + if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) { 300 + rmb_desc->regerr = 1; 301 + return -EFAULT; 302 + } 303 + return 0; 304 + } 305 + 295 306 static int smc_clnt_conf_first_link(struct smc_sock *smc) 296 307 { 297 308 struct smc_link_group *lgr = smc->conn.lgr; ··· 332 321 333 322 smc_wr_remember_qp_attr(link); 334 323 335 - rc = smc_wr_reg_send(link, 336 - smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]); 337 - if (rc) 324 + if (smc_reg_rmb(link, smc->conn.rmb_desc)) 338 325 return SMC_CLC_DECL_INTERR; 339 326 340 327 /* send CONFIRM LINK response over RoCE fabric */ ··· 482 473 goto decline_rdma_unlock; 483 474 } 484 475 } else { 485 - struct smc_buf_desc *buf_desc = smc->conn.rmb_desc; 486 - 487 - if (!buf_desc->reused) { 488 - /* register memory region for new rmb */ 489 - rc = smc_wr_reg_send(link, 490 - buf_desc->mr_rx[SMC_SINGLE_LINK]); 491 - if (rc) { 476 + if (!smc->conn.rmb_desc->reused) { 477 + if (smc_reg_rmb(link, smc->conn.rmb_desc)) { 492 478 reason_code = SMC_CLC_DECL_INTERR; 493 479 goto decline_rdma_unlock; 494 480 } ··· 723 719 724 720 link = &lgr->lnk[SMC_SINGLE_LINK]; 725 721 726 - rc = smc_wr_reg_send(link, 727 - smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]); 728 - if (rc) 722 + if (smc_reg_rmb(link, smc->conn.rmb_desc)) 729 723 return SMC_CLC_DECL_INTERR; 730 724 731 725 /* send CONFIRM LINK request to client over the RoCE fabric */ ··· 856 854 smc_rx_init(new_smc); 857 855 858 856 if (local_contact != SMC_FIRST_CONTACT) { 859 - struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc; 860 - 861 - if (!buf_desc->reused) { 862 - /* register memory region for new rmb */ 863 - rc = smc_wr_reg_send(link, 864 - buf_desc->mr_rx[SMC_SINGLE_LINK]); 865 - if (rc) { 857 + 
if (!new_smc->conn.rmb_desc->reused) { 858 + if (smc_reg_rmb(link, new_smc->conn.rmb_desc)) { 866 859 reason_code = SMC_CLC_DECL_INTERR; 867 860 goto decline_rdma_unlock; 868 861 } ··· 975 978 } 976 979 977 980 out: 978 - if (lsmc->clcsock) { 979 - sock_release(lsmc->clcsock); 980 - lsmc->clcsock = NULL; 981 - } 982 981 release_sock(lsk); 983 982 sock_put(&lsmc->sk); /* sock_hold in smc_listen */ 984 983 } ··· 1163 1170 /* delegate to CLC child sock */ 1164 1171 release_sock(sk); 1165 1172 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); 1166 - /* if non-blocking connect finished ... */ 1167 1173 lock_sock(sk); 1168 - if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) { 1169 - sk->sk_err = smc->clcsock->sk->sk_err; 1170 - if (sk->sk_err) { 1171 - mask |= EPOLLERR; 1172 - } else { 1174 + sk->sk_err = smc->clcsock->sk->sk_err; 1175 + if (sk->sk_err) { 1176 + mask |= EPOLLERR; 1177 + } else { 1178 + /* if non-blocking connect finished ... */ 1179 + if (sk->sk_state == SMC_INIT && 1180 + mask & EPOLLOUT && 1181 + smc->clcsock->sk->sk_state != TCP_CLOSE) { 1173 1182 rc = smc_connect_rdma(smc); 1174 1183 if (rc < 0) 1175 1184 mask |= EPOLLERR; ··· 1315 1320 1316 1321 smc = smc_sk(sk); 1317 1322 lock_sock(sk); 1318 - if (sk->sk_state != SMC_ACTIVE) 1323 + if (sk->sk_state != SMC_ACTIVE) { 1324 + release_sock(sk); 1319 1325 goto out; 1326 + } 1327 + release_sock(sk); 1320 1328 if (smc->use_fallback) 1321 1329 rc = kernel_sendpage(smc->clcsock, page, offset, 1322 1330 size, flags); ··· 1327 1329 rc = sock_no_sendpage(sock, page, offset, size, flags); 1328 1330 1329 1331 out: 1330 - release_sock(sk); 1331 1332 return rc; 1332 1333 } 1333 1334
+19 -3
net/smc/smc_core.c
··· 32 32 33 33 static u32 smc_lgr_num; /* unique link group number */ 34 34 35 + static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk, 36 + bool is_rmb); 37 + 35 38 static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) 36 39 { 37 40 /* client link group creation always follows the server link group ··· 237 234 conn->sndbuf_size = 0; 238 235 } 239 236 if (conn->rmb_desc) { 240 - conn->rmb_desc->reused = true; 241 - conn->rmb_desc->used = 0; 242 - conn->rmbe_size = 0; 237 + if (!conn->rmb_desc->regerr) { 238 + conn->rmb_desc->reused = 1; 239 + conn->rmb_desc->used = 0; 240 + conn->rmbe_size = 0; 241 + } else { 242 + /* buf registration failed, reuse not possible */ 243 + struct smc_link_group *lgr = conn->lgr; 244 + struct smc_link *lnk; 245 + 246 + write_lock_bh(&lgr->rmbs_lock); 247 + list_del(&conn->rmb_desc->list); 248 + write_unlock_bh(&lgr->rmbs_lock); 249 + 250 + lnk = &lgr->lnk[SMC_SINGLE_LINK]; 251 + smc_buf_free(conn->rmb_desc, lnk, true); 252 + } 243 253 } 244 254 } 245 255
+2 -1
net/smc/smc_core.h
··· 123 123 */ 124 124 u32 order; /* allocation order */ 125 125 u32 used; /* currently used / unused */ 126 - bool reused; /* new created / reused */ 126 + u8 reused : 1; /* new created / reused */ 127 + u8 regerr : 1; /* err during registration */ 127 128 }; 128 129 129 130 struct smc_rtoken { /* address/key of remote RMB */
+1 -1
net/tipc/node.c
··· 2244 2244 2245 2245 rtnl_lock(); 2246 2246 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) { 2247 - err = __tipc_nl_add_monitor(net, &msg, prev_bearer); 2247 + err = __tipc_nl_add_monitor(net, &msg, bearer_id); 2248 2248 if (err) 2249 2249 break; 2250 2250 }
+7
net/tls/tls_main.c
··· 114 114 size = sg->length - offset; 115 115 offset += sg->offset; 116 116 117 + ctx->in_tcp_sendpages = true; 117 118 while (1) { 118 119 if (sg_is_last(sg)) 119 120 sendpage_flags = flags; ··· 149 148 } 150 149 151 150 clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags); 151 + ctx->in_tcp_sendpages = false; 152 + ctx->sk_write_space(sk); 152 153 153 154 return 0; 154 155 } ··· 219 216 static void tls_write_space(struct sock *sk) 220 217 { 221 218 struct tls_context *ctx = tls_get_ctx(sk); 219 + 220 + /* We are already sending pages, ignore notification */ 221 + if (ctx->in_tcp_sendpages) 222 + return; 222 223 223 224 if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) { 224 225 gfp_t sk_allocation = sk->sk_allocation;
+5 -2
samples/sockmap/Makefile
··· 65 65 # asm/sysreg.h - inline assembly used by it is incompatible with llvm. 66 66 # But, there is no easy way to fix it, so just exclude it since it is 67 67 # useless for BPF samples. 68 + # 69 + # -target bpf option required with SK_MSG programs, this is to ensure 70 + # reading 'void *' data types for data and data_end are __u64 reads. 68 71 $(obj)/%.o: $(src)/%.c 69 72 $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ 70 73 -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ 71 74 -Wno-compare-distinct-pointer-types \ 72 75 -Wno-gnu-variable-sized-type-not-at-end \ 73 76 -Wno-address-of-packed-member -Wno-tautological-compare \ 74 - -Wno-unknown-warning-option \ 75 - -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ 77 + -Wno-unknown-warning-option -O2 -target bpf \ 78 + -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
+1 -1
sound/core/control.c
··· 1492 1492 int op_flag) 1493 1493 { 1494 1494 struct snd_ctl_tlv header; 1495 - unsigned int *container; 1495 + unsigned int __user *container; 1496 1496 unsigned int container_size; 1497 1497 struct snd_kcontrol *kctl; 1498 1498 struct snd_ctl_elem_id id;
+4 -3
sound/core/pcm_compat.c
··· 27 27 s32 __user *src) 28 28 { 29 29 snd_pcm_sframes_t delay; 30 + int err; 30 31 31 - delay = snd_pcm_delay(substream); 32 - if (delay < 0) 33 - return delay; 32 + err = snd_pcm_delay(substream, &delay); 33 + if (err) 34 + return err; 34 35 if (put_user(delay, src)) 35 36 return -EFAULT; 36 37 return 0;
+15 -15
sound/core/pcm_native.c
··· 2692 2692 return err; 2693 2693 } 2694 2694 2695 - static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream) 2695 + static int snd_pcm_delay(struct snd_pcm_substream *substream, 2696 + snd_pcm_sframes_t *delay) 2696 2697 { 2697 2698 struct snd_pcm_runtime *runtime = substream->runtime; 2698 2699 int err; ··· 2709 2708 n += runtime->delay; 2710 2709 } 2711 2710 snd_pcm_stream_unlock_irq(substream); 2712 - return err < 0 ? err : n; 2711 + if (!err) 2712 + *delay = n; 2713 + return err; 2713 2714 } 2714 2715 2715 2716 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream, ··· 2754 2751 sync_ptr.s.status.hw_ptr = status->hw_ptr; 2755 2752 sync_ptr.s.status.tstamp = status->tstamp; 2756 2753 sync_ptr.s.status.suspended_state = status->suspended_state; 2754 + sync_ptr.s.status.audio_tstamp = status->audio_tstamp; 2757 2755 snd_pcm_stream_unlock_irq(substream); 2758 2756 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr))) 2759 2757 return -EFAULT; ··· 2920 2916 return snd_pcm_hwsync(substream); 2921 2917 case SNDRV_PCM_IOCTL_DELAY: 2922 2918 { 2923 - snd_pcm_sframes_t delay = snd_pcm_delay(substream); 2919 + snd_pcm_sframes_t delay; 2924 2920 snd_pcm_sframes_t __user *res = arg; 2921 + int err; 2925 2922 2926 - if (delay < 0) 2927 - return delay; 2923 + err = snd_pcm_delay(substream, &delay); 2924 + if (err) 2925 + return err; 2928 2926 if (put_user(delay, res)) 2929 2927 return -EFAULT; 2930 2928 return 0; ··· 3014 3008 case SNDRV_PCM_IOCTL_DROP: 3015 3009 return snd_pcm_drop(substream); 3016 3010 case SNDRV_PCM_IOCTL_DELAY: 3017 - { 3018 - result = snd_pcm_delay(substream); 3019 - if (result < 0) 3020 - return result; 3021 - *frames = result; 3022 - return 0; 3023 - } 3011 + return snd_pcm_delay(substream, frames); 3024 3012 default: 3025 3013 return -EINVAL; 3026 3014 } ··· 3234 3234 /* 3235 3235 * mmap status record 3236 3236 */ 3237 - static int snd_pcm_mmap_status_fault(struct vm_fault *vmf) 3237 + static vm_fault_t 
snd_pcm_mmap_status_fault(struct vm_fault *vmf) 3238 3238 { 3239 3239 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3240 3240 struct snd_pcm_runtime *runtime; ··· 3270 3270 /* 3271 3271 * mmap control record 3272 3272 */ 3273 - static int snd_pcm_mmap_control_fault(struct vm_fault *vmf) 3273 + static vm_fault_t snd_pcm_mmap_control_fault(struct vm_fault *vmf) 3274 3274 { 3275 3275 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3276 3276 struct snd_pcm_runtime *runtime; ··· 3359 3359 /* 3360 3360 * fault callback for mmapping a RAM page 3361 3361 */ 3362 - static int snd_pcm_mmap_data_fault(struct vm_fault *vmf) 3362 + static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf) 3363 3363 { 3364 3364 struct snd_pcm_substream *substream = vmf->vma->vm_private_data; 3365 3365 struct snd_pcm_runtime *runtime;
+9 -6
sound/core/seq/oss/seq_oss_event.c
··· 26 26 #include <sound/seq_oss_legacy.h> 27 27 #include "seq_oss_readq.h" 28 28 #include "seq_oss_writeq.h" 29 + #include <linux/nospec.h> 29 30 30 31 31 32 /* ··· 288 287 { 289 288 struct seq_oss_synthinfo *info; 290 289 291 - if (!snd_seq_oss_synth_is_valid(dp, dev)) 290 + info = snd_seq_oss_synth_info(dp, dev); 291 + if (!info) 292 292 return -ENXIO; 293 293 294 - info = &dp->synths[dev]; 295 294 switch (info->arg.event_passing) { 296 295 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 297 296 if (! info->ch || ch < 0 || ch >= info->nr_voices) { ··· 299 298 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 300 299 } 301 300 301 + ch = array_index_nospec(ch, info->nr_voices); 302 302 if (note == 255 && info->ch[ch].note >= 0) { 303 303 /* volume control */ 304 304 int type; ··· 349 347 { 350 348 struct seq_oss_synthinfo *info; 351 349 352 - if (!snd_seq_oss_synth_is_valid(dp, dev)) 350 + info = snd_seq_oss_synth_info(dp, dev); 351 + if (!info) 353 352 return -ENXIO; 354 353 355 - info = &dp->synths[dev]; 356 354 switch (info->arg.event_passing) { 357 355 case SNDRV_SEQ_OSS_PROCESS_EVENTS: 358 356 if (! info->ch || ch < 0 || ch >= info->nr_voices) { ··· 360 358 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev); 361 359 } 362 360 361 + ch = array_index_nospec(ch, info->nr_voices); 363 362 if (info->ch[ch].note >= 0) { 364 363 note = info->ch[ch].note; 365 364 info->ch[ch].vel = 0; ··· 384 381 static int 385 382 set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev) 386 383 { 387 - if (! snd_seq_oss_synth_is_valid(dp, dev)) 384 + if (!snd_seq_oss_synth_info(dp, dev)) 388 385 return -ENXIO; 389 386 390 387 ev->type = type; ··· 402 399 static int 403 400 set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev) 404 401 { 405 - if (! 
snd_seq_oss_synth_is_valid(dp, dev)) 402 + if (!snd_seq_oss_synth_info(dp, dev)) 406 403 return -ENXIO; 407 404 408 405 ev->type = type;
+2
sound/core/seq/oss/seq_oss_midi.c
··· 29 29 #include "../seq_lock.h" 30 30 #include <linux/init.h> 31 31 #include <linux/slab.h> 32 + #include <linux/nospec.h> 32 33 33 34 34 35 /* ··· 316 315 { 317 316 if (dev < 0 || dev >= dp->max_mididev) 318 317 return NULL; 318 + dev = array_index_nospec(dev, dp->max_mididev); 319 319 return get_mdev(dev); 320 320 } 321 321
+49 -36
sound/core/seq/oss/seq_oss_synth.c
··· 26 26 #include <linux/init.h> 27 27 #include <linux/module.h> 28 28 #include <linux/slab.h> 29 + #include <linux/nospec.h> 29 30 30 31 /* 31 32 * constants ··· 340 339 dp->max_synthdev = 0; 341 340 } 342 341 343 - /* 344 - * check if the specified device is MIDI mapped device 345 - */ 346 - static int 347 - is_midi_dev(struct seq_oss_devinfo *dp, int dev) 342 + static struct seq_oss_synthinfo * 343 + get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev) 348 344 { 349 345 if (dev < 0 || dev >= dp->max_synthdev) 350 - return 0; 351 - if (dp->synths[dev].is_midi) 352 - return 1; 353 - return 0; 346 + return NULL; 347 + dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS); 348 + return &dp->synths[dev]; 354 349 } 355 350 356 351 /* ··· 356 359 get_synthdev(struct seq_oss_devinfo *dp, int dev) 357 360 { 358 361 struct seq_oss_synth *rec; 359 - if (dev < 0 || dev >= dp->max_synthdev) 362 + struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev); 363 + 364 + if (!info) 360 365 return NULL; 361 - if (! dp->synths[dev].opened) 366 + if (!info->opened) 362 367 return NULL; 363 - if (dp->synths[dev].is_midi) 364 - return &midi_synth_dev; 365 - if ((rec = get_sdev(dev)) == NULL) 366 - return NULL; 368 + if (info->is_midi) { 369 + rec = &midi_synth_dev; 370 + snd_use_lock_use(&rec->use_lock); 371 + } else { 372 + rec = get_sdev(dev); 373 + if (!rec) 374 + return NULL; 375 + } 367 376 if (! rec->opened) { 368 377 snd_use_lock_free(&rec->use_lock); 369 378 return NULL; ··· 405 402 struct seq_oss_synth *rec; 406 403 struct seq_oss_synthinfo *info; 407 404 408 - if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev)) 409 - return; 410 - info = &dp->synths[dev]; 411 - if (! 
info->opened) 405 + info = get_synthinfo_nospec(dp, dev); 406 + if (!info || !info->opened) 412 407 return; 413 408 if (info->sysex) 414 409 info->sysex->len = 0; /* reset sysex */ ··· 455 454 const char __user *buf, int p, int c) 456 455 { 457 456 struct seq_oss_synth *rec; 457 + struct seq_oss_synthinfo *info; 458 458 int rc; 459 459 460 - if (dev < 0 || dev >= dp->max_synthdev) 460 + info = get_synthinfo_nospec(dp, dev); 461 + if (!info) 461 462 return -ENXIO; 462 463 463 - if (is_midi_dev(dp, dev)) 464 + if (info->is_midi) 464 465 return 0; 465 466 if ((rec = get_synthdev(dp, dev)) == NULL) 466 467 return -ENXIO; ··· 470 467 if (rec->oper.load_patch == NULL) 471 468 rc = -ENXIO; 472 469 else 473 - rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c); 470 + rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c); 474 471 snd_use_lock_free(&rec->use_lock); 475 472 return rc; 476 473 } 477 474 478 475 /* 479 - * check if the device is valid synth device 476 + * check if the device is valid synth device and return the synth info 480 477 */ 481 - int 482 - snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev) 478 + struct seq_oss_synthinfo * 479 + snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev) 483 480 { 484 481 struct seq_oss_synth *rec; 482 + 485 483 rec = get_synthdev(dp, dev); 486 484 if (rec) { 487 485 snd_use_lock_free(&rec->use_lock); 488 - return 1; 486 + return get_synthinfo_nospec(dp, dev); 489 487 } 490 - return 0; 488 + return NULL; 491 489 } 492 490 493 491 ··· 503 499 int i, send; 504 500 unsigned char *dest; 505 501 struct seq_oss_synth_sysex *sysex; 502 + struct seq_oss_synthinfo *info; 506 503 507 - if (! 
snd_seq_oss_synth_is_valid(dp, dev)) 504 + info = snd_seq_oss_synth_info(dp, dev); 505 + if (!info) 508 506 return -ENXIO; 509 507 510 - sysex = dp->synths[dev].sysex; 508 + sysex = info->sysex; 511 509 if (sysex == NULL) { 512 510 sysex = kzalloc(sizeof(*sysex), GFP_KERNEL); 513 511 if (sysex == NULL) 514 512 return -ENOMEM; 515 - dp->synths[dev].sysex = sysex; 513 + info->sysex = sysex; 516 514 } 517 515 518 516 send = 0; ··· 559 553 int 560 554 snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev) 561 555 { 562 - if (! snd_seq_oss_synth_is_valid(dp, dev)) 556 + struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev); 557 + 558 + if (!info) 563 559 return -EINVAL; 564 - snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client, 565 - dp->synths[dev].arg.addr.port); 560 + snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client, 561 + info->arg.addr.port); 566 562 return 0; 567 563 } 568 564 ··· 576 568 snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr) 577 569 { 578 570 struct seq_oss_synth *rec; 571 + struct seq_oss_synthinfo *info; 579 572 int rc; 580 573 581 - if (is_midi_dev(dp, dev)) 574 + info = get_synthinfo_nospec(dp, dev); 575 + if (!info || info->is_midi) 582 576 return -ENXIO; 583 577 if ((rec = get_synthdev(dp, dev)) == NULL) 584 578 return -ENXIO; 585 579 if (rec->oper.ioctl == NULL) 586 580 rc = -ENXIO; 587 581 else 588 - rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr); 582 + rc = rec->oper.ioctl(&info->arg, cmd, addr); 589 583 snd_use_lock_free(&rec->use_lock); 590 584 return rc; 591 585 } ··· 599 589 int 600 590 snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev) 601 591 { 602 - if (! 
snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev)) 592 + struct seq_oss_synthinfo *info; 593 + 594 + info = snd_seq_oss_synth_info(dp, dev); 595 + if (!info || info->is_midi) 603 596 return -ENXIO; 604 597 ev->type = SNDRV_SEQ_EVENT_OSS; 605 598 memcpy(ev->data.raw8.d, data, 8);
+2 -1
sound/core/seq/oss/seq_oss_synth.h
··· 37 37 void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev); 38 38 int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt, 39 39 const char __user *buf, int p, int c); 40 - int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev); 40 + struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, 41 + int dev); 41 42 int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, 42 43 struct snd_seq_event *ev); 43 44 int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
+5 -2
sound/drivers/opl3/opl3_synth.c
··· 21 21 22 22 #include <linux/slab.h> 23 23 #include <linux/export.h> 24 + #include <linux/nospec.h> 24 25 #include <sound/opl3.h> 25 26 #include <sound/asound_fm.h> 26 27 ··· 449 448 { 450 449 unsigned short reg_side; 451 450 unsigned char op_offset; 452 - unsigned char voice_offset; 451 + unsigned char voice_offset, voice_op; 453 452 454 453 unsigned short opl3_reg; 455 454 unsigned char reg_val; ··· 474 473 voice_offset = voice->voice - MAX_OPL2_VOICES; 475 474 } 476 475 /* Get register offset of operator */ 477 - op_offset = snd_opl3_regmap[voice_offset][voice->op]; 476 + voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES); 477 + voice_op = array_index_nospec(voice->op, 4); 478 + op_offset = snd_opl3_regmap[voice_offset][voice_op]; 478 479 479 480 reg_val = 0x00; 480 481 /* Set amplitude modulation (tremolo) effect */
+1 -1
sound/firewire/dice/dice-stream.c
··· 435 435 err = init_stream(dice, AMDTP_IN_STREAM, i); 436 436 if (err < 0) { 437 437 for (; i >= 0; i--) 438 - destroy_stream(dice, AMDTP_OUT_STREAM, i); 438 + destroy_stream(dice, AMDTP_IN_STREAM, i); 439 439 goto end; 440 440 } 441 441 }
+1 -1
sound/firewire/dice/dice.c
··· 14 14 #define OUI_WEISS 0x001c6a 15 15 #define OUI_LOUD 0x000ff2 16 16 #define OUI_FOCUSRITE 0x00130e 17 - #define OUI_TCELECTRONIC 0x001486 17 + #define OUI_TCELECTRONIC 0x000166 18 18 19 19 #define DICE_CATEGORY_ID 0x04 20 20 #define WEISS_CATEGORY_ID 0x00
+9 -4
sound/pci/asihpi/hpimsginit.c
··· 23 23 24 24 #include "hpi_internal.h" 25 25 #include "hpimsginit.h" 26 + #include <linux/nospec.h> 26 27 27 28 /* The actual message size for each object type */ 28 29 static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT; ··· 40 39 { 41 40 u16 size; 42 41 43 - if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 42 + if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) { 43 + object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1); 44 44 size = msg_size[object]; 45 - else 45 + } else { 46 46 size = sizeof(*phm); 47 + } 47 48 48 49 memset(phm, 0, size); 49 50 phm->size = size; ··· 69 66 { 70 67 u16 size; 71 68 72 - if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) 69 + if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) { 70 + object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1); 73 71 size = res_size[object]; 74 - else 72 + } else { 75 73 size = sizeof(*phr); 74 + } 76 75 77 76 memset(phr, 0, sizeof(*phr)); 78 77 phr->size = size;
+3 -1
sound/pci/asihpi/hpioctl.c
··· 33 33 #include <linux/stringify.h> 34 34 #include <linux/module.h> 35 35 #include <linux/vmalloc.h> 36 + #include <linux/nospec.h> 36 37 37 38 #ifdef MODULE_FIRMWARE 38 39 MODULE_FIRMWARE("asihpi/dsp5000.bin"); ··· 187 186 struct hpi_adapter *pa = NULL; 188 187 189 188 if (hm->h.adapter_index < ARRAY_SIZE(adapters)) 190 - pa = &adapters[hm->h.adapter_index]; 189 + pa = &adapters[array_index_nospec(hm->h.adapter_index, 190 + ARRAY_SIZE(adapters))]; 191 191 192 192 if (!pa || !pa->adapter || !pa->adapter->type) { 193 193 hpi_init_response(&hr->r0, hm->h.object,
+11 -1
sound/pci/hda/hda_hwdep.c
··· 21 21 #include <linux/init.h> 22 22 #include <linux/slab.h> 23 23 #include <linux/compat.h> 24 + #include <linux/nospec.h> 24 25 #include <sound/core.h> 25 26 #include "hda_codec.h" 26 27 #include "hda_local.h" ··· 52 51 53 52 if (get_user(verb, &arg->verb)) 54 53 return -EFAULT; 55 - res = get_wcaps(codec, verb >> 24); 54 + /* open-code get_wcaps(verb>>24) with nospec */ 55 + verb >>= 24; 56 + if (verb < codec->core.start_nid || 57 + verb >= codec->core.start_nid + codec->core.num_nodes) { 58 + res = 0; 59 + } else { 60 + verb -= codec->core.start_nid; 61 + verb = array_index_nospec(verb, codec->core.num_nodes); 62 + res = codec->wcaps[verb]; 63 + } 56 64 if (put_user(res, &arg->res)) 57 65 return -EFAULT; 58 66 return 0;
+8 -1
sound/pci/hda/patch_hdmi.c
··· 1383 1383 pcm = get_pcm_rec(spec, per_pin->pcm_idx); 1384 1384 else 1385 1385 return; 1386 + if (!pcm->pcm) 1387 + return; 1386 1388 if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use)) 1387 1389 return; 1388 1390 ··· 2153 2151 int dev, err; 2154 2152 int pin_idx, pcm_idx; 2155 2153 2156 - 2157 2154 for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) { 2155 + if (!get_pcm_rec(spec, pcm_idx)->pcm) { 2156 + /* no PCM: mark this for skipping permanently */ 2157 + set_bit(pcm_idx, &spec->pcm_bitmap); 2158 + continue; 2159 + } 2160 + 2158 2161 err = generic_hdmi_build_jack(codec, pcm_idx); 2159 2162 if (err < 0) 2160 2163 return err;
+5
sound/pci/hda/patch_realtek.c
··· 331 331 /* fallthrough */ 332 332 case 0x10ec0215: 333 333 case 0x10ec0233: 334 + case 0x10ec0235: 334 335 case 0x10ec0236: 335 336 case 0x10ec0255: 336 337 case 0x10ec0256: ··· 6576 6575 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6577 6576 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), 6578 6577 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6578 + SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6579 6579 SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6580 6580 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 6581 6581 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), ··· 7162 7160 case 0x10ec0298: 7163 7161 spec->codec_variant = ALC269_TYPE_ALC298; 7164 7162 break; 7163 + case 0x10ec0235: 7165 7164 case 0x10ec0255: 7166 7165 spec->codec_variant = ALC269_TYPE_ALC255; 7166 + spec->shutup = alc256_shutup; 7167 + spec->init_hook = alc256_init; 7167 7168 break; 7168 7169 case 0x10ec0236: 7169 7170 case 0x10ec0256:
+14 -10
sound/pci/rme9652/hdspm.c
··· 137 137 #include <linux/pci.h> 138 138 #include <linux/math64.h> 139 139 #include <linux/io.h> 140 + #include <linux/nospec.h> 140 141 141 142 #include <sound/core.h> 142 143 #include <sound/control.h> ··· 5699 5698 struct snd_pcm_channel_info *info) 5700 5699 { 5701 5700 struct hdspm *hdspm = snd_pcm_substream_chip(substream); 5701 + unsigned int channel = info->channel; 5702 5702 5703 5703 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 5704 - if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) { 5704 + if (snd_BUG_ON(channel >= hdspm->max_channels_out)) { 5705 5705 dev_info(hdspm->card->dev, 5706 5706 "snd_hdspm_channel_info: output channel out of range (%d)\n", 5707 - info->channel); 5707 + channel); 5708 5708 return -EINVAL; 5709 5709 } 5710 5710 5711 - if (hdspm->channel_map_out[info->channel] < 0) { 5711 + channel = array_index_nospec(channel, hdspm->max_channels_out); 5712 + if (hdspm->channel_map_out[channel] < 0) { 5712 5713 dev_info(hdspm->card->dev, 5713 5714 "snd_hdspm_channel_info: output channel %d mapped out\n", 5714 - info->channel); 5715 + channel); 5715 5716 return -EINVAL; 5716 5717 } 5717 5718 5718 - info->offset = hdspm->channel_map_out[info->channel] * 5719 + info->offset = hdspm->channel_map_out[channel] * 5719 5720 HDSPM_CHANNEL_BUFFER_BYTES; 5720 5721 } else { 5721 - if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) { 5722 + if (snd_BUG_ON(channel >= hdspm->max_channels_in)) { 5722 5723 dev_info(hdspm->card->dev, 5723 5724 "snd_hdspm_channel_info: input channel out of range (%d)\n", 5724 - info->channel); 5725 + channel); 5725 5726 return -EINVAL; 5726 5727 } 5727 5728 5728 - if (hdspm->channel_map_in[info->channel] < 0) { 5729 + channel = array_index_nospec(channel, hdspm->max_channels_in); 5730 + if (hdspm->channel_map_in[channel] < 0) { 5729 5731 dev_info(hdspm->card->dev, 5730 5732 "snd_hdspm_channel_info: input channel %d mapped out\n", 5731 - info->channel); 5733 + channel); 5732 5734 return -EINVAL; 5733 5735 
} 5734 5736 5735 - info->offset = hdspm->channel_map_in[info->channel] * 5737 + info->offset = hdspm->channel_map_in[channel] * 5736 5738 HDSPM_CHANNEL_BUFFER_BYTES; 5737 5739 } 5738 5740
+4 -2
sound/pci/rme9652/rme9652.c
··· 26 26 #include <linux/pci.h> 27 27 #include <linux/module.h> 28 28 #include <linux/io.h> 29 + #include <linux/nospec.h> 29 30 30 31 #include <sound/core.h> 31 32 #include <sound/control.h> ··· 2072 2071 if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS)) 2073 2072 return -EINVAL; 2074 2073 2075 - if ((chn = rme9652->channel_map[info->channel]) < 0) { 2074 + chn = rme9652->channel_map[array_index_nospec(info->channel, 2075 + RME9652_NCHANNELS)]; 2076 + if (chn < 0) 2076 2077 return -EINVAL; 2077 - } 2078 2078 2079 2079 info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES; 2080 2080 info->first = 0;
+1 -1
sound/soc/amd/acp-da7219-max98357a.c
··· 43 43 #define DUAL_CHANNEL 2 44 44 45 45 static struct snd_soc_jack cz_jack; 46 - struct clk *da7219_dai_clk; 46 + static struct clk *da7219_dai_clk; 47 47 48 48 static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd) 49 49 {
+20 -6
sound/soc/codecs/adau17x1.c
··· 502 502 } 503 503 504 504 if (adau->sigmadsp) { 505 - ret = adau17x1_setup_firmware(adau, params_rate(params)); 505 + ret = adau17x1_setup_firmware(component, params_rate(params)); 506 506 if (ret < 0) 507 507 return ret; 508 508 } ··· 835 835 } 836 836 EXPORT_SYMBOL_GPL(adau17x1_volatile_register); 837 837 838 - int adau17x1_setup_firmware(struct adau *adau, unsigned int rate) 838 + int adau17x1_setup_firmware(struct snd_soc_component *component, 839 + unsigned int rate) 839 840 { 840 841 int ret; 841 - int dspsr; 842 + int dspsr, dsp_run; 843 + struct adau *adau = snd_soc_component_get_drvdata(component); 844 + struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component); 845 + 846 + snd_soc_dapm_mutex_lock(dapm); 842 847 843 848 ret = regmap_read(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, &dspsr); 844 849 if (ret) 845 - return ret; 850 + goto err; 851 + 852 + ret = regmap_read(adau->regmap, ADAU17X1_DSP_RUN, &dsp_run); 853 + if (ret) 854 + goto err; 846 855 847 856 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 1); 848 857 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, 0xf); 858 + regmap_write(adau->regmap, ADAU17X1_DSP_RUN, 0); 849 859 850 860 ret = sigmadsp_setup(adau->sigmadsp, rate); 851 861 if (ret) { 852 862 regmap_write(adau->regmap, ADAU17X1_DSP_ENABLE, 0); 853 - return ret; 863 + goto err; 854 864 } 855 865 regmap_write(adau->regmap, ADAU17X1_DSP_SAMPLING_RATE, dspsr); 866 + regmap_write(adau->regmap, ADAU17X1_DSP_RUN, dsp_run); 856 867 857 - return 0; 868 + err: 869 + snd_soc_dapm_mutex_unlock(dapm); 870 + 871 + return ret; 858 872 } 859 873 EXPORT_SYMBOL_GPL(adau17x1_setup_firmware); 860 874
+2 -1
sound/soc/codecs/adau17x1.h
··· 68 68 69 69 extern const struct snd_soc_dai_ops adau17x1_dai_ops; 70 70 71 - int adau17x1_setup_firmware(struct adau *adau, unsigned int rate); 71 + int adau17x1_setup_firmware(struct snd_soc_component *component, 72 + unsigned int rate); 72 73 bool adau17x1_has_dsp(struct adau *adau); 73 74 74 75 #define ADAU17X1_CLOCK_CONTROL 0x4000
+6 -3
sound/soc/codecs/msm8916-wcd-analog.c
··· 1187 1187 return irq; 1188 1188 } 1189 1189 1190 - ret = devm_request_irq(dev, irq, pm8916_mbhc_switch_irq_handler, 1190 + ret = devm_request_threaded_irq(dev, irq, NULL, 1191 + pm8916_mbhc_switch_irq_handler, 1191 1192 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | 1192 1193 IRQF_ONESHOT, 1193 1194 "mbhc switch irq", priv); ··· 1202 1201 return irq; 1203 1202 } 1204 1203 1205 - ret = devm_request_irq(dev, irq, mbhc_btn_press_irq_handler, 1204 + ret = devm_request_threaded_irq(dev, irq, NULL, 1205 + mbhc_btn_press_irq_handler, 1206 1206 IRQF_TRIGGER_RISING | 1207 1207 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 1208 1208 "mbhc btn press irq", priv); ··· 1216 1214 return irq; 1217 1215 } 1218 1216 1219 - ret = devm_request_irq(dev, irq, mbhc_btn_release_irq_handler, 1217 + ret = devm_request_threaded_irq(dev, irq, NULL, 1218 + mbhc_btn_release_irq_handler, 1220 1219 IRQF_TRIGGER_RISING | 1221 1220 IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 1222 1221 "mbhc btn release irq", priv);
+3
sound/soc/codecs/rt5514.c
··· 89 89 {RT5514_PLL3_CALIB_CTRL5, 0x40220012}, 90 90 {RT5514_DELAY_BUF_CTRL1, 0x7fff006a}, 91 91 {RT5514_DELAY_BUF_CTRL3, 0x00000000}, 92 + {RT5514_ASRC_IN_CTRL1, 0x00000003}, 92 93 {RT5514_DOWNFILTER0_CTRL1, 0x00020c2f}, 93 94 {RT5514_DOWNFILTER0_CTRL2, 0x00020c2f}, 94 95 {RT5514_DOWNFILTER0_CTRL3, 0x10000362}, ··· 182 181 case RT5514_PLL3_CALIB_CTRL5: 183 182 case RT5514_DELAY_BUF_CTRL1: 184 183 case RT5514_DELAY_BUF_CTRL3: 184 + case RT5514_ASRC_IN_CTRL1: 185 185 case RT5514_DOWNFILTER0_CTRL1: 186 186 case RT5514_DOWNFILTER0_CTRL2: 187 187 case RT5514_DOWNFILTER0_CTRL3: ··· 240 238 case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5: 241 239 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1: 242 240 case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3: 241 + case RT5514_DSP_MAPPING | RT5514_ASRC_IN_CTRL1: 243 242 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1: 244 243 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2: 245 244 case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3:
+7
sound/soc/fsl/fsl_esai.c
··· 144 144 145 145 psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8; 146 146 147 + /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */ 148 + if (ratio <= 256) { 149 + pm = ratio; 150 + fp = 1; 151 + goto out; 152 + } 153 + 147 154 /* Set the max fluctuation -- 0.1% of the max devisor */ 148 155 savesub = (psr ? 1 : 8) * 256 * maxfp / 1000; 149 156
+11 -3
sound/soc/fsl/fsl_ssi.c
··· 217 217 * @dai_fmt: DAI configuration this device is currently used with 218 218 * @streams: Mask of current active streams: BIT(TX) and BIT(RX) 219 219 * @i2s_net: I2S and Network mode configurations of SCR register 220 + * (this is the initial settings based on the DAI format) 220 221 * @synchronous: Use synchronous mode - both of TX and RX use STCK and SFCK 221 222 * @use_dma: DMA is used or FIQ with stream filter 222 223 * @use_dual_fifo: DMA with support for dual FIFO mode ··· 830 829 } 831 830 832 831 if (!fsl_ssi_is_ac97(ssi)) { 832 + /* 833 + * Keep the ssi->i2s_net intact while having a local variable 834 + * to override settings for special use cases. Otherwise, the 835 + * ssi->i2s_net will lose the settings for regular use cases. 836 + */ 837 + u8 i2s_net = ssi->i2s_net; 838 + 833 839 /* Normal + Network mode to send 16-bit data in 32-bit frames */ 834 840 if (fsl_ssi_is_i2s_cbm_cfs(ssi) && sample_size == 16) 835 - ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET; 841 + i2s_net = SSI_SCR_I2S_MODE_NORMAL | SSI_SCR_NET; 836 842 837 843 /* Use Normal mode to send mono data at 1st slot of 2 slots */ 838 844 if (channels == 1) 839 - ssi->i2s_net = SSI_SCR_I2S_MODE_NORMAL; 845 + i2s_net = SSI_SCR_I2S_MODE_NORMAL; 840 846 841 847 regmap_update_bits(regs, REG_SSI_SCR, 842 - SSI_SCR_I2S_NET_MASK, ssi->i2s_net); 848 + SSI_SCR_I2S_NET_MASK, i2s_net); 843 849 } 844 850 845 851 /* In synchronous mode, the SSI uses STCCR for capture */
+13 -9
sound/soc/intel/Kconfig
··· 72 72 for Baytrail Chromebooks but this option is now deprecated and is 73 73 not recommended, use SND_SST_ATOM_HIFI2_PLATFORM instead. 74 74 75 + config SND_SST_ATOM_HIFI2_PLATFORM 76 + tristate 77 + select SND_SOC_COMPRESS 78 + 75 79 config SND_SST_ATOM_HIFI2_PLATFORM_PCI 76 - tristate "PCI HiFi2 (Medfield, Merrifield) Platforms" 80 + tristate "PCI HiFi2 (Merrifield) Platforms" 77 81 depends on X86 && PCI 78 82 select SND_SST_IPC_PCI 79 - select SND_SOC_COMPRESS 83 + select SND_SST_ATOM_HIFI2_PLATFORM 80 84 help 81 - If you have a Intel Medfield or Merrifield/Edison platform, then 85 + If you have a Intel Merrifield/Edison platform, then 82 86 enable this option by saying Y or m. Distros will typically not 83 - enable this option: Medfield devices are not available to 84 - developers and while Merrifield/Edison can run a mainline kernel with 85 - limited functionality it will require a firmware file which 86 - is not in the standard firmware tree 87 + enable this option: while Merrifield/Edison can run a mainline 88 + kernel with limited functionality it will require a firmware file 89 + which is not in the standard firmware tree 87 90 88 - config SND_SST_ATOM_HIFI2_PLATFORM 91 + config SND_SST_ATOM_HIFI2_PLATFORM_ACPI 89 92 tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" 93 + default ACPI 90 94 depends on X86 && ACPI 91 95 select SND_SST_IPC_ACPI 92 - select SND_SOC_COMPRESS 96 + select SND_SST_ATOM_HIFI2_PLATFORM 93 97 select SND_SOC_ACPI_INTEL_MATCH 94 98 select IOSF_MBI 95 99 help
+11 -3
sound/soc/omap/omap-dmic.c
··· 281 281 static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id, 282 282 unsigned int freq) 283 283 { 284 - struct clk *parent_clk; 284 + struct clk *parent_clk, *mux; 285 285 char *parent_clk_name; 286 286 int ret = 0; 287 287 ··· 329 329 return -ENODEV; 330 330 } 331 331 332 + mux = clk_get_parent(dmic->fclk); 333 + if (IS_ERR(mux)) { 334 + dev_err(dmic->dev, "can't get fck mux parent\n"); 335 + clk_put(parent_clk); 336 + return -ENODEV; 337 + } 338 + 332 339 mutex_lock(&dmic->mutex); 333 340 if (dmic->active) { 334 341 /* disable clock while reparenting */ 335 342 pm_runtime_put_sync(dmic->dev); 336 - ret = clk_set_parent(dmic->fclk, parent_clk); 343 + ret = clk_set_parent(mux, parent_clk); 337 344 pm_runtime_get_sync(dmic->dev); 338 345 } else { 339 - ret = clk_set_parent(dmic->fclk, parent_clk); 346 + ret = clk_set_parent(mux, parent_clk); 340 347 } 341 348 mutex_unlock(&dmic->mutex); 342 349 ··· 356 349 dmic->fclk_freq = freq; 357 350 358 351 err_busy: 352 + clk_put(mux); 359 353 clk_put(parent_clk); 360 354 361 355 return ret;
+2 -2
sound/soc/sh/rcar/core.c
··· 1536 1536 return ret; 1537 1537 } 1538 1538 1539 - static int rsnd_suspend(struct device *dev) 1539 + static int __maybe_unused rsnd_suspend(struct device *dev) 1540 1540 { 1541 1541 struct rsnd_priv *priv = dev_get_drvdata(dev); 1542 1542 ··· 1545 1545 return 0; 1546 1546 } 1547 1547 1548 - static int rsnd_resume(struct device *dev) 1548 + static int __maybe_unused rsnd_resume(struct device *dev) 1549 1549 { 1550 1550 struct rsnd_priv *priv = dev_get_drvdata(dev); 1551 1551
+9 -5
sound/soc/soc-topology.c
··· 513 513 */ 514 514 if (dobj->widget.kcontrol_type == SND_SOC_TPLG_TYPE_ENUM) { 515 515 /* enumerated widget mixer */ 516 - for (i = 0; i < w->num_kcontrols; i++) { 516 + for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) { 517 517 struct snd_kcontrol *kcontrol = w->kcontrols[i]; 518 518 struct soc_enum *se = 519 519 (struct soc_enum *)kcontrol->private_value; ··· 530 530 } 531 531 } else { 532 532 /* volume mixer or bytes controls */ 533 - for (i = 0; i < w->num_kcontrols; i++) { 533 + for (i = 0; w->kcontrols != NULL && i < w->num_kcontrols; i++) { 534 534 struct snd_kcontrol *kcontrol = w->kcontrols[i]; 535 535 536 536 if (dobj->widget.kcontrol_type ··· 1325 1325 ec->hdr.name); 1326 1326 1327 1327 kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL); 1328 - if (kc[i].name == NULL) 1328 + if (kc[i].name == NULL) { 1329 + kfree(se); 1329 1330 goto err_se; 1331 + } 1330 1332 kc[i].private_value = (long)se; 1331 1333 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1332 1334 kc[i].access = ec->hdr.access; ··· 1444 1442 be->hdr.name, be->hdr.access); 1445 1443 1446 1444 kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL); 1447 - if (kc[i].name == NULL) 1445 + if (kc[i].name == NULL) { 1446 + kfree(sbe); 1448 1447 goto err; 1448 + } 1449 1449 kc[i].private_value = (long)sbe; 1450 1450 kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER; 1451 1451 kc[i].access = be->hdr.access; ··· 2580 2576 2581 2577 /* match index */ 2582 2578 if (dobj->index != index && 2583 - dobj->index != SND_SOC_TPLG_INDEX_ALL) 2579 + index != SND_SOC_TPLG_INDEX_ALL) 2584 2580 continue; 2585 2581 2586 2582 switch (dobj->type) {
+4 -3
sound/usb/mixer.c
··· 1776 1776 build_feature_ctl(state, _ftr, ch_bits, control, 1777 1777 &iterm, unitid, ch_read_only); 1778 1778 if (uac_v2v3_control_is_readable(master_bits, control)) 1779 - build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, 1779 + build_feature_ctl(state, _ftr, 0, control, 1780 + &iterm, unitid, 1780 1781 !uac_v2v3_control_is_writeable(master_bits, 1781 1782 control)); 1782 1783 } ··· 1860 1859 check_input_term(state, d->bTerminalID, &iterm); 1861 1860 if (state->mixer->protocol == UAC_VERSION_2) { 1862 1861 /* Check for jack detection. */ 1863 - if (uac_v2v3_control_is_readable(d->bmControls, 1862 + if (uac_v2v3_control_is_readable(le16_to_cpu(d->bmControls), 1864 1863 UAC2_TE_CONNECTOR)) { 1865 1864 build_connector_control(state, &iterm, true); 1866 1865 } ··· 2562 2561 if (err < 0 && err != -EINVAL) 2563 2562 return err; 2564 2563 2565 - if (uac_v2v3_control_is_readable(desc->bmControls, 2564 + if (uac_v2v3_control_is_readable(le16_to_cpu(desc->bmControls), 2566 2565 UAC2_TE_CONNECTOR)) { 2567 2566 build_connector_control(&state, &state.oterm, 2568 2567 false);
+3
sound/usb/mixer_maps.c
··· 353 353 /* 354 354 * Dell usb dock with ALC4020 codec had a firmware problem where it got 355 355 * screwed up when zero volume is passed; just skip it as a workaround 356 + * 357 + * Also the extension unit gives an access error, so skip it as well. 356 358 */ 357 359 static const struct usbmix_name_map dell_alc4020_map[] = { 360 + { 4, NULL }, /* extension unit */ 358 361 { 16, NULL }, 359 362 { 19, NULL }, 360 363 { 0 }
+1 -1
sound/usb/stream.c
··· 349 349 * TODO: this conversion is not complete, update it 350 350 * after adding UAC3 values to asound.h 351 351 */ 352 - switch (is->bChPurpose) { 352 + switch (is->bChRelationship) { 353 353 case UAC3_CH_MONO: 354 354 map = SNDRV_CHMAP_MONO; 355 355 break;
+1 -1
sound/usb/usx2y/us122l.c
··· 139 139 snd_printdd(KERN_DEBUG "%i\n", atomic_read(&us122l->mmap_count)); 140 140 } 141 141 142 - static int usb_stream_hwdep_vm_fault(struct vm_fault *vmf) 142 + static vm_fault_t usb_stream_hwdep_vm_fault(struct vm_fault *vmf) 143 143 { 144 144 unsigned long offset; 145 145 struct page *page;
+1 -1
sound/usb/usx2y/usX2Yhwdep.c
··· 31 31 #include "usbusx2y.h" 32 32 #include "usX2Yhwdep.h" 33 33 34 - static int snd_us428ctls_vm_fault(struct vm_fault *vmf) 34 + static vm_fault_t snd_us428ctls_vm_fault(struct vm_fault *vmf) 35 35 { 36 36 unsigned long offset; 37 37 struct page * page;
+1 -1
sound/usb/usx2y/usx2yhwdeppcm.c
··· 652 652 } 653 653 654 654 655 - static int snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf) 655 + static vm_fault_t snd_usX2Y_hwdep_pcm_vm_fault(struct vm_fault *vmf) 656 656 { 657 657 unsigned long offset; 658 658 void *vaddr;
+2
tools/bpf/Makefile
··· 76 76 $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ 77 77 78 78 $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c 79 + $(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c 80 + $(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c 79 81 80 82 clean: bpftool_clean 81 83 $(call QUIET_CLEAN, bpf-progs)
+5 -2
tools/bpf/bpf_dbg.c
··· 1063 1063 1064 1064 static int cmd_load(char *arg) 1065 1065 { 1066 - char *subcmd, *cont, *tmp = strdup(arg); 1066 + char *subcmd, *cont = NULL, *tmp = strdup(arg); 1067 1067 int ret = CMD_OK; 1068 1068 1069 1069 subcmd = strtok_r(tmp, " ", &cont); ··· 1073 1073 bpf_reset(); 1074 1074 bpf_reset_breakpoints(); 1075 1075 1076 - ret = cmd_load_bpf(cont); 1076 + if (!cont) 1077 + ret = CMD_ERR; 1078 + else 1079 + ret = cmd_load_bpf(cont); 1077 1080 } else if (matches(subcmd, "pcap") == 0) { 1078 1081 ret = cmd_load_pcap(cont); 1079 1082 } else {
+29 -12
tools/perf/Documentation/perf-mem.txt
··· 28 28 <command>...:: 29 29 Any command you can specify in a shell. 30 30 31 + -i:: 32 + --input=<file>:: 33 + Input file name. 34 + 31 35 -f:: 32 36 --force:: 33 37 Don't do ownership validation 34 38 35 39 -t:: 36 - --type=:: 40 + --type=<type>:: 37 41 Select the memory operation type: load or store (default: load,store) 38 42 39 43 -D:: 40 - --dump-raw-samples=:: 44 + --dump-raw-samples:: 41 45 Dump the raw decoded samples on the screen in a format that is easy to parse with 42 46 one sample per line. 43 47 44 48 -x:: 45 - --field-separator:: 49 + --field-separator=<separator>:: 46 50 Specify the field separator used when dump raw samples (-D option). By default, 47 51 The separator is the space character. 48 52 49 53 -C:: 50 - --cpu-list:: 51 - Restrict dump of raw samples to those provided via this option. Note that the same 52 - option can be passed in record mode. It will be interpreted the same way as perf 53 - record. 54 + --cpu=<cpu>:: 55 + Monitor only on the list of CPUs provided. Multiple CPUs can be provided as a 56 + comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default 57 + is to monitor all CPUS. 58 + -U:: 59 + --hide-unresolved:: 60 + Only display entries resolved to a symbol. 61 + 62 + -p:: 63 + --phys-data:: 64 + Record/Report sample physical addresses 65 + 66 + RECORD OPTIONS 67 + -------------- 68 + -e:: 69 + --event <event>:: 70 + Event selector. Use 'perf mem record -e list' to list available events. 54 71 55 72 -K:: 56 73 --all-kernel:: ··· 77 60 --all-user:: 78 61 Configure all used events to run in user space. 79 62 80 - --ldload:: 81 - Specify desired latency for loads event. 63 + -v:: 64 + --verbose:: 65 + Be more verbose (show counter open errors, etc) 82 66 83 - -p:: 84 - --phys-data:: 85 - Record/Report sample physical addresses 67 + --ldlat <n>:: 68 + Specify desired latency for loads event. 
86 69 87 70 In addition, for report all perf report options are valid, and for record 88 71 all perf record options.
+1
tools/perf/arch/s390/util/auxtrace.c
··· 87 87 struct perf_evsel *pos; 88 88 int diagnose = 0; 89 89 90 + *err = 0; 90 91 if (evlist->nr_entries == 0) 91 92 return NULL; 92 93
-18
tools/perf/arch/s390/util/header.c
··· 146 146 zfree(&buf); 147 147 return buf; 148 148 } 149 - 150 - /* 151 - * Compare the cpuid string returned by get_cpuid() function 152 - * with the name generated by the jevents file read from 153 - * pmu-events/arch/s390/mapfile.csv. 154 - * 155 - * Parameter mapcpuid is the cpuid as stored in the 156 - * pmu-events/arch/s390/mapfile.csv. This is just the type number. 157 - * Parameter cpuid is the cpuid returned by function get_cpuid(). 158 - */ 159 - int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) 160 - { 161 - char *cp = strchr(cpuid, ','); 162 - 163 - if (cp == NULL) 164 - return -1; 165 - return strncmp(cp + 1, mapcpuid, strlen(mapcpuid)); 166 - }
+38 -2
tools/perf/builtin-stat.c
··· 172 172 static const char *output_name; 173 173 static int output_fd; 174 174 static int print_free_counters_hint; 175 + static int print_mixed_hw_group_error; 175 176 176 177 struct perf_stat { 177 178 bool record; ··· 1127 1126 fprintf(output, "%s%s", csv_sep, evsel->cgrp->name); 1128 1127 } 1129 1128 1129 + static bool is_mixed_hw_group(struct perf_evsel *counter) 1130 + { 1131 + struct perf_evlist *evlist = counter->evlist; 1132 + u32 pmu_type = counter->attr.type; 1133 + struct perf_evsel *pos; 1134 + 1135 + if (counter->nr_members < 2) 1136 + return false; 1137 + 1138 + evlist__for_each_entry(evlist, pos) { 1139 + /* software events can be part of any hardware group */ 1140 + if (pos->attr.type == PERF_TYPE_SOFTWARE) 1141 + continue; 1142 + if (pmu_type == PERF_TYPE_SOFTWARE) { 1143 + pmu_type = pos->attr.type; 1144 + continue; 1145 + } 1146 + if (pmu_type != pos->attr.type) 1147 + return true; 1148 + } 1149 + 1150 + return false; 1151 + } 1152 + 1130 1153 static void printout(int id, int nr, struct perf_evsel *counter, double uval, 1131 1154 char *prefix, u64 run, u64 ena, double noise, 1132 1155 struct runtime_stat *st) ··· 1203 1178 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, 1204 1179 csv_sep); 1205 1180 1206 - if (counter->supported) 1181 + if (counter->supported) { 1207 1182 print_free_counters_hint = 1; 1183 + if (is_mixed_hw_group(counter)) 1184 + print_mixed_hw_group_error = 1; 1185 + } 1208 1186 1209 1187 fprintf(stat_config.output, "%-*s%s", 1210 1188 csv_output ? 
0 : unit_width, ··· 1284 1256 char *new_name; 1285 1257 char *config; 1286 1258 1287 - if (!counter->pmu_name || !strncmp(counter->name, counter->pmu_name, 1259 + if (counter->uniquified_name || 1260 + !counter->pmu_name || !strncmp(counter->name, counter->pmu_name, 1288 1261 strlen(counter->pmu_name))) 1289 1262 return; 1290 1263 ··· 1303 1274 counter->name = new_name; 1304 1275 } 1305 1276 } 1277 + 1278 + counter->uniquified_name = true; 1306 1279 } 1307 1280 1308 1281 static void collect_all_aliases(struct perf_evsel *counter, ··· 1788 1757 " echo 0 > /proc/sys/kernel/nmi_watchdog\n" 1789 1758 " perf stat ...\n" 1790 1759 " echo 1 > /proc/sys/kernel/nmi_watchdog\n"); 1760 + 1761 + if (print_mixed_hw_group_error) 1762 + fprintf(output, 1763 + "The events in group usually have to be from " 1764 + "the same PMU. Try reorganizing the group.\n"); 1791 1765 } 1792 1766 1793 1767 static void print_counters(struct timespec *ts, int argc, const char **argv)
+5 -5
tools/perf/pmu-events/arch/s390/mapfile.csv
··· 1 1 Family-model,Version,Filename,EventType 2 - 209[78],1,cf_z10,core 3 - 281[78],1,cf_z196,core 4 - 282[78],1,cf_zec12,core 5 - 296[45],1,cf_z13,core 6 - 3906,3,cf_z14,core 2 + ^IBM.209[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z10,core 3 + ^IBM.281[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z196,core 4 + ^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core 5 + ^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core 6 + ^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
+3
tools/perf/tests/attr/test-record-group-sampling
··· 35 35 # sampling disabled 36 36 sample_freq=0 37 37 sample_period=0 38 + freq=0 39 + write_backward=0 40 + sample_id_all=0
+2 -4
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
··· 19 19 expected[1]=".*inet_pton[[:space:]]\($libc\)$" 20 20 case "$(uname -m)" in 21 21 s390x) 22 - eventattr='call-graph=dwarf' 22 + eventattr='call-graph=dwarf,max-stack=4' 23 23 expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$" 24 - expected[3]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$" 24 + expected[3]="(__GI_)?getaddrinfo[[:space:]]\($libc|inlined\)$" 25 25 expected[4]="main[[:space:]]\(.*/bin/ping.*\)$" 26 - expected[5]="__libc_start_main[[:space:]]\($libc\)$" 27 - expected[6]="_start[[:space:]]\(.*/bin/ping.*\)$" 28 26 ;; 29 27 *) 30 28 eventattr='max-stack=3'
+14 -4
tools/perf/util/evsel.c
··· 930 930 * than leader in case leader 'leads' the sampling. 931 931 */ 932 932 if ((leader != evsel) && leader->sample_read) { 933 - attr->sample_freq = 0; 934 - attr->sample_period = 0; 933 + attr->freq = 0; 934 + attr->sample_freq = 0; 935 + attr->sample_period = 0; 936 + attr->write_backward = 0; 937 + attr->sample_id_all = 0; 935 938 } 936 939 937 940 if (opts->no_samples) ··· 1925 1922 goto fallback_missing_features; 1926 1923 } else if (!perf_missing_features.group_read && 1927 1924 evsel->attr.inherit && 1928 - (evsel->attr.read_format & PERF_FORMAT_GROUP)) { 1925 + (evsel->attr.read_format & PERF_FORMAT_GROUP) && 1926 + perf_evsel__is_group_leader(evsel)) { 1929 1927 perf_missing_features.group_read = true; 1930 1928 pr_debug2("switching off group read\n"); 1931 1929 goto fallback_missing_features; ··· 2758 2754 (paranoid = perf_event_paranoid()) > 1) { 2759 2755 const char *name = perf_evsel__name(evsel); 2760 2756 char *new_name; 2757 + const char *sep = ":"; 2761 2758 2762 - if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0) 2759 + /* Is there already the separator in the name. */ 2760 + if (strchr(name, '/') || 2761 + strchr(name, ':')) 2762 + sep = ""; 2763 + 2764 + if (asprintf(&new_name, "%s%su", name, sep) < 0) 2763 2765 return false; 2764 2766 2765 2767 if (evsel->name)
+1
tools/perf/util/evsel.h
··· 115 115 unsigned int sample_size; 116 116 int id_pos; 117 117 int is_pos; 118 + bool uniquified_name; 118 119 bool snapshot; 119 120 bool supported; 120 121 bool needs_swap;
+18 -12
tools/perf/util/machine.c
··· 1019 1019 return ret; 1020 1020 } 1021 1021 1022 - static void map_groups__fixup_end(struct map_groups *mg) 1023 - { 1024 - int i; 1025 - for (i = 0; i < MAP__NR_TYPES; ++i) 1026 - __map_groups__fixup_end(mg, i); 1027 - } 1028 - 1029 1022 static char *get_kernel_version(const char *root_dir) 1030 1023 { 1031 1024 char version[PATH_MAX]; ··· 1226 1233 { 1227 1234 struct dso *kernel = machine__get_kernel(machine); 1228 1235 const char *name = NULL; 1236 + struct map *map; 1229 1237 u64 addr = 0; 1230 1238 int ret; 1231 1239 ··· 1253 1259 machine__destroy_kernel_maps(machine); 1254 1260 return -1; 1255 1261 } 1256 - machine__set_kernel_mmap(machine, addr, 0); 1262 + 1263 + /* we have a real start address now, so re-order the kmaps */ 1264 + map = machine__kernel_map(machine); 1265 + 1266 + map__get(map); 1267 + map_groups__remove(&machine->kmaps, map); 1268 + 1269 + /* assume it's the last in the kmaps */ 1270 + machine__set_kernel_mmap(machine, addr, ~0ULL); 1271 + 1272 + map_groups__insert(&machine->kmaps, map); 1273 + map__put(map); 1257 1274 } 1258 1275 1259 - /* 1260 - * Now that we have all the maps created, just set the ->end of them: 1261 - */ 1262 - map_groups__fixup_end(&machine->kmaps); 1276 + /* update end address of the kernel map using adjacent module address */ 1277 + map = map__next(machine__kernel_map(machine)); 1278 + if (map) 1279 + machine__set_kernel_mmap(machine, addr, map->start); 1280 + 1263 1281 return 0; 1264 1282 } 1265 1283
+4 -4
tools/perf/util/parse-events.y
··· 224 224 event_bpf_file 225 225 226 226 event_pmu: 227 - PE_NAME opt_event_config 227 + PE_NAME '/' event_config '/' 228 228 { 229 229 struct list_head *list, *orig_terms, *terms; 230 230 231 - if (parse_events_copy_term_list($2, &orig_terms)) 231 + if (parse_events_copy_term_list($3, &orig_terms)) 232 232 YYABORT; 233 233 234 234 ALLOC_LIST(list); 235 - if (parse_events_add_pmu(_parse_state, list, $1, $2, false)) { 235 + if (parse_events_add_pmu(_parse_state, list, $1, $3, false)) { 236 236 struct perf_pmu *pmu = NULL; 237 237 int ok = 0; 238 238 char *pattern; ··· 262 262 if (!ok) 263 263 YYABORT; 264 264 } 265 - parse_events_terms__delete($2); 265 + parse_events_terms__delete($3); 266 266 parse_events_terms__delete(orig_terms); 267 267 $$ = list; 268 268 }
+8 -14
tools/perf/util/pmu.c
··· 539 539 540 540 /* 541 541 * PMU CORE devices have different name other than cpu in sysfs on some 542 - * platforms. looking for possible sysfs files to identify as core device. 542 + * platforms. 543 + * Looking for possible sysfs files to identify the arm core device. 543 544 */ 544 - static int is_pmu_core(const char *name) 545 + static int is_arm_pmu_core(const char *name) 545 546 { 546 547 struct stat st; 547 548 char path[PATH_MAX]; ··· 550 549 551 550 if (!sysfs) 552 551 return 0; 553 - 554 - /* Look for cpu sysfs (x86 and others) */ 555 - scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu", sysfs); 556 - if ((stat(path, &st) == 0) && 557 - (strncmp(name, "cpu", strlen("cpu")) == 0)) 558 - return 1; 559 552 560 553 /* Look for cpu sysfs (specific to arm) */ 561 554 scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus", ··· 581 586 * cpuid string generated on this platform. 582 587 * Otherwise return non-zero. 583 588 */ 584 - int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) 589 + int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) 585 590 { 586 591 regex_t re; 587 592 regmatch_t pmatch[1]; ··· 663 668 struct pmu_events_map *map; 664 669 struct pmu_event *pe; 665 670 const char *name = pmu->name; 671 + const char *pname; 666 672 667 673 map = perf_pmu__find_map(pmu); 668 674 if (!map) ··· 682 686 break; 683 687 } 684 688 685 - if (!is_pmu_core(name)) { 686 - /* check for uncore devices */ 687 - if (pe->pmu == NULL) 688 - continue; 689 - if (strncmp(pe->pmu, name, strlen(pe->pmu))) 689 + if (!is_arm_pmu_core(name)) { 690 + pname = pe->pmu ? pe->pmu : "cpu"; 691 + if (strncmp(pname, name, strlen(pname))) 690 692 continue; 691 693 } 692 694
+2 -2
tools/testing/selftests/bpf/test_progs.c
··· 1108 1108 1109 1109 assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") 1110 1110 == 0); 1111 - assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0); 1111 + assert(system("./urandom_read") == 0); 1112 1112 /* disable stack trace collection */ 1113 1113 key = 0; 1114 1114 val = 1; ··· 1158 1158 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1159 1159 1160 1160 CHECK(build_id_matches < 1, "build id match", 1161 - "Didn't find expected build ID from the map"); 1161 + "Didn't find expected build ID from the map\n"); 1162 1162 1163 1163 disable_pmu: 1164 1164 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+1
tools/testing/selftests/firmware/Makefile
··· 4 4 all: 5 5 6 6 TEST_PROGS := fw_run_tests.sh 7 + TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_lib.sh 7 8 8 9 include ../lib.mk 9 10
+6 -4
tools/testing/selftests/firmware/fw_lib.sh
··· 154 154 if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then 155 155 echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout 156 156 fi 157 - if [ "$OLD_FWPATH" = "" ]; then 158 - OLD_FWPATH=" " 159 - fi 160 157 if [ "$TEST_REQS_FW_SET_CUSTOM_PATH" = "yes" ]; then 161 - echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path 158 + if [ "$OLD_FWPATH" = "" ]; then 159 + # A zero-length write won't work; write a null byte 160 + printf '\000' >/sys/module/firmware_class/parameters/path 161 + else 162 + echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path 163 + fi 162 164 fi 163 165 if [ -f $FW ]; then 164 166 rm -f "$FW"
+1 -1
tools/testing/selftests/firmware/fw_run_tests.sh
··· 66 66 run_test_config_0003 67 67 else 68 68 echo "Running basic kernel configuration, working with your config" 69 - run_test 69 + run_tests 70 70 fi
+4 -4
tools/testing/selftests/lib.mk
··· 20 20 21 21 .ONESHELL: 22 22 define RUN_TESTS 23 - @export KSFT_TAP_LEVEL=`echo 1`; 24 - @test_num=`echo 0`; 25 - @echo "TAP version 13"; 26 - @for TEST in $(1); do \ 23 + @export KSFT_TAP_LEVEL=`echo 1`; \ 24 + test_num=`echo 0`; \ 25 + echo "TAP version 13"; \ 26 + for TEST in $(1); do \ 27 27 BASENAME_TEST=`basename $$TEST`; \ 28 28 test_num=`echo $$test_num+1 | bc`; \ 29 29 echo "selftests: $$BASENAME_TEST"; \
+2 -1
tools/testing/selftests/net/Makefile
··· 5 5 CFLAGS += -I../../../../usr/include/ 6 6 7 7 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh 8 - TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh 8 + TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh 9 + TEST_GEN_PROGS_EXTENDED := in_netns.sh 9 10 TEST_GEN_FILES = socket 10 11 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy 11 12 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
+21 -14
tools/testing/selftests/x86/test_syscall_vdso.c
··· 100 100 " shl $32, %r8\n" 101 101 " orq $0x7f7f7f7f, %r8\n" 102 102 " movq %r8, %r9\n" 103 - " movq %r8, %r10\n" 104 - " movq %r8, %r11\n" 105 - " movq %r8, %r12\n" 106 - " movq %r8, %r13\n" 107 - " movq %r8, %r14\n" 108 - " movq %r8, %r15\n" 103 + " incq %r9\n" 104 + " movq %r9, %r10\n" 105 + " incq %r10\n" 106 + " movq %r10, %r11\n" 107 + " incq %r11\n" 108 + " movq %r11, %r12\n" 109 + " incq %r12\n" 110 + " movq %r12, %r13\n" 111 + " incq %r13\n" 112 + " movq %r13, %r14\n" 113 + " incq %r14\n" 114 + " movq %r14, %r15\n" 115 + " incq %r15\n" 109 116 " ret\n" 110 117 " .code32\n" 111 118 " .popsection\n" ··· 135 128 int err = 0; 136 129 int num = 8; 137 130 uint64_t *r64 = &regs64.r8; 131 + uint64_t expected = 0x7f7f7f7f7f7f7f7fULL; 138 132 139 133 if (!kernel_is_64bit) 140 134 return 0; 141 135 142 136 do { 143 - if (*r64 == 0x7f7f7f7f7f7f7f7fULL) 137 + if (*r64 == expected++) 144 138 continue; /* register did not change */ 145 139 if (syscall_addr != (long)&int80) { 146 140 /* ··· 155 147 continue; 156 148 } 157 149 } else { 158 - /* INT80 syscall entrypoint can be used by 150 + /* 151 + * INT80 syscall entrypoint can be used by 159 152 * 64-bit programs too, unlike SYSCALL/SYSENTER. 160 153 * Therefore it must preserve R12+ 161 154 * (they are callee-saved registers in 64-bit C ABI). 162 155 * 163 - * This was probably historically not intended, 164 - * but R8..11 are clobbered (cleared to 0). 165 - * IOW: they are the only registers which aren't 166 - * preserved across INT80 syscall. 156 + * Starting in Linux 4.17 (and any kernel that 157 + * backports the change), R8..11 are preserved. 158 + * Historically (and probably unintentionally), they 159 + * were clobbered or zeroed. 167 160 */ 168 - if (*r64 == 0 && num <= 11) 169 - continue; 170 161 } 171 162 printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64); 172 163 err++;
+10 -5
virt/kvm/arm/arm.c
··· 63 63 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); 64 64 static u32 kvm_next_vmid; 65 65 static unsigned int kvm_vmid_bits __read_mostly; 66 - static DEFINE_SPINLOCK(kvm_vmid_lock); 66 + static DEFINE_RWLOCK(kvm_vmid_lock); 67 67 68 68 static bool vgic_present; 69 69 ··· 473 473 { 474 474 phys_addr_t pgd_phys; 475 475 u64 vmid; 476 + bool new_gen; 476 477 477 - if (!need_new_vmid_gen(kvm)) 478 + read_lock(&kvm_vmid_lock); 479 + new_gen = need_new_vmid_gen(kvm); 480 + read_unlock(&kvm_vmid_lock); 481 + 482 + if (!new_gen) 478 483 return; 479 484 480 - spin_lock(&kvm_vmid_lock); 485 + write_lock(&kvm_vmid_lock); 481 486 482 487 /* 483 488 * We need to re-check the vmid_gen here to ensure that if another vcpu ··· 490 485 * use the same vmid. 491 486 */ 492 487 if (!need_new_vmid_gen(kvm)) { 493 - spin_unlock(&kvm_vmid_lock); 488 + write_unlock(&kvm_vmid_lock); 494 489 return; 495 490 } 496 491 ··· 524 519 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); 525 520 kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid; 526 521 527 - spin_unlock(&kvm_vmid_lock); 522 + write_unlock(&kvm_vmid_lock); 528 523 } 529 524 530 525 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+60
virt/kvm/arm/psci.c
··· 18 18 #include <linux/arm-smccc.h> 19 19 #include <linux/preempt.h> 20 20 #include <linux/kvm_host.h> 21 + #include <linux/uaccess.h> 21 22 #include <linux/wait.h> 22 23 23 24 #include <asm/cputype.h> ··· 427 426 428 427 smccc_set_retval(vcpu, val, 0, 0, 0); 429 428 return 1; 429 + } 430 + 431 + int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu) 432 + { 433 + return 1; /* PSCI version */ 434 + } 435 + 436 + int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 437 + { 438 + if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices)) 439 + return -EFAULT; 440 + 441 + return 0; 442 + } 443 + 444 + int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 445 + { 446 + if (reg->id == KVM_REG_ARM_PSCI_VERSION) { 447 + void __user *uaddr = (void __user *)(long)reg->addr; 448 + u64 val; 449 + 450 + val = kvm_psci_version(vcpu, vcpu->kvm); 451 + if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id))) 452 + return -EFAULT; 453 + 454 + return 0; 455 + } 456 + 457 + return -EINVAL; 458 + } 459 + 460 + int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 461 + { 462 + if (reg->id == KVM_REG_ARM_PSCI_VERSION) { 463 + void __user *uaddr = (void __user *)(long)reg->addr; 464 + bool wants_02; 465 + u64 val; 466 + 467 + if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id))) 468 + return -EFAULT; 469 + 470 + wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features); 471 + 472 + switch (val) { 473 + case KVM_ARM_PSCI_0_1: 474 + if (wants_02) 475 + return -EINVAL; 476 + vcpu->kvm->arch.psci_version = val; 477 + return 0; 478 + case KVM_ARM_PSCI_0_2: 479 + case KVM_ARM_PSCI_1_0: 480 + if (!wants_02) 481 + return -EINVAL; 482 + vcpu->kvm->arch.psci_version = val; 483 + return 0; 484 + } 485 + } 486 + 487 + return -EINVAL; 430 488 }
+5
virt/kvm/arm/vgic/vgic-mmio-v2.c
··· 14 14 #include <linux/irqchip/arm-gic.h> 15 15 #include <linux/kvm.h> 16 16 #include <linux/kvm_host.h> 17 + #include <linux/nospec.h> 18 + 17 19 #include <kvm/iodev.h> 18 20 #include <kvm/arm_vgic.h> 19 21 ··· 326 324 327 325 if (n > vgic_v3_max_apr_idx(vcpu)) 328 326 return 0; 327 + 328 + n = array_index_nospec(n, 4); 329 + 329 330 /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */ 330 331 return vgicv3->vgic_ap1r[n]; 331 332 }
+18 -4
virt/kvm/arm/vgic/vgic.c
··· 14 14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 15 */ 16 16 17 + #include <linux/interrupt.h> 18 + #include <linux/irq.h> 17 19 #include <linux/kvm.h> 18 20 #include <linux/kvm_host.h> 19 21 #include <linux/list_sort.h> 20 - #include <linux/interrupt.h> 21 - #include <linux/irq.h> 22 + #include <linux/nospec.h> 23 + 22 24 #include <asm/kvm_hyp.h> 23 25 24 26 #include "vgic.h" ··· 103 101 u32 intid) 104 102 { 105 103 /* SGIs and PPIs */ 106 - if (intid <= VGIC_MAX_PRIVATE) 104 + if (intid <= VGIC_MAX_PRIVATE) { 105 + intid = array_index_nospec(intid, VGIC_MAX_PRIVATE); 107 106 return &vcpu->arch.vgic_cpu.private_irqs[intid]; 107 + } 108 108 109 109 /* SPIs */ 110 - if (intid <= VGIC_MAX_SPI) 110 + if (intid <= VGIC_MAX_SPI) { 111 + intid = array_index_nospec(intid, VGIC_MAX_SPI); 111 112 return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS]; 113 + } 112 114 113 115 /* LPIs */ 114 116 if (intid >= VGIC_MIN_LPI) ··· 600 594 601 595 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { 602 596 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; 597 + bool target_vcpu_needs_kick = false; 603 598 604 599 spin_lock(&irq->irq_lock); 605 600 ··· 671 664 list_del(&irq->ap_list); 672 665 irq->vcpu = target_vcpu; 673 666 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); 667 + target_vcpu_needs_kick = true; 674 668 } 675 669 676 670 spin_unlock(&irq->irq_lock); 677 671 spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); 678 672 spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags); 673 + 674 + if (target_vcpu_needs_kick) { 675 + kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); 676 + kvm_vcpu_kick(target_vcpu); 677 + } 678 + 679 679 goto retry; 680 680 } 681 681