Merge branch 'next' into for-linus

Prepare input updates for 4.14 merge window.

+11721 -7104
+16
Documentation/admin-guide/kernel-parameters.txt
··· 866 866 867 867 dscc4.setup= [NET] 868 868 869 + dt_cpu_ftrs= [PPC] 870 + Format: {"off" | "known"} 871 + Control how the dt_cpu_ftrs device-tree binding is 872 + used for CPU feature discovery and setup (if it 873 + exists). 874 + off: Do not use it, fall back to legacy cpu table. 875 + known: Do not pass through unknown features to guests 876 + or userspace, only those that the kernel is aware of. 877 + 869 878 dump_apple_properties [X86] 870 879 Dump name and content of EFI device properties on 871 880 x86 Macs. Useful for driver authors to determine ··· 3810 3801 grace period will be considered for automatic 3811 3802 expediting. Set to zero to disable automatic 3812 3803 expediting. 3804 + 3805 + stack_guard_gap= [MM] 3806 + override the default stack gap protection. The value 3807 + is in page units and it defines how many pages prior 3808 + to (for stacks growing down) resp. after (for stacks 3809 + growing up) the main stack are reserved for no other 3810 + mapping. Default value is 256 pages. 3813 3811 3814 3812 stacktrace [FTRACE] 3815 3813 Enabled the stack tracer on boot up.
+4 -3
Documentation/devicetree/bindings/clock/sunxi-ccu.txt
··· 22 22 - #clock-cells : must contain 1 23 23 - #reset-cells : must contain 1 24 24 25 - For the PRCM CCUs on H3/A64, one more clock is needed: 25 + For the PRCM CCUs on H3/A64, two more clocks are needed: 26 + - "pll-periph": the SoC's peripheral PLL from the main CCU 26 27 - "iosc": the SoC's internal frequency oscillator 27 28 28 29 Example for generic CCU: ··· 40 39 r_ccu: clock@01f01400 { 41 40 compatible = "allwinner,sun50i-a64-r-ccu"; 42 41 reg = <0x01f01400 0x100>; 43 - clocks = <&osc24M>, <&osc32k>, <&iosc>; 44 - clock-names = "hosc", "losc", "iosc"; 42 + clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>; 43 + clock-names = "hosc", "losc", "iosc", "pll-periph"; 45 44 #clock-cells = <1>; 46 45 #reset-cells = <1>; 47 46 };
+3 -3
Documentation/devicetree/bindings/gpio/gpio-mvebu.txt
··· 41 41 Optional properties: 42 42 43 43 In order to use the GPIO lines in PWM mode, some additional optional 44 - properties are required. Only Armada 370 and XP support these properties. 44 + properties are required. 45 45 46 - - compatible: Must contain "marvell,armada-370-xp-gpio" 46 + - compatible: Must contain "marvell,armada-370-gpio" 47 47 48 48 - reg: an additional register set is needed, for the GPIO Blink 49 49 Counter on/off registers. ··· 71 71 }; 72 72 73 73 gpio1: gpio@18140 { 74 - compatible = "marvell,armada-370-xp-gpio"; 74 + compatible = "marvell,armada-370-gpio"; 75 75 reg = <0x18140 0x40>, <0x181c8 0x08>; 76 76 reg-names = "gpio", "pwm"; 77 77 ngpios = <17>;
+2
Documentation/devicetree/bindings/input/atmel,maxtouch.txt
··· 22 22 experiment to determine which bit corresponds to which input. Use 23 23 KEY_RESERVED for unused padding values. 24 24 25 + - reset-gpios: GPIO specifier for the touchscreen's reset pin (active low) 26 + 25 27 Example: 26 28 27 29 touch@4b {
+1 -1
Documentation/devicetree/bindings/mfd/stm32-timers.txt
··· 31 31 compatible = "st,stm32-timers"; 32 32 reg = <0x40010000 0x400>; 33 33 clocks = <&rcc 0 160>; 34 - clock-names = "clk_int"; 34 + clock-names = "int"; 35 35 36 36 pwm { 37 37 compatible = "st,stm32-pwm";
+1 -1
Documentation/devicetree/bindings/net/dsa/b53.txt
··· 34 34 "brcm,bcm6328-switch" 35 35 "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch" 36 36 37 - See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional 37 + See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional 38 38 required and optional properties. 39 39 40 40 Examples:
+4
Documentation/devicetree/bindings/net/dsa/marvell.txt
··· 26 26 - interrupt-controller : Indicates the switch is itself an interrupt 27 27 controller. This is used for the PHY interrupts. 28 28 #interrupt-cells = <2> : Controller uses two cells, number and flag 29 + - eeprom-length : Set to the length of an EEPROM connected to the 30 + switch. Must be set if the switch can not detect 31 + the presence and/or size of a connected EEPROM, 32 + otherwise optional. 29 33 - mdio : Container of PHY and devices on the switches MDIO 30 34 bus. 31 35 - mdio? : Container of PHYs and devices on the external MDIO
+1
Documentation/devicetree/bindings/net/smsc911x.txt
··· 27 27 of the device. On many systems this is wired high so the device goes 28 28 out of reset at power-on, but if it is under program control, this 29 29 optional GPIO can wake up in response to it. 30 + - vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies 30 31 31 32 Examples: 32 33
-2
Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
··· 247 247 bias-pull-up - pull up the pin 248 248 bias-pull-down - pull down the pin 249 249 bias-pull-pin-default - use pin-default pull state 250 - bi-directional - pin supports simultaneous input/output operations 251 250 drive-push-pull - drive actively high and low 252 251 drive-open-drain - drive with open drain 253 252 drive-open-source - drive with open source ··· 259 260 power-source - select between different power supplies 260 261 low-power-enable - enable low power mode 261 262 low-power-disable - disable low power mode 262 - output-enable - enable output on pin regardless of output value 263 263 output-low - set the pin to output mode with low level 264 264 output-high - set the pin to output mode with high level 265 265 slew-rate - set the slew rate
+23
Documentation/devicetree/bindings/serio/ps2-gpio.txt
··· 1 + Device-Tree binding for ps/2 gpio device 2 + 3 + Required properties: 4 + - compatible = "ps2-gpio" 5 + - data-gpios: the data pin 6 + - clk-gpios: the clock pin 7 + - interrupts: Should trigger on the falling edge of the clock line. 8 + 9 + Optional properties: 10 + - write-enable: Indicates whether write function is provided 11 + to serio device. Possibly providing the write fn will not work, because 12 + of the tough timing requirements. 13 + 14 + Example nodes: 15 + 16 + ps2@0 { 17 + compatible = "ps2-gpio"; 18 + interrupt-parent = <&gpio>; 19 + interrupts = <23 IRQ_TYPE_EDGE_FALLING>; 20 + data-gpios = <&gpio 24 GPIO_ACTIVE_HIGH>; 21 + clk-gpios = <&gpio 23 GPIO_ACTIVE_HIGH>; 22 + write-enable; 23 + };
+1
Documentation/devicetree/bindings/usb/dwc2.txt
··· 10 10 - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc; 11 11 - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs; 12 12 - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs; 13 + - "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs; 13 14 - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs; 14 15 - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs; 15 16 - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
+5
Documentation/gpio/drivers-on-gpio.txt
··· 84 84 NAND flash MTD subsystem and provides chip access and partition parsing like 85 85 any other NAND driving hardware. 86 86 87 + - ps2-gpio: drivers/input/serio/ps2-gpio.c is used to drive a PS/2 (IBM) serio 88 + bus, data and clock line, by bit banging two GPIO lines. It will appear as 89 + any other serio bus to the system and makes it possible to connect drivers 90 + for e.g. keyboards and other PS/2 protocol based devices. 91 + 87 92 Apart from this there are special GPIO drivers in subsystems like MMC/SD to 88 93 read card detect and write protect GPIO lines, and in the TTY serial subsystem 89 94 to emulate MCTRL (modem control) signals CTS/RTS by using two GPIO lines. The
+194
Documentation/networking/dpaa.txt
··· 1 + The QorIQ DPAA Ethernet Driver 2 + ============================== 3 + 4 + Authors: 5 + Madalin Bucur <madalin.bucur@nxp.com> 6 + Camelia Groza <camelia.groza@nxp.com> 7 + 8 + Contents 9 + ======== 10 + 11 + - DPAA Ethernet Overview 12 + - DPAA Ethernet Supported SoCs 13 + - Configuring DPAA Ethernet in your kernel 14 + - DPAA Ethernet Frame Processing 15 + - DPAA Ethernet Features 16 + - Debugging 17 + 18 + DPAA Ethernet Overview 19 + ====================== 20 + 21 + DPAA stands for Data Path Acceleration Architecture and it is a 22 + set of networking acceleration IPs that are available on several 23 + generations of SoCs, both on PowerPC and ARM64. 24 + 25 + The Freescale DPAA architecture consists of a series of hardware blocks 26 + that support Ethernet connectivity. The Ethernet driver depends upon the 27 + following drivers in the Linux kernel: 28 + 29 + - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms) 30 + drivers/iommu/fsl_* 31 + - Frame Manager (FMan) 32 + drivers/net/ethernet/freescale/fman 33 + - Queue Manager (QMan), Buffer Manager (BMan) 34 + drivers/soc/fsl/qbman 35 + 36 + A simplified view of the dpaa_eth interfaces mapped to FMan MACs: 37 + 38 + dpaa_eth /eth0\ ... /ethN\ 39 + driver | | | | 40 + ------------- ---- ----------- ---- ------------- 41 + -Ports / Tx Rx \ ... / Tx Rx \ 42 + FMan | | | | 43 + -MACs | MAC0 | | MACN | 44 + / dtsec0 \ ... / dtsecN \ (or tgec) 45 + / \ / \(or memac) 46 + --------- -------------- --- -------------- --------- 47 + FMan, FMan Port, FMan SP, FMan MURAM drivers 48 + --------------------------------------------------------- 49 + FMan HW blocks: MURAM, MACs, Ports, SP 50 + --------------------------------------------------------- 51 + 52 + The dpaa_eth relation to the QMan, BMan and FMan: 53 + ________________________________ 54 + dpaa_eth / eth0 \ 55 + driver / \ 56 + --------- -^- -^- -^- --- --------- 57 + QMan driver / \ / \ / \ \ / | BMan | 58 + |Rx | |Rx | |Tx | |Tx | | driver | 59 + --------- |Dfl| |Err| |Cnf| |FQs| | | 60 + QMan HW |FQ | |FQ | |FQs| | | | | 61 + / \ / \ / \ \ / | | 62 + --------- --- --- --- -v- --------- 63 + | FMan QMI | | 64 + | FMan HW FMan BMI | BMan HW | 65 + ----------------------- -------- 66 + 67 + where the acronyms used above (and in the code) are: 68 + DPAA = Data Path Acceleration Architecture 69 + FMan = DPAA Frame Manager 70 + QMan = DPAA Queue Manager 71 + BMan = DPAA Buffers Manager 72 + QMI = QMan interface in FMan 73 + BMI = BMan interface in FMan 74 + FMan SP = FMan Storage Profiles 75 + MURAM = Multi-user RAM in FMan 76 + FQ = QMan Frame Queue 77 + Rx Dfl FQ = default reception FQ 78 + Rx Err FQ = Rx error frames FQ 79 + Tx Cnf FQ = Tx confirmation FQs 80 + Tx FQs = transmission frame queues 81 + dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps) 82 + tgec = ten gigabit Ethernet controller (10 Gbps) 83 + memac = multirate Ethernet MAC (10/100/1000/10000) 84 + 85 + DPAA Ethernet Supported SoCs 86 + ============================ 87 + 88 + The DPAA drivers enable the Ethernet controllers present on the following SoCs: 89 + 90 + # PPC 91 + P1023 92 + P2041 93 + P3041 94 + P4080 95 + P5020 96 + P5040 97 + T1023 98 + T1024 99 + T1040 100 + T1042 101 + T2080 102 + T4240 103 + B4860 104 + 105 + # ARM 106 + LS1043A 107 + LS1046A 108 + 109 + Configuring DPAA Ethernet in your kernel 110 + ======================================== 111 + 112 + To enable the DPAA Ethernet driver, the following Kconfig options are required: 113 + 114 + # common for arch/arm64 and arch/powerpc platforms 115 + CONFIG_FSL_DPAA=y 116 + CONFIG_FSL_FMAN=y 117 + CONFIG_FSL_DPAA_ETH=y 118 + CONFIG_FSL_XGMAC_MDIO=y 119 + 120 + # for arch/powerpc only 121 + CONFIG_FSL_PAMU=y 122 + 123 + # common options needed for the PHYs used on the RDBs 124 + CONFIG_VITESSE_PHY=y 125 + CONFIG_REALTEK_PHY=y 126 + CONFIG_AQUANTIA_PHY=y 127 + 128 + DPAA Ethernet Frame Processing 129 + ============================== 130 + 131 + On Rx, buffers for the incoming frames are retrieved from one of the three 132 + existing buffers pools. The driver initializes and seeds these, each with 133 + buffers of different sizes: 1KB, 2KB and 4KB. 134 + 135 + On Tx, all transmitted frames are returned to the driver through Tx 136 + confirmation frame queues. The driver is then responsible for freeing the 137 + buffers. In order to do this properly, a backpointer is added to the buffer 138 + before transmission that points to the skb. When the buffer returns to the 139 + driver on a confirmation FQ, the skb can be correctly consumed. 140 + 141 + DPAA Ethernet Features 142 + ====================== 143 + 144 + Currently the DPAA Ethernet driver enables the basic features required for 145 + a Linux Ethernet driver. The support for advanced features will be added 146 + gradually. 147 + 148 + The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx 149 + checksum offload feature is enabled by default and cannot be controlled through 150 + ethtool. 151 + 152 + The driver has support for multiple prioritized Tx traffic classes. Priorities 153 + range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with 154 + strict priority levels. Each traffic class contains NR_CPU TX queues. By 155 + default, only one traffic class is enabled and the lowest priority Tx queues 156 + are used. Higher priority traffic classes can be enabled with the mqprio 157 + qdisc. For example, all four traffic classes are enabled on an interface with 158 + the following command. Furthermore, skb priority levels are mapped to traffic 159 + classes as follows: 160 + 161 + * priorities 0 to 3 - traffic class 0 (low priority) 162 + * priorities 4 to 7 - traffic class 1 (medium-low priority) 163 + * priorities 8 to 11 - traffic class 2 (medium-high priority) 164 + * priorities 12 to 15 - traffic class 3 (high priority) 165 + 166 + tc qdisc add dev <int> root handle 1: \ 167 + mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1 168 + 169 + Debugging 170 + ========= 171 + 172 + The following statistics are exported for each interface through ethtool: 173 + 174 + - interrupt count per CPU 175 + - Rx packets count per CPU 176 + - Tx packets count per CPU 177 + - Tx confirmed packets count per CPU 178 + - Tx S/G frames count per CPU 179 + - Tx error count per CPU 180 + - Rx error count per CPU 181 + - Rx error count per type 182 + - congestion related statistics: 183 + - congestion status 184 + - time spent in congestion 185 + - number of time the device entered congestion 186 + - dropped packets count per cause 187 + 188 + The driver also exports the following information in sysfs: 189 + 190 + - the FQ IDs for each FQ type 191 + /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids 192 + 193 + - the IDs of the buffer pools in use 194 + /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
+1 -1
Documentation/networking/scaling.txt
··· 122 122 or will be computed in the stack. Capable hardware can pass the hash in 123 123 the receive descriptor for the packet; this would usually be the same 124 124 hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in 125 - skb->rx_hash and can be used elsewhere in the stack as a hash of the 125 + skb->hash and can be used elsewhere in the stack as a hash of the 126 126 packet’s flow. 127 127 128 128 Each receive hardware queue has an associated list of CPUs to which
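Note on the scaling.txt change above: the field is skb->hash, and a driver that receives a Toeplitz/RSS hash from its RX descriptor typically records it with skb_set_hash(). A minimal sketch follows; the descriptor layout (struct my_rx_desc, rss_hash) is hypothetical and only skb_set_hash()/skb_get_hash() are real kernel APIs.

#include <linux/skbuff.h>

/* Hypothetical RX descriptor -- illustrative only. */
struct my_rx_desc {
	u32 rss_hash;		/* Toeplitz hash computed by the NIC */
};

static void my_rx_record_hash(struct sk_buff *skb,
			      const struct my_rx_desc *desc)
{
	/* Store the hardware hash in skb->hash and mark it as a full
	 * 4-tuple (L4) hash so the stack can reuse it for RPS/RFS. */
	skb_set_hash(skb, desc->rss_hash, PKT_HASH_TYPE_L4);
}

Consumers elsewhere in the stack read the same value through skb_get_hash(), which falls back to a software flow-dissector hash when the driver did not supply one.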
+13 -18
Documentation/networking/tcp.txt
··· 1 1 TCP protocol 2 2 ============ 3 3 4 - Last updated: 9 February 2008 4 + Last updated: 3 June 2017 5 5 6 6 Contents 7 7 ======== ··· 29 29 A congestion control mechanism can be registered through functions in 30 30 tcp_cong.c. The functions used by the congestion control mechanism are 31 31 registered via passing a tcp_congestion_ops struct to 32 - tcp_register_congestion_control. As a minimum name, ssthresh, 33 - cong_avoid must be valid. 32 + tcp_register_congestion_control. As a minimum, the congestion control 33 + mechanism must provide a valid name and must implement either ssthresh, 34 + cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook. 34 35 35 36 Private data for a congestion control mechanism is stored in tp->ca_priv. 36 37 tcp_ca(tp) returns a pointer to this space. This is preallocated space - it 37 38 is important to check the size of your private data will fit this space, or 38 - alternatively space could be allocated elsewhere and a pointer to it could 39 + alternatively, space could be allocated elsewhere and a pointer to it could 39 40 be stored here. 40 41 41 42 There are three kinds of congestion control algorithms currently: The 42 43 simplest ones are derived from TCP reno (highspeed, scalable) and just 43 - provide an alternative the congestion window calculation. More complex 44 + provide an alternative congestion window calculation. More complex 44 45 ones like BIC try to look at other events to provide better 45 46 heuristics. There are also round trip time based algorithms like 46 47 Vegas and Westwood+. ··· 50 49 needs to maintain fairness and performance. Please review current 51 50 research and RFC's before developing new modules. 52 51 53 - The method that is used to determine which congestion control mechanism is 54 - determined by the setting of the sysctl net.ipv4.tcp_congestion_control. 55 - The default congestion control will be the last one registered (LIFO); 56 - so if you built everything as modules, the default will be reno. If you 57 - build with the defaults from Kconfig, then CUBIC will be builtin (not a 58 - module) and it will end up the default. 52 + The default congestion control mechanism is chosen based on the 53 + DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default 54 + value then you can set it using sysctl net.ipv4.tcp_congestion_control. The 55 + module will be autoloaded if needed and you will get the expected protocol. If 56 + you ask for an unknown congestion method, then the sysctl attempt will fail. 59 57 60 - If you really want a particular default value then you will need 61 - to set it with the sysctl. If you use a sysctl, the module will be autoloaded 62 - if needed and you will get the expected protocol. If you ask for an 63 - unknown congestion method, then the sysctl attempt will fail. 64 - 65 - If you remove a tcp congestion control module, then you will get the next 58 + If you remove a TCP congestion control module, then you will get the next 66 59 available one. Since reno cannot be built as a module, and cannot be 67 - deleted, it will always be available. 60 + removed, it will always be available. 68 61 69 62 How the new TCP output machine [nyi] works. 70 63 ===========================================
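The rewritten tcp.txt paragraph above names the minimum hooks a congestion control module must provide: a name plus either the ssthresh/cong_avoid/undo_cwnd trio or the "omnipotent" cong_control hook. A minimal sketch of such a registration against the 4.12-era API, reusing the stock Reno helpers; the module and symbol names ("examplecc", example_ssthresh, ...) are placeholders, not an in-tree algorithm.

#include <linux/module.h>
#include <net/tcp.h>

/* Halve cwnd on loss, never below 2 segments (same policy as Reno). */
static u32 example_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}

static struct tcp_congestion_ops example_cc __read_mostly = {
	.name		= "examplecc",
	.owner		= THIS_MODULE,
	.ssthresh	= example_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,	/* stock slow start / AIMD growth */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* restore cwnd after spurious loss */
};

static int __init example_cc_register(void)
{
	return tcp_register_congestion_control(&example_cc);
}

static void __exit example_cc_unregister(void)
{
	tcp_unregister_congestion_control(&example_cc);
}

module_init(example_cc_register);
module_exit(example_cc_unregister);
MODULE_LICENSE("GPL");

Once loaded, it can be selected with sysctl net.ipv4.tcp_congestion_control=examplecc; as the updated text notes, asking for it by name also autoloads the module.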
+17 -18
MAINTAINERS
··· 1172 1172 1173 1173 ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE 1174 1174 M: Hartley Sweeten <hsweeten@visionengravers.com> 1175 - M: Ryan Mallon <rmallon@gmail.com> 1175 + M: Alexander Sverdlin <alexander.sverdlin@gmail.com> 1176 1176 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1177 1177 S: Maintained 1178 1178 F: arch/arm/mach-ep93xx/ ··· 1489 1489 M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> 1490 1490 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1491 1491 S: Maintained 1492 - F: arch/arm/mach-mvebu/ 1493 - F: drivers/rtc/rtc-armada38x.c 1494 1492 F: arch/arm/boot/dts/armada* 1495 1493 F: arch/arm/boot/dts/kirkwood* 1494 + F: arch/arm/configs/mvebu_*_defconfig 1495 + F: arch/arm/mach-mvebu/ 1496 1496 F: arch/arm64/boot/dts/marvell/armada* 1497 1497 F: drivers/cpufreq/mvebu-cpufreq.c 1498 - F: arch/arm/configs/mvebu_*_defconfig 1498 + F: drivers/irqchip/irq-armada-370-xp.c 1499 + F: drivers/irqchip/irq-mvebu-* 1500 + F: drivers/rtc/rtc-armada38x.c 1499 1501 1500 1502 ARM/Marvell Berlin SoC support 1501 1503 M: Jisheng Zhang <jszhang@marvell.com> ··· 1723 1721 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES 1724 1722 M: Kukjin Kim <kgene@kernel.org> 1725 1723 M: Krzysztof Kozlowski <krzk@kernel.org> 1726 - R: Javier Martinez Canillas <javier@osg.samsung.com> 1727 1724 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1728 1725 L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) 1729 1726 Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/ ··· 1830 1829 ARM/STI ARCHITECTURE 1831 1830 M: Patrice Chotard <patrice.chotard@st.com> 1832 1831 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1833 - L: kernel@stlinux.com 1834 1832 W: http://www.stlinux.com 1835 1833 S: Maintained 1836 1834 F: arch/arm/mach-sti/ ··· 2964 2964 2965 2965 C6X ARCHITECTURE 2966 2966 M: Mark Salter <msalter@redhat.com> 2967 - M: Aurelien Jacquiot <a-jacquiot@ti.com> 2967 + M: Aurelien Jacquiot <jacquiot.aurelien@gmail.com> 2968 2968 L: linux-c6x-dev@linux-c6x.org 2969 2969 W: http://www.linux-c6x.org/wiki/index.php/Main_Page 2970 2970 S: Maintained ··· 5628 5628 5629 5629 GENWQE (IBM Generic Workqueue Card) 5630 5630 M: Frank Haverkamp <haver@linux.vnet.ibm.com> 5631 - M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com> 5631 + M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com> 5632 5632 S: Supported 5633 5633 F: drivers/misc/genwqe/ 5634 5634 ··· 5673 5673 5674 5674 GPIO SUBSYSTEM 5675 5675 M: Linus Walleij <linus.walleij@linaro.org> 5676 - M: Alexandre Courbot <gnurou@gmail.com> 5677 5676 L: linux-gpio@vger.kernel.org 5678 5677 T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git 5679 5678 S: Maintained ··· 7714 7715 7715 7716 LIVE PATCHING 7716 7717 M: Josh Poimboeuf <jpoimboe@redhat.com> 7717 - M: Jessica Yu <jeyu@redhat.com> 7718 + M: Jessica Yu <jeyu@kernel.org> 7718 7719 M: Jiri Kosina <jikos@kernel.org> 7719 7720 M: Miroslav Benes <mbenes@suse.cz> 7720 7721 R: Petr Mladek <pmladek@suse.com> ··· 8515 8516 F: drivers/media/radio/radio-miropcm20* 8516 8517 8517 8518 MELLANOX MLX4 core VPI driver 8518 - M: Yishai Hadas <yishaih@mellanox.com> 8519 + M: Tariq Toukan <tariqt@mellanox.com> 8519 8520 L: netdev@vger.kernel.org 8520 8521 L: linux-rdma@vger.kernel.org 8521 8522 W: http://www.mellanox.com ··· 8523 8524 S: Supported 8524 8525 F: drivers/net/ethernet/mellanox/mlx4/ 8525 8526 F: include/linux/mlx4/ 8526 - F: include/uapi/rdma/mlx4-abi.h 8527 8527 8528 8528 MELLANOX MLX4 IB driver 8529 8529 M: Yishai Hadas <yishaih@mellanox.com> ··· 8532 8534 S: Supported 8533 8535 F: drivers/infiniband/hw/mlx4/ 8534 8536 F: include/linux/mlx4/ 8537 + F: include/uapi/rdma/mlx4-abi.h 8535 8538 8536 8539 MELLANOX MLX5 core VPI driver 8537 8540 M: Saeed Mahameed <saeedm@mellanox.com> ··· 8545 8546 S: Supported 8546 8547 F: drivers/net/ethernet/mellanox/mlx5/core/ 8547 8548 F: include/linux/mlx5/ 8548 - F: include/uapi/rdma/mlx5-abi.h 8549 8549 8550 8550 MELLANOX MLX5 IB driver 8551 8551 M: Matan Barak <matanb@mellanox.com> ··· 8555 8557 S: Supported 8556 8558 F: drivers/infiniband/hw/mlx5/ 8557 8559 F: include/linux/mlx5/ 8560 + F: include/uapi/rdma/mlx5-abi.h 8558 8561 8559 8562 MELEXIS MLX90614 DRIVER 8560 8563 M: Crt Mori <cmo@melexis.com> ··· 8595 8596 F: drivers/media/dvb-frontends/mn88473* 8596 8597 8597 8598 MODULE SUPPORT 8598 - M: Jessica Yu <jeyu@redhat.com> 8599 + M: Jessica Yu <jeyu@kernel.org> 8599 8600 M: Rusty Russell <rusty@rustcorp.com.au> 8600 8601 T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next 8601 8602 S: Maintained ··· 10457 10458 10458 10459 PXA RTC DRIVER 10459 10460 M: Robert Jarzmik <robert.jarzmik@free.fr> 10460 - L: rtc-linux@googlegroups.com 10461 + L: linux-rtc@vger.kernel.org 10461 10462 S: Maintained 10462 10463 10463 10464 QAT DRIVER ··· 10764 10765 REAL TIME CLOCK (RTC) SUBSYSTEM 10765 10766 M: Alessandro Zummo <a.zummo@towertech.it> 10766 10767 M: Alexandre Belloni <alexandre.belloni@free-electrons.com> 10767 - L: rtc-linux@googlegroups.com 10768 + L: linux-rtc@vger.kernel.org 10768 10769 Q: http://patchwork.ozlabs.org/project/rtc-linux/list/ 10769 10770 T: git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git 10770 10771 S: Maintained ··· 11275 11276 11276 11277 STI CEC DRIVER 11277 11278 M: Benjamin Gaignard <benjamin.gaignard@linaro.org> 11278 - L: kernel@stlinux.com 11279 11279 S: Maintained 11280 11280 F: drivers/staging/media/st-cec/ 11281 11281 F: Documentation/devicetree/bindings/media/stih-cec.txt ··· 11784 11786 S: Supported 11785 11787 F: arch/arm/mach-davinci/ 11786 11788 F: drivers/i2c/busses/i2c-davinci.c 11789 + F: arch/arm/boot/dts/da850* 11787 11790 11788 11791 TI DAVINCI SERIES MEDIA DRIVER 11789 11792 M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com> ··· 13868 13869 F: drivers/net/wireless/wl3501* 13869 13870 13870 13871 WOLFSON MICROELECTRONICS DRIVERS 13871 - L: patches@opensource.wolfsonmicro.com 13872 + L: patches@opensource.cirrus.com 13872 13873 T: git https://github.com/CirrusLogic/linux-drivers.git 13873 13874 W: https://github.com/CirrusLogic/linux-drivers/wiki 13874 13875 S: Supported
+2 -2
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 12 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc3 4 + EXTRAVERSION = 5 5 NAME = Fearless Coyote 6 6 7 7 # *DOCUMENTATION* ··· 1437 1437 @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build' 1438 1438 @echo ' make V=2 [targets] 2 => give reason for rebuild of target' 1439 1439 @echo ' make O=dir [targets] Locate all output files in "dir", including .config' 1440 - @echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)' 1440 + @echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)' 1441 1441 @echo ' make C=2 [targets] Force check of all c source with $$CHECK' 1442 1442 @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections' 1443 1443 @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where'
-2
arch/arc/include/asm/processor.h
··· 86 86 #define TSK_K_BLINK(tsk) TSK_K_REG(tsk, 4) 87 87 #define TSK_K_FP(tsk) TSK_K_REG(tsk, 0) 88 88 89 - #define thread_saved_pc(tsk) TSK_K_BLINK(tsk) 90 - 91 89 extern void start_thread(struct pt_regs * regs, unsigned long pc, 92 90 unsigned long usp); 93 91
+1 -1
arch/arc/mm/mmap.c
··· 65 65 66 66 vma = find_vma(mm, addr); 67 67 if (TASK_SIZE - len >= addr && 68 - (!vma || addr + len <= vma->vm_start)) 68 + (!vma || addr + len <= vm_start_gap(vma))) 69 69 return addr; 70 70 } 71 71
+1
arch/arm/Kconfig
··· 1416 1416 config VMSPLIT_3G 1417 1417 bool "3G/1G user/kernel split" 1418 1418 config VMSPLIT_3G_OPT 1419 + depends on !ARM_LPAE 1419 1420 bool "3G/1G user/kernel split (for full 1G low memory)" 1420 1421 config VMSPLIT_2G 1421 1422 bool "2G/2G user/kernel split"
+2 -3
arch/arm/boot/compressed/efi-header.S
··· 17 17 @ there. 18 18 .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000 19 19 #else 20 - mov r0, r0 20 + AR_CLASS( mov r0, r0 ) 21 + M_CLASS( nop.w ) 21 22 #endif 22 23 .endm 23 24 24 25 .macro __EFI_HEADER 25 26 #ifdef CONFIG_EFI_STUB 26 - b __efi_start 27 - 28 27 .set start_offset, __efi_start - start 29 28 .org start + 0x3c 30 29 @
+10 -7
arch/arm/boot/compressed/head.S
··· 130 130 .rept 7 131 131 __nop 132 132 .endr 133 - ARM( mov r0, r0 ) 134 - ARM( b 1f ) 135 - THUMB( badr r12, 1f ) 136 - THUMB( bx r12 ) 133 + #ifndef CONFIG_THUMB2_KERNEL 134 + mov r0, r0 135 + #else 136 + AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode 137 + M_CLASS( nop.w ) @ M: already in Thumb2 mode 138 + .thumb 139 + #endif 140 + W(b) 1f 137 141 138 142 .word _magic_sig @ Magic numbers to help the loader 139 143 .word _magic_start @ absolute load/run zImage address 140 144 .word _magic_end @ zImage end address 141 145 .word 0x04030201 @ endianness flag 142 146 143 - THUMB( .thumb ) 144 - 1: __EFI_HEADER 145 - 147 + __EFI_HEADER 148 + 1: 146 149 ARM_BE8( setend be ) @ go BE8 if compiled for BE8 147 150 AR_CLASS( mrs r9, cpsr ) 148 151 #ifdef CONFIG_ARM_VIRT_EXT
+2 -6
arch/arm/boot/dts/am335x-sl50.dts
··· 220 220 221 221 mmc1_pins: pinmux_mmc1_pins { 222 222 pinctrl-single,pins = < 223 - AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */ 223 + AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */ 224 224 >; 225 225 }; 226 226 ··· 280 280 AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */ 281 281 AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */ 282 282 AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */ 283 - /* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */ 284 - AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */ 285 - AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */ 286 - AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */ 287 283 /* PDI Bus - Battery system */ 288 284 AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */ 289 285 AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */ ··· 380 384 pinctrl-names = "default"; 381 385 pinctrl-0 = <&mmc1_pins>; 382 386 bus-width = <4>; 383 - cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; 387 + cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>; 384 388 vmmc-supply = <&vmmcsd_fixed>; 385 389 }; 386 390
+5
arch/arm/boot/dts/bcm283x.dtsi
··· 3 3 #include <dt-bindings/clock/bcm2835-aux.h> 4 4 #include <dt-bindings/gpio/gpio.h> 5 5 6 + /* firmware-provided startup stubs live here, where the secondary CPUs are 7 + * spinning. 8 + */ 9 + /memreserve/ 0x00000000 0x00001000; 10 + 6 11 /* This include file covers the common peripherals and configuration between 7 12 * bcm2835 and bcm2836 implementations, leaving the CPU configuration to 8 13 * bcm2835.dtsi and bcm2836.dtsi.
+6
arch/arm/boot/dts/imx6ul-14x14-evk.dts
··· 120 120 121 121 ethphy0: ethernet-phy@2 { 122 122 reg = <2>; 123 + micrel,led-mode = <1>; 124 + clocks = <&clks IMX6UL_CLK_ENET_REF>; 125 + clock-names = "rmii-ref"; 123 126 }; 124 127 125 128 ethphy1: ethernet-phy@1 { 126 129 reg = <1>; 130 + micrel,led-mode = <1>; 131 + clocks = <&clks IMX6UL_CLK_ENET2_REF>; 132 + clock-names = "rmii-ref"; 127 133 }; 128 134 }; 129 135 };
+2 -2
arch/arm/boot/dts/keystone-k2l-netcp.dtsi
··· 137 137 /* NetCP address range */ 138 138 ranges = <0 0x26000000 0x1000000>; 139 139 140 - clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>; 141 - clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk"; 140 + clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>; 141 + clock-names = "pa_clk", "ethss_clk", "cpts"; 142 142 dma-coherent; 143 143 144 144 ti,navigator-dmas = <&dma_gbe 0>,
+8
arch/arm/boot/dts/keystone-k2l.dtsi
··· 232 232 }; 233 233 }; 234 234 235 + osr: sram@70000000 { 236 + compatible = "mmio-sram"; 237 + reg = <0x70000000 0x10000>; 238 + #address-cells = <1>; 239 + #size-cells = <1>; 240 + clocks = <&clkosr>; 241 + }; 242 + 235 243 dspgpio0: keystone_dsp_gpio@02620240 { 236 244 compatible = "ti,keystone-dsp-gpio"; 237 245 gpio-controller;
+4 -3
arch/arm/boot/dts/sunxi-h3-h5.dtsi
··· 558 558 }; 559 559 560 560 r_ccu: clock@1f01400 { 561 - compatible = "allwinner,sun50i-a64-r-ccu"; 561 + compatible = "allwinner,sun8i-h3-r-ccu"; 562 562 reg = <0x01f01400 0x100>; 563 - clocks = <&osc24M>, <&osc32k>, <&iosc>; 564 - clock-names = "hosc", "losc", "iosc"; 563 + clocks = <&osc24M>, <&osc32k>, <&iosc>, 564 + <&ccu 9>; 565 + clock-names = "hosc", "losc", "iosc", "pll-periph"; 565 566 #clock-cells = <1>; 566 567 #reset-cells = <1>; 567 568 };
+1 -1
arch/arm/boot/dts/versatile-pb.dts
··· 1 - #include <versatile-ab.dts> 1 + #include "versatile-ab.dts" 2 2 3 3 / { 4 4 model = "ARM Versatile PB";
+3 -3
arch/arm/common/mcpm_entry.c
··· 235 235 return ret; 236 236 } 237 237 238 - typedef void (*phys_reset_t)(unsigned long); 238 + typedef typeof(cpu_reset) phys_reset_t; 239 239 240 240 void mcpm_cpu_power_down(void) 241 241 { ··· 300 300 * on the CPU. 301 301 */ 302 302 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); 303 - phys_reset(__pa_symbol(mcpm_entry_point)); 303 + phys_reset(__pa_symbol(mcpm_entry_point), false); 304 304 305 305 /* should never get here */ 306 306 BUG(); ··· 389 389 __mcpm_cpu_down(cpu, cluster); 390 390 391 391 phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); 392 - phys_reset(__pa_symbol(mcpm_entry_point)); 392 + phys_reset(__pa_symbol(mcpm_entry_point), false); 393 393 BUG(); 394 394 } 395 395
+2 -1
arch/arm/include/asm/device.h
··· 19 19 #ifdef CONFIG_XEN 20 20 const struct dma_map_ops *dev_dma_ops; 21 21 #endif 22 - bool dma_coherent; 22 + unsigned int dma_coherent:1; 23 + unsigned int dma_ops_setup:1; 23 24 }; 24 25 25 26 struct omap_device;
+1
arch/arm/include/asm/pgtable-nommu.h
··· 66 66 #define pgprot_noncached(prot) (prot) 67 67 #define pgprot_writecombine(prot) (prot) 68 68 #define pgprot_dmacoherent(prot) (prot) 69 + #define pgprot_device(prot) (prot) 69 70 70 71 71 72 /*
+1 -1
arch/arm/kernel/setup.c
··· 315 315 if (arch >= CPU_ARCH_ARMv6) { 316 316 unsigned int cachetype = read_cpuid_cachetype(); 317 317 318 - if ((arch == CPU_ARCH_ARMv7M) && !cachetype) { 318 + if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) { 319 319 cacheid = 0; 320 320 } else if ((cachetype & (7 << 29)) == 4 << 29) { 321 321 /* ARMv7 register format */
+2 -3
arch/arm/kvm/init.S
··· 104 104 @ - Write permission implies XN: disabled 105 105 @ - Instruction cache: enabled 106 106 @ - Data/Unified cache: enabled 107 - @ - Memory alignment checks: enabled 108 107 @ - MMU: enabled (this code must be run from an identity mapping) 109 108 mrc p15, 4, r0, c1, c0, 0 @ HSCR 110 109 ldr r2, =HSCTLR_MASK ··· 111 112 mrc p15, 0, r1, c1, c0, 0 @ SCTLR 112 113 ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) 113 114 and r1, r1, r2 114 - ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) ) 115 - THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) 115 + ARM( ldr r2, =(HSCTLR_M) ) 116 + THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) ) 116 117 orr r1, r1, r2 117 118 orr r0, r0, r1 118 119 mcr p15, 4, r0, c1, c0, 0 @ HSCR
+1
arch/arm/mach-at91/Kconfig
··· 1 1 menuconfig ARCH_AT91 2 2 bool "Atmel SoCs" 3 3 depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 4 + select ARM_CPU_SUSPEND if PM 4 5 select COMMON_CLK_AT91 5 6 select GPIOLIB 6 7 select PINCTRL
+6 -1
arch/arm/mach-davinci/pm.c
··· 153 153 davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL); 154 154 if (!davinci_sram_suspend) { 155 155 pr_err("PM: cannot allocate SRAM memory\n"); 156 - return -ENOMEM; 156 + ret = -ENOMEM; 157 + goto no_sram_mem; 157 158 } 158 159 159 160 davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend, ··· 162 161 163 162 suspend_set_ops(&davinci_pm_ops); 164 163 164 + return 0; 165 + 166 + no_sram_mem: 167 + iounmap(pm_config.ddrpsc_reg_base); 165 168 no_ddrpsc_mem: 166 169 iounmap(pm_config.ddrpll_reg_base); 167 170 no_ddrpll_mem:
+1 -1
arch/arm/mach-pxa/raumfeld.c
··· 377 377 }, 378 378 }; 379 379 380 - static struct property_entry raumfeld_rotary_properties[] = { 380 + static const struct property_entry raumfeld_rotary_properties[] __initconst = { 381 381 PROPERTY_ENTRY_INTEGER("rotary-encoder,steps-per-period", u32, 24), 382 382 PROPERTY_ENTRY_INTEGER("linux,axis", u32, REL_X), 383 383 PROPERTY_ENTRY_INTEGER("rotary-encoder,relative_axis", u32, 1),
+14 -15
arch/arm/mm/dma-mapping.c
··· 2311 2311 } 2312 2312 EXPORT_SYMBOL_GPL(arm_iommu_attach_device); 2313 2313 2314 - static void __arm_iommu_detach_device(struct device *dev) 2314 + /** 2315 + * arm_iommu_detach_device 2316 + * @dev: valid struct device pointer 2317 + * 2318 + * Detaches the provided device from a previously attached map. 2319 + * This voids the dma operations (dma_map_ops pointer) 2320 + */ 2321 + void arm_iommu_detach_device(struct device *dev) 2315 2322 { 2316 2323 struct dma_iommu_mapping *mapping; 2317 2324 ··· 2331 2324 iommu_detach_device(mapping->domain, dev); 2332 2325 kref_put(&mapping->kref, release_iommu_mapping); 2333 2326 to_dma_iommu_mapping(dev) = NULL; 2327 + set_dma_ops(dev, NULL); 2334 2328 2335 2329 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 2336 - } 2337 - 2338 - /** 2339 - * arm_iommu_detach_device 2340 - * @dev: valid struct device pointer 2341 - * 2342 - * Detaches the provided device from a previously attached map. 2343 - * This voids the dma operations (dma_map_ops pointer) 2344 - */ 2345 - void arm_iommu_detach_device(struct device *dev) 2346 - { 2347 - __arm_iommu_detach_device(dev); 2348 - set_dma_ops(dev, NULL); 2349 2330 } 2350 2331 EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 2351 2332 ··· 2374 2379 if (!mapping) 2375 2380 return; 2376 2381 2377 - __arm_iommu_detach_device(dev); 2382 + arm_iommu_detach_device(dev); 2378 2383 arm_iommu_release_mapping(mapping); 2379 2384 } 2380 2385 ··· 2425 2430 dev->dma_ops = xen_dma_ops; 2426 2431 } 2427 2432 #endif 2433 + dev->archdata.dma_ops_setup = true; 2428 2434 } 2429 2435 2430 2436 void arch_teardown_dma_ops(struct device *dev) 2431 2437 { 2438 + if (!dev->archdata.dma_ops_setup) 2439 + return; 2440 + 2432 2441 arm_teardown_iommu_dma_ops(dev); 2433 2442 }
+2 -2
arch/arm/mm/mmap.c
··· 90 90 91 91 vma = find_vma(mm, addr); 92 92 if (TASK_SIZE - len >= addr && 93 - (!vma || addr + len <= vma->vm_start)) 93 + (!vma || addr + len <= vm_start_gap(vma))) 94 94 return addr; 95 95 } 96 96 ··· 141 141 addr = PAGE_ALIGN(addr); 142 142 vma = find_vma(mm, addr); 143 143 if (TASK_SIZE - len >= addr && 144 - (!vma || addr + len <= vma->vm_start)) 144 + (!vma || addr + len <= vm_start_gap(vma))) 145 145 return addr; 146 146 } 147 147
+4 -4
arch/arm/mm/mmu.c
··· 1218 1218 1219 1219 high_memory = __va(arm_lowmem_limit - 1) + 1; 1220 1220 1221 + if (!memblock_limit) 1222 + memblock_limit = arm_lowmem_limit; 1223 + 1221 1224 /* 1222 1225 * Round the memblock limit down to a pmd size. This 1223 1226 * helps to ensure that we will allocate memory from the 1224 1227 * last full pmd, which should be mapped. 1225 1228 */ 1226 - if (memblock_limit) 1227 - memblock_limit = round_down(memblock_limit, PMD_SIZE); 1228 - if (!memblock_limit) 1229 - memblock_limit = arm_lowmem_limit; 1229 + memblock_limit = round_down(memblock_limit, PMD_SIZE); 1230 1230 1231 1231 if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { 1232 1232 if (memblock_end_of_DRAM() > arm_lowmem_limit) {
-4
arch/arm64/Kconfig
··· 1084 1084 def_bool y 1085 1085 depends on COMPAT && SYSVIPC 1086 1086 1087 - config KEYS_COMPAT 1088 - def_bool y 1089 - depends on COMPAT && KEYS 1090 - 1091 1087 endmenu 1092 1088 1093 1089 menu "Power management options"
+3 -2
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
··· 406 406 r_ccu: clock@1f01400 { 407 407 compatible = "allwinner,sun50i-a64-r-ccu"; 408 408 reg = <0x01f01400 0x100>; 409 - clocks = <&osc24M>, <&osc32k>, <&iosc>; 410 - clock-names = "hosc", "losc", "iosc"; 409 + clocks = <&osc24M>, <&osc32k>, <&iosc>, 410 + <&ccu 11>; 411 + clock-names = "hosc", "losc", "iosc", "pll-periph"; 411 412 #clock-cells = <1>; 412 413 #reset-cells = <1>; 413 414 };
+1 -1
arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi
··· 40 40 * OTHER DEALINGS IN THE SOFTWARE. 41 41 */ 42 42 43 - #include "sunxi-h3-h5.dtsi" 43 + #include <arm/sunxi-h3-h5.dtsi> 44 44 45 45 / { 46 46 cpus {
+1 -2
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
··· 231 231 cpm_crypto: crypto@800000 { 232 232 compatible = "inside-secure,safexcel-eip197"; 233 233 reg = <0x800000 0x200000>; 234 - interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING 235 - | IRQ_TYPE_LEVEL_HIGH)>, 234 + interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, 236 235 <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>, 237 236 <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>, 238 237 <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+1 -2
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
··· 221 221 cps_crypto: crypto@800000 { 222 222 compatible = "inside-secure,safexcel-eip197"; 223 223 reg = <0x800000 0x200000>; 224 - interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING 225 - | IRQ_TYPE_LEVEL_HIGH)>, 224 + interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, 226 225 <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>, 227 226 <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>, 228 227 <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
+10
arch/arm64/configs/defconfig
··· 68 68 CONFIG_PCIE_ARMADA_8K=y 69 69 CONFIG_PCI_AARDVARK=y 70 70 CONFIG_PCIE_RCAR=y 71 + CONFIG_PCIE_ROCKCHIP=m 71 72 CONFIG_PCI_HOST_GENERIC=y 72 73 CONFIG_PCI_XGENE=y 73 74 CONFIG_ARM64_VA_BITS_48=y ··· 209 208 CONFIG_WL18XX=m 210 209 CONFIG_WLCORE_SDIO=m 211 210 CONFIG_INPUT_EVDEV=y 211 + CONFIG_KEYBOARD_ADC=m 212 + CONFIG_KEYBOARD_CROS_EC=y 212 213 CONFIG_KEYBOARD_GPIO=y 213 214 CONFIG_INPUT_MISC=y 214 215 CONFIG_INPUT_PM8941_PWRKEY=y ··· 266 263 CONFIG_SPI_ORION=y 267 264 CONFIG_SPI_PL022=y 268 265 CONFIG_SPI_QUP=y 266 + CONFIG_SPI_ROCKCHIP=y 269 267 CONFIG_SPI_S3C64XX=y 270 268 CONFIG_SPI_SPIDEV=m 271 269 CONFIG_SPMI=y ··· 296 292 CONFIG_CPU_THERMAL=y 297 293 CONFIG_THERMAL_EMULATION=y 298 294 CONFIG_EXYNOS_THERMAL=y 295 + CONFIG_ROCKCHIP_THERMAL=m 299 296 CONFIG_WATCHDOG=y 300 297 CONFIG_S3C2410_WATCHDOG=y 301 298 CONFIG_MESON_GXBB_WATCHDOG=m ··· 305 300 CONFIG_BCM2835_WDT=y 306 301 CONFIG_MFD_CROS_EC=y 307 302 CONFIG_MFD_CROS_EC_I2C=y 303 + CONFIG_MFD_CROS_EC_SPI=y 308 304 CONFIG_MFD_EXYNOS_LPASS=m 309 305 CONFIG_MFD_HI655X_PMIC=y 310 306 CONFIG_MFD_MAX77620=y 311 307 CONFIG_MFD_SPMI_PMIC=y 312 308 CONFIG_MFD_RK808=y 313 309 CONFIG_MFD_SEC_CORE=y 310 + CONFIG_REGULATOR_FAN53555=y 314 311 CONFIG_REGULATOR_FIXED_VOLTAGE=y 315 312 CONFIG_REGULATOR_GPIO=y 316 313 CONFIG_REGULATOR_HI655X=y ··· 480 473 CONFIG_EXTCON_USB_GPIO=y 481 474 CONFIG_IIO=y 482 475 CONFIG_EXYNOS_ADC=y 476 + CONFIG_ROCKCHIP_SARADC=m 483 477 CONFIG_PWM=y 484 478 CONFIG_PWM_BCM2835=m 479 + CONFIG_PWM_CROS_EC=m 485 480 CONFIG_PWM_MESON=m 486 481 CONFIG_PWM_ROCKCHIP=y 487 482 CONFIG_PWM_SAMSUNG=y ··· 493 484 CONFIG_PHY_SUN4I_USB=y 494 485 CONFIG_PHY_ROCKCHIP_INNO_USB2=y 495 486 CONFIG_PHY_ROCKCHIP_EMMC=y 487 + CONFIG_PHY_ROCKCHIP_PCIE=m 496 488 CONFIG_PHY_XGENE=y 497 489 CONFIG_PHY_TEGRA_XUSB=y 498 490 CONFIG_ARM_SCPI_PROTOCOL=y
+3 -3
arch/arm64/include/asm/acpi.h
··· 23 23 #define ACPI_MADT_GICC_LENGTH \ 24 24 (acpi_gbl_FADT.header.revision < 6 ? 76 : 80) 25 25 26 - #define BAD_MADT_GICC_ENTRY(entry, end) \ 27 - (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ 28 - (entry)->header.length != ACPI_MADT_GICC_LENGTH) 26 + #define BAD_MADT_GICC_ENTRY(entry, end) \ 27 + (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \ 28 + (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end)) 29 29 30 30 /* Basic configuration for ACPI */ 31 31 #ifdef CONFIG_ACPI
+4
arch/arm64/include/asm/sysreg.h
··· 286 286 #define SCTLR_ELx_A (1 << 1) 287 287 #define SCTLR_ELx_M 1 288 288 289 + #define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \ 290 + (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \ 291 + (1 << 28) | (1 << 29)) 292 + 289 293 #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ 290 294 SCTLR_ELx_SA | SCTLR_ELx_I) 291 295
+3 -1
arch/arm64/kernel/pci.c
··· 191 191 return NULL; 192 192 193 193 root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node); 194 - if (!root_ops) 194 + if (!root_ops) { 195 + kfree(ri); 195 196 return NULL; 197 + } 196 198 197 199 ri->cfg = pci_acpi_setup_ecam_mapping(root); 198 200 if (!ri->cfg) {
+3 -2
arch/arm64/kernel/vdso.c
··· 221 221 /* tkr_mono.cycle_last == tkr_raw.cycle_last */ 222 222 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; 223 223 vdso_data->raw_time_sec = tk->raw_time.tv_sec; 224 - vdso_data->raw_time_nsec = tk->raw_time.tv_nsec; 224 + vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec << 225 + tk->tkr_raw.shift) + 226 + tk->tkr_raw.xtime_nsec; 225 227 vdso_data->xtime_clock_sec = tk->xtime_sec; 226 228 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; 227 - /* tkr_raw.xtime_nsec == 0 */ 228 229 vdso_data->cs_mono_mult = tk->tkr_mono.mult; 229 230 vdso_data->cs_raw_mult = tk->tkr_raw.mult; 230 231 /* tkr_mono.shift == tkr_raw.shift */
-1
arch/arm64/kernel/vdso/gettimeofday.S
··· 256 256 seqcnt_check fail=monotonic_raw 257 257 258 258 /* All computations are done with left-shifted nsecs. */ 259 - lsl x14, x14, x12 260 259 get_nsec_per_sec res=x9 261 260 lsl x9, x9, x12 262 261
+7 -4
arch/arm64/kvm/hyp-init.S
··· 106 106 tlbi alle2 107 107 dsb sy 108 108 109 - mrs x4, sctlr_el2 110 - and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2 111 - ldr x5, =SCTLR_ELx_FLAGS 112 - orr x4, x4, x5 109 + /* 110 + * Preserve all the RES1 bits while setting the default flags, 111 + * as well as the EE bit on BE. Drop the A flag since the compiler 112 + * is allowed to generate unaligned accesses. 113 + */ 114 + ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A)) 115 + CPU_BE( orr x4, x4, #SCTLR_ELx_EE) 113 116 msr sctlr_el2, x4 114 117 isb 115 118
+5 -5
arch/arm64/kvm/vgic-sys-reg-v3.c
··· 65 65 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout. 66 66 * The vgic_set_vmcr() will convert to ICH_VMCR layout. 67 67 */ 68 - vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK; 69 - vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK; 68 + vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT; 69 + vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT; 70 70 vgic_set_vmcr(vcpu, &vmcr); 71 71 } else { 72 72 val = 0; ··· 83 83 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout. 84 84 * Extract it directly using ICC_CTLR_EL1 reg definitions. 85 85 */ 86 - val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK; 87 - val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK; 86 + val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK; 87 + val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK; 88 88 89 89 p->regval = val; 90 90 } ··· 135 135 p->regval = 0; 136 136 137 137 vgic_get_vmcr(vcpu, &vmcr); 138 - if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) { 138 + if (!vmcr.cbpr) { 139 139 if (p->is_write) { 140 140 vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >> 141 141 ICC_BPR1_EL1_SHIFT;
+5 -2
arch/arm64/net/bpf_jit_comp.c
··· 36 36 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0) 37 37 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1) 38 38 #define TCALL_CNT (MAX_BPF_JIT_REG + 2) 39 + #define TMP_REG_3 (MAX_BPF_JIT_REG + 3) 39 40 40 41 /* Map BPF registers to A64 registers */ 41 42 static const int bpf2a64[] = { ··· 58 57 /* temporary registers for internal BPF JIT */ 59 58 [TMP_REG_1] = A64_R(10), 60 59 [TMP_REG_2] = A64_R(11), 60 + [TMP_REG_3] = A64_R(12), 61 61 /* tail_call_cnt */ 62 62 [TCALL_CNT] = A64_R(26), 63 63 /* temporary register for blinding constants */ ··· 321 319 const u8 src = bpf2a64[insn->src_reg]; 322 320 const u8 tmp = bpf2a64[TMP_REG_1]; 323 321 const u8 tmp2 = bpf2a64[TMP_REG_2]; 322 + const u8 tmp3 = bpf2a64[TMP_REG_3]; 324 323 const s16 off = insn->off; 325 324 const s32 imm = insn->imm; 326 325 const int i = insn - ctx->prog->insnsi; ··· 692 689 emit(A64_PRFM(tmp, PST, L1, STRM), ctx); 693 690 emit(A64_LDXR(isdw, tmp2, tmp), ctx); 694 691 emit(A64_ADD(isdw, tmp2, tmp2, src), ctx); 695 - emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx); 692 + emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx); 696 693 jmp_offset = -3; 697 694 check_imm19(jmp_offset); 698 - emit(A64_CBNZ(0, tmp2, jmp_offset), ctx); 695 + emit(A64_CBNZ(0, tmp3, jmp_offset), ctx); 699 696 break; 700 697 701 698 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
-5
arch/blackfin/include/asm/processor.h
··· 75 75 { 76 76 } 77 77 78 - /* 79 - * Return saved PC of a blocked thread. 80 - */ 81 - #define thread_saved_pc(tsk) (tsk->thread.pc) 82 - 83 78 unsigned long get_wchan(struct task_struct *p); 84 79 85 80 #define KSTK_EIP(tsk) \
-5
arch/c6x/include/asm/processor.h
··· 96 96 #define release_segments(mm) do { } while (0) 97 97 98 98 /* 99 - * saved PC of a blocked thread. 100 - */ 101 - #define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc) 102 - 103 - /* 104 99 * saved kernel SP and DP of a blocked thread. 105 100 */ 106 101 #ifdef _BIG_ENDIAN
-8
arch/cris/arch-v10/kernel/process.c
··· 69 69 while(1) /* waiting for RETRIBUTION! */ ; 70 70 } 71 71 72 - /* 73 - * Return saved PC of a blocked thread. 74 - */ 75 - unsigned long thread_saved_pc(struct task_struct *t) 76 - { 77 - return task_pt_regs(t)->irp; 78 - } 79 - 80 72 /* setup the child's kernel stack with a pt_regs and switch_stack on it. 81 73 * it will be un-nested during _resume and _ret_from_sys_call when the 82 74 * new thread is scheduled.
-8
arch/cris/arch-v32/kernel/process.c
··· 85 85 } 86 86 87 87 /* 88 - * Return saved PC of a blocked thread. 89 - */ 90 - unsigned long thread_saved_pc(struct task_struct *t) 91 - { 92 - return task_pt_regs(t)->erp; 93 - } 94 - 95 - /* 96 88 * Setup the child's kernel stack with a pt_regs and call switch_stack() on it. 97 89 * It will be unnested during _resume and _ret_from_sys_call when the new thread 98 90 * is scheduled.
-2
arch/cris/include/asm/processor.h
··· 52 52 53 53 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) 54 54 55 - extern unsigned long thread_saved_pc(struct task_struct *tsk); 56 - 57 55 /* Free all resources held by a thread. */ 58 56 static inline void release_thread(struct task_struct *dead_task) 59 57 {
-5
arch/frv/include/asm/processor.h
··· 96 96 #define release_segments(mm) do { } while (0) 97 97 #define forget_segments() do { } while (0) 98 98 99 - /* 100 - * Return saved PC of a blocked thread. 101 - */ 102 - extern unsigned long thread_saved_pc(struct task_struct *tsk); 103 - 104 99 unsigned long get_wchan(struct task_struct *p); 105 100 106 101 #define KSTK_EIP(tsk) ((tsk)->thread.frame0->pc)
+6
arch/frv/include/asm/timex.h
··· 16 16 #define vxtime_lock() do {} while (0) 17 17 #define vxtime_unlock() do {} while (0) 18 18 19 + /* This attribute is used in include/linux/jiffies.h alongside with 20 + * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp 21 + * for frv does not contain another section specification. 22 + */ 23 + #define __jiffy_arch_data __attribute__((__section__(".data"))) 24 + 19 25 #endif 20 26
-9
arch/frv/kernel/process.c
··· 198 198 return 0; 199 199 } 200 200 201 - unsigned long thread_saved_pc(struct task_struct *tsk) 202 - { 203 - /* Check whether the thread is blocked in resume() */ 204 - if (in_sched_functions(tsk->thread.pc)) 205 - return ((unsigned long *)tsk->thread.fp)[2]; 206 - else 207 - return tsk->thread.pc; 208 - } 209 - 210 201 int elf_check_arch(const struct elf32_hdr *hdr) 211 202 { 212 203 unsigned long hsr0 = __get_HSR(0);
+1 -1
arch/frv/mm/elf-fdpic.c
··· 75 75 addr = PAGE_ALIGN(addr); 76 76 vma = find_vma(current->mm, addr); 77 77 if (TASK_SIZE - len >= addr && 78 - (!vma || addr + len <= vma->vm_start)) 78 + (!vma || addr + len <= vm_start_gap(vma))) 79 79 goto success; 80 80 } 81 81
-4
arch/h8300/include/asm/processor.h
··· 110 110 { 111 111 } 112 112 113 - /* 114 - * Return saved PC of a blocked thread. 115 - */ 116 - unsigned long thread_saved_pc(struct task_struct *tsk); 117 113 unsigned long get_wchan(struct task_struct *p); 118 114 119 115 #define KSTK_EIP(tsk) \
-5
arch/h8300/kernel/process.c
··· 129 129 return 0; 130 130 } 131 131 132 - unsigned long thread_saved_pc(struct task_struct *tsk) 133 - { 134 - return ((struct pt_regs *)tsk->thread.esp0)->pc; 135 - } 136 - 137 132 unsigned long get_wchan(struct task_struct *p) 138 133 { 139 134 unsigned long fp, pc;
-3
arch/hexagon/include/asm/processor.h
··· 33 33 /* task_struct, defined elsewhere, is the "process descriptor" */ 34 34 struct task_struct; 35 35 36 - /* this is defined in arch/process.c */ 37 - extern unsigned long thread_saved_pc(struct task_struct *tsk); 38 - 39 36 extern void start_thread(struct pt_regs *, unsigned long, unsigned long); 40 37 41 38 /*
-8
arch/hexagon/kernel/process.c
··· 61 61 } 62 62 63 63 /* 64 - * Return saved PC of a blocked thread 65 - */ 66 - unsigned long thread_saved_pc(struct task_struct *tsk) 67 - { 68 - return 0; 69 - } 70 - 71 - /* 72 64 * Copy architecture-specific thread state 73 65 */ 74 66 int copy_thread(unsigned long clone_flags, unsigned long usp,
+2 -3
arch/hexagon/mm/uaccess.c
··· 37 37 long uncleared; 38 38 39 39 while (count > PAGE_SIZE) { 40 - uncleared = __copy_to_user_hexagon(dest, &empty_zero_page, 41 - PAGE_SIZE); 40 + uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE); 42 41 if (uncleared) 43 42 return count - (PAGE_SIZE - uncleared); 44 43 count -= PAGE_SIZE; 45 44 dest += PAGE_SIZE; 46 45 } 47 46 if (count) 48 - count = __copy_to_user_hexagon(dest, &empty_zero_page, count); 47 + count = raw_copy_to_user(dest, &empty_zero_page, count); 49 48 50 49 return count; 51 50 }
-17
arch/ia64/include/asm/processor.h
··· 602 602 } 603 603 604 604 /* 605 - * Return saved PC of a blocked thread. 606 - * Note that the only way T can block is through a call to schedule() -> switch_to(). 607 - */ 608 - static inline unsigned long 609 - thread_saved_pc (struct task_struct *t) 610 - { 611 - struct unw_frame_info info; 612 - unsigned long ip; 613 - 614 - unw_init_from_blocked_task(&info, t); 615 - if (unw_unwind(&info) < 0) 616 - return 0; 617 - unw_get_ip(&info, &ip); 618 - return ip; 619 - } 620 - 621 - /* 622 605 * Get the current instruction/program counter value. 623 606 */ 624 607 #define current_text_addr() \
-2
arch/m32r/include/asm/processor.h
··· 122 122 extern void copy_segments(struct task_struct *p, struct mm_struct * mm); 123 123 extern void release_segments(struct mm_struct * mm); 124 124 125 - extern unsigned long thread_saved_pc(struct task_struct *); 126 - 127 125 /* Copy and release all segment info associated with a VM */ 128 126 #define copy_segments(p, mm) do { } while (0) 129 127 #define release_segments(mm) do { } while (0)
-8
arch/m32r/kernel/process.c
··· 39 39 40 40 #include <linux/err.h> 41 41 42 - /* 43 - * Return saved PC of a blocked thread. 44 - */ 45 - unsigned long thread_saved_pc(struct task_struct *tsk) 46 - { 47 - return tsk->thread.lr; 48 - } 49 - 50 42 void (*pm_power_off)(void) = NULL; 51 43 EXPORT_SYMBOL(pm_power_off); 52 44
-2
arch/m68k/include/asm/processor.h
··· 130 130 { 131 131 } 132 132 133 - extern unsigned long thread_saved_pc(struct task_struct *tsk); 134 - 135 133 unsigned long get_wchan(struct task_struct *p); 136 134 137 135 #define KSTK_EIP(tsk) \
-14
arch/m68k/kernel/process.c
··· 40 40 asmlinkage void ret_from_fork(void); 41 41 asmlinkage void ret_from_kernel_thread(void); 42 42 43 - 44 - /* 45 - * Return saved PC from a blocked thread 46 - */ 47 - unsigned long thread_saved_pc(struct task_struct *tsk) 48 - { 49 - struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp; 50 - /* Check whether the thread is blocked in resume() */ 51 - if (in_sched_functions(sw->retpc)) 52 - return ((unsigned long *)sw->a6)[1]; 53 - else 54 - return sw->retpc; 55 - } 56 - 57 43 void arch_cpu_idle(void) 58 44 { 59 45 #if defined(MACH_ATARI_ONLY)
-6
arch/microblaze/include/asm/processor.h
··· 69 69 { 70 70 } 71 71 72 - extern unsigned long thread_saved_pc(struct task_struct *t); 73 - 74 72 extern unsigned long get_wchan(struct task_struct *p); 75 73 76 74 # define KSTK_EIP(tsk) (0) ··· 118 120 static inline void release_thread(struct task_struct *dead_task) 119 121 { 120 122 } 121 - 122 - /* Return saved (kernel) PC of a blocked thread. */ 123 - # define thread_saved_pc(tsk) \ 124 - ((tsk)->thread.regs ? (tsk)->thread.regs->r15 : 0) 125 123 126 124 unsigned long get_wchan(struct task_struct *p); 127 125
-17
arch/microblaze/kernel/process.c
··· 119 119 return 0; 120 120 } 121 121 122 - #ifndef CONFIG_MMU 123 - /* 124 - * Return saved PC of a blocked thread. 125 - */ 126 - unsigned long thread_saved_pc(struct task_struct *tsk) 127 - { 128 - struct cpu_context *ctx = 129 - &(((struct thread_info *)(tsk->stack))->cpu_context); 130 - 131 - /* Check whether the thread is blocked in resume() */ 132 - if (in_sched_functions(ctx->r15)) 133 - return (unsigned long)ctx->r15; 134 - else 135 - return ctx->r14; 136 - } 137 - #endif 138 - 139 122 unsigned long get_wchan(struct task_struct *p) 140 123 { 141 124 /* TBD (used by procfs) */
+5 -5
arch/mips/boot/Makefile
··· 128 128 -DADDR_BITS=$(ADDR_BITS) \ 129 129 -DADDR_CELLS=$(itb_addr_cells) 130 130 131 - $(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 131 + $(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 132 132 $(call if_changed_dep,cpp_its_S,none,vmlinux.bin) 133 133 134 - $(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 134 + $(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 135 135 $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz) 136 136 137 - $(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 137 + $(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 138 138 $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2) 139 139 140 - $(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 140 + $(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 141 141 $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma) 142 142 143 - $(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE 143 + $(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE 144 144 $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo) 145 145 146 146 quiet_cmd_itb-image = ITB $@
+5
arch/mips/include/asm/highmem.h
··· 35 35 * easily, subsequent pte tables have to be allocated in one physical 36 36 * chunk of RAM. 37 37 */ 38 + #ifdef CONFIG_PHYS_ADDR_T_64BIT 39 + #define LAST_PKMAP 512 40 + #else 38 41 #define LAST_PKMAP 1024 42 + #endif 43 + 39 44 #define LAST_PKMAP_MASK (LAST_PKMAP-1) 40 45 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) 41 46 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
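A note on the LAST_PKMAP change above: one plausible reading is that it is driven by page-table geometry, since CONFIG_PHYS_ADDR_T_64BIT doubles the PTE size and a single page-table page then covers only half as many kmap slots. A self-contained C sketch of that arithmetic, assuming 4 KiB pages and 4/8-byte PTEs (example values, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const unsigned long page_size = 4096;             /* assumed 4 KiB page */
    const unsigned long pte_32bit = sizeof(uint32_t); /* 32-bit phys addresses */
    const unsigned long pte_64bit = sizeof(uint64_t); /* CONFIG_PHYS_ADDR_T_64BIT */

    /* One page-table page has to cover the whole PKMAP window. */
    printf("slots with 4-byte PTEs: %lu\n", page_size / pte_32bit); /* 1024 */
    printf("slots with 8-byte PTEs: %lu\n", page_size / pte_64bit); /* 512 */
    return 0;
}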
+2 -1
arch/mips/include/asm/kprobes.h
··· 43 43 44 44 #define flush_insn_slot(p) \ 45 45 do { \ 46 - flush_icache_range((unsigned long)p->addr, \ 46 + if (p->addr) \ 47 + flush_icache_range((unsigned long)p->addr, \ 47 48 (unsigned long)p->addr + \ 48 49 (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ 49 50 } while (0)
+6 -1
arch/mips/include/asm/pgtable-32.h
··· 19 19 #define __ARCH_USE_5LEVEL_HACK 20 20 #include <asm-generic/pgtable-nopmd.h> 21 21 22 + #ifdef CONFIG_HIGHMEM 23 + #include <asm/highmem.h> 24 + #endif 25 + 22 26 extern int temp_tlb_entry; 23 27 24 28 /* ··· 66 62 67 63 #define VMALLOC_START MAP_BASE 68 64 69 - #define PKMAP_BASE (0xfe000000UL) 65 + #define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1)) 66 + #define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP) 70 67 71 68 #ifdef CONFIG_HIGHMEM 72 69 # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
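With the hunk above, PKMAP_BASE is no longer the fixed 0xfe000000 but is derived downwards from the fixmap area. A minimal standalone C sketch of the same address arithmetic; FIXADDR_START, LAST_PKMAP and PAGE_SHIFT here are stand-in example values, not the real kernel constants:

#include <stdio.h>

int main(void)
{
    /* Example inputs only; the real values come from asm/fixmap.h and friends. */
    unsigned long fixaddr_start = 0xfffe0000UL;
    unsigned long page_shift = 12;
    unsigned long last_pkmap = 512;
    unsigned long page_size = 1UL << page_shift;

    /* PKMAP_END: FIXADDR_START rounded down to a LAST_PKMAP-page boundary */
    unsigned long pkmap_end = fixaddr_start & ~((last_pkmap << page_shift) - 1);
    /* PKMAP_BASE: LAST_PKMAP pages below that */
    unsigned long pkmap_base = pkmap_end - page_size * last_pkmap;

    printf("PKMAP_END  = %#lx\n", pkmap_end);
    printf("PKMAP_BASE = %#lx\n", pkmap_base);
    return 0;
}

Rounding PKMAP_END down to a LAST_PKMAP-page boundary appears to be what keeps the whole kmap window coverable by a single page table, matching the highmem.h change earlier in this diff.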
+3 -1
arch/mips/kernel/branch.c
··· 804 804 break; 805 805 } 806 806 /* Compact branch: BNEZC || JIALC */ 807 - if (insn.i_format.rs) 807 + if (!insn.i_format.rs) { 808 + /* JIALC: set $31/ra */ 808 809 regs->regs[31] = epc + 4; 810 + } 809 811 regs->cp0_epc += 8; 810 812 break; 811 813 #endif
+3
arch/mips/kernel/entry.S
··· 11 11 #include <asm/asm.h> 12 12 #include <asm/asmmacro.h> 13 13 #include <asm/compiler.h> 14 + #include <asm/irqflags.h> 14 15 #include <asm/regdef.h> 15 16 #include <asm/mipsregs.h> 16 17 #include <asm/stackframe.h> ··· 120 119 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS 121 120 beqz t0, work_notifysig 122 121 work_resched: 122 + TRACE_IRQS_OFF 123 123 jal schedule 124 124 125 125 local_irq_disable # make sure need_resched and ··· 157 155 beqz t0, work_pending # trace bit set? 158 156 local_irq_enable # could let syscall_trace_leave() 159 157 # call schedule() instead 158 + TRACE_IRQS_ON 160 159 move a0, sp 161 160 jal syscall_trace_leave 162 161 b resume_userspace
+5 -19
arch/mips/kernel/ftrace.c
··· 38 38 39 39 #endif 40 40 41 - /* 42 - * Check if the address is in kernel space 43 - * 44 - * Clone core_kernel_text() from kernel/extable.c, but doesn't call 45 - * init_kernel_text() for Ftrace doesn't trace functions in init sections. 46 - */ 47 - static inline int in_kernel_space(unsigned long ip) 48 - { 49 - if (ip >= (unsigned long)_stext && 50 - ip <= (unsigned long)_etext) 51 - return 1; 52 - return 0; 53 - } 54 - 55 41 #ifdef CONFIG_DYNAMIC_FTRACE 56 42 57 43 #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ ··· 184 198 * If ip is in kernel space, no long call, otherwise, long call is 185 199 * needed. 186 200 */ 187 - new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; 201 + new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F; 188 202 #ifdef CONFIG_64BIT 189 203 return ftrace_modify_code(ip, new); 190 204 #else ··· 204 218 unsigned int new; 205 219 unsigned long ip = rec->ip; 206 220 207 - new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; 221 + new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; 208 222 209 223 #ifdef CONFIG_64BIT 210 224 return ftrace_modify_code(ip, new); 211 225 #else 212 - return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ? 226 + return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ? 213 227 INSN_NOP : insn_la_mcount[1]); 214 228 #endif 215 229 } ··· 275 289 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for 276 290 * kernel, move after the instruction "move ra, at"(offset is 16) 277 291 */ 278 - ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); 292 + ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24); 279 293 280 294 /* 281 295 * search the text until finding the non-store instruction or "s{d,w} ··· 380 394 * entries configured through the tracing/set_graph_function interface. 381 395 */ 382 396 383 - insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; 397 + insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; 384 398 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); 385 399 386 400 /* Only trace if the calling function expects to */
+1 -1
arch/mips/kernel/head.S
··· 106 106 beq t0, t1, dtb_found 107 107 #endif 108 108 li t1, -2 109 - beq a0, t1, dtb_found 110 109 move t2, a1 110 + beq a0, t1, dtb_found 111 111 112 112 li t2, 0 113 113 dtb_found:
+5 -1
arch/mips/kernel/perf_event_mipsxx.c
··· 1597 1597 break; 1598 1598 case CPU_P5600: 1599 1599 case CPU_P6600: 1600 - case CPU_I6400: 1601 1600 /* 8-bit event numbers */ 1602 1601 raw_id = config & 0x1ff; 1603 1602 base_id = raw_id & 0xff; ··· 1608 1609 #ifdef CONFIG_MIPS_MT_SMP 1609 1610 raw_event.range = P; 1610 1611 #endif 1612 + break; 1613 + case CPU_I6400: 1614 + /* 8-bit event numbers */ 1615 + base_id = config & 0xff; 1616 + raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; 1611 1617 break; 1612 1618 case CPU_1004K: 1613 1619 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
+1 -8
arch/mips/kernel/pm-cps.c
··· 56 56 * state. Actually per-core rather than per-CPU. 57 57 */ 58 58 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); 59 - static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); 60 59 61 60 /* Indicates online CPUs coupled with the current CPU */ 62 61 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); ··· 641 642 { 642 643 enum cps_pm_state state; 643 644 unsigned core = cpu_data[cpu].core; 644 - unsigned dlinesz = cpu_data[cpu].dcache.linesz; 645 645 void *entry_fn, *core_rc; 646 646 647 647 for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { ··· 660 662 } 661 663 662 664 if (!per_cpu(ready_count, core)) { 663 - core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); 665 + core_rc = kmalloc(sizeof(u32), GFP_KERNEL); 664 666 if (!core_rc) { 665 667 pr_err("Failed allocate core %u ready_count\n", core); 666 668 return -ENOMEM; 667 669 } 668 - per_cpu(ready_count_alloc, core) = core_rc; 669 - 670 - /* Ensure ready_count is aligned to a cacheline boundary */ 671 - core_rc += dlinesz - 1; 672 - core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); 673 670 per_cpu(ready_count, core) = core_rc; 674 671 } 675 672
-1
arch/mips/kernel/process.c
··· 120 120 struct thread_info *ti = task_thread_info(p); 121 121 struct pt_regs *childregs, *regs = current_pt_regs(); 122 122 unsigned long childksp; 123 - p->set_child_tid = p->clear_child_tid = NULL; 124 123 125 124 childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; 126 125
+2
arch/mips/kernel/traps.c
··· 201 201 { 202 202 struct pt_regs regs; 203 203 mm_segment_t old_fs = get_fs(); 204 + 205 + regs.cp0_status = KSU_KERNEL; 204 206 if (sp) { 205 207 regs.regs[29] = (unsigned long)sp; 206 208 regs.regs[31] = 0;
+5 -1
arch/mips/kvm/tlb.c
··· 166 166 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, 167 167 bool user, bool kernel) 168 168 { 169 - int idx_user, idx_kernel; 169 + /* 170 + * Initialize idx_user and idx_kernel to workaround bogus 171 + * maybe-initialized warning when using GCC 6. 172 + */ 173 + int idx_user = 0, idx_kernel = 0; 170 174 unsigned long flags, old_entryhi; 171 175 172 176 local_irq_save(flags);
+4 -1
arch/mips/math-emu/dp_maddf.c
··· 54 54 return ieee754dp_nanxcpt(z); 55 55 case IEEE754_CLASS_DNORM: 56 56 DPDNORMZ; 57 - /* QNAN is handled separately below */ 57 + /* QNAN and ZERO cases are handled separately below */ 58 58 } 59 59 60 60 switch (CLPAIR(xc, yc)) { ··· 209 209 ((rm << (DP_FBITS + 1 + 3 + 1)) != 0); 210 210 } 211 211 assert(rm & (DP_HIDDEN_BIT << 3)); 212 + 213 + if (zc == IEEE754_CLASS_ZERO) 214 + return ieee754dp_format(rs, re, rm); 212 215 213 216 /* And now the addition */ 214 217 assert(zm & DP_HIDDEN_BIT);
+4 -1
arch/mips/math-emu/sp_maddf.c
··· 54 54 return ieee754sp_nanxcpt(z); 55 55 case IEEE754_CLASS_DNORM: 56 56 SPDNORMZ; 57 - /* QNAN is handled separately below */ 57 + /* QNAN and ZERO cases are handled separately below */ 58 58 } 59 59 60 60 switch (CLPAIR(xc, yc)) { ··· 202 202 ((rm << (SP_FBITS + 1 + 3 + 1)) != 0); 203 203 } 204 204 assert(rm & (SP_HIDDEN_BIT << 3)); 205 + 206 + if (zc == IEEE754_CLASS_ZERO) 207 + return ieee754sp_format(rs, re, rm); 205 208 206 209 /* And now the addition */ 207 210
+18 -5
arch/mips/mm/dma-default.c
··· 68 68 * systems and only the R10000 and R12000 are used in such systems, the 69 69 * SGI IP28 Indigo² rsp. SGI IP32 aka O2. 70 70 */ 71 - static inline int cpu_needs_post_dma_flush(struct device *dev) 71 + static inline bool cpu_needs_post_dma_flush(struct device *dev) 72 72 { 73 - return !plat_device_is_coherent(dev) && 74 - (boot_cpu_type() == CPU_R10000 || 75 - boot_cpu_type() == CPU_R12000 || 76 - boot_cpu_type() == CPU_BMIPS5000); 73 + if (plat_device_is_coherent(dev)) 74 + return false; 75 + 76 + switch (boot_cpu_type()) { 77 + case CPU_R10000: 78 + case CPU_R12000: 79 + case CPU_BMIPS5000: 80 + return true; 81 + 82 + default: 83 + /* 84 + * Presence of MAARs suggests that the CPU supports 85 + * speculatively prefetching data, and therefore requires 86 + * the post-DMA flush/invalidate. 87 + */ 88 + return cpu_has_maar; 89 + } 77 90 } 78 91 79 92 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+1 -1
arch/mips/mm/mmap.c
··· 93 93 94 94 vma = find_vma(mm, addr); 95 95 if (TASK_SIZE - len >= addr && 96 - (!vma || addr + len <= vma->vm_start)) 96 + (!vma || addr + len <= vm_start_gap(vma))) 97 97 return addr; 98 98 } 99 99
+3 -3
arch/mips/mm/pgtable-32.c
··· 51 51 /* 52 52 * Fixed mappings: 53 53 */ 54 - vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; 55 - fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); 54 + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); 55 + fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base); 56 56 57 57 #ifdef CONFIG_HIGHMEM 58 58 /* 59 59 * Permanent kmaps: 60 60 */ 61 61 vaddr = PKMAP_BASE; 62 - fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); 62 + fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); 63 63 64 64 pgd = swapper_pg_dir + __pgd_offset(vaddr); 65 65 pud = pud_offset(pgd, vaddr);
-5
arch/mn10300/include/asm/processor.h
··· 132 132 /* Free all resources held by a thread. */ 133 133 extern void release_thread(struct task_struct *); 134 134 135 - /* 136 - * Return saved PC of a blocked thread. 137 - */ 138 - extern unsigned long thread_saved_pc(struct task_struct *tsk); 139 - 140 135 unsigned long get_wchan(struct task_struct *p); 141 136 142 137 #define task_pt_regs(task) ((task)->thread.uregs)
-8
arch/mn10300/kernel/process.c
··· 40 40 #include "internal.h" 41 41 42 42 /* 43 - * return saved PC of a blocked thread. 44 - */ 45 - unsigned long thread_saved_pc(struct task_struct *tsk) 46 - { 47 - return ((unsigned long *) tsk->thread.sp)[3]; 48 - } 49 - 50 - /* 51 43 * power off function, if any 52 44 */ 53 45 void (*pm_power_off)(void);
-3
arch/nios2/include/asm/processor.h
··· 75 75 { 76 76 } 77 77 78 - /* Return saved PC of a blocked thread. */ 79 - #define thread_saved_pc(tsk) ((tsk)->thread.kregs->ea) 80 - 81 78 extern unsigned long get_wchan(struct task_struct *p); 82 79 83 80 #define task_pt_regs(p) \
-5
arch/openrisc/include/asm/processor.h
··· 84 84 void release_thread(struct task_struct *); 85 85 unsigned long get_wchan(struct task_struct *p); 86 86 87 - /* 88 - * Return saved PC of a blocked thread. For now, this is the "user" PC 89 - */ 90 - extern unsigned long thread_saved_pc(struct task_struct *t); 91 - 92 87 #define init_stack (init_thread_union.stack) 93 88 94 89 #define cpu_relax() barrier()
-7
arch/openrisc/kernel/process.c
··· 110 110 show_registers(regs); 111 111 } 112 112 113 - unsigned long thread_saved_pc(struct task_struct *t) 114 - { 115 - return (unsigned long)user_regs(t->stack)->pc; 116 - } 117 - 118 113 void release_thread(struct task_struct *dead_task) 119 114 { 120 115 } ··· 161 166 unsigned long top_of_kernel_stack; 162 167 163 168 top_of_kernel_stack = sp; 164 - 165 - p->set_child_tid = p->clear_child_tid = NULL; 166 169 167 170 /* Locate userspace context on stack... */ 168 171 sp -= STACK_FRAME_OVERHEAD; /* redzone */
-5
arch/parisc/include/asm/processor.h
··· 163 163 .flags = 0 \ 164 164 } 165 165 166 - /* 167 - * Return saved PC of a blocked thread. This is used by ps mostly. 168 - */ 169 - 170 166 struct task_struct; 171 - unsigned long thread_saved_pc(struct task_struct *t); 172 167 void show_trace(struct task_struct *task, unsigned long *stack); 173 168 174 169 /*
-5
arch/parisc/kernel/process.c
··· 239 239 return 0; 240 240 } 241 241 242 - unsigned long thread_saved_pc(struct task_struct *t) 243 - { 244 - return t->thread.regs.kpc; 245 - } 246 - 247 242 unsigned long 248 243 get_wchan(struct task_struct *p) 249 244 {
+9 -6
arch/parisc/kernel/sys_parisc.c
··· 90 90 unsigned long len, unsigned long pgoff, unsigned long flags) 91 91 { 92 92 struct mm_struct *mm = current->mm; 93 - struct vm_area_struct *vma; 93 + struct vm_area_struct *vma, *prev; 94 94 unsigned long task_size = TASK_SIZE; 95 95 int do_color_align, last_mmap; 96 96 struct vm_unmapped_area_info info; ··· 117 117 else 118 118 addr = PAGE_ALIGN(addr); 119 119 120 - vma = find_vma(mm, addr); 120 + vma = find_vma_prev(mm, addr, &prev); 121 121 if (task_size - len >= addr && 122 - (!vma || addr + len <= vma->vm_start)) 122 + (!vma || addr + len <= vm_start_gap(vma)) && 123 + (!prev || addr >= vm_end_gap(prev))) 123 124 goto found_addr; 124 125 } 125 126 ··· 144 143 const unsigned long len, const unsigned long pgoff, 145 144 const unsigned long flags) 146 145 { 147 - struct vm_area_struct *vma; 146 + struct vm_area_struct *vma, *prev; 148 147 struct mm_struct *mm = current->mm; 149 148 unsigned long addr = addr0; 150 149 int do_color_align, last_mmap; ··· 178 177 addr = COLOR_ALIGN(addr, last_mmap, pgoff); 179 178 else 180 179 addr = PAGE_ALIGN(addr); 181 - vma = find_vma(mm, addr); 180 + 181 + vma = find_vma_prev(mm, addr, &prev); 182 182 if (TASK_SIZE - len >= addr && 183 - (!vma || addr + len <= vma->vm_start)) 183 + (!vma || addr + len <= vm_start_gap(vma)) && 184 + (!prev || addr >= vm_end_gap(prev))) 184 185 goto found_addr; 185 186 } 186 187
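The find_vma_prev()/vm_start_gap()/vm_end_gap() conversions here (and the similar one-line changes in the MIPS and powerpc mmap code in this series) are part of the stack guard-gap rework: an explicitly requested address is only honoured if it also stays clear of the gap reserved next to a growing stack. A rough self-contained C model of the idea; struct vma, the flag values and the addresses are illustrative stand-ins, not the real vm_area_struct or the helpers from include/linux/mm.h:

#include <stdio.h>

#define VM_GROWSDOWN 0x1
#define VM_GROWSUP   0x2

struct vma {                        /* toy stand-in for vm_area_struct */
    unsigned long vm_start;
    unsigned long vm_end;
    unsigned long vm_flags;
};

static unsigned long stack_guard_gap = 256UL << 12;  /* 256 pages of 4 KiB */

/* Simplified model: extend the VMA by the guard gap in its growth direction. */
static unsigned long vma_start_gap(const struct vma *v)
{
    if (v->vm_flags & VM_GROWSDOWN)
        return v->vm_start - stack_guard_gap;
    return v->vm_start;
}

static unsigned long vma_end_gap(const struct vma *v)
{
    if (v->vm_flags & VM_GROWSUP)
        return v->vm_end + stack_guard_gap;
    return v->vm_end;
}

int main(void)
{
    struct vma stack = { 0x7ffff000UL, 0x80000000UL, VM_GROWSDOWN };
    unsigned long addr = 0x7ffdf000UL, len = 0x10000UL;

    /* Old check: addr + len <= vm_start; new check keeps the gap free too. */
    printf("old check passes: %d\n", addr + len <= stack.vm_start);
    printf("new check passes: %d\n", addr + len <= vma_start_gap(&stack));
    (void)vma_end_gap(&stack);
    return 0;
}

With the example numbers, the old vm_start comparison would accept the request, while the gap-aware check rejects it because the mapping would land inside the stack's guard pages.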
-21
arch/powerpc/Kconfig
··· 380 380 381 381 menu "Kernel options" 382 382 383 - config PPC_DT_CPU_FTRS 384 - bool "Device-tree based CPU feature discovery & setup" 385 - depends on PPC_BOOK3S_64 386 - default n 387 - help 388 - This enables code to use a new device tree binding for describing CPU 389 - compatibility and features. Saying Y here will attempt to use the new 390 - binding if the firmware provides it. Currently only the skiboot 391 - firmware provides this binding. 392 - If you're not sure say Y. 393 - 394 - config PPC_CPUFEATURES_ENABLE_UNKNOWN 395 - bool "cpufeatures pass through unknown features to guest/userspace" 396 - depends on PPC_DT_CPU_FTRS 397 - default y 398 - 399 383 config HIGHMEM 400 384 bool "High memory support" 401 385 depends on PPC32 ··· 1198 1214 source "arch/powerpc/Kconfig.debug" 1199 1215 1200 1216 source "security/Kconfig" 1201 - 1202 - config KEYS_COMPAT 1203 - bool 1204 - depends on COMPAT && KEYS 1205 - default y 1206 1217 1207 1218 source "crypto/Kconfig" 1208 1219
+1 -1
arch/powerpc/include/asm/book3s/64/hash-4k.h
··· 8 8 #define H_PTE_INDEX_SIZE 9 9 9 #define H_PMD_INDEX_SIZE 7 10 10 #define H_PUD_INDEX_SIZE 9 11 - #define H_PGD_INDEX_SIZE 12 11 + #define H_PGD_INDEX_SIZE 9 12 12 13 13 #ifndef __ASSEMBLY__ 14 14 #define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
+1 -1
arch/powerpc/include/asm/bug.h
··· 104 104 "1: "PPC_TLNEI" %4,0\n" \ 105 105 _EMIT_BUG_ENTRY \ 106 106 : : "i" (__FILE__), "i" (__LINE__), \ 107 - "i" (BUGFLAG_TAINT(TAINT_WARN)), \ 107 + "i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\ 108 108 "i" (sizeof(struct bug_entry)), \ 109 109 "r" (__ret_warn_on)); \ 110 110 } \
+1 -2
arch/powerpc/include/asm/cputable.h
··· 214 214 #define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) 215 215 #define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000) 216 216 #define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000) 217 - #define CPU_FTR_SUBCORE LONG_ASM_CONST(0x2000000000000000) 218 217 #define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000) 219 218 220 219 #ifndef __ASSEMBLY__ ··· 462 463 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 463 464 CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ 464 465 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ 465 - CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE) 466 + CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP) 466 467 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) 467 468 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL) 468 469 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
+1
arch/powerpc/include/asm/kprobes.h
··· 103 103 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); 104 104 extern int kprobe_handler(struct pt_regs *regs); 105 105 extern int kprobe_post_handler(struct pt_regs *regs); 106 + extern int is_current_kprobe_addr(unsigned long addr); 106 107 #ifdef CONFIG_KPROBES_ON_FTRACE 107 108 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs, 108 109 struct kprobe_ctlblk *kcb);
+12 -19
arch/powerpc/include/asm/processor.h
··· 110 110 #define TASK_SIZE_128TB (0x0000800000000000UL) 111 111 #define TASK_SIZE_512TB (0x0002000000000000UL) 112 112 113 - #ifdef CONFIG_PPC_BOOK3S_64 113 + /* 114 + * For now 512TB is only supported with book3s and 64K linux page size. 115 + */ 116 + #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES) 114 117 /* 115 118 * Max value currently used: 116 119 */ 117 - #define TASK_SIZE_USER64 TASK_SIZE_512TB 120 + #define TASK_SIZE_USER64 TASK_SIZE_512TB 121 + #define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB 118 122 #else 119 - #define TASK_SIZE_USER64 TASK_SIZE_64TB 123 + #define TASK_SIZE_USER64 TASK_SIZE_64TB 124 + #define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB 120 125 #endif 121 126 122 127 /* ··· 137 132 * space during mmap's. 138 133 */ 139 134 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4)) 140 - #define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4)) 135 + #define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4)) 141 136 142 137 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \ 143 138 TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 ) ··· 148 143 * with 128TB and conditionally enable upto 512TB 149 144 */ 150 145 #ifdef CONFIG_PPC_BOOK3S_64 151 - #define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \ 152 - TASK_SIZE_USER32 : TASK_SIZE_128TB) 146 + #define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \ 147 + TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64) 153 148 #else 154 149 #define DEFAULT_MAP_WINDOW TASK_SIZE 155 150 #endif 156 151 157 152 #ifdef __powerpc64__ 158 153 159 - #ifdef CONFIG_PPC_BOOK3S_64 160 - /* Limit stack to 128TB */ 161 - #define STACK_TOP_USER64 TASK_SIZE_128TB 162 - #else 163 - #define STACK_TOP_USER64 TASK_SIZE_USER64 164 - #endif 165 - 154 + #define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64 166 155 #define STACK_TOP_USER32 TASK_SIZE_USER32 167 156 168 157 #define STACK_TOP (is_32bit_task() ? \ ··· 377 378 .fscr = FSCR_TAR | FSCR_EBB \ 378 379 } 379 380 #endif 380 - 381 - /* 382 - * Return saved PC of a blocked thread. For now, this is the "user" PC 383 - */ 384 - #define thread_saved_pc(tsk) \ 385 - ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0) 386 381 387 382 #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs) 388 383
+14
arch/powerpc/include/asm/topology.h
··· 44 44 extern int sysfs_add_device_to_node(struct device *dev, int nid); 45 45 extern void sysfs_remove_device_from_node(struct device *dev, int nid); 46 46 47 + static inline int early_cpu_to_node(int cpu) 48 + { 49 + int nid; 50 + 51 + nid = numa_cpu_lookup_table[cpu]; 52 + 53 + /* 54 + * Fall back to node 0 if nid is unset (it should be, except bugs). 55 + * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)). 56 + */ 57 + return (nid < 0) ? 0 : nid; 58 + } 47 59 #else 60 + 61 + static inline int early_cpu_to_node(int cpu) { return 0; } 48 62 49 63 static inline void dump_numa_cpu_topology(void) {} 50 64
+1 -7
arch/powerpc/include/asm/uaccess.h
··· 267 267 extern unsigned long __copy_tofrom_user(void __user *to, 268 268 const void __user *from, unsigned long size); 269 269 270 - #ifndef __powerpc64__ 271 - 272 - #define INLINE_COPY_FROM_USER 273 - #define INLINE_COPY_TO_USER 274 - 275 - #else /* __powerpc64__ */ 276 - 270 + #ifdef __powerpc64__ 277 271 static inline unsigned long 278 272 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) 279 273 {
+7 -5
arch/powerpc/include/asm/xive.h
··· 94 94 * store at 0 and some ESBs support doing a trigger via a 95 95 * separate trigger page. 96 96 */ 97 - #define XIVE_ESB_GET 0x800 98 - #define XIVE_ESB_SET_PQ_00 0xc00 99 - #define XIVE_ESB_SET_PQ_01 0xd00 100 - #define XIVE_ESB_SET_PQ_10 0xe00 101 - #define XIVE_ESB_SET_PQ_11 0xf00 97 + #define XIVE_ESB_STORE_EOI 0x400 /* Store */ 98 + #define XIVE_ESB_LOAD_EOI 0x000 /* Load */ 99 + #define XIVE_ESB_GET 0x800 /* Load */ 100 + #define XIVE_ESB_SET_PQ_00 0xc00 /* Load */ 101 + #define XIVE_ESB_SET_PQ_01 0xd00 /* Load */ 102 + #define XIVE_ESB_SET_PQ_10 0xe00 /* Load */ 103 + #define XIVE_ESB_SET_PQ_11 0xf00 /* Load */ 102 104 103 105 #define XIVE_ESB_VAL_P 0x2 104 106 #define XIVE_ESB_VAL_Q 0x1
+48 -10
arch/powerpc/kernel/dt_cpu_ftrs.c
··· 8 8 #include <linux/export.h> 9 9 #include <linux/init.h> 10 10 #include <linux/jump_label.h> 11 + #include <linux/libfdt.h> 11 12 #include <linux/memblock.h> 12 13 #include <linux/printk.h> 13 14 #include <linux/sched.h> ··· 643 642 {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL}, 644 643 {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, 645 644 {"processor-utilization-of-resources-register", feat_enable_purr, 0}, 646 - {"subcore", feat_enable, CPU_FTR_SUBCORE}, 647 645 {"no-execute", feat_enable, 0}, 648 646 {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, 649 647 {"cache-inhibited-large-page", feat_enable_large_ci, 0}, ··· 671 671 {"wait-v3", feat_enable, 0}, 672 672 }; 673 673 674 - /* XXX: how to configure this? Default + boot time? */ 675 - #ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN 676 - #define CPU_FEATURE_ENABLE_UNKNOWN 1 677 - #else 678 - #define CPU_FEATURE_ENABLE_UNKNOWN 0 679 - #endif 674 + static bool __initdata using_dt_cpu_ftrs; 675 + static bool __initdata enable_unknown = true; 676 + 677 + static int __init dt_cpu_ftrs_parse(char *str) 678 + { 679 + if (!str) 680 + return 0; 681 + 682 + if (!strcmp(str, "off")) 683 + using_dt_cpu_ftrs = false; 684 + else if (!strcmp(str, "known")) 685 + enable_unknown = false; 686 + else 687 + return 1; 688 + 689 + return 0; 690 + } 691 + early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse); 680 692 681 693 static void __init cpufeatures_setup_start(u32 isa) 682 694 { ··· 719 707 } 720 708 } 721 709 722 - if (!known && CPU_FEATURE_ENABLE_UNKNOWN) { 710 + if (!known && enable_unknown) { 723 711 if (!feat_try_enable_unknown(f)) { 724 712 pr_info("not enabling: %s (unknown and unsupported by kernel)\n", 725 713 f->name); ··· 768 756 cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features); 769 757 } 770 758 759 + static int __init disabled_on_cmdline(void) 760 + { 761 + unsigned long root, chosen; 762 + const char *p; 763 + 764 + root = of_get_flat_dt_root(); 765 + chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); 766 + if (chosen == -FDT_ERR_NOTFOUND) 767 + return false; 768 + 769 + p = of_get_flat_dt_prop(chosen, "bootargs", NULL); 770 + if (!p) 771 + return false; 772 + 773 + if (strstr(p, "dt_cpu_ftrs=off")) 774 + return true; 775 + 776 + return false; 777 + } 778 + 771 779 static int __init fdt_find_cpu_features(unsigned long node, const char *uname, 772 780 int depth, void *data) 773 781 { ··· 798 766 return 0; 799 767 } 800 768 801 - static bool __initdata using_dt_cpu_ftrs = false; 802 - 803 769 bool __init dt_cpu_ftrs_in_use(void) 804 770 { 805 771 return using_dt_cpu_ftrs; ··· 805 775 806 776 bool __init dt_cpu_ftrs_init(void *fdt) 807 777 { 778 + using_dt_cpu_ftrs = false; 779 + 808 780 /* Setup and verify the FDT, if it fails we just bail */ 809 781 if (!early_init_dt_verify(fdt)) 810 782 return false; 811 783 812 784 if (!of_scan_flat_dt(fdt_find_cpu_features, NULL)) 785 + return false; 786 + 787 + if (disabled_on_cmdline()) 813 788 return false; 814 789 815 790 cpufeatures_setup_cpu(); ··· 1062 1027 1063 1028 void __init dt_cpu_ftrs_scan(void) 1064 1029 { 1030 + if (!using_dt_cpu_ftrs) 1031 + return; 1032 + 1065 1033 of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL); 1066 1034 }
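The two removed Kconfig symbols (see the arch/powerpc/Kconfig hunk earlier and the new PPC_DT_CPU_FTRS entry later in this diff) are replaced by the dt_cpu_ftrs= early parameter parsed above. A small userspace C sketch of just the string handling, as a hypothetical test harness rather than the real early_param plumbing:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool using_dt_cpu_ftrs = true;  /* assume the DT binding was found */
static bool enable_unknown = true;

/* Mirrors dt_cpu_ftrs_parse(): "off" disables the binding entirely, "known"
 * keeps it but stops unknown features from being passed through. */
static int parse_dt_cpu_ftrs(const char *str)
{
    if (!str)
        return 0;
    if (!strcmp(str, "off"))
        using_dt_cpu_ftrs = false;
    else if (!strcmp(str, "known"))
        enable_unknown = false;
    else
        return 1;   /* unrecognised value */
    return 0;
}

int main(void)
{
    parse_dt_cpu_ftrs("known");
    printf("use dt_cpu_ftrs: %d, pass unknown features: %d\n",
           using_dt_cpu_ftrs, enable_unknown);
    return 0;
}

Booting with dt_cpu_ftrs=off should then behave like the old CONFIG_PPC_DT_CPU_FTRS=n, and dt_cpu_ftrs=known like CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN=n.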
+7 -4
arch/powerpc/kernel/exceptions-64s.S
··· 1411 1411 .balign IFETCH_ALIGN_BYTES 1412 1412 do_hash_page: 1413 1413 #ifdef CONFIG_PPC_STD_MMU_64 1414 - andis. r0,r4,0xa410 /* weird error? */ 1414 + andis. r0,r4,0xa450 /* weird error? */ 1415 1415 bne- handle_page_fault /* if not, try to insert a HPTE */ 1416 - andis. r0,r4,DSISR_DABRMATCH@h 1417 - bne- handle_dabr_fault 1418 1416 CURRENT_THREAD_INFO(r11, r1) 1419 1417 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ 1420 1418 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ ··· 1436 1438 1437 1439 /* Error */ 1438 1440 blt- 13f 1441 + 1442 + /* Reload DSISR into r4 for the DABR check below */ 1443 + ld r4,_DSISR(r1) 1439 1444 #endif /* CONFIG_PPC_STD_MMU_64 */ 1440 1445 1441 1446 /* Here we have a page fault that hash_page can't handle. */ 1442 1447 handle_page_fault: 1443 - 11: ld r4,_DAR(r1) 1448 + 11: andis. r0,r4,DSISR_DABRMATCH@h 1449 + bne- handle_dabr_fault 1450 + ld r4,_DAR(r1) 1444 1451 ld r5,_DSISR(r1) 1445 1452 addi r3,r1,STACK_FRAME_OVERHEAD 1446 1453 bl do_page_fault
+17
arch/powerpc/kernel/kprobes.c
··· 43 43 44 44 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; 45 45 46 + int is_current_kprobe_addr(unsigned long addr) 47 + { 48 + struct kprobe *p = kprobe_running(); 49 + return (p && (unsigned long)p->addr == addr) ? 1 : 0; 50 + } 51 + 46 52 bool arch_within_kprobe_blacklist(unsigned long addr) 47 53 { 48 54 return (addr >= (unsigned long)__kprobes_text_start && ··· 623 617 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); 624 618 #endif 625 619 620 + /* 621 + * jprobes use jprobe_return() which skips the normal return 622 + * path of the function, and this messes up the accounting of the 623 + * function graph tracer. 624 + * 625 + * Pause function graph tracing while performing the jprobe function. 626 + */ 627 + pause_graph_tracing(); 628 + 626 629 return 1; 627 630 } 628 631 NOKPROBE_SYMBOL(setjmp_pre_handler); ··· 657 642 * saved regs... 658 643 */ 659 644 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); 645 + /* It's OK to start function graph tracing again */ 646 + unpause_graph_tracing(); 660 647 preempt_enable_no_resched(); 661 648 return 1; 662 649 }
+3
arch/powerpc/kernel/process.c
··· 1666 1666 #ifdef CONFIG_VSX 1667 1667 current->thread.used_vsr = 0; 1668 1668 #endif 1669 + current->thread.load_fp = 0; 1669 1670 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state)); 1670 1671 current->thread.fp_save_area = NULL; 1671 1672 #ifdef CONFIG_ALTIVEC ··· 1675 1674 current->thread.vr_save_area = NULL; 1676 1675 current->thread.vrsave = 0; 1677 1676 current->thread.used_vr = 0; 1677 + current->thread.load_vec = 0; 1678 1678 #endif /* CONFIG_ALTIVEC */ 1679 1679 #ifdef CONFIG_SPE 1680 1680 memset(current->thread.evr, 0, sizeof(current->thread.evr)); ··· 1687 1685 current->thread.tm_tfhar = 0; 1688 1686 current->thread.tm_texasr = 0; 1689 1687 current->thread.tm_tfiar = 0; 1688 + current->thread.load_tm = 0; 1690 1689 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ 1691 1690 } 1692 1691 EXPORT_SYMBOL(start_thread);
+1 -1
arch/powerpc/kernel/setup-common.c
··· 928 928 929 929 #ifdef CONFIG_PPC_MM_SLICES 930 930 #ifdef CONFIG_PPC64 931 - init_mm.context.addr_limit = TASK_SIZE_128TB; 931 + init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64; 932 932 #else 933 933 #error "context.addr_limit not initialized." 934 934 #endif
+30 -5
arch/powerpc/kernel/setup_64.c
··· 616 616 #endif 617 617 618 618 /* 619 + * Emergency stacks are used for a range of things, from asynchronous 620 + * NMIs (system reset, machine check) to synchronous, process context. 621 + * We set preempt_count to zero, even though that isn't necessarily correct. To 622 + * get the right value we'd need to copy it from the previous thread_info, but 623 + * doing that might fault causing more problems. 624 + * TODO: what to do with accounting? 625 + */ 626 + static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu) 627 + { 628 + ti->task = NULL; 629 + ti->cpu = cpu; 630 + ti->preempt_count = 0; 631 + ti->local_flags = 0; 632 + ti->flags = 0; 633 + klp_init_thread_info(ti); 634 + } 635 + 636 + /* 619 637 * Stack space used when we detect a bad kernel stack pointer, and 620 638 * early in SMP boots before relocation is enabled. Exclusive emergency 621 639 * stack for machine checks. ··· 651 633 * Since we use these as temporary stacks during secondary CPU 652 634 * bringup, we need to get at them in real mode. This means they 653 635 * must also be within the RMO region. 636 + * 637 + * The IRQ stacks allocated elsewhere in this file are zeroed and 638 + * initialized in kernel/irq.c. These are initialized here in order 639 + * to have emergency stacks available as early as possible. 654 640 */ 655 641 limit = min(safe_stack_limit(), ppc64_rma_size); 656 642 657 643 for_each_possible_cpu(i) { 658 644 struct thread_info *ti; 659 645 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); 660 - klp_init_thread_info(ti); 646 + memset(ti, 0, THREAD_SIZE); 647 + emerg_stack_init_thread_info(ti, i); 661 648 paca[i].emergency_sp = (void *)ti + THREAD_SIZE; 662 649 663 650 #ifdef CONFIG_PPC_BOOK3S_64 664 651 /* emergency stack for NMI exception handling. */ 665 652 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); 666 - klp_init_thread_info(ti); 653 + memset(ti, 0, THREAD_SIZE); 654 + emerg_stack_init_thread_info(ti, i); 667 655 paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE; 668 656 669 657 /* emergency stack for machine check exception handling. */ 670 658 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); 671 - klp_init_thread_info(ti); 659 + memset(ti, 0, THREAD_SIZE); 660 + emerg_stack_init_thread_info(ti, i); 672 661 paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE; 673 662 #endif 674 663 } ··· 686 661 687 662 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) 688 663 { 689 - return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align, 664 + return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align, 690 665 __pa(MAX_DMA_ADDRESS)); 691 666 } 692 667 ··· 697 672 698 673 static int pcpu_cpu_distance(unsigned int from, unsigned int to) 699 674 { 700 - if (cpu_to_node(from) == cpu_to_node(to)) 675 + if (early_cpu_to_node(from) == early_cpu_to_node(to)) 701 676 return LOCAL_DISTANCE; 702 677 else 703 678 return REMOTE_DISTANCE;
+46 -13
arch/powerpc/kernel/trace/ftrace_64_mprofile.S
··· 45 45 stdu r1,-SWITCH_FRAME_SIZE(r1) 46 46 47 47 /* Save all gprs to pt_regs */ 48 - SAVE_8GPRS(0,r1) 49 - SAVE_8GPRS(8,r1) 50 - SAVE_8GPRS(16,r1) 51 - SAVE_8GPRS(24,r1) 48 + SAVE_GPR(0, r1) 49 + SAVE_10GPRS(2, r1) 50 + SAVE_10GPRS(12, r1) 51 + SAVE_10GPRS(22, r1) 52 + 53 + /* Save previous stack pointer (r1) */ 54 + addi r8, r1, SWITCH_FRAME_SIZE 55 + std r8, GPR1(r1) 52 56 53 57 /* Load special regs for save below */ 54 58 mfmsr r8 ··· 99 95 bl ftrace_stub 100 96 nop 101 97 102 - /* Load ctr with the possibly modified NIP */ 103 - ld r3, _NIP(r1) 104 - mtctr r3 98 + /* Load the possibly modified NIP */ 99 + ld r15, _NIP(r1) 100 + 105 101 #ifdef CONFIG_LIVEPATCH 106 - cmpd r14,r3 /* has NIP been altered? */ 102 + cmpd r14, r15 /* has NIP been altered? */ 107 103 #endif 108 104 105 + #if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE) 106 + /* NIP has not been altered, skip over further checks */ 107 + beq 1f 108 + 109 + /* Check if there is an active kprobe on us */ 110 + subi r3, r14, 4 111 + bl is_current_kprobe_addr 112 + nop 113 + 114 + /* 115 + * If r3 == 1, then this is a kprobe/jprobe. 116 + * else, this is livepatched function. 117 + * 118 + * The conditional branch for livepatch_handler below will use the 119 + * result of this comparison. For kprobe/jprobe, we just need to branch to 120 + * the new NIP, not call livepatch_handler. The branch below is bne, so we 121 + * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want 122 + * CR0[EQ] = (r3 == 1). 123 + */ 124 + cmpdi r3, 1 125 + 1: 126 + #endif 127 + 128 + /* Load CTR with the possibly modified NIP */ 129 + mtctr r15 130 + 109 131 /* Restore gprs */ 110 - REST_8GPRS(0,r1) 111 - REST_8GPRS(8,r1) 112 - REST_8GPRS(16,r1) 113 - REST_8GPRS(24,r1) 132 + REST_GPR(0,r1) 133 + REST_10GPRS(2,r1) 134 + REST_10GPRS(12,r1) 135 + REST_10GPRS(22,r1) 114 136 115 137 /* Restore possibly modified LR */ 116 138 ld r0, _LINK(r1) ··· 149 119 addi r1, r1, SWITCH_FRAME_SIZE 150 120 151 121 #ifdef CONFIG_LIVEPATCH 152 - /* Based on the cmpd above, if the NIP was altered handle livepatch */ 122 + /* 123 + * Based on the cmpd or cmpdi above, if the NIP was altered and we're 124 + * not on a kprobe/jprobe, then handle livepatch. 125 + */ 153 126 bne- livepatch_handler 154 127 #endif 155 128
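The CR0[EQ] comment added above packs a three-way decision into a single conditional branch. A rough C restatement of that control flow (illustrative only; the helper name and the -4 offset mirror the assembly, the addresses in main are made up):

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for is_current_kprobe_addr(): non-zero if a kprobe is currently
 * hit at this address. Hard-wired here purely for illustration. */
static int kprobe_active_at(unsigned long addr)
{
    (void)addr;
    return 0;
}

/*
 * After ftrace_stub returns: take the livepatch path only if the handler
 * rewrote NIP and the rewrite was not done by an active kprobe/jprobe.
 */
static bool take_livepatch_path(unsigned long saved_nip, unsigned long new_nip)
{
    if (new_nip == saved_nip)             /* NIP untouched: normal return */
        return false;
    if (kprobe_active_at(saved_nip - 4))  /* kprobe/jprobe altered NIP */
        return false;
    return true;                          /* genuinely livepatched */
}

int main(void)
{
    printf("livepatch path taken: %d\n",
           take_livepatch_path(0x2000, 0x2400)); /* made-up addresses */
    return 0;
}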
+51
arch/powerpc/kvm/book3s_hv.c
··· 1486 1486 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); 1487 1487 break; 1488 1488 case KVM_REG_PPC_TB_OFFSET: 1489 + /* 1490 + * POWER9 DD1 has an erratum where writing TBU40 causes 1491 + * the timebase to lose ticks. So we don't let the 1492 + * timebase offset be changed on P9 DD1. (It is 1493 + * initialized to zero.) 1494 + */ 1495 + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) 1496 + break; 1489 1497 /* round up to multiple of 2^24 */ 1490 1498 vcpu->arch.vcore->tb_offset = 1491 1499 ALIGN(set_reg_val(id, *val), 1UL << 24); ··· 2915 2907 { 2916 2908 int r; 2917 2909 int srcu_idx; 2910 + unsigned long ebb_regs[3] = {}; /* shut up GCC */ 2911 + unsigned long user_tar = 0; 2912 + unsigned int user_vrsave; 2918 2913 2919 2914 if (!vcpu->arch.sane) { 2920 2915 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 2921 2916 return -EINVAL; 2922 2917 } 2918 + 2919 + /* 2920 + * Don't allow entry with a suspended transaction, because 2921 + * the guest entry/exit code will lose it. 2922 + * If the guest has TM enabled, save away their TM-related SPRs 2923 + * (they will get restored by the TM unavailable interrupt). 2924 + */ 2925 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2926 + if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && 2927 + (current->thread.regs->msr & MSR_TM)) { 2928 + if (MSR_TM_ACTIVE(current->thread.regs->msr)) { 2929 + run->exit_reason = KVM_EXIT_FAIL_ENTRY; 2930 + run->fail_entry.hardware_entry_failure_reason = 0; 2931 + return -EINVAL; 2932 + } 2933 + current->thread.tm_tfhar = mfspr(SPRN_TFHAR); 2934 + current->thread.tm_tfiar = mfspr(SPRN_TFIAR); 2935 + current->thread.tm_texasr = mfspr(SPRN_TEXASR); 2936 + current->thread.regs->msr &= ~MSR_TM; 2937 + } 2938 + #endif 2923 2939 2924 2940 kvmppc_core_prepare_to_enter(vcpu); 2925 2941 ··· 2965 2933 } 2966 2934 2967 2935 flush_all_to_thread(current); 2936 + 2937 + /* Save userspace EBB and other register values */ 2938 + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 2939 + ebb_regs[0] = mfspr(SPRN_EBBHR); 2940 + ebb_regs[1] = mfspr(SPRN_EBBRR); 2941 + ebb_regs[2] = mfspr(SPRN_BESCR); 2942 + user_tar = mfspr(SPRN_TAR); 2943 + } 2944 + user_vrsave = mfspr(SPRN_VRSAVE); 2968 2945 2969 2946 vcpu->arch.wqp = &vcpu->arch.vcore->wq; 2970 2947 vcpu->arch.pgdir = current->mm->pgd; ··· 3000 2959 r = kvmppc_xics_rm_complete(vcpu, 0); 3001 2960 } 3002 2961 } while (is_kvmppc_resume_guest(r)); 2962 + 2963 + /* Restore userspace EBB and other register values */ 2964 + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { 2965 + mtspr(SPRN_EBBHR, ebb_regs[0]); 2966 + mtspr(SPRN_EBBRR, ebb_regs[1]); 2967 + mtspr(SPRN_BESCR, ebb_regs[2]); 2968 + mtspr(SPRN_TAR, user_tar); 2969 + mtspr(SPRN_FSCR, current->thread.fscr); 2970 + } 2971 + mtspr(SPRN_VRSAVE, user_vrsave); 3003 2972 3004 2973 out: 3005 2974 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
+11 -1
arch/powerpc/kvm/book3s_hv_interrupts.S
··· 121 121 * Put whatever is in the decrementer into the 122 122 * hypervisor decrementer. 123 123 */ 124 + BEGIN_FTR_SECTION 125 + ld r5, HSTATE_KVM_VCORE(r13) 126 + ld r6, VCORE_KVM(r5) 127 + ld r9, KVM_HOST_LPCR(r6) 128 + andis. r9, r9, LPCR_LD@h 129 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 124 130 mfspr r8,SPRN_DEC 125 131 mftb r7 126 - mtspr SPRN_HDEC,r8 132 + BEGIN_FTR_SECTION 133 + /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */ 134 + bne 32f 135 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 127 136 extsw r8,r8 137 + 32: mtspr SPRN_HDEC,r8 128 138 add r8,r8,r7 129 139 std r8,HSTATE_DECEXP(r13) 130 140
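The guarded handling above exists because HDEC used to be a signed 32-bit quantity, while POWER9 in large-decrementer mode (host LPCR[LD] set) provides a wider value that must not be sign-extended. A tiny self-contained C illustration of why the distinction matters; the bit widths and sample value are assumptions for the example, not the architected register layout:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t raw = 0x00000000f0000000ULL;   /* example HDEC readout */

    /* Legacy behaviour: treat the low 32 bits as signed and extend. */
    int64_t extended = (int64_t)(int32_t)(raw & 0xffffffffULL);

    /* Large-decrementer behaviour: keep the value as-is. */
    int64_t unextended = (int64_t)raw;

    printf("sign-extended: %lld\n", (long long)extended);    /* negative */
    printf("not extended:  %lld\n", (long long)unextended);  /* large positive */
    return 0;
}

Sign-extending a large-decrementer value would turn a big positive deadline into a negative one, which is the case the added feature section avoids.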
+56 -19
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 32 32 #include <asm/opal.h> 33 33 #include <asm/xive-regs.h> 34 34 35 + /* Sign-extend HDEC if not on POWER9 */ 36 + #define EXTEND_HDEC(reg) \ 37 + BEGIN_FTR_SECTION; \ 38 + extsw reg, reg; \ 39 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) 40 + 35 41 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) 36 42 37 43 /* Values in HSTATE_NAPPING(r13) */ 38 44 #define NAPPING_CEDE 1 39 45 #define NAPPING_NOVCPU 2 46 + 47 + /* Stack frame offsets for kvmppc_hv_entry */ 48 + #define SFS 144 49 + #define STACK_SLOT_TRAP (SFS-4) 50 + #define STACK_SLOT_TID (SFS-16) 51 + #define STACK_SLOT_PSSCR (SFS-24) 52 + #define STACK_SLOT_PID (SFS-32) 53 + #define STACK_SLOT_IAMR (SFS-40) 54 + #define STACK_SLOT_CIABR (SFS-48) 55 + #define STACK_SLOT_DAWR (SFS-56) 56 + #define STACK_SLOT_DAWRX (SFS-64) 40 57 41 58 /* 42 59 * Call kvmppc_hv_entry in real mode. ··· 231 214 kvmppc_primary_no_guest: 232 215 /* We handle this much like a ceded vcpu */ 233 216 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */ 217 + /* HDEC may be larger than DEC for arch >= v3.00, but since the */ 218 + /* HDEC value came from DEC in the first place, it will fit */ 234 219 mfspr r3, SPRN_HDEC 235 220 mtspr SPRN_DEC, r3 236 221 /* ··· 314 295 315 296 /* See if our timeslice has expired (HDEC is negative) */ 316 297 mfspr r0, SPRN_HDEC 298 + EXTEND_HDEC(r0) 317 299 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER 318 - cmpwi r0, 0 300 + cmpdi r0, 0 319 301 blt kvm_novcpu_exit 320 302 321 303 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ ··· 339 319 bl kvmhv_accumulate_time 340 320 #endif 341 321 13: mr r3, r12 342 - stw r12, 112-4(r1) 322 + stw r12, STACK_SLOT_TRAP(r1) 343 323 bl kvmhv_commence_exit 344 324 nop 345 - lwz r12, 112-4(r1) 325 + lwz r12, STACK_SLOT_TRAP(r1) 346 326 b kvmhv_switch_to_host 347 327 348 328 /* ··· 410 390 lbz r4, HSTATE_PTID(r13) 411 391 cmpwi r4, 0 412 392 bne 63f 413 - lis r6, 0x7fff 414 - ori r6, r6, 0xffff 393 + LOAD_REG_ADDR(r6, decrementer_max) 394 + ld r6, 0(r6) 415 395 mtspr SPRN_HDEC, r6 416 396 /* and set per-LPAR registers, if doing dynamic micro-threading */ 417 397 ld r6, HSTATE_SPLIT_MODE(r13) ··· 565 545 * * 566 546 *****************************************************************************/ 567 547 568 - /* Stack frame offsets */ 569 - #define STACK_SLOT_TID (112-16) 570 - #define STACK_SLOT_PSSCR (112-24) 571 - #define STACK_SLOT_PID (112-32) 572 - 573 548 .global kvmppc_hv_entry 574 549 kvmppc_hv_entry: 575 550 ··· 580 565 */ 581 566 mflr r0 582 567 std r0, PPC_LR_STKOFF(r1) 583 - stdu r1, -112(r1) 568 + stdu r1, -SFS(r1) 584 569 585 570 /* Save R1 in the PACA */ 586 571 std r1, HSTATE_HOST_R1(r13) ··· 764 749 mfspr r5, SPRN_TIDR 765 750 mfspr r6, SPRN_PSSCR 766 751 mfspr r7, SPRN_PID 752 + mfspr r8, SPRN_IAMR 767 753 std r5, STACK_SLOT_TID(r1) 768 754 std r6, STACK_SLOT_PSSCR(r1) 769 755 std r7, STACK_SLOT_PID(r1) 756 + std r8, STACK_SLOT_IAMR(r1) 770 757 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 758 + BEGIN_FTR_SECTION 759 + mfspr r5, SPRN_CIABR 760 + mfspr r6, SPRN_DAWR 761 + mfspr r7, SPRN_DAWRX 762 + std r5, STACK_SLOT_CIABR(r1) 763 + std r6, STACK_SLOT_DAWR(r1) 764 + std r7, STACK_SLOT_DAWRX(r1) 765 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 771 766 772 767 BEGIN_FTR_SECTION 773 768 /* Set partition DABR */ ··· 993 968 994 969 /* Check if HDEC expires soon */ 995 970 mfspr r3, SPRN_HDEC 996 - cmpwi r3, 512 /* 1 microsecond */ 971 + EXTEND_HDEC(r3) 972 + cmpdi r3, 512 /* 1 microsecond */ 997 973 blt hdec_soon 998 974 999 975 #ifdef 
CONFIG_KVM_XICS ··· 1531 1505 * set by the guest could disrupt the host. 1532 1506 */ 1533 1507 li r0, 0 1534 - mtspr SPRN_IAMR, r0 1535 - mtspr SPRN_CIABR, r0 1536 - mtspr SPRN_DAWRX, r0 1508 + mtspr SPRN_PSPB, r0 1537 1509 mtspr SPRN_WORT, r0 1538 1510 BEGIN_FTR_SECTION 1511 + mtspr SPRN_IAMR, r0 1539 1512 mtspr SPRN_TCSCR, r0 1540 1513 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ 1541 1514 li r0, 1 ··· 1550 1525 std r6,VCPU_UAMOR(r9) 1551 1526 li r6,0 1552 1527 mtspr SPRN_AMR,r6 1528 + mtspr SPRN_UAMOR, r6 1553 1529 1554 1530 /* Switch DSCR back to host value */ 1555 1531 mfspr r8, SPRN_DSCR ··· 1696 1670 1697 1671 /* Restore host values of some registers */ 1698 1672 BEGIN_FTR_SECTION 1673 + ld r5, STACK_SLOT_CIABR(r1) 1674 + ld r6, STACK_SLOT_DAWR(r1) 1675 + ld r7, STACK_SLOT_DAWRX(r1) 1676 + mtspr SPRN_CIABR, r5 1677 + mtspr SPRN_DAWR, r6 1678 + mtspr SPRN_DAWRX, r7 1679 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1680 + BEGIN_FTR_SECTION 1699 1681 ld r5, STACK_SLOT_TID(r1) 1700 1682 ld r6, STACK_SLOT_PSSCR(r1) 1701 1683 ld r7, STACK_SLOT_PID(r1) 1684 + ld r8, STACK_SLOT_IAMR(r1) 1702 1685 mtspr SPRN_TIDR, r5 1703 1686 mtspr SPRN_PSSCR, r6 1704 1687 mtspr SPRN_PID, r7 1688 + mtspr SPRN_IAMR, r8 1705 1689 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) 1706 1690 BEGIN_FTR_SECTION 1707 1691 PPC_INVALIDATE_ERAT ··· 1855 1819 li r0, KVM_GUEST_MODE_NONE 1856 1820 stb r0, HSTATE_IN_GUEST(r13) 1857 1821 1858 - ld r0, 112+PPC_LR_STKOFF(r1) 1859 - addi r1, r1, 112 1822 + ld r0, SFS+PPC_LR_STKOFF(r1) 1823 + addi r1, r1, SFS 1860 1824 mtlr r0 1861 1825 blr 1862 1826 ··· 2402 2366 mfspr r3, SPRN_DEC 2403 2367 mfspr r4, SPRN_HDEC 2404 2368 mftb r5 2405 - cmpw r3, r4 2369 + extsw r3, r3 2370 + EXTEND_HDEC(r4) 2371 + cmpd r3, r4 2406 2372 ble 67f 2407 2373 mtspr SPRN_DEC, r4 2408 2374 67: 2409 2375 /* save expiry time of guest decrementer */ 2410 - extsw r3, r3 2411 2376 add r3, r3, r5 2412 2377 ld r4, HSTATE_KVM_VCPU(r13) 2413 2378 ld r5, HSTATE_KVM_VCORE(r13)
+2 -2
arch/powerpc/kvm/book3s_xive_template.c
··· 69 69 { 70 70 /* If the XIVE supports the new "store EOI facility, use it */ 71 71 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) 72 - __x_writeq(0, __x_eoi_page(xd)); 72 + __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI); 73 73 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { 74 74 opal_int_eoi(hw_irq); 75 75 } else { ··· 89 89 * properly. 90 90 */ 91 91 if (xd->flags & XIVE_IRQ_FLAG_LSI) 92 - __x_readq(__x_eoi_page(xd)); 92 + __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI); 93 93 else { 94 94 eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00); 95 95
+1 -1
arch/powerpc/mm/hugetlbpage-radix.c
··· 68 68 addr = ALIGN(addr, huge_page_size(h)); 69 69 vma = find_vma(mm, addr); 70 70 if (mm->task_size - len >= addr && 71 - (!vma || addr + len <= vma->vm_start)) 71 + (!vma || addr + len <= vm_start_gap(vma))) 72 72 return addr; 73 73 } 74 74 /*
+2 -2
arch/powerpc/mm/mmap.c
··· 112 112 addr = PAGE_ALIGN(addr); 113 113 vma = find_vma(mm, addr); 114 114 if (mm->task_size - len >= addr && addr >= mmap_min_addr && 115 - (!vma || addr + len <= vma->vm_start)) 115 + (!vma || addr + len <= vm_start_gap(vma))) 116 116 return addr; 117 117 } 118 118 ··· 157 157 addr = PAGE_ALIGN(addr); 158 158 vma = find_vma(mm, addr); 159 159 if (mm->task_size - len >= addr && addr >= mmap_min_addr && 160 - (!vma || addr + len <= vma->vm_start)) 160 + (!vma || addr + len <= vm_start_gap(vma))) 161 161 return addr; 162 162 } 163 163
+1 -1
arch/powerpc/mm/mmu_context_book3s64.c
··· 99 99 * mm->context.addr_limit. Default to max task size so that we copy the 100 100 * default values to paca which will help us to handle slb miss early. 101 101 */ 102 - mm->context.addr_limit = TASK_SIZE_128TB; 102 + mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; 103 103 104 104 /* 105 105 * The old code would re-promote on fork, we don't do that when using
+1 -1
arch/powerpc/mm/slice.c
··· 99 99 if ((mm->task_size - len) < addr) 100 100 return 0; 101 101 vma = find_vma(mm, addr); 102 - return (!vma || (addr + len) <= vma->vm_start); 102 + return (!vma || (addr + len) <= vm_start_gap(vma)); 103 103 } 104 104 105 105 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+2 -1
arch/powerpc/perf/perf_regs.c
··· 101 101 struct pt_regs *regs_user_copy) 102 102 { 103 103 regs_user->regs = task_pt_regs(current); 104 - regs_user->abi = perf_reg_abi(current); 104 + regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) : 105 + PERF_SAMPLE_REGS_ABI_NONE; 105 106 }
+2 -2
arch/powerpc/perf/power9-pmu.c
··· 402 402 .name = "POWER9", 403 403 .n_counter = MAX_PMU_COUNTERS, 404 404 .add_fields = ISA207_ADD_FIELDS, 405 - .test_adder = ISA207_TEST_ADDER, 405 + .test_adder = P9_DD1_TEST_ADDER, 406 406 .compute_mmcr = isa207_compute_mmcr, 407 407 .config_bhrb = power9_config_bhrb, 408 408 .bhrb_filter_map = power9_bhrb_filter_map, ··· 421 421 .name = "POWER9", 422 422 .n_counter = MAX_PMU_COUNTERS, 423 423 .add_fields = ISA207_ADD_FIELDS, 424 - .test_adder = P9_DD1_TEST_ADDER, 424 + .test_adder = ISA207_TEST_ADDER, 425 425 .compute_mmcr = isa207_compute_mmcr, 426 426 .config_bhrb = power9_config_bhrb, 427 427 .bhrb_filter_map = power9_bhrb_filter_map,
+11
arch/powerpc/platforms/Kconfig
··· 59 59 60 60 In case of doubt, say Y 61 61 62 + config PPC_DT_CPU_FTRS 63 + bool "Device-tree based CPU feature discovery & setup" 64 + depends on PPC_BOOK3S_64 65 + default y 66 + help 67 + This enables code to use a new device tree binding for describing CPU 68 + compatibility and features. Saying Y here will attempt to use the new 69 + binding if the firmware provides it. Currently only the skiboot 70 + firmware provides this binding. 71 + If you're not sure say Y. 72 + 62 73 config UDBG_RTAS_CONSOLE 63 74 bool "RTAS based debug console" 64 75 depends on PPC_RTAS
+2
arch/powerpc/platforms/cell/spufs/coredump.c
··· 175 175 skip = roundup(cprm->pos - total + sz, 4) - cprm->pos; 176 176 if (!dump_skip(cprm, skip)) 177 177 goto Eio; 178 + 179 + rc = 0; 178 180 out: 179 181 free_page((unsigned long)buf); 180 182 return rc;
+67 -30
arch/powerpc/platforms/powernv/npu-dma.c
··· 75 75 if (WARN_ON(!gpdev)) 76 76 return NULL; 77 77 78 - if (WARN_ON(!gpdev->dev.of_node)) 78 + /* Not all PCI devices have device-tree nodes */ 79 + if (!gpdev->dev.of_node) 79 80 return NULL; 80 81 81 82 /* Get assoicated PCI device */ ··· 449 448 return mmio_atsd_reg; 450 449 } 451 450 452 - static int mmio_invalidate_pid(struct npu *npu, unsigned long pid) 451 + static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush) 453 452 { 454 453 unsigned long launch; 455 454 ··· 465 464 /* PID */ 466 465 launch |= pid << PPC_BITLSHIFT(38); 467 466 467 + /* No flush */ 468 + launch |= !flush << PPC_BITLSHIFT(39); 469 + 468 470 /* Invalidating the entire process doesn't use a va */ 469 471 return mmio_launch_invalidate(npu, launch, 0); 470 472 } 471 473 472 474 static int mmio_invalidate_va(struct npu *npu, unsigned long va, 473 - unsigned long pid) 475 + unsigned long pid, bool flush) 474 476 { 475 477 unsigned long launch; 476 478 ··· 489 485 /* PID */ 490 486 launch |= pid << PPC_BITLSHIFT(38); 491 487 488 + /* No flush */ 489 + launch |= !flush << PPC_BITLSHIFT(39); 490 + 492 491 return mmio_launch_invalidate(npu, launch, va); 493 492 } 494 493 495 494 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn) 495 + 496 + struct mmio_atsd_reg { 497 + struct npu *npu; 498 + int reg; 499 + }; 500 + 501 + static void mmio_invalidate_wait( 502 + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush) 503 + { 504 + struct npu *npu; 505 + int i, reg; 506 + 507 + /* Wait for all invalidations to complete */ 508 + for (i = 0; i <= max_npu2_index; i++) { 509 + if (mmio_atsd_reg[i].reg < 0) 510 + continue; 511 + 512 + /* Wait for completion */ 513 + npu = mmio_atsd_reg[i].npu; 514 + reg = mmio_atsd_reg[i].reg; 515 + while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) 516 + cpu_relax(); 517 + 518 + put_mmio_atsd_reg(npu, reg); 519 + 520 + /* 521 + * The GPU requires two flush ATSDs to ensure all entries have 522 + * been flushed. We use PID 0 as it will never be used for a 523 + * process on the GPU. 524 + */ 525 + if (flush) 526 + mmio_invalidate_pid(npu, 0, true); 527 + } 528 + } 496 529 497 530 /* 498 531 * Invalidate either a single address or an entire PID depending on 499 532 * the value of va. 
500 533 */ 501 534 static void mmio_invalidate(struct npu_context *npu_context, int va, 502 - unsigned long address) 535 + unsigned long address, bool flush) 503 536 { 504 - int i, j, reg; 537 + int i, j; 505 538 struct npu *npu; 506 539 struct pnv_phb *nphb; 507 540 struct pci_dev *npdev; 508 - struct { 509 - struct npu *npu; 510 - int reg; 511 - } mmio_atsd_reg[NV_MAX_NPUS]; 541 + struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS]; 512 542 unsigned long pid = npu_context->mm->context.id; 513 543 514 544 /* ··· 562 524 563 525 if (va) 564 526 mmio_atsd_reg[i].reg = 565 - mmio_invalidate_va(npu, address, pid); 527 + mmio_invalidate_va(npu, address, pid, 528 + flush); 566 529 else 567 530 mmio_atsd_reg[i].reg = 568 - mmio_invalidate_pid(npu, pid); 531 + mmio_invalidate_pid(npu, pid, flush); 569 532 570 533 /* 571 534 * The NPU hardware forwards the shootdown to all GPUs ··· 582 543 */ 583 544 flush_tlb_mm(npu_context->mm); 584 545 585 - /* Wait for all invalidations to complete */ 586 - for (i = 0; i <= max_npu2_index; i++) { 587 - if (mmio_atsd_reg[i].reg < 0) 588 - continue; 589 - 590 - /* Wait for completion */ 591 - npu = mmio_atsd_reg[i].npu; 592 - reg = mmio_atsd_reg[i].reg; 593 - while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT)) 594 - cpu_relax(); 595 - put_mmio_atsd_reg(npu, reg); 596 - } 546 + mmio_invalidate_wait(mmio_atsd_reg, flush); 547 + if (flush) 548 + /* Wait for the flush to complete */ 549 + mmio_invalidate_wait(mmio_atsd_reg, false); 597 550 } 598 551 599 552 static void pnv_npu2_mn_release(struct mmu_notifier *mn, ··· 601 570 * There should be no more translation requests for this PID, but we 602 571 * need to ensure any entries for it are removed from the TLB. 603 572 */ 604 - mmio_invalidate(npu_context, 0, 0); 573 + mmio_invalidate(npu_context, 0, 0, true); 605 574 } 606 575 607 576 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, ··· 611 580 { 612 581 struct npu_context *npu_context = mn_to_npu_context(mn); 613 582 614 - mmio_invalidate(npu_context, 1, address); 583 + mmio_invalidate(npu_context, 1, address, true); 615 584 } 616 585 617 586 static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn, ··· 620 589 { 621 590 struct npu_context *npu_context = mn_to_npu_context(mn); 622 591 623 - mmio_invalidate(npu_context, 1, address); 592 + mmio_invalidate(npu_context, 1, address, true); 624 593 } 625 594 626 595 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, ··· 630 599 struct npu_context *npu_context = mn_to_npu_context(mn); 631 600 unsigned long address; 632 601 633 - for (address = start; address <= end; address += PAGE_SIZE) 634 - mmio_invalidate(npu_context, 1, address); 602 + for (address = start; address < end; address += PAGE_SIZE) 603 + mmio_invalidate(npu_context, 1, address, false); 604 + 605 + /* Do the flush only on the final addess == end */ 606 + mmio_invalidate(npu_context, 1, address, true); 635 607 } 636 608 637 609 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { ··· 684 650 /* No nvlink associated with this GPU device */ 685 651 return ERR_PTR(-ENODEV); 686 652 687 - if (!mm) { 688 - /* kernel thread contexts are not supported */ 653 + if (!mm || mm->context.id == 0) { 654 + /* 655 + * Kernel thread contexts are not supported and context id 0 is 656 + * reserved on the GPU. 657 + */ 689 658 return ERR_PTR(-EINVAL); 690 659 } 691 660
+7 -1
arch/powerpc/platforms/powernv/subcore.c
··· 407 407 408 408 static int subcore_init(void) 409 409 { 410 - if (!cpu_has_feature(CPU_FTR_SUBCORE)) 410 + unsigned pvr_ver; 411 + 412 + pvr_ver = PVR_VER(mfspr(SPRN_PVR)); 413 + 414 + if (pvr_ver != PVR_POWER8 && 415 + pvr_ver != PVR_POWER8E && 416 + pvr_ver != PVR_POWER8NVL) 411 417 return 0; 412 418 413 419 /*
+2
arch/powerpc/platforms/pseries/hotplug-memory.c
··· 124 124 for (i = 0; i < num_lmbs; i++) { 125 125 lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr); 126 126 lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index); 127 + lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index); 127 128 lmbs[i].flags = be32_to_cpu(lmbs[i].flags); 128 129 } 129 130 ··· 148 147 for (i = 0; i < num_lmbs; i++) { 149 148 lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr); 150 149 lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index); 150 + lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index); 151 151 lmbs[i].flags = cpu_to_be32(lmbs[i].flags); 152 152 } 153 153
+2 -1
arch/powerpc/sysdev/simple_gpio.c
··· 75 75 76 76 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc) 77 77 { 78 - struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc); 78 + struct u8_gpio_chip *u8_gc = 79 + container_of(mm_gc, struct u8_gpio_chip, mm_gc); 79 80 80 81 u8_gc->data = in_8(mm_gc->regs); 81 82 }
+1 -1
arch/powerpc/sysdev/xive/common.c
··· 297 297 { 298 298 /* If the XIVE supports the new "store EOI facility, use it */ 299 299 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) 300 - out_be64(xd->eoi_mmio, 0); 300 + out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0); 301 301 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { 302 302 /* 303 303 * The FW told us to call it. This happens for some
-3
arch/s390/Kconfig
··· 363 363 config SYSVIPC_COMPAT 364 364 def_bool y if COMPAT && SYSVIPC 365 365 366 - config KEYS_COMPAT 367 - def_bool y if COMPAT && KEYS 368 - 369 366 config SMP 370 367 def_bool y 371 368 prompt "Symmetric multi-processing support"
+33 -6
arch/s390/configs/default_defconfig
··· 30 30 CONFIG_SCHED_AUTOGROUP=y 31 31 CONFIG_BLK_DEV_INITRD=y 32 32 CONFIG_EXPERT=y 33 + # CONFIG_SYSFS_SYSCALL is not set 33 34 CONFIG_BPF_SYSCALL=y 34 35 CONFIG_USERFAULTFD=y 35 36 # CONFIG_COMPAT_BRK is not set ··· 45 44 CONFIG_MODULE_FORCE_UNLOAD=y 46 45 CONFIG_MODVERSIONS=y 47 46 CONFIG_MODULE_SRCVERSION_ALL=y 47 + CONFIG_BLK_DEV_INTEGRITY=y 48 48 CONFIG_BLK_DEV_THROTTLING=y 49 + CONFIG_BLK_WBT=y 50 + CONFIG_BLK_WBT_SQ=y 49 51 CONFIG_PARTITION_ADVANCED=y 50 52 CONFIG_IBM_PARTITION=y 51 53 CONFIG_BSD_DISKLABEL=y ··· 94 90 CONFIG_UNIX_DIAG=m 95 91 CONFIG_XFRM_USER=m 96 92 CONFIG_NET_KEY=m 93 + CONFIG_SMC=m 94 + CONFIG_SMC_DIAG=m 97 95 CONFIG_INET=y 98 96 CONFIG_IP_MULTICAST=y 99 97 CONFIG_IP_ADVANCED_ROUTER=y ··· 365 359 CONFIG_NET_ACT_SKBEDIT=m 366 360 CONFIG_NET_ACT_CSUM=m 367 361 CONFIG_DNS_RESOLVER=y 362 + CONFIG_NETLINK_DIAG=m 368 363 CONFIG_CGROUP_NET_PRIO=y 369 364 CONFIG_BPF_JIT=y 370 365 CONFIG_NET_PKTGEN=m ··· 374 367 CONFIG_DMA_CMA=y 375 368 CONFIG_CMA_SIZE_MBYTES=0 376 369 CONFIG_CONNECTOR=y 370 + CONFIG_ZRAM=m 377 371 CONFIG_BLK_DEV_LOOP=m 378 372 CONFIG_BLK_DEV_CRYPTOLOOP=m 373 + CONFIG_BLK_DEV_DRBD=m 379 374 CONFIG_BLK_DEV_NBD=m 380 375 CONFIG_BLK_DEV_OSD=m 381 376 CONFIG_BLK_DEV_RAM=y 382 377 CONFIG_BLK_DEV_RAM_SIZE=32768 383 - CONFIG_CDROM_PKTCDVD=m 384 - CONFIG_ATA_OVER_ETH=m 378 + CONFIG_BLK_DEV_RAM_DAX=y 385 379 CONFIG_VIRTIO_BLK=y 380 + CONFIG_BLK_DEV_RBD=m 386 381 CONFIG_ENCLOSURE_SERVICES=m 382 + CONFIG_GENWQE=m 387 383 CONFIG_RAID_ATTRS=m 388 384 CONFIG_SCSI=y 389 385 CONFIG_BLK_DEV_SD=y ··· 452 442 # CONFIG_NET_VENDOR_INTEL is not set 453 443 # CONFIG_NET_VENDOR_MARVELL is not set 454 444 CONFIG_MLX4_EN=m 445 + CONFIG_MLX5_CORE=m 446 + CONFIG_MLX5_CORE_EN=y 455 447 # CONFIG_NET_VENDOR_NATSEMI is not set 456 448 CONFIG_PPP=m 457 449 CONFIG_PPP_BSDCOMP=m ··· 464 452 CONFIG_PPPOL2TP=m 465 453 CONFIG_PPP_ASYNC=m 466 454 CONFIG_PPP_SYNC_TTY=m 467 - # CONFIG_INPUT_MOUSEDEV_PSAUX is not set 468 455 # CONFIG_INPUT_KEYBOARD is not set 469 456 # CONFIG_INPUT_MOUSE is not set 470 457 # CONFIG_SERIO is not set ··· 482 471 CONFIG_INFINIBAND=m 483 472 CONFIG_INFINIBAND_USER_ACCESS=m 484 473 CONFIG_MLX4_INFINIBAND=m 474 + CONFIG_MLX5_INFINIBAND=m 485 475 CONFIG_VIRTIO_BALLOON=m 486 476 CONFIG_EXT4_FS=y 487 477 CONFIG_EXT4_FS_POSIX_ACL=y ··· 499 487 CONFIG_XFS_RT=y 500 488 CONFIG_XFS_DEBUG=y 501 489 CONFIG_GFS2_FS=m 490 + CONFIG_GFS2_FS_LOCKING_DLM=y 502 491 CONFIG_OCFS2_FS=m 503 492 CONFIG_BTRFS_FS=y 504 493 CONFIG_BTRFS_FS_POSIX_ACL=y 494 + CONFIG_BTRFS_DEBUG=y 505 495 CONFIG_NILFS2_FS=m 496 + CONFIG_FS_DAX=y 497 + CONFIG_EXPORTFS_BLOCK_OPS=y 506 498 CONFIG_FANOTIFY=y 499 + CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y 507 500 CONFIG_QUOTA_NETLINK_INTERFACE=y 501 + CONFIG_QUOTA_DEBUG=y 508 502 CONFIG_QFMT_V1=m 509 503 CONFIG_QFMT_V2=m 510 504 CONFIG_AUTOFS4_FS=m ··· 576 558 CONFIG_DEBUG_SECTION_MISMATCH=y 577 559 CONFIG_MAGIC_SYSRQ=y 578 560 CONFIG_DEBUG_PAGEALLOC=y 561 + CONFIG_DEBUG_RODATA_TEST=y 579 562 CONFIG_DEBUG_OBJECTS=y 580 563 CONFIG_DEBUG_OBJECTS_SELFTEST=y 581 564 CONFIG_DEBUG_OBJECTS_FREE=y ··· 599 580 CONFIG_WQ_WATCHDOG=y 600 581 CONFIG_PANIC_ON_OOPS=y 601 582 CONFIG_DEBUG_TIMEKEEPING=y 602 - CONFIG_TIMER_STATS=y 603 583 CONFIG_DEBUG_RT_MUTEXES=y 604 584 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y 605 585 CONFIG_PROVE_LOCKING=y ··· 613 595 CONFIG_RCU_CPU_STALL_TIMEOUT=300 614 596 CONFIG_NOTIFIER_ERROR_INJECTION=m 615 597 CONFIG_PM_NOTIFIER_ERROR_INJECT=m 598 + CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m 616 599 CONFIG_FAULT_INJECTION=y 617 600 CONFIG_FAILSLAB=y 618 601 
CONFIG_FAIL_PAGE_ALLOC=y ··· 635 616 CONFIG_TRACE_ENUM_MAP_FILE=y 636 617 CONFIG_LKDTM=m 637 618 CONFIG_TEST_LIST_SORT=y 619 + CONFIG_TEST_SORT=y 638 620 CONFIG_KPROBES_SANITY_TEST=y 639 621 CONFIG_RBTREE_TEST=y 640 622 CONFIG_INTERVAL_TREE_TEST=m 641 623 CONFIG_PERCPU_TEST=m 642 624 CONFIG_ATOMIC64_SELFTEST=y 643 - CONFIG_TEST_STRING_HELPERS=y 644 - CONFIG_TEST_KSTRTOX=y 645 625 CONFIG_DMA_API_DEBUG=y 646 626 CONFIG_TEST_BPF=m 647 627 CONFIG_BUG_ON_DATA_CORRUPTION=y ··· 648 630 CONFIG_ENCRYPTED_KEYS=m 649 631 CONFIG_SECURITY=y 650 632 CONFIG_SECURITY_NETWORK=y 633 + CONFIG_HARDENED_USERCOPY=y 651 634 CONFIG_SECURITY_SELINUX=y 652 635 CONFIG_SECURITY_SELINUX_BOOTPARAM=y 653 636 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 ··· 659 640 CONFIG_CRYPTO_DH=m 660 641 CONFIG_CRYPTO_ECDH=m 661 642 CONFIG_CRYPTO_USER=m 643 + CONFIG_CRYPTO_PCRYPT=m 662 644 CONFIG_CRYPTO_CRYPTD=m 645 + CONFIG_CRYPTO_MCRYPTD=m 663 646 CONFIG_CRYPTO_TEST=m 664 647 CONFIG_CRYPTO_CCM=m 665 648 CONFIG_CRYPTO_GCM=m ··· 669 648 CONFIG_CRYPTO_LRW=m 670 649 CONFIG_CRYPTO_PCBC=m 671 650 CONFIG_CRYPTO_KEYWRAP=m 651 + CONFIG_CRYPTO_CMAC=m 672 652 CONFIG_CRYPTO_XCBC=m 673 653 CONFIG_CRYPTO_VMAC=m 674 654 CONFIG_CRYPTO_CRC32=m ··· 679 657 CONFIG_CRYPTO_RMD256=m 680 658 CONFIG_CRYPTO_RMD320=m 681 659 CONFIG_CRYPTO_SHA512=m 660 + CONFIG_CRYPTO_SHA3=m 682 661 CONFIG_CRYPTO_TGR192=m 683 662 CONFIG_CRYPTO_WP512=m 663 + CONFIG_CRYPTO_AES_TI=m 684 664 CONFIG_CRYPTO_ANUBIS=m 685 665 CONFIG_CRYPTO_BLOWFISH=m 686 666 CONFIG_CRYPTO_CAMELLIA=m ··· 698 674 CONFIG_CRYPTO_842=m 699 675 CONFIG_CRYPTO_LZ4=m 700 676 CONFIG_CRYPTO_LZ4HC=m 677 + CONFIG_CRYPTO_ANSI_CPRNG=m 701 678 CONFIG_CRYPTO_USER_API_HASH=m 702 679 CONFIG_CRYPTO_USER_API_SKCIPHER=m 703 680 CONFIG_CRYPTO_USER_API_RNG=m ··· 710 685 CONFIG_CRYPTO_SHA512_S390=m 711 686 CONFIG_CRYPTO_DES_S390=m 712 687 CONFIG_CRYPTO_AES_S390=m 688 + CONFIG_CRYPTO_PAES_S390=m 713 689 CONFIG_CRYPTO_GHASH_S390=m 714 690 CONFIG_CRYPTO_CRC32_S390=y 715 691 CONFIG_ASYMMETRIC_KEY_TYPE=y ··· 718 692 CONFIG_X509_CERTIFICATE_PARSER=m 719 693 CONFIG_CRC7=m 720 694 CONFIG_CRC8=m 695 + CONFIG_RANDOM32_SELFTEST=y 721 696 CONFIG_CORDIC=m 722 697 CONFIG_CMM=m 723 698 CONFIG_APPLDATA_BASE=y
+24 -4
arch/s390/configs/gcov_defconfig
··· 31 31 CONFIG_SCHED_AUTOGROUP=y 32 32 CONFIG_BLK_DEV_INITRD=y 33 33 CONFIG_EXPERT=y 34 + # CONFIG_SYSFS_SYSCALL is not set 34 35 CONFIG_BPF_SYSCALL=y 35 36 CONFIG_USERFAULTFD=y 36 37 # CONFIG_COMPAT_BRK is not set ··· 47 46 CONFIG_MODULE_FORCE_UNLOAD=y 48 47 CONFIG_MODVERSIONS=y 49 48 CONFIG_MODULE_SRCVERSION_ALL=y 49 + CONFIG_BLK_DEV_INTEGRITY=y 50 50 CONFIG_BLK_DEV_THROTTLING=y 51 + CONFIG_BLK_WBT=y 52 + CONFIG_BLK_WBT_SQ=y 51 53 CONFIG_PARTITION_ADVANCED=y 52 54 CONFIG_IBM_PARTITION=y 53 55 CONFIG_BSD_DISKLABEL=y ··· 92 88 CONFIG_UNIX_DIAG=m 93 89 CONFIG_XFRM_USER=m 94 90 CONFIG_NET_KEY=m 91 + CONFIG_SMC=m 92 + CONFIG_SMC_DIAG=m 95 93 CONFIG_INET=y 96 94 CONFIG_IP_MULTICAST=y 97 95 CONFIG_IP_ADVANCED_ROUTER=y ··· 362 356 CONFIG_NET_ACT_SKBEDIT=m 363 357 CONFIG_NET_ACT_CSUM=m 364 358 CONFIG_DNS_RESOLVER=y 359 + CONFIG_NETLINK_DIAG=m 365 360 CONFIG_CGROUP_NET_PRIO=y 366 361 CONFIG_BPF_JIT=y 367 362 CONFIG_NET_PKTGEN=m ··· 371 364 CONFIG_DMA_CMA=y 372 365 CONFIG_CMA_SIZE_MBYTES=0 373 366 CONFIG_CONNECTOR=y 367 + CONFIG_ZRAM=m 374 368 CONFIG_BLK_DEV_LOOP=m 375 369 CONFIG_BLK_DEV_CRYPTOLOOP=m 370 + CONFIG_BLK_DEV_DRBD=m 376 371 CONFIG_BLK_DEV_NBD=m 377 372 CONFIG_BLK_DEV_OSD=m 378 373 CONFIG_BLK_DEV_RAM=y 379 374 CONFIG_BLK_DEV_RAM_SIZE=32768 380 - CONFIG_CDROM_PKTCDVD=m 381 - CONFIG_ATA_OVER_ETH=m 375 + CONFIG_BLK_DEV_RAM_DAX=y 382 376 CONFIG_VIRTIO_BLK=y 383 377 CONFIG_ENCLOSURE_SERVICES=m 378 + CONFIG_GENWQE=m 384 379 CONFIG_RAID_ATTRS=m 385 380 CONFIG_SCSI=y 386 381 CONFIG_BLK_DEV_SD=y ··· 448 439 # CONFIG_NET_VENDOR_INTEL is not set 449 440 # CONFIG_NET_VENDOR_MARVELL is not set 450 441 CONFIG_MLX4_EN=m 442 + CONFIG_MLX5_CORE=m 443 + CONFIG_MLX5_CORE_EN=y 451 444 # CONFIG_NET_VENDOR_NATSEMI is not set 452 445 CONFIG_PPP=m 453 446 CONFIG_PPP_BSDCOMP=m ··· 460 449 CONFIG_PPPOL2TP=m 461 450 CONFIG_PPP_ASYNC=m 462 451 CONFIG_PPP_SYNC_TTY=m 463 - # CONFIG_INPUT_MOUSEDEV_PSAUX is not set 464 452 # CONFIG_INPUT_KEYBOARD is not set 465 453 # CONFIG_INPUT_MOUSE is not set 466 454 # CONFIG_SERIO is not set ··· 478 468 CONFIG_INFINIBAND=m 479 469 CONFIG_INFINIBAND_USER_ACCESS=m 480 470 CONFIG_MLX4_INFINIBAND=m 471 + CONFIG_MLX5_INFINIBAND=m 481 472 CONFIG_VIRTIO_BALLOON=m 482 473 CONFIG_EXT4_FS=y 483 474 CONFIG_EXT4_FS_POSIX_ACL=y ··· 494 483 CONFIG_XFS_POSIX_ACL=y 495 484 CONFIG_XFS_RT=y 496 485 CONFIG_GFS2_FS=m 486 + CONFIG_GFS2_FS_LOCKING_DLM=y 497 487 CONFIG_OCFS2_FS=m 498 488 CONFIG_BTRFS_FS=y 499 489 CONFIG_BTRFS_FS_POSIX_ACL=y 500 490 CONFIG_NILFS2_FS=m 491 + CONFIG_FS_DAX=y 492 + CONFIG_EXPORTFS_BLOCK_OPS=y 501 493 CONFIG_FANOTIFY=y 494 + CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y 502 495 CONFIG_QUOTA_NETLINK_INTERFACE=y 503 496 CONFIG_QFMT_V1=m 504 497 CONFIG_QFMT_V2=m ··· 568 553 CONFIG_MAGIC_SYSRQ=y 569 554 CONFIG_DEBUG_MEMORY_INIT=y 570 555 CONFIG_PANIC_ON_OOPS=y 571 - CONFIG_TIMER_STATS=y 572 556 CONFIG_RCU_TORTURE_TEST=m 573 557 CONFIG_RCU_CPU_STALL_TIMEOUT=60 574 558 CONFIG_LATENCYTOP=y ··· 590 576 CONFIG_ENCRYPTED_KEYS=m 591 577 CONFIG_SECURITY=y 592 578 CONFIG_SECURITY_NETWORK=y 579 + CONFIG_HARDENED_USERCOPY=y 593 580 CONFIG_SECURITY_SELINUX=y 594 581 CONFIG_SECURITY_SELINUX_BOOTPARAM=y 595 582 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 ··· 614 599 CONFIG_CRYPTO_LRW=m 615 600 CONFIG_CRYPTO_PCBC=m 616 601 CONFIG_CRYPTO_KEYWRAP=m 602 + CONFIG_CRYPTO_CMAC=m 617 603 CONFIG_CRYPTO_XCBC=m 618 604 CONFIG_CRYPTO_VMAC=m 619 605 CONFIG_CRYPTO_CRC32=m ··· 627 611 CONFIG_CRYPTO_SHA3=m 628 612 CONFIG_CRYPTO_TGR192=m 629 613 CONFIG_CRYPTO_WP512=m 614 + CONFIG_CRYPTO_AES_TI=m 630 615 
CONFIG_CRYPTO_ANUBIS=m 631 616 CONFIG_CRYPTO_BLOWFISH=m 632 617 CONFIG_CRYPTO_CAMELLIA=m ··· 643 626 CONFIG_CRYPTO_842=m 644 627 CONFIG_CRYPTO_LZ4=m 645 628 CONFIG_CRYPTO_LZ4HC=m 629 + CONFIG_CRYPTO_ANSI_CPRNG=m 646 630 CONFIG_CRYPTO_USER_API_HASH=m 647 631 CONFIG_CRYPTO_USER_API_SKCIPHER=m 648 632 CONFIG_CRYPTO_USER_API_RNG=m 649 633 CONFIG_CRYPTO_USER_API_AEAD=m 650 634 CONFIG_ZCRYPT=m 635 + CONFIG_PKEY=m 651 636 CONFIG_CRYPTO_SHA1_S390=m 652 637 CONFIG_CRYPTO_SHA256_S390=m 653 638 CONFIG_CRYPTO_SHA512_S390=m 654 639 CONFIG_CRYPTO_DES_S390=m 655 640 CONFIG_CRYPTO_AES_S390=m 641 + CONFIG_CRYPTO_PAES_S390=m 656 642 CONFIG_CRYPTO_GHASH_S390=m 657 643 CONFIG_CRYPTO_CRC32_S390=y 658 644 CONFIG_CRC7=m
+23 -4
arch/s390/configs/performance_defconfig
··· 31 31 CONFIG_SCHED_AUTOGROUP=y 32 32 CONFIG_BLK_DEV_INITRD=y 33 33 CONFIG_EXPERT=y 34 + # CONFIG_SYSFS_SYSCALL is not set 34 35 CONFIG_BPF_SYSCALL=y 35 36 CONFIG_USERFAULTFD=y 36 37 # CONFIG_COMPAT_BRK is not set ··· 45 44 CONFIG_MODULE_FORCE_UNLOAD=y 46 45 CONFIG_MODVERSIONS=y 47 46 CONFIG_MODULE_SRCVERSION_ALL=y 47 + CONFIG_BLK_DEV_INTEGRITY=y 48 48 CONFIG_BLK_DEV_THROTTLING=y 49 + CONFIG_BLK_WBT=y 50 + CONFIG_BLK_WBT_SQ=y 49 51 CONFIG_PARTITION_ADVANCED=y 50 52 CONFIG_IBM_PARTITION=y 51 53 CONFIG_BSD_DISKLABEL=y ··· 90 86 CONFIG_UNIX_DIAG=m 91 87 CONFIG_XFRM_USER=m 92 88 CONFIG_NET_KEY=m 89 + CONFIG_SMC=m 90 + CONFIG_SMC_DIAG=m 93 91 CONFIG_INET=y 94 92 CONFIG_IP_MULTICAST=y 95 93 CONFIG_IP_ADVANCED_ROUTER=y ··· 360 354 CONFIG_NET_ACT_SKBEDIT=m 361 355 CONFIG_NET_ACT_CSUM=m 362 356 CONFIG_DNS_RESOLVER=y 357 + CONFIG_NETLINK_DIAG=m 363 358 CONFIG_CGROUP_NET_PRIO=y 364 359 CONFIG_BPF_JIT=y 365 360 CONFIG_NET_PKTGEN=m ··· 369 362 CONFIG_DMA_CMA=y 370 363 CONFIG_CMA_SIZE_MBYTES=0 371 364 CONFIG_CONNECTOR=y 365 + CONFIG_ZRAM=m 372 366 CONFIG_BLK_DEV_LOOP=m 373 367 CONFIG_BLK_DEV_CRYPTOLOOP=m 368 + CONFIG_BLK_DEV_DRBD=m 374 369 CONFIG_BLK_DEV_NBD=m 375 370 CONFIG_BLK_DEV_OSD=m 376 371 CONFIG_BLK_DEV_RAM=y 377 372 CONFIG_BLK_DEV_RAM_SIZE=32768 378 - CONFIG_CDROM_PKTCDVD=m 379 - CONFIG_ATA_OVER_ETH=m 373 + CONFIG_BLK_DEV_RAM_DAX=y 380 374 CONFIG_VIRTIO_BLK=y 381 375 CONFIG_ENCLOSURE_SERVICES=m 376 + CONFIG_GENWQE=m 382 377 CONFIG_RAID_ATTRS=m 383 378 CONFIG_SCSI=y 384 379 CONFIG_BLK_DEV_SD=y ··· 446 437 # CONFIG_NET_VENDOR_INTEL is not set 447 438 # CONFIG_NET_VENDOR_MARVELL is not set 448 439 CONFIG_MLX4_EN=m 440 + CONFIG_MLX5_CORE=m 441 + CONFIG_MLX5_CORE_EN=y 449 442 # CONFIG_NET_VENDOR_NATSEMI is not set 450 443 CONFIG_PPP=m 451 444 CONFIG_PPP_BSDCOMP=m ··· 458 447 CONFIG_PPPOL2TP=m 459 448 CONFIG_PPP_ASYNC=m 460 449 CONFIG_PPP_SYNC_TTY=m 461 - # CONFIG_INPUT_MOUSEDEV_PSAUX is not set 462 450 # CONFIG_INPUT_KEYBOARD is not set 463 451 # CONFIG_INPUT_MOUSE is not set 464 452 # CONFIG_SERIO is not set ··· 476 466 CONFIG_INFINIBAND=m 477 467 CONFIG_INFINIBAND_USER_ACCESS=m 478 468 CONFIG_MLX4_INFINIBAND=m 469 + CONFIG_MLX5_INFINIBAND=m 479 470 CONFIG_VIRTIO_BALLOON=m 480 471 CONFIG_EXT4_FS=y 481 472 CONFIG_EXT4_FS_POSIX_ACL=y ··· 492 481 CONFIG_XFS_POSIX_ACL=y 493 482 CONFIG_XFS_RT=y 494 483 CONFIG_GFS2_FS=m 484 + CONFIG_GFS2_FS_LOCKING_DLM=y 495 485 CONFIG_OCFS2_FS=m 496 486 CONFIG_BTRFS_FS=y 497 487 CONFIG_BTRFS_FS_POSIX_ACL=y 498 488 CONFIG_NILFS2_FS=m 489 + CONFIG_FS_DAX=y 490 + CONFIG_EXPORTFS_BLOCK_OPS=y 499 491 CONFIG_FANOTIFY=y 492 + CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y 500 493 CONFIG_QUOTA_NETLINK_INTERFACE=y 501 494 CONFIG_QFMT_V1=m 502 495 CONFIG_QFMT_V2=m ··· 566 551 CONFIG_MAGIC_SYSRQ=y 567 552 CONFIG_DEBUG_MEMORY_INIT=y 568 553 CONFIG_PANIC_ON_OOPS=y 569 - CONFIG_TIMER_STATS=y 570 554 CONFIG_RCU_TORTURE_TEST=m 571 555 CONFIG_RCU_CPU_STALL_TIMEOUT=60 572 556 CONFIG_LATENCYTOP=y ··· 588 574 CONFIG_ENCRYPTED_KEYS=m 589 575 CONFIG_SECURITY=y 590 576 CONFIG_SECURITY_NETWORK=y 577 + CONFIG_HARDENED_USERCOPY=y 591 578 CONFIG_SECURITY_SELINUX=y 592 579 CONFIG_SECURITY_SELINUX_BOOTPARAM=y 593 580 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 ··· 612 597 CONFIG_CRYPTO_LRW=m 613 598 CONFIG_CRYPTO_PCBC=m 614 599 CONFIG_CRYPTO_KEYWRAP=m 600 + CONFIG_CRYPTO_CMAC=m 615 601 CONFIG_CRYPTO_XCBC=m 616 602 CONFIG_CRYPTO_VMAC=m 617 603 CONFIG_CRYPTO_CRC32=m ··· 625 609 CONFIG_CRYPTO_SHA3=m 626 610 CONFIG_CRYPTO_TGR192=m 627 611 CONFIG_CRYPTO_WP512=m 612 + CONFIG_CRYPTO_AES_TI=m 628 613 
CONFIG_CRYPTO_ANUBIS=m 629 614 CONFIG_CRYPTO_BLOWFISH=m 630 615 CONFIG_CRYPTO_CAMELLIA=m ··· 641 624 CONFIG_CRYPTO_842=m 642 625 CONFIG_CRYPTO_LZ4=m 643 626 CONFIG_CRYPTO_LZ4HC=m 627 + CONFIG_CRYPTO_ANSI_CPRNG=m 644 628 CONFIG_CRYPTO_USER_API_HASH=m 645 629 CONFIG_CRYPTO_USER_API_SKCIPHER=m 646 630 CONFIG_CRYPTO_USER_API_RNG=m ··· 653 635 CONFIG_CRYPTO_SHA512_S390=m 654 636 CONFIG_CRYPTO_DES_S390=m 655 637 CONFIG_CRYPTO_AES_S390=m 638 + CONFIG_CRYPTO_PAES_S390=m 656 639 CONFIG_CRYPTO_GHASH_S390=m 657 640 CONFIG_CRYPTO_CRC32_S390=y 658 641 CONFIG_CRC7=m
+4 -2
arch/s390/configs/zfcpdump_defconfig
··· 12 12 CONFIG_NR_CPUS=2 13 13 # CONFIG_HOTPLUG_CPU is not set 14 14 CONFIG_HZ_100=y 15 + # CONFIG_ARCH_RANDOM is not set 15 16 # CONFIG_COMPACTION is not set 16 17 # CONFIG_MIGRATION is not set 18 + # CONFIG_BOUNCE is not set 17 19 # CONFIG_CHECK_STACK is not set 18 20 # CONFIG_CHSC_SCH is not set 19 21 # CONFIG_SCM_BUS is not set ··· 38 36 CONFIG_SCSI_LOGGING=y 39 37 CONFIG_SCSI_FC_ATTRS=y 40 38 CONFIG_ZFCP=y 41 - # CONFIG_INPUT_MOUSEDEV_PSAUX is not set 42 39 # CONFIG_INPUT_KEYBOARD is not set 43 40 # CONFIG_INPUT_MOUSE is not set 44 41 # CONFIG_SERIO is not set 45 42 # CONFIG_HVC_IUCV is not set 43 + # CONFIG_HW_RANDOM_S390 is not set 46 44 CONFIG_RAW_DRIVER=y 47 45 # CONFIG_SCLP_ASYNC is not set 48 46 # CONFIG_HMC_DRV is not set ··· 56 54 # CONFIG_INOTIFY_USER is not set 57 55 CONFIG_CONFIGFS_FS=y 58 56 # CONFIG_MISC_FILESYSTEMS is not set 57 + # CONFIG_NETWORK_FILESYSTEMS is not set 59 58 CONFIG_PRINTK_TIME=y 60 59 CONFIG_DEBUG_INFO=y 61 - CONFIG_DEBUG_FS=y 62 60 CONFIG_DEBUG_KERNEL=y 63 61 CONFIG_PANIC_ON_OOPS=y 64 62 # CONFIG_SCHED_DEBUG is not set
+3 -5
arch/s390/defconfig
··· 28 28 CONFIG_USER_NS=y 29 29 CONFIG_BLK_DEV_INITRD=y 30 30 CONFIG_EXPERT=y 31 + # CONFIG_SYSFS_SYSCALL is not set 31 32 CONFIG_BPF_SYSCALL=y 32 33 CONFIG_USERFAULTFD=y 33 34 # CONFIG_COMPAT_BRK is not set ··· 109 108 CONFIG_SCSI_VIRTIO=y 110 109 CONFIG_MD=y 111 110 CONFIG_MD_LINEAR=m 112 - CONFIG_MD_RAID0=m 113 111 CONFIG_MD_MULTIPATH=m 114 112 CONFIG_BLK_DEV_DM=y 115 113 CONFIG_DM_CRYPT=m ··· 131 131 CONFIG_VIRTIO_NET=y 132 132 # CONFIG_NET_VENDOR_ALACRITECH is not set 133 133 # CONFIG_NET_VENDOR_SOLARFLARE is not set 134 + # CONFIG_NET_VENDOR_SYNOPSYS is not set 134 135 # CONFIG_INPUT is not set 135 136 # CONFIG_SERIO is not set 136 137 CONFIG_DEVKMEM=y ··· 163 162 CONFIG_DEBUG_PAGEALLOC=y 164 163 CONFIG_DETECT_HUNG_TASK=y 165 164 CONFIG_PANIC_ON_OOPS=y 166 - CONFIG_TIMER_STATS=y 167 165 CONFIG_DEBUG_RT_MUTEXES=y 168 166 CONFIG_PROVE_LOCKING=y 169 167 CONFIG_LOCK_STAT=y ··· 172 172 CONFIG_DEBUG_SG=y 173 173 CONFIG_DEBUG_NOTIFIERS=y 174 174 CONFIG_RCU_CPU_STALL_TIMEOUT=60 175 - CONFIG_RCU_TRACE=y 176 175 CONFIG_LATENCYTOP=y 177 176 CONFIG_SCHED_TRACER=y 178 177 CONFIG_FTRACE_SYSCALLS=y 179 178 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y 180 179 CONFIG_STACK_TRACER=y 181 180 CONFIG_BLK_DEV_IO_TRACE=y 182 - CONFIG_UPROBE_EVENTS=y 183 181 CONFIG_FUNCTION_PROFILER=y 184 182 CONFIG_TRACE_ENUM_MAP_FILE=y 185 183 CONFIG_KPROBES_SANITY_TEST=y ··· 188 190 CONFIG_CRYPTO_GCM=m 189 191 CONFIG_CRYPTO_CBC=y 190 192 CONFIG_CRYPTO_CTS=m 191 - CONFIG_CRYPTO_ECB=m 192 193 CONFIG_CRYPTO_LRW=m 193 194 CONFIG_CRYPTO_PCBC=m 194 195 CONFIG_CRYPTO_XTS=m ··· 227 230 CONFIG_CRYPTO_USER_API_RNG=m 228 231 CONFIG_ZCRYPT=m 229 232 CONFIG_PKEY=m 233 + CONFIG_CRYPTO_PAES_S390=m 230 234 CONFIG_CRYPTO_SHA1_S390=m 231 235 CONFIG_CRYPTO_SHA256_S390=m 232 236 CONFIG_CRYPTO_SHA512_S390=m
-1
arch/s390/include/asm/kvm_host.h
··· 541 541 struct mutex ais_lock; 542 542 u8 simm; 543 543 u8 nimm; 544 - int ais_enabled; 545 544 }; 546 545 547 546 struct kvm_hw_wp_info_arch {
-5
arch/s390/include/asm/processor.h
··· 221 221 /* Free guarded storage control block for current */ 222 222 void exit_thread_gs(void); 223 223 224 - /* 225 - * Return saved PC of a blocked thread. 226 - */ 227 - extern unsigned long thread_saved_pc(struct task_struct *t); 228 - 229 224 unsigned long get_wchan(struct task_struct *p); 230 225 #define task_pt_regs(tsk) ((struct pt_regs *) \ 231 226 (task_stack_page(tsk) + THREAD_SIZE) - 1)
+13 -6
arch/s390/kernel/entry.S
··· 231 231 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 232 232 .Lsie_done: 233 233 # some program checks are suppressing. C code (e.g. do_protection_exception) 234 - # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other 235 - # instructions between sie64a and .Lsie_done should not cause program 236 - # interrupts. So lets use a nop (47 00 00 00) as a landing pad. 234 + # will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There 235 + # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. 236 + # Other instructions between sie64a and .Lsie_done should not cause program 237 + # interrupts. So lets use 3 nops as a landing pad for all possible rewinds. 237 238 # See also .Lcleanup_sie 238 - .Lrewind_pad: 239 - nop 0 239 + .Lrewind_pad6: 240 + nopr 7 241 + .Lrewind_pad4: 242 + nopr 7 243 + .Lrewind_pad2: 244 + nopr 7 240 245 .globl sie_exit 241 246 sie_exit: 242 247 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area ··· 254 249 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code 255 250 j sie_exit 256 251 257 - EX_TABLE(.Lrewind_pad,.Lsie_fault) 252 + EX_TABLE(.Lrewind_pad6,.Lsie_fault) 253 + EX_TABLE(.Lrewind_pad4,.Lsie_fault) 254 + EX_TABLE(.Lrewind_pad2,.Lsie_fault) 258 255 EX_TABLE(sie_exit,.Lsie_fault) 259 256 EXPORT_SYMBOL(sie64a) 260 257 EXPORT_SYMBOL(sie_exit)
+1 -6
arch/s390/kernel/ipl.c
··· 564 564 565 565 static void __ipl_run(void *unused) 566 566 { 567 - if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW) 568 - diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); 569 567 diag308(DIAG308_LOAD_CLEAR, NULL); 570 568 if (MACHINE_IS_VM) 571 569 __cpcmd("IPL", NULL, 0, NULL); ··· 1086 1088 break; 1087 1089 case REIPL_METHOD_CCW_DIAG: 1088 1090 diag308(DIAG308_SET, reipl_block_ccw); 1089 - if (MACHINE_IS_LPAR) 1090 - diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); 1091 - else 1092 - diag308(DIAG308_LOAD_CLEAR, NULL); 1091 + diag308(DIAG308_LOAD_CLEAR, NULL); 1093 1092 break; 1094 1093 case REIPL_METHOD_FCP_RW_DIAG: 1095 1094 diag308(DIAG308_SET, reipl_block_fcp);
-25
arch/s390/kernel/process.c
··· 41 41 42 42 asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 43 43 44 - /* 45 - * Return saved PC of a blocked thread. used in kernel/sched. 46 - * resume in entry.S does not create a new stack frame, it 47 - * just stores the registers %r6-%r15 to the frame given by 48 - * schedule. We want to return the address of the caller of 49 - * schedule, so we have to walk the backchain one time to 50 - * find the frame schedule() store its return address. 51 - */ 52 - unsigned long thread_saved_pc(struct task_struct *tsk) 53 - { 54 - struct stack_frame *sf, *low, *high; 55 - 56 - if (!tsk || !task_stack_page(tsk)) 57 - return 0; 58 - low = task_stack_page(tsk); 59 - high = (struct stack_frame *) task_pt_regs(tsk); 60 - sf = (struct stack_frame *) tsk->thread.ksp; 61 - if (sf <= low || sf > high) 62 - return 0; 63 - sf = (struct stack_frame *) sf->back_chain; 64 - if (sf <= low || sf > high) 65 - return 0; 66 - return sf->gprs[8]; 67 - } 68 - 69 44 extern void kernel_thread_starter(void); 70 45 71 46 /*
+6 -9
arch/s390/kvm/gaccess.c
··· 977 977 ptr = asce.origin * 4096; 978 978 if (asce.r) { 979 979 *fake = 1; 980 + ptr = 0; 980 981 asce.dt = ASCE_TYPE_REGION1; 981 982 } 982 983 switch (asce.dt) { 983 984 case ASCE_TYPE_REGION1: 984 - if (vaddr.rfx01 > asce.tl && !asce.r) 985 + if (vaddr.rfx01 > asce.tl && !*fake) 985 986 return PGM_REGION_FIRST_TRANS; 986 987 break; 987 988 case ASCE_TYPE_REGION2: ··· 1010 1009 union region1_table_entry rfte; 1011 1010 1012 1011 if (*fake) { 1013 - /* offset in 16EB guest memory block */ 1014 - ptr = ptr + ((unsigned long) vaddr.rsx << 53UL); 1012 + ptr += (unsigned long) vaddr.rfx << 53; 1015 1013 rfte.val = ptr; 1016 1014 goto shadow_r2t; 1017 1015 } ··· 1036 1036 union region2_table_entry rste; 1037 1037 1038 1038 if (*fake) { 1039 - /* offset in 8PB guest memory block */ 1040 - ptr = ptr + ((unsigned long) vaddr.rtx << 42UL); 1039 + ptr += (unsigned long) vaddr.rsx << 42; 1041 1040 rste.val = ptr; 1042 1041 goto shadow_r3t; 1043 1042 } ··· 1063 1064 union region3_table_entry rtte; 1064 1065 1065 1066 if (*fake) { 1066 - /* offset in 4TB guest memory block */ 1067 - ptr = ptr + ((unsigned long) vaddr.sx << 31UL); 1067 + ptr += (unsigned long) vaddr.rtx << 31; 1068 1068 rtte.val = ptr; 1069 1069 goto shadow_sgt; 1070 1070 } ··· 1099 1101 union segment_table_entry ste; 1100 1102 1101 1103 if (*fake) { 1102 - /* offset in 2G guest memory block */ 1103 - ptr = ptr + ((unsigned long) vaddr.sx << 20UL); 1104 + ptr += (unsigned long) vaddr.sx << 20; 1104 1105 ste.val = ptr; 1105 1106 goto shadow_pgt; 1106 1107 }
+2 -2
arch/s390/kvm/interrupt.c
··· 2160 2160 struct kvm_s390_ais_req req; 2161 2161 int ret = 0; 2162 2162 2163 - if (!fi->ais_enabled) 2163 + if (!test_kvm_facility(kvm, 72)) 2164 2164 return -ENOTSUPP; 2165 2165 2166 2166 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req))) ··· 2204 2204 }; 2205 2205 int ret = 0; 2206 2206 2207 - if (!fi->ais_enabled || !adapter->suppressible) 2207 + if (!test_kvm_facility(kvm, 72) || !adapter->suppressible) 2208 2208 return kvm_s390_inject_vm(kvm, &s390int); 2209 2209 2210 2210 mutex_lock(&fi->ais_lock);
-2
arch/s390/kvm/kvm-s390.c
··· 558 558 } else { 559 559 set_kvm_facility(kvm->arch.model.fac_mask, 72); 560 560 set_kvm_facility(kvm->arch.model.fac_list, 72); 561 - kvm->arch.float_int.ais_enabled = 1; 562 561 r = 0; 563 562 } 564 563 mutex_unlock(&kvm->lock); ··· 1532 1533 mutex_init(&kvm->arch.float_int.ais_lock); 1533 1534 kvm->arch.float_int.simm = 0; 1534 1535 kvm->arch.float_int.nimm = 0; 1535 - kvm->arch.float_int.ais_enabled = 0; 1536 1536 spin_lock_init(&kvm->arch.float_int.lock); 1537 1537 for (i = 0; i < FIRQ_LIST_COUNT; i++) 1538 1538 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
+2 -2
arch/s390/mm/mmap.c
··· 101 101 addr = PAGE_ALIGN(addr); 102 102 vma = find_vma(mm, addr); 103 103 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 104 - (!vma || addr + len <= vma->vm_start)) 104 + (!vma || addr + len <= vm_start_gap(vma))) 105 105 goto check_asce_limit; 106 106 } 107 107 ··· 151 151 addr = PAGE_ALIGN(addr); 152 152 vma = find_vma(mm, addr); 153 153 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 154 - (!vma || addr + len <= vma->vm_start)) 154 + (!vma || addr + len <= vm_start_gap(vma))) 155 155 goto check_asce_limit; 156 156 } 157 157
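Note on the recurring find_vma() change above (the same pattern is applied to sh, sparc, tile and x86 further below): the test `addr + len <= vma->vm_start` becomes `addr + len <= vm_start_gap(vma)` so a fixed-address hint can no longer land inside the guard gap below a downward-growing stack. Purely as an illustration of what such a helper conceptually returns — a simplified, self-contained sketch with stand-in types and gap size, not the mm/ implementation:

#include <stdio.h>

#define SKETCH_PAGE_SHIFT   12
#define SKETCH_VM_GROWSDOWN 0x0100UL

struct vma_sketch {
	unsigned long vm_start;
	unsigned long vm_flags;
};

/* Assumed gap of 256 pages, expressed in bytes. */
static unsigned long stack_guard_gap = 256UL << SKETCH_PAGE_SHIFT;

/* For a stack that grows down, pull the "effective start" of the VMA back
 * by the guard gap so no new mapping may be placed immediately below it. */
static unsigned long vm_start_gap_sketch(const struct vma_sketch *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & SKETCH_VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* guard against underflow */
			vm_start = 0;
	}
	return vm_start;
}

int main(void)
{
	struct vma_sketch stack = { .vm_start = 0x7ffff0000000UL,
				    .vm_flags = SKETCH_VM_GROWSDOWN };

	/* A fixed hint must now satisfy addr + len <= vm_start_gap_sketch(&stack). */
	printf("effective start: %#lx\n", vm_start_gap_sketch(&stack));
	return 0;
}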
-1
arch/score/include/asm/processor.h
··· 13 13 */ 14 14 extern void (*cpu_wait)(void); 15 15 16 - extern unsigned long thread_saved_pc(struct task_struct *tsk); 17 16 extern void start_thread(struct pt_regs *regs, 18 17 unsigned long pc, unsigned long sp); 19 18 extern unsigned long get_wchan(struct task_struct *p);
-5
arch/score/kernel/process.c
··· 101 101 return 1; 102 102 } 103 103 104 - unsigned long thread_saved_pc(struct task_struct *tsk) 105 - { 106 - return task_pt_regs(tsk)->cp0_epc; 107 - } 108 - 109 104 unsigned long get_wchan(struct task_struct *task) 110 105 { 111 106 if (!task || task == current || task->state == TASK_RUNNING)
+2 -2
arch/sh/mm/mmap.c
··· 64 64 65 65 vma = find_vma(mm, addr); 66 66 if (TASK_SIZE - len >= addr && 67 - (!vma || addr + len <= vma->vm_start)) 67 + (!vma || addr + len <= vm_start_gap(vma))) 68 68 return addr; 69 69 } 70 70 ··· 114 114 115 115 vma = find_vma(mm, addr); 116 116 if (TASK_SIZE - len >= addr && 117 - (!vma || addr + len <= vma->vm_start)) 117 + (!vma || addr + len <= vm_start_gap(vma))) 118 118 return addr; 119 119 } 120 120
+8 -7
arch/sparc/Kconfig
··· 192 192 int "Maximum number of CPUs" 193 193 depends on SMP 194 194 range 2 32 if SPARC32 195 - range 2 1024 if SPARC64 195 + range 2 4096 if SPARC64 196 196 default 32 if SPARC32 197 - default 64 if SPARC64 197 + default 4096 if SPARC64 198 198 199 199 source kernel/Kconfig.hz 200 200 ··· 295 295 depends on SPARC64 && SMP 296 296 297 297 config NODES_SHIFT 298 - int 299 - default "4" 298 + int "Maximum NUMA Nodes (as a power of 2)" 299 + range 4 5 if SPARC64 300 + default "5" 300 301 depends on NEED_MULTIPLE_NODES 302 + help 303 + Specify the maximum number of NUMA Nodes available on the target 304 + system. Increases memory reserved to accommodate various tables. 301 305 302 306 # Some NUMA nodes have memory ranges that span 303 307 # other nodes. Even though a pfn is valid and ··· 576 572 bool 577 573 depends on COMPAT && SYSVIPC 578 574 default y 579 - 580 - config KEYS_COMPAT 581 - def_bool y if COMPAT && KEYS 582 575 583 576 endmenu 584 577
+1 -1
arch/sparc/include/asm/mmu_64.h
··· 52 52 #define CTX_NR_MASK TAG_CONTEXT_BITS 53 53 #define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK) 54 54 55 - #define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL)) 55 + #define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT) 56 56 #define CTX_VALID(__ctx) \ 57 57 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK)) 58 58 #define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
+4 -28
arch/sparc/include/asm/mmu_context_64.h
··· 19 19 extern unsigned long tlb_context_cache; 20 20 extern unsigned long mmu_context_bmap[]; 21 21 22 + DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm); 22 23 void get_new_mmu_context(struct mm_struct *mm); 23 - #ifdef CONFIG_SMP 24 - void smp_new_mmu_context_version(void); 25 - #else 26 - #define smp_new_mmu_context_version() do { } while (0) 27 - #endif 28 - 29 24 int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 30 25 void destroy_context(struct mm_struct *mm); 31 26 ··· 71 76 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk) 72 77 { 73 78 unsigned long ctx_valid, flags; 74 - int cpu; 79 + int cpu = smp_processor_id(); 75 80 81 + per_cpu(per_cpu_secondary_mm, cpu) = mm; 76 82 if (unlikely(mm == &init_mm)) 77 83 return; 78 84 ··· 119 123 * for the first time, we must flush that context out of the 120 124 * local TLB. 121 125 */ 122 - cpu = smp_processor_id(); 123 126 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) { 124 127 cpumask_set_cpu(cpu, mm_cpumask(mm)); 125 128 __flush_tlb_mm(CTX_HWBITS(mm->context), ··· 128 133 } 129 134 130 135 #define deactivate_mm(tsk,mm) do { } while (0) 131 - 132 - /* Activate a new MM instance for the current task. */ 133 - static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm) 134 - { 135 - unsigned long flags; 136 - int cpu; 137 - 138 - spin_lock_irqsave(&mm->context.lock, flags); 139 - if (!CTX_VALID(mm->context)) 140 - get_new_mmu_context(mm); 141 - cpu = smp_processor_id(); 142 - if (!cpumask_test_cpu(cpu, mm_cpumask(mm))) 143 - cpumask_set_cpu(cpu, mm_cpumask(mm)); 144 - 145 - load_secondary_context(mm); 146 - __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT); 147 - tsb_context_switch(mm); 148 - spin_unlock_irqrestore(&mm->context.lock, flags); 149 - } 150 - 136 + #define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL) 151 137 #endif /* !(__ASSEMBLY__) */ 152 138 153 139 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
-1
arch/sparc/include/asm/pil.h
··· 20 20 #define PIL_SMP_CALL_FUNC 1 21 21 #define PIL_SMP_RECEIVE_SIGNAL 2 22 22 #define PIL_SMP_CAPTURE 3 23 - #define PIL_SMP_CTX_NEW_VERSION 4 24 23 #define PIL_DEVICE_IRQ 5 25 24 #define PIL_SMP_CALL_FUNC_SNGL 6 26 25 #define PIL_DEFERRED_PCR_WORK 7
-3
arch/sparc/include/asm/processor_32.h
··· 67 67 .current_ds = KERNEL_DS, \ 68 68 } 69 69 70 - /* Return saved PC of a blocked thread. */ 71 - unsigned long thread_saved_pc(struct task_struct *t); 72 - 73 70 /* Do necessary setup to start up a newly executed thread. */ 74 71 static inline void start_thread(struct pt_regs * regs, unsigned long pc, 75 72 unsigned long sp)
-2
arch/sparc/include/asm/processor_64.h
··· 89 89 #include <linux/types.h> 90 90 #include <asm/fpumacro.h> 91 91 92 - /* Return saved PC of a blocked thread. */ 93 92 struct task_struct; 94 - unsigned long thread_saved_pc(struct task_struct *); 95 93 96 94 /* On Uniprocessor, even in RMO processes see TSO semantics */ 97 95 #ifdef CONFIG_SMP
+1
arch/sparc/include/asm/vio.h
··· 327 327 int compat_len; 328 328 329 329 u64 dev_no; 330 + u64 id; 330 331 331 332 unsigned long channel_id; 332 333
+1 -1
arch/sparc/kernel/ds.c
··· 909 909 pbuf.req.handle = cp->handle; 910 910 pbuf.req.major = 1; 911 911 pbuf.req.minor = 0; 912 - strcpy(pbuf.req.svc_id, cp->service_id); 912 + strcpy(pbuf.id_buf, cp->service_id); 913 913 914 914 err = __ds_send(lp, &pbuf, msg_len); 915 915 if (err > 0)
+13 -4
arch/sparc/kernel/irq_64.c
··· 1034 1034 { 1035 1035 #ifdef CONFIG_SMP 1036 1036 unsigned long page; 1037 + void *mondo, *p; 1037 1038 1038 - BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64)); 1039 + BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE); 1040 + 1041 + /* Make sure mondo block is 64byte aligned */ 1042 + p = kzalloc(127, GFP_KERNEL); 1043 + if (!p) { 1044 + prom_printf("SUN4V: Error, cannot allocate mondo block.\n"); 1045 + prom_halt(); 1046 + } 1047 + mondo = (void *)(((unsigned long)p + 63) & ~0x3f); 1048 + tb->cpu_mondo_block_pa = __pa(mondo); 1039 1049 1040 1050 page = get_zeroed_page(GFP_KERNEL); 1041 1051 if (!page) { 1042 - prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n"); 1052 + prom_printf("SUN4V: Error, cannot allocate cpu list page.\n"); 1043 1053 prom_halt(); 1044 1054 } 1045 1055 1046 - tb->cpu_mondo_block_pa = __pa(page); 1047 - tb->cpu_list_pa = __pa(page + 64); 1056 + tb->cpu_list_pa = __pa(page); 1048 1057 #endif 1049 1058 } 1050 1059
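For reference, the kzalloc(127, ...) plus mask arithmetic above is the usual align-up idiom: over-allocating by (alignment - 1) bytes guarantees that a 64-byte-aligned, 64-byte mondo block fits inside the buffer. A small, self-contained illustration (helper name is mine, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Round addr up to the next multiple of 64: add 63, then clear the low 6 bits. */
static inline uintptr_t align_up_64(uintptr_t addr)
{
	return (addr + 63) & ~(uintptr_t)0x3f;
}

int main(void)
{
	assert(align_up_64(0)  == 0);
	assert(align_up_64(1)  == 64);
	assert(align_up_64(63) == 64);
	assert(align_up_64(64) == 64);
	assert(align_up_64(65) == 128);
	/* Worst case: a buffer starting 1 byte past a boundary still leaves
	 * 127 - 63 = 64 aligned bytes, which is why 127 bytes are enough. */
	return 0;
}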
-1
arch/sparc/kernel/kernel.h
··· 37 37 /* smp_64.c */ 38 38 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); 39 39 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs); 40 - void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs); 41 40 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs); 42 41 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); 43 42
-8
arch/sparc/kernel/process_32.c
··· 177 177 } 178 178 179 179 /* 180 - * Note: sparc64 has a pretty intricated thread_saved_pc, check it out. 181 - */ 182 - unsigned long thread_saved_pc(struct task_struct *tsk) 183 - { 184 - return task_thread_info(tsk)->kpc; 185 - } 186 - 187 - /* 188 180 * Free current thread data structures etc.. 189 181 */ 190 182 void exit_thread(struct task_struct *tsk)
-19
arch/sparc/kernel/process_64.c
··· 400 400 401 401 #endif 402 402 403 - unsigned long thread_saved_pc(struct task_struct *tsk) 404 - { 405 - struct thread_info *ti = task_thread_info(tsk); 406 - unsigned long ret = 0xdeadbeefUL; 407 - 408 - if (ti && ti->ksp) { 409 - unsigned long *sp; 410 - sp = (unsigned long *)(ti->ksp + STACK_BIAS); 411 - if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL && 412 - sp[14]) { 413 - unsigned long *fp; 414 - fp = (unsigned long *)(sp[14] + STACK_BIAS); 415 - if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL) 416 - ret = fp[15]; 417 - } 418 - } 419 - return ret; 420 - } 421 - 422 403 /* Free current thread data structures etc.. */ 423 404 void exit_thread(struct task_struct *tsk) 424 405 {
-31
arch/sparc/kernel/smp_64.c
··· 964 964 preempt_enable(); 965 965 } 966 966 967 - void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) 968 - { 969 - struct mm_struct *mm; 970 - unsigned long flags; 971 - 972 - clear_softint(1 << irq); 973 - 974 - /* See if we need to allocate a new TLB context because 975 - * the version of the one we are using is now out of date. 976 - */ 977 - mm = current->active_mm; 978 - if (unlikely(!mm || (mm == &init_mm))) 979 - return; 980 - 981 - spin_lock_irqsave(&mm->context.lock, flags); 982 - 983 - if (unlikely(!CTX_VALID(mm->context))) 984 - get_new_mmu_context(mm); 985 - 986 - spin_unlock_irqrestore(&mm->context.lock, flags); 987 - 988 - load_secondary_context(mm); 989 - __flush_tlb_mm(CTX_HWBITS(mm->context), 990 - SECONDARY_CONTEXT); 991 - } 992 - 993 - void smp_new_mmu_context_version(void) 994 - { 995 - smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0); 996 - } 997 - 998 967 #ifdef CONFIG_KGDB 999 968 void kgdb_roundup_cpus(unsigned long flags) 1000 969 {
+2 -2
arch/sparc/kernel/sys_sparc_64.c
··· 120 120 121 121 vma = find_vma(mm, addr); 122 122 if (task_size - len >= addr && 123 - (!vma || addr + len <= vma->vm_start)) 123 + (!vma || addr + len <= vm_start_gap(vma))) 124 124 return addr; 125 125 } 126 126 ··· 183 183 184 184 vma = find_vma(mm, addr); 185 185 if (task_size - len >= addr && 186 - (!vma || addr + len <= vma->vm_start)) 186 + (!vma || addr + len <= vm_start_gap(vma))) 187 187 return addr; 188 188 } 189 189
+7 -4
arch/sparc/kernel/tsb.S
··· 455 455 .type copy_tsb,#function 456 456 copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size 457 457 * %o2=new_tsb_base, %o3=new_tsb_size 458 + * %o4=page_size_shift 458 459 */ 459 460 sethi %uhi(TSB_PASS_BITS), %g7 460 461 srlx %o3, 4, %o3 461 - add %o0, %o1, %g1 /* end of old tsb */ 462 + add %o0, %o1, %o1 /* end of old tsb */ 462 463 sllx %g7, 32, %g7 463 464 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */ 465 + 466 + mov %o4, %g1 /* page_size_shift */ 464 467 465 468 661: prefetcha [%o0] ASI_N, #one_read 466 469 .section .tsb_phys_patch, "ax" ··· 489 486 /* This can definitely be computed faster... */ 490 487 srlx %o0, 4, %o5 /* Build index */ 491 488 and %o5, 511, %o5 /* Mask index */ 492 - sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */ 489 + sllx %o5, %g1, %o5 /* Put into vaddr position */ 493 490 or %o4, %o5, %o4 /* Full VADDR. */ 494 - srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */ 491 + srlx %o4, %g1, %o4 /* Shift down to create index */ 495 492 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */ 496 493 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */ 497 494 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */ ··· 499 496 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */ 500 497 501 498 80: add %o0, 16, %o0 502 - cmp %o0, %g1 499 + cmp %o0, %o1 503 500 bne,pt %xcc, 90b 504 501 nop 505 502
+1 -1
arch/sparc/kernel/ttable_64.S
··· 50 50 tl0_irq1: TRAP_IRQ(smp_call_function_client, 1) 51 51 tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2) 52 52 tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3) 53 - tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4) 53 + tl0_irq4: BTRAP(0x44) 54 54 #else 55 55 tl0_irq1: BTRAP(0x41) 56 56 tl0_irq2: BTRAP(0x42)
+64 -4
arch/sparc/kernel/vio.c
··· 302 302 if (!id) { 303 303 dev_set_name(&vdev->dev, "%s", bus_id_name); 304 304 vdev->dev_no = ~(u64)0; 305 + vdev->id = ~(u64)0; 305 306 } else if (!cfg_handle) { 306 307 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); 307 308 vdev->dev_no = *id; 309 + vdev->id = ~(u64)0; 308 310 } else { 309 311 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, 310 312 *cfg_handle, *id); 311 313 vdev->dev_no = *cfg_handle; 314 + vdev->id = *id; 312 315 } 313 316 314 317 vdev->dev.parent = parent; ··· 354 351 (void) vio_create_one(hp, node, &root_vdev->dev); 355 352 } 356 353 354 + struct vio_md_node_query { 355 + const char *type; 356 + u64 dev_no; 357 + u64 id; 358 + }; 359 + 357 360 static int vio_md_node_match(struct device *dev, void *arg) 358 361 { 362 + struct vio_md_node_query *query = (struct vio_md_node_query *) arg; 359 363 struct vio_dev *vdev = to_vio_dev(dev); 360 364 361 - if (vdev->mp == (u64) arg) 362 - return 1; 365 + if (vdev->dev_no != query->dev_no) 366 + return 0; 367 + if (vdev->id != query->id) 368 + return 0; 369 + if (strcmp(vdev->type, query->type)) 370 + return 0; 363 371 364 - return 0; 372 + return 1; 365 373 } 366 374 367 375 static void vio_remove(struct mdesc_handle *hp, u64 node) 368 376 { 377 + const char *type; 378 + const u64 *id, *cfg_handle; 379 + u64 a; 380 + struct vio_md_node_query query; 369 381 struct device *dev; 370 382 371 - dev = device_find_child(&root_vdev->dev, (void *) node, 383 + type = mdesc_get_property(hp, node, "device-type", NULL); 384 + if (!type) { 385 + type = mdesc_get_property(hp, node, "name", NULL); 386 + if (!type) 387 + type = mdesc_node_name(hp, node); 388 + } 389 + 390 + query.type = type; 391 + 392 + id = mdesc_get_property(hp, node, "id", NULL); 393 + cfg_handle = NULL; 394 + mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { 395 + u64 target; 396 + 397 + target = mdesc_arc_target(hp, a); 398 + cfg_handle = mdesc_get_property(hp, target, 399 + "cfg-handle", NULL); 400 + if (cfg_handle) 401 + break; 402 + } 403 + 404 + if (!id) { 405 + query.dev_no = ~(u64)0; 406 + query.id = ~(u64)0; 407 + } else if (!cfg_handle) { 408 + query.dev_no = *id; 409 + query.id = ~(u64)0; 410 + } else { 411 + query.dev_no = *cfg_handle; 412 + query.id = *id; 413 + } 414 + 415 + dev = device_find_child(&root_vdev->dev, &query, 372 416 vio_md_node_match); 373 417 if (dev) { 374 418 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); 375 419 376 420 device_unregister(dev); 377 421 put_device(dev); 422 + } else { 423 + if (!id) 424 + printk(KERN_ERR "VIO: Removed unknown %s node.\n", 425 + type); 426 + else if (!cfg_handle) 427 + printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n", 428 + type, *id); 429 + else 430 + printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n", 431 + type, *cfg_handle, *id); 378 432 } 379 433 } 380 434
+1
arch/sparc/lib/Makefile
··· 15 15 lib-$(CONFIG_SPARC64) += atomic_64.o 16 16 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o 17 17 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o 18 + lib-$(CONFIG_SPARC64) += multi3.o 18 19 19 20 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o 20 21 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
+35
arch/sparc/lib/multi3.S
··· 1 + #include <linux/linkage.h> 2 + #include <asm/export.h> 3 + 4 + .text 5 + .align 4 6 + ENTRY(__multi3) /* %o0 = u, %o1 = v */ 7 + mov %o1, %g1 8 + srl %o3, 0, %g4 9 + mulx %g4, %g1, %o1 10 + srlx %g1, 0x20, %g3 11 + mulx %g3, %g4, %g5 12 + sllx %g5, 0x20, %o5 13 + srl %g1, 0, %g4 14 + sub %o1, %o5, %o5 15 + srlx %o5, 0x20, %o5 16 + addcc %g5, %o5, %g5 17 + srlx %o3, 0x20, %o5 18 + mulx %g4, %o5, %g4 19 + mulx %g3, %o5, %o5 20 + sethi %hi(0x80000000), %g3 21 + addcc %g5, %g4, %g5 22 + srlx %g5, 0x20, %g5 23 + add %g3, %g3, %g3 24 + movcc %xcc, %g0, %g3 25 + addcc %o5, %g5, %o5 26 + sllx %g4, 0x20, %g4 27 + add %o1, %g4, %o1 28 + add %o5, %g3, %g2 29 + mulx %g1, %o2, %g1 30 + add %g1, %g2, %g1 31 + mulx %o0, %o3, %o0 32 + retl 33 + add %g1, %o0, %o0 34 + ENDPROC(__multi3) 35 + EXPORT_SYMBOL(__multi3)
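The new multi3.S above provides __multi3, the libgcc-style 128-bit multiply helper that recent compilers may emit calls to on sparc64. Purely as an illustration of what it computes — the low 128 bits of a 128x128-bit product — here is a portable C sketch built from 64-bit halves; the helper names are mine, not kernel code:

#include <assert.h>
#include <stdint.h>

struct u128_sketch { uint64_t hi, lo; };

/* High 64 bits of a 64x64-bit product, built from 32-bit partial products. */
static uint64_t mulhi64(uint64_t a, uint64_t b)
{
	uint64_t a0 = (uint32_t)a, a1 = a >> 32;
	uint64_t b0 = (uint32_t)b, b1 = b >> 32;
	uint64_t t  = a1 * b0 + ((a0 * b0) >> 32);
	uint64_t w1 = (uint32_t)t + a0 * b1;

	return a1 * b1 + (t >> 32) + (w1 >> 32);
}

/* Low 128 bits of u * v, i.e. what a __multi3-style routine returns. */
static struct u128_sketch multi3_sketch(struct u128_sketch u, struct u128_sketch v)
{
	struct u128_sketch r;

	r.lo = u.lo * v.lo;
	r.hi = mulhi64(u.lo, v.lo) + u.lo * v.hi + u.hi * v.lo;
	return r;
}

int main(void)
{
	struct u128_sketch a = { .hi = 0, .lo = 0xffffffffffffffffULL };
	struct u128_sketch b = { .hi = 0, .lo = 2 };
	struct u128_sketch r = multi3_sketch(a, b);

	/* (2^64 - 1) * 2 = 2^65 - 2 */
	assert(r.hi == 1 && r.lo == 0xfffffffffffffffeULL);
	return 0;
}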
+1 -1
arch/sparc/mm/hugetlbpage.c
··· 120 120 addr = ALIGN(addr, huge_page_size(h)); 121 121 vma = find_vma(mm, addr); 122 122 if (task_size - len >= addr && 123 - (!vma || addr + len <= vma->vm_start)) 123 + (!vma || addr + len <= vm_start_gap(vma))) 124 124 return addr; 125 125 } 126 126 if (mm->get_unmapped_area == arch_get_unmapped_area)
+60 -29
arch/sparc/mm/init_64.c
··· 358 358 } 359 359 360 360 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { 361 - pr_warn("hugepagesz=%llu not supported by MMU.\n", 361 + hugetlb_bad_size(); 362 + pr_err("hugepagesz=%llu not supported by MMU.\n", 362 363 hugepage_size); 363 364 goto out; 364 365 } ··· 707 706 708 707 /* get_new_mmu_context() uses "cache + 1". */ 709 708 DEFINE_SPINLOCK(ctx_alloc_lock); 710 - unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; 709 + unsigned long tlb_context_cache = CTX_FIRST_VERSION; 711 710 #define MAX_CTX_NR (1UL << CTX_NR_BITS) 712 711 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) 713 712 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); 713 + DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0}; 714 + 715 + static void mmu_context_wrap(void) 716 + { 717 + unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK; 718 + unsigned long new_ver, new_ctx, old_ctx; 719 + struct mm_struct *mm; 720 + int cpu; 721 + 722 + bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS); 723 + 724 + /* Reserve kernel context */ 725 + set_bit(0, mmu_context_bmap); 726 + 727 + new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION; 728 + if (unlikely(new_ver == 0)) 729 + new_ver = CTX_FIRST_VERSION; 730 + tlb_context_cache = new_ver; 731 + 732 + /* 733 + * Make sure that any new mm that are added into per_cpu_secondary_mm, 734 + * are going to go through get_new_mmu_context() path. 735 + */ 736 + mb(); 737 + 738 + /* 739 + * Updated versions to current on those CPUs that had valid secondary 740 + * contexts 741 + */ 742 + for_each_online_cpu(cpu) { 743 + /* 744 + * If a new mm is stored after we took this mm from the array, 745 + * it will go into get_new_mmu_context() path, because we 746 + * already bumped the version in tlb_context_cache. 747 + */ 748 + mm = per_cpu(per_cpu_secondary_mm, cpu); 749 + 750 + if (unlikely(!mm || mm == &init_mm)) 751 + continue; 752 + 753 + old_ctx = mm->context.sparc64_ctx_val; 754 + if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) { 755 + new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver; 756 + set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap); 757 + mm->context.sparc64_ctx_val = new_ctx; 758 + } 759 + } 760 + } 714 761 715 762 /* Caller does TLB context flushing on local CPU if necessary. 716 763 * The caller also ensures that CTX_VALID(mm->context) is false. ··· 774 725 { 775 726 unsigned long ctx, new_ctx; 776 727 unsigned long orig_pgsz_bits; 777 - int new_version; 778 728 779 729 spin_lock(&ctx_alloc_lock); 730 + retry: 731 + /* wrap might have happened, test again if our context became valid */ 732 + if (unlikely(CTX_VALID(mm->context))) 733 + goto out; 780 734 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); 781 735 ctx = (tlb_context_cache + 1) & CTX_NR_MASK; 782 736 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); 783 - new_version = 0; 784 737 if (new_ctx >= (1 << CTX_NR_BITS)) { 785 738 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); 786 739 if (new_ctx >= ctx) { 787 - int i; 788 - new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + 789 - CTX_FIRST_VERSION; 790 - if (new_ctx == 1) 791 - new_ctx = CTX_FIRST_VERSION; 792 - 793 - /* Don't call memset, for 16 entries that's just 794 - * plain silly... 
795 - */ 796 - mmu_context_bmap[0] = 3; 797 - mmu_context_bmap[1] = 0; 798 - mmu_context_bmap[2] = 0; 799 - mmu_context_bmap[3] = 0; 800 - for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { 801 - mmu_context_bmap[i + 0] = 0; 802 - mmu_context_bmap[i + 1] = 0; 803 - mmu_context_bmap[i + 2] = 0; 804 - mmu_context_bmap[i + 3] = 0; 805 - } 806 - new_version = 1; 807 - goto out; 740 + mmu_context_wrap(); 741 + goto retry; 808 742 } 809 743 } 744 + if (mm->context.sparc64_ctx_val) 745 + cpumask_clear(mm_cpumask(mm)); 810 746 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); 811 747 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); 812 - out: 813 748 tlb_context_cache = new_ctx; 814 749 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; 750 + out: 815 751 spin_unlock(&ctx_alloc_lock); 816 - 817 - if (unlikely(new_version)) 818 - smp_new_mmu_context_version(); 819 752 } 820 753 821 754 static int numa_enabled = 1;
+5 -2
arch/sparc/mm/tsb.c
··· 496 496 extern void copy_tsb(unsigned long old_tsb_base, 497 497 unsigned long old_tsb_size, 498 498 unsigned long new_tsb_base, 499 - unsigned long new_tsb_size); 499 + unsigned long new_tsb_size, 500 + unsigned long page_size_shift); 500 501 unsigned long old_tsb_base = (unsigned long) old_tsb; 501 502 unsigned long new_tsb_base = (unsigned long) new_tsb; 502 503 ··· 505 504 old_tsb_base = __pa(old_tsb_base); 506 505 new_tsb_base = __pa(new_tsb_base); 507 506 } 508 - copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); 507 + copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size, 508 + tsb_index == MM_TSB_BASE ? 509 + PAGE_SHIFT : REAL_HPAGE_SHIFT); 509 510 } 510 511 511 512 mm->context.tsb_block[tsb_index].tsb = new_tsb;
-5
arch/sparc/mm/ultra.S
··· 971 971 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint 972 972 retry 973 973 974 - .globl xcall_new_mmu_context_version 975 - xcall_new_mmu_context_version: 976 - wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint 977 - retry 978 - 979 974 #ifdef CONFIG_KGDB 980 975 .globl xcall_kgdb_capture 981 976 xcall_kgdb_capture:
-7
arch/tile/include/asm/processor.h
··· 214 214 215 215 extern void prepare_exit_to_usermode(struct pt_regs *regs, u32 flags); 216 216 217 - 218 - /* 219 - * Return saved (kernel) PC of a blocked thread. 220 - * Only used in a printk() in kernel/sched/core.c, so don't work too hard. 221 - */ 222 - #define thread_saved_pc(t) ((t)->thread.pc) 223 - 224 217 unsigned long get_wchan(struct task_struct *p); 225 218 226 219 /* Return initial ksp value for given task. */
+1 -1
arch/tile/mm/hugetlbpage.c
··· 233 233 addr = ALIGN(addr, huge_page_size(h)); 234 234 vma = find_vma(mm, addr); 235 235 if (TASK_SIZE - len >= addr && 236 - (!vma || addr + len <= vma->vm_start)) 236 + (!vma || addr + len <= vm_start_gap(vma))) 237 237 return addr; 238 238 } 239 239 if (current->mm->get_unmapped_area == arch_get_unmapped_area)
-2
arch/um/include/asm/processor-generic.h
··· 58 58 { 59 59 } 60 60 61 - extern unsigned long thread_saved_pc(struct task_struct *t); 62 - 63 61 static inline void mm_copy_segments(struct mm_struct *from_mm, 64 62 struct mm_struct *new_mm) 65 63 {
-6
arch/um/kernel/um_arch.c
··· 56 56 __attribute__((__section__(".data..init_irqstack"))) = 57 57 { INIT_THREAD_INFO(init_task) }; 58 58 59 - unsigned long thread_saved_pc(struct task_struct *task) 60 - { 61 - /* FIXME: Need to look up userspace_pid by cpu */ 62 - return os_process_pc(userspace_pid[0]); 63 - } 64 - 65 59 /* Changed in setup_arch, which is called in early boot */ 66 60 static char host_info[(__NEW_UTS_LEN + 1) * 5]; 67 61
-4
arch/x86/Kconfig
··· 2776 2776 config SYSVIPC_COMPAT 2777 2777 def_bool y 2778 2778 depends on SYSVIPC 2779 - 2780 - config KEYS_COMPAT 2781 - def_bool y 2782 - depends on KEYS 2783 2779 endif 2784 2780 2785 2781 endmenu
-3
arch/x86/boot/compressed/kaslr.c
··· 564 564 { 565 565 unsigned long random_addr, min_addr; 566 566 567 - /* By default, keep output position unchanged. */ 568 - *virt_addr = *output; 569 - 570 567 if (cmdline_find_option_bool("nokaslr")) { 571 568 warn("KASLR disabled: 'nokaslr' on cmdline."); 572 569 return;
+4 -2
arch/x86/boot/compressed/misc.c
··· 338 338 unsigned long output_len) 339 339 { 340 340 const unsigned long kernel_total_size = VO__end - VO__text; 341 - unsigned long virt_addr = (unsigned long)output; 341 + unsigned long virt_addr = LOAD_PHYSICAL_ADDR; 342 342 343 343 /* Retain x86 boot parameters pointer passed from startup_32/64. */ 344 344 boot_params = rmode; ··· 390 390 #ifdef CONFIG_X86_64 391 391 if (heap > 0x3fffffffffffUL) 392 392 error("Destination address too large"); 393 + if (virt_addr + max(output_len, kernel_total_size) > KERNEL_IMAGE_SIZE) 394 + error("Destination virtual address is beyond the kernel mapping area"); 393 395 #else 394 396 if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff)) 395 397 error("Destination address too large"); ··· 399 397 #ifndef CONFIG_RELOCATABLE 400 398 if ((unsigned long)output != LOAD_PHYSICAL_ADDR) 401 399 error("Destination address does not match LOAD_PHYSICAL_ADDR"); 402 - if ((unsigned long)output != virt_addr) 400 + if (virt_addr != LOAD_PHYSICAL_ADDR) 403 401 error("Destination virtual address changed when not relocatable"); 404 402 #endif 405 403
-2
arch/x86/boot/compressed/misc.h
··· 81 81 unsigned long output_size, 82 82 unsigned long *virt_addr) 83 83 { 84 - /* No change from existing output location. */ 85 - *virt_addr = *output; 86 84 } 87 85 #endif 88 86
+2 -2
arch/x86/events/intel/core.c
··· 431 431 [ C(DTLB) ] = { 432 432 [ C(OP_READ) ] = { 433 433 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ 434 - [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 434 + [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ 435 435 }, 436 436 [ C(OP_WRITE) ] = { 437 437 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ 438 - [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 438 + [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ 439 439 }, 440 440 [ C(OP_PREFETCH) ] = { 441 441 [ C(RESULT_ACCESS) ] = 0x0,
+1 -1
arch/x86/events/intel/uncore.c
··· 1170 1170 pmu = type->pmus; 1171 1171 for (i = 0; i < type->num_boxes; i++, pmu++) { 1172 1172 box = pmu->boxes[pkg]; 1173 - if (!box && atomic_inc_return(&box->refcnt) == 1) 1173 + if (box && atomic_inc_return(&box->refcnt) == 1) 1174 1174 uncore_box_init(box); 1175 1175 } 1176 1176 }
+1
arch/x86/include/asm/extable.h
··· 29 29 } while (0) 30 30 31 31 extern int fixup_exception(struct pt_regs *regs, int trapnr); 32 + extern int fixup_bug(struct pt_regs *regs, int trapnr); 32 33 extern bool ex_has_fault_handler(unsigned long ip); 33 34 extern void early_fixup_exception(struct pt_regs *regs, int trapnr); 34 35
+1
arch/x86/include/asm/kvm_emulate.h
··· 296 296 297 297 bool perm_ok; /* do not check permissions if true */ 298 298 bool ud; /* inject an #UD if host doesn't support insn */ 299 + bool tf; /* TF value before instruction (after for syscall/sysret) */ 299 300 300 301 bool have_exception; 301 302 struct x86_exception exception;
+1 -2
arch/x86/include/asm/mshyperv.h
··· 2 2 #define _ASM_X86_MSHYPER_H 3 3 4 4 #include <linux/types.h> 5 - #include <linux/interrupt.h> 6 - #include <linux/clocksource.h> 5 + #include <linux/atomic.h> 7 6 #include <asm/hyperv.h> 8 7 9 8 /*
-2
arch/x86/include/asm/processor.h
··· 860 860 861 861 #endif /* CONFIG_X86_64 */ 862 862 863 - extern unsigned long thread_saved_pc(struct task_struct *tsk); 864 - 865 863 extern void start_thread(struct pt_regs *regs, unsigned long new_ip, 866 864 unsigned long new_sp); 867 865
+1
arch/x86/kernel/cpu/cyrix.c
··· 255 255 break; 256 256 257 257 case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */ 258 + case 11: /* GX1 with inverted Device ID */ 258 259 #ifdef CONFIG_PCI 259 260 { 260 261 u32 vendor, device;
+3 -1
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
··· 856 856 dentry = kernfs_mount(fs_type, flags, rdt_root, 857 857 RDTGROUP_SUPER_MAGIC, NULL); 858 858 if (IS_ERR(dentry)) 859 - goto out_cdp; 859 + goto out_destroy; 860 860 861 861 static_branch_enable(&rdt_enable_key); 862 862 goto out; 863 863 864 + out_destroy: 865 + kernfs_remove(kn_info); 864 866 out_cdp: 865 867 cdp_disable(); 866 868 out:
+8 -8
arch/x86/kernel/cpu/microcode/amd.c
··· 320 320 } 321 321 322 322 static enum ucode_state 323 - load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size); 323 + load_microcode_amd(bool save, u8 family, const u8 *data, size_t size); 324 324 325 325 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) 326 326 { ··· 338 338 if (!desc.mc) 339 339 return -EINVAL; 340 340 341 - ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax), 342 - desc.data, desc.size); 341 + ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); 343 342 if (ret != UCODE_OK) 344 343 return -EINVAL; 345 344 ··· 674 675 } 675 676 676 677 static enum ucode_state 677 - load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size) 678 + load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) 678 679 { 679 680 enum ucode_state ret; 680 681 ··· 688 689 689 690 #ifdef CONFIG_X86_32 690 691 /* save BSP's matching patch for early load */ 691 - if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { 692 - struct ucode_patch *p = find_patch(cpu); 692 + if (save) { 693 + struct ucode_patch *p = find_patch(0); 693 694 if (p) { 694 695 memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); 695 696 memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), ··· 721 722 { 722 723 char fw_name[36] = "amd-ucode/microcode_amd.bin"; 723 724 struct cpuinfo_x86 *c = &cpu_data(cpu); 725 + bool bsp = c->cpu_index == boot_cpu_data.cpu_index; 724 726 enum ucode_state ret = UCODE_NFOUND; 725 727 const struct firmware *fw; 726 728 727 729 /* reload ucode container only on the boot cpu */ 728 - if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index) 730 + if (!refresh_fw || !bsp) 729 731 return UCODE_OK; 730 732 731 733 if (c->x86 >= 0x15) ··· 743 743 goto fw_release; 744 744 } 745 745 746 - ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size); 746 + ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size); 747 747 748 748 fw_release: 749 749 release_firmware(fw);
+3
arch/x86/kernel/cpu/microcode/intel.c
··· 619 619 620 620 show_saved_mc(); 621 621 622 + /* initrd is going away, clear patch ptr. */ 623 + intel_ucode_patch = NULL; 624 + 622 625 return 0; 623 626 } 624 627
+1 -1
arch/x86/kernel/kvm.c
··· 161 161 */ 162 162 rcu_irq_exit(); 163 163 native_safe_halt(); 164 - rcu_irq_enter(); 165 164 local_irq_disable(); 165 + rcu_irq_enter(); 166 166 } 167 167 } 168 168 if (!n.halted)
-11
arch/x86/kernel/process.c
··· 545 545 } 546 546 547 547 /* 548 - * Return saved PC of a blocked thread. 549 - * What is this good for? it will be always the scheduler or ret_from_fork. 550 - */ 551 - unsigned long thread_saved_pc(struct task_struct *tsk) 552 - { 553 - struct inactive_task_frame *frame = 554 - (struct inactive_task_frame *) READ_ONCE(tsk->thread.sp); 555 - return READ_ONCE_NOCHECK(frame->ret_addr); 556 - } 557 - 558 - /* 559 548 * Called from fs/proc with a reference on @p to find the function 560 549 * which called into schedule(). This needs to be done carefully 561 550 * because the task might wake up and we might look at a stack
+1 -1
arch/x86/kernel/process_32.c
··· 78 78 79 79 printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip); 80 80 printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags, 81 - smp_processor_id()); 81 + raw_smp_processor_id()); 82 82 83 83 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", 84 84 regs->ax, regs->bx, regs->cx, regs->dx);
+2 -2
arch/x86/kernel/sys_x86_64.c
··· 144 144 addr = PAGE_ALIGN(addr); 145 145 vma = find_vma(mm, addr); 146 146 if (end - len >= addr && 147 - (!vma || addr + len <= vma->vm_start)) 147 + (!vma || addr + len <= vm_start_gap(vma))) 148 148 return addr; 149 149 } 150 150 ··· 187 187 addr = PAGE_ALIGN(addr); 188 188 vma = find_vma(mm, addr); 189 189 if (TASK_SIZE - len >= addr && 190 - (!vma || addr + len <= vma->vm_start)) 190 + (!vma || addr + len <= vm_start_gap(vma))) 191 191 return addr; 192 192 } 193 193
+1 -1
arch/x86/kernel/tboot.c
··· 514 514 if (!tboot_enabled()) 515 515 return 0; 516 516 517 - if (!intel_iommu_tboot_noforce) 517 + if (intel_iommu_tboot_noforce) 518 518 return 1; 519 519 520 520 if (no_iommu || swiotlb || dmar_disabled)
+1 -1
arch/x86/kernel/traps.c
··· 182 182 return ud == INSN_UD0 || ud == INSN_UD2; 183 183 } 184 184 185 - static int fixup_bug(struct pt_regs *regs, int trapnr) 185 + int fixup_bug(struct pt_regs *regs, int trapnr) 186 186 { 187 187 if (trapnr != X86_TRAP_UD) 188 188 return 0;
+11 -9
arch/x86/kvm/cpuid.c
··· 780 780 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i) 781 781 { 782 782 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i]; 783 - int j, nent = vcpu->arch.cpuid_nent; 783 + struct kvm_cpuid_entry2 *ej; 784 + int j = i; 785 + int nent = vcpu->arch.cpuid_nent; 784 786 785 787 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT; 786 788 /* when no next entry is found, the current entry[i] is reselected */ 787 - for (j = i + 1; ; j = (j + 1) % nent) { 788 - struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j]; 789 - if (ej->function == e->function) { 790 - ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; 791 - return j; 792 - } 793 - } 794 - return 0; /* silence gcc, even though control never reaches here */ 789 + do { 790 + j = (j + 1) % nent; 791 + ej = &vcpu->arch.cpuid_entries[j]; 792 + } while (ej->function != e->function); 793 + 794 + ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT; 795 + 796 + return j; 795 797 } 796 798 797 799 /* find an entry with matching function, matching index (if needed), and that
+1
arch/x86/kvm/emulate.c
··· 2742 2742 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); 2743 2743 } 2744 2744 2745 + ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 2745 2746 return X86EMUL_CONTINUE; 2746 2747 } 2747 2748
+4 -1
arch/x86/kvm/lapic.c
··· 1495 1495 1496 1496 static void cancel_hv_timer(struct kvm_lapic *apic) 1497 1497 { 1498 + preempt_disable(); 1498 1499 kvm_x86_ops->cancel_hv_timer(apic->vcpu); 1499 1500 apic->lapic_timer.hv_timer_in_use = false; 1501 + preempt_enable(); 1500 1502 } 1501 1503 1502 1504 static bool start_hv_timer(struct kvm_lapic *apic) ··· 1936 1934 for (i = 0; i < KVM_APIC_LVT_NUM; i++) 1937 1935 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 1938 1936 apic_update_lvtt(apic); 1939 - if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) 1937 + if (kvm_vcpu_is_reset_bsp(vcpu) && 1938 + kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) 1940 1939 kvm_lapic_set_reg(apic, APIC_LVT0, 1941 1940 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); 1942 1941 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
+5 -2
arch/x86/kvm/mmu.c
··· 3698 3698 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); 3699 3699 } 3700 3700 3701 - static bool can_do_async_pf(struct kvm_vcpu *vcpu) 3701 + bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) 3702 3702 { 3703 3703 if (unlikely(!lapic_in_kernel(vcpu) || 3704 3704 kvm_event_needs_reinjection(vcpu))) 3705 + return false; 3706 + 3707 + if (is_guest_mode(vcpu)) 3705 3708 return false; 3706 3709 3707 3710 return kvm_x86_ops->interrupt_allowed(vcpu); ··· 3722 3719 if (!async) 3723 3720 return false; /* *pfn has correct page already */ 3724 3721 3725 - if (!prefault && can_do_async_pf(vcpu)) { 3722 + if (!prefault && kvm_can_do_async_pf(vcpu)) { 3726 3723 trace_kvm_try_async_get_page(gva, gfn); 3727 3724 if (kvm_find_async_pf_gfn(vcpu, gfn)) { 3728 3725 trace_kvm_async_pf_doublefault(gva, gfn);
+1
arch/x86/kvm/mmu.h
··· 76 76 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); 77 77 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, 78 78 bool accessed_dirty); 79 + bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); 79 80 80 81 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) 81 82 {
+12 -14
arch/x86/kvm/svm.c
··· 1807 1807 * AMD's VMCB does not have an explicit unusable field, so emulate it 1808 1808 * for cross vendor migration purposes by "not present" 1809 1809 */ 1810 - var->unusable = !var->present || (var->type == 0); 1810 + var->unusable = !var->present; 1811 1811 1812 1812 switch (seg) { 1813 1813 case VCPU_SREG_TR: ··· 1840 1840 */ 1841 1841 if (var->unusable) 1842 1842 var->db = 0; 1843 + /* This is symmetric with svm_set_segment() */ 1843 1844 var->dpl = to_svm(vcpu)->vmcb->save.cpl; 1844 1845 break; 1845 1846 } ··· 1981 1980 s->base = var->base; 1982 1981 s->limit = var->limit; 1983 1982 s->selector = var->selector; 1984 - if (var->unusable) 1985 - s->attrib = 0; 1986 - else { 1987 - s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); 1988 - s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; 1989 - s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; 1990 - s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT; 1991 - s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; 1992 - s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; 1993 - s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; 1994 - s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; 1995 - } 1983 + s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); 1984 + s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; 1985 + s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; 1986 + s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; 1987 + s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; 1988 + s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; 1989 + s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; 1990 + s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; 1996 1991 1997 1992 /* 1998 1993 * This is always accurate, except if SYSRET returned to a segment ··· 1997 2000 * would entail passing the CPL to userspace and back. 1998 2001 */ 1999 2002 if (seg == VCPU_SREG_SS) 2000 - svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; 2003 + /* This is symmetric with svm_get_segment() */ 2004 + svm->vmcb->save.cpl = (var->dpl & 3); 2001 2005 2002 2006 mark_dirty(svm->vmcb, VMCB_SEG); 2003 2007 }
+63 -86
arch/x86/kvm/vmx.c
··· 2425 2425 if (!(vmcs12->exception_bitmap & (1u << nr))) 2426 2426 return 0; 2427 2427 2428 - nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, 2428 + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, 2429 2429 vmcs_read32(VM_EXIT_INTR_INFO), 2430 2430 vmcs_readl(EXIT_QUALIFICATION)); 2431 2431 return 1; ··· 6914 6914 return 0; 6915 6915 } 6916 6916 6917 - /* 6918 - * This function performs the various checks including 6919 - * - if it's 4KB aligned 6920 - * - No bits beyond the physical address width are set 6921 - * - Returns 0 on success or else 1 6922 - * (Intel SDM Section 30.3) 6923 - */ 6924 - static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, 6925 - gpa_t *vmpointer) 6917 + static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) 6926 6918 { 6927 6919 gva_t gva; 6928 - gpa_t vmptr; 6929 6920 struct x86_exception e; 6930 - struct page *page; 6931 - struct vcpu_vmx *vmx = to_vmx(vcpu); 6932 - int maxphyaddr = cpuid_maxphyaddr(vcpu); 6933 6921 6934 6922 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 6935 6923 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) 6936 6924 return 1; 6937 6925 6938 - if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, 6939 - sizeof(vmptr), &e)) { 6926 + if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer, 6927 + sizeof(*vmpointer), &e)) { 6940 6928 kvm_inject_page_fault(vcpu, &e); 6941 6929 return 1; 6942 6930 } 6943 6931 6944 - switch (exit_reason) { 6945 - case EXIT_REASON_VMON: 6946 - /* 6947 - * SDM 3: 24.11.5 6948 - * The first 4 bytes of VMXON region contain the supported 6949 - * VMCS revision identifier 6950 - * 6951 - * Note - IA32_VMX_BASIC[48] will never be 1 6952 - * for the nested case; 6953 - * which replaces physical address width with 32 6954 - * 6955 - */ 6956 - if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { 6957 - nested_vmx_failInvalid(vcpu); 6958 - return kvm_skip_emulated_instruction(vcpu); 6959 - } 6960 - 6961 - page = nested_get_page(vcpu, vmptr); 6962 - if (page == NULL) { 6963 - nested_vmx_failInvalid(vcpu); 6964 - return kvm_skip_emulated_instruction(vcpu); 6965 - } 6966 - if (*(u32 *)kmap(page) != VMCS12_REVISION) { 6967 - kunmap(page); 6968 - nested_release_page_clean(page); 6969 - nested_vmx_failInvalid(vcpu); 6970 - return kvm_skip_emulated_instruction(vcpu); 6971 - } 6972 - kunmap(page); 6973 - nested_release_page_clean(page); 6974 - vmx->nested.vmxon_ptr = vmptr; 6975 - break; 6976 - case EXIT_REASON_VMCLEAR: 6977 - if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { 6978 - nested_vmx_failValid(vcpu, 6979 - VMXERR_VMCLEAR_INVALID_ADDRESS); 6980 - return kvm_skip_emulated_instruction(vcpu); 6981 - } 6982 - 6983 - if (vmptr == vmx->nested.vmxon_ptr) { 6984 - nested_vmx_failValid(vcpu, 6985 - VMXERR_VMCLEAR_VMXON_POINTER); 6986 - return kvm_skip_emulated_instruction(vcpu); 6987 - } 6988 - break; 6989 - case EXIT_REASON_VMPTRLD: 6990 - if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { 6991 - nested_vmx_failValid(vcpu, 6992 - VMXERR_VMPTRLD_INVALID_ADDRESS); 6993 - return kvm_skip_emulated_instruction(vcpu); 6994 - } 6995 - 6996 - if (vmptr == vmx->nested.vmxon_ptr) { 6997 - nested_vmx_failValid(vcpu, 6998 - VMXERR_VMPTRLD_VMXON_POINTER); 6999 - return kvm_skip_emulated_instruction(vcpu); 7000 - } 7001 - break; 7002 - default: 7003 - return 1; /* shouldn't happen */ 7004 - } 7005 - 7006 - if (vmpointer) 7007 - *vmpointer = vmptr; 7008 6932 return 0; 7009 6933 } 7010 6934 ··· 6990 7066 static int handle_vmon(struct kvm_vcpu *vcpu) 6991 7067 
{ 6992 7068 int ret; 7069 + gpa_t vmptr; 7070 + struct page *page; 6993 7071 struct vcpu_vmx *vmx = to_vmx(vcpu); 6994 7072 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED 6995 7073 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; ··· 7021 7095 return 1; 7022 7096 } 7023 7097 7024 - if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL)) 7098 + if (nested_vmx_get_vmptr(vcpu, &vmptr)) 7025 7099 return 1; 7026 - 7100 + 7101 + /* 7102 + * SDM 3: 24.11.5 7103 + * The first 4 bytes of VMXON region contain the supported 7104 + * VMCS revision identifier 7105 + * 7106 + * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; 7107 + * which replaces physical address width with 32 7108 + */ 7109 + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { 7110 + nested_vmx_failInvalid(vcpu); 7111 + return kvm_skip_emulated_instruction(vcpu); 7112 + } 7113 + 7114 + page = nested_get_page(vcpu, vmptr); 7115 + if (page == NULL) { 7116 + nested_vmx_failInvalid(vcpu); 7117 + return kvm_skip_emulated_instruction(vcpu); 7118 + } 7119 + if (*(u32 *)kmap(page) != VMCS12_REVISION) { 7120 + kunmap(page); 7121 + nested_release_page_clean(page); 7122 + nested_vmx_failInvalid(vcpu); 7123 + return kvm_skip_emulated_instruction(vcpu); 7124 + } 7125 + kunmap(page); 7126 + nested_release_page_clean(page); 7127 + 7128 + vmx->nested.vmxon_ptr = vmptr; 7027 7129 ret = enter_vmx_operation(vcpu); 7028 7130 if (ret) 7029 7131 return ret; ··· 7167 7213 if (!nested_vmx_check_permission(vcpu)) 7168 7214 return 1; 7169 7215 7170 - if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) 7216 + if (nested_vmx_get_vmptr(vcpu, &vmptr)) 7171 7217 return 1; 7218 + 7219 + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { 7220 + nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); 7221 + return kvm_skip_emulated_instruction(vcpu); 7222 + } 7223 + 7224 + if (vmptr == vmx->nested.vmxon_ptr) { 7225 + nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); 7226 + return kvm_skip_emulated_instruction(vcpu); 7227 + } 7172 7228 7173 7229 if (vmptr == vmx->nested.current_vmptr) 7174 7230 nested_release_vmcs12(vmx); ··· 7509 7545 if (!nested_vmx_check_permission(vcpu)) 7510 7546 return 1; 7511 7547 7512 - if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr)) 7548 + if (nested_vmx_get_vmptr(vcpu, &vmptr)) 7513 7549 return 1; 7550 + 7551 + if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { 7552 + nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); 7553 + return kvm_skip_emulated_instruction(vcpu); 7554 + } 7555 + 7556 + if (vmptr == vmx->nested.vmxon_ptr) { 7557 + nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); 7558 + return kvm_skip_emulated_instruction(vcpu); 7559 + } 7514 7560 7515 7561 if (vmx->nested.current_vmptr != vmptr) { 7516 7562 struct vmcs12 *new_vmcs12; ··· 7887 7913 { 7888 7914 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 7889 7915 int cr = exit_qualification & 15; 7890 - int reg = (exit_qualification >> 8) & 15; 7891 - unsigned long val = kvm_register_readl(vcpu, reg); 7916 + int reg; 7917 + unsigned long val; 7892 7918 7893 7919 switch ((exit_qualification >> 4) & 3) { 7894 7920 case 0: /* mov to cr */ 7921 + reg = (exit_qualification >> 8) & 15; 7922 + val = kvm_register_readl(vcpu, reg); 7895 7923 switch (cr) { 7896 7924 case 0: 7897 7925 if (vmcs12->cr0_guest_host_mask & ··· 7948 7972 * lmsw can change bits 1..3 of cr0, and only set bit 0 of 7949 7973 * cr0. Other attempted changes are ignored, with no exit. 
7950 7974 */ 7975 + val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; 7951 7976 if (vmcs12->cr0_guest_host_mask & 0xe & 7952 7977 (val ^ vmcs12->cr0_read_shadow)) 7953 7978 return true;
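With the pointer read factored out into nested_vmx_get_vmptr(), handle_vmon(), handle_vmclear() and handle_vmptrld() each repeat the same two architectural checks on the guest-physical pointer: 4 KiB alignment, and no bits set above the guest's physical address width. A small sketch of that predicate in isolation; the helper name is hypothetical, the hunks above open-code the test:

/* Hypothetical helper mirroring the open-coded checks above. */
static bool nested_vmptr_is_valid(u64 vmptr, int maxphyaddr)
{
        if (vmptr & 0xfff)              /* not 4 KiB aligned */
                return false;
        if (vmptr >> maxphyaddr)        /* beyond the guest physical address width */
                return false;
        return true;
}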
+38 -34
arch/x86/kvm/x86.c
··· 5313 5313 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); 5314 5314 5315 5315 ctxt->eflags = kvm_get_rflags(vcpu); 5316 + ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; 5317 + 5316 5318 ctxt->eip = kvm_rip_read(vcpu); 5317 5319 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : 5318 5320 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : ··· 5530 5528 return dr6; 5531 5529 } 5532 5530 5533 - static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) 5531 + static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r) 5534 5532 { 5535 5533 struct kvm_run *kvm_run = vcpu->run; 5536 5534 5537 - /* 5538 - * rflags is the old, "raw" value of the flags. The new value has 5539 - * not been saved yet. 5540 - * 5541 - * This is correct even for TF set by the guest, because "the 5542 - * processor will not generate this exception after the instruction 5543 - * that sets the TF flag". 5544 - */ 5545 - if (unlikely(rflags & X86_EFLAGS_TF)) { 5546 - if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 5547 - kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | 5548 - DR6_RTM; 5549 - kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; 5550 - kvm_run->debug.arch.exception = DB_VECTOR; 5551 - kvm_run->exit_reason = KVM_EXIT_DEBUG; 5552 - *r = EMULATE_USER_EXIT; 5553 - } else { 5554 - /* 5555 - * "Certain debug exceptions may clear bit 0-3. The 5556 - * remaining contents of the DR6 register are never 5557 - * cleared by the processor". 5558 - */ 5559 - vcpu->arch.dr6 &= ~15; 5560 - vcpu->arch.dr6 |= DR6_BS | DR6_RTM; 5561 - kvm_queue_exception(vcpu, DB_VECTOR); 5562 - } 5535 + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { 5536 + kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; 5537 + kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; 5538 + kvm_run->debug.arch.exception = DB_VECTOR; 5539 + kvm_run->exit_reason = KVM_EXIT_DEBUG; 5540 + *r = EMULATE_USER_EXIT; 5541 + } else { 5542 + /* 5543 + * "Certain debug exceptions may clear bit 0-3. The 5544 + * remaining contents of the DR6 register are never 5545 + * cleared by the processor". 5546 + */ 5547 + vcpu->arch.dr6 &= ~15; 5548 + vcpu->arch.dr6 |= DR6_BS | DR6_RTM; 5549 + kvm_queue_exception(vcpu, DB_VECTOR); 5563 5550 } 5564 5551 } 5565 5552 ··· 5558 5567 int r = EMULATE_DONE; 5559 5568 5560 5569 kvm_x86_ops->skip_emulated_instruction(vcpu); 5561 - kvm_vcpu_check_singlestep(vcpu, rflags, &r); 5570 + 5571 + /* 5572 + * rflags is the old, "raw" value of the flags. The new value has 5573 + * not been saved yet. 5574 + * 5575 + * This is correct even for TF set by the guest, because "the 5576 + * processor will not generate this exception after the instruction 5577 + * that sets the TF flag". 
5578 + */ 5579 + if (unlikely(rflags & X86_EFLAGS_TF)) 5580 + kvm_vcpu_do_singlestep(vcpu, &r); 5562 5581 return r == EMULATE_DONE; 5563 5582 } 5564 5583 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); ··· 5727 5726 toggle_interruptibility(vcpu, ctxt->interruptibility); 5728 5727 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 5729 5728 kvm_rip_write(vcpu, ctxt->eip); 5730 - if (r == EMULATE_DONE) 5731 - kvm_vcpu_check_singlestep(vcpu, rflags, &r); 5729 + if (r == EMULATE_DONE && 5730 + (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) 5731 + kvm_vcpu_do_singlestep(vcpu, &r); 5732 5732 if (!ctxt->have_exception || 5733 5733 exception_type(ctxt->exception.vector) == EXCPT_TRAP) 5734 5734 __kvm_set_rflags(vcpu, ctxt->eflags); ··· 8396 8394 if (vcpu->arch.pv.pv_unhalted) 8397 8395 return true; 8398 8396 8399 - if (atomic_read(&vcpu->arch.nmi_queued)) 8397 + if (kvm_test_request(KVM_REQ_NMI, vcpu) || 8398 + (vcpu->arch.nmi_pending && 8399 + kvm_x86_ops->nmi_allowed(vcpu))) 8400 8400 return true; 8401 8401 8402 - if (kvm_test_request(KVM_REQ_SMI, vcpu)) 8402 + if (kvm_test_request(KVM_REQ_SMI, vcpu) || 8403 + (vcpu->arch.smi_pending && !is_smm(vcpu))) 8403 8404 return true; 8404 8405 8405 8406 if (kvm_arch_interrupt_allowed(vcpu) && ··· 8609 8604 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) 8610 8605 return true; 8611 8606 else 8612 - return !kvm_event_needs_reinjection(vcpu) && 8613 - kvm_x86_ops->interrupt_allowed(vcpu); 8607 + return kvm_can_do_async_pf(vcpu); 8614 8608 } 8615 8609 8616 8610 void kvm_arch_start_assignment(struct kvm *kvm)
+3
arch/x86/mm/extable.c
··· 162 162 if (fixup_exception(regs, trapnr)) 163 163 return; 164 164 165 + if (fixup_bug(regs, trapnr)) 166 + return; 167 + 165 168 fail: 166 169 early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n", 167 170 (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
+1 -1
arch/x86/mm/hugetlbpage.c
··· 148 148 addr = ALIGN(addr, huge_page_size(h)); 149 149 vma = find_vma(mm, addr); 150 150 if (TASK_SIZE - len >= addr && 151 - (!vma || addr + len <= vma->vm_start)) 151 + (!vma || addr + len <= vm_start_gap(vma))) 152 152 return addr; 153 153 } 154 154 if (mm->get_unmapped_area == arch_get_unmapped_area)
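The one-line change above makes the hugetlb fit check stop at vm_start_gap(vma) rather than vma->vm_start, so a new mapping can no longer be placed inside the guard gap reserved below a downward-growing stack. A simplified kernel-context sketch of that kind of gap-aware check; start_with_gap() is a made-up stand-in for vm_start_gap() and omits the underflow clamping the real helper needs:

/* Made-up stand-in for vm_start_gap(): where does this VMA effectively begin? */
static unsigned long start_with_gap(struct vm_area_struct *vma,
                                    unsigned long gap_bytes)
{
        unsigned long start = vma->vm_start;

        if (vma->vm_flags & VM_GROWSDOWN)
                start -= gap_bytes;     /* keep the guard gap below the stack free */

        return start;
}

/* A candidate range [addr, addr + len) fits only if it ends before that point. */
static bool range_fits_before(unsigned long addr, unsigned long len,
                              struct vm_area_struct *vma,
                              unsigned long gap_bytes)
{
        return !vma || addr + len <= start_with_gap(vma, gap_bytes);
}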
+3 -3
arch/x86/mm/init.c
··· 161 161 162 162 static void __init probe_page_size_mask(void) 163 163 { 164 - #if !defined(CONFIG_KMEMCHECK) 165 164 /* 166 165 * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will 167 166 * use small pages. 168 167 * This will simplify cpa(), which otherwise needs to support splitting 169 168 * large pages into small in interrupt context, etc. 170 169 */ 171 - if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) 170 + if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK)) 172 171 page_size_mask |= 1 << PG_LEVEL_2M; 173 - #endif 172 + else 173 + direct_gbpages = 0; 174 174 175 175 /* Enable PSE if available */ 176 176 if (boot_cpu_has(X86_FEATURE_PSE))
+7 -1
arch/x86/mm/init_64.c
··· 990 990 991 991 pud_base = pud_offset(p4d, 0); 992 992 remove_pud_table(pud_base, addr, next, direct); 993 - free_pud_table(pud_base, p4d); 993 + /* 994 + * For 4-level page tables we do not want to free PUDs, but in the 995 + * 5-level case we should free them. This code will have to change 996 + * to adapt for boot-time switching between 4 and 5 level page tables. 997 + */ 998 + if (CONFIG_PGTABLE_LEVELS == 5) 999 + free_pud_table(pud_base, p4d); 994 1000 } 995 1001 996 1002 if (direct)
+3 -6
arch/x86/mm/pat.c
··· 65 65 } 66 66 early_param("nopat", nopat); 67 67 68 - static bool __read_mostly __pat_initialized = false; 69 - 70 68 bool pat_enabled(void) 71 69 { 72 - return __pat_initialized; 70 + return !!__pat_enabled; 73 71 } 74 72 EXPORT_SYMBOL_GPL(pat_enabled); 75 73 ··· 225 227 } 226 228 227 229 wrmsrl(MSR_IA32_CR_PAT, pat); 228 - __pat_initialized = true; 229 230 230 231 __init_cache_modes(pat); 231 232 } 232 233 233 234 static void pat_ap_init(u64 pat) 234 235 { 235 - if (!this_cpu_has(X86_FEATURE_PAT)) { 236 + if (!boot_cpu_has(X86_FEATURE_PAT)) { 236 237 /* 237 238 * If this happens we are on a secondary CPU, but switched to 238 239 * PAT on the boot CPU. We have no way to undo PAT. ··· 306 309 u64 pat; 307 310 struct cpuinfo_x86 *c = &boot_cpu_data; 308 311 309 - if (!__pat_enabled) { 312 + if (!pat_enabled()) { 310 313 init_cache_modes(); 311 314 return; 312 315 }
+4 -2
arch/x86/platform/efi/efi.c
··· 828 828 829 829 /* 830 830 * We don't do virtual mode, since we don't do runtime services, on 831 - * non-native EFI 831 + * non-native EFI. With efi=old_map, we don't do runtime services in 832 + * kexec kernel because in the initial boot something else might 833 + * have been mapped at these virtual addresses. 832 834 */ 833 - if (!efi_is_native()) { 835 + if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) { 834 836 efi_memmap_unmap(); 835 837 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); 836 838 return;
+71 -8
arch/x86/platform/efi/efi_64.c
··· 71 71 72 72 pgd_t * __init efi_call_phys_prolog(void) 73 73 { 74 - unsigned long vaddress; 75 - pgd_t *save_pgd; 74 + unsigned long vaddr, addr_pgd, addr_p4d, addr_pud; 75 + pgd_t *save_pgd, *pgd_k, *pgd_efi; 76 + p4d_t *p4d, *p4d_k, *p4d_efi; 77 + pud_t *pud; 76 78 77 79 int pgd; 78 - int n_pgds; 80 + int n_pgds, i, j; 79 81 80 82 if (!efi_enabled(EFI_OLD_MEMMAP)) { 81 83 save_pgd = (pgd_t *)read_cr3(); ··· 90 88 n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); 91 89 save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL); 92 90 91 + /* 92 + * Build 1:1 identity mapping for efi=old_map usage. Note that 93 + * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while 94 + * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical 95 + * address X, the pud_index(X) != pud_index(__va(X)), we can only copy 96 + * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping. 97 + * This means here we can only reuse the PMD tables of the direct mapping. 98 + */ 93 99 for (pgd = 0; pgd < n_pgds; pgd++) { 94 - save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE); 95 - vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); 96 - set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); 100 + addr_pgd = (unsigned long)(pgd * PGDIR_SIZE); 101 + vaddr = (unsigned long)__va(pgd * PGDIR_SIZE); 102 + pgd_efi = pgd_offset_k(addr_pgd); 103 + save_pgd[pgd] = *pgd_efi; 104 + 105 + p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd); 106 + if (!p4d) { 107 + pr_err("Failed to allocate p4d table!\n"); 108 + goto out; 109 + } 110 + 111 + for (i = 0; i < PTRS_PER_P4D; i++) { 112 + addr_p4d = addr_pgd + i * P4D_SIZE; 113 + p4d_efi = p4d + p4d_index(addr_p4d); 114 + 115 + pud = pud_alloc(&init_mm, p4d_efi, addr_p4d); 116 + if (!pud) { 117 + pr_err("Failed to allocate pud table!\n"); 118 + goto out; 119 + } 120 + 121 + for (j = 0; j < PTRS_PER_PUD; j++) { 122 + addr_pud = addr_p4d + j * PUD_SIZE; 123 + 124 + if (addr_pud > (max_pfn << PAGE_SHIFT)) 125 + break; 126 + 127 + vaddr = (unsigned long)__va(addr_pud); 128 + 129 + pgd_k = pgd_offset_k(vaddr); 130 + p4d_k = p4d_offset(pgd_k, vaddr); 131 + pud[j] = *pud_offset(p4d_k, vaddr); 132 + } 133 + } 97 134 } 98 135 out: 99 136 __flush_tlb_all(); ··· 145 104 /* 146 105 * After the lock is released, the original page table is restored. 147 106 */ 148 - int pgd_idx; 107 + int pgd_idx, i; 149 108 int nr_pgds; 109 + pgd_t *pgd; 110 + p4d_t *p4d; 111 + pud_t *pud; 150 112 151 113 if (!efi_enabled(EFI_OLD_MEMMAP)) { 152 114 write_cr3((unsigned long)save_pgd); ··· 159 115 160 116 nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); 161 117 162 - for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) 118 + for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) { 119 + pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE); 163 120 set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); 121 + 122 + if (!(pgd_val(*pgd) & _PAGE_PRESENT)) 123 + continue; 124 + 125 + for (i = 0; i < PTRS_PER_P4D; i++) { 126 + p4d = p4d_offset(pgd, 127 + pgd_idx * PGDIR_SIZE + i * P4D_SIZE); 128 + 129 + if (!(p4d_val(*p4d) & _PAGE_PRESENT)) 130 + continue; 131 + 132 + pud = (pud_t *)p4d_page_vaddr(*p4d); 133 + pud_free(&init_mm, pud); 134 + } 135 + 136 + p4d = (p4d_t *)pgd_page_vaddr(*pgd); 137 + p4d_free(&init_mm, p4d); 138 + } 164 139 165 140 kfree(save_pgd); 166 141
+3
arch/x86/platform/efi/quirks.c
··· 360 360 free_bootmem_late(start, size); 361 361 } 362 362 363 + if (!num_entries) 364 + return; 365 + 363 366 new_size = efi.memmap.desc_size * num_entries; 364 367 new_phys = efi_memmap_alloc(num_entries); 365 368 if (!new_phys) {
+2 -1
arch/xtensa/include/asm/irq.h
··· 29 29 # define PLATFORM_NR_IRQS 0 30 30 #endif 31 31 #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS 32 - #define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) 32 + #define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1) 33 + #define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1) 33 34 34 35 #if VARIANT_NR_IRQS == 0 35 36 static inline void variant_init_irq(void) { }
-2
arch/xtensa/include/asm/processor.h
··· 213 213 #define release_segments(mm) do { } while(0) 214 214 #define forget_segments() do { } while (0) 215 215 216 - #define thread_saved_pc(tsk) (task_pt_regs(tsk)->pc) 217 - 218 216 extern unsigned long get_wchan(struct task_struct *p); 219 217 220 218 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
-5
arch/xtensa/kernel/irq.c
··· 34 34 { 35 35 int irq = irq_find_mapping(NULL, hwirq); 36 36 37 - if (hwirq >= NR_IRQS) { 38 - printk(KERN_EMERG "%s: cannot handle IRQ %d\n", 39 - __func__, hwirq); 40 - } 41 - 42 37 #ifdef CONFIG_DEBUG_STACKOVERFLOW 43 38 /* Debugging check for stack overflow: is there less than 1KB free? */ 44 39 {
+1 -2
arch/xtensa/kernel/setup.c
··· 593 593 (ccount_freq/10000) % 100, 594 594 loops_per_jiffy/(500000/HZ), 595 595 (loops_per_jiffy/(5000/HZ)) % 100); 596 - 597 - seq_printf(f,"flags\t\t: " 596 + seq_puts(f, "flags\t\t: " 598 597 #if XCHAL_HAVE_NMI 599 598 "nmi " 600 599 #endif
+1 -1
arch/xtensa/kernel/syscall.c
··· 88 88 /* At this point: (!vmm || addr < vmm->vm_end). */ 89 89 if (TASK_SIZE - len < addr) 90 90 return -ENOMEM; 91 - if (!vmm || addr + len <= vmm->vm_start) 91 + if (!vmm || addr + len <= vm_start_gap(vmm)) 92 92 return addr; 93 93 addr = vmm->vm_end; 94 94 if (flags & MAP_SHARED)
+3 -3
arch/xtensa/kernel/vmlinux.lds.S
··· 118 118 SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) 119 119 SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) 120 120 SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) 121 - SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48) 121 + SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20) 122 122 SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) 123 123 #endif 124 124 ··· 306 306 .UserExceptionVector.literal) 307 307 SECTION_VECTOR (_DoubleExceptionVector_literal, 308 308 .DoubleExceptionVector.literal, 309 - DOUBLEEXC_VECTOR_VADDR - 48, 309 + DOUBLEEXC_VECTOR_VADDR - 20, 310 310 SIZEOF(.UserExceptionVector.text), 311 311 .UserExceptionVector.text) 312 312 SECTION_VECTOR (_DoubleExceptionVector_text, 313 313 .DoubleExceptionVector.text, 314 314 DOUBLEEXC_VECTOR_VADDR, 315 - 48, 315 + 20, 316 316 .DoubleExceptionVector.literal) 317 317 318 318 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
+1 -2
arch/xtensa/platforms/iss/simdisk.c
··· 317 317 if (simdisk_count > MAX_SIMDISK_COUNT) 318 318 simdisk_count = MAX_SIMDISK_COUNT; 319 319 320 - sddev = kmalloc(simdisk_count * sizeof(struct simdisk), 321 - GFP_KERNEL); 320 + sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL); 322 321 if (sddev == NULL) 323 322 goto out_unregister; 324 323
+4 -2
arch/xtensa/platforms/xtfpga/include/platform/hardware.h
··· 24 24 25 25 /* Interrupt configuration. */ 26 26 27 - #define PLATFORM_NR_IRQS 10 27 + #define PLATFORM_NR_IRQS 0 28 28 29 29 /* Default assignment of LX60 devices to external interrupts. */ 30 30 31 31 #ifdef CONFIG_XTENSA_MX 32 32 #define DUART16552_INTNUM XCHAL_EXTINT3_NUM 33 33 #define OETH_IRQ XCHAL_EXTINT4_NUM 34 + #define C67X00_IRQ XCHAL_EXTINT8_NUM 34 35 #else 35 36 #define DUART16552_INTNUM XCHAL_EXTINT0_NUM 36 37 #define OETH_IRQ XCHAL_EXTINT1_NUM 38 + #define C67X00_IRQ XCHAL_EXTINT5_NUM 37 39 #endif 38 40 39 41 /* ··· 65 63 66 64 #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) 67 65 #define C67X00_SIZE 0x10 68 - #define C67X00_IRQ 5 66 + 69 67 #endif /* __XTENSA_XTAVNET_HARDWARE_H */
+5 -5
arch/xtensa/platforms/xtfpga/setup.c
··· 175 175 .flags = IORESOURCE_MEM, 176 176 }, 177 177 [2] = { /* IRQ number */ 178 - .start = OETH_IRQ, 179 - .end = OETH_IRQ, 178 + .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), 179 + .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), 180 180 .flags = IORESOURCE_IRQ, 181 181 }, 182 182 }; ··· 213 213 .flags = IORESOURCE_MEM, 214 214 }, 215 215 [1] = { /* IRQ number */ 216 - .start = C67X00_IRQ, 217 - .end = C67X00_IRQ, 216 + .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), 217 + .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), 218 218 .flags = IORESOURCE_IRQ, 219 219 }, 220 220 }; ··· 247 247 static struct plat_serial8250_port serial_platform_data[] = { 248 248 [0] = { 249 249 .mapbase = DUART16552_PADDR, 250 - .irq = DUART16552_INTNUM, 250 + .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM), 251 251 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | 252 252 UPF_IOREMAP, 253 253 .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
+93 -23
block/bfq-cgroup.c
··· 52 52 BFQG_FLAG_FNS(empty) 53 53 #undef BFQG_FLAG_FNS 54 54 55 - /* This should be called with the queue_lock held. */ 55 + /* This should be called with the scheduler lock held. */ 56 56 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) 57 57 { 58 58 unsigned long long now; ··· 67 67 bfqg_stats_clear_waiting(stats); 68 68 } 69 69 70 - /* This should be called with the queue_lock held. */ 70 + /* This should be called with the scheduler lock held. */ 71 71 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, 72 72 struct bfq_group *curr_bfqg) 73 73 { ··· 81 81 bfqg_stats_mark_waiting(stats); 82 82 } 83 83 84 - /* This should be called with the queue_lock held. */ 84 + /* This should be called with the scheduler lock held. */ 85 85 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) 86 86 { 87 87 unsigned long long now; ··· 203 203 204 204 static void bfqg_get(struct bfq_group *bfqg) 205 205 { 206 - return blkg_get(bfqg_to_blkg(bfqg)); 206 + bfqg->ref++; 207 207 } 208 208 209 209 void bfqg_put(struct bfq_group *bfqg) 210 210 { 211 - return blkg_put(bfqg_to_blkg(bfqg)); 211 + bfqg->ref--; 212 + 213 + if (bfqg->ref == 0) 214 + kfree(bfqg); 215 + } 216 + 217 + static void bfqg_and_blkg_get(struct bfq_group *bfqg) 218 + { 219 + /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */ 220 + bfqg_get(bfqg); 221 + 222 + blkg_get(bfqg_to_blkg(bfqg)); 223 + } 224 + 225 + void bfqg_and_blkg_put(struct bfq_group *bfqg) 226 + { 227 + bfqg_put(bfqg); 228 + 229 + blkg_put(bfqg_to_blkg(bfqg)); 212 230 } 213 231 214 232 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq, ··· 330 312 if (bfqq) { 331 313 bfqq->ioprio = bfqq->new_ioprio; 332 314 bfqq->ioprio_class = bfqq->new_ioprio_class; 333 - bfqg_get(bfqg); 315 + /* 316 + * Make sure that bfqg and its associated blkg do not 317 + * disappear before entity. 318 + */ 319 + bfqg_and_blkg_get(bfqg); 334 320 } 335 321 entity->parent = bfqg->my_entity; /* NULL for root group */ 336 322 entity->sched_data = &bfqg->sched_data; ··· 421 399 return NULL; 422 400 } 423 401 402 + /* see comments in bfq_bic_update_cgroup for why refcounting */ 403 + bfqg_get(bfqg); 424 404 return &bfqg->pd; 425 405 } 426 406 ··· 450 426 struct bfq_group *bfqg = pd_to_bfqg(pd); 451 427 452 428 bfqg_stats_exit(&bfqg->stats); 453 - return kfree(bfqg); 429 + bfqg_put(bfqg); 454 430 } 455 431 456 432 void bfq_pd_reset_stats(struct blkg_policy_data *pd) ··· 520 496 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating 521 497 * it on the new one. Avoid putting the entity on the old group idle tree. 522 498 * 523 - * Must be called under the queue lock; the cgroup owning @bfqg must 524 - * not disappear (by now this just means that we are called under 525 - * rcu_read_lock()). 499 + * Must be called under the scheduler lock, to make sure that the blkg 500 + * owning @bfqg does not disappear (see comments in 501 + * bfq_bic_update_cgroup on guaranteeing the consistency of blkg 502 + * objects). 526 503 */ 527 504 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, 528 505 struct bfq_group *bfqg) ··· 544 519 bfq_deactivate_bfqq(bfqd, bfqq, false, false); 545 520 else if (entity->on_st) 546 521 bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); 547 - bfqg_put(bfqq_group(bfqq)); 522 + bfqg_and_blkg_put(bfqq_group(bfqq)); 548 523 549 - /* 550 - * Here we use a reference to bfqg. 
We don't need a refcounter 551 - * as the cgroup reference will not be dropped, so that its 552 - * destroy() callback will not be invoked. 553 - */ 554 524 entity->parent = bfqg->my_entity; 555 525 entity->sched_data = &bfqg->sched_data; 556 - bfqg_get(bfqg); 526 + /* pin down bfqg and its associated blkg */ 527 + bfqg_and_blkg_get(bfqg); 557 528 558 529 if (bfq_bfqq_busy(bfqq)) { 559 530 bfq_pos_tree_add_move(bfqd, bfqq); ··· 566 545 * @bic: the bic to move. 567 546 * @blkcg: the blk-cgroup to move to. 568 547 * 569 - * Move bic to blkcg, assuming that bfqd->queue is locked; the caller 570 - * has to make sure that the reference to cgroup is valid across the call. 548 + * Move bic to blkcg, assuming that bfqd->lock is held; which makes 549 + * sure that the reference to cgroup is valid across the call (see 550 + * comments in bfq_bic_update_cgroup on this issue) 571 551 * 572 552 * NOTE: an alternative approach might have been to store the current 573 553 * cgroup in bfqq and getting a reference to it, reducing the lookup ··· 626 604 goto out; 627 605 628 606 bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio)); 607 + /* 608 + * Update blkg_path for bfq_log_* functions. We cache this 609 + * path, and update it here, for the following 610 + * reasons. Operations on blkg objects in blk-cgroup are 611 + * protected with the request_queue lock, and not with the 612 + * lock that protects the instances of this scheduler 613 + * (bfqd->lock). This exposes BFQ to the following sort of 614 + * race. 615 + * 616 + * The blkg_lookup performed in bfq_get_queue, protected 617 + * through rcu, may happen to return the address of a copy of 618 + * the original blkg. If this is the case, then the 619 + * bfqg_and_blkg_get performed in bfq_get_queue, to pin down 620 + * the blkg, is useless: it does not prevent blk-cgroup code 621 + * from destroying both the original blkg and all objects 622 + * directly or indirectly referred by the copy of the 623 + * blkg. 624 + * 625 + * On the bright side, destroy operations on a blkg invoke, as 626 + * a first step, hooks of the scheduler associated with the 627 + * blkg. And these hooks are executed with bfqd->lock held for 628 + * BFQ. As a consequence, for any blkg associated with the 629 + * request queue this instance of the scheduler is attached 630 + * to, we are guaranteed that such a blkg is not destroyed, and 631 + * that all the pointers it contains are consistent, while we 632 + * are holding bfqd->lock. A blkg_lookup performed with 633 + * bfqd->lock held then returns a fully consistent blkg, which 634 + * remains consistent until this lock is held. 635 + * 636 + * Thanks to the last fact, and to the fact that: (1) bfqg has 637 + * been obtained through a blkg_lookup in the above 638 + * assignment, and (2) bfqd->lock is being held, here we can 639 + * safely use the policy data for the involved blkg (i.e., the 640 + * field bfqg->pd) to get to the blkg associated with bfqg, 641 + * and then we can safely use any field of blkg. After we 642 + * release bfqd->lock, even just getting blkg through this 643 + * bfqg may cause dangling references to be traversed, as 644 + * bfqg->pd may not exist any more. 645 + * 646 + * In view of the above facts, here we cache, in the bfqg, any 647 + * blkg data we may need for this bic, and for its associated 648 + * bfq_queue. As of now, we need to cache only the path of the 649 + * blkg, which is used in the bfq_log_* functions. 
650 + * 651 + * Finally, note that bfqg itself needs to be protected from 652 + * destruction on the blkg_free of the original blkg (which 653 + * invokes bfq_pd_free). We use an additional private 654 + * refcounter for bfqg, to let it disappear only after no 655 + * bfq_queue refers to it any longer. 656 + */ 657 + blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path)); 629 658 bic->blkcg_serial_nr = serial_nr; 630 659 out: 631 660 rcu_read_unlock(); ··· 713 640 * @bfqd: the device data structure with the root group. 714 641 * @bfqg: the group to move from. 715 642 * @st: the service tree with the entities. 716 - * 717 - * Needs queue_lock to be taken and reference to be valid over the call. 718 643 */ 719 644 static void bfq_reparent_active_entities(struct bfq_data *bfqd, 720 645 struct bfq_group *bfqg, ··· 763 692 /* 764 693 * The idle tree may still contain bfq_queues belonging 765 694 * to exited task because they never migrated to a different 766 - * cgroup from the one being destroyed now. No one else 767 - * can access them so it's safe to act without any lock. 695 + * cgroup from the one being destroyed now. 768 696 */ 769 697 bfq_flush_idle_tree(st); 770 698
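The closing paragraphs of the comment above reduce to one pattern: bfqg gets a private reference count so it can outlive the blkg whose policy data embeds it, with the plain (non-atomic) increment relying on the single scheduler lock the comment describes. Stripped of the blk-cgroup specifics, the shape looks like the following sketch; the names are hypothetical:

/* An object embedded under another subsystem's structure, given its own lifetime. */
struct child {
        int ref;                /* protected by the owner's lock, as in the hunk above */
        /* ... payload ... */
};

static void child_get(struct child *c)
{
        c->ref++;
}

static void child_put(struct child *c)
{
        c->ref--;
        if (c->ref == 0)
                kfree(c);       /* freed only once the last holder lets go */
}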
+1 -1
block/bfq-iosched.c
··· 3665 3665 3666 3666 kmem_cache_free(bfq_pool, bfqq); 3667 3667 #ifdef CONFIG_BFQ_GROUP_IOSCHED 3668 - bfqg_put(bfqg); 3668 + bfqg_and_blkg_put(bfqg); 3669 3669 #endif 3670 3670 } 3671 3671
+11 -12
block/bfq-iosched.h
··· 759 759 /* must be the first member */ 760 760 struct blkg_policy_data pd; 761 761 762 + /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */ 763 + char blkg_path[128]; 764 + 765 + /* reference counter (see comments in bfq_bic_update_cgroup) */ 766 + int ref; 767 + 762 768 struct bfq_entity entity; 763 769 struct bfq_sched_data sched_data; 764 770 ··· 844 838 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); 845 839 struct bfq_group *bfqq_group(struct bfq_queue *bfqq); 846 840 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node); 847 - void bfqg_put(struct bfq_group *bfqg); 841 + void bfqg_and_blkg_put(struct bfq_group *bfqg); 848 842 849 843 #ifdef CONFIG_BFQ_GROUP_IOSCHED 850 844 extern struct cftype bfq_blkcg_legacy_files[]; ··· 916 910 struct bfq_group *bfqq_group(struct bfq_queue *bfqq); 917 911 918 912 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \ 919 - char __pbuf[128]; \ 920 - \ 921 - blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ 922 - blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \ 913 + blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\ 923 914 bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ 924 - __pbuf, ##args); \ 915 + bfqq_group(bfqq)->blkg_path, ##args); \ 925 916 } while (0) 926 917 927 - #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ 928 - char __pbuf[128]; \ 929 - \ 930 - blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ 931 - blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args); \ 932 - } while (0) 918 + #define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \ 919 + blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args) 933 920 934 921 #else /* CONFIG_BFQ_GROUP_IOSCHED */ 935 922
+3
block/bio-integrity.c
··· 175 175 if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE) 176 176 return false; 177 177 178 + if (!bio_sectors(bio)) 179 + return false; 180 + 178 181 /* Already protected? */ 179 182 if (bio_integrity(bio)) 180 183 return false;
+9 -3
block/bio.c
··· 240 240 return bvl; 241 241 } 242 242 243 - static void __bio_free(struct bio *bio) 243 + void bio_uninit(struct bio *bio) 244 244 { 245 245 bio_disassociate_task(bio); 246 246 247 247 if (bio_integrity(bio)) 248 248 bio_integrity_free(bio); 249 249 } 250 + EXPORT_SYMBOL(bio_uninit); 250 251 251 252 static void bio_free(struct bio *bio) 252 253 { 253 254 struct bio_set *bs = bio->bi_pool; 254 255 void *p; 255 256 256 - __bio_free(bio); 257 + bio_uninit(bio); 257 258 258 259 if (bs) { 259 260 bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio)); ··· 272 271 } 273 272 } 274 273 274 + /* 275 + * Users of this function have their own bio allocation. Subsequently, 276 + * they must remember to pair any call to bio_init() with bio_uninit() 277 + * when IO has completed, or when the bio is released. 278 + */ 275 279 void bio_init(struct bio *bio, struct bio_vec *table, 276 280 unsigned short max_vecs) 277 281 { ··· 303 297 { 304 298 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); 305 299 306 - __bio_free(bio); 300 + bio_uninit(bio); 307 301 308 302 memset(bio, 0, BIO_RESET_BYTES); 309 303 bio->bi_flags = flags;
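The comment added above bio_init() states an ownership rule: a bio that was not allocated from a bio_set never goes through bio_free(), so its owner must call the newly exported bio_uninit() once the I/O is done. A hedged usage sketch of that pairing for an on-stack bio, with the setup and submit details elided:

/* Sketch: caller-owned bio, so the caller pairs bio_init() with bio_uninit(). */
static void example_sync_io(void)
{
        struct bio bio;
        struct bio_vec bvec;

        bio_init(&bio, &bvec, 1);
        /* ... set the target device and sector, add the page, submit and wait ... */

        bio_uninit(&bio);       /* drop integrity/cgroup state acquired since bio_init() */
}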
+1 -1
block/blk-cgroup.c
··· 74 74 blkcg_policy[i]->pd_free_fn(blkg->pd[i]); 75 75 76 76 if (blkg->blkcg != &blkcg_root) 77 - blk_exit_rl(&blkg->rl); 77 + blk_exit_rl(blkg->q, &blkg->rl); 78 78 79 79 blkg_rwstat_exit(&blkg->stat_ios); 80 80 blkg_rwstat_exit(&blkg->stat_bytes);
+8 -2
block/blk-core.c
··· 648 648 if (!rl->rq_pool) 649 649 return -ENOMEM; 650 650 651 + if (rl != &q->root_rl) 652 + WARN_ON_ONCE(!blk_get_queue(q)); 653 + 651 654 return 0; 652 655 } 653 656 654 - void blk_exit_rl(struct request_list *rl) 657 + void blk_exit_rl(struct request_queue *q, struct request_list *rl) 655 658 { 656 - if (rl->rq_pool) 659 + if (rl->rq_pool) { 657 660 mempool_destroy(rl->rq_pool); 661 + if (rl != &q->root_rl) 662 + blk_put_queue(q); 663 + } 658 664 } 659 665 660 666 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
+46 -12
block/blk-mq-sched.c
··· 68 68 __blk_mq_sched_assign_ioc(q, rq, bio, ioc); 69 69 } 70 70 71 + /* 72 + * Mark a hardware queue as needing a restart. For shared queues, maintain 73 + * a count of how many hardware queues are marked for restart. 74 + */ 75 + static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) 76 + { 77 + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 78 + return; 79 + 80 + if (hctx->flags & BLK_MQ_F_TAG_SHARED) { 81 + struct request_queue *q = hctx->queue; 82 + 83 + if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 84 + atomic_inc(&q->shared_hctx_restart); 85 + } else 86 + set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 87 + } 88 + 89 + static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) 90 + { 91 + if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 92 + return false; 93 + 94 + if (hctx->flags & BLK_MQ_F_TAG_SHARED) { 95 + struct request_queue *q = hctx->queue; 96 + 97 + if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 98 + atomic_dec(&q->shared_hctx_restart); 99 + } else 100 + clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 101 + 102 + if (blk_mq_hctx_has_pending(hctx)) { 103 + blk_mq_run_hw_queue(hctx, true); 104 + return true; 105 + } 106 + 107 + return false; 108 + } 109 + 71 110 struct request *blk_mq_sched_get_request(struct request_queue *q, 72 111 struct bio *bio, 73 112 unsigned int op, ··· 305 266 return true; 306 267 } 307 268 308 - static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx) 309 - { 310 - if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) { 311 - clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 312 - if (blk_mq_hctx_has_pending(hctx)) { 313 - blk_mq_run_hw_queue(hctx, true); 314 - return true; 315 - } 316 - } 317 - return false; 318 - } 319 - 320 269 /** 321 270 * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list 322 271 * @pos: loop cursor. ··· 336 309 unsigned int i, j; 337 310 338 311 if (set->flags & BLK_MQ_F_TAG_SHARED) { 312 + /* 313 + * If this is 0, then we know that no hardware queues 314 + * have RESTART marked. We're done. 315 + */ 316 + if (!atomic_read(&queue->shared_hctx_restart)) 317 + return; 318 + 339 319 rcu_read_lock(); 340 320 list_for_each_entry_rcu_rr(q, queue, &set->tag_list, 341 321 tag_set_list) {
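The restart rework above pairs each hardware queue's RESTART bit with a per-queue counter of how many are marked, so the shared-tag walk can return immediately when the counter reads zero. The essential mark/check pattern in isolation, using hypothetical types and a hypothetical flag bit:

struct example_hctx {
        unsigned long state;            /* flag bits for one hardware queue */
};

struct example_queue {
        atomic_t nr_marked;             /* how many hctxs currently have the bit set */
};

#define EX_NEEDS_RESTART 0              /* hypothetical bit number */

/* Only the 0 -> 1 transition of the flag bumps the counter. */
static void mark_needs_restart(struct example_queue *q, struct example_hctx *hctx)
{
        if (!test_and_set_bit(EX_NEEDS_RESTART, &hctx->state))
                atomic_inc(&q->nr_marked);
}

/* Fast path: if nothing is marked, skip the expensive scan entirely. */
static void restart_marked(struct example_queue *q)
{
        if (!atomic_read(&q->nr_marked))
                return;

        /* ... otherwise walk the queues, clearing bits and decrementing ... */
}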
-9
block/blk-mq-sched.h
··· 115 115 return false; 116 116 } 117 117 118 - /* 119 - * Mark a hardware queue as needing a restart. 120 - */ 121 - static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx) 122 - { 123 - if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 124 - set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); 125 - } 126 - 127 118 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) 128 119 { 129 120 return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+39 -12
block/blk-mq.c
··· 1461 1461 return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true); 1462 1462 } 1463 1463 1464 - static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie, 1465 - bool may_sleep) 1464 + static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, 1465 + struct request *rq, 1466 + blk_qc_t *cookie, bool may_sleep) 1466 1467 { 1467 1468 struct request_queue *q = rq->q; 1468 1469 struct blk_mq_queue_data bd = { 1469 1470 .rq = rq, 1470 1471 .last = true, 1471 1472 }; 1472 - struct blk_mq_hw_ctx *hctx; 1473 1473 blk_qc_t new_cookie; 1474 1474 int ret; 1475 + bool run_queue = true; 1476 + 1477 + if (blk_mq_hctx_stopped(hctx)) { 1478 + run_queue = false; 1479 + goto insert; 1480 + } 1475 1481 1476 1482 if (q->elevator) 1477 1483 goto insert; 1478 1484 1479 - if (!blk_mq_get_driver_tag(rq, &hctx, false)) 1485 + if (!blk_mq_get_driver_tag(rq, NULL, false)) 1480 1486 goto insert; 1481 1487 1482 1488 new_cookie = request_to_qc_t(hctx, rq); ··· 1506 1500 1507 1501 __blk_mq_requeue_request(rq); 1508 1502 insert: 1509 - blk_mq_sched_insert_request(rq, false, true, false, may_sleep); 1503 + blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep); 1510 1504 } 1511 1505 1512 1506 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, ··· 1514 1508 { 1515 1509 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { 1516 1510 rcu_read_lock(); 1517 - __blk_mq_try_issue_directly(rq, cookie, false); 1511 + __blk_mq_try_issue_directly(hctx, rq, cookie, false); 1518 1512 rcu_read_unlock(); 1519 1513 } else { 1520 1514 unsigned int srcu_idx; ··· 1522 1516 might_sleep(); 1523 1517 1524 1518 srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu); 1525 - __blk_mq_try_issue_directly(rq, cookie, true); 1519 + __blk_mq_try_issue_directly(hctx, rq, cookie, true); 1526 1520 srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx); 1527 1521 } 1528 1522 } ··· 1625 1619 1626 1620 blk_mq_put_ctx(data.ctx); 1627 1621 1628 - if (same_queue_rq) 1622 + if (same_queue_rq) { 1623 + data.hctx = blk_mq_map_queue(q, 1624 + same_queue_rq->mq_ctx->cpu); 1629 1625 blk_mq_try_issue_directly(data.hctx, same_queue_rq, 1630 1626 &cookie); 1627 + } 1631 1628 } else if (q->nr_hw_queues > 1 && is_sync) { 1632 1629 blk_mq_put_ctx(data.ctx); 1633 1630 blk_mq_bio_to_request(rq, bio); ··· 2103 2094 } 2104 2095 } 2105 2096 2097 + /* 2098 + * Caller needs to ensure that we're either frozen/quiesced, or that 2099 + * the queue isn't live yet. 
2100 + */ 2106 2101 static void queue_set_hctx_shared(struct request_queue *q, bool shared) 2107 2102 { 2108 2103 struct blk_mq_hw_ctx *hctx; 2109 2104 int i; 2110 2105 2111 2106 queue_for_each_hw_ctx(q, hctx, i) { 2112 - if (shared) 2107 + if (shared) { 2108 + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 2109 + atomic_inc(&q->shared_hctx_restart); 2113 2110 hctx->flags |= BLK_MQ_F_TAG_SHARED; 2114 - else 2111 + } else { 2112 + if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) 2113 + atomic_dec(&q->shared_hctx_restart); 2115 2114 hctx->flags &= ~BLK_MQ_F_TAG_SHARED; 2115 + } 2116 2116 } 2117 2117 } 2118 2118 2119 - static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared) 2119 + static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, 2120 + bool shared) 2120 2121 { 2121 2122 struct request_queue *q; 2122 2123 ··· 2660 2641 return ret; 2661 2642 } 2662 2643 2663 - void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 2644 + static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, 2645 + int nr_hw_queues) 2664 2646 { 2665 2647 struct request_queue *q; 2666 2648 ··· 2684 2664 2685 2665 list_for_each_entry(q, &set->tag_list, tag_set_list) 2686 2666 blk_mq_unfreeze_queue(q); 2667 + } 2668 + 2669 + void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) 2670 + { 2671 + mutex_lock(&set->tag_list_lock); 2672 + __blk_mq_update_nr_hw_queues(set, nr_hw_queues); 2673 + mutex_unlock(&set->tag_list_lock); 2687 2674 } 2688 2675 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); 2689 2676
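blk_mq_update_nr_hw_queues() is now a thin wrapper that takes set->tag_list_lock around a __ prefixed helper, so callers that already hold the lock can reuse the body directly. A minimal sketch of that split with placeholder names:

struct example_set {
        struct mutex lock;
        /* ... shared state ... */
};

/* Does the work; assumes the caller already holds set->lock. */
static void __example_update(struct example_set *set, int nr)
{
        lockdep_assert_held(&set->lock);
        /* ... apply nr to everything attached to the set ... */
}

/* Public entry point: take the lock, then defer to the helper. */
void example_update(struct example_set *set, int nr)
{
        mutex_lock(&set->lock);
        __example_update(set, nr);
        mutex_unlock(&set->lock);
}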
+23 -13
block/blk-sysfs.c
··· 777 777 } 778 778 779 779 /** 780 - * blk_release_queue: - release a &struct request_queue when it is no longer needed 781 - * @kobj: the kobj belonging to the request queue to be released 780 + * __blk_release_queue - release a request queue when it is no longer needed 781 + * @work: pointer to the release_work member of the request queue to be released 782 782 * 783 783 * Description: 784 - * blk_release_queue is the pair to blk_init_queue() or 785 - * blk_queue_make_request(). It should be called when a request queue is 786 - * being released; typically when a block device is being de-registered. 787 - * Currently, its primary task it to free all the &struct request 788 - * structures that were allocated to the queue and the queue itself. 784 + * blk_release_queue is the counterpart of blk_init_queue(). It should be 785 + * called when a request queue is being released; typically when a block 786 + * device is being de-registered. Its primary task is to free the queue 787 + * itself. 789 788 * 790 - * Note: 789 + * Notes: 791 790 * The low level driver must have finished any outstanding requests first 792 791 * via blk_cleanup_queue(). 793 - **/ 794 - static void blk_release_queue(struct kobject *kobj) 792 + * 793 + * Although blk_release_queue() may be called with preemption disabled, 794 + * __blk_release_queue() may sleep. 795 + */ 796 + static void __blk_release_queue(struct work_struct *work) 795 797 { 796 - struct request_queue *q = 797 - container_of(kobj, struct request_queue, kobj); 798 + struct request_queue *q = container_of(work, typeof(*q), release_work); 798 799 799 800 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) 800 801 blk_stat_remove_callback(q, q->poll_cb); ··· 810 809 811 810 blk_free_queue_stats(q->stats); 812 811 813 - blk_exit_rl(&q->root_rl); 812 + blk_exit_rl(q, &q->root_rl); 814 813 815 814 if (q->queue_tags) 816 815 __blk_queue_free_tags(q); ··· 833 832 834 833 ida_simple_remove(&blk_queue_ida, q->id); 835 834 call_rcu(&q->rcu_head, blk_free_queue_rcu); 835 + } 836 + 837 + static void blk_release_queue(struct kobject *kobj) 838 + { 839 + struct request_queue *q = 840 + container_of(kobj, struct request_queue, kobj); 841 + 842 + INIT_WORK(&q->release_work, __blk_release_queue); 843 + schedule_work(&q->release_work); 836 844 } 837 845 static const struct sysfs_ops queue_sysfs_ops = {
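The rewritten kerneldoc above spells out the constraint behind the split: the kobject release callback can run with preemption disabled, while the teardown it performs may sleep, so the sleeping part is handed to a workqueue. A minimal sketch of that hand-off for a hypothetical object, with refcounting and error handling elided:

struct gadget {
        struct kobject kobj;
        struct work_struct release_work;
        /* ... state whose teardown may sleep ... */
};

static void gadget_release_work(struct work_struct *work)
{
        struct gadget *g = container_of(work, struct gadget, release_work);

        /* process context: mutexes, flushing and large frees are all fine here */
        kfree(g);
}

/* ->release may run with preemption disabled, so only schedule the real work. */
static void gadget_release(struct kobject *kobj)
{
        struct gadget *g = container_of(kobj, struct gadget, kobj);

        INIT_WORK(&g->release_work, gadget_release_work);
        schedule_work(&g->release_work);
}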
+18 -4
block/blk-throttle.c
··· 27 27 #define MIN_THROTL_IOPS (10) 28 28 #define DFL_LATENCY_TARGET (-1L) 29 29 #define DFL_IDLE_THRESHOLD (0) 30 + #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */ 31 + #define LATENCY_FILTERED_SSD (0) 32 + /* 33 + * For HD, very small latency comes from sequential IO. Such IO is helpless to 34 + * help determine if its IO is impacted by others, hence we ignore the IO 35 + */ 36 + #define LATENCY_FILTERED_HD (1000L) /* 1ms */ 30 37 31 38 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT) 32 39 ··· 219 212 struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE]; 220 213 struct latency_bucket __percpu *latency_buckets; 221 214 unsigned long last_calculate_time; 215 + unsigned long filtered_latency; 222 216 223 217 bool track_bio_latency; 224 218 }; ··· 706 698 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, 707 699 unsigned long expires) 708 700 { 709 - unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice; 701 + unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice; 710 702 711 703 /* 712 704 * Since we are adjusting the throttle limit dynamically, the sleep ··· 2289 2281 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat), 2290 2282 bio_op(bio), lat); 2291 2283 2292 - if (tg->latency_target) { 2284 + if (tg->latency_target && lat >= tg->td->filtered_latency) { 2293 2285 int bucket; 2294 2286 unsigned int threshold; 2295 2287 ··· 2425 2417 void blk_throtl_register_queue(struct request_queue *q) 2426 2418 { 2427 2419 struct throtl_data *td; 2420 + int i; 2428 2421 2429 2422 td = q->td; 2430 2423 BUG_ON(!td); 2431 2424 2432 - if (blk_queue_nonrot(q)) 2425 + if (blk_queue_nonrot(q)) { 2433 2426 td->throtl_slice = DFL_THROTL_SLICE_SSD; 2434 - else 2427 + td->filtered_latency = LATENCY_FILTERED_SSD; 2428 + } else { 2435 2429 td->throtl_slice = DFL_THROTL_SLICE_HD; 2430 + td->filtered_latency = LATENCY_FILTERED_HD; 2431 + for (i = 0; i < LATENCY_BUCKET_SIZE; i++) 2432 + td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY; 2433 + } 2436 2434 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW 2437 2435 /* if no low limit, use previous default */ 2438 2436 td->throtl_slice = DFL_THROTL_SLICE_HD;
+1 -1
block/blk.h
··· 59 59 60 60 int blk_init_rl(struct request_list *rl, struct request_queue *q, 61 61 gfp_t gfp_mask); 62 - void blk_exit_rl(struct request_list *rl); 62 + void blk_exit_rl(struct request_queue *q, struct request_list *rl); 63 63 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 64 64 struct bio *bio); 65 65 void blk_queue_bypass_start(struct request_queue *q);
+15 -2
block/cfq-iosched.c
··· 38 38 static const int cfq_hist_divisor = 4; 39 39 40 40 /* 41 - * offset from end of service tree 41 + * offset from end of queue service tree for idle class 42 42 */ 43 43 #define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5) 44 + /* offset from end of group service tree under time slice mode */ 45 + #define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5) 46 + /* offset from end of group service under IOPS mode */ 47 + #define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5) 44 48 45 49 /* 46 50 * below this threshold, we consider thinktime immediate ··· 1366 1362 cfqg->vfraction = max_t(unsigned, vfr, 1); 1367 1363 } 1368 1364 1365 + static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd) 1366 + { 1367 + if (!iops_mode(cfqd)) 1368 + return CFQ_SLICE_MODE_GROUP_DELAY; 1369 + else 1370 + return CFQ_IOPS_MODE_GROUP_DELAY; 1371 + } 1372 + 1369 1373 static void 1370 1374 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) 1371 1375 { ··· 1393 1381 n = rb_last(&st->rb); 1394 1382 if (n) { 1395 1383 __cfqg = rb_entry_cfqg(n); 1396 - cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; 1384 + cfqg->vdisktime = __cfqg->vdisktime + 1385 + cfq_get_cfqg_vdisktime_delay(cfqd); 1397 1386 } else 1398 1387 cfqg->vdisktime = st->min_vdisktime; 1399 1388 cfq_group_service_tree_add(st, cfqg);
+1 -1
crypto/asymmetric_keys/public_key.c
··· 141 141 * signature and returns that to us. 142 142 */ 143 143 ret = crypto_akcipher_verify(req); 144 - if (ret == -EINPROGRESS) { 144 + if ((ret == -EINPROGRESS) || (ret == -EBUSY)) { 145 145 wait_for_completion(&compl.completion); 146 146 ret = compl.err; 147 147 }
+2 -2
crypto/asymmetric_keys/verify_pefile.c
··· 381 381 } 382 382 383 383 error: 384 - kfree(desc); 384 + kzfree(desc); 385 385 error_no_desc: 386 386 crypto_free_shash(tfm); 387 387 kleave(" = %d", ret); ··· 450 450 ret = pefile_digest_pe(pebuf, pelen, &ctx); 451 451 452 452 error: 453 - kfree(ctx.digest); 453 + kzfree(ctx.digest); 454 454 return ret; 455 455 }
+1
crypto/asymmetric_keys/x509_cert_parser.c
··· 102 102 } 103 103 } 104 104 105 + ret = -ENOMEM; 105 106 cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL); 106 107 if (!cert->pub->key) 107 108 goto error_decode;
+2 -3
crypto/drbg.c
··· 1767 1767 break; 1768 1768 case -EINPROGRESS: 1769 1769 case -EBUSY: 1770 - ret = wait_for_completion_interruptible( 1771 - &drbg->ctr_completion); 1772 - if (!ret && !drbg->ctr_async_err) { 1770 + wait_for_completion(&drbg->ctr_completion); 1771 + if (!drbg->ctr_async_err) { 1773 1772 reinit_completion(&drbg->ctr_completion); 1774 1773 break; 1775 1774 }
+2 -4
crypto/gcm.c
··· 152 152 153 153 err = crypto_skcipher_encrypt(&data->req); 154 154 if (err == -EINPROGRESS || err == -EBUSY) { 155 - err = wait_for_completion_interruptible( 156 - &data->result.completion); 157 - if (!err) 158 - err = data->result.err; 155 + wait_for_completion(&data->result.completion); 156 + err = data->result.err; 159 157 } 160 158 161 159 if (err)
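The public_key, drbg and gcm hunks above all make the same change: once an asynchronous crypto request returns -EINPROGRESS or -EBUSY, the caller waits for the completion unconditionally, because an interruptible wait can return early while the request is still in flight. The underlying completion pattern, sketched with hypothetical names and assuming op_done() was registered as the request callback with the op_result as its data pointer:

struct op_result {
        struct completion completion;
        int err;
};

/* Request callback: record the final status and wake the waiter. */
static void op_done(struct crypto_async_request *req, int err)
{
        struct op_result *res = req->data;

        if (err == -EINPROGRESS)
                return;         /* backlog notification, not the final result */

        res->err = err;
        complete(&res->completion);
}

/* Caller side: submit, then wait for the real answer if it went async. */
static int run_and_wait(struct skcipher_request *req, struct op_result *res)
{
        int err;

        init_completion(&res->completion);
        /* op_done/res are assumed to have been set via skcipher_request_set_callback() */
        err = crypto_skcipher_encrypt(req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                wait_for_completion(&res->completion);
                err = res->err;
        }
        return err;
}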
+25 -13
drivers/acpi/acpica/tbutils.c
··· 416 416 } 417 417 } 418 418 419 - table_desc->validation_count++; 420 - if (table_desc->validation_count == 0) { 421 - ACPI_ERROR((AE_INFO, 422 - "Table %p, Validation count is zero after increment\n", 423 - table_desc)); 424 - table_desc->validation_count--; 425 - return_ACPI_STATUS(AE_LIMIT); 419 + if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) { 420 + table_desc->validation_count++; 421 + 422 + /* 423 + * Detect validation_count overflows to ensure that the warning 424 + * message will only be printed once. 425 + */ 426 + if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) { 427 + ACPI_WARNING((AE_INFO, 428 + "Table %p, Validation count overflows\n", 429 + table_desc)); 430 + } 426 431 } 427 432 428 433 *out_table = table_desc->pointer; ··· 454 449 455 450 ACPI_FUNCTION_TRACE(acpi_tb_put_table); 456 451 457 - if (table_desc->validation_count == 0) { 458 - ACPI_WARNING((AE_INFO, 459 - "Table %p, Validation count is zero before decrement\n", 460 - table_desc)); 461 - return_VOID; 452 + if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) { 453 + table_desc->validation_count--; 454 + 455 + /* 456 + * Detect validation_count underflows to ensure that the warning 457 + * message will only be printed once. 458 + */ 459 + if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) { 460 + ACPI_WARNING((AE_INFO, 461 + "Table %p, Validation count underflows\n", 462 + table_desc)); 463 + return_VOID; 464 + } 462 465 } 463 - table_desc->validation_count--; 464 466 465 467 if (table_desc->validation_count == 0) { 466 468
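Rather than treating a wrapped validation count as a hard error, the get/put paths above saturate it at ACPI_MAX_TABLE_VALIDATIONS, warn once in each direction, and stop adjusting a saturated count (so such a table is simply never unmapped). The counter logic on its own, as a sketch with a made-up limit:

#define EX_MAX_VALIDATIONS 0x4000       /* made-up stand-in for the real limit */

static void table_acquire(unsigned short *count)
{
        if (*count < EX_MAX_VALIDATIONS) {
                (*count)++;
                /* the limit is crossed at most once, so this prints once */
                if (*count >= EX_MAX_VALIDATIONS)
                        pr_warn("table validation count saturated\n");
        }
}

static void table_release(unsigned short *count)
{
        if (*count < EX_MAX_VALIDATIONS) {
                (*count)--;
                /* a decrement from 0 wraps high and trips the same test once */
                if (*count >= EX_MAX_VALIDATIONS) {
                        pr_warn("table validation count underflow\n");
                        return;
                }
        }
        /* a saturated count is left untouched, keeping the table mapped */
}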
-9
drivers/acpi/acpica/utresrc.c
··· 474 474 return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); 475 475 } 476 476 477 - /* 478 - * The end_tag opcode must be followed by a zero byte. 479 - * Although this byte is technically defined to be a checksum, 480 - * in practice, all ASL compilers set this byte to zero. 481 - */ 482 - if (*(aml + 1) != 0) { 483 - return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); 484 - } 485 - 486 477 /* Return the pointer to the end_tag if requested */ 487 478 488 479 if (!user_function) {
+14 -8
drivers/acpi/arm64/iort.c
··· 666 666 int ret = -ENODEV; 667 667 struct fwnode_handle *iort_fwnode; 668 668 669 - /* 670 - * If we already translated the fwspec there 671 - * is nothing left to do, return the iommu_ops. 672 - */ 673 - ops = iort_fwspec_iommu_ops(dev->iommu_fwspec); 674 - if (ops) 675 - return ops; 676 - 677 669 if (node) { 678 670 iort_fwnode = iort_get_fwnode(node); 679 671 if (!iort_fwnode) ··· 727 735 u32 streamid = 0; 728 736 int err; 729 737 738 + /* 739 + * If we already translated the fwspec there 740 + * is nothing left to do, return the iommu_ops. 741 + */ 742 + ops = iort_fwspec_iommu_ops(dev->iommu_fwspec); 743 + if (ops) 744 + return ops; 745 + 730 746 if (dev_is_pci(dev)) { 731 747 struct pci_bus *bus = to_pci_dev(dev)->bus; 732 748 u32 rid; ··· 781 781 err = iort_add_device_replay(ops, dev); 782 782 if (err) 783 783 ops = ERR_PTR(err); 784 + 785 + /* Ignore all other errors apart from EPROBE_DEFER */ 786 + if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) { 787 + dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops)); 788 + ops = NULL; 789 + } 784 790 785 791 return ops; 786 792 }
+1 -1
drivers/acpi/battery.c
··· 782 782 if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) || 783 783 (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) && 784 784 (battery->capacity_now <= battery->alarm))) 785 - pm_wakeup_hard_event(&battery->device->dev); 785 + pm_wakeup_event(&battery->device->dev, 0); 786 786 787 787 return result; 788 788 }
+3 -4
drivers/acpi/button.c
··· 113 113 114 114 static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); 115 115 static struct acpi_device *lid_device; 116 - static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; 116 + static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; 117 117 118 118 static unsigned long lid_report_interval __read_mostly = 500; 119 119 module_param(lid_report_interval, ulong, 0644); ··· 217 217 } 218 218 219 219 if (state) 220 - pm_wakeup_hard_event(&device->dev); 220 + pm_wakeup_event(&device->dev, 0); 221 221 222 222 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); 223 223 if (ret == NOTIFY_DONE) ··· 402 402 } else { 403 403 int keycode; 404 404 405 - pm_wakeup_hard_event(&device->dev); 405 + pm_wakeup_event(&device->dev, 0); 406 406 if (button->suspended) 407 407 break; 408 408 ··· 534 534 lid_device = device; 535 535 } 536 536 537 - device_init_wakeup(&device->dev, true); 538 537 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); 539 538 return 0; 540 539
+1 -2
drivers/acpi/device_pm.c
··· 24 24 #include <linux/pm_qos.h> 25 25 #include <linux/pm_domain.h> 26 26 #include <linux/pm_runtime.h> 27 - #include <linux/suspend.h> 28 27 29 28 #include "internal.h" 30 29 ··· 399 400 mutex_lock(&acpi_pm_notifier_lock); 400 401 401 402 if (adev->wakeup.flags.notifier_present) { 402 - pm_wakeup_ws_event(adev->wakeup.ws, 0, true); 403 + __pm_wakeup_event(adev->wakeup.ws, 0); 403 404 if (adev->wakeup.context.work.func) 404 405 queue_pm_work(&adev->wakeup.context.work); 405 406 }
+39 -32
drivers/acpi/scan.c
··· 1371 1371 iort_set_dma_mask(dev); 1372 1372 1373 1373 iommu = iort_iommu_configure(dev); 1374 - if (IS_ERR(iommu)) 1375 - return PTR_ERR(iommu); 1374 + if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER) 1375 + return -EPROBE_DEFER; 1376 1376 1377 1377 size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); 1378 1378 /* ··· 1428 1428 adev->flags.coherent_dma = cca; 1429 1429 } 1430 1430 1431 + static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data) 1432 + { 1433 + bool *is_spi_i2c_slave_p = data; 1434 + 1435 + if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) 1436 + return 1; 1437 + 1438 + /* 1439 + * devices that are connected to UART still need to be enumerated to 1440 + * platform bus 1441 + */ 1442 + if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART) 1443 + *is_spi_i2c_slave_p = true; 1444 + 1445 + /* no need to do more checking */ 1446 + return -1; 1447 + } 1448 + 1449 + static bool acpi_is_spi_i2c_slave(struct acpi_device *device) 1450 + { 1451 + struct list_head resource_list; 1452 + bool is_spi_i2c_slave = false; 1453 + 1454 + INIT_LIST_HEAD(&resource_list); 1455 + acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, 1456 + &is_spi_i2c_slave); 1457 + acpi_dev_free_resource_list(&resource_list); 1458 + 1459 + return is_spi_i2c_slave; 1460 + } 1461 + 1431 1462 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, 1432 1463 int type, unsigned long long sta) 1433 1464 { ··· 1474 1443 acpi_bus_get_flags(device); 1475 1444 device->flags.match_driver = false; 1476 1445 device->flags.initialized = true; 1446 + device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device); 1477 1447 acpi_device_clear_enumerated(device); 1478 1448 device_initialize(&device->dev); 1479 1449 dev_set_uevent_suppress(&device->dev, true); ··· 1759 1727 return AE_OK; 1760 1728 } 1761 1729 1762 - static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data) 1763 - { 1764 - bool *is_spi_i2c_slave_p = data; 1765 - 1766 - if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) 1767 - return 1; 1768 - 1769 - /* 1770 - * devices that are connected to UART still need to be enumerated to 1771 - * platform bus 1772 - */ 1773 - if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART) 1774 - *is_spi_i2c_slave_p = true; 1775 - 1776 - /* no need to do more checking */ 1777 - return -1; 1778 - } 1779 - 1780 1730 static void acpi_default_enumeration(struct acpi_device *device) 1781 1731 { 1782 - struct list_head resource_list; 1783 - bool is_spi_i2c_slave = false; 1784 - 1785 1732 /* 1786 1733 * Do not enumerate SPI/I2C slaves as they will be enumerated by their 1787 1734 * respective parents. 
1788 1735 */ 1789 - INIT_LIST_HEAD(&resource_list); 1790 - acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave, 1791 - &is_spi_i2c_slave); 1792 - acpi_dev_free_resource_list(&resource_list); 1793 - if (!is_spi_i2c_slave) { 1736 + if (!device->flags.spi_i2c_slave) { 1794 1737 acpi_create_platform_device(device, NULL); 1795 1738 acpi_device_set_enumerated(device); 1796 1739 } else { ··· 1861 1854 return; 1862 1855 1863 1856 device->flags.match_driver = true; 1864 - if (ret > 0) { 1857 + if (ret > 0 && !device->flags.spi_i2c_slave) { 1865 1858 acpi_device_set_enumerated(device); 1866 1859 goto ok; 1867 1860 } ··· 1870 1863 if (ret < 0) 1871 1864 return; 1872 1865 1873 - if (device->pnp.type.platform_id) 1874 - acpi_default_enumeration(device); 1875 - else 1866 + if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave) 1876 1867 acpi_device_set_enumerated(device); 1868 + else 1869 + acpi_default_enumeration(device); 1877 1870 1878 1871 ok: 1879 1872 list_for_each_entry(child, &device->children, node)
-28
drivers/acpi/sleep.c
··· 663 663 acpi_os_wait_events_complete(); 664 664 if (acpi_sci_irq_valid()) 665 665 enable_irq_wake(acpi_sci_irq); 666 - 667 666 return 0; 668 - } 669 - 670 - static void acpi_freeze_wake(void) 671 - { 672 - /* 673 - * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means 674 - * that the SCI has triggered while suspended, so cancel the wakeup in 675 - * case it has not been a wakeup event (the GPEs will be checked later). 676 - */ 677 - if (acpi_sci_irq_valid() && 678 - !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) 679 - pm_system_cancel_wakeup(); 680 - } 681 - 682 - static void acpi_freeze_sync(void) 683 - { 684 - /* 685 - * Process all pending events in case there are any wakeup ones. 686 - * 687 - * The EC driver uses the system workqueue, so that one needs to be 688 - * flushed too. 689 - */ 690 - acpi_os_wait_events_complete(); 691 - flush_scheduled_work(); 692 667 } 693 668 694 669 static void acpi_freeze_restore(void) ··· 671 696 acpi_disable_wakeup_devices(ACPI_STATE_S0); 672 697 if (acpi_sci_irq_valid()) 673 698 disable_irq_wake(acpi_sci_irq); 674 - 675 699 acpi_enable_all_runtime_gpes(); 676 700 } 677 701 ··· 682 708 static const struct platform_freeze_ops acpi_freeze_ops = { 683 709 .begin = acpi_freeze_begin, 684 710 .prepare = acpi_freeze_prepare, 685 - .wake = acpi_freeze_wake, 686 - .sync = acpi_freeze_sync, 687 711 .restore = acpi_freeze_restore, 688 712 .end = acpi_freeze_end, 689 713 };
+5 -2
drivers/acpi/sysfs.c
··· 333 333 container_of(bin_attr, struct acpi_table_attr, attr); 334 334 struct acpi_table_header *table_header = NULL; 335 335 acpi_status status; 336 + ssize_t rc; 336 337 337 338 status = acpi_get_table(table_attr->name, table_attr->instance, 338 339 &table_header); 339 340 if (ACPI_FAILURE(status)) 340 341 return -ENODEV; 341 342 342 - return memory_read_from_buffer(buf, count, &offset, 343 - table_header, table_header->length); 343 + rc = memory_read_from_buffer(buf, count, &offset, table_header, 344 + table_header->length); 345 + acpi_put_table(table_header); 346 + return rc; 344 347 } 345 348 346 349 static int acpi_table_attr_init(struct kobject *tables_obj,
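The hunk above pairs acpi_get_table() with acpi_put_table() so the mapped table is released once the read completes. A minimal sketch of that get/put pattern follows; the helper name is illustrative and not part of the patch:

#include <linux/acpi.h>

/* Hypothetical helper: look up a table by signature, report its length,
 * and balance the reference taken by acpi_get_table(). */
static ssize_t example_table_length(char *signature, u32 instance)
{
        struct acpi_table_header *table = NULL;
        acpi_status status;
        ssize_t len;

        status = acpi_get_table(signature, instance, &table);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        len = table->length;
        acpi_put_table(table);          /* drop the mapping/reference */
        return len;
}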
+38
drivers/ata/ahci.c
··· 1364 1364 {} 1365 1365 #endif 1366 1366 1367 + /* 1368 + * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected 1369 + * as DUMMY, or detected but eventually get a "link down" and never get up 1370 + * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the 1371 + * port_map may hold a value of 0x00. 1372 + * 1373 + * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports 1374 + * and can significantly reduce the occurrence of the problem. 1375 + * 1376 + * https://bugzilla.kernel.org/show_bug.cgi?id=189471 1377 + */ 1378 + static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv, 1379 + struct pci_dev *pdev) 1380 + { 1381 + static const struct dmi_system_id sysids[] = { 1382 + { 1383 + .ident = "Acer Switch Alpha 12", 1384 + .matches = { 1385 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 1386 + DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271") 1387 + }, 1388 + }, 1389 + { } 1390 + }; 1391 + 1392 + if (dmi_check_system(sysids)) { 1393 + dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n"); 1394 + if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) { 1395 + hpriv->port_map = 0x7; 1396 + hpriv->cap = 0xC734FF02; 1397 + } 1398 + } 1399 + } 1400 + 1367 1401 #ifdef CONFIG_ARM64 1368 1402 /* 1369 1403 * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently. ··· 1669 1635 dev_info(&pdev->dev, 1670 1636 "online status unreliable, applying workaround\n"); 1671 1637 } 1638 + 1639 + 1640 + /* Acer SA5-271 workaround modifies private_data */ 1641 + acer_sa5_271_workaround(hpriv, pdev); 1672 1642 1673 1643 /* CAP.NP sometimes indicate the index of the last enabled 1674 1644 * port, at other times, that of the last possible port, so
+3 -2
drivers/ata/libahci_platform.c
··· 514 514 515 515 irq = platform_get_irq(pdev, 0); 516 516 if (irq <= 0) { 517 - dev_err(dev, "no irq\n"); 518 - return -EINVAL; 517 + if (irq != -EPROBE_DEFER) 518 + dev_err(dev, "no irq\n"); 519 + return irq; 519 520 } 520 521 521 522 hpriv->irq = irq;
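The change above propagates the errno from platform_get_irq() (notably -EPROBE_DEFER) instead of flattening every failure to -EINVAL, and stays quiet for deferred probes. A condensed sketch of that idiom, not the driver's exact probe path, treating only negative values as errors:

#include <linux/platform_device.h>

static int example_get_irq(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);

        if (irq < 0) {
                if (irq != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "no irq\n");
                return irq;     /* pass -EPROBE_DEFER up to the driver core */
        }
        return irq;
}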
+1 -1
drivers/ata/libata-core.c
··· 6800 6800 } 6801 6801 6802 6802 force_ent->port = simple_strtoul(id, &endp, 10); 6803 - if (p == endp || *endp != '\0') { 6803 + if (id == endp || *endp != '\0') { 6804 6804 *reason = "invalid port/link"; 6805 6805 return -EINVAL; 6806 6806 }
+8 -5
drivers/ata/sata_mv.c
··· 4067 4067 struct ata_host *host; 4068 4068 struct mv_host_priv *hpriv; 4069 4069 struct resource *res; 4070 - void __iomem *mmio; 4071 4070 int n_ports = 0, irq = 0; 4072 4071 int rc; 4073 4072 int port; ··· 4085 4086 * Get the register base first 4086 4087 */ 4087 4088 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 4088 - mmio = devm_ioremap_resource(&pdev->dev, res); 4089 - if (IS_ERR(mmio)) 4090 - return PTR_ERR(mmio); 4089 + if (res == NULL) 4090 + return -EINVAL; 4091 4091 4092 4092 /* allocate host */ 4093 4093 if (pdev->dev.of_node) { ··· 4130 4132 hpriv->board_idx = chip_soc; 4131 4133 4132 4134 host->iomap = NULL; 4133 - hpriv->base = mmio - SATAHC0_REG_BASE; 4135 + hpriv->base = devm_ioremap(&pdev->dev, res->start, 4136 + resource_size(res)); 4137 + if (!hpriv->base) 4138 + return -ENOMEM; 4139 + 4140 + hpriv->base -= SATAHC0_REG_BASE; 4134 4141 4135 4142 hpriv->clk = clk_get(&pdev->dev, NULL); 4136 4143 if (IS_ERR(hpriv->clk))
+12 -3
drivers/ata/sata_rcar.c
··· 890 890 dev_err(&pdev->dev, "failed to get access to sata clock\n"); 891 891 return PTR_ERR(priv->clk); 892 892 } 893 - clk_prepare_enable(priv->clk); 893 + 894 + ret = clk_prepare_enable(priv->clk); 895 + if (ret) 896 + return ret; 894 897 895 898 host = ata_host_alloc(&pdev->dev, 1); 896 899 if (!host) { ··· 973 970 struct ata_host *host = dev_get_drvdata(dev); 974 971 struct sata_rcar_priv *priv = host->private_data; 975 972 void __iomem *base = priv->base; 973 + int ret; 976 974 977 - clk_prepare_enable(priv->clk); 975 + ret = clk_prepare_enable(priv->clk); 976 + if (ret) 977 + return ret; 978 978 979 979 /* ack and mask */ 980 980 iowrite32(0, base + SATAINTSTAT_REG); ··· 994 988 { 995 989 struct ata_host *host = dev_get_drvdata(dev); 996 990 struct sata_rcar_priv *priv = host->private_data; 991 + int ret; 997 992 998 - clk_prepare_enable(priv->clk); 993 + ret = clk_prepare_enable(priv->clk); 994 + if (ret) 995 + return ret; 999 996 1000 997 sata_rcar_setup_port(host); 1001 998
-5
drivers/base/base.h
··· 126 126 extern void driver_remove_groups(struct device_driver *drv, 127 127 const struct attribute_group **groups); 128 128 129 - extern int device_add_groups(struct device *dev, 130 - const struct attribute_group **groups); 131 - extern void device_remove_groups(struct device *dev, 132 - const struct attribute_group **groups); 133 - 134 129 extern char *make_class_name(const char *name, struct kobject *kobj); 135 130 136 131 extern int devres_release_all(struct device *dev);
+132
drivers/base/core.c
··· 1026 1026 { 1027 1027 return sysfs_create_groups(&dev->kobj, groups); 1028 1028 } 1029 + EXPORT_SYMBOL_GPL(device_add_groups); 1029 1030 1030 1031 void device_remove_groups(struct device *dev, 1031 1032 const struct attribute_group **groups) 1032 1033 { 1033 1034 sysfs_remove_groups(&dev->kobj, groups); 1034 1035 } 1036 + EXPORT_SYMBOL_GPL(device_remove_groups); 1037 + 1038 + union device_attr_group_devres { 1039 + const struct attribute_group *group; 1040 + const struct attribute_group **groups; 1041 + }; 1042 + 1043 + static int devm_attr_group_match(struct device *dev, void *res, void *data) 1044 + { 1045 + return ((union device_attr_group_devres *)res)->group == data; 1046 + } 1047 + 1048 + static void devm_attr_group_remove(struct device *dev, void *res) 1049 + { 1050 + union device_attr_group_devres *devres = res; 1051 + const struct attribute_group *group = devres->group; 1052 + 1053 + dev_dbg(dev, "%s: removing group %p\n", __func__, group); 1054 + sysfs_remove_group(&dev->kobj, group); 1055 + } 1056 + 1057 + static void devm_attr_groups_remove(struct device *dev, void *res) 1058 + { 1059 + union device_attr_group_devres *devres = res; 1060 + const struct attribute_group **groups = devres->groups; 1061 + 1062 + dev_dbg(dev, "%s: removing groups %p\n", __func__, groups); 1063 + sysfs_remove_groups(&dev->kobj, groups); 1064 + } 1065 + 1066 + /** 1067 + * devm_device_add_group - given a device, create a managed attribute group 1068 + * @dev: The device to create the group for 1069 + * @grp: The attribute group to create 1070 + * 1071 + * This function creates a group for the first time. It will explicitly 1072 + * warn and error if any of the attribute files being created already exist. 1073 + * 1074 + * Returns 0 on success or error code on failure. 1075 + */ 1076 + int devm_device_add_group(struct device *dev, const struct attribute_group *grp) 1077 + { 1078 + union device_attr_group_devres *devres; 1079 + int error; 1080 + 1081 + devres = devres_alloc(devm_attr_group_remove, 1082 + sizeof(*devres), GFP_KERNEL); 1083 + if (!devres) 1084 + return -ENOMEM; 1085 + 1086 + error = sysfs_create_group(&dev->kobj, grp); 1087 + if (error) { 1088 + devres_free(devres); 1089 + return error; 1090 + } 1091 + 1092 + devres->group = grp; 1093 + devres_add(dev, devres); 1094 + return 0; 1095 + } 1096 + EXPORT_SYMBOL_GPL(devm_device_add_group); 1097 + 1098 + /** 1099 + * devm_device_remove_group: remove a managed group from a device 1100 + * @dev: device to remove the group from 1101 + * @grp: group to remove 1102 + * 1103 + * This function removes a group of attributes from a device. The attributes 1104 + * previously have to have been created for this group, otherwise it will fail. 1105 + */ 1106 + void devm_device_remove_group(struct device *dev, 1107 + const struct attribute_group *grp) 1108 + { 1109 + WARN_ON(devres_release(dev, devm_attr_group_remove, 1110 + devm_attr_group_match, 1111 + /* cast away const */ (void *)grp)); 1112 + } 1113 + EXPORT_SYMBOL_GPL(devm_device_remove_group); 1114 + 1115 + /** 1116 + * devm_device_add_groups - create a bunch of managed attribute groups 1117 + * @dev: The device to create the group for 1118 + * @groups: The attribute groups to create, NULL terminated 1119 + * 1120 + * This function creates a bunch of managed attribute groups. If an error 1121 + * occurs when creating a group, all previously created groups will be 1122 + * removed, unwinding everything back to the original state when this 1123 + * function was called. 
It will explicitly warn and error if any of the 1124 + * attribute files being created already exist. 1125 + * 1126 + * Returns 0 on success or error code from sysfs_create_group on failure. 1127 + */ 1128 + int devm_device_add_groups(struct device *dev, 1129 + const struct attribute_group **groups) 1130 + { 1131 + union device_attr_group_devres *devres; 1132 + int error; 1133 + 1134 + devres = devres_alloc(devm_attr_groups_remove, 1135 + sizeof(*devres), GFP_KERNEL); 1136 + if (!devres) 1137 + return -ENOMEM; 1138 + 1139 + error = sysfs_create_groups(&dev->kobj, groups); 1140 + if (error) { 1141 + devres_free(devres); 1142 + return error; 1143 + } 1144 + 1145 + devres->groups = groups; 1146 + devres_add(dev, devres); 1147 + return 0; 1148 + } 1149 + EXPORT_SYMBOL_GPL(devm_device_add_groups); 1150 + 1151 + /** 1152 + * devm_device_remove_groups - remove a list of managed groups 1153 + * 1154 + * @dev: The device for the groups to be removed from 1155 + * @groups: NULL terminated list of groups to be removed 1156 + * 1157 + * If groups is not NULL, remove the specified groups from the device. 1158 + */ 1159 + void devm_device_remove_groups(struct device *dev, 1160 + const struct attribute_group **groups) 1161 + { 1162 + WARN_ON(devres_release(dev, devm_attr_groups_remove, 1163 + devm_attr_group_match, 1164 + /* cast away const */ (void *)groups)); 1165 + } 1166 + EXPORT_SYMBOL_GPL(devm_device_remove_groups); 1035 1167 1036 1168 static int device_add_attrs(struct device *dev) 1037 1169 {
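For context, a minimal sketch of how a driver could consume the new devm_device_add_groups() helper added above; the example_* names are illustrative and not part of the patch. The devres release handler removes the groups automatically when the device is unbound, so no remove-side call is needed:

#include <linux/device.h>
#include <linux/platform_device.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "example\n");
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
        &dev_attr_example.attr,
        NULL,
};
static const struct attribute_group example_group = {
        .attrs = example_attrs,
};
static const struct attribute_group *example_groups[] = {
        &example_group,
        NULL,
};

static int example_probe(struct platform_device *pdev)
{
        /* groups are torn down by devres when the device is unbound */
        return devm_device_add_groups(&pdev->dev, example_groups);
}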
+4
drivers/base/dd.c
··· 259 259 if (dev->bus) 260 260 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 261 261 BUS_NOTIFY_BOUND_DRIVER, dev); 262 + 263 + kobject_uevent(&dev->kobj, KOBJ_BIND); 262 264 } 263 265 264 266 static int driver_sysfs_add(struct device *dev) ··· 850 848 blocking_notifier_call_chain(&dev->bus->p->bus_notifier, 851 849 BUS_NOTIFY_UNBOUND_DRIVER, 852 850 dev); 851 + 852 + kobject_uevent(&dev->kobj, KOBJ_UNBIND); 853 853 } 854 854 } 855 855
+5
drivers/base/power/main.c
··· 1091 1091 if (async_error) 1092 1092 goto Complete; 1093 1093 1094 + if (pm_wakeup_pending()) { 1095 + async_error = -EBUSY; 1096 + goto Complete; 1097 + } 1098 + 1094 1099 if (dev->power.syscore || dev->power.direct_complete) 1095 1100 goto Complete; 1096 1101
+6 -12
drivers/base/power/wakeup.c
··· 28 28 /* First wakeup IRQ seen by the kernel in the last cycle. */ 29 29 unsigned int pm_wakeup_irq __read_mostly; 30 30 31 - /* If greater than 0 and the system is suspending, terminate the suspend. */ 32 - static atomic_t pm_abort_suspend __read_mostly; 31 + /* If set and the system is suspending, terminate the suspend. */ 32 + static bool pm_abort_suspend __read_mostly; 33 33 34 34 /* 35 35 * Combined counters of registered wakeup events and wakeup events in progress. ··· 855 855 pm_print_active_wakeup_sources(); 856 856 } 857 857 858 - return ret || atomic_read(&pm_abort_suspend) > 0; 858 + return ret || pm_abort_suspend; 859 859 } 860 860 861 861 void pm_system_wakeup(void) 862 862 { 863 - atomic_inc(&pm_abort_suspend); 863 + pm_abort_suspend = true; 864 864 freeze_wake(); 865 865 } 866 866 EXPORT_SYMBOL_GPL(pm_system_wakeup); 867 867 868 - void pm_system_cancel_wakeup(void) 868 + void pm_wakeup_clear(void) 869 869 { 870 - atomic_dec(&pm_abort_suspend); 871 - } 872 - 873 - void pm_wakeup_clear(bool reset) 874 - { 870 + pm_abort_suspend = false; 875 871 pm_wakeup_irq = 0; 876 - if (reset) 877 - atomic_set(&pm_abort_suspend, 0); 878 872 } 879 873 880 874 void pm_system_irq_wakeup(unsigned int irq_number)
+3
drivers/block/loop.c
··· 608 608 */ 609 609 static int loop_flush(struct loop_device *lo) 610 610 { 611 + /* loop not yet configured, no running thread, nothing to flush */ 612 + if (lo->lo_state != Lo_bound) 613 + return 0; 611 614 return loop_switch(lo, NULL); 612 615 } 613 616
+5 -10
drivers/block/nbd.c
··· 937 937 return -ENOSPC; 938 938 } 939 939 940 - /* Reset all properties of an NBD device */ 941 - static void nbd_reset(struct nbd_device *nbd) 942 - { 943 - nbd->config = NULL; 944 - nbd->tag_set.timeout = 0; 945 - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); 946 - } 947 - 948 940 static void nbd_bdev_reset(struct block_device *bdev) 949 941 { 950 942 if (bdev->bd_openers > 1) ··· 1021 1029 } 1022 1030 kfree(config->socks); 1023 1031 } 1024 - nbd_reset(nbd); 1032 + kfree(nbd->config); 1033 + nbd->config = NULL; 1034 + 1035 + nbd->tag_set.timeout = 0; 1036 + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); 1025 1037 1026 1038 mutex_unlock(&nbd->config_lock); 1027 1039 nbd_put(nbd); ··· 1479 1483 disk->fops = &nbd_fops; 1480 1484 disk->private_data = nbd; 1481 1485 sprintf(disk->disk_name, "nbd%d", index); 1482 - nbd_reset(nbd); 1483 1486 add_disk(disk); 1484 1487 nbd_total_devices++; 1485 1488 return index;
+2
drivers/block/rbd.c
··· 4023 4023 4024 4024 switch (req_op(rq)) { 4025 4025 case REQ_OP_DISCARD: 4026 + case REQ_OP_WRITE_ZEROES: 4026 4027 op_type = OBJ_OP_DISCARD; 4027 4028 break; 4028 4029 case REQ_OP_WRITE: ··· 4421 4420 q->limits.discard_granularity = segment_size; 4422 4421 q->limits.discard_alignment = segment_size; 4423 4422 blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); 4423 + blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); 4424 4424 4425 4425 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) 4426 4426 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+12 -14
drivers/block/xen-blkback/blkback.c
··· 609 609 unsigned long timeout; 610 610 int ret; 611 611 612 - xen_blkif_get(blkif); 613 - 614 612 set_freezable(); 615 613 while (!kthread_should_stop()) { 616 614 if (try_to_freeze()) ··· 663 665 print_stats(ring); 664 666 665 667 ring->xenblkd = NULL; 666 - xen_blkif_put(blkif); 667 668 668 669 return 0; 669 670 } ··· 1433 1436 static void make_response(struct xen_blkif_ring *ring, u64 id, 1434 1437 unsigned short op, int st) 1435 1438 { 1436 - struct blkif_response resp; 1439 + struct blkif_response *resp; 1437 1440 unsigned long flags; 1438 1441 union blkif_back_rings *blk_rings; 1439 1442 int notify; 1440 - 1441 - resp.id = id; 1442 - resp.operation = op; 1443 - resp.status = st; 1444 1443 1445 1444 spin_lock_irqsave(&ring->blk_ring_lock, flags); 1446 1445 blk_rings = &ring->blk_rings; 1447 1446 /* Place on the response ring for the relevant domain. */ 1448 1447 switch (ring->blkif->blk_protocol) { 1449 1448 case BLKIF_PROTOCOL_NATIVE: 1450 - memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), 1451 - &resp, sizeof(resp)); 1449 + resp = RING_GET_RESPONSE(&blk_rings->native, 1450 + blk_rings->native.rsp_prod_pvt); 1452 1451 break; 1453 1452 case BLKIF_PROTOCOL_X86_32: 1454 - memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), 1455 - &resp, sizeof(resp)); 1453 + resp = RING_GET_RESPONSE(&blk_rings->x86_32, 1454 + blk_rings->x86_32.rsp_prod_pvt); 1456 1455 break; 1457 1456 case BLKIF_PROTOCOL_X86_64: 1458 - memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), 1459 - &resp, sizeof(resp)); 1457 + resp = RING_GET_RESPONSE(&blk_rings->x86_64, 1458 + blk_rings->x86_64.rsp_prod_pvt); 1460 1459 break; 1461 1460 default: 1462 1461 BUG(); 1463 1462 } 1463 + 1464 + resp->id = id; 1465 + resp->operation = op; 1466 + resp->status = st; 1467 + 1464 1468 blk_rings->common.rsp_prod_pvt++; 1465 1469 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); 1466 1470 spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
+6 -20
drivers/block/xen-blkback/common.h
··· 75 75 struct blkif_common_request { 76 76 char dummy; 77 77 }; 78 - struct blkif_common_response { 79 - char dummy; 80 - }; 78 + 79 + /* i386 protocol version */ 81 80 82 81 struct blkif_x86_32_request_rw { 83 82 uint8_t nr_segments; /* number of segments */ ··· 128 129 } u; 129 130 } __attribute__((__packed__)); 130 131 131 - /* i386 protocol version */ 132 - #pragma pack(push, 4) 133 - struct blkif_x86_32_response { 134 - uint64_t id; /* copied from request */ 135 - uint8_t operation; /* copied from request */ 136 - int16_t status; /* BLKIF_RSP_??? */ 137 - }; 138 - #pragma pack(pop) 139 132 /* x86_64 protocol version */ 140 133 141 134 struct blkif_x86_64_request_rw { ··· 184 193 } u; 185 194 } __attribute__((__packed__)); 186 195 187 - struct blkif_x86_64_response { 188 - uint64_t __attribute__((__aligned__(8))) id; 189 - uint8_t operation; /* copied from request */ 190 - int16_t status; /* BLKIF_RSP_??? */ 191 - }; 192 - 193 196 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, 194 - struct blkif_common_response); 197 + struct blkif_response); 195 198 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, 196 - struct blkif_x86_32_response); 199 + struct blkif_response __packed); 197 200 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, 198 - struct blkif_x86_64_response); 201 + struct blkif_response); 199 202 200 203 union blkif_back_rings { 201 204 struct blkif_back_ring native; ··· 266 281 267 282 wait_queue_head_t wq; 268 283 atomic_t inflight; 284 + bool active; 269 285 /* One thread per blkif ring. */ 270 286 struct task_struct *xenblkd; 271 287 unsigned int waiting_reqs;
+8 -7
drivers/block/xen-blkback/xenbus.c
··· 159 159 init_waitqueue_head(&ring->shutdown_wq); 160 160 ring->blkif = blkif; 161 161 ring->st_print = jiffies; 162 - xen_blkif_get(blkif); 162 + ring->active = true; 163 163 } 164 164 165 165 return 0; ··· 249 249 struct xen_blkif_ring *ring = &blkif->rings[r]; 250 250 unsigned int i = 0; 251 251 252 + if (!ring->active) 253 + continue; 254 + 252 255 if (ring->xenblkd) { 253 256 kthread_stop(ring->xenblkd); 254 257 wake_up(&ring->shutdown_wq); 255 - ring->xenblkd = NULL; 256 258 } 257 259 258 260 /* The above kthread_stop() guarantees that at this point we ··· 298 296 BUG_ON(ring->free_pages_num != 0); 299 297 BUG_ON(ring->persistent_gnt_c != 0); 300 298 WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); 301 - xen_blkif_put(blkif); 299 + ring->active = false; 302 300 } 303 301 blkif->nr_ring_pages = 0; 304 302 /* ··· 314 312 315 313 static void xen_blkif_free(struct xen_blkif *blkif) 316 314 { 317 - 318 - xen_blkif_disconnect(blkif); 315 + WARN_ON(xen_blkif_disconnect(blkif)); 319 316 xen_vbd_free(&blkif->vbd); 317 + kfree(blkif->be->mode); 318 + kfree(blkif->be); 320 319 321 320 /* Make sure everything is drained before shutting down */ 322 321 kmem_cache_free(xen_blkif_cachep, blkif); ··· 514 511 xen_blkif_put(be->blkif); 515 512 } 516 513 517 - kfree(be->mode); 518 - kfree(be); 519 514 return 0; 520 515 } 521 516
+1 -1
drivers/char/mem.c
··· 343 343 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; 344 344 345 345 /* It's illegal to wrap around the end of the physical address space. */ 346 - if (offset + (phys_addr_t)size < offset) 346 + if (offset + (phys_addr_t)size - 1 < offset) 347 347 return -EINVAL; 348 348 349 349 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
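The off-by-one above matters at the very top of the physical address space: using an inclusive end keeps the last page mappable while still rejecting ranges that wrap. A worked sketch of the check (hypothetical helper, 32-bit values chosen for readability):

#include <linux/types.h>

static bool example_range_wraps(phys_addr_t offset, size_t size)
{
        /* inclusive last byte: offset + size - 1 must not wrap below offset */
        return offset + (phys_addr_t)size - 1 < offset;
}

/* With a 32-bit phys_addr_t:
 *   example_range_wraps(0xFFFFF000, 0x1000) == false  (last page, accepted)
 *   example_range_wraps(0xFFFFF000, 0x2000) == true   (wraps, rejected)
 * The old "offset + size < offset" form wrongly rejected the first case. */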
+3 -3
drivers/char/pcmcia/cm4040_cs.c
··· 374 374 375 375 rc = write_sync_reg(SCR_HOST_TO_READER_START, dev); 376 376 if (rc <= 0) { 377 - DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); 377 + DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); 378 378 DEBUGP(2, dev, "<- cm4040_write (failed)\n"); 379 379 if (rc == -ERESTARTSYS) 380 380 return rc; ··· 387 387 for (i = 0; i < bytes_to_write; i++) { 388 388 rc = wait_for_bulk_out_ready(dev); 389 389 if (rc <= 0) { 390 - DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n", 390 + DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n", 391 391 rc); 392 392 DEBUGP(2, dev, "<- cm4040_write (failed)\n"); 393 393 if (rc == -ERESTARTSYS) ··· 403 403 rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev); 404 404 405 405 if (rc <= 0) { 406 - DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); 406 + DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); 407 407 DEBUGP(2, dev, "<- cm4040_write (failed)\n"); 408 408 if (rc == -ERESTARTSYS) 409 409 return rc;
+46 -5
drivers/char/random.c
··· 1 1 /* 2 2 * random.c -- A strong random number generator 3 3 * 4 + * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All 5 + * Rights Reserved. 6 + * 4 7 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 5 8 * 6 9 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All ··· 765 762 static struct crng_state **crng_node_pool __read_mostly; 766 763 #endif 767 764 765 + static void invalidate_batched_entropy(void); 766 + 768 767 static void crng_initialize(struct crng_state *crng) 769 768 { 770 769 int i; ··· 803 798 p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp; 804 799 cp++; crng_init_cnt++; len--; 805 800 } 801 + spin_unlock_irqrestore(&primary_crng.lock, flags); 806 802 if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) { 803 + invalidate_batched_entropy(); 807 804 crng_init = 1; 808 805 wake_up_interruptible(&crng_init_wait); 809 806 pr_notice("random: fast init done\n"); 810 807 } 811 - spin_unlock_irqrestore(&primary_crng.lock, flags); 812 808 return 1; 813 809 } 814 810 ··· 841 835 } 842 836 memzero_explicit(&buf, sizeof(buf)); 843 837 crng->init_time = jiffies; 838 + spin_unlock_irqrestore(&primary_crng.lock, flags); 844 839 if (crng == &primary_crng && crng_init < 2) { 840 + invalidate_batched_entropy(); 845 841 crng_init = 2; 846 842 process_random_ready_list(); 847 843 wake_up_interruptible(&crng_init_wait); 848 844 pr_notice("random: crng init done\n"); 849 845 } 850 - spin_unlock_irqrestore(&primary_crng.lock, flags); 851 846 } 852 847 853 848 static inline void crng_wait_ready(void) ··· 1104 1097 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) 1105 1098 { 1106 1099 __u32 *ptr = (__u32 *) regs; 1100 + unsigned int idx; 1107 1101 1108 1102 if (regs == NULL) 1109 1103 return 0; 1110 - if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) 1111 - f->reg_idx = 0; 1112 - return *(ptr + f->reg_idx++); 1104 + idx = READ_ONCE(f->reg_idx); 1105 + if (idx >= sizeof(struct pt_regs) / sizeof(__u32)) 1106 + idx = 0; 1107 + ptr += idx++; 1108 + WRITE_ONCE(f->reg_idx, idx); 1109 + return *ptr; 1113 1110 } 1114 1111 1115 1112 void add_interrupt_randomness(int irq, int irq_flags) ··· 2030 2019 }; 2031 2020 unsigned int position; 2032 2021 }; 2022 + static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock); 2033 2023 2034 2024 /* 2035 2025 * Get a random word for internal kernel use only. 
The quality of the random ··· 2041 2029 u64 get_random_u64(void) 2042 2030 { 2043 2031 u64 ret; 2032 + bool use_lock = READ_ONCE(crng_init) < 2; 2033 + unsigned long flags = 0; 2044 2034 struct batched_entropy *batch; 2045 2035 2046 2036 #if BITS_PER_LONG == 64 ··· 2055 2041 #endif 2056 2042 2057 2043 batch = &get_cpu_var(batched_entropy_u64); 2044 + if (use_lock) 2045 + read_lock_irqsave(&batched_entropy_reset_lock, flags); 2058 2046 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { 2059 2047 extract_crng((u8 *)batch->entropy_u64); 2060 2048 batch->position = 0; 2061 2049 } 2062 2050 ret = batch->entropy_u64[batch->position++]; 2051 + if (use_lock) 2052 + read_unlock_irqrestore(&batched_entropy_reset_lock, flags); 2063 2053 put_cpu_var(batched_entropy_u64); 2064 2054 return ret; 2065 2055 } ··· 2073 2055 u32 get_random_u32(void) 2074 2056 { 2075 2057 u32 ret; 2058 + bool use_lock = READ_ONCE(crng_init) < 2; 2059 + unsigned long flags = 0; 2076 2060 struct batched_entropy *batch; 2077 2061 2078 2062 if (arch_get_random_int(&ret)) 2079 2063 return ret; 2080 2064 2081 2065 batch = &get_cpu_var(batched_entropy_u32); 2066 + if (use_lock) 2067 + read_lock_irqsave(&batched_entropy_reset_lock, flags); 2082 2068 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { 2083 2069 extract_crng((u8 *)batch->entropy_u32); 2084 2070 batch->position = 0; 2085 2071 } 2086 2072 ret = batch->entropy_u32[batch->position++]; 2073 + if (use_lock) 2074 + read_unlock_irqrestore(&batched_entropy_reset_lock, flags); 2087 2075 put_cpu_var(batched_entropy_u32); 2088 2076 return ret; 2089 2077 } 2090 2078 EXPORT_SYMBOL(get_random_u32); 2079 + 2080 + /* It's important to invalidate all potential batched entropy that might 2081 + * be stored before the crng is initialized, which we can do lazily by 2082 + * simply resetting the counter to zero so that it's re-extracted on the 2083 + * next usage. */ 2084 + static void invalidate_batched_entropy(void) 2085 + { 2086 + int cpu; 2087 + unsigned long flags; 2088 + 2089 + write_lock_irqsave(&batched_entropy_reset_lock, flags); 2090 + for_each_possible_cpu (cpu) { 2091 + per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0; 2092 + per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0; 2093 + } 2094 + write_unlock_irqrestore(&batched_entropy_reset_lock, flags); 2095 + } 2091 2096 2092 2097 /** 2093 2098 * randomize_page - Generate a random, page aligned address
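A short sketch of a caller of the batched get_random_u32()/get_random_u64() interfaces touched above. These are for in-kernel consumers only; as the hunk shows, batches produced before the crng is fully initialized are now invalidated, but early-boot output can still be weaker than a /dev/urandom read. The helper name is illustrative:

#include <linux/random.h>

/* Pick a slot index in [0, nr_slots); the modulo bias is acceptable for
 * non-cryptographic uses such as load spreading. */
static u64 example_pick_slot(u64 nr_slots)
{
        if (!nr_slots)
                return 0;
        return get_random_u64() % nr_slots;
}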
+1
drivers/clk/meson/Kconfig
··· 14 14 config COMMON_CLK_GXBB 15 15 bool 16 16 depends on COMMON_CLK_AMLOGIC 17 + select RESET_CONTROLLER 17 18 help 18 19 Support for the clock controller on AmLogic S905 devices, aka gxbb. 19 20 Say Y if you want peripherals and CPU frequency scaling to work.
+1
drivers/clk/sunxi-ng/Kconfig
··· 156 156 bool "Support for Allwinner SoCs' PRCM CCUs" 157 157 select SUNXI_CCU_DIV 158 158 select SUNXI_CCU_GATE 159 + select SUNXI_CCU_MP 159 160 default MACH_SUN8I || (ARCH_SUNXI && ARM64) 160 161 161 162 endif
+3 -1
drivers/clk/sunxi-ng/ccu-sun50i-a64.h
··· 31 31 #define CLK_PLL_VIDEO0_2X 8 32 32 #define CLK_PLL_VE 9 33 33 #define CLK_PLL_DDR0 10 34 - #define CLK_PLL_PERIPH0 11 34 + 35 + /* PLL_PERIPH0 exported for PRCM */ 36 + 35 37 #define CLK_PLL_PERIPH0_2X 12 36 38 #define CLK_PLL_PERIPH1 13 37 39 #define CLK_PLL_PERIPH1_2X 14
+1 -1
drivers/clk/sunxi-ng/ccu-sun5i.c
··· 243 243 static SUNXI_CCU_GATE(ahb_dma_clk, "ahb-dma", "ahb", 244 244 0x060, BIT(6), 0); 245 245 static SUNXI_CCU_GATE(ahb_bist_clk, "ahb-bist", "ahb", 246 - 0x060, BIT(6), 0); 246 + 0x060, BIT(7), 0); 247 247 static SUNXI_CCU_GATE(ahb_mmc0_clk, "ahb-mmc0", "ahb", 248 248 0x060, BIT(8), 0); 249 249 static SUNXI_CCU_GATE(ahb_mmc1_clk, "ahb-mmc1", "ahb",
+1 -1
drivers/clk/sunxi-ng/ccu-sun6i-a31.c
··· 556 556 0x12c, 0, 4, 24, 3, BIT(31), 557 557 CLK_SET_RATE_PARENT); 558 558 static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, 559 - 0x12c, 0, 4, 24, 3, BIT(31), 559 + 0x130, 0, 4, 24, 3, BIT(31), 560 560 CLK_SET_RATE_PARENT); 561 561 562 562 static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",
+3 -1
drivers/clk/sunxi-ng/ccu-sun8i-h3.h
··· 29 29 #define CLK_PLL_VIDEO 6 30 30 #define CLK_PLL_VE 7 31 31 #define CLK_PLL_DDR 8 32 - #define CLK_PLL_PERIPH0 9 32 + 33 + /* PLL_PERIPH0 exported for PRCM */ 34 + 33 35 #define CLK_PLL_PERIPH0_2X 10 34 36 #define CLK_PLL_GPU 11 35 37 #define CLK_PLL_PERIPH1 12
+1 -1
drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
··· 537 537 [RST_BUS_EMAC] = { 0x2c0, BIT(17) }, 538 538 [RST_BUS_HSTIMER] = { 0x2c0, BIT(19) }, 539 539 [RST_BUS_SPI0] = { 0x2c0, BIT(20) }, 540 - [RST_BUS_OTG] = { 0x2c0, BIT(23) }, 540 + [RST_BUS_OTG] = { 0x2c0, BIT(24) }, 541 541 [RST_BUS_EHCI0] = { 0x2c0, BIT(26) }, 542 542 [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, 543 543
+2 -2
drivers/clocksource/arm_arch_timer.c
··· 1209 1209 return 0; 1210 1210 } 1211 1211 1212 - rate = readl_relaxed(frame + CNTFRQ); 1212 + rate = readl_relaxed(base + CNTFRQ); 1213 1213 1214 - iounmap(frame); 1214 + iounmap(base); 1215 1215 1216 1216 return rate; 1217 1217 }
+1
drivers/clocksource/cadence_ttc_timer.c
··· 18 18 #include <linux/clk.h> 19 19 #include <linux/interrupt.h> 20 20 #include <linux/clockchips.h> 21 + #include <linux/clocksource.h> 21 22 #include <linux/of_address.h> 22 23 #include <linux/of_irq.h> 23 24 #include <linux/slab.h>
+1
drivers/clocksource/timer-sun5i.c
··· 12 12 13 13 #include <linux/clk.h> 14 14 #include <linux/clockchips.h> 15 + #include <linux/clocksource.h> 15 16 #include <linux/delay.h> 16 17 #include <linux/interrupt.h> 17 18 #include <linux/irq.h>
+1
drivers/cpufreq/cpufreq.c
··· 2468 2468 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && 2469 2469 list_empty(&cpufreq_policy_list)) { 2470 2470 /* if all ->init() calls failed, unregister */ 2471 + ret = -ENODEV; 2471 2472 pr_debug("%s: No CPU initialized for driver %s\n", __func__, 2472 2473 driver_data->name); 2473 2474 goto err_if_unreg;
+2 -2
drivers/cpufreq/cpufreq_conservative.c
··· 185 185 int ret; 186 186 ret = sscanf(buf, "%u", &input); 187 187 188 - /* cannot be lower than 11 otherwise freq will not fall */ 189 - if (ret != 1 || input < 11 || input > 100 || 188 + /* cannot be lower than 1 otherwise freq will not fall */ 189 + if (ret != 1 || input < 1 || input > 100 || 190 190 input >= dbs_data->up_threshold) 191 191 return -EINVAL; 192 192
+3 -2
drivers/cpufreq/intel_pstate.c
··· 571 571 static int min_perf_pct_min(void) 572 572 { 573 573 struct cpudata *cpu = all_cpu_data[0]; 574 + int turbo_pstate = cpu->pstate.turbo_pstate; 574 575 575 - return DIV_ROUND_UP(cpu->pstate.min_pstate * 100, 576 - cpu->pstate.turbo_pstate); 576 + return turbo_pstate ? 577 + DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0; 577 578 } 578 579 579 580 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
+16 -3
drivers/cpufreq/kirkwood-cpufreq.c
··· 127 127 return PTR_ERR(priv.cpu_clk); 128 128 } 129 129 130 - clk_prepare_enable(priv.cpu_clk); 130 + err = clk_prepare_enable(priv.cpu_clk); 131 + if (err) { 132 + dev_err(priv.dev, "Unable to prepare cpuclk\n"); 133 + return err; 134 + } 135 + 131 136 kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; 132 137 133 138 priv.ddr_clk = of_clk_get_by_name(np, "ddrclk"); ··· 142 137 goto out_cpu; 143 138 } 144 139 145 - clk_prepare_enable(priv.ddr_clk); 140 + err = clk_prepare_enable(priv.ddr_clk); 141 + if (err) { 142 + dev_err(priv.dev, "Unable to prepare ddrclk\n"); 143 + goto out_cpu; 144 + } 146 145 kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000; 147 146 148 147 priv.powersave_clk = of_clk_get_by_name(np, "powersave"); ··· 155 146 err = PTR_ERR(priv.powersave_clk); 156 147 goto out_ddr; 157 148 } 158 - clk_prepare_enable(priv.powersave_clk); 149 + err = clk_prepare_enable(priv.powersave_clk); 150 + if (err) { 151 + dev_err(priv.dev, "Unable to prepare powersave clk\n"); 152 + goto out_ddr; 153 + } 159 154 160 155 of_node_put(np); 161 156 np = NULL;
+3 -1
drivers/cpuidle/dt_idle_states.c
··· 180 180 if (!state_node) 181 181 break; 182 182 183 - if (!of_device_is_available(state_node)) 183 + if (!of_device_is_available(state_node)) { 184 + of_node_put(state_node); 184 185 continue; 186 + } 185 187 186 188 if (!idle_state_valid(state_node, i, cpumask)) { 187 189 pr_warn("%s idle state not valid, bailing out\n",
+7 -2
drivers/dax/super.c
··· 210 210 static struct inode *dax_alloc_inode(struct super_block *sb) 211 211 { 212 212 struct dax_device *dax_dev; 213 + struct inode *inode; 213 214 214 215 dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL); 215 - return &dax_dev->inode; 216 + inode = &dax_dev->inode; 217 + inode->i_rdev = 0; 218 + return inode; 216 219 } 217 220 218 221 static struct dax_device *to_dax_dev(struct inode *inode) ··· 230 227 231 228 kfree(dax_dev->host); 232 229 dax_dev->host = NULL; 233 - ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev)); 230 + if (inode->i_rdev) 231 + ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev)); 234 232 kmem_cache_free(dax_cache, dax_dev); 235 233 } 236 234 ··· 427 423 struct dax_device *dax_dev = _dax_dev; 428 424 struct inode *inode = &dax_dev->inode; 429 425 426 + memset(dax_dev, 0, sizeof(*dax_dev)); 430 427 inode_init_once(inode); 431 428 } 432 429
+5 -1
drivers/devfreq/event/exynos-nocp.c
··· 267 267 } 268 268 platform_set_drvdata(pdev, nocp); 269 269 270 - clk_prepare_enable(nocp->clk); 270 + ret = clk_prepare_enable(nocp->clk); 271 + if (ret) { 272 + dev_err(&pdev->dev, "failed to prepare ppmu clock\n"); 273 + return ret; 274 + } 271 275 272 276 pr_info("exynos-nocp: new NoC Probe device registered: %s\n", 273 277 dev_name(dev));
+6 -2
drivers/devfreq/event/exynos-ppmu.c
··· 44 44 { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \ 45 45 { "ppmu-event3-"#name, PPMU_PMNCNT3 } 46 46 47 - struct __exynos_ppmu_events { 47 + static struct __exynos_ppmu_events { 48 48 char *name; 49 49 int id; 50 50 } ppmu_events[] = { ··· 648 648 dev_name(&pdev->dev), desc[i].name); 649 649 } 650 650 651 - clk_prepare_enable(info->ppmu.clk); 651 + ret = clk_prepare_enable(info->ppmu.clk); 652 + if (ret) { 653 + dev_err(&pdev->dev, "failed to prepare ppmu clock\n"); 654 + return ret; 655 + } 652 656 653 657 return 0; 654 658 }
+35 -4
drivers/dma/ep93xx_dma.c
··· 201 201 struct dma_device dma_dev; 202 202 bool m2m; 203 203 int (*hw_setup)(struct ep93xx_dma_chan *); 204 + void (*hw_synchronize)(struct ep93xx_dma_chan *); 204 205 void (*hw_shutdown)(struct ep93xx_dma_chan *); 205 206 void (*hw_submit)(struct ep93xx_dma_chan *); 206 207 int (*hw_interrupt)(struct ep93xx_dma_chan *); ··· 324 323 | M2P_CONTROL_ENABLE; 325 324 m2p_set_control(edmac, control); 326 325 326 + edmac->buffer = 0; 327 + 327 328 return 0; 328 329 } 329 330 ··· 334 331 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; 335 332 } 336 333 337 - static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) 334 + static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac) 338 335 { 336 + unsigned long flags; 339 337 u32 control; 340 338 339 + spin_lock_irqsave(&edmac->lock, flags); 341 340 control = readl(edmac->regs + M2P_CONTROL); 342 341 control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); 343 342 m2p_set_control(edmac, control); 343 + spin_unlock_irqrestore(&edmac->lock, flags); 344 344 345 345 while (m2p_channel_state(edmac) >= M2P_STATE_ON) 346 - cpu_relax(); 346 + schedule(); 347 + } 347 348 349 + static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) 350 + { 348 351 m2p_set_control(edmac, 0); 349 352 350 - while (m2p_channel_state(edmac) == M2P_STATE_STALL) 351 - cpu_relax(); 353 + while (m2p_channel_state(edmac) != M2P_STATE_IDLE) 354 + dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n"); 352 355 } 353 356 354 357 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) ··· 1170 1161 } 1171 1162 1172 1163 /** 1164 + * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the 1165 + * current context. 1166 + * @chan: channel 1167 + * 1168 + * Synchronizes the DMA channel termination to the current context. When this 1169 + * function returns it is guaranteed that all transfers for previously issued 1170 + * descriptors have stopped and and it is safe to free the memory associated 1171 + * with them. Furthermore it is guaranteed that all complete callback functions 1172 + * for a previously submitted descriptor have finished running and it is safe to 1173 + * free resources accessed from within the complete callbacks. 1174 + */ 1175 + static void ep93xx_dma_synchronize(struct dma_chan *chan) 1176 + { 1177 + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 1178 + 1179 + if (edmac->edma->hw_synchronize) 1180 + edmac->edma->hw_synchronize(edmac); 1181 + } 1182 + 1183 + /** 1173 1184 * ep93xx_dma_terminate_all - terminate all transactions 1174 1185 * @chan: channel 1175 1186 * ··· 1352 1323 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; 1353 1324 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; 1354 1325 dma_dev->device_config = ep93xx_dma_slave_config; 1326 + dma_dev->device_synchronize = ep93xx_dma_synchronize; 1355 1327 dma_dev->device_terminate_all = ep93xx_dma_terminate_all; 1356 1328 dma_dev->device_issue_pending = ep93xx_dma_issue_pending; 1357 1329 dma_dev->device_tx_status = ep93xx_dma_tx_status; ··· 1370 1340 } else { 1371 1341 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); 1372 1342 1343 + edma->hw_synchronize = m2p_hw_synchronize; 1373 1344 edma->hw_setup = m2p_hw_setup; 1374 1345 edma->hw_shutdown = m2p_hw_shutdown; 1375 1346 edma->hw_submit = m2p_hw_submit;
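The new hw_synchronize hook above backs the generic device_synchronize callback, which dmaengine clients reach through dmaengine_synchronize() and dmaengine_terminate_sync(). A sketch of the client-side pattern it enables (illustrative only, sleepable context required):

#include <linux/dmaengine.h>

static void example_stop_channel(struct dma_chan *chan)
{
        /* stop the channel without waiting... */
        dmaengine_terminate_async(chan);
        /* ...then wait until all complete callbacks have finished, so
         * buffers referenced by in-flight descriptors can be freed. */
        dmaengine_synchronize(chan);
}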
+44 -65
drivers/dma/mv_xor_v2.c
··· 161 161 struct mv_xor_v2_sw_desc *sw_desq; 162 162 int desc_size; 163 163 unsigned int npendings; 164 + unsigned int hw_queue_idx; 164 165 }; 165 166 166 167 /** ··· 215 214 } 216 215 217 216 /* 218 - * Return the next available index in the DESQ. 219 - */ 220 - static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev) 221 - { 222 - /* read the index for the next available descriptor in the DESQ */ 223 - u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF); 224 - 225 - return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT) 226 - & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK); 227 - } 228 - 229 - /* 230 217 * notify the engine of new descriptors, and update the available index. 231 218 */ 232 219 static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev, ··· 246 257 return MV_XOR_V2_EXT_DESC_SIZE; 247 258 } 248 259 249 - /* 250 - * Set the IMSG threshold 251 - */ 252 - static inline 253 - void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val) 254 - { 255 - u32 reg; 256 - 257 - reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); 258 - 259 - reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); 260 - reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); 261 - 262 - writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); 263 - } 264 - 265 260 static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) 266 261 { 267 262 struct mv_xor_v2_device *xor_dev = data; ··· 261 288 if (!ndescs) 262 289 return IRQ_NONE; 263 290 264 - /* 265 - * Update IMSG threshold, to disable new IMSG interrupts until 266 - * end of the tasklet 267 - */ 268 - mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM); 269 - 270 291 /* schedule a tasklet to handle descriptors callbacks */ 271 292 tasklet_schedule(&xor_dev->irq_tasklet); 272 293 ··· 273 306 static dma_cookie_t 274 307 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) 275 308 { 276 - int desq_ptr; 277 309 void *dest_hw_desc; 278 310 dma_cookie_t cookie; 279 311 struct mv_xor_v2_sw_desc *sw_desc = ··· 288 322 spin_lock_bh(&xor_dev->lock); 289 323 cookie = dma_cookie_assign(tx); 290 324 291 - /* get the next available slot in the DESQ */ 292 - desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev); 293 - 294 325 /* copy the HW descriptor from the SW descriptor to the DESQ */ 295 - dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr; 326 + dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx; 296 327 297 328 memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); 298 329 299 330 xor_dev->npendings++; 331 + xor_dev->hw_queue_idx++; 332 + if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM) 333 + xor_dev->hw_queue_idx = 0; 300 334 301 335 spin_unlock_bh(&xor_dev->lock); 302 336 ··· 310 344 mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) 311 345 { 312 346 struct mv_xor_v2_sw_desc *sw_desc; 347 + bool found = false; 313 348 314 349 /* Lock the channel */ 315 350 spin_lock_bh(&xor_dev->lock); ··· 322 355 return NULL; 323 356 } 324 357 325 - /* get a free SW descriptor from the SW DESQ */ 326 - sw_desc = list_first_entry(&xor_dev->free_sw_desc, 327 - struct mv_xor_v2_sw_desc, free_list); 358 + list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) { 359 + if (async_tx_test_ack(&sw_desc->async_tx)) { 360 + found = true; 361 + break; 362 + } 363 + } 364 + 365 + if (!found) { 366 + spin_unlock_bh(&xor_dev->lock); 367 + return NULL; 368 + } 369 + 328 370 list_del(&sw_desc->free_list); 329 371 330 372 /* Release the channel */ 331 373 
spin_unlock_bh(&xor_dev->lock); 332 - 333 - /* set the async tx descriptor */ 334 - dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan); 335 - sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; 336 - async_tx_ack(&sw_desc->async_tx); 337 374 338 375 return sw_desc; 339 376 } ··· 360 389 __func__, len, &src, &dest, flags); 361 390 362 391 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); 392 + if (!sw_desc) 393 + return NULL; 363 394 364 395 sw_desc->async_tx.flags = flags; 365 396 ··· 416 443 __func__, src_cnt, len, &dest, flags); 417 444 418 445 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); 446 + if (!sw_desc) 447 + return NULL; 419 448 420 449 sw_desc->async_tx.flags = flags; 421 450 ··· 466 491 container_of(chan, struct mv_xor_v2_device, dmachan); 467 492 468 493 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); 494 + if (!sw_desc) 495 + return NULL; 469 496 470 497 /* set the HW descriptor */ 471 498 hw_descriptor = &sw_desc->hw_desc; ··· 531 554 { 532 555 struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; 533 556 int pending_ptr, num_of_pending, i; 534 - struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL; 535 557 struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; 536 558 537 559 dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); ··· 538 562 /* get the pending descriptors parameters */ 539 563 num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); 540 564 541 - /* next HW descriptor */ 542 - next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr; 543 - 544 565 /* loop over free descriptors */ 545 566 for (i = 0; i < num_of_pending; i++) { 546 - 547 - if (pending_ptr > MV_XOR_V2_DESC_NUM) 548 - pending_ptr = 0; 549 - 550 - if (next_pending_sw_desc != NULL) 551 - next_pending_hw_desc++; 567 + struct mv_xor_v2_descriptor *next_pending_hw_desc = 568 + xor_dev->hw_desq_virt + pending_ptr; 552 569 553 570 /* get the SW descriptor related to the HW descriptor */ 554 571 next_pending_sw_desc = ··· 577 608 578 609 /* increment the next descriptor */ 579 610 pending_ptr++; 611 + if (pending_ptr >= MV_XOR_V2_DESC_NUM) 612 + pending_ptr = 0; 580 613 } 581 614 582 615 if (num_of_pending != 0) { 583 616 /* free the descriptores */ 584 617 mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); 585 618 } 586 - 587 - /* Update IMSG threshold, to enable new IMSG interrupts */ 588 - mv_xor_v2_set_imsg_thrd(xor_dev, 0); 589 619 } 590 620 591 621 /* ··· 615 647 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF); 616 648 writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, 617 649 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); 618 - 619 - /* enable the DMA engine */ 620 - writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); 621 650 622 651 /* 623 652 * This is a temporary solution, until we activate the ··· 659 694 reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; 660 695 writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); 661 696 697 + /* enable the DMA engine */ 698 + writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); 699 + 662 700 return 0; 663 701 } 664 702 ··· 692 724 return PTR_ERR(xor_dev->glob_base); 693 725 694 726 platform_set_drvdata(pdev, xor_dev); 727 + 728 + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); 729 + if (ret) 730 + return ret; 695 731 696 732 xor_dev->clk = devm_clk_get(&pdev->dev, NULL); 697 733 if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) ··· 757 785 758 786 /* add all SW descriptors to the free list */ 759 787 for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { 760 - 
xor_dev->sw_desq[i].idx = i; 761 - list_add(&xor_dev->sw_desq[i].free_list, 788 + struct mv_xor_v2_sw_desc *sw_desc = 789 + xor_dev->sw_desq + i; 790 + sw_desc->idx = i; 791 + dma_async_tx_descriptor_init(&sw_desc->async_tx, 792 + &xor_dev->dmachan); 793 + sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; 794 + async_tx_ack(&sw_desc->async_tx); 795 + 796 + list_add(&sw_desc->free_list, 762 797 &xor_dev->free_sw_desc); 763 798 } 764 799
+2 -1
drivers/dma/pl330.c
··· 3008 3008 3009 3009 for (i = 0; i < AMBA_NR_IRQS; i++) { 3010 3010 irq = adev->irq[i]; 3011 - devm_free_irq(&adev->dev, irq, pl330); 3011 + if (irq) 3012 + devm_free_irq(&adev->dev, irq, pl330); 3012 3013 } 3013 3014 3014 3015 dma_async_device_unregister(&pl330->ddma);
+3
drivers/dma/sh/rcar-dmac.c
··· 1287 1287 if (desc->hwdescs.use) { 1288 1288 dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & 1289 1289 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; 1290 + if (dptr == 0) 1291 + dptr = desc->nchunks; 1292 + dptr--; 1290 1293 WARN_ON(dptr >= desc->nchunks); 1291 1294 } else { 1292 1295 running = desc->running;
+1 -1
drivers/dma/sh/usb-dmac.c
··· 117 117 #define USB_DMASWR 0x0008 118 118 #define USB_DMASWR_SWR (1 << 0) 119 119 #define USB_DMAOR 0x0060 120 - #define USB_DMAOR_AE (1 << 2) 120 + #define USB_DMAOR_AE (1 << 1) 121 121 #define USB_DMAOR_DME (1 << 0) 122 122 123 123 #define USB_DMASAR 0x0000
+2
drivers/firmware/dmi-id.c
··· 47 47 DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); 48 48 DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); 49 49 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); 50 + DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY); 50 51 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); 51 52 DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); 52 53 DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); ··· 192 191 ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); 193 192 ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); 194 193 ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); 194 + ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); 195 195 ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); 196 196 ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); 197 197 ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
+38 -12
drivers/firmware/dmi_scan.c
··· 144 144 145 145 buf = dmi_early_remap(dmi_base, orig_dmi_len); 146 146 if (buf == NULL) 147 - return -1; 147 + return -ENOMEM; 148 148 149 149 dmi_decode_table(buf, decode, NULL); 150 150 ··· 178 178 const char *d = (const char *) dm; 179 179 const char *p; 180 180 181 - if (dmi_ident[slot]) 181 + if (dmi_ident[slot] || dm->length <= string) 182 182 return; 183 183 184 184 p = dmi_string(dm, d[string]); ··· 191 191 static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, 192 192 int index) 193 193 { 194 - const u8 *d = (u8 *) dm + index; 194 + const u8 *d; 195 195 char *s; 196 196 int is_ff = 1, is_00 = 1, i; 197 197 198 - if (dmi_ident[slot]) 198 + if (dmi_ident[slot] || dm->length <= index + 16) 199 199 return; 200 200 201 + d = (u8 *) dm + index; 201 202 for (i = 0; i < 16 && (is_ff || is_00); i++) { 202 203 if (d[i] != 0x00) 203 204 is_00 = 0; ··· 229 228 static void __init dmi_save_type(const struct dmi_header *dm, int slot, 230 229 int index) 231 230 { 232 - const u8 *d = (u8 *) dm + index; 231 + const u8 *d; 233 232 char *s; 234 233 235 - if (dmi_ident[slot]) 234 + if (dmi_ident[slot] || dm->length <= index) 236 235 return; 237 236 238 237 s = dmi_alloc(4); 239 238 if (!s) 240 239 return; 241 240 241 + d = (u8 *) dm + index; 242 242 sprintf(s, "%u", *d & 0x7F); 243 243 dmi_ident[slot] = s; 244 244 } ··· 280 278 281 279 static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) 282 280 { 283 - int i, count = *(u8 *)(dm + 1); 281 + int i, count; 284 282 struct dmi_device *dev; 285 283 284 + if (dm->length < 0x05) 285 + return; 286 + 287 + count = *(u8 *)(dm + 1); 286 288 for (i = 1; i <= count; i++) { 287 289 const char *devname = dmi_string(dm, i); 288 290 ··· 359 353 const char *name; 360 354 const u8 *d = (u8 *)dm; 361 355 356 + if (dm->length < 0x0B) 357 + return; 358 + 362 359 /* Skip disabled device */ 363 360 if ((d[0x5] & 0x80) == 0) 364 361 return; ··· 396 387 const char *d = (const char *)dm; 397 388 static int nr; 398 389 399 - if (dm->type != DMI_ENTRY_MEM_DEVICE) 390 + if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12) 400 391 return; 401 392 if (nr >= dmi_memdev_nr) { 402 393 pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n"); ··· 439 430 dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); 440 431 dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); 441 432 dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); 433 + dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26); 442 434 break; 443 435 case 2: /* Base Board Information */ 444 436 dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); ··· 659 649 goto error; 660 650 661 651 /* 652 + * Same logic as above, look for a 64-bit entry point 653 + * first, and if not found, fall back to 32-bit entry point. 654 + */ 655 + memcpy_fromio(buf, p, 16); 656 + for (q = p + 16; q < p + 0x10000; q += 16) { 657 + memcpy_fromio(buf + 16, q, 16); 658 + if (!dmi_smbios3_present(buf)) { 659 + dmi_available = 1; 660 + dmi_early_unmap(p, 0x10000); 661 + goto out; 662 + } 663 + memcpy(buf, buf + 16, 16); 664 + } 665 + 666 + /* 662 667 * Iterate over all possible DMI header addresses q. 663 668 * Maintain the 32 bytes around q in buf. 
On the 664 669 * first iteration, substitute zero for the ··· 683 658 memset(buf, 0, 16); 684 659 for (q = p; q < p + 0x10000; q += 16) { 685 660 memcpy_fromio(buf + 16, q, 16); 686 - if (!dmi_smbios3_present(buf) || !dmi_present(buf)) { 661 + if (!dmi_present(buf)) { 687 662 dmi_available = 1; 688 663 dmi_early_unmap(p, 0x10000); 689 664 goto out; ··· 1017 992 * @decode: Callback function 1018 993 * @private_data: Private data to be passed to the callback function 1019 994 * 1020 - * Returns -1 when the DMI table can't be reached, 0 on success. 995 + * Returns 0 on success, -ENXIO if DMI is not selected or not present, 996 + * or a different negative error code if DMI walking fails. 1021 997 */ 1022 998 int dmi_walk(void (*decode)(const struct dmi_header *, void *), 1023 999 void *private_data) ··· 1026 1000 u8 *buf; 1027 1001 1028 1002 if (!dmi_available) 1029 - return -1; 1003 + return -ENXIO; 1030 1004 1031 1005 buf = dmi_remap(dmi_base, dmi_len); 1032 1006 if (buf == NULL) 1033 - return -1; 1007 + return -ENOMEM; 1034 1008 1035 1009 dmi_decode_table(buf, decode, private_data); 1036 1010
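With the dmi_walk() return convention above (a negative errno, -ENXIO when DMI is unavailable), callers can propagate the error directly. A minimal sketch with hypothetical names:

#include <linux/dmi.h>

static void example_count_memdev(const struct dmi_header *dh, void *data)
{
        int *count = data;

        if (dh->type == DMI_ENTRY_MEM_DEVICE)
                (*count)++;
}

static int example_dimm_count(void)
{
        int count = 0;
        int ret = dmi_walk(example_count_memdev, &count);

        return ret ? ret : count;       /* ret is 0 or a negative errno */
}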
+27
drivers/firmware/efi/efi-bgrt.c
··· 27 27 u32 size; 28 28 } __packed; 29 29 30 + static bool efi_bgrt_addr_valid(u64 addr) 31 + { 32 + efi_memory_desc_t *md; 33 + 34 + for_each_efi_memory_desc(md) { 35 + u64 size; 36 + u64 end; 37 + 38 + if (md->type != EFI_BOOT_SERVICES_DATA) 39 + continue; 40 + 41 + size = md->num_pages << EFI_PAGE_SHIFT; 42 + end = md->phys_addr + size; 43 + if (addr >= md->phys_addr && addr < end) 44 + return true; 45 + } 46 + 47 + return false; 48 + } 49 + 30 50 void __init efi_bgrt_init(struct acpi_table_header *table) 31 51 { 32 52 void *image; ··· 54 34 struct acpi_table_bgrt *bgrt = &bgrt_tab; 55 35 56 36 if (acpi_disabled) 37 + return; 38 + 39 + if (!efi_enabled(EFI_MEMMAP)) 57 40 return; 58 41 59 42 if (table->length < sizeof(bgrt_tab)) { ··· 85 62 goto out; 86 63 } 87 64 65 + if (!efi_bgrt_addr_valid(bgrt->image_address)) { 66 + pr_notice("Ignoring BGRT: invalid image address\n"); 67 + goto out; 68 + } 88 69 image = early_memremap(bgrt->image_address, sizeof(bmp_header)); 89 70 if (!image) { 90 71 pr_notice("Ignoring BGRT: failed to map image header memory\n");
+2 -2
drivers/firmware/efi/libstub/secureboot.c
··· 16 16 17 17 /* BIOS variables */ 18 18 static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; 19 - static const efi_char16_t const efi_SecureBoot_name[] = { 19 + static const efi_char16_t efi_SecureBoot_name[] = { 20 20 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 21 21 }; 22 - static const efi_char16_t const efi_SetupMode_name[] = { 22 + static const efi_char16_t efi_SetupMode_name[] = { 23 23 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 24 24 }; 25 25
+4 -4
drivers/firmware/google/vpd.c
··· 136 136 info->value = value; 137 137 138 138 INIT_LIST_HEAD(&info->list); 139 - list_add_tail(&info->list, &sec->attribs); 140 139 141 140 ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr); 142 141 if (ret) 143 142 goto free_info_key; 144 143 144 + list_add_tail(&info->list, &sec->attribs); 145 145 return 0; 146 146 147 147 free_info_key: ··· 158 158 struct vpd_attrib_info *temp; 159 159 160 160 list_for_each_entry_safe(info, temp, &sec->attribs, list) { 161 - kfree(info->key); 162 161 sysfs_remove_bin_file(sec->kobj, &info->bin_attr); 162 + kfree(info->key); 163 163 kfree(info); 164 164 } 165 165 } ··· 244 244 { 245 245 if (sec->enabled) { 246 246 vpd_section_attrib_destroy(sec); 247 - kobject_del(sec->kobj); 247 + kobject_put(sec->kobj); 248 248 sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr); 249 249 kfree(sec->raw_name); 250 250 iounmap(sec->baseaddr); ··· 331 331 { 332 332 vpd_section_destroy(&ro_vpd); 333 333 vpd_section_destroy(&rw_vpd); 334 - kobject_del(vpd_kobj); 334 + kobject_put(vpd_kobj); 335 335 } 336 336 337 337 module_init(vpd_platform_init);
+3
drivers/gpio/gpio-aspeed.c
··· 646 646 int rc; 647 647 int i; 648 648 649 + if (!gpio->clk) 650 + return -EINVAL; 651 + 649 652 rc = usecs_to_cycles(gpio, usecs, &requested_cycles); 650 653 if (rc < 0) { 651 654 dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n",
+36 -18
drivers/gpio/gpio-crystalcove.c
··· 90 90 { 91 91 int reg; 92 92 93 - if (gpio == 94) 94 - return GPIOPANELCTL; 93 + if (gpio >= CRYSTALCOVE_GPIO_NUM) { 94 + /* 95 + * Virtual GPIO called from ACPI, for now we only support 96 + * the panel ctl. 97 + */ 98 + switch (gpio) { 99 + case 0x5e: 100 + return GPIOPANELCTL; 101 + default: 102 + return -EOPNOTSUPP; 103 + } 104 + } 95 105 96 106 if (reg_type == CTRL_IN) { 97 107 if (gpio < 8) ··· 140 130 static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio) 141 131 { 142 132 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 133 + int reg = to_reg(gpio, CTRL_OUT); 143 134 144 - if (gpio > CRYSTALCOVE_VGPIO_NUM) 135 + if (reg < 0) 145 136 return 0; 146 137 147 - return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), 148 - CTLO_INPUT_SET); 138 + return regmap_write(cg->regmap, reg, CTLO_INPUT_SET); 149 139 } 150 140 151 141 static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio, 152 142 int value) 153 143 { 154 144 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 145 + int reg = to_reg(gpio, CTRL_OUT); 155 146 156 - if (gpio > CRYSTALCOVE_VGPIO_NUM) 147 + if (reg < 0) 157 148 return 0; 158 149 159 - return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), 160 - CTLO_OUTPUT_SET | value); 150 + return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value); 161 151 } 162 152 163 153 static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio) 164 154 { 165 155 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 166 - int ret; 167 156 unsigned int val; 157 + int ret, reg = to_reg(gpio, CTRL_IN); 168 158 169 - if (gpio > CRYSTALCOVE_VGPIO_NUM) 159 + if (reg < 0) 170 160 return 0; 171 161 172 - ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val); 162 + ret = regmap_read(cg->regmap, reg, &val); 173 163 if (ret) 174 164 return ret; 175 165 ··· 180 170 unsigned gpio, int value) 181 171 { 182 172 struct crystalcove_gpio *cg = gpiochip_get_data(chip); 173 + int reg = to_reg(gpio, CTRL_OUT); 183 174 184 - if (gpio > CRYSTALCOVE_VGPIO_NUM) 175 + if (reg < 0) 185 176 return; 186 177 187 178 if (value) 188 - regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1); 179 + regmap_update_bits(cg->regmap, reg, 1, 1); 189 180 else 190 - regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0); 181 + regmap_update_bits(cg->regmap, reg, 1, 0); 191 182 } 192 183 193 184 static int crystalcove_irq_type(struct irq_data *data, unsigned type) 194 185 { 195 186 struct crystalcove_gpio *cg = 196 187 gpiochip_get_data(irq_data_get_irq_chip_data(data)); 188 + 189 + if (data->hwirq >= CRYSTALCOVE_GPIO_NUM) 190 + return 0; 197 191 198 192 switch (type) { 199 193 case IRQ_TYPE_NONE: ··· 249 235 struct crystalcove_gpio *cg = 250 236 gpiochip_get_data(irq_data_get_irq_chip_data(data)); 251 237 252 - cg->set_irq_mask = false; 253 - cg->update |= UPDATE_IRQ_MASK; 238 + if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { 239 + cg->set_irq_mask = false; 240 + cg->update |= UPDATE_IRQ_MASK; 241 + } 254 242 } 255 243 256 244 static void crystalcove_irq_mask(struct irq_data *data) ··· 260 244 struct crystalcove_gpio *cg = 261 245 gpiochip_get_data(irq_data_get_irq_chip_data(data)); 262 246 263 - cg->set_irq_mask = true; 264 - cg->update |= UPDATE_IRQ_MASK; 247 + if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { 248 + cg->set_irq_mask = true; 249 + cg->update |= UPDATE_IRQ_MASK; 250 + } 265 251 } 266 252 267 253 static struct irq_chip crystalcove_irqchip = {
+11 -4
drivers/gpio/gpio-mvebu.c
··· 721 721 u32 set; 722 722 723 723 if (!of_device_is_compatible(mvchip->chip.of_node, 724 - "marvell,armada-370-xp-gpio")) 724 + "marvell,armada-370-gpio")) 725 725 return 0; 726 726 727 727 if (IS_ERR(mvchip->clk)) ··· 747 747 set = U32_MAX; 748 748 else 749 749 return -EINVAL; 750 - writel_relaxed(0, mvebu_gpioreg_blink_counter_select(mvchip)); 750 + writel_relaxed(set, mvebu_gpioreg_blink_counter_select(mvchip)); 751 751 752 752 mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL); 753 753 if (!mvpwm) ··· 768 768 mvpwm->chip.dev = dev; 769 769 mvpwm->chip.ops = &mvebu_pwm_ops; 770 770 mvpwm->chip.npwm = mvchip->chip.ngpio; 771 + /* 772 + * There may already be some PWM allocated, so we can't force 773 + * mvpwm->chip.base to a fixed point like mvchip->chip.base. 774 + * So, we let pwmchip_add() do the numbering and take the next free 775 + * region. 776 + */ 777 + mvpwm->chip.base = -1; 771 778 772 779 spin_lock_init(&mvpwm->lock); 773 780 ··· 852 845 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, 853 846 }, 854 847 { 855 - .compatible = "marvell,armada-370-xp-gpio", 848 + .compatible = "marvell,armada-370-gpio", 856 849 .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, 857 850 }, 858 851 { ··· 1128 1121 mvchip); 1129 1122 } 1130 1123 1131 - /* Armada 370/XP has simple PWM support for GPIO lines */ 1124 + /* Some MVEBU SoCs have simple PWM support for GPIO lines */ 1132 1125 if (IS_ENABLED(CONFIG_PWM)) 1133 1126 return mvebu_pwm_probe(pdev, mvchip, id); 1134 1127
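The gpio-mvebu change stops forcing a fixed PWM number base and sets chip.base to -1 so pwmchip_add() picks the next free range. A hedged kernel-style sketch of that registration choice; example_pwm_register() and its arguments are invented, only the pwm_chip fields and pwmchip_add() come from the PWM API of that era.

    #include <linux/platform_device.h>
    #include <linux/pwm.h>

    /* Sketch: register a PWM chip and let the core choose the numbering. */
    static int example_pwm_register(struct platform_device *pdev,
                                    struct pwm_chip *chip,
                                    const struct pwm_ops *ops,
                                    unsigned int npwm)
    {
        chip->dev  = &pdev->dev;
        chip->ops  = ops;
        chip->npwm = npwm;
        /*
         * base = -1 asks pwmchip_add() to allocate the next free region
         * instead of insisting on a fixed base that may already be in use.
         */
        chip->base = -1;

        return pwmchip_add(chip);
    }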
+1 -1
drivers/gpio/gpiolib-acpi.c
··· 201 201 handler = acpi_gpio_irq_handler_evt; 202 202 } 203 203 if (!handler) 204 - return AE_BAD_PARAMETER; 204 + return AE_OK; 205 205 206 206 pin = acpi_gpiochip_pin_to_gpio_offset(chip->gpiodev, pin); 207 207 if (pin < 0)
+2 -1
drivers/gpio/gpiolib.c
··· 708 708 709 709 ge.timestamp = ktime_get_real_ns(); 710 710 711 - if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) { 711 + if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE 712 + && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { 712 713 int level = gpiod_get_value_cansleep(le->desc); 713 714 714 715 if (level)
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
··· 693 693 DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", 694 694 adev->clock.default_dispclk / 100); 695 695 adev->clock.default_dispclk = 60000; 696 + } else if (adev->clock.default_dispclk <= 60000) { 697 + DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", 698 + adev->clock.default_dispclk / 100); 699 + adev->clock.default_dispclk = 62500; 696 700 } 697 701 adev->clock.dp_extclk = 698 702 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 449 449 {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 450 450 {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 451 451 {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 452 + {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 452 453 {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 453 454 /* Vega 10 */ 454 455 {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 220 220 } 221 221 222 222 const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { 223 - amdgpu_vram_mgr_init, 224 - amdgpu_vram_mgr_fini, 225 - amdgpu_vram_mgr_new, 226 - amdgpu_vram_mgr_del, 227 - amdgpu_vram_mgr_debug 223 + .init = amdgpu_vram_mgr_init, 224 + .takedown = amdgpu_vram_mgr_fini, 225 + .get_node = amdgpu_vram_mgr_new, 226 + .put_node = amdgpu_vram_mgr_del, 227 + .debug = amdgpu_vram_mgr_debug 228 228 };
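The amdgpu_vram_mgr change converts a positional initializer into C99 designated initializers, so the ops table keeps binding the right function to the right hook even if fields are reordered or new ones are inserted. A standalone illustration with an invented mem_mgr_funcs table:

    #include <stdio.h>

    struct mem_mgr_funcs {
        int  (*init)(void);
        void (*takedown)(void);
        void (*debug)(const char *msg);
    };

    static int  my_init(void)           { puts("init"); return 0; }
    static void my_takedown(void)       { puts("takedown"); }
    static void my_debug(const char *m) { printf("debug: %s\n", m); }

    /* Designated initializers bind by field name, not by position. */
    static const struct mem_mgr_funcs mgr_funcs = {
        .init     = my_init,
        .takedown = my_takedown,
        .debug    = my_debug,
    };

    int main(void)
    {
        mgr_funcs.init();
        mgr_funcs.debug("hello");
        mgr_funcs.takedown();
        return 0;
    }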
+2 -2
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
··· 165 165 struct drm_device *dev = crtc->dev; 166 166 struct amdgpu_device *adev = dev->dev_private; 167 167 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); 168 - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; 168 + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; 169 169 170 170 memset(&args, 0, sizeof(args)); 171 171 ··· 178 178 void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) 179 179 { 180 180 int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); 181 - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; 181 + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; 182 182 183 183 memset(&args, 0, sizeof(args)); 184 184
+5 -2
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
··· 1207 1207 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1208 1208 1209 1209 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1210 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 1211 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 1210 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 1211 + (u32)mode->clock); 1212 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 1213 + (u32)mode->clock); 1214 + line_time = min(line_time, (u32)65535); 1212 1215 1213 1216 /* watermark for high clocks */ 1214 1217 if (adev->pm.dpm_enabled) {
+5 -2
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 1176 1176 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1177 1177 1178 1178 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1179 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 1180 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 1179 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 1180 + (u32)mode->clock); 1181 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 1182 + (u32)mode->clock); 1183 + line_time = min(line_time, (u32)65535); 1181 1184 1182 1185 /* watermark for high clocks */ 1183 1186 if (adev->pm.dpm_enabled) {
+5 -2
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
··· 983 983 fixed20_12 a, b, c; 984 984 985 985 if (amdgpu_crtc->base.enabled && num_heads && mode) { 986 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 987 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 986 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 987 + (u32)mode->clock); 988 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 989 + (u32)mode->clock); 990 + line_time = min(line_time, (u32)65535); 988 991 priority_a_cnt = 0; 989 992 priority_b_cnt = 0; 990 993
+5 -2
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
··· 1091 1091 u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1092 1092 1093 1093 if (amdgpu_crtc->base.enabled && num_heads && mode) { 1094 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 1095 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 1094 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 1095 + (u32)mode->clock); 1096 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 1097 + (u32)mode->clock); 1098 + line_time = min(line_time, (u32)65535); 1096 1099 1097 1100 /* watermark for high clocks */ 1098 1101 if (adev->pm.dpm_enabled) {
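All four dce_v*_0.c watermark hunks above replace a 32-bit `1000000UL * htotal / clock` with a widened multiply plus divide, because the intermediate product overflows 32 bits for large timings. The kernel code uses div_u64(); plain C stands in for it in this standalone sketch, and the timing numbers are only an example.

    #include <stdint.h>
    #include <stdio.h>

    /* Wraps once 1000000 * htotal exceeds UINT32_MAX (htotal > 4294). */
    static uint32_t line_time_32(uint32_t htotal, uint32_t clock_khz)
    {
        return 1000000u * htotal / clock_khz;
    }

    /* Widen the multiplication to 64 bits, then divide and clamp. */
    static uint32_t line_time_64(uint32_t htotal, uint32_t clock_khz)
    {
        uint64_t t = (uint64_t)htotal * 1000000u / clock_khz;

        return t > 65535 ? 65535 : (uint32_t)t;
    }

    int main(void)
    {
        uint32_t htotal = 4400, clock = 594000;  /* illustrative 4K timing */

        printf("32-bit math: %u (wrapped)\n", line_time_32(htotal, clock));
        printf("64-bit math: %u\n", line_time_64(htotal, clock));
        return 0;
    }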
+68 -27
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 77 77 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) 78 78 { 79 79 struct amdgpu_device *adev = ring->adev; 80 + u32 v; 81 + 82 + mutex_lock(&adev->grbm_idx_mutex); 83 + if (adev->vce.harvest_config == 0 || 84 + adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) 85 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 86 + else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) 87 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 80 88 81 89 if (ring == &adev->vce.ring[0]) 82 - return RREG32(mmVCE_RB_RPTR); 90 + v = RREG32(mmVCE_RB_RPTR); 83 91 else if (ring == &adev->vce.ring[1]) 84 - return RREG32(mmVCE_RB_RPTR2); 92 + v = RREG32(mmVCE_RB_RPTR2); 85 93 else 86 - return RREG32(mmVCE_RB_RPTR3); 94 + v = RREG32(mmVCE_RB_RPTR3); 95 + 96 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 97 + mutex_unlock(&adev->grbm_idx_mutex); 98 + 99 + return v; 87 100 } 88 101 89 102 /** ··· 109 96 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) 110 97 { 111 98 struct amdgpu_device *adev = ring->adev; 99 + u32 v; 100 + 101 + mutex_lock(&adev->grbm_idx_mutex); 102 + if (adev->vce.harvest_config == 0 || 103 + adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) 104 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 105 + else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) 106 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 112 107 113 108 if (ring == &adev->vce.ring[0]) 114 - return RREG32(mmVCE_RB_WPTR); 109 + v = RREG32(mmVCE_RB_WPTR); 115 110 else if (ring == &adev->vce.ring[1]) 116 - return RREG32(mmVCE_RB_WPTR2); 111 + v = RREG32(mmVCE_RB_WPTR2); 117 112 else 118 - return RREG32(mmVCE_RB_WPTR3); 113 + v = RREG32(mmVCE_RB_WPTR3); 114 + 115 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 116 + mutex_unlock(&adev->grbm_idx_mutex); 117 + 118 + return v; 119 119 } 120 120 121 121 /** ··· 142 116 { 143 117 struct amdgpu_device *adev = ring->adev; 144 118 119 + mutex_lock(&adev->grbm_idx_mutex); 120 + if (adev->vce.harvest_config == 0 || 121 + adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) 122 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); 123 + else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) 124 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); 125 + 145 126 if (ring == &adev->vce.ring[0]) 146 127 WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); 147 128 else if (ring == &adev->vce.ring[1]) 148 129 WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); 149 130 else 150 131 WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); 132 + 133 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); 134 + mutex_unlock(&adev->grbm_idx_mutex); 151 135 } 152 136 153 137 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) ··· 267 231 struct amdgpu_ring *ring; 268 232 int idx, r; 269 233 270 - ring = &adev->vce.ring[0]; 271 - WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); 272 - WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); 273 - WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); 274 - WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 275 - WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); 276 - 277 - ring = &adev->vce.ring[1]; 278 - WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); 279 - WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); 280 - WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); 281 - WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 282 - WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); 283 - 284 - ring = &adev->vce.ring[2]; 285 - WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr)); 286 - 
WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); 287 - WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr); 288 - WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr)); 289 - WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4); 290 - 291 234 mutex_lock(&adev->grbm_idx_mutex); 292 235 for (idx = 0; idx < 2; ++idx) { 293 236 if (adev->vce.harvest_config & (1 << idx)) 294 237 continue; 295 238 296 239 WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); 240 + 241 + /* Program instance 0 reg space for two instances or instance 0 case 242 + program instance 1 reg space for only instance 1 available case */ 243 + if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) { 244 + ring = &adev->vce.ring[0]; 245 + WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); 246 + WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); 247 + WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); 248 + WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); 249 + WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); 250 + 251 + ring = &adev->vce.ring[1]; 252 + WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); 253 + WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); 254 + WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); 255 + WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); 256 + WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); 257 + 258 + ring = &adev->vce.ring[2]; 259 + WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr)); 260 + WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); 261 + WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr); 262 + WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr)); 263 + WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4); 264 + } 265 + 297 266 vce_v3_0_mc_resume(adev, idx); 298 267 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); 299 268
+10 -10
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
··· 709 709 710 710 static struct phm_master_table_item 711 711 vega10_thermal_start_thermal_controller_master_list[] = { 712 - {NULL, tf_vega10_thermal_initialize}, 713 - {NULL, tf_vega10_thermal_set_temperature_range}, 714 - {NULL, tf_vega10_thermal_enable_alert}, 712 + { .tableFunction = tf_vega10_thermal_initialize }, 713 + { .tableFunction = tf_vega10_thermal_set_temperature_range }, 714 + { .tableFunction = tf_vega10_thermal_enable_alert }, 715 715 /* We should restrict performance levels to low before we halt the SMC. 716 716 * On the other hand we are still in boot state when we do this 717 717 * so it would be pointless. 718 718 * If this assumption changes we have to revisit this table. 719 719 */ 720 - {NULL, tf_vega10_thermal_setup_fan_table}, 721 - {NULL, tf_vega10_thermal_start_smc_fan_control}, 722 - {NULL, NULL} 720 + { .tableFunction = tf_vega10_thermal_setup_fan_table }, 721 + { .tableFunction = tf_vega10_thermal_start_smc_fan_control }, 722 + { } 723 723 }; 724 724 725 725 static struct phm_master_table_header ··· 731 731 732 732 static struct phm_master_table_item 733 733 vega10_thermal_set_temperature_range_master_list[] = { 734 - {NULL, tf_vega10_thermal_disable_alert}, 735 - {NULL, tf_vega10_thermal_set_temperature_range}, 736 - {NULL, tf_vega10_thermal_enable_alert}, 737 - {NULL, NULL} 734 + { .tableFunction = tf_vega10_thermal_disable_alert }, 735 + { .tableFunction = tf_vega10_thermal_set_temperature_range }, 736 + { .tableFunction = tf_vega10_thermal_enable_alert }, 737 + { } 738 738 }; 739 739 740 740 struct phm_master_table_header
+1
drivers/gpu/drm/bridge/synopsys/Kconfig
··· 1 1 config DRM_DW_HDMI 2 2 tristate 3 3 select DRM_KMS_HELPER 4 + select REGMAP_MMIO 4 5 5 6 config DRM_DW_HDMI_AHB_AUDIO 6 7 tristate "Synopsys Designware AHB Audio interface"
+11
drivers/gpu/drm/drm_atomic_helper.c
··· 508 508 bool has_connectors = 509 509 !!new_crtc_state->connector_mask; 510 510 511 + WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); 512 + 511 513 if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) { 512 514 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n", 513 515 crtc->base.id, crtc->name); ··· 552 550 553 551 for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) { 554 552 const struct drm_connector_helper_funcs *funcs = connector->helper_private; 553 + 554 + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 555 555 556 556 /* 557 557 * This only sets crtc->connectors_changed for routing changes, ··· 653 649 654 650 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { 655 651 const struct drm_plane_helper_funcs *funcs; 652 + 653 + WARN_ON(!drm_modeset_is_locked(&plane->mutex)); 656 654 657 655 funcs = plane->helper_private; 658 656 ··· 2669 2663 2670 2664 drm_modeset_acquire_init(&ctx, 0); 2671 2665 while (1) { 2666 + err = drm_modeset_lock_all_ctx(dev, &ctx); 2667 + if (err) 2668 + goto out; 2669 + 2672 2670 err = drm_atomic_helper_commit_duplicated_state(state, &ctx); 2671 + out: 2673 2672 if (err != -EDEADLK) 2674 2673 break; 2675 2674
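The drm_atomic_helper hunk asserts the relevant modeset locks and, in the resume path, re-takes all of them inside the retry loop so the -EDEADLK back-off can drop and re-acquire cleanly. A hedged sketch of the standard acquire-context pattern that loop relies on; example_locked_commit() and the commit callback are invented, the drm_modeset_* calls are the helper API.

    #include <drm/drm_modeset_lock.h>
    #include <linux/errno.h>

    /* Sketch: run `commit` under all modeset locks, retrying on -EDEADLK. */
    static int example_locked_commit(struct drm_device *dev,
                                     int (*commit)(struct drm_device *dev,
                                                   struct drm_modeset_acquire_ctx *ctx))
    {
        struct drm_modeset_acquire_ctx ctx;
        int err;

        drm_modeset_acquire_init(&ctx, 0);
        while (1) {
            err = drm_modeset_lock_all_ctx(dev, &ctx);
            if (!err)
                err = commit(dev, &ctx);
            if (err != -EDEADLK)
                break;
            /* Someone else holds one of our locks: drop and retry. */
            drm_modeset_backoff(&ctx);
        }
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);

        return err;
    }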
+20 -18
drivers/gpu/drm/drm_connector.c
··· 1229 1229 if (!connector) 1230 1230 return -ENOENT; 1231 1231 1232 - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1233 - encoder = drm_connector_get_encoder(connector); 1234 - if (encoder) 1235 - out_resp->encoder_id = encoder->base.id; 1236 - else 1237 - out_resp->encoder_id = 0; 1238 - 1239 - ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, 1240 - (uint32_t __user *)(unsigned long)(out_resp->props_ptr), 1241 - (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), 1242 - &out_resp->count_props); 1243 - drm_modeset_unlock(&dev->mode_config.connection_mutex); 1244 - if (ret) 1245 - goto out_unref; 1246 - 1247 1232 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) 1248 1233 if (connector->encoder_ids[i] != 0) 1249 1234 encoders_count++; ··· 1241 1256 if (put_user(connector->encoder_ids[i], 1242 1257 encoder_ptr + copied)) { 1243 1258 ret = -EFAULT; 1244 - goto out_unref; 1259 + goto out; 1245 1260 } 1246 1261 copied++; 1247 1262 } ··· 1285 1300 if (copy_to_user(mode_ptr + copied, 1286 1301 &u_mode, sizeof(u_mode))) { 1287 1302 ret = -EFAULT; 1303 + mutex_unlock(&dev->mode_config.mutex); 1304 + 1288 1305 goto out; 1289 1306 } 1290 1307 copied++; 1291 1308 } 1292 1309 } 1293 1310 out_resp->count_modes = mode_count; 1294 - out: 1295 1311 mutex_unlock(&dev->mode_config.mutex); 1296 - out_unref: 1312 + 1313 + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1314 + encoder = drm_connector_get_encoder(connector); 1315 + if (encoder) 1316 + out_resp->encoder_id = encoder->base.id; 1317 + else 1318 + out_resp->encoder_id = 0; 1319 + 1320 + /* Only grab properties after probing, to make sure EDID and other 1321 + * properties reflect the latest status. */ 1322 + ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, 1323 + (uint32_t __user *)(unsigned long)(out_resp->props_ptr), 1324 + (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), 1325 + &out_resp->count_props); 1326 + drm_modeset_unlock(&dev->mode_config.connection_mutex); 1327 + 1328 + out: 1297 1329 drm_connector_put(connector); 1298 1330 1299 1331 return ret;
+83
drivers/gpu/drm/drm_dp_helper.c
··· 1208 1208 return 0; 1209 1209 } 1210 1210 EXPORT_SYMBOL(drm_dp_stop_crc); 1211 + 1212 + struct dpcd_quirk { 1213 + u8 oui[3]; 1214 + bool is_branch; 1215 + u32 quirks; 1216 + }; 1217 + 1218 + #define OUI(first, second, third) { (first), (second), (third) } 1219 + 1220 + static const struct dpcd_quirk dpcd_quirk_list[] = { 1221 + /* Analogix 7737 needs reduced M and N at HBR2 link rates */ 1222 + { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) }, 1223 + }; 1224 + 1225 + #undef OUI 1226 + 1227 + /* 1228 + * Get a bit mask of DPCD quirks for the sink/branch device identified by 1229 + * ident. The quirk data is shared but it's up to the drivers to act on the 1230 + * data. 1231 + * 1232 + * For now, only the OUI (first three bytes) is used, but this may be extended 1233 + * to device identification string and hardware/firmware revisions later. 1234 + */ 1235 + static u32 1236 + drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch) 1237 + { 1238 + const struct dpcd_quirk *quirk; 1239 + u32 quirks = 0; 1240 + int i; 1241 + 1242 + for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) { 1243 + quirk = &dpcd_quirk_list[i]; 1244 + 1245 + if (quirk->is_branch != is_branch) 1246 + continue; 1247 + 1248 + if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0) 1249 + continue; 1250 + 1251 + quirks |= quirk->quirks; 1252 + } 1253 + 1254 + return quirks; 1255 + } 1256 + 1257 + /** 1258 + * drm_dp_read_desc - read sink/branch descriptor from DPCD 1259 + * @aux: DisplayPort AUX channel 1260 + * @desc: Device decriptor to fill from DPCD 1261 + * @is_branch: true for branch devices, false for sink devices 1262 + * 1263 + * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the 1264 + * identification. 1265 + * 1266 + * Returns 0 on success or a negative error code on failure. 1267 + */ 1268 + int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, 1269 + bool is_branch) 1270 + { 1271 + struct drm_dp_dpcd_ident *ident = &desc->ident; 1272 + unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI; 1273 + int ret, dev_id_len; 1274 + 1275 + ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident)); 1276 + if (ret < 0) 1277 + return ret; 1278 + 1279 + desc->quirks = drm_dp_get_quirks(ident, is_branch); 1280 + 1281 + dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id)); 1282 + 1283 + DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n", 1284 + is_branch ? "branch" : "sink", 1285 + (int)sizeof(ident->oui), ident->oui, 1286 + dev_id_len, ident->device_id, 1287 + ident->hw_rev >> 4, ident->hw_rev & 0xf, 1288 + ident->sw_major_rev, ident->sw_minor_rev, 1289 + desc->quirks); 1290 + 1291 + return 0; 1292 + } 1293 + EXPORT_SYMBOL(drm_dp_read_desc);
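drm_dp_read_desc() stores the raw descriptor and a quirk bitmask derived from the OUI table; drivers then test individual bits, as the i915 hunks further down do with DP_DPCD_QUIRK_LIMITED_M_N. A hedged sketch of the intended consumer side; example_sink_needs_limited_m_n() is an invented wrapper around the two helpers added here.

    #include <drm/drm_dp_helper.h>

    /*
     * Sketch: read the sink/branch descriptor once after DPCD probing and
     * report whether the limited-M/N quirk applies to this device.
     */
    static bool example_sink_needs_limited_m_n(struct drm_dp_aux *aux,
                                               struct drm_dp_desc *desc,
                                               bool is_branch)
    {
        if (drm_dp_read_desc(aux, desc, is_branch) < 0)
            return false;   /* no descriptor: assume no quirks */

        return drm_dp_has_quirk(desc, DP_DPCD_QUIRK_LIMITED_M_N);
    }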
+6 -1
drivers/gpu/drm/drm_drv.c
··· 358 358 void drm_unplug_dev(struct drm_device *dev) 359 359 { 360 360 /* for a USB device */ 361 - drm_dev_unregister(dev); 361 + if (drm_core_check_feature(dev, DRIVER_MODESET)) 362 + drm_modeset_unregister_all(dev); 363 + 364 + drm_minor_unregister(dev, DRM_MINOR_PRIMARY); 365 + drm_minor_unregister(dev, DRM_MINOR_RENDER); 366 + drm_minor_unregister(dev, DRM_MINOR_CONTROL); 362 367 363 368 mutex_lock(&drm_global_mutex); 364 369
+2 -1
drivers/gpu/drm/etnaviv/etnaviv_gem.h
··· 106 106 struct etnaviv_gpu *gpu; 107 107 struct ww_acquire_ctx ticket; 108 108 struct dma_fence *fence; 109 + u32 flags; 109 110 unsigned int nr_bos; 110 111 struct etnaviv_gem_submit_bo bos[0]; 111 - u32 flags; 112 + /* No new members here, the previous one is variable-length! */ 112 113 }; 113 114 114 115 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
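The etnaviv_gem.h fix works because a variable-length tail such as bos[0] must be the last member: any field placed after it shares storage with the trailing elements. A standalone illustration of the allocation pattern (C99 spells the flexible member bos[] rather than the kernel's bos[0]):

    #include <stdio.h>
    #include <stdlib.h>

    struct bo { unsigned int handle; };

    struct submit {
        unsigned int flags;    /* fixed-size members come first */
        unsigned int nr_bos;
        struct bo    bos[];    /* flexible array member: must be last */
    };

    static struct submit *submit_alloc(unsigned int nr_bos)
    {
        struct submit *s = calloc(1, sizeof(*s) + nr_bos * sizeof(s->bos[0]));

        if (s)
            s->nr_bos = nr_bos;
        return s;
    }

    int main(void)
    {
        struct submit *s = submit_alloc(4);

        if (!s)
            return 1;
        s->flags = 0x1;
        s->bos[3].handle = 42;  /* safely inside the allocation */
        printf("flags=%u last handle=%u\n", s->flags, s->bos[3].handle);
        free(s);
        return 0;
    }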
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 172 172 for (i = 0; i < submit->nr_bos; i++) { 173 173 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; 174 174 bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE; 175 - bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); 175 + bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT); 176 176 177 177 ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write, 178 178 explicit);
+1 -7
drivers/gpu/drm/exynos/exynos_drm_drv.c
··· 82 82 return ret; 83 83 } 84 84 85 - static void exynos_drm_preclose(struct drm_device *dev, 86 - struct drm_file *file) 87 - { 88 - exynos_drm_subdrv_close(dev, file); 89 - } 90 - 91 85 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) 92 86 { 87 + exynos_drm_subdrv_close(dev, file); 93 88 kfree(file->driver_priv); 94 89 file->driver_priv = NULL; 95 90 } ··· 140 145 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME 141 146 | DRIVER_ATOMIC | DRIVER_RENDER, 142 147 .open = exynos_drm_open, 143 - .preclose = exynos_drm_preclose, 144 148 .lastclose = exynos_drm_lastclose, 145 149 .postclose = exynos_drm_postclose, 146 150 .gem_free_object_unlocked = exynos_drm_gem_free_object,
+1 -4
drivers/gpu/drm/exynos/exynos_drm_drv.h
··· 160 160 * drm framework doesn't support multiple irq yet. 161 161 * we can refer to the crtc to current hardware interrupt occurred through 162 162 * this pipe value. 163 - * @enabled: if the crtc is enabled or not 164 - * @event: vblank event that is currently queued for flip 165 - * @wait_update: wait all pending planes updates to finish 166 - * @pending_update: number of pending plane updates in this crtc 167 163 * @ops: pointer to callbacks for exynos drm specific functionality 168 164 * @ctx: A pointer to the crtc's implementation specific context 165 + * @pipe_clk: A pointer to the crtc's pipeline clock. 169 166 */ 170 167 struct exynos_drm_crtc { 171 168 struct drm_crtc base;
+9 -17
drivers/gpu/drm/exynos/exynos_drm_dsi.c
··· 1633 1633 { 1634 1634 struct device *dev = dsi->dev; 1635 1635 struct device_node *node = dev->of_node; 1636 - struct device_node *ep; 1637 1636 int ret; 1638 1637 1639 1638 ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", ··· 1640 1641 if (ret < 0) 1641 1642 return ret; 1642 1643 1643 - ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0); 1644 - if (!ep) { 1645 - dev_err(dev, "no output port with endpoint specified\n"); 1646 - return -EINVAL; 1647 - } 1648 - 1649 - ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency", 1644 + ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency", 1650 1645 &dsi->burst_clk_rate); 1651 1646 if (ret < 0) 1652 - goto end; 1647 + return ret; 1653 1648 1654 - ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency", 1649 + ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency", 1655 1650 &dsi->esc_clk_rate); 1656 1651 if (ret < 0) 1657 - goto end; 1658 - 1659 - of_node_put(ep); 1652 + return ret; 1660 1653 1661 1654 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0); 1662 1655 if (!dsi->bridge_node) 1663 1656 return -EINVAL; 1664 1657 1665 - end: 1666 - of_node_put(ep); 1667 - 1668 - return ret; 1658 + return 0; 1669 1659 } 1670 1660 1671 1661 static int exynos_dsi_bind(struct device *dev, struct device *master, ··· 1805 1817 1806 1818 static int exynos_dsi_remove(struct platform_device *pdev) 1807 1819 { 1820 + struct exynos_dsi *dsi = platform_get_drvdata(pdev); 1821 + 1822 + of_node_put(dsi->bridge_node); 1823 + 1808 1824 pm_runtime_disable(&pdev->dev); 1809 1825 1810 1826 component_del(&pdev->dev, &exynos_dsi_component_ops);
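The exynos_dsi cleanup keeps the bridge node reference taken at parse time and drops it in remove(), since of_graph_get_remote_node() returns a node with its refcount raised. A hedged kernel-style sketch of the get/put pairing; example_ctx and the two functions are invented, the OF graph calls are the real API.

    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/of_graph.h>

    struct example_ctx {
        struct device_node *bridge_node;
    };

    static int example_parse_dt(struct example_ctx *ctx, struct device_node *np)
    {
        /* The returned node, if any, holds a reference we must drop later. */
        ctx->bridge_node = of_graph_get_remote_node(np, 0, 0);
        if (!ctx->bridge_node)
            return -EINVAL;
        return 0;
    }

    static void example_cleanup(struct example_ctx *ctx)
    {
        of_node_put(ctx->bridge_node);  /* balances the get in parse_dt */
        ctx->bridge_node = NULL;
    }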
+1 -1
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
··· 760 760 * Get the endpoint node. In our case, dsi has one output port1 761 761 * to which the external HDMI bridge is connected. 762 762 */ 763 - ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge); 763 + ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge); 764 764 if (ret) 765 765 return ret; 766 766
+20 -10
drivers/gpu/drm/i915/gvt/execlist.c
··· 779 779 vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; 780 780 } 781 781 782 + static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) 783 + { 784 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 785 + struct intel_engine_cs *engine; 786 + struct intel_vgpu_workload *pos, *n; 787 + unsigned int tmp; 788 + 789 + /* free the unsubmited workloads in the queues. */ 790 + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 791 + list_for_each_entry_safe(pos, n, 792 + &vgpu->workload_q_head[engine->id], list) { 793 + list_del_init(&pos->list); 794 + free_workload(pos); 795 + } 796 + } 797 + } 798 + 782 799 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) 783 800 { 801 + clean_workloads(vgpu, ALL_ENGINES); 784 802 kmem_cache_destroy(vgpu->workloads); 785 803 } 786 804 ··· 829 811 { 830 812 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 831 813 struct intel_engine_cs *engine; 832 - struct intel_vgpu_workload *pos, *n; 833 814 unsigned int tmp; 834 815 835 - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 836 - /* free the unsubmited workload in the queue */ 837 - list_for_each_entry_safe(pos, n, 838 - &vgpu->workload_q_head[engine->id], list) { 839 - list_del_init(&pos->list); 840 - free_workload(pos); 841 - } 842 - 816 + clean_workloads(vgpu, engine_mask); 817 + for_each_engine_masked(engine, dev_priv, engine_mask, tmp) 843 818 init_vgpu_execlist(vgpu, engine->id); 844 - } 845 819 }
+21 -9
drivers/gpu/drm/i915/gvt/handlers.c
··· 1366 1366 void *p_data, unsigned int bytes) 1367 1367 { 1368 1368 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1369 - i915_reg_t reg = {.reg = offset}; 1369 + u32 v = *(u32 *)p_data; 1370 + 1371 + if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) 1372 + return intel_vgpu_default_mmio_write(vgpu, 1373 + offset, p_data, bytes); 1370 1374 1371 1375 switch (offset) { 1372 1376 case 0x4ddc: 1373 - vgpu_vreg(vgpu, offset) = 0x8000003c; 1374 - /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */ 1375 - I915_WRITE(reg, vgpu_vreg(vgpu, offset)); 1377 + /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1378 + vgpu_vreg(vgpu, offset) = v & ~(1 << 31); 1376 1379 break; 1377 1380 case 0x42080: 1378 - vgpu_vreg(vgpu, offset) = 0x8000; 1379 - /* WaCompressedResourceDisplayNewHashMode:skl */ 1380 - I915_WRITE(reg, vgpu_vreg(vgpu, offset)); 1381 + /* bypass WaCompressedResourceDisplayNewHashMode */ 1382 + vgpu_vreg(vgpu, offset) = v & ~(1 << 15); 1383 + break; 1384 + case 0xe194: 1385 + /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1386 + vgpu_vreg(vgpu, offset) = v & ~(1 << 8); 1387 + break; 1388 + case 0x7014: 1389 + /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1390 + vgpu_vreg(vgpu, offset) = v & ~(1 << 13); 1381 1391 break; 1382 1392 default: 1383 1393 return -EINVAL; ··· 1644 1634 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); 1645 1635 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1646 1636 NULL, NULL); 1647 - MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1637 + MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, 1638 + skl_misc_ctl_write); 1648 1639 MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); 1649 1640 MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); 1650 1641 MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); ··· 2579 2568 MMIO_D(0x6e570, D_BDW_PLUS); 2580 2569 MMIO_D(0x65f10, D_BDW_PLUS); 2581 2570 2582 - MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2571 + MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, 2572 + skl_misc_ctl_write); 2583 2573 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2584 2574 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2585 2575 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+5 -1
drivers/gpu/drm/i915/i915_debugfs.c
··· 292 292 struct file_stats *stats = data; 293 293 struct i915_vma *vma; 294 294 295 + lockdep_assert_held(&obj->base.dev->struct_mutex); 296 + 295 297 stats->count++; 296 298 stats->total += obj->base.size; 297 299 if (!obj->bind_count) ··· 478 476 struct drm_i915_gem_request *request; 479 477 struct task_struct *task; 480 478 479 + mutex_lock(&dev->struct_mutex); 480 + 481 481 memset(&stats, 0, sizeof(stats)); 482 482 stats.file_priv = file->driver_priv; 483 483 spin_lock(&file->table_lock); ··· 491 487 * still alive (e.g. get_pid(current) => fork() => exit()). 492 488 * Therefore, we need to protect this ->comm access using RCU. 493 489 */ 494 - mutex_lock(&dev->struct_mutex); 495 490 request = list_first_entry_or_null(&file_priv->mm.request_list, 496 491 struct drm_i915_gem_request, 497 492 client_link); ··· 500 497 PIDTYPE_PID); 501 498 print_file_stats(m, task ? task->comm : "<unknown>", stats); 502 499 rcu_read_unlock(); 500 + 503 501 mutex_unlock(&dev->struct_mutex); 504 502 } 505 503 mutex_unlock(&dev->filelist_mutex);
+9 -4
drivers/gpu/drm/i915/i915_drv.c
··· 1235 1235 goto out_fini; 1236 1236 1237 1237 pci_set_drvdata(pdev, &dev_priv->drm); 1238 + /* 1239 + * Disable the system suspend direct complete optimization, which can 1240 + * leave the device suspended skipping the driver's suspend handlers 1241 + * if the device was already runtime suspended. This is needed due to 1242 + * the difference in our runtime and system suspend sequence and 1243 + * becaue the HDA driver may require us to enable the audio power 1244 + * domain during system suspend. 1245 + */ 1246 + pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; 1238 1247 1239 1248 ret = i915_driver_init_early(dev_priv, ent); 1240 1249 if (ret < 0) ··· 1281 1272 1282 1273 dev_priv->ipc_enabled = false; 1283 1274 1284 - /* Everything is in place, we can now relax! */ 1285 - DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", 1286 - driver.name, driver.major, driver.minor, driver.patchlevel, 1287 - driver.date, pci_name(pdev), dev_priv->drm.primary->index); 1288 1275 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) 1289 1276 DRM_INFO("DRM_I915_DEBUG enabled\n"); 1290 1277 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+12 -1
drivers/gpu/drm/i915/i915_drv.h
··· 562 562 563 563 void intel_link_compute_m_n(int bpp, int nlanes, 564 564 int pixel_clock, int link_clock, 565 - struct intel_link_m_n *m_n); 565 + struct intel_link_m_n *m_n, 566 + bool reduce_m_n); 566 567 567 568 /* Interface history: 568 569 * ··· 2986 2985 { 2987 2986 #ifdef CONFIG_INTEL_IOMMU 2988 2987 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped) 2988 + return true; 2989 + #endif 2990 + return false; 2991 + } 2992 + 2993 + static inline bool 2994 + intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) 2995 + { 2996 + #ifdef CONFIG_INTEL_IOMMU 2997 + if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped) 2989 2998 return true; 2990 2999 #endif 2991 3000 return false;
+45 -20
drivers/gpu/drm/i915/i915_gem.c
··· 2285 2285 struct page *page; 2286 2286 unsigned long last_pfn = 0; /* suppress gcc warning */ 2287 2287 unsigned int max_segment; 2288 + gfp_t noreclaim; 2288 2289 int ret; 2289 - gfp_t gfp; 2290 2290 2291 2291 /* Assert that the object is not currently in any GPU domain. As it 2292 2292 * wasn't in the GTT, there shouldn't be any way it could have been in ··· 2315 2315 * Fail silently without starting the shrinker 2316 2316 */ 2317 2317 mapping = obj->base.filp->f_mapping; 2318 - gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); 2319 - gfp |= __GFP_NORETRY | __GFP_NOWARN; 2318 + noreclaim = mapping_gfp_constraint(mapping, 2319 + ~(__GFP_IO | __GFP_RECLAIM)); 2320 + noreclaim |= __GFP_NORETRY | __GFP_NOWARN; 2321 + 2320 2322 sg = st->sgl; 2321 2323 st->nents = 0; 2322 2324 for (i = 0; i < page_count; i++) { 2323 - page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2324 - if (unlikely(IS_ERR(page))) { 2325 - i915_gem_shrink(dev_priv, 2326 - page_count, 2327 - I915_SHRINK_BOUND | 2328 - I915_SHRINK_UNBOUND | 2329 - I915_SHRINK_PURGEABLE); 2325 + const unsigned int shrink[] = { 2326 + I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE, 2327 + 0, 2328 + }, *s = shrink; 2329 + gfp_t gfp = noreclaim; 2330 + 2331 + do { 2330 2332 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2331 - } 2332 - if (unlikely(IS_ERR(page))) { 2333 - gfp_t reclaim; 2333 + if (likely(!IS_ERR(page))) 2334 + break; 2335 + 2336 + if (!*s) { 2337 + ret = PTR_ERR(page); 2338 + goto err_sg; 2339 + } 2340 + 2341 + i915_gem_shrink(dev_priv, 2 * page_count, *s++); 2342 + cond_resched(); 2334 2343 2335 2344 /* We've tried hard to allocate the memory by reaping 2336 2345 * our own buffer, now let the real VM do its job and ··· 2349 2340 * defer the oom here by reporting the ENOMEM back 2350 2341 * to userspace. 2351 2342 */ 2352 - reclaim = mapping_gfp_mask(mapping); 2353 - reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ 2343 + if (!*s) { 2344 + /* reclaim and warn, but no oom */ 2345 + gfp = mapping_gfp_mask(mapping); 2354 2346 2355 - page = shmem_read_mapping_page_gfp(mapping, i, reclaim); 2356 - if (IS_ERR(page)) { 2357 - ret = PTR_ERR(page); 2358 - goto err_sg; 2347 + /* Our bo are always dirty and so we require 2348 + * kswapd to reclaim our pages (direct reclaim 2349 + * does not effectively begin pageout of our 2350 + * buffers on its own). However, direct reclaim 2351 + * only waits for kswapd when under allocation 2352 + * congestion. So as a result __GFP_RECLAIM is 2353 + * unreliable and fails to actually reclaim our 2354 + * dirty pages -- unless you try over and over 2355 + * again with !__GFP_NORETRY. However, we still 2356 + * want to fail this allocation rather than 2357 + * trigger the out-of-memory killer and for 2358 + * this we want the future __GFP_MAYFAIL. 2359 + */ 2359 2360 } 2360 - } 2361 + } while (1); 2362 + 2361 2363 if (!i || 2362 2364 sg->length >= max_segment || 2363 2365 page_to_pfn(page) != last_pfn + 1) { ··· 3318 3298 { 3319 3299 int ret; 3320 3300 3301 + /* If the device is asleep, we have no requests outstanding */ 3302 + if (!READ_ONCE(i915->gt.awake)) 3303 + return 0; 3304 + 3321 3305 if (flags & I915_WAIT_LOCKED) { 3322 3306 struct i915_gem_timeline *tl; 3323 3307 ··· 4242 4218 4243 4219 mapping = obj->base.filp->f_mapping; 4244 4220 mapping_set_gfp_mask(mapping, mask); 4221 + GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); 4245 4222 4246 4223 i915_gem_object_init(obj, &i915_gem_object_ops); 4247 4224
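The i915_gem.c get_pages rework retries the shmem allocation with a short table of increasingly aggressive shrink passes and only relaxes the gfp constraints for the final attempt. A standalone sketch of that escalating-fallback loop shape; try_alloc() and shrink() are stand-ins for the shmem allocator and the i915 shrinker, not the real calls.

    #include <stdbool.h>
    #include <stdio.h>

    enum { SHRINK_PURGEABLE = 1, SHRINK_ALL = 2 };

    /* Stand-ins: the fake allocator succeeds on the third attempt. */
    static bool try_alloc(bool relaxed) { static int n; return relaxed || ++n > 2; }
    static void shrink(int how)         { printf("shrink pass %d\n", how); }

    static int alloc_with_fallbacks(void)
    {
        static const int passes[] = { SHRINK_PURGEABLE, SHRINK_ALL, 0 };
        const int *p = passes;
        bool relaxed = false;

        do {
            if (try_alloc(relaxed))
                return 0;          /* success */

            if (!*p)
                return -1;         /* every fallback exhausted */

            shrink(*p++);          /* reclaim some memory, then retry */

            if (!*p)
                relaxed = true;    /* last try: loosen the constraints */
        } while (1);
    }

    int main(void)
    {
        return alloc_with_fallbacks();
    }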
+14 -3
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 546 546 } 547 547 548 548 static int 549 - i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, 549 + i915_gem_execbuffer_relocate_entry(struct i915_vma *vma, 550 550 struct eb_vmas *eb, 551 551 struct drm_i915_gem_relocation_entry *reloc, 552 552 struct reloc_cache *cache) 553 553 { 554 + struct drm_i915_gem_object *obj = vma->obj; 554 555 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 555 556 struct drm_gem_object *target_obj; 556 557 struct drm_i915_gem_object *target_i915_obj; ··· 629 628 return -EINVAL; 630 629 } 631 630 631 + /* 632 + * If we write into the object, we need to force the synchronisation 633 + * barrier, either with an asynchronous clflush or if we executed the 634 + * patching using the GPU (though that should be serialised by the 635 + * timeline). To be completely sure, and since we are required to 636 + * do relocations we are already stalling, disable the user's opt 637 + * of our synchronisation. 638 + */ 639 + vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC; 640 + 632 641 ret = relocate_entry(obj, reloc, cache, target_offset); 633 642 if (ret) 634 643 return ret; ··· 689 678 do { 690 679 u64 offset = r->presumed_offset; 691 680 692 - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache); 681 + ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache); 693 682 if (ret) 694 683 goto out; 695 684 ··· 737 726 738 727 reloc_cache_init(&cache, eb->i915); 739 728 for (i = 0; i < entry->relocation_count; i++) { 740 - ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache); 729 + ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache); 741 730 if (ret) 742 731 break; 743 732 }
+106 -2
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 2191 2191 gen8_set_pte(&gtt_base[i], scratch_pte); 2192 2192 } 2193 2193 2194 + static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) 2195 + { 2196 + struct drm_i915_private *dev_priv = vm->i915; 2197 + 2198 + /* 2199 + * Make sure the internal GAM fifo has been cleared of all GTT 2200 + * writes before exiting stop_machine(). This guarantees that 2201 + * any aperture accesses waiting to start in another process 2202 + * cannot back up behind the GTT writes causing a hang. 2203 + * The register can be any arbitrary GAM register. 2204 + */ 2205 + POSTING_READ(GFX_FLSH_CNTL_GEN6); 2206 + } 2207 + 2208 + struct insert_page { 2209 + struct i915_address_space *vm; 2210 + dma_addr_t addr; 2211 + u64 offset; 2212 + enum i915_cache_level level; 2213 + }; 2214 + 2215 + static int bxt_vtd_ggtt_insert_page__cb(void *_arg) 2216 + { 2217 + struct insert_page *arg = _arg; 2218 + 2219 + gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); 2220 + bxt_vtd_ggtt_wa(arg->vm); 2221 + 2222 + return 0; 2223 + } 2224 + 2225 + static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, 2226 + dma_addr_t addr, 2227 + u64 offset, 2228 + enum i915_cache_level level, 2229 + u32 unused) 2230 + { 2231 + struct insert_page arg = { vm, addr, offset, level }; 2232 + 2233 + stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); 2234 + } 2235 + 2236 + struct insert_entries { 2237 + struct i915_address_space *vm; 2238 + struct sg_table *st; 2239 + u64 start; 2240 + enum i915_cache_level level; 2241 + }; 2242 + 2243 + static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) 2244 + { 2245 + struct insert_entries *arg = _arg; 2246 + 2247 + gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0); 2248 + bxt_vtd_ggtt_wa(arg->vm); 2249 + 2250 + return 0; 2251 + } 2252 + 2253 + static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, 2254 + struct sg_table *st, 2255 + u64 start, 2256 + enum i915_cache_level level, 2257 + u32 unused) 2258 + { 2259 + struct insert_entries arg = { vm, st, start, level }; 2260 + 2261 + stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); 2262 + } 2263 + 2264 + struct clear_range { 2265 + struct i915_address_space *vm; 2266 + u64 start; 2267 + u64 length; 2268 + }; 2269 + 2270 + static int bxt_vtd_ggtt_clear_range__cb(void *_arg) 2271 + { 2272 + struct clear_range *arg = _arg; 2273 + 2274 + gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); 2275 + bxt_vtd_ggtt_wa(arg->vm); 2276 + 2277 + return 0; 2278 + } 2279 + 2280 + static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, 2281 + u64 start, 2282 + u64 length) 2283 + { 2284 + struct clear_range arg = { vm, start, length }; 2285 + 2286 + stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); 2287 + } 2288 + 2194 2289 static void gen6_ggtt_clear_range(struct i915_address_space *vm, 2195 2290 u64 start, u64 length) 2196 2291 { ··· 2408 2313 appgtt->base.allocate_va_range) { 2409 2314 ret = appgtt->base.allocate_va_range(&appgtt->base, 2410 2315 vma->node.start, 2411 - vma->node.size); 2316 + vma->size); 2412 2317 if (ret) 2413 2318 goto err_pages; 2414 2319 } ··· 2880 2785 2881 2786 ggtt->base.insert_entries = gen8_ggtt_insert_entries; 2882 2787 2788 + /* Serialize GTT updates with aperture access on BXT if VT-d is on. 
*/ 2789 + if (intel_ggtt_update_needs_vtd_wa(dev_priv)) { 2790 + ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; 2791 + ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL; 2792 + if (ggtt->base.clear_range != nop_clear_range) 2793 + ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL; 2794 + } 2795 + 2883 2796 ggtt->invalidate = gen6_ggtt_invalidate; 2884 2797 2885 2798 return ggtt_probe_common(ggtt, size); ··· 3100 2997 3101 2998 void i915_ggtt_disable_guc(struct drm_i915_private *i915) 3102 2999 { 3103 - i915->ggtt.invalidate = gen6_ggtt_invalidate; 3000 + if (i915->ggtt.invalidate == guc_ggtt_invalidate) 3001 + i915->ggtt.invalidate = gen6_ggtt_invalidate; 3104 3002 } 3105 3003 3106 3004 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
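The BXT workaround above funnels each GGTT update through stop_machine() so no other CPU can touch the aperture while the update and its serialising read complete. A hedged sketch of the wrapper shape; the argument struct, callback, and plain pointer store are illustrative stand-ins for the real PTE write plus posting read.

    #include <linux/stop_machine.h>

    struct update_args {
        unsigned long *dst;
        unsigned long  value;
    };

    /* Runs with all other CPUs parked; do the update plus its fence. */
    static int do_update_cb(void *data)
    {
        struct update_args *args = data;

        *args->dst = args->value;
        /* The real workaround issues a posting register read here. */
        return 0;
    }

    /* Wrapper keeps the same shape as the fast path it replaces. */
    static void guarded_update(unsigned long *dst, unsigned long value)
    {
        struct update_args args = { .dst = dst, .value = value };

        /* NULL cpumask: run the callback on one CPU, stall the rest. */
        stop_machine(do_update_cb, &args, NULL);
    }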
+1 -1
drivers/gpu/drm/i915/i915_gem_request.c
··· 623 623 * GPU processing the request, we never over-estimate the 624 624 * position of the head. 625 625 */ 626 - req->head = req->ring->tail; 626 + req->head = req->ring->emit; 627 627 628 628 /* Check that we didn't interrupt ourselves with a new request */ 629 629 GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
-5
drivers/gpu/drm/i915/i915_gem_shrinker.c
··· 59 59 return; 60 60 61 61 mutex_unlock(&dev->struct_mutex); 62 - 63 - /* expedite the RCU grace period to free some request slabs */ 64 - synchronize_rcu_expedited(); 65 62 } 66 63 67 64 static bool any_vma_pinned(struct drm_i915_gem_object *obj) ··· 270 273 I915_SHRINK_UNBOUND | 271 274 I915_SHRINK_ACTIVE); 272 275 intel_runtime_pm_put(dev_priv); 273 - 274 - synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */ 275 276 276 277 return freed; 277 278 }
+1 -1
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 278 278 obj->mm.quirked = false; 279 279 } 280 280 if (!i915_gem_object_is_tiled(obj)) { 281 - GEM_BUG_ON(!obj->mm.quirked); 281 + GEM_BUG_ON(obj->mm.quirked); 282 282 __i915_gem_object_pin_pages(obj); 283 283 obj->mm.quirked = true; 284 284 }
+1 -3
drivers/gpu/drm/i915/i915_guc_submission.c
··· 480 480 GEM_BUG_ON(freespace < wqi_size); 481 481 482 482 /* The GuC firmware wants the tail index in QWords, not bytes */ 483 - tail = rq->tail; 484 - assert_ring_tail_valid(rq->ring, rq->tail); 485 - tail >>= 3; 483 + tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3; 486 484 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); 487 485 488 486 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
+6 -9
drivers/gpu/drm/i915/i915_irq.c
··· 2953 2953 u32 pipestat_mask; 2954 2954 u32 enable_mask; 2955 2955 enum pipe pipe; 2956 - u32 val; 2957 2956 2958 2957 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 2959 2958 PIPE_CRC_DONE_INTERRUPT_STATUS; ··· 2963 2964 2964 2965 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 2965 2966 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2966 - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2967 + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2968 + I915_LPE_PIPE_A_INTERRUPT | 2969 + I915_LPE_PIPE_B_INTERRUPT; 2970 + 2967 2971 if (IS_CHERRYVIEW(dev_priv)) 2968 - enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2972 + enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 2973 + I915_LPE_PIPE_C_INTERRUPT; 2969 2974 2970 2975 WARN_ON(dev_priv->irq_mask != ~0); 2971 - 2972 - val = (I915_LPE_PIPE_A_INTERRUPT | 2973 - I915_LPE_PIPE_B_INTERRUPT | 2974 - I915_LPE_PIPE_C_INTERRUPT); 2975 - 2976 - enable_mask |= val; 2977 2976 2978 2977 dev_priv->irq_mask = ~enable_mask; 2979 2978
+1 -2
drivers/gpu/drm/i915/i915_pci.c
··· 208 208 static const struct intel_device_info intel_ironlake_m_info = { 209 209 GEN5_FEATURES, 210 210 .platform = INTEL_IRONLAKE, 211 - .is_mobile = 1, 211 + .is_mobile = 1, .has_fbc = 1, 212 212 }; 213 213 214 214 #define GEN6_FEATURES \ ··· 390 390 .has_hw_contexts = 1, \ 391 391 .has_logical_ring_contexts = 1, \ 392 392 .has_guc = 1, \ 393 - .has_decoupled_mmio = 1, \ 394 393 .has_aliasing_ppgtt = 1, \ 395 394 .has_full_ppgtt = 1, \ 396 395 .has_full_48bit_ppgtt = 1, \
+2 -6
drivers/gpu/drm/i915/i915_pvinfo.h
··· 36 36 #define VGT_VERSION_MAJOR 1 37 37 #define VGT_VERSION_MINOR 0 38 38 39 - #define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) 40 - #define INTEL_VGT_IF_VERSION \ 41 - INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) 42 - 43 39 /* 44 40 * notifications from guest to vgpu device model 45 41 */ ··· 51 55 52 56 struct vgt_if { 53 57 u64 magic; /* VGT_MAGIC */ 54 - uint16_t version_major; 55 - uint16_t version_minor; 58 + u16 version_major; 59 + u16 version_minor; 56 60 u32 vgt_id; /* ID of vGT instance */ 57 61 u32 rsv1[12]; /* pad to offset 0x40 */ 58 62 /*
+1 -1
drivers/gpu/drm/i915/i915_reg.h
··· 8280 8280 8281 8281 /* MIPI DSI registers */ 8282 8282 8283 - #define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */ 8283 + #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ 8284 8284 #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) 8285 8285 8286 8286 #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
+4 -6
drivers/gpu/drm/i915/i915_vgpu.c
··· 60 60 */ 61 61 void i915_check_vgpu(struct drm_i915_private *dev_priv) 62 62 { 63 - uint64_t magic; 64 - uint32_t version; 63 + u64 magic; 64 + u16 version_major; 65 65 66 66 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); 67 67 ··· 69 69 if (magic != VGT_MAGIC) 70 70 return; 71 71 72 - version = INTEL_VGT_IF_VERSION_ENCODE( 73 - __raw_i915_read16(dev_priv, vgtif_reg(version_major)), 74 - __raw_i915_read16(dev_priv, vgtif_reg(version_minor))); 75 - if (version != INTEL_VGT_IF_VERSION) { 72 + version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major)); 73 + if (version_major < VGT_VERSION_MAJOR) { 76 74 DRM_INFO("VGT interface version mismatch!\n"); 77 75 return; 78 76 }
+5
drivers/gpu/drm/i915/i915_vma.c
··· 650 650 break; 651 651 } 652 652 653 + if (!ret) { 654 + ret = i915_gem_active_retire(&vma->last_fence, 655 + &vma->vm->i915->drm.struct_mutex); 656 + } 657 + 653 658 __i915_vma_unpin(vma); 654 659 if (ret) 655 660 return ret;
+48 -27
drivers/gpu/drm/i915/intel_display.c
··· 120 120 static void skylake_pfit_enable(struct intel_crtc *crtc); 121 121 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 122 122 static void ironlake_pfit_enable(struct intel_crtc *crtc); 123 - static void intel_modeset_setup_hw_state(struct drm_device *dev); 123 + static void intel_modeset_setup_hw_state(struct drm_device *dev, 124 + struct drm_modeset_acquire_ctx *ctx); 124 125 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 125 126 126 127 struct intel_limit { ··· 3450 3449 struct drm_crtc *crtc; 3451 3450 int i, ret; 3452 3451 3453 - intel_modeset_setup_hw_state(dev); 3452 + intel_modeset_setup_hw_state(dev, ctx); 3454 3453 i915_redisable_vga(to_i915(dev)); 3455 3454 3456 3455 if (!state) ··· 4599 4598 4600 4599 static int 4601 4600 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 4602 - unsigned scaler_user, int *scaler_id, unsigned int rotation, 4601 + unsigned int scaler_user, int *scaler_id, 4603 4602 int src_w, int src_h, int dst_w, int dst_h) 4604 4603 { 4605 4604 struct intel_crtc_scaler_state *scaler_state = ··· 4608 4607 to_intel_crtc(crtc_state->base.crtc); 4609 4608 int need_scaling; 4610 4609 4611 - need_scaling = drm_rotation_90_or_270(rotation) ? 4612 - (src_h != dst_w || src_w != dst_h): 4613 - (src_w != dst_w || src_h != dst_h); 4610 + /* 4611 + * Src coordinates are already rotated by 270 degrees for 4612 + * the 90/270 degree plane rotation cases (to match the 4613 + * GTT mapping), hence no need to account for rotation here. 4614 + */ 4615 + need_scaling = src_w != dst_w || src_h != dst_h; 4614 4616 4615 4617 /* 4616 4618 * if plane is being disabled or scaler is no more required or force detach ··· 4675 4671 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4676 4672 4677 4673 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4678 - &state->scaler_state.scaler_id, DRM_ROTATE_0, 4674 + &state->scaler_state.scaler_id, 4679 4675 state->pipe_src_w, state->pipe_src_h, 4680 4676 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); 4681 4677 } ··· 4704 4700 ret = skl_update_scaler(crtc_state, force_detach, 4705 4701 drm_plane_index(&intel_plane->base), 4706 4702 &plane_state->scaler_id, 4707 - plane_state->base.rotation, 4708 4703 drm_rect_width(&plane_state->base.src) >> 16, 4709 4704 drm_rect_height(&plane_state->base.src) >> 16, 4710 4705 drm_rect_width(&plane_state->base.dst), ··· 5826 5823 intel_update_watermarks(intel_crtc); 5827 5824 } 5828 5825 5829 - static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) 5826 + static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, 5827 + struct drm_modeset_acquire_ctx *ctx) 5830 5828 { 5831 5829 struct intel_encoder *encoder; 5832 5830 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ··· 5857 5853 return; 5858 5854 } 5859 5855 5860 - state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; 5856 + state->acquire_ctx = ctx; 5861 5857 5862 5858 /* Everything's already locked, -EDEADLK can't happen. 
*/ 5863 5859 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); ··· 6105 6101 pipe_config->fdi_lanes = lane; 6106 6102 6107 6103 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 6108 - link_bw, &pipe_config->fdi_m_n); 6104 + link_bw, &pipe_config->fdi_m_n, false); 6109 6105 6110 6106 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 6111 6107 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { ··· 6281 6277 } 6282 6278 6283 6279 static void compute_m_n(unsigned int m, unsigned int n, 6284 - uint32_t *ret_m, uint32_t *ret_n) 6280 + uint32_t *ret_m, uint32_t *ret_n, 6281 + bool reduce_m_n) 6285 6282 { 6286 6283 /* 6287 6284 * Reduce M/N as much as possible without loss in precision. Several DP ··· 6290 6285 * values. The passed in values are more likely to have the least 6291 6286 * significant bits zero than M after rounding below, so do this first. 6292 6287 */ 6293 - while ((m & 1) == 0 && (n & 1) == 0) { 6294 - m >>= 1; 6295 - n >>= 1; 6288 + if (reduce_m_n) { 6289 + while ((m & 1) == 0 && (n & 1) == 0) { 6290 + m >>= 1; 6291 + n >>= 1; 6292 + } 6296 6293 } 6297 6294 6298 6295 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); ··· 6305 6298 void 6306 6299 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 6307 6300 int pixel_clock, int link_clock, 6308 - struct intel_link_m_n *m_n) 6301 + struct intel_link_m_n *m_n, 6302 + bool reduce_m_n) 6309 6303 { 6310 6304 m_n->tu = 64; 6311 6305 6312 6306 compute_m_n(bits_per_pixel * pixel_clock, 6313 6307 link_clock * nlanes * 8, 6314 - &m_n->gmch_m, &m_n->gmch_n); 6308 + &m_n->gmch_m, &m_n->gmch_n, 6309 + reduce_m_n); 6315 6310 6316 6311 compute_m_n(pixel_clock, link_clock, 6317 - &m_n->link_m, &m_n->link_n); 6312 + &m_n->link_m, &m_n->link_n, 6313 + reduce_m_n); 6318 6314 } 6319 6315 6320 6316 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) ··· 12207 12197 * type. For DP ports it behaves like most other platforms, but on HDMI 12208 12198 * there's an extra 1 line difference. So we need to add two instead of 12209 12199 * one to the value. 12200 + * 12201 + * On VLV/CHV DSI the scanline counter would appear to increment 12202 + * approx. 1/3 of a scanline before start of vblank. Unfortunately 12203 + * that means we can't tell whether we're in vblank or not while 12204 + * we're on that particular line. We must still set scanline_offset 12205 + * to 1 so that the vblank timestamps come out correct when we query 12206 + * the scanline counter from within the vblank interrupt handler. 12207 + * However if queried just before the start of vblank we'll get an 12208 + * answer that's slightly in the future. 
12210 12209 */ 12211 12210 if (IS_GEN2(dev_priv)) { 12212 12211 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; ··· 15032 15013 intel_setup_outputs(dev_priv); 15033 15014 15034 15015 drm_modeset_lock_all(dev); 15035 - intel_modeset_setup_hw_state(dev); 15016 + intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 15036 15017 drm_modeset_unlock_all(dev); 15037 15018 15038 15019 for_each_intel_crtc(dev, crtc) { ··· 15069 15050 return 0; 15070 15051 } 15071 15052 15072 - static void intel_enable_pipe_a(struct drm_device *dev) 15053 + static void intel_enable_pipe_a(struct drm_device *dev, 15054 + struct drm_modeset_acquire_ctx *ctx) 15073 15055 { 15074 15056 struct intel_connector *connector; 15075 15057 struct drm_connector_list_iter conn_iter; 15076 15058 struct drm_connector *crt = NULL; 15077 15059 struct intel_load_detect_pipe load_detect_temp; 15078 - struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; 15079 15060 int ret; 15080 15061 15081 15062 /* We can't just switch on the pipe A, we need to set things up with a ··· 15147 15128 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); 15148 15129 } 15149 15130 15150 - static void intel_sanitize_crtc(struct intel_crtc *crtc) 15131 + static void intel_sanitize_crtc(struct intel_crtc *crtc, 15132 + struct drm_modeset_acquire_ctx *ctx) 15151 15133 { 15152 15134 struct drm_device *dev = crtc->base.dev; 15153 15135 struct drm_i915_private *dev_priv = to_i915(dev); ··· 15194 15174 plane = crtc->plane; 15195 15175 crtc->base.primary->state->visible = true; 15196 15176 crtc->plane = !plane; 15197 - intel_crtc_disable_noatomic(&crtc->base); 15177 + intel_crtc_disable_noatomic(&crtc->base, ctx); 15198 15178 crtc->plane = plane; 15199 15179 } 15200 15180 ··· 15204 15184 * resume. Force-enable the pipe to fix this, the update_dpms 15205 15185 * call below we restore the pipe to the right state, but leave 15206 15186 * the required bits on. */ 15207 - intel_enable_pipe_a(dev); 15187 + intel_enable_pipe_a(dev, ctx); 15208 15188 } 15209 15189 15210 15190 /* Adjust the state of the output pipe according to whether we 15211 15191 * have active connectors/encoders. */ 15212 15192 if (crtc->active && !intel_crtc_has_encoders(crtc)) 15213 - intel_crtc_disable_noatomic(&crtc->base); 15193 + intel_crtc_disable_noatomic(&crtc->base, ctx); 15214 15194 15215 15195 if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { 15216 15196 /* ··· 15508 15488 * and sanitizes it to the current state 15509 15489 */ 15510 15490 static void 15511 - intel_modeset_setup_hw_state(struct drm_device *dev) 15491 + intel_modeset_setup_hw_state(struct drm_device *dev, 15492 + struct drm_modeset_acquire_ctx *ctx) 15512 15493 { 15513 15494 struct drm_i915_private *dev_priv = to_i915(dev); 15514 15495 enum pipe pipe; ··· 15529 15508 for_each_pipe(dev_priv, pipe) { 15530 15509 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15531 15510 15532 - intel_sanitize_crtc(crtc); 15511 + intel_sanitize_crtc(crtc, ctx); 15533 15512 intel_dump_pipe_config(crtc, crtc->config, 15534 15513 "[setup_hw_state]"); 15535 15514 }
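compute_m_n() in the intel_display.c hunk now strips common factors of two from M and N only when the new reduce_m_n flag allows it; sinks flagged with DP_DPCD_QUIRK_LIMITED_M_N keep the unreduced values. A standalone sketch of the reduction itself, with made-up link numbers:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Drop common factors of two: the M/N ratio is unchanged but the
     * values fit more comfortably into limited-width registers.
     */
    static void reduce_ratio(uint32_t *m, uint32_t *n)
    {
        while ((*m & 1) == 0 && (*n & 1) == 0) {
            *m >>= 1;
            *n >>= 1;
        }
    }

    int main(void)
    {
        uint32_t m = 24 * 148500, n = 270000 * 4 * 8;  /* illustrative */

        printf("before: m=%u n=%u\n", m, n);
        reduce_ratio(&m, &n);
        printf("after:  m=%u n=%u\n", m, n);
        return 0;
    }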
+10 -35
drivers/gpu/drm/i915/intel_dp.c
··· 1507 1507 DRM_DEBUG_KMS("common rates: %s\n", str); 1508 1508 } 1509 1509 1510 - bool 1511 - __intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc) 1512 - { 1513 - u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI : 1514 - DP_SINK_OUI; 1515 - 1516 - return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) == 1517 - sizeof(*desc); 1518 - } 1519 - 1520 - bool intel_dp_read_desc(struct intel_dp *intel_dp) 1521 - { 1522 - struct intel_dp_desc *desc = &intel_dp->desc; 1523 - bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & 1524 - DP_OUI_SUPPORT; 1525 - int dev_id_len; 1526 - 1527 - if (!__intel_dp_read_desc(intel_dp, desc)) 1528 - return false; 1529 - 1530 - dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id)); 1531 - DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n", 1532 - drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink", 1533 - (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)", 1534 - dev_id_len, desc->device_id, 1535 - desc->hw_rev >> 4, desc->hw_rev & 0xf, 1536 - desc->sw_major_rev, desc->sw_minor_rev); 1537 - 1538 - return true; 1539 - } 1540 - 1541 1510 static int rate_to_index(int find, const int *rates) 1542 1511 { 1543 1512 int i = 0; ··· 1593 1624 int common_rates[DP_MAX_SUPPORTED_RATES] = {}; 1594 1625 int common_len; 1595 1626 uint8_t link_bw, rate_select; 1627 + bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, 1628 + DP_DPCD_QUIRK_LIMITED_M_N); 1596 1629 1597 1630 common_len = intel_dp_common_rates(intel_dp, common_rates); 1598 1631 ··· 1724 1753 intel_link_compute_m_n(bpp, lane_count, 1725 1754 adjusted_mode->crtc_clock, 1726 1755 pipe_config->port_clock, 1727 - &pipe_config->dp_m_n); 1756 + &pipe_config->dp_m_n, 1757 + reduce_m_n); 1728 1758 1729 1759 if (intel_connector->panel.downclock_mode != NULL && 1730 1760 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { ··· 1733 1761 intel_link_compute_m_n(bpp, lane_count, 1734 1762 intel_connector->panel.downclock_mode->clock, 1735 1763 pipe_config->port_clock, 1736 - &pipe_config->dp_m2_n2); 1764 + &pipe_config->dp_m2_n2, 1765 + reduce_m_n); 1737 1766 } 1738 1767 1739 1768 /* ··· 3595 3622 if (!intel_dp_read_dpcd(intel_dp)) 3596 3623 return false; 3597 3624 3598 - intel_dp_read_desc(intel_dp); 3625 + drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 3626 + drm_dp_is_branch(intel_dp->dpcd)); 3599 3627 3600 3628 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 3601 3629 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & ··· 4598 4624 4599 4625 intel_dp_print_rates(intel_dp); 4600 4626 4601 - intel_dp_read_desc(intel_dp); 4627 + drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 4628 + drm_dp_is_branch(intel_dp->dpcd)); 4602 4629 4603 4630 intel_dp_configure_mst(intel_dp); 4604 4631
-2
drivers/gpu/drm/i915/intel_dp_aux_backlight.c
··· 119 119 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base); 120 120 struct intel_panel *panel = &connector->panel; 121 121 122 - intel_dp_aux_enable_backlight(connector); 123 - 124 122 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) 125 123 panel->backlight.max = 0xFFFF; 126 124 else
+4 -1
drivers/gpu/drm/i915/intel_dp_mst.c
··· 44 44 int lane_count, slots; 45 45 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 46 46 int mst_pbn; 47 + bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, 48 + DP_DPCD_QUIRK_LIMITED_M_N); 47 49 48 50 pipe_config->has_pch_encoder = false; 49 51 bpp = 24; ··· 77 75 intel_link_compute_m_n(bpp, lane_count, 78 76 adjusted_mode->crtc_clock, 79 77 pipe_config->port_clock, 80 - &pipe_config->dp_m_n); 78 + &pipe_config->dp_m_n, 79 + reduce_m_n); 81 80 82 81 pipe_config->dp_m_n.tu = slots; 83 82
+1 -12
drivers/gpu/drm/i915/intel_drv.h
··· 906 906 M2_N2 907 907 }; 908 908 909 - struct intel_dp_desc { 910 - u8 oui[3]; 911 - u8 device_id[6]; 912 - u8 hw_rev; 913 - u8 sw_major_rev; 914 - u8 sw_minor_rev; 915 - } __packed; 916 - 917 909 struct intel_dp_compliance_data { 918 910 unsigned long edid; 919 911 uint8_t video_pattern; ··· 949 957 /* Max link BW for the sink as per DPCD registers */ 950 958 int max_sink_link_bw; 951 959 /* sink or branch descriptor */ 952 - struct intel_dp_desc desc; 960 + struct drm_dp_desc desc; 953 961 struct drm_dp_aux aux; 954 962 enum intel_display_power_domain aux_power_domain; 955 963 uint8_t train_set[4]; ··· 1524 1532 } 1525 1533 1526 1534 bool intel_dp_read_dpcd(struct intel_dp *intel_dp); 1527 - bool __intel_dp_read_desc(struct intel_dp *intel_dp, 1528 - struct intel_dp_desc *desc); 1529 - bool intel_dp_read_desc(struct intel_dp *intel_dp); 1530 1535 int intel_dp_link_required(int pixel_clock, int bpp); 1531 1536 int intel_dp_max_data_rate(int max_link_clock, int max_lanes); 1532 1537 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+17 -3
drivers/gpu/drm/i915/intel_engine_cs.c
··· 1075 1075 return 0; 1076 1076 } 1077 1077 1078 + static bool ring_is_idle(struct intel_engine_cs *engine) 1079 + { 1080 + struct drm_i915_private *dev_priv = engine->i915; 1081 + bool idle = true; 1082 + 1083 + intel_runtime_pm_get(dev_priv); 1084 + 1085 + /* No bit for gen2, so assume the CS parser is idle */ 1086 + if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) 1087 + idle = false; 1088 + 1089 + intel_runtime_pm_put(dev_priv); 1090 + 1091 + return idle; 1092 + } 1093 + 1078 1094 /** 1079 1095 * intel_engine_is_idle() - Report if the engine has finished process all work 1080 1096 * @engine: the intel_engine_cs ··· 1100 1084 */ 1101 1085 bool intel_engine_is_idle(struct intel_engine_cs *engine) 1102 1086 { 1103 - struct drm_i915_private *dev_priv = engine->i915; 1104 - 1105 1087 /* Any inflight/incomplete requests? */ 1106 1088 if (!i915_seqno_passed(intel_engine_get_seqno(engine), 1107 1089 intel_engine_last_submit(engine))) ··· 1114 1100 return false; 1115 1101 1116 1102 /* Ring stopped? */ 1117 - if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) 1103 + if (!ring_is_idle(engine)) 1118 1104 return false; 1119 1105 1120 1106 return true;
+7 -12
drivers/gpu/drm/i915/intel_fbc.c
··· 82 82 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, 83 83 int *width, int *height) 84 84 { 85 - int w, h; 86 - 87 - if (drm_rotation_90_or_270(cache->plane.rotation)) { 88 - w = cache->plane.src_h; 89 - h = cache->plane.src_w; 90 - } else { 91 - w = cache->plane.src_w; 92 - h = cache->plane.src_h; 93 - } 94 - 95 85 if (width) 96 - *width = w; 86 + *width = cache->plane.src_w; 97 87 if (height) 98 - *height = h; 88 + *height = cache->plane.src_h; 99 89 } 100 90 101 91 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, ··· 736 746 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; 737 747 738 748 cache->plane.rotation = plane_state->base.rotation; 749 + /* 750 + * Src coordinates are already rotated by 270 degrees for 751 + * the 90/270 degree plane rotation cases (to match the 752 + * GTT mapping), hence no need to account for rotation here. 753 + */ 739 754 cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16; 740 755 cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16; 741 756 cache->plane.visible = plane_state->base.visible;
-36
drivers/gpu/drm/i915/intel_lpe_audio.c
··· 149 149 150 150 static void lpe_audio_irq_unmask(struct irq_data *d) 151 151 { 152 - struct drm_i915_private *dev_priv = d->chip_data; 153 - unsigned long irqflags; 154 - u32 val = (I915_LPE_PIPE_A_INTERRUPT | 155 - I915_LPE_PIPE_B_INTERRUPT); 156 - 157 - if (IS_CHERRYVIEW(dev_priv)) 158 - val |= I915_LPE_PIPE_C_INTERRUPT; 159 - 160 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 161 - 162 - dev_priv->irq_mask &= ~val; 163 - I915_WRITE(VLV_IIR, val); 164 - I915_WRITE(VLV_IIR, val); 165 - I915_WRITE(VLV_IMR, dev_priv->irq_mask); 166 - POSTING_READ(VLV_IMR); 167 - 168 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 169 152 } 170 153 171 154 static void lpe_audio_irq_mask(struct irq_data *d) 172 155 { 173 - struct drm_i915_private *dev_priv = d->chip_data; 174 - unsigned long irqflags; 175 - u32 val = (I915_LPE_PIPE_A_INTERRUPT | 176 - I915_LPE_PIPE_B_INTERRUPT); 177 - 178 - if (IS_CHERRYVIEW(dev_priv)) 179 - val |= I915_LPE_PIPE_C_INTERRUPT; 180 - 181 - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 182 - 183 - dev_priv->irq_mask |= val; 184 - I915_WRITE(VLV_IMR, dev_priv->irq_mask); 185 - I915_WRITE(VLV_IIR, val); 186 - I915_WRITE(VLV_IIR, val); 187 - POSTING_READ(VLV_IIR); 188 - 189 - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 190 156 } 191 157 192 158 static struct irq_chip lpe_audio_irqchip = { ··· 295 329 return; 296 330 297 331 desc = irq_to_desc(dev_priv->lpe_audio.irq); 298 - 299 - lpe_audio_irq_mask(&desc->irq_data); 300 332 301 333 lpe_audio_platdev_destroy(dev_priv); 302 334
+3 -5
drivers/gpu/drm/i915/intel_lrc.c
··· 326 326 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; 327 327 u32 *reg_state = ce->lrc_reg_state; 328 328 329 - assert_ring_tail_valid(rq->ring, rq->tail); 330 - reg_state[CTX_RING_TAIL+1] = rq->tail; 329 + reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); 331 330 332 331 /* True 32b PPGTT with dynamic page allocation: update PDP 333 332 * registers and point the unallocated PDPs to scratch page. ··· 1988 1989 1989 1990 ce->ring = ring; 1990 1991 ce->state = vma; 1991 - ce->initialised = engine->init_context == NULL; 1992 + ce->initialised |= engine->init_context == NULL; 1992 1993 1993 1994 return 0; 1994 1995 ··· 2035 2036 ce->state->obj->mm.dirty = true; 2036 2037 i915_gem_object_unpin_map(ce->state->obj); 2037 2038 2038 - ce->ring->head = ce->ring->tail = 0; 2039 - intel_ring_update_space(ce->ring); 2039 + intel_ring_reset(ce->ring, 0); 2040 2040 } 2041 2041 } 2042 2042 }
+1 -1
drivers/gpu/drm/i915/intel_lspcon.c
··· 240 240 return false; 241 241 } 242 242 243 - intel_dp_read_desc(dp); 243 + drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd)); 244 244 245 245 DRM_DEBUG_KMS("Success: LSPCON init\n"); 246 246 return true;
+33 -12
drivers/gpu/drm/i915/intel_pm.c
··· 3373 3373 3374 3374 /* n.b., src is 16.16 fixed point, dst is whole integer */ 3375 3375 if (plane->id == PLANE_CURSOR) { 3376 + /* 3377 + * Cursors only support 0/180 degree rotation, 3378 + * hence no need to account for rotation here. 3379 + */ 3376 3380 src_w = pstate->base.src_w; 3377 3381 src_h = pstate->base.src_h; 3378 3382 dst_w = pstate->base.crtc_w; 3379 3383 dst_h = pstate->base.crtc_h; 3380 3384 } else { 3385 + /* 3386 + * Src coordinates are already rotated by 270 degrees for 3387 + * the 90/270 degree plane rotation cases (to match the 3388 + * GTT mapping), hence no need to account for rotation here. 3389 + */ 3381 3390 src_w = drm_rect_width(&pstate->base.src); 3382 3391 src_h = drm_rect_height(&pstate->base.src); 3383 3392 dst_w = drm_rect_width(&pstate->base.dst); 3384 3393 dst_h = drm_rect_height(&pstate->base.dst); 3385 3394 } 3386 - 3387 - if (drm_rotation_90_or_270(pstate->base.rotation)) 3388 - swap(dst_w, dst_h); 3389 3395 3390 3396 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); 3391 3397 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); ··· 3423 3417 if (y && format != DRM_FORMAT_NV12) 3424 3418 return 0; 3425 3419 3420 + /* 3421 + * Src coordinates are already rotated by 270 degrees for 3422 + * the 90/270 degree plane rotation cases (to match the 3423 + * GTT mapping), hence no need to account for rotation here. 3424 + */ 3426 3425 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3427 3426 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3428 - 3429 - if (drm_rotation_90_or_270(pstate->rotation)) 3430 - swap(width, height); 3431 3427 3432 3428 /* for planar format */ 3433 3429 if (format == DRM_FORMAT_NV12) { ··· 3513 3505 fb->modifier != I915_FORMAT_MOD_Yf_TILED) 3514 3506 return 8; 3515 3507 3508 + /* 3509 + * Src coordinates are already rotated by 270 degrees for 3510 + * the 90/270 degree plane rotation cases (to match the 3511 + * GTT mapping), hence no need to account for rotation here. 3512 + */ 3516 3513 src_w = drm_rect_width(&intel_pstate->base.src) >> 16; 3517 3514 src_h = drm_rect_height(&intel_pstate->base.src) >> 16; 3518 - 3519 - if (drm_rotation_90_or_270(pstate->rotation)) 3520 - swap(src_w, src_h); 3521 3515 3522 3516 /* Halve UV plane width and height for NV12 */ 3523 3517 if (fb->format->format == DRM_FORMAT_NV12 && !y) { ··· 3804 3794 width = intel_pstate->base.crtc_w; 3805 3795 height = intel_pstate->base.crtc_h; 3806 3796 } else { 3797 + /* 3798 + * Src coordinates are already rotated by 270 degrees for 3799 + * the 90/270 degree plane rotation cases (to match the 3800 + * GTT mapping), hence no need to account for rotation here. 3801 + */ 3807 3802 width = drm_rect_width(&intel_pstate->base.src) >> 16; 3808 3803 height = drm_rect_height(&intel_pstate->base.src) >> 16; 3809 3804 } 3810 - 3811 - if (drm_rotation_90_or_270(pstate->rotation)) 3812 - swap(width, height); 3813 3805 3814 3806 cpp = fb->format->cpp[0]; 3815 3807 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); ··· 4347 4335 struct drm_crtc_state *cstate; 4348 4336 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 4349 4337 struct skl_wm_values *results = &intel_state->wm_results; 4338 + struct drm_device *dev = state->dev; 4350 4339 struct skl_pipe_wm *pipe_wm; 4351 4340 bool changed = false; 4352 4341 int ret, i; 4342 + 4343 + /* 4344 + * When we distrust bios wm we always need to recompute to set the 4345 + * expected DDB allocations for each CRTC. 
4346 + */ 4347 + if (to_i915(dev)->wm.distrust_bios_wm) 4348 + changed = true; 4353 4349 4354 4350 /* 4355 4351 * If this transaction isn't actually touching any CRTC's, don't ··· 4369 4349 */ 4370 4350 for_each_new_crtc_in_state(state, crtc, cstate, i) 4371 4351 changed = true; 4352 + 4372 4353 if (!changed) 4373 4354 return 0; 4374 4355
+3 -2
drivers/gpu/drm/i915/intel_psr.c
··· 435 435 } 436 436 437 437 /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */ 438 - if (intel_crtc->config->pipe_src_w > 3200 || 439 - intel_crtc->config->pipe_src_h > 2000) { 438 + if (dev_priv->psr.psr2_support && 439 + (intel_crtc->config->pipe_src_w > 3200 || 440 + intel_crtc->config->pipe_src_h > 2000)) { 440 441 dev_priv->psr.psr2_support = false; 441 442 return false; 442 443 }
+27 -14
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 49 49 50 50 void intel_ring_update_space(struct intel_ring *ring) 51 51 { 52 - ring->space = __intel_ring_space(ring->head, ring->tail, ring->size); 52 + ring->space = __intel_ring_space(ring->head, ring->emit, ring->size); 53 53 } 54 54 55 55 static int ··· 774 774 775 775 i915_gem_request_submit(request); 776 776 777 - assert_ring_tail_valid(request->ring, request->tail); 778 - I915_WRITE_TAIL(request->engine, request->tail); 777 + I915_WRITE_TAIL(request->engine, 778 + intel_ring_set_tail(request->ring, request->tail)); 779 779 } 780 780 781 781 static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) ··· 1316 1316 return PTR_ERR(addr); 1317 1317 } 1318 1318 1319 + void intel_ring_reset(struct intel_ring *ring, u32 tail) 1320 + { 1321 + GEM_BUG_ON(!list_empty(&ring->request_list)); 1322 + ring->tail = tail; 1323 + ring->head = tail; 1324 + ring->emit = tail; 1325 + intel_ring_update_space(ring); 1326 + } 1327 + 1319 1328 void intel_ring_unpin(struct intel_ring *ring) 1320 1329 { 1321 1330 GEM_BUG_ON(!ring->vma); 1322 1331 GEM_BUG_ON(!ring->vaddr); 1332 + 1333 + /* Discard any unused bytes beyond that submitted to hw. */ 1334 + intel_ring_reset(ring, ring->tail); 1323 1335 1324 1336 if (i915_vma_is_map_and_fenceable(ring->vma)) 1325 1337 i915_vma_unpin_iomap(ring->vma); ··· 1574 1562 struct intel_engine_cs *engine; 1575 1563 enum intel_engine_id id; 1576 1564 1565 + /* Restart from the beginning of the rings for convenience */ 1577 1566 for_each_engine(engine, dev_priv, id) 1578 - engine->buffer->head = engine->buffer->tail; 1567 + intel_ring_reset(engine->buffer, 0); 1579 1568 } 1580 1569 1581 1570 static int ring_request_alloc(struct drm_i915_gem_request *request) ··· 1629 1616 unsigned space; 1630 1617 1631 1618 /* Would completion of this request free enough space? 
*/ 1632 - space = __intel_ring_space(target->postfix, ring->tail, 1619 + space = __intel_ring_space(target->postfix, ring->emit, 1633 1620 ring->size); 1634 1621 if (space >= bytes) 1635 1622 break; ··· 1654 1641 u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 1655 1642 { 1656 1643 struct intel_ring *ring = req->ring; 1657 - int remain_actual = ring->size - ring->tail; 1658 - int remain_usable = ring->effective_size - ring->tail; 1644 + int remain_actual = ring->size - ring->emit; 1645 + int remain_usable = ring->effective_size - ring->emit; 1659 1646 int bytes = num_dwords * sizeof(u32); 1660 1647 int total_bytes, wait_bytes; 1661 1648 bool need_wrap = false; ··· 1691 1678 1692 1679 if (unlikely(need_wrap)) { 1693 1680 GEM_BUG_ON(remain_actual > ring->space); 1694 - GEM_BUG_ON(ring->tail + remain_actual > ring->size); 1681 + GEM_BUG_ON(ring->emit + remain_actual > ring->size); 1695 1682 1696 1683 /* Fill the tail with MI_NOOP */ 1697 - memset(ring->vaddr + ring->tail, 0, remain_actual); 1698 - ring->tail = 0; 1684 + memset(ring->vaddr + ring->emit, 0, remain_actual); 1685 + ring->emit = 0; 1699 1686 ring->space -= remain_actual; 1700 1687 } 1701 1688 1702 - GEM_BUG_ON(ring->tail > ring->size - bytes); 1703 - cs = ring->vaddr + ring->tail; 1704 - ring->tail += bytes; 1689 + GEM_BUG_ON(ring->emit > ring->size - bytes); 1690 + cs = ring->vaddr + ring->emit; 1691 + ring->emit += bytes; 1705 1692 ring->space -= bytes; 1706 1693 GEM_BUG_ON(ring->space < 0); 1707 1694 ··· 1712 1699 int intel_ring_cacheline_align(struct drm_i915_gem_request *req) 1713 1700 { 1714 1701 int num_dwords = 1715 - (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 1702 + (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 1716 1703 u32 *cs; 1717 1704 1718 1705 if (num_dwords == 0)
+17 -2
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 145 145 146 146 u32 head; 147 147 u32 tail; 148 + u32 emit; 148 149 149 150 int space; 150 151 int size; ··· 489 488 struct intel_ring * 490 489 intel_engine_create_ring(struct intel_engine_cs *engine, int size); 491 490 int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias); 491 + void intel_ring_reset(struct intel_ring *ring, u32 tail); 492 + void intel_ring_update_space(struct intel_ring *ring); 492 493 void intel_ring_unpin(struct intel_ring *ring); 493 494 void intel_ring_free(struct intel_ring *ring); 494 495 ··· 514 511 * reserved for the command packet (i.e. the value passed to 515 512 * intel_ring_begin()). 516 513 */ 517 - GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs); 514 + GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs); 518 515 } 519 516 520 517 static inline u32 ··· 543 540 GEM_BUG_ON(tail >= ring->size); 544 541 } 545 542 546 - void intel_ring_update_space(struct intel_ring *ring); 543 + static inline unsigned int 544 + intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) 545 + { 546 + /* Whilst writes to the tail are strictly order, there is no 547 + * serialisation between readers and the writers. The tail may be 548 + * read by i915_gem_request_retire() just as it is being updated 549 + * by execlists, as although the breadcrumb is complete, the context 550 + * switch hasn't been seen. 551 + */ 552 + assert_ring_tail_valid(ring, tail); 553 + ring->tail = tail; 554 + return tail; 555 + } 547 556 548 557 void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno); 549 558
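Taken together, the intel_lrc.c, intel_ringbuffer.c and intel_ringbuffer.h hunks above split the ring's software write offset (the new ring->emit, advanced by intel_ring_begin()) from the hardware tail (ring->tail, which now only moves at submission time through intel_ring_set_tail()). A compressed sketch of the resulting flow, restating only lines visible in the hunks:

    /* command emission: only the software offset advances */
    cs = ring->vaddr + ring->emit;
    ring->emit += bytes;

    /* free-space accounting tracks what software has written,
     * whether or not it has been submitted to the hardware yet */
    ring->space = __intel_ring_space(ring->head, ring->emit, ring->size);

    /* submission: the one place the hardware tail is published */
    I915_WRITE_TAIL(request->engine,
                    intel_ring_set_tail(request->ring, request->tail));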
+21
drivers/gpu/drm/i915/intel_sprite.c
··· 83 83 */ 84 84 void intel_pipe_update_start(struct intel_crtc *crtc) 85 85 { 86 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 86 87 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 87 88 long timeout = msecs_to_jiffies_timeout(1); 88 89 int scanline, min, max, vblank_start; 89 90 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 91 + bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 92 + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI); 90 93 DEFINE_WAIT(wait); 91 94 92 95 vblank_start = adjusted_mode->crtc_vblank_start; ··· 141 138 finish_wait(wq, &wait); 142 139 143 140 drm_crtc_vblank_put(&crtc->base); 141 + 142 + /* 143 + * On VLV/CHV DSI the scanline counter would appear to 144 + * increment approx. 1/3 of a scanline before start of vblank. 145 + * The registers still get latched at start of vblank however. 146 + * This means we must not write any registers on the first 147 + * line of vblank (since not the whole line is actually in 148 + * vblank). And unfortunately we can't use the interrupt to 149 + * wait here since it will fire too soon. We could use the 150 + * frame start interrupt instead since it will fire after the 151 + * critical scanline, but that would require more changes 152 + * in the interrupt code. So for now we'll just do the nasty 153 + * thing and poll for the bad scanline to pass us by. 154 + * 155 + * FIXME figure out if BXT+ DSI suffers from this as well 156 + */ 157 + while (need_vlv_dsi_wa && scanline == vblank_start) 158 + scanline = intel_get_crtc_scanline(crtc); 144 159 145 160 crtc->debug.scanline_start = scanline; 146 161 crtc->debug.start_vbl_time = ktime_get();
-2
drivers/gpu/drm/i915/intel_uc.h
··· 59 59 * available in the work queue (note, the queue is shared, 60 60 * not per-engine). It is OK for this to be nonzero, but 61 61 * it should not be huge! 62 - * q_fail: failed to enqueue a work item. This should never happen, 63 - * because we check for space beforehand. 64 62 * b_fail: failed to ring the doorbell. This should never happen, unless 65 63 * somehow the hardware misbehaves, or maybe if the GuC firmware 66 64 * crashes? We probably need to reset the GPU to recover.
+5 -3
drivers/gpu/drm/i915/selftests/i915_gem_context.c
··· 320 320 static int igt_ctx_exec(void *arg) 321 321 { 322 322 struct drm_i915_private *i915 = arg; 323 - struct drm_i915_gem_object *obj; 323 + struct drm_i915_gem_object *obj = NULL; 324 324 struct drm_file *file; 325 325 IGT_TIMEOUT(end_time); 326 326 LIST_HEAD(objects); ··· 359 359 } 360 360 361 361 for_each_engine(engine, i915, id) { 362 - if (dw == 0) { 362 + if (!obj) { 363 363 obj = create_test_object(ctx, file, &objects); 364 364 if (IS_ERR(obj)) { 365 365 err = PTR_ERR(obj); ··· 376 376 goto out_unlock; 377 377 } 378 378 379 - if (++dw == max_dwords(obj)) 379 + if (++dw == max_dwords(obj)) { 380 + obj = NULL; 380 381 dw = 0; 382 + } 381 383 ndwords++; 382 384 } 383 385 ncontexts++;
+1 -1
drivers/gpu/drm/imx/imx-ldb.c
··· 673 673 ret = drm_of_find_panel_or_bridge(child, 674 674 imx_ldb->lvds_mux ? 4 : 2, 0, 675 675 &channel->panel, &channel->bridge); 676 - if (ret) 676 + if (ret && ret != -ENODEV) 677 677 return ret; 678 678 679 679 /* panel ddc only if there is no bridge */
+6 -9
drivers/gpu/drm/mediatek/mtk_dsi.c
··· 19 19 #include <drm/drm_of.h> 20 20 #include <linux/clk.h> 21 21 #include <linux/component.h> 22 + #include <linux/iopoll.h> 22 23 #include <linux/irq.h> 23 24 #include <linux/of.h> 24 25 #include <linux/of_platform.h> ··· 901 900 902 901 static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi) 903 902 { 904 - u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */ 903 + int ret; 904 + u32 val; 905 905 906 - while (timeout_ms--) { 907 - if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY)) 908 - break; 909 - 910 - usleep_range(2, 4); 911 - } 912 - 913 - if (timeout_ms == 0) { 906 + ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY), 907 + 4, 2000000); 908 + if (ret) { 914 909 DRM_WARN("polling dsi wait not busy timeout!\n"); 915 910 916 911 mtk_dsi_enable(dsi);
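The mtk_dsi hunk above replaces an open-coded busy-wait (with hand-rolled timeout bookkeeping) by readl_poll_timeout() from <linux/iopoll.h>, which re-reads a register until a condition holds or the timeout expires and returns 0 or -ETIMEDOUT. A minimal sketch of the idiom; the wrapper below is purely illustrative, only DSI_INTSTA and DSI_BUSY come from the driver:

    #include <linux/iopoll.h>

    /* poll DSI_INTSTA every ~4us until DSI_BUSY clears, give up after 2s */
    static int mtk_dsi_poll_idle(void __iomem *regs)
    {
            u32 val;

            return readl_poll_timeout(regs + DSI_INTSTA, val,
                                      !(val & DSI_BUSY), 4, 2000000);
    }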
+1 -1
drivers/gpu/drm/mediatek/mtk_hdmi.c
··· 1062 1062 } 1063 1063 1064 1064 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); 1065 - if (err) { 1065 + if (err < 0) { 1066 1066 dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", 1067 1067 err); 1068 1068 return err;
+15 -5
drivers/gpu/drm/meson/meson_drv.c
··· 152 152 .max_register = 0x1000, 153 153 }; 154 154 155 - static int meson_drv_bind(struct device *dev) 155 + static int meson_drv_bind_master(struct device *dev, bool has_components) 156 156 { 157 157 struct platform_device *pdev = to_platform_device(dev); 158 158 struct meson_drm *priv; ··· 233 233 if (ret) 234 234 goto free_drm; 235 235 236 - ret = component_bind_all(drm->dev, drm); 237 - if (ret) { 238 - dev_err(drm->dev, "Couldn't bind all components\n"); 239 - goto free_drm; 236 + if (has_components) { 237 + ret = component_bind_all(drm->dev, drm); 238 + if (ret) { 239 + dev_err(drm->dev, "Couldn't bind all components\n"); 240 + goto free_drm; 241 + } 240 242 } 241 243 242 244 ret = meson_plane_create(priv); ··· 276 274 drm_dev_unref(drm); 277 275 278 276 return ret; 277 + } 278 + 279 + static int meson_drv_bind(struct device *dev) 280 + { 281 + return meson_drv_bind_master(dev, true); 279 282 } 280 283 281 284 static void meson_drv_unbind(struct device *dev) ··· 363 356 364 357 count += meson_probe_remote(pdev, &match, np, remote); 365 358 } 359 + 360 + if (count && !match) 361 + return meson_drv_bind_master(&pdev->dev, false); 366 362 367 363 /* If some endpoints were found, initialize the nodes */ 368 364 if (count) {
+8 -1
drivers/gpu/drm/mgag200/mgag200_mode.c
··· 1173 1173 1174 1174 1175 1175 if (IS_G200_SE(mdev)) { 1176 - if (mdev->unique_rev_id >= 0x02) { 1176 + if (mdev->unique_rev_id >= 0x04) { 1177 + WREG8(MGAREG_CRTCEXT_INDEX, 0x06); 1178 + WREG8(MGAREG_CRTCEXT_DATA, 0); 1179 + } else if (mdev->unique_rev_id >= 0x02) { 1177 1180 u8 hi_pri_lvl; 1178 1181 u32 bpp; 1179 1182 u32 mb; ··· 1641 1638 return MODE_VIRTUAL_Y; 1642 1639 if (mga_vga_calculate_mode_bandwidth(mode, bpp) 1643 1640 > (30100 * 1024)) 1641 + return MODE_BANDWIDTH; 1642 + } else { 1643 + if (mga_vga_calculate_mode_bandwidth(mode, bpp) 1644 + > (55000 * 1024)) 1644 1645 return MODE_BANDWIDTH; 1645 1646 } 1646 1647 } else if (mdev->type == G200_WB) {
+1
drivers/gpu/drm/msm/Kconfig
··· 13 13 select QCOM_SCM 14 14 select SND_SOC_HDMI_CODEC if SND_SOC 15 15 select SYNC_FILE 16 + select PM_OPP 16 17 default y 17 18 help 18 19 DRM/KMS driver for MSM/snapdragon.
+1 -1
drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
··· 116 116 return 0; 117 117 } 118 118 119 - static struct irq_domain_ops mdss_hw_irqdomain_ops = { 119 + static const struct irq_domain_ops mdss_hw_irqdomain_ops = { 120 120 .map = mdss_hw_irqdomain_map, 121 121 .xlate = irq_domain_xlate_onecell, 122 122 };
+7 -2
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
··· 225 225 226 226 mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), 227 227 sizeof(*mdp5_state), GFP_KERNEL); 228 + if (!mdp5_state) 229 + return NULL; 228 230 229 - if (mdp5_state && mdp5_state->base.fb) 230 - drm_framebuffer_reference(mdp5_state->base.fb); 231 + __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base); 231 232 232 233 return &mdp5_state->base; 233 234 } ··· 445 444 mdp5_pipe_release(state->state, old_hwpipe); 446 445 mdp5_pipe_release(state->state, old_right_hwpipe); 447 446 } 447 + } else { 448 + mdp5_pipe_release(state->state, mdp5_state->hwpipe); 449 + mdp5_pipe_release(state->state, mdp5_state->r_hwpipe); 450 + mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL; 448 451 } 449 452 450 453 return 0;
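The mdp5 duplicate_state fix above adds the missing allocation check and switches to __drm_atomic_helper_plane_duplicate_state(), which copies the common plane state and takes the framebuffer reference itself instead of the driver doing it by hand. A sketch of the canonical shape of such a hook for a driver with a subclassed plane state; the foo_* names are placeholders, not from the patch:

    #include <drm/drm_atomic_helper.h>

    static struct drm_plane_state *
    foo_plane_duplicate_state(struct drm_plane *plane)
    {
            struct foo_plane_state *state;

            if (WARN_ON(!plane->state))
                    return NULL;

            /* copy the driver-private fields ... */
            state = kmemdup(to_foo_plane_state(plane->state),
                            sizeof(*state), GFP_KERNEL);
            if (!state)
                    return NULL;

            /* ... then let the helper duplicate the common drm_plane_state,
             * including grabbing a reference on the attached framebuffer */
            __drm_atomic_helper_plane_duplicate_state(plane, &state->base);

            return &state->base;
    }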
+1
drivers/gpu/drm/msm/msm_drv.c
··· 830 830 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 831 831 .gem_prime_export = drm_gem_prime_export, 832 832 .gem_prime_import = drm_gem_prime_import, 833 + .gem_prime_res_obj = msm_gem_prime_res_obj, 833 834 .gem_prime_pin = msm_gem_prime_pin, 834 835 .gem_prime_unpin = msm_gem_prime_unpin, 835 836 .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+1
drivers/gpu/drm/msm/msm_drv.h
··· 224 224 void *msm_gem_prime_vmap(struct drm_gem_object *obj); 225 225 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 226 226 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); 227 + struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); 227 228 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 228 229 struct dma_buf_attachment *attach, struct sg_table *sg); 229 230 int msm_gem_prime_pin(struct drm_gem_object *obj);
+2 -8
drivers/gpu/drm/msm/msm_fence.c
··· 99 99 } 100 100 101 101 struct msm_fence { 102 - struct msm_fence_context *fctx; 103 102 struct dma_fence base; 103 + struct msm_fence_context *fctx; 104 104 }; 105 105 106 106 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) ··· 130 130 return fence_completed(f->fctx, f->base.seqno); 131 131 } 132 132 133 - static void msm_fence_release(struct dma_fence *fence) 134 - { 135 - struct msm_fence *f = to_msm_fence(fence); 136 - kfree_rcu(f, base.rcu); 137 - } 138 - 139 133 static const struct dma_fence_ops msm_fence_ops = { 140 134 .get_driver_name = msm_fence_get_driver_name, 141 135 .get_timeline_name = msm_fence_get_timeline_name, 142 136 .enable_signaling = msm_fence_enable_signaling, 143 137 .signaled = msm_fence_signaled, 144 138 .wait = dma_fence_default_wait, 145 - .release = msm_fence_release, 139 + .release = dma_fence_free, 146 140 }; 147 141 148 142 struct dma_fence *
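The msm_fence hunk above drops the driver-private release hook in favour of the generic dma_fence_free() and, just as importantly, moves the embedded struct dma_fence to the front of struct msm_fence. The ordering matters because dma_fence_free() releases the memory through the dma_fence pointer itself (a kfree_rcu() on the fence), which only frees the whole msm_fence allocation when the two addresses coincide. A one-line illustration of the invariant the reordering relies on; the assertion is not part of the patch and would live somewhere in the fence allocation path:

    /* &fence->base must alias the start of the allocation for
     * dma_fence_free()'s kfree_rcu(fence, rcu) to be correct */
    BUILD_BUG_ON(offsetof(struct msm_fence, base) != 0);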
+6
drivers/gpu/drm/msm/msm_gem.c
··· 758 758 struct msm_gem_object *msm_obj; 759 759 bool use_vram = false; 760 760 761 + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 762 + 761 763 switch (flags & MSM_BO_CACHE_MASK) { 762 764 case MSM_BO_UNCACHED: 763 765 case MSM_BO_CACHED: ··· 855 853 856 854 size = PAGE_ALIGN(dmabuf->size); 857 855 856 + /* Take mutex so we can modify the inactive list in msm_gem_new_impl */ 857 + mutex_lock(&dev->struct_mutex); 858 858 ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); 859 + mutex_unlock(&dev->struct_mutex); 860 + 859 861 if (ret) 860 862 goto fail; 861 863
+7
drivers/gpu/drm/msm/msm_gem_prime.c
··· 70 70 if (!obj->import_attach) 71 71 msm_gem_put_pages(obj); 72 72 } 73 + 74 + struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) 75 + { 76 + struct msm_gem_object *msm_obj = to_msm_bo(obj); 77 + 78 + return msm_obj->resv; 79 + }
+7 -7
drivers/gpu/drm/msm/msm_gem_submit.c
··· 410 410 if (!in_fence) 411 411 return -EINVAL; 412 412 413 - /* TODO if we get an array-fence due to userspace merging multiple 414 - * fences, we need a way to determine if all the backing fences 415 - * are from our own context.. 413 + /* 414 + * Wait if the fence is from a foreign context, or if the fence 415 + * array contains any fence from a foreign context. 416 416 */ 417 - 418 - if (in_fence->context != gpu->fctx->context) { 417 + if (!dma_fence_match_context(in_fence, gpu->fctx->context)) { 419 418 ret = dma_fence_wait(in_fence, true); 420 419 if (ret) 421 420 return ret; ··· 495 496 goto out; 496 497 } 497 498 498 - if ((submit_cmd.size + submit_cmd.submit_offset) >= 499 - msm_obj->base.size) { 499 + if (!submit_cmd.size || 500 + ((submit_cmd.size + submit_cmd.submit_offset) > 501 + msm_obj->base.size)) { 500 502 DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); 501 503 ret = -EINVAL; 502 504 goto out;
+2 -2
drivers/gpu/drm/msm/msm_gpu.c
··· 549 549 gpu->grp_clks[i] = get_clock(dev, name); 550 550 551 551 /* Remember the key clocks that we need to control later */ 552 - if (!strcmp(name, "core")) 552 + if (!strcmp(name, "core") || !strcmp(name, "core_clk")) 553 553 gpu->core_clk = gpu->grp_clks[i]; 554 - else if (!strcmp(name, "rbbmtimer")) 554 + else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk")) 555 555 gpu->rbbmtimer_clk = gpu->grp_clks[i]; 556 556 557 557 ++i;
+42
drivers/gpu/drm/mxsfb/mxsfb_crtc.c
··· 35 35 #include "mxsfb_drv.h" 36 36 #include "mxsfb_regs.h" 37 37 38 + #define MXS_SET_ADDR 0x4 39 + #define MXS_CLR_ADDR 0x8 40 + #define MODULE_CLKGATE BIT(30) 41 + #define MODULE_SFTRST BIT(31) 42 + /* 1 second delay should be plenty of time for block reset */ 43 + #define RESET_TIMEOUT 1000000 44 + 38 45 static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val) 39 46 { 40 47 return (val & mxsfb->devdata->hs_wdth_mask) << ··· 166 159 clk_disable_unprepare(mxsfb->clk_disp_axi); 167 160 } 168 161 162 + /* 163 + * Clear the bit and poll it cleared. This is usually called with 164 + * a reset address and mask being either SFTRST(bit 31) or CLKGATE 165 + * (bit 30). 166 + */ 167 + static int clear_poll_bit(void __iomem *addr, u32 mask) 168 + { 169 + u32 reg; 170 + 171 + writel(mask, addr + MXS_CLR_ADDR); 172 + return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT); 173 + } 174 + 175 + static int mxsfb_reset_block(void __iomem *reset_addr) 176 + { 177 + int ret; 178 + 179 + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); 180 + if (ret) 181 + return ret; 182 + 183 + writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR); 184 + 185 + ret = clear_poll_bit(reset_addr, MODULE_SFTRST); 186 + if (ret) 187 + return ret; 188 + 189 + return clear_poll_bit(reset_addr, MODULE_CLKGATE); 190 + } 191 + 169 192 static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) 170 193 { 171 194 struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; ··· 209 172 * first stop the controller and drain its FIFOs. 210 173 */ 211 174 mxsfb_enable_axi_clk(mxsfb); 175 + 176 + /* Mandatory eLCDIF reset as per the Reference Manual */ 177 + err = mxsfb_reset_block(mxsfb->base); 178 + if (err) 179 + return; 212 180 213 181 /* Clear the FIFOs */ 214 182 writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
+1
drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
··· 4 4 5 5 struct nvkm_alarm { 6 6 struct list_head head; 7 + struct list_head exec; 7 8 u64 timestamp; 8 9 void (*func)(struct nvkm_alarm *); 9 10 };
+17 -21
drivers/gpu/drm/nouveau/nouveau_drm.c
··· 80 80 module_param_named(modeset, nouveau_modeset, int, 0400); 81 81 82 82 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); 83 - int nouveau_runtime_pm = -1; 83 + static int nouveau_runtime_pm = -1; 84 84 module_param_named(runpm, nouveau_runtime_pm, int, 0400); 85 85 86 86 static struct drm_driver driver_stub; ··· 495 495 nouveau_fbcon_init(dev); 496 496 nouveau_led_init(dev); 497 497 498 - if (nouveau_runtime_pm != 0) { 498 + if (nouveau_pmops_runtime()) { 499 499 pm_runtime_use_autosuspend(dev->dev); 500 500 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 501 501 pm_runtime_set_active(dev->dev); ··· 527 527 { 528 528 struct nouveau_drm *drm = nouveau_drm(dev); 529 529 530 - if (nouveau_runtime_pm != 0) { 530 + if (nouveau_pmops_runtime()) { 531 531 pm_runtime_get_sync(dev->dev); 532 532 pm_runtime_forbid(dev->dev); 533 533 } ··· 726 726 return nouveau_do_resume(drm_dev, false); 727 727 } 728 728 729 + bool 730 + nouveau_pmops_runtime() 731 + { 732 + if (nouveau_runtime_pm == -1) 733 + return nouveau_is_optimus() || nouveau_is_v1_dsm(); 734 + return nouveau_runtime_pm == 1; 735 + } 736 + 729 737 static int 730 738 nouveau_pmops_runtime_suspend(struct device *dev) 731 739 { ··· 741 733 struct drm_device *drm_dev = pci_get_drvdata(pdev); 742 734 int ret; 743 735 744 - if (nouveau_runtime_pm == 0) { 745 - pm_runtime_forbid(dev); 746 - return -EBUSY; 747 - } 748 - 749 - /* are we optimus enabled? */ 750 - if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { 751 - DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); 736 + if (!nouveau_pmops_runtime()) { 752 737 pm_runtime_forbid(dev); 753 738 return -EBUSY; 754 739 } ··· 766 765 struct nvif_device *device = &nouveau_drm(drm_dev)->client.device; 767 766 int ret; 768 767 769 - if (nouveau_runtime_pm == 0) 770 - return -EINVAL; 768 + if (!nouveau_pmops_runtime()) { 769 + pm_runtime_forbid(dev); 770 + return -EBUSY; 771 + } 771 772 772 773 pci_set_power_state(pdev, PCI_D0); 773 774 pci_restore_state(pdev); ··· 799 796 struct nouveau_drm *drm = nouveau_drm(drm_dev); 800 797 struct drm_crtc *crtc; 801 798 802 - if (nouveau_runtime_pm == 0) { 803 - pm_runtime_forbid(dev); 804 - return -EBUSY; 805 - } 806 - 807 - /* are we optimus enabled? */ 808 - if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { 809 - DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); 799 + if (!nouveau_pmops_runtime()) { 810 800 pm_runtime_forbid(dev); 811 801 return -EBUSY; 812 802 }
+1 -2
drivers/gpu/drm/nouveau/nouveau_drv.h
··· 108 108 #include <nvif/object.h> 109 109 #include <nvif/device.h> 110 110 111 - extern int nouveau_runtime_pm; 112 - 113 111 struct nouveau_drm { 114 112 struct nouveau_cli client; 115 113 struct drm_device *dev; ··· 193 195 194 196 int nouveau_pmops_suspend(struct device *); 195 197 int nouveau_pmops_resume(struct device *); 198 + bool nouveau_pmops_runtime(void); 196 199 197 200 #include <nvkm/core/tegra.h> 198 201
+2 -11
drivers/gpu/drm/nouveau/nouveau_vga.c
··· 87 87 nouveau_vga_init(struct nouveau_drm *drm) 88 88 { 89 89 struct drm_device *dev = drm->dev; 90 - bool runtime = false; 90 + bool runtime = nouveau_pmops_runtime(); 91 91 92 92 /* only relevant for PCI devices */ 93 93 if (!dev->pdev) ··· 99 99 if (pci_is_thunderbolt_attached(dev->pdev)) 100 100 return; 101 101 102 - if (nouveau_runtime_pm == 1) 103 - runtime = true; 104 - if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm())) 105 - runtime = true; 106 102 vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime); 107 103 108 104 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) ··· 109 113 nouveau_vga_fini(struct nouveau_drm *drm) 110 114 { 111 115 struct drm_device *dev = drm->dev; 112 - bool runtime = false; 116 + bool runtime = nouveau_pmops_runtime(); 113 117 114 118 vga_client_register(dev->pdev, NULL, NULL, NULL); 115 119 116 120 if (pci_is_thunderbolt_attached(dev->pdev)) 117 121 return; 118 - 119 - if (nouveau_runtime_pm == 1) 120 - runtime = true; 121 - if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm())) 122 - runtime = true; 123 122 124 123 vga_switcheroo_unregister_client(dev->pdev); 125 124 if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+2 -1
drivers/gpu/drm/nouveau/nv50_display.c
··· 2107 2107 asyc->set.dither = true; 2108 2108 } 2109 2109 } else { 2110 - asyc->set.mask = ~0; 2110 + if (asyc) 2111 + asyc->set.mask = ~0; 2111 2112 asyh->set.mask = ~0; 2112 2113 } 2113 2114
+4 -3
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
··· 50 50 /* Move to completed list. We'll drop the lock before 51 51 * executing the callback so it can reschedule itself. 52 52 */ 53 - list_move_tail(&alarm->head, &exec); 53 + list_del_init(&alarm->head); 54 + list_add(&alarm->exec, &exec); 54 55 } 55 56 56 57 /* Shut down interrupt if no more pending alarms. */ ··· 60 59 spin_unlock_irqrestore(&tmr->lock, flags); 61 60 62 61 /* Execute completed callbacks. */ 63 - list_for_each_entry_safe(alarm, atemp, &exec, head) { 64 - list_del_init(&alarm->head); 62 + list_for_each_entry_safe(alarm, atemp, &exec, exec) { 63 + list_del(&alarm->exec); 65 64 alarm->func(alarm); 66 65 } 67 66 }
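The timer hunk above, paired with the extra list_head added to struct nvkm_alarm in the header hunk further up, closes a list-corruption window: alarms used to be moved onto the local 'exec' list with the same list_head that links them on the timer's pending list, yet the whole reason for dropping tmr->lock before calling back is that the callback may immediately re-arm the alarm, i.e. re-link that very head, while 'exec' is still being walked. With a dedicated member the two lists never share a link. The resulting structure, with my comments added to the fields from the hunk:

    struct nvkm_alarm {
            struct list_head head;  /* link on the timer's pending list */
            struct list_head exec;  /* link on the local completion list
                                     * while func() runs and may re-arm
                                     * 'head' */
            u64 timestamp;
            void (*func)(struct nvkm_alarm *);
    };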
+5 -2
drivers/gpu/drm/radeon/cik.c
··· 9267 9267 u32 tmp, wm_mask; 9268 9268 9269 9269 if (radeon_crtc->base.enabled && num_heads && mode) { 9270 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 9271 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 9270 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 9271 + (u32)mode->clock); 9272 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 9273 + (u32)mode->clock); 9274 + line_time = min(line_time, (u32)65535); 9272 9275 9273 9276 /* watermark for high clocks */ 9274 9277 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
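The watermark hunk above (and the identical evergreen.c and si.c conversions below) replaces a 32-bit multiply with div_u64() on a 64-bit product because the old expression could silently wrap. A worked example with hypothetical but realistic 4K timings:

    /* mode->crtc_htotal = 4400, mode->clock = 594000 (kHz), for example:
     *
     *   32-bit:  4400 * 1000000 = 4,400,000,000  -> wraps past U32_MAX
     *            (4,294,967,295), so line_time came out garbage
     *   64-bit:  div_u64((u64)4400 * 1000000, 594000) = 7407, then
     *            clamped to 65535 as before
     */
    line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
                              (u32)mode->clock);
    line_time = min(line_time, (u32)65535);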
+5 -2
drivers/gpu/drm/radeon/evergreen.c
··· 2266 2266 fixed20_12 a, b, c; 2267 2267 2268 2268 if (radeon_crtc->base.enabled && num_heads && mode) { 2269 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 2270 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 2269 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 2270 + (u32)mode->clock); 2271 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 2272 + (u32)mode->clock); 2273 + line_time = min(line_time, (u32)65535); 2271 2274 priority_a_cnt = 0; 2272 2275 priority_b_cnt = 0; 2273 2276 dram_channels = evergreen_get_number_of_dram_channels(rdev);
+7
drivers/gpu/drm/radeon/radeon_combios.c
··· 3393 3393 rdev->pdev->subsystem_vendor == 0x103c && 3394 3394 rdev->pdev->subsystem_device == 0x280a) 3395 3395 return; 3396 + /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume 3397 + * - it hangs on resume inside the dynclk 1 table. 3398 + */ 3399 + if (rdev->family == CHIP_RS400 && 3400 + rdev->pdev->subsystem_vendor == 0x1179 && 3401 + rdev->pdev->subsystem_device == 0xff31) 3402 + return; 3396 3403 3397 3404 /* DYN CLK 1 */ 3398 3405 table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+4
drivers/gpu/drm/radeon/radeon_device.c
··· 136 136 * https://bugzilla.kernel.org/show_bug.cgi?id=51381 137 137 */ 138 138 { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, 139 + /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU 140 + * https://bugs.freedesktop.org/show_bug.cgi?id=101491 141 + */ 142 + { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, 139 143 /* macbook pro 8.2 */ 140 144 { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, 141 145 { 0, 0, 0, 0, 0 },
+1 -1
drivers/gpu/drm/radeon/radeon_uvd.c
··· 621 621 } 622 622 623 623 /* TODO: is this still necessary on NI+ ? */ 624 - if ((cmd == 0 || cmd == 1 || cmd == 0x3) && 624 + if ((cmd == 0 || cmd == 0x3) && 625 625 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { 626 626 DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", 627 627 start, end);
+5 -2
drivers/gpu/drm/radeon/si.c
··· 2284 2284 fixed20_12 a, b, c; 2285 2285 2286 2286 if (radeon_crtc->base.enabled && num_heads && mode) { 2287 - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; 2288 - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); 2287 + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 2288 + (u32)mode->clock); 2289 + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 2290 + (u32)mode->clock); 2291 + line_time = min(line_time, (u32)65535); 2289 2292 priority_a_cnt = 0; 2290 2293 priority_b_cnt = 0; 2291 2294
-12
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
··· 245 245 struct drm_connector_state *conn_state) 246 246 { 247 247 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); 248 - struct rockchip_dp_device *dp = to_dp(encoder); 249 - int ret; 250 248 251 249 /* 252 250 * The hardware IC designed that VOP must output the RGB10 video ··· 256 258 257 259 s->output_mode = ROCKCHIP_OUT_MODE_AAAA; 258 260 s->output_type = DRM_MODE_CONNECTOR_eDP; 259 - if (dp->data->chip_type == RK3399_EDP) { 260 - /* 261 - * For RK3399, VOP Lit must code the out mode to RGB888, 262 - * VOP Big must code the out mode to RGB10. 263 - */ 264 - ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, 265 - encoder); 266 - if (ret > 0) 267 - s->output_mode = ROCKCHIP_OUT_MODE_P888; 268 - } 269 261 270 262 return 0; 271 263 }
+2 -7
drivers/gpu/drm/rockchip/cdn-dp-core.c
··· 615 615 { 616 616 struct cdn_dp_device *dp = encoder_to_dp(encoder); 617 617 int ret, val; 618 - struct rockchip_crtc_state *state; 619 618 620 619 ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); 621 620 if (ret < 0) { ··· 624 625 625 626 DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n", 626 627 (ret) ? "LIT" : "BIG"); 627 - state = to_rockchip_crtc_state(encoder->crtc->state); 628 - if (ret) { 628 + if (ret) 629 629 val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16); 630 - state->output_mode = ROCKCHIP_OUT_MODE_P888; 631 - } else { 630 + else 632 631 val = DP_SEL_VOP_LIT << 16; 633 - state->output_mode = ROCKCHIP_OUT_MODE_AAAA; 634 - } 635 632 636 633 ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val); 637 634 if (ret)
+8
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
··· 875 875 static void vop_crtc_enable(struct drm_crtc *crtc) 876 876 { 877 877 struct vop *vop = to_vop(crtc); 878 + const struct vop_data *vop_data = vop->data; 878 879 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state); 879 880 struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode; 880 881 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start; ··· 968 967 DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n", 969 968 s->output_type); 970 969 } 970 + 971 + /* 972 + * if vop is not support RGB10 output, need force RGB10 to RGB888. 973 + */ 974 + if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && 975 + !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10)) 976 + s->output_mode = ROCKCHIP_OUT_MODE_P888; 971 977 VOP_CTRL_SET(vop, out_mode, s->output_mode); 972 978 973 979 VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
+3
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
··· 142 142 const struct vop_intr *intr; 143 143 const struct vop_win_data *win; 144 144 unsigned int win_size; 145 + 146 + #define VOP_FEATURE_OUTPUT_RGB10 BIT(0) 147 + u64 feature; 145 148 }; 146 149 147 150 /* interrupt define */
+2
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
··· 275 275 static const struct vop_data rk3288_vop = { 276 276 .init_table = rk3288_init_reg_table, 277 277 .table_size = ARRAY_SIZE(rk3288_init_reg_table), 278 + .feature = VOP_FEATURE_OUTPUT_RGB10, 278 279 .intr = &rk3288_vop_intr, 279 280 .ctrl = &rk3288_ctrl_data, 280 281 .win = rk3288_vop_win_data, ··· 344 343 static const struct vop_data rk3399_vop_big = { 345 344 .init_table = rk3399_init_reg_table, 346 345 .table_size = ARRAY_SIZE(rk3399_init_reg_table), 346 + .feature = VOP_FEATURE_OUTPUT_RGB10, 347 347 .intr = &rk3399_vop_intr, 348 348 .ctrl = &rk3399_ctrl_data, 349 349 /*
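The four rockchip hunks above move the "can this CRTC output RGB10?" decision out of the individual encoders (analogix_dp and cdn-dp no longer override output_mode themselves) and into the VOP, keyed off the new per-VOP capability bit VOP_FEATURE_OUTPUT_RGB10, which only the RK3288 VOP and the RK3399 big VOP set. The check now lives in one place, as the rockchip_drm_vop.c hunk shows:

    /* demote RGB10 to RGB888 on VOPs whose vop_data does not
     * advertise ten bits per component */
    if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
        !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
            s->output_mode = ROCKCHIP_OUT_MODE_P888;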
+5 -17
drivers/gpu/drm/tegra/drm.c
··· 451 451 452 452 453 453 #ifdef CONFIG_DRM_TEGRA_STAGING 454 - static struct tegra_drm_context * 455 - tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id) 456 - { 457 - struct tegra_drm_context *context; 458 - 459 - mutex_lock(&file->lock); 460 - context = idr_find(&file->contexts, id); 461 - mutex_unlock(&file->lock); 462 - 463 - return context; 464 - } 465 - 466 454 static int tegra_gem_create(struct drm_device *drm, void *data, 467 455 struct drm_file *file) 468 456 { ··· 539 551 if (err < 0) 540 552 return err; 541 553 542 - err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL); 554 + err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL); 543 555 if (err < 0) { 544 556 client->ops->close_channel(context); 545 557 return err; ··· 594 606 595 607 mutex_lock(&fpriv->lock); 596 608 597 - context = tegra_drm_file_get_context(fpriv, args->context); 609 + context = idr_find(&fpriv->contexts, args->context); 598 610 if (!context) { 599 611 err = -EINVAL; 600 612 goto unlock; ··· 619 631 620 632 mutex_lock(&fpriv->lock); 621 633 622 - context = tegra_drm_file_get_context(fpriv, args->context); 634 + context = idr_find(&fpriv->contexts, args->context); 623 635 if (!context) { 624 636 err = -ENODEV; 625 637 goto unlock; ··· 648 660 649 661 mutex_lock(&fpriv->lock); 650 662 651 - context = tegra_drm_file_get_context(fpriv, args->context); 663 + context = idr_find(&fpriv->contexts, args->context); 652 664 if (!context) { 653 665 err = -ENODEV; 654 666 goto unlock; ··· 673 685 674 686 mutex_lock(&fpriv->lock); 675 687 676 - context = tegra_drm_file_get_context(fpriv, args->context); 688 + context = idr_find(&fpriv->contexts, args->context); 677 689 if (!context) { 678 690 err = -ENODEV; 679 691 goto unlock;
+1
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
··· 321 321 list_for_each_entry_safe(entry, next, &man->list, head) 322 322 vmw_cmdbuf_res_free(man, entry); 323 323 324 + drm_ht_remove(&man->resources); 324 325 kfree(man); 325 326 } 326 327
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 41 41 #include <drm/ttm/ttm_module.h> 42 42 #include "vmwgfx_fence.h" 43 43 44 - #define VMWGFX_DRIVER_DATE "20170221" 44 + #define VMWGFX_DRIVER_DATE "20170607" 45 45 #define VMWGFX_DRIVER_MAJOR 2 46 - #define VMWGFX_DRIVER_MINOR 12 46 + #define VMWGFX_DRIVER_MINOR 13 47 47 #define VMWGFX_DRIVER_PATCHLEVEL 0 48 48 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000 49 49 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
+2
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
··· 368 368 return fifo_state->static_buffer; 369 369 else { 370 370 fifo_state->dynamic_buffer = vmalloc(bytes); 371 + if (!fifo_state->dynamic_buffer) 372 + goto out_err; 371 373 return fifo_state->dynamic_buffer; 372 374 } 373 375 }
+1 -114
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 274 274 } 275 275 276 276 277 - 278 - /** 279 - * vmw_du_cursor_plane_update() - Update cursor image and location 280 - * 281 - * @plane: plane object to update 282 - * @crtc: owning CRTC of @plane 283 - * @fb: framebuffer to flip onto plane 284 - * @crtc_x: x offset of plane on crtc 285 - * @crtc_y: y offset of plane on crtc 286 - * @crtc_w: width of plane rectangle on crtc 287 - * @crtc_h: height of plane rectangle on crtc 288 - * @src_x: Not used 289 - * @src_y: Not used 290 - * @src_w: Not used 291 - * @src_h: Not used 292 - * 293 - * 294 - * RETURNS: 295 - * Zero on success, error code on failure 296 - */ 297 - int vmw_du_cursor_plane_update(struct drm_plane *plane, 298 - struct drm_crtc *crtc, 299 - struct drm_framebuffer *fb, 300 - int crtc_x, int crtc_y, 301 - unsigned int crtc_w, 302 - unsigned int crtc_h, 303 - uint32_t src_x, uint32_t src_y, 304 - uint32_t src_w, uint32_t src_h) 305 - { 306 - struct vmw_private *dev_priv = vmw_priv(crtc->dev); 307 - struct vmw_display_unit *du = vmw_crtc_to_du(crtc); 308 - struct vmw_surface *surface = NULL; 309 - struct vmw_dma_buffer *dmabuf = NULL; 310 - s32 hotspot_x, hotspot_y; 311 - int ret; 312 - 313 - hotspot_x = du->hotspot_x + fb->hot_x; 314 - hotspot_y = du->hotspot_y + fb->hot_y; 315 - 316 - /* A lot of the code assumes this */ 317 - if (crtc_w != 64 || crtc_h != 64) { 318 - ret = -EINVAL; 319 - goto out; 320 - } 321 - 322 - if (vmw_framebuffer_to_vfb(fb)->dmabuf) 323 - dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer; 324 - else 325 - surface = vmw_framebuffer_to_vfbs(fb)->surface; 326 - 327 - if (surface && !surface->snooper.image) { 328 - DRM_ERROR("surface not suitable for cursor\n"); 329 - ret = -EINVAL; 330 - goto out; 331 - } 332 - 333 - /* setup new image */ 334 - ret = 0; 335 - if (surface) { 336 - /* vmw_user_surface_lookup takes one reference */ 337 - du->cursor_surface = surface; 338 - 339 - du->cursor_age = du->cursor_surface->snooper.age; 340 - 341 - ret = vmw_cursor_update_image(dev_priv, surface->snooper.image, 342 - 64, 64, hotspot_x, hotspot_y); 343 - } else if (dmabuf) { 344 - /* vmw_user_surface_lookup takes one reference */ 345 - du->cursor_dmabuf = dmabuf; 346 - 347 - ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h, 348 - hotspot_x, hotspot_y); 349 - } else { 350 - vmw_cursor_update_position(dev_priv, false, 0, 0); 351 - goto out; 352 - } 353 - 354 - if (!ret) { 355 - du->cursor_x = crtc_x + du->set_gui_x; 356 - du->cursor_y = crtc_y + du->set_gui_y; 357 - 358 - vmw_cursor_update_position(dev_priv, true, 359 - du->cursor_x + hotspot_x, 360 - du->cursor_y + hotspot_y); 361 - } 362 - 363 - out: 364 - return ret; 365 - } 366 - 367 - 368 - int vmw_du_cursor_plane_disable(struct drm_plane *plane) 369 - { 370 - if (plane->fb) { 371 - drm_framebuffer_unreference(plane->fb); 372 - plane->fb = NULL; 373 - } 374 - 375 - return -EINVAL; 376 - } 377 - 378 - 379 277 void vmw_du_cursor_plane_destroy(struct drm_plane *plane) 380 278 { 381 279 vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0); ··· 367 469 } 368 470 369 471 return 0; 370 - } 371 - 372 - 373 - void 374 - vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane, 375 - struct drm_plane_state *old_state) 376 - { 377 - struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; 378 - struct vmw_private *dev_priv = vmw_priv(crtc->dev); 379 - 380 - drm_atomic_set_fb_for_plane(plane->state, NULL); 381 - vmw_cursor_update_position(dev_priv, false, 0, 0); 382 472 } 383 473 384 474 ··· 1384 1498 */ 1385 1499 if (vmw_kms_srf_ok(dev_priv, 
mode_cmd->width, mode_cmd->height) && 1386 1500 dmabuf && only_2d && 1501 + mode_cmd->width > 64 && /* Don't create a proxy for cursor */ 1387 1502 dev_priv->active_display_unit == vmw_du_screen_target) { 1388 1503 ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd, 1389 1504 dmabuf, &surface);
-15
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
··· 256 256 u16 *r, u16 *g, u16 *b, 257 257 uint32_t size, 258 258 struct drm_modeset_acquire_ctx *ctx); 259 - int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, 260 - uint32_t handle, uint32_t width, uint32_t height, 261 - int32_t hot_x, int32_t hot_y); 262 - int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 263 259 int vmw_du_connector_set_property(struct drm_connector *connector, 264 260 struct drm_property *property, 265 261 uint64_t val); ··· 335 339 /* Universal Plane Helpers */ 336 340 void vmw_du_primary_plane_destroy(struct drm_plane *plane); 337 341 void vmw_du_cursor_plane_destroy(struct drm_plane *plane); 338 - int vmw_du_cursor_plane_disable(struct drm_plane *plane); 339 - int vmw_du_cursor_plane_update(struct drm_plane *plane, 340 - struct drm_crtc *crtc, 341 - struct drm_framebuffer *fb, 342 - int crtc_x, int crtc_y, 343 - unsigned int crtc_w, 344 - unsigned int crtc_h, 345 - uint32_t src_x, uint32_t src_y, 346 - uint32_t src_w, uint32_t src_h); 347 342 348 343 /* Atomic Helpers */ 349 344 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, ··· 343 356 struct drm_plane_state *state); 344 357 void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, 345 358 struct drm_plane_state *old_state); 346 - void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane, 347 - struct drm_plane_state *old_state); 348 359 int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, 349 360 struct drm_plane_state *new_state); 350 361 void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
+22 -5
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
··· 56 56 * @right: Right side of bounding box. 57 57 * @top: Top side of bounding box. 58 58 * @bottom: Bottom side of bounding box. 59 + * @fb_left: Left side of the framebuffer/content bounding box 60 + * @fb_top: Top of the framebuffer/content bounding box 59 61 * @buf: DMA buffer when DMA-ing between buffer and screen targets. 60 62 * @sid: Surface ID when copying between surface and screen targets. 61 63 */ ··· 65 63 struct vmw_kms_dirty base; 66 64 SVGA3dTransferType transfer; 67 65 s32 left, right, top, bottom; 66 + s32 fb_left, fb_top; 68 67 u32 pitch; 69 68 union { 70 69 struct vmw_dma_buffer *buf; ··· 650 647 * 651 648 * @dirty: The closure structure. 652 649 * 653 - * This function calculates the bounding box for all the incoming clips 650 + * This function calculates the bounding box for all the incoming clips. 654 651 */ 655 652 static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty) 656 653 { ··· 659 656 660 657 dirty->num_hits = 1; 661 658 662 - /* Calculate bounding box */ 659 + /* Calculate destination bounding box */ 663 660 ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1); 664 661 ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1); 665 662 ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2); 666 663 ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2); 664 + 665 + /* 666 + * Calculate content bounding box. We only need the top-left 667 + * coordinate because width and height will be the same as the 668 + * destination bounding box above 669 + */ 670 + ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x); 671 + ddirty->fb_top = min_t(s32, ddirty->fb_top, dirty->fb_y); 667 672 } 668 673 669 674 ··· 708 697 /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ 709 698 src_pitch = stdu->display_srf->base_size.width * stdu->cpp; 710 699 src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used); 711 - src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp; 700 + src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; 712 701 713 702 dst_pitch = ddirty->pitch; 714 703 dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used); 715 - dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp; 704 + dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; 716 705 717 706 718 707 /* Figure out the real direction */ ··· 771 760 } 772 761 773 762 out_cleanup: 774 - ddirty->left = ddirty->top = S32_MAX; 763 + ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; 775 764 ddirty->right = ddirty->bottom = S32_MIN; 776 765 } 777 766 ··· 823 812 SVGA3D_READ_HOST_VRAM; 824 813 ddirty.left = ddirty.top = S32_MAX; 825 814 ddirty.right = ddirty.bottom = S32_MIN; 815 + ddirty.fb_left = ddirty.fb_top = S32_MAX; 826 816 ddirty.pitch = vfb->base.pitches[0]; 827 817 ddirty.buf = buf; 828 818 ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit; ··· 1367 1355 DRM_ERROR("Failed to bind surface to STDU.\n"); 1368 1356 else 1369 1357 crtc->primary->fb = plane->state->fb; 1358 + 1359 + ret = vmw_stdu_update_st(dev_priv, stdu); 1360 + 1361 + if (ret) 1362 + DRM_ERROR("Failed to update STDU.\n"); 1370 1363 } 1371 1364 1372 1365
+15 -8
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 1274 1274 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 1275 1275 int ret; 1276 1276 uint32_t size; 1277 - uint32_t backup_handle; 1277 + uint32_t backup_handle = 0; 1278 1278 1279 1279 if (req->multisample_count != 0) 1280 + return -EINVAL; 1281 + 1282 + if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS) 1280 1283 return -EINVAL; 1281 1284 1282 1285 if (unlikely(vmw_user_surface_size == 0)) ··· 1317 1314 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1318 1315 &res->backup, 1319 1316 &user_srf->backup_base); 1320 - if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < 1321 - res->backup_size) { 1322 - DRM_ERROR("Surface backup buffer is too small.\n"); 1323 - vmw_dmabuf_unreference(&res->backup); 1324 - ret = -EINVAL; 1325 - goto out_unlock; 1317 + if (ret == 0) { 1318 + if (res->backup->base.num_pages * PAGE_SIZE < 1319 + res->backup_size) { 1320 + DRM_ERROR("Surface backup buffer is too small.\n"); 1321 + vmw_dmabuf_unreference(&res->backup); 1322 + ret = -EINVAL; 1323 + goto out_unlock; 1324 + } else { 1325 + backup_handle = req->buffer_handle; 1326 + } 1326 1327 } 1327 1328 } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) 1328 1329 ret = vmw_user_dmabuf_alloc(dev_priv, tfile, ··· 1498 1491 dev_priv->stdu_max_height); 1499 1492 1500 1493 if (size.width > max_width || size.height > max_height) { 1501 - DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u", 1494 + DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u", 1502 1495 size.width, size.height, 1503 1496 max_width, max_height); 1504 1497 return -EINVAL;
+1 -1
drivers/gpu/host1x/dev.c
··· 172 172 173 173 host->rst = devm_reset_control_get(&pdev->dev, "host1x"); 174 174 if (IS_ERR(host->rst)) { 175 - err = PTR_ERR(host->clk); 175 + err = PTR_ERR(host->rst); 176 176 dev_err(&pdev->dev, "failed to get reset: %d\n", err); 177 177 return err; 178 178 }
+8 -7
drivers/gpu/ipu-v3/ipu-common.c
··· 725 725 spin_lock_irqsave(&ipu->lock, flags); 726 726 727 727 val = ipu_cm_read(ipu, IPU_CONF); 728 - if (vdi) { 728 + if (vdi) 729 729 val |= IPU_CONF_IC_INPUT; 730 - } else { 730 + else 731 731 val &= ~IPU_CONF_IC_INPUT; 732 - if (csi_id == 1) 733 - val |= IPU_CONF_CSI_SEL; 734 - else 735 - val &= ~IPU_CONF_CSI_SEL; 736 - } 732 + 733 + if (csi_id == 1) 734 + val |= IPU_CONF_CSI_SEL; 735 + else 736 + val &= ~IPU_CONF_CSI_SEL; 737 + 737 738 ipu_cm_write(ipu, val, IPU_CONF); 738 739 739 740 spin_unlock_irqrestore(&ipu->lock, flags);
+5 -8
drivers/gpu/ipu-v3/ipu-pre.c
··· 131 131 if (pre->in_use) 132 132 return -EBUSY; 133 133 134 - clk_prepare_enable(pre->clk_axi); 135 - 136 134 /* first get the engine out of reset and remove clock gating */ 137 135 writel(0, pre->regs + IPU_PRE_CTRL); 138 136 ··· 147 149 148 150 void ipu_pre_put(struct ipu_pre *pre) 149 151 { 150 - u32 val; 151 - 152 - val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE; 153 - writel(val, pre->regs + IPU_PRE_CTRL); 154 - 155 - clk_disable_unprepare(pre->clk_axi); 152 + writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL); 156 153 157 154 pre->in_use = false; 158 155 } ··· 242 249 if (!pre->buffer_virt) 243 250 return -ENOMEM; 244 251 252 + clk_prepare_enable(pre->clk_axi); 253 + 245 254 pre->dev = dev; 246 255 platform_set_drvdata(pdev, pre); 247 256 mutex_lock(&ipu_pre_list_mutex); ··· 262 267 list_del(&pre->list); 263 268 available_pres--; 264 269 mutex_unlock(&ipu_pre_list_mutex); 270 + 271 + clk_disable_unprepare(pre->clk_axi); 265 272 266 273 if (pre->buffer_virt) 267 274 gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
+4 -2
drivers/hid/Kconfig
··· 275 275 - Trio Linker Plus II 276 276 277 277 config HID_ELECOM 278 - tristate "ELECOM BM084 bluetooth mouse" 278 + tristate "ELECOM HID devices" 279 279 depends on HID 280 280 ---help--- 281 - Support for the ELECOM BM084 (bluetooth mouse). 281 + Support for ELECOM devices: 282 + - BM084 Bluetooth Mouse 283 + - DEFT Trackball (Wired and wireless) 282 284 283 285 config HID_ELO 284 286 tristate "ELO USB 4000/4500 touchscreen"
+12
drivers/hid/hid-asus.c
··· 69 69 #define QUIRK_IS_MULTITOUCH BIT(3) 70 70 #define QUIRK_NO_CONSUMER_USAGES BIT(4) 71 71 #define QUIRK_USE_KBD_BACKLIGHT BIT(5) 72 + #define QUIRK_T100_KEYBOARD BIT(6) 72 73 73 74 #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ 74 75 QUIRK_NO_INIT_REPORTS | \ ··· 537 536 drvdata->kbd_backlight->removed = true; 538 537 cancel_work_sync(&drvdata->kbd_backlight->work); 539 538 } 539 + 540 + hid_hw_stop(hdev); 540 541 } 541 542 542 543 static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, ··· 551 548 hid_info(hdev, "Fixing up Asus notebook report descriptor\n"); 552 549 rdesc[55] = 0xdd; 553 550 } 551 + if (drvdata->quirks & QUIRK_T100_KEYBOARD && 552 + *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) { 553 + hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n"); 554 + rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT; 555 + } 556 + 554 557 return rdesc; 555 558 } 556 559 ··· 569 560 USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, 570 561 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, 571 562 USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT }, 563 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, 564 + USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD), 565 + QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, 572 566 { } 573 567 }; 574 568 MODULE_DEVICE_TABLE(hid, asus_devices);
+224 -61
drivers/hid/hid-core.c
··· 826 826 * hid-rmi should take care of them, 827 827 * not hid-generic 828 828 */ 829 - if (IS_ENABLED(CONFIG_HID_RMI)) 830 - hid->group = HID_GROUP_RMI; 829 + hid->group = HID_GROUP_RMI; 831 830 break; 832 831 } 833 832 833 + /* fall back to generic driver in case specific driver doesn't exist */ 834 + switch (hid->group) { 835 + case HID_GROUP_MULTITOUCH_WIN_8: 836 + /* fall-through */ 837 + case HID_GROUP_MULTITOUCH: 838 + if (!IS_ENABLED(CONFIG_HID_MULTITOUCH)) 839 + hid->group = HID_GROUP_GENERIC; 840 + break; 841 + case HID_GROUP_SENSOR_HUB: 842 + if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB)) 843 + hid->group = HID_GROUP_GENERIC; 844 + break; 845 + case HID_GROUP_RMI: 846 + if (!IS_ENABLED(CONFIG_HID_RMI)) 847 + hid->group = HID_GROUP_GENERIC; 848 + break; 849 + case HID_GROUP_WACOM: 850 + if (!IS_ENABLED(CONFIG_HID_WACOM)) 851 + hid->group = HID_GROUP_GENERIC; 852 + break; 853 + case HID_GROUP_LOGITECH_DJ_DEVICE: 854 + if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ)) 855 + hid->group = HID_GROUP_GENERIC; 856 + break; 857 + } 834 858 vfree(parser); 835 859 return 0; 836 860 } ··· 1787 1763 * used as a driver. See hid_scan_report(). 1788 1764 */ 1789 1765 static const struct hid_device_id hid_have_special_driver[] = { 1766 + #if IS_ENABLED(CONFIG_HID_A4TECH) 1790 1767 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, 1791 1768 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, 1792 1769 { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, 1770 + #endif 1771 + #if IS_ENABLED(CONFIG_HID_ACCUTOUCH) 1772 + { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, 1773 + #endif 1774 + #if IS_ENABLED(CONFIG_HID_ACRUX) 1793 1775 { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, 1794 1776 { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, 1777 + #endif 1778 + #if IS_ENABLED(CONFIG_HID_ALPS) 1795 1779 { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, 1780 + #endif 1781 + #if IS_ENABLED(CONFIG_HID_APPLE) 1796 1782 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, 1797 - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, 1798 - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, 1799 1783 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, 1800 1784 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, 1801 1785 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, ··· 1824 1792 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, 1825 1793 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, 1826 1794 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, 1827 - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, 1828 - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, 1829 - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, 1830 - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, 1831 - { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, 1832 1795 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) }, 1833 1796 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) }, 1834 1797 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) }, ··· 1878 1851 { 
HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, 1879 1852 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 1880 1853 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 1854 + #endif 1855 + #if IS_ENABLED(CONFIG_HID_APPLEIR) 1856 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) }, 1857 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) }, 1858 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) }, 1859 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, 1860 + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) }, 1861 + #endif 1862 + #if IS_ENABLED(CONFIG_HID_ASUS) 1881 1863 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) }, 1882 1864 { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, 1883 1865 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, 1884 1866 { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, 1867 + { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) }, 1868 + #endif 1869 + #if IS_ENABLED(CONFIG_HID_AUREAL) 1885 1870 { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, 1871 + #endif 1872 + #if IS_ENABLED(CONFIG_HID_BELKIN) 1886 1873 { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, 1874 + { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1875 + #endif 1876 + #if IS_ENABLED(CONFIG_HID_BETOP_FF) 1887 1877 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, 1888 1878 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) }, 1889 1879 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) }, 1890 1880 { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) }, 1891 - { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 1892 - { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, 1881 + #endif 1882 + #if IS_ENABLED(CONFIG_HID_CHERRY) 1893 1883 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, 1894 1884 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, 1885 + #endif 1886 + #if IS_ENABLED(CONFIG_HID_CHICONY) 1895 1887 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, 1896 - { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, 1897 1888 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, 1898 1889 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, 1899 1890 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, 1891 + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, 1892 + #endif 1893 + #if IS_ENABLED(CONFIG_HID_CMEDIA) 1894 + { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) }, 1895 + #endif 1896 + #if IS_ENABLED(CONFIG_HID_CORSAIR) 1900 1897 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, 1901 1898 { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, 1902 - { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, 1899 + #endif 1900 + #if IS_ENABLED(CONFIG_HID_CP2112) 1903 1901 { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, 1902 + #endif 1903 + #if IS_ENABLED(CONFIG_HID_CYPRESS) 1904 1904 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, 
USB_DEVICE_ID_CYPRESS_BARCODE_1) }, 1905 1905 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, 1906 1906 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, 1907 1907 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, 1908 1908 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, 1909 - { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, 1909 + #endif 1910 + #if IS_ENABLED(CONFIG_HID_DRAGONRISE) 1910 1911 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, 1911 1912 { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, 1912 - #if IS_ENABLED(CONFIG_HID_MAYFLASH) 1913 - { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) }, 1914 - { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, 1915 - { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, 1916 - { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, 1917 1913 #endif 1918 - { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, 1919 - { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, 1914 + #if IS_ENABLED(CONFIG_HID_ELECOM) 1920 1915 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 1916 + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, 1917 + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, 1918 + #endif 1919 + #if IS_ENABLED(CONFIG_HID_ELO) 1921 1920 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, 1922 1921 { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, 1923 - { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, 1922 + #endif 1923 + #if IS_ENABLED(CONFIG_HID_EMS_FF) 1924 1924 { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, 1925 + #endif 1926 + #if IS_ENABLED(CONFIG_HID_EZKEY) 1925 1927 { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, 1926 - { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, 1927 - { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, 1928 + #endif 1929 + #if IS_ENABLED(CONFIG_HID_GEMBIRD) 1928 1930 { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) }, 1929 - { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, 1931 + #endif 1932 + #if IS_ENABLED(CONFIG_HID_GFRM) 1933 + { HID_BLUETOOTH_DEVICE(0x58, 0x2000) }, 1934 + { HID_BLUETOOTH_DEVICE(0x471, 0x2210) }, 1935 + #endif 1936 + #if IS_ENABLED(CONFIG_HID_GREENASIA) 1930 1937 { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, 1938 + #endif 1939 + #if IS_ENABLED(CONFIG_HID_GT683R) 1940 + { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, 1941 + #endif 1942 + #if IS_ENABLED(CONFIG_HID_GYRATION) 1931 1943 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, 1932 1944 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, 1933 1945 { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, 1946 + #endif 1947 + #if IS_ENABLED(CONFIG_HID_HOLTEK) 1934 1948 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, 1935 1949 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, 1936 1950 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, ··· 1980 1912 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, 
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, 1981 1913 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, 1982 1914 { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, 1983 - { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, 1984 - { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, 1985 - { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, 1915 + #endif 1916 + #if IS_ENABLED(CONFIG_HID_ICADE) 1986 1917 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, 1918 + #endif 1919 + #if IS_ENABLED(CONFIG_HID_KENSINGTON) 1987 1920 { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, 1921 + #endif 1922 + #if IS_ENABLED(CONFIG_HID_KEYTOUCH) 1988 1923 { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, 1924 + #endif 1925 + #if IS_ENABLED(CONFIG_HID_KYE) 1989 1926 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, 1990 1927 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, 1991 1928 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, ··· 2000 1927 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) }, 2001 1928 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, 2002 1929 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) }, 2003 - { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, 1930 + #endif 1931 + #if IS_ENABLED(CONFIG_HID_LCPOWER) 2004 1932 { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, 1933 + #endif 1934 + #if IS_ENABLED(CONFIG_HID_LED) 1935 + { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, 1936 + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, 1937 + { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, 1938 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, 1939 + { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, 1940 + { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, 1941 + #endif 2005 1942 #if IS_ENABLED(CONFIG_HID_LENOVO) 2006 1943 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, 2007 1944 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, 2008 1945 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, 2009 1946 { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, 2010 1947 #endif 2011 - { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, 1948 + #if IS_ENABLED(CONFIG_HID_LOGITECH) 2012 1949 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, 2013 1950 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, 2014 1951 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, 2015 1952 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, 2016 - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, 2017 - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) }, 2018 1953 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, 2019 1954 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, 2020 1955 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, ··· 2035 1954 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 
USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, 2036 1955 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) }, 2037 1956 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) }, 2038 - { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) }, 2039 1957 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, 2040 1958 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, 2041 1959 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, ··· 2046 1966 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) }, 2047 1967 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, 2048 1968 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) }, 2049 - #if IS_ENABLED(CONFIG_HID_LOGITECH_DJ) 2050 - { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) }, 2051 - { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) }, 2052 - #endif 2053 1969 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) }, 2054 1970 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, 2055 1971 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, 2056 1972 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, 2057 - { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, 2058 - { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, 2059 - { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, 1973 + #endif 1974 + #if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP) 1975 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) }, 1976 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) }, 1977 + #endif 1978 + #if IS_ENABLED(CONFIG_HID_LOGITECH_DJ) 1979 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) }, 1980 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) }, 1981 + #endif 1982 + #if IS_ENABLED(CONFIG_HID_MAGICMOUSE) 1983 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) }, 1984 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) }, 1985 + #endif 1986 + #if IS_ENABLED(CONFIG_HID_MAYFLASH) 1987 + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) }, 1988 + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) }, 1989 + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) }, 1990 + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) }, 1991 + #endif 1992 + #if IS_ENABLED(CONFIG_HID_MICROSOFT) 2060 1993 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, 2061 1994 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, 2062 1995 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, ··· 2085 1992 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) }, 2086 1993 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) }, 2087 1994 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, 1995 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, 1996 + #endif 1997 + #if IS_ENABLED(CONFIG_HID_MONTEREY) 2088 1998 { 
HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 2089 - { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, 1999 + #endif 2000 + #if IS_ENABLED(CONFIG_HID_MULTITOUCH) 2001 + { HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, 2002 + #endif 2003 + #if IS_ENABLED(CONFIG_HID_WIIMOTE) 2004 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 2005 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 2006 + #endif 2007 + #if IS_ENABLED(CONFIG_HID_NTI) 2090 2008 { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) }, 2009 + #endif 2010 + #if IS_ENABLED(CONFIG_HID_NTRIG) 2091 2011 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 2092 2012 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, 2093 2013 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, ··· 2120 2014 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, 2121 2015 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, 2122 2016 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, 2017 + #endif 2018 + #if IS_ENABLED(CONFIG_HID_ORTEK) 2123 2019 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, 2124 2020 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 2021 + { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, 2022 + #endif 2023 + #if IS_ENABLED(CONFIG_HID_PANTHERLORD) 2024 + { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, 2025 + { HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, 2026 + { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, 2027 + { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, 2028 + #endif 2029 + #if IS_ENABLED(CONFIG_HID_PENMOUNT) 2125 2030 { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, 2031 + #endif 2032 + #if IS_ENABLED(CONFIG_HID_PETALYNX) 2126 2033 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, 2034 + #endif 2035 + #if IS_ENABLED(CONFIG_HID_PICOLCD) 2036 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, 2037 + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, 2038 + #endif 2039 + #if IS_ENABLED(CONFIG_HID_PLANTRONICS) 2127 2040 { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, 2041 + #endif 2042 + #if IS_ENABLED(CONFIG_HID_PRIMAX) 2128 2043 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, 2129 - { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, 2044 + #endif 2045 + #if IS_ENABLED(CONFIG_HID_PRODIKEYS) 2046 + { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, 2047 + #endif 2048 + #if IS_ENABLED(CONFIG_HID_RMI) 2049 + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) }, 2050 + { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) }, 2051 + #endif 2130 2052 #if IS_ENABLED(CONFIG_HID_ROCCAT) 2131 2053 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, 2132 2054 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, ··· 2182 2048 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, 2183 2049 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, 2184 2050 #endif 2051 + #if 
IS_ENABLED(CONFIG_HID_SAMSUNG) 2185 2052 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, 2186 2053 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, 2187 - { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, 2054 + #endif 2055 + #if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS) 2056 + { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) }, 2057 + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, 2058 + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, 2059 + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, 2060 + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, 2061 + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, 2062 + { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, 2063 + #endif 2064 + #if IS_ENABLED(CONFIG_HID_SONY) 2065 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, 2188 2066 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, 2189 2067 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, 2190 2068 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, ··· 2215 2069 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, 2216 2070 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, 2217 2071 { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, 2072 + #endif 2073 + #if IS_ENABLED(CONFIG_HID_SPEEDLINK) 2074 + { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, 2075 + #endif 2076 + #if IS_ENABLED(CONFIG_HID_STEELSERIES) 2218 2077 { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, 2078 + #endif 2079 + #if IS_ENABLED(CONFIG_HID_SUNPLUS) 2219 2080 { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, 2220 - { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, 2081 + #endif 2082 + #if IS_ENABLED(CONFIG_HID_THRUSTMASTER) 2221 2083 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, 2222 2084 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, 2223 2085 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, ··· 2234 2080 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, 2235 2081 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, 2236 2082 { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, 2083 + #endif 2084 + #if IS_ENABLED(CONFIG_HID_TIVO) 2237 2085 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, 2238 2086 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, 2239 2087 { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, 2088 + #endif 2089 + #if IS_ENABLED(CONFIG_HID_TOPSEED) 2090 + { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, 2091 + { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, 2092 + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, 2240 2093 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, 2241 2094 { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, 2095 + #endif 2096 + #if IS_ENABLED(CONFIG_HID_TWINHAN) 2242 2097 { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, 2098 + #endif 2099 + 
#if IS_ENABLED(CONFIG_HID_UCLOGIC) 2100 + { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, 2101 + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) }, 2243 2102 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, 2244 2103 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, 2245 2104 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, ··· 2260 2093 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, 2261 2094 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, 2262 2095 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, 2263 - { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) }, 2264 2096 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, 2265 2097 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) }, 2266 2098 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, 2267 2099 { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, 2268 - { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 2269 2100 { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, 2270 - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, 2271 - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, 2272 - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, 2273 - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, 2274 - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, 2275 - { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, 2276 - { HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) }, 2101 + { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) }, 2102 + #endif 2103 + #if IS_ENABLED(CONFIG_HID_UDRAW_PS3) 2104 + { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) }, 2105 + #endif 2106 + #if IS_ENABLED(CONFIG_HID_WALTOP) 2277 2107 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, 2278 2108 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, 2279 2109 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, ··· 2278 2114 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) }, 2279 2115 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) }, 2280 2116 { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, 2281 - { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, 2117 + #endif 2118 + #if IS_ENABLED(CONFIG_HID_XINMO) 2282 2119 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, 2283 2120 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, 2121 + #endif 2122 + #if IS_ENABLED(CONFIG_HID_ZEROPLUS) 2284 2123 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, 2285 2124 { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, 2125 + #endif 2126 + #if IS_ENABLED(CONFIG_HID_ZYDACRON) 2286 2127 { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, 2287 - 2288 - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) }, 2289 - { 
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) }, 2290 - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, 2291 - { HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) }, 2292 - { HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) }, 2293 - { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) }, 2128 + #endif 2294 2129 { } 2295 2130 }; 2296 2131
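The hid-core.c hunk above wraps each group of entries in hid_have_special_driver[] in #if IS_ENABLED(CONFIG_HID_...) guards so the table only claims devices whose specific driver is actually built. A standalone sketch of the same compile-time-guarded-table idea; CONFIG_DRIVER_FOO is a made-up symbol, and plain #if defined() stands in for IS_ENABLED(), which relies on Kconfig-generated macros to also cover =m:

#include <stdio.h>

/*
 * Illustrative only: compile-time guards around device-table entries.
 * With CONFIG_DRIVER_FOO undefined, only the sentinel remains in the
 * table, so the guarded device falls back to the generic handler.
 */
struct id_entry {
	unsigned short vendor;
	unsigned short product;
};

static const struct id_entry have_special_driver[] = {
#if defined(CONFIG_DRIVER_FOO)
	{ 0x056e, 0x0061 },	/* present only when the matching driver is built */
#endif
	{ 0, 0 }		/* sentinel */
};

int main(void)
{
	size_t n = sizeof(have_special_driver) / sizeof(have_special_driver[0]);

	printf("%zu table entries (including the sentinel)\n", n);
	return 0;
}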
+53 -9
drivers/hid/hid-elecom.c
··· 1 1 /* 2 - * HID driver for Elecom BM084 (bluetooth mouse). 3 - * Removes a non-existing horizontal wheel from 4 - * the HID descriptor. 5 - * (This module is based on "hid-ortek".) 6 - * 2 + * HID driver for ELECOM devices. 7 3 * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> 4 + * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> 5 + * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> 8 6 */ 9 7 10 8 /* ··· 21 23 static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, 22 24 unsigned int *rsize) 23 25 { 24 - if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { 25 - hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); 26 - rdesc[47] = 0x00; 26 + switch (hdev->product) { 27 + case USB_DEVICE_ID_ELECOM_BM084: 28 + /* The BM084 Bluetooth mouse includes a non-existing horizontal 29 + * wheel in the HID descriptor. */ 30 + if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { 31 + hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); 32 + rdesc[47] = 0x00; 33 + } 34 + break; 35 + case USB_DEVICE_ID_ELECOM_DEFT_WIRED: 36 + case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: 37 + /* The DEFT trackball has eight buttons, but its descriptor only 38 + * reports five, disabling the three Fn buttons on the top of 39 + * the mouse. 40 + * 41 + * Apply the following diff to the descriptor: 42 + * 43 + * Collection (Physical), Collection (Physical), 44 + * Report ID (1), Report ID (1), 45 + * Report Count (5), -> Report Count (8), 46 + * Report Size (1), Report Size (1), 47 + * Usage Page (Button), Usage Page (Button), 48 + * Usage Minimum (01h), Usage Minimum (01h), 49 + * Usage Maximum (05h), -> Usage Maximum (08h), 50 + * Logical Minimum (0), Logical Minimum (0), 51 + * Logical Maximum (1), Logical Maximum (1), 52 + * Input (Variable), Input (Variable), 53 + * Report Count (1), -> Report Count (0), 54 + * Report Size (3), Report Size (3), 55 + * Input (Constant), Input (Constant), 56 + * Report Size (16), Report Size (16), 57 + * Report Count (2), Report Count (2), 58 + * Usage Page (Desktop), Usage Page (Desktop), 59 + * Usage (X), Usage (X), 60 + * Usage (Y), Usage (Y), 61 + * Logical Minimum (-32768), Logical Minimum (-32768), 62 + * Logical Maximum (32767), Logical Maximum (32767), 63 + * Input (Variable, Relative), Input (Variable, Relative), 64 + * End Collection, End Collection, 65 + */ 66 + if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { 67 + hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); 68 + rdesc[13] = 8; /* Button/Variable Report Count */ 69 + rdesc[21] = 8; /* Button/Variable Usage Maximum */ 70 + rdesc[29] = 0; /* Button/Constant Report Count */ 71 + } 72 + break; 27 73 } 28 74 return rdesc; 29 75 } 30 76 31 77 static const struct hid_device_id elecom_devices[] = { 32 - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)}, 78 + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, 79 + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, 80 + { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, 33 81 { } 34 82 }; 35 83 MODULE_DEVICE_TABLE(hid, elecom_devices);
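The hid-elecom.c hunk above fixes a fixed-size report descriptor by patching known byte offsets once the descriptor's size and signature match. A standalone sketch of that fixup style; fixup_descriptor() is an illustrative name, not the driver's function, and the offsets simply mirror the ones checked in the hunk:

#include <stdio.h>

/*
 * Illustrative only: patch known byte offsets in a report-descriptor
 * buffer after verifying its size and expected contents.
 */
static void fixup_descriptor(unsigned char *rdesc, unsigned int rsize)
{
	if (rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
		rdesc[13] = 8;	/* report count: expose all eight buttons */
		rdesc[21] = 8;	/* usage maximum */
		rdesc[29] = 0;	/* constant padding no longer needed */
	}
}

int main(void)
{
	unsigned char rdesc[213] = { 0 };

	rdesc[13] = 5;
	rdesc[21] = 5;
	rdesc[29] = 1;
	fixup_descriptor(rdesc, sizeof(rdesc));
	printf("%d %d %d\n", rdesc[13], rdesc[21], rdesc[29]); /* 8 8 0 */
	return 0;
}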
+6
drivers/hid/hid-ids.h
··· 173 173 #define USB_VENDOR_ID_ASUSTEK 0x0b05 174 174 #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 175 175 #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b 176 + #define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0 176 177 #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585 177 178 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101 178 179 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854 ··· 319 318 #define USB_VENDOR_ID_DELCOM 0x0fc5 320 319 #define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 321 320 321 + #define USB_VENDOR_ID_DELL 0x413c 322 + #define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a 323 + 322 324 #define USB_VENDOR_ID_DELORME 0x1163 323 325 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 324 326 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 ··· 362 358 363 359 #define USB_VENDOR_ID_ELECOM 0x056e 364 360 #define USB_DEVICE_ID_ELECOM_BM084 0x0061 361 + #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe 362 + #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff 365 363 366 364 #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 367 365 #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
+13
drivers/hid/i2c-hid/i2c-hid.c
··· 897 897 return 0; 898 898 } 899 899 900 + static void i2c_hid_acpi_fix_up_power(struct device *dev) 901 + { 902 + acpi_handle handle = ACPI_HANDLE(dev); 903 + struct acpi_device *adev; 904 + 905 + if (handle && acpi_bus_get_device(handle, &adev) == 0) 906 + acpi_device_fix_up_power(adev); 907 + } 908 + 900 909 static const struct acpi_device_id i2c_hid_acpi_match[] = { 901 910 {"ACPI0C50", 0 }, 902 911 {"PNP0C50", 0 }, ··· 918 909 { 919 910 return -ENODEV; 920 911 } 912 + 913 + static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {} 921 914 #endif 922 915 923 916 #ifdef CONFIG_OF ··· 1040 1029 ret = i2c_hid_alloc_buffers(ihid, HID_MIN_BUFFER_SIZE); 1041 1030 if (ret < 0) 1042 1031 goto err_regulator; 1032 + 1033 + i2c_hid_acpi_fix_up_power(&client->dev); 1043 1034 1044 1035 pm_runtime_get_noresume(&client->dev); 1045 1036 pm_runtime_set_active(&client->dev);
+1
drivers/hid/usbhid/hid-quirks.c
··· 85 85 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 86 86 { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, 87 87 { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, 88 + { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 88 89 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 89 90 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, 90 91 { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+24 -23
drivers/hid/wacom_wac.c
··· 1571 1571 { 1572 1572 unsigned char *data = wacom->data; 1573 1573 1574 - if (wacom->pen_input) 1574 + if (wacom->pen_input) { 1575 1575 dev_dbg(wacom->pen_input->dev.parent, 1576 1576 "%s: received report #%d\n", __func__, data[0]); 1577 - else if (wacom->touch_input) 1577 + 1578 + if (len == WACOM_PKGLEN_PENABLED || 1579 + data[0] == WACOM_REPORT_PENABLED) 1580 + return wacom_tpc_pen(wacom); 1581 + } 1582 + else if (wacom->touch_input) { 1578 1583 dev_dbg(wacom->touch_input->dev.parent, 1579 1584 "%s: received report #%d\n", __func__, data[0]); 1580 1585 1581 - switch (len) { 1582 - case WACOM_PKGLEN_TPC1FG: 1583 - return wacom_tpc_single_touch(wacom, len); 1584 - 1585 - case WACOM_PKGLEN_TPC2FG: 1586 - return wacom_tpc_mt_touch(wacom); 1587 - 1588 - case WACOM_PKGLEN_PENABLED: 1589 - return wacom_tpc_pen(wacom); 1590 - 1591 - default: 1592 - switch (data[0]) { 1593 - case WACOM_REPORT_TPC1FG: 1594 - case WACOM_REPORT_TPCHID: 1595 - case WACOM_REPORT_TPCST: 1596 - case WACOM_REPORT_TPC1FGE: 1586 + switch (len) { 1587 + case WACOM_PKGLEN_TPC1FG: 1597 1588 return wacom_tpc_single_touch(wacom, len); 1598 1589 1599 - case WACOM_REPORT_TPCMT: 1600 - case WACOM_REPORT_TPCMT2: 1601 - return wacom_mt_touch(wacom); 1590 + case WACOM_PKGLEN_TPC2FG: 1591 + return wacom_tpc_mt_touch(wacom); 1602 1592 1603 - case WACOM_REPORT_PENABLED: 1604 - return wacom_tpc_pen(wacom); 1593 + default: 1594 + switch (data[0]) { 1595 + case WACOM_REPORT_TPC1FG: 1596 + case WACOM_REPORT_TPCHID: 1597 + case WACOM_REPORT_TPCST: 1598 + case WACOM_REPORT_TPC1FGE: 1599 + return wacom_tpc_single_touch(wacom, len); 1600 + 1601 + case WACOM_REPORT_TPCMT: 1602 + case WACOM_REPORT_TPCMT2: 1603 + return wacom_mt_touch(wacom); 1604 + 1605 + } 1605 1606 } 1606 1607 } 1607 1608
+1 -1
drivers/hsi/clients/ssi_protocol.c
··· 1066 1066 dev->addr_len = 1; 1067 1067 dev->tx_queue_len = SSIP_TXQUEUE_LEN; 1068 1068 1069 - dev->destructor = free_netdev; 1069 + dev->needs_free_netdev = true; 1070 1070 dev->header_ops = &phonet_header_ops; 1071 1071 } 1072 1072
+1
drivers/hwmon/Kconfig
··· 343 343 344 344 config SENSORS_ASPEED 345 345 tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver" 346 + select REGMAP 346 347 help 347 348 This driver provides support for ASPEED AST2400/AST2500 PWM 348 349 and Fan Tacho controllers.
+35 -30
drivers/hwmon/aspeed-pwm-tacho.c
··· 7 7 */ 8 8 9 9 #include <linux/clk.h> 10 + #include <linux/errno.h> 10 11 #include <linux/gpio/consumer.h> 11 12 #include <linux/delay.h> 12 13 #include <linux/hwmon.h> ··· 495 494 return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit); 496 495 } 497 496 498 - static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, 497 + static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, 499 498 u8 fan_tach_ch) 500 499 { 501 500 u32 raw_data, tach_div, clk_source, sec, val; ··· 511 510 msleep(sec); 512 511 513 512 regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val); 513 + if (!(val & RESULT_STATUS_MASK)) 514 + return -ETIMEDOUT; 515 + 514 516 raw_data = val & RESULT_VALUE_MASK; 515 517 tach_div = priv->type_fan_tach_clock_division[type]; 516 518 tach_div = 0x4 << (tach_div * 2); ··· 565 561 { 566 562 struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); 567 563 int index = sensor_attr->index; 568 - u32 rpm; 564 + int rpm; 569 565 struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev); 570 566 571 567 rpm = aspeed_get_fan_tach_ch_rpm(priv, index); 568 + if (rpm < 0) 569 + return rpm; 572 570 573 - return sprintf(buf, "%u\n", rpm); 571 + return sprintf(buf, "%d\n", rpm); 574 572 } 575 573 576 574 static umode_t pwm_is_visible(struct kobject *kobj, ··· 597 591 return a->mode; 598 592 } 599 593 600 - static SENSOR_DEVICE_ATTR(pwm0, 0644, 601 - show_pwm, set_pwm, 0); 602 594 static SENSOR_DEVICE_ATTR(pwm1, 0644, 603 - show_pwm, set_pwm, 1); 595 + show_pwm, set_pwm, 0); 604 596 static SENSOR_DEVICE_ATTR(pwm2, 0644, 605 - show_pwm, set_pwm, 2); 597 + show_pwm, set_pwm, 1); 606 598 static SENSOR_DEVICE_ATTR(pwm3, 0644, 607 - show_pwm, set_pwm, 3); 599 + show_pwm, set_pwm, 2); 608 600 static SENSOR_DEVICE_ATTR(pwm4, 0644, 609 - show_pwm, set_pwm, 4); 601 + show_pwm, set_pwm, 3); 610 602 static SENSOR_DEVICE_ATTR(pwm5, 0644, 611 - show_pwm, set_pwm, 5); 603 + show_pwm, set_pwm, 4); 612 604 static SENSOR_DEVICE_ATTR(pwm6, 0644, 613 - show_pwm, set_pwm, 6); 605 + show_pwm, set_pwm, 5); 614 606 static SENSOR_DEVICE_ATTR(pwm7, 0644, 607 + show_pwm, set_pwm, 6); 608 + static SENSOR_DEVICE_ATTR(pwm8, 0644, 615 609 show_pwm, set_pwm, 7); 616 610 static struct attribute *pwm_dev_attrs[] = { 617 - &sensor_dev_attr_pwm0.dev_attr.attr, 618 611 &sensor_dev_attr_pwm1.dev_attr.attr, 619 612 &sensor_dev_attr_pwm2.dev_attr.attr, 620 613 &sensor_dev_attr_pwm3.dev_attr.attr, ··· 621 616 &sensor_dev_attr_pwm5.dev_attr.attr, 622 617 &sensor_dev_attr_pwm6.dev_attr.attr, 623 618 &sensor_dev_attr_pwm7.dev_attr.attr, 619 + &sensor_dev_attr_pwm8.dev_attr.attr, 624 620 NULL, 625 621 }; 626 622 ··· 630 624 .is_visible = pwm_is_visible, 631 625 }; 632 626 633 - static SENSOR_DEVICE_ATTR(fan0_input, 0444, 634 - show_rpm, NULL, 0); 635 627 static SENSOR_DEVICE_ATTR(fan1_input, 0444, 636 - show_rpm, NULL, 1); 628 + show_rpm, NULL, 0); 637 629 static SENSOR_DEVICE_ATTR(fan2_input, 0444, 638 - show_rpm, NULL, 2); 630 + show_rpm, NULL, 1); 639 631 static SENSOR_DEVICE_ATTR(fan3_input, 0444, 640 - show_rpm, NULL, 3); 632 + show_rpm, NULL, 2); 641 633 static SENSOR_DEVICE_ATTR(fan4_input, 0444, 642 - show_rpm, NULL, 4); 634 + show_rpm, NULL, 3); 643 635 static SENSOR_DEVICE_ATTR(fan5_input, 0444, 644 - show_rpm, NULL, 5); 636 + show_rpm, NULL, 4); 645 637 static SENSOR_DEVICE_ATTR(fan6_input, 0444, 646 - show_rpm, NULL, 6); 638 + show_rpm, NULL, 5); 647 639 static SENSOR_DEVICE_ATTR(fan7_input, 0444, 648 - show_rpm, NULL, 7); 640 + show_rpm, NULL, 6); 649 641 static 
SENSOR_DEVICE_ATTR(fan8_input, 0444, 650 - show_rpm, NULL, 8); 642 + show_rpm, NULL, 7); 651 643 static SENSOR_DEVICE_ATTR(fan9_input, 0444, 652 - show_rpm, NULL, 9); 644 + show_rpm, NULL, 8); 653 645 static SENSOR_DEVICE_ATTR(fan10_input, 0444, 654 - show_rpm, NULL, 10); 646 + show_rpm, NULL, 9); 655 647 static SENSOR_DEVICE_ATTR(fan11_input, 0444, 656 - show_rpm, NULL, 11); 648 + show_rpm, NULL, 10); 657 649 static SENSOR_DEVICE_ATTR(fan12_input, 0444, 658 - show_rpm, NULL, 12); 650 + show_rpm, NULL, 11); 659 651 static SENSOR_DEVICE_ATTR(fan13_input, 0444, 660 - show_rpm, NULL, 13); 652 + show_rpm, NULL, 12); 661 653 static SENSOR_DEVICE_ATTR(fan14_input, 0444, 662 - show_rpm, NULL, 14); 654 + show_rpm, NULL, 13); 663 655 static SENSOR_DEVICE_ATTR(fan15_input, 0444, 656 + show_rpm, NULL, 14); 657 + static SENSOR_DEVICE_ATTR(fan16_input, 0444, 664 658 show_rpm, NULL, 15); 665 659 static struct attribute *fan_dev_attrs[] = { 666 - &sensor_dev_attr_fan0_input.dev_attr.attr, 667 660 &sensor_dev_attr_fan1_input.dev_attr.attr, 668 661 &sensor_dev_attr_fan2_input.dev_attr.attr, 669 662 &sensor_dev_attr_fan3_input.dev_attr.attr, ··· 678 673 &sensor_dev_attr_fan13_input.dev_attr.attr, 679 674 &sensor_dev_attr_fan14_input.dev_attr.attr, 680 675 &sensor_dev_attr_fan15_input.dev_attr.attr, 676 + &sensor_dev_attr_fan16_input.dev_attr.attr, 681 677 NULL 682 678 }; 683 679 ··· 808 802 if (ret) 809 803 return ret; 810 804 } 811 - of_node_put(np); 812 805 813 806 priv->groups[0] = &pwm_dev_group; 814 807 priv->groups[1] = &fan_dev_group;
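The aspeed-pwm-tacho.c hunk above changes the tach read helper to return an int so a timeout can be reported as -ETIMEDOUT and propagated by the sysfs show routine. A standalone sketch of that negative-errno-or-value convention, with read_fan_rpm() as a made-up stand-in rather than the driver function:

#include <errno.h>
#include <stdio.h>

/*
 * Illustrative only: return either a non-negative reading or a
 * negative errno from a single int, as the hunk does for the
 * fan-tach helper.
 */
static int read_fan_rpm(int status_ok, int raw_rpm)
{
	if (!status_ok)
		return -ETIMEDOUT;	/* measurement never completed */
	return raw_rpm;
}

int main(void)
{
	int rpm = read_fan_rpm(1, 4200);

	if (rpm < 0)
		printf("error: %d\n", rpm);
	else
		printf("fan: %d rpm\n", rpm);	/* fan: 4200 rpm */

	rpm = read_fan_rpm(0, 0);
	printf("timed out: %d\n", rpm);		/* negative errno */
	return 0;
}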
+4 -4
drivers/i2c/busses/i2c-imx.c
··· 734 734 * the first read operation, otherwise the first read cost 735 735 * one extra clock cycle. 736 736 */ 737 - temp = readb(i2c_imx->base + IMX_I2C_I2CR); 737 + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); 738 738 temp |= I2CR_MTX; 739 - writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 739 + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); 740 740 } 741 741 msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); 742 742 ··· 857 857 * the first read operation, otherwise the first read cost 858 858 * one extra clock cycle. 859 859 */ 860 - temp = readb(i2c_imx->base + IMX_I2C_I2CR); 860 + temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); 861 861 temp |= I2CR_MTX; 862 - writeb(temp, i2c_imx->base + IMX_I2C_I2CR); 862 + imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); 863 863 } 864 864 } else if (i == (msgs->len - 2)) { 865 865 dev_dbg(&i2c_imx->adapter.dev,
+1 -1
drivers/i2c/busses/i2c-ismt.c
··· 584 584 585 585 /* unmap the data buffer */ 586 586 if (dma_size != 0) 587 - dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction); 587 + dma_unmap_single(dev, dma_addr, dma_size, dma_direction); 588 588 589 589 if (unlikely(!time_left)) { 590 590 dev_err(dev, "completion wait timed out\n");
+1 -1
drivers/i2c/busses/i2c-rcar.c
··· 319 319 rcar_i2c_write(priv, ICFBSCR, TCYC06); 320 320 321 321 dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), 322 - priv->msg->len, priv->dma_direction); 322 + sg_dma_len(&priv->sg), priv->dma_direction); 323 323 324 324 priv->dma_direction = DMA_NONE; 325 325 }
+4 -4
drivers/iio/adc/bcm_iproc_adc.c
··· 143 143 iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA); 144 144 } 145 145 146 - static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) 146 + static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) 147 147 { 148 148 u32 channel_intr_status; 149 149 u32 intr_status; ··· 167 167 return IRQ_NONE; 168 168 } 169 169 170 - static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) 170 + static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) 171 171 { 172 172 irqreturn_t retval = IRQ_NONE; 173 173 struct iproc_adc_priv *adc_priv; ··· 181 181 adc_priv = iio_priv(indio_dev); 182 182 183 183 regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status); 184 - dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n", 184 + dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n", 185 185 intr_status); 186 186 187 187 intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR; ··· 566 566 } 567 567 568 568 ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno, 569 - iproc_adc_interrupt_thread, 570 569 iproc_adc_interrupt_handler, 570 + iproc_adc_interrupt_thread, 571 571 IRQF_SHARED, "iproc-adc", indio_dev); 572 572 if (ret) { 573 573 dev_err(&pdev->dev, "request_irq error %d\n", ret);
+5 -5
drivers/iio/adc/max9611.c
··· 438 438 struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev)); 439 439 unsigned int i, r; 440 440 441 - i = max9611->shunt_resistor_uohm / 1000; 442 - r = max9611->shunt_resistor_uohm % 1000; 441 + i = max9611->shunt_resistor_uohm / 1000000; 442 + r = max9611->shunt_resistor_uohm % 1000000; 443 443 444 - return sprintf(buf, "%u.%03u\n", i, r); 444 + return sprintf(buf, "%u.%06u\n", i, r); 445 445 } 446 446 447 447 static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444, ··· 536 536 int ret; 537 537 538 538 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611)); 539 - if (IS_ERR(indio_dev)) 540 - return PTR_ERR(indio_dev); 539 + if (!indio_dev) 540 + return -ENOMEM; 541 541 542 542 i2c_set_clientdata(client, indio_dev); 543 543
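The max9611.c hunk above corrects the shunt-resistor formatting: the value is stored in micro-ohms, so converting to ohms needs a divisor of 1000000 and a six-digit fractional part. A standalone sketch of the corrected arithmetic, e.g. a 5 mohm (5000 uohm) shunt now prints as 0.005000 instead of a value off by a factor of 1000:

#include <stdio.h>

/*
 * Illustrative only: format a shunt resistance stored in micro-ohms
 * as ohms, using the corrected divisor and "%u.%06u" format.
 */
static void print_shunt(unsigned int shunt_uohm)
{
	unsigned int i = shunt_uohm / 1000000;
	unsigned int r = shunt_uohm % 1000000;

	printf("%u.%06u\n", i, r);
}

int main(void)
{
	print_shunt(5000);	/* 5 mohm shunt  -> 0.005000 */
	print_shunt(1500000);	/* 1.5 ohm shunt -> 1.500000 */
	return 0;
}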
+2 -2
drivers/iio/adc/meson_saradc.c
··· 468 468 static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) 469 469 { 470 470 struct meson_sar_adc_priv *priv = iio_priv(indio_dev); 471 - int count; 471 + unsigned int count, tmp; 472 472 473 473 for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { 474 474 if (!meson_sar_adc_get_fifo_count(indio_dev)) 475 475 break; 476 476 477 - regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0); 477 + regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp); 478 478 } 479 479 } 480 480
+5 -2
drivers/iio/adc/mxs-lradc-adc.c
··· 718 718 adc->dev = dev; 719 719 720 720 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 721 + if (!iores) 722 + return -EINVAL; 723 + 721 724 adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); 722 - if (IS_ERR(adc->base)) 723 - return PTR_ERR(adc->base); 725 + if (!adc->base) 726 + return -ENOMEM; 724 727 725 728 init_completion(&adc->completion); 726 729 spin_lock_init(&adc->lock);
+24 -14
drivers/iio/adc/sun4i-gpadc-iio.c
··· 105 105 bool no_irq; 106 106 /* prevents concurrent reads of temperature and ADC */ 107 107 struct mutex mutex; 108 + struct thermal_zone_device *tzd; 109 + struct device *sensor_device; 108 110 }; 109 111 110 112 #define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) { \ ··· 504 502 { 505 503 struct sun4i_gpadc_iio *info = iio_priv(indio_dev); 506 504 const struct of_device_id *of_dev; 507 - struct thermal_zone_device *tzd; 508 505 struct resource *mem; 509 506 void __iomem *base; 510 507 int ret; ··· 533 532 if (!IS_ENABLED(CONFIG_THERMAL_OF)) 534 533 return 0; 535 534 536 - tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, info, 537 - &sun4i_ts_tz_ops); 538 - if (IS_ERR(tzd)) 535 + info->sensor_device = &pdev->dev; 536 + info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 0, 537 + info, &sun4i_ts_tz_ops); 538 + if (IS_ERR(info->tzd)) 539 539 dev_err(&pdev->dev, "could not register thermal sensor: %ld\n", 540 - PTR_ERR(tzd)); 540 + PTR_ERR(info->tzd)); 541 541 542 - return PTR_ERR_OR_ZERO(tzd); 542 + return PTR_ERR_OR_ZERO(info->tzd); 543 543 } 544 544 545 545 static int sun4i_gpadc_probe_mfd(struct platform_device *pdev, ··· 586 584 * of_node, and the device from this driver as third argument to 587 585 * return the temperature. 588 586 */ 589 - struct thermal_zone_device *tzd; 590 - tzd = devm_thermal_zone_of_sensor_register(pdev->dev.parent, 0, 591 - info, 592 - &sun4i_ts_tz_ops); 593 - if (IS_ERR(tzd)) { 587 + info->sensor_device = pdev->dev.parent; 588 + info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 589 + 0, info, 590 + &sun4i_ts_tz_ops); 591 + if (IS_ERR(info->tzd)) { 594 592 dev_err(&pdev->dev, 595 593 "could not register thermal sensor: %ld\n", 596 - PTR_ERR(tzd)); 597 - return PTR_ERR(tzd); 594 + PTR_ERR(info->tzd)); 595 + return PTR_ERR(info->tzd); 598 596 } 599 597 } else { 600 598 indio_dev->num_channels = ··· 690 688 691 689 pm_runtime_put(&pdev->dev); 692 690 pm_runtime_disable(&pdev->dev); 693 - if (!info->no_irq && IS_ENABLED(CONFIG_THERMAL_OF)) 691 + 692 + if (!IS_ENABLED(CONFIG_THERMAL_OF)) 693 + return 0; 694 + 695 + thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd); 696 + 697 + if (!info->no_irq) 694 698 iio_map_array_unregister(indio_dev); 695 699 696 700 return 0; ··· 708 700 { "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data }, 709 701 { /* sentinel */ }, 710 702 }; 703 + MODULE_DEVICE_TABLE(platform, sun4i_gpadc_id); 711 704 712 705 static struct platform_driver sun4i_gpadc_driver = { 713 706 .driver = { ··· 720 711 .probe = sun4i_gpadc_probe, 721 712 .remove = sun4i_gpadc_remove, 722 713 }; 714 + MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_id); 723 715 724 716 module_platform_driver(sun4i_gpadc_driver); 725 717
+1 -1
drivers/iio/adc/ti_am335x_adc.c
··· 614 614 return -EINVAL; 615 615 } 616 616 617 - indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev)); 617 + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev)); 618 618 if (indio_dev == NULL) { 619 619 dev_err(&pdev->dev, "failed to allocate iio device\n"); 620 620 return -ENOMEM;
+1
drivers/iio/buffer/industrialio-buffer-dma.c
··· 14 14 #include <linux/sched.h> 15 15 #include <linux/poll.h> 16 16 #include <linux/iio/buffer.h> 17 + #include <linux/iio/buffer_impl.h> 17 18 #include <linux/iio/buffer-dma.h> 18 19 #include <linux/dma-mapping.h> 19 20 #include <linux/sizes.h>
+1
drivers/iio/buffer/industrialio-buffer-dmaengine.c
··· 14 14 15 15 #include <linux/iio/iio.h> 16 16 #include <linux/iio/buffer.h> 17 + #include <linux/iio/buffer_impl.h> 17 18 #include <linux/iio/buffer-dma.h> 18 19 #include <linux/iio/buffer-dmaengine.h> 19 20
+36 -3
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
··· 41 41 static const struct inv_mpu6050_reg_map reg_set_6500 = { 42 42 .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, 43 43 .lpf = INV_MPU6050_REG_CONFIG, 44 + .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2, 44 45 .user_ctrl = INV_MPU6050_REG_USER_CTRL, 45 46 .fifo_en = INV_MPU6050_REG_FIFO_EN, 46 47 .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, ··· 212 211 EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); 213 212 214 213 /** 214 + * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent 215 + * 216 + * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope 217 + * MPU6500 and above have a dedicated register for accelerometer 218 + */ 219 + static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st, 220 + enum inv_mpu6050_filter_e val) 221 + { 222 + int result; 223 + 224 + result = regmap_write(st->map, st->reg->lpf, val); 225 + if (result) 226 + return result; 227 + 228 + switch (st->chip_type) { 229 + case INV_MPU6050: 230 + case INV_MPU6000: 231 + case INV_MPU9150: 232 + /* old chips, nothing to do */ 233 + result = 0; 234 + break; 235 + default: 236 + /* set accel lpf */ 237 + result = regmap_write(st->map, st->reg->accel_lpf, val); 238 + break; 239 + } 240 + 241 + return result; 242 + } 243 + 244 + /** 215 245 * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. 216 246 * 217 247 * Initial configuration: ··· 265 233 if (result) 266 234 return result; 267 235 268 - d = INV_MPU6050_FILTER_20HZ; 269 - result = regmap_write(st->map, st->reg->lpf, d); 236 + result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ); 270 237 if (result) 271 238 return result; 272 239 ··· 568 537 * would be alising. This function basically search for the 569 538 * correct low pass parameters based on the fifo rate, e.g, 570 539 * sampling frequency. 540 + * 541 + * lpf is set automatically when setting sampling rate to avoid any aliases. 571 542 */ 572 543 static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) 573 544 { ··· 585 552 while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) 586 553 i++; 587 554 data = d[i]; 588 - result = regmap_write(st->map, st->reg->lpf, data); 555 + result = inv_mpu6050_set_lpf_regs(st, data); 589 556 if (result) 590 557 return result; 591 558 st->chip_config.lpf = data;
+3
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
··· 28 28 * struct inv_mpu6050_reg_map - Notable registers. 29 29 * @sample_rate_div: Divider applied to gyro output rate. 30 30 * @lpf: Configures internal low pass filter. 31 + * @accel_lpf: Configures accelerometer low pass filter. 31 32 * @user_ctrl: Enables/resets the FIFO. 32 33 * @fifo_en: Determines which data will appear in FIFO. 33 34 * @gyro_config: gyro config register. ··· 48 47 struct inv_mpu6050_reg_map { 49 48 u8 sample_rate_div; 50 49 u8 lpf; 50 + u8 accel_lpf; 51 51 u8 user_ctrl; 52 52 u8 fifo_en; 53 53 u8 gyro_config; ··· 190 188 #define INV_MPU6050_FIFO_THRESHOLD 500 191 189 192 190 /* mpu6500 registers */ 191 + #define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D 193 192 #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 194 193 195 194 /* delay time in milliseconds */
+2 -1
drivers/iio/industrialio-trigger.c
··· 451 451 return len; 452 452 453 453 out_trigger_put: 454 - iio_trigger_put(trig); 454 + if (trig) 455 + iio_trigger_put(trig); 455 456 return ret; 456 457 } 457 458
+2 -2
drivers/iio/light/ltr501.c
··· 74 74 static const struct reg_field reg_field_it = 75 75 REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4); 76 76 static const struct reg_field reg_field_als_intr = 77 - REG_FIELD(LTR501_INTR, 0, 0); 78 - static const struct reg_field reg_field_ps_intr = 79 77 REG_FIELD(LTR501_INTR, 1, 1); 78 + static const struct reg_field reg_field_ps_intr = 79 + REG_FIELD(LTR501_INTR, 0, 0); 80 80 static const struct reg_field reg_field_als_rate = 81 81 REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2); 82 82 static const struct reg_field reg_field_ps_rate =
+6 -8
drivers/iio/proximity/as3935.c
··· 40 40 #define AS3935_AFE_PWR_BIT BIT(0) 41 41 42 42 #define AS3935_INT 0x03 43 - #define AS3935_INT_MASK 0x07 43 + #define AS3935_INT_MASK 0x0f 44 44 #define AS3935_EVENT_INT BIT(3) 45 - #define AS3935_NOISE_INT BIT(1) 45 + #define AS3935_NOISE_INT BIT(0) 46 46 47 47 #define AS3935_DATA 0x07 48 48 #define AS3935_DATA_MASK 0x3F ··· 215 215 216 216 st->buffer[0] = val & AS3935_DATA_MASK; 217 217 iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, 218 - pf->timestamp); 218 + iio_get_time_ns(indio_dev)); 219 219 err_read: 220 220 iio_trigger_notify_done(indio_dev->trig); 221 221 ··· 244 244 245 245 switch (val) { 246 246 case AS3935_EVENT_INT: 247 - iio_trigger_poll(st->trig); 247 + iio_trigger_poll_chained(st->trig); 248 248 break; 249 249 case AS3935_NOISE_INT: 250 250 dev_warn(&st->spi->dev, "noise level is too high\n"); ··· 269 269 270 270 static void calibrate_as3935(struct as3935_state *st) 271 271 { 272 - mutex_lock(&st->lock); 273 - 274 272 /* mask disturber interrupt bit */ 275 273 as3935_write(st, AS3935_INT, BIT(5)); 276 274 ··· 278 280 279 281 mdelay(2); 280 282 as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); 281 - 282 - mutex_unlock(&st->lock); 283 283 } 284 284 285 285 #ifdef CONFIG_PM_SLEEP ··· 313 317 goto err_resume; 314 318 val &= ~AS3935_AFE_PWR_BIT; 315 319 ret = as3935_write(st, AS3935_AFE_GAIN, val); 320 + 321 + calibrate_as3935(st); 316 322 317 323 err_resume: 318 324 mutex_unlock(&st->lock);
+1 -9
drivers/infiniband/core/addr.c
··· 449 449 return ret; 450 450 451 451 rt = (struct rt6_info *)dst; 452 - if (ipv6_addr_any(&fl6.saddr)) { 453 - ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev, 454 - &fl6.daddr, 0, &fl6.saddr); 455 - if (ret) 456 - goto put; 457 - 452 + if (ipv6_addr_any(&src_in->sin6_addr)) { 458 453 src_in->sin6_family = AF_INET6; 459 454 src_in->sin6_addr = fl6.saddr; 460 455 } ··· 466 471 467 472 *pdst = dst; 468 473 return 0; 469 - put: 470 - dst_release(dst); 471 - return ret; 472 474 } 473 475 #else 474 476 static int addr6_resolve(struct sockaddr_in6 *src_in,
+2 -2
drivers/infiniband/core/cm.c
··· 1429 1429 primary_path->packet_life_time = 1430 1430 cm_req_get_primary_local_ack_timeout(req_msg); 1431 1431 primary_path->packet_life_time -= (primary_path->packet_life_time > 0); 1432 - sa_path_set_service_id(primary_path, req_msg->service_id); 1432 + primary_path->service_id = req_msg->service_id; 1433 1433 1434 1434 if (req_msg->alt_local_lid) { 1435 1435 alt_path->dgid = req_msg->alt_local_gid; ··· 1452 1452 alt_path->packet_life_time = 1453 1453 cm_req_get_alt_local_ack_timeout(req_msg); 1454 1454 alt_path->packet_life_time -= (alt_path->packet_life_time > 0); 1455 - sa_path_set_service_id(alt_path, req_msg->service_id); 1455 + alt_path->service_id = req_msg->service_id; 1456 1456 } 1457 1457 } 1458 1458
+6 -7
drivers/infiniband/core/cma.c
··· 1140 1140 ib->sib_pkey = path->pkey; 1141 1141 ib->sib_flowinfo = path->flow_label; 1142 1142 memcpy(&ib->sib_addr, &path->sgid, 16); 1143 - ib->sib_sid = sa_path_get_service_id(path); 1143 + ib->sib_sid = path->service_id; 1144 1144 ib->sib_scope_id = 0; 1145 1145 } else { 1146 1146 ib->sib_pkey = listen_ib->sib_pkey; ··· 1274 1274 memcpy(&req->local_gid, &req_param->primary_path->sgid, 1275 1275 sizeof(req->local_gid)); 1276 1276 req->has_gid = true; 1277 - req->service_id = 1278 - sa_path_get_service_id(req_param->primary_path); 1277 + req->service_id = req_param->primary_path->service_id; 1279 1278 req->pkey = be16_to_cpu(req_param->primary_path->pkey); 1280 1279 if (req->pkey != req_param->bth_pkey) 1281 1280 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" ··· 1826 1827 struct rdma_route *rt; 1827 1828 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; 1828 1829 struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; 1829 - const __be64 service_id = sa_path_get_service_id(path); 1830 + const __be64 service_id = 1831 + ib_event->param.req_rcvd.primary_path->service_id; 1830 1832 int ret; 1831 1833 1832 1834 id = rdma_create_id(listen_id->route.addr.dev_addr.net, ··· 2345 2345 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 2346 2346 path_rec.numb_path = 1; 2347 2347 path_rec.reversible = 1; 2348 - sa_path_set_service_id(&path_rec, 2349 - rdma_get_service_id(&id_priv->id, 2350 - cma_dst_addr(id_priv))); 2348 + path_rec.service_id = rdma_get_service_id(&id_priv->id, 2349 + cma_dst_addr(id_priv)); 2351 2350 2352 2351 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 2353 2352 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
+10
drivers/infiniband/core/core_priv.h
··· 169 169 int ib_sa_init(void); 170 170 void ib_sa_cleanup(void); 171 171 172 + int ibnl_init(void); 173 + void ibnl_cleanup(void); 174 + 175 + /** 176 + * Check if there are any listeners to the netlink group 177 + * @group: the netlink group ID 178 + * Returns 0 on success or a negative for no listeners. 179 + */ 180 + int ibnl_chk_listeners(unsigned int group); 181 + 172 182 int ib_nl_handle_resolve_resp(struct sk_buff *skb, 173 183 struct netlink_callback *cb); 174 184 int ib_nl_handle_set_timeout(struct sk_buff *skb,
+1 -1
drivers/infiniband/core/netlink.c
··· 37 37 #include <net/net_namespace.h> 38 38 #include <net/sock.h> 39 39 #include <rdma/rdma_netlink.h> 40 + #include "core_priv.h" 40 41 41 42 struct ibnl_client { 42 43 struct list_head list; ··· 56 55 return -1; 57 56 return 0; 58 57 } 59 - EXPORT_SYMBOL(ibnl_chk_listeners); 60 58 61 59 int ibnl_add_client(int index, int nops, 62 60 const struct ibnl_client_cbs cb_table[])
+3 -3
drivers/infiniband/core/sa_query.c
··· 194 194 .field_name = "sa_path_rec:" #field 195 195 196 196 static const struct ib_field path_rec_table[] = { 197 - { PATH_REC_FIELD(ib.service_id), 197 + { PATH_REC_FIELD(service_id), 198 198 .offset_words = 0, 199 199 .offset_bits = 0, 200 200 .size_bits = 64 }, ··· 296 296 .field_name = "sa_path_rec:" #field 297 297 298 298 static const struct ib_field opa_path_rec_table[] = { 299 - { OPA_PATH_REC_FIELD(opa.service_id), 299 + { OPA_PATH_REC_FIELD(service_id), 300 300 .offset_words = 0, 301 301 .offset_bits = 0, 302 302 .size_bits = 64 }, ··· 774 774 775 775 /* Now build the attributes */ 776 776 if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { 777 - val64 = be64_to_cpu(sa_path_get_service_id(sa_rec)); 777 + val64 = be64_to_cpu(sa_rec->service_id); 778 778 nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, 779 779 sizeof(val64), &val64); 780 780 }
+1 -1
drivers/infiniband/core/umem.c
··· 58 58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { 59 59 60 60 page = sg_page(sg); 61 - if (umem->writable && dirty) 61 + if (!PageDirty(page) && umem->writable && dirty) 62 62 set_page_dirty_lock(page); 63 63 put_page(page); 64 64 }
+5 -1
drivers/infiniband/core/umem_odp.c
··· 321 321 struct vm_area_struct *vma; 322 322 struct hstate *h; 323 323 324 + down_read(&mm->mmap_sem); 324 325 vma = find_vma(mm, ib_umem_start(umem)); 325 - if (!vma || !is_vm_hugetlb_page(vma)) 326 + if (!vma || !is_vm_hugetlb_page(vma)) { 327 + up_read(&mm->mmap_sem); 326 328 return -EINVAL; 329 + } 327 330 h = hstate_vma(vma); 328 331 umem->page_shift = huge_page_shift(h); 332 + up_read(&mm->mmap_sem); 329 333 umem->hugetlb = 1; 330 334 } else { 331 335 umem->hugetlb = 0;
+4 -4
drivers/infiniband/core/uverbs_marshall.c
··· 96 96 } 97 97 EXPORT_SYMBOL(ib_copy_qp_attr_to_user); 98 98 99 - void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, 100 - struct sa_path_rec *src) 99 + static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, 100 + struct sa_path_rec *src) 101 101 { 102 - memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid); 103 - memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid); 102 + memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid)); 103 + memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid)); 104 104 105 105 dst->dlid = htons(ntohl(sa_path_get_dlid(src))); 106 106 dst->slid = htons(ntohl(sa_path_get_slid(src)));
+4
drivers/infiniband/hw/bnxt_re/bnxt_re.h
··· 56 56 #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) 57 57 #define BNXT_RE_MAX_CQ_COUNT (64 * 1024) 58 58 59 + #define BNXT_RE_UD_QP_HW_STALL 0x400000 60 + 61 + #define BNXT_RE_RQ_WQE_THRESHOLD 32 62 + 59 63 struct bnxt_re_work { 60 64 struct work_struct work; 61 65 unsigned long event;
+347 -124
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 61 61 #include "ib_verbs.h" 62 62 #include <rdma/bnxt_re-abi.h> 63 63 64 + static int __from_ib_access_flags(int iflags) 65 + { 66 + int qflags = 0; 67 + 68 + if (iflags & IB_ACCESS_LOCAL_WRITE) 69 + qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; 70 + if (iflags & IB_ACCESS_REMOTE_READ) 71 + qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; 72 + if (iflags & IB_ACCESS_REMOTE_WRITE) 73 + qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; 74 + if (iflags & IB_ACCESS_REMOTE_ATOMIC) 75 + qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; 76 + if (iflags & IB_ACCESS_MW_BIND) 77 + qflags |= BNXT_QPLIB_ACCESS_MW_BIND; 78 + if (iflags & IB_ZERO_BASED) 79 + qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; 80 + if (iflags & IB_ACCESS_ON_DEMAND) 81 + qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; 82 + return qflags; 83 + }; 84 + 85 + static enum ib_access_flags __to_ib_access_flags(int qflags) 86 + { 87 + enum ib_access_flags iflags = 0; 88 + 89 + if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) 90 + iflags |= IB_ACCESS_LOCAL_WRITE; 91 + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) 92 + iflags |= IB_ACCESS_REMOTE_WRITE; 93 + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) 94 + iflags |= IB_ACCESS_REMOTE_READ; 95 + if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) 96 + iflags |= IB_ACCESS_REMOTE_ATOMIC; 97 + if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) 98 + iflags |= IB_ACCESS_MW_BIND; 99 + if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) 100 + iflags |= IB_ZERO_BASED; 101 + if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) 102 + iflags |= IB_ACCESS_ON_DEMAND; 103 + return iflags; 104 + }; 105 + 64 106 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, 65 107 struct bnxt_qplib_sge *sg_list, int num) 66 108 { ··· 191 149 ib_attr->max_total_mcast_qp_attach = 0; 192 150 ib_attr->max_ah = dev_attr->max_ah; 193 151 194 - ib_attr->max_fmr = dev_attr->max_fmr; 195 - ib_attr->max_map_per_fmr = 1; /* ? */ 152 + ib_attr->max_fmr = 0; 153 + ib_attr->max_map_per_fmr = 0; 196 154 197 155 ib_attr->max_srq = dev_attr->max_srq; 198 156 ib_attr->max_srq_wr = dev_attr->max_srq_wqes; ··· 452 410 return IB_LINK_LAYER_ETHERNET; 453 411 } 454 412 413 + #define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) 414 + 415 + static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) 416 + { 417 + struct bnxt_re_fence_data *fence = &pd->fence; 418 + struct ib_mr *ib_mr = &fence->mr->ib_mr; 419 + struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; 420 + 421 + memset(wqe, 0, sizeof(*wqe)); 422 + wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; 423 + wqe->wr_id = BNXT_QPLIB_FENCE_WRID; 424 + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; 425 + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; 426 + wqe->bind.zero_based = false; 427 + wqe->bind.parent_l_key = ib_mr->lkey; 428 + wqe->bind.va = (u64)(unsigned long)fence->va; 429 + wqe->bind.length = fence->size; 430 + wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); 431 + wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; 432 + 433 + /* Save the initial rkey in fence structure for now; 434 + * wqe->bind.r_key will be set at (re)bind time. 
435 + */ 436 + fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); 437 + } 438 + 439 + static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) 440 + { 441 + struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, 442 + qplib_qp); 443 + struct ib_pd *ib_pd = qp->ib_qp.pd; 444 + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 445 + struct bnxt_re_fence_data *fence = &pd->fence; 446 + struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; 447 + struct bnxt_qplib_swqe wqe; 448 + int rc; 449 + 450 + memcpy(&wqe, fence_wqe, sizeof(wqe)); 451 + wqe.bind.r_key = fence->bind_rkey; 452 + fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); 453 + 454 + dev_dbg(rdev_to_dev(qp->rdev), 455 + "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", 456 + wqe.bind.r_key, qp->qplib_qp.id, pd); 457 + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); 458 + if (rc) { 459 + dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); 460 + return rc; 461 + } 462 + bnxt_qplib_post_send_db(&qp->qplib_qp); 463 + 464 + return rc; 465 + } 466 + 467 + static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) 468 + { 469 + struct bnxt_re_fence_data *fence = &pd->fence; 470 + struct bnxt_re_dev *rdev = pd->rdev; 471 + struct device *dev = &rdev->en_dev->pdev->dev; 472 + struct bnxt_re_mr *mr = fence->mr; 473 + 474 + if (fence->mw) { 475 + bnxt_re_dealloc_mw(fence->mw); 476 + fence->mw = NULL; 477 + } 478 + if (mr) { 479 + if (mr->ib_mr.rkey) 480 + bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, 481 + true); 482 + if (mr->ib_mr.lkey) 483 + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 484 + kfree(mr); 485 + fence->mr = NULL; 486 + } 487 + if (fence->dma_addr) { 488 + dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, 489 + DMA_BIDIRECTIONAL); 490 + fence->dma_addr = 0; 491 + } 492 + } 493 + 494 + static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) 495 + { 496 + int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; 497 + struct bnxt_re_fence_data *fence = &pd->fence; 498 + struct bnxt_re_dev *rdev = pd->rdev; 499 + struct device *dev = &rdev->en_dev->pdev->dev; 500 + struct bnxt_re_mr *mr = NULL; 501 + dma_addr_t dma_addr = 0; 502 + struct ib_mw *mw; 503 + u64 pbl_tbl; 504 + int rc; 505 + 506 + dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, 507 + DMA_BIDIRECTIONAL); 508 + rc = dma_mapping_error(dev, dma_addr); 509 + if (rc) { 510 + dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); 511 + rc = -EIO; 512 + fence->dma_addr = 0; 513 + goto fail; 514 + } 515 + fence->dma_addr = dma_addr; 516 + 517 + /* Allocate a MR */ 518 + mr = kzalloc(sizeof(*mr), GFP_KERNEL); 519 + if (!mr) { 520 + rc = -ENOMEM; 521 + goto fail; 522 + } 523 + fence->mr = mr; 524 + mr->rdev = rdev; 525 + mr->qplib_mr.pd = &pd->qplib_pd; 526 + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; 527 + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); 528 + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); 529 + if (rc) { 530 + dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); 531 + goto fail; 532 + } 533 + 534 + /* Register MR */ 535 + mr->ib_mr.lkey = mr->qplib_mr.lkey; 536 + mr->qplib_mr.va = (u64)(unsigned long)fence->va; 537 + mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; 538 + pbl_tbl = dma_addr; 539 + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, 540 + BNXT_RE_FENCE_PBL_SIZE, false); 541 + if (rc) { 542 + dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); 543 + goto fail; 544 + } 545 + 
mr->ib_mr.rkey = mr->qplib_mr.rkey; 546 + 547 + /* Create a fence MW only for kernel consumers */ 548 + mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); 549 + if (!mw) { 550 + dev_err(rdev_to_dev(rdev), 551 + "Failed to create fence-MW for PD: %p\n", pd); 552 + rc = -EINVAL; 553 + goto fail; 554 + } 555 + fence->mw = mw; 556 + 557 + bnxt_re_create_fence_wqe(pd); 558 + return 0; 559 + 560 + fail: 561 + bnxt_re_destroy_fence_mr(pd); 562 + return rc; 563 + } 564 + 455 565 /* Protection Domains */ 456 566 int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) 457 567 { ··· 611 417 struct bnxt_re_dev *rdev = pd->rdev; 612 418 int rc; 613 419 420 + bnxt_re_destroy_fence_mr(pd); 614 421 if (ib_pd->uobject && pd->dpi.dbr) { 615 422 struct ib_ucontext *ib_uctx = ib_pd->uobject->context; 616 423 struct bnxt_re_ucontext *ucntx; ··· 693 498 } 694 499 } 695 500 501 + if (!udata) 502 + if (bnxt_re_create_fence_mr(pd)) 503 + dev_warn(rdev_to_dev(rdev), 504 + "Failed to create Fence-MR\n"); 696 505 return &pd->ib_pd; 697 506 dbfail: 698 507 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, ··· 1048 849 /* Shadow QP SQ depth should be same as QP1 RQ depth */ 1049 850 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; 1050 851 qp->qplib_qp.sq.max_sge = 2; 852 + /* Q full delta can be 1 since it is internal QP */ 853 + qp->qplib_qp.sq.q_full_delta = 1; 1051 854 1052 855 qp->qplib_qp.scq = qp1_qp->scq; 1053 856 qp->qplib_qp.rcq = qp1_qp->rcq; 1054 857 1055 858 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; 1056 859 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; 860 + /* Q full delta can be 1 since it is internal QP */ 861 + qp->qplib_qp.rq.q_full_delta = 1; 1057 862 1058 863 qp->qplib_qp.mtu = qp1_qp->mtu; 1059 864 ··· 1120 917 qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == 1121 918 IB_SIGNAL_ALL_WR) ? 
true : false); 1122 919 1123 - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); 1124 - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 1125 - dev_attr->max_qp_wqes + 1); 1126 - 1127 920 qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; 1128 921 if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) 1129 922 qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; ··· 1158 959 qp->qplib_qp.rq.max_wqe = min_t(u32, entries, 1159 960 dev_attr->max_qp_wqes + 1); 1160 961 962 + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - 963 + qp_init_attr->cap.max_recv_wr; 964 + 1161 965 qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; 1162 966 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) 1163 967 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; ··· 1169 967 qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); 1170 968 1171 969 if (qp_init_attr->qp_type == IB_QPT_GSI) { 970 + /* Allocate 1 more than what's provided */ 971 + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); 972 + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 973 + dev_attr->max_qp_wqes + 1); 974 + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - 975 + qp_init_attr->cap.max_send_wr; 1172 976 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; 1173 977 if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) 1174 978 qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; ··· 1214 1006 } 1215 1007 1216 1008 } else { 1009 + /* Allocate 128 + 1 more than what's provided */ 1010 + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1011 + BNXT_QPLIB_RESERVED_QP_WRS + 1); 1012 + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 1013 + dev_attr->max_qp_wqes + 1014 + BNXT_QPLIB_RESERVED_QP_WRS + 1); 1015 + qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; 1016 + 1017 + /* 1018 + * Reserving one slot for Phantom WQE. Application can 1019 + * post one extra entry in this case. 
But allowing this to avoid 1020 + * unexpected Queue full condition 1021 + */ 1022 + 1023 + qp->qplib_qp.sq.q_full_delta -= 1; 1024 + 1217 1025 qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; 1218 1026 qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; 1219 1027 if (udata) { ··· 1249 1025 1250 1026 qp->ib_qp.qp_num = qp->qplib_qp.id; 1251 1027 spin_lock_init(&qp->sq_lock); 1028 + spin_lock_init(&qp->rq_lock); 1252 1029 1253 1030 if (udata) { 1254 1031 struct bnxt_re_qp_resp resp; ··· 1353 1128 return IB_MTU_2048; 1354 1129 } 1355 1130 } 1356 - 1357 - static int __from_ib_access_flags(int iflags) 1358 - { 1359 - int qflags = 0; 1360 - 1361 - if (iflags & IB_ACCESS_LOCAL_WRITE) 1362 - qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; 1363 - if (iflags & IB_ACCESS_REMOTE_READ) 1364 - qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; 1365 - if (iflags & IB_ACCESS_REMOTE_WRITE) 1366 - qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; 1367 - if (iflags & IB_ACCESS_REMOTE_ATOMIC) 1368 - qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; 1369 - if (iflags & IB_ACCESS_MW_BIND) 1370 - qflags |= BNXT_QPLIB_ACCESS_MW_BIND; 1371 - if (iflags & IB_ZERO_BASED) 1372 - qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; 1373 - if (iflags & IB_ACCESS_ON_DEMAND) 1374 - qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; 1375 - return qflags; 1376 - }; 1377 - 1378 - static enum ib_access_flags __to_ib_access_flags(int qflags) 1379 - { 1380 - enum ib_access_flags iflags = 0; 1381 - 1382 - if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) 1383 - iflags |= IB_ACCESS_LOCAL_WRITE; 1384 - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) 1385 - iflags |= IB_ACCESS_REMOTE_WRITE; 1386 - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) 1387 - iflags |= IB_ACCESS_REMOTE_READ; 1388 - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) 1389 - iflags |= IB_ACCESS_REMOTE_ATOMIC; 1390 - if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) 1391 - iflags |= IB_ACCESS_MW_BIND; 1392 - if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) 1393 - iflags |= IB_ZERO_BASED; 1394 - if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) 1395 - iflags |= IB_ACCESS_ON_DEMAND; 1396 - return iflags; 1397 - }; 1398 1131 1399 1132 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, 1400 1133 struct bnxt_re_qp *qp1_qp, ··· 1561 1378 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); 1562 1379 qp->qplib_qp.sq.max_wqe = min_t(u32, entries, 1563 1380 dev_attr->max_qp_wqes + 1); 1381 + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - 1382 + qp_attr->cap.max_send_wr; 1383 + /* 1384 + * Reserving one slot for Phantom WQE. Some application can 1385 + * post one extra entry in this case. 
Allowing this to avoid 1386 + * unexpected Queue full condition 1387 + */ 1388 + qp->qplib_qp.sq.q_full_delta -= 1; 1564 1389 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; 1565 1390 if (qp->qplib_qp.rq.max_wqe) { 1566 1391 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); 1567 1392 qp->qplib_qp.rq.max_wqe = 1568 1393 min_t(u32, entries, dev_attr->max_qp_wqes + 1); 1394 + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - 1395 + qp_attr->cap.max_recv_wr; 1569 1396 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; 1570 1397 } else { 1571 1398 /* SRQ was used prior, just ignore the RQ caps */ ··· 2076 1883 return payload_sz; 2077 1884 } 2078 1885 1886 + static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) 1887 + { 1888 + if ((qp->ib_qp.qp_type == IB_QPT_UD || 1889 + qp->ib_qp.qp_type == IB_QPT_GSI || 1890 + qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && 1891 + qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { 1892 + int qp_attr_mask; 1893 + struct ib_qp_attr qp_attr; 1894 + 1895 + qp_attr_mask = IB_QP_STATE; 1896 + qp_attr.qp_state = IB_QPS_RTS; 1897 + bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); 1898 + qp->qplib_qp.wqe_cnt = 0; 1899 + } 1900 + } 1901 + 2079 1902 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, 2080 1903 struct bnxt_re_qp *qp, 2081 1904 struct ib_send_wr *wr) ··· 2137 1928 wr = wr->next; 2138 1929 } 2139 1930 bnxt_qplib_post_send_db(&qp->qplib_qp); 1931 + bnxt_ud_qp_hw_stall_workaround(qp); 2140 1932 spin_unlock_irqrestore(&qp->sq_lock, flags); 2141 1933 return rc; 2142 1934 } ··· 2234 2024 wr = wr->next; 2235 2025 } 2236 2026 bnxt_qplib_post_send_db(&qp->qplib_qp); 2027 + bnxt_ud_qp_hw_stall_workaround(qp); 2237 2028 spin_unlock_irqrestore(&qp->sq_lock, flags); 2238 2029 2239 2030 return rc; ··· 2282 2071 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 2283 2072 struct bnxt_qplib_swqe wqe; 2284 2073 int rc = 0, payload_sz = 0; 2074 + unsigned long flags; 2075 + u32 count = 0; 2285 2076 2077 + spin_lock_irqsave(&qp->rq_lock, flags); 2286 2078 while (wr) { 2287 2079 /* House keeping */ 2288 2080 memset(&wqe, 0, sizeof(wqe)); ··· 2314 2100 *bad_wr = wr; 2315 2101 break; 2316 2102 } 2103 + 2104 + /* Ring DB if the RQEs posted reaches a threshold value */ 2105 + if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { 2106 + bnxt_qplib_post_recv_db(&qp->qplib_qp); 2107 + count = 0; 2108 + } 2109 + 2317 2110 wr = wr->next; 2318 2111 } 2319 - bnxt_qplib_post_recv_db(&qp->qplib_qp); 2112 + 2113 + if (count) 2114 + bnxt_qplib_post_recv_db(&qp->qplib_qp); 2115 + 2116 + spin_unlock_irqrestore(&qp->rq_lock, flags); 2117 + 2320 2118 return rc; 2321 2119 } 2322 2120 ··· 2869 2643 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; 2870 2644 } 2871 2645 2646 + static int send_phantom_wqe(struct bnxt_re_qp *qp) 2647 + { 2648 + struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; 2649 + unsigned long flags; 2650 + int rc = 0; 2651 + 2652 + spin_lock_irqsave(&qp->sq_lock, flags); 2653 + 2654 + rc = bnxt_re_bind_fence_mw(lib_qp); 2655 + if (!rc) { 2656 + lib_qp->sq.phantom_wqe_cnt++; 2657 + dev_dbg(&lib_qp->sq.hwq.pdev->dev, 2658 + "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", 2659 + lib_qp->id, lib_qp->sq.hwq.prod, 2660 + HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), 2661 + lib_qp->sq.phantom_wqe_cnt); 2662 + } 2663 + 2664 + spin_unlock_irqrestore(&qp->sq_lock, flags); 2665 + return rc; 2666 + } 2667 + 2872 2668 int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) 2873 2669 { 2874 2670 struct bnxt_re_cq 
*cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); 2875 2671 struct bnxt_re_qp *qp; 2876 2672 struct bnxt_qplib_cqe *cqe; 2877 2673 int i, ncqe, budget; 2674 + struct bnxt_qplib_q *sq; 2675 + struct bnxt_qplib_qp *lib_qp; 2878 2676 u32 tbl_idx; 2879 2677 struct bnxt_re_sqp_entries *sqp_entry = NULL; 2880 2678 unsigned long flags; ··· 2911 2661 } 2912 2662 cqe = &cq->cql[0]; 2913 2663 while (budget) { 2914 - ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); 2664 + lib_qp = NULL; 2665 + ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); 2666 + if (lib_qp) { 2667 + sq = &lib_qp->sq; 2668 + if (sq->send_phantom) { 2669 + qp = container_of(lib_qp, 2670 + struct bnxt_re_qp, qplib_qp); 2671 + if (send_phantom_wqe(qp) == -ENOMEM) 2672 + dev_err(rdev_to_dev(cq->rdev), 2673 + "Phantom failed! Scheduled to send again\n"); 2674 + else 2675 + sq->send_phantom = false; 2676 + } 2677 + } 2678 + 2915 2679 if (!ncqe) 2916 2680 break; 2917 2681 ··· 3086 2822 struct bnxt_re_dev *rdev = mr->rdev; 3087 2823 int rc; 3088 2824 2825 + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 2826 + if (rc) { 2827 + dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); 2828 + return rc; 2829 + } 2830 + 3089 2831 if (mr->npages && mr->pages) { 3090 2832 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, 3091 2833 &mr->qplib_frpl); ··· 3099 2829 mr->npages = 0; 3100 2830 mr->pages = NULL; 3101 2831 } 3102 - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); 3103 - 3104 2832 if (!IS_ERR_OR_NULL(mr->ib_umem)) 3105 2833 ib_umem_release(mr->ib_umem); 3106 2834 ··· 3182 2914 return ERR_PTR(rc); 3183 2915 } 3184 2916 3185 - /* Fast Memory Regions */ 3186 - struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, 3187 - struct ib_fmr_attr *fmr_attr) 2917 + struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, 2918 + struct ib_udata *udata) 3188 2919 { 3189 2920 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); 3190 2921 struct bnxt_re_dev *rdev = pd->rdev; 3191 - struct bnxt_re_fmr *fmr; 2922 + struct bnxt_re_mw *mw; 3192 2923 int rc; 3193 2924 3194 - if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || 3195 - fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { 3196 - dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit"); 2925 + mw = kzalloc(sizeof(*mw), GFP_KERNEL); 2926 + if (!mw) 3197 2927 return ERR_PTR(-ENOMEM); 3198 - } 3199 - fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); 3200 - if (!fmr) 3201 - return ERR_PTR(-ENOMEM); 2928 + mw->rdev = rdev; 2929 + mw->qplib_mw.pd = &pd->qplib_pd; 3202 2930 3203 - fmr->rdev = rdev; 3204 - fmr->qplib_fmr.pd = &pd->qplib_pd; 3205 - fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; 3206 - 3207 - rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); 3208 - if (rc) 2931 + mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? 
2932 + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : 2933 + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); 2934 + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); 2935 + if (rc) { 2936 + dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); 3209 2937 goto fail; 2938 + } 2939 + mw->ib_mw.rkey = mw->qplib_mw.rkey; 3210 2940 3211 - fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); 3212 - fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; 3213 - fmr->ib_fmr.rkey = fmr->ib_fmr.lkey; 2941 + atomic_inc(&rdev->mw_count); 2942 + return &mw->ib_mw; 3214 2943 3215 - atomic_inc(&rdev->mr_count); 3216 - return &fmr->ib_fmr; 3217 2944 fail: 3218 - kfree(fmr); 2945 + kfree(mw); 3219 2946 return ERR_PTR(rc); 3220 2947 } 3221 2948 3222 - int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, 3223 - u64 iova) 2949 + int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) 3224 2950 { 3225 - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, 3226 - ib_fmr); 3227 - struct bnxt_re_dev *rdev = fmr->rdev; 2951 + struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); 2952 + struct bnxt_re_dev *rdev = mw->rdev; 3228 2953 int rc; 3229 2954 3230 - fmr->qplib_fmr.va = iova; 3231 - fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; 3232 - 3233 - rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, 3234 - list_len, true); 3235 - if (rc) 3236 - dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!", 3237 - fmr->ib_fmr.lkey); 3238 - return rc; 3239 - } 3240 - 3241 - int bnxt_re_unmap_fmr(struct list_head *fmr_list) 3242 - { 3243 - struct bnxt_re_dev *rdev; 3244 - struct bnxt_re_fmr *fmr; 3245 - struct ib_fmr *ib_fmr; 3246 - int rc = 0; 3247 - 3248 - /* Validate each FMRs inside the fmr_list */ 3249 - list_for_each_entry(ib_fmr, fmr_list, list) { 3250 - fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr); 3251 - rdev = fmr->rdev; 3252 - 3253 - if (rdev) { 3254 - rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res, 3255 - &fmr->qplib_fmr, true); 3256 - if (rc) 3257 - break; 3258 - } 2955 + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); 2956 + if (rc) { 2957 + dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); 2958 + return rc; 3259 2959 } 3260 - return rc; 3261 - } 3262 2960 3263 - int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) 3264 - { 3265 - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, 3266 - ib_fmr); 3267 - struct bnxt_re_dev *rdev = fmr->rdev; 3268 - int rc; 3269 - 3270 - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); 3271 - if (rc) 3272 - dev_err(rdev_to_dev(rdev), "Failed to free FMR"); 3273 - 3274 - kfree(fmr); 3275 - atomic_dec(&rdev->mr_count); 2961 + kfree(mw); 2962 + atomic_dec(&rdev->mw_count); 3276 2963 return rc; 3277 2964 } 3278 2965
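Note: bnxt_re_post_recv() in the hunk above now batches its doorbell writes, ringing the RQ doorbell every BNXT_RE_RQ_WQE_THRESHOLD (32) posted entries and once more at the end for any partial batch. A stripped-down sketch of that shape follows; struct qp, struct wr, post_one() and ring_doorbell() are hypothetical stand-ins for the driver's bnxt_qplib_post_recv() and bnxt_qplib_post_recv_db(), not real API.

	/* Sketch only: flush a full batch of RQEs with one doorbell write,
	 * then flush whatever remains after the loop.
	 */
	static int post_recv_batched(struct qp *qp, struct wr *wr, struct wr **bad_wr)
	{
		u32 count = 0;
		int rc = 0;

		for (; wr; wr = wr->next) {
			rc = post_one(qp, wr);		/* stand-in for bnxt_qplib_post_recv() */
			if (rc) {
				*bad_wr = wr;
				break;
			}
			if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
				ring_doorbell(qp);	/* stand-in for bnxt_qplib_post_recv_db() */
				count = 0;
			}
		}
		if (count)
			ring_doorbell(qp);		/* flush the partial tail */
		return rc;
	}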
+16 -6
drivers/infiniband/hw/bnxt_re/ib_verbs.h
··· 44 44 u32 refcnt; 45 45 }; 46 46 47 + #define BNXT_RE_FENCE_BYTES 64 48 + struct bnxt_re_fence_data { 49 + u32 size; 50 + u8 va[BNXT_RE_FENCE_BYTES]; 51 + dma_addr_t dma_addr; 52 + struct bnxt_re_mr *mr; 53 + struct ib_mw *mw; 54 + struct bnxt_qplib_swqe bind_wqe; 55 + u32 bind_rkey; 56 + }; 57 + 47 58 struct bnxt_re_pd { 48 59 struct bnxt_re_dev *rdev; 49 60 struct ib_pd ib_pd; 50 61 struct bnxt_qplib_pd qplib_pd; 51 62 struct bnxt_qplib_dpi dpi; 63 + struct bnxt_re_fence_data fence; 52 64 }; 53 65 54 66 struct bnxt_re_ah { ··· 74 62 struct bnxt_re_dev *rdev; 75 63 struct ib_qp ib_qp; 76 64 spinlock_t sq_lock; /* protect sq */ 65 + spinlock_t rq_lock; /* protect rq */ 77 66 struct bnxt_qplib_qp qplib_qp; 78 67 struct ib_umem *sumem; 79 68 struct ib_umem *rumem; ··· 194 181 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, 195 182 u32 max_num_sg); 196 183 int bnxt_re_dereg_mr(struct ib_mr *mr); 197 - struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, 198 - struct ib_fmr_attr *fmr_attr); 199 - int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, 200 - u64 iova); 201 - int bnxt_re_unmap_fmr(struct list_head *fmr_list); 202 - int bnxt_re_dealloc_fmr(struct ib_fmr *fmr); 184 + struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, 185 + struct ib_udata *udata); 186 + int bnxt_re_dealloc_mw(struct ib_mw *mw); 203 187 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, 204 188 u64 virt_addr, int mr_access_flags, 205 189 struct ib_udata *udata);
-4
drivers/infiniband/hw/bnxt_re/main.c
··· 507 507 ibdev->dereg_mr = bnxt_re_dereg_mr; 508 508 ibdev->alloc_mr = bnxt_re_alloc_mr; 509 509 ibdev->map_mr_sg = bnxt_re_map_mr_sg; 510 - ibdev->alloc_fmr = bnxt_re_alloc_fmr; 511 - ibdev->map_phys_fmr = bnxt_re_map_phys_fmr; 512 - ibdev->unmap_fmr = bnxt_re_unmap_fmr; 513 - ibdev->dealloc_fmr = bnxt_re_dealloc_fmr; 514 510 515 511 ibdev->reg_user_mr = bnxt_re_reg_user_mr; 516 512 ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
+199 -185
drivers/infiniband/hw/bnxt_re/qplib_fp.c
··· 284 284 { 285 285 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 286 286 struct cmdq_create_qp1 req; 287 - struct creq_create_qp1_resp *resp; 287 + struct creq_create_qp1_resp resp; 288 288 struct bnxt_qplib_pbl *pbl; 289 289 struct bnxt_qplib_q *sq = &qp->sq; 290 290 struct bnxt_qplib_q *rq = &qp->rq; ··· 394 394 395 395 req.pd_id = cpu_to_le32(qp->pd->id); 396 396 397 - resp = (struct creq_create_qp1_resp *) 398 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 399 - NULL, 0); 400 - if (!resp) { 401 - dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed"); 402 - rc = -EINVAL; 397 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 398 + (void *)&resp, NULL, 0); 399 + if (rc) 403 400 goto fail; 404 - } 405 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 406 - /* Cmd timed out */ 407 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out"); 408 - rc = -ETIMEDOUT; 409 - goto fail; 410 - } 411 - if (resp->status || 412 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 413 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed "); 414 - dev_err(&rcfw->pdev->dev, 415 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 416 - resp->status, le16_to_cpu(req.cookie), 417 - le16_to_cpu(resp->cookie)); 418 - rc = -EINVAL; 419 - goto fail; 420 - } 421 - qp->id = le32_to_cpu(resp->xid); 401 + 402 + qp->id = le32_to_cpu(resp.xid); 422 403 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 423 404 sq->flush_in_progress = false; 424 405 rq->flush_in_progress = false; ··· 423 442 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 424 443 struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; 425 444 struct cmdq_create_qp req; 426 - struct creq_create_qp_resp *resp; 445 + struct creq_create_qp_resp resp; 427 446 struct bnxt_qplib_pbl *pbl; 428 447 struct sq_psn_search **psn_search_ptr; 429 448 unsigned long int psn_search, poff = 0; ··· 608 627 } 609 628 req.pd_id = cpu_to_le32(qp->pd->id); 610 629 611 - resp = (struct creq_create_qp_resp *) 612 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 613 - NULL, 0); 614 - if (!resp) { 615 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed"); 616 - rc = -EINVAL; 630 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 631 + (void *)&resp, NULL, 0); 632 + if (rc) 617 633 goto fail; 618 - } 619 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 620 - /* Cmd timed out */ 621 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out"); 622 - rc = -ETIMEDOUT; 623 - goto fail; 624 - } 625 - if (resp->status || 626 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 627 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed "); 628 - dev_err(&rcfw->pdev->dev, 629 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 630 - resp->status, le16_to_cpu(req.cookie), 631 - le16_to_cpu(resp->cookie)); 632 - rc = -EINVAL; 633 - goto fail; 634 - } 635 - qp->id = le32_to_cpu(resp->xid); 634 + 635 + qp->id = le32_to_cpu(resp.xid); 636 636 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; 637 637 sq->flush_in_progress = false; 638 638 rq->flush_in_progress = false; ··· 731 769 { 732 770 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 733 771 struct cmdq_modify_qp req; 734 - struct creq_modify_qp_resp *resp; 772 + struct creq_modify_qp_resp resp; 735 773 u16 cmd_flags = 0, pkey; 736 774 u32 temp32[4]; 737 775 u32 bmask; 776 + int rc; 738 777 739 778 RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); 740 779 ··· 825 862 826 863 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); 827 864 828 - resp = (struct 
creq_modify_qp_resp *) 829 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 830 - NULL, 0); 831 - if (!resp) { 832 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed"); 833 - return -EINVAL; 834 - } 835 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 836 - /* Cmd timed out */ 837 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out"); 838 - return -ETIMEDOUT; 839 - } 840 - if (resp->status || 841 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 842 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed "); 843 - dev_err(&rcfw->pdev->dev, 844 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 845 - resp->status, le16_to_cpu(req.cookie), 846 - le16_to_cpu(resp->cookie)); 847 - return -EINVAL; 848 - } 865 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 866 + (void *)&resp, NULL, 0); 867 + if (rc) 868 + return rc; 849 869 qp->cur_qp_state = qp->state; 850 870 return 0; 851 871 } ··· 837 891 { 838 892 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 839 893 struct cmdq_query_qp req; 840 - struct creq_query_qp_resp *resp; 894 + struct creq_query_qp_resp resp; 895 + struct bnxt_qplib_rcfw_sbuf *sbuf; 841 896 struct creq_query_qp_resp_sb *sb; 842 897 u16 cmd_flags = 0; 843 898 u32 temp32[4]; 844 - int i; 899 + int i, rc = 0; 845 900 846 901 RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); 847 902 903 + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); 904 + if (!sbuf) 905 + return -ENOMEM; 906 + sb = sbuf->sb; 907 + 848 908 req.qp_cid = cpu_to_le32(qp->id); 849 909 req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 850 - resp = (struct creq_query_qp_resp *) 851 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 852 - (void **)&sb, 0); 853 - if (!resp) { 854 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed"); 855 - return -EINVAL; 856 - } 857 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 858 - /* Cmd timed out */ 859 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out"); 860 - return -ETIMEDOUT; 861 - } 862 - if (resp->status || 863 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 864 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed "); 865 - dev_err(&rcfw->pdev->dev, 866 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 867 - resp->status, le16_to_cpu(req.cookie), 868 - le16_to_cpu(resp->cookie)); 869 - return -EINVAL; 870 - } 910 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 911 + (void *)sbuf, 0); 912 + if (rc) 913 + goto bail; 871 914 /* Extract the context from the side buffer */ 872 915 qp->state = sb->en_sqd_async_notify_state & 873 916 CREQ_QUERY_QP_RESP_SB_STATE_MASK; ··· 911 976 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); 912 977 memcpy(qp->smac, sb->src_mac, 6); 913 978 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); 914 - return 0; 979 + bail: 980 + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); 981 + return rc; 915 982 } 916 983 917 984 static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) ··· 958 1021 { 959 1022 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 960 1023 struct cmdq_destroy_qp req; 961 - struct creq_destroy_qp_resp *resp; 1024 + struct creq_destroy_qp_resp resp; 962 1025 unsigned long flags; 963 1026 u16 cmd_flags = 0; 1027 + int rc; 964 1028 965 1029 RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); 966 1030 967 1031 req.qp_cid = cpu_to_le32(qp->id); 968 - resp = (struct creq_destroy_qp_resp *) 969 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 970 - NULL, 0); 971 - if (!resp) { 972 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send 
failed"); 973 - return -EINVAL; 974 - } 975 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 976 - /* Cmd timed out */ 977 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out"); 978 - return -ETIMEDOUT; 979 - } 980 - if (resp->status || 981 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 982 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed "); 983 - dev_err(&rcfw->pdev->dev, 984 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 985 - resp->status, le16_to_cpu(req.cookie), 986 - le16_to_cpu(resp->cookie)); 987 - return -EINVAL; 988 - } 1032 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1033 + (void *)&resp, NULL, 0); 1034 + if (rc) 1035 + return rc; 989 1036 990 1037 /* Must walk the associated CQs to nullified the QP ptr */ 991 1038 spin_lock_irqsave(&qp->scq->hwq.lock, flags); ··· 1083 1162 rc = -EINVAL; 1084 1163 goto done; 1085 1164 } 1086 - if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == 1087 - HWQ_CMP(sq->hwq.cons, &sq->hwq)) { 1165 + 1166 + if (bnxt_qplib_queue_full(sq)) { 1167 + dev_err(&sq->hwq.pdev->dev, 1168 + "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x", 1169 + sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements, 1170 + sq->q_full_delta); 1088 1171 rc = -ENOMEM; 1089 1172 goto done; 1090 1173 } ··· 1298 1373 } 1299 1374 1300 1375 sq->hwq.prod++; 1376 + 1377 + qp->wqe_cnt++; 1378 + 1301 1379 done: 1302 1380 return rc; 1303 1381 } ··· 1339 1411 rc = -EINVAL; 1340 1412 goto done; 1341 1413 } 1342 - if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == 1343 - HWQ_CMP(rq->hwq.cons, &rq->hwq)) { 1414 + if (bnxt_qplib_queue_full(rq)) { 1344 1415 dev_err(&rq->hwq.pdev->dev, 1345 1416 "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); 1346 1417 rc = -EINVAL; ··· 1410 1483 { 1411 1484 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1412 1485 struct cmdq_create_cq req; 1413 - struct creq_create_cq_resp *resp; 1486 + struct creq_create_cq_resp resp; 1414 1487 struct bnxt_qplib_pbl *pbl; 1415 1488 u16 cmd_flags = 0; 1416 1489 int rc; ··· 1452 1525 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << 1453 1526 CMDQ_CREATE_CQ_CNQ_ID_SFT); 1454 1527 1455 - resp = (struct creq_create_cq_resp *) 1456 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1457 - NULL, 0); 1458 - if (!resp) { 1459 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed"); 1460 - return -EINVAL; 1461 - } 1462 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 1463 - /* Cmd timed out */ 1464 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out"); 1465 - rc = -ETIMEDOUT; 1528 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1529 + (void *)&resp, NULL, 0); 1530 + if (rc) 1466 1531 goto fail; 1467 - } 1468 - if (resp->status || 1469 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 1470 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed "); 1471 - dev_err(&rcfw->pdev->dev, 1472 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 1473 - resp->status, le16_to_cpu(req.cookie), 1474 - le16_to_cpu(resp->cookie)); 1475 - rc = -EINVAL; 1476 - goto fail; 1477 - } 1478 - cq->id = le32_to_cpu(resp->xid); 1532 + 1533 + cq->id = le32_to_cpu(resp.xid); 1479 1534 cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; 1480 1535 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; 1481 1536 init_waitqueue_head(&cq->waitq); ··· 1475 1566 { 1476 1567 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 1477 1568 struct cmdq_destroy_cq req; 1478 - struct creq_destroy_cq_resp *resp; 1569 + struct creq_destroy_cq_resp resp; 1479 1570 u16 cmd_flags = 0; 1571 + int rc; 1480 1572 
1481 1573 RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); 1482 1574 1483 1575 req.cq_cid = cpu_to_le32(cq->id); 1484 - resp = (struct creq_destroy_cq_resp *) 1485 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1486 - NULL, 0); 1487 - if (!resp) { 1488 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed"); 1489 - return -EINVAL; 1490 - } 1491 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 1492 - /* Cmd timed out */ 1493 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out"); 1494 - return -ETIMEDOUT; 1495 - } 1496 - if (resp->status || 1497 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 1498 - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed "); 1499 - dev_err(&rcfw->pdev->dev, 1500 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 1501 - resp->status, le16_to_cpu(req.cookie), 1502 - le16_to_cpu(resp->cookie)); 1503 - return -EINVAL; 1504 - } 1576 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 1577 + (void *)&resp, NULL, 0); 1578 + if (rc) 1579 + return rc; 1505 1580 bnxt_qplib_free_hwq(res->pdev, &cq->hwq); 1506 1581 return 0; 1507 1582 } ··· 1557 1664 return rc; 1558 1665 } 1559 1666 1667 + /* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) 1668 + * CQE is track from sw_cq_cons to max_element but valid only if VALID=1 1669 + */ 1670 + static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, 1671 + u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons) 1672 + { 1673 + struct bnxt_qplib_q *sq = &qp->sq; 1674 + struct bnxt_qplib_swq *swq; 1675 + u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx; 1676 + struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr; 1677 + struct cq_req *peek_req_hwcqe; 1678 + struct bnxt_qplib_qp *peek_qp; 1679 + struct bnxt_qplib_q *peek_sq; 1680 + int i, rc = 0; 1681 + 1682 + /* Normal mode */ 1683 + /* Check for the psn_search marking before completing */ 1684 + swq = &sq->swq[sw_sq_cons]; 1685 + if (swq->psn_search && 1686 + le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { 1687 + /* Unmark */ 1688 + swq->psn_search->flags_next_psn = cpu_to_le32 1689 + (le32_to_cpu(swq->psn_search->flags_next_psn) 1690 + & ~0x80000000); 1691 + dev_dbg(&cq->hwq.pdev->dev, 1692 + "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", 1693 + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); 1694 + sq->condition = true; 1695 + sq->send_phantom = true; 1696 + 1697 + /* TODO: Only ARM if the previous SQE is ARMALL */ 1698 + bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL); 1699 + 1700 + rc = -EAGAIN; 1701 + goto out; 1702 + } 1703 + if (sq->condition) { 1704 + /* Peek at the completions */ 1705 + peek_raw_cq_cons = cq->hwq.cons; 1706 + peek_sw_cq_cons = cq_cons; 1707 + i = cq->hwq.max_elements; 1708 + while (i--) { 1709 + peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq); 1710 + peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; 1711 + peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)] 1712 + [CQE_IDX(peek_sw_cq_cons)]; 1713 + /* If the next hwcqe is VALID */ 1714 + if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons, 1715 + cq->hwq.max_elements)) { 1716 + /* If the next hwcqe is a REQ */ 1717 + if ((peek_hwcqe->cqe_type_toggle & 1718 + CQ_BASE_CQE_TYPE_MASK) == 1719 + CQ_BASE_CQE_TYPE_REQ) { 1720 + peek_req_hwcqe = (struct cq_req *) 1721 + peek_hwcqe; 1722 + peek_qp = (struct bnxt_qplib_qp *) 1723 + ((unsigned long) 1724 + le64_to_cpu 1725 + (peek_req_hwcqe->qp_handle)); 1726 + peek_sq = &peek_qp->sq; 1727 + peek_sq_cons_idx = HWQ_CMP(le16_to_cpu( 1728 + 
peek_req_hwcqe->sq_cons_idx) - 1 1729 + , &sq->hwq); 1730 + /* If the hwcqe's sq's wr_id matches */ 1731 + if (peek_sq == sq && 1732 + sq->swq[peek_sq_cons_idx].wr_id == 1733 + BNXT_QPLIB_FENCE_WRID) { 1734 + /* 1735 + * Unbreak only if the phantom 1736 + * comes back 1737 + */ 1738 + dev_dbg(&cq->hwq.pdev->dev, 1739 + "FP:Got Phantom CQE"); 1740 + sq->condition = false; 1741 + sq->single = true; 1742 + rc = 0; 1743 + goto out; 1744 + } 1745 + } 1746 + /* Valid but not the phantom, so keep looping */ 1747 + } else { 1748 + /* Not valid yet, just exit and wait */ 1749 + rc = -EINVAL; 1750 + goto out; 1751 + } 1752 + peek_sw_cq_cons++; 1753 + peek_raw_cq_cons++; 1754 + } 1755 + dev_err(&cq->hwq.pdev->dev, 1756 + "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x", 1757 + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); 1758 + rc = -EINVAL; 1759 + } 1760 + out: 1761 + return rc; 1762 + } 1763 + 1560 1764 static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, 1561 1765 struct cq_req *hwcqe, 1562 - struct bnxt_qplib_cqe **pcqe, int *budget) 1766 + struct bnxt_qplib_cqe **pcqe, int *budget, 1767 + u32 cq_cons, struct bnxt_qplib_qp **lib_qp) 1563 1768 { 1564 1769 struct bnxt_qplib_qp *qp; 1565 1770 struct bnxt_qplib_q *sq; 1566 1771 struct bnxt_qplib_cqe *cqe; 1567 - u32 sw_cons, cqe_cons; 1772 + u32 sw_sq_cons, cqe_sq_cons; 1773 + struct bnxt_qplib_swq *swq; 1568 1774 int rc = 0; 1569 1775 1570 1776 qp = (struct bnxt_qplib_qp *)((unsigned long) ··· 1675 1683 } 1676 1684 sq = &qp->sq; 1677 1685 1678 - cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); 1679 - if (cqe_cons > sq->hwq.max_elements) { 1686 + cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); 1687 + if (cqe_sq_cons > sq->hwq.max_elements) { 1680 1688 dev_err(&cq->hwq.pdev->dev, 1681 1689 "QPLIB: FP: CQ Process req reported "); 1682 1690 dev_err(&cq->hwq.pdev->dev, 1683 1691 "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", 1684 - cqe_cons, sq->hwq.max_elements); 1692 + cqe_sq_cons, sq->hwq.max_elements); 1685 1693 return -EINVAL; 1686 1694 } 1687 1695 /* If we were in the middle of flushing the SQ, continue */ ··· 1690 1698 1691 1699 /* Require to walk the sq's swq to fabricate CQEs for all previously 1692 1700 * signaled SWQEs due to CQE aggregation from the current sq cons 1693 - * to the cqe_cons 1701 + * to the cqe_sq_cons 1694 1702 */ 1695 1703 cqe = *pcqe; 1696 1704 while (*budget) { 1697 - sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); 1698 - if (sw_cons == cqe_cons) 1705 + sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); 1706 + if (sw_sq_cons == cqe_sq_cons) 1707 + /* Done */ 1699 1708 break; 1709 + 1710 + swq = &sq->swq[sw_sq_cons]; 1700 1711 memset(cqe, 0, sizeof(*cqe)); 1701 1712 cqe->opcode = CQ_BASE_CQE_TYPE_REQ; 1702 1713 cqe->qp_handle = (u64)(unsigned long)qp; 1703 1714 cqe->src_qp = qp->id; 1704 - cqe->wr_id = sq->swq[sw_cons].wr_id; 1705 - cqe->type = sq->swq[sw_cons].type; 1715 + cqe->wr_id = swq->wr_id; 1716 + if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) 1717 + goto skip; 1718 + cqe->type = swq->type; 1706 1719 1707 1720 /* For the last CQE, check for status. 
For errors, regardless 1708 1721 * of the request being signaled or not, it must complete with 1709 1722 * the hwcqe error status 1710 1723 */ 1711 - if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && 1724 + if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons && 1712 1725 hwcqe->status != CQ_REQ_STATUS_OK) { 1713 1726 cqe->status = hwcqe->status; 1714 1727 dev_err(&cq->hwq.pdev->dev, 1715 1728 "QPLIB: FP: CQ Processed Req "); 1716 1729 dev_err(&cq->hwq.pdev->dev, 1717 1730 "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", 1718 - sw_cons, cqe->wr_id, cqe->status); 1731 + sw_sq_cons, cqe->wr_id, cqe->status); 1719 1732 cqe++; 1720 1733 (*budget)--; 1721 1734 sq->flush_in_progress = true; 1722 1735 /* Must block new posting of SQ and RQ */ 1723 1736 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; 1737 + sq->condition = false; 1738 + sq->single = false; 1724 1739 } else { 1725 - if (sq->swq[sw_cons].flags & 1726 - SQ_SEND_FLAGS_SIGNAL_COMP) { 1740 + if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { 1741 + /* Before we complete, do WA 9060 */ 1742 + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, 1743 + cqe_sq_cons)) { 1744 + *lib_qp = qp; 1745 + goto out; 1746 + } 1727 1747 cqe->status = CQ_REQ_STATUS_OK; 1728 1748 cqe++; 1729 1749 (*budget)--; 1730 1750 } 1731 1751 } 1752 + skip: 1732 1753 sq->hwq.cons++; 1754 + if (sq->single) 1755 + break; 1733 1756 } 1757 + out: 1734 1758 *pcqe = cqe; 1735 - if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { 1759 + if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) { 1736 1760 /* Out of budget */ 1737 1761 rc = -EAGAIN; 1738 1762 goto done; 1739 1763 } 1764 + /* 1765 + * Back to normal completion mode only after it has completed all of 1766 + * the WC for this CQE 1767 + */ 1768 + sq->single = false; 1740 1769 if (!sq->flush_in_progress) 1741 1770 goto done; 1742 1771 flush: ··· 2087 2074 } 2088 2075 2089 2076 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, 2090 - int num_cqes) 2077 + int num_cqes, struct bnxt_qplib_qp **lib_qp) 2091 2078 { 2092 2079 struct cq_base *hw_cqe, **hw_cqe_ptr; 2093 2080 unsigned long flags; ··· 2112 2099 case CQ_BASE_CQE_TYPE_REQ: 2113 2100 rc = bnxt_qplib_cq_process_req(cq, 2114 2101 (struct cq_req *)hw_cqe, 2115 - &cqe, &budget); 2102 + &cqe, &budget, 2103 + sw_cons, lib_qp); 2116 2104 break; 2117 2105 case CQ_BASE_CQE_TYPE_RES_RC: 2118 2106 rc = bnxt_qplib_cq_process_res_rc(cq,
+17 -1
drivers/infiniband/hw/bnxt_re/qplib_fp.h
··· 88 88 89 89 struct bnxt_qplib_swqe { 90 90 /* General */ 91 + #define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */ 91 92 u64 wr_id; 92 93 u8 reqs_type; 93 94 u8 type; ··· 217 216 struct scatterlist *sglist; 218 217 u32 nmap; 219 218 u32 max_wqe; 219 + u16 q_full_delta; 220 220 u16 max_sge; 221 221 u32 psn; 222 222 bool flush_in_progress; 223 + bool condition; 224 + bool single; 225 + bool send_phantom; 226 + u32 phantom_wqe_cnt; 227 + u32 phantom_cqe_cnt; 228 + u32 next_cq_cons; 223 229 }; 224 230 225 231 struct bnxt_qplib_qp { ··· 250 242 u8 timeout; 251 243 u8 retry_cnt; 252 244 u8 rnr_retry; 245 + u64 wqe_cnt; 253 246 u32 min_rnr_timer; 254 247 u32 max_rd_atomic; 255 248 u32 max_dest_rd_atomic; ··· 309 300 #define CQE_CMP_VALID(hdr, raw_cons, cp_bit) \ 310 301 (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ 311 302 !((raw_cons) & (cp_bit))) 303 + 304 + static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q) 305 + { 306 + return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta), 307 + &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons, 308 + &qplib_q->hwq); 309 + } 312 310 313 311 struct bnxt_qplib_cqe { 314 312 u8 status; ··· 448 432 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); 449 433 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); 450 434 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, 451 - int num); 435 + int num, struct bnxt_qplib_qp **qp); 452 436 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); 453 437 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); 454 438 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
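Note: the new q_full_delta field and bnxt_qplib_queue_full() helper above report the queue as full while q_full_delta slots are still free, so reserved entries such as the fence WQE (BNXT_QPLIB_FENCE_WRID) can still be posted once the application has consumed its advertised depth. A minimal sketch of the same reservation check, assuming a power-of-two ring with free-running producer/consumer indices (the driver tests the exact boundary via HWQ_CMP; this sketch uses the equivalent inequality form, and ring_has_room() is a hypothetical name):

	/* Sketch only: room exists while more than 'reserve' slots remain
	 * free; 'reserve' plays the role of q_full_delta in the header above.
	 */
	static bool ring_has_room(u32 prod, u32 cons, u32 depth, u32 reserve)
	{
		u32 used = (prod - cons) & (depth - 1);	/* depth is a power of two */

		return used < depth - reserve;
	}

In the ib_verbs.c hunk earlier in this merge, q_full_delta is sized per QP type: 1 for the internal shadow QP, and BNXT_QPLIB_RESERVED_QP_WRS + 1 less the one slot kept for the phantom WQE on regular QPs.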
+163 -149
drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
··· 39 39 #include <linux/spinlock.h> 40 40 #include <linux/pci.h> 41 41 #include <linux/prefetch.h> 42 + #include <linux/delay.h> 43 + 42 44 #include "roce_hsi.h" 43 45 #include "qplib_res.h" 44 46 #include "qplib_rcfw.h" 45 47 static void bnxt_qplib_service_creq(unsigned long data); 46 48 47 49 /* Hardware communication channel */ 48 - int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 50 + static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 49 51 { 50 52 u16 cbit; 51 53 int rc; 52 54 53 - cookie &= RCFW_MAX_COOKIE_VALUE; 54 55 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 55 - if (!test_bit(cbit, rcfw->cmdq_bitmap)) 56 - dev_warn(&rcfw->pdev->dev, 57 - "QPLIB: CMD bit %d for cookie 0x%x is not set?", 58 - cbit, cookie); 59 - 60 56 rc = wait_event_timeout(rcfw->waitq, 61 57 !test_bit(cbit, rcfw->cmdq_bitmap), 62 58 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); 63 - if (!rc) { 64 - dev_warn(&rcfw->pdev->dev, 65 - "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n", 66 - RCFW_CMD_WAIT_TIME_MS, cookie); 67 - } 68 - 69 - return rc; 59 + return rc ? 0 : -ETIMEDOUT; 70 60 }; 71 61 72 - int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 62 + static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) 73 63 { 74 - u32 count = -1; 64 + u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT; 75 65 u16 cbit; 76 66 77 - cookie &= RCFW_MAX_COOKIE_VALUE; 78 67 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 79 68 if (!test_bit(cbit, rcfw->cmdq_bitmap)) 80 69 goto done; 81 70 do { 71 + mdelay(1); /* 1m sec */ 82 72 bnxt_qplib_service_creq((unsigned long)rcfw); 83 73 } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); 84 74 done: 85 - return count; 75 + return count ? 0 : -ETIMEDOUT; 86 76 }; 87 77 88 - void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 89 - struct cmdq_base *req, void **crsbe, 90 - u8 is_block) 78 + static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req, 79 + struct creq_base *resp, void *sb, u8 is_block) 91 80 { 92 - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; 93 81 struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; 94 82 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 95 - struct bnxt_qplib_hwq *crsb = &rcfw->crsb; 96 - struct bnxt_qplib_crsqe *crsqe = NULL; 97 - struct bnxt_qplib_crsbe **crsb_ptr; 83 + struct bnxt_qplib_crsq *crsqe; 98 84 u32 sw_prod, cmdq_prod; 99 - u8 retry_cnt = 0xFF; 100 - dma_addr_t dma_addr; 101 85 unsigned long flags; 102 86 u32 size, opcode; 103 87 u16 cookie, cbit; 104 88 int pg, idx; 105 89 u8 *preq; 106 90 107 - retry: 108 91 opcode = req->opcode; 109 92 if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 110 93 (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && ··· 95 112 dev_err(&rcfw->pdev->dev, 96 113 "QPLIB: RCFW not initialized, reject opcode 0x%x", 97 114 opcode); 98 - return NULL; 115 + return -EINVAL; 99 116 } 100 117 101 118 if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && 102 119 opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { 103 120 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); 104 - return NULL; 121 + return -EINVAL; 105 122 } 106 123 107 124 /* Cmdq are in 16-byte units, each request can consume 1 or more 108 125 * cmdqe 109 126 */ 110 127 spin_lock_irqsave(&cmdq->lock, flags); 111 - if (req->cmd_size > cmdq->max_elements - 112 - ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) & 113 - (cmdq->max_elements - 1))) { 128 + if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) { 114 129 dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); 115 130 
spin_unlock_irqrestore(&cmdq->lock, flags); 116 - 117 - if (!retry_cnt--) 118 - return NULL; 119 - goto retry; 131 + return -EAGAIN; 120 132 } 121 133 122 - retry_cnt = 0xFF; 123 134 124 - cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; 135 + cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE; 125 136 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 126 137 if (is_block) 127 138 cookie |= RCFW_CMD_IS_BLOCKING; 139 + 140 + set_bit(cbit, rcfw->cmdq_bitmap); 128 141 req->cookie = cpu_to_le16(cookie); 129 - if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { 130 - dev_err(&rcfw->pdev->dev, 131 - "QPLIB: RCFW MAX outstanding cmd reached!"); 132 - atomic_dec(&rcfw->seq_num); 142 + crsqe = &rcfw->crsqe_tbl[cbit]; 143 + if (crsqe->resp) { 133 144 spin_unlock_irqrestore(&cmdq->lock, flags); 134 - 135 - if (!retry_cnt--) 136 - return NULL; 137 - goto retry; 145 + return -EBUSY; 138 146 } 139 - /* Reserve a resp buffer slot if requested */ 140 - if (req->resp_size && crsbe) { 141 - spin_lock(&crsb->lock); 142 - sw_prod = HWQ_CMP(crsb->prod, crsb); 143 - crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; 144 - *crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] 145 - [get_crsb_idx(sw_prod)]; 146 - bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr); 147 - req->resp_addr = cpu_to_le64(dma_addr); 148 - crsb->prod++; 149 - spin_unlock(&crsb->lock); 147 + memset(resp, 0, sizeof(*resp)); 148 + crsqe->resp = (struct creq_qp_event *)resp; 149 + crsqe->resp->cookie = req->cookie; 150 + crsqe->req_size = req->cmd_size; 151 + if (req->resp_size && sb) { 152 + struct bnxt_qplib_rcfw_sbuf *sbuf = sb; 150 153 151 - req->resp_size = (sizeof(struct bnxt_qplib_crsbe) + 152 - BNXT_QPLIB_CMDQE_UNITS - 1) / 153 - BNXT_QPLIB_CMDQE_UNITS; 154 + req->resp_addr = cpu_to_le64(sbuf->dma_addr); 155 + req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) / 156 + BNXT_QPLIB_CMDQE_UNITS; 154 157 } 158 + 155 159 cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; 156 160 preq = (u8 *)req; 157 161 size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; ··· 160 190 preq += min_t(u32, size, sizeof(*cmdqe)); 161 191 size -= min_t(u32, size, sizeof(*cmdqe)); 162 192 cmdq->prod++; 193 + rcfw->seq_num++; 163 194 } while (size > 0); 195 + 196 + rcfw->seq_num++; 164 197 165 198 cmdq_prod = cmdq->prod; 166 199 if (rcfw->flags & FIRMWARE_FIRST_FLAG) { 167 - /* The very first doorbell write is required to set this flag 168 - * which prompts the FW to reset its internal pointers 200 + /* The very first doorbell write 201 + * is required to set this flag 202 + * which prompts the FW to reset 203 + * its internal pointers 169 204 */ 170 205 cmdq_prod |= FIRMWARE_FIRST_FLAG; 171 206 rcfw->flags &= ~FIRMWARE_FIRST_FLAG; 172 207 } 173 - sw_prod = HWQ_CMP(crsq->prod, crsq); 174 - crsqe = &crsq->crsq[sw_prod]; 175 - memset(crsqe, 0, sizeof(*crsqe)); 176 - crsq->prod++; 177 - crsqe->req_size = req->cmd_size; 178 208 179 209 /* ring CMDQ DB */ 210 + wmb(); 180 211 writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + 181 212 rcfw->cmdq_bar_reg_prod_off); 182 213 writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + ··· 185 214 done: 186 215 spin_unlock_irqrestore(&cmdq->lock, flags); 187 216 /* Return the CREQ response pointer */ 188 - return crsqe ? 
&crsqe->qp_event : NULL; 217 + return 0; 189 218 } 190 219 220 + int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 221 + struct cmdq_base *req, 222 + struct creq_base *resp, 223 + void *sb, u8 is_block) 224 + { 225 + struct creq_qp_event *evnt = (struct creq_qp_event *)resp; 226 + u16 cookie; 227 + u8 opcode, retry_cnt = 0xFF; 228 + int rc = 0; 229 + 230 + do { 231 + opcode = req->opcode; 232 + rc = __send_message(rcfw, req, resp, sb, is_block); 233 + cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; 234 + if (!rc) 235 + break; 236 + 237 + if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { 238 + /* send failed */ 239 + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed", 240 + cookie, opcode); 241 + return rc; 242 + } 243 + is_block ? mdelay(1) : usleep_range(500, 1000); 244 + 245 + } while (retry_cnt--); 246 + 247 + if (is_block) 248 + rc = __block_for_resp(rcfw, cookie); 249 + else 250 + rc = __wait_for_resp(rcfw, cookie); 251 + if (rc) { 252 + /* timed out */ 253 + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", 254 + cookie, opcode, RCFW_CMD_WAIT_TIME_MS); 255 + return rc; 256 + } 257 + 258 + if (evnt->status) { 259 + /* failed with status */ 260 + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x", 261 + cookie, opcode, evnt->status); 262 + rc = -EFAULT; 263 + } 264 + 265 + return rc; 266 + } 191 267 /* Completions */ 192 268 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, 193 269 struct creq_func_event *func_event) ··· 278 260 static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, 279 261 struct creq_qp_event *qp_event) 280 262 { 281 - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; 282 263 struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; 283 - struct bnxt_qplib_crsqe *crsqe; 284 - u16 cbit, cookie, blocked = 0; 264 + struct bnxt_qplib_crsq *crsqe; 285 265 unsigned long flags; 286 - u32 sw_cons; 266 + u16 cbit, blocked = 0; 267 + u16 cookie; 268 + __le16 mcookie; 287 269 288 270 switch (qp_event->event) { 289 271 case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: ··· 293 275 default: 294 276 /* Command Response */ 295 277 spin_lock_irqsave(&cmdq->lock, flags); 296 - sw_cons = HWQ_CMP(crsq->cons, crsq); 297 - crsqe = &crsq->crsq[sw_cons]; 298 - crsq->cons++; 299 - memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event)); 300 - 301 - cookie = le16_to_cpu(crsqe->qp_event.cookie); 278 + cookie = le16_to_cpu(qp_event->cookie); 279 + mcookie = qp_event->cookie; 302 280 blocked = cookie & RCFW_CMD_IS_BLOCKING; 303 281 cookie &= RCFW_MAX_COOKIE_VALUE; 304 282 cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; 283 + crsqe = &rcfw->crsqe_tbl[cbit]; 284 + if (crsqe->resp && 285 + crsqe->resp->cookie == mcookie) { 286 + memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); 287 + crsqe->resp = NULL; 288 + } else { 289 + dev_err(&rcfw->pdev->dev, 290 + "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x", 291 + crsqe->resp ? "mismatch" : "collision", 292 + crsqe->resp ? 
crsqe->resp->cookie : 0, mcookie); 293 + } 305 294 if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) 306 295 dev_warn(&rcfw->pdev->dev, 307 296 "QPLIB: CMD bit %d was not requested", cbit); 308 - 309 297 cmdq->cons += crsqe->req_size; 310 - spin_unlock_irqrestore(&cmdq->lock, flags); 298 + crsqe->req_size = 0; 299 + 311 300 if (!blocked) 312 301 wake_up(&rcfw->waitq); 313 - break; 302 + spin_unlock_irqrestore(&cmdq->lock, flags); 314 303 } 315 304 return 0; 316 305 } ··· 330 305 struct creq_base *creqe, **creq_ptr; 331 306 u32 sw_cons, raw_cons; 332 307 unsigned long flags; 333 - u32 type; 308 + u32 type, budget = CREQ_ENTRY_POLL_BUDGET; 334 309 335 - /* Service the CREQ until empty */ 310 + /* Service the CREQ until budget is over */ 336 311 spin_lock_irqsave(&creq->lock, flags); 337 312 raw_cons = creq->cons; 338 - while (1) { 313 + while (budget > 0) { 339 314 sw_cons = HWQ_CMP(raw_cons, creq); 340 315 creq_ptr = (struct creq_base **)creq->pbl_ptr; 341 316 creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; ··· 345 320 type = creqe->type & CREQ_BASE_TYPE_MASK; 346 321 switch (type) { 347 322 case CREQ_BASE_TYPE_QP_EVENT: 348 - if (!bnxt_qplib_process_qp_event 349 - (rcfw, (struct creq_qp_event *)creqe)) 350 - rcfw->creq_qp_event_processed++; 351 - else { 352 - dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with"); 353 - dev_warn(&rcfw->pdev->dev, 354 - "QPLIB: type = 0x%x not handled", 355 - type); 356 - } 323 + bnxt_qplib_process_qp_event 324 + (rcfw, (struct creq_qp_event *)creqe); 325 + rcfw->creq_qp_event_processed++; 357 326 break; 358 327 case CREQ_BASE_TYPE_FUNC_EVENT: 359 328 if (!bnxt_qplib_process_func_event ··· 365 346 break; 366 347 } 367 348 raw_cons++; 349 + budget--; 368 350 } 351 + 369 352 if (creq->cons != raw_cons) { 370 353 creq->cons = raw_cons; 371 354 CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, ··· 396 375 /* RCFW */ 397 376 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) 398 377 { 399 - struct creq_deinitialize_fw_resp *resp; 400 378 struct cmdq_deinitialize_fw req; 379 + struct creq_deinitialize_fw_resp resp; 401 380 u16 cmd_flags = 0; 381 + int rc; 402 382 403 383 RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); 404 - resp = (struct creq_deinitialize_fw_resp *) 405 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 406 - NULL, 0); 407 - if (!resp) 408 - return -EINVAL; 409 - 410 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) 411 - return -ETIMEDOUT; 412 - 413 - if (resp->status || 414 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) 415 - return -EFAULT; 384 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 385 + NULL, 0); 386 + if (rc) 387 + return rc; 416 388 417 389 clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 418 390 return 0; ··· 431 417 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, 432 418 struct bnxt_qplib_ctx *ctx, int is_virtfn) 433 419 { 434 - struct creq_initialize_fw_resp *resp; 435 420 struct cmdq_initialize_fw req; 421 + struct creq_initialize_fw_resp resp; 436 422 u16 cmd_flags = 0, level; 423 + int rc; 437 424 438 425 RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); 439 426 ··· 497 482 498 483 skip_ctx_setup: 499 484 req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); 500 - resp = (struct creq_initialize_fw_resp *) 501 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 502 - NULL, 0); 503 - if (!resp) { 504 - dev_err(&rcfw->pdev->dev, 505 - "QPLIB: RCFW: INITIALIZE_FW send failed"); 506 - return -EINVAL; 507 - } 508 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, 
le16_to_cpu(req.cookie))) { 509 - /* Cmd timed out */ 510 - dev_err(&rcfw->pdev->dev, 511 - "QPLIB: RCFW: INITIALIZE_FW timed out"); 512 - return -ETIMEDOUT; 513 - } 514 - if (resp->status || 515 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 516 - dev_err(&rcfw->pdev->dev, 517 - "QPLIB: RCFW: INITIALIZE_FW failed"); 518 - return -EINVAL; 519 - } 485 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 486 + NULL, 0); 487 + if (rc) 488 + return rc; 520 489 set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); 521 490 return 0; 522 491 } 523 492 524 493 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) 525 494 { 526 - bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); 527 - kfree(rcfw->crsq.crsq); 495 + kfree(rcfw->crsqe_tbl); 528 496 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); 529 497 bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); 530 - 531 498 rcfw->pdev = NULL; 532 499 } 533 500 ··· 536 539 goto fail; 537 540 } 538 541 539 - rcfw->crsq.max_elements = rcfw->cmdq.max_elements; 540 - rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, 541 - sizeof(*rcfw->crsq.crsq), GFP_KERNEL); 542 - if (!rcfw->crsq.crsq) 542 + rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, 543 + sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); 544 + if (!rcfw->crsqe_tbl) 543 545 goto fail; 544 546 545 - rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT; 546 - if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0, 547 - &rcfw->crsb.max_elements, 548 - BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE, 549 - HWQ_TYPE_CTX)) { 550 - dev_err(&rcfw->pdev->dev, 551 - "QPLIB: HW channel CRSB allocation failed"); 552 - goto fail; 553 - } 554 547 return 0; 555 548 556 549 fail: ··· 593 606 int rc; 594 607 595 608 /* General */ 596 - atomic_set(&rcfw->seq_num, 0); 609 + rcfw->seq_num = 0; 597 610 rcfw->flags = FIRMWARE_FIRST_FLAG; 598 611 bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * 599 612 sizeof(unsigned long)); ··· 622 635 RCFW_PF_COMM_PROD_OFFSET; 623 636 624 637 rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; 625 - 626 - /* CRSQ */ 627 - rcfw->crsq.prod = 0; 628 - rcfw->crsq.cons = 0; 629 638 630 639 /* CREQ */ 631 640 rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; ··· 674 691 /* Write to the Bono mailbox register */ 675 692 __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); 676 693 return 0; 694 + } 695 + 696 + struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( 697 + struct bnxt_qplib_rcfw *rcfw, 698 + u32 size) 699 + { 700 + struct bnxt_qplib_rcfw_sbuf *sbuf; 701 + 702 + sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); 703 + if (!sbuf) 704 + return NULL; 705 + 706 + sbuf->size = size; 707 + sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, 708 + &sbuf->dma_addr, GFP_ATOMIC); 709 + if (!sbuf->sb) 710 + goto bail; 711 + 712 + return sbuf; 713 + bail: 714 + kfree(sbuf); 715 + return NULL; 716 + } 717 + 718 + void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, 719 + struct bnxt_qplib_rcfw_sbuf *sbuf) 720 + { 721 + if (sbuf->sb) 722 + dma_free_coherent(&rcfw->pdev->dev, sbuf->size, 723 + sbuf->sb, sbuf->dma_addr); 724 + kfree(sbuf); 677 725 }
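The qplib_rcfw.c rework above turns the firmware command channel into a fully synchronous call: bnxt_qplib_rcfw_send_message() now takes a caller-owned CREQ response structure plus an optional DMA side buffer, retries -EAGAIN/-EBUSY internally, and returns a plain errno. A minimal caller sketch under those signatures, mirroring the QUERY_FUNC conversion in qplib_sp.c further below; the side-buffer size is the only assumption made here:

/* Hedged sketch of a caller of the new synchronous RCFW interface. */
static int example_rcfw_query(struct bnxt_qplib_rcfw *rcfw)
{
        struct creq_query_func_resp resp;
        struct bnxt_qplib_rcfw_sbuf *sbuf;
        struct cmdq_query_func req;
        u16 cmd_flags = 0;
        int rc;

        RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);

        /* Commands that return more than one CREQ entry get a DMA side
         * buffer; the firmware writes the bulk data there.
         */
        sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw,
                        sizeof(struct creq_query_func_resp_sb));
        if (!sbuf)
                return -ENOMEM;
        req.resp_size = sbuf->size / BNXT_QPLIB_CMDQE_UNITS;

        /* is_block = 0: sleep on rcfw->waitq until the CREQ completion
         * carrying our cookie has been copied into &resp, or time out.
         */
        rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
                                          (void *)sbuf, 0);

        bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
        return rc;
}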
+20 -41
drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
··· 73 73 #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT 74 74 #define RCFW_MAX_COOKIE_VALUE 0x7FFF 75 75 #define RCFW_CMD_IS_BLOCKING 0x8000 76 + #define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20 76 77 77 78 /* Cmdq contains a fix number of a 16-Byte slots */ 78 79 struct bnxt_qplib_cmdqe { ··· 94 93 struct bnxt_qplib_crsbe { 95 94 u8 data[1024]; 96 95 }; 97 - 98 - /* CRSQ SB */ 99 - #define BNXT_QPLIB_CRSBE_MAX_CNT 4 100 - #define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe) 101 - #define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS) 102 - 103 - #define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1) 104 - #define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1) 105 - 106 - static inline u32 get_crsb_pg(u32 val) 107 - { 108 - return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG; 109 - } 110 - 111 - static inline u32 get_crsb_idx(u32 val) 112 - { 113 - return val & MAX_CRSB_IDX_PER_PG; 114 - } 115 - 116 - static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr, 117 - u32 prod, dma_addr_t *dma_addr) 118 - { 119 - *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG]; 120 - *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) * 121 - BNXT_QPLIB_CRSBE_UNITS; 122 - } 123 96 124 97 /* CREQ */ 125 98 /* Allocate 1 per QP for async error notification for now */ ··· 133 158 #define CREQ_DB(db, raw_cons, cp_bit) \ 134 159 writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) 135 160 161 + #define CREQ_ENTRY_POLL_BUDGET 0x100 162 + 136 163 /* HWQ */ 137 - struct bnxt_qplib_crsqe { 138 - struct creq_qp_event qp_event; 164 + 165 + struct bnxt_qplib_crsq { 166 + struct creq_qp_event *resp; 139 167 u32 req_size; 140 168 }; 141 169 142 - struct bnxt_qplib_crsq { 143 - struct bnxt_qplib_crsqe *crsq; 144 - u32 prod; 145 - u32 cons; 146 - u32 max_elements; 170 + struct bnxt_qplib_rcfw_sbuf { 171 + void *sb; 172 + dma_addr_t dma_addr; 173 + u32 size; 147 174 }; 148 175 149 176 /* RCFW Communication Channels */ ··· 162 185 wait_queue_head_t waitq; 163 186 int (*aeq_handler)(struct bnxt_qplib_rcfw *, 164 187 struct creq_func_event *); 165 - atomic_t seq_num; 188 + u32 seq_num; 166 189 167 190 /* Bar region info */ 168 191 void __iomem *cmdq_bar_reg_iomem; ··· 180 203 181 204 /* Actual Cmd and Resp Queues */ 182 205 struct bnxt_qplib_hwq cmdq; 183 - struct bnxt_qplib_crsq crsq; 184 - struct bnxt_qplib_hwq crsb; 206 + struct bnxt_qplib_crsq *crsqe_tbl; 185 207 }; 186 208 187 209 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); ··· 195 219 (struct bnxt_qplib_rcfw *, 196 220 struct creq_func_event *)); 197 221 198 - int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); 199 - int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); 200 - void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 201 - struct cmdq_base *req, void **crsbe, 202 - u8 is_block); 222 + struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( 223 + struct bnxt_qplib_rcfw *rcfw, 224 + u32 size); 225 + void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, 226 + struct bnxt_qplib_rcfw_sbuf *sbuf); 227 + int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, 228 + struct cmdq_base *req, struct creq_base *resp, 229 + void *sbuf, u8 is_block); 203 230 204 231 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); 205 232 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
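The header now tracks outstanding commands in a flat crsqe_tbl indexed by the command cookie instead of a separate CRSQ/CRSB ring. A standalone sketch of the cookie arithmetic shared by the submit and completion paths; the cookie-related constants are copied from this header, while the table size value is an assumption (BNXT_QPLIB_CMDQE_MAX_CNT is not defined in this hunk):

#include <stdio.h>

#define BNXT_QPLIB_CMDQE_MAX_CNT        256     /* assumed for illustration */
#define RCFW_MAX_OUTSTANDING_CMD        BNXT_QPLIB_CMDQE_MAX_CNT
#define RCFW_MAX_COOKIE_VALUE           0x7FFF
#define RCFW_CMD_IS_BLOCKING            0x8000

int main(void)
{
        unsigned int seq_num = 0x12349;         /* rolling sequence number */
        unsigned int cookie, cbit;

        /* Submit side: low 15 bits become the wire cookie, bit 15 marks a
         * blocking command, and cookie % table-size picks the crsqe slot.
         */
        cookie = seq_num & RCFW_MAX_COOKIE_VALUE;
        cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
        printf("cookie on the wire: 0x%x, crsqe_tbl slot: %u\n",
               cookie | RCFW_CMD_IS_BLOCKING, cbit);

        /* Completion side: strip the blocking bit, recompute the slot. */
        unsigned int wire = cookie | RCFW_CMD_IS_BLOCKING;
        printf("blocking: %d, slot again: %u\n",
               !!(wire & RCFW_CMD_IS_BLOCKING),
               (wire & RCFW_MAX_COOKIE_VALUE) % RCFW_MAX_OUTSTANDING_CMD);
        return 0;
}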
+4
drivers/infiniband/hw/bnxt_re/qplib_res.h
··· 48 48 49 49 #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) 50 50 51 + #define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ 52 + ((HWQ_CMP(hwq->prod, hwq)\ 53 + - HWQ_CMP(hwq->cons, hwq))\ 54 + & (hwq->max_elements - 1))) 51 55 enum bnxt_qplib_hwq_type { 52 56 HWQ_TYPE_CTX, 53 57 HWQ_TYPE_QUEUE,
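HWQ_FREE_SLOTS, added above, reports how many entries of a power-of-two hardware ring are still unused; __send_message() compares it against req->cmd_size before queuing. A self-contained sketch of the same modular arithmetic, with a three-field struct standing in for bnxt_qplib_hwq:

#include <stdio.h>

/* Minimal stand-in for bnxt_qplib_hwq: only the fields the macros touch. */
struct hwq {
        unsigned int prod;
        unsigned int cons;
        unsigned int max_elements;      /* must be a power of two */
};

#define HWQ_CMP(idx, hwq)       ((idx) & ((hwq)->max_elements - 1))
#define HWQ_FREE_SLOTS(hwq)     ((hwq)->max_elements - \
                                 ((HWQ_CMP((hwq)->prod, hwq) - \
                                   HWQ_CMP((hwq)->cons, hwq)) & \
                                  ((hwq)->max_elements - 1)))

int main(void)
{
        struct hwq q = { .prod = 300, .cons = 45, .max_elements = 256 };

        /* Used entries are (prod - cons) modulo the ring size; whatever
         * remains of max_elements is free for new command slots.
         */
        printf("free slots: %u\n", HWQ_FREE_SLOTS(&q));
        return 0;
}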
+83 -250
drivers/infiniband/hw/bnxt_re/qplib_sp.c
··· 55 55 struct bnxt_qplib_dev_attr *attr) 56 56 { 57 57 struct cmdq_query_func req; 58 - struct creq_query_func_resp *resp; 58 + struct creq_query_func_resp resp; 59 + struct bnxt_qplib_rcfw_sbuf *sbuf; 59 60 struct creq_query_func_resp_sb *sb; 60 61 u16 cmd_flags = 0; 61 62 u32 temp; 62 63 u8 *tqm_alloc; 63 - int i; 64 + int i, rc = 0; 64 65 65 66 RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); 66 67 67 - req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 68 - resp = (struct creq_query_func_resp *) 69 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb, 70 - 0); 71 - if (!resp) { 72 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed"); 73 - return -EINVAL; 74 - } 75 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 76 - /* Cmd timed out */ 77 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out"); 78 - return -ETIMEDOUT; 79 - } 80 - if (resp->status || 81 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 82 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed "); 68 + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); 69 + if (!sbuf) { 83 70 dev_err(&rcfw->pdev->dev, 84 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 85 - resp->status, le16_to_cpu(req.cookie), 86 - le16_to_cpu(resp->cookie)); 87 - return -EINVAL; 71 + "QPLIB: SP: QUERY_FUNC alloc side buffer failed"); 72 + return -ENOMEM; 88 73 } 74 + 75 + sb = sbuf->sb; 76 + req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; 77 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 78 + (void *)sbuf, 0); 79 + if (rc) 80 + goto bail; 81 + 89 82 /* Extract the context from the side buffer */ 90 83 attr->max_qp = le32_to_cpu(sb->max_qp); 91 84 attr->max_qp_rd_atom = ··· 88 95 sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? 89 96 BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; 90 97 attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); 98 + /* 99 + * 128 WQEs needs to be reserved for the HW (8916). 
Prevent 100 + * reporting the max number 101 + */ 102 + attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS; 91 103 attr->max_qp_sges = sb->max_sge; 92 104 attr->max_cq = le32_to_cpu(sb->max_cq); 93 105 attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); ··· 128 130 attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); 129 131 attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); 130 132 } 131 - return 0; 133 + 134 + bail: 135 + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); 136 + return rc; 132 137 } 133 138 134 139 /* SGID */ ··· 179 178 /* Remove GID from the SGID table */ 180 179 if (update) { 181 180 struct cmdq_delete_gid req; 182 - struct creq_delete_gid_resp *resp; 181 + struct creq_delete_gid_resp resp; 183 182 u16 cmd_flags = 0; 183 + int rc; 184 184 185 185 RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); 186 186 if (sgid_tbl->hw_id[index] == 0xFFFF) { ··· 190 188 return -EINVAL; 191 189 } 192 190 req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); 193 - resp = (struct creq_delete_gid_resp *) 194 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 195 - 0); 196 - if (!resp) { 197 - dev_err(&res->pdev->dev, 198 - "QPLIB: SP: DELETE_GID send failed"); 199 - return -EINVAL; 200 - } 201 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, 202 - le16_to_cpu(req.cookie))) { 203 - /* Cmd timed out */ 204 - dev_err(&res->pdev->dev, 205 - "QPLIB: SP: DELETE_GID timed out"); 206 - return -ETIMEDOUT; 207 - } 208 - if (resp->status || 209 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 210 - dev_err(&res->pdev->dev, 211 - "QPLIB: SP: DELETE_GID failed "); 212 - dev_err(&res->pdev->dev, 213 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 214 - resp->status, le16_to_cpu(req.cookie), 215 - le16_to_cpu(resp->cookie)); 216 - return -EINVAL; 217 - } 191 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 192 + (void *)&resp, NULL, 0); 193 + if (rc) 194 + return rc; 218 195 } 219 196 memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, 220 197 sizeof(bnxt_qplib_gid_zero)); ··· 215 234 struct bnxt_qplib_res, 216 235 sgid_tbl); 217 236 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 218 - int i, free_idx, rc = 0; 237 + int i, free_idx; 219 238 220 239 if (!sgid_tbl) { 221 240 dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); ··· 247 266 } 248 267 if (update) { 249 268 struct cmdq_add_gid req; 250 - struct creq_add_gid_resp *resp; 269 + struct creq_add_gid_resp resp; 251 270 u16 cmd_flags = 0; 252 271 u32 temp32[4]; 253 272 u16 temp16[3]; 273 + int rc; 254 274 255 275 RCFW_CMD_PREP(req, ADD_GID, cmd_flags); 256 276 ··· 272 290 req.src_mac[1] = cpu_to_be16(temp16[1]); 273 291 req.src_mac[2] = cpu_to_be16(temp16[2]); 274 292 275 - resp = (struct creq_add_gid_resp *) 276 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 277 - NULL, 0); 278 - if (!resp) { 279 - dev_err(&res->pdev->dev, 280 - "QPLIB: SP: ADD_GID send failed"); 281 - return -EINVAL; 282 - } 283 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, 284 - le16_to_cpu(req.cookie))) { 285 - /* Cmd timed out */ 286 - dev_err(&res->pdev->dev, 287 - "QPIB: SP: ADD_GID timed out"); 288 - return -ETIMEDOUT; 289 - } 290 - if (resp->status || 291 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 292 - dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed "); 293 - dev_err(&res->pdev->dev, 294 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 295 - resp->status, le16_to_cpu(req.cookie), 296 - le16_to_cpu(resp->cookie)); 297 - return -EINVAL; 298 - } 299 - sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid); 293 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 294 + 
(void *)&resp, NULL, 0); 295 + if (rc) 296 + return rc; 297 + sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid); 300 298 } 301 299 /* Add GID to the sgid_tbl */ 302 300 memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); ··· 287 325 288 326 *index = free_idx; 289 327 /* unlock */ 290 - return rc; 328 + return 0; 291 329 } 292 330 293 331 /* pkeys */ ··· 384 422 { 385 423 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 386 424 struct cmdq_create_ah req; 387 - struct creq_create_ah_resp *resp; 425 + struct creq_create_ah_resp resp; 388 426 u16 cmd_flags = 0; 389 427 u32 temp32[4]; 390 428 u16 temp16[3]; 429 + int rc; 391 430 392 431 RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); 393 432 ··· 413 450 req.dest_mac[1] = cpu_to_le16(temp16[1]); 414 451 req.dest_mac[2] = cpu_to_le16(temp16[2]); 415 452 416 - resp = (struct creq_create_ah_resp *) 417 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 418 - NULL, 1); 419 - if (!resp) { 420 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); 421 - return -EINVAL; 422 - } 423 - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { 424 - /* Cmd timed out */ 425 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out"); 426 - return -ETIMEDOUT; 427 - } 428 - if (resp->status || 429 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 430 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed "); 431 - dev_err(&rcfw->pdev->dev, 432 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 433 - resp->status, le16_to_cpu(req.cookie), 434 - le16_to_cpu(resp->cookie)); 435 - return -EINVAL; 436 - } 437 - ah->id = le32_to_cpu(resp->xid); 453 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 454 + NULL, 1); 455 + if (rc) 456 + return rc; 457 + 458 + ah->id = le32_to_cpu(resp.xid); 438 459 return 0; 439 460 } 440 461 ··· 426 479 { 427 480 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 428 481 struct cmdq_destroy_ah req; 429 - struct creq_destroy_ah_resp *resp; 482 + struct creq_destroy_ah_resp resp; 430 483 u16 cmd_flags = 0; 484 + int rc; 431 485 432 486 /* Clean up the AH table in the device */ 433 487 RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); 434 488 435 489 req.ah_cid = cpu_to_le32(ah->id); 436 490 437 - resp = (struct creq_destroy_ah_resp *) 438 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 439 - NULL, 1); 440 - if (!resp) { 441 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed"); 442 - return -EINVAL; 443 - } 444 - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { 445 - /* Cmd timed out */ 446 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out"); 447 - return -ETIMEDOUT; 448 - } 449 - if (resp->status || 450 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 451 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed "); 452 - dev_err(&rcfw->pdev->dev, 453 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 454 - resp->status, le16_to_cpu(req.cookie), 455 - le16_to_cpu(resp->cookie)); 456 - return -EINVAL; 457 - } 491 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 492 + NULL, 1); 493 + if (rc) 494 + return rc; 458 495 return 0; 459 496 } 460 497 ··· 447 516 { 448 517 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 449 518 struct cmdq_deallocate_key req; 450 - struct creq_deallocate_key_resp *resp; 519 + struct creq_deallocate_key_resp resp; 451 520 u16 cmd_flags = 0; 521 + int rc; 452 522 453 523 if (mrw->lkey == 0xFFFFFFFF) { 454 524 dev_info(&res->pdev->dev, ··· 468 536 else 469 537 req.key = cpu_to_le32(mrw->lkey); 470 538 471 - resp = (struct 
creq_deallocate_key_resp *) 472 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 473 - NULL, 0); 474 - if (!resp) { 475 - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed"); 476 - return -EINVAL; 477 - } 478 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 479 - /* Cmd timed out */ 480 - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out"); 481 - return -ETIMEDOUT; 482 - } 483 - if (resp->status || 484 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 485 - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed "); 486 - dev_err(&res->pdev->dev, 487 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 488 - resp->status, le16_to_cpu(req.cookie), 489 - le16_to_cpu(resp->cookie)); 490 - return -EINVAL; 491 - } 539 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, 540 + NULL, 0); 541 + if (rc) 542 + return rc; 543 + 492 544 /* Free the qplib's MRW memory */ 493 545 if (mrw->hwq.max_elements) 494 546 bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); ··· 484 568 { 485 569 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 486 570 struct cmdq_allocate_mrw req; 487 - struct creq_allocate_mrw_resp *resp; 571 + struct creq_allocate_mrw_resp resp; 488 572 u16 cmd_flags = 0; 489 573 unsigned long tmp; 574 + int rc; 490 575 491 576 RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); 492 577 ··· 501 584 tmp = (unsigned long)mrw; 502 585 req.mrw_handle = cpu_to_le64(tmp); 503 586 504 - resp = (struct creq_allocate_mrw_resp *) 505 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 506 - NULL, 0); 507 - if (!resp) { 508 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); 509 - return -EINVAL; 510 - } 511 - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { 512 - /* Cmd timed out */ 513 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out"); 514 - return -ETIMEDOUT; 515 - } 516 - if (resp->status || 517 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 518 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed "); 519 - dev_err(&rcfw->pdev->dev, 520 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 521 - resp->status, le16_to_cpu(req.cookie), 522 - le16_to_cpu(resp->cookie)); 523 - return -EINVAL; 524 - } 587 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 588 + (void *)&resp, NULL, 0); 589 + if (rc) 590 + return rc; 591 + 525 592 if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || 526 593 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || 527 594 (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) 528 - mrw->rkey = le32_to_cpu(resp->xid); 595 + mrw->rkey = le32_to_cpu(resp.xid); 529 596 else 530 - mrw->lkey = le32_to_cpu(resp->xid); 597 + mrw->lkey = le32_to_cpu(resp.xid); 531 598 return 0; 532 599 } 533 600 ··· 520 619 { 521 620 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 522 621 struct cmdq_deregister_mr req; 523 - struct creq_deregister_mr_resp *resp; 622 + struct creq_deregister_mr_resp resp; 524 623 u16 cmd_flags = 0; 525 624 int rc; 526 625 527 626 RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); 528 627 529 628 req.lkey = cpu_to_le32(mrw->lkey); 530 - resp = (struct creq_deregister_mr_resp *) 531 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 532 - NULL, block); 533 - if (!resp) { 534 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed"); 535 - return -EINVAL; 536 - } 537 - if (block) 538 - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, 539 - le16_to_cpu(req.cookie)); 540 - else 541 - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, 542 - le16_to_cpu(req.cookie)); 543 - if (!rc) { 544 - /* Cmd timed 
out */ 545 - dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out"); 546 - return -ETIMEDOUT; 547 - } 548 - if (resp->status || 549 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 550 - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed "); 551 - dev_err(&rcfw->pdev->dev, 552 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 553 - resp->status, le16_to_cpu(req.cookie), 554 - le16_to_cpu(resp->cookie)); 555 - return -EINVAL; 556 - } 629 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 630 + (void *)&resp, NULL, block); 631 + if (rc) 632 + return rc; 557 633 558 634 /* Free the qplib's MR memory */ 559 635 if (mrw->hwq.max_elements) { ··· 547 669 { 548 670 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 549 671 struct cmdq_register_mr req; 550 - struct creq_register_mr_resp *resp; 672 + struct creq_register_mr_resp resp; 551 673 u16 cmd_flags = 0, level; 552 674 int pg_ptrs, pages, i, rc; 553 675 dma_addr_t **pbl_ptr; ··· 608 730 req.key = cpu_to_le32(mr->lkey); 609 731 req.mr_size = cpu_to_le64(mr->total_size); 610 732 611 - resp = (struct creq_register_mr_resp *) 612 - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 613 - NULL, block); 614 - if (!resp) { 615 - dev_err(&res->pdev->dev, "SP: REG_MR send failed"); 616 - rc = -EINVAL; 733 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 734 + (void *)&resp, NULL, block); 735 + if (rc) 617 736 goto fail; 618 - } 619 - if (block) 620 - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, 621 - le16_to_cpu(req.cookie)); 622 - else 623 - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, 624 - le16_to_cpu(req.cookie)); 625 - if (!rc) { 626 - /* Cmd timed out */ 627 - dev_err(&res->pdev->dev, "SP: REG_MR timed out"); 628 - rc = -ETIMEDOUT; 629 - goto fail; 630 - } 631 - if (resp->status || 632 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 633 - dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed "); 634 - dev_err(&res->pdev->dev, 635 - "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x", 636 - resp->status, le16_to_cpu(req.cookie), 637 - le16_to_cpu(resp->cookie)); 638 - rc = -EINVAL; 639 - goto fail; 640 - } 737 + 641 738 return 0; 642 739 643 740 fail: ··· 657 804 { 658 805 struct bnxt_qplib_rcfw *rcfw = res->rcfw; 659 806 struct cmdq_map_tc_to_cos req; 660 - struct creq_map_tc_to_cos_resp *resp; 807 + struct creq_map_tc_to_cos_resp resp; 661 808 u16 cmd_flags = 0; 662 - int tleft; 809 + int rc = 0; 663 810 664 811 RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); 665 812 req.cos0 = cpu_to_le16(cids[0]); 666 813 req.cos1 = cpu_to_le16(cids[1]); 667 814 668 - resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); 669 - if (!resp) { 670 - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed"); 671 - return -EINVAL; 672 - } 673 - 674 - tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie)); 675 - if (!tleft) { 676 - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out"); 677 - return -ETIMEDOUT; 678 - } 679 - 680 - if (resp->status || 681 - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { 682 - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed "); 683 - dev_err(&res->pdev->dev, 684 - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", 685 - resp->status, le16_to_cpu(req.cookie), 686 - le16_to_cpu(resp->cookie)); 687 - return -EINVAL; 688 - } 689 - 815 + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, 816 + (void *)&resp, NULL, 0); 690 817 return 0; 691 818 }
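Besides converting every verb helper to the new send_message() interface, the qplib_sp.c hunk above holds back 128 work requests from the advertised queue depth (BNXT_QPLIB_RESERVED_QP_WRS, reserved for the 8916 hardware). The adjustment is plain subtraction; a tiny check with an assumed firmware value:

#include <stdio.h>

#define BNXT_QPLIB_RESERVED_QP_WRS      128

int main(void)
{
        unsigned int fw_max_qp_wr = 0xFFFF;     /* hypothetical sb->max_qp_wr */

        /* 128 WQEs stay reserved for the HW; users only see the rest. */
        printf("advertised max_qp_wqes: %u\n",
               fw_max_qp_wr - BNXT_QPLIB_RESERVED_QP_WRS);
        return 0;
}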
+2
drivers/infiniband/hw/bnxt_re/qplib_sp.h
··· 40 40 #ifndef __BNXT_QPLIB_SP_H__ 41 41 #define __BNXT_QPLIB_SP_H__ 42 42 43 + #define BNXT_QPLIB_RESERVED_QP_WRS 128 44 + 43 45 struct bnxt_qplib_dev_attr { 44 46 char fw_ver[32]; 45 47 u16 max_sgid;
+7 -2
drivers/infiniband/hw/cxgb4/cm.c
··· 488 488 489 489 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 490 490 release_ep_resources(ep); 491 + kfree_skb(skb); 491 492 return 0; 492 493 } 493 494 ··· 499 498 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 500 499 c4iw_put_ep(&ep->parent_ep->com); 501 500 release_ep_resources(ep); 501 + kfree_skb(skb); 502 502 return 0; 503 503 } 504 504 ··· 571 569 572 570 pr_debug("%s rdev %p\n", __func__, rdev); 573 571 req->cmd = CPL_ABORT_NO_RST; 572 + skb_get(skb); 574 573 ret = c4iw_ofld_send(rdev, skb); 575 574 if (ret) { 576 575 __state_set(&ep->com, DEAD); 577 576 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); 578 - } 577 + } else 578 + kfree_skb(skb); 579 579 } 580 580 581 581 static int send_flowc(struct c4iw_ep *ep) ··· 2521 2517 goto reject; 2522 2518 } 2523 2519 2524 - hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + 2520 + hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + 2521 + sizeof(struct tcphdr) + 2525 2522 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); 2526 2523 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2527 2524 child_ep->mtu = peer_mss + hdrs;
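One of the cm.c fixes above makes the advertised MTU account for the real network-header size instead of always assuming IPv4. The calculation is easy to sanity-check in isolation; header sizes are hard-coded here, whereas the kernel uses sizeof(struct iphdr), sizeof(struct ipv6hdr) and sizeof(struct tcphdr):

#include <stdio.h>

#define IPV4_HDR        20
#define IPV6_HDR        40
#define TCP_HDR         20
#define TCP_TSTAMP_OPT  12

static unsigned int hdr_overhead(int iptype, int tstamps)
{
        /* Mirrors the fixed-up calculation: IPv6 connections now account
         * for the larger network header instead of the IPv4 one.
         */
        return (iptype == 4 ? IPV4_HDR : IPV6_HDR) + TCP_HDR +
               (tstamps ? TCP_TSTAMP_OPT : 0);
}

int main(void)
{
        unsigned int peer_mss = 1400;

        printf("IPv4 MTU cap: %u\n", peer_mss + hdr_overhead(4, 1));
        printf("IPv6 MTU cap: %u\n", peer_mss + hdr_overhead(6, 1));
        return 0;
}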
+8 -4
drivers/infiniband/hw/cxgb4/device.c
··· 767 767 kfree(entry); 768 768 } 769 769 770 - list_for_each_safe(pos, nxt, &uctx->qpids) { 770 + list_for_each_safe(pos, nxt, &uctx->cqids) { 771 771 entry = list_entry(pos, struct c4iw_qid_list, entry); 772 772 list_del_init(&entry->entry); 773 773 kfree(entry); ··· 880 880 rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); 881 881 if (!rdev->free_workq) { 882 882 err = -ENOMEM; 883 - goto err_free_status_page; 883 + goto err_free_status_page_and_wr_log; 884 884 } 885 885 886 886 rdev->status_page->db_off = 0; 887 887 888 888 return 0; 889 - err_free_status_page: 889 + err_free_status_page_and_wr_log: 890 + if (c4iw_wr_log && rdev->wr_log) 891 + kfree(rdev->wr_log); 890 892 free_page((unsigned long)rdev->status_page); 891 893 destroy_ocqp_pool: 892 894 c4iw_ocqp_pool_destroy(rdev); ··· 905 903 { 906 904 destroy_workqueue(rdev->free_workq); 907 905 kfree(rdev->wr_log); 906 + c4iw_release_dev_ucontext(rdev, &rdev->uctx); 908 907 free_page((unsigned long)rdev->status_page); 909 908 c4iw_pblpool_destroy(rdev); 910 909 c4iw_rqtpool_destroy(rdev); 910 + c4iw_ocqp_pool_destroy(rdev); 911 911 c4iw_destroy_resource(&rdev->resource); 912 912 } 913 913 ··· 975 971 devp->rdev.lldi.sge_egrstatuspagesize); 976 972 977 973 devp->rdev.hw_queue.t4_eq_status_entries = 978 - devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1; 974 + devp->rdev.lldi.sge_egrstatuspagesize / 64; 979 975 devp->rdev.hw_queue.t4_max_eq_size = 65520; 980 976 devp->rdev.hw_queue.t4_max_iq_size = 65520; 981 977 devp->rdev.hw_queue.t4_max_rq_size = 8192 -
+50 -17
drivers/infiniband/hw/hfi1/chip.c
··· 6312 6312 } 6313 6313 } 6314 6314 6315 - static void write_global_credit(struct hfi1_devdata *dd, 6316 - u8 vau, u16 total, u16 shared) 6315 + /* 6316 + * Set up allocation unit vaulue. 6317 + */ 6318 + void set_up_vau(struct hfi1_devdata *dd, u8 vau) 6317 6319 { 6318 - write_csr(dd, SEND_CM_GLOBAL_CREDIT, 6319 - ((u64)total << 6320 - SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) | 6321 - ((u64)shared << 6322 - SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) | 6323 - ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT)); 6320 + u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 6321 + 6322 + /* do not modify other values in the register */ 6323 + reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; 6324 + reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; 6325 + write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 6324 6326 } 6325 6327 6326 6328 /* 6327 6329 * Set up initial VL15 credits of the remote. Assumes the rest of 6328 - * the CM credit registers are zero from a previous global or credit reset . 6330 + * the CM credit registers are zero from a previous global or credit reset. 6331 + * Shared limit for VL15 will always be 0. 6329 6332 */ 6330 - void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) 6333 + void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) 6331 6334 { 6332 - /* leave shared count at zero for both global and VL15 */ 6333 - write_global_credit(dd, vau, vl15buf, 0); 6335 + u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); 6336 + 6337 + /* set initial values for total and shared credit limit */ 6338 + reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | 6339 + SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); 6340 + 6341 + /* 6342 + * Set total limit to be equal to VL15 credits. 6343 + * Leave shared limit at 0. 6344 + */ 6345 + reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; 6346 + write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); 6334 6347 6335 6348 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf 6336 6349 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); ··· 6361 6348 for (i = 0; i < TXE_NUM_DATA_VL; i++) 6362 6349 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); 6363 6350 write_csr(dd, SEND_CM_CREDIT_VL15, 0); 6364 - write_global_credit(dd, 0, 0, 0); 6351 + write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); 6365 6352 /* reset the CM block */ 6366 6353 pio_send_control(dd, PSC_CM_RESET); 6354 + /* reset cached value */ 6355 + dd->vl15buf_cached = 0; 6367 6356 } 6368 6357 6369 6358 /* convert a vCU to a CU */ ··· 6854 6839 { 6855 6840 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, 6856 6841 link_up_work); 6842 + struct hfi1_devdata *dd = ppd->dd; 6843 + 6857 6844 set_link_state(ppd, HLS_UP_INIT); 6858 6845 6859 6846 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ 6860 - read_ltp_rtt(ppd->dd); 6847 + read_ltp_rtt(dd); 6861 6848 /* 6862 6849 * OPA specifies that certain counters are cleared on a transition 6863 6850 * to link up, so do that. 6864 6851 */ 6865 - clear_linkup_counters(ppd->dd); 6852 + clear_linkup_counters(dd); 6866 6853 /* 6867 6854 * And (re)set link up default values. 6868 6855 */ 6869 6856 set_linkup_defaults(ppd); 6870 6857 6858 + /* 6859 + * Set VL15 credits. Use cached value from verify cap interrupt. 6860 + * In case of quick linkup or simulator, vl15 value will be set by 6861 + * handle_linkup_change. VerifyCap interrupt handler will not be 6862 + * called in those scenarios. 
6863 + */ 6864 + if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) 6865 + set_up_vl15(dd, dd->vl15buf_cached); 6866 + 6871 6867 /* enforce link speed enabled */ 6872 6868 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { 6873 6869 /* oops - current speed is not enabled, bounce */ 6874 - dd_dev_err(ppd->dd, 6870 + dd_dev_err(dd, 6875 6871 "Link speed active 0x%x is outside enabled 0x%x, downing link\n", 6876 6872 ppd->link_speed_active, ppd->link_speed_enabled); 6877 6873 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, ··· 7383 7357 */ 7384 7358 if (vau == 0) 7385 7359 vau = 1; 7386 - set_up_vl15(dd, vau, vl15buf); 7360 + set_up_vau(dd, vau); 7361 + 7362 + /* 7363 + * Set VL15 credits to 0 in global credit register. Cache remote VL15 7364 + * credits value and wait for link-up interrupt ot set it. 7365 + */ 7366 + set_up_vl15(dd, 0); 7367 + dd->vl15buf_cached = vl15buf; 7387 7368 7388 7369 /* set up the LCB CRC mode */ 7389 7370 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
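set_up_vau() and set_up_vl15() above replace the single write_global_credit() with field-wise read-modify-write of SEND_CM_GLOBAL_CREDIT, so setting one field no longer clobbers the others and VL15 credits can stay parked at zero until link-up. A standalone sketch of that read-modify-write for the AU field, using the shift/mask/reset values from the chip_registers.h hunk that follows:

#include <stdio.h>
#include <stdint.h>

#define SEND_CM_GLOBAL_CREDIT_AU_SHIFT      16
#define SEND_CM_GLOBAL_CREDIT_AU_SMASK      0x70000ull
#define SEND_CM_GLOBAL_CREDIT_RESETCSR      0x0000094000030000ull

static uint64_t set_au(uint64_t reg, uint8_t vau)
{
        /* Clear only the AU field; the other credit fields stay intact. */
        reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
        reg |= (uint64_t)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
        return reg;
}

int main(void)
{
        uint64_t reg = SEND_CM_GLOBAL_CREDIT_RESETCSR;

        reg = set_au(reg, 2);
        printf("SEND_CM_GLOBAL_CREDIT: 0x%016llx\n",
               (unsigned long long)reg);
        return 0;
}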
+2
drivers/infiniband/hw/hfi1/chip_registers.h
··· 839 839 #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull 840 840 #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull 841 841 #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508) 842 + #define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull 842 843 #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16 844 + #define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull 843 845 #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull 844 846 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull 845 847 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
+10 -1
drivers/infiniband/hw/hfi1/hfi.h
··· 1045 1045 /* initial vl15 credits to use */ 1046 1046 u16 vl15_init; 1047 1047 1048 + /* 1049 + * Cached value for vl15buf, read during verify cap interrupt. VL15 1050 + * credits are to be kept at 0 and set when handling the link-up 1051 + * interrupt. This removes the possibility of receiving VL15 MAD 1052 + * packets before this HFI is ready. 1053 + */ 1054 + u16 vl15buf_cached; 1055 + 1048 1056 /* Misc small ints */ 1049 1057 u8 n_krcv_queues; 1050 1058 u8 qos_shift; ··· 1606 1598 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); 1607 1599 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); 1608 1600 1609 - void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); 1601 + void set_up_vau(struct hfi1_devdata *dd, u8 vau); 1602 + void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf); 1610 1603 void reset_link_credits(struct hfi1_devdata *dd); 1611 1604 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); 1612 1605
+2 -1
drivers/infiniband/hw/hfi1/intr.c
··· 130 130 * the remote values. Both sides must be using the values. 131 131 */ 132 132 if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { 133 - set_up_vl15(dd, dd->vau, dd->vl15_init); 133 + set_up_vau(dd, dd->vau); 134 + set_up_vl15(dd, dd->vl15_init); 134 135 assign_remote_cm_au_table(dd, dd->vcu); 135 136 } 136 137
+2 -2
drivers/infiniband/hw/hfi1/pcie.c
··· 207 207 /* 208 208 * Save BARs and command to rewrite after device reset. 209 209 */ 210 - dd->pcibar0 = addr; 211 - dd->pcibar1 = addr >> 32; 210 + pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0); 211 + pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1); 212 212 pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); 213 213 pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); 214 214 pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);
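The pcie.c fix stops deriving the saved BAR values from the mapped kernel address and instead reads them back from config space, which is what a later restore after device reset needs. A hedged sketch of that save/restore pairing; the read side matches the hunk above, the write side is an assumption about how the cached values get used:

#include <linux/pci.h>

/* Sketch only: save the low BARs from config space before a reset and
 * write them back afterwards.
 */
static void example_save_bars(struct pci_dev *pdev, u32 *bar0, u32 *bar1)
{
        pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, bar0);
        pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, bar1);
}

static void example_restore_bars(struct pci_dev *pdev, u32 bar0, u32 bar1)
{
        pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, bar0);
        pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, bar1);
}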
+4 -1
drivers/infiniband/hw/hfi1/rc.c
··· 2159 2159 ret = hfi1_rvt_get_rwqe(qp, 1); 2160 2160 if (ret < 0) 2161 2161 goto nack_op_err; 2162 - if (!ret) 2162 + if (!ret) { 2163 + /* peer will send again */ 2164 + rvt_put_ss(&qp->r_sge); 2163 2165 goto rnr_nak; 2166 + } 2164 2167 wc.ex.imm_data = ohdr->u.rc.imm_data; 2165 2168 wc.wc_flags = IB_WC_WITH_IMM; 2166 2169 goto send_last;
+2 -1
drivers/infiniband/hw/hfi1/sysfs.c
··· 196 196 }; 197 197 198 198 static struct attribute *port_cc_default_attributes[] = { 199 - &cc_prescan_attr.attr 199 + &cc_prescan_attr.attr, 200 + NULL 200 201 }; 201 202 202 203 static struct kobj_type port_cc_ktype = {
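The sysfs fix adds the missing NULL terminator to port_cc_default_attributes; sysfs walks attribute arrays until it hits NULL, so an unterminated array runs off the end of the allocation. A tiny standalone illustration of the sentinel convention, with a plain string array standing in for struct attribute pointers:

#include <stdio.h>

static const char *port_cc_attrs[] = {
        "cc_prescan",
        NULL            /* the terminator this patch adds */
};

int main(void)
{
        /* Consumers iterate until NULL, exactly as sysfs does. */
        for (const char **p = port_cc_attrs; *p; p++)
                printf("attribute: %s\n", *p);
        return 0;
}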
+1 -2
drivers/infiniband/hw/i40iw/i40iw_cm.c
··· 784 784 } 785 785 786 786 ctrl_ird |= IETF_PEER_TO_PEER; 787 - ctrl_ird |= IETF_FLPDU_ZERO_LEN; 788 787 789 788 switch (mpa_key) { 790 789 case MPA_KEY_REQUEST: ··· 2445 2446 } else { 2446 2447 type = I40IW_CM_EVENT_CONNECTED; 2447 2448 cm_node->state = I40IW_CM_STATE_OFFLOADED; 2448 - i40iw_send_ack(cm_node); 2449 2449 } 2450 + i40iw_send_ack(cm_node); 2450 2451 break; 2451 2452 default: 2452 2453 pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
+1 -11
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
··· 285 285 struct i40iw_sc_dev *dev = vsi->dev; 286 286 struct i40iw_sc_qp *qp = NULL; 287 287 bool qs_handle_change = false; 288 - bool mss_change = false; 289 288 unsigned long flags; 290 289 u16 qs_handle; 291 290 int i; 292 291 293 - if (vsi->mss != l2params->mss) { 294 - mss_change = true; 295 - vsi->mss = l2params->mss; 296 - } 292 + vsi->mss = l2params->mss; 297 293 298 294 i40iw_fill_qos_list(l2params->qs_handle_list); 299 295 for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { 300 296 qs_handle = l2params->qs_handle_list[i]; 301 297 if (vsi->qos[i].qs_handle != qs_handle) 302 298 qs_handle_change = true; 303 - else if (!mss_change) 304 - continue; /* no MSS nor qs handle change */ 305 299 spin_lock_irqsave(&vsi->qos[i].lock, flags); 306 300 qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); 307 301 while (qp) { 308 - if (mss_change) 309 - i40iw_qp_mss_modify(dev, qp); 310 302 if (qs_handle_change) { 311 303 qp->qs_handle = qs_handle; 312 304 /* issue cqp suspend command */ ··· 2387 2395 2388 2396 set_64bit_val(wqe, 2389 2397 8, 2390 - LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) | 2391 2398 LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); 2392 2399 2393 2400 set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); ··· 2401 2410 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | 2402 2411 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | 2403 2412 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | 2404 - LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) | 2405 2413 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | 2406 2414 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | 2407 2415 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
+13 -7
drivers/infiniband/hw/i40iw/i40iw_main.c
··· 1319 1319 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, 1320 1320 I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); 1321 1321 if (status) 1322 - goto exit; 1322 + goto error; 1323 1323 info.fpm_query_buf_pa = mem.pa; 1324 1324 info.fpm_query_buf = mem.va; 1325 1325 status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, 1326 1326 I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); 1327 1327 if (status) 1328 - goto exit; 1328 + goto error; 1329 1329 info.fpm_commit_buf_pa = mem.pa; 1330 1330 info.fpm_commit_buf = mem.va; 1331 1331 info.hmc_fn_id = ldev->fid; ··· 1347 1347 info.exception_lan_queue = 1; 1348 1348 info.vchnl_send = i40iw_virtchnl_send; 1349 1349 status = i40iw_device_init(&iwdev->sc_dev, &info); 1350 - exit: 1351 - if (status) { 1352 - kfree(iwdev->hmc_info_mem); 1353 - iwdev->hmc_info_mem = NULL; 1354 - } 1350 + 1351 + if (status) 1352 + goto error; 1355 1353 memset(&vsi_info, 0, sizeof(vsi_info)); 1356 1354 vsi_info.dev = &iwdev->sc_dev; 1357 1355 vsi_info.back_vsi = (void *)iwdev; ··· 1360 1362 memset(&stats_info, 0, sizeof(stats_info)); 1361 1363 stats_info.fcn_id = ldev->fid; 1362 1364 stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); 1365 + if (!stats_info.pestat) { 1366 + status = I40IW_ERR_NO_MEMORY; 1367 + goto error; 1368 + } 1363 1369 stats_info.stats_initialize = true; 1364 1370 if (stats_info.pestat) 1365 1371 i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); 1366 1372 } 1373 + return status; 1374 + error: 1375 + kfree(iwdev->hmc_info_mem); 1376 + iwdev->hmc_info_mem = NULL; 1367 1377 return status; 1368 1378 } 1369 1379
-1
drivers/infiniband/hw/i40iw/i40iw_osdep.h
··· 199 199 struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); 200 200 void *i40iw_remove_head(struct list_head *list); 201 201 void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); 202 - void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); 203 202 204 203 void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); 205 204 void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
-2
drivers/infiniband/hw/i40iw/i40iw_type.h
··· 541 541 struct i40iw_modify_qp_info { 542 542 u64 rx_win0; 543 543 u64 rx_win1; 544 - u16 new_mss; 545 544 u8 next_iwarp_state; 546 545 u8 termlen; 547 546 bool ord_valid; ··· 553 554 bool dont_send_term; 554 555 bool dont_send_fin; 555 556 bool cached_var_valid; 556 - bool mss_change; 557 557 bool force_loopback; 558 558 }; 559 559
-17
drivers/infiniband/hw/i40iw/i40iw_utils.c
··· 757 757 } 758 758 759 759 /** 760 - * i40iw_qp_mss_modify - modify mss for qp 761 - * @dev: hardware control device structure 762 - * @qp: hardware control qp 763 - */ 764 - void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) 765 - { 766 - struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; 767 - struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; 768 - struct i40iw_modify_qp_info info; 769 - 770 - memset(&info, 0, sizeof(info)); 771 - info.mss_change = true; 772 - info.new_mss = qp->vsi->mss; 773 - i40iw_hw_modify_qp(iwdev, iwqp, &info, false); 774 - } 775 - 776 - /** 777 760 * i40iw_term_modify_qp - modify qp for term message 778 761 * @qp: hardware control qp 779 762 * @next_state: qp's next state
+1 -4
drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
··· 443 443 if (!dev->vchnl_up) 444 444 return I40IW_ERR_NOT_READY; 445 445 if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { 446 - if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0) 447 - vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); 448 - else 449 - vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); 446 + vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); 450 447 return I40IW_SUCCESS; 451 448 } 452 449 for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
+1
drivers/infiniband/hw/mlx4/mad.c
··· 1578 1578 if (port < 0) 1579 1579 return; 1580 1580 ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); 1581 + ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port); 1581 1582 1582 1583 mlx4_ib_query_ah(&ah.ibah, &ah_attr); 1583 1584 if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
+18 -2
drivers/infiniband/hw/mlx5/main.c
··· 2979 2979 return ret; 2980 2980 } 2981 2981 2982 + static u8 mlx5_get_umr_fence(u8 umr_fence_cap) 2983 + { 2984 + switch (umr_fence_cap) { 2985 + case MLX5_CAP_UMR_FENCE_NONE: 2986 + return MLX5_FENCE_MODE_NONE; 2987 + case MLX5_CAP_UMR_FENCE_SMALL: 2988 + return MLX5_FENCE_MODE_INITIATOR_SMALL; 2989 + default: 2990 + return MLX5_FENCE_MODE_STRONG_ORDERING; 2991 + } 2992 + } 2993 + 2982 2994 static int create_dev_resources(struct mlx5_ib_resources *devr) 2983 2995 { 2984 2996 struct ib_srq_init_attr attr; ··· 3692 3680 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 3693 3681 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 3694 3682 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 3695 - dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; 3696 - dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; 3683 + if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { 3684 + dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; 3685 + dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; 3686 + } 3697 3687 if (mlx5_core_is_pf(mdev)) { 3698 3688 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 3699 3689 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; ··· 3706 3692 dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext; 3707 3693 3708 3694 mlx5_ib_internal_fill_odp_caps(dev); 3695 + 3696 + dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); 3709 3697 3710 3698 if (MLX5_CAP_GEN(mdev, imaicl)) { 3711 3699 dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
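mlx5_get_umr_fence() above folds the device's umr_fence capability into a single fence mode that the QP code (next hunks) applies to memory-registration work requests. A standalone sketch of that mapping; the identifier names follow the diff, but the numeric enum values used here are placeholders:

#include <stdio.h>

enum { MLX5_CAP_UMR_FENCE_SMALL = 1, MLX5_CAP_UMR_FENCE_NONE = 2 };
enum { MLX5_FENCE_MODE_NONE = 0, MLX5_FENCE_MODE_INITIATOR_SMALL = 1,
       MLX5_FENCE_MODE_STRONG_ORDERING = 3 };

/* Same shape as the new mlx5_get_umr_fence(): anything the device does
 * not explicitly relax falls back to strong ordering.
 */
static int umr_fence(int umr_fence_cap)
{
        switch (umr_fence_cap) {
        case MLX5_CAP_UMR_FENCE_NONE:
                return MLX5_FENCE_MODE_NONE;
        case MLX5_CAP_UMR_FENCE_SMALL:
                return MLX5_FENCE_MODE_INITIATOR_SMALL;
        default:
                return MLX5_FENCE_MODE_STRONG_ORDERING;
        }
}

int main(void)
{
        for (int cap = 0; cap <= 2; cap++)
                printf("umr_fence cap %d -> fence mode %d\n",
                       cap, umr_fence(cap));
        return 0;
}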
+2 -1
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 349 349 struct mlx5_ib_wq rq; 350 350 351 351 u8 sq_signal_bits; 352 - u8 fm_cache; 352 + u8 next_fence; 353 353 struct mlx5_ib_wq sq; 354 354 355 355 /* serialize qp state modifications ··· 654 654 struct mlx5_ib_port *port; 655 655 struct mlx5_sq_bfreg bfreg; 656 656 struct mlx5_sq_bfreg fp_bfreg; 657 + u8 umr_fence; 657 658 }; 658 659 659 660 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
+23 -36
drivers/infiniband/hw/mlx5/qp.c
··· 3738 3738 } 3739 3739 } 3740 3740 3741 - static u8 get_fence(u8 fence, struct ib_send_wr *wr) 3742 - { 3743 - if (unlikely(wr->opcode == IB_WR_LOCAL_INV && 3744 - wr->send_flags & IB_SEND_FENCE)) 3745 - return MLX5_FENCE_MODE_STRONG_ORDERING; 3746 - 3747 - if (unlikely(fence)) { 3748 - if (wr->send_flags & IB_SEND_FENCE) 3749 - return MLX5_FENCE_MODE_SMALL_AND_FENCE; 3750 - else 3751 - return fence; 3752 - } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { 3753 - return MLX5_FENCE_MODE_FENCE; 3754 - } 3755 - 3756 - return 0; 3757 - } 3758 - 3759 3741 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, 3760 3742 struct mlx5_wqe_ctrl_seg **ctrl, 3761 3743 struct ib_send_wr *wr, unsigned *idx, ··· 3766 3784 static void finish_wqe(struct mlx5_ib_qp *qp, 3767 3785 struct mlx5_wqe_ctrl_seg *ctrl, 3768 3786 u8 size, unsigned idx, u64 wr_id, 3769 - int nreq, u8 fence, u8 next_fence, 3770 - u32 mlx5_opcode) 3787 + int nreq, u8 fence, u32 mlx5_opcode) 3771 3788 { 3772 3789 u8 opmod = 0; 3773 3790 ··· 3774 3793 mlx5_opcode | ((u32)opmod << 24)); 3775 3794 ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); 3776 3795 ctrl->fm_ce_se |= fence; 3777 - qp->fm_cache = next_fence; 3778 3796 if (unlikely(qp->wq_sig)) 3779 3797 ctrl->signature = wq_sig(ctrl); 3780 3798 ··· 3833 3853 goto out; 3834 3854 } 3835 3855 3836 - fence = qp->fm_cache; 3837 3856 num_sge = wr->num_sge; 3838 3857 if (unlikely(num_sge > qp->sq.max_gs)) { 3839 3858 mlx5_ib_warn(dev, "\n"); ··· 3847 3868 err = -ENOMEM; 3848 3869 *bad_wr = wr; 3849 3870 goto out; 3871 + } 3872 + 3873 + if (wr->opcode == IB_WR_LOCAL_INV || 3874 + wr->opcode == IB_WR_REG_MR) { 3875 + fence = dev->umr_fence; 3876 + next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3877 + } else if (wr->send_flags & IB_SEND_FENCE) { 3878 + if (qp->next_fence) 3879 + fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; 3880 + else 3881 + fence = MLX5_FENCE_MODE_FENCE; 3882 + } else { 3883 + fence = qp->next_fence; 3850 3884 } 3851 3885 3852 3886 switch (ibqp->qp_type) { ··· 3888 3896 goto out; 3889 3897 3890 3898 case IB_WR_LOCAL_INV: 3891 - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3892 3899 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; 3893 3900 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); 3894 3901 set_linv_wr(qp, &seg, &size); ··· 3895 3904 break; 3896 3905 3897 3906 case IB_WR_REG_MR: 3898 - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3899 3907 qp->sq.wr_data[idx] = IB_WR_REG_MR; 3900 3908 ctrl->imm = cpu_to_be32(reg_wr(wr)->key); 3901 3909 err = set_reg_wr(qp, reg_wr(wr), &seg, &size); ··· 3917 3927 goto out; 3918 3928 } 3919 3929 3920 - finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3921 - nreq, get_fence(fence, wr), 3922 - next_fence, MLX5_OPCODE_UMR); 3930 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 3931 + fence, MLX5_OPCODE_UMR); 3923 3932 /* 3924 3933 * SET_PSV WQEs are not signaled and solicited 3925 3934 * on error ··· 3943 3954 goto out; 3944 3955 } 3945 3956 3946 - finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3947 - nreq, get_fence(fence, wr), 3948 - next_fence, MLX5_OPCODE_SET_PSV); 3957 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 3958 + fence, MLX5_OPCODE_SET_PSV); 3949 3959 err = begin_wqe(qp, &seg, &ctrl, wr, 3950 3960 &idx, &size, nreq); 3951 3961 if (err) { ··· 3954 3966 goto out; 3955 3967 } 3956 3968 3957 - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3958 3969 err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, 3959 3970 mr->sig->psv_wire.psv_idx, &seg, 3960 3971 &size); ··· 3963 3976 goto out; 3964 3977 } 3965 3978 3966 - 
finish_wqe(qp, ctrl, size, idx, wr->wr_id, 3967 - nreq, get_fence(fence, wr), 3968 - next_fence, MLX5_OPCODE_SET_PSV); 3979 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 3980 + fence, MLX5_OPCODE_SET_PSV); 3981 + qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; 3969 3982 num_sge = 0; 3970 3983 goto skip_psv; 3971 3984 ··· 4076 4089 } 4077 4090 } 4078 4091 4079 - finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, 4080 - get_fence(fence, wr), next_fence, 4092 + qp->next_fence = next_fence; 4093 + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence, 4081 4094 mlx5_ib_opcode[wr->opcode]); 4082 4095 skip_psv: 4083 4096 if (0)
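The post-send rework above replaces the cached get_fence() logic with an explicit per-WR decision: LOCAL_INV and REG_MR always take the device's UMR fence and arm a small fence for the next WR, IB_SEND_FENCE escalates depending on whether that small fence is pending, and everything else inherits it. A condensed, standalone sketch of just that decision ladder; names follow the diff, numeric values are placeholders:

#include <stdio.h>

enum { WR_SEND, WR_LOCAL_INV, WR_REG_MR };              /* simplified opcodes */
enum { FENCE_NONE = 0, FENCE_INITIATOR_SMALL = 1,
       FENCE_FENCE = 2, FENCE_SMALL_AND_FENCE = 4 };    /* placeholder values */

struct wr { int opcode; int send_fence; };

/* Returns the fence for this WR and updates *next_fence for the one after,
 * condensing the logic added to mlx5_ib_post_send() above.
 */
static int pick_fence(const struct wr *wr, int umr_fence, int *next_fence)
{
        int fence;

        if (wr->opcode == WR_LOCAL_INV || wr->opcode == WR_REG_MR) {
                fence = umr_fence;
                *next_fence = FENCE_INITIATOR_SMALL;
        } else if (wr->send_fence) {
                fence = *next_fence ? FENCE_SMALL_AND_FENCE : FENCE_FENCE;
        } else {
                fence = *next_fence;
        }
        return fence;
}

int main(void)
{
        struct wr wrs[] = {
                { WR_REG_MR, 0 }, { WR_SEND, 0 }, { WR_SEND, 1 },
        };
        int next_fence = FENCE_NONE;

        for (unsigned int i = 0; i < sizeof(wrs) / sizeof(wrs[0]); i++)
                printf("wr %u -> fence %d\n", i,
                       pick_fence(&wrs[i], FENCE_INITIATOR_SMALL, &next_fence));
        return 0;
}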
+1 -2
drivers/infiniband/hw/nes/nes_cm.c
··· 610 610 ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; 611 611 } 612 612 ctrl_ird |= IETF_PEER_TO_PEER; 613 - ctrl_ird |= IETF_FLPDU_ZERO_LEN; 614 613 615 614 switch (mpa_key) { 616 615 case MPA_KEY_REQUEST: ··· 1825 1826 type = NES_CM_EVENT_CONNECTED; 1826 1827 cm_node->state = NES_CM_STATE_TSA; 1827 1828 } 1828 - 1829 + send_ack(cm_node, NULL); 1829 1830 break; 1830 1831 default: 1831 1832 WARN_ON(1);
+4 -1
drivers/infiniband/hw/qedr/qedr.h
··· 58 58 #define QEDR_MSG_QP " QP" 59 59 #define QEDR_MSG_GSI " GSI" 60 60 61 - #define QEDR_CQ_MAGIC_NUMBER (0x11223344) 61 + #define QEDR_CQ_MAGIC_NUMBER (0x11223344) 62 + 63 + #define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE) 64 + #define FW_PAGE_SHIFT (12) 62 65 63 66 struct qedr_dev; 64 67
+6 -4
drivers/infiniband/hw/qedr/qedr_cm.c
··· 270 270 return rc; 271 271 } 272 272 273 - vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); 274 - if (vlan_id < VLAN_CFI_MASK) 275 - has_vlan = true; 276 - if (sgid_attr.ndev) 273 + if (sgid_attr.ndev) { 274 + vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); 275 + if (vlan_id < VLAN_CFI_MASK) 276 + has_vlan = true; 277 + 277 278 dev_put(sgid_attr.ndev); 279 + } 278 280 279 281 if (!memcmp(&sgid, &zgid, sizeof(sgid))) { 280 282 DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
+39 -25
drivers/infiniband/hw/qedr/verbs.c
··· 653 653 654 654 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, 655 655 struct qedr_pbl *pbl, 656 - struct qedr_pbl_info *pbl_info) 656 + struct qedr_pbl_info *pbl_info, u32 pg_shift) 657 657 { 658 658 int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; 659 + u32 fw_pg_cnt, fw_pg_per_umem_pg; 659 660 struct qedr_pbl *pbl_tbl; 660 661 struct scatterlist *sg; 661 662 struct regpair *pbe; 663 + u64 pg_addr; 662 664 int entry; 663 - u32 addr; 664 665 665 666 if (!pbl_info->num_pbes) 666 667 return; ··· 684 683 685 684 shift = umem->page_shift; 686 685 686 + fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift); 687 + 687 688 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { 688 689 pages = sg_dma_len(sg) >> shift; 690 + pg_addr = sg_dma_address(sg); 689 691 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { 690 - /* store the page address in pbe */ 691 - pbe->lo = cpu_to_le32(sg_dma_address(sg) + 692 - (pg_cnt << shift)); 693 - addr = upper_32_bits(sg_dma_address(sg) + 694 - (pg_cnt << shift)); 695 - pbe->hi = cpu_to_le32(addr); 696 - pbe_cnt++; 697 - total_num_pbes++; 698 - pbe++; 692 + for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) { 693 + pbe->lo = cpu_to_le32(pg_addr); 694 + pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); 699 695 700 - if (total_num_pbes == pbl_info->num_pbes) 701 - return; 696 + pg_addr += BIT(pg_shift); 697 + pbe_cnt++; 698 + total_num_pbes++; 699 + pbe++; 702 700 703 - /* If the given pbl is full storing the pbes, 704 - * move to next pbl. 705 - */ 706 - if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { 707 - pbl_tbl++; 708 - pbe = (struct regpair *)pbl_tbl->va; 709 - pbe_cnt = 0; 701 + if (total_num_pbes == pbl_info->num_pbes) 702 + return; 703 + 704 + /* If the given pbl is full storing the pbes, 705 + * move to next pbl. 706 + */ 707 + if (pbe_cnt == 708 + (pbl_info->pbl_size / sizeof(u64))) { 709 + pbl_tbl++; 710 + pbe = (struct regpair *)pbl_tbl->va; 711 + pbe_cnt = 0; 712 + } 713 + 714 + fw_pg_cnt++; 710 715 } 711 716 } 712 717 } ··· 761 754 u64 buf_addr, size_t buf_len, 762 755 int access, int dmasync) 763 756 { 764 - int page_cnt; 757 + u32 fw_pages; 765 758 int rc; 766 759 767 760 q->buf_addr = buf_addr; ··· 773 766 return PTR_ERR(q->umem); 774 767 } 775 768 776 - page_cnt = ib_umem_page_count(q->umem); 777 - rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); 769 + fw_pages = ib_umem_page_count(q->umem) << 770 + (q->umem->page_shift - FW_PAGE_SHIFT); 771 + 772 + rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); 778 773 if (rc) 779 774 goto err0; 780 775 ··· 786 777 goto err0; 787 778 } 788 779 789 - qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); 780 + qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info, 781 + FW_PAGE_SHIFT); 790 782 791 783 return 0; 792 784 ··· 2236 2226 goto err1; 2237 2227 2238 2228 qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, 2239 - &mr->info.pbl_info); 2229 + &mr->info.pbl_info, mr->umem->page_shift); 2240 2230 2241 2231 rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); 2242 2232 if (rc) { ··· 3218 3208 break; 3219 3209 case IB_WC_REG_MR: 3220 3210 qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; 3211 + break; 3212 + case IB_WC_RDMA_READ: 3213 + case IB_WC_SEND: 3214 + wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; 3221 3215 break; 3222 3216 default: 3223 3217 break;
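The rework above decouples the PBL from the umem page size: each umem page is split into firmware-sized (4 KiB) pages before the PBEs are written. A small, runnable illustration of that arithmetic, with assumed example values for the shifts and the sg DMA address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int umem_shift = 16;	/* assumed 64 KiB umem pages */
	unsigned int fw_shift = 12;	/* 4 KiB firmware pages */
	uint64_t pg_addr = 0x80000000;	/* assumed sg DMA address */
	unsigned int fw_per_umem = 1u << (umem_shift - fw_shift);

	/* One PBE per firmware page, as the inner loop above writes them. */
	for (unsigned int i = 0; i < fw_per_umem; i++) {
		printf("pbe[%u] = 0x%llx\n", i, (unsigned long long)pg_addr);
		pg_addr += 1u << fw_shift;
	}
	return 0;
}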
+3 -1
drivers/infiniband/hw/qib/qib_rc.c
··· 1956 1956 ret = qib_get_rwqe(qp, 1); 1957 1957 if (ret < 0) 1958 1958 goto nack_op_err; 1959 - if (!ret) 1959 + if (!ret) { 1960 + rvt_put_ss(&qp->r_sge); 1960 1961 goto rnr_nak; 1962 + } 1961 1963 wc.ex.imm_data = ohdr->u.rc.imm_data; 1962 1964 hdrsize += 4; 1963 1965 wc.wc_flags = IB_WC_WITH_IMM;
+4 -1
drivers/infiniband/sw/rxe/rxe.h
··· 68 68 static inline u32 rxe_crc32(struct rxe_dev *rxe, 69 69 u32 crc, void *next, size_t len) 70 70 { 71 + u32 retval; 71 72 int err; 72 73 73 74 SHASH_DESC_ON_STACK(shash, rxe->tfm); ··· 82 81 return crc32_le(crc, next, len); 83 82 } 84 83 85 - return *(u32 *)shash_desc_ctx(shash); 84 + retval = *(u32 *)shash_desc_ctx(shash); 85 + barrier_data(shash_desc_ctx(shash)); 86 + return retval; 86 87 } 87 88 88 89 int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+2 -7
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 740 740 741 741 sge = ibwr->sg_list; 742 742 for (i = 0; i < num_sge; i++, sge++) { 743 - if (qp->is_user && copy_from_user(p, (__user void *) 744 - (uintptr_t)sge->addr, sge->length)) 745 - return -EFAULT; 746 - 747 - else if (!qp->is_user) 748 - memcpy(p, (void *)(uintptr_t)sge->addr, 749 - sge->length); 743 + memcpy(p, (void *)(uintptr_t)sge->addr, 744 + sge->length); 750 745 751 746 p += sge->length; 752 747 }
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
··· 178 178 static int ipoib_get_link_ksettings(struct net_device *netdev, 179 179 struct ethtool_link_ksettings *cmd) 180 180 { 181 - struct ipoib_dev_priv *priv = netdev_priv(netdev); 181 + struct ipoib_dev_priv *priv = ipoib_priv(netdev); 182 182 struct ib_port_attr attr; 183 183 int ret, speed, width; 184 184
-1
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 863 863 set_bit(IPOIB_STOP_REAPER, &priv->flags); 864 864 cancel_delayed_work(&priv->ah_reap_task); 865 865 set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags); 866 - napi_enable(&priv->napi); 867 866 ipoib_ib_dev_stop(dev); 868 867 return -1; 869 868 }
+14 -3
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1590 1590 wait_for_completion(&priv->ntbl.deleted); 1591 1591 } 1592 1592 1593 - void ipoib_dev_uninit_default(struct net_device *dev) 1593 + static void ipoib_dev_uninit_default(struct net_device *dev) 1594 1594 { 1595 1595 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1596 1596 1597 1597 ipoib_transport_dev_cleanup(dev); 1598 + 1599 + netif_napi_del(&priv->napi); 1598 1600 1599 1601 ipoib_cm_dev_cleanup(dev); 1600 1602 ··· 1651 1649 kfree(priv->rx_ring); 1652 1650 1653 1651 out: 1652 + netif_napi_del(&priv->napi); 1654 1653 return -ENOMEM; 1655 1654 } 1656 1655 ··· 2240 2237 2241 2238 device_init_failed: 2242 2239 free_netdev(priv->dev); 2240 + kfree(priv); 2243 2241 2244 2242 alloc_mem_failed: 2245 2243 return ERR_PTR(result); ··· 2281 2277 2282 2278 static void ipoib_remove_one(struct ib_device *device, void *client_data) 2283 2279 { 2284 - struct ipoib_dev_priv *priv, *tmp; 2280 + struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv; 2285 2281 struct list_head *dev_list = client_data; 2286 2282 2287 2283 if (!dev_list) ··· 2304 2300 flush_workqueue(priv->wq); 2305 2301 2306 2302 unregister_netdev(priv->dev); 2307 - free_netdev(priv->dev); 2303 + if (device->free_rdma_netdev) 2304 + device->free_rdma_netdev(priv->dev); 2305 + else 2306 + free_netdev(priv->dev); 2307 + 2308 + list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) 2309 + kfree(cpriv); 2310 + 2308 2311 kfree(priv); 2309 2312 } 2310 2313
+7 -4
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
··· 133 133 snprintf(intf_name, sizeof intf_name, "%s.%04x", 134 134 ppriv->dev->name, pkey); 135 135 136 + if (!rtnl_trylock()) 137 + return restart_syscall(); 138 + 136 139 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); 137 140 if (!priv) 138 141 return -ENOMEM; 139 - 140 - if (!rtnl_trylock()) 141 - return restart_syscall(); 142 142 143 143 down_write(&ppriv->vlan_rwsem); 144 144 ··· 167 167 168 168 rtnl_unlock(); 169 169 170 - if (result) 170 + if (result) { 171 171 free_netdev(priv->dev); 172 + kfree(priv); 173 + } 172 174 173 175 return result; 174 176 } ··· 211 209 212 210 if (dev) { 213 211 free_netdev(dev); 212 + kfree(priv); 214 213 return 0; 215 214 } 216 215
+2 -2
drivers/infiniband/ulp/srp/ib_srp.c
··· 320 320 ch->path.sgid = target->sgid; 321 321 ch->path.dgid = target->orig_dgid; 322 322 ch->path.pkey = target->pkey; 323 - sa_path_set_service_id(&ch->path, target->service_id); 323 + ch->path.service_id = target->service_id; 324 324 325 325 return 0; 326 326 } ··· 575 575 return 0; 576 576 577 577 err_qp: 578 - srp_destroy_qp(ch, qp); 578 + ib_destroy_qp(qp); 579 579 580 580 err_send_cq: 581 581 ib_free_cq(send_cq);
+3 -3
drivers/input/input.c
··· 1398 1398 NULL 1399 1399 }; 1400 1400 1401 - static struct attribute_group input_dev_attr_group = { 1401 + static const struct attribute_group input_dev_attr_group = { 1402 1402 .attrs = input_dev_attrs, 1403 1403 }; 1404 1404 ··· 1425 1425 NULL 1426 1426 }; 1427 1427 1428 - static struct attribute_group input_dev_id_attr_group = { 1428 + static const struct attribute_group input_dev_id_attr_group = { 1429 1429 .name = "id", 1430 1430 .attrs = input_dev_id_attrs, 1431 1431 }; ··· 1495 1495 NULL 1496 1496 }; 1497 1497 1498 - static struct attribute_group input_dev_caps_attr_group = { 1498 + static const struct attribute_group input_dev_caps_attr_group = { 1499 1499 .name = "capabilities", 1500 1500 .attrs = input_dev_caps_attrs, 1501 1501 };
+1 -1
drivers/input/joystick/iforce/iforce-serio.c
··· 164 164 kfree(iforce); 165 165 } 166 166 167 - static struct serio_device_id iforce_serio_ids[] = { 167 + static const struct serio_device_id iforce_serio_ids[] = { 168 168 { 169 169 .type = SERIO_RS232, 170 170 .proto = SERIO_IFORCE,
+1 -1
drivers/input/joystick/iforce/iforce-usb.c
··· 209 209 kfree(iforce); 210 210 } 211 211 212 - static struct usb_device_id iforce_usb_ids [] = { 212 + static const struct usb_device_id iforce_usb_ids[] = { 213 213 { USB_DEVICE(0x044f, 0xa01c) }, /* Thrustmaster Motor Sport GT */ 214 214 { USB_DEVICE(0x046d, 0xc281) }, /* Logitech WingMan Force */ 215 215 { USB_DEVICE(0x046d, 0xc291) }, /* Logitech WingMan Formula Force */
+1 -1
drivers/input/joystick/magellan.c
··· 198 198 * The serio driver structure. 199 199 */ 200 200 201 - static struct serio_device_id magellan_serio_ids[] = { 201 + static const struct serio_device_id magellan_serio_ids[] = { 202 202 { 203 203 .type = SERIO_RS232, 204 204 .proto = SERIO_MAGELLAN,
+1 -1
drivers/input/joystick/spaceball.c
··· 272 272 * The serio driver structure. 273 273 */ 274 274 275 - static struct serio_device_id spaceball_serio_ids[] = { 275 + static const struct serio_device_id spaceball_serio_ids[] = { 276 276 { 277 277 .type = SERIO_RS232, 278 278 .proto = SERIO_SPACEBALL,
+1 -1
drivers/input/joystick/spaceorb.c
··· 213 213 * The serio driver structure. 214 214 */ 215 215 216 - static struct serio_device_id spaceorb_serio_ids[] = { 216 + static const struct serio_device_id spaceorb_serio_ids[] = { 217 217 { 218 218 .type = SERIO_RS232, 219 219 .proto = SERIO_SPACEORB,
+1 -1
drivers/input/joystick/stinger.c
··· 184 184 * The serio driver structure. 185 185 */ 186 186 187 - static struct serio_device_id stinger_serio_ids[] = { 187 + static const struct serio_device_id stinger_serio_ids[] = { 188 188 { 189 189 .type = SERIO_RS232, 190 190 .proto = SERIO_STINGER,
+1 -1
drivers/input/joystick/twidjoy.c
··· 233 233 * The serio driver structure. 234 234 */ 235 235 236 - static struct serio_device_id twidjoy_serio_ids[] = { 236 + static const struct serio_device_id twidjoy_serio_ids[] = { 237 237 { 238 238 .type = SERIO_RS232, 239 239 .proto = SERIO_TWIDJOY,
+1 -1
drivers/input/joystick/warrior.c
··· 193 193 * The serio driver structure. 194 194 */ 195 195 196 - static struct serio_device_id warrior_serio_ids[] = { 196 + static const struct serio_device_id warrior_serio_ids[] = { 197 197 { 198 198 .type = SERIO_RS232, 199 199 .proto = SERIO_WARRIOR,
+1 -1
drivers/input/joystick/xpad.c
··· 408 408 #define XPAD_XBOXONE_VENDOR(vend) \ 409 409 { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) } 410 410 411 - static struct usb_device_id xpad_table[] = { 411 + static const struct usb_device_id xpad_table[] = { 412 412 { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ 413 413 XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ 414 414 XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
+1 -1
drivers/input/joystick/zhenhua.c
··· 192 192 * The serio driver structure. 193 193 */ 194 194 195 - static struct serio_device_id zhenhua_serio_ids[] = { 195 + static const struct serio_device_id zhenhua_serio_ids[] = { 196 196 { 197 197 .type = SERIO_RS232, 198 198 .proto = SERIO_ZHENHUA,
+1 -1
drivers/input/keyboard/atkbd.c
··· 1270 1270 return retval; 1271 1271 } 1272 1272 1273 - static struct serio_device_id atkbd_serio_ids[] = { 1273 + static const struct serio_device_id atkbd_serio_ids[] = { 1274 1274 { 1275 1275 .type = SERIO_8042, 1276 1276 .proto = SERIO_ANY,
+3 -15
drivers/input/keyboard/gpio_keys.c
··· 353 353 NULL, 354 354 }; 355 355 356 - static struct attribute_group gpio_keys_attr_group = { 356 + static const struct attribute_group gpio_keys_attr_group = { 357 357 .attrs = gpio_keys_attrs, 358 358 }; 359 359 ··· 827 827 828 828 fwnode_handle_put(child); 829 829 830 - error = sysfs_create_group(&dev->kobj, &gpio_keys_attr_group); 830 + error = devm_device_add_group(dev, &gpio_keys_attr_group); 831 831 if (error) { 832 832 dev_err(dev, "Unable to export keys/switches, error: %d\n", 833 833 error); ··· 838 838 if (error) { 839 839 dev_err(dev, "Unable to register input device, error: %d\n", 840 840 error); 841 - goto err_remove_group; 841 + return error; 842 842 } 843 843 844 844 device_init_wakeup(dev, wakeup); 845 - 846 - return 0; 847 - 848 - err_remove_group: 849 - sysfs_remove_group(&dev->kobj, &gpio_keys_attr_group); 850 - return error; 851 - } 852 - 853 - static int gpio_keys_remove(struct platform_device *pdev) 854 - { 855 - sysfs_remove_group(&pdev->dev.kobj, &gpio_keys_attr_group); 856 845 857 846 return 0; 858 847 } ··· 901 912 902 913 static struct platform_driver gpio_keys_device_driver = { 903 914 .probe = gpio_keys_probe, 904 - .remove = gpio_keys_remove, 905 915 .driver = { 906 916 .name = "gpio-keys", 907 917 .pm = &gpio_keys_pm_ops,
+1 -1
drivers/input/keyboard/hil_kbd.c
··· 559 559 return error; 560 560 } 561 561 562 - static struct serio_device_id hil_dev_ids[] = { 562 + static const struct serio_device_id hil_dev_ids[] = { 563 563 { 564 564 .type = SERIO_HIL_MLC, 565 565 .proto = SERIO_HIL,
+1 -1
drivers/input/keyboard/lkkbd.c
··· 707 707 kfree(lk); 708 708 } 709 709 710 - static struct serio_device_id lkkbd_serio_ids[] = { 710 + static const struct serio_device_id lkkbd_serio_ids[] = { 711 711 { 712 712 .type = SERIO_RS232, 713 713 .proto = SERIO_LKKBD,
+1 -1
drivers/input/keyboard/newtonkbd.c
··· 142 142 kfree(nkbd); 143 143 } 144 144 145 - static struct serio_device_id nkbd_serio_ids[] = { 145 + static const struct serio_device_id nkbd_serio_ids[] = { 146 146 { 147 147 .type = SERIO_RS232, 148 148 .proto = SERIO_NEWTON,
+10 -5
drivers/input/keyboard/pxa27x_keypad.c
··· 644 644 static int pxa27x_keypad_open(struct input_dev *dev) 645 645 { 646 646 struct pxa27x_keypad *keypad = input_get_drvdata(dev); 647 - 647 + int ret; 648 648 /* Enable unit clock */ 649 - clk_prepare_enable(keypad->clk); 649 + ret = clk_prepare_enable(keypad->clk); 650 + if (ret) 651 + return ret; 652 + 650 653 pxa27x_keypad_config(keypad); 651 654 652 655 return 0; ··· 686 683 struct platform_device *pdev = to_platform_device(dev); 687 684 struct pxa27x_keypad *keypad = platform_get_drvdata(pdev); 688 685 struct input_dev *input_dev = keypad->input_dev; 686 + int ret = 0; 689 687 690 688 /* 691 689 * If the keypad is used as wake up source, the clock is not turned ··· 699 695 700 696 if (input_dev->users) { 701 697 /* Enable unit clock */ 702 - clk_prepare_enable(keypad->clk); 703 - pxa27x_keypad_config(keypad); 698 + ret = clk_prepare_enable(keypad->clk); 699 + if (!ret) 700 + pxa27x_keypad_config(keypad); 704 701 } 705 702 706 703 mutex_unlock(&input_dev->mutex); 707 704 } 708 705 709 - return 0; 706 + return ret; 710 707 } 711 708 #endif 712 709
+1 -1
drivers/input/keyboard/stowaway.c
··· 146 146 kfree(skbd); 147 147 } 148 148 149 - static struct serio_device_id skbd_serio_ids[] = { 149 + static const struct serio_device_id skbd_serio_ids[] = { 150 150 { 151 151 .type = SERIO_RS232, 152 152 .proto = SERIO_STOWAWAY,
+1 -1
drivers/input/keyboard/sunkbd.c
··· 339 339 kfree(sunkbd); 340 340 } 341 341 342 - static struct serio_device_id sunkbd_serio_ids[] = { 342 + static const struct serio_device_id sunkbd_serio_ids[] = { 343 343 { 344 344 .type = SERIO_RS232, 345 345 .proto = SERIO_SUNKBD,
+4 -1
drivers/input/keyboard/tegra-kbc.c
··· 370 370 { 371 371 unsigned int debounce_cnt; 372 372 u32 val = 0; 373 + int ret; 373 374 374 - clk_prepare_enable(kbc->clk); 375 + ret = clk_prepare_enable(kbc->clk); 376 + if (ret) 377 + return ret; 375 378 376 379 /* Reset the KBC controller to clear all previous status.*/ 377 380 reset_control_assert(kbc->rst);
+1 -1
drivers/input/keyboard/xtkbd.c
··· 145 145 kfree(xtkbd); 146 146 } 147 147 148 - static struct serio_device_id xtkbd_serio_ids[] = { 148 + static const struct serio_device_id xtkbd_serio_ids[] = { 149 149 { 150 150 .type = SERIO_XT, 151 151 .proto = SERIO_ANY,
+11
drivers/input/misc/Kconfig
··· 581 581 To compile this driver as a module, choose M here: the module will be 582 582 called pwm-beeper. 583 583 584 + config INPUT_RK805_PWRKEY 585 + tristate "Rockchip RK805 PMIC power key support" 586 + depends on MFD_RK808 587 + help 588 + Select this option to enable power key driver for RK805. 589 + 590 + If unsure, say N. 591 + 592 + To compile this driver as a module, choose M here: the module will be 593 + called rk805_pwrkey. 594 + 584 595 config INPUT_GPIO_ROTARY_ENCODER 585 596 tristate "Rotary encoders connected to GPIO pins" 586 597 depends on GPIOLIB || COMPILE_TEST
+1
drivers/input/misc/Makefile
··· 64 64 obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o 65 65 obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o 66 66 obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o 67 + obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o 67 68 obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o 68 69 obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o 69 70 obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
+1 -1
drivers/input/misc/ati_remote2.c
··· 110 110 module_param(mode_mask, mode_mask, 0644); 111 111 MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>"); 112 112 113 - static struct usb_device_id ati_remote2_id_table[] = { 113 + static const struct usb_device_id ati_remote2_id_table[] = { 114 114 { USB_DEVICE(0x0471, 0x0602) }, /* ATI Remote Wonder II */ 115 115 { } 116 116 };
+107 -60
drivers/input/misc/axp20x-pek.c
··· 29 29 #define AXP20X_PEK_STARTUP_MASK (0xc0) 30 30 #define AXP20X_PEK_SHUTDOWN_MASK (0x03) 31 31 32 + struct axp20x_info { 33 + const struct axp20x_time *startup_time; 34 + unsigned int startup_mask; 35 + const struct axp20x_time *shutdown_time; 36 + unsigned int shutdown_mask; 37 + }; 38 + 32 39 struct axp20x_pek { 33 40 struct axp20x_dev *axp20x; 34 41 struct input_dev *input; 42 + struct axp20x_info *info; 35 43 int irq_dbr; 36 44 int irq_dbf; 37 45 }; ··· 56 48 { .time = 2000, .idx = 3 }, 57 49 }; 58 50 51 + static const struct axp20x_time axp221_startup_time[] = { 52 + { .time = 128, .idx = 0 }, 53 + { .time = 1000, .idx = 1 }, 54 + { .time = 2000, .idx = 2 }, 55 + { .time = 3000, .idx = 3 }, 56 + }; 57 + 59 58 static const struct axp20x_time shutdown_time[] = { 60 59 { .time = 4000, .idx = 0 }, 61 60 { .time = 6000, .idx = 1 }, ··· 70 55 { .time = 10000, .idx = 3 }, 71 56 }; 72 57 73 - struct axp20x_pek_ext_attr { 74 - const struct axp20x_time *p_time; 75 - unsigned int mask; 58 + static const struct axp20x_info axp20x_info = { 59 + .startup_time = startup_time, 60 + .startup_mask = AXP20X_PEK_STARTUP_MASK, 61 + .shutdown_time = shutdown_time, 62 + .shutdown_mask = AXP20X_PEK_SHUTDOWN_MASK, 76 63 }; 77 64 78 - static struct axp20x_pek_ext_attr axp20x_pek_startup_ext_attr = { 79 - .p_time = startup_time, 80 - .mask = AXP20X_PEK_STARTUP_MASK, 65 + static const struct axp20x_info axp221_info = { 66 + .startup_time = axp221_startup_time, 67 + .startup_mask = AXP20X_PEK_STARTUP_MASK, 68 + .shutdown_time = shutdown_time, 69 + .shutdown_mask = AXP20X_PEK_SHUTDOWN_MASK, 81 70 }; 82 71 83 - static struct axp20x_pek_ext_attr axp20x_pek_shutdown_ext_attr = { 84 - .p_time = shutdown_time, 85 - .mask = AXP20X_PEK_SHUTDOWN_MASK, 86 - }; 87 - 88 - static struct axp20x_pek_ext_attr *get_axp_ext_attr(struct device_attribute *attr) 89 - { 90 - return container_of(attr, struct dev_ext_attribute, attr)->var; 91 - } 92 - 93 - static ssize_t axp20x_show_ext_attr(struct device *dev, 94 - struct device_attribute *attr, char *buf) 72 + static ssize_t axp20x_show_attr(struct device *dev, 73 + const struct axp20x_time *time, 74 + unsigned int mask, char *buf) 95 75 { 96 76 struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); 97 - struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr); 98 77 unsigned int val; 99 78 int ret, i; 100 79 ··· 96 87 if (ret != 0) 97 88 return ret; 98 89 99 - val &= axp20x_ea->mask; 100 - val >>= ffs(axp20x_ea->mask) - 1; 90 + val &= mask; 91 + val >>= ffs(mask) - 1; 101 92 102 93 for (i = 0; i < 4; i++) 103 - if (val == axp20x_ea->p_time[i].idx) 104 - val = axp20x_ea->p_time[i].time; 94 + if (val == time[i].idx) 95 + val = time[i].time; 105 96 106 97 return sprintf(buf, "%u\n", val); 107 98 } 108 99 109 - static ssize_t axp20x_store_ext_attr(struct device *dev, 110 - struct device_attribute *attr, 111 - const char *buf, size_t count) 100 + static ssize_t axp20x_show_attr_startup(struct device *dev, 101 + struct device_attribute *attr, 102 + char *buf) 112 103 { 113 104 struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); 114 - struct axp20x_pek_ext_attr *axp20x_ea = get_axp_ext_attr(attr); 105 + 106 + return axp20x_show_attr(dev, axp20x_pek->info->startup_time, 107 + axp20x_pek->info->startup_mask, buf); 108 + } 109 + 110 + static ssize_t axp20x_show_attr_shutdown(struct device *dev, 111 + struct device_attribute *attr, 112 + char *buf) 113 + { 114 + struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); 115 + 116 + return axp20x_show_attr(dev, 
axp20x_pek->info->shutdown_time, 117 + axp20x_pek->info->shutdown_mask, buf); 118 + } 119 + 120 + static ssize_t axp20x_store_attr(struct device *dev, 121 + const struct axp20x_time *time, 122 + unsigned int mask, const char *buf, 123 + size_t count) 124 + { 125 + struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); 115 126 char val_str[20]; 116 127 size_t len; 117 128 int ret, i; ··· 152 123 for (i = 3; i >= 0; i--) { 153 124 unsigned int err; 154 125 155 - err = abs(axp20x_ea->p_time[i].time - val); 126 + err = abs(time[i].time - val); 156 127 if (err < best_err) { 157 128 best_err = err; 158 - idx = axp20x_ea->p_time[i].idx; 129 + idx = time[i].idx; 159 130 } 160 131 161 132 if (!err) 162 133 break; 163 134 } 164 135 165 - idx <<= ffs(axp20x_ea->mask) - 1; 166 - ret = regmap_update_bits(axp20x_pek->axp20x->regmap, 167 - AXP20X_PEK_KEY, 168 - axp20x_ea->mask, idx); 136 + idx <<= ffs(mask) - 1; 137 + ret = regmap_update_bits(axp20x_pek->axp20x->regmap, AXP20X_PEK_KEY, 138 + mask, idx); 169 139 if (ret != 0) 170 140 return -EINVAL; 171 141 172 142 return count; 173 143 } 174 144 175 - static struct dev_ext_attribute axp20x_dev_attr_startup = { 176 - .attr = __ATTR(startup, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr), 177 - .var = &axp20x_pek_startup_ext_attr, 178 - }; 145 + static ssize_t axp20x_store_attr_startup(struct device *dev, 146 + struct device_attribute *attr, 147 + const char *buf, size_t count) 148 + { 149 + struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); 179 150 180 - static struct dev_ext_attribute axp20x_dev_attr_shutdown = { 181 - .attr = __ATTR(shutdown, 0644, axp20x_show_ext_attr, axp20x_store_ext_attr), 182 - .var = &axp20x_pek_shutdown_ext_attr, 183 - }; 151 + return axp20x_store_attr(dev, axp20x_pek->info->startup_time, 152 + axp20x_pek->info->startup_mask, buf, count); 153 + } 154 + 155 + static ssize_t axp20x_store_attr_shutdown(struct device *dev, 156 + struct device_attribute *attr, 157 + const char *buf, size_t count) 158 + { 159 + struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev); 160 + 161 + return axp20x_store_attr(dev, axp20x_pek->info->shutdown_time, 162 + axp20x_pek->info->shutdown_mask, buf, count); 163 + } 164 + 165 + DEVICE_ATTR(startup, 0644, axp20x_show_attr_startup, axp20x_store_attr_startup); 166 + DEVICE_ATTR(shutdown, 0644, axp20x_show_attr_shutdown, 167 + axp20x_store_attr_shutdown); 184 168 185 169 static struct attribute *axp20x_attributes[] = { 186 - &axp20x_dev_attr_startup.attr.attr, 187 - &axp20x_dev_attr_shutdown.attr.attr, 170 + &dev_attr_startup.attr, 171 + &dev_attr_shutdown.attr, 188 172 NULL, 189 173 }; 190 174 ··· 222 180 input_sync(idev); 223 181 224 182 return IRQ_HANDLED; 225 - } 226 - 227 - static void axp20x_remove_sysfs_group(void *_data) 228 - { 229 - struct device *dev = _data; 230 - 231 - sysfs_remove_group(&dev->kobj, &axp20x_attribute_group); 232 183 } 233 184 234 185 static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, ··· 333 298 static int axp20x_pek_probe(struct platform_device *pdev) 334 299 { 335 300 struct axp20x_pek *axp20x_pek; 301 + const struct platform_device_id *match = platform_get_device_id(pdev); 336 302 int error; 303 + 304 + if (!match) { 305 + dev_err(&pdev->dev, "Failed to get platform_device_id\n"); 306 + return -EINVAL; 307 + } 337 308 338 309 axp20x_pek = devm_kzalloc(&pdev->dev, sizeof(struct axp20x_pek), 339 310 GFP_KERNEL); ··· 354 313 return error; 355 314 } 356 315 357 - error = sysfs_create_group(&pdev->dev.kobj, &axp20x_attribute_group); 316 + axp20x_pek->info 
= (struct axp20x_info *)match->driver_data; 317 + 318 + error = devm_device_add_group(&pdev->dev, &axp20x_attribute_group); 358 319 if (error) { 359 320 dev_err(&pdev->dev, "Failed to create sysfs attributes: %d\n", 360 - error); 361 - return error; 362 - } 363 - 364 - error = devm_add_action(&pdev->dev, 365 - axp20x_remove_sysfs_group, &pdev->dev); 366 - if (error) { 367 - axp20x_remove_sysfs_group(&pdev->dev); 368 - dev_err(&pdev->dev, "Failed to add sysfs cleanup action: %d\n", 369 321 error); 370 322 return error; 371 323 } ··· 392 358 #endif 393 359 }; 394 360 361 + static const struct platform_device_id axp_pek_id_match[] = { 362 + { 363 + .name = "axp20x-pek", 364 + .driver_data = (kernel_ulong_t)&axp20x_info, 365 + }, 366 + { 367 + .name = "axp221-pek", 368 + .driver_data = (kernel_ulong_t)&axp221_info, 369 + }, 370 + { /* sentinel */ } 371 + }; 372 + 395 373 static struct platform_driver axp20x_pek_driver = { 396 374 .probe = axp20x_pek_probe, 375 + .id_table = axp_pek_id_match, 397 376 .driver = { 398 377 .name = "axp20x-pek", 399 378 .pm = &axp20x_pek_pm_ops,
+2 -2
drivers/input/misc/ims-pcu.c
··· 1261 1261 return mode; 1262 1262 } 1263 1263 1264 - static struct attribute_group ims_pcu_attr_group = { 1264 + static const struct attribute_group ims_pcu_attr_group = { 1265 1265 .is_visible = ims_pcu_is_attr_visible, 1266 1266 .attrs = ims_pcu_attrs, 1267 1267 }; ··· 1480 1480 NULL 1481 1481 }; 1482 1482 1483 - static struct attribute_group ims_pcu_ofn_attr_group = { 1483 + static const struct attribute_group ims_pcu_ofn_attr_group = { 1484 1484 .name = "ofn", 1485 1485 .attrs = ims_pcu_ofn_attrs, 1486 1486 };
+1 -1
drivers/input/misc/keyspan_remote.c
··· 85 85 }; 86 86 87 87 /* table of devices that work with this driver */ 88 - static struct usb_device_id keyspan_table[] = { 88 + static const struct usb_device_id keyspan_table[] = { 89 89 { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) }, 90 90 { } /* Terminating entry */ 91 91 };
+11 -6
drivers/input/misc/pcspkr.c
··· 18 18 #include <linux/input.h> 19 19 #include <linux/platform_device.h> 20 20 #include <linux/timex.h> 21 - #include <asm/io.h> 21 + #include <linux/io.h> 22 22 23 23 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); 24 24 MODULE_DESCRIPTION("PC Speaker beeper driver"); 25 25 MODULE_LICENSE("GPL"); 26 26 MODULE_ALIAS("platform:pcspkr"); 27 27 28 - static int pcspkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) 28 + static int pcspkr_event(struct input_dev *dev, unsigned int type, 29 + unsigned int code, int value) 29 30 { 30 31 unsigned int count = 0; 31 32 unsigned long flags; 32 33 33 34 if (type != EV_SND) 34 - return -1; 35 + return -EINVAL; 35 36 36 37 switch (code) { 37 - case SND_BELL: if (value) value = 1000; 38 - case SND_TONE: break; 39 - default: return -1; 38 + case SND_BELL: 39 + if (value) 40 + value = 1000; 41 + case SND_TONE: 42 + break; 43 + default: 44 + return -EINVAL; 40 45 } 41 46 42 47 if (value > 20 && value < 32767)
+1 -1
drivers/input/misc/powermate.c
··· 432 432 } 433 433 } 434 434 435 - static struct usb_device_id powermate_devices [] = { 435 + static const struct usb_device_id powermate_devices[] = { 436 436 { USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_NEW) }, 437 437 { USB_DEVICE(POWERMATE_VENDOR, POWERMATE_PRODUCT_OLD) }, 438 438 { USB_DEVICE(CONTOUR_VENDOR, CONTOUR_JOG) },
+111
drivers/input/misc/rk805-pwrkey.c
··· 1 + /* 2 + * Rockchip RK805 PMIC Power Key driver 3 + * 4 + * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd 5 + * 6 + * Author: Joseph Chen <chenjh@rock-chips.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License as published by the 10 + * Free Software Foundation; either version 2 of the License, or (at your 11 + * option) any later version. 12 + */ 13 + 14 + #include <linux/errno.h> 15 + #include <linux/init.h> 16 + #include <linux/input.h> 17 + #include <linux/interrupt.h> 18 + #include <linux/kernel.h> 19 + #include <linux/module.h> 20 + #include <linux/platform_device.h> 21 + 22 + static irqreturn_t pwrkey_fall_irq(int irq, void *_pwr) 23 + { 24 + struct input_dev *pwr = _pwr; 25 + 26 + input_report_key(pwr, KEY_POWER, 1); 27 + input_sync(pwr); 28 + 29 + return IRQ_HANDLED; 30 + } 31 + 32 + static irqreturn_t pwrkey_rise_irq(int irq, void *_pwr) 33 + { 34 + struct input_dev *pwr = _pwr; 35 + 36 + input_report_key(pwr, KEY_POWER, 0); 37 + input_sync(pwr); 38 + 39 + return IRQ_HANDLED; 40 + } 41 + 42 + static int rk805_pwrkey_probe(struct platform_device *pdev) 43 + { 44 + struct input_dev *pwr; 45 + int fall_irq, rise_irq; 46 + int err; 47 + 48 + pwr = devm_input_allocate_device(&pdev->dev); 49 + if (!pwr) { 50 + dev_err(&pdev->dev, "Can't allocate power button\n"); 51 + return -ENOMEM; 52 + } 53 + 54 + pwr->name = "rk805 pwrkey"; 55 + pwr->phys = "rk805_pwrkey/input0"; 56 + pwr->id.bustype = BUS_HOST; 57 + input_set_capability(pwr, EV_KEY, KEY_POWER); 58 + 59 + fall_irq = platform_get_irq(pdev, 0); 60 + if (fall_irq < 0) { 61 + dev_err(&pdev->dev, "Can't get fall irq: %d\n", fall_irq); 62 + return fall_irq; 63 + } 64 + 65 + rise_irq = platform_get_irq(pdev, 1); 66 + if (rise_irq < 0) { 67 + dev_err(&pdev->dev, "Can't get rise irq: %d\n", rise_irq); 68 + return rise_irq; 69 + } 70 + 71 + err = devm_request_any_context_irq(&pwr->dev, fall_irq, 72 + pwrkey_fall_irq, 73 + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 74 + "rk805_pwrkey_fall", pwr); 75 + if (err < 0) { 76 + dev_err(&pdev->dev, "Can't register fall irq: %d\n", err); 77 + return err; 78 + } 79 + 80 + err = devm_request_any_context_irq(&pwr->dev, rise_irq, 81 + pwrkey_rise_irq, 82 + IRQF_TRIGGER_RISING | IRQF_ONESHOT, 83 + "rk805_pwrkey_rise", pwr); 84 + if (err < 0) { 85 + dev_err(&pdev->dev, "Can't register rise irq: %d\n", err); 86 + return err; 87 + } 88 + 89 + err = input_register_device(pwr); 90 + if (err) { 91 + dev_err(&pdev->dev, "Can't register power button: %d\n", err); 92 + return err; 93 + } 94 + 95 + platform_set_drvdata(pdev, pwr); 96 + device_init_wakeup(&pdev->dev, true); 97 + 98 + return 0; 99 + } 100 + 101 + static struct platform_driver rk805_pwrkey_driver = { 102 + .probe = rk805_pwrkey_probe, 103 + .driver = { 104 + .name = "rk805-pwrkey", 105 + }, 106 + }; 107 + module_platform_driver(rk805_pwrkey_driver); 108 + 109 + MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>"); 110 + MODULE_DESCRIPTION("RK805 PMIC Power Key driver"); 111 + MODULE_LICENSE("GPL");
+4 -1
drivers/input/misc/xen-kbdfront.c
··· 84 84 struct xenkbd_key *key) 85 85 { 86 86 struct input_dev *dev; 87 + int value = key->pressed; 87 88 88 89 if (test_bit(key->keycode, info->ptr->keybit)) { 89 90 dev = info->ptr; 90 91 } else if (test_bit(key->keycode, info->kbd->keybit)) { 91 92 dev = info->kbd; 93 + if (key->pressed && test_bit(key->keycode, info->kbd->key)) 94 + value = 2; /* Mark as autorepeat */ 92 95 } else { 93 96 pr_warn("unhandled keycode 0x%x\n", key->keycode); 94 97 return; 95 98 } 96 99 97 - input_report_key(dev, key->keycode, key->pressed); 100 + input_event(dev, EV_KEY, key->keycode, value); 98 101 input_sync(dev); 99 102 } 100 103
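For context on the value passed to input_event() above: the input core treats an EV_KEY value of 0 as release, 1 as press, and 2 as autorepeat, which is what the hunk reports when a key that is already down is pressed again. A minimal sketch of that mapping:

#include <assert.h>
#include <stdbool.h>

/* 0 = release, 1 = press, 2 = autorepeat (EV_KEY value convention). */
static int key_event_value(bool pressed, bool already_down)
{
	if (!pressed)
		return 0;
	return already_down ? 2 : 1;
}

int main(void)
{
	assert(key_event_value(true, true) == 2);	/* repeat of a held key */
	return 0;
}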
+1 -1
drivers/input/misc/yealink.c
··· 798 798 NULL 799 799 }; 800 800 801 - static struct attribute_group yld_attr_group = { 801 + static const struct attribute_group yld_attr_group = { 802 802 .attrs = yld_attributes 803 803 }; 804 804
+1 -1
drivers/input/mouse/appletouch.c
··· 125 125 * According to Info.plist Geyser IV is the same as Geyser III.) 126 126 */ 127 127 128 - static struct usb_device_id atp_table[] = { 128 + static const struct usb_device_id atp_table[] = { 129 129 /* PowerBooks Feb 2005, iBooks G4 */ 130 130 ATP_DEVICE(0x020e, fountain_info), /* FOUNTAIN ANSI */ 131 131 ATP_DEVICE(0x020f, fountain_info), /* FOUNTAIN ISO */
+1 -1
drivers/input/mouse/byd.c
··· 344 344 u8 param[4]; 345 345 size_t i; 346 346 347 - const struct { 347 + static const struct { 348 348 u16 command; 349 349 u8 arg; 350 350 } seq[] = {
+1 -1
drivers/input/mouse/elan_i2c.h
··· 58 58 59 59 int (*get_version)(struct i2c_client *client, bool iap, u8 *version); 60 60 int (*get_sm_version)(struct i2c_client *client, 61 - u16 *ic_type, u8 *version); 61 + u16 *ic_type, u8 *version, u8 *clickpad); 62 62 int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); 63 63 int (*get_product_id)(struct i2c_client *client, u16 *id); 64 64
+7 -2
drivers/input/mouse/elan_i2c_core.c
··· 95 95 u8 min_baseline; 96 96 u8 max_baseline; 97 97 bool baseline_ready; 98 + u8 clickpad; 98 99 }; 99 100 100 101 static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count, ··· 214 213 return error; 215 214 216 215 error = data->ops->get_sm_version(data->client, &data->ic_type, 217 - &data->sm_version); 216 + &data->sm_version, &data->clickpad); 218 217 if (error) 219 218 return error; 220 219 ··· 924 923 } 925 924 926 925 input_report_key(input, BTN_LEFT, tp_info & 0x01); 926 + input_report_key(input, BTN_RIGHT, tp_info & 0x02); 927 927 input_report_abs(input, ABS_DISTANCE, hover_event != 0); 928 928 input_mt_report_pointer_emulation(input, true); 929 929 input_sync(input); ··· 993 991 994 992 __set_bit(EV_ABS, input->evbit); 995 993 __set_bit(INPUT_PROP_POINTER, input->propbit); 996 - __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); 994 + if (data->clickpad) 995 + __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); 996 + else 997 + __set_bit(BTN_RIGHT, input->keybit); 997 998 __set_bit(BTN_LEFT, input->keybit); 998 999 999 1000 /* Set up ST parameters */
+12 -1
drivers/input/mouse/elan_i2c_i2c.c
··· 288 288 } 289 289 290 290 static int elan_i2c_get_sm_version(struct i2c_client *client, 291 - u16 *ic_type, u8 *version) 291 + u16 *ic_type, u8 *version, 292 + u8 *clickpad) 292 293 { 293 294 int error; 294 295 u8 pattern_ver; ··· 318 317 return error; 319 318 } 320 319 *version = val[1]; 320 + *clickpad = val[0] & 0x10; 321 321 } else { 322 322 error = elan_i2c_read_cmd(client, ETP_I2C_OSM_VERSION_CMD, val); 323 323 if (error) { ··· 328 326 } 329 327 *version = val[0]; 330 328 *ic_type = val[1]; 329 + 330 + error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD, 331 + val); 332 + if (error) { 333 + dev_err(&client->dev, "failed to get SM version: %d\n", 334 + error); 335 + return error; 336 + } 337 + *clickpad = val[0] & 0x10; 331 338 } 332 339 333 340 return 0;
+3 -1
drivers/input/mouse/elan_i2c_smbus.c
··· 166 166 } 167 167 168 168 static int elan_smbus_get_sm_version(struct i2c_client *client, 169 - u16 *ic_type, u8 *version) 169 + u16 *ic_type, u8 *version, 170 + u8 *clickpad) 170 171 { 171 172 int error; 172 173 u8 val[3]; ··· 181 180 182 181 *version = val[0]; 183 182 *ic_type = val[1]; 183 + *clickpad = val[0] & 0x10; 184 184 return 0; 185 185 } 186 186
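The three elan hunks above plumb a clickpad flag out of the SM/NSM version query so the core can tell clickpads (one physical button under the pad, advertised as INPUT_PROP_BUTTONPAD) from touchpads with a discrete right button (BTN_RIGHT). A trivial sketch of that split, reusing the 0x10 test from the bus-layer hunks:

#include <stdbool.h>
#include <stdio.h>

static bool is_clickpad(unsigned char sm_byte)
{
	return sm_byte & 0x10;	/* clickpad bit tested by the new code */
}

int main(void)
{
	printf("%s\n", is_clickpad(0x10) ? "BUTTONPAD" : "BTN_RIGHT");
	return 0;
}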
+1 -1
drivers/input/mouse/elantech.c
··· 1377 1377 NULL 1378 1378 }; 1379 1379 1380 - static struct attribute_group elantech_attr_group = { 1380 + static const struct attribute_group elantech_attr_group = { 1381 1381 .attrs = elantech_attrs, 1382 1382 }; 1383 1383
+1 -1
drivers/input/mouse/psmouse-base.c
··· 101 101 NULL 102 102 }; 103 103 104 - static struct attribute_group psmouse_attribute_group = { 104 + static const struct attribute_group psmouse_attribute_group = { 105 105 .attrs = psmouse_attributes, 106 106 }; 107 107
+1 -1
drivers/input/mouse/synaptics_usb.c
··· 525 525 return synusb_resume(intf); 526 526 } 527 527 528 - static struct usb_device_id synusb_idtable[] = { 528 + static const struct usb_device_id synusb_idtable[] = { 529 529 { USB_DEVICE_SYNAPTICS(TP, SYNUSB_TOUCHPAD) }, 530 530 { USB_DEVICE_SYNAPTICS(INT_TP, SYNUSB_TOUCHPAD) }, 531 531 { USB_DEVICE_SYNAPTICS(CPAD,
+34 -28
drivers/input/mousedev.c
··· 15 15 #define MOUSEDEV_MINORS 31 16 16 #define MOUSEDEV_MIX 63 17 17 18 + #include <linux/bitops.h> 18 19 #include <linux/sched.h> 19 20 #include <linux/slab.h> 20 21 #include <linux/poll.h> ··· 104 103 spinlock_t packet_lock; 105 104 int pos_x, pos_y; 106 105 107 - signed char ps2[6]; 106 + u8 ps2[6]; 108 107 unsigned char ready, buffer, bufsiz; 109 108 unsigned char imexseq, impsseq; 110 109 enum mousedev_emul mode; ··· 292 291 } 293 292 294 293 client->pos_x += packet->dx; 295 - client->pos_x = client->pos_x < 0 ? 296 - 0 : (client->pos_x >= xres ? xres : client->pos_x); 294 + client->pos_x = clamp_val(client->pos_x, 0, xres); 295 + 297 296 client->pos_y += packet->dy; 298 - client->pos_y = client->pos_y < 0 ? 299 - 0 : (client->pos_y >= yres ? yres : client->pos_y); 297 + client->pos_y = clamp_val(client->pos_y, 0, yres); 300 298 301 299 p->dx += packet->dx; 302 300 p->dy += packet->dy; ··· 571 571 return error; 572 572 } 573 573 574 - static inline int mousedev_limit_delta(int delta, int limit) 575 - { 576 - return delta > limit ? limit : (delta < -limit ? -limit : delta); 577 - } 578 - 579 - static void mousedev_packet(struct mousedev_client *client, 580 - signed char *ps2_data) 574 + static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data) 581 575 { 582 576 struct mousedev_motion *p = &client->packets[client->tail]; 577 + s8 dx, dy, dz; 583 578 584 - ps2_data[0] = 0x08 | 585 - ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07); 586 - ps2_data[1] = mousedev_limit_delta(p->dx, 127); 587 - ps2_data[2] = mousedev_limit_delta(p->dy, 127); 588 - p->dx -= ps2_data[1]; 589 - p->dy -= ps2_data[2]; 579 + dx = clamp_val(p->dx, -127, 127); 580 + p->dx -= dx; 581 + 582 + dy = clamp_val(p->dy, -127, 127); 583 + p->dy -= dy; 584 + 585 + ps2_data[0] = BIT(3); 586 + ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2); 587 + ps2_data[0] |= p->buttons & 0x07; 588 + ps2_data[1] = dx; 589 + ps2_data[2] = dy; 590 590 591 591 switch (client->mode) { 592 592 case MOUSEDEV_EMUL_EXPS: 593 - ps2_data[3] = mousedev_limit_delta(p->dz, 7); 594 - p->dz -= ps2_data[3]; 595 - ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1); 593 + dz = clamp_val(p->dz, -7, 7); 594 + p->dz -= dz; 595 + 596 + ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1); 596 597 client->bufsiz = 4; 597 598 break; 598 599 599 600 case MOUSEDEV_EMUL_IMPS: 600 - ps2_data[0] |= 601 - ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); 602 - ps2_data[3] = mousedev_limit_delta(p->dz, 127); 603 - p->dz -= ps2_data[3]; 601 + dz = clamp_val(p->dz, -127, 127); 602 + p->dz -= dz; 603 + 604 + ps2_data[0] |= ((p->buttons & 0x10) >> 3) | 605 + ((p->buttons & 0x08) >> 1); 606 + ps2_data[3] = dz; 607 + 604 608 client->bufsiz = 4; 605 609 break; 606 610 607 611 case MOUSEDEV_EMUL_PS2: 608 612 default: 609 - ps2_data[0] |= 610 - ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); 611 613 p->dz = 0; 614 + 615 + ps2_data[0] |= ((p->buttons & 0x10) >> 3) | 616 + ((p->buttons & 0x08) >> 1); 617 + 612 618 client->bufsiz = 3; 613 619 break; 614 620 } ··· 720 714 { 721 715 struct mousedev_client *client = file->private_data; 722 716 struct mousedev *mousedev = client->mousedev; 723 - signed char data[sizeof(client->ps2)]; 717 + u8 data[sizeof(client->ps2)]; 724 718 int retval = 0; 725 719 726 720 if (!client->ready && !client->buffer && mousedev->exist &&
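The mousedev rewrite above switches the packet buffer to u8, replaces the open-coded delta limiter with clamp_val(), and derives the sign bits from the clamped deltas. A self-contained sketch of packing a bare 3-byte PS/2 report the same way (userspace stand-in, not the kernel helpers):

#include <stdint.h>
#include <stdio.h>

static int8_t clamp_s8(int v)
{
	return v > 127 ? 127 : (v < -127 ? -127 : v);
}

static void pack_ps2(uint8_t out[3], int dx, int dy, unsigned int buttons)
{
	int8_t cx = clamp_s8(dx), cy = clamp_s8(dy);

	out[0] = 1u << 3;					/* always-set bit 3 */
	out[0] |= ((cx & 0x80) >> 3) | ((cy & 0x80) >> 2);	/* X/Y sign bits */
	out[0] |= buttons & 0x07;				/* L/R/M buttons */
	out[1] = (uint8_t)cx;
	out[2] = (uint8_t)cy;
}

int main(void)
{
	uint8_t p[3];

	pack_ps2(p, -300, 5, 0x1);	/* dx clamped to -127 */
	printf("%02x %02x %02x\n", p[0], p[1], p[2]);
	return 0;
}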
+4 -9
drivers/input/rmi4/rmi_f01.c
··· 334 334 NULL 335 335 }; 336 336 337 - static struct attribute_group rmi_f01_attr_group = { 337 + static const struct attribute_group rmi_f01_attr_group = { 338 338 .attrs = rmi_f01_attrs, 339 339 }; 340 340 ··· 570 570 571 571 dev_set_drvdata(&fn->dev, f01); 572 572 573 - error = sysfs_create_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group); 573 + error = devm_device_add_group(&fn->rmi_dev->dev, &rmi_f01_attr_group); 574 574 if (error) 575 - dev_warn(&fn->dev, "Failed to create sysfs group: %d\n", error); 575 + dev_warn(&fn->dev, 576 + "Failed to create attribute group: %d\n", error); 576 577 577 578 return 0; 578 - } 579 - 580 - static void rmi_f01_remove(struct rmi_function *fn) 581 - { 582 - sysfs_remove_group(&fn->rmi_dev->dev.kobj, &rmi_f01_attr_group); 583 579 } 584 580 585 581 static int rmi_f01_config(struct rmi_function *fn) ··· 717 721 }, 718 722 .func = 0x01, 719 723 .probe = rmi_f01_probe, 720 - .remove = rmi_f01_remove, 721 724 .config = rmi_f01_config, 722 725 .attention = rmi_f01_attention, 723 726 .suspend = rmi_f01_suspend,
+1 -1
drivers/input/rmi4/rmi_f34.c
··· 516 516 NULL 517 517 }; 518 518 519 - static struct attribute_group rmi_firmware_attr_group = { 519 + static const struct attribute_group rmi_firmware_attr_group = { 520 520 .attrs = rmi_firmware_attrs, 521 521 }; 522 522
+11
drivers/input/serio/Kconfig
··· 292 292 To compile this driver as a module, choose M here: the 293 293 module will be called sun4i-ps2. 294 294 295 + config SERIO_GPIO_PS2 296 + tristate "GPIO PS/2 bit banging driver" 297 + depends on GPIOLIB 298 + help 299 + Say Y here if you want PS/2 bit banging support via GPIO. 300 + 301 + To compile this driver as a module, choose M here: the 302 + module will be called ps2-gpio. 303 + 304 + If you are unsure, say N. 305 + 295 306 config USERIO 296 307 tristate "User space serio port driver support" 297 308 help
+1
drivers/input/serio/Makefile
··· 30 30 obj-$(CONFIG_SERIO_OLPC_APSP) += olpc_apsp.o 31 31 obj-$(CONFIG_HYPERV_KEYBOARD) += hyperv-keyboard.o 32 32 obj-$(CONFIG_SERIO_SUN4I_PS2) += sun4i-ps2.o 33 + obj-$(CONFIG_SERIO_GPIO_PS2) += ps2-gpio.o 33 34 obj-$(CONFIG_USERIO) += userio.o
+1 -1
drivers/input/serio/ambakmi.c
··· 187 187 188 188 static SIMPLE_DEV_PM_OPS(amba_kmi_dev_pm_ops, NULL, amba_kmi_resume); 189 189 190 - static struct amba_id amba_kmi_idtable[] = { 190 + static const struct amba_id amba_kmi_idtable[] = { 191 191 { 192 192 .id = 0x00041050, 193 193 .mask = 0x000fffff,
+2 -2
drivers/input/serio/i8042-x86ia64io.h
··· 927 927 return 0; 928 928 } 929 929 930 - static struct pnp_device_id pnp_kbd_devids[] = { 930 + static const struct pnp_device_id pnp_kbd_devids[] = { 931 931 { .id = "PNP0300", .driver_data = 0 }, 932 932 { .id = "PNP0301", .driver_data = 0 }, 933 933 { .id = "PNP0302", .driver_data = 0 }, ··· 957 957 }, 958 958 }; 959 959 960 - static struct pnp_device_id pnp_aux_devids[] = { 960 + static const struct pnp_device_id pnp_aux_devids[] = { 961 961 { .id = "AUI0200", .driver_data = 0 }, 962 962 { .id = "FJC6000", .driver_data = 0 }, 963 963 { .id = "FJC6001", .driver_data = 0 },
+453
drivers/input/serio/ps2-gpio.c
··· 1 + /* 2 + * GPIO based serio bus driver for bit banging the PS/2 protocol 3 + * 4 + * Author: Danilo Krummrich <danilokrummrich@dk-develop.de> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #include <linux/gpio/consumer.h> 12 + #include <linux/interrupt.h> 13 + #include <linux/module.h> 14 + #include <linux/serio.h> 15 + #include <linux/slab.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/workqueue.h> 18 + #include <linux/completion.h> 19 + #include <linux/mutex.h> 20 + #include <linux/preempt.h> 21 + #include <linux/property.h> 22 + #include <linux/of.h> 23 + #include <linux/jiffies.h> 24 + #include <linux/delay.h> 25 + 26 + #define DRIVER_NAME "ps2-gpio" 27 + 28 + #define PS2_MODE_RX 0 29 + #define PS2_MODE_TX 1 30 + 31 + #define PS2_START_BIT 0 32 + #define PS2_DATA_BIT0 1 33 + #define PS2_DATA_BIT1 2 34 + #define PS2_DATA_BIT2 3 35 + #define PS2_DATA_BIT3 4 36 + #define PS2_DATA_BIT4 5 37 + #define PS2_DATA_BIT5 6 38 + #define PS2_DATA_BIT6 7 39 + #define PS2_DATA_BIT7 8 40 + #define PS2_PARITY_BIT 9 41 + #define PS2_STOP_BIT 10 42 + #define PS2_TX_TIMEOUT 11 43 + #define PS2_ACK_BIT 12 44 + 45 + #define PS2_DEV_RET_ACK 0xfa 46 + #define PS2_DEV_RET_NACK 0xfe 47 + 48 + #define PS2_CMD_RESEND 0xfe 49 + 50 + struct ps2_gpio_data { 51 + struct device *dev; 52 + struct serio *serio; 53 + unsigned char mode; 54 + struct gpio_desc *gpio_clk; 55 + struct gpio_desc *gpio_data; 56 + bool write_enable; 57 + int irq; 58 + unsigned char rx_cnt; 59 + unsigned char rx_byte; 60 + unsigned char tx_cnt; 61 + unsigned char tx_byte; 62 + struct completion tx_done; 63 + struct mutex tx_mutex; 64 + struct delayed_work tx_work; 65 + }; 66 + 67 + static int ps2_gpio_open(struct serio *serio) 68 + { 69 + struct ps2_gpio_data *drvdata = serio->port_data; 70 + 71 + enable_irq(drvdata->irq); 72 + return 0; 73 + } 74 + 75 + static void ps2_gpio_close(struct serio *serio) 76 + { 77 + struct ps2_gpio_data *drvdata = serio->port_data; 78 + 79 + disable_irq(drvdata->irq); 80 + } 81 + 82 + static int __ps2_gpio_write(struct serio *serio, unsigned char val) 83 + { 84 + struct ps2_gpio_data *drvdata = serio->port_data; 85 + 86 + disable_irq_nosync(drvdata->irq); 87 + gpiod_direction_output(drvdata->gpio_clk, 0); 88 + 89 + drvdata->mode = PS2_MODE_TX; 90 + drvdata->tx_byte = val; 91 + 92 + schedule_delayed_work(&drvdata->tx_work, usecs_to_jiffies(200)); 93 + 94 + return 0; 95 + } 96 + 97 + static int ps2_gpio_write(struct serio *serio, unsigned char val) 98 + { 99 + struct ps2_gpio_data *drvdata = serio->port_data; 100 + int ret = 0; 101 + 102 + if (in_task()) { 103 + mutex_lock(&drvdata->tx_mutex); 104 + __ps2_gpio_write(serio, val); 105 + if (!wait_for_completion_timeout(&drvdata->tx_done, 106 + msecs_to_jiffies(10000))) 107 + ret = SERIO_TIMEOUT; 108 + mutex_unlock(&drvdata->tx_mutex); 109 + } else { 110 + __ps2_gpio_write(serio, val); 111 + } 112 + 113 + return ret; 114 + } 115 + 116 + static void ps2_gpio_tx_work_fn(struct work_struct *work) 117 + { 118 + struct delayed_work *dwork = to_delayed_work(work); 119 + struct ps2_gpio_data *drvdata = container_of(dwork, 120 + struct ps2_gpio_data, 121 + tx_work); 122 + 123 + enable_irq(drvdata->irq); 124 + gpiod_direction_output(drvdata->gpio_data, 0); 125 + gpiod_direction_input(drvdata->gpio_clk); 126 + } 127 + 128 + static irqreturn_t ps2_gpio_irq_rx(struct ps2_gpio_data *drvdata) 
129 + { 130 + unsigned char byte, cnt; 131 + int data; 132 + int rxflags = 0; 133 + static unsigned long old_jiffies; 134 + 135 + byte = drvdata->rx_byte; 136 + cnt = drvdata->rx_cnt; 137 + 138 + if (old_jiffies == 0) 139 + old_jiffies = jiffies; 140 + 141 + if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) { 142 + dev_err(drvdata->dev, 143 + "RX: timeout, probably we missed an interrupt\n"); 144 + goto err; 145 + } 146 + old_jiffies = jiffies; 147 + 148 + data = gpiod_get_value(drvdata->gpio_data); 149 + if (unlikely(data < 0)) { 150 + dev_err(drvdata->dev, "RX: failed to get data gpio val: %d\n", 151 + data); 152 + goto err; 153 + } 154 + 155 + switch (cnt) { 156 + case PS2_START_BIT: 157 + /* start bit should be low */ 158 + if (unlikely(data)) { 159 + dev_err(drvdata->dev, "RX: start bit should be low\n"); 160 + goto err; 161 + } 162 + break; 163 + case PS2_DATA_BIT0: 164 + case PS2_DATA_BIT1: 165 + case PS2_DATA_BIT2: 166 + case PS2_DATA_BIT3: 167 + case PS2_DATA_BIT4: 168 + case PS2_DATA_BIT5: 169 + case PS2_DATA_BIT6: 170 + case PS2_DATA_BIT7: 171 + /* processing data bits */ 172 + if (data) 173 + byte |= (data << (cnt - 1)); 174 + break; 175 + case PS2_PARITY_BIT: 176 + /* check odd parity */ 177 + if (!((hweight8(byte) & 1) ^ data)) { 178 + rxflags |= SERIO_PARITY; 179 + dev_warn(drvdata->dev, "RX: parity error\n"); 180 + if (!drvdata->write_enable) 181 + goto err; 182 + } 183 + 184 + /* Do not send spurious ACK's and NACK's when write fn is 185 + * not provided. 186 + */ 187 + if (!drvdata->write_enable) { 188 + if (byte == PS2_DEV_RET_NACK) 189 + goto err; 190 + else if (byte == PS2_DEV_RET_ACK) 191 + break; 192 + } 193 + 194 + /* Let's send the data without waiting for the stop bit to be 195 + * sent. It may happen that we miss the stop bit. When this 196 + * happens we have no way to recover from this, certainly 197 + * missing the parity bit would be recognized when processing 198 + * the stop bit. When missing both, data is lost. 
199 + */ 200 + serio_interrupt(drvdata->serio, byte, rxflags); 201 + dev_dbg(drvdata->dev, "RX: sending byte 0x%x\n", byte); 202 + break; 203 + case PS2_STOP_BIT: 204 + /* stop bit should be high */ 205 + if (unlikely(!data)) { 206 + dev_err(drvdata->dev, "RX: stop bit should be high\n"); 207 + goto err; 208 + } 209 + cnt = byte = 0; 210 + old_jiffies = 0; 211 + goto end; /* success */ 212 + default: 213 + dev_err(drvdata->dev, "RX: got out of sync with the device\n"); 214 + goto err; 215 + } 216 + 217 + cnt++; 218 + goto end; /* success */ 219 + 220 + err: 221 + cnt = byte = 0; 222 + old_jiffies = 0; 223 + __ps2_gpio_write(drvdata->serio, PS2_CMD_RESEND); 224 + end: 225 + drvdata->rx_cnt = cnt; 226 + drvdata->rx_byte = byte; 227 + return IRQ_HANDLED; 228 + } 229 + 230 + static irqreturn_t ps2_gpio_irq_tx(struct ps2_gpio_data *drvdata) 231 + { 232 + unsigned char byte, cnt; 233 + int data; 234 + static unsigned long old_jiffies; 235 + 236 + cnt = drvdata->tx_cnt; 237 + byte = drvdata->tx_byte; 238 + 239 + if (old_jiffies == 0) 240 + old_jiffies = jiffies; 241 + 242 + if ((jiffies - old_jiffies) > usecs_to_jiffies(100)) { 243 + dev_err(drvdata->dev, 244 + "TX: timeout, probably we missed an interrupt\n"); 245 + goto err; 246 + } 247 + old_jiffies = jiffies; 248 + 249 + switch (cnt) { 250 + case PS2_START_BIT: 251 + /* should never happen */ 252 + dev_err(drvdata->dev, 253 + "TX: start bit should have been sent already\n"); 254 + goto err; 255 + case PS2_DATA_BIT0: 256 + case PS2_DATA_BIT1: 257 + case PS2_DATA_BIT2: 258 + case PS2_DATA_BIT3: 259 + case PS2_DATA_BIT4: 260 + case PS2_DATA_BIT5: 261 + case PS2_DATA_BIT6: 262 + case PS2_DATA_BIT7: 263 + data = byte & BIT(cnt - 1); 264 + gpiod_set_value(drvdata->gpio_data, data); 265 + break; 266 + case PS2_PARITY_BIT: 267 + /* do odd parity */ 268 + data = !(hweight8(byte) & 1); 269 + gpiod_set_value(drvdata->gpio_data, data); 270 + break; 271 + case PS2_STOP_BIT: 272 + /* release data line to generate stop bit */ 273 + gpiod_direction_input(drvdata->gpio_data); 274 + break; 275 + case PS2_TX_TIMEOUT: 276 + /* Devices generate one extra clock pulse before sending the 277 + * acknowledgment. 278 + */ 279 + break; 280 + case PS2_ACK_BIT: 281 + gpiod_direction_input(drvdata->gpio_data); 282 + data = gpiod_get_value(drvdata->gpio_data); 283 + if (data) { 284 + dev_warn(drvdata->dev, "TX: received NACK, retry\n"); 285 + goto err; 286 + } 287 + 288 + drvdata->mode = PS2_MODE_RX; 289 + complete(&drvdata->tx_done); 290 + 291 + cnt = 1; 292 + old_jiffies = 0; 293 + goto end; /* success */ 294 + default: 295 + /* Probably we missed the stop bit. Therefore we release data 296 + * line and try again. 297 + */ 298 + gpiod_direction_input(drvdata->gpio_data); 299 + dev_err(drvdata->dev, "TX: got out of sync with the device\n"); 300 + goto err; 301 + } 302 + 303 + cnt++; 304 + goto end; /* success */ 305 + 306 + err: 307 + cnt = 1; 308 + old_jiffies = 0; 309 + gpiod_direction_input(drvdata->gpio_data); 310 + __ps2_gpio_write(drvdata->serio, drvdata->tx_byte); 311 + end: 312 + drvdata->tx_cnt = cnt; 313 + return IRQ_HANDLED; 314 + } 315 + 316 + static irqreturn_t ps2_gpio_irq(int irq, void *dev_id) 317 + { 318 + struct ps2_gpio_data *drvdata = dev_id; 319 + 320 + return drvdata->mode ? 
ps2_gpio_irq_tx(drvdata) : 321 + ps2_gpio_irq_rx(drvdata); 322 + } 323 + 324 + static int ps2_gpio_get_props(struct device *dev, 325 + struct ps2_gpio_data *drvdata) 326 + { 327 + drvdata->gpio_data = devm_gpiod_get(dev, "data", GPIOD_IN); 328 + if (IS_ERR(drvdata->gpio_data)) { 329 + dev_err(dev, "failed to request data gpio: %ld", 330 + PTR_ERR(drvdata->gpio_data)); 331 + return PTR_ERR(drvdata->gpio_data); 332 + } 333 + 334 + drvdata->gpio_clk = devm_gpiod_get(dev, "clk", GPIOD_IN); 335 + if (IS_ERR(drvdata->gpio_clk)) { 336 + dev_err(dev, "failed to request clock gpio: %ld", 337 + PTR_ERR(drvdata->gpio_clk)); 338 + return PTR_ERR(drvdata->gpio_clk); 339 + } 340 + 341 + drvdata->write_enable = device_property_read_bool(dev, 342 + "write-enable"); 343 + 344 + return 0; 345 + } 346 + 347 + static int ps2_gpio_probe(struct platform_device *pdev) 348 + { 349 + struct ps2_gpio_data *drvdata; 350 + struct serio *serio; 351 + struct device *dev = &pdev->dev; 352 + int error; 353 + 354 + drvdata = devm_kzalloc(dev, sizeof(struct ps2_gpio_data), GFP_KERNEL); 355 + serio = kzalloc(sizeof(struct serio), GFP_KERNEL); 356 + if (!drvdata || !serio) { 357 + error = -ENOMEM; 358 + goto err_free_serio; 359 + } 360 + 361 + error = ps2_gpio_get_props(dev, drvdata); 362 + if (error) 363 + goto err_free_serio; 364 + 365 + if (gpiod_cansleep(drvdata->gpio_data) || 366 + gpiod_cansleep(drvdata->gpio_clk)) { 367 + dev_err(dev, "GPIO data or clk are connected via slow bus\n"); 368 + error = -EINVAL; 369 + } 370 + 371 + drvdata->irq = platform_get_irq(pdev, 0); 372 + if (drvdata->irq < 0) { 373 + dev_err(dev, "failed to get irq from platform resource: %d\n", 374 + drvdata->irq); 375 + error = drvdata->irq; 376 + goto err_free_serio; 377 + } 378 + 379 + error = devm_request_irq(dev, drvdata->irq, ps2_gpio_irq, 380 + IRQF_NO_THREAD, DRIVER_NAME, drvdata); 381 + if (error) { 382 + dev_err(dev, "failed to request irq %d: %d\n", 383 + drvdata->irq, error); 384 + goto err_free_serio; 385 + } 386 + 387 + /* Keep irq disabled until serio->open is called. */ 388 + disable_irq(drvdata->irq); 389 + 390 + serio->id.type = SERIO_8042; 391 + serio->open = ps2_gpio_open; 392 + serio->close = ps2_gpio_close; 393 + /* Write can be enabled in platform/dt data, but possibly it will not 394 + * work because of the tough timings. 395 + */ 396 + serio->write = drvdata->write_enable ? ps2_gpio_write : NULL; 397 + serio->port_data = drvdata; 398 + serio->dev.parent = dev; 399 + strlcpy(serio->name, dev_name(dev), sizeof(serio->name)); 400 + strlcpy(serio->phys, dev_name(dev), sizeof(serio->phys)); 401 + 402 + drvdata->serio = serio; 403 + drvdata->dev = dev; 404 + drvdata->mode = PS2_MODE_RX; 405 + 406 + /* Tx count always starts at 1, as the start bit is sent implicitly by 407 + * host-to-device communication initialization. 
408 + */ 409 + drvdata->tx_cnt = 1; 410 + 411 + INIT_DELAYED_WORK(&drvdata->tx_work, ps2_gpio_tx_work_fn); 412 + init_completion(&drvdata->tx_done); 413 + mutex_init(&drvdata->tx_mutex); 414 + 415 + serio_register_port(serio); 416 + platform_set_drvdata(pdev, drvdata); 417 + 418 + return 0; /* success */ 419 + 420 + err_free_serio: 421 + kfree(serio); 422 + return error; 423 + } 424 + 425 + static int ps2_gpio_remove(struct platform_device *pdev) 426 + { 427 + struct ps2_gpio_data *drvdata = platform_get_drvdata(pdev); 428 + 429 + serio_unregister_port(drvdata->serio); 430 + return 0; 431 + } 432 + 433 + #if defined(CONFIG_OF) 434 + static const struct of_device_id ps2_gpio_match[] = { 435 + { .compatible = "ps2-gpio", }, 436 + { }, 437 + }; 438 + MODULE_DEVICE_TABLE(of, ps2_gpio_match); 439 + #endif 440 + 441 + static struct platform_driver ps2_gpio_driver = { 442 + .probe = ps2_gpio_probe, 443 + .remove = ps2_gpio_remove, 444 + .driver = { 445 + .name = DRIVER_NAME, 446 + .of_match_table = of_match_ptr(ps2_gpio_match), 447 + }, 448 + }; 449 + module_platform_driver(ps2_gpio_driver); 450 + 451 + MODULE_AUTHOR("Danilo Krummrich <danilokrummrich@dk-develop.de>"); 452 + MODULE_DESCRIPTION("GPIO PS2 driver"); 453 + MODULE_LICENSE("GPL v2");
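Both the RX and TX paths of the new bit-banging driver above rely on the PS/2 frame's odd parity: the parity bit makes the count of 1s across the eight data bits plus the parity bit odd. A minimal, self-contained version of the bit the transmitter drives and of the receive-side check (using the compiler popcount builtin in place of hweight8):

#include <assert.h>

/* Parity bit to transmit for a data byte (odd parity). */
static int tx_parity(unsigned char byte)
{
	return !(__builtin_popcount(byte) & 1);
}

/* Receive-side check: data bits plus parity bit must have odd weight. */
static int rx_parity_ok(unsigned char byte, int parity_bit)
{
	return (__builtin_popcount(byte) + parity_bit) & 1;
}

int main(void)
{
	assert(rx_parity_ok(0xfa, tx_parity(0xfa)));	/* ACK byte round-trips */
	return 0;
}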
+2 -2
drivers/input/serio/serio.c
··· 469 469 NULL 470 470 }; 471 471 472 - static struct attribute_group serio_id_attr_group = { 472 + static const struct attribute_group serio_id_attr_group = { 473 473 .name = "id", 474 474 .attrs = serio_device_id_attrs, 475 475 }; ··· 489 489 NULL 490 490 }; 491 491 492 - static struct attribute_group serio_device_attr_group = { 492 + static const struct attribute_group serio_device_attr_group = { 493 493 .attrs = serio_device_attrs, 494 494 }; 495 495
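The serio change above, like the many "const struct serio_device_id", "const struct usb_device_id" and "const struct attribute_group" conversions in the files that follow, is possible because the driver core and sysfs only ever read these tables. A minimal sketch of a read-only sysfs group, using hypothetical names (revision, example_attr_group) rather than anything from the serio code:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t revision_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(revision);

static struct attribute *example_attrs[] = {
	&dev_attr_revision.attr,
	NULL
};

/* The group itself is never written, so it can live in rodata. */
static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

/* registration only needs a const pointer:
 * error = sysfs_create_group(&dev->kobj, &example_attr_group);
 */

sysfs_create_group() and the id_table fields of struct serio_driver and struct usb_driver all take const pointers, which is what lets these tables be constified without further changes.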
+1 -1
drivers/input/serio/serio_raw.c
··· 410 410 serio_set_drvdata(serio, NULL); 411 411 } 412 412 413 - static struct serio_device_id serio_raw_serio_ids[] = { 413 + static const struct serio_device_id serio_raw_serio_ids[] = { 414 414 { 415 415 .type = SERIO_8042, 416 416 .proto = SERIO_ANY,
+8 -4
drivers/input/serio/xilinx_ps2.c
··· 45 45 #define XPS2_STATUS_RX_FULL 0x00000001 /* Receive Full */ 46 46 #define XPS2_STATUS_TX_FULL 0x00000002 /* Transmit Full */ 47 47 48 - /* Bit definitions for ISR/IER registers. Both the registers have the same bit 49 - * definitions and are only defined once. */ 48 + /* 49 + * Bit definitions for ISR/IER registers. Both the registers have the same bit 50 + * definitions and are only defined once. 51 + */ 50 52 #define XPS2_IPIXR_WDT_TOUT 0x00000001 /* Watchdog Timeout Interrupt */ 51 53 #define XPS2_IPIXR_TX_NOACK 0x00000002 /* Transmit No ACK Interrupt */ 52 54 #define XPS2_IPIXR_TX_ACK 0x00000004 /* Transmit ACK (Data) Interrupt */ ··· 294 292 /* Disable all the interrupts, just in case */ 295 293 out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0); 296 294 297 - /* Reset the PS2 device and abort any current transaction, to make sure 298 - * we have the PS2 in a good state */ 295 + /* 296 + * Reset the PS2 device and abort any current transaction, 297 + * to make sure we have the PS2 in a good state. 298 + */ 299 299 out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET); 300 300 301 301 dev_info(dev, "Xilinx PS2 at 0x%08llX mapped to 0x%p, irq=%d\n",
+1 -1
drivers/input/tablet/acecad.c
··· 260 260 kfree(acecad); 261 261 } 262 262 263 - static struct usb_device_id usb_acecad_id_table [] = { 263 + static const struct usb_device_id usb_acecad_id_table[] = { 264 264 { USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_FLAIR), .driver_info = 0 }, 265 265 { USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_302), .driver_info = 1 }, 266 266 { }
+1 -1
drivers/input/tablet/aiptek.c
··· 1676 1676 NULL 1677 1677 }; 1678 1678 1679 - static struct attribute_group aiptek_attribute_group = { 1679 + static const struct attribute_group aiptek_attribute_group = { 1680 1680 .attrs = aiptek_attributes, 1681 1681 }; 1682 1682
+1 -1
drivers/input/tablet/kbtab.c
··· 88 88 __func__, retval); 89 89 } 90 90 91 - static struct usb_device_id kbtab_ids[] = { 91 + static const struct usb_device_id kbtab_ids[] = { 92 92 { USB_DEVICE(USB_VENDOR_ID_KBGEAR, 0x1001), .driver_info = 0 }, 93 93 { } 94 94 };
+1 -1
drivers/input/tablet/wacom_serial4.c
··· 594 594 return err; 595 595 } 596 596 597 - static struct serio_device_id wacom_serio_ids[] = { 597 + static const struct serio_device_id wacom_serio_ids[] = { 598 598 { 599 599 .type = SERIO_RS232, 600 600 .proto = SERIO_WACOM_IV,
+2 -2
drivers/input/touchscreen/ads7846.c
··· 499 499 NULL, 500 500 }; 501 501 502 - static struct attribute_group ads7846_attr_group = { 502 + static const struct attribute_group ads7846_attr_group = { 503 503 .attrs = ads7846_attributes, 504 504 .is_visible = ads7846_is_visible, 505 505 }; ··· 599 599 NULL, 600 600 }; 601 601 602 - static struct attribute_group ads784x_attr_group = { 602 + static const struct attribute_group ads784x_attr_group = { 603 603 .attrs = ads784x_attributes, 604 604 }; 605 605
+31 -15
drivers/input/touchscreen/atmel_mxt_ts.c
··· 28 28 #include <linux/interrupt.h> 29 29 #include <linux/of.h> 30 30 #include <linux/slab.h> 31 + #include <linux/gpio/consumer.h> 31 32 #include <asm/unaligned.h> 32 33 #include <media/v4l2-device.h> 33 34 #include <media/v4l2-ioctl.h> ··· 301 300 u8 multitouch; 302 301 struct t7_config t7_cfg; 303 302 struct mxt_dbg dbg; 303 + struct gpio_desc *reset_gpio; 304 304 305 305 /* Cached parameters from object table */ 306 306 u16 T5_address; ··· 3119 3117 if (IS_ERR(pdata)) 3120 3118 return PTR_ERR(pdata); 3121 3119 3122 - data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL); 3123 - if (!data) { 3124 - dev_err(&client->dev, "Failed to allocate memory\n"); 3120 + data = devm_kzalloc(&client->dev, sizeof(struct mxt_data), GFP_KERNEL); 3121 + if (!data) 3125 3122 return -ENOMEM; 3126 - } 3127 3123 3128 3124 snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0", 3129 3125 client->adapter->nr, client->addr); ··· 3135 3135 init_completion(&data->reset_completion); 3136 3136 init_completion(&data->crc_completion); 3137 3137 3138 - error = request_threaded_irq(client->irq, NULL, mxt_interrupt, 3139 - pdata->irqflags | IRQF_ONESHOT, 3140 - client->name, data); 3138 + data->reset_gpio = devm_gpiod_get_optional(&client->dev, 3139 + "reset", GPIOD_OUT_LOW); 3140 + if (IS_ERR(data->reset_gpio)) { 3141 + error = PTR_ERR(data->reset_gpio); 3142 + dev_err(&client->dev, "Failed to get reset gpio: %d\n", error); 3143 + return error; 3144 + } 3145 + 3146 + error = devm_request_threaded_irq(&client->dev, client->irq, 3147 + NULL, mxt_interrupt, 3148 + pdata->irqflags | IRQF_ONESHOT, 3149 + client->name, data); 3141 3150 if (error) { 3142 3151 dev_err(&client->dev, "Failed to register interrupt\n"); 3143 - goto err_free_mem; 3152 + return error; 3153 + } 3154 + 3155 + if (data->reset_gpio) { 3156 + data->in_bootloader = true; 3157 + msleep(MXT_RESET_TIME); 3158 + reinit_completion(&data->bl_completion); 3159 + gpiod_set_value(data->reset_gpio, 1); 3160 + error = mxt_wait_for_completion(data, &data->bl_completion, 3161 + MXT_RESET_TIMEOUT); 3162 + if (error) 3163 + return error; 3164 + data->in_bootloader = false; 3144 3165 } 3145 3166 3146 3167 disable_irq(client->irq); 3147 3168 3148 3169 error = mxt_initialize(data); 3149 3170 if (error) 3150 - goto err_free_irq; 3171 + return error; 3151 3172 3152 3173 error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group); 3153 3174 if (error) { ··· 3182 3161 err_free_object: 3183 3162 mxt_free_input_device(data); 3184 3163 mxt_free_object_table(data); 3185 - err_free_irq: 3186 - free_irq(client->irq, data); 3187 - err_free_mem: 3188 - kfree(data); 3189 3164 return error; 3190 3165 } 3191 3166 ··· 3189 3172 { 3190 3173 struct mxt_data *data = i2c_get_clientdata(client); 3191 3174 3175 + disable_irq(data->irq); 3192 3176 sysfs_remove_group(&client->dev.kobj, &mxt_attr_group); 3193 - free_irq(data->irq, data); 3194 3177 mxt_free_input_device(data); 3195 3178 mxt_free_object_table(data); 3196 - kfree(data); 3197 3179 3198 3180 return 0; 3199 3181 }
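The atmel_mxt_ts hunk above swaps manual kzalloc()/request_threaded_irq()/free_irq() management for device-managed (devm_*) variants, which is why the error unwinding and the remove() path shrink. A minimal, hypothetical probe sketch of the same pattern; struct example_data, example_irq() and the "reset" GPIO name are illustrative, not the driver's real code:

#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct example_data {
	struct gpio_desc *reset_gpio;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_data *data;
	int error;

	/* Freed automatically when the device is unbound. */
	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Optional pin: returns NULL, not an error, when it is absent. */
	data->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
						   GPIOD_OUT_LOW);
	if (IS_ERR(data->reset_gpio))
		return PTR_ERR(data->reset_gpio);

	/* The IRQ is released by the driver core; no free_irq() needed. */
	error = devm_request_threaded_irq(&client->dev, client->irq,
					  NULL, example_irq, IRQF_ONESHOT,
					  client->name, data);
	if (error)
		return error;

	i2c_set_clientdata(client, data);
	return 0;
}

With every probe() resource devm-managed, the error labels only have to undo work the driver core does not track, which matches the removal of err_free_irq/err_free_mem in the hunk.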
+1 -1
drivers/input/touchscreen/dynapro.c
··· 164 164 * The serio driver structure. 165 165 */ 166 166 167 - static struct serio_device_id dynapro_serio_ids[] = { 167 + static const struct serio_device_id dynapro_serio_ids[] = { 168 168 { 169 169 .type = SERIO_RS232, 170 170 .proto = SERIO_DYNAPRO,
+1 -1
drivers/input/touchscreen/elants_i2c.c
··· 1066 1066 NULL 1067 1067 }; 1068 1068 1069 - static struct attribute_group elants_attribute_group = { 1069 + static const struct attribute_group elants_attribute_group = { 1070 1070 .attrs = elants_attributes, 1071 1071 }; 1072 1072
+1 -1
drivers/input/touchscreen/elo.c
··· 381 381 * The serio driver structure. 382 382 */ 383 383 384 - static struct serio_device_id elo_serio_ids[] = { 384 + static const struct serio_device_id elo_serio_ids[] = { 385 385 { 386 386 .type = SERIO_RS232, 387 387 .proto = SERIO_ELO,
+1 -1
drivers/input/touchscreen/fujitsu_ts.c
··· 151 151 /* 152 152 * The serio driver structure. 153 153 */ 154 - static struct serio_device_id fujitsu_serio_ids[] = { 154 + static const struct serio_device_id fujitsu_serio_ids[] = { 155 155 { 156 156 .type = SERIO_RS232, 157 157 .proto = SERIO_FUJITSU,
+1 -1
drivers/input/touchscreen/gunze.c
··· 162 162 * The serio driver structure. 163 163 */ 164 164 165 - static struct serio_device_id gunze_serio_ids[] = { 165 + static const struct serio_device_id gunze_serio_ids[] = { 166 166 { 167 167 .type = SERIO_RS232, 168 168 .proto = SERIO_GUNZE,
+1 -1
drivers/input/touchscreen/hampshire.c
··· 163 163 * The serio driver structure. 164 164 */ 165 165 166 - static struct serio_device_id hampshire_serio_ids[] = { 166 + static const struct serio_device_id hampshire_serio_ids[] = { 167 167 { 168 168 .type = SERIO_RS232, 169 169 .proto = SERIO_HAMPSHIRE,
+1 -1
drivers/input/touchscreen/inexio.c
··· 165 165 * The serio driver structure. 166 166 */ 167 167 168 - static struct serio_device_id inexio_serio_ids[] = { 168 + static const struct serio_device_id inexio_serio_ids[] = { 169 169 { 170 170 .type = SERIO_RS232, 171 171 .proto = SERIO_INEXIO,
+1 -1
drivers/input/touchscreen/mtouch.c
··· 178 178 * The serio driver structure. 179 179 */ 180 180 181 - static struct serio_device_id mtouch_serio_ids[] = { 181 + static const struct serio_device_id mtouch_serio_ids[] = { 182 182 { 183 183 .type = SERIO_RS232, 184 184 .proto = SERIO_MICROTOUCH,
+5 -3
drivers/input/touchscreen/mxs-lradc-ts.c
··· 30 30 #include <linux/of_irq.h> 31 31 #include <linux/platform_device.h> 32 32 33 - const char *mxs_lradc_ts_irq_names[] = { 33 + static const char * const mxs_lradc_ts_irq_names[] = { 34 34 "mxs-lradc-touchscreen", 35 35 "mxs-lradc-channel6", 36 36 "mxs-lradc-channel7", ··· 630 630 spin_lock_init(&ts->lock); 631 631 632 632 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 633 + if (!iores) 634 + return -EINVAL; 633 635 ts->base = devm_ioremap(dev, iores->start, resource_size(iores)); 634 - if (IS_ERR(ts->base)) 635 - return PTR_ERR(ts->base); 636 + if (!ts->base) 637 + return -ENOMEM; 636 638 637 639 ret = of_property_read_u32(node, "fsl,lradc-touchscreen-wires", 638 640 &ts_wires);
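The mxs-lradc-ts fix matters because devm_ioremap() reports failure with NULL rather than an ERR_PTR value, and platform_get_resource() can itself return NULL. A hypothetical helper showing the two checks (example_map_mmio() is an illustrative name, not driver code):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_map_mmio(struct platform_device *pdev,
			    void __iomem **base)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)		/* the memory resource is missing entirely */
		return -EINVAL;

	*base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!*base)		/* devm_ioremap() signals failure with NULL */
		return -ENOMEM;

	return 0;
}

devm_ioremap_resource(), by contrast, folds both checks together and returns ERR_PTR() codes, which is why mixing the two conventions (as the old code did with IS_ERR on a devm_ioremap() result) goes wrong.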
+1 -1
drivers/input/touchscreen/penmount.c
··· 293 293 * The serio driver structure. 294 294 */ 295 295 296 - static struct serio_device_id pm_serio_ids[] = { 296 + static const struct serio_device_id pm_serio_ids[] = { 297 297 { 298 298 .type = SERIO_RS232, 299 299 .proto = SERIO_PENMOUNT,
+1 -1
drivers/input/touchscreen/raydium_i2c_ts.c
··· 939 939 NULL 940 940 }; 941 941 942 - static struct attribute_group raydium_i2c_attribute_group = { 942 + static const struct attribute_group raydium_i2c_attribute_group = { 943 943 .attrs = raydium_i2c_attributes, 944 944 }; 945 945
+1 -1
drivers/input/touchscreen/sun4i-ts.c
··· 206 206 return sun4i_get_temp(data, temp); 207 207 } 208 208 209 - static struct thermal_zone_of_device_ops sun4i_ts_tz_ops = { 209 + static const struct thermal_zone_of_device_ops sun4i_ts_tz_ops = { 210 210 .get_temp = sun4i_get_tz_temp, 211 211 }; 212 212
+30 -16
drivers/input/touchscreen/sur40.c
··· 59 59 __le16 blob_id; 60 60 61 61 u8 action; /* 0x02 = enter/exit, 0x03 = update (?) */ 62 - u8 unknown; /* always 0x01 or 0x02 (no idea what this is?) */ 62 + u8 type; /* bitmask (0x01 blob, 0x02 touch, 0x04 tag) */ 63 63 64 64 __le16 bb_pos_x; /* upper left corner of bounding box */ 65 65 __le16 bb_pos_y; ··· 133 133 134 134 /* control commands */ 135 135 #define SUR40_GET_VERSION 0xb0 /* 12 bytes string */ 136 - #define SUR40_UNKNOWN1 0xb3 /* 5 bytes */ 137 - #define SUR40_UNKNOWN2 0xc1 /* 24 bytes */ 136 + #define SUR40_ACCEL_CAPS 0xb3 /* 5 bytes */ 137 + #define SUR40_SENSOR_CAPS 0xc1 /* 24 bytes */ 138 + 139 + #define SUR40_POKE 0xc5 /* poke register byte */ 140 + #define SUR40_PEEK 0xc4 /* 48 bytes registers */ 138 141 139 142 #define SUR40_GET_STATE 0xc5 /* 4 bytes state (?) */ 140 143 #define SUR40_GET_SENSORS 0xb1 /* 8 bytes sensors */ 144 + 145 + #define SUR40_BLOB 0x01 146 + #define SUR40_TOUCH 0x02 147 + #define SUR40_TAG 0x04 141 148 142 149 static const struct v4l2_pix_format sur40_pix_format[] = { 143 150 { ··· 245 238 if (result < 0) 246 239 goto error; 247 240 248 - result = sur40_command(dev, SUR40_UNKNOWN2, 0x00, buffer, 24); 241 + result = sur40_command(dev, SUR40_SENSOR_CAPS, 0x00, buffer, 24); 249 242 if (result < 0) 250 243 goto error; 251 244 252 - result = sur40_command(dev, SUR40_UNKNOWN1, 0x00, buffer, 5); 245 + result = sur40_command(dev, SUR40_ACCEL_CAPS, 0x00, buffer, 5); 253 246 if (result < 0) 254 247 goto error; 255 248 ··· 296 289 static void sur40_report_blob(struct sur40_blob *blob, struct input_dev *input) 297 290 { 298 291 int wide, major, minor; 292 + int bb_size_x, bb_size_y, pos_x, pos_y, ctr_x, ctr_y, slotnum; 299 293 300 - int bb_size_x = le16_to_cpu(blob->bb_size_x); 301 - int bb_size_y = le16_to_cpu(blob->bb_size_y); 294 + if (blob->type != SUR40_TOUCH) 295 + return; 302 296 303 - int pos_x = le16_to_cpu(blob->pos_x); 304 - int pos_y = le16_to_cpu(blob->pos_y); 305 - 306 - int ctr_x = le16_to_cpu(blob->ctr_x); 307 - int ctr_y = le16_to_cpu(blob->ctr_y); 308 - 309 - int slotnum = input_mt_get_slot_by_key(input, blob->blob_id); 297 + slotnum = input_mt_get_slot_by_key(input, blob->blob_id); 310 298 if (slotnum < 0 || slotnum >= MAX_CONTACTS) 311 299 return; 300 + 301 + bb_size_x = le16_to_cpu(blob->bb_size_x); 302 + bb_size_y = le16_to_cpu(blob->bb_size_y); 303 + 304 + pos_x = le16_to_cpu(blob->pos_x); 305 + pos_y = le16_to_cpu(blob->pos_y); 306 + 307 + ctr_x = le16_to_cpu(blob->ctr_x); 308 + ctr_y = le16_to_cpu(blob->ctr_y); 312 309 313 310 input_mt_slot(input, slotnum); 314 311 input_mt_report_slot_state(input, MT_TOOL_FINGER, 1); ··· 378 367 /* 379 368 * Sanity check. when video data is also being retrieved, the 380 369 * packet ID will usually increase in the middle of a series 381 - * instead of at the end. 382 - */ 370 + * instead of at the end. However, the data is still consistent, 371 + * so the packet ID is probably just valid for the first packet 372 + * in a series. 373 + 383 374 if (packet_id != le32_to_cpu(header->packet_id)) 384 375 dev_dbg(sur40->dev, "packet ID mismatch\n"); 376 + */ 385 377 386 378 packet_blobs = result / sizeof(struct sur40_blob); 387 379 dev_dbg(sur40->dev, "received %d blobs\n", packet_blobs);
+1 -1
drivers/input/touchscreen/touchit213.c
··· 192 192 * The serio driver structure. 193 193 */ 194 194 195 - static struct serio_device_id touchit213_serio_ids[] = { 195 + static const struct serio_device_id touchit213_serio_ids[] = { 196 196 { 197 197 .type = SERIO_RS232, 198 198 .proto = SERIO_TOUCHIT213,
+1 -1
drivers/input/touchscreen/touchright.c
··· 152 152 * The serio driver structure. 153 153 */ 154 154 155 - static struct serio_device_id tr_serio_ids[] = { 155 + static const struct serio_device_id tr_serio_ids[] = { 156 156 { 157 157 .type = SERIO_RS232, 158 158 .proto = SERIO_TOUCHRIGHT,
+1 -1
drivers/input/touchscreen/touchwin.c
··· 159 159 * The serio driver structure. 160 160 */ 161 161 162 - static struct serio_device_id tw_serio_ids[] = { 162 + static const struct serio_device_id tw_serio_ids[] = { 163 163 { 164 164 .type = SERIO_RS232, 165 165 .proto = SERIO_TOUCHWIN,
+1 -1
drivers/input/touchscreen/tsc40.c
··· 141 141 serio_set_drvdata(serio, NULL); 142 142 } 143 143 144 - static struct serio_device_id tsc_serio_ids[] = { 144 + static const struct serio_device_id tsc_serio_ids[] = { 145 145 { 146 146 .type = SERIO_RS232, 147 147 .proto = SERIO_TSC40,
+1 -1
drivers/input/touchscreen/wacom_w8001.c
··· 681 681 return err; 682 682 } 683 683 684 - static struct serio_device_id w8001_serio_ids[] = { 684 + static const struct serio_device_id w8001_serio_ids[] = { 685 685 { 686 686 .type = SERIO_RS232, 687 687 .proto = SERIO_W8001,
+2 -4
drivers/iommu/amd_iommu.c
··· 3879 3879 u8 vector, u32 dest_apicid, int devid) 3880 3880 { 3881 3881 struct irte_ga *irte = (struct irte_ga *) entry; 3882 - struct iommu_dev_data *dev_data = search_dev_data(devid); 3883 3882 3884 3883 irte->lo.val = 0; 3885 3884 irte->hi.val = 0; 3886 - irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0; 3887 3885 irte->lo.fields_remap.int_type = delivery_mode; 3888 3886 irte->lo.fields_remap.dm = dest_mode; 3889 3887 irte->hi.fields.vector = vector; ··· 3937 3939 struct irte_ga *irte = (struct irte_ga *) entry; 3938 3940 struct iommu_dev_data *dev_data = search_dev_data(devid); 3939 3941 3940 - if (!dev_data || !dev_data->use_vapic) { 3942 + if (!dev_data || !dev_data->use_vapic || 3943 + !irte->lo.fields_remap.guest_mode) { 3941 3944 irte->hi.fields.vector = vector; 3942 3945 irte->lo.fields_remap.destination = dest_apicid; 3943 - irte->lo.fields_remap.guest_mode = 0; 3944 3946 modify_irte_ga(devid, index, irte, NULL); 3945 3947 } 3946 3948 }
+7
drivers/iommu/of_iommu.c
··· 118 118 119 119 ops = iommu_ops_from_fwnode(fwnode); 120 120 if ((ops && !ops->of_xlate) || 121 + !of_device_is_available(iommu_spec->np) || 121 122 (!ops && !of_iommu_driver_present(iommu_spec->np))) 122 123 return NULL; 123 124 ··· 235 234 236 235 if (err) 237 236 ops = ERR_PTR(err); 237 + } 238 + 239 + /* Ignore all other errors apart from EPROBE_DEFER */ 240 + if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) { 241 + dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops)); 242 + ops = NULL; 238 243 } 239 244 240 245 return ops;
+3 -3
drivers/irqchip/irq-mips-gic.c
··· 140 140 } 141 141 142 142 #ifdef CONFIG_CLKSRC_MIPS_GIC 143 - u64 gic_read_count(void) 143 + u64 notrace gic_read_count(void) 144 144 { 145 145 unsigned int hi, hi2, lo; 146 146 ··· 167 167 return bits; 168 168 } 169 169 170 - void gic_write_compare(u64 cnt) 170 + void notrace gic_write_compare(u64 cnt) 171 171 { 172 172 if (mips_cm_is64) { 173 173 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt); ··· 179 179 } 180 180 } 181 181 182 - void gic_write_cpu_compare(u64 cnt, int cpu) 182 + void notrace gic_write_cpu_compare(u64 cnt, int cpu) 183 183 { 184 184 unsigned long flags; 185 185
+1 -1
drivers/irqchip/irq-xtensa-mx.c
··· 142 142 int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) 143 143 { 144 144 struct irq_domain *root_domain = 145 - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, 145 + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, 146 146 &xtensa_mx_irq_domain_ops, 147 147 &xtensa_mx_irq_chip); 148 148 irq_set_default_host(root_domain);
+1 -1
drivers/irqchip/irq-xtensa-pic.c
··· 89 89 int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) 90 90 { 91 91 struct irq_domain *root_domain = 92 - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, 92 + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, 93 93 &xtensa_irq_domain_ops, &xtensa_irq_chip); 94 94 irq_set_default_host(root_domain); 95 95 return 0;
+1 -1
drivers/isdn/i4l/isdn_ppp.c
··· 2364 2364 id); 2365 2365 return NULL; 2366 2366 } else { 2367 - rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); 2367 + rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC); 2368 2368 if (!rs) 2369 2369 return NULL; 2370 2370 rs->state = CCPResetIdle;
+1 -1
drivers/isdn/mISDN/stack.c
··· 75 75 if (sk->sk_state != MISDN_BOUND) 76 76 continue; 77 77 if (!cskb) 78 - cskb = skb_copy(skb, GFP_KERNEL); 78 + cskb = skb_copy(skb, GFP_ATOMIC); 79 79 if (!cskb) { 80 80 printk(KERN_WARNING "%s no skb\n", __func__); 81 81 break;
+2 -2
drivers/leds/leds-bcm6328.c
··· 242 242 243 243 spin_lock_irqsave(lock, flags); 244 244 val = bcm6328_led_read(addr); 245 - val |= (BIT(reg) << (((sel % 4) * 4) + 16)); 245 + val |= (BIT(reg % 4) << (((sel % 4) * 4) + 16)); 246 246 bcm6328_led_write(addr, val); 247 247 spin_unlock_irqrestore(lock, flags); 248 248 } ··· 269 269 270 270 spin_lock_irqsave(lock, flags); 271 271 val = bcm6328_led_read(addr); 272 - val |= (BIT(reg) << ((sel % 4) * 4)); 272 + val |= (BIT(reg % 4) << ((sel % 4) * 4)); 273 273 bcm6328_led_write(addr, val); 274 274 spin_unlock_irqrestore(lock, flags); 275 275 }
-31
drivers/leds/trigger/ledtrig-heartbeat.c
··· 20 20 #include <linux/sched/loadavg.h> 21 21 #include <linux/leds.h> 22 22 #include <linux/reboot.h> 23 - #include <linux/suspend.h> 24 23 #include "../leds.h" 25 24 26 25 static int panic_heartbeats; ··· 162 163 .deactivate = heartbeat_trig_deactivate, 163 164 }; 164 165 165 - static int heartbeat_pm_notifier(struct notifier_block *nb, 166 - unsigned long pm_event, void *unused) 167 - { 168 - int rc; 169 - 170 - switch (pm_event) { 171 - case PM_SUSPEND_PREPARE: 172 - case PM_HIBERNATION_PREPARE: 173 - case PM_RESTORE_PREPARE: 174 - led_trigger_unregister(&heartbeat_led_trigger); 175 - break; 176 - case PM_POST_SUSPEND: 177 - case PM_POST_HIBERNATION: 178 - case PM_POST_RESTORE: 179 - rc = led_trigger_register(&heartbeat_led_trigger); 180 - if (rc) 181 - pr_err("could not re-register heartbeat trigger\n"); 182 - break; 183 - default: 184 - break; 185 - } 186 - return NOTIFY_DONE; 187 - } 188 - 189 166 static int heartbeat_reboot_notifier(struct notifier_block *nb, 190 167 unsigned long code, void *unused) 191 168 { ··· 175 200 panic_heartbeats = 1; 176 201 return NOTIFY_DONE; 177 202 } 178 - 179 - static struct notifier_block heartbeat_pm_nb = { 180 - .notifier_call = heartbeat_pm_notifier, 181 - }; 182 203 183 204 static struct notifier_block heartbeat_reboot_nb = { 184 205 .notifier_call = heartbeat_reboot_notifier, ··· 192 221 atomic_notifier_chain_register(&panic_notifier_list, 193 222 &heartbeat_panic_nb); 194 223 register_reboot_notifier(&heartbeat_reboot_nb); 195 - register_pm_notifier(&heartbeat_pm_nb); 196 224 } 197 225 return rc; 198 226 } 199 227 200 228 static void __exit heartbeat_trig_exit(void) 201 229 { 202 - unregister_pm_notifier(&heartbeat_pm_nb); 203 230 unregister_reboot_notifier(&heartbeat_reboot_nb); 204 231 atomic_notifier_chain_unregister(&panic_notifier_list, 205 232 &heartbeat_panic_nb);
+4 -4
drivers/md/bitmap.c
··· 485 485 pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); 486 486 pr_debug(" version: %d\n", le32_to_cpu(sb->version)); 487 487 pr_debug(" uuid: %08x.%08x.%08x.%08x\n", 488 - *(__u32 *)(sb->uuid+0), 489 - *(__u32 *)(sb->uuid+4), 490 - *(__u32 *)(sb->uuid+8), 491 - *(__u32 *)(sb->uuid+12)); 488 + le32_to_cpu(*(__u32 *)(sb->uuid+0)), 489 + le32_to_cpu(*(__u32 *)(sb->uuid+4)), 490 + le32_to_cpu(*(__u32 *)(sb->uuid+8)), 491 + le32_to_cpu(*(__u32 *)(sb->uuid+12))); 492 492 pr_debug(" events: %llu\n", 493 493 (unsigned long long) le64_to_cpu(sb->events)); 494 494 pr_debug("events cleared: %llu\n",
+1 -1
drivers/md/dm-bufio.c
··· 1334 1334 { 1335 1335 struct dm_io_request io_req = { 1336 1336 .bi_op = REQ_OP_WRITE, 1337 - .bi_op_flags = REQ_PREFLUSH, 1337 + .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, 1338 1338 .mem.type = DM_IO_KMEM, 1339 1339 .mem.ptr.addr = NULL, 1340 1340 .client = c->dm_io,
+18 -24
drivers/md/dm-integrity.c
··· 783 783 for (i = 0; i < commit_sections; i++) 784 784 rw_section_mac(ic, commit_start + i, true); 785 785 } 786 - rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp); 786 + rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, 787 + commit_sections, &io_comp); 787 788 } else { 788 789 unsigned to_end; 789 790 io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); ··· 1105 1104 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) 1106 1105 { 1107 1106 struct bio *bio; 1108 - spin_lock_irq(&ic->endio_wait.lock); 1107 + unsigned long flags; 1108 + 1109 + spin_lock_irqsave(&ic->endio_wait.lock, flags); 1109 1110 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io)); 1110 1111 bio_list_add(&ic->flush_bio_list, bio); 1111 - spin_unlock_irq(&ic->endio_wait.lock); 1112 + spin_unlock_irqrestore(&ic->endio_wait.lock, flags); 1113 + 1112 1114 queue_work(ic->commit_wq, &ic->commit_work); 1113 1115 } 1114 1116 ··· 2378 2374 blk_queue_max_integrity_segments(disk->queue, UINT_MAX); 2379 2375 } 2380 2376 2381 - /* FIXME: use new kvmalloc */ 2382 - static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp) 2383 - { 2384 - void *ptr = NULL; 2385 - 2386 - if (size <= PAGE_SIZE) 2387 - ptr = kmalloc(size, GFP_KERNEL | gfp); 2388 - if (!ptr && size <= KMALLOC_MAX_SIZE) 2389 - ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp); 2390 - if (!ptr) 2391 - ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL); 2392 - 2393 - return ptr; 2394 - } 2395 - 2396 2377 static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) 2397 2378 { 2398 2379 unsigned i; ··· 2396 2407 struct page_list *pl; 2397 2408 unsigned i; 2398 2409 2399 - pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO); 2410 + pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO); 2400 2411 if (!pl) 2401 2412 return NULL; 2402 2413 ··· 2426 2437 struct scatterlist **sl; 2427 2438 unsigned i; 2428 2439 2429 - sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO); 2440 + sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO); 2430 2441 if (!sl) 2431 2442 return NULL; 2432 2443 ··· 2442 2453 2443 2454 n_pages = (end_index - start_index + 1); 2444 2455 2445 - s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0); 2456 + s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL); 2446 2457 if (!s) { 2447 2458 dm_integrity_free_journal_scatterlist(ic, sl); 2448 2459 return NULL; ··· 2606 2617 goto bad; 2607 2618 } 2608 2619 2609 - sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0); 2620 + sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL); 2610 2621 if (!sg) { 2611 2622 *error = "Unable to allocate sg list"; 2612 2623 r = -ENOMEM; ··· 2662 2673 r = -ENOMEM; 2663 2674 goto bad; 2664 2675 } 2665 - ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO); 2676 + ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO); 2666 2677 if (!ic->sk_requests) { 2667 2678 *error = "Unable to allocate sk requests"; 2668 2679 r = -ENOMEM; ··· 2729 2740 r = -ENOMEM; 2730 2741 goto bad; 2731 2742 } 2732 - ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0); 2743 + ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); 2733 2744 if (!ic->journal_tree) { 2734 2745 *error = 
"Could not allocate memory for journal tree"; 2735 2746 r = -ENOMEM; ··· 3041 3052 r = calculate_device_limits(ic); 3042 3053 if (r) { 3043 3054 ti->error = "The device is too small"; 3055 + goto bad; 3056 + } 3057 + if (ti->len > ic->provided_data_sectors) { 3058 + r = -EINVAL; 3059 + ti->error = "Not enough provided sectors for requested mapping size"; 3044 3060 goto bad; 3045 3061 } 3046 3062
+2 -2
drivers/md/dm-io.c
··· 317 317 else if (op == REQ_OP_WRITE_SAME) 318 318 special_cmd_max_sectors = q->limits.max_write_same_sectors; 319 319 if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES || 320 - op == REQ_OP_WRITE_SAME) && 321 - special_cmd_max_sectors == 0) { 320 + op == REQ_OP_WRITE_SAME) && special_cmd_max_sectors == 0) { 321 + atomic_inc(&io->count); 322 322 dec_count(io, region, -EOPNOTSUPP); 323 323 return; 324 324 }
+3 -2
drivers/md/dm-ioctl.c
··· 1710 1710 } 1711 1711 1712 1712 /* 1713 - * Try to avoid low memory issues when a device is suspended. 1713 + * Use __GFP_HIGH to avoid low memory issues when a device is 1714 + * suspended and the ioctl is needed to resume it. 1714 1715 * Use kmalloc() rather than vmalloc() when we can. 1715 1716 */ 1716 1717 dmi = NULL; 1717 1718 noio_flag = memalloc_noio_save(); 1718 - dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL); 1719 + dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH); 1719 1720 memalloc_noio_restore(noio_flag); 1720 1721 1721 1722 if (!dmi) {
+14 -3
drivers/md/dm-raid.c
··· 1927 1927 /******************************************************************** 1928 1928 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! 1929 1929 * 1930 - * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist 1930 + * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist 1931 1931 */ 1932 1932 1933 1933 __le32 flags; /* Flags defining array states for reshaping */ ··· 2092 2092 sb->layout = cpu_to_le32(mddev->layout); 2093 2093 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); 2094 2094 2095 + /******************************************************************** 2096 + * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! 2097 + * 2098 + * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist 2099 + */ 2095 2100 sb->new_level = cpu_to_le32(mddev->new_level); 2096 2101 sb->new_layout = cpu_to_le32(mddev->new_layout); 2097 2102 sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); ··· 2443 2438 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; 2444 2439 2445 2440 if (!test_and_clear_bit(FirstUse, &rdev->flags)) { 2446 - /* Retrieve device size stored in superblock to be prepared for shrink */ 2447 - rdev->sectors = le64_to_cpu(sb->sectors); 2441 + /* 2442 + * Retrieve rdev size stored in superblock to be prepared for shrink. 2443 + * Check extended superblock members are present otherwise the size 2444 + * will not be set! 2445 + */ 2446 + if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) 2447 + rdev->sectors = le64_to_cpu(sb->sectors); 2448 + 2448 2449 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); 2449 2450 if (rdev->recovery_offset == MaxSector) 2450 2451 set_bit(In_sync, &rdev->flags);
+20 -3
drivers/md/dm-raid1.c
··· 145 145 146 146 struct dm_raid1_bio_record { 147 147 struct mirror *m; 148 + /* if details->bi_bdev == NULL, details were not saved */ 148 149 struct dm_bio_details details; 149 150 region_t write_region; 150 151 }; ··· 261 260 struct mirror *m; 262 261 struct dm_io_request io_req = { 263 262 .bi_op = REQ_OP_WRITE, 264 - .bi_op_flags = REQ_PREFLUSH, 263 + .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, 265 264 .mem.type = DM_IO_KMEM, 266 265 .mem.ptr.addr = NULL, 267 266 .client = ms->io_client, ··· 1199 1198 struct dm_raid1_bio_record *bio_record = 1200 1199 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); 1201 1200 1201 + bio_record->details.bi_bdev = NULL; 1202 + 1202 1203 if (rw == WRITE) { 1203 1204 /* Save region for mirror_end_io() handler */ 1204 1205 bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio); ··· 1259 1256 } 1260 1257 1261 1258 if (error == -EOPNOTSUPP) 1262 - return error; 1259 + goto out; 1263 1260 1264 1261 if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD)) 1265 - return error; 1262 + goto out; 1266 1263 1267 1264 if (unlikely(error)) { 1265 + if (!bio_record->details.bi_bdev) { 1266 + /* 1267 + * There wasn't enough memory to record necessary 1268 + * information for a retry or there was no other 1269 + * mirror in-sync. 1270 + */ 1271 + DMERR_LIMIT("Mirror read failed."); 1272 + return -EIO; 1273 + } 1274 + 1268 1275 m = bio_record->m; 1269 1276 1270 1277 DMERR("Mirror read failed from %s. Trying alternative device.", ··· 1290 1277 bd = &bio_record->details; 1291 1278 1292 1279 dm_bio_restore(bd, bio); 1280 + bio_record->details.bi_bdev = NULL; 1293 1281 bio->bi_error = 0; 1294 1282 1295 1283 queue_bio(ms, bio, rw); ··· 1298 1284 } 1299 1285 DMERR("All replicated volumes dead, failing I/O"); 1300 1286 } 1287 + 1288 + out: 1289 + bio_record->details.bi_bdev = NULL; 1301 1290 1302 1291 return error; 1303 1292 }
+2 -1
drivers/md/dm-snap-persistent.c
··· 741 741 /* 742 742 * Commit exceptions to disk. 743 743 */ 744 - if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA)) 744 + if (ps->valid && area_io(ps, REQ_OP_WRITE, 745 + REQ_PREFLUSH | REQ_FUA | REQ_SYNC)) 745 746 ps->valid = 0; 746 747 747 748 /*
+13 -13
drivers/md/dm-thin.c
··· 1094 1094 return; 1095 1095 } 1096 1096 1097 + /* 1098 + * Increment the unmapped blocks. This prevents a race between the 1099 + * passdown io and reallocation of freed blocks. 1100 + */ 1101 + r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); 1102 + if (r) { 1103 + metadata_operation_failed(pool, "dm_pool_inc_data_range", r); 1104 + bio_io_error(m->bio); 1105 + cell_defer_no_holder(tc, m->cell); 1106 + mempool_free(m, pool->mapping_pool); 1107 + return; 1108 + } 1109 + 1097 1110 discard_parent = bio_alloc(GFP_NOIO, 1); 1098 1111 if (!discard_parent) { 1099 1112 DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.", ··· 1126 1113 r = issue_discard(&op, m->data_block, data_end); 1127 1114 end_discard(&op, r); 1128 1115 } 1129 - } 1130 - 1131 - /* 1132 - * Increment the unmapped blocks. This prevents a race between the 1133 - * passdown io and reallocation of freed blocks. 1134 - */ 1135 - r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); 1136 - if (r) { 1137 - metadata_operation_failed(pool, "dm_pool_inc_data_range", r); 1138 - bio_io_error(m->bio); 1139 - cell_defer_no_holder(tc, m->cell); 1140 - mempool_free(m, pool->mapping_pool); 1141 - return; 1142 1116 } 1143 1117 } 1144 1118
+2 -2
drivers/md/dm-verity-target.c
··· 166 166 return r; 167 167 } 168 168 169 - if (likely(v->version >= 1)) 169 + if (likely(v->salt_size && (v->version >= 1))) 170 170 r = verity_hash_update(v, req, v->salt, v->salt_size, res); 171 171 172 172 return r; ··· 177 177 { 178 178 int r; 179 179 180 - if (unlikely(!v->version)) { 180 + if (unlikely(v->salt_size && (!v->version))) { 181 181 r = verity_hash_update(v, req, v->salt, v->salt_size, res); 182 182 183 183 if (r < 0) {
+1 -1
drivers/md/dm.c
··· 1657 1657 1658 1658 bio_init(&md->flush_bio, NULL, 0); 1659 1659 md->flush_bio.bi_bdev = md->bdev; 1660 - md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 1660 + md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; 1661 1661 1662 1662 dm_stats_init(&md->stats); 1663 1663
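Several hunks in this pull (dm.c above, plus dm-io, dm-raid1, dm-snap-persistent, md, raid5-cache and raid5-ppl) add REQ_SYNC to preflush/FUA writes so the block layer treats them as synchronous writes rather than background writeback. A hypothetical sketch of issuing such an empty flush on its own, not code from any of these drivers:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_issue_flush(struct block_device *bdev)
{
	struct bio bio;

	bio_init(&bio, NULL, 0);	/* no data pages, just a flush */
	bio.bi_bdev = bdev;
	bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;

	return submit_bio_wait(&bio);	/* waits for the flush to complete */
}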
+3 -1
drivers/md/md-cluster.c
··· 1311 1311 cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); 1312 1312 lock_comm(cinfo, 1); 1313 1313 ret = __sendmsg(cinfo, &cmsg); 1314 - if (ret) 1314 + if (ret) { 1315 + unlock_comm(cinfo); 1315 1316 return ret; 1317 + } 1316 1318 cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; 1317 1319 ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); 1318 1320 cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
+13 -5
drivers/md/md.c
··· 765 765 test_bit(FailFast, &rdev->flags) && 766 766 !test_bit(LastDev, &rdev->flags)) 767 767 ff = MD_FAILFAST; 768 - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff; 768 + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff; 769 769 770 770 atomic_inc(&mddev->pending_writes); 771 771 submit_bio(bio); ··· 5174 5174 5175 5175 static void no_op(struct percpu_ref *r) {} 5176 5176 5177 + int mddev_init_writes_pending(struct mddev *mddev) 5178 + { 5179 + if (mddev->writes_pending.percpu_count_ptr) 5180 + return 0; 5181 + if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0) 5182 + return -ENOMEM; 5183 + /* We want to start with the refcount at zero */ 5184 + percpu_ref_put(&mddev->writes_pending); 5185 + return 0; 5186 + } 5187 + EXPORT_SYMBOL_GPL(mddev_init_writes_pending); 5188 + 5177 5189 static int md_alloc(dev_t dev, char *name) 5178 5190 { 5179 5191 /* ··· 5251 5239 blk_queue_make_request(mddev->queue, md_make_request); 5252 5240 blk_set_stacking_limits(&mddev->queue->limits); 5253 5241 5254 - if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0) 5255 - goto abort; 5256 - /* We want to start with the refcount at zero */ 5257 - percpu_ref_put(&mddev->writes_pending); 5258 5242 disk = alloc_disk(1 << shift); 5259 5243 if (!disk) { 5260 5244 blk_cleanup_queue(mddev->queue);
+1
drivers/md/md.h
··· 648 648 extern void md_wakeup_thread(struct md_thread *thread); 649 649 extern void md_check_recovery(struct mddev *mddev); 650 650 extern void md_reap_sync_thread(struct mddev *mddev); 651 + extern int mddev_init_writes_pending(struct mddev *mddev); 651 652 extern void md_write_start(struct mddev *mddev, struct bio *bi); 652 653 extern void md_write_inc(struct mddev *mddev, struct bio *bi); 653 654 extern void md_write_end(struct mddev *mddev);
+2
drivers/md/raid1.c
··· 3063 3063 mdname(mddev)); 3064 3064 return -EIO; 3065 3065 } 3066 + if (mddev_init_writes_pending(mddev) < 0) 3067 + return -ENOMEM; 3066 3068 /* 3067 3069 * copy the already verified devices into our private RAID1 3068 3070 * bookkeeping area. [whatever we allocate in run(),
+3
drivers/md/raid10.c
··· 3611 3611 int first = 1; 3612 3612 bool discard_supported = false; 3613 3613 3614 + if (mddev_init_writes_pending(mddev) < 0) 3615 + return -ENOMEM; 3616 + 3614 3617 if (mddev->private == NULL) { 3615 3618 conf = setup_conf(mddev); 3616 3619 if (IS_ERR(conf))
+2 -2
drivers/md/raid5-cache.c
··· 1782 1782 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 1783 1783 mb, PAGE_SIZE)); 1784 1784 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, 1785 - REQ_FUA, false)) { 1785 + REQ_SYNC | REQ_FUA, false)) { 1786 1786 __free_page(page); 1787 1787 return -EIO; 1788 1788 } ··· 2388 2388 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, 2389 2389 mb, PAGE_SIZE)); 2390 2390 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, 2391 - REQ_OP_WRITE, REQ_FUA, false); 2391 + REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); 2392 2392 sh->log_start = ctx->pos; 2393 2393 list_add_tail(&sh->r5c, &log->stripe_in_journal_list); 2394 2394 atomic_inc(&log->stripe_in_journal_count);
+2 -2
drivers/md/raid5-ppl.c
··· 907 907 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); 908 908 909 909 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, 910 - PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0, 911 - false)) { 910 + PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC | 911 + REQ_FUA, 0, false)) { 912 912 md_error(rdev->mddev, rdev); 913 913 ret = -EIO; 914 914 }
+17 -4
drivers/md/raid5.c
··· 4085 4085 set_bit(STRIPE_INSYNC, &sh->state); 4086 4086 else { 4087 4087 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4088 - if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 4088 + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { 4089 4089 /* don't try to repair!! */ 4090 4090 set_bit(STRIPE_INSYNC, &sh->state); 4091 - else { 4091 + pr_warn_ratelimited("%s: mismatch sector in range " 4092 + "%llu-%llu\n", mdname(conf->mddev), 4093 + (unsigned long long) sh->sector, 4094 + (unsigned long long) sh->sector + 4095 + STRIPE_SECTORS); 4096 + } else { 4092 4097 sh->check_state = check_state_compute_run; 4093 4098 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 4094 4099 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); ··· 4242 4237 } 4243 4238 } else { 4244 4239 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 4245 - if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 4240 + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { 4246 4241 /* don't try to repair!! */ 4247 4242 set_bit(STRIPE_INSYNC, &sh->state); 4248 - else { 4243 + pr_warn_ratelimited("%s: mismatch sector in range " 4244 + "%llu-%llu\n", mdname(conf->mddev), 4245 + (unsigned long long) sh->sector, 4246 + (unsigned long long) sh->sector + 4247 + STRIPE_SECTORS); 4248 + } else { 4249 4249 int *target = &sh->ops.target; 4250 4250 4251 4251 sh->ops.target = -1; ··· 7117 7107 int i; 7118 7108 long long min_offset_diff = 0; 7119 7109 int first = 1; 7110 + 7111 + if (mddev_init_writes_pending(mddev) < 0) 7112 + return -ENOMEM; 7120 7113 7121 7114 if (mddev->recovery_cp != MaxSector) 7122 7115 pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
+6
drivers/media/Kconfig
··· 2 2 # Multimedia device configuration 3 3 # 4 4 5 + config CEC_CORE 6 + tristate 7 + 8 + config CEC_NOTIFIER 9 + bool 10 + 5 11 menuconfig MEDIA_SUPPORT 6 12 tristate "Multimedia support" 7 13 depends on HAS_IOMEM
+2 -2
drivers/media/Makefile
··· 4 4 5 5 media-objs := media-device.o media-devnode.o media-entity.o 6 6 7 - obj-$(CONFIG_CEC_CORE) += cec/ 8 - 9 7 # 10 8 # I2C drivers should come before other drivers, otherwise they'll fail 11 9 # when compiled as builtin drivers ··· 23 25 24 26 # There are both core and drivers at RC subtree - merge before drivers 25 27 obj-y += rc/ 28 + 29 + obj-$(CONFIG_CEC_CORE) += cec/ 26 30 27 31 # 28 32 # Finally, merge the drivers that require the core
+1 -14
drivers/media/cec/Kconfig
··· 1 - config CEC_CORE 2 - tristate 3 - depends on MEDIA_CEC_SUPPORT 4 - default y 5 - 6 - config MEDIA_CEC_NOTIFIER 7 - bool 8 - 9 1 config MEDIA_CEC_RC 10 2 bool "HDMI CEC RC integration" 11 3 depends on CEC_CORE && RC_CORE 4 + depends on CEC_CORE=m || RC_CORE=y 12 5 ---help--- 13 6 Pass on CEC remote control messages to the RC framework. 14 - 15 - config MEDIA_CEC_DEBUG 16 - bool "HDMI CEC debugfs interface" 17 - depends on CEC_CORE && DEBUG_FS 18 - ---help--- 19 - Turns on the DebugFS interface for CEC devices.
+1 -1
drivers/media/cec/Makefile
··· 1 1 cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o 2 2 3 - ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y) 3 + ifeq ($(CONFIG_CEC_NOTIFIER),y) 4 4 cec-objs += cec-notifier.o 5 5 endif 6 6
+1 -1
drivers/media/cec/cec-adap.c
··· 1864 1864 WARN_ON(call_op(adap, adap_monitor_all_enable, 0)); 1865 1865 } 1866 1866 1867 - #ifdef CONFIG_MEDIA_CEC_DEBUG 1867 + #ifdef CONFIG_DEBUG_FS 1868 1868 /* 1869 1869 * Log the current state of the CEC adapter. 1870 1870 * Very useful for debugging.
+1 -7
drivers/media/cec/cec-api.c
··· 271 271 bool block, struct cec_msg __user *parg) 272 272 { 273 273 struct cec_msg msg = {}; 274 - long err = 0; 274 + long err; 275 275 276 276 if (copy_from_user(&msg, parg, sizeof(msg))) 277 277 return -EFAULT; 278 - mutex_lock(&adap->lock); 279 - if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR) 280 - err = -ENONET; 281 - mutex_unlock(&adap->lock); 282 - if (err) 283 - return err; 284 278 285 279 err = cec_receive_msg(fh, &msg, block); 286 280 if (err)
+4 -4
drivers/media/cec/cec-core.c
··· 187 187 put_device(&devnode->dev); 188 188 } 189 189 190 - #ifdef CONFIG_MEDIA_CEC_NOTIFIER 190 + #ifdef CONFIG_CEC_NOTIFIER 191 191 static void cec_cec_notify(struct cec_adapter *adap, u16 pa) 192 192 { 193 193 cec_s_phys_addr(adap, pa, false); ··· 323 323 } 324 324 325 325 dev_set_drvdata(&adap->devnode.dev, adap); 326 - #ifdef CONFIG_MEDIA_CEC_DEBUG 326 + #ifdef CONFIG_DEBUG_FS 327 327 if (!top_cec_dir) 328 328 return 0; 329 329 ··· 355 355 adap->rc = NULL; 356 356 #endif 357 357 debugfs_remove_recursive(adap->cec_dir); 358 - #ifdef CONFIG_MEDIA_CEC_NOTIFIER 358 + #ifdef CONFIG_CEC_NOTIFIER 359 359 if (adap->notifier) 360 360 cec_notifier_unregister(adap->notifier); 361 361 #endif ··· 395 395 return ret; 396 396 } 397 397 398 - #ifdef CONFIG_MEDIA_CEC_DEBUG 398 + #ifdef CONFIG_DEBUG_FS 399 399 top_cec_dir = debugfs_create_dir("cec", NULL); 400 400 if (IS_ERR_OR_NULL(top_cec_dir)) { 401 401 pr_warn("cec: Failed to create debugfs cec dir\n");
+6 -3
drivers/media/i2c/Kconfig
··· 220 220 221 221 config VIDEO_ADV7604_CEC 222 222 bool "Enable Analog Devices ADV7604 CEC support" 223 - depends on VIDEO_ADV7604 && CEC_CORE 223 + depends on VIDEO_ADV7604 224 + select CEC_CORE 224 225 ---help--- 225 226 When selected the adv7604 will support the optional 226 227 HDMI CEC feature. ··· 241 240 242 241 config VIDEO_ADV7842_CEC 243 242 bool "Enable Analog Devices ADV7842 CEC support" 244 - depends on VIDEO_ADV7842 && CEC_CORE 243 + depends on VIDEO_ADV7842 244 + select CEC_CORE 245 245 ---help--- 246 246 When selected the adv7842 will support the optional 247 247 HDMI CEC feature. ··· 480 478 481 479 config VIDEO_ADV7511_CEC 482 480 bool "Enable Analog Devices ADV7511 CEC support" 483 - depends on VIDEO_ADV7511 && CEC_CORE 481 + depends on VIDEO_ADV7511 482 + select CEC_CORE 484 483 ---help--- 485 484 When selected the adv7511 will support the optional 486 485 HDMI CEC feature.
+1 -1
drivers/media/i2c/tc358743.c
··· 223 223 static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, 224 224 u8 mask, u8 val) 225 225 { 226 - i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2); 226 + i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1); 227 227 } 228 228 229 229 static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg)
+6 -4
drivers/media/platform/Kconfig
··· 501 501 502 502 config VIDEO_SAMSUNG_S5P_CEC 503 503 tristate "Samsung S5P CEC driver" 504 - depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) 505 - select MEDIA_CEC_NOTIFIER 504 + depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST 505 + select CEC_CORE 506 + select CEC_NOTIFIER 506 507 ---help--- 507 508 This is a driver for Samsung S5P HDMI CEC interface. It uses the 508 509 generic CEC framework interface. ··· 512 511 513 512 config VIDEO_STI_HDMI_CEC 514 513 tristate "STMicroelectronics STiH4xx HDMI CEC driver" 515 - depends on CEC_CORE && (ARCH_STI || COMPILE_TEST) 516 - select MEDIA_CEC_NOTIFIER 514 + depends on ARCH_STI || COMPILE_TEST 515 + select CEC_CORE 516 + select CEC_NOTIFIER 517 517 ---help--- 518 518 This is a driver for STIH4xx HDMI CEC interface. It uses the 519 519 generic CEC framework interface.
+4 -4
drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c
··· 493 493 } 494 494 495 495 static struct vdec_common_if vdec_h264_if = { 496 - vdec_h264_init, 497 - vdec_h264_decode, 498 - vdec_h264_get_param, 499 - vdec_h264_deinit, 496 + .init = vdec_h264_init, 497 + .decode = vdec_h264_decode, 498 + .get_param = vdec_h264_get_param, 499 + .deinit = vdec_h264_deinit, 500 500 }; 501 501 502 502 struct vdec_common_if *get_h264_dec_comm_if(void);
+4 -4
drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c
··· 620 620 } 621 621 622 622 static struct vdec_common_if vdec_vp8_if = { 623 - vdec_vp8_init, 624 - vdec_vp8_decode, 625 - vdec_vp8_get_param, 626 - vdec_vp8_deinit, 623 + .init = vdec_vp8_init, 624 + .decode = vdec_vp8_decode, 625 + .get_param = vdec_vp8_get_param, 626 + .deinit = vdec_vp8_deinit, 627 627 }; 628 628 629 629 struct vdec_common_if *get_vp8_dec_comm_if(void);
+4 -4
drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c
··· 979 979 } 980 980 981 981 static struct vdec_common_if vdec_vp9_if = { 982 - vdec_vp9_init, 983 - vdec_vp9_decode, 984 - vdec_vp9_get_param, 985 - vdec_vp9_deinit, 982 + .init = vdec_vp9_init, 983 + .decode = vdec_vp9_decode, 984 + .get_param = vdec_vp9_get_param, 985 + .deinit = vdec_vp9_deinit, 986 986 }; 987 987 988 988 struct vdec_common_if *get_vp9_dec_comm_if(void);
+2 -1
drivers/media/platform/vivid/Kconfig
··· 26 26 27 27 config VIDEO_VIVID_CEC 28 28 bool "Enable CEC emulation support" 29 - depends on VIDEO_VIVID && CEC_CORE 29 + depends on VIDEO_VIVID 30 + select CEC_CORE 30 31 ---help--- 31 32 When selected the vivid module will emulate the optional 32 33 HDMI CEC feature.
+8 -5
drivers/media/rc/rc-ir-raw.c
··· 211 211 */ 212 212 void ir_raw_event_handle(struct rc_dev *dev) 213 213 { 214 - if (!dev->raw) 214 + if (!dev->raw || !dev->raw->thread) 215 215 return; 216 216 217 217 wake_up_process(dev->raw->thread); ··· 490 490 { 491 491 int rc; 492 492 struct ir_raw_handler *handler; 493 + struct task_struct *thread; 493 494 494 495 if (!dev) 495 496 return -EINVAL; ··· 508 507 * because the event is coming from userspace 509 508 */ 510 509 if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { 511 - dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw, 512 - "rc%u", dev->minor); 510 + thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", 511 + dev->minor); 513 512 514 - if (IS_ERR(dev->raw->thread)) { 515 - rc = PTR_ERR(dev->raw->thread); 513 + if (IS_ERR(thread)) { 514 + rc = PTR_ERR(thread); 516 515 goto out; 517 516 } 517 + 518 + dev->raw->thread = thread; 518 519 } 519 520 520 521 mutex_lock(&ir_raw_handler_lock);
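The rc-ir-raw change keeps the kthread_run() result in a local variable and only stores it in dev->raw->thread once it is known to be valid, so ir_raw_event_handle() can never wake up an ERR_PTR value. A generic, hypothetical sketch of that publish-only-valid-pointers pattern (example_* names are illustrative):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct example_state {
	struct task_struct *thread;	/* NULL until the worker is running */
};

static int example_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int example_start_worker(struct example_state *st)
{
	struct task_struct *thread;

	thread = kthread_run(example_thread_fn, st, "example-worker");
	if (IS_ERR(thread))
		return PTR_ERR(thread);	/* st->thread still holds NULL */

	st->thread = thread;		/* publish only a valid pointer */
	return 0;
}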
+6
drivers/media/rc/sir_ir.c
··· 183 183 static unsigned long delt; 184 184 unsigned long deltintr; 185 185 unsigned long flags; 186 + int counter = 0; 186 187 int iir, lsr; 187 188 188 189 while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) { 190 + if (++counter > 256) { 191 + dev_err(&sir_ir_dev->dev, "Trapped in interrupt"); 192 + break; 193 + } 194 + 189 195 switch (iir & UART_IIR_ID) { /* FIXME toto treba preriedit */ 190 196 case UART_IIR_MSI: 191 197 (void)inb(io + UART_MSR);
+2 -1
drivers/media/usb/pulse8-cec/Kconfig
··· 1 1 config USB_PULSE8_CEC 2 2 tristate "Pulse Eight HDMI CEC" 3 - depends on USB_ACM && CEC_CORE 3 + depends on USB_ACM 4 + select CEC_CORE 4 5 select SERIO 5 6 select SERIO_SERPORT 6 7 ---help---
+2 -1
drivers/media/usb/rainshadow-cec/Kconfig
··· 1 1 config USB_RAINSHADOW_CEC 2 2 tristate "RainShadow Tech HDMI CEC" 3 - depends on USB_ACM && CEC_CORE 3 + depends on USB_ACM 4 + select CEC_CORE 4 5 select SERIO 5 6 select SERIO_SERPORT 6 7 ---help---
+2 -1
drivers/media/usb/rainshadow-cec/rainshadow-cec.c
··· 119 119 120 120 while (true) { 121 121 unsigned long flags; 122 - bool exit_loop; 122 + bool exit_loop = false; 123 123 char data; 124 124 125 125 spin_lock_irqsave(&rain->buf_lock, flags); ··· 336 336 serio_set_drvdata(serio, rain); 337 337 INIT_WORK(&rain->work, rain_irq_work_handler); 338 338 mutex_init(&rain->write_lock); 339 + spin_lock_init(&rain->buf_lock); 339 340 340 341 err = serio_open(serio, drv); 341 342 if (err)
+1 -1
drivers/media/v4l2-core/videobuf2-core.c
··· 868 868 869 869 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) 870 870 { 871 - if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) 871 + if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) 872 872 return NULL; 873 873 874 874 return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
+1 -1
drivers/memory/atmel-ebi.c
··· 581 581 return of_platform_populate(np, NULL, NULL, dev); 582 582 } 583 583 584 - static int atmel_ebi_resume(struct device *dev) 584 + static __maybe_unused int atmel_ebi_resume(struct device *dev) 585 585 { 586 586 struct atmel_ebi *ebi = dev_get_drvdata(dev); 587 587 struct atmel_ebi_dev *ebid;
+1 -2
drivers/mfd/arizona-core.c
··· 245 245 int ret; 246 246 247 247 ret = regmap_read_poll_timeout(arizona->regmap, 248 - ARIZONA_INTERRUPT_RAW_STATUS_5, val, 249 - ((val & mask) == target), 248 + reg, val, ((val & mask) == target), 250 249 ARIZONA_REG_POLL_DELAY_US, 251 250 timeout_ms * 1000); 252 251 if (ret)
+3 -3
drivers/misc/cxl/context.c
··· 45 45 mutex_init(&ctx->mapping_lock); 46 46 ctx->mapping = NULL; 47 47 48 - if (cxl_is_psl8(afu)) { 48 + if (cxl_is_power8()) { 49 49 spin_lock_init(&ctx->sste_lock); 50 50 51 51 /* ··· 189 189 if (start + len > ctx->afu->adapter->ps_size) 190 190 return -EINVAL; 191 191 192 - if (cxl_is_psl9(ctx->afu)) { 192 + if (cxl_is_power9()) { 193 193 /* 194 194 * Make sure there is a valid problem state 195 195 * area space for this AFU. ··· 324 324 { 325 325 struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu); 326 326 327 - if (cxl_is_psl8(ctx->afu)) 327 + if (cxl_is_power8()) 328 328 free_page((u64)ctx->sstp); 329 329 if (ctx->ff_page) 330 330 __free_page(ctx->ff_page);
+5 -13
drivers/misc/cxl/cxl.h
··· 357 357 #define CXL_PSL9_DSISR_An_PF_RGP 0x0000000000000090ULL /* PTE not found (Radix Guest (parent)) 0b10010000 */ 358 358 #define CXL_PSL9_DSISR_An_PF_HRH 0x0000000000000094ULL /* PTE not found (HPT/Radix Host) 0b10010100 */ 359 359 #define CXL_PSL9_DSISR_An_PF_STEG 0x000000000000009CULL /* PTE not found (STEG VA) 0b10011100 */ 360 + #define CXL_PSL9_DSISR_An_URTCH 0x00000000000000B4ULL /* Unsupported Radix Tree Configuration 0b10110100 */ 360 361 361 362 /****** CXL_PSL_TFC_An ******************************************************/ 362 363 #define CXL_PSL_TFC_An_A (1ull << (63-28)) /* Acknowledge non-translation fault */ ··· 845 844 846 845 static inline bool cxl_is_power9(void) 847 846 { 848 - /* intermediate solution */ 849 - if (!cxl_is_power8() && 850 - (cpu_has_feature(CPU_FTRS_POWER9) || 851 - cpu_has_feature(CPU_FTR_POWER9_DD1))) 847 + if (pvr_version_is(PVR_POWER9)) 852 848 return true; 853 849 return false; 854 850 } 855 851 856 - static inline bool cxl_is_psl8(struct cxl_afu *afu) 852 + static inline bool cxl_is_power9_dd1(void) 857 853 { 858 - if (afu->adapter->caia_major == 1) 859 - return true; 860 - return false; 861 - } 862 - 863 - static inline bool cxl_is_psl9(struct cxl_afu *afu) 864 - { 865 - if (afu->adapter->caia_major == 2) 854 + if ((pvr_version_is(PVR_POWER9)) && 855 + cpu_has_feature(CPU_FTR_POWER9_DD1)) 866 856 return true; 867 857 return false; 868 858 }
+15 -8
drivers/misc/cxl/fault.c
··· 187 187 188 188 static bool cxl_is_segment_miss(struct cxl_context *ctx, u64 dsisr) 189 189 { 190 - if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DS)) 190 + if ((cxl_is_power8() && (dsisr & CXL_PSL_DSISR_An_DS))) 191 191 return true; 192 192 193 193 return false; ··· 195 195 196 196 static bool cxl_is_page_fault(struct cxl_context *ctx, u64 dsisr) 197 197 { 198 - if ((cxl_is_psl8(ctx->afu)) && (dsisr & CXL_PSL_DSISR_An_DM)) 198 + u64 crs; /* Translation Checkout Response Status */ 199 + 200 + if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_An_DM)) 199 201 return true; 200 202 201 - if ((cxl_is_psl9(ctx->afu)) && 202 - ((dsisr & CXL_PSL9_DSISR_An_CO_MASK) & 203 - (CXL_PSL9_DSISR_An_PF_SLR | CXL_PSL9_DSISR_An_PF_RGC | 204 - CXL_PSL9_DSISR_An_PF_RGP | CXL_PSL9_DSISR_An_PF_HRH | 205 - CXL_PSL9_DSISR_An_PF_STEG))) 206 - return true; 203 + if (cxl_is_power9()) { 204 + crs = (dsisr & CXL_PSL9_DSISR_An_CO_MASK); 205 + if ((crs == CXL_PSL9_DSISR_An_PF_SLR) || 206 + (crs == CXL_PSL9_DSISR_An_PF_RGC) || 207 + (crs == CXL_PSL9_DSISR_An_PF_RGP) || 208 + (crs == CXL_PSL9_DSISR_An_PF_HRH) || 209 + (crs == CXL_PSL9_DSISR_An_PF_STEG) || 210 + (crs == CXL_PSL9_DSISR_An_URTCH)) { 211 + return true; 212 + } 213 + } 207 214 208 215 return false; 209 216 }
+2 -5
drivers/misc/cxl/file.c
··· 159 159 160 160 /* Do this outside the status_mutex to avoid a circular dependency with 161 161 * the locking in cxl_mmap_fault() */ 162 - if (copy_from_user(&work, uwork, 163 - sizeof(struct cxl_ioctl_start_work))) { 164 - rc = -EFAULT; 165 - goto out; 166 - } 162 + if (copy_from_user(&work, uwork, sizeof(work))) 163 + return -EFAULT; 167 164 168 165 mutex_lock(&ctx->status_mutex); 169 166 if (ctx->status != OPENED) {
+13 -4
drivers/misc/cxl/main.c
··· 329 329 330 330 cxl_debugfs_init(); 331 331 332 - if ((rc = register_cxl_calls(&cxl_calls))) 333 - goto err; 332 + /* 333 + * we don't register the callback on P9. slb callack is only 334 + * used for the PSL8 MMU and CX4. 335 + */ 336 + if (cxl_is_power8()) { 337 + rc = register_cxl_calls(&cxl_calls); 338 + if (rc) 339 + goto err; 340 + } 334 341 335 342 if (cpu_has_feature(CPU_FTR_HVMODE)) { 336 343 cxl_ops = &cxl_native_ops; ··· 354 347 355 348 return 0; 356 349 err1: 357 - unregister_cxl_calls(&cxl_calls); 350 + if (cxl_is_power8()) 351 + unregister_cxl_calls(&cxl_calls); 358 352 err: 359 353 cxl_debugfs_exit(); 360 354 cxl_file_exit(); ··· 374 366 375 367 cxl_debugfs_exit(); 376 368 cxl_file_exit(); 377 - unregister_cxl_calls(&cxl_calls); 369 + if (cxl_is_power8()) 370 + unregister_cxl_calls(&cxl_calls); 378 371 idr_destroy(&cxl_adapter_idr); 379 372 } 380 373
+28 -15
drivers/misc/cxl/native.c
··· 105 105 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, 106 106 false); 107 107 108 - /* Re-enable any masked interrupts */ 109 - serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); 110 - serr &= ~CXL_PSL_SERR_An_IRQ_MASKS; 111 - cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); 112 - 108 + /* 109 + * Re-enable any masked interrupts when the AFU is not 110 + * activated to avoid side effects after attaching a process 111 + * in dedicated mode. 112 + */ 113 + if (afu->current_mode == 0) { 114 + serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); 115 + serr &= ~CXL_PSL_SERR_An_IRQ_MASKS; 116 + cxl_p1n_write(afu, CXL_PSL_SERR_An, serr); 117 + } 113 118 114 119 return rc; 115 120 } ··· 144 139 145 140 pr_devel("PSL purge request\n"); 146 141 147 - if (cxl_is_psl8(afu)) 142 + if (cxl_is_power8()) 148 143 trans_fault = CXL_PSL_DSISR_TRANS; 149 - if (cxl_is_psl9(afu)) 144 + if (cxl_is_power9()) 150 145 trans_fault = CXL_PSL9_DSISR_An_TF; 151 146 152 147 if (!cxl_ops->link_ok(afu->adapter, afu)) { ··· 608 603 if (!test_tsk_thread_flag(current, TIF_32BIT)) 609 604 sr |= CXL_PSL_SR_An_SF; 610 605 } 611 - if (cxl_is_psl9(ctx->afu)) { 606 + if (cxl_is_power9()) { 612 607 if (radix_enabled()) 613 608 sr |= CXL_PSL_SR_An_XLAT_ror; 614 609 else ··· 1122 1117 1123 1118 static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr) 1124 1119 { 1125 - if ((cxl_is_psl8(afu)) && (dsisr & CXL_PSL_DSISR_TRANS)) 1120 + if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS)) 1126 1121 return true; 1127 1122 1128 - if ((cxl_is_psl9(afu)) && (dsisr & CXL_PSL9_DSISR_An_TF)) 1123 + if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF)) 1129 1124 return true; 1130 1125 1131 1126 return false; ··· 1199 1194 if (ph != ctx->pe) 1200 1195 return; 1201 1196 dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); 1202 - if (cxl_is_psl8(ctx->afu) && 1197 + if (cxl_is_power8() && 1203 1198 ((dsisr & CXL_PSL_DSISR_PENDING) == 0)) 1204 1199 return; 1205 - if (cxl_is_psl9(ctx->afu) && 1200 + if (cxl_is_power9() && 1206 1201 ((dsisr & CXL_PSL9_DSISR_PENDING) == 0)) 1207 1202 return; 1208 1203 /* ··· 1307 1302 1308 1303 void cxl_native_release_psl_err_irq(struct cxl *adapter) 1309 1304 { 1310 - if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) 1305 + if (adapter->native->err_virq == 0 || 1306 + adapter->native->err_virq != 1307 + irq_find_mapping(NULL, adapter->native->err_hwirq)) 1311 1308 return; 1312 1309 1313 1310 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); 1314 1311 cxl_unmap_irq(adapter->native->err_virq, adapter); 1315 1312 cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); 1316 1313 kfree(adapter->irq_name); 1314 + adapter->native->err_virq = 0; 1317 1315 } 1318 1316 1319 1317 int cxl_native_register_serr_irq(struct cxl_afu *afu) ··· 1354 1346 1355 1347 void cxl_native_release_serr_irq(struct cxl_afu *afu) 1356 1348 { 1357 - if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) 1349 + if (afu->serr_virq == 0 || 1350 + afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) 1358 1351 return; 1359 1352 1360 1353 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); 1361 1354 cxl_unmap_irq(afu->serr_virq, afu); 1362 1355 cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); 1363 1356 kfree(afu->err_irq_name); 1357 + afu->serr_virq = 0; 1364 1358 } 1365 1359 1366 1360 int cxl_native_register_psl_irq(struct cxl_afu *afu) ··· 1385 1375 1386 1376 void cxl_native_release_psl_irq(struct cxl_afu *afu) 1387 1377 { 1388 - if (afu->native->psl_virq != 
irq_find_mapping(NULL, afu->native->psl_hwirq)) 1378 + if (afu->native->psl_virq == 0 || 1379 + afu->native->psl_virq != 1380 + irq_find_mapping(NULL, afu->native->psl_hwirq)) 1389 1381 return; 1390 1382 1391 1383 cxl_unmap_irq(afu->native->psl_virq, afu); 1392 1384 cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); 1393 1385 kfree(afu->psl_irq_name); 1386 + afu->native->psl_virq = 0; 1394 1387 } 1395 1388 1396 1389 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
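The release paths above are made idempotent by treating a zero virq as "already released" and clearing it after teardown; a minimal sketch of that guard pattern, with an invented foo_dev structure standing in for the cxl types:

#include <linux/interrupt.h>

struct foo_dev {
	unsigned int virq;	/* 0 means "not mapped or already released" */
};

static void foo_release_irq(struct foo_dev *dev)
{
	if (!dev->virq)			/* a second call is a no-op */
		return;

	free_irq(dev->virq, dev);
	dev->virq = 0;			/* mark as released for later callers */
}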
+4 -7
drivers/misc/cxl/pci.c
··· 436 436 /* nMMU_ID Defaults to: b’000001001’*/ 437 437 xsl_dsnctl |= ((u64)0x09 << (63-28)); 438 438 439 - if (cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)) { 439 + if (!(cxl_is_power9_dd1())) { 440 440 /* 441 441 * Used to identify CAPI packets which should be sorted into 442 442 * the Non-Blocking queues by the PHB. This field should match ··· 491 491 cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000003FFFF0000ULL); 492 492 493 493 /* Disable vc dd1 fix */ 494 - if ((cxl_is_power9() && cpu_has_feature(CPU_FTR_POWER9_DD1))) 494 + if (cxl_is_power9_dd1()) 495 495 cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0400000000000001ULL); 496 496 497 497 return 0; ··· 1439 1439 * The adapter is about to be reset, so ignore errors. 1440 1440 * Not supported on P9 DD1 1441 1441 */ 1442 - if ((cxl_is_power8()) || 1443 - ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)))) 1442 + if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) 1444 1443 cxl_data_cache_flush(adapter); 1445 1444 1446 1445 /* pcie_warm_reset requests a fundamental pci reset which includes a ··· 1749 1750 .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9, 1750 1751 .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9, 1751 1752 .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9, 1752 - .err_irq_dump_registers = cxl_native_err_irq_dump_regs, 1753 1753 .debugfs_stop_trace = cxl_stop_trace_psl9, 1754 1754 .write_timebase_ctrl = write_timebase_ctrl_psl9, 1755 1755 .timebase_read = timebase_read_psl9, ··· 1887 1889 * Flush adapter datacache as its about to be removed. 1888 1890 * Not supported on P9 DD1. 1889 1891 */ 1890 - if ((cxl_is_power8()) || 1891 - ((cxl_is_power9() && !cpu_has_feature(CPU_FTR_POWER9_DD1)))) 1892 + if ((cxl_is_power8()) || (!(cxl_is_power9_dd1()))) 1892 1893 cxl_data_cache_flush(adapter); 1893 1894 1894 1895 cxl_deconfigure_adapter(adapter);
+3 -1
drivers/misc/mei/bus.c
··· 763 763 { 764 764 struct mei_cl_device *cldev = to_mei_cl_device(dev); 765 765 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); 766 + u8 version = mei_me_cl_ver(cldev->me_cl); 766 767 767 - return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid); 768 + return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:", 769 + cldev->name, uuid, version); 768 770 } 769 771 static DEVICE_ATTR_RO(modalias); 770 772
+11 -1
drivers/misc/sgi-xp/xp.h
··· 309 309 xpc_send(short partid, int ch_number, u32 flags, void *payload, 310 310 u16 payload_size) 311 311 { 312 + if (!xpc_interface.send) 313 + return xpNotLoaded; 314 + 312 315 return xpc_interface.send(partid, ch_number, flags, payload, 313 316 payload_size); 314 317 } ··· 320 317 xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, 321 318 u16 payload_size, xpc_notify_func func, void *key) 322 319 { 320 + if (!xpc_interface.send_notify) 321 + return xpNotLoaded; 322 + 323 323 return xpc_interface.send_notify(partid, ch_number, flags, payload, 324 324 payload_size, func, key); 325 325 } ··· 330 324 static inline void 331 325 xpc_received(short partid, int ch_number, void *payload) 332 326 { 333 - return xpc_interface.received(partid, ch_number, payload); 327 + if (xpc_interface.received) 328 + xpc_interface.received(partid, ch_number, payload); 334 329 } 335 330 336 331 static inline enum xp_retval 337 332 xpc_partid_to_nasids(short partid, void *nasids) 338 333 { 334 + if (!xpc_interface.partid_to_nasids) 335 + return xpNotLoaded; 336 + 339 337 return xpc_interface.partid_to_nasids(partid, nasids); 340 338 } 341 339
+7 -29
drivers/misc/sgi-xp/xp_main.c
··· 69 69 EXPORT_SYMBOL_GPL(xpc_registrations); 70 70 71 71 /* 72 - * Initialize the XPC interface to indicate that XPC isn't loaded. 72 + * Initialize the XPC interface to NULL to indicate that XPC isn't loaded. 73 73 */ 74 - static enum xp_retval 75 - xpc_notloaded(void) 76 - { 77 - return xpNotLoaded; 78 - } 79 - 80 - struct xpc_interface xpc_interface = { 81 - (void (*)(int))xpc_notloaded, 82 - (void (*)(int))xpc_notloaded, 83 - (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, 84 - (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, 85 - void *))xpc_notloaded, 86 - (void (*)(short, int, void *))xpc_notloaded, 87 - (enum xp_retval(*)(short, void *))xpc_notloaded 88 - }; 74 + struct xpc_interface xpc_interface = { }; 89 75 EXPORT_SYMBOL_GPL(xpc_interface); 90 76 91 77 /* ··· 101 115 void 102 116 xpc_clear_interface(void) 103 117 { 104 - xpc_interface.connect = (void (*)(int))xpc_notloaded; 105 - xpc_interface.disconnect = (void (*)(int))xpc_notloaded; 106 - xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16)) 107 - xpc_notloaded; 108 - xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *, 109 - u16, xpc_notify_func, 110 - void *))xpc_notloaded; 111 - xpc_interface.received = (void (*)(short, int, void *)) 112 - xpc_notloaded; 113 - xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *)) 114 - xpc_notloaded; 118 + memset(&xpc_interface, 0, sizeof(xpc_interface)); 115 119 } 116 120 EXPORT_SYMBOL_GPL(xpc_clear_interface); 117 121 ··· 164 188 165 189 mutex_unlock(&registration->mutex); 166 190 167 - xpc_interface.connect(ch_number); 191 + if (xpc_interface.connect) 192 + xpc_interface.connect(ch_number); 168 193 169 194 return xpSuccess; 170 195 } ··· 214 237 registration->assigned_limit = 0; 215 238 registration->idle_limit = 0; 216 239 217 - xpc_interface.disconnect(ch_number); 240 + if (xpc_interface.disconnect) 241 + xpc_interface.disconnect(ch_number); 218 242 219 243 mutex_unlock(&registration->mutex); 220 244
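Both hunks replace the casted xpc_notloaded() stubs with a zeroed ops structure and NULL checks at each call site; a minimal sketch of that guarded function-pointer idiom, with invented my_xfer_ops/my_send names rather than the XPC interface (and -ENODEV standing in for xpNotLoaded):

#include <linux/errno.h>
#include <linux/string.h>

struct my_xfer_ops {
	int (*send)(int chan, const void *buf, unsigned int len);
};

static struct my_xfer_ops my_ops;	/* all-NULL until a provider registers */

static int my_send(int chan, const void *buf, unsigned int len)
{
	if (!my_ops.send)		/* provider module not loaded */
		return -ENODEV;
	return my_ops.send(chan, buf, len);
}

static void my_clear_ops(void)
{
	memset(&my_ops, 0, sizeof(my_ops));	/* back to "not loaded" */
}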
+9
drivers/mmc/host/meson-gx-mmc.c
··· 210 210 int i; 211 211 bool use_desc_chain_mode = true; 212 212 213 + /* 214 + * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been 215 + * reported. For some strange reason this occurs in descriptor 216 + * chain mode only. So let's fall back to bounce buffer mode 217 + * for command SD_IO_RW_EXTENDED. 218 + */ 219 + if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) 220 + return; 221 + 213 222 for_each_sg(data->sg, sg, data->sg_len, i) 214 223 /* check for 8 byte alignment */ 215 224 if (sg->offset & 7) {
+35 -11
drivers/mtd/nand/nand_base.c
··· 202 202 return 0; 203 203 } 204 204 205 - const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { 205 + static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { 206 206 .ecc = nand_ooblayout_ecc_lp_hamming, 207 207 .free = nand_ooblayout_free_lp_hamming, 208 208 }; ··· 4361 4361 /* Initialize the ->data_interface field. */ 4362 4362 ret = nand_init_data_interface(chip); 4363 4363 if (ret) 4364 - return ret; 4364 + goto err_nand_init; 4365 4365 4366 4366 /* 4367 4367 * Setup the data interface correctly on the chip and controller side. ··· 4373 4373 */ 4374 4374 ret = nand_setup_data_interface(chip); 4375 4375 if (ret) 4376 - return ret; 4376 + goto err_nand_init; 4377 4377 4378 4378 nand_maf_id = chip->id.data[0]; 4379 4379 nand_dev_id = chip->id.data[1]; ··· 4404 4404 mtd->size = i * chip->chipsize; 4405 4405 4406 4406 return 0; 4407 + 4408 + err_nand_init: 4409 + /* Free manufacturer priv data. */ 4410 + nand_manufacturer_cleanup(chip); 4411 + 4412 + return ret; 4407 4413 } 4408 4414 EXPORT_SYMBOL(nand_scan_ident); 4409 4415 ··· 4580 4574 4581 4575 /* New bad blocks should be marked in OOB, flash-based BBT, or both */ 4582 4576 if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && 4583 - !(chip->bbt_options & NAND_BBT_USE_FLASH))) 4584 - return -EINVAL; 4577 + !(chip->bbt_options & NAND_BBT_USE_FLASH))) { 4578 + ret = -EINVAL; 4579 + goto err_ident; 4580 + } 4585 4581 4586 4582 if (invalid_ecc_page_accessors(chip)) { 4587 4583 pr_err("Invalid ECC page accessors setup\n"); 4588 - return -EINVAL; 4584 + ret = -EINVAL; 4585 + goto err_ident; 4589 4586 } 4590 4587 4591 4588 if (!(chip->options & NAND_OWN_BUFFERS)) { 4592 4589 nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL); 4593 - if (!nbuf) 4594 - return -ENOMEM; 4590 + if (!nbuf) { 4591 + ret = -ENOMEM; 4592 + goto err_ident; 4593 + } 4595 4594 4596 4595 nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL); 4597 4596 if (!nbuf->ecccalc) { ··· 4619 4608 4620 4609 chip->buffers = nbuf; 4621 4610 } else { 4622 - if (!chip->buffers) 4623 - return -ENOMEM; 4611 + if (!chip->buffers) { 4612 + ret = -ENOMEM; 4613 + goto err_ident; 4614 + } 4624 4615 } 4625 4616 4626 4617 /* Set the internal oob buffer location, just after the page data */ ··· 4855 4842 return 0; 4856 4843 4857 4844 /* Build bad block table */ 4858 - return chip->scan_bbt(mtd); 4845 + ret = chip->scan_bbt(mtd); 4846 + if (ret) 4847 + goto err_free; 4848 + return 0; 4849 + 4859 4850 err_free: 4860 4851 if (nbuf) { 4861 4852 kfree(nbuf->databuf); ··· 4867 4850 kfree(nbuf->ecccalc); 4868 4851 kfree(nbuf); 4869 4852 } 4853 + 4854 + err_ident: 4855 + /* Clean up nand_scan_ident(). */ 4856 + 4857 + /* Free manufacturer priv data. */ 4858 + nand_manufacturer_cleanup(chip); 4859 + 4870 4860 return ret; 4871 4861 } 4872 4862 EXPORT_SYMBOL(nand_scan_tail);
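The new err_nand_init/err_ident labels follow the usual goto-based unwinding style; a minimal sketch of that error-path pattern, with invented buf_a/buf_b resources rather than the NAND buffers:

#include <linux/errno.h>
#include <linux/slab.h>

static void *buf_a, *buf_b;

static int my_init(void)
{
	int ret;

	buf_a = kzalloc(64, GFP_KERNEL);
	if (!buf_a)
		return -ENOMEM;

	buf_b = kzalloc(64, GFP_KERNEL);
	if (!buf_b) {
		ret = -ENOMEM;
		goto err_free_a;	/* undo only what already succeeded */
	}

	return 0;

err_free_a:
	kfree(buf_a);
	buf_a = NULL;
	return ret;
}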
-1
drivers/mtd/nand/nand_ids.c
··· 6 6 * published by the Free Software Foundation. 7 7 * 8 8 */ 9 - #include <linux/module.h> 10 9 #include <linux/mtd/nand.h> 11 10 #include <linux/sizes.h> 12 11
+3
drivers/mtd/nand/nand_samsung.c
··· 84 84 case 7: 85 85 chip->ecc_strength_ds = 60; 86 86 break; 87 + default: 88 + WARN(1, "Could not decode ECC info"); 89 + chip->ecc_step_ds = 0; 87 90 } 88 91 } 89 92 } else {
+16 -7
drivers/mtd/nand/tango_nand.c
··· 55 55 * byte 1 for other packets in the page (PKT_N, for N > 0) 56 56 * ERR_COUNT_PKT_N is the max error count over all but the first packet. 57 57 */ 58 - #define DECODE_OK_PKT_0(v) ((v) & BIT(7)) 59 - #define DECODE_OK_PKT_N(v) ((v) & BIT(15)) 60 58 #define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f) 61 59 #define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f) 60 + #define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0) 61 + #define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0) 62 62 63 63 /* Offsets relative to pbus_base */ 64 64 #define PBUS_CS_CTRL 0x83c ··· 193 193 chip->ecc.strength); 194 194 if (res < 0) 195 195 mtd->ecc_stats.failed++; 196 + else 197 + mtd->ecc_stats.corrected += res; 196 198 197 199 bitflips = max(res, bitflips); 198 200 buf += pkt_size; ··· 204 202 return bitflips; 205 203 } 206 204 207 - static int decode_error_report(struct tango_nfc *nfc) 205 + static int decode_error_report(struct nand_chip *chip) 208 206 { 209 207 u32 status, res; 208 + struct mtd_info *mtd = nand_to_mtd(chip); 209 + struct tango_nfc *nfc = to_tango_nfc(chip->controller); 210 210 211 211 status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS); 212 212 if (status & PAGE_IS_EMPTY) ··· 216 212 217 213 res = readl_relaxed(nfc->mem_base + ERROR_REPORT); 218 214 219 - if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res)) 220 - return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); 215 + if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res)) 216 + return -EBADMSG; 221 217 222 - return -EBADMSG; 218 + /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */ 219 + mtd->ecc_stats.corrected += 220 + ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res); 221 + 222 + return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); 223 223 } 224 224 225 225 static void tango_dma_callback(void *arg) ··· 290 282 if (err) 291 283 return err; 292 284 293 - res = decode_error_report(nfc); 285 + res = decode_error_report(chip); 294 286 if (res < 0) { 295 287 chip->ecc.read_oob_raw(mtd, chip, page); 296 288 res = check_erased_page(chip, buf); ··· 671 663 { .compatible = "sigma,smp8758-nand" }, 672 664 { /* sentinel */ } 673 665 }; 666 + MODULE_DEVICE_TABLE(of, tango_nand_ids); 674 667 675 668 static struct platform_driver tango_nand_driver = { 676 669 .probe = tango_nand_probe,
+4 -3
drivers/net/arcnet/arcnet.c
··· 756 756 struct net_device *dev = dev_id; 757 757 struct arcnet_local *lp; 758 758 int recbuf, status, diagstatus, didsomething, boguscount; 759 + unsigned long flags; 759 760 int retval = IRQ_NONE; 760 761 761 762 arc_printk(D_DURING, dev, "\n"); ··· 766 765 lp = netdev_priv(dev); 767 766 BUG_ON(!lp); 768 767 769 - spin_lock(&lp->lock); 768 + spin_lock_irqsave(&lp->lock, flags); 770 769 771 770 /* RESET flag was enabled - if device is not running, we must 772 771 * clear it right away (but nothing else). ··· 775 774 if (lp->hw.status(dev) & RESETflag) 776 775 lp->hw.command(dev, CFLAGScmd | RESETclear); 777 776 lp->hw.intmask(dev, 0); 778 - spin_unlock(&lp->lock); 777 + spin_unlock_irqrestore(&lp->lock, flags); 779 778 return retval; 780 779 } 781 780 ··· 999 998 udelay(1); 1000 999 lp->hw.intmask(dev, lp->intmask); 1001 1000 1002 - spin_unlock(&lp->lock); 1001 + spin_unlock_irqrestore(&lp->lock, flags); 1003 1002 return retval; 1004 1003 } 1005 1004 EXPORT_SYMBOL(arcnet_interrupt);
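The handler switches to the irqsave/irqrestore spinlock variants so the same code is safe whether or not interrupts are already disabled when it runs; a minimal sketch of that locking idiom, with an invented my_lock/shared_counter pair:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static int shared_counter;

static void touch_shared_state(void)
{
	unsigned long flags;

	/* Safe from process context and from hard-IRQ context alike. */
	spin_lock_irqsave(&my_lock, flags);
	shared_counter++;
	spin_unlock_irqrestore(&my_lock, flags);
}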
+1 -1
drivers/net/arcnet/capmode.c
··· 212 212 ackpkt->soft.cap.proto = 0; /* using protocol 0 for acknowledge */ 213 213 ackpkt->soft.cap.mes.ack = acked; 214 214 215 - arc_printk(D_PROTO, dev, "Ackknowledge for cap packet %x.\n", 215 + arc_printk(D_PROTO, dev, "Acknowledge for cap packet %x.\n", 216 216 *((int *)&ackpkt->soft.cap.cookie[0])); 217 217 218 218 ackskb->protocol = cpu_to_be16(ETH_P_ARCNET);
+4 -2
drivers/net/arcnet/com20020-pci.c
··· 135 135 for (i = 0; i < ci->devcount; i++) { 136 136 struct com20020_pci_channel_map *cm = &ci->chan_map_tbl[i]; 137 137 struct com20020_dev *card; 138 + int dev_id_mask = 0xf; 138 139 139 140 dev = alloc_arcdev(device); 140 141 if (!dev) { ··· 167 166 arcnet_outb(0x00, ioaddr, COM20020_REG_W_COMMAND); 168 167 arcnet_inb(ioaddr, COM20020_REG_R_DIAGSTAT); 169 168 169 + SET_NETDEV_DEV(dev, &pdev->dev); 170 170 dev->base_addr = ioaddr; 171 171 dev->dev_addr[0] = node; 172 172 dev->irq = pdev->irq; ··· 181 179 182 180 /* Get the dev_id from the PLX rotary coder */ 183 181 if (!strncmp(ci->name, "EAE PLX-PCI MA1", 15)) 184 - dev->dev_id = 0xc; 185 - dev->dev_id ^= inb(priv->misc + ci->rotary) >> 4; 182 + dev_id_mask = 0x3; 183 + dev->dev_id = (inb(priv->misc + ci->rotary) >> 4) & dev_id_mask; 186 184 187 185 snprintf(dev->name, sizeof(dev->name), "arc%d-%d", dev->dev_id, i); 188 186
-2
drivers/net/arcnet/com20020.c
··· 246 246 return -ENODEV; 247 247 } 248 248 249 - dev->base_addr = ioaddr; 250 - 251 249 arc_printk(D_NORMAL, dev, "%s: station %02Xh found at %03lXh, IRQ %d.\n", 252 250 lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq); 253 251
+27
drivers/net/bonding/bond_3ad.c
··· 90 90 AD_LINK_SPEED_100MBPS, 91 91 AD_LINK_SPEED_1000MBPS, 92 92 AD_LINK_SPEED_2500MBPS, 93 + AD_LINK_SPEED_5000MBPS, 93 94 AD_LINK_SPEED_10000MBPS, 95 + AD_LINK_SPEED_14000MBPS, 94 96 AD_LINK_SPEED_20000MBPS, 95 97 AD_LINK_SPEED_25000MBPS, 96 98 AD_LINK_SPEED_40000MBPS, 99 + AD_LINK_SPEED_50000MBPS, 97 100 AD_LINK_SPEED_56000MBPS, 98 101 AD_LINK_SPEED_100000MBPS, 99 102 }; ··· 262 259 * %AD_LINK_SPEED_100MBPS, 263 260 * %AD_LINK_SPEED_1000MBPS, 264 261 * %AD_LINK_SPEED_2500MBPS, 262 + * %AD_LINK_SPEED_5000MBPS, 265 263 * %AD_LINK_SPEED_10000MBPS 264 + * %AD_LINK_SPEED_14000MBPS, 266 265 * %AD_LINK_SPEED_20000MBPS 267 266 * %AD_LINK_SPEED_25000MBPS 268 267 * %AD_LINK_SPEED_40000MBPS 268 + * %AD_LINK_SPEED_50000MBPS 269 269 * %AD_LINK_SPEED_56000MBPS 270 270 * %AD_LINK_SPEED_100000MBPS 271 271 */ ··· 302 296 speed = AD_LINK_SPEED_2500MBPS; 303 297 break; 304 298 299 + case SPEED_5000: 300 + speed = AD_LINK_SPEED_5000MBPS; 301 + break; 302 + 305 303 case SPEED_10000: 306 304 speed = AD_LINK_SPEED_10000MBPS; 305 + break; 306 + 307 + case SPEED_14000: 308 + speed = AD_LINK_SPEED_14000MBPS; 307 309 break; 308 310 309 311 case SPEED_20000: ··· 324 310 325 311 case SPEED_40000: 326 312 speed = AD_LINK_SPEED_40000MBPS; 313 + break; 314 + 315 + case SPEED_50000: 316 + speed = AD_LINK_SPEED_50000MBPS; 327 317 break; 328 318 329 319 case SPEED_56000: ··· 725 707 case AD_LINK_SPEED_2500MBPS: 726 708 bandwidth = nports * 2500; 727 709 break; 710 + case AD_LINK_SPEED_5000MBPS: 711 + bandwidth = nports * 5000; 712 + break; 728 713 case AD_LINK_SPEED_10000MBPS: 729 714 bandwidth = nports * 10000; 715 + break; 716 + case AD_LINK_SPEED_14000MBPS: 717 + bandwidth = nports * 14000; 730 718 break; 731 719 case AD_LINK_SPEED_20000MBPS: 732 720 bandwidth = nports * 20000; ··· 742 718 break; 743 719 case AD_LINK_SPEED_40000MBPS: 744 720 bandwidth = nports * 40000; 721 + break; 722 + case AD_LINK_SPEED_50000MBPS: 723 + bandwidth = nports * 50000; 745 724 break; 746 725 case AD_LINK_SPEED_56000MBPS: 747 726 bandwidth = nports * 56000;
+3 -3
drivers/net/bonding/bond_main.c
··· 4192 4192 struct bonding *bond = netdev_priv(bond_dev); 4193 4193 if (bond->wq) 4194 4194 destroy_workqueue(bond->wq); 4195 - free_netdev(bond_dev); 4196 4195 } 4197 4196 4198 4197 void bond_setup(struct net_device *bond_dev) ··· 4211 4212 bond_dev->netdev_ops = &bond_netdev_ops; 4212 4213 bond_dev->ethtool_ops = &bond_ethtool_ops; 4213 4214 4214 - bond_dev->destructor = bond_destructor; 4215 + bond_dev->needs_free_netdev = true; 4216 + bond_dev->priv_destructor = bond_destructor; 4215 4217 4216 4218 SET_NETDEV_DEVTYPE(bond_dev, &bond_type); 4217 4219 ··· 4736 4736 4737 4737 rtnl_unlock(); 4738 4738 if (res < 0) 4739 - bond_destructor(bond_dev); 4739 + free_netdev(bond_dev); 4740 4740 return res; 4741 4741 } 4742 4742
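This hunk, like the caif/can/dummy conversions that follow, moves from dev->destructor to the needs_free_netdev/priv_destructor split: the core frees the netdev and the driver callback only releases private state. A minimal sketch of the new convention, with invented my_setup/my_priv_destructor names:

#include <linux/netdevice.h>

static void my_priv_destructor(struct net_device *dev)
{
	/* Release driver-private resources only; never call free_netdev() here. */
}

static void my_setup(struct net_device *dev)
{
	dev->needs_free_netdev = true;			/* core calls free_netdev() */
	dev->priv_destructor = my_priv_destructor;	/* runs just before the free */
}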
+1 -1
drivers/net/caif/caif_hsi.c
··· 1121 1121 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1122 1122 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1123 1123 dev->priv_flags |= IFF_NO_QUEUE; 1124 - dev->destructor = free_netdev; 1124 + dev->needs_free_netdev = true; 1125 1125 dev->netdev_ops = &cfhsi_netdevops; 1126 1126 for (i = 0; i < CFHSI_PRIO_LAST; ++i) 1127 1127 skb_queue_head_init(&cfhsi->qhead[i]);
+1 -1
drivers/net/caif/caif_serial.c
··· 428 428 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 429 429 dev->mtu = CAIF_MAX_MTU; 430 430 dev->priv_flags |= IFF_NO_QUEUE; 431 - dev->destructor = free_netdev; 431 + dev->needs_free_netdev = true; 432 432 skb_queue_head_init(&serdev->head); 433 433 serdev->common.link_select = CAIF_LINK_LOW_LATENCY; 434 434 serdev->common.use_frag = true;
+1 -1
drivers/net/caif/caif_spi.c
··· 712 712 dev->flags = IFF_NOARP | IFF_POINTOPOINT; 713 713 dev->priv_flags |= IFF_NO_QUEUE; 714 714 dev->mtu = SPI_MAX_PAYLOAD_SIZE; 715 - dev->destructor = free_netdev; 715 + dev->needs_free_netdev = true; 716 716 skb_queue_head_init(&cfspi->qhead); 717 717 skb_queue_head_init(&cfspi->chead); 718 718 cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+1 -1
drivers/net/caif/caif_virtio.c
··· 617 617 netdev->tx_queue_len = 100; 618 618 netdev->flags = IFF_POINTOPOINT | IFF_NOARP; 619 619 netdev->mtu = CFV_DEF_MTU_SIZE; 620 - netdev->destructor = free_netdev; 620 + netdev->needs_free_netdev = true; 621 621 } 622 622 623 623 /* Create debugfs counters for the device */
+3
drivers/net/can/dev.c
··· 391 391 can_update_state_error_stats(dev, new_state); 392 392 priv->state = new_state; 393 393 394 + if (!cf) 395 + return; 396 + 394 397 if (unlikely(new_state == CAN_STATE_BUS_OFF)) { 395 398 cf->can_id |= CAN_ERR_BUSOFF; 396 399 return;
+1 -1
drivers/net/can/peak_canfd/peak_canfd.c
··· 489 489 struct pucan_rx_msg *msg_list, int msg_count) 490 490 { 491 491 void *msg_ptr = msg_list; 492 - int i, msg_size; 492 + int i, msg_size = 0; 493 493 494 494 for (i = 0; i < msg_count; i++) { 495 495 msg_size = peak_canfd_handle_msg(priv, msg_ptr);
+3 -4
drivers/net/can/slcan.c
··· 417 417 static void slc_free_netdev(struct net_device *dev) 418 418 { 419 419 int i = dev->base_addr; 420 - free_netdev(dev); 420 + 421 421 slcan_devs[i] = NULL; 422 422 } 423 423 ··· 436 436 static void slc_setup(struct net_device *dev) 437 437 { 438 438 dev->netdev_ops = &slc_netdev_ops; 439 - dev->destructor = slc_free_netdev; 439 + dev->needs_free_netdev = true; 440 + dev->priv_destructor = slc_free_netdev; 440 441 441 442 dev->hard_header_len = 0; 442 443 dev->addr_len = 0; ··· 762 761 if (sl->tty) { 763 762 printk(KERN_ERR "%s: tty discipline still running\n", 764 763 dev->name); 765 - /* Intentionally leak the control block. */ 766 - dev->destructor = NULL; 767 764 } 768 765 769 766 unregister_netdev(dev);
+2
drivers/net/can/usb/gs_usb.c
··· 265 265 sizeof(*dm), 266 266 1000); 267 267 268 + kfree(dm); 269 + 268 270 return rc; 269 271 } 270 272
+1 -3
drivers/net/can/usb/peak_usb/pcan_usb_core.c
··· 908 908 const struct peak_usb_adapter *peak_usb_adapter = NULL; 909 909 int i, err = -ENOMEM; 910 910 911 - usb_dev = interface_to_usbdev(intf); 912 - 913 911 /* get corresponding PCAN-USB adapter */ 914 912 for (i = 0; i < ARRAY_SIZE(peak_usb_adapters_list); i++) 915 913 if (peak_usb_adapters_list[i]->device_id == usb_id_product) { ··· 918 920 if (!peak_usb_adapter) { 919 921 /* should never come except device_id bad usage in this file */ 920 922 pr_err("%s: didn't find device id. 0x%x in devices list\n", 921 - PCAN_USB_DRIVER_NAME, usb_dev->descriptor.idProduct); 923 + PCAN_USB_DRIVER_NAME, usb_id_product); 922 924 return -ENODEV; 923 925 } 924 926
+2 -2
drivers/net/can/vcan.c
··· 152 152 static void vcan_setup(struct net_device *dev) 153 153 { 154 154 dev->type = ARPHRD_CAN; 155 - dev->mtu = CAN_MTU; 155 + dev->mtu = CANFD_MTU; 156 156 dev->hard_header_len = 0; 157 157 dev->addr_len = 0; 158 158 dev->tx_queue_len = 0; ··· 163 163 dev->flags |= IFF_ECHO; 164 164 165 165 dev->netdev_ops = &vcan_netdev_ops; 166 - dev->destructor = free_netdev; 166 + dev->needs_free_netdev = true; 167 167 } 168 168 169 169 static struct rtnl_link_ops vcan_link_ops __read_mostly = {
+2 -2
drivers/net/can/vxcan.c
··· 150 150 static void vxcan_setup(struct net_device *dev) 151 151 { 152 152 dev->type = ARPHRD_CAN; 153 - dev->mtu = CAN_MTU; 153 + dev->mtu = CANFD_MTU; 154 154 dev->hard_header_len = 0; 155 155 dev->addr_len = 0; 156 156 dev->tx_queue_len = 0; 157 157 dev->flags = (IFF_NOARP|IFF_ECHO); 158 158 dev->netdev_ops = &vxcan_netdev_ops; 159 - dev->destructor = free_netdev; 159 + dev->needs_free_netdev = true; 160 160 } 161 161 162 162 /* forward declaration for rtnl_create_link() */
+3 -3
drivers/net/dsa/mv88e6xxx/global2.h
··· 114 114 return -EOPNOTSUPP; 115 115 } 116 116 117 - int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, 118 - int src_port, u16 data) 117 + static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, 118 + int src_dev, int src_port, u16 data) 119 119 { 120 120 return -EOPNOTSUPP; 121 121 } 122 122 123 - int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) 123 + static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) 124 124 { 125 125 return -EOPNOTSUPP; 126 126 }
+2 -2
drivers/net/dummy.c
··· 328 328 struct dummy_priv *priv = netdev_priv(dev); 329 329 330 330 kfree(priv->vfinfo); 331 - free_netdev(dev); 332 331 } 333 332 334 333 static void dummy_setup(struct net_device *dev) ··· 337 338 /* Initialize the device structure. */ 338 339 dev->netdev_ops = &dummy_netdev_ops; 339 340 dev->ethtool_ops = &dummy_ethtool_ops; 340 - dev->destructor = dummy_free_netdev; 341 + dev->needs_free_netdev = true; 342 + dev->priv_destructor = dummy_free_netdev; 341 343 342 344 /* Fill in device structure with ethernet-generic values. */ 343 345 dev->flags |= IFF_NOARP;
+21 -14
drivers/net/ethernet/amazon/ena/ena_com.c
··· 61 61 62 62 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF 63 63 64 + #define ENA_REGS_ADMIN_INTR_MASK 1 65 + 64 66 /*****************************************************************************/ 65 67 /*****************************************************************************/ 66 68 /*****************************************************************************/ ··· 234 232 tail_masked = admin_queue->sq.tail & queue_size_mask; 235 233 236 234 /* In case of queue FULL */ 237 - cnt = admin_queue->sq.tail - admin_queue->sq.head; 235 + cnt = atomic_read(&admin_queue->outstanding_cmds); 238 236 if (cnt >= admin_queue->q_depth) { 239 - pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", 240 - admin_queue->sq.tail, admin_queue->sq.head, 241 - admin_queue->q_depth); 237 + pr_debug("admin queue is full.\n"); 242 238 admin_queue->stats.out_of_space++; 243 239 return ERR_PTR(-ENOSPC); 244 240 } ··· 508 508 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, 509 509 struct ena_com_admin_queue *admin_queue) 510 510 { 511 - unsigned long flags; 512 - u32 start_time; 511 + unsigned long flags, timeout; 513 512 int ret; 514 513 515 - start_time = ((u32)jiffies_to_usecs(jiffies)); 514 + timeout = jiffies + ADMIN_CMD_TIMEOUT_US; 516 515 517 - while (comp_ctx->status == ENA_CMD_SUBMITTED) { 518 - if ((((u32)jiffies_to_usecs(jiffies)) - start_time) > 519 - ADMIN_CMD_TIMEOUT_US) { 516 + while (1) { 517 + spin_lock_irqsave(&admin_queue->q_lock, flags); 518 + ena_com_handle_admin_completion(admin_queue); 519 + spin_unlock_irqrestore(&admin_queue->q_lock, flags); 520 + 521 + if (comp_ctx->status != ENA_CMD_SUBMITTED) 522 + break; 523 + 524 + if (time_is_before_jiffies(timeout)) { 520 525 pr_err("Wait for completion (polling) timeout\n"); 521 526 /* ENA didn't have any completion */ 522 527 spin_lock_irqsave(&admin_queue->q_lock, flags); ··· 532 527 ret = -ETIME; 533 528 goto err; 534 529 } 535 - 536 - spin_lock_irqsave(&admin_queue->q_lock, flags); 537 - ena_com_handle_admin_completion(admin_queue); 538 - spin_unlock_irqrestore(&admin_queue->q_lock, flags); 539 530 540 531 msleep(100); 541 532 } ··· 1456 1455 1457 1456 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) 1458 1457 { 1458 + u32 mask_value = 0; 1459 + 1460 + if (polling) 1461 + mask_value = ENA_REGS_ADMIN_INTR_MASK; 1462 + 1463 + writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); 1459 1464 ena_dev->admin_queue.polling = polling; 1460 1465 } 1461 1466
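The polling loop above is rewritten around an absolute jiffies deadline instead of a manually computed elapsed time; a minimal sketch of that deadline idiom, with an invented is_done() predicate and a 500 ms budget chosen purely for illustration:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int poll_with_timeout(bool (*is_done)(void))
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	while (!is_done()) {
		if (time_is_before_jiffies(timeout))	/* deadline has passed */
			return -ETIME;
		msleep(100);				/* back off between polls */
	}
	return 0;
}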
+1 -1
drivers/net/ethernet/amazon/ena/ena_ethtool.c
··· 80 80 ENA_STAT_TX_ENTRY(tx_poll), 81 81 ENA_STAT_TX_ENTRY(doorbells), 82 82 ENA_STAT_TX_ENTRY(prepare_ctx_err), 83 - ENA_STAT_TX_ENTRY(missing_tx_comp), 84 83 ENA_STAT_TX_ENTRY(bad_req_id), 85 84 }; 86 85 ··· 93 94 ENA_STAT_RX_ENTRY(dma_mapping_err), 94 95 ENA_STAT_RX_ENTRY(bad_desc_num), 95 96 ENA_STAT_RX_ENTRY(rx_copybreak_pkt), 97 + ENA_STAT_RX_ENTRY(empty_rx_ring), 96 98 }; 97 99 98 100 static const struct ena_stats ena_stats_ena_com_strings[] = {
+132 -47
drivers/net/ethernet/amazon/ena/ena_netdev.c
··· 190 190 rxr->sgl_size = adapter->max_rx_sgl_size; 191 191 rxr->smoothed_interval = 192 192 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); 193 + rxr->empty_rx_queue = 0; 193 194 } 194 195 } 195 196 ··· 1079 1078 rx_ring->per_napi_bytes = 0; 1080 1079 } 1081 1080 1081 + static inline void ena_unmask_interrupt(struct ena_ring *tx_ring, 1082 + struct ena_ring *rx_ring) 1083 + { 1084 + struct ena_eth_io_intr_reg intr_reg; 1085 + 1086 + /* Update intr register: rx intr delay, 1087 + * tx intr delay and interrupt unmask 1088 + */ 1089 + ena_com_update_intr_reg(&intr_reg, 1090 + rx_ring->smoothed_interval, 1091 + tx_ring->smoothed_interval, 1092 + true); 1093 + 1094 + /* It is a shared MSI-X. 1095 + * Tx and Rx CQ have pointer to it. 1096 + * So we use one of them to reach the intr reg 1097 + */ 1098 + ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); 1099 + } 1100 + 1082 1101 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, 1083 1102 struct ena_ring *rx_ring) 1084 1103 { ··· 1129 1108 { 1130 1109 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); 1131 1110 struct ena_ring *tx_ring, *rx_ring; 1132 - struct ena_eth_io_intr_reg intr_reg; 1133 1111 1134 1112 u32 tx_work_done; 1135 1113 u32 rx_work_done; ··· 1169 1149 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) 1170 1150 ena_adjust_intr_moderation(rx_ring, tx_ring); 1171 1151 1172 - /* Update intr register: rx intr delay, 1173 - * tx intr delay and interrupt unmask 1174 - */ 1175 - ena_com_update_intr_reg(&intr_reg, 1176 - rx_ring->smoothed_interval, 1177 - tx_ring->smoothed_interval, 1178 - true); 1179 - 1180 - /* It is a shared MSI-X. 1181 - * Tx and Rx CQ have pointer to it. 1182 - * So we use one of them to reach the intr reg 1183 - */ 1184 - ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); 1152 + ena_unmask_interrupt(tx_ring, rx_ring); 1185 1153 } 1186 - 1187 1154 1188 1155 ena_update_ring_numa_node(tx_ring, rx_ring); 1189 1156 ··· 1492 1485 1493 1486 ena_napi_enable_all(adapter); 1494 1487 1488 + /* Enable completion queues interrupt */ 1489 + for (i = 0; i < adapter->num_queues; i++) 1490 + ena_unmask_interrupt(&adapter->tx_ring[i], 1491 + &adapter->rx_ring[i]); 1492 + 1495 1493 /* schedule napi in case we had pending packets 1496 1494 * from the last time we disable napi 1497 1495 */ ··· 1544 1532 "Failed to get TX queue handlers. TX queue num %d rc: %d\n", 1545 1533 qid, rc); 1546 1534 ena_com_destroy_io_queue(ena_dev, ena_qid); 1535 + return rc; 1547 1536 } 1548 1537 1549 1538 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); ··· 1609 1596 "Failed to get RX queue handlers. RX queue num %d rc: %d\n", 1610 1597 qid, rc); 1611 1598 ena_com_destroy_io_queue(ena_dev, ena_qid); 1599 + return rc; 1612 1600 } 1613 1601 1614 1602 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); ··· 1995 1981 1996 1982 tx_info->tx_descs = nb_hw_desc; 1997 1983 tx_info->last_jiffies = jiffies; 1984 + tx_info->print_once = 0; 1998 1985 1999 1986 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, 2000 1987 tx_ring->ring_size); ··· 2565 2550 "Reset attempt failed. 
Can not reset the device\n"); 2566 2551 } 2567 2552 2568 - static void check_for_missing_tx_completions(struct ena_adapter *adapter) 2553 + static int check_missing_comp_in_queue(struct ena_adapter *adapter, 2554 + struct ena_ring *tx_ring) 2569 2555 { 2570 2556 struct ena_tx_buffer *tx_buf; 2571 2557 unsigned long last_jiffies; 2558 + u32 missed_tx = 0; 2559 + int i; 2560 + 2561 + for (i = 0; i < tx_ring->ring_size; i++) { 2562 + tx_buf = &tx_ring->tx_buffer_info[i]; 2563 + last_jiffies = tx_buf->last_jiffies; 2564 + if (unlikely(last_jiffies && 2565 + time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { 2566 + if (!tx_buf->print_once) 2567 + netif_notice(adapter, tx_err, adapter->netdev, 2568 + "Found a Tx that wasn't completed on time, qid %d, index %d.\n", 2569 + tx_ring->qid, i); 2570 + 2571 + tx_buf->print_once = 1; 2572 + missed_tx++; 2573 + 2574 + if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { 2575 + netif_err(adapter, tx_err, adapter->netdev, 2576 + "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", 2577 + missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); 2578 + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2579 + return -EIO; 2580 + } 2581 + } 2582 + } 2583 + 2584 + return 0; 2585 + } 2586 + 2587 + static void check_for_missing_tx_completions(struct ena_adapter *adapter) 2588 + { 2572 2589 struct ena_ring *tx_ring; 2573 - int i, j, budget; 2574 - u32 missed_tx; 2590 + int i, budget, rc; 2575 2591 2576 2592 /* Make sure the driver doesn't turn the device in other process */ 2577 2593 smp_rmb(); ··· 2618 2572 for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { 2619 2573 tx_ring = &adapter->tx_ring[i]; 2620 2574 2621 - for (j = 0; j < tx_ring->ring_size; j++) { 2622 - tx_buf = &tx_ring->tx_buffer_info[j]; 2623 - last_jiffies = tx_buf->last_jiffies; 2624 - if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { 2625 - netif_notice(adapter, tx_err, adapter->netdev, 2626 - "Found a Tx that wasn't completed on time, qid %d, index %d.\n", 2627 - tx_ring->qid, j); 2628 - 2629 - u64_stats_update_begin(&tx_ring->syncp); 2630 - missed_tx = tx_ring->tx_stats.missing_tx_comp++; 2631 - u64_stats_update_end(&tx_ring->syncp); 2632 - 2633 - /* Clear last jiffies so the lost buffer won't 2634 - * be counted twice. 2635 - */ 2636 - tx_buf->last_jiffies = 0; 2637 - 2638 - if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { 2639 - netif_err(adapter, tx_err, adapter->netdev, 2640 - "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n", 2641 - missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); 2642 - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); 2643 - } 2644 - } 2645 - } 2575 + rc = check_missing_comp_in_queue(adapter, tx_ring); 2576 + if (unlikely(rc)) 2577 + return; 2646 2578 2647 2579 budget--; 2648 2580 if (!budget) ··· 2628 2604 } 2629 2605 2630 2606 adapter->last_monitored_tx_qid = i % adapter->num_queues; 2607 + } 2608 + 2609 + /* trigger napi schedule after 2 consecutive detections */ 2610 + #define EMPTY_RX_REFILL 2 2611 + /* For the rare case where the device runs out of Rx descriptors and the 2612 + * napi handler failed to refill new Rx descriptors (due to a lack of memory 2613 + * for example). 2614 + * This case will lead to a deadlock: 2615 + * The device won't send interrupts since all the new Rx packets will be dropped 2616 + * The napi handler won't allocate new Rx descriptors so the device will be 2617 + * able to send new packets. 
2618 + * 2619 + * This scenario can happen when the kernel's vm.min_free_kbytes is too small. 2620 + * It is recommended to have at least 512MB, with a minimum of 128MB for 2621 + * constrained environment). 2622 + * 2623 + * When such a situation is detected - Reschedule napi 2624 + */ 2625 + static void check_for_empty_rx_ring(struct ena_adapter *adapter) 2626 + { 2627 + struct ena_ring *rx_ring; 2628 + int i, refill_required; 2629 + 2630 + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) 2631 + return; 2632 + 2633 + if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) 2634 + return; 2635 + 2636 + for (i = 0; i < adapter->num_queues; i++) { 2637 + rx_ring = &adapter->rx_ring[i]; 2638 + 2639 + refill_required = 2640 + ena_com_sq_empty_space(rx_ring->ena_com_io_sq); 2641 + if (unlikely(refill_required == (rx_ring->ring_size - 1))) { 2642 + rx_ring->empty_rx_queue++; 2643 + 2644 + if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { 2645 + u64_stats_update_begin(&rx_ring->syncp); 2646 + rx_ring->rx_stats.empty_rx_ring++; 2647 + u64_stats_update_end(&rx_ring->syncp); 2648 + 2649 + netif_err(adapter, drv, adapter->netdev, 2650 + "trigger refill for ring %d\n", i); 2651 + 2652 + napi_schedule(rx_ring->napi); 2653 + rx_ring->empty_rx_queue = 0; 2654 + } 2655 + } else { 2656 + rx_ring->empty_rx_queue = 0; 2657 + } 2658 + } 2631 2659 } 2632 2660 2633 2661 /* Check for keep alive expiration */ ··· 2735 2659 check_for_admin_com_state(adapter); 2736 2660 2737 2661 check_for_missing_tx_completions(adapter); 2662 + 2663 + check_for_empty_rx_ring(adapter); 2738 2664 2739 2665 if (debug_area) 2740 2666 ena_dump_stats_to_buf(adapter, debug_area); ··· 2918 2840 { 2919 2841 int release_bars; 2920 2842 2843 + if (ena_dev->mem_bar) 2844 + devm_iounmap(&pdev->dev, ena_dev->mem_bar); 2845 + 2846 + devm_iounmap(&pdev->dev, ena_dev->reg_bar); 2847 + 2921 2848 release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; 2922 2849 pci_release_selected_regions(pdev, release_bars); 2923 2850 } ··· 3010 2927 goto err_free_ena_dev; 3011 2928 } 3012 2929 3013 - ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), 3014 - pci_resource_len(pdev, ENA_REG_BAR)); 2930 + ena_dev->reg_bar = devm_ioremap(&pdev->dev, 2931 + pci_resource_start(pdev, ENA_REG_BAR), 2932 + pci_resource_len(pdev, ENA_REG_BAR)); 3015 2933 if (!ena_dev->reg_bar) { 3016 2934 dev_err(&pdev->dev, "failed to remap regs bar\n"); 3017 2935 rc = -EFAULT; ··· 3032 2948 ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); 3033 2949 3034 2950 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { 3035 - ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), 3036 - pci_resource_len(pdev, ENA_MEM_BAR)); 2951 + ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, 2952 + pci_resource_start(pdev, ENA_MEM_BAR), 2953 + pci_resource_len(pdev, ENA_MEM_BAR)); 3037 2954 if (!ena_dev->mem_bar) { 3038 2955 rc = -EFAULT; 3039 2956 goto err_device_destroy;
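Among other fixes, the probe path above switches the BAR mappings to the managed devm_ioremap()/devm_ioremap_wc() helpers, paired with devm_iounmap() on the early-release path; a minimal sketch of that managed-mapping idiom, assuming BAR 0 purely for illustration:

#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *map_reg_bar(struct pci_dev *pdev)
{
	/* The mapping is released automatically when the driver detaches,
	 * or earlier with devm_iounmap() if the BAR must go away sooner.
	 */
	return devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
}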
+15 -3
drivers/net/ethernet/amazon/ena/ena_netdev.h
··· 45 45 46 46 #define DRV_MODULE_VER_MAJOR 1 47 47 #define DRV_MODULE_VER_MINOR 1 48 - #define DRV_MODULE_VER_SUBMINOR 2 48 + #define DRV_MODULE_VER_SUBMINOR 7 49 49 50 50 #define DRV_MODULE_NAME "ena" 51 51 #ifndef DRV_MODULE_VERSION ··· 146 146 u32 tx_descs; 147 147 /* num of buffers used by this skb */ 148 148 u32 num_of_bufs; 149 - /* Save the last jiffies to detect missing tx packets */ 149 + 150 + /* Used for detect missing tx packets to limit the number of prints */ 151 + u32 print_once; 152 + /* Save the last jiffies to detect missing tx packets 153 + * 154 + * sets to non zero value on ena_start_xmit and set to zero on 155 + * napi and timer_Service_routine. 156 + * 157 + * while this value is not protected by lock, 158 + * a given packet is not expected to be handled by ena_start_xmit 159 + * and by napi/timer_service at the same time. 160 + */ 150 161 unsigned long last_jiffies; 151 162 struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; 152 163 } ____cacheline_aligned; ··· 181 170 u64 napi_comp; 182 171 u64 tx_poll; 183 172 u64 doorbells; 184 - u64 missing_tx_comp; 185 173 u64 bad_req_id; 186 174 }; 187 175 ··· 194 184 u64 dma_mapping_err; 195 185 u64 bad_desc_num; 196 186 u64 rx_copybreak_pkt; 187 + u64 empty_rx_ring; 197 188 }; 198 189 199 190 struct ena_ring { ··· 242 231 struct ena_stats_tx tx_stats; 243 232 struct ena_stats_rx rx_stats; 244 233 }; 234 + int empty_rx_queue; 245 235 } ____cacheline_aligned; 246 236 247 237 struct ena_stats_dev {
+2 -3
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
··· 324 324 struct xgbe_ring *ring, 325 325 struct xgbe_ring_data *rdata) 326 326 { 327 - int order, ret; 327 + int ret; 328 328 329 329 if (!ring->rx_hdr_pa.pages) { 330 330 ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); ··· 333 333 } 334 334 335 335 if (!ring->rx_buf_pa.pages) { 336 - order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); 337 336 ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, 338 - order); 337 + PAGE_ALLOC_COSTLY_ORDER); 339 338 if (ret) 340 339 return ret; 341 340 }
-3
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
··· 193 193 struct aq_hw_caps_s *aq_hw_caps, 194 194 u32 *regs_buff); 195 195 196 - int hw_atl_utils_hw_get_settings(struct aq_hw_s *self, 197 - struct ethtool_cmd *cmd); 198 - 199 196 int hw_atl_utils_hw_set_power(struct aq_hw_s *self, 200 197 unsigned int power_state); 201 198
+5 -2
drivers/net/ethernet/broadcom/bcmsysport.c
··· 2026 2026 priv->num_rx_desc_words = params->num_rx_desc_words; 2027 2027 2028 2028 priv->irq0 = platform_get_irq(pdev, 0); 2029 - if (!priv->is_lite) 2029 + if (!priv->is_lite) { 2030 2030 priv->irq1 = platform_get_irq(pdev, 1); 2031 - priv->wol_irq = platform_get_irq(pdev, 2); 2031 + priv->wol_irq = platform_get_irq(pdev, 2); 2032 + } else { 2033 + priv->wol_irq = platform_get_irq(pdev, 1); 2034 + } 2032 2035 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { 2033 2036 dev_err(&pdev->dev, "invalid interrupts\n"); 2034 2037 ret = -EINVAL;
+16 -5
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 1926 1926 } 1927 1927 1928 1928 /* select a non-FCoE queue */ 1929 - return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); 1929 + return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); 1930 1930 } 1931 1931 1932 1932 void bnx2x_set_num_queues(struct bnx2x *bp) ··· 3883 3883 /* when transmitting in a vf, start bd must hold the ethertype 3884 3884 * for fw to enforce it 3885 3885 */ 3886 + u16 vlan_tci = 0; 3886 3887 #ifndef BNX2X_STOP_ON_ERROR 3887 - if (IS_VF(bp)) 3888 + if (IS_VF(bp)) { 3888 3889 #endif 3889 - tx_start_bd->vlan_or_ethertype = 3890 - cpu_to_le16(ntohs(eth->h_proto)); 3890 + /* Still need to consider inband vlan for enforced */ 3891 + if (__vlan_get_tag(skb, &vlan_tci)) { 3892 + tx_start_bd->vlan_or_ethertype = 3893 + cpu_to_le16(ntohs(eth->h_proto)); 3894 + } else { 3895 + tx_start_bd->bd_flags.as_bitfield |= 3896 + (X_ETH_INBAND_VLAN << 3897 + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); 3898 + tx_start_bd->vlan_or_ethertype = 3899 + cpu_to_le16(vlan_tci); 3900 + } 3891 3901 #ifndef BNX2X_STOP_ON_ERROR 3892 - else 3902 + } else { 3893 3903 /* used by FW for packet accounting */ 3894 3904 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); 3905 + } 3895 3906 #endif 3896 3907 } 3897 3908
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 12729 12729 } else { 12730 12730 /* If no mc addresses are required, flush the configuration */ 12731 12731 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL); 12732 - if (rc) 12732 + if (rc < 0) 12733 12733 BNX2X_ERR("Failed to clear multicast configuration %d\n", 12734 12734 rc); 12735 12735 }
+13 -2
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
··· 901 901 /* release VF resources */ 902 902 bnx2x_vf_free_resc(bp, vf); 903 903 904 + vf->malicious = false; 905 + 904 906 /* re-open the mailbox */ 905 907 bnx2x_vf_enable_mbx(bp, vf->abs_vfid); 906 908 return; ··· 1824 1822 vf->abs_vfid, qidx); 1825 1823 bnx2x_vf_handle_rss_update_eqe(bp, vf); 1826 1824 case EVENT_RING_OPCODE_VF_FLR: 1827 - case EVENT_RING_OPCODE_MALICIOUS_VF: 1828 1825 /* Do nothing for now */ 1826 + return 0; 1827 + case EVENT_RING_OPCODE_MALICIOUS_VF: 1828 + vf->malicious = true; 1829 1829 return 0; 1830 1830 } 1831 1831 ··· 1905 1901 if (vf->state != VF_ENABLED) { 1906 1902 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1907 1903 "vf %d not enabled so no stats for it\n", 1904 + vf->abs_vfid); 1905 + continue; 1906 + } 1907 + 1908 + if (vf->malicious) { 1909 + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), 1910 + "vf %d malicious so no stats for it\n", 1908 1911 vf->abs_vfid); 1909 1912 continue; 1910 1913 } ··· 3053 3042 { 3054 3043 BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, 3055 3044 sizeof(struct bnx2x_vf_mbx_msg)); 3056 - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, 3045 + BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping, 3057 3046 sizeof(union pf_vf_bulletin)); 3058 3047 } 3059 3048
+1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
··· 141 141 #define VF_RESET 3 /* VF FLR'd, pending cleanup */ 142 142 143 143 bool flr_clnup_stage; /* true during flr cleanup */ 144 + bool malicious; /* true if FW indicated so, until FLR */ 144 145 145 146 /* dma */ 146 147 dma_addr_t fw_stat_map;
+52 -9
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 1301 1301 cp_cons = NEXT_CMP(cp_cons); 1302 1302 } 1303 1303 1304 - if (unlikely(agg_bufs > MAX_SKB_FRAGS)) { 1304 + if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { 1305 1305 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); 1306 - netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1307 - agg_bufs, (int)MAX_SKB_FRAGS); 1306 + if (agg_bufs > MAX_SKB_FRAGS) 1307 + netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", 1308 + agg_bufs, (int)MAX_SKB_FRAGS); 1308 1309 return NULL; 1309 1310 } 1310 1311 ··· 1563 1562 return rc; 1564 1563 } 1565 1564 1565 + /* In netpoll mode, if we are using a combined completion ring, we need to 1566 + * discard the rx packets and recycle the buffers. 1567 + */ 1568 + static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi, 1569 + u32 *raw_cons, u8 *event) 1570 + { 1571 + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; 1572 + u32 tmp_raw_cons = *raw_cons; 1573 + struct rx_cmp_ext *rxcmp1; 1574 + struct rx_cmp *rxcmp; 1575 + u16 cp_cons; 1576 + u8 cmp_type; 1577 + 1578 + cp_cons = RING_CMP(tmp_raw_cons); 1579 + rxcmp = (struct rx_cmp *) 1580 + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1581 + 1582 + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); 1583 + cp_cons = RING_CMP(tmp_raw_cons); 1584 + rxcmp1 = (struct rx_cmp_ext *) 1585 + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; 1586 + 1587 + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) 1588 + return -EBUSY; 1589 + 1590 + cmp_type = RX_CMP_TYPE(rxcmp); 1591 + if (cmp_type == CMP_TYPE_RX_L2_CMP) { 1592 + rxcmp1->rx_cmp_cfa_code_errors_v2 |= 1593 + cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); 1594 + } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { 1595 + struct rx_tpa_end_cmp_ext *tpa_end1; 1596 + 1597 + tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; 1598 + tpa_end1->rx_tpa_end_cmp_errors_v2 |= 1599 + cpu_to_le32(RX_TPA_END_CMP_ERRORS); 1600 + } 1601 + return bnxt_rx_pkt(bp, bnapi, raw_cons, event); 1602 + } 1603 + 1566 1604 #define BNXT_GET_EVENT_PORT(data) \ 1567 1605 ((data) & \ 1568 1606 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) ··· 1784 1744 if (unlikely(tx_pkts > bp->tx_wake_thresh)) 1785 1745 rx_pkts = budget; 1786 1746 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { 1787 - rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1747 + if (likely(budget)) 1748 + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event); 1749 + else 1750 + rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons, 1751 + &event); 1788 1752 if (likely(rc >= 0)) 1789 1753 rx_pkts += rc; 1790 1754 else if (rc == -EBUSY) /* partial completion */ ··· 6707 6663 struct bnxt *bp = netdev_priv(dev); 6708 6664 int i; 6709 6665 6710 - for (i = 0; i < bp->cp_nr_rings; i++) { 6711 - struct bnxt_irq *irq = &bp->irq_tbl[i]; 6666 + /* Only process tx rings/combined rings in netpoll mode. */ 6667 + for (i = 0; i < bp->tx_nr_rings; i++) { 6668 + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; 6712 6669 6713 - disable_irq(irq->vector); 6714 - irq->handler(irq->vector, bp->bnapi[i]); 6715 - enable_irq(irq->vector); 6670 + napi_schedule(&txr->bnapi->napi); 6716 6671 } 6717 6672 } 6718 6673 #endif
+5 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 374 374 375 375 __le32 rx_tpa_end_cmp_errors_v2; 376 376 #define RX_TPA_END_CMP_V2 (0x1 << 0) 377 - #define RX_TPA_END_CMP_ERRORS (0x7fff << 1) 377 + #define RX_TPA_END_CMP_ERRORS (0x3 << 1) 378 378 #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 379 379 380 380 u32 rx_tpa_end_cmp_start_opaque; 381 381 }; 382 + 383 + #define TPA_END_ERRORS(rx_tpa_end_ext) \ 384 + ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ 385 + cpu_to_le32(RX_TPA_END_CMP_ERRORS)) 382 386 383 387 #define DB_IDX_MASK 0xffffff 384 388 #define DB_IDX_VALID (0x1 << 26)
+23 -12
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 2171 2171 { 2172 2172 int err; 2173 2173 2174 + mutex_lock(&uld_mutex); 2174 2175 err = setup_sge_queues(adap); 2175 2176 if (err) 2176 - goto out; 2177 + goto rel_lock; 2177 2178 err = setup_rss(adap); 2178 2179 if (err) 2179 2180 goto freeq; ··· 2197 2196 if (err) 2198 2197 goto irq_err; 2199 2198 } 2199 + 2200 2200 enable_rx(adap); 2201 2201 t4_sge_start(adap); 2202 2202 t4_intr_enable(adap); 2203 2203 adap->flags |= FULL_INIT_DONE; 2204 + mutex_unlock(&uld_mutex); 2205 + 2204 2206 notify_ulds(adap, CXGB4_STATE_UP); 2205 2207 #if IS_ENABLED(CONFIG_IPV6) 2206 2208 update_clip(adap); 2207 2209 #endif 2208 2210 /* Initialize hash mac addr list*/ 2209 2211 INIT_LIST_HEAD(&adap->mac_hlist); 2210 - out: 2211 2212 return err; 2213 + 2212 2214 irq_err: 2213 2215 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); 2214 2216 freeq: 2215 2217 t4_free_sge_resources(adap); 2216 - goto out; 2218 + rel_lock: 2219 + mutex_unlock(&uld_mutex); 2220 + return err; 2217 2221 } 2218 2222 2219 2223 static void cxgb_down(struct adapter *adapter) ··· 2776 2770 void t4_fatal_err(struct adapter *adap) 2777 2771 { 2778 2772 int port; 2773 + 2774 + if (pci_channel_offline(adap->pdev)) 2775 + return; 2779 2776 2780 2777 /* Disable the SGE since ULDs are going to free resources that 2781 2778 * could be exposed to the adapter. RDMA MWs for example... ··· 3891 3882 spin_lock(&adap->stats_lock); 3892 3883 for_each_port(adap, i) { 3893 3884 struct net_device *dev = adap->port[i]; 3894 - 3895 - netif_device_detach(dev); 3896 - netif_carrier_off(dev); 3885 + if (dev) { 3886 + netif_device_detach(dev); 3887 + netif_carrier_off(dev); 3888 + } 3897 3889 } 3898 3890 spin_unlock(&adap->stats_lock); 3899 3891 disable_interrupts(adap); ··· 3973 3963 rtnl_lock(); 3974 3964 for_each_port(adap, i) { 3975 3965 struct net_device *dev = adap->port[i]; 3976 - 3977 - if (netif_running(dev)) { 3978 - link_start(dev); 3979 - cxgb_set_rxmode(dev); 3966 + if (dev) { 3967 + if (netif_running(dev)) { 3968 + link_start(dev); 3969 + cxgb_set_rxmode(dev); 3970 + } 3971 + netif_device_attach(dev); 3980 3972 } 3981 - netif_device_attach(dev); 3982 3973 } 3983 3974 rtnl_unlock(); 3984 3975 } ··· 4527 4516 /* Initialize the device structure. */ 4528 4517 dev->netdev_ops = &cxgb4_mgmt_netdev_ops; 4529 4518 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; 4530 - dev->destructor = free_netdev; 4519 + dev->needs_free_netdev = true; 4531 4520 } 4532 4521 4533 4522 static int config_mgmt_dev(struct pci_dev *pdev)
+7 -2
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 4557 4557 */ 4558 4558 void t4_intr_disable(struct adapter *adapter) 4559 4559 { 4560 - u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A); 4561 - u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? 4560 + u32 whoami, pf; 4561 + 4562 + if (pci_channel_offline(adapter->pdev)) 4563 + return; 4564 + 4565 + whoami = t4_read_reg(adapter, PL_WHOAMI_A); 4566 + pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? 4562 4567 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); 4563 4568 4564 4569 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
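The added pci_channel_offline() test avoids touching the hardware while the device is in EEH/AER error recovery; a minimal sketch of that guard, with an invented register pointer rather than the cxgb4 register map:

#include <linux/io.h>
#include <linux/pci.h>

static void my_intr_disable(struct pci_dev *pdev, void __iomem *intr_enable_reg)
{
	if (pci_channel_offline(pdev))	/* device unreachable, skip MMIO */
		return;

	writel(0, intr_enable_reg);	/* hypothetical "disable interrupts" write */
}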
+3 -3
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
··· 37 37 38 38 #define T4FW_VERSION_MAJOR 0x01 39 39 #define T4FW_VERSION_MINOR 0x10 40 - #define T4FW_VERSION_MICRO 0x2B 40 + #define T4FW_VERSION_MICRO 0x2D 41 41 #define T4FW_VERSION_BUILD 0x00 42 42 43 43 #define T4FW_MIN_VERSION_MAJOR 0x01 ··· 46 46 47 47 #define T5FW_VERSION_MAJOR 0x01 48 48 #define T5FW_VERSION_MINOR 0x10 49 - #define T5FW_VERSION_MICRO 0x2B 49 + #define T5FW_VERSION_MICRO 0x2D 50 50 #define T5FW_VERSION_BUILD 0x00 51 51 52 52 #define T5FW_MIN_VERSION_MAJOR 0x00 ··· 55 55 56 56 #define T6FW_VERSION_MAJOR 0x01 57 57 #define T6FW_VERSION_MINOR 0x10 58 - #define T6FW_VERSION_MICRO 0x2B 58 + #define T6FW_VERSION_MICRO 0x2D 59 59 #define T6FW_VERSION_BUILD 0x00 60 60 61 61 #define T6FW_MIN_VERSION_MAJOR 0x00
+2 -1
drivers/net/ethernet/ethoc.c
··· 739 739 if (ret) 740 740 return ret; 741 741 742 + napi_enable(&priv->napi); 743 + 742 744 ethoc_init_ring(priv, dev->mem_start); 743 745 ethoc_reset(priv); 744 746 ··· 756 754 priv->old_duplex = -1; 757 755 758 756 phy_start(dev->phydev); 759 - napi_enable(&priv->napi); 760 757 761 758 if (netif_msg_ifup(priv)) { 762 759 dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
+1 -1
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 2647 2647 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ 2648 2648 2649 2649 /* device used for DMA mapping */ 2650 - arch_setup_dma_ops(dev, 0, 0, NULL, false); 2650 + set_dma_ops(dev, get_dma_ops(&pdev->dev)); 2651 2651 err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); 2652 2652 if (err) { 2653 2653 dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
+1
drivers/net/ethernet/freescale/fman/Kconfig
··· 2 2 tristate "FMan support" 3 3 depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST 4 4 select GENERIC_ALLOCATOR 5 + depends on HAS_DMA 5 6 select PHYLIB 6 7 default n 7 8 help
+2
drivers/net/ethernet/freescale/fman/mac.c
··· 623 623 goto no_mem; 624 624 } 625 625 626 + set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); 627 + 626 628 ret = platform_device_add_data(pdev, &data, sizeof(data)); 627 629 if (ret) 628 630 goto err;
+8 -1
drivers/net/ethernet/freescale/fsl_pq_mdio.c
··· 381 381 { 382 382 const struct of_device_id *id = 383 383 of_match_device(fsl_pq_mdio_match, &pdev->dev); 384 - const struct fsl_pq_mdio_data *data = id->data; 384 + const struct fsl_pq_mdio_data *data; 385 385 struct device_node *np = pdev->dev.of_node; 386 386 struct resource res; 387 387 struct device_node *tbi; 388 388 struct fsl_pq_mdio_priv *priv; 389 389 struct mii_bus *new_bus; 390 390 int err; 391 + 392 + if (!id) { 393 + dev_err(&pdev->dev, "Failed to match device\n"); 394 + return -ENODEV; 395 + } 396 + 397 + data = id->data; 391 398 392 399 dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); 393 400
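The probe fix above checks the of_match_device() result before dereferencing id->data; a minimal sketch of that NULL-check pattern in a generic platform probe, with invented names:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	const struct of_device_id *id =
		of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);

	if (!id)		/* no matching compatible entry */
		return -ENODEV;

	/* id->data is only safe to use from here on */
	return 0;
}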
+14 -2
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
··· 288 288 289 289 /* Force 1000M Link, Default is 0x0200 */ 290 290 phy_write(phy_dev, 7, 0x20C); 291 - phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); 292 291 293 - /* Enable PHY loop-back */ 292 + /* Powerup Fiber */ 293 + phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); 294 + val = phy_read(phy_dev, COPPER_CONTROL_REG); 295 + val &= ~PHY_POWER_DOWN; 296 + phy_write(phy_dev, COPPER_CONTROL_REG, val); 297 + 298 + /* Enable Phy Loopback */ 299 + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); 294 300 val = phy_read(phy_dev, COPPER_CONTROL_REG); 295 301 val |= PHY_LOOP_BACK; 296 302 val &= ~PHY_POWER_DOWN; ··· 305 299 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); 306 300 phy_write(phy_dev, 1, 0x400); 307 301 phy_write(phy_dev, 7, 0x200); 302 + 303 + phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); 304 + val = phy_read(phy_dev, COPPER_CONTROL_REG); 305 + val |= PHY_POWER_DOWN; 306 + phy_write(phy_dev, COPPER_CONTROL_REG, val); 307 + 308 308 phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); 309 309 phy_write(phy_dev, 9, 0xF00); 310 310
+40 -27
drivers/net/ethernet/ibm/emac/core.c
··· 343 343 { 344 344 struct emac_regs __iomem *p = dev->emacp; 345 345 int n = 20; 346 + bool __maybe_unused try_internal_clock = false; 346 347 347 348 DBG(dev, "reset" NL); 348 349 ··· 356 355 } 357 356 358 357 #ifdef CONFIG_PPC_DCR_NATIVE 358 + do_retry: 359 359 /* 360 360 * PPC460EX/GT Embedded Processor Advanced User's Manual 361 361 * section 28.10.1 Mode Register 0 (EMACx_MR0) states: ··· 364 362 * of the EMAC. If none is present, select the internal clock 365 363 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). 366 364 * After a soft reset, select the external clock. 365 + * 366 + * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the 367 + * ethernet cable is not attached. This causes the reset to timeout 368 + * and the PHY detection code in emac_init_phy() is unable to 369 + * communicate and detect the AR8035-A PHY. As a result, the emac 370 + * driver bails out early and the user has no ethernet. 371 + * In order to stay compatible with existing configurations, the 372 + * driver will temporarily switch to the internal clock, after 373 + * the first reset fails. 367 374 */ 368 375 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { 369 - if (dev->phy_address == 0xffffffff && 370 - dev->phy_map == 0xffffffff) { 376 + if (try_internal_clock || (dev->phy_address == 0xffffffff && 377 + dev->phy_map == 0xffffffff)) { 371 378 /* No PHY: select internal loop clock before reset */ 372 379 dcri_clrset(SDR0, SDR0_ETH_CFG, 373 380 0, SDR0_ETH_CFG_ECS << dev->cell_index); ··· 394 383 395 384 #ifdef CONFIG_PPC_DCR_NATIVE 396 385 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { 397 - if (dev->phy_address == 0xffffffff && 398 - dev->phy_map == 0xffffffff) { 386 + if (!n && !try_internal_clock) { 387 + /* first attempt has timed out. */ 388 + n = 20; 389 + try_internal_clock = true; 390 + goto do_retry; 391 + } 392 + 393 + if (try_internal_clock || (dev->phy_address == 0xffffffff && 394 + dev->phy_map == 0xffffffff)) { 399 395 /* No PHY: restore external clock source after reset */ 400 396 dcri_clrset(SDR0, SDR0_ETH_CFG, 401 397 SDR0_ETH_CFG_ECS << dev->cell_index, 0); ··· 2478 2460 return emac_reset(dev); 2479 2461 } 2480 2462 2463 + static int emac_mdio_phy_start_aneg(struct mii_phy *phy, 2464 + struct phy_device *phy_dev) 2465 + { 2466 + phy_dev->autoneg = phy->autoneg; 2467 + phy_dev->speed = phy->speed; 2468 + phy_dev->duplex = phy->duplex; 2469 + phy_dev->advertising = phy->advertising; 2470 + return phy_start_aneg(phy_dev); 2471 + } 2472 + 2481 2473 static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) 2482 2474 { 2483 2475 struct net_device *ndev = phy->dev; 2484 2476 struct emac_instance *dev = netdev_priv(ndev); 2485 2477 2486 - dev->phy.autoneg = AUTONEG_ENABLE; 2487 - dev->phy.speed = SPEED_1000; 2488 - dev->phy.duplex = DUPLEX_FULL; 2489 - dev->phy.advertising = advertise; 2490 2478 phy->autoneg = AUTONEG_ENABLE; 2491 - phy->speed = dev->phy.speed; 2492 - phy->duplex = dev->phy.duplex; 2493 2479 phy->advertising = advertise; 2494 - return phy_start_aneg(dev->phy_dev); 2480 + return emac_mdio_phy_start_aneg(phy, dev->phy_dev); 2495 2481 } 2496 2482 2497 2483 static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) ··· 2503 2481 struct net_device *ndev = phy->dev; 2504 2482 struct emac_instance *dev = netdev_priv(ndev); 2505 2483 2506 - dev->phy.autoneg = AUTONEG_DISABLE; 2507 - dev->phy.speed = speed; 2508 - dev->phy.duplex = fd; 2509 2484 phy->autoneg = AUTONEG_DISABLE; 2510 2485 phy->speed = speed; 2511 2486 phy->duplex = fd; 2512 - 
return phy_start_aneg(dev->phy_dev); 2487 + return emac_mdio_phy_start_aneg(phy, dev->phy_dev); 2513 2488 } 2514 2489 2515 2490 static int emac_mdio_poll_link(struct mii_phy *phy) ··· 2528 2509 { 2529 2510 struct net_device *ndev = phy->dev; 2530 2511 struct emac_instance *dev = netdev_priv(ndev); 2512 + struct phy_device *phy_dev = dev->phy_dev; 2531 2513 int res; 2532 2514 2533 - res = phy_read_status(dev->phy_dev); 2515 + res = phy_read_status(phy_dev); 2534 2516 if (res) 2535 2517 return res; 2536 2518 2537 - dev->phy.speed = phy->speed; 2538 - dev->phy.duplex = phy->duplex; 2539 - dev->phy.pause = phy->pause; 2540 - dev->phy.asym_pause = phy->asym_pause; 2519 + phy->speed = phy_dev->speed; 2520 + phy->duplex = phy_dev->duplex; 2521 + phy->pause = phy_dev->pause; 2522 + phy->asym_pause = phy_dev->asym_pause; 2541 2523 return 0; 2542 2524 } 2543 2525 ··· 2548 2528 struct emac_instance *dev = netdev_priv(ndev); 2549 2529 2550 2530 phy_start(dev->phy_dev); 2551 - dev->phy.autoneg = phy->autoneg; 2552 - dev->phy.speed = phy->speed; 2553 - dev->phy.duplex = phy->duplex; 2554 - dev->phy.advertising = phy->advertising; 2555 - dev->phy.pause = phy->pause; 2556 - dev->phy.asym_pause = phy->asym_pause; 2557 - 2558 2531 return phy_init_hw(dev->phy_dev); 2559 2532 } 2560 2533
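The emac hunk above retries the soft reset once on the internal clock when the first attempt times out (the AR8035-A/Meraki MR24 case described in the comment). A minimal standalone sketch of that retry-with-fallback shape; the names and the fake "hardware" below are illustrative, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the hardware: pretend the reset only completes on fallback. */
static bool soft_reset_completes(bool internal_clock)
{
        return internal_clock;
}

static int reset_with_fallback(void)
{
        bool try_internal_clock = false;
        int budget;

retry:
        if (try_internal_clock)
                printf("selecting internal clock before reset\n");

        for (budget = 20; budget > 0; budget--)
                if (soft_reset_completes(try_internal_clock))
                        break;

        if (!budget && !try_internal_clock) {
                /* first attempt timed out: retry exactly once on the internal clock */
                try_internal_clock = true;
                goto retry;
        }

        return budget ? 0 : -1;
}

int main(void)
{
        return reset_with_fallback() ? 1 : 0;
}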
+7 -1
drivers/net/ethernet/ibm/ibmvnic.c
··· 81 81 static const char ibmvnic_driver_name[] = "ibmvnic"; 82 82 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; 83 83 84 - MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>"); 84 + MODULE_AUTHOR("Santiago Leon"); 85 85 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); 86 86 MODULE_LICENSE("GPL"); 87 87 MODULE_VERSION(IBMVNIC_DRIVER_VERSION); ··· 1468 1468 } 1469 1469 #endif 1470 1470 1471 + static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) 1472 + { 1473 + return -EOPNOTSUPP; 1474 + } 1475 + 1471 1476 static const struct net_device_ops ibmvnic_netdev_ops = { 1472 1477 .ndo_open = ibmvnic_open, 1473 1478 .ndo_stop = ibmvnic_close, ··· 1484 1479 #ifdef CONFIG_NET_POLL_CONTROLLER 1485 1480 .ndo_poll_controller = ibmvnic_netpoll_controller, 1486 1481 #endif 1482 + .ndo_change_mtu = ibmvnic_change_mtu, 1487 1483 }; 1488 1484 1489 1485 /* ethtool functions */
+1
drivers/net/ethernet/intel/i40e/i40e.h
··· 399 399 #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) 400 400 #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) 401 401 #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) 402 + #define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4) 402 403 #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) 403 404 #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) 404 405 #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
+2 -2
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
··· 224 224 I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), 225 225 I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), 226 226 I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), 227 - I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0), 227 + I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), 228 228 I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), 229 229 }; 230 230 ··· 4092 4092 4093 4093 /* Only allow ATR evict on hardware that is capable of handling it */ 4094 4094 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) 4095 - pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; 4095 + pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED; 4096 4096 4097 4097 if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { 4098 4098 u16 sw_flags = 0, valid_flags = 0;
+22 -21
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 295 295 **/ 296 296 void i40e_service_event_schedule(struct i40e_pf *pf) 297 297 { 298 - if (!test_bit(__I40E_VSI_DOWN, pf->state) && 298 + if (!test_bit(__I40E_DOWN, pf->state) && 299 299 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) 300 300 queue_work(i40e_wq, &pf->service_task); 301 301 } ··· 3611 3611 * this is not a performance path and napi_schedule() 3612 3612 * can deal with rescheduling. 3613 3613 */ 3614 - if (!test_bit(__I40E_VSI_DOWN, pf->state)) 3614 + if (!test_bit(__I40E_DOWN, pf->state)) 3615 3615 napi_schedule_irqoff(&q_vector->napi); 3616 3616 } 3617 3617 ··· 3687 3687 enable_intr: 3688 3688 /* re-enable interrupt causes */ 3689 3689 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); 3690 - if (!test_bit(__I40E_VSI_DOWN, pf->state)) { 3690 + if (!test_bit(__I40E_DOWN, pf->state)) { 3691 3691 i40e_service_event_schedule(pf); 3692 3692 i40e_irq_dynamic_enable_icr0(pf, false); 3693 3693 } ··· 6203 6203 { 6204 6204 6205 6205 /* if interface is down do nothing */ 6206 - if (test_bit(__I40E_VSI_DOWN, pf->state)) 6206 + if (test_bit(__I40E_DOWN, pf->state)) 6207 6207 return; 6208 6208 6209 6209 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) ··· 6344 6344 int i; 6345 6345 6346 6346 /* if interface is down do nothing */ 6347 - if (test_bit(__I40E_VSI_DOWN, pf->state) || 6347 + if (test_bit(__I40E_DOWN, pf->state) || 6348 6348 test_bit(__I40E_CONFIG_BUSY, pf->state)) 6349 6349 return; 6350 6350 ··· 6399 6399 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); 6400 6400 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); 6401 6401 } 6402 - if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) { 6403 - reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED); 6404 - clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state); 6402 + if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { 6403 + reset_flags |= BIT(__I40E_DOWN_REQUESTED); 6404 + clear_bit(__I40E_DOWN_REQUESTED, pf->state); 6405 6405 } 6406 6406 6407 6407 /* If there's a recovery already waiting, it takes ··· 6415 6415 6416 6416 /* If we're already down or resetting, just bail */ 6417 6417 if (reset_flags && 6418 - !test_bit(__I40E_VSI_DOWN, pf->state) && 6418 + !test_bit(__I40E_DOWN, pf->state) && 6419 6419 !test_bit(__I40E_CONFIG_BUSY, pf->state)) { 6420 6420 rtnl_lock(); 6421 6421 i40e_do_reset(pf, reset_flags, true); ··· 7002 7002 u32 val; 7003 7003 int v; 7004 7004 7005 - if (test_bit(__I40E_VSI_DOWN, pf->state)) 7005 + if (test_bit(__I40E_DOWN, pf->state)) 7006 7006 goto clear_recovery; 7007 7007 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); 7008 7008 ··· 8821 8821 (pf->hw.aq.api_min_ver > 4))) { 8822 8822 /* Supported in FW API version higher than 1.4 */ 8823 8823 pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; 8824 - pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8825 - } else { 8826 - pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; 8827 8824 } 8825 + 8826 + /* Enable HW ATR eviction if possible */ 8827 + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) 8828 + pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; 8828 8829 8829 8830 pf->eeprom_version = 0xDEAD; 8830 8831 pf->lan_veb = I40E_NO_VEB; ··· 9768 9767 return -ENODEV; 9769 9768 } 9770 9769 if (vsi == pf->vsi[pf->lan_vsi] && 9771 - !test_bit(__I40E_VSI_DOWN, pf->state)) { 9770 + !test_bit(__I40E_DOWN, pf->state)) { 9772 9771 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); 9773 9772 return -ENODEV; 9774 9773 } ··· 11004 11003 } 11005 11004 pf->next_vsi = 0; 11006 11005 pf->pdev = pdev; 11007 - set_bit(__I40E_VSI_DOWN, pf->state); 11006 + set_bit(__I40E_DOWN, pf->state); 11008 11007 
11009 11008 hw = &pf->hw; 11010 11009 hw->back = pf; ··· 11294 11293 * before setting up the misc vector or we get a race and the vector 11295 11294 * ends up disabled forever. 11296 11295 */ 11297 - clear_bit(__I40E_VSI_DOWN, pf->state); 11296 + clear_bit(__I40E_DOWN, pf->state); 11298 11297 11299 11298 /* In case of MSIX we are going to setup the misc vector right here 11300 11299 * to handle admin queue events etc. In case of legacy and MSI ··· 11449 11448 11450 11449 /* Unwind what we've done if something failed in the setup */ 11451 11450 err_vsis: 11452 - set_bit(__I40E_VSI_DOWN, pf->state); 11451 + set_bit(__I40E_DOWN, pf->state); 11453 11452 i40e_clear_interrupt_scheme(pf); 11454 11453 kfree(pf->vsi); 11455 11454 err_switch_setup: ··· 11501 11500 11502 11501 /* no more scheduling of any task */ 11503 11502 set_bit(__I40E_SUSPENDED, pf->state); 11504 - set_bit(__I40E_VSI_DOWN, pf->state); 11503 + set_bit(__I40E_DOWN, pf->state); 11505 11504 if (pf->service_timer.data) 11506 11505 del_timer_sync(&pf->service_timer); 11507 11506 if (pf->service_task.func) ··· 11741 11740 struct i40e_hw *hw = &pf->hw; 11742 11741 11743 11742 set_bit(__I40E_SUSPENDED, pf->state); 11744 - set_bit(__I40E_VSI_DOWN, pf->state); 11743 + set_bit(__I40E_DOWN, pf->state); 11745 11744 rtnl_lock(); 11746 11745 i40e_prep_for_reset(pf, true); 11747 11746 rtnl_unlock(); ··· 11790 11789 int retval = 0; 11791 11790 11792 11791 set_bit(__I40E_SUSPENDED, pf->state); 11793 - set_bit(__I40E_VSI_DOWN, pf->state); 11792 + set_bit(__I40E_DOWN, pf->state); 11794 11793 11795 11794 if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) 11796 11795 i40e_enable_mc_magic_wake(pf); ··· 11842 11841 11843 11842 /* handling the reset will rebuild the device state */ 11844 11843 if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { 11845 - clear_bit(__I40E_VSI_DOWN, pf->state); 11844 + clear_bit(__I40E_DOWN, pf->state); 11846 11845 rtnl_lock(); 11847 11846 i40e_reset_and_rebuild(pf, false, true); 11848 11847 rtnl_unlock();
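The i40e changes above split the single eviction flag into a capability bit (the hardware can do it) and a separate ENABLED bit (the feature is currently on), setting the latter only when the former is present and testing only the latter in the datapath. A small sketch of that two-flag pattern, with made-up flag names:

#include <stdint.h>
#include <stdio.h>

#define FLAG_FEATURE_CAPABLE  (1ULL << 0)   /* hardware supports it */
#define FLAG_FEATURE_ENABLED  (1ULL << 1)   /* currently turned on */

int main(void)
{
        uint64_t flags = FLAG_FEATURE_CAPABLE;   /* reported at probe time */

        /* Enable the feature only on hardware that can handle it. */
        if (flags & FLAG_FEATURE_CAPABLE)
                flags |= FLAG_FEATURE_ENABLED;

        /* Fast-path code tests the ENABLED bit, never the CAPABLE bit. */
        printf("feature %s\n",
               (flags & FLAG_FEATURE_ENABLED) ? "enabled" : "disabled");
        return 0;
}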
+4 -3
drivers/net/ethernet/intel/i40e/i40e_txrx.c
··· 1854 1854 #if (PAGE_SIZE < 8192) 1855 1855 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 1856 1856 #else 1857 - unsigned int truesize = SKB_DATA_ALIGN(size); 1857 + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1858 + SKB_DATA_ALIGN(I40E_SKB_PAD + size); 1858 1859 #endif 1859 1860 struct sk_buff *skb; 1860 1861 ··· 2341 2340 /* Due to lack of space, no more new filters can be programmed */ 2342 2341 if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) 2343 2342 return; 2344 - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { 2343 + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { 2345 2344 /* HW ATR eviction will take care of removing filters on FIN 2346 2345 * and RST packets. 2347 2346 */ ··· 2403 2402 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & 2404 2403 I40E_TXD_FLTR_QW1_CNTINDEX_MASK; 2405 2404 2406 - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) 2405 + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) 2407 2406 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; 2408 2407 2409 2408 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+2
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
··· 3017 3017 VLAN_VID_MASK)); 3018 3018 } 3019 3019 3020 + spin_unlock_bh(&vsi->mac_filter_hash_lock); 3020 3021 if (vlan_id || qos) 3021 3022 ret = i40e_vsi_add_pvid(vsi, vlanprio); 3022 3023 else 3023 3024 i40e_vsi_remove_pvid(vsi); 3025 + spin_lock_bh(&vsi->mac_filter_hash_lock); 3024 3026 3025 3027 if (vlan_id) { 3026 3028 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+2 -1
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
··· 1190 1190 #if (PAGE_SIZE < 8192) 1191 1191 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; 1192 1192 #else 1193 - unsigned int truesize = SKB_DATA_ALIGN(size); 1193 + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + 1194 + SKB_DATA_ALIGN(I40E_SKB_PAD + size); 1194 1195 #endif 1195 1196 struct sk_buff *skb; 1196 1197
+33 -43
drivers/net/ethernet/marvell/mvpp2.c
··· 3719 3719 dma_addr_t *dma_addr, 3720 3720 phys_addr_t *phys_addr) 3721 3721 { 3722 - int cpu = smp_processor_id(); 3722 + int cpu = get_cpu(); 3723 3723 3724 3724 *dma_addr = mvpp2_percpu_read(priv, cpu, 3725 3725 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); ··· 3740 3740 if (sizeof(phys_addr_t) == 8) 3741 3741 *phys_addr |= (u64)phys_addr_highbits << 32; 3742 3742 } 3743 + 3744 + put_cpu(); 3743 3745 } 3744 3746 3745 3747 /* Free all buffers from the pool */ ··· 3922 3920 return bm; 3923 3921 } 3924 3922 3925 - /* Get pool number from a BM cookie */ 3926 - static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) 3927 - { 3928 - return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; 3929 - } 3930 - 3931 3923 /* Release buffer to BM */ 3932 3924 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, 3933 3925 dma_addr_t buf_dma_addr, 3934 3926 phys_addr_t buf_phys_addr) 3935 3927 { 3936 - int cpu = smp_processor_id(); 3928 + int cpu = get_cpu(); 3937 3929 3938 3930 if (port->priv->hw_version == MVPP22) { 3939 3931 u32 val = 0; ··· 3954 3958 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); 3955 3959 mvpp2_percpu_write(port->priv, cpu, 3956 3960 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); 3961 + 3962 + put_cpu(); 3957 3963 } 3958 3964 3959 3965 /* Refill BM pool */ 3960 - static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, 3966 + static void mvpp2_pool_refill(struct mvpp2_port *port, int pool, 3961 3967 dma_addr_t dma_addr, 3962 3968 phys_addr_t phys_addr) 3963 3969 { 3964 - int pool = mvpp2_bm_cookie_pool_get(bm); 3965 - 3966 3970 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 3967 3971 } 3968 3972 ··· 4181 4185 static void mvpp22_port_mii_set(struct mvpp2_port *port) 4182 4186 { 4183 4187 u32 val; 4184 - 4185 - return; 4186 4188 4187 4189 /* Only GOP port 0 has an XLG MAC */ 4188 4190 if (port->gop_id == 0) { ··· 4509 4515 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 4510 4516 } 4511 4517 4512 - /* Obtain BM cookie information from descriptor */ 4513 - static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, 4514 - struct mvpp2_rx_desc *rx_desc) 4515 - { 4516 - int cpu = smp_processor_id(); 4517 - int pool; 4518 - 4519 - pool = (mvpp2_rxdesc_status_get(port, rx_desc) & 4520 - MVPP2_RXD_BM_POOL_ID_MASK) >> 4521 - MVPP2_RXD_BM_POOL_ID_OFFS; 4522 - 4523 - return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | 4524 - ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); 4525 - } 4526 - 4527 4518 /* Tx descriptors helper methods */ 4528 4519 4529 4520 /* Get pointer to next Tx descriptor to be processed (send) by HW */ ··· 4736 4757 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, 4737 4758 struct mvpp2_rx_queue *rxq) 4738 4759 { 4739 - int cpu = smp_processor_id(); 4760 + int cpu = get_cpu(); 4740 4761 4741 4762 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) 4742 4763 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; ··· 4744 4765 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 4745 4766 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, 4746 4767 rxq->pkts_coal); 4768 + 4769 + put_cpu(); 4747 4770 } 4748 4771 4749 4772 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) ··· 4926 4945 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4927 4946 4928 4947 /* Set Rx descriptors queue starting address - indirect access */ 4929 - cpu = smp_processor_id(); 4948 + cpu = get_cpu(); 4930 4949 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 4931 4950 if (port->priv->hw_version == MVPP21) 4932 4951 rxq_dma = 
rxq->descs_dma; ··· 4935 4954 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 4936 4955 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 4937 4956 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); 4957 + put_cpu(); 4938 4958 4939 4959 /* Set Offset */ 4940 4960 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); ··· 4962 4980 4963 4981 for (i = 0; i < rx_received; i++) { 4964 4982 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 4965 - u32 bm = mvpp2_bm_cookie_build(port, rx_desc); 4983 + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 4984 + int pool; 4966 4985 4967 - mvpp2_pool_refill(port, bm, 4986 + pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> 4987 + MVPP2_RXD_BM_POOL_ID_OFFS; 4988 + 4989 + mvpp2_pool_refill(port, pool, 4968 4990 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 4969 4991 mvpp2_rxdesc_cookie_get(port, rx_desc)); 4970 4992 } ··· 4998 5012 * free descriptor number 4999 5013 */ 5000 5014 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 5001 - cpu = smp_processor_id(); 5015 + cpu = get_cpu(); 5002 5016 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); 5003 5017 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); 5004 5018 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); 5019 + put_cpu(); 5005 5020 } 5006 5021 5007 5022 /* Create and initialize a Tx queue */ ··· 5025 5038 txq->last_desc = txq->size - 1; 5026 5039 5027 5040 /* Set Tx descriptors queue starting address - indirect access */ 5028 - cpu = smp_processor_id(); 5041 + cpu = get_cpu(); 5029 5042 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5030 5043 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 5031 5044 txq->descs_dma); ··· 5050 5063 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, 5051 5064 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 5052 5065 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 5066 + put_cpu(); 5053 5067 5054 5068 /* WRR / EJP configuration - indirect access */ 5055 5069 tx_port_num = mvpp2_egress_port(port); ··· 5121 5133 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 5122 5134 5123 5135 /* Set Tx descriptors queue starting address and size */ 5124 - cpu = smp_processor_id(); 5136 + cpu = get_cpu(); 5125 5137 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5126 5138 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); 5127 5139 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); 5140 + put_cpu(); 5128 5141 } 5129 5142 5130 5143 /* Cleanup Tx ports */ ··· 5135 5146 int delay, pending, cpu; 5136 5147 u32 val; 5137 5148 5138 - cpu = smp_processor_id(); 5149 + cpu = get_cpu(); 5139 5150 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); 5140 5151 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); 5141 5152 val |= MVPP2_TXQ_DRAIN_EN_MASK; ··· 5162 5173 5163 5174 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 5164 5175 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); 5176 + put_cpu(); 5165 5177 5166 5178 for_each_present_cpu(cpu) { 5167 5179 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); ··· 5410 5420 5411 5421 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 5412 5422 static int mvpp2_rx_refill(struct mvpp2_port *port, 5413 - struct mvpp2_bm_pool *bm_pool, u32 bm) 5423 + struct mvpp2_bm_pool *bm_pool, int pool) 5414 5424 { 5415 5425 dma_addr_t dma_addr; 5416 5426 phys_addr_t phys_addr; ··· 5422 5432 if (!buf) 5423 5433 return 
-ENOMEM; 5424 5434 5425 - mvpp2_pool_refill(port, bm, dma_addr, phys_addr); 5435 + mvpp2_pool_refill(port, pool, dma_addr, phys_addr); 5426 5436 5427 5437 return 0; 5428 5438 } ··· 5480 5490 unsigned int frag_size; 5481 5491 dma_addr_t dma_addr; 5482 5492 phys_addr_t phys_addr; 5483 - u32 bm, rx_status; 5493 + u32 rx_status; 5484 5494 int pool, rx_bytes, err; 5485 5495 void *data; 5486 5496 ··· 5492 5502 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); 5493 5503 data = (void *)phys_to_virt(phys_addr); 5494 5504 5495 - bm = mvpp2_bm_cookie_build(port, rx_desc); 5496 - pool = mvpp2_bm_cookie_pool_get(bm); 5505 + pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> 5506 + MVPP2_RXD_BM_POOL_ID_OFFS; 5497 5507 bm_pool = &port->priv->bm_pools[pool]; 5498 5508 5499 5509 /* In case of an error, release the requested buffer pointer ··· 5506 5516 dev->stats.rx_errors++; 5507 5517 mvpp2_rx_error(port, rx_desc); 5508 5518 /* Return the buffer to the pool */ 5509 - mvpp2_pool_refill(port, bm, dma_addr, phys_addr); 5519 + mvpp2_pool_refill(port, pool, dma_addr, phys_addr); 5510 5520 continue; 5511 5521 } 5512 5522 ··· 5521 5531 goto err_drop_frame; 5522 5532 } 5523 5533 5524 - err = mvpp2_rx_refill(port, bm_pool, bm); 5534 + err = mvpp2_rx_refill(port, bm_pool, pool); 5525 5535 if (err) { 5526 5536 netdev_err(port->dev, "failed to refill BM pools\n"); 5527 5537 goto err_drop_frame;
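Rather than building and parsing a "BM cookie", the mvpp2 hunks now pull the pool number straight out of the RX descriptor status word with a mask and a shift. A standalone sketch of that extraction; the field layout below is illustrative, not the real descriptor format:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: pool id occupies bits 16..23 of the status word. */
#define RXD_BM_POOL_ID_OFFS     16
#define RXD_BM_POOL_ID_MASK     (0xffU << RXD_BM_POOL_ID_OFFS)

static unsigned int rx_desc_pool(uint32_t status)
{
        return (status & RXD_BM_POOL_ID_MASK) >> RXD_BM_POOL_ID_OFFS;
}

int main(void)
{
        uint32_t status = (3U << RXD_BM_POOL_ID_OFFS) | 0x5a;

        printf("pool %u\n", rx_desc_pool(status));   /* prints "pool 3" */
        return 0;
}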
-5
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
··· 1562 1562 qpn = priv->drop_qp.qpn; 1563 1563 else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { 1564 1564 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); 1565 - if (qpn < priv->rss_map.base_qpn || 1566 - qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) { 1567 - en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn); 1568 - return -EINVAL; 1569 - } 1570 1565 } else { 1571 1566 if (cmd->fs.ring_cookie >= priv->rx_ring_num) { 1572 1567 en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
+11 -4
drivers/net/ethernet/mellanox/mlx4/mcg.c
··· 35 35 #include <linux/etherdevice.h> 36 36 37 37 #include <linux/mlx4/cmd.h> 38 + #include <linux/mlx4/qp.h> 38 39 #include <linux/export.h> 39 40 40 41 #include "mlx4.h" ··· 986 985 if (IS_ERR(mailbox)) 987 986 return PTR_ERR(mailbox); 988 987 988 + if (!mlx4_qp_lookup(dev, rule->qpn)) { 989 + mlx4_err_rule(dev, "QP doesn't exist\n", rule); 990 + ret = -EINVAL; 991 + goto out; 992 + } 993 + 989 994 trans_rule_ctrl_to_hw(rule, mailbox->buf); 990 995 991 996 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); 992 997 993 998 list_for_each_entry(cur, &rule->list, list) { 994 999 ret = parse_trans_rule(dev, cur, mailbox->buf + size); 995 - if (ret < 0) { 996 - mlx4_free_cmd_mailbox(dev, mailbox); 997 - return ret; 998 - } 1000 + if (ret < 0) 1001 + goto out; 1002 + 999 1003 size += ret; 1000 1004 } 1001 1005 ··· 1027 1021 } 1028 1022 } 1029 1023 1024 + out: 1030 1025 mlx4_free_cmd_mailbox(dev, mailbox); 1031 1026 1032 1027 return ret;
+19
drivers/net/ethernet/mellanox/mlx4/qp.c
··· 384 384 __mlx4_qp_free_icm(dev, qpn); 385 385 } 386 386 387 + struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) 388 + { 389 + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; 390 + struct mlx4_qp *qp; 391 + 392 + spin_lock(&qp_table->lock); 393 + 394 + qp = __mlx4_qp_lookup(dev, qpn); 395 + 396 + spin_unlock(&qp_table->lock); 397 + return qp; 398 + } 399 + 387 400 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) 388 401 { 389 402 struct mlx4_priv *priv = mlx4_priv(dev); ··· 484 471 } 485 472 486 473 if (attr & MLX4_UPDATE_QP_QOS_VPORT) { 474 + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) { 475 + mlx4_warn(dev, "Granular QoS per VF is not enabled\n"); 476 + err = -EOPNOTSUPP; 477 + goto out; 478 + } 479 + 487 480 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; 488 481 cmd->qp_context.qos_vport = params->qos_vport; 489 482 }
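mlx4_qp_lookup() above wraps the table lookup in the QP table lock so callers such as the new mcg.c check can ask whether a QPN exists. A userspace sketch of the same lock-around-lookup idea, using a pthread mutex and a plain array as a hypothetical table:

#include <pthread.h>
#include <stddef.h>

#define TABLE_SIZE 64

struct qp {
        unsigned int qpn;
        int in_use;
};

static struct qp table[TABLE_SIZE];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the entry for @qpn, or NULL if it is not allocated. */
static struct qp *qp_lookup(unsigned int qpn)
{
        struct qp *qp = NULL;

        pthread_mutex_lock(&table_lock);
        if (qpn < TABLE_SIZE && table[qpn].in_use)
                qp = &table[qpn];
        pthread_mutex_unlock(&table_lock);

        return qp;
}

int main(void)
{
        table[5].qpn = 5;
        table[5].in_use = 1;

        return (qp_lookup(5) && !qp_lookup(6)) ? 0 : 1;
}

As in the driver, the pointer is returned after the lock is dropped, so the caller only learns that the entry existed at lookup time.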
+11 -5
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 5255 5255 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 5256 5256 } 5257 5257 5258 + static void update_qos_vpp(struct mlx4_update_qp_context *ctx, 5259 + struct mlx4_vf_immed_vlan_work *work) 5260 + { 5261 + ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP); 5262 + ctx->qp_context.qos_vport = work->qos_vport; 5263 + } 5264 + 5258 5265 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) 5259 5266 { 5260 5267 struct mlx4_vf_immed_vlan_work *work = ··· 5376 5369 qp->sched_queue & 0xC7; 5377 5370 upd_context->qp_context.pri_path.sched_queue |= 5378 5371 ((work->qos & 0x7) << 3); 5379 - upd_context->qp_mask |= 5380 - cpu_to_be64(1ULL << 5381 - MLX4_UPD_QP_MASK_QOS_VPP); 5382 - upd_context->qp_context.qos_vport = 5383 - work->qos_vport; 5372 + 5373 + if (dev->caps.flags2 & 5374 + MLX4_DEV_CAP_FLAG2_QOS_VPP) 5375 + update_qos_vpp(upd_context, work); 5384 5376 } 5385 5377 5386 5378 err = mlx4_cmd(dev, mailbox->dma,
+5 -3
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 458 458 459 459 struct mlx5e_rx_am_stats { 460 460 int ppms; /* packets per msec */ 461 + int bpms; /* bytes per msec */ 461 462 int epms; /* events per msec */ 462 463 }; 463 464 464 465 struct mlx5e_rx_am_sample { 465 - ktime_t time; 466 - unsigned int pkt_ctr; 467 - u16 event_ctr; 466 + ktime_t time; 467 + u32 pkt_ctr; 468 + u32 byte_ctr; 469 + u16 event_ctr; 468 470 }; 469 471 470 472 struct mlx5e_rx_am { /* Adaptive Moderation */
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1242 1242 SOF_TIMESTAMPING_RX_HARDWARE | 1243 1243 SOF_TIMESTAMPING_RAW_HARDWARE; 1244 1244 1245 - info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | 1246 - (BIT(1) << HWTSTAMP_TX_ON); 1245 + info->tx_types = BIT(HWTSTAMP_TX_OFF) | 1246 + BIT(HWTSTAMP_TX_ON); 1247 1247 1248 - info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | 1249 - (BIT(1) << HWTSTAMP_FILTER_ALL); 1248 + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 1249 + BIT(HWTSTAMP_FILTER_ALL); 1250 1250 1251 1251 return 0; 1252 1252 }
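The ethtool fix above replaces (BIT(1) << flag) with BIT(flag): the former shifts the constant 2, so every reported timestamping bit lands one position too high. A tiny demonstration of the difference, with BIT() defined locally for the sketch:

#include <stdio.h>

#define BIT(n)  (1UL << (n))

int main(void)
{
        unsigned int flag = 1;   /* e.g. HWTSTAMP_TX_ON == 1 */

        printf("BIT(flag)      = 0x%lx\n", BIT(flag));        /* 0x2 */
        printf("BIT(1) << flag = 0x%lx\n", BIT(1) << flag);   /* 0x4, one bit too high */
        return 0;
}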
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4241 4241 return netdev; 4242 4242 4243 4243 err_cleanup_nic: 4244 - profile->cleanup(priv); 4244 + if (profile->cleanup) 4245 + profile->cleanup(priv); 4245 4246 free_netdev(netdev); 4246 4247 4247 4248 return NULL;
+2
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 791 791 params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); 792 792 params->num_tc = 1; 793 793 params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 794 + 795 + mlx5_query_min_inline(mdev, &params->tx_min_inline_mode); 794 796 } 795 797 796 798 static void mlx5e_build_rep_netdev(struct net_device *netdev)
+26 -19
drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
··· 183 183 mlx5e_am_step(am); 184 184 } 185 185 186 + #define IS_SIGNIFICANT_DIFF(val, ref) \ 187 + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ 188 + 186 189 static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, 187 190 struct mlx5e_rx_am_stats *prev) 188 191 { 189 - int diff; 190 - 191 - if (!prev->ppms) 192 - return curr->ppms ? MLX5E_AM_STATS_BETTER : 192 + if (!prev->bpms) 193 + return curr->bpms ? MLX5E_AM_STATS_BETTER : 193 194 MLX5E_AM_STATS_SAME; 194 195 195 - diff = curr->ppms - prev->ppms; 196 - if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ 197 - return (diff > 0) ? MLX5E_AM_STATS_BETTER : 198 - MLX5E_AM_STATS_WORSE; 196 + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) 197 + return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : 198 + MLX5E_AM_STATS_WORSE; 199 199 200 - if (!prev->epms) 201 - return curr->epms ? MLX5E_AM_STATS_WORSE : 202 - MLX5E_AM_STATS_SAME; 200 + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) 201 + return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : 202 + MLX5E_AM_STATS_WORSE; 203 203 204 - diff = curr->epms - prev->epms; 205 - if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ 206 - return (diff < 0) ? MLX5E_AM_STATS_BETTER : 207 - MLX5E_AM_STATS_WORSE; 204 + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) 205 + return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER : 206 + MLX5E_AM_STATS_WORSE; 208 207 209 208 return MLX5E_AM_STATS_SAME; 210 209 } ··· 265 266 { 266 267 s->time = ktime_get(); 267 268 s->pkt_ctr = rq->stats.packets; 269 + s->byte_ctr = rq->stats.bytes; 268 270 s->event_ctr = rq->cq.event_ctr; 269 271 } 270 272 271 273 #define MLX5E_AM_NEVENTS 64 274 + #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) 275 + #define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) 272 276 273 277 static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, 274 278 struct mlx5e_rx_am_sample *end, ··· 279 277 { 280 278 /* u32 holds up to 71 minutes, should be enough */ 281 279 u32 delta_us = ktime_us_delta(end->time, start->time); 282 - unsigned int npkts = end->pkt_ctr - start->pkt_ctr; 280 + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); 281 + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, 282 + start->byte_ctr); 283 283 284 284 if (!delta_us) 285 285 return; 286 286 287 - curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; 288 - curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; 287 + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); 288 + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); 289 + curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC, 290 + delta_us); 289 291 } 290 292 291 293 void mlx5e_rx_am_work(struct work_struct *work) ··· 314 308 315 309 switch (am->state) { 316 310 case MLX5E_AM_MEASURE_IN_PROGRESS: 317 - nevents = rq->cq.event_ctr - am->start_sample.event_ctr; 311 + nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr, 312 + am->start_sample.event_ctr); 318 313 if (nevents < MLX5E_AM_NEVENTS) 319 314 break; 320 315 mlx5e_am_sample(rq, &end_sample);
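The adaptive-moderation rework above compares bytes, packets and events per millisecond, treats anything within 10% as "the same", and computes counter deltas modulo the counter width so a u32/u16 wrap between samples does not produce a huge bogus delta. A standalone sketch of those helpers; the constants and sample values are illustrative:

#include <inttypes.h>
#include <stdio.h>

#define BITS_PER_TYPE(type)     (sizeof(type) * 8)
/* Delta of two free-running counters of width @bits, safe across one wrap. */
#define BIT_GAP(bits, end, start) \
        ((((end) - (start)) + (1ULL << (bits))) & ((1ULL << (bits)) - 1))
/* "Meaningfully different" means more than 10% apart. */
#define IS_SIGNIFICANT_DIFF(val, ref) \
        (((100 * ((val) > (ref) ? (val) - (ref) : (ref) - (val))) / (ref)) > 10)
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        uint32_t start = 0xfffffff0u, end = 0x10u;   /* counter wrapped in between */
        uint64_t npkts = BIT_GAP(BITS_PER_TYPE(uint32_t), end, start);
        uint64_t delta_us = 1000;

        printf("packets: %" PRIu64 "\n", npkts);     /* 32, not ~4 billion */
        printf("ppms: %" PRIu64 "\n", DIV_ROUND_UP(npkts * 1000, delta_us));
        printf("significant vs 30? %d\n",
               IS_SIGNIFICANT_DIFF(npkts, (uint64_t)30));   /* 0: within 10% */
        return 0;
}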
+2 -9
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
··· 417 417 }; 418 418 419 419 static const struct counter_desc mlx5e_pme_status_desc[] = { 420 - { "module_plug", 0 }, 421 420 { "module_unplug", 8 }, 422 421 }; 423 422 424 423 static const struct counter_desc mlx5e_pme_error_desc[] = { 425 - { "module_pwr_budget_exd", 0 }, /* power budget exceed */ 426 - { "module_long_range", 8 }, /* long range for non MLNX cable */ 427 - { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ 428 - { "module_no_eeprom", 24 }, /* no eeprom/retry time out */ 429 - { "module_enforce_part", 32 }, /* enforce part number list */ 430 - { "module_unknown_id", 40 }, /* unknown identifier */ 431 - { "module_high_temp", 48 }, /* high temperature */ 424 + { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ 425 + { "module_high_temp", 48 }, /* high temperature */ 432 426 { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ 433 - { "module_unknown_status", 64 }, 434 427 }; 435 428 436 429 #endif /* __MLX5_EN_STATS_H__ */
-1
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 895 895 {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, 896 896 {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, 897 897 898 - {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)}, 899 898 {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, 900 899 {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, 901 900 {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)},
+40 -37
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 906 906 return 0; 907 907 } 908 908 909 - int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) 909 + static int mlx5_devlink_eswitch_check(struct devlink *devlink) 910 910 { 911 - struct mlx5_core_dev *dev; 912 - u16 cur_mlx5_mode, mlx5_mode = 0; 911 + struct mlx5_core_dev *dev = devlink_priv(devlink); 913 912 914 - dev = devlink_priv(devlink); 913 + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 914 + return -EOPNOTSUPP; 915 915 916 916 if (!MLX5_CAP_GEN(dev, vport_group_manager)) 917 917 return -EOPNOTSUPP; 918 918 919 - cur_mlx5_mode = dev->priv.eswitch->mode; 920 - 921 - if (cur_mlx5_mode == SRIOV_NONE) 919 + if (dev->priv.eswitch->mode == SRIOV_NONE) 922 920 return -EOPNOTSUPP; 921 + 922 + return 0; 923 + } 924 + 925 + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) 926 + { 927 + struct mlx5_core_dev *dev = devlink_priv(devlink); 928 + u16 cur_mlx5_mode, mlx5_mode = 0; 929 + int err; 930 + 931 + err = mlx5_devlink_eswitch_check(devlink); 932 + if (err) 933 + return err; 934 + 935 + cur_mlx5_mode = dev->priv.eswitch->mode; 923 936 924 937 if (esw_mode_from_devlink(mode, &mlx5_mode)) 925 938 return -EINVAL; ··· 950 937 951 938 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) 952 939 { 953 - struct mlx5_core_dev *dev; 940 + struct mlx5_core_dev *dev = devlink_priv(devlink); 941 + int err; 954 942 955 - dev = devlink_priv(devlink); 956 - 957 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 958 - return -EOPNOTSUPP; 959 - 960 - if (dev->priv.eswitch->mode == SRIOV_NONE) 961 - return -EOPNOTSUPP; 943 + err = mlx5_devlink_eswitch_check(devlink); 944 + if (err) 945 + return err; 962 946 963 947 return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); 964 948 } ··· 964 954 { 965 955 struct mlx5_core_dev *dev = devlink_priv(devlink); 966 956 struct mlx5_eswitch *esw = dev->priv.eswitch; 967 - int num_vports = esw->enabled_vports; 968 957 int err, vport; 969 958 u8 mlx5_mode; 970 959 971 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 972 - return -EOPNOTSUPP; 973 - 974 - if (esw->mode == SRIOV_NONE) 975 - return -EOPNOTSUPP; 960 + err = mlx5_devlink_eswitch_check(devlink); 961 + if (err) 962 + return err; 976 963 977 964 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { 978 965 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: ··· 992 985 if (err) 993 986 goto out; 994 987 995 - for (vport = 1; vport < num_vports; vport++) { 988 + for (vport = 1; vport < esw->enabled_vports; vport++) { 996 989 err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); 997 990 if (err) { 998 991 esw_warn(dev, "Failed to set min inline on vport %d\n", ··· 1017 1010 { 1018 1011 struct mlx5_core_dev *dev = devlink_priv(devlink); 1019 1012 struct mlx5_eswitch *esw = dev->priv.eswitch; 1013 + int err; 1020 1014 1021 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1022 - return -EOPNOTSUPP; 1023 - 1024 - if (esw->mode == SRIOV_NONE) 1025 - return -EOPNOTSUPP; 1015 + err = mlx5_devlink_eswitch_check(devlink); 1016 + if (err) 1017 + return err; 1026 1018 1027 1019 return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); 1028 1020 } ··· 1068 1062 struct mlx5_eswitch *esw = dev->priv.eswitch; 1069 1063 int err; 1070 1064 1071 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1072 - return -EOPNOTSUPP; 1073 - 1074 - if (esw->mode == SRIOV_NONE) 1075 - return -EOPNOTSUPP; 1065 + err = mlx5_devlink_eswitch_check(devlink); 1066 + if (err) 1067 + return err; 1076 1068 1077 1069 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && 1078 1070 
(!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || ··· 1109 1105 { 1110 1106 struct mlx5_core_dev *dev = devlink_priv(devlink); 1111 1107 struct mlx5_eswitch *esw = dev->priv.eswitch; 1108 + int err; 1112 1109 1113 - if (!MLX5_CAP_GEN(dev, vport_group_manager)) 1114 - return -EOPNOTSUPP; 1115 - 1116 - if (esw->mode == SRIOV_NONE) 1117 - return -EOPNOTSUPP; 1110 + err = mlx5_devlink_eswitch_check(devlink); 1111 + if (err) 1112 + return err; 1118 1113 1119 1114 *encap = esw->offloads.encap; 1120 1115 return 0;
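The eswitch hunks above fold the repeated "is this device in a state where devlink eswitch ops make sense" tests into one mlx5_devlink_eswitch_check() helper called at the top of every callback. A minimal sketch of that consolidation; the device struct and its fields are invented for the example:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
        bool is_ethernet;
        bool vport_group_manager;
        bool sriov_enabled;
};

/* Common preconditions, checked once instead of copied into each callback. */
static int eswitch_check(const struct dev_state *dev)
{
        if (!dev->is_ethernet)
                return -EOPNOTSUPP;
        if (!dev->vport_group_manager)
                return -EOPNOTSUPP;
        if (!dev->sriov_enabled)
                return -EOPNOTSUPP;
        return 0;
}

static int eswitch_mode_get(const struct dev_state *dev, int *mode)
{
        int err = eswitch_check(dev);

        if (err)
                return err;
        *mode = 1;   /* placeholder for the real mode translation */
        return 0;
}

int main(void)
{
        struct dev_state dev = { true, true, true };
        int mode;

        printf("get: %d\n", eswitch_mode_get(&dev, &mode));
        return 0;
}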
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 862 862 ft_attr.level = level; 863 863 ft_attr.prio = prio; 864 864 865 - return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0); 865 + return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport); 866 866 } 867 867 868 868 struct mlx5_flow_table*
+5 -6
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 275 275 struct mlx5_core_health *health = &dev->priv.health; 276 276 u32 count; 277 277 278 - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 279 - mod_timer(&health->timer, get_next_poll_jiffies()); 280 - return; 281 - } 278 + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 279 + goto out; 282 280 283 281 count = ioread32be(health->health_counter); 284 282 if (count == health->prev) ··· 288 290 if (health->miss_counter == MAX_MISSES) { 289 291 dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); 290 292 print_health_info(dev); 291 - } else { 292 - mod_timer(&health->timer, get_next_poll_jiffies()); 293 293 } 294 294 295 295 if (in_fatal(dev) && !health->sick) { ··· 301 305 "new health works are not permitted at this stage\n"); 302 306 spin_unlock(&health->wq_lock); 303 307 } 308 + 309 + out: 310 + mod_timer(&health->timer, get_next_poll_jiffies()); 304 311 } 305 312 306 313 void mlx5_start_health_poll(struct mlx5_core_dev *dev)
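The health-poll change above makes every path through the poll function fall through to a single out: label that re-arms the timer, instead of calling mod_timer() on only some branches. A small sketch of that single-exit shape, with the actual health checks stubbed out:

#include <stdio.h>

static void rearm_timer(void)
{
        printf("timer re-armed\n");
}

static void poll_health(int device_in_error)
{
        if (device_in_error)
                goto out;   /* skip the counter checks, but keep polling */

        printf("checking health counters\n");

out:
        rearm_timer();      /* every path re-arms exactly once */
}

int main(void)
{
        poll_health(0);
        poll_health(1);
        return 0;
}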
+18 -7
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 175 175 }, 176 176 }; 177 177 178 - #define FW_INIT_TIMEOUT_MILI 2000 179 - #define FW_INIT_WAIT_MS 2 178 + #define FW_INIT_TIMEOUT_MILI 2000 179 + #define FW_INIT_WAIT_MS 2 180 + #define FW_PRE_INIT_TIMEOUT_MILI 10000 180 181 181 182 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) 182 183 { ··· 538 537 /* disable cmdif checksum */ 539 538 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); 540 539 541 - /* If the HCA supports 4K UARs use it */ 542 - if (MLX5_CAP_GEN_MAX(dev, uar_4k)) 540 + /* Enable 4K UAR only when HCA supports it and page size is bigger 541 + * than 4K. 542 + */ 543 + if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096) 543 544 MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); 544 545 545 546 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); ··· 624 621 cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), 625 622 priv->irq_info[i].mask); 626 623 627 - #ifdef CONFIG_SMP 628 - if (irq_set_affinity_hint(irq, priv->irq_info[i].mask)) 624 + if (IS_ENABLED(CONFIG_SMP) && 625 + irq_set_affinity_hint(irq, priv->irq_info[i].mask)) 629 626 mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); 630 - #endif 631 627 632 628 return 0; 633 629 } ··· 1013 1011 * up 1014 1012 */ 1015 1013 dev->state = MLX5_DEVICE_STATE_UP; 1014 + 1015 + /* wait for firmware to accept initialization segments configurations 1016 + */ 1017 + err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); 1018 + if (err) { 1019 + dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", 1020 + FW_PRE_INIT_TIMEOUT_MILI); 1021 + goto out; 1022 + } 1016 1023 1017 1024 err = mlx5_cmd_init(dev); 1018 1025 if (err) {
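The main.c change above adds a 10-second pre-init wait, polling the firmware's "initializing" state before the command interface is brought up. A userspace sketch of the same bounded polling loop, using clock_gettime() for the deadline; the "firmware ready" check is a stub:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define PRE_INIT_TIMEOUT_MS     10000
#define POLL_INTERVAL_MS        2

static bool fw_initializing(void)
{
        static int polls_left = 5;   /* pretend it clears after 5 polls */
        return polls_left-- > 0;
}

static long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int wait_fw_init(unsigned int max_wait_ms)
{
        long deadline = now_ms() + max_wait_ms;

        while (fw_initializing()) {
                if (now_ms() > deadline)
                        return -EBUSY;
                usleep(POLL_INTERVAL_MS * 1000);
        }
        return 0;
}

int main(void)
{
        printf("wait_fw_init: %d\n", wait_fw_init(PRE_INIT_TIMEOUT_MS));
        return 0;
}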
+3
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 3334 3334 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); 3335 3335 u16 vid = vlan_dev_vlan_id(vlan_dev); 3336 3336 3337 + if (netif_is_bridge_port(vlan_dev)) 3338 + return 0; 3339 + 3337 3340 if (mlxsw_sp_port_dev_check(real_dev)) 3338 3341 return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, 3339 3342 vid);
+1 -1
drivers/net/ethernet/qlogic/qed/qed_debug.c
··· 2956 2956 qed_wr(p_hwfn, 2957 2957 p_ptt, 2958 2958 s_storm_defs[storm_id].cm_ctx_wr_addr, 2959 - BIT(9) | lid); 2959 + (i << 9) | lid); 2960 2960 *(dump_buf + offset) = qed_rd(p_hwfn, 2961 2961 p_ptt, 2962 2962 rd_reg_addr);
+2 -1
drivers/net/ethernet/qlogic/qed/qed_main.c
··· 1730 1730 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); 1731 1731 break; 1732 1732 default: 1733 - DP_ERR(cdev, "Invalid protocol type = %d\n", type); 1733 + DP_VERBOSE(cdev, QED_MSG_SP, 1734 + "Invalid protocol type = %d\n", type); 1734 1735 return; 1735 1736 } 1736 1737 }
+24 -2
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
··· 1824 1824 u32 (*get_cap_size)(void *, int); 1825 1825 void (*set_sys_info)(void *, int, u32); 1826 1826 void (*store_cap_mask)(void *, u32); 1827 + bool (*encap_rx_offload) (struct qlcnic_adapter *adapter); 1828 + bool (*encap_tx_offload) (struct qlcnic_adapter *adapter); 1827 1829 }; 1828 1830 1829 1831 extern struct qlcnic_nic_template qlcnic_vf_ops; 1830 1832 1831 - static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) 1833 + static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter) 1832 1834 { 1833 1835 return adapter->ahw->extra_capability[0] & 1834 1836 QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; 1835 1837 } 1836 1838 1837 - static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) 1839 + static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter) 1838 1840 { 1839 1841 return adapter->ahw->extra_capability[0] & 1840 1842 QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; 1843 + } 1844 + 1845 + static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter) 1846 + { 1847 + return false; 1848 + } 1849 + 1850 + static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter) 1851 + { 1852 + return false; 1853 + } 1854 + 1855 + static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) 1856 + { 1857 + return adapter->ahw->hw_ops->encap_rx_offload(adapter); 1858 + } 1859 + 1860 + static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) 1861 + { 1862 + return adapter->ahw->hw_ops->encap_tx_offload(adapter); 1841 1863 } 1842 1864 1843 1865 static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
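The qlcnic change above routes the encap-offload queries through the per-ASIC ops table, so one generation simply reports "not supported" while the other keeps checking its firmware capability bits. A standalone sketch of that function-pointer dispatch; the types, generations and capability bit are invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct adapter;

struct hw_ops {
        bool (*encap_rx_offload)(struct adapter *adapter);
        bool (*encap_tx_offload)(struct adapter *adapter);
};

struct adapter {
        const struct hw_ops *ops;
        uint32_t capabilities;
};

#define CAP_ENCAP_RX    (1U << 0)
#define CAP_ENCAP_TX    (1U << 1)

/* Older ASIC: the feature simply does not exist. */
static bool gen1_encap_rx(struct adapter *a) { (void)a; return false; }
static bool gen1_encap_tx(struct adapter *a) { (void)a; return false; }

/* Newer ASIC: ask the firmware capability word. */
static bool gen2_encap_rx(struct adapter *a) { return a->capabilities & CAP_ENCAP_RX; }
static bool gen2_encap_tx(struct adapter *a) { return a->capabilities & CAP_ENCAP_TX; }

static const struct hw_ops gen1_ops = { gen1_encap_rx, gen1_encap_tx };
static const struct hw_ops gen2_ops = { gen2_encap_rx, gen2_encap_tx };

/* Callers never look at the ASIC type, only at the ops table. */
static bool encap_rx_offload(struct adapter *a) { return a->ops->encap_rx_offload(a); }

int main(void)
{
        struct adapter older = { &gen1_ops, CAP_ENCAP_RX };
        struct adapter newer = { &gen2_ops, CAP_ENCAP_RX };

        printf("older: %d, newer: %d\n",
               encap_rx_offload(&older), encap_rx_offload(&newer));
        return 0;
}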
+2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
··· 242 242 .get_cap_size = qlcnic_83xx_get_cap_size, 243 243 .set_sys_info = qlcnic_83xx_set_sys_info, 244 244 .store_cap_mask = qlcnic_83xx_store_cap_mask, 245 + .encap_rx_offload = qlcnic_83xx_encap_rx_offload, 246 + .encap_tx_offload = qlcnic_83xx_encap_tx_offload, 245 247 }; 246 248 247 249 static struct qlcnic_nic_template qlcnic_83xx_ops = {
+1 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
··· 341 341 } 342 342 return -EIO; 343 343 } 344 - usleep_range(1000, 1500); 344 + udelay(1200); 345 345 } 346 346 347 347 if (id_reg)
+2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 632 632 .get_cap_size = qlcnic_82xx_get_cap_size, 633 633 .set_sys_info = qlcnic_82xx_set_sys_info, 634 634 .store_cap_mask = qlcnic_82xx_store_cap_mask, 635 + .encap_rx_offload = qlcnic_82xx_encap_rx_offload, 636 + .encap_tx_offload = qlcnic_82xx_encap_tx_offload, 635 637 }; 636 638 637 639 static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
+2
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
··· 77 77 .free_mac_list = qlcnic_sriov_vf_free_mac_list, 78 78 .enable_sds_intr = qlcnic_83xx_enable_sds_intr, 79 79 .disable_sds_intr = qlcnic_83xx_disable_sds_intr, 80 + .encap_rx_offload = qlcnic_83xx_encap_rx_offload, 81 + .encap_tx_offload = qlcnic_83xx_encap_tx_offload, 80 82 }; 81 83 82 84 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
+1 -1
drivers/net/ethernet/qualcomm/emac/emac-mac.c
··· 931 931 emac_mac_config(adpt); 932 932 emac_mac_rx_descs_refill(adpt, &adpt->rx_q); 933 933 934 - adpt->phydev->irq = PHY_IGNORE_INTERRUPT; 934 + adpt->phydev->irq = PHY_POLL; 935 935 ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, 936 936 PHY_INTERFACE_MODE_SGMII); 937 937 if (ret) {
+4 -71
drivers/net/ethernet/qualcomm/emac/emac-phy.c
··· 13 13 /* Qualcomm Technologies, Inc. EMAC PHY Controller driver. 14 14 */ 15 15 16 - #include <linux/module.h> 17 - #include <linux/of.h> 18 - #include <linux/of_net.h> 19 16 #include <linux/of_mdio.h> 20 17 #include <linux/phy.h> 21 18 #include <linux/iopoll.h> 22 19 #include <linux/acpi.h> 23 20 #include "emac.h" 24 - #include "emac-mac.h" 25 21 26 22 /* EMAC base register offsets */ 27 23 #define EMAC_MDIO_CTRL 0x001414 ··· 48 52 49 53 #define MDIO_WAIT_TIMES 1000 50 54 51 - #define EMAC_LINK_SPEED_DEFAULT (\ 52 - EMAC_LINK_SPEED_10_HALF |\ 53 - EMAC_LINK_SPEED_10_FULL |\ 54 - EMAC_LINK_SPEED_100_HALF |\ 55 - EMAC_LINK_SPEED_100_FULL |\ 56 - EMAC_LINK_SPEED_1GB_FULL) 57 - 58 - /** 59 - * emac_phy_mdio_autopoll_disable() - disable mdio autopoll 60 - * @adpt: the emac adapter 61 - * 62 - * The autopoll feature takes over the MDIO bus. In order for 63 - * the PHY driver to be able to talk to the PHY over the MDIO 64 - * bus, we need to temporarily disable the autopoll feature. 65 - */ 66 - static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt) 67 - { 68 - u32 val; 69 - 70 - /* disable autopoll */ 71 - emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0); 72 - 73 - /* wait for any mdio polling to complete */ 74 - if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val, 75 - !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100)) 76 - return 0; 77 - 78 - /* failed to disable; ensure it is enabled before returning */ 79 - emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN); 80 - 81 - return -EBUSY; 82 - } 83 - 84 - /** 85 - * emac_phy_mdio_autopoll_disable() - disable mdio autopoll 86 - * @adpt: the emac adapter 87 - * 88 - * The EMAC has the ability to poll the external PHY on the MDIO 89 - * bus for link state changes. This eliminates the need for the 90 - * driver to poll the phy. If if the link state does change, 91 - * the EMAC issues an interrupt on behalf of the PHY. 
92 - */ 93 - static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt) 94 - { 95 - emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN); 96 - } 97 - 98 55 static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) 99 56 { 100 57 struct emac_adapter *adpt = bus->priv; 101 58 u32 reg; 102 - int ret; 103 - 104 - ret = emac_phy_mdio_autopoll_disable(adpt); 105 - if (ret) 106 - return ret; 107 59 108 60 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, 109 61 (addr << PHY_ADDR_SHFT)); ··· 66 122 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 67 123 !(reg & (MDIO_START | MDIO_BUSY)), 68 124 100, MDIO_WAIT_TIMES * 100)) 69 - ret = -EIO; 70 - else 71 - ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; 125 + return -EIO; 72 126 73 - emac_phy_mdio_autopoll_enable(adpt); 74 - 75 - return ret; 127 + return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; 76 128 } 77 129 78 130 static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) 79 131 { 80 132 struct emac_adapter *adpt = bus->priv; 81 133 u32 reg; 82 - int ret; 83 - 84 - ret = emac_phy_mdio_autopoll_disable(adpt); 85 - if (ret) 86 - return ret; 87 134 88 135 emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, 89 136 (addr << PHY_ADDR_SHFT)); ··· 90 155 if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, 91 156 !(reg & (MDIO_START | MDIO_BUSY)), 100, 92 157 MDIO_WAIT_TIMES * 100)) 93 - ret = -EIO; 158 + return -EIO; 94 159 95 - emac_phy_mdio_autopoll_enable(adpt); 96 - 97 - return ret; 160 + return 0; 98 161 } 99 162 100 163 /* Configure the MDIO bus and connect the external PHY */
+1 -21
drivers/net/ethernet/qualcomm/emac/emac.c
··· 50 50 #define DMAR_DLY_CNT_DEF 15 51 51 #define DMAW_DLY_CNT_DEF 4 52 52 53 - #define IMR_NORMAL_MASK (\ 54 - ISR_ERROR |\ 55 - ISR_GPHY_LINK |\ 56 - ISR_TX_PKT |\ 57 - GPHY_WAKEUP_INT) 58 - 59 - #define IMR_EXTENDED_MASK (\ 60 - SW_MAN_INT |\ 61 - ISR_OVER |\ 62 - ISR_ERROR |\ 63 - ISR_GPHY_LINK |\ 64 - ISR_TX_PKT |\ 65 - GPHY_WAKEUP_INT) 53 + #define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT) 66 54 67 55 #define ISR_TX_PKT (\ 68 56 TX_PKT_INT |\ 69 57 TX_PKT_INT1 |\ 70 58 TX_PKT_INT2 |\ 71 59 TX_PKT_INT3) 72 - 73 - #define ISR_GPHY_LINK (\ 74 - GPHY_LINK_UP_INT |\ 75 - GPHY_LINK_DOWN_INT) 76 60 77 61 #define ISR_OVER (\ 78 62 RFD0_UR_INT |\ ··· 170 186 171 187 if (status & ISR_OVER) 172 188 net_warn_ratelimited("warning: TX/RX overflow\n"); 173 - 174 - /* link event */ 175 - if (status & ISR_GPHY_LINK) 176 - phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT)); 177 189 178 190 exit: 179 191 /* enable the interrupt */
+12 -12
drivers/net/ethernet/renesas/ravb_main.c
··· 230 230 int ring_size; 231 231 int i; 232 232 233 - /* Free RX skb ringbuffer */ 234 - if (priv->rx_skb[q]) { 235 - for (i = 0; i < priv->num_rx_ring[q]; i++) 236 - dev_kfree_skb(priv->rx_skb[q][i]); 237 - } 238 - kfree(priv->rx_skb[q]); 239 - priv->rx_skb[q] = NULL; 240 - 241 - /* Free aligned TX buffers */ 242 - kfree(priv->tx_align[q]); 243 - priv->tx_align[q] = NULL; 244 - 245 233 if (priv->rx_ring[q]) { 246 234 for (i = 0; i < priv->num_rx_ring[q]; i++) { 247 235 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; ··· 257 269 priv->tx_desc_dma[q]); 258 270 priv->tx_ring[q] = NULL; 259 271 } 272 + 273 + /* Free RX skb ringbuffer */ 274 + if (priv->rx_skb[q]) { 275 + for (i = 0; i < priv->num_rx_ring[q]; i++) 276 + dev_kfree_skb(priv->rx_skb[q][i]); 277 + } 278 + kfree(priv->rx_skb[q]); 279 + priv->rx_skb[q] = NULL; 280 + 281 + /* Free aligned TX buffers */ 282 + kfree(priv->tx_align[q]); 283 + priv->tx_align[q] = NULL; 260 284 261 285 /* Free TX skb ringbuffer. 262 286 * SKBs are freed by ravb_tx_free() call above.
+1 -1
drivers/net/ethernet/rocker/rocker_ofdpa.c
··· 1505 1505 *index = entry->index; 1506 1506 resolved = false; 1507 1507 } else if (removing) { 1508 - ofdpa_neigh_del(trans, found); 1509 1508 *index = found->index; 1509 + ofdpa_neigh_del(trans, found); 1510 1510 } else if (updating) { 1511 1511 ofdpa_neigh_update(found, trans, NULL, false); 1512 1512 resolved = !is_zero_ether_addr(found->eth_dst);
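The rocker fix above reads the neighbour's index before deleting the entry; with the old order, the *index assignment dereferenced memory that ofdpa_neigh_del() may already have freed. A minimal sketch of the read-then-free ordering, with illustrative structures:

#include <stdio.h>
#include <stdlib.h>

struct neigh_entry {
        unsigned int index;
};

static void neigh_del(struct neigh_entry *e)
{
        free(e);   /* after this, e must not be touched */
}

static void remove_and_report(struct neigh_entry *found, unsigned int *index)
{
        *index = found->index;   /* read everything we need first ... */
        neigh_del(found);        /* ... then release the entry */
}

int main(void)
{
        struct neigh_entry *e = malloc(sizeof(*e));
        unsigned int index;

        if (!e)
                return 1;
        e->index = 42;
        remove_and_report(e, &index);
        printf("index %u\n", index);
        return 0;
}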
+8 -7
drivers/net/ethernet/sfc/ef10.c
··· 4172 4172 * recipients 4173 4173 */ 4174 4174 if (is_mc_recip) { 4175 - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 4175 + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4176 4176 unsigned int depth, i; 4177 4177 4178 4178 memset(inbuf, 0, sizeof(inbuf)); ··· 4320 4320 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 4321 4321 } else { 4322 4322 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, 4323 - MC_CMD_FILTER_OP_IN_LEN, 4323 + MC_CMD_FILTER_OP_EXT_IN_LEN, 4324 4324 NULL, 0, rc); 4325 4325 } 4326 4326 } ··· 4453 4453 struct efx_filter_spec *spec) 4454 4454 { 4455 4455 struct efx_ef10_filter_table *table = efx->filter_state; 4456 - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 4456 + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4457 4457 struct efx_filter_spec *saved_spec; 4458 4458 unsigned int hash, i, depth = 1; 4459 4459 bool replacing = false; ··· 4940 4940 static void efx_ef10_filter_table_remove(struct efx_nic *efx) 4941 4941 { 4942 4942 struct efx_ef10_filter_table *table = efx->filter_state; 4943 - MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN); 4943 + MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4944 4944 struct efx_filter_spec *spec; 4945 4945 unsigned int filter_idx; 4946 4946 int rc; ··· 5105 5105 5106 5106 /* Insert/renew filters */ 5107 5107 for (i = 0; i < addr_count; i++) { 5108 + EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); 5108 5109 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5109 5110 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 5110 5111 rc = efx_ef10_filter_insert(efx, &spec, true); ··· 5123 5122 } 5124 5123 return rc; 5125 5124 } else { 5126 - /* mark as not inserted, and carry on */ 5127 - rc = EFX_EF10_FILTER_ID_INVALID; 5125 + /* keep invalid ID, and carry on */ 5128 5126 } 5127 + } else { 5128 + ids[i] = efx_ef10_filter_get_unsafe_id(rc); 5129 5129 } 5130 - ids[i] = efx_ef10_filter_get_unsafe_id(rc); 5131 5130 } 5132 5131 5133 5132 if (multicast && rollback) {
-2
drivers/net/ethernet/sfc/ef10_sriov.c
··· 661 661 up_write(&vf->efx->filter_sem); 662 662 mutex_unlock(&vf->efx->mac_lock); 663 663 664 - up_write(&vf->efx->filter_sem); 665 - 666 664 rc2 = efx_net_open(vf->efx->net_dev); 667 665 if (rc2) 668 666 goto reset_nic;
+5 -1
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
··· 37 37 #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) 38 38 #define TSE_PCS_CONTROL_REG 0x00 39 39 #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) 40 + #define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140 40 41 #define TSE_PCS_IF_MODE_REG 0x28 41 42 #define TSE_PCS_LINK_TIMER_0_REG 0x24 42 43 #define TSE_PCS_LINK_TIMER_1_REG 0x26 ··· 66 65 #define TSE_PCS_SW_RESET_TIMEOUT 100 67 66 #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) 68 67 #define TSE_PCS_USE_SGMII_ENA BIT(0) 68 + #define TSE_PCS_IF_USE_SGMII 0x03 69 69 70 70 #define SGMII_ADAPTER_CTRL_REG 0x00 71 71 #define SGMII_ADAPTER_DISABLE 0x0001 ··· 103 101 { 104 102 int ret = 0; 105 103 106 - writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); 104 + writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG); 105 + 106 + writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG); 107 107 108 108 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); 109 109 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
+7 -4
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
··· 214 214 { 215 215 /* Context type from W/B descriptor must be zero */ 216 216 if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) 217 - return -EINVAL; 217 + return 0; 218 218 219 219 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ 220 220 if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) 221 - return 0; 221 + return 1; 222 222 223 - return 1; 223 + return 0; 224 224 } 225 225 226 226 static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) ··· 282 282 } 283 283 } 284 284 exit: 285 - return ret; 285 + if (likely(ret == 0)) 286 + return 1; 287 + 288 + return 0; 286 289 } 287 290 288 291 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+37 -15
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 434 434 return; 435 435 436 436 /* check tx tstamp status */ 437 - if (!priv->hw->desc->get_tx_timestamp_status(p)) { 437 + if (priv->hw->desc->get_tx_timestamp_status(p)) { 438 438 /* get the valid tstamp */ 439 439 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 440 440 441 441 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 442 442 shhwtstamp.hwtstamp = ns_to_ktime(ns); 443 443 444 - netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); 444 + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); 445 445 /* pass tstamp to stack */ 446 446 skb_tstamp_tx(skb, &shhwtstamp); 447 447 } ··· 468 468 return; 469 469 470 470 /* Check if timestamp is available */ 471 - if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 471 + if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { 472 472 /* For GMAC4, the valid timestamp is from CTX next desc. */ 473 473 if (priv->plat->has_gmac4) 474 474 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); 475 475 else 476 476 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); 477 477 478 - netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); 478 + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 479 479 shhwtstamp = skb_hwtstamps(skb); 480 480 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 481 481 shhwtstamp->hwtstamp = ns_to_ktime(ns); 482 482 } else { 483 - netdev_err(priv->dev, "cannot get RX hw timestamp\n"); 483 + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); 484 484 } 485 485 } 486 486 ··· 546 546 /* PTP v1, UDP, any kind of event packet */ 547 547 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 548 548 /* take time stamp for all event messages */ 549 - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 549 + if (priv->plat->has_gmac4) 550 + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; 551 + else 552 + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 550 553 551 554 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 552 555 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ··· 581 578 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 582 579 ptp_v2 = PTP_TCR_TSVER2ENA; 583 580 /* take time stamp for all event messages */ 584 - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 581 + if (priv->plat->has_gmac4) 582 + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; 583 + else 584 + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 585 585 586 586 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 587 587 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ··· 618 612 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 619 613 ptp_v2 = PTP_TCR_TSVER2ENA; 620 614 /* take time stamp for all event messages */ 621 - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 615 + if (priv->plat->has_gmac4) 616 + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; 617 + else 618 + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 622 619 623 620 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 624 621 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ··· 1217 1208 u32 rx_count = priv->plat->rx_queues_to_use; 1218 1209 unsigned int bfsize = 0; 1219 1210 int ret = -ENOMEM; 1220 - u32 queue; 1211 + int queue; 1221 1212 int i; 1222 1213 1223 1214 if (priv->hw->mode->set_16kib_bfsize) ··· 2733 2724 2734 2725 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, 2735 2726 0, 1, 2736 - (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), 2727 + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 2737 2728 0, 0); 2738 2729 2739 2730 tmp_len -= TSO_MAX_BUFF_SIZE; ··· 2831 2822 2832 2823 tx_q->tx_skbuff_dma[first_entry].buf = des; 2833 2824 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 2834 - 
tx_q->tx_skbuff[first_entry] = skb; 2835 2825 2836 2826 first->des0 = cpu_to_le32(des); 2837 2827 ··· 2864 2856 2865 2857 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 2866 2858 2859 + /* Only the last descriptor gets to point to the skb. */ 2860 + tx_q->tx_skbuff[tx_q->cur_tx] = skb; 2861 + 2862 + /* We've used all descriptors we need for this skb, however, 2863 + * advance cur_tx so that it references a fresh descriptor. 2864 + * ndo_start_xmit will fill this descriptor the next time it's 2865 + * called and stmmac_tx_clean may clean up to this descriptor. 2866 + */ 2867 2867 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2868 2868 2869 2869 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { ··· 2963 2947 int i, csum_insertion = 0, is_jumbo = 0; 2964 2948 u32 queue = skb_get_queue_mapping(skb); 2965 2949 int nfrags = skb_shinfo(skb)->nr_frags; 2966 - unsigned int entry, first_entry; 2950 + int entry; 2951 + unsigned int first_entry; 2967 2952 struct dma_desc *desc, *first; 2968 2953 struct stmmac_tx_queue *tx_q; 2969 2954 unsigned int enh_desc; ··· 3004 2987 desc = tx_q->dma_tx + entry; 3005 2988 3006 2989 first = desc; 3007 - 3008 - tx_q->tx_skbuff[first_entry] = skb; 3009 2990 3010 2991 enh_desc = priv->plat->enh_desc; 3011 2992 /* To program the descriptors according to the size of the frame */ ··· 3052 3037 skb->len); 3053 3038 } 3054 3039 3055 - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3040 + /* Only the last descriptor gets to point to the skb. */ 3041 + tx_q->tx_skbuff[entry] = skb; 3056 3042 3043 + /* We've used all descriptors we need for this skb, however, 3044 + * advance cur_tx so that it references a fresh descriptor. 3045 + * ndo_start_xmit will fill this descriptor the next time it's 3046 + * called and stmmac_tx_clean may clean up to this descriptor. 3047 + */ 3048 + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3057 3049 tx_q->cur_tx = entry; 3058 3050 3059 3051 if (netif_msg_pktdata(priv)) {
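The stmmac hunks above record the skb only in the slot of its last descriptor and then advance cur_tx past it, so the cleanup path frees the skb exactly once and the next transmit starts on a fresh descriptor. A small sketch of that ring-index handling; the ring size and types are illustrative:

#include <stdio.h>

#define TX_SIZE         8                       /* power of two */
#define NEXT(x)         (((x) + 1) & (TX_SIZE - 1))

struct tx_ring {
        void *skb[TX_SIZE];                     /* only a packet's last descriptor owns the skb */
        unsigned int cur_tx;
};

static void xmit(struct tx_ring *ring, void *skb, int ndesc)
{
        unsigned int entry = ring->cur_tx;
        int i;

        for (i = 1; i < ndesc; i++)             /* walk past the first ndesc - 1 descriptors */
                entry = NEXT(entry);

        ring->skb[entry] = skb;                 /* last descriptor points at the skb */
        ring->cur_tx = NEXT(entry);             /* next xmit starts on a fresh slot */
}

int main(void)
{
        struct tx_ring ring = { { 0 }, 0 };
        int pkt = 1;

        xmit(&ring, &pkt, 3);                   /* a 3-descriptor packet */
        printf("skb stored at slot 2? %s, cur_tx=%u\n",
               ring.skb[2] ? "yes" : "no", ring.cur_tx);
        return 0;
}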
+2 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
··· 59 59 /* Enable Snapshot for Messages Relevant to Master */ 60 60 #define PTP_TCR_TSMSTRENA BIT(15) 61 61 /* Select PTP packets for Taking Snapshots */ 62 - #define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) 62 + #define PTP_TCR_SNAPTYPSEL_1 BIT(16) 63 + #define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16) 63 64 /* Enable MAC address for PTP Frame Filtering */ 64 65 #define PTP_TCR_TSENMACADDR BIT(18) 65 66
+1 -1
drivers/net/ethernet/ti/cpsw-common.c
··· 90 90 if (of_device_is_compatible(dev->of_node, "ti,dm816-emac")) 91 91 return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr); 92 92 93 - if (of_machine_is_compatible("ti,am4372")) 93 + if (of_machine_is_compatible("ti,am43")) 94 94 return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); 95 95 96 96 if (of_machine_is_compatible("ti,dra7"))
+2 -2
drivers/net/geneve.c
··· 1007 1007 1008 1008 dev->netdev_ops = &geneve_netdev_ops; 1009 1009 dev->ethtool_ops = &geneve_ethtool_ops; 1010 - dev->destructor = free_netdev; 1010 + dev->needs_free_netdev = true; 1011 1011 1012 1012 SET_NETDEV_DEVTYPE(dev, &geneve_type); 1013 1013 ··· 1133 1133 1134 1134 /* make enough headroom for basic scenario */ 1135 1135 encap_len = GENEVE_BASE_HLEN + ETH_HLEN; 1136 - if (ip_tunnel_info_af(info) == AF_INET) { 1136 + if (!metadata && ip_tunnel_info_af(info) == AF_INET) { 1137 1137 encap_len += sizeof(struct iphdr); 1138 1138 dev->max_mtu -= sizeof(struct iphdr); 1139 1139 } else {
+1 -1
drivers/net/gtp.c
··· 611 611 static void gtp_link_setup(struct net_device *dev) 612 612 { 613 613 dev->netdev_ops = &gtp_netdev_ops; 614 - dev->destructor = free_netdev; 614 + dev->needs_free_netdev = true; 615 615 616 616 dev->hard_header_len = 0; 617 617 dev->addr_len = 0;
+1 -1
drivers/net/hamradio/6pack.c
··· 311 311 { 312 312 /* Finish setting up the DEVICE info. */ 313 313 dev->netdev_ops = &sp_netdev_ops; 314 - dev->destructor = free_netdev; 314 + dev->needs_free_netdev = true; 315 315 dev->mtu = SIXP_MTU; 316 316 dev->hard_header_len = AX25_MAX_HEADER_LEN; 317 317 dev->header_ops = &ax25_header_ops;
+1 -1
drivers/net/hamradio/bpqether.c
··· 476 476 static void bpq_setup(struct net_device *dev) 477 477 { 478 478 dev->netdev_ops = &bpq_netdev_ops; 479 - dev->destructor = free_netdev; 479 + dev->needs_free_netdev = true; 480 480 481 481 memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); 482 482 memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+2
drivers/net/hamradio/hdlcdrv.c
··· 576 576 case HDLCDRVCTL_CALIBRATE: 577 577 if(!capable(CAP_SYS_RAWIO)) 578 578 return -EPERM; 579 + if (s->par.bitrate <= 0) 580 + return -EINVAL; 579 581 if (bi.data.calibrate > INT_MAX / s->par.bitrate) 580 582 return -EINVAL; 581 583 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
+3 -2
drivers/net/hyperv/hyperv_net.h
··· 171 171 spinlock_t request_lock; 172 172 struct list_head req_list; 173 173 174 + struct work_struct mcast_work; 175 + 174 176 u8 hw_mac_adr[ETH_ALEN]; 175 177 u8 rss_key[NETVSC_HASH_KEYLEN]; 176 178 u16 ind_table[ITAB_NUM]; ··· 203 201 int rndis_filter_close(struct netvsc_device *nvdev); 204 202 int rndis_filter_device_add(struct hv_device *dev, 205 203 struct netvsc_device_info *info); 204 + void rndis_filter_update(struct netvsc_device *nvdev); 206 205 void rndis_filter_device_remove(struct hv_device *dev, 207 206 struct netvsc_device *nvdev); 208 207 int rndis_filter_set_rss_param(struct rndis_device *rdev, ··· 214 211 struct vmbus_channel *channel, 215 212 void *data, u32 buflen); 216 213 217 - int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); 218 214 int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); 219 215 220 216 void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); ··· 698 696 /* list protection */ 699 697 spinlock_t lock; 700 698 701 - struct work_struct work; 702 699 u32 msg_enable; /* debug level */ 703 700 704 701 u32 tx_checksum_mask;
+20 -38
drivers/net/hyperv/netvsc_drv.c
··· 56 56 module_param(debug, int, S_IRUGO); 57 57 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 58 58 59 - static void do_set_multicast(struct work_struct *w) 60 - { 61 - struct net_device_context *ndevctx = 62 - container_of(w, struct net_device_context, work); 63 - struct hv_device *device_obj = ndevctx->device_ctx; 64 - struct net_device *ndev = hv_get_drvdata(device_obj); 65 - struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev); 66 - struct rndis_device *rdev; 67 - 68 - if (!nvdev) 69 - return; 70 - 71 - rdev = nvdev->extension; 72 - if (rdev == NULL) 73 - return; 74 - 75 - if (ndev->flags & IFF_PROMISC) 76 - rndis_filter_set_packet_filter(rdev, 77 - NDIS_PACKET_TYPE_PROMISCUOUS); 78 - else 79 - rndis_filter_set_packet_filter(rdev, 80 - NDIS_PACKET_TYPE_BROADCAST | 81 - NDIS_PACKET_TYPE_ALL_MULTICAST | 82 - NDIS_PACKET_TYPE_DIRECTED); 83 - } 84 - 85 59 static void netvsc_set_multicast_list(struct net_device *net) 86 60 { 87 61 struct net_device_context *net_device_ctx = netdev_priv(net); 62 + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 88 63 89 - schedule_work(&net_device_ctx->work); 64 + rndis_filter_update(nvdev); 90 65 } 91 66 92 67 static int netvsc_open(struct net_device *net) ··· 98 123 99 124 netif_tx_disable(net); 100 125 101 - /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */ 102 - cancel_work_sync(&net_device_ctx->work); 103 126 ret = rndis_filter_close(nvdev); 104 127 if (ret != 0) { 105 128 netdev_err(net, "unable to close device (ret %d).\n", ret); ··· 776 803 channels->rx_count || channels->tx_count || channels->other_count) 777 804 return -EINVAL; 778 805 779 - if (count > net->num_tx_queues || count > net->num_rx_queues) 806 + if (count > net->num_tx_queues || count > VRSS_CHANNEL_MAX) 780 807 return -EINVAL; 781 808 782 809 if (!nvdev || nvdev->destroy) ··· 1001 1028 static int netvsc_get_sset_count(struct net_device *dev, int string_set) 1002 1029 { 1003 1030 struct net_device_context *ndc = netdev_priv(dev); 1004 - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); 1031 + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); 1005 1032 1006 1033 if (!nvdev) 1007 1034 return -ENODEV; ··· 1131 1158 } 1132 1159 1133 1160 #ifdef CONFIG_NET_POLL_CONTROLLER 1134 - static void netvsc_poll_controller(struct net_device *net) 1161 + static void netvsc_poll_controller(struct net_device *dev) 1135 1162 { 1136 - /* As netvsc_start_xmit() works synchronous we don't have to 1137 - * trigger anything here. 
1138 - */ 1163 + struct net_device_context *ndc = netdev_priv(dev); 1164 + struct netvsc_device *ndev; 1165 + int i; 1166 + 1167 + rcu_read_lock(); 1168 + ndev = rcu_dereference(ndc->nvdev); 1169 + if (ndev) { 1170 + for (i = 0; i < ndev->num_chn; i++) { 1171 + struct netvsc_channel *nvchan = &ndev->chan_table[i]; 1172 + 1173 + napi_schedule(&nvchan->napi); 1174 + } 1175 + } 1176 + rcu_read_unlock(); 1139 1177 } 1140 1178 #endif 1141 1179 ··· 1203 1219 rndis_dev = ndev->extension; 1204 1220 if (indir) { 1205 1221 for (i = 0; i < ITAB_NUM; i++) 1206 - if (indir[i] >= dev->num_rx_queues) 1222 + if (indir[i] >= VRSS_CHANNEL_MAX) 1207 1223 return -EINVAL; 1208 1224 1209 1225 for (i = 0; i < ITAB_NUM; i++) ··· 1536 1552 hv_set_drvdata(dev, net); 1537 1553 1538 1554 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); 1539 - INIT_WORK(&net_device_ctx->work, do_set_multicast); 1540 1555 1541 1556 spin_lock_init(&net_device_ctx->lock); 1542 1557 INIT_LIST_HEAD(&net_device_ctx->reconfig_events); ··· 1605 1622 netif_device_detach(net); 1606 1623 1607 1624 cancel_delayed_work_sync(&ndev_ctx->dwork); 1608 - cancel_work_sync(&ndev_ctx->work); 1609 1625 1610 1626 /* 1611 1627 * Call to the vsc driver to let it know that the device is being
+29 -1
drivers/net/hyperv/rndis_filter.c
··· 31 31 32 32 #include "hyperv_net.h" 33 33 34 + static void rndis_set_multicast(struct work_struct *w); 34 35 35 36 #define RNDIS_EXT_LEN PAGE_SIZE 36 37 struct rndis_request { ··· 77 76 spin_lock_init(&device->request_lock); 78 77 79 78 INIT_LIST_HEAD(&device->req_list); 79 + INIT_WORK(&device->mcast_work, rndis_set_multicast); 80 80 81 81 device->state = RNDIS_DEV_UNINITIALIZED; 82 82 ··· 817 815 return ret; 818 816 } 819 817 820 - int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) 818 + static int rndis_filter_set_packet_filter(struct rndis_device *dev, 819 + u32 new_filter) 821 820 { 822 821 struct rndis_request *request; 823 822 struct rndis_set_request *set; ··· 847 844 put_rndis_request(dev, request); 848 845 849 846 return ret; 847 + } 848 + 849 + static void rndis_set_multicast(struct work_struct *w) 850 + { 851 + struct rndis_device *rdev 852 + = container_of(w, struct rndis_device, mcast_work); 853 + 854 + if (rdev->ndev->flags & IFF_PROMISC) 855 + rndis_filter_set_packet_filter(rdev, 856 + NDIS_PACKET_TYPE_PROMISCUOUS); 857 + else 858 + rndis_filter_set_packet_filter(rdev, 859 + NDIS_PACKET_TYPE_BROADCAST | 860 + NDIS_PACKET_TYPE_ALL_MULTICAST | 861 + NDIS_PACKET_TYPE_DIRECTED); 862 + } 863 + 864 + void rndis_filter_update(struct netvsc_device *nvdev) 865 + { 866 + struct rndis_device *rdev = nvdev->extension; 867 + 868 + schedule_work(&rdev->mcast_work); 850 869 } 851 870 852 871 static int rndis_filter_init_device(struct rndis_device *dev) ··· 997 972 998 973 if (dev->state != RNDIS_DEV_DATAINITIALIZED) 999 974 return 0; 975 + 976 + /* Make sure rndis_set_multicast doesn't re-enable filter! */ 977 + cancel_work_sync(&dev->mcast_work); 1000 978 1001 979 ret = rndis_filter_set_packet_filter(dev, 0); 1002 980 if (ret == -ENODEV)
+2 -2
drivers/net/ifb.c
··· 207 207 __skb_queue_purge(&txp->tq); 208 208 } 209 209 kfree(dp->tx_private); 210 - free_netdev(dev); 211 210 } 212 211 213 212 static void ifb_setup(struct net_device *dev) ··· 229 230 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 230 231 netif_keep_dst(dev); 231 232 eth_hw_addr_random(dev); 232 - dev->destructor = ifb_dev_free; 233 + dev->needs_free_netdev = true; 234 + dev->priv_destructor = ifb_dev_free; 233 235 } 234 236 235 237 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
+1 -1
drivers/net/ipvlan/ipvlan_main.c
··· 632 632 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 633 633 dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; 634 634 dev->netdev_ops = &ipvlan_netdev_ops; 635 - dev->destructor = free_netdev; 635 + dev->needs_free_netdev = true; 636 636 dev->header_ops = &ipvlan_header_ops; 637 637 dev->ethtool_ops = &ipvlan_ethtool_ops; 638 638 }
+2 -2
drivers/net/loopback.c
··· 159 159 { 160 160 dev_net(dev)->loopback_dev = NULL; 161 161 free_percpu(dev->lstats); 162 - free_netdev(dev); 163 162 } 164 163 165 164 static const struct net_device_ops loopback_ops = { ··· 195 196 dev->ethtool_ops = &loopback_ethtool_ops; 196 197 dev->header_ops = &eth_header_ops; 197 198 dev->netdev_ops = &loopback_ops; 198 - dev->destructor = loopback_dev_free; 199 + dev->needs_free_netdev = true; 200 + dev->priv_destructor = loopback_dev_free; 199 201 } 200 202 201 203 /* Setup and register the loopback device. */
+2 -2
drivers/net/macsec.c
··· 2996 2996 free_percpu(macsec->secy.tx_sc.stats); 2997 2997 2998 2998 dev_put(real_dev); 2999 - free_netdev(dev); 3000 2999 } 3001 3000 3002 3001 static void macsec_setup(struct net_device *dev) ··· 3005 3006 dev->max_mtu = ETH_MAX_MTU; 3006 3007 dev->priv_flags |= IFF_NO_QUEUE; 3007 3008 dev->netdev_ops = &macsec_netdev_ops; 3008 - dev->destructor = macsec_free_netdev; 3009 + dev->needs_free_netdev = true; 3010 + dev->priv_destructor = macsec_free_netdev; 3009 3011 SET_NETDEV_DEVTYPE(dev, &macsec_type); 3010 3012 3011 3013 eth_zero_addr(dev->broadcast);
+72 -15
drivers/net/macvlan.c
··· 39 39 #define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS) 40 40 #define MACVLAN_BC_QUEUE_LEN 1000 41 41 42 + #define MACVLAN_F_PASSTHRU 1 43 + #define MACVLAN_F_ADDRCHANGE 2 44 + 42 45 struct macvlan_port { 43 46 struct net_device *dev; 44 47 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 45 48 struct list_head vlans; 46 49 struct sk_buff_head bc_queue; 47 50 struct work_struct bc_work; 48 - bool passthru; 51 + u32 flags; 49 52 int count; 50 53 struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; 51 54 DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); 55 + unsigned char perm_addr[ETH_ALEN]; 52 56 }; 53 57 54 58 struct macvlan_source_entry { ··· 69 65 #define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0])) 70 66 71 67 static void macvlan_port_destroy(struct net_device *dev); 68 + 69 + static inline bool macvlan_passthru(const struct macvlan_port *port) 70 + { 71 + return port->flags & MACVLAN_F_PASSTHRU; 72 + } 73 + 74 + static inline void macvlan_set_passthru(struct macvlan_port *port) 75 + { 76 + port->flags |= MACVLAN_F_PASSTHRU; 77 + } 78 + 79 + static inline bool macvlan_addr_change(const struct macvlan_port *port) 80 + { 81 + return port->flags & MACVLAN_F_ADDRCHANGE; 82 + } 83 + 84 + static inline void macvlan_set_addr_change(struct macvlan_port *port) 85 + { 86 + port->flags |= MACVLAN_F_ADDRCHANGE; 87 + } 88 + 89 + static inline void macvlan_clear_addr_change(struct macvlan_port *port) 90 + { 91 + port->flags &= ~MACVLAN_F_ADDRCHANGE; 92 + } 72 93 73 94 /* Hash Ethernet address */ 74 95 static u32 macvlan_eth_hash(const unsigned char *addr) ··· 210 181 static bool macvlan_addr_busy(const struct macvlan_port *port, 211 182 const unsigned char *addr) 212 183 { 213 - /* Test to see if the specified multicast address is 184 + /* Test to see if the specified address is 214 185 * currently in use by the underlying device or 215 186 * another macvlan. 
216 187 */ 217 - if (ether_addr_equal_64bits(port->dev->dev_addr, addr)) 188 + if (!macvlan_passthru(port) && !macvlan_addr_change(port) && 189 + ether_addr_equal_64bits(port->dev->dev_addr, addr)) 218 190 return true; 219 191 220 192 if (macvlan_hash_lookup(port, addr)) ··· 475 445 } 476 446 477 447 macvlan_forward_source(skb, port, eth->h_source); 478 - if (port->passthru) 448 + if (macvlan_passthru(port)) 479 449 vlan = list_first_or_null_rcu(&port->vlans, 480 450 struct macvlan_dev, list); 481 451 else ··· 604 574 struct net_device *lowerdev = vlan->lowerdev; 605 575 int err; 606 576 607 - if (vlan->port->passthru) { 577 + if (macvlan_passthru(vlan->port)) { 608 578 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) { 609 579 err = dev_set_promiscuity(lowerdev, 1); 610 580 if (err < 0) ··· 679 649 dev_uc_unsync(lowerdev, dev); 680 650 dev_mc_unsync(lowerdev, dev); 681 651 682 - if (vlan->port->passthru) { 652 + if (macvlan_passthru(vlan->port)) { 683 653 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) 684 654 dev_set_promiscuity(lowerdev, -1); 685 655 goto hash_del; ··· 702 672 { 703 673 struct macvlan_dev *vlan = netdev_priv(dev); 704 674 struct net_device *lowerdev = vlan->lowerdev; 675 + struct macvlan_port *port = vlan->port; 705 676 int err; 706 677 707 678 if (!(dev->flags & IFF_UP)) { ··· 713 682 if (macvlan_addr_busy(vlan->port, addr)) 714 683 return -EBUSY; 715 684 716 - if (!vlan->port->passthru) { 685 + if (!macvlan_passthru(port)) { 717 686 err = dev_uc_add(lowerdev, addr); 718 687 if (err) 719 688 return err; ··· 723 692 724 693 macvlan_hash_change_addr(vlan, addr); 725 694 } 695 + if (macvlan_passthru(port) && !macvlan_addr_change(port)) { 696 + /* Since addr_change isn't set, we are here due to lower 697 + * device change. Save the lower-dev address so we can 698 + * restore it later. 699 + */ 700 + ether_addr_copy(vlan->port->perm_addr, 701 + lowerdev->dev_addr); 702 + } 703 + macvlan_clear_addr_change(port); 726 704 return 0; 727 705 } 728 706 ··· 743 703 if (!is_valid_ether_addr(addr->sa_data)) 744 704 return -EADDRNOTAVAIL; 745 705 706 + /* If the addresses are the same, this is a no-op */ 707 + if (ether_addr_equal(dev->dev_addr, addr->sa_data)) 708 + return 0; 709 + 746 710 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 711 + macvlan_set_addr_change(vlan->port); 747 712 dev_set_mac_address(vlan->lowerdev, addr); 748 713 return 0; 749 714 } ··· 973 928 /* Support unicast filter only on passthru devices. 974 929 * Multicast filter should be allowed on all devices. 975 930 */ 976 - if (!vlan->port->passthru && is_unicast_ether_addr(addr)) 931 + if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) 977 932 return -EOPNOTSUPP; 978 933 979 934 if (flags & NLM_F_REPLACE) ··· 997 952 /* Support unicast filter only on passthru devices. 998 953 * Multicast filter should be allowed on all devices. 
999 954 */ 1000 - if (!vlan->port->passthru && is_unicast_ether_addr(addr)) 955 + if (!macvlan_passthru(vlan->port) && is_unicast_ether_addr(addr)) 1001 956 return -EOPNOTSUPP; 1002 957 1003 958 if (is_unicast_ether_addr(addr)) ··· 1137 1092 netif_keep_dst(dev); 1138 1093 dev->priv_flags |= IFF_UNICAST_FLT; 1139 1094 dev->netdev_ops = &macvlan_netdev_ops; 1140 - dev->destructor = free_netdev; 1095 + dev->needs_free_netdev = true; 1141 1096 dev->header_ops = &macvlan_hard_header_ops; 1142 1097 dev->ethtool_ops = &macvlan_ethtool_ops; 1143 1098 } ··· 1165 1120 if (port == NULL) 1166 1121 return -ENOMEM; 1167 1122 1168 - port->passthru = false; 1169 1123 port->dev = dev; 1124 + ether_addr_copy(port->perm_addr, dev->dev_addr); 1170 1125 INIT_LIST_HEAD(&port->vlans); 1171 1126 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 1172 1127 INIT_HLIST_HEAD(&port->vlan_hash[i]); ··· 1204 1159 dev_put(src->dev); 1205 1160 1206 1161 kfree_skb(skb); 1162 + } 1163 + 1164 + /* If the lower device address has been changed by passthru 1165 + * macvlan, put it back. 1166 + */ 1167 + if (macvlan_passthru(port) && 1168 + !ether_addr_equal(port->dev->dev_addr, port->perm_addr)) { 1169 + struct sockaddr sa; 1170 + 1171 + sa.sa_family = port->dev->type; 1172 + memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len); 1173 + dev_set_mac_address(port->dev, &sa); 1207 1174 } 1208 1175 1209 1176 kfree(port); ··· 1383 1326 port = macvlan_port_get_rtnl(lowerdev); 1384 1327 1385 1328 /* Only 1 macvlan device can be created in passthru mode */ 1386 - if (port->passthru) { 1329 + if (macvlan_passthru(port)) { 1387 1330 /* The macvlan port must be not created this time, 1388 1331 * still goto destroy_macvlan_port for readability. 1389 1332 */ ··· 1409 1352 err = -EINVAL; 1410 1353 goto destroy_macvlan_port; 1411 1354 } 1412 - port->passthru = true; 1355 + macvlan_set_passthru(port); 1413 1356 eth_hw_addr_inherit(dev, lowerdev); 1414 1357 } 1415 1358 ··· 1491 1434 if (data && data[IFLA_MACVLAN_FLAGS]) { 1492 1435 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); 1493 1436 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; 1494 - if (vlan->port->passthru && promisc) { 1437 + if (macvlan_passthru(vlan->port) && promisc) { 1495 1438 int err; 1496 1439 1497 1440 if (flags & MACVLAN_FLAG_NOPROMISC) ··· 1654 1597 } 1655 1598 break; 1656 1599 case NETDEV_CHANGEADDR: 1657 - if (!port->passthru) 1600 + if (!macvlan_passthru(port)) 1658 1601 return NOTIFY_DONE; 1659 1602 1660 1603 vlan = list_first_entry_or_null(&port->vlans,
+1 -1
drivers/net/netconsole.c
··· 358 358 if (err) 359 359 goto out_unlock; 360 360 361 - pr_info("netconsole: network logging started\n"); 361 + pr_info("network logging started\n"); 362 362 } else { /* false */ 363 363 /* We need to disable the netconsole before cleaning it up 364 364 * otherwise we might end up in write_msg() with
+1 -1
drivers/net/nlmon.c
··· 113 113 114 114 dev->netdev_ops = &nlmon_ops; 115 115 dev->ethtool_ops = &nlmon_ethtool_ops; 116 - dev->destructor = free_netdev; 116 + dev->needs_free_netdev = true; 117 117 118 118 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | 119 119 NETIF_F_HIGHDMA | NETIF_F_LLTX;
+1
drivers/net/phy/Kconfig
··· 127 127 tristate "ThunderX SOCs MDIO buses" 128 128 depends on 64BIT 129 129 depends on PCI 130 + depends on !(MDIO_DEVICE=y && PHYLIB=m) 130 131 select MDIO_CAVIUM 131 132 help 132 133 This driver supports the MDIO interfaces found on Cavium
+1 -1
drivers/net/phy/dp83640.c
··· 908 908 if (overflow) { 909 909 pr_debug("tx timestamp queue overflow, count %d\n", overflow); 910 910 while (skb) { 911 - skb_complete_tx_timestamp(skb, NULL); 911 + kfree_skb(skb); 912 912 skb = skb_dequeue(&dp83640->tx_queue); 913 913 } 914 914 return;
-2
drivers/net/phy/marvell.c
··· 1127 1127 if (adv < 0) 1128 1128 return adv; 1129 1129 1130 - lpa &= adv; 1131 - 1132 1130 if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) 1133 1131 phydev->duplex = DUPLEX_FULL; 1134 1132 else
+13
drivers/net/phy/mdio_bus.c
··· 658 658 return 0; 659 659 } 660 660 661 + static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env) 662 + { 663 + int rc; 664 + 665 + /* Some devices have extra OF data and an OF-style MODALIAS */ 666 + rc = of_device_uevent_modalias(dev, env); 667 + if (rc != -ENODEV) 668 + return rc; 669 + 670 + return 0; 671 + } 672 + 661 673 #ifdef CONFIG_PM 662 674 static int mdio_bus_suspend(struct device *dev) 663 675 { ··· 720 708 struct bus_type mdio_bus_type = { 721 709 .name = "mdio_bus", 722 710 .match = mdio_bus_match, 711 + .uevent = mdio_uevent, 723 712 .pm = MDIO_BUS_PM_OPS, 724 713 }; 725 714 EXPORT_SYMBOL(mdio_bus_type);
+30 -14
drivers/net/phy/micrel.c
··· 268 268 return ret; 269 269 } 270 270 271 + /* Some config bits need to be set again on resume, handle them here. */ 272 + static int kszphy_config_reset(struct phy_device *phydev) 273 + { 274 + struct kszphy_priv *priv = phydev->priv; 275 + int ret; 276 + 277 + if (priv->rmii_ref_clk_sel) { 278 + ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); 279 + if (ret) { 280 + phydev_err(phydev, 281 + "failed to set rmii reference clock\n"); 282 + return ret; 283 + } 284 + } 285 + 286 + if (priv->led_mode >= 0) 287 + kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode); 288 + 289 + return 0; 290 + } 291 + 271 292 static int kszphy_config_init(struct phy_device *phydev) 272 293 { 273 294 struct kszphy_priv *priv = phydev->priv; 274 295 const struct kszphy_type *type; 275 - int ret; 276 296 277 297 if (!priv) 278 298 return 0; ··· 305 285 if (type->has_nand_tree_disable) 306 286 kszphy_nand_tree_disable(phydev); 307 287 308 - if (priv->rmii_ref_clk_sel) { 309 - ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); 310 - if (ret) { 311 - phydev_err(phydev, 312 - "failed to set rmii reference clock\n"); 313 - return ret; 314 - } 315 - } 316 - 317 - if (priv->led_mode >= 0) 318 - kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); 319 - 320 - return 0; 288 + return kszphy_config_reset(phydev); 321 289 } 322 290 323 291 static int ksz8041_config_init(struct phy_device *phydev) ··· 619 611 if ((regval & 0xFF) == 0xFF) { 620 612 phy_init_hw(phydev); 621 613 phydev->link = 0; 614 + if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) 615 + phydev->drv->config_intr(phydev); 622 616 } 623 617 624 618 return 0; ··· 710 700 711 701 static int kszphy_resume(struct phy_device *phydev) 712 702 { 703 + int ret; 704 + 713 705 genphy_resume(phydev); 706 + 707 + ret = kszphy_config_reset(phydev); 708 + if (ret) 709 + return ret; 714 710 715 711 /* Enable PHY Interrupts */ 716 712 if (phy_interrupt_is_valid(phydev)) {
+3 -1
drivers/net/phy/phy.c
··· 54 54 return "5Gbps"; 55 55 case SPEED_10000: 56 56 return "10Gbps"; 57 + case SPEED_14000: 58 + return "14Gbps"; 57 59 case SPEED_20000: 58 60 return "20Gbps"; 59 61 case SPEED_25000: ··· 243 241 * phy_lookup_setting - lookup a PHY setting 244 242 * @speed: speed to match 245 243 * @duplex: duplex to match 246 - * @feature: allowed link modes 244 + * @features: allowed link modes 247 245 * @exact: an exact match is required 248 246 * 249 247 * Search the settings array for a setting that matches the speed and
+3 -4
drivers/net/slip/slip.c
··· 629 629 static void sl_free_netdev(struct net_device *dev) 630 630 { 631 631 int i = dev->base_addr; 632 - free_netdev(dev); 632 + 633 633 slip_devs[i] = NULL; 634 634 } 635 635 ··· 651 651 static void sl_setup(struct net_device *dev) 652 652 { 653 653 dev->netdev_ops = &sl_netdev_ops; 654 - dev->destructor = sl_free_netdev; 654 + dev->needs_free_netdev = true; 655 + dev->priv_destructor = sl_free_netdev; 655 656 656 657 dev->hard_header_len = 0; 657 658 dev->addr_len = 0; ··· 1370 1369 if (sl->tty) { 1371 1370 printk(KERN_ERR "%s: tty discipline still running\n", 1372 1371 dev->name); 1373 - /* Intentionally leak the control block. */ 1374 - dev->destructor = NULL; 1375 1372 } 1376 1373 1377 1374 unregister_netdev(dev);
+2 -2
drivers/net/team/team.c
··· 1643 1643 struct team *team = netdev_priv(dev); 1644 1644 1645 1645 free_percpu(team->pcpu_stats); 1646 - free_netdev(dev); 1647 1646 } 1648 1647 1649 1648 static int team_open(struct net_device *dev) ··· 2078 2079 2079 2080 dev->netdev_ops = &team_netdev_ops; 2080 2081 dev->ethtool_ops = &team_ethtool_ops; 2081 - dev->destructor = team_destructor; 2082 + dev->needs_free_netdev = true; 2083 + dev->priv_destructor = team_destructor; 2082 2084 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 2083 2085 dev->priv_flags |= IFF_NO_QUEUE; 2084 2086 dev->priv_flags |= IFF_TEAM;
+2 -2
drivers/net/tun.c
··· 1560 1560 free_percpu(tun->pcpu_stats); 1561 1561 tun_flow_uninit(tun); 1562 1562 security_tun_dev_free_security(tun->security); 1563 - free_netdev(dev); 1564 1563 } 1565 1564 1566 1565 static void tun_setup(struct net_device *dev) ··· 1570 1571 tun->group = INVALID_GID; 1571 1572 1572 1573 dev->ethtool_ops = &tun_ethtool_ops; 1573 - dev->destructor = tun_free_netdev; 1574 + dev->needs_free_netdev = true; 1575 + dev->priv_destructor = tun_free_netdev; 1574 1576 /* We prefer our own queue length */ 1575 1577 dev->tx_queue_len = TUN_READQ_SIZE; 1576 1578 }
+16
drivers/net/usb/ax88179_178a.c
··· 1722 1722 .tx_fixup = ax88179_tx_fixup, 1723 1723 }; 1724 1724 1725 + static const struct driver_info belkin_info = { 1726 + .description = "Belkin USB Ethernet Adapter", 1727 + .bind = ax88179_bind, 1728 + .unbind = ax88179_unbind, 1729 + .status = ax88179_status, 1730 + .link_reset = ax88179_link_reset, 1731 + .reset = ax88179_reset, 1732 + .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1733 + .rx_fixup = ax88179_rx_fixup, 1734 + .tx_fixup = ax88179_tx_fixup, 1735 + }; 1736 + 1725 1737 static const struct usb_device_id products[] = { 1726 1738 { 1727 1739 /* ASIX AX88179 10/100/1000 */ ··· 1763 1751 /* Lenovo OneLinkDock Gigabit LAN */ 1764 1752 USB_DEVICE(0x17ef, 0x304b), 1765 1753 .driver_info = (unsigned long)&lenovo_info, 1754 + }, { 1755 + /* Belkin B2B128 USB 3.0 Hub + Gigabit Ethernet Adapter */ 1756 + USB_DEVICE(0x050d, 0x0128), 1757 + .driver_info = (unsigned long)&belkin_info, 1766 1758 }, 1767 1759 { }, 1768 1760 };
+1 -1
drivers/net/usb/cdc-phonet.c
··· 298 298 dev->addr_len = 1; 299 299 dev->tx_queue_len = 3; 300 300 301 - dev->destructor = free_netdev; 301 + dev->needs_free_netdev = true; 302 302 } 303 303 304 304 /*
+5 -1
drivers/net/usb/qmi_wwan.c
··· 123 123 dev->addr_len = 0; 124 124 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; 125 125 dev->netdev_ops = &qmimux_netdev_ops; 126 - dev->destructor = free_netdev; 126 + dev->needs_free_netdev = true; 127 127 } 128 128 129 129 static struct net_device *qmimux_find_dev(struct usbnet *dev, u8 mux_id) ··· 1192 1192 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 1193 1193 {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, 1194 1194 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 1195 + {QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */ 1196 + {QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */ 1195 1197 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */ 1196 1198 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */ 1197 1199 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */ ··· 1208 1206 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ 1209 1207 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 1210 1208 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ 1209 + {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ 1210 + {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */ 1211 1211 {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ 1212 1212 {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ 1213 1213 {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
+2
drivers/net/usb/r8152.c
··· 4368 4368 break; 4369 4369 } 4370 4370 4371 + dev_dbg(&intf->dev, "Detected version 0x%04x\n", version); 4372 + 4371 4373 return version; 4372 4374 } 4373 4375
+4 -4
drivers/net/veth.c
··· 222 222 static void veth_dev_free(struct net_device *dev) 223 223 { 224 224 free_percpu(dev->vstats); 225 - free_netdev(dev); 226 225 } 227 226 228 227 #ifdef CONFIG_NET_POLL_CONTROLLER ··· 316 317 NETIF_F_HW_VLAN_STAG_TX | 317 318 NETIF_F_HW_VLAN_CTAG_RX | 318 319 NETIF_F_HW_VLAN_STAG_RX); 319 - dev->destructor = veth_dev_free; 320 + dev->needs_free_netdev = true; 321 + dev->priv_destructor = veth_dev_free; 320 322 dev->max_mtu = ETH_MAX_MTU; 321 323 322 324 dev->hw_features = VETH_FEATURES; ··· 383 383 tbp = tb; 384 384 } 385 385 386 - if (tbp[IFLA_IFNAME]) { 386 + if (ifmp && tbp[IFLA_IFNAME]) { 387 387 nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); 388 388 name_assign_type = NET_NAME_USER; 389 389 } else { ··· 402 402 return PTR_ERR(peer); 403 403 } 404 404 405 - if (tbp[IFLA_ADDRESS] == NULL) 405 + if (!ifmp || !tbp[IFLA_ADDRESS]) 406 406 eth_hw_addr_random(peer); 407 407 408 408 if (ifmp && (dev->ifindex != 0))
+4 -2
drivers/net/virtio_net.c
··· 869 869 unsigned int len; 870 870 871 871 len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), 872 - rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len); 872 + rq->min_buf_len, PAGE_SIZE - hdr_len); 873 873 return ALIGN(len, L1_CACHE_BYTES); 874 874 } 875 875 ··· 1797 1797 flush_work(&vi->config_work); 1798 1798 1799 1799 netif_device_detach(vi->dev); 1800 + netif_tx_disable(vi->dev); 1800 1801 cancel_delayed_work_sync(&vi->refill); 1801 1802 1802 1803 if (netif_running(vi->dev)) { ··· 2145 2144 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; 2146 2145 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); 2147 2146 2148 - return max(min_buf_len, hdr_len); 2147 + return max(max(min_buf_len, hdr_len) - hdr_len, 2148 + (unsigned int)GOOD_PACKET_LEN); 2149 2149 } 2150 2150 2151 2151 static int virtnet_find_vqs(struct virtnet_info *vi)
+33 -5
drivers/net/vrf.c
··· 36 36 #include <net/addrconf.h> 37 37 #include <net/l3mdev.h> 38 38 #include <net/fib_rules.h> 39 + #include <net/netns/generic.h> 39 40 40 41 #define DRV_NAME "vrf" 41 42 #define DRV_VERSION "1.0" 42 43 43 44 #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ 44 - static bool add_fib_rules = true; 45 + 46 + static unsigned int vrf_net_id; 45 47 46 48 struct net_vrf { 47 49 struct rtable __rcu *rth; ··· 1350 1348 dev->netdev_ops = &vrf_netdev_ops; 1351 1349 dev->l3mdev_ops = &vrf_l3mdev_ops; 1352 1350 dev->ethtool_ops = &vrf_ethtool_ops; 1353 - dev->destructor = free_netdev; 1351 + dev->needs_free_netdev = true; 1354 1352 1355 1353 /* Fill in device structure with ethernet-generic values. */ 1356 1354 eth_hw_addr_random(dev); ··· 1396 1394 struct nlattr *tb[], struct nlattr *data[]) 1397 1395 { 1398 1396 struct net_vrf *vrf = netdev_priv(dev); 1397 + bool *add_fib_rules; 1398 + struct net *net; 1399 1399 int err; 1400 1400 1401 1401 if (!data || !data[IFLA_VRF_TABLE]) ··· 1413 1409 if (err) 1414 1410 goto out; 1415 1411 1416 - if (add_fib_rules) { 1412 + net = dev_net(dev); 1413 + add_fib_rules = net_generic(net, vrf_net_id); 1414 + if (*add_fib_rules) { 1417 1415 err = vrf_add_fib_rules(dev); 1418 1416 if (err) { 1419 1417 unregister_netdevice(dev); 1420 1418 goto out; 1421 1419 } 1422 - add_fib_rules = false; 1420 + *add_fib_rules = false; 1423 1421 } 1424 1422 1425 1423 out: ··· 1504 1498 .notifier_call = vrf_device_event, 1505 1499 }; 1506 1500 1501 + /* Initialize per network namespace state */ 1502 + static int __net_init vrf_netns_init(struct net *net) 1503 + { 1504 + bool *add_fib_rules = net_generic(net, vrf_net_id); 1505 + 1506 + *add_fib_rules = true; 1507 + 1508 + return 0; 1509 + } 1510 + 1511 + static struct pernet_operations vrf_net_ops __net_initdata = { 1512 + .init = vrf_netns_init, 1513 + .id = &vrf_net_id, 1514 + .size = sizeof(bool), 1515 + }; 1516 + 1507 1517 static int __init vrf_init_module(void) 1508 1518 { 1509 1519 int rc; 1510 1520 1511 1521 register_netdevice_notifier(&vrf_notifier_block); 1512 1522 1513 - rc = rtnl_link_register(&vrf_link_ops); 1523 + rc = register_pernet_subsys(&vrf_net_ops); 1514 1524 if (rc < 0) 1515 1525 goto error; 1526 + 1527 + rc = rtnl_link_register(&vrf_link_ops); 1528 + if (rc < 0) { 1529 + unregister_pernet_subsys(&vrf_net_ops); 1530 + goto error; 1531 + } 1516 1532 1517 1533 return 0; 1518 1534
+1 -1
drivers/net/vsockmon.c
··· 135 135 136 136 dev->netdev_ops = &vsockmon_ops; 137 137 dev->ethtool_ops = &vsockmon_ethtool_ops; 138 - dev->destructor = free_netdev; 138 + dev->needs_free_netdev = true; 139 139 140 140 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | 141 141 NETIF_F_HIGHDMA | NETIF_F_LLTX;
+31 -10
drivers/net/vxlan.c
··· 59 59 60 60 static int vxlan_sock_add(struct vxlan_dev *vxlan); 61 61 62 + static void vxlan_vs_del_dev(struct vxlan_dev *vxlan); 63 + 62 64 /* per-network namespace private data for this module */ 63 65 struct vxlan_net { 64 66 struct list_head vxlan_list; ··· 742 740 call_rcu(&f->rcu, vxlan_fdb_free); 743 741 } 744 742 743 + static void vxlan_dst_free(struct rcu_head *head) 744 + { 745 + struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); 746 + 747 + dst_cache_destroy(&rd->dst_cache); 748 + kfree(rd); 749 + } 750 + 751 + static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, 752 + struct vxlan_rdst *rd) 753 + { 754 + list_del_rcu(&rd->list); 755 + vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); 756 + call_rcu(&rd->rcu, vxlan_dst_free); 757 + } 758 + 745 759 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, 746 760 union vxlan_addr *ip, __be16 *port, __be32 *src_vni, 747 761 __be32 *vni, u32 *ifindex) ··· 882 864 * otherwise destroy the fdb entry 883 865 */ 884 866 if (rd && !list_is_singular(&f->remotes)) { 885 - list_del_rcu(&rd->list); 886 - vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); 887 - kfree_rcu(rd, rcu); 867 + vxlan_fdb_dst_destroy(vxlan, f, rd); 888 868 goto out; 889 869 } 890 870 ··· 1082 1066 1083 1067 rcu_assign_pointer(vxlan->vn4_sock, NULL); 1084 1068 synchronize_net(); 1069 + 1070 + vxlan_vs_del_dev(vxlan); 1085 1071 1086 1072 if (__vxlan_sock_release_prep(sock4)) { 1087 1073 udp_tunnel_sock_release(sock4->sock); ··· 2360 2342 mod_timer(&vxlan->age_timer, next_timer); 2361 2343 } 2362 2344 2345 + static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) 2346 + { 2347 + struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 2348 + 2349 + spin_lock(&vn->sock_lock); 2350 + hlist_del_init_rcu(&vxlan->hlist); 2351 + spin_unlock(&vn->sock_lock); 2352 + } 2353 + 2363 2354 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) 2364 2355 { 2365 2356 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); ··· 2611 2584 eth_hw_addr_random(dev); 2612 2585 ether_setup(dev); 2613 2586 2614 - dev->destructor = free_netdev; 2587 + dev->needs_free_netdev = true; 2615 2588 SET_NETDEV_DEVTYPE(dev, &vxlan_type); 2616 2589 2617 2590 dev->features |= NETIF_F_LLTX; ··· 3313 3286 static void vxlan_dellink(struct net_device *dev, struct list_head *head) 3314 3287 { 3315 3288 struct vxlan_dev *vxlan = netdev_priv(dev); 3316 - struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 3317 3289 3318 3290 vxlan_flush(vxlan, true); 3319 - 3320 - spin_lock(&vn->sock_lock); 3321 - if (!hlist_unhashed(&vxlan->hlist)) 3322 - hlist_del_rcu(&vxlan->hlist); 3323 - spin_unlock(&vn->sock_lock); 3324 3291 3325 3292 gro_cells_destroy(&vxlan->gro_cells); 3326 3293 list_del(&vxlan->next);
+1 -1
drivers/net/wan/dlci.c
··· 475 475 dev->flags = 0; 476 476 dev->header_ops = &dlci_header_ops; 477 477 dev->netdev_ops = &dlci_netdev_ops; 478 - dev->destructor = free_netdev; 478 + dev->needs_free_netdev = true; 479 479 480 480 dlp->receive = dlci_receive; 481 481
+1 -1
drivers/net/wan/hdlc_fr.c
··· 1106 1106 return -EIO; 1107 1107 } 1108 1108 1109 - dev->destructor = free_netdev; 1109 + dev->needs_free_netdev = true; 1110 1110 *get_dev_p(pvc, type) = dev; 1111 1111 if (!used) { 1112 1112 state(hdlc)->dce_changed = 1;
+1 -1
drivers/net/wan/lapbether.c
··· 306 306 static void lapbeth_setup(struct net_device *dev) 307 307 { 308 308 dev->netdev_ops = &lapbeth_netdev_ops; 309 - dev->destructor = free_netdev; 309 + dev->needs_free_netdev = true; 310 310 dev->type = ARPHRD_X25; 311 311 dev->hard_header_len = 3; 312 312 dev->mtu = 1000;
+1 -1
drivers/net/wireless/ath/ath6kl/main.c
··· 1287 1287 struct ath6kl *ar = ath6kl_priv(dev); 1288 1288 1289 1289 dev->netdev_ops = &ath6kl_netdev_ops; 1290 - dev->destructor = free_netdev; 1290 + dev->needs_free_netdev = true; 1291 1291 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; 1292 1292 1293 1293 dev->needed_headroom = ETH_HLEN;
+2
drivers/net/wireless/ath/wcn36xx/main.c
··· 1271 1271 qcom_smem_state_put(wcn->tx_enable_state); 1272 1272 qcom_smem_state_put(wcn->tx_rings_empty_state); 1273 1273 1274 + rpmsg_destroy_ept(wcn->smd_channel); 1275 + 1274 1276 iounmap(wcn->dxe_base); 1275 1277 iounmap(wcn->ccu_base); 1276 1278
-1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 5225 5225 5226 5226 if (vif) 5227 5227 brcmf_free_vif(vif); 5228 - free_netdev(ndev); 5229 5228 } 5230 5229 5231 5230 static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
+2 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
··· 624 624 if (!ndev) 625 625 return ERR_PTR(-ENOMEM); 626 626 627 - ndev->destructor = brcmf_cfg80211_free_netdev; 627 + ndev->needs_free_netdev = true; 628 + ndev->priv_destructor = brcmf_cfg80211_free_netdev; 628 629 ifp = netdev_priv(ndev); 629 630 ifp->ndev = ndev; 630 631 /* store mapping ifidx to bsscfgidx */
+17 -18
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
··· 442 442 const char *nvram_name; 443 443 u16 domain_nr; 444 444 u16 bus_nr; 445 - void (*done)(struct device *dev, const struct firmware *fw, 445 + void (*done)(struct device *dev, int err, const struct firmware *fw, 446 446 void *nvram_image, u32 nvram_len); 447 447 }; 448 448 ··· 477 477 if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) 478 478 goto fail; 479 479 480 - fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length); 480 + fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length); 481 481 kfree(fwctx); 482 482 return; 483 483 484 484 fail: 485 485 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); 486 486 release_firmware(fwctx->code); 487 - device_release_driver(fwctx->dev); 487 + fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0); 488 488 kfree(fwctx); 489 489 } 490 490 491 491 static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) 492 492 { 493 493 struct brcmf_fw *fwctx = ctx; 494 - int ret; 494 + int ret = 0; 495 495 496 496 brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); 497 - if (!fw) 497 + if (!fw) { 498 + ret = -ENOENT; 498 499 goto fail; 499 - 500 - /* only requested code so done here */ 501 - if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) { 502 - fwctx->done(fwctx->dev, fw, NULL, 0); 503 - kfree(fwctx); 504 - return; 505 500 } 501 + /* only requested code so done here */ 502 + if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) 503 + goto done; 504 + 506 505 fwctx->code = fw; 507 506 ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, 508 507 fwctx->dev, GFP_KERNEL, fwctx, 509 508 brcmf_fw_request_nvram_done); 510 509 511 - if (!ret) 512 - return; 513 - 514 - brcmf_fw_request_nvram_done(NULL, fwctx); 510 + /* pass NULL to nvram callback for bcm47xx fallback */ 511 + if (ret) 512 + brcmf_fw_request_nvram_done(NULL, fwctx); 515 513 return; 516 514 517 515 fail: 518 516 brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); 519 - device_release_driver(fwctx->dev); 517 + done: 518 + fwctx->done(fwctx->dev, ret, fw, NULL, 0); 520 519 kfree(fwctx); 521 520 } 522 521 523 522 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, 524 523 const char *code, const char *nvram, 525 - void (*fw_cb)(struct device *dev, 524 + void (*fw_cb)(struct device *dev, int err, 526 525 const struct firmware *fw, 527 526 void *nvram_image, u32 nvram_len), 528 527 u16 domain_nr, u16 bus_nr) ··· 554 555 555 556 int brcmf_fw_get_firmwares(struct device *dev, u16 flags, 556 557 const char *code, const char *nvram, 557 - void (*fw_cb)(struct device *dev, 558 + void (*fw_cb)(struct device *dev, int err, 558 559 const struct firmware *fw, 559 560 void *nvram_image, u32 nvram_len)) 560 561 {
+2 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
··· 73 73 */ 74 74 int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, 75 75 const char *code, const char *nvram, 76 - void (*fw_cb)(struct device *dev, 76 + void (*fw_cb)(struct device *dev, int err, 77 77 const struct firmware *fw, 78 78 void *nvram_image, u32 nvram_len), 79 79 u16 domain_nr, u16 bus_nr); 80 80 int brcmf_fw_get_firmwares(struct device *dev, u16 flags, 81 81 const char *code, const char *nvram, 82 - void (*fw_cb)(struct device *dev, 82 + void (*fw_cb)(struct device *dev, int err, 83 83 const struct firmware *fw, 84 84 void *nvram_image, u32 nvram_len)); 85 85
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
··· 2145 2145 struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); 2146 2146 struct brcmf_fws_mac_descriptor *entry; 2147 2147 2148 - if (!ifp->ndev || fws->fcmode == BRCMF_FWS_FCMODE_NONE) 2148 + if (!ifp->ndev || !brcmf_fws_queue_skbs(fws)) 2149 2149 return; 2150 2150 2151 2151 entry = &fws->desc.iface[ifp->ifidx];
+12 -5
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 1650 1650 .write32 = brcmf_pcie_buscore_write32, 1651 1651 }; 1652 1652 1653 - static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, 1653 + static void brcmf_pcie_setup(struct device *dev, int ret, 1654 + const struct firmware *fw, 1654 1655 void *nvram, u32 nvram_len) 1655 1656 { 1656 - struct brcmf_bus *bus = dev_get_drvdata(dev); 1657 - struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; 1658 - struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; 1657 + struct brcmf_bus *bus; 1658 + struct brcmf_pciedev *pcie_bus_dev; 1659 + struct brcmf_pciedev_info *devinfo; 1659 1660 struct brcmf_commonring **flowrings; 1660 - int ret; 1661 1661 u32 i; 1662 1662 1663 + /* check firmware loading result */ 1664 + if (ret) 1665 + goto fail; 1666 + 1667 + bus = dev_get_drvdata(dev); 1668 + pcie_bus_dev = bus->bus_priv.pcie; 1669 + devinfo = pcie_bus_dev->devinfo; 1663 1670 brcmf_pcie_attach(devinfo); 1664 1671 1665 1672 /* Some of the firmwares have the size of the memory of the device
+13 -7
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
··· 3422 3422 /* otherwise, set txglomalign */ 3423 3423 value = sdiodev->settings->bus.sdio.sd_sgentry_align; 3424 3424 /* SDIO ADMA requires at least 32 bit alignment */ 3425 - value = max_t(u32, value, 4); 3425 + value = max_t(u32, value, ALIGNMENT); 3426 3426 err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, 3427 3427 sizeof(u32)); 3428 3428 } ··· 3982 3982 .get_memdump = brcmf_sdio_bus_get_memdump, 3983 3983 }; 3984 3984 3985 - static void brcmf_sdio_firmware_callback(struct device *dev, 3985 + static void brcmf_sdio_firmware_callback(struct device *dev, int err, 3986 3986 const struct firmware *code, 3987 3987 void *nvram, u32 nvram_len) 3988 3988 { 3989 - struct brcmf_bus *bus_if = dev_get_drvdata(dev); 3990 - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3991 - struct brcmf_sdio *bus = sdiodev->bus; 3992 - int err = 0; 3989 + struct brcmf_bus *bus_if; 3990 + struct brcmf_sdio_dev *sdiodev; 3991 + struct brcmf_sdio *bus; 3993 3992 u8 saveclk; 3994 3993 3995 - brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev)); 3994 + brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); 3995 + bus_if = dev_get_drvdata(dev); 3996 + sdiodev = bus_if->bus_priv.sdio; 3997 + if (err) 3998 + goto fail; 3996 3999 3997 4000 if (!bus_if->drvr) 3998 4001 return; 4002 + 4003 + bus = sdiodev->bus; 3999 4004 4000 4005 /* try to download image and nvram to the dongle */ 4001 4006 bus->alp_only = true; ··· 4088 4083 fail: 4089 4084 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); 4090 4085 device_release_driver(dev); 4086 + device_release_driver(&sdiodev->func[2]->dev); 4091 4087 } 4092 4088 4093 4089 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
+5 -4
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
··· 1159 1159 return ret; 1160 1160 } 1161 1161 1162 - static void brcmf_usb_probe_phase2(struct device *dev, 1162 + static void brcmf_usb_probe_phase2(struct device *dev, int ret, 1163 1163 const struct firmware *fw, 1164 1164 void *nvram, u32 nvlen) 1165 1165 { 1166 1166 struct brcmf_bus *bus = dev_get_drvdata(dev); 1167 - struct brcmf_usbdev_info *devinfo; 1168 - int ret; 1167 + struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo; 1168 + 1169 + if (ret) 1170 + goto error; 1169 1171 1170 1172 brcmf_dbg(USB, "Start fw downloading\n"); 1171 1173 1172 - devinfo = bus->bus_priv.usb->devinfo; 1173 1174 ret = check_file(fw->data); 1174 1175 if (ret < 0) { 1175 1176 brcmf_err("invalid firmware\n");
+2 -2
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
··· 79 79 /* Lowest firmware API version supported */ 80 80 #define IWL7260_UCODE_API_MIN 17 81 81 #define IWL7265_UCODE_API_MIN 17 82 - #define IWL7265D_UCODE_API_MIN 17 83 - #define IWL3168_UCODE_API_MIN 20 82 + #define IWL7265D_UCODE_API_MIN 22 83 + #define IWL3168_UCODE_API_MIN 22 84 84 85 85 /* NVM versions */ 86 86 #define IWL7260_NVM_VERSION 0x0a1d
+2 -2
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
··· 74 74 #define IWL8265_UCODE_API_MAX 30 75 75 76 76 /* Lowest firmware API version supported */ 77 - #define IWL8000_UCODE_API_MIN 17 78 - #define IWL8265_UCODE_API_MIN 20 77 + #define IWL8000_UCODE_API_MIN 22 78 + #define IWL8265_UCODE_API_MIN 22 79 79 80 80 /* NVM versions */ 81 81 #define IWL8000_NVM_VERSION 0x0a1d
+1
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
··· 370 370 #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) 371 371 372 372 #define DBGC_IN_SAMPLE (0xa03c00) 373 + #define DBGC_OUT_CTRL (0xa03c0c) 373 374 374 375 /* enable the ID buf for read */ 375 376 #define WFPM_PS_CTL_CLR 0xA0300C
+5
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
··· 307 307 /* Bit 1-3: LQ command color. Used to match responses to LQ commands */ 308 308 #define LQ_FLAG_COLOR_POS 1 309 309 #define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) 310 + #define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\ 311 + LQ_FLAG_COLOR_POS) 312 + #define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\ 313 + LQ_FLAG_COLOR_MSK) 314 + #define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK)) 310 315 311 316 /* Bit 4-5: Tx RTS BW Signalling 312 317 * (0) No RTS BW signalling
+3
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
··· 519 519 * bit-7 invalid rate indication 520 520 */ 521 521 #define TX_RES_INIT_RATE_INDEX_MSK 0x0f 522 + #define TX_RES_RATE_TABLE_COLOR_POS 4 522 523 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70 523 524 #define TX_RES_INV_RATE_INDEX_MSK 0x80 525 + #define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\ 526 + TX_RES_RATE_TABLE_COLOR_POS) 524 527 525 528 #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) 526 529 #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
+1 -11
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
··· 1002 1002 return 0; 1003 1003 } 1004 1004 1005 - static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm) 1006 - { 1007 - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) 1008 - iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); 1009 - else 1010 - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1); 1011 - } 1012 - 1013 1005 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) 1014 1006 { 1015 1007 u8 *ptr; ··· 1015 1023 /* EARLY START - firmware's configuration is hard coded */ 1016 1024 if ((!mvm->fw->dbg_conf_tlv[conf_id] || 1017 1025 !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && 1018 - conf_id == FW_DBG_START_FROM_ALIVE) { 1019 - iwl_mvm_restart_early_start(mvm); 1026 + conf_id == FW_DBG_START_FROM_ALIVE) 1020 1027 return 0; 1021 - } 1022 1028 1023 1029 if (!mvm->fw->dbg_conf_tlv[conf_id]) 1024 1030 return -EINVAL;
+1 -1
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
··· 1040 1040 struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; 1041 1041 struct iwl_mac_beacon_cmd_v7 beacon_cmd; 1042 1042 } u = {}; 1043 - struct iwl_mac_beacon_cmd beacon_cmd; 1043 + struct iwl_mac_beacon_cmd beacon_cmd = {}; 1044 1044 struct ieee80211_tx_info *info; 1045 1045 u32 beacon_skb_len; 1046 1046 u32 rate, tx_flags;
+5 -1
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
··· 1730 1730 */ 1731 1731 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) 1732 1732 { 1733 + u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE : 1734 + IWL_MVM_CMD_QUEUE; 1735 + 1733 1736 return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & 1734 - ~BIT(IWL_MVM_CMD_QUEUE)); 1737 + ~BIT(cmd_queue)); 1735 1738 } 1736 1739 1737 1740 static inline ··· 1756 1753 if (!iwl_mvm_has_new_tx_api(mvm)) 1757 1754 iwl_free_fw_paging(mvm); 1758 1755 mvm->ucode_loaded = false; 1756 + mvm->fw_dbg_conf = FW_DBG_INVALID; 1759 1757 iwl_trans_stop_device(mvm->trans); 1760 1758 } 1761 1759
+25 -9
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
··· 1149 1149 1150 1150 mutex_lock(&mvm->mutex); 1151 1151 1152 - /* stop recording */ 1153 1152 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 1153 + /* stop recording */ 1154 1154 iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); 1155 + 1156 + iwl_mvm_fw_error_dump(mvm); 1157 + 1158 + /* start recording again if the firmware is not crashed */ 1159 + if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && 1160 + mvm->fw->dbg_dest_tlv) 1161 + iwl_clear_bits_prph(mvm->trans, 1162 + MON_BUFF_SAMPLE_CTL, 0x100); 1155 1163 } else { 1164 + u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE); 1165 + u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL); 1166 + 1167 + /* stop recording */ 1156 1168 iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); 1157 - /* wait before we collect the data till the DBGC stop */ 1158 1169 udelay(100); 1170 + iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0); 1171 + /* wait before we collect the data till the DBGC stop */ 1172 + udelay(500); 1173 + 1174 + iwl_mvm_fw_error_dump(mvm); 1175 + 1176 + /* start recording again if the firmware is not crashed */ 1177 + if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && 1178 + mvm->fw->dbg_dest_tlv) { 1179 + iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample); 1180 + iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl); 1181 + } 1159 1182 } 1160 - 1161 - iwl_mvm_fw_error_dump(mvm); 1162 - 1163 - /* start recording again if the firmware is not crashed */ 1164 - WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && 1165 - mvm->fw->dbg_dest_tlv && 1166 - iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); 1167 1183 1168 1184 mutex_unlock(&mvm->mutex); 1169 1185
+11 -35
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
··· 2 2 * 3 3 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 4 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5 - * Copyright(c) 2016 Intel Deutschland GmbH 5 + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 6 6 * 7 7 * This program is free software; you can redistribute it and/or modify it 8 8 * under the terms of version 2 of the GNU General Public License as ··· 1083 1083 rs_get_lower_rate_in_column(lq_sta, rate); 1084 1084 } 1085 1085 1086 - /* Check if both rates are identical 1087 - * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B 1088 - * with a rate indicating STBC/BFER and ANT_AB. 1089 - */ 1090 - static inline bool rs_rate_equal(struct rs_rate *a, 1091 - struct rs_rate *b, 1092 - bool allow_ant_mismatch) 1093 - 1094 - { 1095 - bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) && 1096 - (a->bfer == b->bfer); 1097 - 1098 - if (allow_ant_mismatch) { 1099 - if (a->stbc || a->bfer) { 1100 - WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d", 1101 - a->stbc, a->bfer, a->ant); 1102 - ant_match |= (b->ant == ANT_A || b->ant == ANT_B); 1103 - } else if (b->stbc || b->bfer) { 1104 - WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d", 1105 - b->stbc, b->bfer, b->ant); 1106 - ant_match |= (a->ant == ANT_A || a->ant == ANT_B); 1107 - } 1108 - } 1109 - 1110 - return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) && 1111 - (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match; 1112 - } 1113 - 1114 1086 /* Check if both rates share the same column */ 1115 1087 static inline bool rs_rate_column_match(struct rs_rate *a, 1116 1088 struct rs_rate *b) ··· 1154 1182 u32 lq_hwrate; 1155 1183 struct rs_rate lq_rate, tx_resp_rate; 1156 1184 struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; 1157 - u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0]; 1185 + u32 tlc_info = (uintptr_t)info->status.status_driver_data[0]; 1186 + u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK; 1187 + u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info); 1158 1188 u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; 1159 1189 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1160 1190 struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; 1161 - bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa, 1162 - IWL_UCODE_TLV_API_LQ_SS_PARAMS); 1163 1191 1164 1192 /* Treat uninitialized rate scaling data same as non-existing. */ 1165 1193 if (!lq_sta) { ··· 1234 1262 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); 1235 1263 1236 1264 /* Here we actually compare this rate to the latest LQ command */ 1237 - if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { 1265 + if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { 1238 1266 IWL_DEBUG_RATE(mvm, 1239 - "initial tx resp rate 0x%x does not match 0x%x\n", 1240 - tx_resp_hwrate, lq_hwrate); 1267 + "tx resp color 0x%x does not match 0x%x\n", 1268 + lq_color, LQ_FLAG_COLOR_GET(table->flags)); 1241 1269 1242 1270 /* 1243 1271 * Since rates mis-match, the last LQ command may have failed. 
··· 3298 3326 u8 valid_tx_ant = 0; 3299 3327 struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; 3300 3328 bool toggle_ant = false; 3329 + u32 color; 3301 3330 3302 3331 memcpy(&rate, initial_rate, sizeof(rate)); 3303 3332 ··· 3353 3380 num_rates, num_retries, valid_tx_ant, 3354 3381 toggle_ant); 3355 3382 3383 + /* update the color of the LQ command (as a counter at bits 1-3) */ 3384 + color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags)); 3385 + lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color); 3356 3386 } 3357 3387 3358 3388 struct rs_bfer_active_iter_data {
+15
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
··· 2 2 * 3 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 4 * Copyright(c) 2015 Intel Mobile Communications GmbH 5 + * Copyright(c) 2017 Intel Deutschland GmbH 5 6 * 6 7 * This program is free software; you can redistribute it and/or modify it 7 8 * under the terms of version 2 of the GNU General Public License as ··· 357 356 struct iwl_mvm *drv; 358 357 } pers; 359 358 }; 359 + 360 + /* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp 361 + * Note, it's iwlmvm <-> mac80211 interface. 362 + * bits 0-7: reduced tx power 363 + * bits 8-10: LQ command's color 364 + */ 365 + #define RS_DRV_DATA_TXP_MSK 0xff 366 + #define RS_DRV_DATA_LQ_COLOR_POS 8 367 + #define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS) 368 + #define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\ 369 + RS_DRV_DATA_LQ_COLOR_POS) 370 + #define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\ 371 + (((uintptr_t)_p) |\ 372 + ((_c) << RS_DRV_DATA_LQ_COLOR_POS))) 360 373 361 374 /* Initialize station's rate scaling information after adding station */ 362 375 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+17 -9
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 2120 2120 if (!iwl_mvm_is_dqa_supported(mvm)) 2121 2121 return 0; 2122 2122 2123 - if (WARN_ON(vif->type != NL80211_IFTYPE_AP)) 2123 + if (WARN_ON(vif->type != NL80211_IFTYPE_AP && 2124 + vif->type != NL80211_IFTYPE_ADHOC)) 2124 2125 return -ENOTSUPP; 2125 2126 2126 2127 /* ··· 2156 2155 mvmvif->cab_queue = queue; 2157 2156 } else if (!fw_has_api(&mvm->fw->ucode_capa, 2158 2157 IWL_UCODE_TLV_API_STA_TYPE)) { 2158 + /* 2159 + * In IBSS, ieee80211_check_queues() sets the cab_queue to be 2160 + * invalid, so make sure we use the queue we want. 2161 + * Note that this is done here as we want to avoid making DQA 2162 + * changes in mac80211 layer. 2163 + */ 2164 + if (vif->type == NL80211_IFTYPE_ADHOC) { 2165 + vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; 2166 + mvmvif->cab_queue = vif->cab_queue; 2167 + } 2159 2168 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, 2160 2169 &cfg, timeout); 2161 2170 } ··· 3332 3321 3333 3322 /* Get the station from the mvm local station table */ 3334 3323 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3335 - if (!mvm_sta) { 3336 - IWL_ERR(mvm, "Failed to find station\n"); 3337 - return -EINVAL; 3338 - } 3339 - sta_id = mvm_sta->sta_id; 3324 + if (mvm_sta) 3325 + sta_id = mvm_sta->sta_id; 3340 3326 3341 3327 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 3342 3328 keyconf->keyidx, sta_id); 3343 3329 3344 - if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3345 - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3346 - keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3330 + if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3331 + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3332 + keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) 3347 3333 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); 3348 3334 3349 3335 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
+2
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
··· 313 313 * This is basically (last acked packet++). 314 314 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the 315 315 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). 316 + * @lq_color: the color of the LQ command as it appears in tx response. 316 317 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. 317 318 * @state: state of the BA agreement establishment / tear down. 318 319 * @txq_id: Tx queue used by the BA session / DQA ··· 332 331 u16 next_reclaimed; 333 332 /* The rest is Tx AGG related */ 334 333 u32 rate_n_flags; 334 + u8 lq_color; 335 335 bool amsdu_in_ampdu_allowed; 336 336 enum iwl_mvm_agg_state state; 337 337 u16 txq_id;
+5 -3
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
··· 790 790 struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); 791 791 int ret; 792 792 793 - if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) 794 - return -EIO; 795 - 796 793 mutex_lock(&mvm->mutex); 794 + 795 + if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) { 796 + ret = -EIO; 797 + goto unlock; 798 + } 797 799 798 800 if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { 799 801 ret = -EINVAL;
+11 -1
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
··· 1323 1323 struct iwl_mvm_sta *mvmsta; 1324 1324 struct sk_buff_head skbs; 1325 1325 u8 skb_freed = 0; 1326 + u8 lq_color; 1326 1327 u16 next_reclaimed, seq_ctl; 1327 1328 bool is_ndp = false; 1328 1329 ··· 1406 1405 info->status.tx_time = 1407 1406 le16_to_cpu(tx_resp->wireless_media_time); 1408 1407 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); 1408 + lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); 1409 1409 info->status.status_driver_data[0] = 1410 - (void *)(uintptr_t)tx_resp->reduced_tpc; 1410 + RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); 1411 1411 1412 1412 ieee80211_tx_status(mvm->hw, skb); 1413 1413 } ··· 1640 1638 le32_to_cpu(tx_resp->initial_rate); 1641 1639 mvmsta->tid_data[tid].tx_time = 1642 1640 le16_to_cpu(tx_resp->wireless_media_time); 1641 + mvmsta->tid_data[tid].lq_color = 1642 + (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >> 1643 + TX_RES_RATE_TABLE_COLOR_POS; 1643 1644 } 1644 1645 1645 1646 rcu_read_unlock(); ··· 1712 1707 iwl_mvm_check_ratid_empty(mvm, sta, tid); 1713 1708 1714 1709 freed = 0; 1710 + 1711 + /* pack lq color from tid_data along the reduced txp */ 1712 + ba_info->status.status_driver_data[0] = 1713 + RS_DRV_DATA_PACK(tid_data->lq_color, 1714 + ba_info->status.status_driver_data[0]); 1715 1715 ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; 1716 1716 1717 1717 skb_queue_walk(&reclaimed_skbs, skb) {
+4 -2
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 2803 2803 #ifdef CONFIG_PM_SLEEP 2804 2804 static int iwl_trans_pcie_suspend(struct iwl_trans *trans) 2805 2805 { 2806 - if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 2806 + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && 2807 + (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) 2807 2808 return iwl_pci_fw_enter_d0i3(trans); 2808 2809 2809 2810 return 0; ··· 2812 2811 2813 2812 static void iwl_trans_pcie_resume(struct iwl_trans *trans) 2814 2813 { 2815 - if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 2814 + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && 2815 + (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) 2816 2816 iwl_pci_fw_exit_d0i3(trans); 2817 2817 } 2818 2818 #endif /* CONFIG_PM_SLEEP */
+6 -3
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
··· 906 906 907 907 if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) { 908 908 ret = -EINVAL; 909 - goto error; 909 + goto error_free_resp; 910 910 } 911 911 912 912 rsp = (void *)hcmd.resp_pkt->data; ··· 915 915 if (qid > ARRAY_SIZE(trans_pcie->txq)) { 916 916 WARN_ONCE(1, "queue index %d unsupported", qid); 917 917 ret = -EIO; 918 - goto error; 918 + goto error_free_resp; 919 919 } 920 920 921 921 if (test_and_set_bit(qid, trans_pcie->queue_used)) { 922 922 WARN_ONCE(1, "queue %d already used", qid); 923 923 ret = -EIO; 924 - goto error; 924 + goto error_free_resp; 925 925 } 926 926 927 927 txq->id = qid; ··· 934 934 (txq->write_ptr) | (qid << 16)); 935 935 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); 936 936 937 + iwl_free_resp(&hcmd); 937 938 return qid; 938 939 940 + error_free_resp: 941 + iwl_free_resp(&hcmd); 939 942 error: 940 943 iwl_pcie_gen2_txq_free_memory(trans, txq); 941 944 return ret;
+1 -1
drivers/net/wireless/intersil/hostap/hostap_main.c
··· 73 73 dev->mem_end = mdev->mem_end; 74 74 75 75 hostap_setup_dev(dev, local, type); 76 - dev->destructor = free_netdev; 76 + dev->needs_free_netdev = true; 77 77 78 78 sprintf(dev->name, "%s%s", prefix, name); 79 79 if (!rtnl_locked)
+1 -1
drivers/net/wireless/mac80211_hwsim.c
··· 2861 2861 static void hwsim_mon_setup(struct net_device *dev) 2862 2862 { 2863 2863 dev->netdev_ops = &hwsim_netdev_ops; 2864 - dev->destructor = free_netdev; 2864 + dev->needs_free_netdev = true; 2865 2865 ether_setup(dev); 2866 2866 dev->priv_flags |= IFF_NO_QUEUE; 2867 2867 dev->type = ARPHRD_IEEE80211_RADIOTAP;
+1 -1
drivers/net/wireless/marvell/mwifiex/main.c
··· 1280 1280 struct net_device *dev) 1281 1281 { 1282 1282 dev->netdev_ops = &mwifiex_netdev_ops; 1283 - dev->destructor = free_netdev; 1283 + dev->needs_free_netdev = true; 1284 1284 /* Initialize private structure */ 1285 1285 priv->current_key_index = 0; 1286 1286 priv->media_connected = false;
+1
drivers/net/xen-netback/common.h
··· 199 199 unsigned long remaining_credit; 200 200 struct timer_list credit_timeout; 201 201 u64 credit_window_start; 202 + bool rate_limited; 202 203 203 204 /* Statistics */ 204 205 struct xenvif_stats stats;
+5 -1
drivers/net/xen-netback/interface.c
··· 106 106 107 107 if (work_done < budget) { 108 108 napi_complete_done(napi, work_done); 109 - xenvif_napi_schedule_or_enable_events(queue); 109 + /* If the queue is rate-limited, it shall be 110 + * rescheduled in the timer callback. 111 + */ 112 + if (likely(!queue->rate_limited)) 113 + xenvif_napi_schedule_or_enable_events(queue); 110 114 } 111 115 112 116 return work_done;
+5 -1
drivers/net/xen-netback/netback.c
··· 180 180 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ 181 181 182 182 queue->remaining_credit = min(max_credit, max_burst); 183 + queue->rate_limited = false; 183 184 } 184 185 185 186 void xenvif_tx_credit_callback(unsigned long data) ··· 687 686 msecs_to_jiffies(queue->credit_usec / 1000); 688 687 689 688 /* Timer could already be pending in rare cases. */ 690 - if (timer_pending(&queue->credit_timeout)) 689 + if (timer_pending(&queue->credit_timeout)) { 690 + queue->rate_limited = true; 691 691 return true; 692 + } 692 693 693 694 /* Passed the point where we can replenish credit? */ 694 695 if (time_after_eq64(now, next_credit)) { ··· 705 702 mod_timer(&queue->credit_timeout, 706 703 next_credit); 707 704 queue->credit_window_start = next_credit; 705 + queue->rate_limited = true; 708 706 709 707 return true; 710 708 }
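A rough sketch of the intended flow, assuming the function whose tail is shown at the top of this hunk is the credit-replenish helper and that the credit timer callback just calls it and re-schedules the queue (the callback body is not part of this diff): the credit check arms the timer and sets rate_limited, the NAPI poll routine in interface.c above skips rescheduling while the flag is set, and the timer side is then expected to look roughly like:

    /* Assumed shape of the timer path -- not taken from this diff. */
    static void credit_timer_sketch(struct xenvif_queue *queue)
    {
            tx_add_credit(queue);   /* replenish; with this patch it also clears rate_limited */
            xenvif_napi_schedule_or_enable_events(queue);   /* kick the stalled queue */
    }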
+1 -1
drivers/ntb/hw/intel/ntb_hw_intel.c
··· 2878 2878 .link_is_up = xeon_link_is_up, 2879 2879 .db_ioread = skx_db_ioread, 2880 2880 .db_iowrite = skx_db_iowrite, 2881 - .db_size = sizeof(u64), 2881 + .db_size = sizeof(u32), 2882 2882 .ntb_ctl = SKX_NTBCNTL_OFFSET, 2883 2883 .mw_bar = {2, 4}, 2884 2884 };
+11 -47
drivers/ntb/ntb_transport.c
··· 177 177 u64 rx_err_ver; 178 178 u64 rx_memcpy; 179 179 u64 rx_async; 180 - u64 dma_rx_prep_err; 181 180 u64 tx_bytes; 182 181 u64 tx_pkts; 183 182 u64 tx_ring_full; 184 183 u64 tx_err_no_buf; 185 184 u64 tx_memcpy; 186 185 u64 tx_async; 187 - u64 dma_tx_prep_err; 188 186 }; 189 187 190 188 struct ntb_transport_mw { ··· 252 254 #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) 253 255 #define NTB_QP_DEF_NUM_ENTRIES 100 254 256 #define NTB_LINK_DOWN_TIMEOUT 10 255 - #define DMA_RETRIES 20 256 - #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) 257 257 258 258 static void ntb_transport_rxc_db(unsigned long data); 259 259 static const struct ntb_ctx_ops ntb_transport_ops; ··· 512 516 out_offset += snprintf(buf + out_offset, out_count - out_offset, 513 517 "free tx - \t%u\n", 514 518 ntb_transport_tx_free_entry(qp)); 515 - out_offset += snprintf(buf + out_offset, out_count - out_offset, 516 - "DMA tx prep err - \t%llu\n", 517 - qp->dma_tx_prep_err); 518 - out_offset += snprintf(buf + out_offset, out_count - out_offset, 519 - "DMA rx prep err - \t%llu\n", 520 - qp->dma_rx_prep_err); 521 519 522 520 out_offset += snprintf(buf + out_offset, out_count - out_offset, 523 521 "\n"); ··· 613 623 if (!mw->virt_addr) 614 624 return -ENOMEM; 615 625 616 - if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) 626 + if (mw_num < qp_count % mw_count) 617 627 num_qps_mw = qp_count / mw_count + 1; 618 628 else 619 629 num_qps_mw = qp_count / mw_count; ··· 758 768 qp->tx_err_no_buf = 0; 759 769 qp->tx_memcpy = 0; 760 770 qp->tx_async = 0; 761 - qp->dma_tx_prep_err = 0; 762 - qp->dma_rx_prep_err = 0; 763 771 } 764 772 765 773 static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) ··· 988 1000 qp->event_handler = NULL; 989 1001 ntb_qp_link_down_reset(qp); 990 1002 991 - if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) 1003 + if (mw_num < qp_count % mw_count) 992 1004 num_qps_mw = qp_count / mw_count + 1; 993 1005 else 994 1006 num_qps_mw = qp_count / mw_count; ··· 1116 1128 qp_count = ilog2(qp_bitmap); 1117 1129 if (max_num_clients && max_num_clients < qp_count) 1118 1130 qp_count = max_num_clients; 1119 - else if (mw_count < qp_count) 1120 - qp_count = mw_count; 1131 + else if (nt->mw_count < qp_count) 1132 + qp_count = nt->mw_count; 1121 1133 1122 1134 qp_bitmap &= BIT_ULL(qp_count) - 1; 1123 1135 ··· 1305 1317 struct dmaengine_unmap_data *unmap; 1306 1318 dma_cookie_t cookie; 1307 1319 void *buf = entry->buf; 1308 - int retries = 0; 1309 1320 1310 1321 len = entry->len; 1311 1322 device = chan->device; ··· 1333 1346 1334 1347 unmap->from_cnt = 1; 1335 1348 1336 - for (retries = 0; retries < DMA_RETRIES; retries++) { 1337 - txd = device->device_prep_dma_memcpy(chan, 1338 - unmap->addr[1], 1339 - unmap->addr[0], len, 1340 - DMA_PREP_INTERRUPT); 1341 - if (txd) 1342 - break; 1343 - 1344 - set_current_state(TASK_INTERRUPTIBLE); 1345 - schedule_timeout(DMA_OUT_RESOURCE_TO); 1346 - } 1347 - 1348 - if (!txd) { 1349 - qp->dma_rx_prep_err++; 1349 + txd = device->device_prep_dma_memcpy(chan, unmap->addr[1], 1350 + unmap->addr[0], len, 1351 + DMA_PREP_INTERRUPT); 1352 + if (!txd) 1350 1353 goto err_get_unmap; 1351 - } 1352 1354 1353 1355 txd->callback_result = ntb_rx_copy_callback; 1354 1356 txd->callback_param = entry; ··· 1582 1606 struct dmaengine_unmap_data *unmap; 1583 1607 dma_addr_t dest; 1584 1608 dma_cookie_t cookie; 1585 - int retries = 0; 1586 1609 1587 1610 device = chan->device; 1588 1611 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; ··· 1603 1628 1604 1629 
unmap->to_cnt = 1; 1605 1630 1606 - for (retries = 0; retries < DMA_RETRIES; retries++) { 1607 - txd = device->device_prep_dma_memcpy(chan, dest, 1608 - unmap->addr[0], len, 1609 - DMA_PREP_INTERRUPT); 1610 - if (txd) 1611 - break; 1612 - 1613 - set_current_state(TASK_INTERRUPTIBLE); 1614 - schedule_timeout(DMA_OUT_RESOURCE_TO); 1615 - } 1616 - 1617 - if (!txd) { 1618 - qp->dma_tx_prep_err++; 1631 + txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len, 1632 + DMA_PREP_INTERRUPT); 1633 + if (!txd) 1619 1634 goto err_get_unmap; 1620 - } 1621 1635 1622 1636 txd->callback_result = ntb_tx_copy_callback; 1623 1637 txd->callback_param = entry;
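The reworked queue-pair distribution above is easier to see with numbers: with the new test, the first qp_count % mw_count memory windows each take one extra queue pair, so for example 5 QPs over 3 MWs split as 2/2/1 and the per-MW counts always sum to qp_count, whereas the old test (qp_count % mw_count && mw_num + 1 < qp_count / mw_count) left that case at 1/1/1. A standalone restatement of the new rule:

    /* Illustrative restatement of the new rule -- not the driver code itself. */
    static unsigned int qps_for_mw(unsigned int qp_count, unsigned int mw_count,
                                   unsigned int mw_num)
    {
            if (mw_num < qp_count % mw_count)
                    return qp_count / mw_count + 1;
            return qp_count / mw_count;
    }
    /* qps_for_mw(5, 3, 0) == 2, qps_for_mw(5, 3, 1) == 2, qps_for_mw(5, 3, 2) == 1 */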
+2 -2
drivers/ntb/test/ntb_perf.c
··· 90 90 91 91 static unsigned int seg_order = 19; /* 512K */ 92 92 module_param(seg_order, uint, 0644); 93 - MODULE_PARM_DESC(seg_order, "size order [n^2] of buffer segment for testing"); 93 + MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing"); 94 94 95 95 static unsigned int run_order = 32; /* 4G */ 96 96 module_param(run_order, uint, 0644); 97 - MODULE_PARM_DESC(run_order, "size order [n^2] of total data to transfer"); 97 + MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer"); 98 98 99 99 static bool use_dma; /* default to 0 */ 100 100 module_param(use_dma, bool, 0644);
+14 -7
drivers/nvme/host/core.c
··· 56 56 static int nvme_char_major; 57 57 module_param(nvme_char_major, int, 0); 58 58 59 - static unsigned long default_ps_max_latency_us = 25000; 59 + static unsigned long default_ps_max_latency_us = 100000; 60 60 module_param(default_ps_max_latency_us, ulong, 0644); 61 61 MODULE_PARM_DESC(default_ps_max_latency_us, 62 62 "max power saving latency for new devices; use PM QOS to change per device"); ··· 1342 1342 * transitioning between power states. Therefore, when running 1343 1343 * in any given state, we will enter the next lower-power 1344 1344 * non-operational state after waiting 50 * (enlat + exlat) 1345 - * microseconds, as long as that state's total latency is under 1345 + * microseconds, as long as that state's exit latency is under 1346 1346 * the requested maximum latency. 1347 1347 * 1348 1348 * We will not autonomously enter any non-operational state for ··· 1387 1387 * lowest-power state, not the number of states. 1388 1388 */ 1389 1389 for (state = (int)ctrl->npss; state >= 0; state--) { 1390 - u64 total_latency_us, transition_ms; 1390 + u64 total_latency_us, exit_latency_us, transition_ms; 1391 1391 1392 1392 if (target) 1393 1393 table->entries[state] = target; ··· 1408 1408 NVME_PS_FLAGS_NON_OP_STATE)) 1409 1409 continue; 1410 1410 1411 - total_latency_us = 1412 - (u64)le32_to_cpu(ctrl->psd[state].entry_lat) + 1413 - + le32_to_cpu(ctrl->psd[state].exit_lat); 1414 - if (total_latency_us > ctrl->ps_max_latency_us) 1411 + exit_latency_us = 1412 + (u64)le32_to_cpu(ctrl->psd[state].exit_lat); 1413 + if (exit_latency_us > ctrl->ps_max_latency_us) 1415 1414 continue; 1415 + 1416 + total_latency_us = 1417 + exit_latency_us + 1418 + le32_to_cpu(ctrl->psd[state].entry_lat); 1416 1419 1417 1420 /* 1418 1421 * This state is good. Use it as the APST idle ··· 2441 2438 struct nvme_ns *ns; 2442 2439 2443 2440 mutex_lock(&ctrl->namespaces_mutex); 2441 + 2442 + /* Forcibly start all queues to avoid having stuck requests */ 2443 + blk_mq_start_hw_queues(ctrl->admin_q); 2444 + 2444 2445 list_for_each_entry(ns, &ctrl->namespaces, list) { 2445 2446 /* 2446 2447 * Revalidating a dead namespace sets capacity to 0. This will
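A worked example of the relaxed APST admission above (the latencies are made up, only the rule comes from the patch): a non-operational state is now usable whenever its exit latency alone fits within ps_max_latency_us, while the idle period before autonomously entering it is still based on entry plus exit latency. Under the removed 25000 us default and the old entry-plus-exit check, the same state would have sat exactly at the admission boundary.

    /* Made-up per-state latencies, in microseconds. */
    static void apst_example(void)
    {
            unsigned long long entry_lat = 5000, exit_lat = 20000;
            unsigned long long ps_max_latency_us = 100000;   /* new default */

            int admitted = exit_lat <= ps_max_latency_us;    /* 1: only exit latency is checked now */
            unsigned long long idle_ms =
                    50 * (entry_lat + exit_lat) / 1000;      /* 1250 ms before entering the state */
    }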
+18 -2
drivers/nvme/host/fc.c
··· 1139 1139 /* *********************** NVME Ctrl Routines **************************** */ 1140 1140 1141 1141 static void __nvme_fc_final_op_cleanup(struct request *rq); 1142 + static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg); 1142 1143 1143 1144 static int 1144 1145 nvme_fc_reinit_request(void *data, struct request *rq) ··· 1266 1265 struct nvme_command *sqe = &op->cmd_iu.sqe; 1267 1266 __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1); 1268 1267 union nvme_result result; 1269 - bool complete_rq; 1268 + bool complete_rq, terminate_assoc = true; 1270 1269 1271 1270 /* 1272 1271 * WARNING: ··· 1295 1294 * fabricate a CQE, the following fields will not be set as they 1296 1295 * are not referenced: 1297 1296 * cqe.sqid, cqe.sqhd, cqe.command_id 1297 + * 1298 + * Failure or error of an individual i/o, in a transport 1299 + * detected fashion unrelated to the nvme completion status, 1300 + * potentially cause the initiator and target sides to get out 1301 + * of sync on SQ head/tail (aka outstanding io count allowed). 1302 + * Per FC-NVME spec, failure of an individual command requires 1303 + * the connection to be terminated, which in turn requires the 1304 + * association to be terminated. 1298 1305 */ 1299 1306 1300 1307 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, ··· 1368 1359 goto done; 1369 1360 } 1370 1361 1362 + terminate_assoc = false; 1363 + 1371 1364 done: 1372 1365 if (op->flags & FCOP_FLAGS_AEN) { 1373 1366 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); ··· 1377 1366 atomic_set(&op->state, FCPOP_STATE_IDLE); 1378 1367 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ 1379 1368 nvme_fc_ctrl_put(ctrl); 1380 - return; 1369 + goto check_error; 1381 1370 } 1382 1371 1383 1372 complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op); ··· 1390 1379 nvme_end_request(rq, status, result); 1391 1380 } else 1392 1381 __nvme_fc_final_op_cleanup(rq); 1382 + 1383 + check_error: 1384 + if (terminate_assoc) 1385 + nvme_fc_error_recovery(ctrl, "transport detected io error"); 1393 1386 } 1394 1387 1395 1388 static int ··· 2806 2791 ctrl->ctrl.opts = NULL; 2807 2792 /* initiate nvme ctrl ref counting teardown */ 2808 2793 nvme_uninit_ctrl(&ctrl->ctrl); 2794 + nvme_put_ctrl(&ctrl->ctrl); 2809 2795 2810 2796 /* as we're past the point where we transition to the ref 2811 2797 * counting teardown path, if we return a bad pointer here,
+8 -8
drivers/nvme/host/pci.c
··· 1367 1367 bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1368 1368 1369 1369 /* If there is a reset ongoing, we shouldn't reset again. */ 1370 - if (work_busy(&dev->reset_work)) 1370 + if (dev->ctrl.state == NVME_CTRL_RESETTING) 1371 1371 return false; 1372 1372 1373 1373 /* We shouldn't reset unless the controller is on fatal error state ··· 1805 1805 if (pci_is_enabled(pdev)) { 1806 1806 u32 csts = readl(dev->bar + NVME_REG_CSTS); 1807 1807 1808 - if (dev->ctrl.state == NVME_CTRL_LIVE) 1808 + if (dev->ctrl.state == NVME_CTRL_LIVE || 1809 + dev->ctrl.state == NVME_CTRL_RESETTING) 1809 1810 nvme_start_freeze(&dev->ctrl); 1810 1811 dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 1811 1812 pdev->error_state != pci_channel_io_normal); ··· 1904 1903 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 1905 1904 int result = -ENODEV; 1906 1905 1907 - if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) 1906 + if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) 1908 1907 goto out; 1909 1908 1910 1909 /* ··· 1913 1912 */ 1914 1913 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 1915 1914 nvme_dev_disable(dev, false); 1916 - 1917 - if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) 1918 - goto out; 1919 1915 1920 1916 result = nvme_pci_enable(dev); 1921 1917 if (result) ··· 2007 2009 { 2008 2010 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 2009 2011 return -ENODEV; 2010 - if (work_busy(&dev->reset_work)) 2011 - return -ENODEV; 2012 + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) 2013 + return -EBUSY; 2012 2014 if (!queue_work(nvme_workq, &dev->reset_work)) 2013 2015 return -EBUSY; 2014 2016 return 0; ··· 2134 2136 if (result) 2135 2137 goto release_pools; 2136 2138 2139 + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING); 2137 2140 dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 2138 2141 2139 2142 queue_work(nvme_workq, &dev->reset_work); ··· 2178 2179 2179 2180 nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2180 2181 2182 + cancel_work_sync(&dev->reset_work); 2181 2183 pci_set_drvdata(pdev, NULL); 2182 2184 2183 2185 if (!pci_device_is_present(pdev)) {
+29 -15
drivers/nvme/host/rdma.c
··· 753 753 if (ret) 754 754 goto requeue; 755 755 756 - blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true); 757 - 758 756 ret = nvmf_connect_admin_queue(&ctrl->ctrl); 759 757 if (ret) 760 - goto stop_admin_q; 758 + goto requeue; 761 759 762 760 set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); 763 761 764 762 ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); 765 763 if (ret) 766 - goto stop_admin_q; 764 + goto requeue; 767 765 768 766 nvme_start_keep_alive(&ctrl->ctrl); 769 767 770 768 if (ctrl->queue_count > 1) { 771 769 ret = nvme_rdma_init_io_queues(ctrl); 772 770 if (ret) 773 - goto stop_admin_q; 771 + goto requeue; 774 772 775 773 ret = nvme_rdma_connect_io_queues(ctrl); 776 774 if (ret) 777 - goto stop_admin_q; 775 + goto requeue; 778 776 } 779 777 780 778 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); ··· 780 782 ctrl->ctrl.opts->nr_reconnects = 0; 781 783 782 784 if (ctrl->queue_count > 1) { 783 - nvme_start_queues(&ctrl->ctrl); 784 785 nvme_queue_scan(&ctrl->ctrl); 785 786 nvme_queue_async_events(&ctrl->ctrl); 786 787 } ··· 788 791 789 792 return; 790 793 791 - stop_admin_q: 792 - blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); 793 794 requeue: 794 795 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", 795 796 ctrl->ctrl.opts->nr_reconnects); ··· 817 822 nvme_cancel_request, &ctrl->ctrl); 818 823 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, 819 824 nvme_cancel_request, &ctrl->ctrl); 825 + 826 + /* 827 + * queues are not a live anymore, so restart the queues to fail fast 828 + * new IO 829 + */ 830 + blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true); 831 + nvme_start_queues(&ctrl->ctrl); 820 832 821 833 nvme_rdma_reconnect_or_remove(ctrl); 822 834 } ··· 1435 1433 /* 1436 1434 * We cannot accept any other command until the Connect command has completed. 1437 1435 */ 1438 - static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, 1436 + static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, 1439 1437 struct request *rq) 1440 1438 { 1441 1439 if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { ··· 1443 1441 1444 1442 if (!blk_rq_is_passthrough(rq) || 1445 1443 cmd->common.opcode != nvme_fabrics_command || 1446 - cmd->fabrics.fctype != nvme_fabrics_type_connect) 1447 - return false; 1444 + cmd->fabrics.fctype != nvme_fabrics_type_connect) { 1445 + /* 1446 + * reconnecting state means transport disruption, which 1447 + * can take a long time and even might fail permanently, 1448 + * so we can't let incoming I/O be requeued forever. 1449 + * fail it fast to allow upper layers a chance to 1450 + * failover. 1451 + */ 1452 + if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING) 1453 + return -EIO; 1454 + else 1455 + return -EAGAIN; 1456 + } 1448 1457 } 1449 1458 1450 - return true; 1459 + return 0; 1451 1460 } 1452 1461 1453 1462 static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, ··· 1476 1463 1477 1464 WARN_ON_ONCE(rq->tag < 0); 1478 1465 1479 - if (!nvme_rdma_queue_is_ready(queue, rq)) 1480 - return BLK_MQ_RQ_QUEUE_BUSY; 1466 + ret = nvme_rdma_queue_is_ready(queue, rq); 1467 + if (unlikely(ret)) 1468 + goto err; 1481 1469 1482 1470 dev = queue->device->dev; 1483 1471 ib_dma_sync_single_for_cpu(dev, sqe->dma,
+2 -2
drivers/of/device.c
··· 144 144 coherent ? " " : " not "); 145 145 146 146 iommu = of_iommu_configure(dev, np); 147 - if (IS_ERR(iommu)) 148 - return PTR_ERR(iommu); 147 + if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER) 148 + return -EPROBE_DEFER; 149 149 150 150 dev_dbg(dev, "device is%sbehind an iommu\n", 151 151 iommu ? " " : " not ");
+6 -6
drivers/pci/access.c
··· 896 896 { 897 897 if (pci_dev_is_disconnected(dev)) { 898 898 *val = ~0; 899 - return -ENODEV; 899 + return PCIBIOS_DEVICE_NOT_FOUND; 900 900 } 901 901 return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); 902 902 } ··· 906 906 { 907 907 if (pci_dev_is_disconnected(dev)) { 908 908 *val = ~0; 909 - return -ENODEV; 909 + return PCIBIOS_DEVICE_NOT_FOUND; 910 910 } 911 911 return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); 912 912 } ··· 917 917 { 918 918 if (pci_dev_is_disconnected(dev)) { 919 919 *val = ~0; 920 - return -ENODEV; 920 + return PCIBIOS_DEVICE_NOT_FOUND; 921 921 } 922 922 return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); 923 923 } ··· 926 926 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) 927 927 { 928 928 if (pci_dev_is_disconnected(dev)) 929 - return -ENODEV; 929 + return PCIBIOS_DEVICE_NOT_FOUND; 930 930 return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); 931 931 } 932 932 EXPORT_SYMBOL(pci_write_config_byte); ··· 934 934 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) 935 935 { 936 936 if (pci_dev_is_disconnected(dev)) 937 - return -ENODEV; 937 + return PCIBIOS_DEVICE_NOT_FOUND; 938 938 return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); 939 939 } 940 940 EXPORT_SYMBOL(pci_write_config_word); ··· 943 943 u32 val) 944 944 { 945 945 if (pci_dev_is_disconnected(dev)) 946 - return -ENODEV; 946 + return PCIBIOS_DEVICE_NOT_FOUND; 947 947 return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); 948 948 } 949 949 EXPORT_SYMBOL(pci_write_config_dword);
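The switch to PCIBIOS_DEVICE_NOT_FOUND keeps these accessors on the PCIBIOS_* return convention their callers already expect; a typical (hypothetical) caller translates such a code into an errno value with pcibios_err_to_errno():

    /* Hypothetical caller -- pdev is some already-bound struct pci_dev. */
    u32 id;
    int err = pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);

    if (err)
            return pcibios_err_to_errno(err);   /* PCIBIOS_DEVICE_NOT_FOUND -> -ENODEV */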
+1
drivers/pci/endpoint/functions/Kconfig
··· 5 5 config PCI_EPF_TEST 6 6 tristate "PCI Endpoint Test driver" 7 7 depends on PCI_ENDPOINT 8 + select CRC32 8 9 help 9 10 Enable this configuration option to enable the test driver 10 11 for PCI Endpoint.
+11
drivers/perf/arm_pmu_acpi.c
··· 29 29 return -EINVAL; 30 30 31 31 gsi = gicc->performance_interrupt; 32 + 33 + /* 34 + * Per the ACPI spec, the MADT cannot describe a PMU that doesn't 35 + * have an interrupt. QEMU advertises this by using a GSI of zero, 36 + * which is not known to be valid on any hardware despite being 37 + * valid per the spec. Take the pragmatic approach and reject a 38 + * GSI of zero for now. 39 + */ 40 + if (!gsi) 41 + return 0; 42 + 32 43 if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) 33 44 trigger = ACPI_EDGE_SENSITIVE; 34 45 else
+7 -7
drivers/phy/phy-qcom-qmp.c
··· 844 844 int num = qmp->cfg->num_vregs; 845 845 int i; 846 846 847 - qmp->vregs = devm_kcalloc(dev, num, sizeof(qmp->vregs), GFP_KERNEL); 847 + qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL); 848 848 if (!qmp->vregs) 849 849 return -ENOMEM; 850 850 ··· 983 983 * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. 984 984 */ 985 985 qphy->tx = of_iomap(np, 0); 986 - if (IS_ERR(qphy->tx)) 987 - return PTR_ERR(qphy->tx); 986 + if (!qphy->tx) 987 + return -ENOMEM; 988 988 989 989 qphy->rx = of_iomap(np, 1); 990 - if (IS_ERR(qphy->rx)) 991 - return PTR_ERR(qphy->rx); 990 + if (!qphy->rx) 991 + return -ENOMEM; 992 992 993 993 qphy->pcs = of_iomap(np, 2); 994 - if (IS_ERR(qphy->pcs)) 995 - return PTR_ERR(qphy->pcs); 994 + if (!qphy->pcs) 995 + return -ENOMEM; 996 996 997 997 /* 998 998 * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3
+3 -17
drivers/pinctrl/core.c
··· 680 680 * pinctrl_generic_free_groups() - removes all pin groups 681 681 * @pctldev: pin controller device 682 682 * 683 - * Note that the caller must take care of locking. 683 + * Note that the caller must take care of locking. The pinctrl groups 684 + * are allocated with devm_kzalloc() so no need to free them here. 684 685 */ 685 686 static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev) 686 687 { 687 688 struct radix_tree_iter iter; 688 - struct group_desc *group; 689 - unsigned long *indices; 690 689 void **slot; 691 - int i = 0; 692 - 693 - indices = devm_kzalloc(pctldev->dev, sizeof(*indices) * 694 - pctldev->num_groups, GFP_KERNEL); 695 - if (!indices) 696 - return; 697 690 698 691 radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0) 699 - indices[i++] = iter.index; 700 - 701 - for (i = 0; i < pctldev->num_groups; i++) { 702 - group = radix_tree_lookup(&pctldev->pin_group_tree, 703 - indices[i]); 704 - radix_tree_delete(&pctldev->pin_group_tree, indices[i]); 705 - devm_kfree(pctldev->dev, group); 706 - } 692 + radix_tree_delete(&pctldev->pin_group_tree, iter.index); 707 693 708 694 pctldev->num_groups = 0; 709 695 }
+12 -4
drivers/pinctrl/freescale/pinctrl-mxs.c
··· 194 194 return 0; 195 195 } 196 196 197 + static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg) 198 + { 199 + u32 tmp; 200 + 201 + tmp = readl(reg); 202 + tmp &= ~(mask << shift); 203 + tmp |= value << shift; 204 + writel(tmp, reg); 205 + } 206 + 197 207 static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, 198 208 unsigned group) 199 209 { ··· 221 211 reg += bank * 0x20 + pin / 16 * 0x10; 222 212 shift = pin % 16 * 2; 223 213 224 - writel(0x3 << shift, reg + CLR); 225 - writel(g->muxsel[i] << shift, reg + SET); 214 + mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg); 226 215 } 227 216 228 217 return 0; ··· 288 279 /* mA */ 289 280 if (config & MA_PRESENT) { 290 281 shift = pin % 8 * 4; 291 - writel(0x3 << shift, reg + CLR); 292 - writel(ma << shift, reg + SET); 282 + mxs_pinctrl_rmwl(ma, 0x3, shift, reg); 293 283 } 294 284 295 285 /* vol */
+19 -5
drivers/pinctrl/intel/pinctrl-cherryview.c
··· 1539 1539 * is not listed below. 1540 1540 */ 1541 1541 static const struct dmi_system_id chv_no_valid_mask[] = { 1542 + /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ 1542 1543 { 1543 - /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ 1544 - .ident = "Acer Chromebook (CYAN)", 1544 + .ident = "Intel_Strago based Chromebooks (All models)", 1545 1545 .matches = { 1546 1546 DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1547 - DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"), 1548 - DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"), 1547 + DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), 1549 1548 }, 1550 - } 1549 + }, 1550 + { 1551 + .ident = "Acer Chromebook R11 (Cyan)", 1552 + .matches = { 1553 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1554 + DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), 1555 + }, 1556 + }, 1557 + { 1558 + .ident = "Samsung Chromebook 3 (Celes)", 1559 + .matches = { 1560 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), 1561 + DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), 1562 + }, 1563 + }, 1564 + {} 1551 1565 }; 1552 1566 1553 1567 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
-3
drivers/pinctrl/pinconf-generic.c
··· 35 35 PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 36 36 "input bias pull to pin specific state", NULL, false), 37 37 PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), 38 - PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false), 39 38 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), 40 39 PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), 41 40 PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), ··· 160 161 { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, 161 162 { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 }, 162 163 { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 }, 163 - { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 }, 164 164 { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 }, 165 165 { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 }, 166 166 { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 }, ··· 172 174 { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, 173 175 { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, 174 176 { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 }, 175 - { "output-enable", PIN_CONFIG_OUTPUT, 1, }, 176 177 { "output-high", PIN_CONFIG_OUTPUT, 1, }, 177 178 { "output-low", PIN_CONFIG_OUTPUT, 0, }, 178 179 { "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
+41 -50
drivers/pinctrl/pinctrl-amd.c
··· 495 495 .flags = IRQCHIP_SKIP_SET_WAKE, 496 496 }; 497 497 498 - static void amd_gpio_irq_handler(struct irq_desc *desc) 498 + #define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF)) 499 + 500 + static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) 499 501 { 500 - u32 i; 501 - u32 off; 502 - u32 reg; 503 - u32 pin_reg; 504 - u64 reg64; 505 - int handled = 0; 506 - unsigned int irq; 502 + struct amd_gpio *gpio_dev = dev_id; 503 + struct gpio_chip *gc = &gpio_dev->gc; 504 + irqreturn_t ret = IRQ_NONE; 505 + unsigned int i, irqnr; 507 506 unsigned long flags; 508 - struct irq_chip *chip = irq_desc_get_chip(desc); 509 - struct gpio_chip *gc = irq_desc_get_handler_data(desc); 510 - struct amd_gpio *gpio_dev = gpiochip_get_data(gc); 507 + u32 *regs, regval; 508 + u64 status, mask; 511 509 512 - chained_irq_enter(chip, desc); 513 - /*enable GPIO interrupt again*/ 510 + /* Read the wake status */ 514 511 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 515 - reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); 516 - reg64 = reg; 517 - reg64 = reg64 << 32; 518 - 519 - reg = readl(gpio_dev->base + WAKE_INT_STATUS_REG0); 520 - reg64 |= reg; 512 + status = readl(gpio_dev->base + WAKE_INT_STATUS_REG1); 513 + status <<= 32; 514 + status |= readl(gpio_dev->base + WAKE_INT_STATUS_REG0); 521 515 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 522 516 523 - /* 524 - * first 46 bits indicates interrupt status. 525 - * one bit represents four interrupt sources. 526 - */ 527 - for (off = 0; off < 46 ; off++) { 528 - if (reg64 & BIT(off)) { 529 - for (i = 0; i < 4; i++) { 530 - pin_reg = readl(gpio_dev->base + 531 - (off * 4 + i) * 4); 532 - if ((pin_reg & BIT(INTERRUPT_STS_OFF)) || 533 - (pin_reg & BIT(WAKE_STS_OFF))) { 534 - irq = irq_find_mapping(gc->irqdomain, 535 - off * 4 + i); 536 - generic_handle_irq(irq); 537 - writel(pin_reg, 538 - gpio_dev->base 539 - + (off * 4 + i) * 4); 540 - handled++; 541 - } 542 - } 517 + /* Bit 0-45 contain the relevant status bits */ 518 + status &= (1ULL << 46) - 1; 519 + regs = gpio_dev->base; 520 + for (mask = 1, irqnr = 0; status; mask <<= 1, regs += 4, irqnr += 4) { 521 + if (!(status & mask)) 522 + continue; 523 + status &= ~mask; 524 + 525 + /* Each status bit covers four pins */ 526 + for (i = 0; i < 4; i++) { 527 + regval = readl(regs + i); 528 + if (!(regval & PIN_IRQ_PENDING)) 529 + continue; 530 + irq = irq_find_mapping(gc->irqdomain, irqnr + i); 531 + generic_handle_irq(irq); 532 + /* Clear interrupt */ 533 + writel(regval, regs + i); 534 + ret = IRQ_HANDLED; 543 535 } 544 536 } 545 537 546 - if (handled == 0) 547 - handle_bad_irq(desc); 548 - 538 + /* Signal EOI to the GPIO unit */ 549 539 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 550 - reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG); 551 - reg |= EOI_MASK; 552 - writel(reg, gpio_dev->base + WAKE_INT_MASTER_REG); 540 + regval = readl(gpio_dev->base + WAKE_INT_MASTER_REG); 541 + regval |= EOI_MASK; 542 + writel(regval, gpio_dev->base + WAKE_INT_MASTER_REG); 553 543 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 554 544 555 - chained_irq_exit(chip, desc); 545 + return ret; 556 546 } 557 547 558 548 static int amd_get_groups_count(struct pinctrl_dev *pctldev) ··· 811 821 goto out2; 812 822 } 813 823 814 - gpiochip_set_chained_irqchip(&gpio_dev->gc, 815 - &amd_gpio_irqchip, 816 - irq_base, 817 - amd_gpio_irq_handler); 824 + ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0, 825 + KBUILD_MODNAME, gpio_dev); 826 + if (ret) 827 + goto out2; 828 + 818 829 
platform_set_drvdata(pdev, gpio_dev); 819 830 820 831 dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
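To make the loop bounds of the rewritten handler concrete: each of the 46 status bits covers four consecutive pin registers, so status bit n maps to pins 4n..4n+3 whose registers sit at byte offsets (4n + i) * 4 from gpio_dev->base. Taking bit 11 as a worked example:

    /* Worked mapping for status bit 11 (offsets relative to gpio_dev->base). */
    /* regs = (u32 __iomem *)gpio_dev->base + 11 * 4; -> byte offset 0xb0      */
    /* readl(regs + i), i = 0..3, checks pins 44..47 at 0xb0, 0xb4, 0xb8, 0xbc */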
+4 -40
drivers/pinctrl/pinctrl-rockchip.c
··· 143 143 * @gpio_chip: gpiolib chip 144 144 * @grange: gpio range 145 145 * @slock: spinlock for the gpio bank 146 - * @irq_lock: bus lock for irq chip 147 - * @new_irqs: newly configured irqs which must be muxed as GPIOs in 148 - * irq_bus_sync_unlock() 149 146 */ 150 147 struct rockchip_pin_bank { 151 148 void __iomem *reg_base; ··· 165 168 struct pinctrl_gpio_range grange; 166 169 raw_spinlock_t slock; 167 170 u32 toggle_edge_mode; 168 - struct mutex irq_lock; 169 - u32 new_irqs; 170 171 }; 171 172 172 173 #define PIN_BANK(id, pins, label) \ ··· 2129 2134 int ret; 2130 2135 2131 2136 /* make sure the pin is configured as gpio input */ 2132 - ret = rockchip_verify_mux(bank, d->hwirq, RK_FUNC_GPIO); 2137 + ret = rockchip_set_mux(bank, d->hwirq, RK_FUNC_GPIO); 2133 2138 if (ret < 0) 2134 2139 return ret; 2135 2140 2136 - bank->new_irqs |= mask; 2137 - 2141 + clk_enable(bank->clk); 2138 2142 raw_spin_lock_irqsave(&bank->slock, flags); 2139 2143 2140 2144 data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR); ··· 2191 2197 default: 2192 2198 irq_gc_unlock(gc); 2193 2199 raw_spin_unlock_irqrestore(&bank->slock, flags); 2200 + clk_disable(bank->clk); 2194 2201 return -EINVAL; 2195 2202 } 2196 2203 ··· 2200 2205 2201 2206 irq_gc_unlock(gc); 2202 2207 raw_spin_unlock_irqrestore(&bank->slock, flags); 2208 + clk_disable(bank->clk); 2203 2209 2204 2210 return 0; 2205 2211 } ··· 2241 2245 struct rockchip_pin_bank *bank = gc->private; 2242 2246 2243 2247 irq_gc_mask_set_bit(d); 2244 - clk_disable(bank->clk); 2245 - } 2246 - 2247 - static void rockchip_irq_bus_lock(struct irq_data *d) 2248 - { 2249 - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 2250 - struct rockchip_pin_bank *bank = gc->private; 2251 - 2252 - clk_enable(bank->clk); 2253 - mutex_lock(&bank->irq_lock); 2254 - } 2255 - 2256 - static void rockchip_irq_bus_sync_unlock(struct irq_data *d) 2257 - { 2258 - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 2259 - struct rockchip_pin_bank *bank = gc->private; 2260 - 2261 - while (bank->new_irqs) { 2262 - unsigned int irq = __ffs(bank->new_irqs); 2263 - int ret; 2264 - 2265 - ret = rockchip_set_mux(bank, irq, RK_FUNC_GPIO); 2266 - WARN_ON(ret < 0); 2267 - 2268 - bank->new_irqs &= ~BIT(irq); 2269 - } 2270 - 2271 - mutex_unlock(&bank->irq_lock); 2272 2248 clk_disable(bank->clk); 2273 2249 } 2274 2250 ··· 2310 2342 gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend; 2311 2343 gc->chip_types[0].chip.irq_resume = rockchip_irq_resume; 2312 2344 gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; 2313 - gc->chip_types[0].chip.irq_bus_lock = rockchip_irq_bus_lock; 2314 - gc->chip_types[0].chip.irq_bus_sync_unlock = 2315 - rockchip_irq_bus_sync_unlock; 2316 2345 gc->wake_enabled = IRQ_MSK(bank->nr_pins); 2317 2346 2318 2347 irq_set_chained_handler_and_data(bank->irq, ··· 2483 2518 int bank_pins = 0; 2484 2519 2485 2520 raw_spin_lock_init(&bank->slock); 2486 - mutex_init(&bank->irq_lock); 2487 2521 bank->drvdata = d; 2488 2522 bank->pin_base = ctrl->nr_pins; 2489 2523 ctrl->nr_pins += bank->nr_pins;
+4 -17
drivers/pinctrl/pinmux.c
··· 826 826 * pinmux_generic_free_functions() - removes all functions 827 827 * @pctldev: pin controller device 828 828 * 829 - * Note that the caller must take care of locking. 829 + * Note that the caller must take care of locking. The pinctrl 830 + * functions are allocated with devm_kzalloc() so no need to free 831 + * them here. 830 832 */ 831 833 void pinmux_generic_free_functions(struct pinctrl_dev *pctldev) 832 834 { 833 835 struct radix_tree_iter iter; 834 - struct function_desc *function; 835 - unsigned long *indices; 836 836 void **slot; 837 - int i = 0; 838 - 839 - indices = devm_kzalloc(pctldev->dev, sizeof(*indices) * 840 - pctldev->num_functions, GFP_KERNEL); 841 - if (!indices) 842 - return; 843 837 844 838 radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0) 845 - indices[i++] = iter.index; 846 - 847 - for (i = 0; i < pctldev->num_functions; i++) { 848 - function = radix_tree_lookup(&pctldev->pin_function_tree, 849 - indices[i]); 850 - radix_tree_delete(&pctldev->pin_function_tree, indices[i]); 851 - devm_kfree(pctldev->dev, function); 852 - } 839 + radix_tree_delete(&pctldev->pin_function_tree, iter.index); 853 840 854 841 pctldev->num_functions = 0; 855 842 }
+1 -1
drivers/pinctrl/stm32/pinctrl-stm32.c
··· 798 798 break; 799 799 case PIN_CONFIG_OUTPUT: 800 800 __stm32_gpio_set(bank, offset, arg); 801 - ret = stm32_pmx_gpio_set_direction(pctldev, NULL, pin, false); 801 + ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false); 802 802 break; 803 803 default: 804 804 ret = -EINVAL;
+1 -1
drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
··· 394 394 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18), 395 395 SUNXI_FUNCTION(0x0, "gpio_in"), 396 396 SUNXI_FUNCTION(0x1, "gpio_out"), 397 - SUNXI_FUNCTION(0x3, "owa")), /* DOUT */ 397 + SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */ 398 398 SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19), 399 399 SUNXI_FUNCTION(0x0, "gpio_in"), 400 400 SUNXI_FUNCTION(0x1, "gpio_out")),
+1 -1
drivers/platform/goldfish/goldfish_pipe.c
··· 704 704 /* Reallocate the array */ 705 705 u32 new_capacity = 2 * dev->pipes_capacity; 706 706 struct goldfish_pipe **pipes = 707 - kcalloc(new_capacity, sizeof(*pipes), GFP_KERNEL); 707 + kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC); 708 708 if (!pipes) 709 709 return -ENOMEM; 710 710 memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
+7 -9
drivers/platform/x86/intel_telemetry_debugfs.c
··· 97 97 } \ 98 98 } 99 99 100 - #ifdef CONFIG_PM_SLEEP 101 100 static u8 suspend_prep_ok; 102 101 static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp; 103 102 static u64 suspend_shlw_res_temp, suspend_deep_res_temp; 104 - #endif 105 103 106 104 struct telemetry_susp_stats { 107 105 u32 shlw_swake_ctr; ··· 805 807 .release = single_release, 806 808 }; 807 809 808 - #ifdef CONFIG_PM_SLEEP 809 810 static int pm_suspend_prep_cb(void) 810 811 { 811 812 struct telemetry_evtlog evtlog[TELEM_MAX_OS_ALLOCATED_EVENTS]; ··· 934 937 static struct notifier_block pm_notifier = { 935 938 .notifier_call = pm_notification, 936 939 }; 937 - #endif /* CONFIG_PM_SLEEP */ 938 940 939 941 static int __init telemetry_debugfs_init(void) 940 942 { ··· 956 960 if (err < 0) 957 961 return -EINVAL; 958 962 959 - 960 - #ifdef CONFIG_PM_SLEEP 961 963 register_pm_notifier(&pm_notifier); 962 - #endif /* CONFIG_PM_SLEEP */ 963 964 964 965 debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL); 965 - if (!debugfs_conf->telemetry_dbg_dir) 966 - return -ENOMEM; 966 + if (!debugfs_conf->telemetry_dbg_dir) { 967 + err = -ENOMEM; 968 + goto out_pm; 969 + } 967 970 968 971 f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO, 969 972 debugfs_conf->telemetry_dbg_dir, NULL, ··· 1009 1014 out: 1010 1015 debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); 1011 1016 debugfs_conf->telemetry_dbg_dir = NULL; 1017 + out_pm: 1018 + unregister_pm_notifier(&pm_notifier); 1012 1019 1013 1020 return err; 1014 1021 } ··· 1019 1022 { 1020 1023 debugfs_remove_recursive(debugfs_conf->telemetry_dbg_dir); 1021 1024 debugfs_conf->telemetry_dbg_dir = NULL; 1025 + unregister_pm_notifier(&pm_notifier); 1022 1026 } 1023 1027 1024 1028 late_initcall(telemetry_debugfs_init);
+2
drivers/reset/hisilicon/hi6220_reset.c
··· 155 155 } 156 156 157 157 postcore_initcall(hi6220_reset_init); 158 + 159 + MODULE_LICENSE("GPL v2");
+6 -6
drivers/s390/cio/vfio_ccw_ops.c
··· 70 70 { 71 71 return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); 72 72 } 73 - MDEV_TYPE_ATTR_RO(name); 73 + static MDEV_TYPE_ATTR_RO(name); 74 74 75 75 static ssize_t device_api_show(struct kobject *kobj, struct device *dev, 76 76 char *buf) 77 77 { 78 78 return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); 79 79 } 80 - MDEV_TYPE_ATTR_RO(device_api); 80 + static MDEV_TYPE_ATTR_RO(device_api); 81 81 82 82 static ssize_t available_instances_show(struct kobject *kobj, 83 83 struct device *dev, char *buf) ··· 86 86 87 87 return sprintf(buf, "%d\n", atomic_read(&private->avail)); 88 88 } 89 - MDEV_TYPE_ATTR_RO(available_instances); 89 + static MDEV_TYPE_ATTR_RO(available_instances); 90 90 91 91 static struct attribute *mdev_types_attrs[] = { 92 92 &mdev_type_attr_name.attr, ··· 100 100 .attrs = mdev_types_attrs, 101 101 }; 102 102 103 - struct attribute_group *mdev_type_groups[] = { 103 + static struct attribute_group *mdev_type_groups[] = { 104 104 &mdev_type_group, 105 105 NULL, 106 106 }; ··· 152 152 &events, &private->nb); 153 153 } 154 154 155 - void vfio_ccw_mdev_release(struct mdev_device *mdev) 155 + static void vfio_ccw_mdev_release(struct mdev_device *mdev) 156 156 { 157 157 struct vfio_ccw_private *private = 158 158 dev_get_drvdata(mdev_parent_dev(mdev)); ··· 233 233 } 234 234 } 235 235 236 - int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) 236 + static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) 237 237 { 238 238 if (info->index != VFIO_CCW_IO_IRQ_INDEX) 239 239 return -EINVAL;
+24 -14
drivers/s390/crypto/ap_bus.c
··· 668 668 struct ap_driver *ap_drv = to_ap_drv(dev->driver); 669 669 int rc; 670 670 671 + /* Add queue/card to list of active queues/cards */ 672 + spin_lock_bh(&ap_list_lock); 673 + if (is_card_dev(dev)) 674 + list_add(&to_ap_card(dev)->list, &ap_card_list); 675 + else 676 + list_add(&to_ap_queue(dev)->list, 677 + &to_ap_queue(dev)->card->queues); 678 + spin_unlock_bh(&ap_list_lock); 679 + 671 680 ap_dev->drv = ap_drv; 672 681 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 673 - if (rc) 682 + 683 + if (rc) { 684 + spin_lock_bh(&ap_list_lock); 685 + if (is_card_dev(dev)) 686 + list_del_init(&to_ap_card(dev)->list); 687 + else 688 + list_del_init(&to_ap_queue(dev)->list); 689 + spin_unlock_bh(&ap_list_lock); 674 690 ap_dev->drv = NULL; 691 + } 692 + 675 693 return rc; 676 694 } 677 695 ··· 698 680 struct ap_device *ap_dev = to_ap_dev(dev); 699 681 struct ap_driver *ap_drv = ap_dev->drv; 700 682 683 + if (ap_drv->remove) 684 + ap_drv->remove(ap_dev); 685 + 686 + /* Remove queue/card from list of active queues/cards */ 701 687 spin_lock_bh(&ap_list_lock); 702 688 if (is_card_dev(dev)) 703 689 list_del_init(&to_ap_card(dev)->list); 704 690 else 705 691 list_del_init(&to_ap_queue(dev)->list); 706 692 spin_unlock_bh(&ap_list_lock); 707 - if (ap_drv->remove) 708 - ap_drv->remove(ap_dev); 693 + 709 694 return 0; 710 695 } 711 696 ··· 1077 1056 } 1078 1057 /* get it and thus adjust reference counter */ 1079 1058 get_device(&ac->ap_dev.device); 1080 - /* Add card device to card list */ 1081 - spin_lock_bh(&ap_list_lock); 1082 - list_add(&ac->list, &ap_card_list); 1083 - spin_unlock_bh(&ap_list_lock); 1084 1059 } 1085 1060 /* now create the new queue device */ 1086 1061 aq = ap_queue_create(qid, type); ··· 1087 1070 aq->ap_dev.device.parent = &ac->ap_dev.device; 1088 1071 dev_set_name(&aq->ap_dev.device, 1089 1072 "%02x.%04x", id, dom); 1090 - /* Add queue device to card queue list */ 1091 - spin_lock_bh(&ap_list_lock); 1092 - list_add(&aq->list, &ac->queues); 1093 - spin_unlock_bh(&ap_list_lock); 1094 1073 /* Start with a device reset */ 1095 1074 spin_lock_bh(&aq->lock); 1096 1075 ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); ··· 1094 1081 /* Register device */ 1095 1082 rc = device_register(&aq->ap_dev.device); 1096 1083 if (rc) { 1097 - spin_lock_bh(&ap_list_lock); 1098 - list_del_init(&aq->list); 1099 - spin_unlock_bh(&ap_list_lock); 1100 1084 put_device(&aq->ap_dev.device); 1101 1085 continue; 1102 1086 }
+8 -1
drivers/s390/crypto/ap_card.c
··· 160 160 161 161 static void ap_card_device_release(struct device *dev) 162 162 { 163 - kfree(to_ap_card(dev)); 163 + struct ap_card *ac = to_ap_card(dev); 164 + 165 + if (!list_empty(&ac->list)) { 166 + spin_lock_bh(&ap_list_lock); 167 + list_del_init(&ac->list); 168 + spin_unlock_bh(&ap_list_lock); 169 + } 170 + kfree(ac); 164 171 } 165 172 166 173 struct ap_card *ap_card_create(int id, int queue_depth, int device_type,
+8 -1
drivers/s390/crypto/ap_queue.c
··· 584 584 585 585 static void ap_queue_device_release(struct device *dev) 586 586 { 587 - kfree(to_ap_queue(dev)); 587 + struct ap_queue *aq = to_ap_queue(dev); 588 + 589 + if (!list_empty(&aq->list)) { 590 + spin_lock_bh(&ap_list_lock); 591 + list_del_init(&aq->list); 592 + spin_unlock_bh(&ap_list_lock); 593 + } 594 + kfree(aq); 588 595 } 589 596 590 597 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
+2 -2
drivers/s390/net/netiucv.c
··· 1954 1954 privptr->conn = NULL; privptr->fsm = NULL; 1955 1955 /* privptr gets freed by free_netdev() */ 1956 1956 } 1957 - free_netdev(dev); 1958 1957 } 1959 1958 1960 1959 /** ··· 1971 1972 dev->mtu = NETIUCV_MTU_DEFAULT; 1972 1973 dev->min_mtu = 576; 1973 1974 dev->max_mtu = NETIUCV_MTU_MAX; 1974 - dev->destructor = netiucv_free_netdevice; 1975 + dev->needs_free_netdev = true; 1976 + dev->priv_destructor = netiucv_free_netdevice; 1975 1977 dev->hard_header_len = NETIUCV_HDRLEN; 1976 1978 dev->addr_len = 0; 1977 1979 dev->type = ARPHRD_SLIP;
+1
drivers/scsi/bnx2fc/bnx2fc.h
··· 191 191 struct bnx2fc_cmd_mgr *cmd_mgr; 192 192 spinlock_t hba_lock; 193 193 struct mutex hba_mutex; 194 + struct mutex hba_stats_mutex; 194 195 unsigned long adapter_state; 195 196 #define ADAPTER_STATE_UP 0 196 197 #define ADAPTER_STATE_GOING_DOWN 1
+8 -2
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 663 663 if (!fw_stats) 664 664 return NULL; 665 665 666 + mutex_lock(&hba->hba_stats_mutex); 667 + 666 668 bnx2fc_stats = fc_get_host_stats(shost); 667 669 668 670 init_completion(&hba->stat_req_done); 669 671 if (bnx2fc_send_stat_req(hba)) 670 - return bnx2fc_stats; 672 + goto unlock_stats_mutex; 671 673 rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); 672 674 if (!rc) { 673 675 BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); 674 - return bnx2fc_stats; 676 + goto unlock_stats_mutex; 675 677 } 676 678 BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); 677 679 bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; ··· 695 693 696 694 memcpy(&hba->prev_stats, hba->stats_buffer, 697 695 sizeof(struct fcoe_statistics_params)); 696 + 697 + unlock_stats_mutex: 698 + mutex_unlock(&hba->hba_stats_mutex); 698 699 return bnx2fc_stats; 699 700 } 700 701 ··· 1345 1340 } 1346 1341 spin_lock_init(&hba->hba_lock); 1347 1342 mutex_init(&hba->hba_mutex); 1343 + mutex_init(&hba->hba_stats_mutex); 1348 1344 1349 1345 hba->cnic = cnic; 1350 1346
-1
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
··· 1595 1595 cxgbi_sock_put(csk); 1596 1596 } 1597 1597 csk->dst = NULL; 1598 - csk->cdev = NULL; 1599 1598 } 1600 1599 1601 1600 static int init_act_open(struct cxgbi_sock *csk)
+40 -14
drivers/scsi/cxgbi/libcxgbi.c
··· 867 867 log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", 868 868 csk, (csk)->state, (csk)->flags, (csk)->tid); 869 869 spin_lock_bh(&csk->lock); 870 - dst_confirm(csk->dst); 870 + if (csk->dst) 871 + dst_confirm(csk->dst); 871 872 data_lost = skb_queue_len(&csk->receive_queue); 872 873 __skb_queue_purge(&csk->receive_queue); 873 874 ··· 883 882 } 884 883 885 884 if (close_req) { 886 - if (data_lost) 885 + if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) || 886 + data_lost) 887 887 csk->cdev->csk_send_abort_req(csk); 888 888 else 889 889 csk->cdev->csk_send_close_req(csk); ··· 1188 1186 cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb)); 1189 1187 skb = next; 1190 1188 } 1191 - done: 1189 + 1192 1190 if (likely(skb_queue_len(&csk->write_queue))) 1193 1191 cdev->csk_push_tx_frames(csk, 1); 1192 + done: 1194 1193 spin_unlock_bh(&csk->lock); 1195 1194 return copied; 1196 1195 ··· 1571 1568 } 1572 1569 } 1573 1570 1574 - static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb) 1571 + static int 1572 + skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn, 1573 + struct sk_buff *skb) 1575 1574 { 1576 1575 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 1576 + int err; 1577 1577 1578 1578 log_debug(1 << CXGBI_DBG_PDU_RX, 1579 1579 "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n", ··· 1614 1608 } 1615 1609 } 1616 1610 1617 - return read_pdu_skb(conn, skb, 0, 0); 1611 + err = read_pdu_skb(conn, skb, 0, 0); 1612 + if (likely(err >= 0)) { 1613 + struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data; 1614 + u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK; 1615 + 1616 + if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP)) 1617 + cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD); 1618 + } 1619 + 1620 + return err; 1618 1621 } 1619 1622 1620 1623 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb, ··· 1728 1713 cxgbi_skcb_rx_pdulen(skb)); 1729 1714 1730 1715 if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) { 1731 - err = skb_read_pdu_bhs(conn, skb); 1716 + err = skb_read_pdu_bhs(csk, conn, skb); 1732 1717 if (err < 0) { 1733 1718 pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, " 1734 1719 "f 0x%lx, plen %u.\n", ··· 1746 1731 cxgbi_skcb_flags(skb), 1747 1732 cxgbi_skcb_rx_pdulen(skb)); 1748 1733 } else { 1749 - err = skb_read_pdu_bhs(conn, skb); 1734 + err = skb_read_pdu_bhs(csk, conn, skb); 1750 1735 if (err < 0) { 1751 1736 pr_err("bhs, csk 0x%p, skb 0x%p,%u, " 1752 1737 "f 0x%lx, plen %u.\n", ··· 1888 1873 tcp_task->dd_data = tdata; 1889 1874 task->hdr = NULL; 1890 1875 1876 + if (tdata->skb) { 1877 + kfree_skb(tdata->skb); 1878 + tdata->skb = NULL; 1879 + } 1880 + 1891 1881 if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && 1892 1882 (opcode == ISCSI_OP_SCSI_DATA_OUT || 1893 1883 (opcode == ISCSI_OP_SCSI_CMD && ··· 1910 1890 return -ENOMEM; 1911 1891 } 1912 1892 1893 + skb_get(tdata->skb); 1913 1894 skb_reserve(tdata->skb, cdev->skb_tx_rsvd); 1914 1895 task->hdr = (struct iscsi_hdr *)tdata->skb->data; 1915 1896 task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ ··· 2056 2035 unsigned int datalen; 2057 2036 int err; 2058 2037 2059 - if (!skb) { 2038 + if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) { 2060 2039 log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, 2061 - "task 0x%p, skb NULL.\n", task); 2040 + "task 0x%p, skb 0x%p\n", task, skb); 2062 2041 return 0; 2063 2042 } 2064 2043 ··· 2071 2050 } 2072 2051 2073 2052 datalen = skb->data_len; 2074 - tdata->skb = NULL; 2075 2053 2076 2054 /* write ppod first if using ofldq to 
write ppod */ 2077 2055 if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { ··· 2098 2078 pdulen += ISCSI_DIGEST_SIZE; 2099 2079 2100 2080 task->conn->txdata_octets += pdulen; 2081 + cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE); 2101 2082 return 0; 2102 2083 } 2103 2084 ··· 2107 2086 "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", 2108 2087 task, skb, skb->len, skb->data_len, err); 2109 2088 /* reset skb to send when we are called again */ 2110 - tdata->skb = skb; 2111 2089 return err; 2112 2090 } 2113 2091 ··· 2114 2094 "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", 2115 2095 task->itt, skb, skb->len, skb->data_len, err); 2116 2096 2117 - kfree_skb(skb); 2097 + __kfree_skb(tdata->skb); 2098 + tdata->skb = NULL; 2118 2099 2119 2100 iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); 2120 2101 iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); ··· 2134 2113 2135 2114 tcp_task->dd_data = NULL; 2136 2115 /* never reached the xmit task callout */ 2137 - if (tdata->skb) 2138 - __kfree_skb(tdata->skb); 2116 + if (tdata->skb) { 2117 + kfree_skb(tdata->skb); 2118 + tdata->skb = NULL; 2119 + } 2139 2120 2140 2121 task_release_itt(task, task->hdr_itt); 2141 2122 memset(tdata, 0, sizeof(*tdata)); ··· 2737 2714 static int __init libcxgbi_init_module(void) 2738 2715 { 2739 2716 pr_info("%s", version); 2717 + 2718 + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < 2719 + sizeof(struct cxgbi_skb_cb)); 2740 2720 return 0; 2741 2721 } 2742 2722
+9 -8
drivers/scsi/cxgbi/libcxgbi.h
··· 187 187 CTPF_HAS_ATID, /* reserved atid */ 188 188 CTPF_HAS_TID, /* reserved hw tid */ 189 189 CTPF_OFFLOAD_DOWN, /* offload function off */ 190 + CTPF_LOGOUT_RSP_RCVD, /* received logout response */ 190 191 }; 191 192 192 193 struct cxgbi_skb_rx_cb { ··· 196 195 }; 197 196 198 197 struct cxgbi_skb_tx_cb { 199 - void *l2t; 198 + void *handle; 199 + void *arp_err_handler; 200 200 struct sk_buff *wr_next; 201 201 }; 202 202 ··· 205 203 SKCBF_TX_NEED_HDR, /* packet needs a header */ 206 204 SKCBF_TX_MEM_WRITE, /* memory write */ 207 205 SKCBF_TX_FLAG_COMPL, /* wr completion flag */ 206 + SKCBF_TX_DONE, /* skb tx done */ 208 207 SKCBF_RX_COALESCED, /* received whole pdu */ 209 208 SKCBF_RX_HDR, /* received pdu header */ 210 209 SKCBF_RX_DATA, /* received pdu payload */ ··· 218 215 }; 219 216 220 217 struct cxgbi_skb_cb { 221 - unsigned char ulp_mode; 222 - unsigned long flags; 223 - unsigned int seq; 224 218 union { 225 219 struct cxgbi_skb_rx_cb rx; 226 220 struct cxgbi_skb_tx_cb tx; 227 221 }; 222 + unsigned char ulp_mode; 223 + unsigned long flags; 224 + unsigned int seq; 228 225 }; 229 226 230 227 #define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) ··· 377 374 cxgbi_skcb_tx_wr_next(skb) = NULL; 378 375 /* 379 376 * We want to take an extra reference since both us and the driver 380 - * need to free the packet before it's really freed. We know there's 381 - * just one user currently so we use atomic_set rather than skb_get 382 - * to avoid the atomic op. 377 + * need to free the packet before it's really freed. 383 378 */ 384 - atomic_set(&skb->users, 2); 379 + skb_get(skb); 385 380 386 381 if (!csk->wr_pending_head) 387 382 csk->wr_pending_head = skb;
+4 -6
drivers/scsi/device_handler/scsi_dh_rdac.c
··· 265 265 struct list_head *list, 266 266 unsigned char *cdb) 267 267 { 268 - struct scsi_device *sdev = ctlr->ms_sdev; 269 - struct rdac_dh_data *h = sdev->handler_data; 270 268 struct rdac_mode_common *common; 271 269 unsigned data_size; 272 270 struct rdac_queue_data *qdata; 273 271 u8 *lun_table; 274 272 275 - if (h->ctlr->use_ms10) { 273 + if (ctlr->use_ms10) { 276 274 struct rdac_pg_expanded *rdac_pg; 277 275 278 276 data_size = sizeof(struct rdac_pg_expanded); 279 - rdac_pg = &h->ctlr->mode_select.expanded; 277 + rdac_pg = &ctlr->mode_select.expanded; 280 278 memset(rdac_pg, 0, data_size); 281 279 common = &rdac_pg->common; 282 280 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; ··· 286 288 struct rdac_pg_legacy *rdac_pg; 287 289 288 290 data_size = sizeof(struct rdac_pg_legacy); 289 - rdac_pg = &h->ctlr->mode_select.legacy; 291 + rdac_pg = &ctlr->mode_select.legacy; 290 292 memset(rdac_pg, 0, data_size); 291 293 common = &rdac_pg->common; 292 294 rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; ··· 302 304 } 303 305 304 306 /* Prepare the command. */ 305 - if (h->ctlr->use_ms10) { 307 + if (ctlr->use_ms10) { 306 308 cdb[0] = MODE_SELECT_10; 307 309 cdb[7] = data_size >> 8; 308 310 cdb[8] = data_size & 0xff;
+23 -4
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
··· 1170 1170 cmd = list_first_entry_or_null(&vscsi->free_cmd, 1171 1171 struct ibmvscsis_cmd, list); 1172 1172 if (cmd) { 1173 + if (cmd->abort_cmd) 1174 + cmd->abort_cmd = NULL; 1173 1175 cmd->flags &= ~(DELAY_SEND); 1174 1176 list_del(&cmd->list); 1175 1177 cmd->iue = iue; ··· 1776 1774 if (cmd->abort_cmd) { 1777 1775 retry = true; 1778 1776 cmd->abort_cmd->flags &= ~(DELAY_SEND); 1777 + cmd->abort_cmd = NULL; 1779 1778 } 1780 1779 1781 1780 /* ··· 1791 1788 list_del(&cmd->list); 1792 1789 ibmvscsis_free_cmd_resources(vscsi, 1793 1790 cmd); 1791 + /* 1792 + * With a successfully aborted op 1793 + * through LIO we want to increment the 1794 + * the vscsi credit so that when we dont 1795 + * send a rsp to the original scsi abort 1796 + * op (h_send_crq), but the tm rsp to 1797 + * the abort is sent, the credit is 1798 + * correctly sent with the abort tm rsp. 1799 + * We would need 1 for the abort tm rsp 1800 + * and 1 credit for the aborted scsi op. 1801 + * Thus we need to increment here. 1802 + * Also we want to increment the credit 1803 + * here because we want to make sure 1804 + * cmd is actually released first 1805 + * otherwise the client will think it 1806 + * it can send a new cmd, and we could 1807 + * find ourselves short of cmd elements. 1808 + */ 1809 + vscsi->credit += 1; 1794 1810 } else { 1795 1811 iue = cmd->iue; 1796 1812 ··· 2984 2962 2985 2963 rsp->opcode = SRP_RSP; 2986 2964 2987 - if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING) 2988 - rsp->req_lim_delta = cpu_to_be32(vscsi->credit); 2989 - else 2990 - rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); 2965 + rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); 2991 2966 rsp->tag = cmd->rsp.tag; 2992 2967 rsp->flags = 0; 2993 2968
+1 -1
drivers/scsi/lpfc/lpfc_crtn.h
··· 127 127 void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *); 128 128 int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *, 129 129 struct serv_parm *, uint32_t, int); 130 - int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); 130 + void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *); 131 131 void lpfc_more_plogi(struct lpfc_vport *); 132 132 void lpfc_more_adisc(struct lpfc_vport *); 133 133 void lpfc_end_rscn(struct lpfc_vport *);
+3 -2
drivers/scsi/lpfc/lpfc_ct.c
··· 978 978 ndlp, did, ndlp->nlp_fc4_type, 979 979 FC_TYPE_FCP, FC_TYPE_NVME); 980 980 ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; 981 + 982 + lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 983 + lpfc_issue_els_prli(vport, ndlp, 0); 981 984 } 982 - lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); 983 - lpfc_issue_els_prli(vport, ndlp, 0); 984 985 } else 985 986 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 986 987 "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
+5 -2
drivers/scsi/lpfc/lpfc_nportdisc.c
··· 206 206 * associated with a LPFC_NODELIST entry. This 207 207 * routine effectively results in a "software abort". 208 208 */ 209 - int 209 + void 210 210 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 211 211 { 212 212 LIST_HEAD(abort_list); ··· 214 214 struct lpfc_iocbq *iocb, *next_iocb; 215 215 216 216 pring = lpfc_phba_elsring(phba); 217 + 218 + /* In case of error recovery path, we might have a NULL pring here */ 219 + if (!pring) 220 + return; 217 221 218 222 /* Abort outstanding I/O on NPort <nlp_DID> */ 219 223 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, ··· 277 273 IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); 278 274 279 275 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); 280 - return 0; 281 276 } 282 277 283 278 static int
+2 -2
drivers/scsi/lpfc/lpfc_nvmet.c
··· 799 799 } 800 800 spin_unlock_irqrestore(&ctxp->ctxlock, flags); 801 801 802 - lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid, 803 - ctxp->state, 0); 802 + lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid, 803 + ctxp->state, aborting); 804 804 805 805 atomic_inc(&lpfc_nvmep->xmt_fcp_release); 806 806
+2 -1
drivers/scsi/qedi/qedi.h
··· 38 38 #define QEDI_MAX_ISCSI_TASK 4096 39 39 #define QEDI_MAX_TASK_NUM 0x0FFF 40 40 #define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 41 - #define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */ 41 + #define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */ 42 42 #define MAX_OUSTANDING_TASKS_PER_CON 1024 43 43 44 44 #define QEDI_MAX_BD_LEN 0xffff ··· 63 63 #define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) 64 64 65 65 #define QEDI_PAGE_SIZE 4096 66 + #define QEDI_HW_DMA_BOUNDARY 0xfff 66 67 #define QEDI_PATH_HANDLE 0xFE0000000UL 67 68 68 69 struct qedi_uio_ctrl {
+2 -1
drivers/scsi/qedi/qedi_fw.c
··· 870 870 QEDI_ERR(&qedi->dbg_ctx, 871 871 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", 872 872 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); 873 - WARN_ON(1); 874 873 } 875 874 } 876 875 ··· 1493 1494 tmf_hdr = (struct iscsi_tm *)mtask->hdr; 1494 1495 qedi_cmd = (struct qedi_cmd *)mtask->dd_data; 1495 1496 ep = qedi_conn->ep; 1497 + if (!ep) 1498 + return -ENODEV; 1496 1499 1497 1500 tid = qedi_get_task_idx(qedi); 1498 1501 if (tid == -1)
+6 -1
drivers/scsi/qedi/qedi_iscsi.c
··· 59 59 .this_id = -1, 60 60 .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, 61 61 .max_sectors = 0xffff, 62 + .dma_boundary = QEDI_HW_DMA_BOUNDARY, 62 63 .cmd_per_lun = 128, 63 64 .use_clustering = ENABLE_CLUSTERING, 64 65 .shost_attrs = qedi_shost_attrs, ··· 1224 1223 1225 1224 iscsi_cid = (u32)path_data->handle; 1226 1225 qedi_ep = qedi->ep_tbl[iscsi_cid]; 1227 - QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 1226 + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1228 1227 "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); 1228 + if (!qedi_ep) { 1229 + ret = -EINVAL; 1230 + goto set_path_exit; 1231 + } 1229 1232 1230 1233 if (!is_valid_ether_addr(&path_data->mac_addr[0])) { 1231 1234 QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+14 -15
drivers/scsi/qedi/qedi_main.c
··· 151 151 152 152 static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) 153 153 { 154 + if (udev->uctrl) { 155 + free_page((unsigned long)udev->uctrl); 156 + udev->uctrl = NULL; 157 + } 158 + 154 159 if (udev->ll2_ring) { 155 160 free_page((unsigned long)udev->ll2_ring); 156 161 udev->ll2_ring = NULL; ··· 174 169 __qedi_free_uio_rings(udev); 175 170 176 171 pci_dev_put(udev->pdev); 177 - kfree(udev->uctrl); 178 172 kfree(udev); 179 173 } 180 174 ··· 212 208 if (udev->ll2_ring || udev->ll2_buf) 213 209 return rc; 214 210 211 + /* Memory for control area. */ 212 + udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL); 213 + if (!udev->uctrl) 214 + return -ENOMEM; 215 + 215 216 /* Allocating memory for LL2 ring */ 216 217 udev->ll2_ring_size = QEDI_PAGE_SIZE; 217 218 udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); ··· 246 237 static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) 247 238 { 248 239 struct qedi_uio_dev *udev = NULL; 249 - struct qedi_uio_ctrl *uctrl = NULL; 250 240 int rc = 0; 251 241 252 242 list_for_each_entry(udev, &qedi_udev_list, list) { ··· 266 258 goto err_udev; 267 259 } 268 260 269 - uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL); 270 - if (!uctrl) { 271 - rc = -ENOMEM; 272 - goto err_uctrl; 273 - } 274 - 275 261 udev->uio_dev = -1; 276 262 277 263 udev->qedi = qedi; 278 264 udev->pdev = qedi->pdev; 279 - udev->uctrl = uctrl; 280 265 281 266 rc = __qedi_alloc_uio_rings(udev); 282 267 if (rc) 283 - goto err_uio_rings; 268 + goto err_uctrl; 284 269 285 270 list_add(&udev->list, &qedi_udev_list); 286 271 ··· 284 283 udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; 285 284 return 0; 286 285 287 - err_uio_rings: 288 - kfree(uctrl); 289 286 err_uctrl: 290 287 kfree(udev); 291 288 err_udev: ··· 827 828 qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; 828 829 qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; 829 830 qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; 831 + qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000; 832 + qedi->pf_params.iscsi_pf_params.max_fin_rt = 2; 830 833 831 834 for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { 832 835 if ((1 << log_page_size) == PAGE_SIZE) ··· 1499 1498 1500 1499 void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx) 1501 1500 { 1502 - if (!test_and_clear_bit(idx, qedi->task_idx_map)) { 1501 + if (!test_and_clear_bit(idx, qedi->task_idx_map)) 1503 1502 QEDI_ERR(&qedi->dbg_ctx, 1504 1503 "FW task context, already cleared, tid=0x%x\n", idx); 1505 - WARN_ON(1); 1506 - } 1507 1504 } 1508 1505 1509 1506 void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
+5 -4
drivers/scsi/qla2xxx/qla_bsg.c
··· 730 730 return -EIO; 731 731 } 732 732 733 + memset(&elreq, 0, sizeof(elreq)); 734 + 733 735 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, 734 736 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, 735 737 DMA_TO_DEVICE); ··· 797 795 798 796 if (atomic_read(&vha->loop_state) == LOOP_READY && 799 797 (ha->current_topology == ISP_CFG_F || 800 - ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && 801 - le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 802 - && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 803 - elreq.options == EXTERNAL_LOOPBACK) { 798 + (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && 799 + req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 800 + elreq.options == EXTERNAL_LOOPBACK) { 804 801 type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; 805 802 ql_dbg(ql_dbg_user, vha, 0x701e, 806 803 "BSG request type: %s.\n", type);
+2 -2
drivers/scsi/qla2xxx/qla_dbg.c
··· 1131 1131 1132 1132 /* Mailbox registers. */ 1133 1133 mbx_reg = &reg->mailbox0; 1134 - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) 1134 + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) 1135 1135 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); 1136 1136 1137 1137 /* Transfer sequence registers. */ ··· 2090 2090 2091 2091 /* Mailbox registers. */ 2092 2092 mbx_reg = &reg->mailbox0; 2093 - for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) 2093 + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) 2094 2094 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); 2095 2095 2096 2096 /* Transfer sequence registers. */
+1
drivers/scsi/qla2xxx/qla_def.h
··· 3425 3425 uint8_t max_req_queues; 3426 3426 uint8_t max_rsp_queues; 3427 3427 uint8_t max_qpairs; 3428 + uint8_t num_qpairs; 3428 3429 struct qla_qpair *base_qpair; 3429 3430 struct qla_npiv_entry *npiv_info; 3430 3431 uint16_t nvram_npiv_size;
+4 -1
drivers/scsi/qla2xxx/qla_init.c
··· 7543 7543 /* Assign available que pair id */ 7544 7544 mutex_lock(&ha->mq_lock); 7545 7545 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs); 7546 - if (qpair_id >= ha->max_qpairs) { 7546 + if (ha->num_qpairs >= ha->max_qpairs) { 7547 7547 mutex_unlock(&ha->mq_lock); 7548 7548 ql_log(ql_log_warn, vha, 0x0183, 7549 7549 "No resources to create additional q pair.\n"); 7550 7550 goto fail_qid_map; 7551 7551 } 7552 + ha->num_qpairs++; 7552 7553 set_bit(qpair_id, ha->qpair_qid_map); 7553 7554 ha->queue_pair_map[qpair_id] = qpair; 7554 7555 qpair->id = qpair_id; ··· 7636 7635 fail_msix: 7637 7636 ha->queue_pair_map[qpair_id] = NULL; 7638 7637 clear_bit(qpair_id, ha->qpair_qid_map); 7638 + ha->num_qpairs--; 7639 7639 mutex_unlock(&ha->mq_lock); 7640 7640 fail_qid_map: 7641 7641 kfree(qpair); ··· 7662 7660 mutex_lock(&ha->mq_lock); 7663 7661 ha->queue_pair_map[qpair->id] = NULL; 7664 7662 clear_bit(qpair->id, ha->qpair_qid_map); 7663 + ha->num_qpairs--; 7665 7664 list_del(&qpair->qp_list_elem); 7666 7665 if (list_empty(&vha->qp_list)) 7667 7666 vha->flags.qpairs_available = 0;
+7 -19
drivers/scsi/qla2xxx/qla_inline.h
··· 129 129 } 130 130 131 131 static inline void 132 - qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, 133 - struct qla_tgt_cmd *tc) 132 + qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx) 134 133 { 135 - struct dsd_dma *dsd_ptr, *tdsd_ptr; 136 - struct crc_context *ctx; 137 - 138 - if (sp) 139 - ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); 140 - else if (tc) 141 - ctx = (struct crc_context *)tc->ctx; 142 - else { 143 - BUG(); 144 - return; 145 - } 134 + struct dsd_dma *dsd, *tdsd; 146 135 147 136 /* clean up allocated prev pool */ 148 - list_for_each_entry_safe(dsd_ptr, tdsd_ptr, 149 - &ctx->dsd_list, list) { 150 - dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, 151 - dsd_ptr->dsd_list_dma); 152 - list_del(&dsd_ptr->list); 153 - kfree(dsd_ptr); 137 + list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) { 138 + dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr, 139 + dsd->dsd_list_dma); 140 + list_del(&dsd->list); 141 + kfree(dsd); 154 142 } 155 143 INIT_LIST_HEAD(&ctx->dsd_list); 156 144 }
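The qla_inline.h rework above is the standard free-while-iterating idiom: list_for_each_entry_safe() caches the next entry before the current one is handed to dma_pool_free() and kfree(). A minimal user-space sketch of that same idiom, with made-up names rather than the qla2xxx structures:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int payload;
};

/* Free every node: the successor is saved before free(), exactly like
 * the "safe" list cursor caching its temporary pointer. */
static void free_all(struct node **head)
{
        struct node *cur = *head;

        while (cur) {
                struct node *next = cur->next;  /* grab it before freeing */

                free(cur);
                cur = next;
        }
        *head = NULL;                           /* leave an empty list behind */
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                n->payload = i;
                n->next = head;
                head = n;
        }
        free_all(&head);
        printf("head is now %p\n", (void *)head);
        return 0;
}

The only requirement is that the successor pointer is read before the current node is released; the DMA pool and crc_context details in the qla2xxx version are orthogonal to the pattern.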
+1 -1
drivers/scsi/qla2xxx/qla_isr.c
··· 3282 3282 } 3283 3283 3284 3284 /* Enable MSI-X vector for response queue update for queue 0 */ 3285 - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3285 + if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3286 3286 if (ha->msixbase && ha->mqiobase && 3287 3287 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 3288 3288 ql2xmqsupport))
+2 -11
drivers/scsi/qla2xxx/qla_mbx.c
··· 3676 3676 qlt_update_host_map(vha, id); 3677 3677 } 3678 3678 3679 - fc_host_port_name(vha->host) = 3680 - wwn_to_u64(vha->port_name); 3681 - 3682 - if (qla_ini_mode_enabled(vha)) 3683 - ql_dbg(ql_dbg_mbx, vha, 0x1018, 3684 - "FA-WWN portname %016llx (%x)\n", 3685 - fc_host_port_name(vha->host), 3686 - rptid_entry->vp_status); 3687 - 3688 3679 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 3689 3680 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 3690 3681 } else { ··· 4812 4821 4813 4822 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4814 4823 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 4815 - mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ 4824 + /* BIT_6 specifies 64bit address */ 4825 + mcp->mb[1] = mreq->options | BIT_15 | BIT_6; 4816 4826 if (IS_CNA_CAPABLE(ha)) { 4817 - mcp->mb[1] |= BIT_15; 4818 4827 mcp->mb[2] = vha->fcoe_fcf_idx; 4819 4828 } 4820 4829 mcp->mb[16] = LSW(mreq->rcv_dma);
+29 -17
drivers/scsi/qla2xxx/qla_os.c
··· 630 630 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 631 631 } 632 632 633 + if (!ctx) 634 + goto end; 635 + 633 636 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 634 637 /* List assured to be having elements */ 635 - qla2x00_clean_dsd_pool(ha, sp, NULL); 638 + qla2x00_clean_dsd_pool(ha, ctx); 636 639 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 637 640 } 638 641 639 642 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 640 - dma_pool_free(ha->dl_dma_pool, ctx, 641 - ((struct crc_context *)ctx)->crc_ctx_dma); 643 + struct crc_context *ctx0 = ctx; 644 + 645 + dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); 642 646 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 643 647 } 644 648 645 649 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 646 - struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; 650 + struct ct6_dsd *ctx1 = ctx; 647 651 648 652 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 649 - ctx1->fcp_cmnd_dma); 653 + ctx1->fcp_cmnd_dma); 650 654 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); 651 655 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; 652 656 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 653 657 mempool_free(ctx1, ha->ctx_mempool); 654 658 } 655 659 660 + end: 656 661 CMD_SP(cmd) = NULL; 657 662 qla2x00_rel_sp(sp); 658 663 } ··· 704 699 sp->flags &= ~SRB_CRC_PROT_DMA_VALID; 705 700 } 706 701 702 + if (!ctx) 703 + goto end; 704 + 707 705 if (sp->flags & SRB_CRC_CTX_DSD_VALID) { 708 706 /* List assured to be having elements */ 709 - qla2x00_clean_dsd_pool(ha, sp, NULL); 707 + qla2x00_clean_dsd_pool(ha, ctx); 710 708 sp->flags &= ~SRB_CRC_CTX_DSD_VALID; 711 709 } 712 710 713 711 if (sp->flags & SRB_CRC_CTX_DMA_VALID) { 714 - dma_pool_free(ha->dl_dma_pool, ctx, 715 - ((struct crc_context *)ctx)->crc_ctx_dma); 712 + struct crc_context *ctx0 = ctx; 713 + 714 + dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma); 716 715 sp->flags &= ~SRB_CRC_CTX_DMA_VALID; 717 716 } 718 717 719 718 if (sp->flags & SRB_FCP_CMND_DMA_VALID) { 720 - struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx; 721 - 719 + struct ct6_dsd *ctx1 = ctx; 722 720 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, 723 721 ctx1->fcp_cmnd_dma); 724 722 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list); ··· 729 721 ha->gbl_dsd_avail += ctx1->dsd_use_cnt; 730 722 mempool_free(ctx1, ha->ctx_mempool); 731 723 } 732 - 724 + end: 733 725 CMD_SP(cmd) = NULL; 734 726 qla2xxx_rel_qpair_sp(sp->qpair, sp); 735 727 } ··· 1640 1632 void 1641 1633 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) 1642 1634 { 1643 - int que, cnt; 1635 + int que, cnt, status; 1644 1636 unsigned long flags; 1645 1637 srb_t *sp; 1646 1638 struct qla_hw_data *ha = vha->hw; ··· 1670 1662 */ 1671 1663 sp_get(sp); 1672 1664 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1673 - qla2xxx_eh_abort(GET_CMD_SP(sp)); 1665 + status = qla2xxx_eh_abort(GET_CMD_SP(sp)); 1674 1666 spin_lock_irqsave(&ha->hardware_lock, flags); 1667 + /* Get rid of extra reference if immediate exit 1668 + * from ql2xxx_eh_abort */ 1669 + if (status == FAILED && (qla2x00_isp_reg_stat(ha))) 1670 + atomic_dec(&sp->ref_count); 1675 1671 } 1676 1672 req->outstanding_cmds[cnt] = NULL; 1677 1673 sp->done(sp, res); ··· 2635 2623 2636 2624 if (mem_only) { 2637 2625 if (pci_enable_device_mem(pdev)) 2638 - goto probe_out; 2626 + return ret; 2639 2627 } else { 2640 2628 if (pci_enable_device(pdev)) 2641 - goto probe_out; 2629 + return ret; 2642 2630 } 2643 2631 2644 2632 /* This may fail but that's ok */ ··· 2648 2636 if (!ha) { 2649 2637 ql_log_pci(ql_log_fatal, pdev, 0x0009, 2650 2638 "Unable to allocate memory for ha.\n"); 2651 - goto 
probe_out; 2639 + goto disable_device; 2652 2640 } 2653 2641 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2654 2642 "Memory allocated for ha=%p.\n", ha); ··· 3266 3254 pci_release_selected_regions(ha->pdev, ha->bars); 3267 3255 kfree(ha); 3268 3256 3269 - probe_out: 3257 + disable_device: 3270 3258 pci_disable_device(pdev); 3271 3259 return ret; 3272 3260 }
+6 -4
drivers/scsi/qla2xxx/qla_target.c
··· 2245 2245 pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, 2246 2246 cmd->dma_data_direction); 2247 2247 2248 - if (cmd->ctx_dsd_alloced) 2249 - qla2x00_clean_dsd_pool(ha, NULL, cmd); 2248 + if (!cmd->ctx) 2249 + return; 2250 2250 2251 - if (cmd->ctx) 2252 - dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); 2251 + if (cmd->ctx_dsd_alloced) 2252 + qla2x00_clean_dsd_pool(ha, cmd->ctx); 2253 + 2254 + dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); 2253 2255 } 2254 2256 2255 2257 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+1 -1
drivers/scsi/qla2xxx/qla_tmpl.c
··· 371 371 goto done; 372 372 } 373 373 374 - if (end <= start || start == 0 || end == 0) { 374 + if (end < start || start == 0 || end == 0) { 375 375 ql_dbg(ql_dbg_misc, vha, 0xd023, 376 376 "%s: unusable range (start=%x end=%x)\n", __func__, 377 377 ent->t262.end_addr, ent->t262.start_addr);
+1 -1
drivers/scsi/scsi_debug.c
··· 1404 1404 arr[4] = SDEBUG_LONG_INQ_SZ - 5; 1405 1405 arr[5] = (int)have_dif_prot; /* PROTECT bit */ 1406 1406 if (sdebug_vpd_use_hostno == 0) 1407 - arr[5] = 0x10; /* claim: implicit TGPS */ 1407 + arr[5] |= 0x10; /* claim: implicit TPGS */ 1408 1408 arr[6] = 0x10; /* claim: MultiP */ 1409 1409 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */ 1410 1410 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
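The scsi_debug change above matters because byte 5 of the INQUIRY response already carries the PROTECT bit at that point; assigning 0x10 outright wiped it, while OR-ing it in preserves it. A tiny stand-alone illustration with hypothetical values (not the driver's arr[] buffer):

#include <stdio.h>

int main(void)
{
        unsigned char byte5 = 0x01;             /* pretend PROTECT is already set */

        byte5 |= 0x10;                          /* claim TPGS without losing PROTECT */
        printf("byte5 = 0x%02x\n", byte5);      /* prints 0x11, not 0x10 */
        return 0;
}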
+1 -1
drivers/staging/ccree/Kconfig
··· 1 1 config CRYPTO_DEV_CCREE 2 2 tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators" 3 - depends on CRYPTO_HW && OF && HAS_DMA 3 + depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA 4 4 default n 5 5 select CRYPTO_HASH 6 6 select CRYPTO_BLKCIPHER
+2 -1
drivers/staging/ccree/ssi_buffer_mgr.c
··· 216 216 uint32_t nents, lbytes; 217 217 218 218 nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL); 219 - sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF)); 219 + sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip, 220 + (direct == SSI_SG_TO_BUF)); 220 221 } 221 222 222 223 static inline int ssi_buffer_mgr_render_buff_to_mlli(
+1 -5
drivers/staging/iio/cdc/ad7152.c
··· 231 231 if (i >= ARRAY_SIZE(ad7152_filter_rate_table)) 232 232 i = ARRAY_SIZE(ad7152_filter_rate_table) - 1; 233 233 234 - mutex_lock(&chip->state_lock); 235 234 ret = i2c_smbus_write_byte_data(chip->client, 236 235 AD7152_REG_CFG2, AD7152_CFG2_OSR(i)); 237 - if (ret < 0) { 238 - mutex_unlock(&chip->state_lock); 236 + if (ret < 0) 239 237 return ret; 240 - } 241 238 242 239 chip->filter_rate_setup = i; 243 - mutex_unlock(&chip->state_lock); 244 240 245 241 return ret; 246 242 }
-9
drivers/staging/lustre/lustre/lov/lov_pack.c
··· 293 293 size_t lmmk_size; 294 294 size_t lum_size; 295 295 int rc; 296 - mm_segment_t seg; 297 296 298 297 if (!lsm) 299 298 return -ENODATA; 300 - 301 - /* 302 - * "Switch to kernel segment" to allow copying from kernel space by 303 - * copy_{to,from}_user(). 304 - */ 305 - seg = get_fs(); 306 - set_fs(KERNEL_DS); 307 299 308 300 if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) { 309 301 CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", ··· 398 406 out_free: 399 407 kvfree(lmmk); 400 408 out: 401 - set_fs(seg); 402 409 return rc; 403 410 }
-2
drivers/staging/media/atomisp/i2c/Makefile
··· 19 19 20 20 obj-$(CONFIG_VIDEO_LM3554) += lm3554.o 21 21 22 - ccflags-y += -Werror 23 -
-2
drivers/staging/media/atomisp/i2c/imx/Makefile
··· 4 4 5 5 ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o 6 6 obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o 7 - 8 - ccflags-y += -Werror
-2
drivers/staging/media/atomisp/i2c/ov5693/Makefile
··· 1 1 obj-$(CONFIG_VIDEO_OV5693) += ov5693.o 2 - 3 - ccflags-y += -Werror
+1 -1
drivers/staging/media/atomisp/pci/atomisp2/Makefile
··· 351 351 DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0 352 352 DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400 353 353 354 - ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror 354 + ccflags-y += $(INCLUDES) $(DEFINES) -fno-common 355 355
+1 -1
drivers/staging/rtl8188eu/os_dep/mon.c
··· 152 152 static void mon_setup(struct net_device *dev) 153 153 { 154 154 dev->netdev_ops = &mon_netdev_ops; 155 - dev->destructor = free_netdev; 155 + dev->needs_free_netdev = true; 156 156 ether_setup(dev); 157 157 dev->priv_flags |= IFF_NO_QUEUE; 158 158 dev->type = ARPHRD_IEEE80211;
+2 -1
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
··· 2667 2667 mon_ndev->type = ARPHRD_IEEE80211_RADIOTAP; 2668 2668 strncpy(mon_ndev->name, name, IFNAMSIZ); 2669 2669 mon_ndev->name[IFNAMSIZ - 1] = 0; 2670 - mon_ndev->destructor = rtw_ndev_destructor; 2670 + mon_ndev->needs_free_netdev = true; 2671 + mon_ndev->priv_destructor = rtw_ndev_destructor; 2671 2672 2672 2673 mon_ndev->netdev_ops = &rtw_cfg80211_monitor_if_ops; 2673 2674
-2
drivers/staging/rtl8723bs/os_dep/os_intfs.c
··· 1207 1207 1208 1208 if (ndev->ieee80211_ptr) 1209 1209 kfree((u8 *)ndev->ieee80211_ptr); 1210 - 1211 - free_netdev(ndev); 1212 1210 } 1213 1211 1214 1212 void rtw_dev_unload(struct adapter *padapter)
+1 -1
drivers/staging/rtl8723bs/os_dep/osdep_service.c
··· 160 160 oldfs = get_fs(); set_fs(get_ds()); 161 161 162 162 if (1!=readFile(fp, &buf, 1)) 163 - ret = PTR_ERR(fp); 163 + ret = -EINVAL; 164 164 165 165 set_fs(oldfs); 166 166 filp_close(fp, NULL);
+44 -8
drivers/target/iscsi/iscsi_target.c
··· 1279 1279 */ 1280 1280 if (dump_payload) 1281 1281 goto after_immediate_data; 1282 + /* 1283 + * Check for underflow case where both EDTL and immediate data payload 1284 + * exceeds what is presented by CDB's TRANSFER LENGTH, and what has 1285 + * already been set in target_cmd_size_check() as se_cmd->data_length. 1286 + * 1287 + * For this special case, fail the command and dump the immediate data 1288 + * payload. 1289 + */ 1290 + if (cmd->first_burst_len > cmd->se_cmd.data_length) { 1291 + cmd->sense_reason = TCM_INVALID_CDB_FIELD; 1292 + goto after_immediate_data; 1293 + } 1282 1294 1283 1295 immed_ret = iscsit_handle_immediate_data(cmd, hdr, 1284 1296 cmd->first_burst_len); ··· 3802 3790 { 3803 3791 int ret = 0; 3804 3792 struct iscsi_conn *conn = arg; 3793 + bool conn_freed = false; 3794 + 3805 3795 /* 3806 3796 * Allow ourselves to be interrupted by SIGINT so that a 3807 3797 * connection recovery / failure event can be triggered externally. ··· 3829 3815 goto transport_err; 3830 3816 3831 3817 ret = iscsit_handle_response_queue(conn); 3832 - if (ret == 1) 3818 + if (ret == 1) { 3833 3819 goto get_immediate; 3834 - else if (ret == -ECONNRESET) 3820 + } else if (ret == -ECONNRESET) { 3821 + conn_freed = true; 3835 3822 goto out; 3836 - else if (ret < 0) 3823 + } else if (ret < 0) { 3837 3824 goto transport_err; 3825 + } 3838 3826 } 3839 3827 3840 3828 transport_err: ··· 3846 3830 * responsible for cleaning up the early connection failure. 3847 3831 */ 3848 3832 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) 3849 - iscsit_take_action_for_connection_exit(conn); 3833 + iscsit_take_action_for_connection_exit(conn, &conn_freed); 3850 3834 out: 3835 + if (!conn_freed) { 3836 + while (!kthread_should_stop()) { 3837 + msleep(100); 3838 + } 3839 + } 3851 3840 return 0; 3852 3841 } 3853 3842 ··· 4025 4004 { 4026 4005 int rc; 4027 4006 struct iscsi_conn *conn = arg; 4007 + bool conn_freed = false; 4028 4008 4029 4009 /* 4030 4010 * Allow ourselves to be interrupted by SIGINT so that a ··· 4038 4016 */ 4039 4017 rc = wait_for_completion_interruptible(&conn->rx_login_comp); 4040 4018 if (rc < 0 || iscsi_target_check_conn_state(conn)) 4041 - return 0; 4019 + goto out; 4042 4020 4043 4021 if (!conn->conn_transport->iscsit_get_rx_pdu) 4044 4022 return 0; ··· 4047 4025 4048 4026 if (!signal_pending(current)) 4049 4027 atomic_set(&conn->transport_failed, 1); 4050 - iscsit_take_action_for_connection_exit(conn); 4028 + iscsit_take_action_for_connection_exit(conn, &conn_freed); 4029 + 4030 + out: 4031 + if (!conn_freed) { 4032 + while (!kthread_should_stop()) { 4033 + msleep(100); 4034 + } 4035 + } 4036 + 4051 4037 return 0; 4052 4038 } 4053 4039 ··· 4435 4405 * always sleep waiting for RX/TX thread shutdown to complete 4436 4406 * within iscsit_close_connection(). 4437 4407 */ 4438 - if (!conn->conn_transport->rdma_shutdown) 4408 + if (!conn->conn_transport->rdma_shutdown) { 4439 4409 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4410 + if (!sleep) 4411 + return; 4412 + } 4440 4413 4441 4414 atomic_set(&conn->conn_logout_remove, 0); 4442 4415 complete(&conn->conn_logout_comp); ··· 4455 4422 { 4456 4423 int sleep = 1; 4457 4424 4458 - if (!conn->conn_transport->rdma_shutdown) 4425 + if (!conn->conn_transport->rdma_shutdown) { 4459 4426 sleep = cmpxchg(&conn->tx_thread_active, true, false); 4427 + if (!sleep) 4428 + return; 4429 + } 4460 4430 4461 4431 atomic_set(&conn->conn_logout_remove, 0); 4462 4432 complete(&conn->conn_logout_comp);
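Several of the iscsi_target changes above make the rx/tx threads park in an msleep() loop until kthread_should_stop() is true instead of returning early, so a later kthread_stop() from connection teardown never targets a thread that has already exited. A rough user-space analogue with pthreads; the names and the 100 ms poll are illustrative, with kthread_should_stop()/kthread_stop() being the kernel-side primitives:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool should_stop;         /* stands in for kthread_should_stop() */

static void *worker(void *arg)
{
        (void)arg;
        /* ... the real work would run here and may finish early ... */

        /* Park instead of returning, until the owner asks us to stop. */
        while (!atomic_load(&should_stop))
                usleep(100 * 1000);     /* the 100 ms msleep() in the patch */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        sleep(1);                               /* owner does other teardown */
        atomic_store(&should_stop, true);       /* analogue of kthread_stop() */
        pthread_join(t, NULL);
        puts("worker exited only after being told to stop");
        return 0;
}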
+5 -1
drivers/target/iscsi/iscsi_target_erl0.c
··· 930 930 } 931 931 } 932 932 933 - void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) 933 + void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed) 934 934 { 935 + *conn_freed = false; 936 + 935 937 spin_lock_bh(&conn->state_lock); 936 938 if (atomic_read(&conn->connection_exit)) { 937 939 spin_unlock_bh(&conn->state_lock); ··· 944 942 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { 945 943 spin_unlock_bh(&conn->state_lock); 946 944 iscsit_close_connection(conn); 945 + *conn_freed = true; 947 946 return; 948 947 } 949 948 ··· 958 955 spin_unlock_bh(&conn->state_lock); 959 956 960 957 iscsit_handle_connection_cleanup(conn); 958 + *conn_freed = true; 961 959 }
+1 -1
drivers/target/iscsi/iscsi_target_erl0.h
··· 15 15 extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); 16 16 extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); 17 17 extern void iscsit_fall_back_to_erl0(struct iscsi_session *); 18 - extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); 18 + extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *); 19 19 20 20 #endif /*** ISCSI_TARGET_ERL0_H ***/
+4
drivers/target/iscsi/iscsi_target_login.c
··· 1464 1464 break; 1465 1465 } 1466 1466 1467 + while (!kthread_should_stop()) { 1468 + msleep(100); 1469 + } 1470 + 1467 1471 return 0; 1468 1472 }
+133 -63
drivers/target/iscsi/iscsi_target_nego.c
··· 493 493 494 494 static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); 495 495 496 - static bool iscsi_target_sk_state_check(struct sock *sk) 496 + static bool __iscsi_target_sk_check_close(struct sock *sk) 497 497 { 498 498 if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { 499 - pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," 499 + pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," 500 500 "returning FALSE\n"); 501 - return false; 501 + return true; 502 502 } 503 - return true; 503 + return false; 504 + } 505 + 506 + static bool iscsi_target_sk_check_close(struct iscsi_conn *conn) 507 + { 508 + bool state = false; 509 + 510 + if (conn->sock) { 511 + struct sock *sk = conn->sock->sk; 512 + 513 + read_lock_bh(&sk->sk_callback_lock); 514 + state = (__iscsi_target_sk_check_close(sk) || 515 + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); 516 + read_unlock_bh(&sk->sk_callback_lock); 517 + } 518 + return state; 519 + } 520 + 521 + static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag) 522 + { 523 + bool state = false; 524 + 525 + if (conn->sock) { 526 + struct sock *sk = conn->sock->sk; 527 + 528 + read_lock_bh(&sk->sk_callback_lock); 529 + state = test_bit(flag, &conn->login_flags); 530 + read_unlock_bh(&sk->sk_callback_lock); 531 + } 532 + return state; 533 + } 534 + 535 + static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag) 536 + { 537 + bool state = false; 538 + 539 + if (conn->sock) { 540 + struct sock *sk = conn->sock->sk; 541 + 542 + write_lock_bh(&sk->sk_callback_lock); 543 + state = (__iscsi_target_sk_check_close(sk) || 544 + test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); 545 + if (!state) 546 + clear_bit(flag, &conn->login_flags); 547 + write_unlock_bh(&sk->sk_callback_lock); 548 + } 549 + return state; 504 550 } 505 551 506 552 static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) ··· 586 540 587 541 pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", 588 542 conn, current->comm, current->pid); 543 + /* 544 + * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() 545 + * before initial PDU processing in iscsi_target_start_negotiation() 546 + * has completed, go ahead and retry until it's cleared. 547 + * 548 + * Otherwise if the TCP connection drops while this is occuring, 549 + * iscsi_target_start_negotiation() will detect the failure, call 550 + * cancel_delayed_work_sync(&conn->login_work), and cleanup the 551 + * remaining iscsi connection resources from iscsi_np process context. 
552 + */ 553 + if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { 554 + schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); 555 + return; 556 + } 589 557 590 558 spin_lock(&tpg->tpg_state_lock); 591 559 state = (tpg->tpg_state == TPG_STATE_ACTIVE); ··· 607 547 608 548 if (!state) { 609 549 pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); 610 - iscsi_target_restore_sock_callbacks(conn); 611 - iscsi_target_login_drop(conn, login); 612 - iscsit_deaccess_np(np, tpg, tpg_np); 613 - return; 550 + goto err; 614 551 } 615 552 616 - if (conn->sock) { 617 - struct sock *sk = conn->sock->sk; 618 - 619 - read_lock_bh(&sk->sk_callback_lock); 620 - state = iscsi_target_sk_state_check(sk); 621 - read_unlock_bh(&sk->sk_callback_lock); 622 - 623 - if (!state) { 624 - pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); 625 - iscsi_target_restore_sock_callbacks(conn); 626 - iscsi_target_login_drop(conn, login); 627 - iscsit_deaccess_np(np, tpg, tpg_np); 628 - return; 629 - } 553 + if (iscsi_target_sk_check_close(conn)) { 554 + pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); 555 + goto err; 630 556 } 631 557 632 558 conn->login_kworker = current; ··· 630 584 flush_signals(current); 631 585 conn->login_kworker = NULL; 632 586 633 - if (rc < 0) { 634 - iscsi_target_restore_sock_callbacks(conn); 635 - iscsi_target_login_drop(conn, login); 636 - iscsit_deaccess_np(np, tpg, tpg_np); 637 - return; 638 - } 587 + if (rc < 0) 588 + goto err; 639 589 640 590 pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", 641 591 conn, current->comm, current->pid); 642 592 643 593 rc = iscsi_target_do_login(conn, login); 644 594 if (rc < 0) { 645 - iscsi_target_restore_sock_callbacks(conn); 646 - iscsi_target_login_drop(conn, login); 647 - iscsit_deaccess_np(np, tpg, tpg_np); 595 + goto err; 648 596 } else if (!rc) { 649 - if (conn->sock) { 650 - struct sock *sk = conn->sock->sk; 651 - 652 - write_lock_bh(&sk->sk_callback_lock); 653 - clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); 654 - write_unlock_bh(&sk->sk_callback_lock); 655 - } 597 + if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) 598 + goto err; 656 599 } else if (rc == 1) { 657 600 iscsi_target_nego_release(conn); 658 601 iscsi_post_login_handler(np, conn, zero_tsih); 659 602 iscsit_deaccess_np(np, tpg, tpg_np); 660 603 } 604 + return; 605 + 606 + err: 607 + iscsi_target_restore_sock_callbacks(conn); 608 + iscsi_target_login_drop(conn, login); 609 + iscsit_deaccess_np(np, tpg, tpg_np); 661 610 } 662 611 663 612 static void iscsi_target_do_cleanup(struct work_struct *work) ··· 700 659 orig_state_change(sk); 701 660 return; 702 661 } 662 + state = __iscsi_target_sk_check_close(sk); 663 + pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); 664 + 703 665 if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { 704 666 pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" 705 667 " conn: %p\n", conn); 668 + if (state) 669 + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); 706 670 write_unlock_bh(&sk->sk_callback_lock); 707 671 orig_state_change(sk); 708 672 return; 709 673 } 710 - if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { 674 + if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { 711 675 pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", 712 676 conn); 713 677 write_unlock_bh(&sk->sk_callback_lock); 714 678 orig_state_change(sk); 715 679 return; 716 680 } 717 - 718 - state = iscsi_target_sk_state_check(sk); 719 - 
write_unlock_bh(&sk->sk_callback_lock); 720 - 721 - pr_debug("iscsi_target_sk_state_change: state: %d\n", state); 722 - 723 - if (!state) { 681 + /* 682 + * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, 683 + * but only queue conn->login_work -> iscsi_target_do_login_rx() 684 + * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. 685 + * 686 + * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() 687 + * will detect the dropped TCP connection from delayed workqueue context. 688 + * 689 + * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial 690 + * iscsi_target_start_negotiation() is running, iscsi_target_do_login() 691 + * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() 692 + * via iscsi_target_sk_check_and_clear() is responsible for detecting the 693 + * dropped TCP connection in iscsi_np process context, and cleaning up 694 + * the remaining iscsi connection resources. 695 + */ 696 + if (state) { 724 697 pr_debug("iscsi_target_sk_state_change got failed state\n"); 725 - schedule_delayed_work(&conn->login_cleanup_work, 0); 698 + set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); 699 + state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); 700 + write_unlock_bh(&sk->sk_callback_lock); 701 + 702 + orig_state_change(sk); 703 + 704 + if (!state) 705 + schedule_delayed_work(&conn->login_work, 0); 726 706 return; 727 707 } 708 + write_unlock_bh(&sk->sk_callback_lock); 709 + 728 710 orig_state_change(sk); 729 711 } 730 712 ··· 1010 946 if (iscsi_target_handle_csg_one(conn, login) < 0) 1011 947 return -1; 1012 948 if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { 949 + /* 950 + * Check to make sure the TCP connection has not 951 + * dropped asynchronously while session reinstatement 952 + * was occuring in this kthread context, before 953 + * transitioning to full feature phase operation. 954 + */ 955 + if (iscsi_target_sk_check_close(conn)) 956 + return -1; 957 + 1013 958 login->tsih = conn->sess->tsih; 1014 959 login->login_complete = 1; 1015 960 iscsi_target_restore_sock_callbacks(conn); ··· 1043 970 login_rsp->flags &= ~ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK; 1044 971 } 1045 972 break; 1046 - } 1047 - 1048 - if (conn->sock) { 1049 - struct sock *sk = conn->sock->sk; 1050 - bool state; 1051 - 1052 - read_lock_bh(&sk->sk_callback_lock); 1053 - state = iscsi_target_sk_state_check(sk); 1054 - read_unlock_bh(&sk->sk_callback_lock); 1055 - 1056 - if (!state) { 1057 - pr_debug("iscsi_target_do_login() failed state for" 1058 - " conn: %p\n", conn); 1059 - return -1; 1060 - } 1061 973 } 1062 974 1063 975 return 0; ··· 1313 1255 1314 1256 write_lock_bh(&sk->sk_callback_lock); 1315 1257 set_bit(LOGIN_FLAGS_READY, &conn->login_flags); 1258 + set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); 1316 1259 write_unlock_bh(&sk->sk_callback_lock); 1317 1260 } 1318 - 1261 + /* 1262 + * If iscsi_target_do_login returns zero to signal more PDU 1263 + * exchanges are required to complete the login, go ahead and 1264 + * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection 1265 + * is still active. 1266 + * 1267 + * Otherwise if TCP connection dropped asynchronously, go ahead 1268 + * and perform connection cleanup now. 
1269 + */ 1319 1270 ret = iscsi_target_do_login(conn, login); 1271 + if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) 1272 + ret = -1; 1273 + 1320 1274 if (ret < 0) { 1321 1275 cancel_delayed_work_sync(&conn->login_work); 1322 1276 cancel_delayed_work_sync(&conn->login_cleanup_work);
+1 -1
drivers/target/target_core_internal.h
··· 136 136 void release_se_kmem_caches(void); 137 137 u32 scsi_get_new_index(scsi_index_t); 138 138 void transport_subsystem_check_init(void); 139 - void transport_cmd_finish_abort(struct se_cmd *, int); 139 + int transport_cmd_finish_abort(struct se_cmd *, int); 140 140 unsigned char *transport_dump_cmd_direction(struct se_cmd *); 141 141 void transport_dump_dev_state(struct se_device *, char *, int *); 142 142 void transport_dump_dev_info(struct se_device *, struct se_lun *,
+8 -8
drivers/target/target_core_tmr.c
··· 75 75 kfree(tmr); 76 76 } 77 77 78 - static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) 78 + static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) 79 79 { 80 80 unsigned long flags; 81 81 bool remove = true, send_tas; ··· 91 91 transport_send_task_abort(cmd); 92 92 } 93 93 94 - transport_cmd_finish_abort(cmd, remove); 94 + return transport_cmd_finish_abort(cmd, remove); 95 95 } 96 96 97 97 static int target_check_cdb_and_preempt(struct list_head *list, ··· 184 184 cancel_work_sync(&se_cmd->work); 185 185 transport_wait_for_tasks(se_cmd); 186 186 187 - transport_cmd_finish_abort(se_cmd, true); 188 - target_put_sess_cmd(se_cmd); 187 + if (!transport_cmd_finish_abort(se_cmd, true)) 188 + target_put_sess_cmd(se_cmd); 189 189 190 190 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" 191 191 " ref_tag: %llu\n", ref_tag); ··· 281 281 cancel_work_sync(&cmd->work); 282 282 transport_wait_for_tasks(cmd); 283 283 284 - transport_cmd_finish_abort(cmd, 1); 285 - target_put_sess_cmd(cmd); 284 + if (!transport_cmd_finish_abort(cmd, 1)) 285 + target_put_sess_cmd(cmd); 286 286 } 287 287 } 288 288 ··· 380 380 cancel_work_sync(&cmd->work); 381 381 transport_wait_for_tasks(cmd); 382 382 383 - core_tmr_handle_tas_abort(cmd, tas); 384 - target_put_sess_cmd(cmd); 383 + if (!core_tmr_handle_tas_abort(cmd, tas)) 384 + target_put_sess_cmd(cmd); 385 385 } 386 386 } 387 387
+24 -8
drivers/target/target_core_transport.c
··· 651 651 percpu_ref_put(&lun->lun_ref); 652 652 } 653 653 654 - void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 654 + int transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 655 655 { 656 656 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); 657 + int ret = 0; 657 658 658 659 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 659 660 transport_lun_remove_cmd(cmd); ··· 666 665 cmd->se_tfo->aborted_task(cmd); 667 666 668 667 if (transport_cmd_check_stop_to_fabric(cmd)) 669 - return; 668 + return 1; 670 669 if (remove && ack_kref) 671 - transport_put_cmd(cmd); 670 + ret = transport_put_cmd(cmd); 671 + 672 + return ret; 672 673 } 673 674 674 675 static void target_complete_failure_work(struct work_struct *work) ··· 1163 1160 if (cmd->unknown_data_length) { 1164 1161 cmd->data_length = size; 1165 1162 } else if (size != cmd->data_length) { 1166 - pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" 1163 + pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" 1167 1164 " %u does not match SCSI CDB Length: %u for SAM Opcode:" 1168 1165 " 0x%02x\n", cmd->se_tfo->get_fabric_name(), 1169 1166 cmd->data_length, size, cmd->t_task_cdb[0]); 1170 1167 1171 - if (cmd->data_direction == DMA_TO_DEVICE && 1172 - cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1173 - pr_err("Rejecting underflow/overflow WRITE data\n"); 1174 - return TCM_INVALID_CDB_FIELD; 1168 + if (cmd->data_direction == DMA_TO_DEVICE) { 1169 + if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 1170 + pr_err_ratelimited("Rejecting underflow/overflow" 1171 + " for WRITE data CDB\n"); 1172 + return TCM_INVALID_CDB_FIELD; 1173 + } 1174 + /* 1175 + * Some fabric drivers like iscsi-target still expect to 1176 + * always reject overflow writes. Reject this case until 1177 + * full fabric driver level support for overflow writes 1178 + * is introduced tree-wide. 1179 + */ 1180 + if (size > cmd->data_length) { 1181 + pr_err_ratelimited("Rejecting overflow for" 1182 + " WRITE control CDB\n"); 1183 + return TCM_INVALID_CDB_FIELD; 1184 + } 1175 1185 } 1176 1186 /* 1177 1187 * Reject READ_* or WRITE_* with overflow/underflow for
+33 -13
drivers/target/target_core_user.c
··· 97 97 98 98 struct tcmu_dev { 99 99 struct list_head node; 100 - 100 + struct kref kref; 101 101 struct se_device se_dev; 102 102 103 103 char *name; ··· 969 969 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 970 970 if (!udev) 971 971 return NULL; 972 + kref_init(&udev->kref); 972 973 973 974 udev->name = kstrdup(name, GFP_KERNEL); 974 975 if (!udev->name) { ··· 1146 1145 return 0; 1147 1146 } 1148 1147 1148 + static void tcmu_dev_call_rcu(struct rcu_head *p) 1149 + { 1150 + struct se_device *dev = container_of(p, struct se_device, rcu_head); 1151 + struct tcmu_dev *udev = TCMU_DEV(dev); 1152 + 1153 + kfree(udev->uio_info.name); 1154 + kfree(udev->name); 1155 + kfree(udev); 1156 + } 1157 + 1158 + static void tcmu_dev_kref_release(struct kref *kref) 1159 + { 1160 + struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1161 + struct se_device *dev = &udev->se_dev; 1162 + 1163 + call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1164 + } 1165 + 1149 1166 static int tcmu_release(struct uio_info *info, struct inode *inode) 1150 1167 { 1151 1168 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); ··· 1171 1152 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1172 1153 1173 1154 pr_debug("close\n"); 1174 - 1155 + /* release ref from configure */ 1156 + kref_put(&udev->kref, tcmu_dev_kref_release); 1175 1157 return 0; 1176 1158 } 1177 1159 ··· 1292 1272 dev->dev_attrib.hw_max_sectors = 128; 1293 1273 dev->dev_attrib.hw_queue_depth = 128; 1294 1274 1275 + /* 1276 + * Get a ref incase userspace does a close on the uio device before 1277 + * LIO has initiated tcmu_free_device. 1278 + */ 1279 + kref_get(&udev->kref); 1280 + 1295 1281 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, 1296 1282 udev->uio_info.uio_dev->minor); 1297 1283 if (ret) ··· 1310 1284 return 0; 1311 1285 1312 1286 err_netlink: 1287 + kref_put(&udev->kref, tcmu_dev_kref_release); 1313 1288 uio_unregister_device(&udev->uio_info); 1314 1289 err_register: 1315 1290 vfree(udev->mb_addr); 1316 1291 err_vzalloc: 1317 1292 kfree(info->name); 1293 + info->name = NULL; 1318 1294 1319 1295 return ret; 1320 1296 } ··· 1328 1300 return 0; 1329 1301 } 1330 1302 return -EINVAL; 1331 - } 1332 - 1333 - static void tcmu_dev_call_rcu(struct rcu_head *p) 1334 - { 1335 - struct se_device *dev = container_of(p, struct se_device, rcu_head); 1336 - struct tcmu_dev *udev = TCMU_DEV(dev); 1337 - 1338 - kfree(udev); 1339 1303 } 1340 1304 1341 1305 static bool tcmu_dev_configured(struct tcmu_dev *udev) ··· 1384 1364 udev->uio_info.uio_dev->minor); 1385 1365 1386 1366 uio_unregister_device(&udev->uio_info); 1387 - kfree(udev->uio_info.name); 1388 - kfree(udev->name); 1389 1367 } 1390 - call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1368 + 1369 + /* release ref from init */ 1370 + kref_put(&udev->kref, tcmu_dev_kref_release); 1391 1371 } 1392 1372 1393 1373 enum {
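The target_core_user changes above hang the final free off a kref so the tcmu_dev survives until both the configure path and the uio file descriptor have dropped their references, whichever happens last. A compact user-space sketch of that reference-counting shape (illustrative names, plain C11 atomics standing in for struct kref):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcount;
        const char *name;
};

static struct obj *obj_create(const char *name)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcount, 1);   /* reference held by the creator */
        o->name = name;
        return o;
}

static void obj_get(struct obj *o)
{
        atomic_fetch_add(&o->refcount, 1);
}

/* The release path runs on whichever put drops the last reference. */
static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refcount, 1) == 1) {
                printf("releasing %s\n", o->name);
                free(o);
        }
}

int main(void)
{
        struct obj *o = obj_create("uio0");

        obj_get(o);     /* second holder, e.g. the open uio file */
        obj_put(o);     /* creator goes away first: object survives */
        obj_put(o);     /* last holder: object is freed here */
        return 0;
}

The release callback fires exactly once, on whichever side sees the count fall to zero, which is the property the kref_get()/kref_put() pairing above relies on.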
-2
drivers/tty/tty_port.c
··· 34 34 if (!disc) 35 35 return 0; 36 36 37 - mutex_lock(&tty->atomic_write_lock); 38 37 ret = tty_ldisc_receive_buf(disc, p, (char *)f, count); 39 - mutex_unlock(&tty->atomic_write_lock); 40 38 41 39 tty_ldisc_deref(disc); 42 40
+4 -1
drivers/usb/chipidea/core.c
··· 843 843 { 844 844 struct ci_hdrc *ci = dev_get_drvdata(dev); 845 845 846 - return sprintf(buf, "%s\n", ci_role(ci)->name); 846 + if (ci->role != CI_ROLE_END) 847 + return sprintf(buf, "%s\n", ci_role(ci)->name); 848 + 849 + return 0; 847 850 } 848 851 849 852 static ssize_t ci_role_store(struct device *dev,
+2 -1
drivers/usb/chipidea/debug.c
··· 294 294 { 295 295 struct ci_hdrc *ci = s->private; 296 296 297 - seq_printf(s, "%s\n", ci_role(ci)->name); 297 + if (ci->role != CI_ROLE_END) 298 + seq_printf(s, "%s\n", ci_role(ci)->name); 298 299 299 300 return 0; 300 301 }
+6 -2
drivers/usb/chipidea/udc.c
··· 1993 1993 int ci_hdrc_gadget_init(struct ci_hdrc *ci) 1994 1994 { 1995 1995 struct ci_role_driver *rdrv; 1996 + int ret; 1996 1997 1997 1998 if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC)) 1998 1999 return -ENXIO; ··· 2006 2005 rdrv->stop = udc_id_switch_for_host; 2007 2006 rdrv->irq = udc_irq; 2008 2007 rdrv->name = "gadget"; 2009 - ci->roles[CI_ROLE_GADGET] = rdrv; 2010 2008 2011 - return udc_start(ci); 2009 + ret = udc_start(ci); 2010 + if (!ret) 2011 + ci->roles[CI_ROLE_GADGET] = rdrv; 2012 + 2013 + return ret; 2012 2014 }
+32 -9
drivers/usb/chipidea/usbmisc_imx.c
··· 108 108 const struct usbmisc_ops *ops; 109 109 }; 110 110 111 + static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data); 112 + 111 113 static int usbmisc_imx25_init(struct imx_usbmisc_data *data) 112 114 { 113 115 struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); ··· 244 242 val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN 245 243 | MX53_USB_UHx_CTRL_ULPI_INT_EN; 246 244 writel(val, reg); 247 - /* Disable internal 60Mhz clock */ 248 - reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; 249 - val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF; 250 - writel(val, reg); 245 + if (is_imx53_usbmisc(data)) { 246 + /* Disable internal 60Mhz clock */ 247 + reg = usbmisc->base + 248 + MX53_USB_CLKONOFF_CTRL_OFFSET; 249 + val = readl(reg) | 250 + MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF; 251 + writel(val, reg); 252 + } 253 + 251 254 } 252 255 if (data->disable_oc) { 253 256 reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET; ··· 274 267 val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN 275 268 | MX53_USB_UHx_CTRL_ULPI_INT_EN; 276 269 writel(val, reg); 277 - /* Disable internal 60Mhz clock */ 278 - reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET; 279 - val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF; 280 - writel(val, reg); 270 + 271 + if (is_imx53_usbmisc(data)) { 272 + /* Disable internal 60Mhz clock */ 273 + reg = usbmisc->base + 274 + MX53_USB_CLKONOFF_CTRL_OFFSET; 275 + val = readl(reg) | 276 + MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF; 277 + writel(val, reg); 278 + } 281 279 } 282 280 if (data->disable_oc) { 283 281 reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET; ··· 468 456 .init = usbmisc_imx27_init, 469 457 }; 470 458 459 + static const struct usbmisc_ops imx51_usbmisc_ops = { 460 + .init = usbmisc_imx53_init, 461 + }; 462 + 471 463 static const struct usbmisc_ops imx53_usbmisc_ops = { 472 464 .init = usbmisc_imx53_init, 473 465 }; ··· 494 478 .init = usbmisc_imx7d_init, 495 479 .set_wakeup = usbmisc_imx7d_set_wakeup, 496 480 }; 481 + 482 + static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data) 483 + { 484 + struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev); 485 + 486 + return usbmisc->ops == &imx53_usbmisc_ops; 487 + } 497 488 498 489 int imx_usbmisc_init(struct imx_usbmisc_data *data) 499 490 { ··· 559 536 }, 560 537 { 561 538 .compatible = "fsl,imx51-usbmisc", 562 - .data = &imx53_usbmisc_ops, 539 + .data = &imx51_usbmisc_ops, 563 540 }, 564 541 { 565 542 .compatible = "fsl,imx53-usbmisc",
+2
drivers/usb/dwc2/params.c
··· 144 144 { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params }, 145 145 { .compatible = "snps,dwc2" }, 146 146 { .compatible = "samsung,s3c6400-hsotg" }, 147 + { .compatible = "amlogic,meson8-usb", 148 + .data = dwc2_set_amlogic_params }, 147 149 { .compatible = "amlogic,meson8b-usb", 148 150 .data = dwc2_set_amlogic_params }, 149 151 { .compatible = "amlogic,meson-gxbb-usb",
+5 -6
drivers/usb/gadget/composite.c
··· 315 315 list_del(&f->list); 316 316 if (f->unbind) 317 317 f->unbind(c, f); 318 + 319 + if (f->bind_deactivated) 320 + usb_function_activate(f); 318 321 } 319 322 EXPORT_SYMBOL_GPL(usb_remove_function); 320 323 ··· 959 956 960 957 f = list_first_entry(&config->functions, 961 958 struct usb_function, list); 962 - list_del(&f->list); 963 - if (f->unbind) { 964 - DBG(cdev, "unbind function '%s'/%p\n", f->name, f); 965 - f->unbind(config, f); 966 - /* may free memory for "f" */ 967 - } 959 + 960 + usb_remove_function(config, f); 968 961 } 969 962 list_del(&config->list); 970 963 if (config->unbind) {
+11 -2
drivers/usb/gadget/function/f_mass_storage.c
··· 396 396 /* Caller must hold fsg->lock */ 397 397 static void wakeup_thread(struct fsg_common *common) 398 398 { 399 - smp_wmb(); /* ensure the write of bh->state is complete */ 399 + /* 400 + * Ensure the reading of thread_wakeup_needed 401 + * and the writing of bh->state are completed 402 + */ 403 + smp_mb(); 400 404 /* Tell the main thread that something has happened */ 401 405 common->thread_wakeup_needed = 1; 402 406 if (common->thread_task) ··· 631 627 } 632 628 __set_current_state(TASK_RUNNING); 633 629 common->thread_wakeup_needed = 0; 634 - smp_rmb(); /* ensure the latest bh->state is visible */ 630 + 631 + /* 632 + * Ensure the writing of thread_wakeup_needed 633 + * and the reading of bh->state are completed 634 + */ 635 + smp_mb(); 635 636 return rc; 636 637 } 637 638
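The f_mass_storage change above replaces a one-sided smp_wmb()/smp_rmb() pair with full smp_mb() barriers because each side both writes one variable (bh->state or thread_wakeup_needed) and reads the other, and only a full barrier orders a store against a later load. The classic store-buffering litmus test below, written with C11 atomics for user space, is an illustration of that requirement rather than the gadget code itself; with sequentially consistent fences on both sides the "both read 0" outcome cannot occur:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int x, y;
static int r1, r2;

static void *side_a(void *arg)
{
        (void)arg;
        atomic_store_explicit(&x, 1, memory_order_relaxed);     /* "write bh->state" */
        atomic_thread_fence(memory_order_seq_cst);              /* the full barrier */
        r1 = atomic_load_explicit(&y, memory_order_relaxed);    /* "read the flag" */
        return NULL;
}

static void *side_b(void *arg)
{
        (void)arg;
        atomic_store_explicit(&y, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        r2 = atomic_load_explicit(&x, memory_order_relaxed);
        return NULL;
}

int main(void)
{
        for (int i = 0; i < 20000; i++) {
                pthread_t a, b;

                atomic_store(&x, 0);
                atomic_store(&y, 0);
                pthread_create(&a, NULL, side_a, NULL);
                pthread_create(&b, NULL, side_b, NULL);
                pthread_join(a, NULL);
                pthread_join(b, NULL);
                if (r1 == 0 && r2 == 0) {
                        puts("both sides missed the other's store");
                        return 1;
                }
        }
        puts("with full fences, at least one side always sees the other's store");
        return 0;
}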
+1 -1
drivers/usb/gadget/function/f_phonet.c
··· 281 281 dev->tx_queue_len = 1; 282 282 283 283 dev->netdev_ops = &pn_netdev_ops; 284 - dev->destructor = free_netdev; 284 + dev->needs_free_netdev = true; 285 285 dev->header_ops = &phonet_header_ops; 286 286 } 287 287
+6 -3
drivers/usb/gadget/legacy/inode.c
··· 1183 1183 1184 1184 /* closing ep0 === shutdown all */ 1185 1185 1186 - if (dev->gadget_registered) 1186 + if (dev->gadget_registered) { 1187 1187 usb_gadget_unregister_driver (&gadgetfs_driver); 1188 + dev->gadget_registered = false; 1189 + } 1188 1190 1189 1191 /* at this point "good" hardware has disconnected the 1190 1192 * device from USB; the host won't see it any more. ··· 1679 1677 gadgetfs_suspend (struct usb_gadget *gadget) 1680 1678 { 1681 1679 struct dev_data *dev = get_gadget_data (gadget); 1680 + unsigned long flags; 1682 1681 1683 1682 INFO (dev, "suspended from state %d\n", dev->state); 1684 - spin_lock (&dev->lock); 1683 + spin_lock_irqsave(&dev->lock, flags); 1685 1684 switch (dev->state) { 1686 1685 case STATE_DEV_SETUP: // VERY odd... host died?? 1687 1686 case STATE_DEV_CONNECTED: ··· 1693 1690 default: 1694 1691 break; 1695 1692 } 1696 - spin_unlock (&dev->lock); 1693 + spin_unlock_irqrestore(&dev->lock, flags); 1697 1694 } 1698 1695 1699 1696 static struct usb_gadget_driver gadgetfs_driver = {
+4 -9
drivers/usb/gadget/udc/dummy_hcd.c
··· 442 442 /* Report reset and disconnect events to the driver */ 443 443 if (dum->driver && (disconnect || reset)) { 444 444 stop_activity(dum); 445 - spin_unlock(&dum->lock); 446 445 if (reset) 447 446 usb_gadget_udc_reset(&dum->gadget, dum->driver); 448 447 else 449 448 dum->driver->disconnect(&dum->gadget); 450 - spin_lock(&dum->lock); 451 449 } 452 450 } else if (dum_hcd->active != dum_hcd->old_active) { 453 - if (dum_hcd->old_active && dum->driver->suspend) { 454 - spin_unlock(&dum->lock); 451 + if (dum_hcd->old_active && dum->driver->suspend) 455 452 dum->driver->suspend(&dum->gadget); 456 - spin_lock(&dum->lock); 457 - } else if (!dum_hcd->old_active && dum->driver->resume) { 458 - spin_unlock(&dum->lock); 453 + else if (!dum_hcd->old_active && dum->driver->resume) 459 454 dum->driver->resume(&dum->gadget); 460 - spin_lock(&dum->lock); 461 - } 462 455 } 463 456 464 457 dum_hcd->old_status = dum_hcd->port_status; ··· 976 983 struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); 977 984 struct dummy *dum = dum_hcd->dum; 978 985 986 + spin_lock_irq(&dum->lock); 979 987 dum->driver = NULL; 988 + spin_unlock_irq(&dum->lock); 980 989 981 990 return 0; 982 991 }
+1 -8
drivers/usb/gadget/udc/net2280.c
··· 2470 2470 nuke(&dev->ep[i]); 2471 2471 2472 2472 /* report disconnect; the driver is already quiesced */ 2473 - if (driver) { 2474 - spin_unlock(&dev->lock); 2473 + if (driver) 2475 2474 driver->disconnect(&dev->gadget); 2476 - spin_lock(&dev->lock); 2477 - } 2478 2475 2479 2476 usb_reinit(dev); 2480 2477 } ··· 3345 3348 BIT(PCI_RETRY_ABORT_INTERRUPT)) 3346 3349 3347 3350 static void handle_stat1_irqs(struct net2280 *dev, u32 stat) 3348 - __releases(dev->lock) 3349 - __acquires(dev->lock) 3350 3351 { 3351 3352 struct net2280_ep *ep; 3352 3353 u32 tmp, num, mask, scratch; ··· 3385 3390 if (disconnect || reset) { 3386 3391 stop_activity(dev, dev->driver); 3387 3392 ep0_start(dev); 3388 - spin_unlock(&dev->lock); 3389 3393 if (reset) 3390 3394 usb_gadget_udc_reset 3391 3395 (&dev->gadget, dev->driver); 3392 3396 else 3393 3397 (dev->driver->disconnect) 3394 3398 (&dev->gadget); 3395 - spin_lock(&dev->lock); 3396 3399 return; 3397 3400 } 3398 3401 }
+32 -13
drivers/usb/gadget/udc/renesas_usb3.c
··· 623 623 { 624 624 usb3_disconnect(usb3); 625 625 usb3_write(usb3, 0, USB3_P0_INT_ENA); 626 - usb3_write(usb3, 0, USB3_PN_INT_ENA); 627 626 usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA); 628 627 usb3_write(usb3, 0, USB3_USB_INT_ENA_1); 629 628 usb3_write(usb3, 0, USB3_USB_INT_ENA_2); ··· 1474 1475 struct renesas_usb3_request *usb3_req, 1475 1476 int status) 1476 1477 { 1477 - usb3_pn_stop(usb3); 1478 + unsigned long flags; 1479 + 1480 + spin_lock_irqsave(&usb3->lock, flags); 1481 + if (usb3_pn_change(usb3, usb3_ep->num)) 1482 + usb3_pn_stop(usb3); 1483 + spin_unlock_irqrestore(&usb3->lock, flags); 1484 + 1478 1485 usb3_disable_pipe_irq(usb3, usb3_ep->num); 1479 1486 usb3_request_done(usb3_ep, usb3_req, status); 1480 1487 ··· 1509 1504 { 1510 1505 struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num); 1511 1506 struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); 1507 + bool done = false; 1512 1508 1513 1509 if (!usb3_req) 1514 1510 return; 1511 + 1512 + spin_lock(&usb3->lock); 1513 + if (usb3_pn_change(usb3, num)) 1514 + goto out; 1515 1515 1516 1516 if (usb3_ep->dir_in) { 1517 1517 /* Do not stop the IN pipe here to detect LSTTR interrupt */ ··· 1524 1514 usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA); 1525 1515 } else { 1526 1516 if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ)) 1527 - usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); 1517 + done = true; 1528 1518 } 1519 + 1520 + out: 1521 + /* need to unlock because usb3_request_done_pipen() locks it */ 1522 + spin_unlock(&usb3->lock); 1523 + 1524 + if (done) 1525 + usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); 1529 1526 } 1530 1527 1531 1528 static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num) 1532 1529 { 1533 1530 u32 pn_int_sta; 1534 1531 1535 - if (usb3_pn_change(usb3, num) < 0) 1532 + spin_lock(&usb3->lock); 1533 + if (usb3_pn_change(usb3, num) < 0) { 1534 + spin_unlock(&usb3->lock); 1536 1535 return; 1536 + } 1537 1537 1538 1538 pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA); 1539 1539 pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA); 1540 1540 usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA); 1541 + spin_unlock(&usb3->lock); 1541 1542 if (pn_int_sta & PN_INT_LSTTR) 1542 1543 usb3_irq_epc_pipen_lsttr(usb3, num); 1543 1544 if (pn_int_sta & PN_INT_BFRDY) ··· 1681 1660 1682 1661 spin_lock_irqsave(&usb3->lock, flags); 1683 1662 if (!usb3_pn_change(usb3, usb3_ep->num)) { 1663 + usb3_write(usb3, 0, USB3_PN_INT_ENA); 1684 1664 usb3_write(usb3, 0, USB3_PN_RAMMAP); 1685 1665 usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON); 1686 1666 } ··· 1821 1799 /* hook up the driver */ 1822 1800 usb3->driver = driver; 1823 1801 1802 + pm_runtime_enable(usb3_to_dev(usb3)); 1803 + pm_runtime_get_sync(usb3_to_dev(usb3)); 1804 + 1824 1805 renesas_usb3_init_controller(usb3); 1825 1806 1826 1807 return 0; ··· 1832 1807 static int renesas_usb3_stop(struct usb_gadget *gadget) 1833 1808 { 1834 1809 struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget); 1835 - unsigned long flags; 1836 1810 1837 - spin_lock_irqsave(&usb3->lock, flags); 1838 1811 usb3->softconnect = false; 1839 1812 usb3->gadget.speed = USB_SPEED_UNKNOWN; 1840 1813 usb3->driver = NULL; 1841 1814 renesas_usb3_stop_controller(usb3); 1842 - spin_unlock_irqrestore(&usb3->lock, flags); 1815 + 1816 + pm_runtime_put(usb3_to_dev(usb3)); 1817 + pm_runtime_disable(usb3_to_dev(usb3)); 1843 1818 1844 1819 return 0; 1845 1820 } ··· 1915 1890 struct renesas_usb3 *usb3 = platform_get_drvdata(pdev); 1916 1891 1917 1892 device_remove_file(&pdev->dev, &dev_attr_role); 
1918 - 1919 - pm_runtime_put(&pdev->dev); 1920 - pm_runtime_disable(&pdev->dev); 1921 1893 1922 1894 usb_del_gadget_udc(&usb3->gadget); 1923 1895 ··· 2120 2098 goto err_dev_create; 2121 2099 2122 2100 usb3->workaround_for_vbus = priv->workaround_for_vbus; 2123 - 2124 - pm_runtime_enable(&pdev->dev); 2125 - pm_runtime_get_sync(&pdev->dev); 2126 2101 2127 2102 dev_info(&pdev->dev, "probed\n"); 2128 2103
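Aside (illustration, not from the patch): the renesas_usb3 hunks above take usb3->lock around usb3_pn_change() and the PN_* register accesses that follow it, which suggests the per-pipe registers are reached through a window chosen by a shared pipe-select step, so the selection and the access must not interleave between contexts. The userspace sketch below only models that pattern; the simulated registers, write_pipe_int_ena() and the pthread mutex are invented stand-ins, not the driver's API.

/* Userspace model of "select a pipe, then touch its registers under one lock".
 * Build with: cc pipe_window.c -lpthread */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pipe_sel;		/* simulated pipe-select register */
static int pn_int_ena[2];	/* simulated per-pipe interrupt-enable registers */

/* both steps sit under the same lock so another context cannot re-select
 * the window between the selection and the per-pipe write */
static void write_pipe_int_ena(int num, int val)
{
	pthread_mutex_lock(&lock);
	pipe_sel = num;			/* plays the role of the pipe change */
	pn_int_ena[pipe_sel] = val;	/* plays the role of the PN_* write */
	assert(pipe_sel == num);	/* window still points at our pipe */
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	int num = *(int *)arg;

	for (int i = 0; i < 100000; i++)
		write_pipe_int_ena(num, i);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	int p0 = 0, p1 = 1;

	pthread_create(&a, NULL, worker, &p0);
	pthread_create(&b, NULL, worker, &p1);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("no cross-pipe corruption observed\n");
	return 0;
}

Dropping the lock between the two steps would let the other thread re-point the window, which is the interleaving the driver change guards against.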
+5 -2
drivers/usb/host/xhci-mem.c
··· 2119 2119 { 2120 2120 u32 temp, port_offset, port_count; 2121 2121 int i; 2122 - u8 major_revision; 2122 + u8 major_revision, minor_revision; 2123 2123 struct xhci_hub *rhub; 2124 2124 2125 2125 temp = readl(addr); 2126 2126 major_revision = XHCI_EXT_PORT_MAJOR(temp); 2127 + minor_revision = XHCI_EXT_PORT_MINOR(temp); 2127 2128 2128 2129 if (major_revision == 0x03) { 2129 2130 rhub = &xhci->usb3_rhub; ··· 2138 2137 return; 2139 2138 } 2140 2139 rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); 2141 - rhub->min_rev = XHCI_EXT_PORT_MINOR(temp); 2140 + 2141 + if (rhub->min_rev < minor_revision) 2142 + rhub->min_rev = minor_revision; 2142 2143 2143 2144 /* Port offset and count in the third dword, see section 7.2 */ 2144 2145 temp = readl(addr + 2);
+3
drivers/usb/host/xhci-pci.c
··· 201 201 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && 202 202 pdev->device == 0x1042) 203 203 xhci->quirks |= XHCI_BROKEN_STREAMS; 204 + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && 205 + pdev->device == 0x1142) 206 + xhci->quirks |= XHCI_TRUST_TX_LENGTH; 204 207 205 208 if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) 206 209 xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
+5
drivers/usb/musb/musb_dsps.c
··· 245 245 dsps_mod_timer_optional(glue); 246 246 break; 247 247 case OTG_STATE_A_WAIT_BCON: 248 + /* keep VBUS on for host-only mode */ 249 + if (musb->port_mode == MUSB_PORT_MODE_HOST) { 250 + dsps_mod_timer_optional(glue); 251 + break; 252 + } 248 253 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 249 254 skip_session = 1; 250 255 /* fall */
+1 -1
drivers/video/fbdev/core/fbmon.c
··· 1048 1048 1049 1049 for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE; 1050 1050 i++, block += DETAILED_TIMING_DESCRIPTION_SIZE) 1051 - if (PIXEL_CLOCK) 1051 + if (PIXEL_CLOCK != 0) 1052 1052 edt[num++] = block - edid; 1053 1053 1054 1054 /* Yikes, EDID data is totally useless */
+3 -2
drivers/video/fbdev/smscufx.c
··· 1646 1646 dev_dbg(dev->gdev, "%s %s - serial #%s\n", 1647 1647 usbdev->manufacturer, usbdev->product, usbdev->serial); 1648 1648 dev_dbg(dev->gdev, "vid_%04x&pid_%04x&rev_%04x driver's ufx_data struct at %p\n", 1649 - usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, 1650 - usbdev->descriptor.bcdDevice, dev); 1649 + le16_to_cpu(usbdev->descriptor.idVendor), 1650 + le16_to_cpu(usbdev->descriptor.idProduct), 1651 + le16_to_cpu(usbdev->descriptor.bcdDevice), dev); 1651 1652 dev_dbg(dev->gdev, "console enable=%d\n", console); 1652 1653 dev_dbg(dev->gdev, "fb_defio enable=%d\n", fb_defio); 1653 1654
+5 -4
drivers/video/fbdev/udlfb.c
··· 1105 1105 char *bufptr; 1106 1106 struct urb *urb; 1107 1107 1108 - pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n", 1109 - info->node, dev->blank_mode, blank_mode); 1108 + pr_debug("/dev/fb%d FB_BLANK mode %d --> %d\n", 1109 + info->node, dev->blank_mode, blank_mode); 1110 1110 1111 1111 if ((dev->blank_mode == FB_BLANK_POWERDOWN) && 1112 1112 (blank_mode != FB_BLANK_POWERDOWN)) { ··· 1613 1613 pr_info("%s %s - serial #%s\n", 1614 1614 usbdev->manufacturer, usbdev->product, usbdev->serial); 1615 1615 pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n", 1616 - usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, 1617 - usbdev->descriptor.bcdDevice, dev); 1616 + le16_to_cpu(usbdev->descriptor.idVendor), 1617 + le16_to_cpu(usbdev->descriptor.idProduct), 1618 + le16_to_cpu(usbdev->descriptor.bcdDevice), dev); 1618 1619 pr_info("console enable=%d\n", console); 1619 1620 pr_info("fb_defio enable=%d\n", fb_defio); 1620 1621 pr_info("shadow enable=%d\n", shadow);
+3 -5
drivers/video/fbdev/via/viafbdev.c
··· 1630 1630 } 1631 1631 static void viafb_remove_proc(struct viafb_shared *shared) 1632 1632 { 1633 - struct proc_dir_entry *viafb_entry = shared->proc_entry, 1634 - *iga1_entry = shared->iga1_proc_entry, 1635 - *iga2_entry = shared->iga2_proc_entry; 1633 + struct proc_dir_entry *viafb_entry = shared->proc_entry; 1636 1634 1637 1635 if (!viafb_entry) 1638 1636 return; 1639 1637 1640 - remove_proc_entry("output_devices", iga2_entry); 1638 + remove_proc_entry("output_devices", shared->iga2_proc_entry); 1641 1639 remove_proc_entry("iga2", viafb_entry); 1642 - remove_proc_entry("output_devices", iga1_entry); 1640 + remove_proc_entry("output_devices", shared->iga1_proc_entry); 1643 1641 remove_proc_entry("iga1", viafb_entry); 1644 1642 remove_proc_entry("supported_output_devices", viafb_entry); 1645 1643
+7
drivers/virtio/virtio_balloon.c
··· 663 663 } 664 664 #endif 665 665 666 + static int virtballoon_validate(struct virtio_device *vdev) 667 + { 668 + __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); 669 + return 0; 670 + } 671 + 666 672 static unsigned int features[] = { 667 673 VIRTIO_BALLOON_F_MUST_TELL_HOST, 668 674 VIRTIO_BALLOON_F_STATS_VQ, ··· 681 675 .driver.name = KBUILD_MODNAME, 682 676 .driver.owner = THIS_MODULE, 683 677 .id_table = id_table, 678 + .validate = virtballoon_validate, 684 679 .probe = virtballoon_probe, 685 680 .remove = virtballoon_remove, 686 681 .config_changed = virtballoon_changed,
+2 -2
drivers/xen/privcmd.c
··· 362 362 st->global_error = 1; 363 363 } 364 364 } 365 - st->va += PAGE_SIZE * nr; 366 - st->index += nr; 365 + st->va += XEN_PAGE_SIZE * nr; 366 + st->index += nr / XEN_PFN_PER_PAGE; 367 367 368 368 return 0; 369 369 }
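Aside (illustration, not from the patch): the privcmd fix above advances the mapping state in Xen's 4 KiB granules instead of the kernel's PAGE_SIZE, which matters when the kernel page is larger than 4 KiB. The sizes below are assumptions (a 64 KiB-page kernel) and the macro names are local stand-ins, not the kernel's XEN_PAGE_SIZE/XEN_PFN_PER_PAGE definitions.

/* Arithmetic sketch of the accounting change: one 4 KiB granule per Xen
 * frame, one page-cache index per group of granules that fills a kernel page. */
#include <stdio.h>

#define XEN_GRANULE	4096UL
#define KERN_PAGE	65536UL			/* e.g. a 64 KiB-page arm64 kernel */
#define XEN_PFN_PER_KERN_PAGE	(KERN_PAGE / XEN_GRANULE)

int main(void)
{
	unsigned long nr = 16;	/* one batch: 16 Xen frames == one kernel page */
	unsigned long va = 0x100000, index = 0;

	/* old accounting: a whole kernel page and index step per Xen frame */
	printf("old: va += %lu, index += %lu\n", KERN_PAGE * nr, nr);

	/* fixed accounting: one granule per frame, one index per kernel page */
	va += XEN_GRANULE * nr;
	index += nr / XEN_PFN_PER_KERN_PAGE;
	printf("new: va += %lu (now 0x%lx), index += %lu\n",
	       XEN_GRANULE * nr, va, index);
	return 0;
}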
+1 -1
fs/autofs4/dev-ioctl.c
··· 344 344 int status; 345 345 346 346 token = (autofs_wqt_t) param->fail.token; 347 - status = param->fail.status ? param->fail.status : -ENOENT; 347 + status = param->fail.status < 0 ? param->fail.status : -ENOENT; 348 348 return autofs4_wait_release(sbi, token, status); 349 349 } 350 350
+4 -1
fs/block_dev.c
··· 263 263 kfree(vecs); 264 264 265 265 if (unlikely(bio.bi_error)) 266 - return bio.bi_error; 266 + ret = bio.bi_error; 267 + 268 + bio_uninit(&bio); 269 + 267 270 return ret; 268 271 } 269 272
+2 -2
fs/btrfs/ctree.h
··· 2563 2563 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info, 2564 2564 unsigned num_items) 2565 2565 { 2566 - return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; 2566 + return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; 2567 2567 } 2568 2568 2569 2569 /* ··· 2573 2573 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info, 2574 2574 unsigned num_items) 2575 2575 { 2576 - return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; 2576 + return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; 2577 2577 } 2578 2578 2579 2579 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
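Aside: the only change in the two helpers above is widening the first operand to u64 before the multiplication. The standalone sketch below shows why that matters; the nodesize and item count are made-up example values, not btrfs defaults.

/* Illustrative only: 32-bit multiplication wraps before the result is
 * widened, while casting the first operand forces 64-bit arithmetic. */
#include <stdio.h>
#include <stdint.h>

#define MAX_LEVEL 8	/* stands in for BTRFS_MAX_LEVEL */

int main(void)
{
	uint32_t nodesize = 65536;	/* e.g. a 64 KiB metadata node */
	unsigned num_items = 10000;

	/* the product exceeds 2^32, so the 32-bit intermediate wraps */
	uint64_t truncated = nodesize * MAX_LEVEL * 2 * num_items;

	/* widening the first operand keeps the whole chain in 64 bits */
	uint64_t correct = (uint64_t)nodesize * MAX_LEVEL * 2 * num_items;

	printf("without cast: %llu\n", (unsigned long long)truncated);
	printf("with cast   : %llu\n", (unsigned long long)correct);
	return 0;
}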
+1 -1
fs/btrfs/dir-item.c
··· 468 468 469 469 if (btrfs_dir_name_len(leaf, dir_item) > namelen) { 470 470 btrfs_crit(fs_info, "invalid dir item name len: %u", 471 - (unsigned)btrfs_dir_data_len(leaf, dir_item)); 471 + (unsigned)btrfs_dir_name_len(leaf, dir_item)); 472 472 return 1; 473 473 } 474 474
+6 -4
fs/btrfs/disk-io.c
··· 3467 3467 * we fua the first super. The others we allow 3468 3468 * to go down lazy. 3469 3469 */ 3470 - if (i == 0) 3471 - ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh); 3472 - else 3470 + if (i == 0) { 3471 + ret = btrfsic_submit_bh(REQ_OP_WRITE, 3472 + REQ_SYNC | REQ_FUA, bh); 3473 + } else { 3473 3474 ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh); 3475 + } 3474 3476 if (ret) 3475 3477 errors++; 3476 3478 } ··· 3537 3535 3538 3536 bio->bi_end_io = btrfs_end_empty_barrier; 3539 3537 bio->bi_bdev = device->bdev; 3540 - bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 3538 + bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; 3541 3539 init_completion(&device->flush_wait); 3542 3540 bio->bi_private = &device->flush_wait; 3543 3541 device->flush_bio = bio;
+4 -3
fs/btrfs/extent-tree.c
··· 3993 3993 info->space_info_kobj, "%s", 3994 3994 alloc_name(found->flags)); 3995 3995 if (ret) { 3996 + percpu_counter_destroy(&found->total_bytes_pinned); 3996 3997 kfree(found); 3997 3998 return ret; 3998 3999 } ··· 4845 4844 spin_unlock(&delayed_rsv->lock); 4846 4845 4847 4846 commit: 4848 - trans = btrfs_join_transaction(fs_info->fs_root); 4847 + trans = btrfs_join_transaction(fs_info->extent_root); 4849 4848 if (IS_ERR(trans)) 4850 4849 return -ENOSPC; 4851 4850 ··· 4863 4862 struct btrfs_space_info *space_info, u64 num_bytes, 4864 4863 u64 orig_bytes, int state) 4865 4864 { 4866 - struct btrfs_root *root = fs_info->fs_root; 4865 + struct btrfs_root *root = fs_info->extent_root; 4867 4866 struct btrfs_trans_handle *trans; 4868 4867 int nr; 4869 4868 int ret = 0; ··· 5063 5062 int flush_state = FLUSH_DELAYED_ITEMS_NR; 5064 5063 5065 5064 spin_lock(&space_info->lock); 5066 - to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root, 5065 + to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root, 5067 5066 space_info); 5068 5067 if (!to_reclaim) { 5069 5068 spin_unlock(&space_info->lock);
+123 -3
fs/btrfs/extent_io.c
··· 2458 2458 if (!uptodate) {
 2459 2459 ClearPageUptodate(page);
 2460 2460 SetPageError(page);
 2461 - ret = ret < 0 ? ret : -EIO;
 2461 + ret = err < 0 ? err : -EIO;
 2462 2462 mapping_set_error(page->mapping, ret);
 2463 2463 }
 2464 2464 }
··· 4377 4377 return NULL;
 4378 4378 }
 4379 4379 
 4380 + /*
 4381 + * To cache previous fiemap extent
 4382 + *
 4383 + * Will be used for merging fiemap extent
 4384 + */
 4385 + struct fiemap_cache {
 4386 + u64 offset;
 4387 + u64 phys;
 4388 + u64 len;
 4389 + u32 flags;
 4390 + bool cached;
 4391 + };
 4392 + 
 4393 + /*
 4394 + * Helper to submit fiemap extent.
 4395 + *
 4396 + * Will try to merge current fiemap extent specified by @offset, @phys,
 4397 + * @len and @flags with the cached one.
 4398 + * Only when we fail to merge will the cached one be submitted as a
 4399 + * fiemap extent.
 4400 + *
 4401 + * Return value is the same as fiemap_fill_next_extent().
 4402 + */
 4403 + static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
 4404 + struct fiemap_cache *cache,
 4405 + u64 offset, u64 phys, u64 len, u32 flags)
 4406 + {
 4407 + int ret = 0;
 4408 + 
 4409 + if (!cache->cached)
 4410 + goto assign;
 4411 + 
 4412 + /*
 4413 + * Sanity check, extent_fiemap() should have ensured that the new
 4414 + * fiemap extent won't overlap with the cached one.
 4415 + * Not recoverable.
 4416 + *
 4417 + * NOTE: Physical address can overlap, due to compression
 4418 + */
 4419 + if (cache->offset + cache->len > offset) {
 4420 + WARN_ON(1);
 4421 + return -EINVAL;
 4422 + }
 4423 + 
 4424 + /*
 4425 + * Only merges fiemap extents if
 4426 + * 1) Their logical addresses are continuous
 4427 + *
 4428 + * 2) Their physical addresses are continuous
 4429 + * So truly compressed (physical size smaller than logical size)
 4430 + * extents won't get merged with each other
 4431 + *
 4432 + * 3) Share same flags except FIEMAP_EXTENT_LAST
 4433 + * So regular extent won't get merged with prealloc extent
 4434 + */
 4435 + if (cache->offset + cache->len == offset &&
 4436 + cache->phys + cache->len == phys &&
 4437 + (cache->flags & ~FIEMAP_EXTENT_LAST) ==
 4438 + (flags & ~FIEMAP_EXTENT_LAST)) {
 4439 + cache->len += len;
 4440 + cache->flags |= flags;
 4441 + goto try_submit_last;
 4442 + }
 4443 + 
 4444 + /* Not mergeable, need to submit cached one */
 4445 + ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
 4446 + cache->len, cache->flags);
 4447 + cache->cached = false;
 4448 + if (ret)
 4449 + return ret;
 4450 + assign:
 4451 + cache->cached = true;
 4452 + cache->offset = offset;
 4453 + cache->phys = phys;
 4454 + cache->len = len;
 4455 + cache->flags = flags;
 4456 + try_submit_last:
 4457 + if (cache->flags & FIEMAP_EXTENT_LAST) {
 4458 + ret = fiemap_fill_next_extent(fieinfo, cache->offset,
 4459 + cache->phys, cache->len, cache->flags);
 4460 + cache->cached = false;
 4461 + }
 4462 + return ret;
 4463 + }
 4464 + 
 4465 + /*
 4466 + * Sanity check for fiemap cache
 4467 + *
 4468 + * All fiemap cache should be submitted by emit_fiemap_extent()
 4469 + * Iteration should be terminated either by last fiemap extent or
 4470 + * fieinfo->fi_extents_max.
 4471 + * So no cached fiemap should exist.
4472 + */
 4473 + static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
 4474 + struct fiemap_extent_info *fieinfo,
 4475 + struct fiemap_cache *cache)
 4476 + {
 4477 + int ret;
 4478 + 
 4479 + if (!cache->cached)
 4480 + return 0;
 4481 + 
 4482 + /* Small and recoverable problem, only to inform the developer */
 4483 + #ifdef CONFIG_BTRFS_DEBUG
 4484 + WARN_ON(1);
 4485 + #endif
 4486 + btrfs_warn(fs_info,
 4487 + "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
 4488 + cache->offset, cache->phys, cache->len, cache->flags);
 4489 + ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
 4490 + cache->len, cache->flags);
 4491 + cache->cached = false;
 4492 + if (ret > 0)
 4493 + ret = 0;
 4494 + return ret;
 4495 + }
 4496 + 
 4380 4497 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 4381 4498 __u64 start, __u64 len, get_extent_t *get_extent)
 4382 4499 {
··· 4511 4394 struct extent_state *cached_state = NULL;
 4512 4395 struct btrfs_path *path;
 4513 4396 struct btrfs_root *root = BTRFS_I(inode)->root;
 4397 + struct fiemap_cache cache = { 0 };
 4514 4398 int end = 0;
 4515 4399 u64 em_start = 0;
 4516 4400 u64 em_len = 0;
··· 4691 4573 flags |= FIEMAP_EXTENT_LAST;
 4692 4574 end = 1;
 4693 4575 }
 4694 - ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
 4695 - em_len, flags);
 4576 + ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
 4577 + em_len, flags);
 4696 4578 if (ret) {
 4697 4579 if (ret == 1)
 4698 4580 ret = 0;
··· 4700 4582 }
 4701 4583 }
 4702 4584 out_free:
 4585 + if (!ret)
 4586 + ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
 4703 4587 free_extent_map(em);
 4704 4588 out:
 4705 4589 btrfs_free_path(path);
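Aside: the merge rule that emit_fiemap_extent() applies above — coalesce the new extent into the cached one only when both the logical and the physical ranges are contiguous and the flags match apart from the LAST marker — can be exercised in a few lines of ordinary userspace C. The sketch below mirrors those conditions but is not the btrfs code path; extent_rec, report_extent() and emit() are invented names, and error handling is omitted.

/* Standalone model of the cache-and-merge step used for fiemap reporting. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FLAG_LAST 0x1	/* stands in for FIEMAP_EXTENT_LAST */

struct extent_rec {
	uint64_t offset, phys, len;
	uint32_t flags;
	bool cached;
};

/* pretend "hand the extent to user space" step */
static void report_extent(const struct extent_rec *e)
{
	printf("extent: logical=%llu phys=%llu len=%llu flags=0x%x\n",
	       (unsigned long long)e->offset, (unsigned long long)e->phys,
	       (unsigned long long)e->len, e->flags);
}

/* merge with the cached extent when both ranges are contiguous and the
 * flags (ignoring LAST) match; otherwise flush the cache first */
static void emit(struct extent_rec *cache, uint64_t offset, uint64_t phys,
		 uint64_t len, uint32_t flags)
{
	if (cache->cached &&
	    cache->offset + cache->len == offset &&
	    cache->phys + cache->len == phys &&
	    (cache->flags & ~FLAG_LAST) == (flags & ~FLAG_LAST)) {
		cache->len += len;
		cache->flags |= flags;
	} else {
		if (cache->cached)
			report_extent(cache);
		cache->offset = offset;
		cache->phys = phys;
		cache->len = len;
		cache->flags = flags;
		cache->cached = true;
	}
	if (cache->flags & FLAG_LAST) {	/* nothing can follow the last extent */
		report_extent(cache);
		cache->cached = false;
	}
}

int main(void)
{
	struct extent_rec cache = { .cached = false };

	emit(&cache, 0, 4096, 4096, 0);		/* merges with the next call */
	emit(&cache, 4096, 8192, 4096, 0);
	emit(&cache, 16384, 32768, 4096, FLAG_LAST);	/* not contiguous: flush */
	return 0;
}

Running it reports two extents instead of three: the first two calls collapse into one record, the third is flushed separately because its logical range is not adjacent to the cached one.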
+4 -1
fs/btrfs/hash.c
··· 38 38 { 39 39 SHASH_DESC_ON_STACK(shash, tfm); 40 40 u32 *ctx = (u32 *)shash_desc_ctx(shash); 41 + u32 retval; 41 42 int err; 42 43 43 44 shash->tfm = tfm; ··· 48 47 err = crypto_shash_update(shash, address, length); 49 48 BUG_ON(err); 50 49 51 - return *ctx; 50 + retval = *ctx; 51 + barrier_data(ctx); 52 + return retval; 52 53 }
+3 -3
fs/btrfs/inode.c
··· 2952 2952 2953 2953 ret = test_range_bit(io_tree, ordered_extent->file_offset, 2954 2954 ordered_extent->file_offset + ordered_extent->len - 1, 2955 - EXTENT_DEFRAG, 1, cached_state); 2955 + EXTENT_DEFRAG, 0, cached_state); 2956 2956 if (ret) { 2957 2957 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item); 2958 2958 if (0 && last_snapshot >= BTRFS_I(inode)->generation) ··· 7483 7483 int found = false; 7484 7484 void **pagep = NULL; 7485 7485 struct page *page = NULL; 7486 - int start_idx; 7487 - int end_idx; 7486 + unsigned long start_idx; 7487 + unsigned long end_idx; 7488 7488 7489 7489 start_idx = start >> PAGE_SHIFT; 7490 7490
+1
fs/ceph/acl.c
··· 131 131 } 132 132 133 133 if (new_mode != old_mode) { 134 + newattrs.ia_ctime = current_time(inode); 134 135 newattrs.ia_mode = new_mode; 135 136 newattrs.ia_valid = ATTR_MODE; 136 137 ret = __ceph_setattr(inode, &newattrs);
+4
fs/ceph/export.c
··· 91 91 ceph_mdsc_put_request(req); 92 92 if (!inode) 93 93 return ERR_PTR(-ESTALE); 94 + if (inode->i_nlink == 0) { 95 + iput(inode); 96 + return ERR_PTR(-ESTALE); 97 + } 94 98 } 95 99 96 100 return d_obtain_alias(inode);
+2 -3
fs/ceph/inode.c
··· 2022 2022 attr->ia_size > inode->i_size) { 2023 2023 i_size_write(inode, attr->ia_size); 2024 2024 inode->i_blocks = calc_inode_blocks(attr->ia_size); 2025 - inode->i_ctime = attr->ia_ctime; 2026 2025 ci->i_reported_size = attr->ia_size; 2027 2026 dirtied |= CEPH_CAP_FILE_EXCL; 2028 2027 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || ··· 2043 2044 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 2044 2045 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 2045 2046 only ? "ctime only" : "ignored"); 2046 - inode->i_ctime = attr->ia_ctime; 2047 2047 if (only) { 2048 2048 /* 2049 2049 * if kernel wants to dirty ctime but nothing else, ··· 2065 2067 if (dirtied) { 2066 2068 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, 2067 2069 &prealloc_cf); 2068 - inode->i_ctime = current_time(inode); 2070 + inode->i_ctime = attr->ia_ctime; 2069 2071 } 2070 2072 2071 2073 release &= issued; ··· 2083 2085 req->r_inode_drop = release; 2084 2086 req->r_args.setattr.mask = cpu_to_le32(mask); 2085 2087 req->r_num_caps = 1; 2088 + req->r_stamp = attr->ia_ctime; 2086 2089 err = ceph_mdsc_do_request(mdsc, NULL, req); 2087 2090 } 2088 2091 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
+1 -3
fs/ceph/mds_client.c
··· 1687 1687 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) 1688 1688 { 1689 1689 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); 1690 - struct timespec ts; 1691 1690 1692 1691 if (!req) 1693 1692 return ERR_PTR(-ENOMEM); ··· 1705 1706 init_completion(&req->r_safe_completion); 1706 1707 INIT_LIST_HEAD(&req->r_unsafe_item); 1707 1708 1708 - ktime_get_real_ts(&ts); 1709 - req->r_stamp = timespec_trunc(ts, mdsc->fsc->sb->s_time_gran); 1709 + req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran); 1710 1710 1711 1711 req->r_op = op; 1712 1712 req->r_direct_mode = mode;
+1 -1
fs/cifs/file.c
··· 3271 3271 if (!is_sync_kiocb(iocb)) 3272 3272 ctx->iocb = iocb; 3273 3273 3274 - if (to->type & ITER_IOVEC) 3274 + if (to->type == ITER_IOVEC) 3275 3275 ctx->should_dirty = true; 3276 3276 3277 3277 rc = setup_aio_ctx_iter(ctx, to, READ);
+1 -1
fs/cifs/misc.c
··· 810 810 811 811 if (!pages) { 812 812 pages = vmalloc(max_pages * sizeof(struct page *)); 813 - if (!bv) { 813 + if (!pages) { 814 814 kvfree(bv); 815 815 return -ENOMEM; 816 816 }
+7 -2
fs/cifs/smb1ops.c
··· 849 849 struct cifs_fid *fid, __u16 search_flags, 850 850 struct cifs_search_info *srch_inf) 851 851 { 852 - return CIFSFindFirst(xid, tcon, path, cifs_sb, 853 - &fid->netfid, search_flags, srch_inf, true); 852 + int rc; 853 + 854 + rc = CIFSFindFirst(xid, tcon, path, cifs_sb, 855 + &fid->netfid, search_flags, srch_inf, true); 856 + if (rc) 857 + cifs_dbg(FYI, "find first failed=%d\n", rc); 858 + return rc; 854 859 } 855 860 856 861 static int
+5 -3
fs/cifs/smb2ops.c
··· 982 982 rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); 983 983 kfree(utf16_path); 984 984 if (rc) { 985 - cifs_dbg(VFS, "open dir failed\n"); 985 + cifs_dbg(FYI, "open dir failed rc=%d\n", rc); 986 986 return rc; 987 987 } 988 988 ··· 992 992 rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, 993 993 fid->volatile_fid, 0, srch_inf); 994 994 if (rc) { 995 - cifs_dbg(VFS, "query directory failed\n"); 995 + cifs_dbg(FYI, "query directory failed rc=%d\n", rc); 996 996 SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); 997 997 } 998 998 return rc; ··· 1809 1809 1810 1810 sg = init_sg(rqst, sign); 1811 1811 if (!sg) { 1812 - cifs_dbg(VFS, "%s: Failed to init sg %d", __func__, rc); 1812 + cifs_dbg(VFS, "%s: Failed to init sg", __func__); 1813 + rc = -ENOMEM; 1813 1814 goto free_req; 1814 1815 } 1815 1816 ··· 1818 1817 iv = kzalloc(iv_len, GFP_KERNEL); 1819 1818 if (!iv) { 1820 1819 cifs_dbg(VFS, "%s: Failed to alloc IV", __func__); 1820 + rc = -ENOMEM; 1821 1821 goto free_sg; 1822 1822 } 1823 1823 iv[0] = 3;
-2
fs/cifs/xattr.c
··· 188 188 pcreatetime = (__u64 *)value; 189 189 *pcreatetime = CIFS_I(inode)->createtime; 190 190 return sizeof(__u64); 191 - 192 - return rc; 193 191 } 194 192 195 193
+8
fs/configfs/item.c
··· 138 138 } 139 139 EXPORT_SYMBOL(config_item_get); 140 140 141 + struct config_item *config_item_get_unless_zero(struct config_item *item) 142 + { 143 + if (item && kref_get_unless_zero(&item->ci_kref)) 144 + return item; 145 + return NULL; 146 + } 147 + EXPORT_SYMBOL(config_item_get_unless_zero); 148 + 141 149 static void config_item_cleanup(struct config_item *item) 142 150 { 143 151 struct config_item_type *t = item->ci_type;
+1 -2
fs/configfs/symlink.c
··· 83 83 ret = -ENOMEM; 84 84 sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); 85 85 if (sl) { 86 - sl->sl_target = config_item_get(item); 87 86 spin_lock(&configfs_dirent_lock); 88 87 if (target_sd->s_type & CONFIGFS_USET_DROPPING) { 89 88 spin_unlock(&configfs_dirent_lock); 90 - config_item_put(item); 91 89 kfree(sl); 92 90 return -ENOENT; 93 91 } 92 + sl->sl_target = config_item_get(item); 94 93 list_add(&sl->sl_list, &target_sd->s_links); 95 94 spin_unlock(&configfs_dirent_lock); 96 95 ret = configfs_create_link(sl, parent_item->ci_dentry,
+24
fs/dax.c
··· 859 859 if (ret < 0) 860 860 goto out; 861 861 } 862 + start_index = indices[pvec.nr - 1] + 1; 862 863 } 863 864 out: 864 865 put_dax(dax_dev); ··· 1156 1155 } 1157 1156 1158 1157 /* 1158 + * It is possible, particularly with mixed reads & writes to private 1159 + * mappings, that we have raced with a PMD fault that overlaps with 1160 + * the PTE we need to set up. If so just return and the fault will be 1161 + * retried. 1162 + */ 1163 + if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { 1164 + vmf_ret = VM_FAULT_NOPAGE; 1165 + goto unlock_entry; 1166 + } 1167 + 1168 + /* 1159 1169 * Note that we don't bother to use iomap_apply here: DAX required 1160 1170 * the file system block size to be equal the page size, which means 1161 1171 * that we never have to deal with more than a single extent here. ··· 1408 1396 entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); 1409 1397 if (IS_ERR(entry)) 1410 1398 goto fallback; 1399 + 1400 + /* 1401 + * It is possible, particularly with mixed reads & writes to private 1402 + * mappings, that we have raced with a PTE fault that overlaps with 1403 + * the PMD we need to set up. If so just return and the fault will be 1404 + * retried. 1405 + */ 1406 + if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && 1407 + !pmd_devmap(*vmf->pmd)) { 1408 + result = 0; 1409 + goto unlock_entry; 1410 + } 1411 1411 1412 1412 /* 1413 1413 * Note that we don't use iomap_apply here. We aren't doing I/O, only
+4 -6
fs/dcache.c
··· 1494 1494 { 1495 1495 struct detach_data *data = _data; 1496 1496 1497 - if (!data->mountpoint && !data->select.found) 1497 + if (!data->mountpoint && list_empty(&data->select.dispose)) 1498 1498 __d_drop(data->select.start); 1499 1499 } 1500 1500 ··· 1536 1536 1537 1537 d_walk(dentry, &data, detach_and_collect, check_and_drop); 1538 1538 1539 - if (data.select.found) 1539 + if (!list_empty(&data.select.dispose)) 1540 1540 shrink_dentry_list(&data.select.dispose); 1541 + else if (!data.mountpoint) 1542 + return; 1541 1543 1542 1544 if (data.mountpoint) { 1543 1545 detach_mounts(data.mountpoint); 1544 1546 dput(data.mountpoint); 1545 1547 } 1546 - 1547 - if (!data.mountpoint && !data.select.found) 1548 - break; 1549 - 1550 1548 cond_resched(); 1551 1549 } 1552 1550 }
+24 -4
fs/exec.c
··· 220 220 221 221 if (write) { 222 222 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; 223 + unsigned long ptr_size; 223 224 struct rlimit *rlim; 225 + 226 + /* 227 + * Since the stack will hold pointers to the strings, we 228 + * must account for them as well. 229 + * 230 + * The size calculation is the entire vma while each arg page is 231 + * built, so each time we get here it's calculating how far it 232 + * is currently (rather than each call being just the newly 233 + * added size from the arg page). As a result, we need to 234 + * always add the entire size of the pointers, so that on the 235 + * last call to get_arg_page() we'll actually have the entire 236 + * correct size. 237 + */ 238 + ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); 239 + if (ptr_size > ULONG_MAX - size) 240 + goto fail; 241 + size += ptr_size; 224 242 225 243 acct_arg_size(bprm, size / PAGE_SIZE); 226 244 ··· 257 239 * to work from. 258 240 */ 259 241 rlim = current->signal->rlim; 260 - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { 261 - put_page(page); 262 - return NULL; 263 - } 242 + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) 243 + goto fail; 264 244 } 265 245 266 246 return page; 247 + 248 + fail: 249 + put_page(page); 250 + return NULL; 267 251 } 268 252 269 253 static void put_arg_page(struct page *page)
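Aside (not from the patch): the get_arg_page() change above also charges the argv/envp pointer array against the stack limit and guards the addition against wrap-around. A rough userspace rendering of that check follows; args_fit(), the 8 MiB limit and the sample sizes are invented for illustration — only the ptr_size arithmetic and the overflow guard mirror the hunk.

/* Sketch of "strings plus one pointer per argv/envp entry must fit in a
 * quarter of the stack limit", with an overflow-safe addition. */
#include <stdio.h>
#include <stdbool.h>
#include <limits.h>

static bool args_fit(unsigned long strings_size, int argc, int envc,
		     unsigned long stack_limit)
{
	/* the final stack also holds one pointer per argv/envp entry */
	unsigned long ptr_size = (unsigned long)(argc + envc) * sizeof(void *);

	/* guard the addition against wrap-around, as the hunk above does */
	if (ptr_size > ULONG_MAX - strings_size)
		return false;

	return strings_size + ptr_size <= stack_limit / 4;
}

int main(void)
{
	unsigned long limit = 8UL * 1024 * 1024;	/* pretend 8 MiB stack rlimit */

	printf("small argv fits: %d\n", args_fit(64 * 1024, 100, 30, limit));
	printf("huge argv fits:  %d\n",
	       args_fit(3UL * 1024 * 1024, 500000, 30, limit));
	return 0;
}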
+4
fs/ext4/acl.c
··· 4 4 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> 5 5 */ 6 6 7 + #include <linux/quotaops.h> 7 8 #include "ext4_jbd2.h" 8 9 #include "ext4.h" 9 10 #include "xattr.h" ··· 233 232 handle_t *handle; 234 233 int error, retries = 0; 235 234 235 + error = dquot_initialize(inode); 236 + if (error) 237 + return error; 236 238 retry: 237 239 handle = ext4_journal_start(inode, EXT4_HT_XATTR, 238 240 ext4_jbd2_credits_xattr(inode));
-2
fs/ext4/ext4.h
··· 2523 2523 int buf_size, 2524 2524 struct inode *dir, 2525 2525 struct ext4_filename *fname, 2526 - const struct qstr *d_name, 2527 2526 unsigned int offset, 2528 2527 struct ext4_dir_entry_2 **res_dir); 2529 2528 extern int ext4_generic_delete_entry(handle_t *handle, ··· 3006 3007 int *has_inline_data); 3007 3008 extern struct buffer_head *ext4_find_inline_entry(struct inode *dir, 3008 3009 struct ext4_filename *fname, 3009 - const struct qstr *d_name, 3010 3010 struct ext4_dir_entry_2 **res_dir, 3011 3011 int *has_inline_data); 3012 3012 extern int ext4_delete_inline_entry(handle_t *handle,
+42 -43
fs/ext4/extents.c
··· 3413 3413 struct ext4_sb_info *sbi; 3414 3414 struct ext4_extent_header *eh; 3415 3415 struct ext4_map_blocks split_map; 3416 - struct ext4_extent zero_ex; 3416 + struct ext4_extent zero_ex1, zero_ex2; 3417 3417 struct ext4_extent *ex, *abut_ex; 3418 3418 ext4_lblk_t ee_block, eof_block; 3419 3419 unsigned int ee_len, depth, map_len = map->m_len; 3420 3420 int allocated = 0, max_zeroout = 0; 3421 3421 int err = 0; 3422 - int split_flag = 0; 3422 + int split_flag = EXT4_EXT_DATA_VALID2; 3423 3423 3424 3424 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" 3425 3425 "block %llu, max_blocks %u\n", inode->i_ino, ··· 3436 3436 ex = path[depth].p_ext; 3437 3437 ee_block = le32_to_cpu(ex->ee_block); 3438 3438 ee_len = ext4_ext_get_actual_len(ex); 3439 - zero_ex.ee_len = 0; 3439 + zero_ex1.ee_len = 0; 3440 + zero_ex2.ee_len = 0; 3440 3441 3441 3442 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); 3442 3443 ··· 3577 3576 if (ext4_encrypted_inode(inode)) 3578 3577 max_zeroout = 0; 3579 3578 3580 - /* If extent is less than s_max_zeroout_kb, zeroout directly */ 3581 - if (max_zeroout && (ee_len <= max_zeroout)) { 3582 - err = ext4_ext_zeroout(inode, ex); 3583 - if (err) 3584 - goto out; 3585 - zero_ex.ee_block = ex->ee_block; 3586 - zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); 3587 - ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); 3588 - 3589 - err = ext4_ext_get_access(handle, inode, path + depth); 3590 - if (err) 3591 - goto out; 3592 - ext4_ext_mark_initialized(ex); 3593 - ext4_ext_try_to_merge(handle, inode, path, ex); 3594 - err = ext4_ext_dirty(handle, inode, path + path->p_depth); 3595 - goto out; 3596 - } 3597 - 3598 3579 /* 3599 - * four cases: 3580 + * five cases: 3600 3581 * 1. split the extent into three extents. 3601 - * 2. split the extent into two extents, zeroout the first half. 3602 - * 3. split the extent into two extents, zeroout the second half. 3582 + * 2. split the extent into two extents, zeroout the head of the first 3583 + * extent. 3584 + * 3. split the extent into two extents, zeroout the tail of the second 3585 + * extent. 3603 3586 * 4. split the extent into two extents with out zeroout. 3587 + * 5. no splitting needed, just possibly zeroout the head and / or the 3588 + * tail of the extent. 
3604 3589 */ 3605 3590 split_map.m_lblk = map->m_lblk; 3606 3591 split_map.m_len = map->m_len; 3607 3592 3608 - if (max_zeroout && (allocated > map->m_len)) { 3593 + if (max_zeroout && (allocated > split_map.m_len)) { 3609 3594 if (allocated <= max_zeroout) { 3610 - /* case 3 */ 3611 - zero_ex.ee_block = 3612 - cpu_to_le32(map->m_lblk); 3613 - zero_ex.ee_len = cpu_to_le16(allocated); 3614 - ext4_ext_store_pblock(&zero_ex, 3615 - ext4_ext_pblock(ex) + map->m_lblk - ee_block); 3616 - err = ext4_ext_zeroout(inode, &zero_ex); 3595 + /* case 3 or 5 */ 3596 + zero_ex1.ee_block = 3597 + cpu_to_le32(split_map.m_lblk + 3598 + split_map.m_len); 3599 + zero_ex1.ee_len = 3600 + cpu_to_le16(allocated - split_map.m_len); 3601 + ext4_ext_store_pblock(&zero_ex1, 3602 + ext4_ext_pblock(ex) + split_map.m_lblk + 3603 + split_map.m_len - ee_block); 3604 + err = ext4_ext_zeroout(inode, &zero_ex1); 3617 3605 if (err) 3618 3606 goto out; 3619 - split_map.m_lblk = map->m_lblk; 3620 3607 split_map.m_len = allocated; 3621 - } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) { 3622 - /* case 2 */ 3623 - if (map->m_lblk != ee_block) { 3624 - zero_ex.ee_block = ex->ee_block; 3625 - zero_ex.ee_len = cpu_to_le16(map->m_lblk - 3608 + } 3609 + if (split_map.m_lblk - ee_block + split_map.m_len < 3610 + max_zeroout) { 3611 + /* case 2 or 5 */ 3612 + if (split_map.m_lblk != ee_block) { 3613 + zero_ex2.ee_block = ex->ee_block; 3614 + zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk - 3626 3615 ee_block); 3627 - ext4_ext_store_pblock(&zero_ex, 3616 + ext4_ext_store_pblock(&zero_ex2, 3628 3617 ext4_ext_pblock(ex)); 3629 - err = ext4_ext_zeroout(inode, &zero_ex); 3618 + err = ext4_ext_zeroout(inode, &zero_ex2); 3630 3619 if (err) 3631 3620 goto out; 3632 3621 } 3633 3622 3623 + split_map.m_len += split_map.m_lblk - ee_block; 3634 3624 split_map.m_lblk = ee_block; 3635 - split_map.m_len = map->m_lblk - ee_block + map->m_len; 3636 3625 allocated = map->m_len; 3637 3626 } 3638 3627 } ··· 3633 3642 err = 0; 3634 3643 out: 3635 3644 /* If we have gotten a failure, don't zero out status tree */ 3636 - if (!err) 3637 - err = ext4_zeroout_es(inode, &zero_ex); 3645 + if (!err) { 3646 + err = ext4_zeroout_es(inode, &zero_ex1); 3647 + if (!err) 3648 + err = ext4_zeroout_es(inode, &zero_ex2); 3649 + } 3638 3650 return err ? err : allocated; 3639 3651 } 3640 3652 ··· 4877 4883 4878 4884 /* Zero out partial block at the edges of the range */ 4879 4885 ret = ext4_zero_partial_blocks(handle, inode, offset, len); 4886 + if (ret >= 0) 4887 + ext4_update_inode_fsync_trans(handle, inode, 1); 4880 4888 4881 4889 if (file->f_flags & O_SYNC) 4882 4890 ext4_handle_sync(handle); ··· 5565 5569 ext4_handle_sync(handle); 5566 5570 inode->i_mtime = inode->i_ctime = current_time(inode); 5567 5571 ext4_mark_inode_dirty(handle, inode); 5572 + ext4_update_inode_fsync_trans(handle, inode, 1); 5568 5573 5569 5574 out_stop: 5570 5575 ext4_journal_stop(handle); ··· 5739 5742 up_write(&EXT4_I(inode)->i_data_sem); 5740 5743 if (IS_SYNC(inode)) 5741 5744 ext4_handle_sync(handle); 5745 + if (ret >= 0) 5746 + ext4_update_inode_fsync_trans(handle, inode, 1); 5742 5747 5743 5748 out_stop: 5744 5749 ext4_journal_stop(handle);
+16 -38
fs/ext4/file.c
··· 474 474 endoff = (loff_t)end_blk << blkbits; 475 475 476 476 index = startoff >> PAGE_SHIFT; 477 - end = endoff >> PAGE_SHIFT; 477 + end = (endoff - 1) >> PAGE_SHIFT; 478 478 479 479 pagevec_init(&pvec, 0); 480 480 do { 481 481 int i, num; 482 482 unsigned long nr_pages; 483 483 484 - num = min_t(pgoff_t, end - index, PAGEVEC_SIZE); 484 + num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1; 485 485 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, 486 486 (pgoff_t)num); 487 - if (nr_pages == 0) { 488 - if (whence == SEEK_DATA) 489 - break; 490 - 491 - BUG_ON(whence != SEEK_HOLE); 492 - /* 493 - * If this is the first time to go into the loop and 494 - * offset is not beyond the end offset, it will be a 495 - * hole at this offset 496 - */ 497 - if (lastoff == startoff || lastoff < endoff) 498 - found = 1; 487 + if (nr_pages == 0) 499 488 break; 500 - } 501 - 502 - /* 503 - * If this is the first time to go into the loop and 504 - * offset is smaller than the first page offset, it will be a 505 - * hole at this offset. 506 - */ 507 - if (lastoff == startoff && whence == SEEK_HOLE && 508 - lastoff < page_offset(pvec.pages[0])) { 509 - found = 1; 510 - break; 511 - } 512 489 513 490 for (i = 0; i < nr_pages; i++) { 514 491 struct page *page = pvec.pages[i]; 515 492 struct buffer_head *bh, *head; 516 493 517 494 /* 518 - * If the current offset is not beyond the end of given 519 - * range, it will be a hole. 495 + * If current offset is smaller than the page offset, 496 + * there is a hole at this offset. 520 497 */ 521 - if (lastoff < endoff && whence == SEEK_HOLE && 522 - page->index > end) { 498 + if (whence == SEEK_HOLE && lastoff < endoff && 499 + lastoff < page_offset(pvec.pages[i])) { 523 500 found = 1; 524 501 *offset = lastoff; 525 502 goto out; 526 503 } 504 + 505 + if (page->index > end) 506 + goto out; 527 507 528 508 lock_page(page); 529 509 ··· 544 564 unlock_page(page); 545 565 } 546 566 547 - /* 548 - * The no. of pages is less than our desired, that would be a 549 - * hole in there. 550 - */ 551 - if (nr_pages < num && whence == SEEK_HOLE) { 552 - found = 1; 553 - *offset = lastoff; 567 + /* The no. of pages is less than our desired, we are done. */ 568 + if (nr_pages < num) 554 569 break; 555 - } 556 570 557 571 index = pvec.pages[i - 1]->index + 1; 558 572 pagevec_release(&pvec); 559 573 } while (index <= end); 560 574 575 + if (whence == SEEK_HOLE && lastoff < endoff) { 576 + found = 1; 577 + *offset = lastoff; 578 + } 561 579 out: 562 580 pagevec_release(&pvec); 563 581 return found;
+2 -3
fs/ext4/inline.c
··· 1627 1627 1628 1628 struct buffer_head *ext4_find_inline_entry(struct inode *dir, 1629 1629 struct ext4_filename *fname, 1630 - const struct qstr *d_name, 1631 1630 struct ext4_dir_entry_2 **res_dir, 1632 1631 int *has_inline_data) 1633 1632 { ··· 1648 1649 EXT4_INLINE_DOTDOT_SIZE; 1649 1650 inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE; 1650 1651 ret = ext4_search_dir(iloc.bh, inline_start, inline_size, 1651 - dir, fname, d_name, 0, res_dir); 1652 + dir, fname, 0, res_dir); 1652 1653 if (ret == 1) 1653 1654 goto out_find; 1654 1655 if (ret < 0) ··· 1661 1662 inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE; 1662 1663 1663 1664 ret = ext4_search_dir(iloc.bh, inline_start, inline_size, 1664 - dir, fname, d_name, 0, res_dir); 1665 + dir, fname, 0, res_dir); 1665 1666 if (ret == 1) 1666 1667 goto out_find; 1667 1668
+22 -8
fs/ext4/inode.c
··· 2124 2124 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) 2125 2125 { 2126 2126 int len; 2127 - loff_t size = i_size_read(mpd->inode); 2127 + loff_t size; 2128 2128 int err; 2129 2129 2130 2130 BUG_ON(page->index != mpd->first_page); 2131 + clear_page_dirty_for_io(page); 2132 + /* 2133 + * We have to be very careful here! Nothing protects writeback path 2134 + * against i_size changes and the page can be writeably mapped into 2135 + * page tables. So an application can be growing i_size and writing 2136 + * data through mmap while writeback runs. clear_page_dirty_for_io() 2137 + * write-protects our page in page tables and the page cannot get 2138 + * written to again until we release page lock. So only after 2139 + * clear_page_dirty_for_io() we are safe to sample i_size for 2140 + * ext4_bio_write_page() to zero-out tail of the written page. We rely 2141 + * on the barrier provided by TestClearPageDirty in 2142 + * clear_page_dirty_for_io() to make sure i_size is really sampled only 2143 + * after page tables are updated. 2144 + */ 2145 + size = i_size_read(mpd->inode); 2131 2146 if (page->index == size >> PAGE_SHIFT) 2132 2147 len = size & ~PAGE_MASK; 2133 2148 else 2134 2149 len = PAGE_SIZE; 2135 - clear_page_dirty_for_io(page); 2136 2150 err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); 2137 2151 if (!err) 2138 2152 mpd->wbc->nr_to_write--; ··· 3643 3629 get_block_func = ext4_dio_get_block_unwritten_async; 3644 3630 dio_flags = DIO_LOCKING; 3645 3631 } 3646 - #ifdef CONFIG_EXT4_FS_ENCRYPTION 3647 - BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)); 3648 - #endif 3649 3632 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, 3650 3633 get_block_func, ext4_end_io_dio, NULL, 3651 3634 dio_flags); ··· 3724 3713 */ 3725 3714 inode_lock_shared(inode); 3726 3715 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, 3727 - iocb->ki_pos + count); 3716 + iocb->ki_pos + count - 1); 3728 3717 if (ret) 3729 3718 goto out_unlock; 3730 3719 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, ··· 4218 4207 4219 4208 inode->i_mtime = inode->i_ctime = current_time(inode); 4220 4209 ext4_mark_inode_dirty(handle, inode); 4210 + if (ret >= 0) 4211 + ext4_update_inode_fsync_trans(handle, inode, 1); 4221 4212 out_stop: 4222 4213 ext4_journal_stop(handle); 4223 4214 out_dio: ··· 5650 5637 /* No extended attributes present */ 5651 5638 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) || 5652 5639 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) { 5653 - memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0, 5654 - new_extra_isize); 5640 + memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE + 5641 + EXT4_I(inode)->i_extra_isize, 0, 5642 + new_extra_isize - EXT4_I(inode)->i_extra_isize); 5655 5643 EXT4_I(inode)->i_extra_isize = new_extra_isize; 5656 5644 return 0; 5657 5645 }
+14 -9
fs/ext4/mballoc.c
··· 3887 3887 3888 3888 err = ext4_mb_load_buddy(sb, group, &e4b); 3889 3889 if (err) { 3890 - ext4_error(sb, "Error loading buddy information for %u", group); 3890 + ext4_warning(sb, "Error %d loading buddy information for %u", 3891 + err, group); 3891 3892 put_bh(bitmap_bh); 3892 3893 return 0; 3893 3894 } ··· 4045 4044 BUG_ON(pa->pa_type != MB_INODE_PA); 4046 4045 group = ext4_get_group_number(sb, pa->pa_pstart); 4047 4046 4048 - err = ext4_mb_load_buddy(sb, group, &e4b); 4047 + err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 4048 + GFP_NOFS|__GFP_NOFAIL); 4049 4049 if (err) { 4050 - ext4_error(sb, "Error loading buddy information for %u", 4051 - group); 4050 + ext4_error(sb, "Error %d loading buddy information for %u", 4051 + err, group); 4052 4052 continue; 4053 4053 } 4054 4054 ··· 4305 4303 spin_unlock(&lg->lg_prealloc_lock); 4306 4304 4307 4305 list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 4306 + int err; 4308 4307 4309 4308 group = ext4_get_group_number(sb, pa->pa_pstart); 4310 - if (ext4_mb_load_buddy(sb, group, &e4b)) { 4311 - ext4_error(sb, "Error loading buddy information for %u", 4312 - group); 4309 + err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 4310 + GFP_NOFS|__GFP_NOFAIL); 4311 + if (err) { 4312 + ext4_error(sb, "Error %d loading buddy information for %u", 4313 + err, group); 4313 4314 continue; 4314 4315 } 4315 4316 ext4_lock_group(sb, group); ··· 5132 5127 5133 5128 ret = ext4_mb_load_buddy(sb, group, &e4b); 5134 5129 if (ret) { 5135 - ext4_error(sb, "Error in loading buddy " 5136 - "information for %u", group); 5130 + ext4_warning(sb, "Error %d loading buddy information for %u", 5131 + ret, group); 5137 5132 return ret; 5138 5133 } 5139 5134 bitmap = e4b.bd_bitmap;
+5 -8
fs/ext4/namei.c
··· 1155 1155 static inline int search_dirblock(struct buffer_head *bh, 1156 1156 struct inode *dir, 1157 1157 struct ext4_filename *fname, 1158 - const struct qstr *d_name, 1159 1158 unsigned int offset, 1160 1159 struct ext4_dir_entry_2 **res_dir) 1161 1160 { 1162 1161 return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir, 1163 - fname, d_name, offset, res_dir); 1162 + fname, offset, res_dir); 1164 1163 } 1165 1164 1166 1165 /* ··· 1261 1262 */ 1262 1263 int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, 1263 1264 struct inode *dir, struct ext4_filename *fname, 1264 - const struct qstr *d_name, 1265 1265 unsigned int offset, struct ext4_dir_entry_2 **res_dir) 1266 1266 { 1267 1267 struct ext4_dir_entry_2 * de; ··· 1353 1355 1354 1356 if (ext4_has_inline_data(dir)) { 1355 1357 int has_inline_data = 1; 1356 - ret = ext4_find_inline_entry(dir, &fname, d_name, res_dir, 1358 + ret = ext4_find_inline_entry(dir, &fname, res_dir, 1357 1359 &has_inline_data); 1358 1360 if (has_inline_data) { 1359 1361 if (inlined) ··· 1445 1447 goto next; 1446 1448 } 1447 1449 set_buffer_verified(bh); 1448 - i = search_dirblock(bh, dir, &fname, d_name, 1450 + i = search_dirblock(bh, dir, &fname, 1449 1451 block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); 1450 1452 if (i == 1) { 1451 1453 EXT4_I(dir)->i_dir_start_lookup = block; ··· 1486 1488 { 1487 1489 struct super_block * sb = dir->i_sb; 1488 1490 struct dx_frame frames[2], *frame; 1489 - const struct qstr *d_name = fname->usr_fname; 1490 1491 struct buffer_head *bh; 1491 1492 ext4_lblk_t block; 1492 1493 int retval; ··· 1502 1505 if (IS_ERR(bh)) 1503 1506 goto errout; 1504 1507 1505 - retval = search_dirblock(bh, dir, fname, d_name, 1508 + retval = search_dirblock(bh, dir, fname, 1506 1509 block << EXT4_BLOCK_SIZE_BITS(sb), 1507 1510 res_dir); 1508 1511 if (retval == 1) ··· 1527 1530 1528 1531 bh = NULL; 1529 1532 errout: 1530 - dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name)); 1533 + dxtrace(printk(KERN_DEBUG "%s not found\n", fname->usr_fname->name)); 1531 1534 success: 1532 1535 dx_release(frames); 1533 1536 return bh;
+8 -9
fs/ext4/super.c
··· 848 848 { 849 849 int type; 850 850 851 - if (ext4_has_feature_quota(sb)) { 852 - dquot_disable(sb, -1, 853 - DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 854 - } else { 855 - /* Use our quota_off function to clear inode flags etc. */ 856 - for (type = 0; type < EXT4_MAXQUOTAS; type++) 857 - ext4_quota_off(sb, type); 858 - } 851 + /* Use our quota_off function to clear inode flags etc. */ 852 + for (type = 0; type < EXT4_MAXQUOTAS; type++) 853 + ext4_quota_off(sb, type); 859 854 } 860 855 #else 861 856 static inline void ext4_quota_off_umount(struct super_block *sb) ··· 1174 1179 return res; 1175 1180 } 1176 1181 1182 + res = dquot_initialize(inode); 1183 + if (res) 1184 + return res; 1177 1185 retry: 1178 1186 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1179 1187 ext4_jbd2_credits_xattr(inode)); ··· 5483 5485 goto out; 5484 5486 5485 5487 err = dquot_quota_off(sb, type); 5486 - if (err) 5488 + if (err || ext4_has_feature_quota(sb)) 5487 5489 goto out_put; 5488 5490 5489 5491 inode_lock(inode); ··· 5503 5505 out_unlock: 5504 5506 inode_unlock(inode); 5505 5507 out_put: 5508 + lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL); 5506 5509 iput(inode); 5507 5510 return err; 5508 5511 out:
+8
fs/ext4/xattr.c
··· 888 888 else { 889 889 u32 ref; 890 890 891 + WARN_ON_ONCE(dquot_initialize_needed(inode)); 892 + 891 893 /* The old block is released after updating 892 894 the inode. */ 893 895 error = dquot_alloc_block(inode, ··· 955 953 } else { 956 954 /* We need to allocate a new block */ 957 955 ext4_fsblk_t goal, block; 956 + 957 + WARN_ON_ONCE(dquot_initialize_needed(inode)); 958 958 959 959 goal = ext4_group_first_block_no(sb, 960 960 EXT4_I(inode)->i_block_group); ··· 1170 1166 return -EINVAL; 1171 1167 if (strlen(name) > 255) 1172 1168 return -ERANGE; 1169 + 1173 1170 ext4_write_lock_xattr(inode, &no_expand); 1174 1171 1175 1172 error = ext4_reserve_inode_write(handle, inode, &is.iloc); ··· 1272 1267 int error, retries = 0; 1273 1268 int credits = ext4_jbd2_credits_xattr(inode); 1274 1269 1270 + error = dquot_initialize(inode); 1271 + if (error) 1272 + return error; 1275 1273 retry: 1276 1274 handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits); 1277 1275 if (IS_ERR(handle)) {
+4 -1
fs/f2fs/f2fs.h
··· 1078 1078 { 1079 1079 SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver); 1080 1080 u32 *ctx = (u32 *)shash_desc_ctx(shash); 1081 + u32 retval; 1081 1082 int err; 1082 1083 1083 1084 shash->tfm = sbi->s_chksum_driver; ··· 1088 1087 err = crypto_shash_update(shash, address, length); 1089 1088 BUG_ON(err); 1090 1089 1091 - return *ctx; 1090 + retval = *ctx; 1091 + barrier_data(ctx); 1092 + return retval; 1092 1093 } 1093 1094 1094 1095 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
+1 -1
fs/gfs2/log.c
··· 659 659 struct gfs2_log_header *lh; 660 660 unsigned int tail; 661 661 u32 hash; 662 - int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META; 662 + int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC; 663 663 struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); 664 664 enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); 665 665 lh = page_address(page);
+1 -1
fs/hugetlbfs/inode.c
··· 200 200 addr = ALIGN(addr, huge_page_size(h)); 201 201 vma = find_vma(mm, addr); 202 202 if (TASK_SIZE - len >= addr && 203 - (!vma || addr + len <= vma->vm_start)) 203 + (!vma || addr + len <= vm_start_gap(vma))) 204 204 return addr; 205 205 } 206 206
+6
fs/jbd2/transaction.c
··· 680 680 681 681 rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_); 682 682 handle->h_buffer_credits = nblocks; 683 + /* 684 + * Restore the original nofs context because the journal restart 685 + * is basically the same thing as journal stop and start. 686 + * start_this_handle will start a new nofs context. 687 + */ 688 + memalloc_nofs_restore(handle->saved_alloc_context); 683 689 ret = start_this_handle(journal, handle, gfp_mask); 684 690 return ret; 685 691 }
+2
fs/namespace.c
··· 3488 3488 return err; 3489 3489 } 3490 3490 3491 + put_mnt_ns(old_mnt_ns); 3492 + 3491 3493 /* Update the pwd and root */ 3492 3494 set_fs_pwd(fs, &root); 3493 3495 set_fs_root(fs, &root);
-1
fs/nfs/callback_xdr.c
··· 753 753 * A single slot, so highest used slotid is either 0 or -1 754 754 */ 755 755 nfs4_free_slot(tbl, slot); 756 - nfs4_slot_tbl_drain_complete(tbl); 757 756 spin_unlock(&tbl->slot_tbl_lock); 758 757 } 759 758
+24 -27
fs/nfs/dir.c
··· 1946 1946 } 1947 1947 EXPORT_SYMBOL_GPL(nfs_link); 1948 1948 1949 - static void 1950 - nfs_complete_rename(struct rpc_task *task, struct nfs_renamedata *data) 1951 - { 1952 - struct dentry *old_dentry = data->old_dentry; 1953 - struct dentry *new_dentry = data->new_dentry; 1954 - struct inode *old_inode = d_inode(old_dentry); 1955 - struct inode *new_inode = d_inode(new_dentry); 1956 - 1957 - nfs_mark_for_revalidate(old_inode); 1958 - 1959 - switch (task->tk_status) { 1960 - case 0: 1961 - if (new_inode != NULL) 1962 - nfs_drop_nlink(new_inode); 1963 - d_move(old_dentry, new_dentry); 1964 - nfs_set_verifier(new_dentry, 1965 - nfs_save_change_attribute(data->new_dir)); 1966 - break; 1967 - case -ENOENT: 1968 - nfs_dentry_handle_enoent(old_dentry); 1969 - } 1970 - } 1971 - 1972 1949 /* 1973 1950 * RENAME 1974 1951 * FIXME: Some nfsds, like the Linux user space nfsd, may generate a ··· 1976 1999 { 1977 2000 struct inode *old_inode = d_inode(old_dentry); 1978 2001 struct inode *new_inode = d_inode(new_dentry); 1979 - struct dentry *dentry = NULL; 2002 + struct dentry *dentry = NULL, *rehash = NULL; 1980 2003 struct rpc_task *task; 1981 2004 int error = -EBUSY; 1982 2005 ··· 1999 2022 * To prevent any new references to the target during the 2000 2023 * rename, we unhash the dentry in advance. 2001 2024 */ 2002 - if (!d_unhashed(new_dentry)) 2025 + if (!d_unhashed(new_dentry)) { 2003 2026 d_drop(new_dentry); 2027 + rehash = new_dentry; 2028 + } 2004 2029 2005 2030 if (d_count(new_dentry) > 2) { 2006 2031 int err; ··· 2019 2040 goto out; 2020 2041 2021 2042 new_dentry = dentry; 2043 + rehash = NULL; 2022 2044 new_inode = NULL; 2023 2045 } 2024 2046 } ··· 2028 2048 if (new_inode != NULL) 2029 2049 NFS_PROTO(new_inode)->return_delegation(new_inode); 2030 2050 2031 - task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, 2032 - nfs_complete_rename); 2051 + task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL); 2033 2052 if (IS_ERR(task)) { 2034 2053 error = PTR_ERR(task); 2035 2054 goto out; ··· 2038 2059 if (error == 0) 2039 2060 error = task->tk_status; 2040 2061 rpc_put_task(task); 2062 + nfs_mark_for_revalidate(old_inode); 2041 2063 out: 2064 + if (rehash) 2065 + d_rehash(rehash); 2042 2066 trace_nfs_rename_exit(old_dir, old_dentry, 2043 2067 new_dir, new_dentry, error); 2068 + if (!error) { 2069 + if (new_inode != NULL) 2070 + nfs_drop_nlink(new_inode); 2071 + /* 2072 + * The d_move() should be here instead of in an async RPC completion 2073 + * handler because we need the proper locks to move the dentry. If 2074 + * we're interrupted by a signal, the async RPC completion handler 2075 + * should mark the directories for revalidation. 2076 + */ 2077 + d_move(old_dentry, new_dentry); 2078 + nfs_set_verifier(new_dentry, 2079 + nfs_save_change_attribute(new_dir)); 2080 + } else if (error == -ENOENT) 2081 + nfs_dentry_handle_enoent(old_dentry); 2082 + 2044 2083 /* new dentry created? */ 2045 2084 if (dentry) 2046 2085 dput(dentry);
+1
fs/nfs/flexfilelayout/flexfilelayout.c
··· 454 454 goto out_err_free; 455 455 456 456 /* fh */ 457 + rc = -EIO; 457 458 p = xdr_inline_decode(&stream, 4); 458 459 if (!p) 459 460 goto out_err_free;
-2
fs/nfs/internal.h
··· 398 398 bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t); 399 399 struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *, 400 400 struct nfs_subversion *); 401 - void nfs_initialise_sb(struct super_block *); 402 401 int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); 403 402 int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); 404 403 struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *, ··· 457 458 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); 458 459 459 460 /* super.c */ 460 - void nfs_clone_super(struct super_block *, struct nfs_mount_info *); 461 461 void nfs_umount_begin(struct super_block *); 462 462 int nfs_statfs(struct dentry *, struct kstatfs *); 463 463 int nfs_show_options(struct seq_file *, struct dentry *);
+1 -1
fs/nfs/namespace.c
··· 246 246 247 247 devname = nfs_devname(dentry, page, PAGE_SIZE); 248 248 if (IS_ERR(devname)) 249 - mnt = (struct vfsmount *)devname; 249 + mnt = ERR_CAST(devname); 250 250 else 251 251 mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata); 252 252
+1 -1
fs/nfs/nfs42proc.c
··· 177 177 if (status) 178 178 goto out; 179 179 180 - if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier, 180 + if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier, 181 181 &res->commit_res.verf->verifier)) { 182 182 status = -EAGAIN; 183 183 goto out;
-1
fs/nfs/nfs4client.c
··· 582 582 */ 583 583 nfs4_schedule_path_down_recovery(pos); 584 584 default: 585 - spin_lock(&nn->nfs_client_lock); 586 585 goto out; 587 586 } 588 587
+3 -2
fs/nfs/nfs4proc.c
··· 2589 2589 2590 2590 /* Except MODE, it seems harmless of setting twice. */ 2591 2591 if (opendata->o_arg.createmode != NFS4_CREATE_EXCLUSIVE && 2592 - attrset[1] & FATTR4_WORD1_MODE) 2592 + (attrset[1] & FATTR4_WORD1_MODE || 2593 + attrset[2] & FATTR4_WORD2_MODE_UMASK)) 2593 2594 sattr->ia_valid &= ~ATTR_MODE; 2594 2595 2595 2596 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) ··· 8417 8416 size_t max_pages = max_response_pages(server); 8418 8417 8419 8418 dprintk("--> %s\n", __func__); 8419 + nfs4_sequence_free_slot(&lgp->res.seq_res); 8420 8420 nfs4_free_pages(lgp->args.layout.pages, max_pages); 8421 8421 pnfs_put_layout_hdr(NFS_I(inode)->layout); 8422 8422 put_nfs_open_context(lgp->args.ctx); ··· 8492 8490 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 8493 8491 if (status == 0 && lgp->res.layoutp->len) 8494 8492 lseg = pnfs_layout_process(lgp); 8495 - nfs4_sequence_free_slot(&lgp->res.seq_res); 8496 8493 rpc_put_task(task); 8497 8494 dprintk("<-- %s status=%d\n", __func__, status); 8498 8495 if (status)
+2
fs/nfs/nfs4state.c
··· 2134 2134 put_rpccred(cred); 2135 2135 switch (status) { 2136 2136 case 0: 2137 + case -EINTR: 2138 + case -ERESTARTSYS: 2137 2139 break; 2138 2140 case -ETIMEDOUT: 2139 2141 if (clnt->cl_softrtry)
+17 -8
fs/nfs/pnfs.c
··· 2094 2094 } 2095 2095 EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout); 2096 2096 2097 + /* 2098 + * Check for any intersection between the request and the pgio->pg_lseg, 2099 + * and if none, put this pgio->pg_lseg away. 2100 + */ 2101 + static void 2102 + pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 2103 + { 2104 + if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) { 2105 + pnfs_put_lseg(pgio->pg_lseg); 2106 + pgio->pg_lseg = NULL; 2107 + } 2108 + } 2109 + 2097 2110 void 2098 2111 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) 2099 2112 { 2100 2113 u64 rd_size = req->wb_bytes; 2101 2114 2102 2115 pnfs_generic_pg_check_layout(pgio); 2116 + pnfs_generic_pg_check_range(pgio, req); 2103 2117 if (pgio->pg_lseg == NULL) { 2104 2118 if (pgio->pg_dreq == NULL) 2105 2119 rd_size = i_size_read(pgio->pg_inode) - req_offset(req); ··· 2145 2131 struct nfs_page *req, u64 wb_size) 2146 2132 { 2147 2133 pnfs_generic_pg_check_layout(pgio); 2134 + pnfs_generic_pg_check_range(pgio, req); 2148 2135 if (pgio->pg_lseg == NULL) { 2149 2136 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 2150 2137 req->wb_context, ··· 2206 2191 seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset, 2207 2192 pgio->pg_lseg->pls_range.length); 2208 2193 req_start = req_offset(req); 2209 - WARN_ON_ONCE(req_start >= seg_end); 2194 + 2210 2195 /* start of request is past the last byte of this segment */ 2211 - if (req_start >= seg_end) { 2212 - /* reference the new lseg */ 2213 - if (pgio->pg_ops->pg_cleanup) 2214 - pgio->pg_ops->pg_cleanup(pgio); 2215 - if (pgio->pg_ops->pg_init) 2216 - pgio->pg_ops->pg_init(pgio, req); 2196 + if (req_start >= seg_end) 2217 2197 return 0; 2218 - } 2219 2198 2220 2199 /* adjust 'size' iff there are fewer bytes left in the 2221 2200 * segment than what nfs_generic_pg_test returned */
+10
fs/nfs/pnfs.h
··· 593 593 return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2); 594 594 } 595 595 596 + static inline bool 597 + pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req) 598 + { 599 + u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length); 600 + u64 req_last = req_offset(req) + req->wb_bytes; 601 + 602 + return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last, 603 + req_offset(req), req_last); 604 + } 605 + 596 606 extern unsigned int layoutstats_timer; 597 607 598 608 #ifdef NFS_DEBUG
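Aside: pnfs_lseg_request_intersecting() above boils down to an overlap test between two byte ranges, the layout segment's and the request's. The sketch below shows just that test with half-open ranges; it simplifies the boundary handling (the kernel helpers also deal with an open-ended "to EOF" encoding), and ranges_intersect() is an invented name.

/* Minimal "do these byte ranges overlap?" check, ranges are [start, end). */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool ranges_intersect(uint64_t start1, uint64_t end1,
			     uint64_t start2, uint64_t end2)
{
	return start1 < end2 && start2 < end1;
}

int main(void)
{
	/* layout segment covering bytes [0, 1 MiB) */
	uint64_t seg_start = 0, seg_end = 1024 * 1024;

	/* one request inside the segment, one entirely past it */
	printf("req at 4096+4096 overlaps: %d\n",
	       ranges_intersect(seg_start, seg_end, 4096, 4096 + 4096));
	printf("req at 2MiB+4096 overlaps: %d\n",
	       ranges_intersect(seg_start, seg_end,
				2048 * 1024, 2048 * 1024 + 4096));
	return 0;
}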
+3 -2
fs/nfs/super.c
··· 2301 2301 /* 2302 2302 * Initialise the common bits of the superblock 2303 2303 */ 2304 - inline void nfs_initialise_sb(struct super_block *sb) 2304 + static void nfs_initialise_sb(struct super_block *sb) 2305 2305 { 2306 2306 struct nfs_server *server = NFS_SB(sb); 2307 2307 ··· 2348 2348 /* 2349 2349 * Finish setting up a cloned NFS2/3/4 superblock 2350 2350 */ 2351 - void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info) 2351 + static void nfs_clone_super(struct super_block *sb, 2352 + struct nfs_mount_info *mount_info) 2352 2353 { 2353 2354 const struct super_block *old_sb = mount_info->cloned->sb; 2354 2355 struct nfs_server *server = NFS_SB(sb);
+6 -17
fs/nfsd/nfs3xdr.c
··· 334 334 if (!p) 335 335 return 0; 336 336 p = xdr_decode_hyper(p, &args->offset); 337 + 337 338 args->count = ntohl(*p++); 338 - 339 - if (!xdr_argsize_check(rqstp, p)) 340 - return 0; 341 - 342 339 len = min(args->count, max_blocksize); 343 340 344 341 /* set up the kvec */ ··· 349 352 v++; 350 353 } 351 354 args->vlen = v; 352 - return 1; 355 + return xdr_argsize_check(rqstp, p); 353 356 } 354 357 355 358 int ··· 541 544 p = decode_fh(p, &args->fh); 542 545 if (!p) 543 546 return 0; 544 - if (!xdr_argsize_check(rqstp, p)) 545 - return 0; 546 547 args->buffer = page_address(*(rqstp->rq_next_page++)); 547 548 548 - return 1; 549 + return xdr_argsize_check(rqstp, p); 549 550 } 550 551 551 552 int ··· 569 574 args->verf = p; p += 2; 570 575 args->dircount = ~0; 571 576 args->count = ntohl(*p++); 572 - 573 - if (!xdr_argsize_check(rqstp, p)) 574 - return 0; 575 - 576 577 args->count = min_t(u32, args->count, PAGE_SIZE); 577 578 args->buffer = page_address(*(rqstp->rq_next_page++)); 578 579 579 - return 1; 580 + return xdr_argsize_check(rqstp, p); 580 581 } 581 582 582 583 int ··· 590 599 args->dircount = ntohl(*p++); 591 600 args->count = ntohl(*p++); 592 601 593 - if (!xdr_argsize_check(rqstp, p)) 594 - return 0; 595 - 596 602 len = args->count = min(args->count, max_blocksize); 597 603 while (len > 0) { 598 604 struct page *p = *(rqstp->rq_next_page++); ··· 597 609 args->buffer = page_address(p); 598 610 len -= PAGE_SIZE; 599 611 } 600 - return 1; 612 + 613 + return xdr_argsize_check(rqstp, p); 601 614 } 602 615 603 616 int
+6 -7
fs/nfsd/nfs4proc.c
··· 1769 1769 opdesc->op_get_currentstateid(cstate, &op->u); 1770 1770 op->status = opdesc->op_func(rqstp, cstate, &op->u); 1771 1771 1772 + /* Only from SEQUENCE */ 1773 + if (cstate->status == nfserr_replay_cache) { 1774 + dprintk("%s NFS4.1 replay from cache\n", __func__); 1775 + status = op->status; 1776 + goto out; 1777 + } 1772 1778 if (!op->status) { 1773 1779 if (opdesc->op_set_currentstateid) 1774 1780 opdesc->op_set_currentstateid(cstate, &op->u); ··· 1785 1779 if (need_wrongsec_check(rqstp)) 1786 1780 op->status = check_nfsd_access(current_fh->fh_export, rqstp); 1787 1781 } 1788 - 1789 1782 encode_op: 1790 - /* Only from SEQUENCE */ 1791 - if (cstate->status == nfserr_replay_cache) { 1792 - dprintk("%s NFS4.1 replay from cache\n", __func__); 1793 - status = op->status; 1794 - goto out; 1795 - } 1796 1783 if (op->status == nfserr_replay_me) { 1797 1784 op->replay = &cstate->replay_owner->so_replay; 1798 1785 nfsd4_encode_replay(&resp->xdr, op);
+3 -10
fs/nfsd/nfsxdr.c
··· 257 257 len = args->count = ntohl(*p++); 258 258 p++; /* totalcount - unused */ 259 259 260 - if (!xdr_argsize_check(rqstp, p)) 261 - return 0; 262 - 263 260 len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2); 264 261 265 262 /* set up somewhere to store response. ··· 272 275 v++; 273 276 } 274 277 args->vlen = v; 275 - return 1; 278 + return xdr_argsize_check(rqstp, p); 276 279 } 277 280 278 281 int ··· 362 365 p = decode_fh(p, &args->fh); 363 366 if (!p) 364 367 return 0; 365 - if (!xdr_argsize_check(rqstp, p)) 366 - return 0; 367 368 args->buffer = page_address(*(rqstp->rq_next_page++)); 368 369 369 - return 1; 370 + return xdr_argsize_check(rqstp, p); 370 371 } 371 372 372 373 int ··· 402 407 args->cookie = ntohl(*p++); 403 408 args->count = ntohl(*p++); 404 409 args->count = min_t(u32, args->count, PAGE_SIZE); 405 - if (!xdr_argsize_check(rqstp, p)) 406 - return 0; 407 410 args->buffer = page_address(*(rqstp->rq_next_page++)); 408 411 409 - return 1; 412 + return xdr_argsize_check(rqstp, p); 410 413 } 411 414 412 415 /*
+1 -1
fs/ntfs/namei.c
··· 159 159 PTR_ERR(dent_inode)); 160 160 kfree(name); 161 161 /* Return the error code. */ 162 - return (struct dentry *)dent_inode; 162 + return ERR_CAST(dent_inode); 163 163 } 164 164 /* It is guaranteed that @name is no longer allocated at this point. */ 165 165 if (MREF_ERR(mref) == -ENOENT) {
+4
fs/ocfs2/dlmglue.c
··· 2591 2591 struct ocfs2_lock_res *lockres; 2592 2592 2593 2593 lockres = &OCFS2_I(inode)->ip_inode_lockres; 2594 + /* had_lock means that the current process already took the cluster 2595 + * lock previously. If had_lock is 1, we have nothing to do here, and 2596 + * it will get unlocked where we got the lock. 2597 + */ 2594 2598 if (!had_lock) { 2595 2599 ocfs2_remove_holder(lockres, oh); 2596 2600 ocfs2_inode_unlock(inode, ex);
+1 -1
fs/ocfs2/export.c
··· 119 119 120 120 if (IS_ERR(inode)) { 121 121 mlog_errno(PTR_ERR(inode)); 122 - result = (void *)inode; 122 + result = ERR_CAST(inode); 123 123 goto bail; 124 124 } 125 125
+13 -10
fs/ocfs2/xattr.c
··· 1328 1328 void *buffer, 1329 1329 size_t buffer_size) 1330 1330 { 1331 - int ret; 1331 + int ret, had_lock; 1332 1332 struct buffer_head *di_bh = NULL; 1333 + struct ocfs2_lock_holder oh; 1333 1334 1334 - ret = ocfs2_inode_lock(inode, &di_bh, 0); 1335 - if (ret < 0) { 1336 - mlog_errno(ret); 1337 - return ret; 1335 + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 0, &oh); 1336 + if (had_lock < 0) { 1337 + mlog_errno(had_lock); 1338 + return had_lock; 1338 1339 } 1339 1340 down_read(&OCFS2_I(inode)->ip_xattr_sem); 1340 1341 ret = ocfs2_xattr_get_nolock(inode, di_bh, name_index, 1341 1342 name, buffer, buffer_size); 1342 1343 up_read(&OCFS2_I(inode)->ip_xattr_sem); 1343 1344 1344 - ocfs2_inode_unlock(inode, 0); 1345 + ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock); 1345 1346 1346 1347 brelse(di_bh); 1347 1348 ··· 3538 3537 { 3539 3538 struct buffer_head *di_bh = NULL; 3540 3539 struct ocfs2_dinode *di; 3541 - int ret, credits, ref_meta = 0, ref_credits = 0; 3540 + int ret, credits, had_lock, ref_meta = 0, ref_credits = 0; 3542 3541 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 3543 3542 struct inode *tl_inode = osb->osb_tl_inode; 3544 3543 struct ocfs2_xattr_set_ctxt ctxt = { NULL, NULL, NULL, }; 3545 3544 struct ocfs2_refcount_tree *ref_tree = NULL; 3545 + struct ocfs2_lock_holder oh; 3546 3546 3547 3547 struct ocfs2_xattr_info xi = { 3548 3548 .xi_name_index = name_index, ··· 3574 3572 return -ENOMEM; 3575 3573 } 3576 3574 3577 - ret = ocfs2_inode_lock(inode, &di_bh, 1); 3578 - if (ret < 0) { 3575 + had_lock = ocfs2_inode_lock_tracker(inode, &di_bh, 1, &oh); 3576 + if (had_lock < 0) { 3577 + ret = had_lock; 3579 3578 mlog_errno(ret); 3580 3579 goto cleanup_nolock; 3581 3580 } ··· 3673 3670 if (ret) 3674 3671 mlog_errno(ret); 3675 3672 } 3676 - ocfs2_inode_unlock(inode, 1); 3673 + ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock); 3677 3674 cleanup_nolock: 3678 3675 brelse(di_bh); 3679 3676 brelse(xbs.xattr_bh);
+1
fs/overlayfs/Kconfig
··· 1 1 config OVERLAY_FS 2 2 tristate "Overlay filesystem support" 3 + select EXPORTFS 3 4 help 4 5 An overlay filesystem combines two filesystems - an 'upper' filesystem 5 6 and a 'lower' filesystem. When a name exists in both filesystems, the
+37 -22
fs/overlayfs/copy_up.c
··· 300 300 return PTR_ERR(fh); 301 301 } 302 302 303 - err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0); 303 + /* 304 + * Do not fail when upper doesn't support xattrs. 305 + */ 306 + err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh, 307 + fh ? fh->len : 0, 0); 304 308 kfree(fh); 305 309 306 310 return err; ··· 330 326 .link = link 331 327 }; 332 328 333 - upper = lookup_one_len(dentry->d_name.name, upperdir, 334 - dentry->d_name.len); 335 - err = PTR_ERR(upper); 336 - if (IS_ERR(upper)) 337 - goto out; 338 - 339 329 err = security_inode_copy_up(dentry, &new_creds); 340 330 if (err < 0) 341 - goto out1; 331 + goto out; 342 332 343 333 if (new_creds) 344 334 old_creds = override_creds(new_creds); ··· 340 342 if (tmpfile) 341 343 temp = ovl_do_tmpfile(upperdir, stat->mode); 342 344 else 343 - temp = ovl_lookup_temp(workdir, dentry); 344 - err = PTR_ERR(temp); 345 - if (IS_ERR(temp)) 346 - goto out1; 347 - 345 + temp = ovl_lookup_temp(workdir); 348 346 err = 0; 349 - if (!tmpfile) 347 + if (IS_ERR(temp)) { 348 + err = PTR_ERR(temp); 349 + temp = NULL; 350 + } 351 + 352 + if (!err && !tmpfile) 350 353 err = ovl_create_real(wdir, temp, &cattr, NULL, true); 351 354 352 355 if (new_creds) { ··· 356 357 } 357 358 358 359 if (err) 359 - goto out2; 360 + goto out; 360 361 361 362 if (S_ISREG(stat->mode)) { 362 363 struct path upperpath; ··· 392 393 /* 393 394 * Store identifier of lower inode in upper inode xattr to 394 395 * allow lookup of the copy up origin inode. 396 + * 397 + * Don't set origin when we are breaking the association with a lower 398 + * hard link. 395 399 */ 396 - err = ovl_set_origin(dentry, lowerpath->dentry, temp); 397 - if (err) 400 + if (S_ISDIR(stat->mode) || stat->nlink == 1) { 401 + err = ovl_set_origin(dentry, lowerpath->dentry, temp); 402 + if (err) 403 + goto out_cleanup; 404 + } 405 + 406 + upper = lookup_one_len(dentry->d_name.name, upperdir, 407 + dentry->d_name.len); 408 + if (IS_ERR(upper)) { 409 + err = PTR_ERR(upper); 410 + upper = NULL; 398 411 goto out_cleanup; 412 + } 399 413 400 414 if (tmpfile) 401 415 err = ovl_do_link(temp, udir, upper, true); ··· 423 411 424 412 /* Restore timestamps on parent (best effort) */ 425 413 ovl_set_timestamps(upperdir, pstat); 426 - out2: 427 - dput(temp); 428 - out1: 429 - dput(upper); 430 414 out: 415 + dput(temp); 416 + dput(upper); 431 417 return err; 432 418 433 419 out_cleanup: 434 420 if (!tmpfile) 435 421 ovl_cleanup(wdir, temp); 436 - goto out2; 422 + goto out; 437 423 } 438 424 439 425 /* ··· 463 453 464 454 ovl_path_upper(parent, &parentpath); 465 455 upperdir = parentpath.dentry; 456 + 457 + /* Mark parent "impure" because it may now contain non-pure upper */ 458 + err = ovl_set_impure(parent, upperdir); 459 + if (err) 460 + return err; 466 461 467 462 err = vfs_getattr(&parentpath, &pstat, 468 463 STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT);
+47 -14
fs/overlayfs/dir.c
··· 41 41 } 42 42 } 43 43 44 - struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry) 44 + struct dentry *ovl_lookup_temp(struct dentry *workdir) 45 45 { 46 46 struct dentry *temp; 47 47 char name[20]; ··· 68 68 struct dentry *whiteout; 69 69 struct inode *wdir = workdir->d_inode; 70 70 71 - whiteout = ovl_lookup_temp(workdir, dentry); 71 + whiteout = ovl_lookup_temp(workdir); 72 72 if (IS_ERR(whiteout)) 73 73 return whiteout; 74 74 ··· 127 127 return err; 128 128 } 129 129 130 - static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) 130 + static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper, 131 + int xerr) 131 132 { 132 133 int err; 133 134 134 - err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0); 135 + err = ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE, "y", 1, xerr); 135 136 if (!err) 136 137 ovl_dentry_set_opaque(dentry); 137 138 138 139 return err; 140 + } 141 + 142 + static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) 143 + { 144 + /* 145 + * Fail with -EIO when trying to create opaque dir and upper doesn't 146 + * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to 147 + * return a specific error for noxattr case. 148 + */ 149 + return ovl_set_opaque_xerr(dentry, upperdentry, -EIO); 139 150 } 140 151 141 152 /* Common operations required to be done after creation of file on upper */ ··· 171 160 static bool ovl_type_merge(struct dentry *dentry) 172 161 { 173 162 return OVL_TYPE_MERGE(ovl_path_type(dentry)); 163 + } 164 + 165 + static bool ovl_type_origin(struct dentry *dentry) 166 + { 167 + return OVL_TYPE_ORIGIN(ovl_path_type(dentry)); 174 168 } 175 169 176 170 static int ovl_create_upper(struct dentry *dentry, struct inode *inode, ··· 266 250 if (upper->d_parent->d_inode != udir) 267 251 goto out_unlock; 268 252 269 - opaquedir = ovl_lookup_temp(workdir, dentry); 253 + opaquedir = ovl_lookup_temp(workdir); 270 254 err = PTR_ERR(opaquedir); 271 255 if (IS_ERR(opaquedir)) 272 256 goto out_unlock; ··· 398 382 if (err) 399 383 goto out; 400 384 401 - newdentry = ovl_lookup_temp(workdir, dentry); 385 + newdentry = ovl_lookup_temp(workdir); 402 386 err = PTR_ERR(newdentry); 403 387 if (IS_ERR(newdentry)) 404 388 goto out_unlock; ··· 862 846 if (IS_ERR(redirect)) 863 847 return PTR_ERR(redirect); 864 848 865 - err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT, 866 - redirect, strlen(redirect), 0); 849 + err = ovl_check_setxattr(dentry, ovl_dentry_upper(dentry), 850 + OVL_XATTR_REDIRECT, 851 + redirect, strlen(redirect), -EXDEV); 867 852 if (!err) { 868 853 spin_lock(&dentry->d_lock); 869 854 ovl_dentry_set_redirect(dentry, redirect); 870 855 spin_unlock(&dentry->d_lock); 871 856 } else { 872 857 kfree(redirect); 873 - if (err == -EOPNOTSUPP) 874 - ovl_clear_redirect_dir(dentry->d_sb); 875 - else 876 - pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err); 858 + pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err); 877 859 /* Fall back to userspace copy-up */ 878 860 err = -EXDEV; 879 861 } ··· 957 943 old_upperdir = ovl_dentry_upper(old->d_parent); 958 944 new_upperdir = ovl_dentry_upper(new->d_parent); 959 945 946 + if (!samedir) { 947 + /* 948 + * When moving a merge dir or non-dir with copy up origin into 949 + * a new parent, we are marking the new parent dir "impure". 950 + * When ovl_iterate() iterates an "impure" upper dir, it will 951 + * lookup the origin inodes of the entries to fill d_ino. 
952 + */ 953 + if (ovl_type_origin(old)) { 954 + err = ovl_set_impure(new->d_parent, new_upperdir); 955 + if (err) 956 + goto out_revert_creds; 957 + } 958 + if (!overwrite && ovl_type_origin(new)) { 959 + err = ovl_set_impure(old->d_parent, old_upperdir); 960 + if (err) 961 + goto out_revert_creds; 962 + } 963 + } 964 + 960 965 trap = lock_rename(new_upperdir, old_upperdir); 961 966 962 967 olddentry = lookup_one_len(old->d_name.name, old_upperdir, ··· 1025 992 if (ovl_type_merge_or_lower(old)) 1026 993 err = ovl_set_redirect(old, samedir); 1027 994 else if (!old_opaque && ovl_type_merge(new->d_parent)) 1028 - err = ovl_set_opaque(old, olddentry); 995 + err = ovl_set_opaque_xerr(old, olddentry, -EXDEV); 1029 996 if (err) 1030 997 goto out_dput; 1031 998 } ··· 1033 1000 if (ovl_type_merge_or_lower(new)) 1034 1001 err = ovl_set_redirect(new, samedir); 1035 1002 else if (!new_opaque && ovl_type_merge(old->d_parent)) 1036 - err = ovl_set_opaque(new, newdentry); 1003 + err = ovl_set_opaque_xerr(new, newdentry, -EXDEV); 1037 1004 if (err) 1038 1005 goto out_dput; 1039 1006 }
+11 -1
fs/overlayfs/inode.c
··· 240 240 return res; 241 241 } 242 242 243 + static bool ovl_can_list(const char *s) 244 + { 245 + /* List all non-trusted xattrs */ 246 + if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0) 247 + return true; 248 + 249 + /* Never list trusted.overlay, list other trusted for superuser only */ 250 + return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN); 251 + } 252 + 243 253 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) 244 254 { 245 255 struct dentry *realdentry = ovl_dentry_real(dentry); ··· 273 263 return -EIO; 274 264 275 265 len -= slen; 276 - if (ovl_is_private_xattr(s)) { 266 + if (!ovl_can_list(s)) { 277 267 res -= slen; 278 268 memmove(s, s + slen, len); 279 269 } else {
+5 -11
fs/overlayfs/namei.c
··· 169 169 170 170 static bool ovl_is_opaquedir(struct dentry *dentry) 171 171 { 172 - int res; 173 - char val; 174 - 175 - if (!d_is_dir(dentry)) 176 - return false; 177 - 178 - res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1); 179 - if (res == 1 && val == 'y') 180 - return true; 181 - 182 - return false; 172 + return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE); 183 173 } 184 174 185 175 static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, ··· 341 351 unsigned int ctr = 0; 342 352 struct inode *inode = NULL; 343 353 bool upperopaque = false; 354 + bool upperimpure = false; 344 355 char *upperredirect = NULL; 345 356 struct dentry *this; 346 357 unsigned int i; ··· 386 395 poe = roe; 387 396 } 388 397 upperopaque = d.opaque; 398 + if (upperdentry && d.is_dir) 399 + upperimpure = ovl_is_impuredir(upperdentry); 389 400 } 390 401 391 402 if (!d.stop && poe->numlower) { ··· 456 463 457 464 revert_creds(old_cred); 458 465 oe->opaque = upperopaque; 466 + oe->impure = upperimpure; 459 467 oe->redirect = upperredirect; 460 468 oe->__upperdentry = upperdentry; 461 469 memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
+14 -2
fs/overlayfs/overlayfs.h
··· 24 24 #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque" 25 25 #define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect" 26 26 #define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin" 27 + #define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure" 27 28 28 29 /* 29 30 * The tuple (fh,uuid) is a universal unique identifier for a copy up origin, ··· 204 203 struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry); 205 204 void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache); 206 205 bool ovl_dentry_is_opaque(struct dentry *dentry); 206 + bool ovl_dentry_is_impure(struct dentry *dentry); 207 207 bool ovl_dentry_is_whiteout(struct dentry *dentry); 208 208 void ovl_dentry_set_opaque(struct dentry *dentry); 209 209 bool ovl_redirect_dir(struct super_block *sb); 210 - void ovl_clear_redirect_dir(struct super_block *sb); 211 210 const char *ovl_dentry_get_redirect(struct dentry *dentry); 212 211 void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect); 213 212 void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); ··· 220 219 struct file *ovl_path_open(struct path *path, int flags); 221 220 int ovl_copy_up_start(struct dentry *dentry); 222 221 void ovl_copy_up_end(struct dentry *dentry); 222 + bool ovl_check_dir_xattr(struct dentry *dentry, const char *name); 223 + int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry, 224 + const char *name, const void *value, size_t size, 225 + int xerr); 226 + int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry); 227 + 228 + static inline bool ovl_is_impuredir(struct dentry *dentry) 229 + { 230 + return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE); 231 + } 232 + 223 233 224 234 /* namei.c */ 225 235 int ovl_path_next(int idx, struct dentry *dentry, struct path *path); ··· 275 263 276 264 /* dir.c */ 277 265 extern const struct inode_operations ovl_dir_inode_operations; 278 - struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry); 266 + struct dentry *ovl_lookup_temp(struct dentry *workdir); 279 267 struct cattr { 280 268 dev_t rdev; 281 269 umode_t mode;
+2
fs/overlayfs/ovl_entry.h
··· 28 28 /* creds of process who forced instantiation of super block */ 29 29 const struct cred *creator_cred; 30 30 bool tmpfile; 31 + bool noxattr; 31 32 wait_queue_head_t copyup_wq; 32 33 /* sb common to all layers */ 33 34 struct super_block *same_sb; ··· 43 42 u64 version; 44 43 const char *redirect; 45 44 bool opaque; 45 + bool impure; 46 46 bool copying; 47 47 }; 48 48 struct rcu_head rcu;
+17 -1
fs/overlayfs/super.c
··· 891 891 dput(temp); 892 892 else 893 893 pr_warn("overlayfs: upper fs does not support tmpfile.\n"); 894 + 895 + /* 896 + * Check if upper/work fs supports trusted.overlay.* 897 + * xattr 898 + */ 899 + err = ovl_do_setxattr(ufs->workdir, OVL_XATTR_OPAQUE, 900 + "0", 1, 0); 901 + if (err) { 902 + ufs->noxattr = true; 903 + pr_warn("overlayfs: upper fs does not support xattr.\n"); 904 + } else { 905 + vfs_removexattr(ufs->workdir, OVL_XATTR_OPAQUE); 906 + } 894 907 } 895 908 } 896 909 ··· 974 961 path_put(&workpath); 975 962 kfree(lowertmp); 976 963 977 - oe->__upperdentry = upperpath.dentry; 964 + if (upperpath.dentry) { 965 + oe->__upperdentry = upperpath.dentry; 966 + oe->impure = ovl_is_impuredir(upperpath.dentry); 967 + } 978 968 for (i = 0; i < numlower; i++) { 979 969 oe->lowerstack[i].dentry = stack[i].dentry; 980 970 oe->lowerstack[i].mnt = ufs->lower_mnt[i];
+64 -8
fs/overlayfs/util.c
··· 175 175 return oe->opaque; 176 176 } 177 177 178 + bool ovl_dentry_is_impure(struct dentry *dentry) 179 + { 180 + struct ovl_entry *oe = dentry->d_fsdata; 181 + 182 + return oe->impure; 183 + } 184 + 178 185 bool ovl_dentry_is_whiteout(struct dentry *dentry) 179 186 { 180 187 return !dentry->d_inode && ovl_dentry_is_opaque(dentry); ··· 198 191 { 199 192 struct ovl_fs *ofs = sb->s_fs_info; 200 193 201 - return ofs->config.redirect_dir; 202 - } 203 - 204 - void ovl_clear_redirect_dir(struct super_block *sb) 205 - { 206 - struct ovl_fs *ofs = sb->s_fs_info; 207 - 208 - ofs->config.redirect_dir = false; 194 + return ofs->config.redirect_dir && !ofs->noxattr; 209 195 } 210 196 211 197 const char *ovl_dentry_get_redirect(struct dentry *dentry) ··· 302 302 oe->copying = false; 303 303 wake_up_locked(&ofs->copyup_wq); 304 304 spin_unlock(&ofs->copyup_wq.lock); 305 + } 306 + 307 + bool ovl_check_dir_xattr(struct dentry *dentry, const char *name) 308 + { 309 + int res; 310 + char val; 311 + 312 + if (!d_is_dir(dentry)) 313 + return false; 314 + 315 + res = vfs_getxattr(dentry, name, &val, 1); 316 + if (res == 1 && val == 'y') 317 + return true; 318 + 319 + return false; 320 + } 321 + 322 + int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry, 323 + const char *name, const void *value, size_t size, 324 + int xerr) 325 + { 326 + int err; 327 + struct ovl_fs *ofs = dentry->d_sb->s_fs_info; 328 + 329 + if (ofs->noxattr) 330 + return xerr; 331 + 332 + err = ovl_do_setxattr(upperdentry, name, value, size, 0); 333 + 334 + if (err == -EOPNOTSUPP) { 335 + pr_warn("overlayfs: cannot set %s xattr on upper\n", name); 336 + ofs->noxattr = true; 337 + return xerr; 338 + } 339 + 340 + return err; 341 + } 342 + 343 + int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry) 344 + { 345 + int err; 346 + struct ovl_entry *oe = dentry->d_fsdata; 347 + 348 + if (oe->impure) 349 + return 0; 350 + 351 + /* 352 + * Do not fail when upper doesn't support xattrs. 353 + * Upper inodes won't have origin nor redirect xattr anyway. 354 + */ 355 + err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE, 356 + "y", 1, 0); 357 + if (!err) 358 + oe->impure = true; 359 + 360 + return err; 305 361 }
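The xerr argument of ovl_check_setxattr() is the value returned when the upper filesystem turns out not to support trusted xattrs; the callers added elsewhere in this series choose how fatal that is. Summarised as a sketch (the sample call mirrors ovl_set_impure() above):

    /*
     * xerr == 0      -> non-fatal, quietly skip the xattr
     *                   (origin and impure markers)
     * xerr == -EXDEV -> report "cross device" so the caller can fall
     *                   back, e.g. to userspace copy-up for redirects
     * xerr == -EIO   -> hard failure; an opaque dir cannot work at all
     *                   without xattr support
     */
    err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE, "y", 1, 0);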
+1 -1
fs/proc/base.c
··· 821 821 if (!mmget_not_zero(mm)) 822 822 goto free; 823 823 824 - flags = write ? FOLL_WRITE : 0; 824 + flags = FOLL_FORCE | (write ? FOLL_WRITE : 0); 825 825 826 826 while (count > 0) { 827 827 int this_len = min_t(int, count, PAGE_SIZE);
-4
fs/proc/task_mmu.c
··· 300 300 301 301 /* We don't show the stack guard page in /proc/maps */ 302 302 start = vma->vm_start; 303 - if (stack_guard_page_start(vma, start)) 304 - start += PAGE_SIZE; 305 303 end = vma->vm_end; 306 - if (stack_guard_page_end(vma, end)) 307 - end -= PAGE_SIZE; 308 304 309 305 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); 310 306 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
+16
fs/quota/dquot.c
··· 1512 1512 } 1513 1513 EXPORT_SYMBOL(dquot_initialize); 1514 1514 1515 + bool dquot_initialize_needed(struct inode *inode) 1516 + { 1517 + struct dquot **dquots; 1518 + int i; 1519 + 1520 + if (!dquot_active(inode)) 1521 + return false; 1522 + 1523 + dquots = i_dquot(inode); 1524 + for (i = 0; i < MAXQUOTAS; i++) 1525 + if (!dquots[i] && sb_has_quota_active(inode->i_sb, i)) 1526 + return true; 1527 + return false; 1528 + } 1529 + EXPORT_SYMBOL(dquot_initialize_needed); 1530 + 1515 1531 /* 1516 1532 * Release all quotas referenced by inode. 1517 1533 *
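dquot_initialize_needed() lets a filesystem ask, without side effects, whether dquot_initialize() still has any dquots to attach for an inode. A hypothetical caller sketch (the function name and locking comment are illustrative, not from this patch):

    static int example_prepare_quota(struct inode *inode)
    {
            if (!dquot_initialize_needed(inode))
                    return 0;       /* dquots already attached */

            /* take whatever locks or transaction credits the fs needs
             * before attaching quota structures, then do the real work */
            return dquot_initialize(inode);
    }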
+1 -1
fs/read_write.c
··· 1285 1285 if (!(file->f_mode & FMODE_CAN_WRITE)) 1286 1286 goto out; 1287 1287 1288 - ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0); 1288 + ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags); 1289 1289 1290 1290 out: 1291 1291 if (ret > 0)
+2 -2
fs/reiserfs/journal.c
··· 1112 1112 depth = reiserfs_write_unlock_nested(s); 1113 1113 if (reiserfs_barrier_flush(s)) 1114 1114 __sync_dirty_buffer(jl->j_commit_bh, 1115 - REQ_PREFLUSH | REQ_FUA); 1115 + REQ_SYNC | REQ_PREFLUSH | REQ_FUA); 1116 1116 else 1117 1117 sync_dirty_buffer(jl->j_commit_bh); 1118 1118 reiserfs_write_lock_nested(s, depth); ··· 1271 1271 1272 1272 if (reiserfs_barrier_flush(sb)) 1273 1273 __sync_dirty_buffer(journal->j_header_bh, 1274 - REQ_PREFLUSH | REQ_FUA); 1274 + REQ_SYNC | REQ_PREFLUSH | REQ_FUA); 1275 1275 else 1276 1276 sync_dirty_buffer(journal->j_header_bh); 1277 1277
+1
fs/stat.c
··· 672 672 inode->i_bytes -= 512; 673 673 } 674 674 } 675 + EXPORT_SYMBOL(__inode_add_bytes); 675 676 676 677 void inode_add_bytes(struct inode *inode, loff_t bytes) 677 678 {
+44 -26
fs/ufs/balloc.c
··· 82 82 ufs_error (sb, "ufs_free_fragments", 83 83 "bit already cleared for fragment %u", i); 84 84 } 85 - 85 + 86 + inode_sub_bytes(inode, count << uspi->s_fshift); 86 87 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 87 88 uspi->cs_total.cs_nffree += count; 88 89 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); ··· 185 184 ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); 186 185 } 187 186 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 187 + inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift); 188 188 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 189 189 ufs_clusteracct (sb, ucpi, blkno, 1); 190 190 ··· 400 398 /* 401 399 * There is not enough space for user on the device 402 400 */ 403 - if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) { 404 - mutex_unlock(&UFS_SB(sb)->s_lock); 405 - UFSD("EXIT (FAILED)\n"); 406 - return 0; 401 + if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) { 402 + if (!capable(CAP_SYS_RESOURCE)) { 403 + mutex_unlock(&UFS_SB(sb)->s_lock); 404 + UFSD("EXIT (FAILED)\n"); 405 + return 0; 406 + } 407 407 } 408 408 409 409 if (goal >= uspi->s_size) ··· 423 419 if (result) { 424 420 ufs_clear_frags(inode, result + oldcount, 425 421 newcount - oldcount, locked_page != NULL); 422 + *err = 0; 426 423 write_seqlock(&UFS_I(inode)->meta_lock); 427 424 ufs_cpu_to_data_ptr(sb, p, result); 428 - write_sequnlock(&UFS_I(inode)->meta_lock); 429 - *err = 0; 430 425 UFS_I(inode)->i_lastfrag = 431 426 max(UFS_I(inode)->i_lastfrag, fragment + count); 427 + write_sequnlock(&UFS_I(inode)->meta_lock); 432 428 } 433 429 mutex_unlock(&UFS_SB(sb)->s_lock); 434 430 UFSD("EXIT, result %llu\n", (unsigned long long)result); ··· 441 437 result = ufs_add_fragments(inode, tmp, oldcount, newcount); 442 438 if (result) { 443 439 *err = 0; 440 + read_seqlock_excl(&UFS_I(inode)->meta_lock); 444 441 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, 445 442 fragment + count); 443 + read_sequnlock_excl(&UFS_I(inode)->meta_lock); 446 444 ufs_clear_frags(inode, result + oldcount, newcount - oldcount, 447 445 locked_page != NULL); 448 446 mutex_unlock(&UFS_SB(sb)->s_lock); ··· 455 449 /* 456 450 * allocate new block and move data 457 451 */ 458 - switch (fs32_to_cpu(sb, usb1->fs_optim)) { 459 - case UFS_OPTSPACE: 452 + if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) { 460 453 request = newcount; 461 - if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree 462 - > uspi->s_dsize * uspi->s_minfree / (2 * 100)) 463 - break; 464 - usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 465 - break; 466 - default: 467 - usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 468 - 469 - case UFS_OPTTIME: 454 + if (uspi->cs_total.cs_nffree < uspi->s_space_to_time) 455 + usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 456 + } else { 470 457 request = uspi->s_fpb; 471 - if (uspi->cs_total.cs_nffree < uspi->s_dsize * 472 - (uspi->s_minfree - 2) / 100) 473 - break; 474 - usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME); 475 - break; 458 + if (uspi->cs_total.cs_nffree > uspi->s_time_to_space) 459 + usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE); 476 460 } 477 461 result = ufs_alloc_fragments (inode, cgno, goal, request, err); 478 462 if (result) { 479 463 ufs_clear_frags(inode, result + oldcount, newcount - oldcount, 480 464 locked_page != NULL); 465 + mutex_unlock(&UFS_SB(sb)->s_lock); 481 466 ufs_change_blocknr(inode, fragment - oldcount, oldcount, 482 467 uspi->s_sbbase + tmp, 483 468 uspi->s_sbbase + result, locked_page); 469 + *err = 0; 484 470 
write_seqlock(&UFS_I(inode)->meta_lock); 485 471 ufs_cpu_to_data_ptr(sb, p, result); 486 - write_sequnlock(&UFS_I(inode)->meta_lock); 487 - *err = 0; 488 472 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, 489 473 fragment + count); 490 - mutex_unlock(&UFS_SB(sb)->s_lock); 474 + write_sequnlock(&UFS_I(inode)->meta_lock); 491 475 if (newcount < request) 492 476 ufs_free_fragments (inode, result + newcount, request - newcount); 493 477 ufs_free_fragments (inode, tmp, oldcount); ··· 489 493 UFSD("EXIT (FAILED)\n"); 490 494 return 0; 491 495 } 496 + 497 + static bool try_add_frags(struct inode *inode, unsigned frags) 498 + { 499 + unsigned size = frags * i_blocksize(inode); 500 + spin_lock(&inode->i_lock); 501 + __inode_add_bytes(inode, size); 502 + if (unlikely((u32)inode->i_blocks != inode->i_blocks)) { 503 + __inode_sub_bytes(inode, size); 504 + spin_unlock(&inode->i_lock); 505 + return false; 506 + } 507 + spin_unlock(&inode->i_lock); 508 + return true; 509 + } 492 510 493 511 static u64 ufs_add_fragments(struct inode *inode, u64 fragment, 494 512 unsigned oldcount, unsigned newcount) ··· 540 530 for (i = oldcount; i < newcount; i++) 541 531 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i)) 542 532 return 0; 533 + 534 + if (!try_add_frags(inode, count)) 535 + return 0; 543 536 /* 544 537 * Block can be extended 545 538 */ ··· 660 647 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 661 648 i = uspi->s_fpb - count; 662 649 650 + inode_sub_bytes(inode, i << uspi->s_fshift); 663 651 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 664 652 uspi->cs_total.cs_nffree += i; 665 653 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); ··· 670 656 671 657 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 672 658 if (result == INVBLOCK) 659 + return 0; 660 + if (!try_add_frags(inode, count)) 673 661 return 0; 674 662 for (i = 0; i < count; i++) 675 663 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); ··· 732 716 return INVBLOCK; 733 717 ucpi->c_rotor = result; 734 718 gotit: 719 + if (!try_add_frags(inode, uspi->s_fpb)) 720 + return 0; 735 721 blkno = ufs_fragstoblks(result); 736 722 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 737 723 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
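The (u32)inode->i_blocks != inode->i_blocks test in try_add_frags() above rejects growth that no longer fits the on-disk 32-bit block count, and the provisional accounting is backed out with __inode_sub_bytes(). As a rough worked bound, assuming i_blocks counts the usual 512-byte units:

    2^32 blocks * 512 bytes/block = 2 TiB of accounted data

Allocations that would push an inode past that point now fail instead of silently wrapping the count.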
+52 -44
fs/ufs/inode.c
··· 235 235 236 236 p = ufs_get_direct_data_ptr(uspi, ufsi, block); 237 237 tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p), 238 - new_size, err, locked_page); 238 + new_size - (lastfrag & uspi->s_fpbmask), err, 239 + locked_page); 239 240 return tmp != 0; 240 241 } 241 242 ··· 285 284 goal += uspi->s_fpb; 286 285 } 287 286 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), 288 - goal, uspi->s_fpb, err, locked_page); 287 + goal, nfrags, err, locked_page); 289 288 290 289 if (!tmp) { 291 290 *err = -ENOSPC; ··· 401 400 u64 phys64 = 0; 402 401 unsigned frag = fragment & uspi->s_fpbmask; 403 402 404 - if (!create) { 405 - phys64 = ufs_frag_map(inode, offsets, depth); 406 - goto out; 407 - } 403 + phys64 = ufs_frag_map(inode, offsets, depth); 404 + if (!create) 405 + goto done; 408 406 407 + if (phys64) { 408 + if (fragment >= UFS_NDIR_FRAGMENT) 409 + goto done; 410 + read_seqlock_excl(&UFS_I(inode)->meta_lock); 411 + if (fragment < UFS_I(inode)->i_lastfrag) { 412 + read_sequnlock_excl(&UFS_I(inode)->meta_lock); 413 + goto done; 414 + } 415 + read_sequnlock_excl(&UFS_I(inode)->meta_lock); 416 + } 409 417 /* This code entered only while writing ....? */ 410 418 411 419 mutex_lock(&UFS_I(inode)->truncate_mutex); ··· 458 448 } 459 449 mutex_unlock(&UFS_I(inode)->truncate_mutex); 460 450 return err; 451 + 452 + done: 453 + if (phys64) 454 + map_bh(bh_result, sb, phys64 + frag); 455 + return 0; 461 456 } 462 457 463 458 static int ufs_writepage(struct page *page, struct writeback_control *wbc) ··· 566 551 */ 567 552 inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode); 568 553 set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink)); 569 - if (inode->i_nlink == 0) { 570 - ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 571 - return -1; 572 - } 554 + if (inode->i_nlink == 0) 555 + return -ESTALE; 573 556 574 557 /* 575 558 * Linux now has 32-bit uid and gid, so we can support EFT. ··· 576 563 i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode)); 577 564 578 565 inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size); 579 - inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); 580 - inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); 581 - inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); 566 + inode->i_atime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec); 567 + inode->i_ctime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec); 568 + inode->i_mtime.tv_sec = (signed)fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec); 582 569 inode->i_mtime.tv_nsec = 0; 583 570 inode->i_atime.tv_nsec = 0; 584 571 inode->i_ctime.tv_nsec = 0; ··· 612 599 */ 613 600 inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode); 614 601 set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink)); 615 - if (inode->i_nlink == 0) { 616 - ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 617 - return -1; 618 - } 602 + if (inode->i_nlink == 0) 603 + return -ESTALE; 619 604 620 605 /* 621 606 * Linux now has 32-bit uid and gid, so we can support EFT. 
··· 653 642 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 654 643 struct buffer_head * bh; 655 644 struct inode *inode; 656 - int err; 645 + int err = -EIO; 657 646 658 647 UFSD("ENTER, ino %lu\n", ino); 659 648 ··· 688 677 err = ufs1_read_inode(inode, 689 678 ufs_inode + ufs_inotofsbo(inode->i_ino)); 690 679 } 691 - 680 + brelse(bh); 692 681 if (err) 693 682 goto bad_inode; 683 + 694 684 inode->i_version++; 695 685 ufsi->i_lastfrag = 696 686 (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift; ··· 700 688 701 689 ufs_set_inode_ops(inode); 702 690 703 - brelse(bh); 704 - 705 691 UFSD("EXIT\n"); 706 692 unlock_new_inode(inode); 707 693 return inode; 708 694 709 695 bad_inode: 710 696 iget_failed(inode); 711 - return ERR_PTR(-EIO); 697 + return ERR_PTR(err); 712 698 } 713 699 714 700 static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode) ··· 851 841 truncate_inode_pages_final(&inode->i_data); 852 842 if (want_delete) { 853 843 inode->i_size = 0; 854 - if (inode->i_blocks) 844 + if (inode->i_blocks && 845 + (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 846 + S_ISLNK(inode->i_mode))) 855 847 ufs_truncate_blocks(inode); 848 + ufs_update_inode(inode, inode_needs_sync(inode)); 856 849 } 857 850 858 851 invalidate_inode_buffers(inode); ··· 881 868 ctx->to = from + count; 882 869 } 883 870 884 - #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift) 885 871 #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) 886 872 887 873 static void ufs_trunc_direct(struct inode *inode) ··· 1112 1100 return err; 1113 1101 } 1114 1102 1115 - static void __ufs_truncate_blocks(struct inode *inode) 1103 + static void ufs_truncate_blocks(struct inode *inode) 1116 1104 { 1117 1105 struct ufs_inode_info *ufsi = UFS_I(inode); 1118 1106 struct super_block *sb = inode->i_sb; 1119 1107 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1120 1108 unsigned offsets[4]; 1121 - int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets); 1109 + int depth; 1122 1110 int depth2; 1123 1111 unsigned i; 1124 1112 struct ufs_buffer_head *ubh[3]; 1125 1113 void *p; 1126 1114 u64 block; 1127 1115 1128 - if (!depth) 1129 - return; 1116 + if (inode->i_size) { 1117 + sector_t last = (inode->i_size - 1) >> uspi->s_bshift; 1118 + depth = ufs_block_to_path(inode, last, offsets); 1119 + if (!depth) 1120 + return; 1121 + } else { 1122 + depth = 1; 1123 + } 1130 1124 1131 - /* find the last non-zero in offsets[] */ 1132 1125 for (depth2 = depth - 1; depth2; depth2--) 1133 - if (offsets[depth2]) 1126 + if (offsets[depth2] != uspi->s_apb - 1) 1134 1127 break; 1135 1128 1136 1129 mutex_lock(&ufsi->truncate_mutex); ··· 1144 1127 offsets[0] = UFS_IND_BLOCK; 1145 1128 } else { 1146 1129 /* get the blocks that should be partially emptied */ 1147 - p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]); 1130 + p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]++); 1148 1131 for (i = 0; i < depth2; i++) { 1149 - offsets[i]++; /* next branch is fully freed */ 1150 1132 block = ufs_data_ptr_to_cpu(sb, p); 1151 1133 if (!block) 1152 1134 break; ··· 1156 1140 write_sequnlock(&ufsi->meta_lock); 1157 1141 break; 1158 1142 } 1159 - p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]); 1143 + p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]++); 1160 1144 } 1161 1145 while (i--) 1162 1146 free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1); ··· 1171 1155 free_full_branch(inode, block, i - UFS_IND_BLOCK + 1); 1172 1156 } 1173 1157 } 1158 + 
read_seqlock_excl(&ufsi->meta_lock); 1174 1159 ufsi->i_lastfrag = DIRECT_FRAGMENT; 1160 + read_sequnlock_excl(&ufsi->meta_lock); 1175 1161 mark_inode_dirty(inode); 1176 1162 mutex_unlock(&ufsi->truncate_mutex); 1177 1163 } ··· 1201 1183 1202 1184 truncate_setsize(inode, size); 1203 1185 1204 - __ufs_truncate_blocks(inode); 1186 + ufs_truncate_blocks(inode); 1205 1187 inode->i_mtime = inode->i_ctime = current_time(inode); 1206 1188 mark_inode_dirty(inode); 1207 1189 out: 1208 1190 UFSD("EXIT: err %d\n", err); 1209 1191 return err; 1210 - } 1211 - 1212 - static void ufs_truncate_blocks(struct inode *inode) 1213 - { 1214 - if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1215 - S_ISLNK(inode->i_mode))) 1216 - return; 1217 - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1218 - return; 1219 - __ufs_truncate_blocks(inode); 1220 1192 } 1221 1193 1222 1194 int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+69 -27
fs/ufs/super.c
··· 480 480 usb3 = ubh_get_usb_third(uspi); 481 481 482 482 if ((mtype == UFS_MOUNT_UFSTYPE_44BSD && 483 - (usb1->fs_flags & UFS_FLAGS_UPDATED)) || 483 + (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) || 484 484 mtype == UFS_MOUNT_UFSTYPE_UFS2) { 485 485 /*we have statistic in different place, then usual*/ 486 486 uspi->cs_total.cs_ndir = fs64_to_cpu(sb, usb2->fs_un.fs_u2.cs_ndir); ··· 596 596 usb2 = ubh_get_usb_second(uspi); 597 597 usb3 = ubh_get_usb_third(uspi); 598 598 599 - if ((mtype == UFS_MOUNT_UFSTYPE_44BSD && 600 - (usb1->fs_flags & UFS_FLAGS_UPDATED)) || 601 - mtype == UFS_MOUNT_UFSTYPE_UFS2) { 599 + if (mtype == UFS_MOUNT_UFSTYPE_UFS2) { 602 600 /*we have statistic in different place, then usual*/ 603 601 usb2->fs_un.fs_u2.cs_ndir = 604 602 cpu_to_fs64(sb, uspi->cs_total.cs_ndir); ··· 606 608 cpu_to_fs64(sb, uspi->cs_total.cs_nifree); 607 609 usb3->fs_un1.fs_u2.cs_nffree = 608 610 cpu_to_fs64(sb, uspi->cs_total.cs_nffree); 609 - } else { 610 - usb1->fs_cstotal.cs_ndir = 611 - cpu_to_fs32(sb, uspi->cs_total.cs_ndir); 612 - usb1->fs_cstotal.cs_nbfree = 613 - cpu_to_fs32(sb, uspi->cs_total.cs_nbfree); 614 - usb1->fs_cstotal.cs_nifree = 615 - cpu_to_fs32(sb, uspi->cs_total.cs_nifree); 616 - usb1->fs_cstotal.cs_nffree = 617 - cpu_to_fs32(sb, uspi->cs_total.cs_nffree); 611 + goto out; 618 612 } 613 + 614 + if (mtype == UFS_MOUNT_UFSTYPE_44BSD && 615 + (usb2->fs_un.fs_u2.fs_maxbsize == usb1->fs_bsize)) { 616 + /* store stats in both old and new places */ 617 + usb2->fs_un.fs_u2.cs_ndir = 618 + cpu_to_fs64(sb, uspi->cs_total.cs_ndir); 619 + usb2->fs_un.fs_u2.cs_nbfree = 620 + cpu_to_fs64(sb, uspi->cs_total.cs_nbfree); 621 + usb3->fs_un1.fs_u2.cs_nifree = 622 + cpu_to_fs64(sb, uspi->cs_total.cs_nifree); 623 + usb3->fs_un1.fs_u2.cs_nffree = 624 + cpu_to_fs64(sb, uspi->cs_total.cs_nffree); 625 + } 626 + usb1->fs_cstotal.cs_ndir = cpu_to_fs32(sb, uspi->cs_total.cs_ndir); 627 + usb1->fs_cstotal.cs_nbfree = cpu_to_fs32(sb, uspi->cs_total.cs_nbfree); 628 + usb1->fs_cstotal.cs_nifree = cpu_to_fs32(sb, uspi->cs_total.cs_nifree); 629 + usb1->fs_cstotal.cs_nffree = cpu_to_fs32(sb, uspi->cs_total.cs_nffree); 630 + out: 619 631 ubh_mark_buffer_dirty(USPI_UBH(uspi)); 620 632 ufs_print_super_stuff(sb, usb1, usb2, usb3); 621 633 UFSD("EXIT\n"); ··· 754 746 return; 755 747 } 756 748 749 + static u64 ufs_max_bytes(struct super_block *sb) 750 + { 751 + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 752 + int bits = uspi->s_apbshift; 753 + u64 res; 754 + 755 + if (bits > 21) 756 + res = ~0ULL; 757 + else 758 + res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) + 759 + (1LL << (3*bits)); 760 + 761 + if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift)) 762 + return MAX_LFS_FILESIZE; 763 + return res << uspi->s_bshift; 764 + } 765 + 757 766 static int ufs_fill_super(struct super_block *sb, void *data, int silent) 758 767 { 759 768 struct ufs_sb_info * sbi; ··· 837 812 uspi->s_dirblksize = UFS_SECTOR_SIZE; 838 813 super_block_offset=UFS_SBLOCK; 839 814 840 - /* Keep 2Gig file limit. 
Some UFS variants need to override 841 - this but as I don't know which I'll let those in the know loosen 842 - the rules */ 815 + sb->s_maxbytes = MAX_LFS_FILESIZE; 816 + 843 817 switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) { 844 818 case UFS_MOUNT_UFSTYPE_44BSD: 845 819 UFSD("ufstype=44bsd\n"); ··· 1004 980 flags |= UFS_ST_SUN; 1005 981 } 1006 982 983 + if ((flags & UFS_ST_MASK) == UFS_ST_44BSD && 984 + uspi->s_postblformat == UFS_42POSTBLFMT) { 985 + if (!silent) 986 + pr_err("this is not a 44bsd filesystem"); 987 + goto failed; 988 + } 989 + 1007 990 /* 1008 991 * Check ufs magic number 1009 992 */ ··· 1158 1127 uspi->s_cgmask = fs32_to_cpu(sb, usb1->fs_cgmask); 1159 1128 1160 1129 if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 1161 - uspi->s_u2_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size); 1162 - uspi->s_u2_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); 1130 + uspi->s_size = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_size); 1131 + uspi->s_dsize = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); 1163 1132 } else { 1164 1133 uspi->s_size = fs32_to_cpu(sb, usb1->fs_size); 1165 1134 uspi->s_dsize = fs32_to_cpu(sb, usb1->fs_dsize); ··· 1208 1177 uspi->s_postbloff = fs32_to_cpu(sb, usb3->fs_postbloff); 1209 1178 uspi->s_rotbloff = fs32_to_cpu(sb, usb3->fs_rotbloff); 1210 1179 1180 + uspi->s_root_blocks = mul_u64_u32_div(uspi->s_dsize, 1181 + uspi->s_minfree, 100); 1182 + if (uspi->s_minfree <= 5) { 1183 + uspi->s_time_to_space = ~0ULL; 1184 + uspi->s_space_to_time = 0; 1185 + usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE); 1186 + } else { 1187 + uspi->s_time_to_space = (uspi->s_root_blocks / 2) + 1; 1188 + uspi->s_space_to_time = mul_u64_u32_div(uspi->s_dsize, 1189 + uspi->s_minfree - 2, 100) - 1; 1190 + } 1191 + 1211 1192 /* 1212 1193 * Compute another frequently used values 1213 1194 */ ··· 1255 1212 "fast symlink size (%u)\n", uspi->s_maxsymlinklen); 1256 1213 uspi->s_maxsymlinklen = maxsymlen; 1257 1214 } 1215 + sb->s_maxbytes = ufs_max_bytes(sb); 1258 1216 sb->s_max_links = UFS_LINK_MAX; 1259 1217 1260 1218 inode = ufs_iget(sb, UFS_ROOTINO); ··· 1409 1365 mutex_lock(&UFS_SB(sb)->s_lock); 1410 1366 usb3 = ubh_get_usb_third(uspi); 1411 1367 1412 - if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) { 1368 + if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 1413 1369 buf->f_type = UFS2_MAGIC; 1414 - buf->f_blocks = fs64_to_cpu(sb, usb3->fs_un1.fs_u2.fs_dsize); 1415 - } else { 1370 + else 1416 1371 buf->f_type = UFS_MAGIC; 1417 - buf->f_blocks = uspi->s_dsize; 1418 - } 1419 - buf->f_bfree = ufs_blkstofrags(uspi->cs_total.cs_nbfree) + 1420 - uspi->cs_total.cs_nffree; 1372 + 1373 + buf->f_blocks = uspi->s_dsize; 1374 + buf->f_bfree = ufs_freefrags(uspi); 1421 1375 buf->f_ffree = uspi->cs_total.cs_nifree; 1422 1376 buf->f_bsize = sb->s_blocksize; 1423 - buf->f_bavail = (buf->f_bfree > (((long)buf->f_blocks / 100) * uspi->s_minfree)) 1424 - ? (buf->f_bfree - (((long)buf->f_blocks / 100) * uspi->s_minfree)) : 0; 1377 + buf->f_bavail = (buf->f_bfree > uspi->s_root_blocks) 1378 + ? (buf->f_bfree - uspi->s_root_blocks) : 0; 1425 1379 buf->f_files = uspi->s_ncg * uspi->s_ipg; 1426 1380 buf->f_namelen = UFS_MAXNAMLEN; 1427 1381 buf->f_fsid.val[0] = (u32)id;
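ufs_max_bytes() sums the direct blocks plus the single-, double- and triple-indirect trees and converts the result to bytes. A worked example under one assumed geometry (UFS1 with 4 KiB blocks and 4-byte block pointers, so bits = s_apbshift = 10, and UFS_NDADDR = 12 direct blocks):

    blocks = 12 + 2^10 + 2^20 + 2^30      (about 1.07e9 blocks)
    bytes  = blocks << 12                 (about 4 TiB)

The result is then capped at MAX_LFS_FILESIZE, which also serves as the provisional s_maxbytes before the superblock is parsed, replacing the old hard-wired 2 GiB limit noted in the removed comment.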
+5 -4
fs/ufs/ufs_fs.h
··· 733 733 __u32 s_dblkno; /* offset of first data after cg */ 734 734 __u32 s_cgoffset; /* cylinder group offset in cylinder */ 735 735 __u32 s_cgmask; /* used to calc mod fs_ntrak */ 736 - __u32 s_size; /* number of blocks (fragments) in fs */ 737 - __u32 s_dsize; /* number of data blocks in fs */ 738 - __u64 s_u2_size; /* ufs2: number of blocks (fragments) in fs */ 739 - __u64 s_u2_dsize; /*ufs2: number of data blocks in fs */ 736 + __u64 s_size; /* number of blocks (fragments) in fs */ 737 + __u64 s_dsize; /* number of data blocks in fs */ 740 738 __u32 s_ncg; /* number of cylinder groups */ 741 739 __u32 s_bsize; /* size of basic blocks */ 742 740 __u32 s_fsize; /* size of fragments */ ··· 791 793 __u32 s_maxsymlinklen;/* upper limit on fast symlinks' size */ 792 794 __s32 fs_magic; /* filesystem magic */ 793 795 unsigned int s_dirblksize; 796 + __u64 s_root_blocks; 797 + __u64 s_time_to_space; 798 + __u64 s_space_to_time; 794 799 }; 795 800 796 801 /*
+8 -9
fs/ufs/util.c
··· 243 243 struct page *ufs_get_locked_page(struct address_space *mapping, 244 244 pgoff_t index) 245 245 { 246 - struct page *page; 247 - 248 - page = find_lock_page(mapping, index); 246 + struct inode *inode = mapping->host; 247 + struct page *page = find_lock_page(mapping, index); 249 248 if (!page) { 250 249 page = read_mapping_page(mapping, index, NULL); 251 250 ··· 252 253 printk(KERN_ERR "ufs_change_blocknr: " 253 254 "read_mapping_page error: ino %lu, index: %lu\n", 254 255 mapping->host->i_ino, index); 255 - goto out; 256 + return page; 256 257 } 257 258 258 259 lock_page(page); ··· 261 262 /* Truncate got there first */ 262 263 unlock_page(page); 263 264 put_page(page); 264 - page = NULL; 265 - goto out; 265 + return NULL; 266 266 } 267 267 268 268 if (!PageUptodate(page) || PageError(page)) { ··· 270 272 271 273 printk(KERN_ERR "ufs_change_blocknr: " 272 274 "can not read page: ino %lu, index: %lu\n", 273 - mapping->host->i_ino, index); 275 + inode->i_ino, index); 274 276 275 - page = ERR_PTR(-EIO); 277 + return ERR_PTR(-EIO); 276 278 } 277 279 } 278 - out: 280 + if (!page_has_buffers(page)) 281 + create_empty_buffers(page, 1 << inode->i_blkbits, 0); 279 282 return page; 280 283 }
+9 -10
fs/ufs/util.h
··· 350 350 #define ubh_blkmap(ubh,begin,bit) \ 351 351 ((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb))) 352 352 353 - /* 354 - * Determine the number of available frags given a 355 - * percentage to hold in reserve. 356 - */ 357 353 static inline u64 358 - ufs_freespace(struct ufs_sb_private_info *uspi, int percentreserved) 354 + ufs_freefrags(struct ufs_sb_private_info *uspi) 359 355 { 360 356 return ufs_blkstofrags(uspi->cs_total.cs_nbfree) + 361 - uspi->cs_total.cs_nffree - 362 - (uspi->s_dsize * (percentreserved) / 100); 357 + uspi->cs_total.cs_nffree; 363 358 } 364 359 365 360 /* ··· 468 473 static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi, 469 474 struct ufs_buffer_head * ubh, unsigned begin, unsigned block) 470 475 { 476 + u8 mask; 471 477 switch (uspi->s_fpb) { 472 478 case 8: 473 479 return (*ubh_get_addr (ubh, begin + block) == 0xff); 474 480 case 4: 475 - return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2))); 481 + mask = 0x0f << ((block & 0x01) << 2); 482 + return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask; 476 483 case 2: 477 - return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1))); 484 + mask = 0x03 << ((block & 0x03) << 1); 485 + return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask; 478 486 case 1: 479 - return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07))); 487 + mask = 0x01 << (block & 0x07); 488 + return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask; 480 489 } 481 490 return 0; 482 491 }
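The _ubh_isblockset_() change is subtle: the old code compared the whole byte for equality against the block's mask, so a block whose neighbour within the same byte was also allocated was wrongly reported as not set. A small illustration for the 4-fragments-per-block case (values picked purely for the example):

    u8 byte = 0xff;                        /* both blocks in this byte fully set */
    u8 mask = 0x0f << ((1 & 0x01) << 2);   /* block 1 within the byte -> 0xf0 */

    bool old_result = (byte == mask);            /* false: equality test fails */
    bool new_result = ((byte & mask) == mask);   /* true: only block 1's bits matter */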
+21 -8
fs/userfaultfd.c
··· 340 340 bool must_wait, return_to_userland; 341 341 long blocking_state; 342 342 343 - BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); 344 - 345 343 ret = VM_FAULT_SIGBUS; 344 + 345 + /* 346 + * We don't do userfault handling for the final child pid update. 347 + * 348 + * We also don't do userfault handling during 349 + * coredumping. hugetlbfs has the special 350 + * follow_hugetlb_page() to skip missing pages in the 351 + * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with 352 + * the no_page_table() helper in follow_page_mask(), but the 353 + * shmem_vm_ops->fault method is invoked even during 354 + * coredumping without mmap_sem and it ends up here. 355 + */ 356 + if (current->flags & (PF_EXITING|PF_DUMPCORE)) 357 + goto out; 358 + 359 + /* 360 + * Coredumping runs without mmap_sem so we can only check that 361 + * the mmap_sem is held, if PF_DUMPCORE was not set. 362 + */ 363 + WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem)); 364 + 346 365 ctx = vmf->vma->vm_userfaultfd_ctx.ctx; 347 366 if (!ctx) 348 367 goto out; ··· 377 358 * caller of handle_userfault to release the mmap_sem. 378 359 */ 379 360 if (unlikely(ACCESS_ONCE(ctx->released))) 380 - goto out; 381 - 382 - /* 383 - * We don't do userfault handling for the final child pid update. 384 - */ 385 - if (current->flags & PF_EXITING) 386 361 goto out; 387 362 388 363 /*
+5 -2
fs/xfs/xfs_aops.c
··· 1316 1316 * The swap code (ab-)uses ->bmap to get a block mapping and then 1317 1317 * bypasseѕ the file system for actual I/O. We really can't allow 1318 1318 * that on reflinks inodes, so we have to skip out here. And yes, 1319 - * 0 is the magic code for a bmap error.. 1319 + * 0 is the magic code for a bmap error. 1320 + * 1321 + * Since we don't pass back blockdev info, we can't return bmap 1322 + * information for rt files either. 1320 1323 */ 1321 - if (xfs_is_reflink_inode(ip)) 1324 + if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip)) 1322 1325 return 0; 1323 1326 1324 1327 filemap_write_and_wait(mapping);
+26 -12
fs/xfs/xfs_buf.c
··· 97 97 xfs_buf_ioacct_inc( 98 98 struct xfs_buf *bp) 99 99 { 100 - if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT)) 100 + if (bp->b_flags & XBF_NO_IOACCT) 101 101 return; 102 102 103 103 ASSERT(bp->b_flags & XBF_ASYNC); 104 - bp->b_flags |= _XBF_IN_FLIGHT; 105 - percpu_counter_inc(&bp->b_target->bt_io_count); 104 + spin_lock(&bp->b_lock); 105 + if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { 106 + bp->b_state |= XFS_BSTATE_IN_FLIGHT; 107 + percpu_counter_inc(&bp->b_target->bt_io_count); 108 + } 109 + spin_unlock(&bp->b_lock); 106 110 } 107 111 108 112 /* ··· 114 110 * freed and unaccount from the buftarg. 115 111 */ 116 112 static inline void 113 + __xfs_buf_ioacct_dec( 114 + struct xfs_buf *bp) 115 + { 116 + lockdep_assert_held(&bp->b_lock); 117 + 118 + if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { 119 + bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; 120 + percpu_counter_dec(&bp->b_target->bt_io_count); 121 + } 122 + } 123 + 124 + static inline void 117 125 xfs_buf_ioacct_dec( 118 126 struct xfs_buf *bp) 119 127 { 120 - if (!(bp->b_flags & _XBF_IN_FLIGHT)) 121 - return; 122 - 123 - bp->b_flags &= ~_XBF_IN_FLIGHT; 124 - percpu_counter_dec(&bp->b_target->bt_io_count); 128 + spin_lock(&bp->b_lock); 129 + __xfs_buf_ioacct_dec(bp); 130 + spin_unlock(&bp->b_lock); 125 131 } 126 132 127 133 /* ··· 163 149 * unaccounted (released to LRU) before that occurs. Drop in-flight 164 150 * status now to preserve accounting consistency. 165 151 */ 166 - xfs_buf_ioacct_dec(bp); 167 - 168 152 spin_lock(&bp->b_lock); 153 + __xfs_buf_ioacct_dec(bp); 154 + 169 155 atomic_set(&bp->b_lru_ref, 0); 170 156 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && 171 157 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) ··· 993 979 * ensures the decrement occurs only once per-buf. 994 980 */ 995 981 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) 996 - xfs_buf_ioacct_dec(bp); 982 + __xfs_buf_ioacct_dec(bp); 997 983 goto out_unlock; 998 984 } 999 985 1000 986 /* the last reference has been dropped ... */ 1001 - xfs_buf_ioacct_dec(bp); 987 + __xfs_buf_ioacct_dec(bp); 1002 988 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { 1003 989 /* 1004 990 * If the buffer is added to the LRU take a new reference to the
+2 -3
fs/xfs/xfs_buf.h
··· 63 63 #define _XBF_KMEM (1 << 21)/* backed by heap memory */ 64 64 #define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ 65 65 #define _XBF_COMPOUND (1 << 23)/* compound buffer */ 66 - #define _XBF_IN_FLIGHT (1 << 25) /* I/O in flight, for accounting purposes */ 67 66 68 67 typedef unsigned int xfs_buf_flags_t; 69 68 ··· 83 84 { _XBF_PAGES, "PAGES" }, \ 84 85 { _XBF_KMEM, "KMEM" }, \ 85 86 { _XBF_DELWRI_Q, "DELWRI_Q" }, \ 86 - { _XBF_COMPOUND, "COMPOUND" }, \ 87 - { _XBF_IN_FLIGHT, "IN_FLIGHT" } 87 + { _XBF_COMPOUND, "COMPOUND" } 88 88 89 89 90 90 /* 91 91 * Internal state flags. 92 92 */ 93 93 #define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */ 94 + #define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */ 94 95 95 96 /* 96 97 * The xfs_buftarg contains 2 notions of "sector size" -
+2 -3
fs/xfs/xfs_icache.c
··· 66 66 67 67 XFS_STATS_INC(mp, vn_active); 68 68 ASSERT(atomic_read(&ip->i_pincount) == 0); 69 - ASSERT(!spin_is_locked(&ip->i_flags_lock)); 70 69 ASSERT(!xfs_isiflocked(ip)); 71 70 ASSERT(ip->i_ino == 0); 72 71 ··· 189 190 { 190 191 struct xfs_mount *mp = pag->pag_mount; 191 192 192 - ASSERT(spin_is_locked(&pag->pag_ici_lock)); 193 + lockdep_assert_held(&pag->pag_ici_lock); 193 194 if (pag->pag_ici_reclaimable++) 194 195 return; 195 196 ··· 211 212 { 212 213 struct xfs_mount *mp = pag->pag_mount; 213 214 214 - ASSERT(spin_is_locked(&pag->pag_ici_lock)); 215 + lockdep_assert_held(&pag->pag_ici_lock); 215 216 if (--pag->pag_ici_reclaimable) 216 217 return; 217 218
+2 -1
include/acpi/acpi_bus.h
··· 210 210 u32 of_compatible_ok:1; 211 211 u32 coherent_dma:1; 212 212 u32 cca_seen:1; 213 - u32 reserved:20; 213 + u32 spi_i2c_slave:1; 214 + u32 reserved:19; 214 215 }; 215 216 216 217 /* File System */
+14
include/acpi/actbl.h
··· 374 374 u16 validation_count; 375 375 }; 376 376 377 + /* 378 + * Maximum value of the validation_count field in struct acpi_table_desc. 379 + * When reached, validation_count cannot be changed any more and the table will 380 + * be permanently regarded as validated. 381 + * 382 + * This is to prevent situations in which unbalanced table get/put operations 383 + * may cause premature table unmapping in the OS to happen. 384 + * 385 + * The maximum validation count can be defined to any value, but should be 386 + * greater than the maximum number of OS early stage mapping slots to avoid 387 + * leaking early stage table mappings to the late stage. 388 + */ 389 + #define ACPI_MAX_TABLE_VALIDATIONS ACPI_UINT16_MAX 390 + 377 391 /* Masks for Flags field above */ 378 392 379 393 #define ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL (0) /* Virtual address, external maintained */
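The new limit describes a saturating counter: once validation_count reaches ACPI_MAX_TABLE_VALIDATIONS it is frozen, so an unbalanced put can no longer drive it to zero and unmap a table that early-boot code still relies on. A hedged sketch of that pattern (the real get/put logic lives in ACPICA's table manager, not in this header):

    static void example_table_get(struct acpi_table_desc *desc)
    {
            if (desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS)
                    desc->validation_count++;
            /* once saturated, the count never changes again and the table
             * is treated as permanently validated */
    }

    static void example_table_put(struct acpi_table_desc *desc)
    {
            if (desc->validation_count &&
                desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS)
                    desc->validation_count--;
    }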
+51
include/drm/drm_dp_helper.h
··· 913 913 int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); 914 914 int drm_dp_stop_crc(struct drm_dp_aux *aux); 915 915 916 + struct drm_dp_dpcd_ident { 917 + u8 oui[3]; 918 + u8 device_id[6]; 919 + u8 hw_rev; 920 + u8 sw_major_rev; 921 + u8 sw_minor_rev; 922 + } __packed; 923 + 924 + /** 925 + * struct drm_dp_desc - DP branch/sink device descriptor 926 + * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). 927 + * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. 928 + */ 929 + struct drm_dp_desc { 930 + struct drm_dp_dpcd_ident ident; 931 + u32 quirks; 932 + }; 933 + 934 + int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, 935 + bool is_branch); 936 + 937 + /** 938 + * enum drm_dp_quirk - Display Port sink/branch device specific quirks 939 + * 940 + * Display Port sink and branch devices in the wild have a variety of bugs, try 941 + * to collect them here. The quirks are shared, but it's up to the drivers to 942 + * implement workarounds for them. 943 + */ 944 + enum drm_dp_quirk { 945 + /** 946 + * @DP_DPCD_QUIRK_LIMITED_M_N: 947 + * 948 + * The device requires main link attributes Mvid and Nvid to be limited 949 + * to 16 bits. 950 + */ 951 + DP_DPCD_QUIRK_LIMITED_M_N, 952 + }; 953 + 954 + /** 955 + * drm_dp_has_quirk() - does the DP device have a specific quirk 956 + * @desc: Device decriptor filled by drm_dp_read_desc() 957 + * @quirk: Quirk to query for 958 + * 959 + * Return true if DP device identified by @desc has @quirk. 960 + */ 961 + static inline bool 962 + drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) 963 + { 964 + return desc->quirks & BIT(quirk); 965 + } 966 + 916 967 #endif /* _DRM_DP_HELPER_H_ */
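Expected consumption of the new descriptor and quirk helpers, sketched from the declarations above (aux stands for the driver's struct drm_dp_aux, the 0-on-success convention for drm_dp_read_desc() is assumed, and the branch-vs-sink decision is simplified to a sink):

    struct drm_dp_desc desc;
    bool limit_m_n = false;

    if (drm_dp_read_desc(aux, &desc, false /* sink, not branch */) == 0)
            limit_m_n = drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N);

    /* a driver would then clamp the Mvid/Nvid link attributes to 16 bits
     * whenever limit_m_n is set */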
+2
include/dt-bindings/clock/sun50i-a64-ccu.h
··· 43 43 #ifndef _DT_BINDINGS_CLK_SUN50I_A64_H_ 44 44 #define _DT_BINDINGS_CLK_SUN50I_A64_H_ 45 45 46 + #define CLK_PLL_PERIPH0 11 47 + 46 48 #define CLK_BUS_MIPI_DSI 28 47 49 #define CLK_BUS_CE 29 48 50 #define CLK_BUS_DMA 30
+2
include/dt-bindings/clock/sun8i-h3-ccu.h
··· 43 43 #ifndef _DT_BINDINGS_CLK_SUN8I_H3_H_ 44 44 #define _DT_BINDINGS_CLK_SUN8I_H3_H_ 45 45 46 + #define CLK_PLL_PERIPH0 9 47 + 46 48 #define CLK_CPUX 14 47 49 48 50 #define CLK_BUS_CE 20
+1
include/linux/bio.h
··· 426 426 427 427 extern void bio_init(struct bio *bio, struct bio_vec *table, 428 428 unsigned short max_vecs); 429 + extern void bio_uninit(struct bio *); 429 430 extern void bio_reset(struct bio *); 430 431 void bio_chain(struct bio *, struct bio *); 431 432
+4
include/linux/blkdev.h
··· 391 391 int nr_rqs[2]; /* # allocated [a]sync rqs */ 392 392 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */ 393 393 394 + atomic_t shared_hctx_restart; 395 + 394 396 struct blk_queue_stats *stats; 395 397 struct rq_wb *rq_wb; 396 398 ··· 588 586 589 587 size_t cmd_size; 590 588 void *rq_alloc_data; 589 + 590 + struct work_struct release_work; 591 591 }; 592 592 593 593 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
+1
include/linux/cgroup-defs.h
··· 48 48 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ 49 49 CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ 50 50 CSS_VISIBLE = (1 << 3), /* css is visible to userland */ 51 + CSS_DYING = (1 << 4), /* css is dying */ 51 52 }; 52 53 53 54 /* bits in struct cgroup flags field */
+20
include/linux/cgroup.h
··· 344 344 } 345 345 346 346 /** 347 + * css_is_dying - test whether the specified css is dying 348 + * @css: target css 349 + * 350 + * Test whether @css is in the process of offlining or already offline. In 351 + * most cases, ->css_online() and ->css_offline() callbacks should be 352 + * enough; however, the actual offline operations are RCU delayed and this 353 + * test returns %true also when @css is scheduled to be offlined. 354 + * 355 + * This is useful, for example, when the use case requires synchronous 356 + * behavior with respect to cgroup removal. cgroup removal schedules css 357 + * offlining but the css can seem alive while the operation is being 358 + * delayed. If the delay affects user visible semantics, this test can be 359 + * used to resolve the situation. 360 + */ 361 + static inline bool css_is_dying(struct cgroup_subsys_state *css) 362 + { 363 + return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); 364 + } 365 + 366 + /** 347 367 * css_put - put a css reference 348 368 * @css: target css 349 369 *
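A hypothetical controller-side use of css_is_dying(), in the spirit of the kerneldoc above (the function name and error code are illustrative only):

    static int example_css_configure(struct cgroup_subsys_state *css, u64 val)
    {
            /* refuse new configuration once removal has been scheduled,
             * even though ->css_offline() may not have run yet */
            if (css_is_dying(css))
                    return -ENODEV;

            /* ... apply val ... */
            return 0;
    }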
+8
include/linux/compiler-clang.h
··· 15 15 * with any version that can compile the kernel 16 16 */ 17 17 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) 18 + 19 + /* 20 + * GCC does not warn about unused static inline functions for 21 + * -Wunused-function. This turns out to avoid the need for complex #ifdef 22 + * directives. Suppress the warning in clang as well. 23 + */ 24 + #undef inline 25 + #define inline inline __attribute__((unused)) notrace
+2 -1
include/linux/configfs.h
··· 74 74 const char *name, 75 75 struct config_item_type *type); 76 76 77 - extern struct config_item * config_item_get(struct config_item *); 77 + extern struct config_item *config_item_get(struct config_item *); 78 + extern struct config_item *config_item_get_unless_zero(struct config_item *); 78 79 extern void config_item_put(struct config_item *); 79 80 80 81 struct config_item_type {
+30
include/linux/device.h
··· 1200 1200 const char *fmt, ...); 1201 1201 extern void device_destroy(struct class *cls, dev_t devt); 1202 1202 1203 + extern int __must_check device_add_groups(struct device *dev, 1204 + const struct attribute_group **groups); 1205 + extern void device_remove_groups(struct device *dev, 1206 + const struct attribute_group **groups); 1207 + 1208 + static inline int __must_check device_add_group(struct device *dev, 1209 + const struct attribute_group *grp) 1210 + { 1211 + const struct attribute_group *groups[] = { grp, NULL }; 1212 + 1213 + return device_add_groups(dev, groups); 1214 + } 1215 + 1216 + static inline void device_remove_group(struct device *dev, 1217 + const struct attribute_group *grp) 1218 + { 1219 + const struct attribute_group *groups[] = { grp, NULL }; 1220 + 1221 + return device_remove_groups(dev, groups); 1222 + } 1223 + 1224 + extern int __must_check devm_device_add_groups(struct device *dev, 1225 + const struct attribute_group **groups); 1226 + extern void devm_device_remove_groups(struct device *dev, 1227 + const struct attribute_group **groups); 1228 + extern int __must_check devm_device_add_group(struct device *dev, 1229 + const struct attribute_group *grp); 1230 + extern void devm_device_remove_group(struct device *dev, 1231 + const struct attribute_group *grp); 1232 + 1203 1233 /* 1204 1234 * Platform "fixup" functions - allow the platform to have their say 1205 1235 * about devices and actions that the general device layer doesn't
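Sketch of the new single-group convenience wrappers; the "foo" attribute and functions below are placeholders, not taken from the patch.

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);

static struct attribute *foo_attrs[] = {
        &dev_attr_foo.attr,
        NULL,
};
static const struct attribute_group foo_group = {
        .attrs = foo_attrs,
};

static int foo_create_sysfs(struct device *dev)
{
        /* Same as device_add_groups() with a one-entry, NULL-terminated list. */
        return device_add_group(dev, &foo_group);
}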
+1
include/linux/dma-iommu.h
··· 78 78 79 79 struct iommu_domain; 80 80 struct msi_msg; 81 + struct device; 81 82 82 83 static inline int iommu_dma_init(void) 83 84 {
+1 -1
include/linux/dmi.h
··· 136 136 static inline int dmi_name_in_serial(const char *s) { return 0; } 137 137 #define dmi_available 0 138 138 static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), 139 - void *private_data) { return -1; } 139 + void *private_data) { return -ENXIO; } 140 140 static inline bool dmi_match(enum dmi_field f, const char *str) 141 141 { return false; } 142 142 static inline void dmi_memdev_name(u16 handle, const char **bank,
+1 -1
include/linux/elevator.h
··· 153 153 #endif 154 154 155 155 /* managed by elevator core */ 156 - char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ 156 + char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */ 157 157 struct list_head list; 158 158 }; 159 159
+1 -1
include/linux/gfp.h
··· 41 41 #define ___GFP_WRITE 0x800000u 42 42 #define ___GFP_KSWAPD_RECLAIM 0x1000000u 43 43 #ifdef CONFIG_LOCKDEP 44 - #define ___GFP_NOLOCKDEP 0x4000000u 44 + #define ___GFP_NOLOCKDEP 0x2000000u 45 45 #else 46 46 #define ___GFP_NOLOCKDEP 0 47 47 #endif
+7
include/linux/gpio/machine.h
··· 56 56 .flags = _flags, \ 57 57 } 58 58 59 + #ifdef CONFIG_GPIOLIB 59 60 void gpiod_add_lookup_table(struct gpiod_lookup_table *table); 60 61 void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); 62 + #else 63 + static inline 64 + void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} 65 + static inline 66 + void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} 67 + #endif 61 68 62 69 #endif /* __LINUX_GPIO_MACHINE_H */
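Hedged sketch of why the !CONFIG_GPIOLIB stubs help: board code can register a lookup table unconditionally and still link when gpiolib is disabled. The chip label, offset and consumer name below are made up.

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table board_gpios = {
        .dev_id = "my-device.0",
        .table = {
                GPIO_LOOKUP("gpiochip0", 12, "reset", GPIO_ACTIVE_LOW),
                { },
        },
};

static void board_register_gpios(void)
{
        /* Compiles to an empty inline call when GPIOLIB is not configured. */
        gpiod_add_lookup_table(&board_gpios);
}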
-1
include/linux/hashtable.h
··· 167 167 /** 168 168 * hash_for_each_possible_rcu - iterate over all possible objects hashing to the 169 169 * same bucket in an rcu enabled hashtable 170 - * in a rcu enabled hashtable 171 170 * @name: hashtable to iterate 172 171 * @obj: the type * to use as a loop cursor for each entry 173 172 * @member: the name of the hlist_node within the struct
+4
include/linux/irqchip/arm-gic-v3.h
··· 417 417 #define ICH_HCR_EN (1 << 0) 418 418 #define ICH_HCR_UIE (1 << 1) 419 419 420 + #define ICH_VMCR_ACK_CTL_SHIFT 2 421 + #define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT) 422 + #define ICH_VMCR_FIQ_EN_SHIFT 3 423 + #define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT) 420 424 #define ICH_VMCR_CBPR_SHIFT 4 421 425 #define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT) 422 426 #define ICH_VMCR_EOIM_SHIFT 9
+25 -3
include/linux/irqchip/arm-gic.h
··· 25 25 #define GICC_ENABLE 0x1 26 26 #define GICC_INT_PRI_THRESHOLD 0xf0 27 27 28 - #define GIC_CPU_CTRL_EOImodeNS (1 << 9) 28 + #define GIC_CPU_CTRL_EnableGrp0_SHIFT 0 29 + #define GIC_CPU_CTRL_EnableGrp0 (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT) 30 + #define GIC_CPU_CTRL_EnableGrp1_SHIFT 1 31 + #define GIC_CPU_CTRL_EnableGrp1 (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT) 32 + #define GIC_CPU_CTRL_AckCtl_SHIFT 2 33 + #define GIC_CPU_CTRL_AckCtl (1 << GIC_CPU_CTRL_AckCtl_SHIFT) 34 + #define GIC_CPU_CTRL_FIQEn_SHIFT 3 35 + #define GIC_CPU_CTRL_FIQEn (1 << GIC_CPU_CTRL_FIQEn_SHIFT) 36 + #define GIC_CPU_CTRL_CBPR_SHIFT 4 37 + #define GIC_CPU_CTRL_CBPR (1 << GIC_CPU_CTRL_CBPR_SHIFT) 38 + #define GIC_CPU_CTRL_EOImodeNS_SHIFT 9 39 + #define GIC_CPU_CTRL_EOImodeNS (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT) 29 40 30 41 #define GICC_IAR_INT_ID_MASK 0x3ff 31 42 #define GICC_INT_SPURIOUS 1023 ··· 95 84 #define GICH_LR_EOI (1 << 19) 96 85 #define GICH_LR_HW (1 << 31) 97 86 98 - #define GICH_VMCR_CTRL_SHIFT 0 99 - #define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) 87 + #define GICH_VMCR_ENABLE_GRP0_SHIFT 0 88 + #define GICH_VMCR_ENABLE_GRP0_MASK (1 << GICH_VMCR_ENABLE_GRP0_SHIFT) 89 + #define GICH_VMCR_ENABLE_GRP1_SHIFT 1 90 + #define GICH_VMCR_ENABLE_GRP1_MASK (1 << GICH_VMCR_ENABLE_GRP1_SHIFT) 91 + #define GICH_VMCR_ACK_CTL_SHIFT 2 92 + #define GICH_VMCR_ACK_CTL_MASK (1 << GICH_VMCR_ACK_CTL_SHIFT) 93 + #define GICH_VMCR_FIQ_EN_SHIFT 3 94 + #define GICH_VMCR_FIQ_EN_MASK (1 << GICH_VMCR_FIQ_EN_SHIFT) 95 + #define GICH_VMCR_CBPR_SHIFT 4 96 + #define GICH_VMCR_CBPR_MASK (1 << GICH_VMCR_CBPR_SHIFT) 97 + #define GICH_VMCR_EOI_MODE_SHIFT 9 98 + #define GICH_VMCR_EOI_MODE_MASK (1 << GICH_VMCR_EOI_MODE_SHIFT) 99 + 100 100 #define GICH_VMCR_PRIMASK_SHIFT 27 101 101 #define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT) 102 102 #define GICH_VMCR_BINPOINT_SHIFT 21
+5 -1
include/linux/jiffies.h
··· 64 64 /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ 65 65 #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) 66 66 67 + #ifndef __jiffy_arch_data 68 + #define __jiffy_arch_data 69 + #endif 70 + 67 71 /* 68 72 * The 64-bit value is not atomic - you MUST NOT read it 69 73 * without sampling the sequence number in jiffies_lock. 70 74 * get_jiffies_64() will do this for you as appropriate. 71 75 */ 72 76 extern u64 __cacheline_aligned_in_smp jiffies_64; 73 - extern unsigned long volatile __cacheline_aligned_in_smp jiffies; 77 + extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies; 74 78 75 79 #if (BITS_PER_LONG < 64) 76 80 u64 get_jiffies_64(void);
-1
include/linux/key.h
··· 173 173 #ifdef KEY_DEBUGGING 174 174 unsigned magic; 175 175 #define KEY_DEBUG_MAGIC 0x18273645u 176 - #define KEY_DEBUG_MAGIC_X 0xf8e9dacbu 177 176 #endif 178 177 179 178 unsigned long flags; /* status flags (change with bitops) */
+2
include/linux/kobject.h
··· 57 57 KOBJ_MOVE, 58 58 KOBJ_ONLINE, 59 59 KOBJ_OFFLINE, 60 + KOBJ_BIND, 61 + KOBJ_UNBIND, 60 62 KOBJ_MAX 61 63 }; 62 64
+8
include/linux/memblock.h
··· 425 425 } 426 426 #endif 427 427 428 + extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, 429 + phys_addr_t end_addr); 428 430 #else 429 431 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) 432 + { 433 + return 0; 434 + } 435 + 436 + static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, 437 + phys_addr_t end_addr) 430 438 { 431 439 return 0; 432 440 }
+1
include/linux/mlx4/qp.h
··· 470 470 u16 rate_val; 471 471 }; 472 472 473 + struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn); 473 474 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 474 475 enum mlx4_update_qp_attr attr, 475 476 struct mlx4_update_qp_params *params);
+9 -1
include/linux/mlx5/mlx5_ifc.h
··· 766 766 MLX5_CAP_PORT_TYPE_ETH = 0x1, 767 767 }; 768 768 769 + enum { 770 + MLX5_CAP_UMR_FENCE_STRONG = 0x0, 771 + MLX5_CAP_UMR_FENCE_SMALL = 0x1, 772 + MLX5_CAP_UMR_FENCE_NONE = 0x2, 773 + }; 774 + 769 775 struct mlx5_ifc_cmd_hca_cap_bits { 770 776 u8 reserved_at_0[0x80]; 771 777 ··· 881 875 u8 reserved_at_202[0x1]; 882 876 u8 ipoib_enhanced_offloads[0x1]; 883 877 u8 ipoib_basic_offloads[0x1]; 884 - u8 reserved_at_205[0xa]; 878 + u8 reserved_at_205[0x5]; 879 + u8 umr_fence[0x2]; 880 + u8 reserved_at_20c[0x3]; 885 881 u8 drain_sigerr[0x1]; 886 882 u8 cmdif_checksum[0x2]; 887 883 u8 sigerr_cqe[0x1];
+36 -28
include/linux/mm.h
··· 1393 1393 1394 1394 int get_cmdline(struct task_struct *task, char *buffer, int buflen); 1395 1395 1396 - /* Is the vma a continuation of the stack vma above it? */ 1397 - static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) 1398 - { 1399 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); 1400 - } 1401 - 1402 1396 static inline bool vma_is_anonymous(struct vm_area_struct *vma) 1403 1397 { 1404 1398 return !vma->vm_ops; ··· 1407 1413 #else 1408 1414 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; } 1409 1415 #endif 1410 - 1411 - static inline int stack_guard_page_start(struct vm_area_struct *vma, 1412 - unsigned long addr) 1413 - { 1414 - return (vma->vm_flags & VM_GROWSDOWN) && 1415 - (vma->vm_start == addr) && 1416 - !vma_growsdown(vma->vm_prev, addr); 1417 - } 1418 - 1419 - /* Is the vma a continuation of the stack vma below it? */ 1420 - static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) 1421 - { 1422 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); 1423 - } 1424 - 1425 - static inline int stack_guard_page_end(struct vm_area_struct *vma, 1426 - unsigned long addr) 1427 - { 1428 - return (vma->vm_flags & VM_GROWSUP) && 1429 - (vma->vm_end == addr) && 1430 - !vma_growsup(vma->vm_next, addr); 1431 - } 1432 1416 1433 1417 int vma_is_stack_for_current(struct vm_area_struct *vma); 1434 1418 ··· 2194 2222 pgoff_t offset, 2195 2223 unsigned long size); 2196 2224 2225 + extern unsigned long stack_guard_gap; 2197 2226 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ 2198 2227 extern int expand_stack(struct vm_area_struct *vma, unsigned long address); 2199 2228 ··· 2221 2248 if (vma && end_addr <= vma->vm_start) 2222 2249 vma = NULL; 2223 2250 return vma; 2251 + } 2252 + 2253 + static inline unsigned long vm_start_gap(struct vm_area_struct *vma) 2254 + { 2255 + unsigned long vm_start = vma->vm_start; 2256 + 2257 + if (vma->vm_flags & VM_GROWSDOWN) { 2258 + vm_start -= stack_guard_gap; 2259 + if (vm_start > vma->vm_start) 2260 + vm_start = 0; 2261 + } 2262 + return vm_start; 2263 + } 2264 + 2265 + static inline unsigned long vm_end_gap(struct vm_area_struct *vma) 2266 + { 2267 + unsigned long vm_end = vma->vm_end; 2268 + 2269 + if (vma->vm_flags & VM_GROWSUP) { 2270 + vm_end += stack_guard_gap; 2271 + if (vm_end < vma->vm_end) 2272 + vm_end = -PAGE_SIZE; 2273 + } 2274 + return vm_end; 2224 2275 } 2225 2276 2226 2277 static inline unsigned long vma_pages(struct vm_area_struct *vma) ··· 2323 2326 #define FOLL_MLOCK 0x1000 /* lock present pages */ 2324 2327 #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ 2325 2328 #define FOLL_COW 0x4000 /* internal GUP flag */ 2329 + 2330 + static inline int vm_fault_to_errno(int vm_fault, int foll_flags) 2331 + { 2332 + if (vm_fault & VM_FAULT_OOM) 2333 + return -ENOMEM; 2334 + if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 2335 + return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT; 2336 + if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 2337 + return -EFAULT; 2338 + return 0; 2339 + } 2326 2340 2327 2341 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2328 2342 void *data);
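Sketch (assumption) of the pattern vm_fault_to_errno() enables: callers such as the mm/gup.c hunk later in this merge translate the VM_FAULT_* error bits to an errno in one place instead of open-coding the OOM/HWPOISON/SIGBUS checks.

#include <linux/mm.h>

static int my_fault_in_page(struct vm_area_struct *vma, unsigned long addr,
                            unsigned int fault_flags, unsigned int foll_flags)
{
        int ret = handle_mm_fault(vma, addr, fault_flags);

        if (ret & VM_FAULT_ERROR) {
                int err = vm_fault_to_errno(ret, foll_flags);

                if (err)
                        return err;
        }
        return 0;
}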
+1
include/linux/mmzone.h
··· 678 678 * is the first PFN that needs to be initialised. 679 679 */ 680 680 unsigned long first_deferred_pfn; 681 + unsigned long static_init_size; 681 682 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 682 683 683 684 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+1
include/linux/mod_devicetable.h
··· 467 467 DMI_PRODUCT_VERSION, 468 468 DMI_PRODUCT_SERIAL, 469 469 DMI_PRODUCT_UUID, 470 + DMI_PRODUCT_FAMILY, 470 471 DMI_BOARD_VENDOR, 471 472 DMI_BOARD_NAME, 472 473 DMI_BOARD_VERSION,
+1 -1
include/linux/moduleparam.h
··· 457 457 hwparam_ioport, /* Module parameter configures an I/O port */ 458 458 hwparam_iomem, /* Module parameter configures an I/O mem address */ 459 459 hwparam_ioport_or_iomem, /* Module parameter could be either, depending on other option */ 460 - hwparam_irq, /* Module parameter configures an I/O port */ 460 + hwparam_irq, /* Module parameter configures an IRQ */ 461 461 hwparam_dma, /* Module parameter configures a DMA channel */ 462 462 hwparam_dma_addr, /* Module parameter configures a DMA buffer address */ 463 463 hwparam_other, /* Module parameter configures some other value */
+10 -5
include/linux/netdevice.h
··· 914 914 * 915 915 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); 916 916 * Called when a user wants to change the Maximum Transfer Unit 917 - * of a device. If not defined, any request to change MTU will 918 - * will return an error. 917 + * of a device. 919 918 * 920 919 * void (*ndo_tx_timeout)(struct net_device *dev); 921 920 * Callback used when the transmitter has not made any progress ··· 1595 1596 * @rtnl_link_state: This enum represents the phases of creating 1596 1597 * a new link 1597 1598 * 1598 - * @destructor: Called from unregister, 1599 - * can be used to call free_netdev 1599 + * @needs_free_netdev: Should unregister perform free_netdev? 1600 + * @priv_destructor: Called from unregister 1600 1601 * @npinfo: XXX: need comments on this one 1601 1602 * @nd_net: Network namespace this network device is inside 1602 1603 * ··· 1857 1858 RTNL_LINK_INITIALIZING, 1858 1859 } rtnl_link_state:16; 1859 1860 1860 - void (*destructor)(struct net_device *dev); 1861 + bool needs_free_netdev; 1862 + void (*priv_destructor)(struct net_device *dev); 1861 1863 1862 1864 #ifdef CONFIG_NETPOLL 1863 1865 struct netpoll_info __rcu *npinfo; ··· 4259 4259 if (!dev->name[0] || strchr(dev->name, '%')) 4260 4260 return "(unnamed net_device)"; 4261 4261 return dev->name; 4262 + } 4263 + 4264 + static inline bool netdev_unregistering(const struct net_device *dev) 4265 + { 4266 + return dev->reg_state == NETREG_UNREGISTERING; 4262 4267 } 4263 4268 4264 4269 static inline const char *netdev_reg_state(const struct net_device *dev)
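Hedged sketch of the driver-side conversion these fields imply: where a driver used to set "dev->destructor = free_netdev", it now sets needs_free_netdev and keeps any private teardown in priv_destructor. my_setup()/my_uninit() are placeholders.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static void my_uninit(struct net_device *dev)
{
        /* driver-private cleanup; the core calls free_netdev() afterwards */
}

static void my_setup(struct net_device *dev)
{
        ether_setup(dev);
        dev->needs_free_netdev = true;          /* was: dev->destructor = free_netdev */
        dev->priv_destructor = my_uninit;       /* extra teardown, if any */
}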
-3
include/linux/pinctrl/pinconf-generic.h
··· 42 42 * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high 43 43 * impedance to VDD). If the argument is != 0 pull-up is enabled, 44 44 * if it is 0, pull-up is total, i.e. the pin is connected to VDD. 45 - * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous 46 - * input and output operations. 47 45 * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open 48 46 * collector) which means it is usually wired with other output ports 49 47 * which are then pulled up with an external resistor. Setting this ··· 96 98 PIN_CONFIG_BIAS_PULL_DOWN, 97 99 PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 98 100 PIN_CONFIG_BIAS_PULL_UP, 99 - PIN_CONFIG_BIDIRECTIONAL, 100 101 PIN_CONFIG_DRIVE_OPEN_DRAIN, 101 102 PIN_CONFIG_DRIVE_OPEN_SOURCE, 102 103 PIN_CONFIG_DRIVE_PUSH_PULL,
+6
include/linux/quotaops.h
··· 44 44 void inode_reclaim_rsv_space(struct inode *inode, qsize_t number); 45 45 46 46 int dquot_initialize(struct inode *inode); 47 + bool dquot_initialize_needed(struct inode *inode); 47 48 void dquot_drop(struct inode *inode); 48 49 struct dquot *dqget(struct super_block *sb, struct kqid qid); 49 50 static inline struct dquot *dqgrab(struct dquot *dquot) ··· 206 205 static inline int dquot_initialize(struct inode *inode) 207 206 { 208 207 return 0; 208 + } 209 + 210 + static inline bool dquot_initialize_needed(struct inode *inode) 211 + { 212 + return false; 209 213 } 210 214 211 215 static inline void dquot_drop(struct inode *inode)
+1
include/linux/slub_def.h
··· 84 84 int red_left_pad; /* Left redzone padding size */ 85 85 #ifdef CONFIG_SYSFS 86 86 struct kobject kobj; /* For sysfs */ 87 + struct work_struct kobj_remove_work; 87 88 #endif 88 89 #ifdef CONFIG_MEMCG 89 90 struct memcg_cache_params memcg_params;
-2
include/linux/srcu.h
··· 172 172 { 173 173 int retval; 174 174 175 - preempt_disable(); 176 175 retval = __srcu_read_lock(sp); 177 - preempt_enable(); 178 176 rcu_lock_acquire(&(sp)->dep_map); 179 177 return retval; 180 178 }
+2 -1
include/linux/sunrpc/svc.h
··· 336 336 { 337 337 char *cp = (char *)p; 338 338 struct kvec *vec = &rqstp->rq_arg.head[0]; 339 - return cp == (char *)vec->iov_base + vec->iov_len; 339 + return cp >= (char*)vec->iov_base 340 + && cp <= (char*)vec->iov_base + vec->iov_len; 340 341 } 341 342 342 343 static inline int
+2 -5
include/linux/suspend.h
··· 189 189 struct platform_freeze_ops { 190 190 int (*begin)(void); 191 191 int (*prepare)(void); 192 - void (*wake)(void); 193 - void (*sync)(void); 194 192 void (*restore)(void); 195 193 void (*end)(void); 196 194 }; ··· 428 430 429 431 extern bool pm_wakeup_pending(void); 430 432 extern void pm_system_wakeup(void); 431 - extern void pm_system_cancel_wakeup(void); 432 - extern void pm_wakeup_clear(bool reset); 433 + extern void pm_wakeup_clear(void); 433 434 extern void pm_system_irq_wakeup(unsigned int irq_number); 434 435 extern bool pm_get_wakeup_count(unsigned int *count, bool block); 435 436 extern bool pm_save_wakeup_count(unsigned int count); ··· 478 481 479 482 static inline bool pm_wakeup_pending(void) { return false; } 480 483 static inline void pm_system_wakeup(void) {} 481 - static inline void pm_wakeup_clear(bool reset) {} 484 + static inline void pm_wakeup_clear(void) {} 482 485 static inline void pm_system_irq_wakeup(unsigned int irq_number) {} 483 486 484 487 static inline void lock_system_sleep(void) {}
+2 -3
include/linux/timekeeper_internal.h
··· 29 29 */ 30 30 struct tk_read_base { 31 31 struct clocksource *clock; 32 - u64 (*read)(struct clocksource *cs); 33 32 u64 mask; 34 33 u64 cycle_last; 35 34 u32 mult; ··· 57 58 * interval. 58 59 * @xtime_remainder: Shifted nano seconds left over when rounding 59 60 * @cycle_interval 60 - * @raw_interval: Raw nano seconds accumulated per NTP interval. 61 + * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. 61 62 * @ntp_error: Difference between accumulated time and NTP time in ntp 62 63 * shifted nano seconds. 63 64 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and ··· 99 100 u64 cycle_interval; 100 101 u64 xtime_interval; 101 102 s64 xtime_remainder; 102 - u32 raw_interval; 103 + u64 raw_interval; 103 104 /* The ntp_tick_length() value currently being used. 104 105 * This cached copy ensures we consistently apply the tick 105 106 * length for an entire tick, as ntp_tick_length may change
+11 -1
include/media/cec-notifier.h
··· 29 29 struct cec_adapter; 30 30 struct cec_notifier; 31 31 32 - #ifdef CONFIG_MEDIA_CEC_NOTIFIER 32 + #if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) 33 33 34 34 /** 35 35 * cec_notifier_get - find or create a new cec_notifier for the given device. ··· 103 103 104 104 static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n, 105 105 const struct edid *edid) 106 + { 107 + } 108 + 109 + static inline void cec_notifier_register(struct cec_notifier *n, 110 + struct cec_adapter *adap, 111 + void (*callback)(struct cec_adapter *adap, u16 pa)) 112 + { 113 + } 114 + 115 + static inline void cec_notifier_unregister(struct cec_notifier *n) 106 116 { 107 117 } 108 118
+3 -3
include/media/cec.h
··· 173 173 bool passthrough; 174 174 struct cec_log_addrs log_addrs; 175 175 176 - #ifdef CONFIG_MEDIA_CEC_NOTIFIER 176 + #ifdef CONFIG_CEC_NOTIFIER 177 177 struct cec_notifier *notifier; 178 178 #endif 179 179 ··· 206 206 #define cec_phys_addr_exp(pa) \ 207 207 ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf 208 208 209 - #if IS_ENABLED(CONFIG_CEC_CORE) 209 + #if IS_REACHABLE(CONFIG_CEC_CORE) 210 210 struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops, 211 211 void *priv, const char *name, u32 caps, u8 available_las); 212 212 int cec_register_adapter(struct cec_adapter *adap, struct device *parent); ··· 300 300 */ 301 301 int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); 302 302 303 - #ifdef CONFIG_MEDIA_CEC_NOTIFIER 303 + #ifdef CONFIG_CEC_NOTIFIER 304 304 void cec_register_cec_notifier(struct cec_adapter *adap, 305 305 struct cec_notifier *notifier); 306 306 #endif
+1
include/net/ipv6.h
··· 1007 1007 */ 1008 1008 extern const struct proto_ops inet6_stream_ops; 1009 1009 extern const struct proto_ops inet6_dgram_ops; 1010 + extern const struct proto_ops inet6_sockraw_ops; 1010 1011 1011 1012 struct group_source_req; 1012 1013 struct group_filter;
+1 -1
include/net/tcp.h
··· 924 924 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); 925 925 /* call when ack arrives (optional) */ 926 926 void (*in_ack_event)(struct sock *sk, u32 flags); 927 - /* new value of cwnd after loss (optional) */ 927 + /* new value of cwnd after loss (required) */ 928 928 u32 (*undo_cwnd)(struct sock *sk); 929 929 /* hook for packet ack accounting (optional) */ 930 930 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
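Minimal sketch of what "required" means for an out-of-tree congestion control module: every tcp_congestion_ops must now provide an undo_cwnd hook. The body below mirrors the classic Reno-style undo and is illustrative only.

#include <net/tcp.h>

static u32 my_undo_cwnd(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        /* Restore the congestion window recorded before the (spurious) loss. */
        return max(tp->snd_cwnd, tp->prior_cwnd);
}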
+2 -2
include/net/wext.h
··· 6 6 struct net; 7 7 8 8 #ifdef CONFIG_WEXT_CORE 9 - int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 9 + int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, 10 10 void __user *arg); 11 11 int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, 12 12 unsigned long arg); ··· 14 14 struct iw_statistics *get_wireless_stats(struct net_device *dev); 15 15 int call_commit_handler(struct net_device *dev); 16 16 #else 17 - static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 17 + static inline int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, 18 18 void __user *arg) 19 19 { 20 20 return -EINVAL;
+2 -5
include/net/xfrm.h
··· 1850 1850 } 1851 1851 #endif 1852 1852 1853 - #ifdef CONFIG_XFRM_OFFLOAD 1854 1853 void __net_init xfrm_dev_init(void); 1854 + 1855 + #ifdef CONFIG_XFRM_OFFLOAD 1855 1856 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features); 1856 1857 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, 1857 1858 struct xfrm_user_offload *xuo); ··· 1878 1877 } 1879 1878 } 1880 1879 #else 1881 - static inline void __net_init xfrm_dev_init(void) 1882 - { 1883 - } 1884 - 1885 1880 static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) 1886 1881 { 1887 1882 return 0;
+3 -22
include/rdma/ib_sa.h
··· 158 158 }; 159 159 160 160 struct sa_path_rec_ib { 161 - __be64 service_id; 162 161 __be16 dlid; 163 162 __be16 slid; 164 163 u8 raw_traffic; ··· 173 174 }; 174 175 175 176 struct sa_path_rec_opa { 176 - __be64 service_id; 177 177 __be32 dlid; 178 178 __be32 slid; 179 179 u8 raw_traffic; ··· 187 189 struct sa_path_rec { 188 190 union ib_gid dgid; 189 191 union ib_gid sgid; 192 + __be64 service_id; 190 193 /* reserved */ 191 194 __be32 flow_label; 192 195 u8 hop_limit; ··· 261 262 ib->ib.dlid = htons(ntohl(opa->opa.dlid)); 262 263 ib->ib.slid = htons(ntohl(opa->opa.slid)); 263 264 } 264 - ib->ib.service_id = opa->opa.service_id; 265 + ib->service_id = opa->service_id; 265 266 ib->ib.raw_traffic = opa->opa.raw_traffic; 266 267 } 267 268 ··· 280 281 } 281 282 opa->opa.slid = slid; 282 283 opa->opa.dlid = dlid; 283 - opa->opa.service_id = ib->ib.service_id; 284 + opa->service_id = ib->service_id; 284 285 opa->opa.raw_traffic = ib->ib.raw_traffic; 285 286 } 286 287 ··· 590 591 (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2)); 591 592 } 592 593 593 - static inline void sa_path_set_service_id(struct sa_path_rec *rec, 594 - __be64 service_id) 595 - { 596 - if (rec->rec_type == SA_PATH_REC_TYPE_IB) 597 - rec->ib.service_id = service_id; 598 - else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) 599 - rec->opa.service_id = service_id; 600 - } 601 - 602 594 static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid) 603 595 { 604 596 if (rec->rec_type == SA_PATH_REC_TYPE_IB) ··· 613 623 rec->ib.raw_traffic = raw_traffic; 614 624 else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) 615 625 rec->opa.raw_traffic = raw_traffic; 616 - } 617 - 618 - static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec) 619 - { 620 - if (rec->rec_type == SA_PATH_REC_TYPE_IB) 621 - return rec->ib.service_id; 622 - else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) 623 - return rec->opa.service_id; 624 - return 0; 625 626 } 626 627 627 628 static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
-10
include/rdma/rdma_netlink.h
··· 10 10 struct module *module; 11 11 }; 12 12 13 - int ibnl_init(void); 14 - void ibnl_cleanup(void); 15 - 16 13 /** 17 14 * Add a a client to the list of IB netlink exporters. 18 15 * @index: Index of the added client ··· 73 76 */ 74 77 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, 75 78 unsigned int group, gfp_t flags); 76 - 77 - /** 78 - * Check if there are any listeners to the netlink group 79 - * @group: the netlink group ID 80 - * Returns 0 on success or a negative for no listeners. 81 - */ 82 - int ibnl_chk_listeners(unsigned int group); 83 79 84 80 #endif /* _RDMA_NETLINK_H */
+1
include/target/iscsi/iscsi_target_core.h
··· 557 557 #define LOGIN_FLAGS_READ_ACTIVE 1 558 558 #define LOGIN_FLAGS_CLOSED 2 559 559 #define LOGIN_FLAGS_READY 4 560 + #define LOGIN_FLAGS_INITIAL_PDU 8 560 561 unsigned long login_flags; 561 562 struct delayed_work login_work; 562 563 struct delayed_work login_cleanup_work;
+1 -25
include/uapi/linux/a.out.h
··· 112 112 #define N_TXTADDR(x) (N_MAGIC(x) == QMAGIC ? PAGE_SIZE : 0) 113 113 #endif 114 114 115 - /* Address of data segment in memory after it is loaded. 116 - Note that it is up to you to define SEGMENT_SIZE 117 - on machines not listed here. */ 118 - #if defined(vax) || defined(hp300) || defined(pyr) 119 - #define SEGMENT_SIZE page_size 120 - #endif 121 - #ifdef sony 122 - #define SEGMENT_SIZE 0x2000 123 - #endif /* Sony. */ 124 - #ifdef is68k 125 - #define SEGMENT_SIZE 0x20000 126 - #endif 127 - #if defined(m68k) && defined(PORTAR) 128 - #define PAGE_SIZE 0x400 129 - #define SEGMENT_SIZE PAGE_SIZE 130 - #endif 131 - 132 - #ifdef linux 115 + /* Address of data segment in memory after it is loaded. */ 133 116 #ifndef __KERNEL__ 134 117 #include <unistd.h> 135 118 #endif ··· 122 139 #ifndef SEGMENT_SIZE 123 140 #ifndef __KERNEL__ 124 141 #define SEGMENT_SIZE getpagesize() 125 - #endif 126 142 #endif 127 143 #endif 128 144 #endif ··· 242 260 unsigned int r_extern:1; 243 261 /* Four bits that aren't used, but when writing an object file 244 262 it is desirable to clear them. */ 245 - #ifdef NS32K 246 - unsigned r_bsr:1; 247 - unsigned r_disp:1; 248 - unsigned r_pad:2; 249 - #else 250 263 unsigned int r_pad:4; 251 - #endif 252 264 }; 253 265 #endif /* no N_RELOCATION_INFO_DECLARED. */ 254 266
+4 -2
include/uapi/linux/ethtool.h
··· 1486 1486 * it was forced up into this mode or autonegotiated. 1487 1487 */ 1488 1488 1489 - /* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */ 1490 - /* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */ 1489 + /* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. 1490 + * Update drivers/net/phy/phy.c:phy_speed_to_str() and 1491 + * drivers/net/bonding/bond_3ad.c:__get_link_speed() when adding new values. 1492 + */ 1491 1493 #define SPEED_10 10 1492 1494 #define SPEED_100 100 1493 1495 #define SPEED_1000 1000
+2 -2
include/uapi/linux/keyctl.h
··· 70 70 }; 71 71 72 72 struct keyctl_kdf_params { 73 - char *hashname; 74 - char *otherinfo; 73 + char __user *hashname; 74 + char __user *otherinfo; 75 75 __u32 otherinfolen; 76 76 __u32 __spare[8]; 77 77 };
+1
include/uapi/linux/openvswitch.h
··· 343 343 #define OVS_KEY_ATTR_MAX (__OVS_KEY_ATTR_MAX - 1) 344 344 345 345 enum ovs_tunnel_key_attr { 346 + /* OVS_TUNNEL_KEY_ATTR_NONE, standard nl API requires this attribute! */ 346 347 OVS_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */ 347 348 OVS_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */ 348 349 OVS_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. */
+5
kernel/bpf/verifier.c
··· 989 989 if (err) 990 990 return err; 991 991 992 + if (is_pointer_value(env, insn->src_reg)) { 993 + verbose("R%d leaks addr into mem\n", insn->src_reg); 994 + return -EACCES; 995 + } 996 + 992 997 /* check whether atomic_add can read the memory */ 993 998 err = check_mem_access(env, insn->dst_reg, insn->off, 994 999 BPF_SIZE(insn->code), BPF_READ, -1);
+5
kernel/cgroup/cgroup.c
··· 4265 4265 { 4266 4266 lockdep_assert_held(&cgroup_mutex); 4267 4267 4268 + if (css->flags & CSS_DYING) 4269 + return; 4270 + 4271 + css->flags |= CSS_DYING; 4272 + 4268 4273 /* 4269 4274 * This must happen before css is disassociated with its cgroup. 4270 4275 * See seq_css() for details.
+2 -2
kernel/cgroup/cpuset.c
··· 176 176 } cpuset_flagbits_t; 177 177 178 178 /* convenient tests for these bits */ 179 - static inline bool is_cpuset_online(const struct cpuset *cs) 179 + static inline bool is_cpuset_online(struct cpuset *cs) 180 180 { 181 - return test_bit(CS_ONLINE, &cs->flags); 181 + return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); 182 182 } 183 183 184 184 static inline int is_cpu_exclusive(const struct cpuset *cs)
+2 -2
kernel/cpu.c
··· 1658 1658 ret = !sp->name || sp->cant_stop ? -EINVAL : 0; 1659 1659 mutex_unlock(&cpuhp_state_mutex); 1660 1660 if (ret) 1661 - return ret; 1661 + goto out; 1662 1662 1663 1663 if (st->state < target) 1664 1664 ret = do_cpu_up(dev->id, target); 1665 1665 else 1666 1666 ret = do_cpu_down(dev->id, target); 1667 - 1667 + out: 1668 1668 unlock_device_hotplug(); 1669 1669 return ret ? ret : count; 1670 1670 }
+21
kernel/events/core.c
··· 7316 7316 return __perf_event_account_interrupt(event, 1); 7317 7317 } 7318 7318 7319 + static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) 7320 + { 7321 + /* 7322 + * Due to interrupt latency (AKA "skid"), we may enter the 7323 + * kernel before taking an overflow, even if the PMU is only 7324 + * counting user events. 7325 + * To avoid leaking information to userspace, we must always 7326 + * reject kernel samples when exclude_kernel is set. 7327 + */ 7328 + if (event->attr.exclude_kernel && !user_mode(regs)) 7329 + return false; 7330 + 7331 + return true; 7332 + } 7333 + 7319 7334 /* 7320 7335 * Generic event overflow handling, sampling. 7321 7336 */ ··· 7350 7335 return 0; 7351 7336 7352 7337 ret = __perf_event_account_interrupt(event, throttle); 7338 + 7339 + /* 7340 + * For security, drop the skid kernel samples if necessary. 7341 + */ 7342 + if (!sample_is_allowed(event, regs)) 7343 + return ret; 7353 7344 7354 7345 /* 7355 7346 * XXX event_limit might not quite work as expected on inherited
+1 -1
kernel/events/ring_buffer.c
··· 580 580 int ret = -ENOMEM, max_order = 0; 581 581 582 582 if (!has_aux(event)) 583 - return -ENOTSUPP; 583 + return -EOPNOTSUPP; 584 584 585 585 if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) { 586 586 /*
+3 -1
kernel/irq/manage.c
··· 1312 1312 ret = __irq_set_trigger(desc, 1313 1313 new->flags & IRQF_TRIGGER_MASK); 1314 1314 1315 - if (ret) 1315 + if (ret) { 1316 + irq_release_resources(desc); 1316 1317 goto out_mask; 1318 + } 1317 1319 } 1318 1320 1319 1321 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
+1
kernel/livepatch/Kconfig
··· 10 10 depends on SYSFS 11 11 depends on KALLSYMS_ALL 12 12 depends on HAVE_LIVEPATCH 13 + depends on !TRIM_UNUSED_KSYMS 13 14 help 14 15 Say Y here if you want to support kernel live patching. 15 16 This option has no runtime impact until a kernel "patch"
+6 -2
kernel/livepatch/patch.c
··· 59 59 60 60 ops = container_of(fops, struct klp_ops, fops); 61 61 62 - rcu_read_lock(); 62 + /* 63 + * A variant of synchronize_sched() is used to allow patching functions 64 + * where RCU is not watching, see klp_synchronize_transition(). 65 + */ 66 + preempt_disable_notrace(); 63 67 64 68 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, 65 69 stack_node); ··· 119 115 120 116 klp_arch_set_pc(regs, (unsigned long)func->new_func); 121 117 unlock: 122 - rcu_read_unlock(); 118 + preempt_enable_notrace(); 123 119 } 124 120 125 121 /*
+31 -5
kernel/livepatch/transition.c
··· 49 49 static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn); 50 50 51 51 /* 52 + * This function is just a stub to implement a hard force 53 + * of synchronize_sched(). This requires synchronizing 54 + * tasks even in userspace and idle. 55 + */ 56 + static void klp_sync(struct work_struct *work) 57 + { 58 + } 59 + 60 + /* 61 + * We allow to patch also functions where RCU is not watching, 62 + * e.g. before user_exit(). We can not rely on the RCU infrastructure 63 + * to do the synchronization. Instead hard force the sched synchronization. 64 + * 65 + * This approach allows to use RCU functions for manipulating func_stack 66 + * safely. 67 + */ 68 + static void klp_synchronize_transition(void) 69 + { 70 + schedule_on_each_cpu(klp_sync); 71 + } 72 + 73 + /* 52 74 * The transition to the target patch state is complete. Clean up the data 53 75 * structures. 54 76 */ ··· 95 73 * func->transition gets cleared, the handler may choose a 96 74 * removed function. 97 75 */ 98 - synchronize_rcu(); 76 + klp_synchronize_transition(); 99 77 } 100 78 101 79 if (klp_transition_patch->immediate) ··· 114 92 115 93 /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ 116 94 if (klp_target_state == KLP_PATCHED) 117 - synchronize_rcu(); 95 + klp_synchronize_transition(); 118 96 119 97 read_lock(&tasklist_lock); 120 98 for_each_process_thread(g, task) { ··· 158 136 */ 159 137 void klp_update_patch_state(struct task_struct *task) 160 138 { 161 - rcu_read_lock(); 139 + /* 140 + * A variant of synchronize_sched() is used to allow patching functions 141 + * where RCU is not watching, see klp_synchronize_transition(). 142 + */ 143 + preempt_disable_notrace(); 162 144 163 145 /* 164 146 * This test_and_clear_tsk_thread_flag() call also serves as a read ··· 179 153 if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING)) 180 154 task->patch_state = READ_ONCE(klp_target_state); 181 155 182 - rcu_read_unlock(); 156 + preempt_enable_notrace(); 183 157 } 184 158 185 159 /* ··· 565 539 clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING); 566 540 567 541 /* Let any remaining calls to klp_update_patch_state() complete */ 568 - synchronize_rcu(); 542 + klp_synchronize_transition(); 569 543 570 544 klp_start_transition(); 571 545 }
+1 -1
kernel/power/process.c
··· 132 132 if (!pm_freezing) 133 133 atomic_inc(&system_freezing_cnt); 134 134 135 - pm_wakeup_clear(true); 135 + pm_wakeup_clear(); 136 136 pr_info("Freezing user space processes ... "); 137 137 pm_freezing = true; 138 138 error = try_to_freeze_tasks(true);
+4 -25
kernel/power/suspend.c
··· 72 72 73 73 static void freeze_enter(void) 74 74 { 75 - trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true); 76 - 77 75 spin_lock_irq(&suspend_freeze_lock); 78 76 if (pm_wakeup_pending()) 79 77 goto out; ··· 98 100 out: 99 101 suspend_freeze_state = FREEZE_STATE_NONE; 100 102 spin_unlock_irq(&suspend_freeze_lock); 101 - 102 - trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false); 103 - } 104 - 105 - static void s2idle_loop(void) 106 - { 107 - do { 108 - freeze_enter(); 109 - 110 - if (freeze_ops && freeze_ops->wake) 111 - freeze_ops->wake(); 112 - 113 - dpm_resume_noirq(PMSG_RESUME); 114 - if (freeze_ops && freeze_ops->sync) 115 - freeze_ops->sync(); 116 - 117 - if (pm_wakeup_pending()) 118 - break; 119 - 120 - pm_wakeup_clear(false); 121 - } while (!dpm_suspend_noirq(PMSG_SUSPEND)); 122 103 } 123 104 124 105 void freeze_wake(void) ··· 371 394 * all the devices are suspended. 372 395 */ 373 396 if (state == PM_SUSPEND_FREEZE) { 374 - s2idle_loop(); 375 - goto Platform_early_resume; 397 + trace_suspend_resume(TPS("machine_suspend"), state, true); 398 + freeze_enter(); 399 + trace_suspend_resume(TPS("machine_suspend"), state, false); 400 + goto Platform_wake; 376 401 } 377 402 378 403 error = disable_nonboot_cpus();
+10 -36
kernel/printk/printk.c
··· 269 269 #define MAX_CMDLINECONSOLES 8 270 270 271 271 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; 272 - static int console_cmdline_cnt; 273 272 274 273 static int preferred_console = -1; 275 274 int console_set_on_cmdline; ··· 1905 1906 * See if this tty is not yet registered, and 1906 1907 * if we have a slot free. 1907 1908 */ 1908 - for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) { 1909 + for (i = 0, c = console_cmdline; 1910 + i < MAX_CMDLINECONSOLES && c->name[0]; 1911 + i++, c++) { 1909 1912 if (strcmp(c->name, name) == 0 && c->index == idx) { 1910 - if (brl_options) 1911 - return 0; 1912 - 1913 - /* 1914 - * Maintain an invariant that will help to find if 1915 - * the matching console is preferred, see 1916 - * register_console(): 1917 - * 1918 - * The last non-braille console is always 1919 - * the preferred one. 1920 - */ 1921 - if (i != console_cmdline_cnt - 1) 1922 - swap(console_cmdline[i], 1923 - console_cmdline[console_cmdline_cnt - 1]); 1924 - 1925 - preferred_console = console_cmdline_cnt - 1; 1926 - 1913 + if (!brl_options) 1914 + preferred_console = i; 1927 1915 return 0; 1928 1916 } 1929 1917 } ··· 1923 1937 braille_set_options(c, brl_options); 1924 1938 1925 1939 c->index = idx; 1926 - console_cmdline_cnt++; 1927 1940 return 0; 1928 1941 } 1929 1942 /* ··· 2462 2477 } 2463 2478 2464 2479 /* 2465 - * See if this console matches one we selected on the command line. 2466 - * 2467 - * There may be several entries in the console_cmdline array matching 2468 - * with the same console, one with newcon->match(), another by 2469 - * name/index: 2470 - * 2471 - * pl011,mmio,0x87e024000000,115200 -- added from SPCR 2472 - * ttyAMA0 -- added from command line 2473 - * 2474 - * Traverse the console_cmdline array in reverse order to be 2475 - * sure that if this console is preferred then it will be the first 2476 - * matching entry. We use the invariant that is maintained in 2477 - * __add_preferred_console(). 2480 + * See if this console matches one we selected on 2481 + * the command line. 2478 2482 */ 2479 - for (i = console_cmdline_cnt - 1; i >= 0; i--) { 2480 - c = console_cmdline + i; 2481 - 2483 + for (i = 0, c = console_cmdline; 2484 + i < MAX_CMDLINECONSOLES && c->name[0]; 2485 + i++, c++) { 2482 2486 if (!newcon->match || 2483 2487 newcon->match(newcon, c->name, c->index, c->options) != 0) { 2484 2488 /* default matching */
+2 -3
kernel/rcu/srcu.c
··· 263 263 264 264 /* 265 265 * Counts the new reader in the appropriate per-CPU element of the 266 - * srcu_struct. Must be called from process context. 266 + * srcu_struct. 267 267 * Returns an index that must be passed to the matching srcu_read_unlock(). 268 268 */ 269 269 int __srcu_read_lock(struct srcu_struct *sp) ··· 271 271 int idx; 272 272 273 273 idx = READ_ONCE(sp->completed) & 0x1; 274 - __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); 274 + this_cpu_inc(sp->per_cpu_ref->lock_count[idx]); 275 275 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 276 276 return idx; 277 277 } ··· 281 281 * Removes the count for the old reader from the appropriate per-CPU 282 282 * element of the srcu_struct. Note that this may well be a different 283 283 * CPU than that which was incremented by the corresponding srcu_read_lock(). 284 - * Must be called from process context. 285 284 */ 286 285 void __srcu_read_unlock(struct srcu_struct *sp, int idx) 287 286 {
+4 -3
kernel/rcu/srcutiny.c
··· 97 97 98 98 /* 99 99 * Counts the new reader in the appropriate per-CPU element of the 100 - * srcu_struct. Must be called from process context. 101 - * Returns an index that must be passed to the matching srcu_read_unlock(). 100 + * srcu_struct. Can be invoked from irq/bh handlers, but the matching 101 + * __srcu_read_unlock() must be in the same handler instance. Returns an 102 + * index that must be passed to the matching srcu_read_unlock(). 102 103 */ 103 104 int __srcu_read_lock(struct srcu_struct *sp) 104 105 { ··· 113 112 114 113 /* 115 114 * Removes the count for the old reader from the appropriate element of 116 - * the srcu_struct. Must be called from process context. 115 + * the srcu_struct. 117 116 */ 118 117 void __srcu_read_unlock(struct srcu_struct *sp, int idx) 119 118 {
+2 -3
kernel/rcu/srcutree.c
··· 357 357 358 358 /* 359 359 * Counts the new reader in the appropriate per-CPU element of the 360 - * srcu_struct. Must be called from process context. 360 + * srcu_struct. 361 361 * Returns an index that must be passed to the matching srcu_read_unlock(). 362 362 */ 363 363 int __srcu_read_lock(struct srcu_struct *sp) ··· 365 365 int idx; 366 366 367 367 idx = READ_ONCE(sp->srcu_idx) & 0x1; 368 - __this_cpu_inc(sp->sda->srcu_lock_count[idx]); 368 + this_cpu_inc(sp->sda->srcu_lock_count[idx]); 369 369 smp_mb(); /* B */ /* Avoid leaking the critical section. */ 370 370 return idx; 371 371 } ··· 375 375 * Removes the count for the old reader from the appropriate per-CPU 376 376 * element of the srcu_struct. Note that this may well be a different 377 377 * CPU than that which was incremented by the corresponding srcu_read_lock(). 378 - * Must be called from process context. 379 378 */ 380 379 void __srcu_read_unlock(struct srcu_struct *sp, int idx) 381 380 {
+1 -1
kernel/sched/core.c
··· 5605 5605 BUG_ON(cpu_online(smp_processor_id())); 5606 5606 5607 5607 if (mm != &init_mm) { 5608 - switch_mm_irqs_off(mm, &init_mm, current); 5608 + switch_mm(mm, &init_mm, current); 5609 5609 finish_arch_post_lock_switch(); 5610 5610 } 5611 5611 mmdrop(mm);
-3
kernel/sched/cpufreq_schedutil.c
··· 101 101 if (sg_policy->next_freq == next_freq) 102 102 return; 103 103 104 - if (sg_policy->next_freq > next_freq) 105 - next_freq = (sg_policy->next_freq + next_freq) >> 1; 106 - 107 104 sg_policy->next_freq = next_freq; 108 105 sg_policy->last_freq_update_time = time; 109 106
+1 -1
kernel/sched/fair.c
··· 3563 3563 trace_sched_stat_runtime_enabled()) { 3564 3564 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " 3565 3565 "stat_blocked and stat_runtime require the " 3566 - "kernel parameter schedstats=enabled or " 3566 + "kernel parameter schedstats=enable or " 3567 3567 "kernel.sched_schedstats=1\n"); 3568 3568 } 3569 3569 #endif
+14 -6
kernel/signal.c
··· 510 510 return !tsk->ptrace; 511 511 } 512 512 513 - static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) 513 + static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, 514 + bool *resched_timer) 514 515 { 515 516 struct sigqueue *q, *first = NULL; 516 517 ··· 533 532 still_pending: 534 533 list_del_init(&first->list); 535 534 copy_siginfo(info, &first->info); 535 + 536 + *resched_timer = 537 + (first->flags & SIGQUEUE_PREALLOC) && 538 + (info->si_code == SI_TIMER) && 539 + (info->si_sys_private); 540 + 536 541 __sigqueue_free(first); 537 542 } else { 538 543 /* ··· 555 548 } 556 549 557 550 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, 558 - siginfo_t *info) 551 + siginfo_t *info, bool *resched_timer) 559 552 { 560 553 int sig = next_signal(pending, mask); 561 554 562 555 if (sig) 563 - collect_signal(sig, pending, info); 556 + collect_signal(sig, pending, info, resched_timer); 564 557 return sig; 565 558 } 566 559 ··· 572 565 */ 573 566 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) 574 567 { 568 + bool resched_timer = false; 575 569 int signr; 576 570 577 571 /* We only dequeue private signals from ourselves, we don't let 578 572 * signalfd steal them 579 573 */ 580 - signr = __dequeue_signal(&tsk->pending, mask, info); 574 + signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); 581 575 if (!signr) { 582 576 signr = __dequeue_signal(&tsk->signal->shared_pending, 583 - mask, info); 577 + mask, info, &resched_timer); 584 578 #ifdef CONFIG_POSIX_TIMERS 585 579 /* 586 580 * itimer signal ? ··· 629 621 current->jobctl |= JOBCTL_STOP_DEQUEUED; 630 622 } 631 623 #ifdef CONFIG_POSIX_TIMERS 632 - if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { 624 + if (resched_timer) { 633 625 /* 634 626 * Release the siglock to ensure proper locking order 635 627 * of timer locks outside of siglocks. Note, we leave
+11 -3
kernel/time/alarmtimer.c
··· 387 387 { 388 388 struct alarm_base *base = &alarm_bases[alarm->type]; 389 389 390 - start = ktime_add(start, base->gettime()); 390 + start = ktime_add_safe(start, base->gettime()); 391 391 alarm_start(alarm, start); 392 392 } 393 393 EXPORT_SYMBOL_GPL(alarm_start_relative); ··· 475 475 overrun++; 476 476 } 477 477 478 - alarm->node.expires = ktime_add(alarm->node.expires, interval); 478 + alarm->node.expires = ktime_add_safe(alarm->node.expires, interval); 479 479 return overrun; 480 480 } 481 481 EXPORT_SYMBOL_GPL(alarm_forward); ··· 660 660 661 661 /* start the timer */ 662 662 timr->it.alarm.interval = timespec64_to_ktime(new_setting->it_interval); 663 + 664 + /* 665 + * Rate limit to the tick as a hot fix to prevent DOS. Will be 666 + * mopped up later. 667 + */ 668 + if (timr->it.alarm.interval < TICK_NSEC) 669 + timr->it.alarm.interval = TICK_NSEC; 670 + 663 671 exp = timespec64_to_ktime(new_setting->it_value); 664 672 /* Convert (if necessary) to absolute time */ 665 673 if (flags != TIMER_ABSTIME) { 666 674 ktime_t now; 667 675 668 676 now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); 669 - exp = ktime_add(now, exp); 677 + exp = ktime_add_safe(now, exp); 670 678 } 671 679 672 680 alarm_start(&timr->it.alarm.alarmtimer, exp);
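Illustrative note (assumption): the switch to ktime_add_safe() is about overflow. With plain ktime_add(), a huge user-supplied expiry added to "now" can wrap negative and make the timer fire immediately; ktime_add_safe() saturates at KTIME_MAX instead. The helper below only restates that behaviour.

#include <linux/ktime.h>

static ktime_t absolute_expiry(ktime_t now, ktime_t user_value)
{
        /* Never wraps: the sum is clamped to KTIME_MAX on overflow. */
        return ktime_add_safe(now, user_value);
}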
+3 -1
kernel/time/tick-broadcast.c
··· 37 37 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); 38 38 39 39 #ifdef CONFIG_TICK_ONESHOT 40 + static void tick_broadcast_setup_oneshot(struct clock_event_device *bc); 40 41 static void tick_broadcast_clear_oneshot(int cpu); 41 42 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); 42 43 #else 44 + static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } 43 45 static inline void tick_broadcast_clear_oneshot(int cpu) { } 44 46 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { } 45 47 #endif ··· 869 867 /** 870 868 * tick_broadcast_setup_oneshot - setup the broadcast device 871 869 */ 872 - void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 870 + static void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 873 871 { 874 872 int cpu = smp_processor_id(); 875 873
-2
kernel/time/tick-internal.h
··· 126 126 127 127 /* Functions related to oneshot broadcasting */ 128 128 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) 129 - extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc); 130 129 extern void tick_broadcast_switch_to_oneshot(void); 131 130 extern void tick_shutdown_broadcast_oneshot(unsigned int cpu); 132 131 extern int tick_broadcast_oneshot_active(void); ··· 133 134 bool tick_broadcast_oneshot_available(void); 134 135 extern struct cpumask *tick_get_broadcast_oneshot_mask(void); 135 136 #else /* !(BROADCAST && ONESHOT): */ 136 - static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } 137 137 static inline void tick_broadcast_switch_to_oneshot(void) { } 138 138 static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { } 139 139 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+46 -25
kernel/time/timekeeping.c
··· 118 118 tk->offs_boot = ktime_add(tk->offs_boot, delta); 119 119 } 120 120 121 + /* 122 + * tk_clock_read - atomic clocksource read() helper 123 + * 124 + * This helper is necessary to use in the read paths because, while the 125 + * seqlock ensures we don't return a bad value while structures are updated, 126 + * it doesn't protect from potential crashes. There is the possibility that 127 + * the tkr's clocksource may change between the read reference, and the 128 + * clock reference passed to the read function. This can cause crashes if 129 + * the wrong clocksource is passed to the wrong read function. 130 + * This isn't necessary to use when holding the timekeeper_lock or doing 131 + * a read of the fast-timekeeper tkrs (which is protected by its own locking 132 + * and update logic). 133 + */ 134 + static inline u64 tk_clock_read(struct tk_read_base *tkr) 135 + { 136 + struct clocksource *clock = READ_ONCE(tkr->clock); 137 + 138 + return clock->read(clock); 139 + } 140 + 121 141 #ifdef CONFIG_DEBUG_TIMEKEEPING 122 142 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ 123 143 ··· 195 175 */ 196 176 do { 197 177 seq = read_seqcount_begin(&tk_core.seq); 198 - now = tkr->read(tkr->clock); 178 + now = tk_clock_read(tkr); 199 179 last = tkr->cycle_last; 200 180 mask = tkr->mask; 201 181 max = tkr->clock->max_cycles; ··· 229 209 u64 cycle_now, delta; 230 210 231 211 /* read clocksource */ 232 - cycle_now = tkr->read(tkr->clock); 212 + cycle_now = tk_clock_read(tkr); 233 213 234 214 /* calculate the delta since the last update_wall_time */ 235 215 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); ··· 258 238 ++tk->cs_was_changed_seq; 259 239 old_clock = tk->tkr_mono.clock; 260 240 tk->tkr_mono.clock = clock; 261 - tk->tkr_mono.read = clock->read; 262 241 tk->tkr_mono.mask = clock->mask; 263 - tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); 242 + tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); 264 243 265 244 tk->tkr_raw.clock = clock; 266 - tk->tkr_raw.read = clock->read; 267 245 tk->tkr_raw.mask = clock->mask; 268 246 tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; 269 247 ··· 280 262 /* Go back from cycles -> shifted ns */ 281 263 tk->xtime_interval = interval * clock->mult; 282 264 tk->xtime_remainder = ntpinterval - tk->xtime_interval; 283 - tk->raw_interval = (interval * clock->mult) >> clock->shift; 265 + tk->raw_interval = interval * clock->mult; 284 266 285 267 /* if changing clocks, convert xtime_nsec shift units */ 286 268 if (old_clock) { ··· 422 404 423 405 now += timekeeping_delta_to_ns(tkr, 424 406 clocksource_delta( 425 - tkr->read(tkr->clock), 407 + tk_clock_read(tkr), 426 408 tkr->cycle_last, 427 409 tkr->mask)); 428 410 } while (read_seqcount_retry(&tkf->seq, seq)); ··· 479 461 return cycles_at_suspend; 480 462 } 481 463 464 + static struct clocksource dummy_clock = { 465 + .read = dummy_clock_read, 466 + }; 467 + 482 468 /** 483 469 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. 484 470 * @tk: Timekeeper to snapshot. 
··· 499 477 struct tk_read_base *tkr = &tk->tkr_mono; 500 478 501 479 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); 502 - cycles_at_suspend = tkr->read(tkr->clock); 503 - tkr_dummy.read = dummy_clock_read; 480 + cycles_at_suspend = tk_clock_read(tkr); 481 + tkr_dummy.clock = &dummy_clock; 504 482 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); 505 483 506 484 tkr = &tk->tkr_raw; 507 485 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); 508 - tkr_dummy.read = dummy_clock_read; 486 + tkr_dummy.clock = &dummy_clock; 509 487 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); 510 488 } 511 489 ··· 671 649 */ 672 650 static void timekeeping_forward_now(struct timekeeper *tk) 673 651 { 674 - struct clocksource *clock = tk->tkr_mono.clock; 675 652 u64 cycle_now, delta; 676 653 u64 nsec; 677 654 678 - cycle_now = tk->tkr_mono.read(clock); 655 + cycle_now = tk_clock_read(&tk->tkr_mono); 679 656 delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); 680 657 tk->tkr_mono.cycle_last = cycle_now; 681 658 tk->tkr_raw.cycle_last = cycle_now; ··· 950 929 951 930 do { 952 931 seq = read_seqcount_begin(&tk_core.seq); 953 - 954 - now = tk->tkr_mono.read(tk->tkr_mono.clock); 932 + now = tk_clock_read(&tk->tkr_mono); 955 933 systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; 956 934 systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; 957 935 base_real = ktime_add(tk->tkr_mono.base, ··· 1128 1108 * Check whether the system counter value provided by the 1129 1109 * device driver is on the current timekeeping interval. 1130 1110 */ 1131 - now = tk->tkr_mono.read(tk->tkr_mono.clock); 1111 + now = tk_clock_read(&tk->tkr_mono); 1132 1112 interval_start = tk->tkr_mono.cycle_last; 1133 1113 if (!cycle_between(interval_start, cycles, now)) { 1134 1114 clock_was_set_seq = tk->clock_was_set_seq; ··· 1649 1629 * The less preferred source will only be tried if there is no better 1650 1630 * usable source. The rtc part is handled separately in rtc core code. 
1651 1631 */ 1652 - cycle_now = tk->tkr_mono.read(clock); 1632 + cycle_now = tk_clock_read(&tk->tkr_mono); 1653 1633 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && 1654 1634 cycle_now > tk->tkr_mono.cycle_last) { 1655 1635 u64 nsec, cyc_delta; ··· 1996 1976 u32 shift, unsigned int *clock_set) 1997 1977 { 1998 1978 u64 interval = tk->cycle_interval << shift; 1999 - u64 raw_nsecs; 1979 + u64 snsec_per_sec; 2000 1980 2001 1981 /* If the offset is smaller than a shifted interval, do nothing */ 2002 1982 if (offset < interval) ··· 2011 1991 *clock_set |= accumulate_nsecs_to_secs(tk); 2012 1992 2013 1993 /* Accumulate raw time */ 2014 - raw_nsecs = (u64)tk->raw_interval << shift; 2015 - raw_nsecs += tk->raw_time.tv_nsec; 2016 - if (raw_nsecs >= NSEC_PER_SEC) { 2017 - u64 raw_secs = raw_nsecs; 2018 - raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); 2019 - tk->raw_time.tv_sec += raw_secs; 1994 + tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; 1995 + tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; 1996 + snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; 1997 + while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { 1998 + tk->tkr_raw.xtime_nsec -= snsec_per_sec; 1999 + tk->raw_time.tv_sec++; 2020 2000 } 2021 - tk->raw_time.tv_nsec = raw_nsecs; 2001 + tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift; 2002 + tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; 2022 2003 2023 2004 /* Accumulate error between NTP and clock interval */ 2024 2005 tk->ntp_error += tk->ntp_tick << shift; ··· 2051 2030 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET 2052 2031 offset = real_tk->cycle_interval; 2053 2032 #else 2054 - offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock), 2033 + offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), 2055 2034 tk->tkr_mono.cycle_last, tk->tkr_mono.mask); 2056 2035 #endif 2057 2036
-3
kernel/trace/ftrace.c
··· 4337 4337 4338 4338 command = strsep(&next, ":"); 4339 4339 4340 - if (WARN_ON_ONCE(!tr)) 4341 - return -EINVAL; 4342 - 4343 4340 mutex_lock(&ftrace_cmd_mutex); 4344 4341 list_for_each_entry(p, &ftrace_commands, list) { 4345 4342 if (strcmp(p->name, command) == 0) {
+3
kernel/trace/trace.c
··· 6881 6881 char *number; 6882 6882 int ret; 6883 6883 6884 + if (!tr) 6885 + return -ENODEV; 6886 + 6884 6887 /* hash funcs only work with set_ftrace_filter */ 6885 6888 if (!enable) 6886 6889 return -EINVAL;
+12
kernel/trace/trace_functions.c
··· 654 654 { 655 655 struct ftrace_probe_ops *ops; 656 656 657 + if (!tr) 658 + return -ENODEV; 659 + 657 660 /* we register both traceon and traceoff to this callback */ 658 661 if (strcmp(cmd, "traceon") == 0) 659 662 ops = param ? &traceon_count_probe_ops : &traceon_probe_ops; ··· 673 670 { 674 671 struct ftrace_probe_ops *ops; 675 672 673 + if (!tr) 674 + return -ENODEV; 675 + 676 676 ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops; 677 677 678 678 return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd, ··· 687 681 char *glob, char *cmd, char *param, int enable) 688 682 { 689 683 struct ftrace_probe_ops *ops; 684 + 685 + if (!tr) 686 + return -ENODEV; 690 687 691 688 ops = &dump_probe_ops; 692 689 ··· 703 694 char *glob, char *cmd, char *param, int enable) 704 695 { 705 696 struct ftrace_probe_ops *ops; 697 + 698 + if (!tr) 699 + return -ENODEV; 706 700 707 701 ops = &cpudump_probe_ops; 708 702
+5 -9
kernel/trace/trace_kprobe.c
··· 707 707 pr_info("Probe point is not specified.\n"); 708 708 return -EINVAL; 709 709 } 710 - if (isdigit(argv[1][0])) { 711 - /* an address specified */ 712 - ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr); 713 - if (ret) { 714 - pr_info("Failed to parse address.\n"); 715 - return ret; 716 - } 717 - } else { 710 + 711 + /* try to parse an address. if that fails, try to read the 712 + * input as a symbol. */ 713 + if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) { 718 714 /* a symbol specified */ 719 715 symbol = argv[1]; 720 716 /* TODO: support .init module functions */ 721 717 ret = traceprobe_split_symbol_offset(symbol, &offset); 722 718 if (ret) { 723 - pr_info("Failed to parse symbol.\n"); 719 + pr_info("Failed to parse either an address or a symbol.\n"); 724 720 return ret; 725 721 } 726 722 if (offset && is_return &&
+4 -2
kernel/trace/trace_stack.c
··· 409 409 static int 410 410 stack_trace_filter_open(struct inode *inode, struct file *file) 411 411 { 412 - return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER, 412 + struct ftrace_ops *ops = inode->i_private; 413 + 414 + return ftrace_regex_open(ops, FTRACE_ITER_FILTER, 413 415 inode, file); 414 416 } 415 417 ··· 478 476 NULL, &stack_trace_fops); 479 477 480 478 trace_create_file("stack_trace_filter", 0444, d_tracer, 481 - NULL, &stack_trace_filter_fops); 479 + &trace_ops, &stack_trace_filter_fops); 482 480 483 481 if (stack_trace_filter_buf[0]) 484 482 ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
+3 -3
lib/cmdline.c
··· 23 23 * the values[M, M+1, ..., N] into the ints array in get_options. 24 24 */ 25 25 26 - static int get_range(char **str, int *pint) 26 + static int get_range(char **str, int *pint, int n) 27 27 { 28 28 int x, inc_counter, upper_range; 29 29 30 30 (*str)++; 31 31 upper_range = simple_strtol((*str), NULL, 0); 32 32 inc_counter = upper_range - *pint; 33 - for (x = *pint; x < upper_range; x++) 33 + for (x = *pint; n && x < upper_range; x++, n--) 34 34 *pint++ = x; 35 35 return inc_counter; 36 36 } ··· 97 97 break; 98 98 if (res == 3) { 99 99 int range_nums; 100 - range_nums = get_range((char **)&str, ints + i); 100 + range_nums = get_range((char **)&str, ints + i, nints - i); 101 101 if (range_nums < 0) 102 102 break; 103 103 /*
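The extra n argument to get_range() caps how many values a range may expand to, so get_options() can no longer write past the array the caller handed in. A hypothetical caller, to show the contract being protected (nints counts the whole array, and slot 0 receives the number of values parsed):

static int __init foo_parse(char *str)	/* hypothetical __setup() handler */
{
	int ints[8];	/* ints[0] gets the count, up to 7 values follow */

	/* An over-long range such as "1-300" is now truncated to the seven
	 * slots actually available instead of running off the end of ints[]. */
	get_options(str, ARRAY_SIZE(ints), ints);
	return 1;
}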
+2
lib/kobject_uevent.c
··· 50 50 [KOBJ_MOVE] = "move", 51 51 [KOBJ_ONLINE] = "online", 52 52 [KOBJ_OFFLINE] = "offline", 53 + [KOBJ_BIND] = "bind", 54 + [KOBJ_UNBIND] = "unbind", 53 55 }; 54 56 55 57 /**
+4 -2
lib/libcrc32c.c
··· 43 43 u32 crc32c(u32 crc, const void *address, unsigned int length) 44 44 { 45 45 SHASH_DESC_ON_STACK(shash, tfm); 46 - u32 *ctx = (u32 *)shash_desc_ctx(shash); 46 + u32 ret, *ctx = (u32 *)shash_desc_ctx(shash); 47 47 int err; 48 48 49 49 shash->tfm = tfm; ··· 53 53 err = crypto_shash_update(shash, address, length); 54 54 BUG_ON(err); 55 55 56 - return *ctx; 56 + ret = *ctx; 57 + barrier_data(ctx); 58 + return ret; 57 59 } 58 60 59 61 EXPORT_SYMBOL(crc32c);
+8 -17
mm/gup.c
··· 387 387 /* mlock all present pages, but do not fault in new pages */ 388 388 if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) 389 389 return -ENOENT; 390 - /* For mm_populate(), just skip the stack guard page. */ 391 - if ((*flags & FOLL_POPULATE) && 392 - (stack_guard_page_start(vma, address) || 393 - stack_guard_page_end(vma, address + PAGE_SIZE))) 394 - return -ENOENT; 395 390 if (*flags & FOLL_WRITE) 396 391 fault_flags |= FAULT_FLAG_WRITE; 397 392 if (*flags & FOLL_REMOTE) ··· 402 407 403 408 ret = handle_mm_fault(vma, address, fault_flags); 404 409 if (ret & VM_FAULT_ERROR) { 405 - if (ret & VM_FAULT_OOM) 406 - return -ENOMEM; 407 - if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 408 - return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; 409 - if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 410 - return -EFAULT; 410 + int err = vm_fault_to_errno(ret, *flags); 411 + 412 + if (err) 413 + return err; 411 414 BUG(); 412 415 } 413 416 ··· 716 723 ret = handle_mm_fault(vma, address, fault_flags); 717 724 major |= ret & VM_FAULT_MAJOR; 718 725 if (ret & VM_FAULT_ERROR) { 719 - if (ret & VM_FAULT_OOM) 720 - return -ENOMEM; 721 - if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) 722 - return -EHWPOISON; 723 - if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) 724 - return -EFAULT; 726 + int err = vm_fault_to_errno(ret, 0); 727 + 728 + if (err) 729 + return err; 725 730 BUG(); 726 731 } 727 732
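Both fault paths above now funnel the VM_FAULT_* error bits through a single vm_fault_to_errno() helper instead of open-coding the translation twice. The helper is added in a header elsewhere in this series (presumably include/linux/mm.h); it should look roughly like this, preserving the old FOLL_HWPOISON special case:

static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
{
	if (vm_fault & VM_FAULT_OOM)
		return -ENOMEM;
	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
		return -EFAULT;
	return 0;
}

The mm/hugetlb.c hunk further down calls the same helper with the gup flags.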
+7 -1
mm/huge_memory.c
··· 1426 1426 */ 1427 1427 if (unlikely(pmd_trans_migrating(*vmf->pmd))) { 1428 1428 page = pmd_page(*vmf->pmd); 1429 + if (!get_page_unless_zero(page)) 1430 + goto out_unlock; 1429 1431 spin_unlock(vmf->ptl); 1430 1432 wait_on_page_locked(page); 1433 + put_page(page); 1431 1434 goto out; 1432 1435 } 1433 1436 ··· 1462 1459 1463 1460 /* Migration could have started since the pmd_trans_migrating check */ 1464 1461 if (!page_locked) { 1462 + page_nid = -1; 1463 + if (!get_page_unless_zero(page)) 1464 + goto out_unlock; 1465 1465 spin_unlock(vmf->ptl); 1466 1466 wait_on_page_locked(page); 1467 - page_nid = -1; 1467 + put_page(page); 1468 1468 goto out; 1469 1469 } 1470 1470
+5
mm/hugetlb.c
··· 4170 4170 } 4171 4171 ret = hugetlb_fault(mm, vma, vaddr, fault_flags); 4172 4172 if (ret & VM_FAULT_ERROR) { 4173 + int err = vm_fault_to_errno(ret, flags); 4174 + 4175 + if (err) 4176 + return err; 4177 + 4173 4178 remainder = 0; 4174 4179 break; 4175 4180 }
-1
mm/khugepaged.c
··· 652 652 spin_unlock(ptl); 653 653 free_page_and_swap_cache(src_page); 654 654 } 655 - cond_resched(); 656 655 } 657 656 } 658 657
+1 -2
mm/ksm.c
··· 1028 1028 goto out; 1029 1029 1030 1030 if (PageTransCompound(page)) { 1031 - err = split_huge_page(page); 1032 - if (err) 1031 + if (split_huge_page(page)) 1033 1032 goto out_unlock; 1034 1033 } 1035 1034
+23
mm/memblock.c
··· 1739 1739 } 1740 1740 } 1741 1741 1742 + extern unsigned long __init_memblock 1743 + memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr) 1744 + { 1745 + struct memblock_region *rgn; 1746 + unsigned long size = 0; 1747 + int idx; 1748 + 1749 + for_each_memblock_type((&memblock.reserved), rgn) { 1750 + phys_addr_t start, end; 1751 + 1752 + if (rgn->base + rgn->size < start_addr) 1753 + continue; 1754 + if (rgn->base > end_addr) 1755 + continue; 1756 + 1757 + start = rgn->base; 1758 + end = start + rgn->size; 1759 + size += end - start; 1760 + } 1761 + 1762 + return size; 1763 + } 1764 + 1742 1765 void __init_memblock __memblock_dump_all(void) 1743 1766 { 1744 1767 pr_info("MEMBLOCK configuration:\n");
+6 -7
mm/memory-failure.c
··· 1184 1184 * page_remove_rmap() in try_to_unmap_one(). So to determine page status 1185 1185 * correctly, we save a copy of the page flags at this time. 1186 1186 */ 1187 - page_flags = p->flags; 1187 + if (PageHuge(p)) 1188 + page_flags = hpage->flags; 1189 + else 1190 + page_flags = p->flags; 1188 1191 1189 1192 /* 1190 1193 * unpoison always clear PG_hwpoison inside page lock ··· 1598 1595 if (ret) { 1599 1596 pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n", 1600 1597 pfn, ret, page->flags, &page->flags); 1601 - /* 1602 - * We know that soft_offline_huge_page() tries to migrate 1603 - * only one hugepage pointed to by hpage, so we need not 1604 - * run through the pagelist here. 1605 - */ 1606 - putback_active_hugepage(hpage); 1598 + if (!list_empty(&pagelist)) 1599 + putback_movable_pages(&pagelist); 1607 1600 if (ret > 0) 1608 1601 ret = -EIO; 1609 1602 } else {
+30 -48
mm/memory.c
··· 2855 2855 } 2856 2856 2857 2857 /* 2858 - * This is like a special single-page "expand_{down|up}wards()", 2859 - * except we must first make sure that 'address{-|+}PAGE_SIZE' 2860 - * doesn't hit another vma. 2861 - */ 2862 - static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) 2863 - { 2864 - address &= PAGE_MASK; 2865 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { 2866 - struct vm_area_struct *prev = vma->vm_prev; 2867 - 2868 - /* 2869 - * Is there a mapping abutting this one below? 2870 - * 2871 - * That's only ok if it's the same stack mapping 2872 - * that has gotten split.. 2873 - */ 2874 - if (prev && prev->vm_end == address) 2875 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; 2876 - 2877 - return expand_downwards(vma, address - PAGE_SIZE); 2878 - } 2879 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { 2880 - struct vm_area_struct *next = vma->vm_next; 2881 - 2882 - /* As VM_GROWSDOWN but s/below/above/ */ 2883 - if (next && next->vm_start == address + PAGE_SIZE) 2884 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; 2885 - 2886 - return expand_upwards(vma, address + PAGE_SIZE); 2887 - } 2888 - return 0; 2889 - } 2890 - 2891 - /* 2892 2858 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2893 2859 * but allow concurrent faults), and pte mapped but not yet locked. 2894 2860 * We return with mmap_sem still held, but pte unmapped and unlocked. ··· 2869 2903 /* File mapping without ->vm_ops ? */ 2870 2904 if (vma->vm_flags & VM_SHARED) 2871 2905 return VM_FAULT_SIGBUS; 2872 - 2873 - /* Check if we need to add a guard page to the stack */ 2874 - if (check_stack_guard_page(vma, vmf->address) < 0) 2875 - return VM_FAULT_SIGSEGV; 2876 2906 2877 2907 /* 2878 2908 * Use pte_alloc() instead of pte_alloc_map(). We can't run ··· 2991 3029 return ret; 2992 3030 } 2993 3031 3032 + /* 3033 + * The ordering of these checks is important for pmds with _PAGE_DEVMAP set. 3034 + * If we check pmd_trans_unstable() first we will trip the bad_pmd() check 3035 + * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly 3036 + * returning 1 but not before it spams dmesg with the pmd_clear_bad() output. 3037 + */ 3038 + static int pmd_devmap_trans_unstable(pmd_t *pmd) 3039 + { 3040 + return pmd_devmap(*pmd) || pmd_trans_unstable(pmd); 3041 + } 3042 + 2994 3043 static int pte_alloc_one_map(struct vm_fault *vmf) 2995 3044 { 2996 3045 struct vm_area_struct *vma = vmf->vma; ··· 3025 3052 map_pte: 3026 3053 /* 3027 3054 * If a huge pmd materialized under us just retry later. Use 3028 - * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd 3029 - * didn't become pmd_trans_huge under us and then back to pmd_none, as 3030 - * a result of MADV_DONTNEED running immediately after a huge pmd fault 3031 - * in a different thread of this mm, in turn leading to a misleading 3032 - * pmd_trans_huge() retval. All we have to ensure is that it is a 3033 - * regular pmd that we can walk with pte_offset_map() and we can do that 3034 - * through an atomic read in C, which is what pmd_trans_unstable() 3035 - * provides. 3055 + * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of 3056 + * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge 3057 + * under us and then back to pmd_none, as a result of MADV_DONTNEED 3058 + * running immediately after a huge pmd fault in a different thread of 3059 + * this mm, in turn leading to a misleading pmd_trans_huge() retval. 
3060 + * All we have to ensure is that it is a regular pmd that we can walk 3061 + * with pte_offset_map() and we can do that through an atomic read in 3062 + * C, which is what pmd_trans_unstable() provides. 3036 3063 */ 3037 - if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd)) 3064 + if (pmd_devmap_trans_unstable(vmf->pmd)) 3038 3065 return VM_FAULT_NOPAGE; 3039 3066 3067 + /* 3068 + * At this point we know that our vmf->pmd points to a page of ptes 3069 + * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge() 3070 + * for the duration of the fault. If a racing MADV_DONTNEED runs and 3071 + * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still 3072 + * be valid and we will re-check to make sure the vmf->pte isn't 3073 + * pte_none() under vmf->ptl protection when we return to 3074 + * alloc_set_pte(). 3075 + */ 3040 3076 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 3041 3077 &vmf->ptl); 3042 3078 return 0; ··· 3672 3690 vmf->pte = NULL; 3673 3691 } else { 3674 3692 /* See comment in pte_alloc_one_map() */ 3675 - if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd)) 3693 + if (pmd_devmap_trans_unstable(vmf->pmd)) 3676 3694 return 0; 3677 3695 /* 3678 3696 * A regular pmd is established and it can't morph into a huge
+3 -2
mm/mlock.c
··· 284 284 { 285 285 int i; 286 286 int nr = pagevec_count(pvec); 287 - int delta_munlocked; 287 + int delta_munlocked = -nr; 288 288 struct pagevec pvec_putback; 289 289 int pgrescued = 0; 290 290 ··· 304 304 continue; 305 305 else 306 306 __munlock_isolation_failed(page); 307 + } else { 308 + delta_munlocked++; 307 309 } 308 310 309 311 /* ··· 317 315 pagevec_add(&pvec_putback, pvec->pages[i]); 318 316 pvec->pages[i] = NULL; 319 317 } 320 - delta_munlocked = -nr + pagevec_count(&pvec_putback); 321 318 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); 322 319 spin_unlock_irq(zone_lru_lock(zone)); 323 320
+97 -63
mm/mmap.c
··· 183 183 unsigned long retval; 184 184 unsigned long newbrk, oldbrk; 185 185 struct mm_struct *mm = current->mm; 186 + struct vm_area_struct *next; 186 187 unsigned long min_brk; 187 188 bool populate; 188 189 LIST_HEAD(uf); ··· 230 229 } 231 230 232 231 /* Check against existing mmap mappings. */ 233 - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) 232 + next = find_vma(mm, oldbrk); 233 + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) 234 234 goto out; 235 235 236 236 /* Ok, looks good - let it rip. */ ··· 255 253 256 254 static long vma_compute_subtree_gap(struct vm_area_struct *vma) 257 255 { 258 - unsigned long max, subtree_gap; 259 - max = vma->vm_start; 260 - if (vma->vm_prev) 261 - max -= vma->vm_prev->vm_end; 256 + unsigned long max, prev_end, subtree_gap; 257 + 258 + /* 259 + * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we 260 + * allow two stack_guard_gaps between them here, and when choosing 261 + * an unmapped area; whereas when expanding we only require one. 262 + * That's a little inconsistent, but keeps the code here simpler. 263 + */ 264 + max = vm_start_gap(vma); 265 + if (vma->vm_prev) { 266 + prev_end = vm_end_gap(vma->vm_prev); 267 + if (max > prev_end) 268 + max -= prev_end; 269 + else 270 + max = 0; 271 + } 262 272 if (vma->vm_rb.rb_left) { 263 273 subtree_gap = rb_entry(vma->vm_rb.rb_left, 264 274 struct vm_area_struct, vm_rb)->rb_subtree_gap; ··· 366 352 anon_vma_unlock_read(anon_vma); 367 353 } 368 354 369 - highest_address = vma->vm_end; 355 + highest_address = vm_end_gap(vma); 370 356 vma = vma->vm_next; 371 357 i++; 372 358 } ··· 555 541 if (vma->vm_next) 556 542 vma_gap_update(vma->vm_next); 557 543 else 558 - mm->highest_vm_end = vma->vm_end; 544 + mm->highest_vm_end = vm_end_gap(vma); 559 545 560 546 /* 561 547 * vma->vm_prev wasn't known when we followed the rbtree to find the ··· 870 856 vma_gap_update(vma); 871 857 if (end_changed) { 872 858 if (!next) 873 - mm->highest_vm_end = end; 859 + mm->highest_vm_end = vm_end_gap(vma); 874 860 else if (!adjust_next) 875 861 vma_gap_update(next); 876 862 } ··· 955 941 * mm->highest_vm_end doesn't need any update 956 942 * in remove_next == 1 case. 957 943 */ 958 - VM_WARN_ON(mm->highest_vm_end != end); 944 + VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); 959 945 } 960 946 } 961 947 if (insert && file) ··· 1801 1787 1802 1788 while (true) { 1803 1789 /* Visit left subtree if it looks promising */ 1804 - gap_end = vma->vm_start; 1790 + gap_end = vm_start_gap(vma); 1805 1791 if (gap_end >= low_limit && vma->vm_rb.rb_left) { 1806 1792 struct vm_area_struct *left = 1807 1793 rb_entry(vma->vm_rb.rb_left, ··· 1812 1798 } 1813 1799 } 1814 1800 1815 - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; 1801 + gap_start = vma->vm_prev ? 
vm_end_gap(vma->vm_prev) : 0; 1816 1802 check_current: 1817 1803 /* Check if current node has a suitable gap */ 1818 1804 if (gap_start > high_limit) 1819 1805 return -ENOMEM; 1820 - if (gap_end >= low_limit && gap_end - gap_start >= length) 1806 + if (gap_end >= low_limit && 1807 + gap_end > gap_start && gap_end - gap_start >= length) 1821 1808 goto found; 1822 1809 1823 1810 /* Visit right subtree if it looks promising */ ··· 1840 1825 vma = rb_entry(rb_parent(prev), 1841 1826 struct vm_area_struct, vm_rb); 1842 1827 if (prev == vma->vm_rb.rb_left) { 1843 - gap_start = vma->vm_prev->vm_end; 1844 - gap_end = vma->vm_start; 1828 + gap_start = vm_end_gap(vma->vm_prev); 1829 + gap_end = vm_start_gap(vma); 1845 1830 goto check_current; 1846 1831 } 1847 1832 } ··· 1905 1890 1906 1891 while (true) { 1907 1892 /* Visit right subtree if it looks promising */ 1908 - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; 1893 + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; 1909 1894 if (gap_start <= high_limit && vma->vm_rb.rb_right) { 1910 1895 struct vm_area_struct *right = 1911 1896 rb_entry(vma->vm_rb.rb_right, ··· 1918 1903 1919 1904 check_current: 1920 1905 /* Check if current node has a suitable gap */ 1921 - gap_end = vma->vm_start; 1906 + gap_end = vm_start_gap(vma); 1922 1907 if (gap_end < low_limit) 1923 1908 return -ENOMEM; 1924 - if (gap_start <= high_limit && gap_end - gap_start >= length) 1909 + if (gap_start <= high_limit && 1910 + gap_end > gap_start && gap_end - gap_start >= length) 1925 1911 goto found; 1926 1912 1927 1913 /* Visit left subtree if it looks promising */ ··· 1945 1929 struct vm_area_struct, vm_rb); 1946 1930 if (prev == vma->vm_rb.rb_right) { 1947 1931 gap_start = vma->vm_prev ? 1948 - vma->vm_prev->vm_end : 0; 1932 + vm_end_gap(vma->vm_prev) : 0; 1949 1933 goto check_current; 1950 1934 } 1951 1935 } ··· 1983 1967 unsigned long len, unsigned long pgoff, unsigned long flags) 1984 1968 { 1985 1969 struct mm_struct *mm = current->mm; 1986 - struct vm_area_struct *vma; 1970 + struct vm_area_struct *vma, *prev; 1987 1971 struct vm_unmapped_area_info info; 1988 1972 1989 1973 if (len > TASK_SIZE - mmap_min_addr) ··· 1994 1978 1995 1979 if (addr) { 1996 1980 addr = PAGE_ALIGN(addr); 1997 - vma = find_vma(mm, addr); 1981 + vma = find_vma_prev(mm, addr, &prev); 1998 1982 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 1999 - (!vma || addr + len <= vma->vm_start)) 1983 + (!vma || addr + len <= vm_start_gap(vma)) && 1984 + (!prev || addr >= vm_end_gap(prev))) 2000 1985 return addr; 2001 1986 } 2002 1987 ··· 2020 2003 const unsigned long len, const unsigned long pgoff, 2021 2004 const unsigned long flags) 2022 2005 { 2023 - struct vm_area_struct *vma; 2006 + struct vm_area_struct *vma, *prev; 2024 2007 struct mm_struct *mm = current->mm; 2025 2008 unsigned long addr = addr0; 2026 2009 struct vm_unmapped_area_info info; ··· 2035 2018 /* requesting a specific address */ 2036 2019 if (addr) { 2037 2020 addr = PAGE_ALIGN(addr); 2038 - vma = find_vma(mm, addr); 2021 + vma = find_vma_prev(mm, addr, &prev); 2039 2022 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && 2040 - (!vma || addr + len <= vma->vm_start)) 2023 + (!vma || addr + len <= vm_start_gap(vma)) && 2024 + (!prev || addr >= vm_end_gap(prev))) 2041 2025 return addr; 2042 2026 } 2043 2027 ··· 2173 2155 * update accounting. This is shared with both the 2174 2156 * grow-up and grow-down cases. 
2175 2157 */ 2176 - static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) 2158 + static int acct_stack_growth(struct vm_area_struct *vma, 2159 + unsigned long size, unsigned long grow) 2177 2160 { 2178 2161 struct mm_struct *mm = vma->vm_mm; 2179 2162 struct rlimit *rlim = current->signal->rlim; 2180 - unsigned long new_start, actual_size; 2163 + unsigned long new_start; 2181 2164 2182 2165 /* address space limit tests */ 2183 2166 if (!may_expand_vm(mm, vma->vm_flags, grow)) 2184 2167 return -ENOMEM; 2185 2168 2186 2169 /* Stack limit test */ 2187 - actual_size = size; 2188 - if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) 2189 - actual_size -= PAGE_SIZE; 2190 - if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) 2170 + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) 2191 2171 return -ENOMEM; 2192 2172 2193 2173 /* mlock limit tests */ ··· 2223 2207 int expand_upwards(struct vm_area_struct *vma, unsigned long address) 2224 2208 { 2225 2209 struct mm_struct *mm = vma->vm_mm; 2210 + struct vm_area_struct *next; 2211 + unsigned long gap_addr; 2226 2212 int error = 0; 2227 2213 2228 2214 if (!(vma->vm_flags & VM_GROWSUP)) 2229 2215 return -EFAULT; 2230 2216 2231 - /* Guard against wrapping around to address 0. */ 2232 - if (address < PAGE_ALIGN(address+4)) 2233 - address = PAGE_ALIGN(address+4); 2234 - else 2217 + /* Guard against exceeding limits of the address space. */ 2218 + address &= PAGE_MASK; 2219 + if (address >= TASK_SIZE) 2235 2220 return -ENOMEM; 2221 + address += PAGE_SIZE; 2222 + 2223 + /* Enforce stack_guard_gap */ 2224 + gap_addr = address + stack_guard_gap; 2225 + 2226 + /* Guard against overflow */ 2227 + if (gap_addr < address || gap_addr > TASK_SIZE) 2228 + gap_addr = TASK_SIZE; 2229 + 2230 + next = vma->vm_next; 2231 + if (next && next->vm_start < gap_addr) { 2232 + if (!(next->vm_flags & VM_GROWSUP)) 2233 + return -ENOMEM; 2234 + /* Check that both stack segments have the same anon_vma? */ 2235 + } 2236 2236 2237 2237 /* We must make sure the anon_vma is allocated. */ 2238 2238 if (unlikely(anon_vma_prepare(vma))) ··· 2293 2261 if (vma->vm_next) 2294 2262 vma_gap_update(vma->vm_next); 2295 2263 else 2296 - mm->highest_vm_end = address; 2264 + mm->highest_vm_end = vm_end_gap(vma); 2297 2265 spin_unlock(&mm->page_table_lock); 2298 2266 2299 2267 perf_event_mmap(vma); ··· 2314 2282 unsigned long address) 2315 2283 { 2316 2284 struct mm_struct *mm = vma->vm_mm; 2285 + struct vm_area_struct *prev; 2286 + unsigned long gap_addr; 2317 2287 int error; 2318 2288 2319 2289 address &= PAGE_MASK; 2320 2290 error = security_mmap_addr(address); 2321 2291 if (error) 2322 2292 return error; 2293 + 2294 + /* Enforce stack_guard_gap */ 2295 + gap_addr = address - stack_guard_gap; 2296 + if (gap_addr > address) 2297 + return -ENOMEM; 2298 + prev = vma->vm_prev; 2299 + if (prev && prev->vm_end > gap_addr) { 2300 + if (!(prev->vm_flags & VM_GROWSDOWN)) 2301 + return -ENOMEM; 2302 + /* Check that both stack segments have the same anon_vma? */ 2303 + } 2323 2304 2324 2305 /* We must make sure the anon_vma is allocated. */ 2325 2306 if (unlikely(anon_vma_prepare(vma))) ··· 2388 2343 return error; 2389 2344 } 2390 2345 2391 - /* 2392 - * Note how expand_stack() refuses to expand the stack all the way to 2393 - * abut the next virtual mapping, *unless* that mapping itself is also 2394 - * a stack mapping. 
We want to leave room for a guard page, after all 2395 - * (the guard page itself is not added here, that is done by the 2396 - * actual page faulting logic) 2397 - * 2398 - * This matches the behavior of the guard page logic (see mm/memory.c: 2399 - * check_stack_guard_page()), which only allows the guard page to be 2400 - * removed under these circumstances. 2401 - */ 2346 + /* enforced gap between the expanding stack and other mappings. */ 2347 + unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT; 2348 + 2349 + static int __init cmdline_parse_stack_guard_gap(char *p) 2350 + { 2351 + unsigned long val; 2352 + char *endptr; 2353 + 2354 + val = simple_strtoul(p, &endptr, 10); 2355 + if (!*endptr) 2356 + stack_guard_gap = val << PAGE_SHIFT; 2357 + 2358 + return 0; 2359 + } 2360 + __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); 2361 + 2402 2362 #ifdef CONFIG_STACK_GROWSUP 2403 2363 int expand_stack(struct vm_area_struct *vma, unsigned long address) 2404 2364 { 2405 - struct vm_area_struct *next; 2406 - 2407 - address &= PAGE_MASK; 2408 - next = vma->vm_next; 2409 - if (next && next->vm_start == address + PAGE_SIZE) { 2410 - if (!(next->vm_flags & VM_GROWSUP)) 2411 - return -ENOMEM; 2412 - } 2413 2365 return expand_upwards(vma, address); 2414 2366 } 2415 2367 ··· 2428 2386 #else 2429 2387 int expand_stack(struct vm_area_struct *vma, unsigned long address) 2430 2388 { 2431 - struct vm_area_struct *prev; 2432 - 2433 - address &= PAGE_MASK; 2434 - prev = vma->vm_prev; 2435 - if (prev && prev->vm_end == address) { 2436 - if (!(prev->vm_flags & VM_GROWSDOWN)) 2437 - return -ENOMEM; 2438 - } 2439 2389 return expand_downwards(vma, address); 2440 2390 } 2441 2391 ··· 2525 2491 vma->vm_prev = prev; 2526 2492 vma_gap_update(vma); 2527 2493 } else 2528 - mm->highest_vm_end = prev ? prev->vm_end : 0; 2494 + mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; 2529 2495 tail_vma->vm_next = NULL; 2530 2496 2531 2497 /* Kill the cache */
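Throughout the mmap hunk the gap computations now go through vm_start_gap()/vm_end_gap() instead of raw vm_start/vm_end, which is how the new stack_guard_gap (256 pages by default, tunable via the stack_guard_gap= early parameter registered at the end of the hunk) gets accounted as part of a stack VMA. The two helpers live in a header added by the same patch, presumably include/linux/mm.h; a sketch of the expected shape, subtracting the guard below a VM_GROWSDOWN stack and adding it above a VM_GROWSUP one, clamped on wraparound:

static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* wrapped below zero */
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;
		if (vm_end < vma->vm_end)	/* wrapped past the top */
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}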
+25 -12
mm/page_alloc.c
··· 292 292 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 293 293 static inline void reset_deferred_meminit(pg_data_t *pgdat) 294 294 { 295 + unsigned long max_initialise; 296 + unsigned long reserved_lowmem; 297 + 298 + /* 299 + * Initialise at least 2G of a node but also take into account that 300 + * two large system hashes that can take up 1GB for 0.25TB/node. 301 + */ 302 + max_initialise = max(2UL << (30 - PAGE_SHIFT), 303 + (pgdat->node_spanned_pages >> 8)); 304 + 305 + /* 306 + * Compensate the all the memblock reservations (e.g. crash kernel) 307 + * from the initial estimation to make sure we will initialize enough 308 + * memory to boot. 309 + */ 310 + reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn, 311 + pgdat->node_start_pfn + max_initialise); 312 + max_initialise += reserved_lowmem; 313 + 314 + pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages); 295 315 pgdat->first_deferred_pfn = ULONG_MAX; 296 316 } 297 317 ··· 334 314 unsigned long pfn, unsigned long zone_end, 335 315 unsigned long *nr_initialised) 336 316 { 337 - unsigned long max_initialise; 338 - 339 317 /* Always populate low zones for address-contrained allocations */ 340 318 if (zone_end < pgdat_end_pfn(pgdat)) 341 319 return true; 342 - /* 343 - * Initialise at least 2G of a node but also take into account that 344 - * two large system hashes that can take up 1GB for 0.25TB/node. 345 - */ 346 - max_initialise = max(2UL << (30 - PAGE_SHIFT), 347 - (pgdat->node_spanned_pages >> 8)); 348 - 349 320 (*nr_initialised)++; 350 - if ((*nr_initialised > max_initialise) && 321 + if ((*nr_initialised > pgdat->static_init_size) && 351 322 (pfn & (PAGES_PER_SECTION - 1)) == 0) { 352 323 pgdat->first_deferred_pfn = pfn; 353 324 return false; ··· 3881 3870 goto got_pg; 3882 3871 3883 3872 /* Avoid allocations with no watermarks from looping endlessly */ 3884 - if (test_thread_flag(TIF_MEMDIE)) 3873 + if (test_thread_flag(TIF_MEMDIE) && 3874 + (alloc_flags == ALLOC_NO_WATERMARKS || 3875 + (gfp_mask & __GFP_NOMEMALLOC))) 3885 3876 goto nopage; 3886 3877 3887 3878 /* Retry as long as the OOM killer is making progress */ ··· 6149 6136 /* pg_data_t should be reset to zero when it's allocated */ 6150 6137 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx); 6151 6138 6152 - reset_deferred_meminit(pgdat); 6153 6139 pgdat->node_id = nid; 6154 6140 pgdat->node_start_pfn = node_start_pfn; 6155 6141 pgdat->per_cpu_nodestats = NULL; ··· 6170 6158 (unsigned long)pgdat->node_mem_map); 6171 6159 #endif 6172 6160 6161 + reset_deferred_meminit(pgdat); 6173 6162 free_area_init_core(pgdat); 6174 6163 } 6175 6164
+30 -16
mm/slub.c
··· 5512 5512 char mbuf[64]; 5513 5513 char *buf; 5514 5514 struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); 5515 + ssize_t len; 5515 5516 5516 5517 if (!attr || !attr->store || !attr->show) 5517 5518 continue; ··· 5537 5536 buf = buffer; 5538 5537 } 5539 5538 5540 - attr->show(root_cache, buf); 5541 - attr->store(s, buf, strlen(buf)); 5539 + len = attr->show(root_cache, buf); 5540 + if (len > 0) 5541 + attr->store(s, buf, len); 5542 5542 } 5543 5543 5544 5544 if (buffer) ··· 5625 5623 return name; 5626 5624 } 5627 5625 5626 + static void sysfs_slab_remove_workfn(struct work_struct *work) 5627 + { 5628 + struct kmem_cache *s = 5629 + container_of(work, struct kmem_cache, kobj_remove_work); 5630 + 5631 + if (!s->kobj.state_in_sysfs) 5632 + /* 5633 + * For a memcg cache, this may be called during 5634 + * deactivation and again on shutdown. Remove only once. 5635 + * A cache is never shut down before deactivation is 5636 + * complete, so no need to worry about synchronization. 5637 + */ 5638 + return; 5639 + 5640 + #ifdef CONFIG_MEMCG 5641 + kset_unregister(s->memcg_kset); 5642 + #endif 5643 + kobject_uevent(&s->kobj, KOBJ_REMOVE); 5644 + kobject_del(&s->kobj); 5645 + kobject_put(&s->kobj); 5646 + } 5647 + 5628 5648 static int sysfs_slab_add(struct kmem_cache *s) 5629 5649 { 5630 5650 int err; 5631 5651 const char *name; 5632 5652 struct kset *kset = cache_kset(s); 5633 5653 int unmergeable = slab_unmergeable(s); 5654 + 5655 + INIT_WORK(&s->kobj_remove_work, sysfs_slab_remove_workfn); 5634 5656 5635 5657 if (!kset) { 5636 5658 kobject_init(&s->kobj, &slab_ktype); ··· 5719 5693 */ 5720 5694 return; 5721 5695 5722 - if (!s->kobj.state_in_sysfs) 5723 - /* 5724 - * For a memcg cache, this may be called during 5725 - * deactivation and again on shutdown. Remove only once. 5726 - * A cache is never shut down before deactivation is 5727 - * complete, so no need to worry about synchronization. 5728 - */ 5729 - return; 5730 - 5731 - #ifdef CONFIG_MEMCG 5732 - kset_unregister(s->memcg_kset); 5733 - #endif 5734 - kobject_uevent(&s->kobj, KOBJ_REMOVE); 5735 - kobject_del(&s->kobj); 5696 + kobject_get(&s->kobj); 5697 + schedule_work(&s->kobj_remove_work); 5736 5698 } 5737 5699 5738 5700 void sysfs_slab_release(struct kmem_cache *s)
+3
mm/swap_cgroup.c
··· 48 48 if (!page) 49 49 goto not_enough_page; 50 50 ctrl->map[idx] = page; 51 + 52 + if (!(idx % SWAP_CLUSTER_MAX)) 53 + cond_resched(); 51 54 } 52 55 return 0; 53 56 not_enough_page:
+5 -2
mm/util.c
··· 357 357 WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL); 358 358 359 359 /* 360 - * Make sure that larger requests are not too disruptive - no OOM 361 - * killer and no allocation failure warnings as we have a fallback 360 + * We want to attempt a large physically contiguous block first because 361 + * it is less likely to fragment multiple larger blocks and therefore 362 + * contribute to a long term fragmentation less than vmalloc fallback. 363 + * However make sure that larger requests are not too disruptive - no 364 + * OOM killer and no allocation failure warnings as we have a fallback. 362 365 */ 363 366 if (size > PAGE_SIZE) { 364 367 kmalloc_flags |= __GFP_NOWARN;
+13 -2
mm/vmalloc.c
··· 287 287 if (p4d_none(*p4d)) 288 288 return NULL; 289 289 pud = pud_offset(p4d, addr); 290 - if (pud_none(*pud)) 290 + 291 + /* 292 + * Don't dereference bad PUD or PMD (below) entries. This will also 293 + * identify huge mappings, which we may encounter on architectures 294 + * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be 295 + * identified as vmalloc addresses by is_vmalloc_addr(), but are 296 + * not [unambiguously] associated with a struct page, so there is 297 + * no correct value to return for them. 298 + */ 299 + WARN_ON_ONCE(pud_bad(*pud)); 300 + if (pud_none(*pud) || pud_bad(*pud)) 291 301 return NULL; 292 302 pmd = pmd_offset(pud, addr); 293 - if (pmd_none(*pmd)) 303 + WARN_ON_ONCE(pmd_bad(*pmd)); 304 + if (pmd_none(*pmd) || pmd_bad(*pmd)) 294 305 return NULL; 295 306 296 307 ptep = pte_offset_map(pmd, addr);
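vmalloc_to_page() now refuses, with a one-time warning, to walk through bad or huge PUD/PMD entries instead of dereferencing them. A hypothetical caller sketch (start/size assumed to describe some region in the vmalloc range) showing what the NULL return means in practice:

/* Count how many pages of a vmalloc-range region are backed by an ordinary
 * struct page.  Huge or bad entries now yield NULL rather than a bogus
 * pointer the caller would go on to dereference. */
static unsigned int count_backed_pages(unsigned long start, unsigned long size)
{
	unsigned long addr;
	unsigned int nr = 0;

	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
		if (vmalloc_to_page((void *)addr))
			nr++;
	}
	return nr;
}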
+3 -3
mm/vmpressure.c
··· 115 115 unsigned long pressure = 0; 116 116 117 117 /* 118 - * reclaimed can be greater than scanned in cases 119 - * like THP, where the scanned is 1 and reclaimed 120 - * could be 512 118 + * reclaimed can be greater than scanned for things such as reclaimed 119 + * slab pages. shrink_node() just adds reclaimed pages without a 120 + * related increment to scanned pages. 121 121 */ 122 122 if (reclaimed >= scanned) 123 123 goto out;
+2 -1
net/8021q/vlan.c
··· 277 277 return 0; 278 278 279 279 out_free_newdev: 280 - free_netdev(new_dev); 280 + if (new_dev->reg_state == NETREG_UNINITIALIZED) 281 + free_netdev(new_dev); 281 282 return err; 282 283 } 283 284
+2 -2
net/8021q/vlan_dev.c
··· 813 813 814 814 free_percpu(vlan->vlan_pcpu_stats); 815 815 vlan->vlan_pcpu_stats = NULL; 816 - free_netdev(dev); 817 816 } 818 817 819 818 void vlan_setup(struct net_device *dev) ··· 825 826 netif_keep_dst(dev); 826 827 827 828 dev->netdev_ops = &vlan_netdev_ops; 828 - dev->destructor = vlan_dev_free; 829 + dev->needs_free_netdev = true; 830 + dev->priv_destructor = vlan_dev_free; 829 831 dev->ethtool_ops = &vlan_ethtool_ops; 830 832 831 833 dev->min_mtu = 0;
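This is the first of several hunks in this merge (batman-adv, Bluetooth 6LoWPAN, bridge, CAIF, HSR, 802.15.4 6LoWPAN, the IPv4 tunnels, ipmr, and the VLAN code here) converting drivers from dev->destructor to the split needs_free_netdev / priv_destructor scheme that the net/core/dev.c hunk further down teaches register_netdevice() and the unregister path to honour. The driver-side pattern, sketched with hypothetical foo_* names and a made-up percpu stats member:

static void foo_dev_free(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	free_percpu(priv->stats);		/* private resources only */
	/* no free_netdev(dev) here any more - the core does that */
}

static void foo_setup(struct net_device *dev)
{
	dev->netdev_ops = &foo_netdev_ops;
	dev->needs_free_netdev = true;		/* core calls free_netdev() */
	dev->priv_destructor = foo_dev_free;	/* optional private cleanup */
}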
+3 -2
net/batman-adv/distributed-arp-table.c
··· 1064 1064 1065 1065 skb_new->protocol = eth_type_trans(skb_new, soft_iface); 1066 1066 1067 - soft_iface->stats.rx_packets++; 1068 - soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size; 1067 + batadv_inc_counter(bat_priv, BATADV_CNT_RX); 1068 + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, 1069 + skb->len + ETH_HLEN + hdr_size); 1069 1070 1070 1071 netif_rx(skb_new); 1071 1072 batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
+1 -1
net/batman-adv/routing.c
··· 987 987 batadv_dbg(BATADV_DBG_BLA, bat_priv, 988 988 "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n", 989 989 orig_addr_gw); 990 - return NET_RX_DROP; 990 + goto free_skb; 991 991 } 992 992 } 993 993
+2 -3
net/batman-adv/soft-interface.c
··· 1034 1034 * netdev and its private data (bat_priv) 1035 1035 */ 1036 1036 rcu_barrier(); 1037 - 1038 - free_netdev(dev); 1039 1037 } 1040 1038 1041 1039 /** ··· 1045 1047 ether_setup(dev); 1046 1048 1047 1049 dev->netdev_ops = &batadv_netdev_ops; 1048 - dev->destructor = batadv_softif_free; 1050 + dev->needs_free_netdev = true; 1051 + dev->priv_destructor = batadv_softif_free; 1049 1052 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; 1050 1053 dev->priv_flags |= IFF_NO_QUEUE; 1051 1054
+1 -1
net/bluetooth/6lowpan.c
··· 598 598 599 599 dev->netdev_ops = &netdev_ops; 600 600 dev->header_ops = &header_ops; 601 - dev->destructor = free_netdev; 601 + dev->needs_free_netdev = true; 602 602 } 603 603 604 604 static struct device_type bt_type = {
+1 -1
net/bridge/br_device.c
··· 379 379 ether_setup(dev); 380 380 381 381 dev->netdev_ops = &br_netdev_ops; 382 - dev->destructor = free_netdev; 382 + dev->needs_free_netdev = true; 383 383 dev->ethtool_ops = &br_ethtool_ops; 384 384 SET_NETDEV_DEVTYPE(dev, &br_type); 385 385 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
+1 -1
net/bridge/br_netlink.c
··· 595 595 err = 0; 596 596 switch (nla_type(attr)) { 597 597 case IFLA_BRIDGE_VLAN_TUNNEL_INFO: 598 - if (!(p->flags & BR_VLAN_TUNNEL)) 598 + if (!p || !(p->flags & BR_VLAN_TUNNEL)) 599 599 return -EINVAL; 600 600 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr); 601 601 if (err)
+2 -1
net/bridge/br_stp_if.c
··· 179 179 br_debug(br, "using kernel STP\n"); 180 180 181 181 /* To start timers on any ports left in blocking */ 182 - mod_timer(&br->hello_timer, jiffies + br->hello_time); 182 + if (br->dev->flags & IFF_UP) 183 + mod_timer(&br->hello_timer, jiffies + br->hello_time); 183 184 br_port_state_selection(br); 184 185 } 185 186
+4
net/caif/caif_socket.c
··· 754 754 755 755 lock_sock(sk); 756 756 757 + err = -EINVAL; 758 + if (addr_len < offsetofend(struct sockaddr, sa_family)) 759 + goto out; 760 + 757 761 err = -EAFNOSUPPORT; 758 762 if (uaddr->sa_family != AF_CAIF) 759 763 goto out;
+1 -5
net/caif/cfpkt_skbuff.c
··· 81 81 { 82 82 struct sk_buff *skb; 83 83 84 - if (likely(in_interrupt())) 85 - skb = alloc_skb(len + pfx, GFP_ATOMIC); 86 - else 87 - skb = alloc_skb(len + pfx, GFP_KERNEL); 88 - 84 + skb = alloc_skb(len + pfx, GFP_ATOMIC); 89 85 if (unlikely(skb == NULL)) 90 86 return NULL; 91 87
+2 -2
net/caif/chnl_net.c
··· 392 392 { 393 393 struct chnl_net *priv = netdev_priv(dev); 394 394 caif_free_client(&priv->chnl); 395 - free_netdev(dev); 396 395 } 397 396 398 397 static void ipcaif_net_setup(struct net_device *dev) 399 398 { 400 399 struct chnl_net *priv; 401 400 dev->netdev_ops = &netdev_ops; 402 - dev->destructor = chnl_net_destructor; 401 + dev->needs_free_netdev = true; 402 + dev->priv_destructor = chnl_net_destructor; 403 403 dev->flags |= IFF_NOARP; 404 404 dev->flags |= IFF_POINTOPOINT; 405 405 dev->mtu = GPRS_PDP_MTU;
+1 -2
net/can/af_can.c
··· 872 872 873 873 static int can_pernet_init(struct net *net) 874 874 { 875 - net->can.can_rcvlists_lock = 876 - __SPIN_LOCK_UNLOCKED(net->can.can_rcvlists_lock); 875 + spin_lock_init(&net->can.can_rcvlists_lock); 877 876 net->can.can_rx_alldev_list = 878 877 kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); 879 878
+50 -24
net/core/dev.c
··· 1253 1253 if (!new_ifalias) 1254 1254 return -ENOMEM; 1255 1255 dev->ifalias = new_ifalias; 1256 + memcpy(dev->ifalias, alias, len); 1257 + dev->ifalias[len] = 0; 1256 1258 1257 - strlcpy(dev->ifalias, alias, len+1); 1258 1259 return len; 1259 1260 } 1260 1261 ··· 4767 4766 } 4768 4767 EXPORT_SYMBOL(gro_find_complete_by_type); 4769 4768 4769 + static void napi_skb_free_stolen_head(struct sk_buff *skb) 4770 + { 4771 + skb_dst_drop(skb); 4772 + secpath_reset(skb); 4773 + kmem_cache_free(skbuff_head_cache, skb); 4774 + } 4775 + 4770 4776 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) 4771 4777 { 4772 4778 switch (ret) { ··· 4787 4779 break; 4788 4780 4789 4781 case GRO_MERGED_FREE: 4790 - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { 4791 - skb_dst_drop(skb); 4792 - secpath_reset(skb); 4793 - kmem_cache_free(skbuff_head_cache, skb); 4794 - } else { 4782 + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 4783 + napi_skb_free_stolen_head(skb); 4784 + else 4795 4785 __kfree_skb(skb); 4796 - } 4797 4786 break; 4798 4787 4799 4788 case GRO_HELD: ··· 4862 4857 break; 4863 4858 4864 4859 case GRO_DROP: 4865 - case GRO_MERGED_FREE: 4866 4860 napi_reuse_skb(napi, skb); 4861 + break; 4862 + 4863 + case GRO_MERGED_FREE: 4864 + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) 4865 + napi_skb_free_stolen_head(skb); 4866 + else 4867 + napi_reuse_skb(napi, skb); 4867 4868 break; 4868 4869 4869 4870 case GRO_MERGED: ··· 4959 4948 } 4960 4949 EXPORT_SYMBOL(__skb_gro_checksum_complete); 4961 4950 4951 + static void net_rps_send_ipi(struct softnet_data *remsd) 4952 + { 4953 + #ifdef CONFIG_RPS 4954 + while (remsd) { 4955 + struct softnet_data *next = remsd->rps_ipi_next; 4956 + 4957 + if (cpu_online(remsd->cpu)) 4958 + smp_call_function_single_async(remsd->cpu, &remsd->csd); 4959 + remsd = next; 4960 + } 4961 + #endif 4962 + } 4963 + 4962 4964 /* 4963 4965 * net_rps_action_and_irq_enable sends any pending IPI's for rps. 4964 4966 * Note: called with local irq disabled, but exits with local irq enabled. ··· 4987 4963 local_irq_enable(); 4988 4964 4989 4965 /* Send pending IPI's to kick RPS processing on remote cpus. 
*/ 4990 - while (remsd) { 4991 - struct softnet_data *next = remsd->rps_ipi_next; 4992 - 4993 - if (cpu_online(remsd->cpu)) 4994 - smp_call_function_single_async(remsd->cpu, 4995 - &remsd->csd); 4996 - remsd = next; 4997 - } 4966 + net_rps_send_ipi(remsd); 4998 4967 } else 4999 4968 #endif 5000 4969 local_irq_enable(); ··· 5216 5199 if (rc == BUSY_POLL_BUDGET) 5217 5200 __napi_schedule(napi); 5218 5201 local_bh_enable(); 5219 - if (local_softirq_pending()) 5220 - do_softirq(); 5221 5202 } 5222 5203 5223 5204 void napi_busy_loop(unsigned int napi_id, ··· 7516 7501 err_uninit: 7517 7502 if (dev->netdev_ops->ndo_uninit) 7518 7503 dev->netdev_ops->ndo_uninit(dev); 7504 + if (dev->priv_destructor) 7505 + dev->priv_destructor(dev); 7519 7506 goto out; 7520 7507 } 7521 7508 EXPORT_SYMBOL(register_netdevice); ··· 7725 7708 WARN_ON(rcu_access_pointer(dev->ip6_ptr)); 7726 7709 WARN_ON(dev->dn_ptr); 7727 7710 7728 - if (dev->destructor) 7729 - dev->destructor(dev); 7711 + if (dev->priv_destructor) 7712 + dev->priv_destructor(dev); 7713 + if (dev->needs_free_netdev) 7714 + free_netdev(dev); 7730 7715 7731 7716 /* Report a network device has been unregistered */ 7732 7717 rtnl_lock(); ··· 7793 7774 } else { 7794 7775 netdev_stats_to_stats64(storage, &dev->stats); 7795 7776 } 7796 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped); 7797 - storage->tx_dropped += atomic_long_read(&dev->tx_dropped); 7798 - storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler); 7777 + storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); 7778 + storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); 7779 + storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler); 7799 7780 return storage; 7800 7781 } 7801 7782 EXPORT_SYMBOL(dev_get_stats); ··· 8211 8192 struct sk_buff **list_skb; 8212 8193 struct sk_buff *skb; 8213 8194 unsigned int cpu; 8214 - struct softnet_data *sd, *oldsd; 8195 + struct softnet_data *sd, *oldsd, *remsd = NULL; 8215 8196 8216 8197 local_irq_disable(); 8217 8198 cpu = smp_processor_id(); ··· 8251 8232 8252 8233 raise_softirq_irqoff(NET_TX_SOFTIRQ); 8253 8234 local_irq_enable(); 8235 + 8236 + #ifdef CONFIG_RPS 8237 + remsd = oldsd->rps_ipi_list; 8238 + oldsd->rps_ipi_list = NULL; 8239 + #endif 8240 + /* send out pending IPI's on offline CPU */ 8241 + net_rps_send_ipi(remsd); 8254 8242 8255 8243 /* Process offline CPU's input_pkt_queue */ 8256 8244 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
+16 -3
net/core/dev_ioctl.c
··· 410 410 if (cmd == SIOCGIFNAME) 411 411 return dev_ifname(net, (struct ifreq __user *)arg); 412 412 413 + /* 414 + * Take care of Wireless Extensions. Unfortunately struct iwreq 415 + * isn't a proper subset of struct ifreq (it's 8 byte shorter) 416 + * so we need to treat it specially, otherwise applications may 417 + * fault if the struct they're passing happens to land at the 418 + * end of a mapped page. 419 + */ 420 + if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) { 421 + struct iwreq iwr; 422 + 423 + if (copy_from_user(&iwr, arg, sizeof(iwr))) 424 + return -EFAULT; 425 + 426 + return wext_handle_ioctl(net, &iwr, cmd, arg); 427 + } 428 + 413 429 if (copy_from_user(&ifr, arg, sizeof(struct ifreq))) 414 430 return -EFAULT; 415 431 ··· 575 559 ret = -EFAULT; 576 560 return ret; 577 561 } 578 - /* Take care of Wireless Extensions */ 579 - if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) 580 - return wext_handle_ioctl(net, &ifr, cmd, arg); 581 562 return -ENOTTY; 582 563 } 583 564 }
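The wireless-extensions ioctls really pass a struct iwreq, which is 8 bytes shorter than struct ifreq, so copying a full ifreq could read past the end of the caller's buffer; the hunk above therefore copies an iwreq and dispatches before the generic ifreq handling. For reference, a hypothetical userspace caller of one of the affected ioctls (interface name "wlan0" and an already-open AF_INET datagram socket sock are assumed; needs linux/wireless.h and sys/ioctl.h):

	struct iwreq wrq;

	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_ifrn.ifrn_name, "wlan0", IFNAMSIZ);

	/* The request block is only sizeof(struct iwreq); with the old code
	 * the kernel copied sizeof(struct ifreq) bytes from it. */
	if (ioctl(sock, SIOCGIWNAME, &wrq) == 0)
		printf("wireless protocol: %s\n", wrq.u.name);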
+6 -2
net/core/devlink.c
··· 1680 1680 1681 1681 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, 1682 1682 &devlink_nl_family, NLM_F_MULTI, cmd); 1683 - if (!hdr) 1683 + if (!hdr) { 1684 + nlmsg_free(skb); 1684 1685 return -EMSGSIZE; 1686 + } 1685 1687 1686 1688 if (devlink_nl_put_handle(skb, devlink)) 1687 1689 goto nla_put_failure; ··· 2100 2098 2101 2099 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, 2102 2100 &devlink_nl_family, NLM_F_MULTI, cmd); 2103 - if (!hdr) 2101 + if (!hdr) { 2102 + nlmsg_free(skb); 2104 2103 return -EMSGSIZE; 2104 + } 2105 2105 2106 2106 if (devlink_nl_put_handle(skb, devlink)) 2107 2107 goto nla_put_failure;
+14
net/core/dst.c
··· 469 469 spin_lock_bh(&dst_garbage.lock); 470 470 dst = dst_garbage.list; 471 471 dst_garbage.list = NULL; 472 + /* The code in dst_ifdown places a hold on the loopback device. 473 + * If the gc entry processing is set to expire after a lengthy 474 + * interval, this hold can cause netdev_wait_allrefs() to hang 475 + * out and wait for a long time -- until the the loopback 476 + * interface is released. If we're really unlucky, it'll emit 477 + * pr_emerg messages to console too. Reset the interval here, 478 + * so dst cleanups occur in a more timely fashion. 479 + */ 480 + if (dst_garbage.timer_inc > DST_GC_INC) { 481 + dst_garbage.timer_inc = DST_GC_INC; 482 + dst_garbage.timer_expires = DST_GC_MIN; 483 + mod_delayed_work(system_wq, &dst_gc_work, 484 + dst_garbage.timer_expires); 485 + } 472 486 spin_unlock_bh(&dst_garbage.lock); 473 487 474 488 if (last)
+14 -7
net/core/fib_rules.c
··· 568 568 struct net *net = sock_net(skb->sk); 569 569 struct fib_rule_hdr *frh = nlmsg_data(nlh); 570 570 struct fib_rules_ops *ops = NULL; 571 - struct fib_rule *rule, *tmp; 571 + struct fib_rule *rule, *r; 572 572 struct nlattr *tb[FRA_MAX+1]; 573 573 struct fib_kuid_range range; 574 574 int err = -EINVAL; ··· 668 668 669 669 /* 670 670 * Check if this rule is a target to any of them. If so, 671 + * adjust to the next one with the same preference or 671 672 * disable them. As this operation is eventually very 672 - * expensive, it is only performed if goto rules have 673 - * actually been added. 673 + * expensive, it is only performed if goto rules, except 674 + * current if it is goto rule, have actually been added. 674 675 */ 675 676 if (ops->nr_goto_rules > 0) { 676 - list_for_each_entry(tmp, &ops->rules_list, list) { 677 - if (rtnl_dereference(tmp->ctarget) == rule) { 678 - RCU_INIT_POINTER(tmp->ctarget, NULL); 677 + struct fib_rule *n; 678 + 679 + n = list_next_entry(rule, list); 680 + if (&n->list == &ops->rules_list || n->pref != rule->pref) 681 + n = NULL; 682 + list_for_each_entry(r, &ops->rules_list, list) { 683 + if (rtnl_dereference(r->ctarget) != rule) 684 + continue; 685 + rcu_assign_pointer(r->ctarget, n); 686 + if (!n) 679 687 ops->unresolved_rules++; 680 - } 681 688 } 682 689 } 683 690
+4 -1
net/core/rtnetlink.c
··· 931 931 + nla_total_size(1) /* IFLA_LINKMODE */ 932 932 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ 933 933 + nla_total_size(4) /* IFLA_LINK_NETNSID */ 934 + + nla_total_size(4) /* IFLA_GROUP */ 934 935 + nla_total_size(ext_filter_mask 935 936 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ 936 937 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ ··· 1125 1124 struct ifla_vf_mac vf_mac; 1126 1125 struct ifla_vf_info ivi; 1127 1126 1127 + memset(&ivi, 0, sizeof(ivi)); 1128 + 1128 1129 /* Not all SR-IOV capable drivers support the 1129 1130 * spoofcheck and "RSS query enable" query. Preset to 1130 1131 * -1 so the user space tool can detect that the driver ··· 1135 1132 ivi.spoofchk = -1; 1136 1133 ivi.rss_query_en = -1; 1137 1134 ivi.trusted = -1; 1138 - memset(ivi.mac, 0, sizeof(ivi.mac)); 1139 1135 /* The default value for VF link state is "auto" 1140 1136 * IFLA_VF_LINK_STATE_AUTO which equals zero 1141 1137 */ ··· 1469 1467 [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, 1470 1468 [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, 1471 1469 [IFLA_XDP] = { .type = NLA_NESTED }, 1470 + [IFLA_GROUP] = { .type = NLA_U32 }, 1472 1471 }; 1473 1472 1474 1473 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+4 -1
net/core/skbuff.c
··· 3754 3754 3755 3755 spin_lock_irqsave(&q->lock, flags); 3756 3756 skb = __skb_dequeue(q); 3757 - if (skb && (skb_next = skb_peek(q))) 3757 + if (skb && (skb_next = skb_peek(q))) { 3758 3758 icmp_next = is_icmp_err_skb(skb_next); 3759 + if (icmp_next) 3760 + sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin; 3761 + } 3759 3762 spin_unlock_irqrestore(&q->lock, flags); 3760 3763 3761 3764 if (is_icmp_err_skb(skb) && !icmp_next)
+4 -10
net/decnet/dn_route.c
··· 188 188 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); 189 189 } 190 190 191 - static inline void dnrt_drop(struct dn_route *rt) 192 - { 193 - dst_release(&rt->dst); 194 - call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); 195 - } 196 - 197 191 static void dn_dst_check_expire(unsigned long dummy) 198 192 { 199 193 int i; ··· 242 248 } 243 249 *rtp = rt->dst.dn_next; 244 250 rt->dst.dn_next = NULL; 245 - dnrt_drop(rt); 251 + dnrt_free(rt); 246 252 break; 247 253 } 248 254 spin_unlock_bh(&dn_rt_hash_table[i].lock); ··· 344 350 dst_use(&rth->dst, now); 345 351 spin_unlock_bh(&dn_rt_hash_table[hash].lock); 346 352 347 - dnrt_drop(rt); 353 + dst_free(&rt->dst); 348 354 *rp = rth; 349 355 return 0; 350 356 } ··· 374 380 for(; rt; rt = next) { 375 381 next = rcu_dereference_raw(rt->dst.dn_next); 376 382 RCU_INIT_POINTER(rt->dst.dn_next, NULL); 377 - dst_free((struct dst_entry *)rt); 383 + dnrt_free(rt); 378 384 } 379 385 380 386 nothing_to_declare: ··· 1181 1187 if (dev_out->flags & IFF_LOOPBACK) 1182 1188 flags |= RTCF_LOCAL; 1183 1189 1184 - rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); 1190 + rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST); 1185 1191 if (rt == NULL) 1186 1192 goto e_nobufs; 1187 1193
+3 -1
net/decnet/netfilter/dn_rtmsg.c
··· 102 102 { 103 103 struct nlmsghdr *nlh = nlmsg_hdr(skb); 104 104 105 - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) 105 + if (skb->len < sizeof(*nlh) || 106 + nlh->nlmsg_len < sizeof(*nlh) || 107 + skb->len < nlh->nlmsg_len) 106 108 return; 107 109 108 110 if (!netlink_capable(skb, CAP_NET_ADMIN))
+47
net/dsa/dsa.c
··· 223 223 return 0; 224 224 } 225 225 226 + #ifdef CONFIG_PM_SLEEP 227 + int dsa_switch_suspend(struct dsa_switch *ds) 228 + { 229 + int i, ret = 0; 230 + 231 + /* Suspend slave network devices */ 232 + for (i = 0; i < ds->num_ports; i++) { 233 + if (!dsa_is_port_initialized(ds, i)) 234 + continue; 235 + 236 + ret = dsa_slave_suspend(ds->ports[i].netdev); 237 + if (ret) 238 + return ret; 239 + } 240 + 241 + if (ds->ops->suspend) 242 + ret = ds->ops->suspend(ds); 243 + 244 + return ret; 245 + } 246 + EXPORT_SYMBOL_GPL(dsa_switch_suspend); 247 + 248 + int dsa_switch_resume(struct dsa_switch *ds) 249 + { 250 + int i, ret = 0; 251 + 252 + if (ds->ops->resume) 253 + ret = ds->ops->resume(ds); 254 + 255 + if (ret) 256 + return ret; 257 + 258 + /* Resume slave network devices */ 259 + for (i = 0; i < ds->num_ports; i++) { 260 + if (!dsa_is_port_initialized(ds, i)) 261 + continue; 262 + 263 + ret = dsa_slave_resume(ds->ports[i].netdev); 264 + if (ret) 265 + return ret; 266 + } 267 + 268 + return 0; 269 + } 270 + EXPORT_SYMBOL_GPL(dsa_switch_resume); 271 + #endif 272 + 226 273 static struct packet_type dsa_pack_type __read_mostly = { 227 274 .type = cpu_to_be16(ETH_P_XDSA), 228 275 .func = dsa_switch_rcv,
+3 -1
net/dsa/dsa2.c
··· 484 484 dsa_ds_unapply(dst, ds); 485 485 } 486 486 487 - if (dst->cpu_switch) 487 + if (dst->cpu_switch) { 488 488 dsa_cpu_port_ethtool_restore(dst->cpu_switch); 489 + dst->cpu_switch = NULL; 490 + } 489 491 490 492 pr_info("DSA: tree %d unapplied\n", dst->tree); 491 493 dst->applied = false;
-47
net/dsa/legacy.c
··· 289 289 dsa_switch_unregister_notifier(ds); 290 290 } 291 291 292 - #ifdef CONFIG_PM_SLEEP 293 - int dsa_switch_suspend(struct dsa_switch *ds) 294 - { 295 - int i, ret = 0; 296 - 297 - /* Suspend slave network devices */ 298 - for (i = 0; i < ds->num_ports; i++) { 299 - if (!dsa_is_port_initialized(ds, i)) 300 - continue; 301 - 302 - ret = dsa_slave_suspend(ds->ports[i].netdev); 303 - if (ret) 304 - return ret; 305 - } 306 - 307 - if (ds->ops->suspend) 308 - ret = ds->ops->suspend(ds); 309 - 310 - return ret; 311 - } 312 - EXPORT_SYMBOL_GPL(dsa_switch_suspend); 313 - 314 - int dsa_switch_resume(struct dsa_switch *ds) 315 - { 316 - int i, ret = 0; 317 - 318 - if (ds->ops->resume) 319 - ret = ds->ops->resume(ds); 320 - 321 - if (ret) 322 - return ret; 323 - 324 - /* Resume slave network devices */ 325 - for (i = 0; i < ds->num_ports; i++) { 326 - if (!dsa_is_port_initialized(ds, i)) 327 - continue; 328 - 329 - ret = dsa_slave_resume(ds->ports[i].netdev); 330 - if (ret) 331 - return ret; 332 - } 333 - 334 - return 0; 335 - } 336 - EXPORT_SYMBOL_GPL(dsa_switch_resume); 337 - #endif 338 - 339 292 /* platform driver init and cleanup *****************************************/ 340 293 static int dev_is_class(struct device *dev, void *class) 341 294 {
+2 -2
net/hsr/hsr_device.c
··· 378 378 del_timer_sync(&hsr->announce_timer); 379 379 380 380 synchronize_rcu(); 381 - free_netdev(hsr_dev); 382 381 } 383 382 384 383 static const struct net_device_ops hsr_device_ops = { ··· 403 404 SET_NETDEV_DEVTYPE(dev, &hsr_type); 404 405 dev->priv_flags |= IFF_NO_QUEUE; 405 406 406 - dev->destructor = hsr_dev_destroy; 407 + dev->needs_free_netdev = true; 408 + dev->priv_destructor = hsr_dev_destroy; 407 409 408 410 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 409 411 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+1 -2
net/hsr/hsr_forward.c
··· 324 324 unsigned long irqflags; 325 325 326 326 frame->is_supervision = is_supervision_frame(port->hsr, skb); 327 - frame->node_src = hsr_get_node(&port->hsr->node_db, skb, 328 - frame->is_supervision); 327 + frame->node_src = hsr_get_node(port, skb, frame->is_supervision); 329 328 if (frame->node_src == NULL) 330 329 return -1; /* Unknown node and !is_supervision, or no mem */ 331 330
+7 -2
net/hsr/hsr_framereg.c
··· 158 158 159 159 /* Get the hsr_node from which 'skb' was sent. 160 160 */ 161 - struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, 161 + struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, 162 162 bool is_sup) 163 163 { 164 + struct list_head *node_db = &port->hsr->node_db; 164 165 struct hsr_node *node; 165 166 struct ethhdr *ethhdr; 166 167 u16 seq_out; ··· 187 186 */ 188 187 seq_out = hsr_get_skb_sequence_nr(skb) - 1; 189 188 } else { 190 - WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); 189 + /* this is called also for frames from master port and 190 + * so warn only for non master ports 191 + */ 192 + if (port->type != HSR_PT_MASTER) 193 + WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); 191 194 seq_out = HSR_SEQNR_START; 192 195 } 193 196
+1 -1
net/hsr/hsr_framereg.h
··· 18 18 19 19 struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], 20 20 u16 seq_out); 21 - struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, 21 + struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, 22 22 bool is_sup); 23 23 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, 24 24 struct hsr_port *port);
+1 -1
net/ieee802154/6lowpan/core.c
··· 107 107 108 108 ldev->netdev_ops = &lowpan_netdev_ops; 109 109 ldev->header_ops = &lowpan_header_ops; 110 - ldev->destructor = free_netdev; 110 + ldev->needs_free_netdev = true; 111 111 ldev->features |= NETIF_F_NETNS_LOCAL; 112 112 } 113 113
+1 -1
net/ipv4/af_inet.c
··· 1043 1043 .type = SOCK_DGRAM, 1044 1044 .protocol = IPPROTO_ICMP, 1045 1045 .prot = &ping_prot, 1046 - .ops = &inet_dgram_ops, 1046 + .ops = &inet_sockraw_ops, 1047 1047 .flags = INET_PROTOSW_REUSE, 1048 1048 }, 1049 1049
+6 -2
net/ipv4/icmp.c
··· 657 657 /* Needed by both icmp_global_allow and icmp_xmit_lock */ 658 658 local_bh_disable(); 659 659 660 - /* Check global sysctl_icmp_msgs_per_sec ratelimit */ 661 - if (!icmpv4_global_allow(net, type, code)) 660 + /* Check global sysctl_icmp_msgs_per_sec ratelimit, unless 661 + * incoming dev is loopback. If outgoing dev change to not be 662 + * loopback, then peer ratelimit still work (in icmpv4_xrlim_allow) 663 + */ 664 + if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) && 665 + !icmpv4_global_allow(net, type, code)) 662 666 goto out_bh_enable; 663 667 664 668 sk = icmp_xmit_lock(net);
+15 -9
net/ipv4/igmp.c
··· 1112 1112 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); 1113 1113 if (!pmc) 1114 1114 return; 1115 + spin_lock_init(&pmc->lock); 1115 1116 spin_lock_bh(&im->lock); 1116 1117 pmc->interface = im->interface; 1117 1118 in_dev_hold(in_dev); ··· 2072 2071 2073 2072 static void ip_mc_clear_src(struct ip_mc_list *pmc) 2074 2073 { 2075 - struct ip_sf_list *psf, *nextpsf; 2074 + struct ip_sf_list *psf, *nextpsf, *tomb, *sources; 2076 2075 2077 - for (psf = pmc->tomb; psf; psf = nextpsf) { 2078 - nextpsf = psf->sf_next; 2079 - kfree(psf); 2080 - } 2076 + spin_lock_bh(&pmc->lock); 2077 + tomb = pmc->tomb; 2081 2078 pmc->tomb = NULL; 2082 - for (psf = pmc->sources; psf; psf = nextpsf) { 2083 - nextpsf = psf->sf_next; 2084 - kfree(psf); 2085 - } 2079 + sources = pmc->sources; 2086 2080 pmc->sources = NULL; 2087 2081 pmc->sfmode = MCAST_EXCLUDE; 2088 2082 pmc->sfcount[MCAST_INCLUDE] = 0; 2089 2083 pmc->sfcount[MCAST_EXCLUDE] = 1; 2084 + spin_unlock_bh(&pmc->lock); 2085 + 2086 + for (psf = tomb; psf; psf = nextpsf) { 2087 + nextpsf = psf->sf_next; 2088 + kfree(psf); 2089 + } 2090 + for (psf = sources; psf; psf = nextpsf) { 2091 + nextpsf = psf->sf_next; 2092 + kfree(psf); 2093 + } 2090 2094 } 2091 2095 2092 2096 /* Join a multicast group
+2 -1
net/ipv4/ip_output.c
··· 964 964 csummode = CHECKSUM_PARTIAL; 965 965 966 966 cork->length += length; 967 - if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) && 967 + if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) || 968 + (skb && skb_is_gso(skb))) && 968 969 (sk->sk_protocol == IPPROTO_UDP) && 969 970 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && 970 971 (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+4 -2
net/ipv4/ip_tunnel.c
··· 446 446 return 0; 447 447 448 448 drop: 449 + if (tun_dst) 450 + dst_release((struct dst_entry *)tun_dst); 449 451 kfree_skb(skb); 450 452 return 0; 451 453 } ··· 969 967 gro_cells_destroy(&tunnel->gro_cells); 970 968 dst_cache_destroy(&tunnel->dst_cache); 971 969 free_percpu(dev->tstats); 972 - free_netdev(dev); 973 970 } 974 971 975 972 void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) ··· 1156 1155 struct iphdr *iph = &tunnel->parms.iph; 1157 1156 int err; 1158 1157 1159 - dev->destructor = ip_tunnel_dev_free; 1158 + dev->needs_free_netdev = true; 1159 + dev->priv_destructor = ip_tunnel_dev_free; 1160 1160 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1161 1161 if (!dev->tstats) 1162 1162 return -ENOMEM;
+16 -18
net/ipv4/ipmr.c
··· 101 101 static void ipmr_free_table(struct mr_table *mrt); 102 102 103 103 static void ip_mr_forward(struct net *net, struct mr_table *mrt, 104 - struct sk_buff *skb, struct mfc_cache *cache, 105 - int local); 104 + struct net_device *dev, struct sk_buff *skb, 105 + struct mfc_cache *cache, int local); 106 106 static int ipmr_cache_report(struct mr_table *mrt, 107 107 struct sk_buff *pkt, vifi_t vifi, int assert); 108 108 static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, ··· 501 501 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; 502 502 dev->flags = IFF_NOARP; 503 503 dev->netdev_ops = &reg_vif_netdev_ops; 504 - dev->destructor = free_netdev; 504 + dev->needs_free_netdev = true; 505 505 dev->features |= NETIF_F_NETNS_LOCAL; 506 506 } 507 507 ··· 988 988 989 989 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 990 990 } else { 991 - ip_mr_forward(net, mrt, skb, c, 0); 991 + ip_mr_forward(net, mrt, skb->dev, skb, c, 0); 992 992 } 993 993 } 994 994 } ··· 1073 1073 1074 1074 /* Queue a packet for resolution. It gets locked cache entry! */ 1075 1075 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, 1076 - struct sk_buff *skb) 1076 + struct sk_buff *skb, struct net_device *dev) 1077 1077 { 1078 1078 const struct iphdr *iph = ip_hdr(skb); 1079 1079 struct mfc_cache *c; ··· 1130 1130 kfree_skb(skb); 1131 1131 err = -ENOBUFS; 1132 1132 } else { 1133 + if (dev) { 1134 + skb->dev = dev; 1135 + skb->skb_iif = dev->ifindex; 1136 + } 1133 1137 skb_queue_tail(&c->mfc_un.unres.unresolved, skb); 1134 1138 err = 0; 1135 1139 } ··· 1832 1828 1833 1829 /* "local" means that we should preserve one skb (for local delivery) */ 1834 1830 static void ip_mr_forward(struct net *net, struct mr_table *mrt, 1835 - struct sk_buff *skb, struct mfc_cache *cache, 1836 - int local) 1831 + struct net_device *dev, struct sk_buff *skb, 1832 + struct mfc_cache *cache, int local) 1837 1833 { 1838 - int true_vifi = ipmr_find_vif(mrt, skb->dev); 1834 + int true_vifi = ipmr_find_vif(mrt, dev); 1839 1835 int psend = -1; 1840 1836 int vif, ct; 1841 1837 ··· 1857 1853 } 1858 1854 1859 1855 /* Wrong interface: drop packet and (maybe) send PIM assert. */ 1860 - if (mrt->vif_table[vif].dev != skb->dev) { 1861 - struct net_device *mdev; 1862 - 1863 - mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev); 1864 - if (mdev == skb->dev) 1865 - goto forward; 1866 - 1856 + if (mrt->vif_table[vif].dev != dev) { 1867 1857 if (rt_is_output_route(skb_rtable(skb))) { 1868 1858 /* It is our own packet, looped back. 1869 1859 * Very complicated situation... ··· 2051 2053 read_lock(&mrt_lock); 2052 2054 vif = ipmr_find_vif(mrt, dev); 2053 2055 if (vif >= 0) { 2054 - int err2 = ipmr_cache_unresolved(mrt, vif, skb); 2056 + int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev); 2055 2057 read_unlock(&mrt_lock); 2056 2058 2057 2059 return err2; ··· 2062 2064 } 2063 2065 2064 2066 read_lock(&mrt_lock); 2065 - ip_mr_forward(net, mrt, skb, cache, local); 2067 + ip_mr_forward(net, mrt, dev, skb, cache, local); 2066 2068 read_unlock(&mrt_lock); 2067 2069 2068 2070 if (local) ··· 2236 2238 iph->saddr = saddr; 2237 2239 iph->daddr = daddr; 2238 2240 iph->version = 0; 2239 - err = ipmr_cache_unresolved(mrt, vif, skb2); 2241 + err = ipmr_cache_unresolved(mrt, vif, skb2, dev); 2240 2242 read_unlock(&mrt_lock); 2241 2243 rcu_read_unlock(); 2242 2244 return err;
+6 -2
net/ipv4/tcp.c
··· 2330 2330 tcp_init_send_head(sk); 2331 2331 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2332 2332 __sk_dst_reset(sk); 2333 + dst_release(sk->sk_rx_dst); 2334 + sk->sk_rx_dst = NULL; 2333 2335 tcp_saved_syn_free(tp); 2334 2336 2335 2337 /* Clean up fastopen related fields */ ··· 2383 2381 return 0; 2384 2382 } 2385 2383 2386 - static int tcp_repair_options_est(struct tcp_sock *tp, 2384 + static int tcp_repair_options_est(struct sock *sk, 2387 2385 struct tcp_repair_opt __user *optbuf, unsigned int len) 2388 2386 { 2387 + struct tcp_sock *tp = tcp_sk(sk); 2389 2388 struct tcp_repair_opt opt; 2390 2389 2391 2390 while (len >= sizeof(opt)) { ··· 2399 2396 switch (opt.opt_code) { 2400 2397 case TCPOPT_MSS: 2401 2398 tp->rx_opt.mss_clamp = opt.opt_val; 2399 + tcp_mtup_init(sk); 2402 2400 break; 2403 2401 case TCPOPT_WINDOW: 2404 2402 { ··· 2559 2555 if (!tp->repair) 2560 2556 err = -EINVAL; 2561 2557 else if (sk->sk_state == TCP_ESTABLISHED) 2562 - err = tcp_repair_options_est(tp, 2558 + err = tcp_repair_options_est(sk, 2563 2559 (struct tcp_repair_opt __user *)optval, 2564 2560 optlen); 2565 2561 else
+1
net/ipv4/tcp_cong.c
··· 180 180 { 181 181 const struct inet_connection_sock *icsk = inet_csk(sk); 182 182 183 + tcp_sk(sk)->prior_ssthresh = 0; 183 184 if (icsk->icsk_ca_ops->init) 184 185 icsk->icsk_ca_ops->init(sk); 185 186 if (tcp_ca_needs_ecn(sk))
+6 -5
net/ipv6/addrconf.c
··· 332 332 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, 333 333 unsigned long delay) 334 334 { 335 - if (!delayed_work_pending(&ifp->dad_work)) 336 - in6_ifa_hold(ifp); 337 - mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); 335 + in6_ifa_hold(ifp); 336 + if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) 337 + in6_ifa_put(ifp); 338 338 } 339 339 340 340 static int snmp6_alloc_dev(struct inet6_dev *idev) ··· 3369 3369 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 3370 3370 struct netdev_notifier_changeupper_info *info; 3371 3371 struct inet6_dev *idev = __in6_dev_get(dev); 3372 + struct net *net = dev_net(dev); 3372 3373 int run_pending = 0; 3373 3374 int err; 3374 3375 ··· 3385 3384 case NETDEV_CHANGEMTU: 3386 3385 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */ 3387 3386 if (dev->mtu < IPV6_MIN_MTU) { 3388 - addrconf_ifdown(dev, 1); 3387 + addrconf_ifdown(dev, dev != net->loopback_dev); 3389 3388 break; 3390 3389 } 3391 3390 ··· 3501 3500 * IPV6_MIN_MTU stop IPv6 on this interface. 3502 3501 */ 3503 3502 if (dev->mtu < IPV6_MIN_MTU) 3504 - addrconf_ifdown(dev, 1); 3503 + addrconf_ifdown(dev, dev != net->loopback_dev); 3505 3504 } 3506 3505 break; 3507 3506
+5 -1
net/ipv6/calipso.c
··· 1319 1319 struct ipv6hdr *ip6_hdr; 1320 1320 struct ipv6_opt_hdr *hop; 1321 1321 unsigned char buf[CALIPSO_MAX_BUFFER]; 1322 - int len_delta, new_end, pad; 1322 + int len_delta, new_end, pad, payload; 1323 1323 unsigned int start, end; 1324 1324 1325 1325 ip6_hdr = ipv6_hdr(skb); ··· 1346 1346 if (ret_val < 0) 1347 1347 return ret_val; 1348 1348 1349 + ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */ 1350 + 1349 1351 if (len_delta) { 1350 1352 if (len_delta > 0) 1351 1353 skb_push(skb, len_delta); ··· 1357 1355 sizeof(*ip6_hdr) + start); 1358 1356 skb_reset_network_header(skb); 1359 1357 ip6_hdr = ipv6_hdr(skb); 1358 + payload = ntohs(ip6_hdr->payload_len); 1359 + ip6_hdr->payload_len = htons(payload + len_delta); 1360 1360 } 1361 1361 1362 1362 hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
+7 -1
net/ipv6/datagram.c
··· 250 250 */ 251 251 252 252 err = ip6_datagram_dst_update(sk, true); 253 - if (err) 253 + if (err) { 254 + /* Reset daddr and dport so that udp_v6_early_demux() 255 + * fails to find this socket 256 + */ 257 + memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); 258 + inet->inet_dport = 0; 254 259 goto out; 260 + } 255 261 256 262 sk->sk_state = TCP_ESTABLISHED; 257 263 sk_set_txhash(sk);
+25
net/ipv6/esp6_offload.c
··· 30 30 #include <net/ipv6.h> 31 31 #include <linux/icmpv6.h> 32 32 33 + static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen) 34 + { 35 + int off = sizeof(struct ipv6hdr); 36 + struct ipv6_opt_hdr *exthdr; 37 + 38 + if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP)) 39 + return offsetof(struct ipv6hdr, nexthdr); 40 + 41 + while (off < nhlen) { 42 + exthdr = (void *)ipv6_hdr + off; 43 + if (exthdr->nexthdr == NEXTHDR_ESP) 44 + return off; 45 + 46 + off += ipv6_optlen(exthdr); 47 + } 48 + 49 + return 0; 50 + } 51 + 33 52 static struct sk_buff **esp6_gro_receive(struct sk_buff **head, 34 53 struct sk_buff *skb) 35 54 { ··· 57 38 struct xfrm_state *x; 58 39 __be32 seq; 59 40 __be32 spi; 41 + int nhoff; 60 42 int err; 61 43 62 44 skb_pull(skb, offset); ··· 92 72 93 73 xo->flags |= XFRM_GRO; 94 74 75 + nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset); 76 + if (!nhoff) 77 + goto out; 78 + 79 + IP6CB(skb)->nhoff = nhoff; 95 80 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; 96 81 XFRM_SPI_SKB_CB(skb)->family = AF_INET6; 97 82 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
+6 -16
net/ipv6/fib6_rules.c
··· 32 32 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, 33 33 int flags, pol_lookup_t lookup) 34 34 { 35 - struct rt6_info *rt; 36 35 struct fib_lookup_arg arg = { 37 36 .lookup_ptr = lookup, 38 37 .flags = FIB_LOOKUP_NOREF, ··· 43 44 fib_rules_lookup(net->ipv6.fib6_rules_ops, 44 45 flowi6_to_flowi(fl6), flags, &arg); 45 46 46 - rt = arg.result; 47 + if (arg.result) 48 + return arg.result; 47 49 48 - if (!rt) { 49 - dst_hold(&net->ipv6.ip6_null_entry->dst); 50 - return &net->ipv6.ip6_null_entry->dst; 51 - } 52 - 53 - if (rt->rt6i_flags & RTF_REJECT && 54 - rt->dst.error == -EAGAIN) { 55 - ip6_rt_put(rt); 56 - rt = net->ipv6.ip6_null_entry; 57 - dst_hold(&rt->dst); 58 - } 59 - 60 - return &rt->dst; 50 + dst_hold(&net->ipv6.ip6_null_entry->dst); 51 + return &net->ipv6.ip6_null_entry->dst; 61 52 } 62 53 63 54 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, ··· 110 121 flp6->saddr = saddr; 111 122 } 112 123 err = rt->dst.error; 113 - goto out; 124 + if (err != -EAGAIN) 125 + goto out; 114 126 } 115 127 again: 116 128 ip6_rt_put(rt);
+1 -1
net/ipv6/icmp.c
··· 491 491 local_bh_disable(); 492 492 493 493 /* Check global sysctl_icmp_msgs_per_sec ratelimit */ 494 - if (!icmpv6_global_allow(type)) 494 + if (!(skb->dev->flags&IFF_LOOPBACK) && !icmpv6_global_allow(type)) 495 495 goto out_bh_enable; 496 496 497 497 mip6_addr_swap(skb);
+1
net/ipv6/ila/ila_xlat.c
··· 62 62 { 63 63 u32 *v = (u32 *)loc.v32; 64 64 65 + __ila_hash_secret_init(); 65 66 return jhash_2words(v[0], v[1], hashrnd); 66 67 } 67 68
+1 -2
net/ipv6/ip6_fib.c
··· 289 289 struct rt6_info *rt; 290 290 291 291 rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); 292 - if (rt->rt6i_flags & RTF_REJECT && 293 - rt->dst.error == -EAGAIN) { 292 + if (rt->dst.error == -EAGAIN) { 294 293 ip6_rt_put(rt); 295 294 rt = net->ipv6.ip6_null_entry; 296 295 dst_hold(&rt->dst);
+5 -4
net/ipv6/ip6_gre.c
··· 991 991 992 992 dst_cache_destroy(&t->dst_cache); 993 993 free_percpu(dev->tstats); 994 - free_netdev(dev); 995 994 } 996 995 997 996 static void ip6gre_tunnel_setup(struct net_device *dev) 998 997 { 999 998 dev->netdev_ops = &ip6gre_netdev_ops; 1000 - dev->destructor = ip6gre_dev_free; 999 + dev->needs_free_netdev = true; 1000 + dev->priv_destructor = ip6gre_dev_free; 1001 1001 1002 1002 dev->type = ARPHRD_IP6GRE; 1003 1003 ··· 1148 1148 return 0; 1149 1149 1150 1150 err_reg_dev: 1151 - ip6gre_dev_free(ign->fb_tunnel_dev); 1151 + free_netdev(ign->fb_tunnel_dev); 1152 1152 err_alloc_dev: 1153 1153 return err; 1154 1154 } ··· 1300 1300 ether_setup(dev); 1301 1301 1302 1302 dev->netdev_ops = &ip6gre_tap_netdev_ops; 1303 - dev->destructor = ip6gre_dev_free; 1303 + dev->needs_free_netdev = true; 1304 + dev->priv_destructor = ip6gre_dev_free; 1304 1305 1305 1306 dev->features |= NETIF_F_NETNS_LOCAL; 1306 1307 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+3 -1
net/ipv6/ip6_offload.c
··· 116 116 117 117 if (udpfrag) { 118 118 int err = ip6_find_1stfragopt(skb, &prevhdr); 119 - if (err < 0) 119 + if (err < 0) { 120 + kfree_skb_list(segs); 120 121 return ERR_PTR(err); 122 + } 121 123 fptr = (struct frag_hdr *)((u8 *)ipv6h + err); 122 124 fptr->frag_off = htons(offset); 123 125 if (skb->next)
+1 -1
net/ipv6/ip6_output.c
··· 1390 1390 */ 1391 1391 1392 1392 cork->length += length; 1393 - if ((((length + fragheaderlen) > mtu) || 1393 + if ((((length + (skb ? skb->len : headersize)) > mtu) || 1394 1394 (skb && skb_is_gso(skb))) && 1395 1395 (sk->sk_protocol == IPPROTO_UDP) && 1396 1396 (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) &&
+11 -6
net/ipv6/ip6_tunnel.c
··· 254 254 gro_cells_destroy(&t->gro_cells); 255 255 dst_cache_destroy(&t->dst_cache); 256 256 free_percpu(dev->tstats); 257 - free_netdev(dev); 258 257 } 259 258 260 259 static int ip6_tnl_create2(struct net_device *dev) ··· 321 322 return t; 322 323 323 324 failed_free: 324 - ip6_dev_free(dev); 325 + free_netdev(dev); 325 326 failed: 326 327 return ERR_PTR(err); 327 328 } ··· 858 859 return 0; 859 860 860 861 drop: 862 + if (tun_dst) 863 + dst_release((struct dst_entry *)tun_dst); 861 864 kfree_skb(skb); 862 865 return 0; 863 866 } ··· 1096 1095 1097 1096 if (!dst) { 1098 1097 route_lookup: 1098 + /* add dsfield to flowlabel for route lookup */ 1099 + fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel); 1100 + 1099 1101 dst = ip6_route_output(net, NULL, fl6); 1100 1102 1101 1103 if (dst->error) ··· 1248 1244 fl6.flowi6_proto = IPPROTO_IPIP; 1249 1245 fl6.daddr = key->u.ipv6.dst; 1250 1246 fl6.flowlabel = key->label; 1251 - dsfield = ip6_tclass(key->label); 1247 + dsfield = key->tos; 1252 1248 } else { 1253 1249 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1254 1250 encap_limit = t->parms.encap_limit; ··· 1319 1315 fl6.flowi6_proto = IPPROTO_IPV6; 1320 1316 fl6.daddr = key->u.ipv6.dst; 1321 1317 fl6.flowlabel = key->label; 1322 - dsfield = ip6_tclass(key->label); 1318 + dsfield = key->tos; 1323 1319 } else { 1324 1320 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 1325 1321 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ ··· 1778 1774 static void ip6_tnl_dev_setup(struct net_device *dev) 1779 1775 { 1780 1776 dev->netdev_ops = &ip6_tnl_netdev_ops; 1781 - dev->destructor = ip6_dev_free; 1777 + dev->needs_free_netdev = true; 1778 + dev->priv_destructor = ip6_dev_free; 1782 1779 1783 1780 dev->type = ARPHRD_TUNNEL6; 1784 1781 dev->flags |= IFF_NOARP; ··· 2226 2221 return 0; 2227 2222 2228 2223 err_register: 2229 - ip6_dev_free(ip6n->fb_tnl_dev); 2224 + free_netdev(ip6n->fb_tnl_dev); 2230 2225 err_alloc_dev: 2231 2226 return err; 2232 2227 }
+4 -4
net/ipv6/ip6_vti.c
··· 180 180 static void vti6_dev_free(struct net_device *dev) 181 181 { 182 182 free_percpu(dev->tstats); 183 - free_netdev(dev); 184 183 } 185 184 186 185 static int vti6_tnl_create2(struct net_device *dev) ··· 234 235 return t; 235 236 236 237 failed_free: 237 - vti6_dev_free(dev); 238 + free_netdev(dev); 238 239 failed: 239 240 return NULL; 240 241 } ··· 841 842 static void vti6_dev_setup(struct net_device *dev) 842 843 { 843 844 dev->netdev_ops = &vti6_netdev_ops; 844 - dev->destructor = vti6_dev_free; 845 + dev->needs_free_netdev = true; 846 + dev->priv_destructor = vti6_dev_free; 845 847 846 848 dev->type = ARPHRD_TUNNEL6; 847 849 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); ··· 1100 1100 return 0; 1101 1101 1102 1102 err_register: 1103 - vti6_dev_free(ip6n->fb_tnl_dev); 1103 + free_netdev(ip6n->fb_tnl_dev); 1104 1104 err_alloc_dev: 1105 1105 return err; 1106 1106 }
+1 -1
net/ipv6/ip6mr.c
··· 733 733 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8; 734 734 dev->flags = IFF_NOARP; 735 735 dev->netdev_ops = &reg_vif_netdev_ops; 736 - dev->destructor = free_netdev; 736 + dev->needs_free_netdev = true; 737 737 dev->features |= NETIF_F_NETNS_LOCAL; 738 738 } 739 739
+1 -1
net/ipv6/ping.c
··· 192 192 .type = SOCK_DGRAM, 193 193 .protocol = IPPROTO_ICMPV6, 194 194 .prot = &pingv6_prot, 195 - .ops = &inet6_dgram_ops, 195 + .ops = &inet6_sockraw_ops, 196 196 .flags = INET_PROTOSW_REUSE, 197 197 }; 198 198
+1 -1
net/ipv6/proc.c
··· 219 219 u64 buff64[SNMP_MIB_MAX]; 220 220 int i; 221 221 222 - memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX); 222 + memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX); 223 223 224 224 snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff); 225 225 for (i = 0; itemlist[i].name; i++)
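The proc.c one-liner above fixes a memset that sized the clear with sizeof(unsigned long) even though the buffer holds u64 entries, which under-clears the array on 32-bit builds. A small standalone illustration of why sizing the clear from the object itself is the safer idiom; SNMP_MIB_MAX is just an illustrative constant here, not the kernel value:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SNMP_MIB_MAX 8          /* illustrative, not the kernel value */

    int main(void)
    {
        uint64_t buff64[SNMP_MIB_MAX];

        /* Fragile: on ILP32, sizeof(unsigned long) == 4, so this clears
         * only half of the 64-bit array. */
        memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);

        /* Robust: let the object dictate its own size. */
        memset(buff64, 0, sizeof(buff64));

        printf("cleared %zu bytes\n", sizeof(buff64));
        return 0;
    }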
+1 -1
net/ipv6/raw.c
··· 1338 1338 #endif /* CONFIG_PROC_FS */ 1339 1339 1340 1340 /* Same as inet6_dgram_ops, sans udp_poll. */ 1341 - static const struct proto_ops inet6_sockraw_ops = { 1341 + const struct proto_ops inet6_sockraw_ops = { 1342 1342 .family = PF_INET6, 1343 1343 .owner = THIS_MODULE, 1344 1344 .release = inet6_release,
+6 -1
net/ipv6/route.c
··· 2804 2804 if ((rt->dst.dev == dev || !dev) && 2805 2805 rt != adn->net->ipv6.ip6_null_entry && 2806 2806 (rt->rt6i_nsiblings == 0 || 2807 + (dev && netdev_unregistering(dev)) || 2807 2808 !rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) 2808 2809 return -1; 2809 2810 ··· 3722 3721 net->ipv6.ip6_blk_hole_entry->dst.dev = dev; 3723 3722 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); 3724 3723 #endif 3725 - } else if (event == NETDEV_UNREGISTER) { 3724 + } else if (event == NETDEV_UNREGISTER && 3725 + dev->reg_state != NETREG_UNREGISTERED) { 3726 + /* NETDEV_UNREGISTER could be fired for multiple times by 3727 + * netdev_wait_allrefs(). Make sure we only call this once. 3728 + */ 3726 3729 in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev); 3727 3730 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 3728 3731 in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
+4 -4
net/ipv6/sit.c
··· 265 265 return nt; 266 266 267 267 failed_free: 268 - ipip6_dev_free(dev); 268 + free_netdev(dev); 269 269 failed: 270 270 return NULL; 271 271 } ··· 305 305 * we try harder to allocate. 306 306 */ 307 307 kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ? 308 - kcalloc(cmax, sizeof(*kp), GFP_KERNEL) : 308 + kcalloc(cmax, sizeof(*kp), GFP_KERNEL | __GFP_NOWARN) : 309 309 NULL; 310 310 311 311 rcu_read_lock(); ··· 1336 1336 1337 1337 dst_cache_destroy(&tunnel->dst_cache); 1338 1338 free_percpu(dev->tstats); 1339 - free_netdev(dev); 1340 1339 } 1341 1340 1342 1341 #define SIT_FEATURES (NETIF_F_SG | \ ··· 1350 1351 int t_hlen = tunnel->hlen + sizeof(struct iphdr); 1351 1352 1352 1353 dev->netdev_ops = &ipip6_netdev_ops; 1353 - dev->destructor = ipip6_dev_free; 1354 + dev->needs_free_netdev = true; 1355 + dev->priv_destructor = ipip6_dev_free; 1354 1356 1355 1357 dev->type = ARPHRD_SIT; 1356 1358 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+2 -1
net/ipv6/udp.c
··· 879 879 struct sock *sk; 880 880 881 881 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { 882 - if (INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif)) 882 + if (sk->sk_state == TCP_ESTABLISHED && 883 + INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif)) 883 884 return sk; 884 885 /* Only check first socket in chain */ 885 886 break;
+1 -1
net/ipv6/xfrm6_input.c
··· 43 43 return 1; 44 44 #endif 45 45 46 - ipv6_hdr(skb)->payload_len = htons(skb->len); 47 46 __skb_push(skb, skb->data - skb_network_header(skb)); 47 + ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); 48 48 49 49 if (xo && (xo->flags & XFRM_GRO)) { 50 50 skb_mac_header_rebuild(skb);
+2
net/ipv6/xfrm6_mode_ro.c
··· 47 47 iph = ipv6_hdr(skb); 48 48 49 49 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 50 + if (hdr_len < 0) 51 + return hdr_len; 50 52 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); 51 53 skb_set_network_header(skb, -x->props.header_len); 52 54 skb->transport_header = skb->network_header + hdr_len;
+2
net/ipv6/xfrm6_mode_transport.c
··· 30 30 skb_set_inner_transport_header(skb, skb_transport_offset(skb)); 31 31 32 32 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 33 + if (hdr_len < 0) 34 + return hdr_len; 33 35 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); 34 36 skb_set_network_header(skb, -x->props.header_len); 35 37 skb->transport_header = skb->network_header + hdr_len;
+1 -1
net/irda/irlan/irlan_eth.c
··· 65 65 ether_setup(dev); 66 66 67 67 dev->netdev_ops = &irlan_eth_netdev_ops; 68 - dev->destructor = free_netdev; 68 + dev->needs_free_netdev = true; 69 69 dev->min_mtu = 0; 70 70 dev->max_mtu = ETH_MAX_MTU; 71 71
+15 -4
net/key/af_key.c
··· 1157 1157 goto out; 1158 1158 } 1159 1159 1160 + err = -ENOBUFS; 1160 1161 key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; 1161 1162 if (sa->sadb_sa_auth) { 1162 1163 int keysize = 0; ··· 1169 1168 if (key) 1170 1169 keysize = (key->sadb_key_bits + 7) / 8; 1171 1170 x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL); 1172 - if (!x->aalg) 1171 + if (!x->aalg) { 1172 + err = -ENOMEM; 1173 1173 goto out; 1174 + } 1174 1175 strcpy(x->aalg->alg_name, a->name); 1175 1176 x->aalg->alg_key_len = 0; 1176 1177 if (key) { ··· 1191 1188 goto out; 1192 1189 } 1193 1190 x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL); 1194 - if (!x->calg) 1191 + if (!x->calg) { 1192 + err = -ENOMEM; 1195 1193 goto out; 1194 + } 1196 1195 strcpy(x->calg->alg_name, a->name); 1197 1196 x->props.calgo = sa->sadb_sa_encrypt; 1198 1197 } else { ··· 1208 1203 if (key) 1209 1204 keysize = (key->sadb_key_bits + 7) / 8; 1210 1205 x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL); 1211 - if (!x->ealg) 1206 + if (!x->ealg) { 1207 + err = -ENOMEM; 1212 1208 goto out; 1209 + } 1213 1210 strcpy(x->ealg->alg_name, a->name); 1214 1211 x->ealg->alg_key_len = 0; 1215 1212 if (key) { ··· 1256 1249 struct xfrm_encap_tmpl *natt; 1257 1250 1258 1251 x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL); 1259 - if (!x->encap) 1252 + if (!x->encap) { 1253 + err = -ENOMEM; 1260 1254 goto out; 1255 + } 1261 1256 1262 1257 natt = x->encap; 1263 1258 n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]; ··· 2764 2755 int err, err2; 2765 2756 2766 2757 err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true); 2758 + if (!err) 2759 + xfrm_garbage_collect(net); 2767 2760 err2 = unicast_flush_resp(sk, hdr); 2768 2761 if (err || err2) { 2769 2762 if (err == -ESRCH) /* empty table - old silent behavior */
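Besides the xfrm_garbage_collect() call, the af_key.c hunk makes every allocation failure set a specific error code before jumping to the shared out label, instead of leaving err at whatever it last held. A compact sketch of that shared-exit convention, using a made-up build_state() helper rather than the pfkey code:

    #include <errno.h>
    #include <stdlib.h>

    struct state { void *aalg; void *ealg; };

    /* Every failure path assigns err explicitly before goto, so the
     * cleanup label can never return a stale or zero error code. */
    static int build_state(struct state *x)
    {
        int err;

        x->aalg = malloc(32);
        if (!x->aalg) {
            err = -ENOMEM;
            goto out;
        }

        x->ealg = malloc(32);
        if (!x->ealg) {
            err = -ENOMEM;
            goto out;
        }

        return 0;

    out:
        free(x->aalg);
        free(x->ealg);
        return err;
    }

    int main(void)
    {
        struct state s = { 0 };
        int err = build_state(&s);

        if (!err) {
            free(s.aalg);
            free(s.ealg);
        }
        return err ? 1 : 0;
    }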
+8 -7
net/l2tp/l2tp_eth.c
··· 114 114 { 115 115 struct l2tp_eth *priv = netdev_priv(dev); 116 116 117 - stats->tx_bytes = atomic_long_read(&priv->tx_bytes); 118 - stats->tx_packets = atomic_long_read(&priv->tx_packets); 119 - stats->tx_dropped = atomic_long_read(&priv->tx_dropped); 120 - stats->rx_bytes = atomic_long_read(&priv->rx_bytes); 121 - stats->rx_packets = atomic_long_read(&priv->rx_packets); 122 - stats->rx_errors = atomic_long_read(&priv->rx_errors); 117 + stats->tx_bytes = (unsigned long) atomic_long_read(&priv->tx_bytes); 118 + stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets); 119 + stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped); 120 + stats->rx_bytes = (unsigned long) atomic_long_read(&priv->rx_bytes); 121 + stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets); 122 + stats->rx_errors = (unsigned long) atomic_long_read(&priv->rx_errors); 123 + 123 124 } 124 125 125 126 static const struct net_device_ops l2tp_eth_netdev_ops = { ··· 142 141 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 143 142 dev->features |= NETIF_F_LLTX; 144 143 dev->netdev_ops = &l2tp_eth_netdev_ops; 145 - dev->destructor = free_netdev; 144 + dev->needs_free_netdev = true; 146 145 } 147 146 148 147 static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
+68 -92
net/mac80211/agg-tx.c
··· 7 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 8 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 9 9 * Copyright 2007-2010, Intel Corporation 10 - * Copyright(c) 2015 Intel Deutschland GmbH 10 + * Copyright(c) 2015-2017 Intel Deutschland GmbH 11 11 * 12 12 * This program is free software; you can redistribute it and/or modify 13 13 * it under the terms of the GNU General Public License version 2 as ··· 741 741 ieee80211_agg_start_txq(sta, tid, true); 742 742 } 743 743 744 - void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) 744 + void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, 745 + struct tid_ampdu_tx *tid_tx) 746 + { 747 + struct ieee80211_sub_if_data *sdata = sta->sdata; 748 + struct ieee80211_local *local = sdata->local; 749 + 750 + if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) 751 + return; 752 + 753 + if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) 754 + ieee80211_agg_tx_operational(local, sta, tid); 755 + } 756 + 757 + static struct tid_ampdu_tx * 758 + ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata, 759 + const u8 *ra, u16 tid, struct sta_info **sta) 760 + { 761 + struct tid_ampdu_tx *tid_tx; 762 + 763 + if (tid >= IEEE80211_NUM_TIDS) { 764 + ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", 765 + tid, IEEE80211_NUM_TIDS); 766 + return NULL; 767 + } 768 + 769 + *sta = sta_info_get_bss(sdata, ra); 770 + if (!*sta) { 771 + ht_dbg(sdata, "Could not find station: %pM\n", ra); 772 + return NULL; 773 + } 774 + 775 + tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]); 776 + 777 + if (WARN_ON(!tid_tx)) 778 + ht_dbg(sdata, "addBA was not requested!\n"); 779 + 780 + return tid_tx; 781 + } 782 + 783 + void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 784 + const u8 *ra, u16 tid) 745 785 { 746 786 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 747 787 struct ieee80211_local *local = sdata->local; ··· 790 750 791 751 trace_api_start_tx_ba_cb(sdata, ra, tid); 792 752 793 - if (tid >= IEEE80211_NUM_TIDS) { 794 - ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", 795 - tid, IEEE80211_NUM_TIDS); 796 - return; 797 - } 753 + rcu_read_lock(); 754 + tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta); 755 + if (!tid_tx) 756 + goto out; 798 757 799 - mutex_lock(&local->sta_mtx); 800 - sta = sta_info_get_bss(sdata, ra); 801 - if (!sta) { 802 - mutex_unlock(&local->sta_mtx); 803 - ht_dbg(sdata, "Could not find station: %pM\n", ra); 804 - return; 805 - } 806 - 807 - mutex_lock(&sta->ampdu_mlme.mtx); 808 - tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 809 - 810 - if (WARN_ON(!tid_tx)) { 811 - ht_dbg(sdata, "addBA was not requested!\n"); 812 - goto unlock; 813 - } 814 - 815 - if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) 816 - goto unlock; 817 - 818 - if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) 819 - ieee80211_agg_tx_operational(local, sta, tid); 820 - 821 - unlock: 822 - mutex_unlock(&sta->ampdu_mlme.mtx); 823 - mutex_unlock(&local->sta_mtx); 824 - } 825 - 826 - void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 827 - const u8 *ra, u16 tid) 828 - { 829 - struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 830 - struct ieee80211_local *local = sdata->local; 831 - struct ieee80211_ra_tid *ra_tid; 832 - struct sk_buff *skb = dev_alloc_skb(0); 833 - 834 - if (unlikely(!skb)) 835 - return; 836 - 837 - ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 838 - memcpy(&ra_tid->ra, ra, ETH_ALEN); 839 - ra_tid->tid 
= tid; 840 - 841 - skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START; 842 - skb_queue_tail(&sdata->skb_queue, skb); 843 - ieee80211_queue_work(&local->hw, &sdata->work); 758 + set_bit(HT_AGG_STATE_START_CB, &tid_tx->state); 759 + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); 760 + out: 761 + rcu_read_unlock(); 844 762 } 845 763 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); 846 764 ··· 858 860 } 859 861 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); 860 862 861 - void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) 863 + void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, 864 + struct tid_ampdu_tx *tid_tx) 862 865 { 863 - struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 864 - struct ieee80211_local *local = sdata->local; 865 - struct sta_info *sta; 866 - struct tid_ampdu_tx *tid_tx; 866 + struct ieee80211_sub_if_data *sdata = sta->sdata; 867 867 bool send_delba = false; 868 868 869 - trace_api_stop_tx_ba_cb(sdata, ra, tid); 869 + ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", 870 + sta->sta.addr, tid); 870 871 871 - if (tid >= IEEE80211_NUM_TIDS) { 872 - ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", 873 - tid, IEEE80211_NUM_TIDS); 874 - return; 875 - } 876 - 877 - ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid); 878 - 879 - mutex_lock(&local->sta_mtx); 880 - 881 - sta = sta_info_get_bss(sdata, ra); 882 - if (!sta) { 883 - ht_dbg(sdata, "Could not find station: %pM\n", ra); 884 - goto unlock; 885 - } 886 - 887 - mutex_lock(&sta->ampdu_mlme.mtx); 888 872 spin_lock_bh(&sta->lock); 889 - tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 890 873 891 - if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { 874 + if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { 892 875 ht_dbg(sdata, 893 876 "unexpected callback to A-MPDU stop for %pM tid %d\n", 894 877 sta->sta.addr, tid); ··· 885 906 spin_unlock_bh(&sta->lock); 886 907 887 908 if (send_delba) 888 - ieee80211_send_delba(sdata, ra, tid, 909 + ieee80211_send_delba(sdata, sta->sta.addr, tid, 889 910 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); 890 - 891 - mutex_unlock(&sta->ampdu_mlme.mtx); 892 - unlock: 893 - mutex_unlock(&local->sta_mtx); 894 911 } 895 912 896 913 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, ··· 894 919 { 895 920 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 896 921 struct ieee80211_local *local = sdata->local; 897 - struct ieee80211_ra_tid *ra_tid; 898 - struct sk_buff *skb = dev_alloc_skb(0); 922 + struct sta_info *sta; 923 + struct tid_ampdu_tx *tid_tx; 899 924 900 - if (unlikely(!skb)) 901 - return; 925 + trace_api_stop_tx_ba_cb(sdata, ra, tid); 902 926 903 - ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 904 - memcpy(&ra_tid->ra, ra, ETH_ALEN); 905 - ra_tid->tid = tid; 927 + rcu_read_lock(); 928 + tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta); 929 + if (!tid_tx) 930 + goto out; 906 931 907 - skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP; 908 - skb_queue_tail(&sdata->skb_queue, skb); 909 - ieee80211_queue_work(&local->hw, &sdata->work); 932 + set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state); 933 + ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); 934 + out: 935 + rcu_read_unlock(); 910 936 } 911 937 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); 912 938
+2
net/mac80211/cfg.c
··· 902 902 default: 903 903 return -EINVAL; 904 904 } 905 + sdata->u.ap.req_smps = sdata->smps_mode; 906 + 905 907 sdata->needed_rx_chains = sdata->local->rx_chains; 906 908 907 909 sdata->vif.bss_conf.beacon_int = params->beacon_interval;
+12 -4
net/mac80211/ht.c
··· 7 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 8 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 9 9 * Copyright 2007-2010, Intel Corporation 10 + * Copyright 2017 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of the GNU General Public License version 2 as ··· 290 289 { 291 290 int i; 292 291 293 - cancel_work_sync(&sta->ampdu_mlme.work); 294 - 295 292 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 296 293 __ieee80211_stop_tx_ba_session(sta, i, reason); 297 294 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, ··· 297 298 reason != AGG_STOP_DESTROY_STA && 298 299 reason != AGG_STOP_PEER_REQUEST); 299 300 } 301 + 302 + /* stopping might queue the work again - so cancel only afterwards */ 303 + cancel_work_sync(&sta->ampdu_mlme.work); 300 304 } 301 305 302 306 void ieee80211_ba_session_work(struct work_struct *work) ··· 354 352 spin_unlock_bh(&sta->lock); 355 353 356 354 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 357 - if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP, 358 - &tid_tx->state)) 355 + if (!tid_tx) 356 + continue; 357 + 358 + if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state)) 359 + ieee80211_start_tx_ba_cb(sta, tid, tid_tx); 360 + if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state)) 359 361 ___ieee80211_stop_tx_ba_session(sta, tid, 360 362 AGG_STOP_LOCAL_REQUEST); 363 + if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state)) 364 + ieee80211_stop_tx_ba_cb(sta, tid, tid_tx); 361 365 } 362 366 mutex_unlock(&sta->ampdu_mlme.mtx); 363 367 }
+5 -11
net/mac80211/ieee80211_i.h
··· 1036 1036 1037 1037 enum sdata_queue_type { 1038 1038 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, 1039 - IEEE80211_SDATA_QUEUE_AGG_START = 1, 1040 - IEEE80211_SDATA_QUEUE_AGG_STOP = 2, 1041 1039 IEEE80211_SDATA_QUEUE_RX_AGG_START = 3, 1042 1040 IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4, 1043 1041 }; ··· 1425 1427 return local->hw.wiphy->bands[band]; 1426 1428 } 1427 1429 1428 - /* this struct represents 802.11n's RA/TID combination */ 1429 - struct ieee80211_ra_tid { 1430 - u8 ra[ETH_ALEN]; 1431 - u16 tid; 1432 - }; 1433 - 1434 1430 /* this struct holds the value parsing from channel switch IE */ 1435 1431 struct ieee80211_csa_ie { 1436 1432 struct cfg80211_chan_def chandef; ··· 1531 1539 return true; 1532 1540 /* can't handle non-legacy preamble yet */ 1533 1541 if (status->flag & RX_FLAG_MACTIME_PLCP_START && 1534 - status->encoding != RX_ENC_LEGACY) 1542 + status->encoding == RX_ENC_LEGACY) 1535 1543 return true; 1536 1544 return false; 1537 1545 } ··· 1786 1794 enum ieee80211_agg_stop_reason reason); 1787 1795 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 1788 1796 enum ieee80211_agg_stop_reason reason); 1789 - void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); 1790 - void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); 1797 + void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, 1798 + struct tid_ampdu_tx *tid_tx); 1799 + void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, 1800 + struct tid_ampdu_tx *tid_tx); 1791 1801 void ieee80211_ba_session_work(struct work_struct *work); 1792 1802 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); 1793 1803 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
+5 -13
net/mac80211/iface.c
··· 1213 1213 static void ieee80211_if_free(struct net_device *dev) 1214 1214 { 1215 1215 free_percpu(dev->tstats); 1216 - free_netdev(dev); 1217 1216 } 1218 1217 1219 1218 static void ieee80211_if_setup(struct net_device *dev) ··· 1220 1221 ether_setup(dev); 1221 1222 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1222 1223 dev->netdev_ops = &ieee80211_dataif_ops; 1223 - dev->destructor = ieee80211_if_free; 1224 + dev->needs_free_netdev = true; 1225 + dev->priv_destructor = ieee80211_if_free; 1224 1226 } 1225 1227 1226 1228 static void ieee80211_if_setup_no_queue(struct net_device *dev) ··· 1237 1237 struct ieee80211_local *local = sdata->local; 1238 1238 struct sk_buff *skb; 1239 1239 struct sta_info *sta; 1240 - struct ieee80211_ra_tid *ra_tid; 1241 1240 struct ieee80211_rx_agg *rx_agg; 1242 1241 1243 1242 if (!ieee80211_sdata_running(sdata)) ··· 1252 1253 while ((skb = skb_dequeue(&sdata->skb_queue))) { 1253 1254 struct ieee80211_mgmt *mgmt = (void *)skb->data; 1254 1255 1255 - if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { 1256 - ra_tid = (void *)&skb->cb; 1257 - ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra, 1258 - ra_tid->tid); 1259 - } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) { 1260 - ra_tid = (void *)&skb->cb; 1261 - ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra, 1262 - ra_tid->tid); 1263 - } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) { 1256 + if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) { 1264 1257 rx_agg = (void *)&skb->cb; 1265 1258 mutex_lock(&local->sta_mtx); 1266 1259 sta = sta_info_get_bss(sdata, rx_agg->addr); ··· 1816 1825 ret = dev_alloc_name(ndev, ndev->name); 1817 1826 if (ret < 0) { 1818 1827 ieee80211_if_free(ndev); 1828 + free_netdev(ndev); 1819 1829 return ret; 1820 1830 } 1821 1831 ··· 1906 1914 1907 1915 ret = register_netdevice(ndev); 1908 1916 if (ret) { 1909 - ieee80211_if_free(ndev); 1917 + free_netdev(ndev); 1910 1918 return ret; 1911 1919 } 1912 1920 }
+31 -31
net/mac80211/mlme.c
··· 601 601 struct ieee80211_supported_band *sband; 602 602 struct ieee80211_chanctx_conf *chanctx_conf; 603 603 struct ieee80211_channel *chan; 604 - u32 rate_flags, rates = 0; 604 + u32 rates = 0; 605 605 606 606 sdata_assert_lock(sdata); 607 607 ··· 612 612 return; 613 613 } 614 614 chan = chanctx_conf->def.chan; 615 - rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); 616 615 rcu_read_unlock(); 617 616 sband = local->hw.wiphy->bands[chan->band]; 618 617 shift = ieee80211_vif_get_shift(&sdata->vif); ··· 635 636 */ 636 637 rates_len = 0; 637 638 for (i = 0; i < sband->n_bitrates; i++) { 638 - if ((rate_flags & sband->bitrates[i].flags) 639 - != rate_flags) 640 - continue; 641 639 rates |= BIT(i); 642 640 rates_len++; 643 641 } ··· 2814 2818 u32 *rates, u32 *basic_rates, 2815 2819 bool *have_higher_than_11mbit, 2816 2820 int *min_rate, int *min_rate_index, 2817 - int shift, u32 rate_flags) 2821 + int shift) 2818 2822 { 2819 2823 int i, j; 2820 2824 ··· 2842 2846 int brate; 2843 2847 2844 2848 br = &sband->bitrates[j]; 2845 - if ((rate_flags & br->flags) != rate_flags) 2846 - continue; 2847 2849 2848 2850 brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); 2849 2851 if (brate == rate) { ··· 4392 4398 return -ENOMEM; 4393 4399 } 4394 4400 4395 - if (new_sta || override) { 4396 - err = ieee80211_prep_channel(sdata, cbss); 4397 - if (err) { 4398 - if (new_sta) 4399 - sta_info_free(local, new_sta); 4400 - return -EINVAL; 4401 - } 4402 - } 4403 - 4401 + /* 4402 + * Set up the information for the new channel before setting the 4403 + * new channel. We can't - completely race-free - change the basic 4404 + * rates bitmap and the channel (sband) that it refers to, but if 4405 + * we set it up before we at least avoid calling into the driver's 4406 + * bss_info_changed() method with invalid information (since we do 4407 + * call that from changing the channel - only for IDLE and perhaps 4408 + * some others, but ...). 4409 + * 4410 + * So to avoid that, just set up all the new information before the 4411 + * channel, but tell the driver to apply it only afterwards, since 4412 + * it might need the new channel for that. 
4413 + */ 4404 4414 if (new_sta) { 4405 4415 u32 rates = 0, basic_rates = 0; 4406 4416 bool have_higher_than_11mbit; 4407 4417 int min_rate = INT_MAX, min_rate_index = -1; 4408 - struct ieee80211_chanctx_conf *chanctx_conf; 4409 4418 const struct cfg80211_bss_ies *ies; 4410 4419 int shift = ieee80211_vif_get_shift(&sdata->vif); 4411 - u32 rate_flags; 4412 - 4413 - rcu_read_lock(); 4414 - chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 4415 - if (WARN_ON(!chanctx_conf)) { 4416 - rcu_read_unlock(); 4417 - sta_info_free(local, new_sta); 4418 - return -EINVAL; 4419 - } 4420 - rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); 4421 - rcu_read_unlock(); 4422 4420 4423 4421 ieee80211_get_rates(sband, bss->supp_rates, 4424 4422 bss->supp_rates_len, 4425 4423 &rates, &basic_rates, 4426 4424 &have_higher_than_11mbit, 4427 4425 &min_rate, &min_rate_index, 4428 - shift, rate_flags); 4426 + shift); 4429 4427 4430 4428 /* 4431 4429 * This used to be a workaround for basic rates missing ··· 4475 4489 sdata->vif.bss_conf.sync_dtim_count = 0; 4476 4490 } 4477 4491 rcu_read_unlock(); 4492 + } 4478 4493 4479 - /* tell driver about BSSID, basic rates and timing */ 4494 + if (new_sta || override) { 4495 + err = ieee80211_prep_channel(sdata, cbss); 4496 + if (err) { 4497 + if (new_sta) 4498 + sta_info_free(local, new_sta); 4499 + return -EINVAL; 4500 + } 4501 + } 4502 + 4503 + if (new_sta) { 4504 + /* 4505 + * tell driver about BSSID, basic rates and timing 4506 + * this was set up above, before setting the channel 4507 + */ 4480 4508 ieee80211_bss_info_change_notify(sdata, 4481 4509 BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES | 4482 4510 BSS_CHANGED_BEACON_INT);
+5 -1
net/mac80211/rx.c
··· 1613 1613 */ 1614 1614 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && 1615 1615 !ieee80211_has_morefrags(hdr->frame_control) && 1616 + !ieee80211_is_back_req(hdr->frame_control) && 1616 1617 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1617 1618 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1618 1619 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 1619 - /* PM bit is only checked in frames where it isn't reserved, 1620 + /* 1621 + * PM bit is only checked in frames where it isn't reserved, 1620 1622 * in AP mode it's reserved in non-bufferable management frames 1621 1623 * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) 1624 + * BAR frames should be ignored as specified in 1625 + * IEEE 802.11-2012 10.2.1.2. 1622 1626 */ 1623 1627 (!ieee80211_is_mgmt(hdr->frame_control) || 1624 1628 ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
+1 -1
net/mac80211/sta_info.c
··· 2155 2155 struct ieee80211_sta_rx_stats *cpurxs; 2156 2156 2157 2157 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2158 - sinfo->rx_packets += cpurxs->dropped; 2158 + sinfo->rx_dropped_misc += cpurxs->dropped; 2159 2159 } 2160 2160 } 2161 2161
+2
net/mac80211/sta_info.h
··· 116 116 #define HT_AGG_STATE_STOPPING 3 117 117 #define HT_AGG_STATE_WANT_START 4 118 118 #define HT_AGG_STATE_WANT_STOP 5 119 + #define HT_AGG_STATE_START_CB 6 120 + #define HT_AGG_STATE_STOP_CB 7 119 121 120 122 enum ieee80211_agg_stop_reason { 121 123 AGG_STOP_DECLINED,
+5 -4
net/mac80211/wpa.c
··· 17 17 #include <asm/unaligned.h> 18 18 #include <net/mac80211.h> 19 19 #include <crypto/aes.h> 20 + #include <crypto/algapi.h> 20 21 21 22 #include "ieee80211_i.h" 22 23 #include "michael.h" ··· 154 153 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; 155 154 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 156 155 michael_mic(key, hdr, data, data_len, mic); 157 - if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) 156 + if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN)) 158 157 goto mic_fail; 159 158 160 159 /* remove Michael MIC from payload */ ··· 1049 1048 bip_aad(skb, aad); 1050 1049 ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, 1051 1050 skb->data + 24, skb->len - 24, mic); 1052 - if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 1051 + if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { 1053 1052 key->u.aes_cmac.icverrors++; 1054 1053 return RX_DROP_UNUSABLE; 1055 1054 } ··· 1099 1098 bip_aad(skb, aad); 1100 1099 ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, 1101 1100 skb->data + 24, skb->len - 24, mic); 1102 - if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 1101 + if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { 1103 1102 key->u.aes_cmac.icverrors++; 1104 1103 return RX_DROP_UNUSABLE; 1105 1104 } ··· 1203 1202 if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, 1204 1203 skb->data + 24, skb->len - 24, 1205 1204 mic) < 0 || 1206 - memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 1205 + crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { 1207 1206 key->u.aes_gmac.icverrors++; 1208 1207 return RX_DROP_UNUSABLE; 1209 1208 }
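The wpa.c hunk swaps memcmp() for crypto_memneq() when checking received MICs, so the comparison time no longer depends on how many leading bytes matched. The sketch below shows the basic accumulate-XOR idea in plain C; it is only illustrative, since a production version (crypto_memneq included) must also keep the compiler from turning the loop back into an early-exit compare.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Constant-time "not equal": OR together the XOR of every byte pair
     * so the loop always touches the full length. */
    static int ct_memneq(const void *a, const void *b, size_t n)
    {
        const unsigned char *pa = a, *pb = b;
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < n; i++)
            diff |= pa[i] ^ pb[i];

        return diff != 0;
    }

    int main(void)
    {
        unsigned char mic[8]      = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char received[8] = { 1, 2, 3, 4, 5, 6, 7, 9 };

        /* memcmp() may bail out at the first mismatch; ct_memneq()
         * always does the same amount of work. */
        printf("memcmp: %d  ct_memneq: %d\n",
               memcmp(mic, received, sizeof(mic)) != 0,
               ct_memneq(mic, received, sizeof(mic)));
        return 0;
    }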
+3 -4
net/mac802154/iface.c
··· 526 526 struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 527 527 528 528 mac802154_llsec_destroy(&sdata->sec); 529 - 530 - free_netdev(dev); 531 529 } 532 530 533 531 static void ieee802154_if_setup(struct net_device *dev) ··· 591 593 sdata->dev->dev_addr); 592 594 593 595 sdata->dev->header_ops = &mac802154_header_ops; 594 - sdata->dev->destructor = mac802154_wpan_free; 596 + sdata->dev->needs_free_netdev = true; 597 + sdata->dev->priv_destructor = mac802154_wpan_free; 595 598 sdata->dev->netdev_ops = &mac802154_wpan_ops; 596 599 sdata->dev->ml_priv = &mac802154_mlme_wpan; 597 600 wpan_dev->promiscuous_mode = false; ··· 607 608 608 609 break; 609 610 case NL802154_IFTYPE_MONITOR: 610 - sdata->dev->destructor = free_netdev; 611 + sdata->dev->needs_free_netdev = true; 611 612 sdata->dev->netdev_ops = &mac802154_monitor_ops; 612 613 wpan_dev->promiscuous_mode = true; 613 614 break;
+1 -1
net/mpls/af_mpls.c
··· 1418 1418 continue; 1419 1419 alive++; 1420 1420 nh_flags &= ~flags; 1421 - WRITE_ONCE(nh->nh_flags, flags); 1421 + WRITE_ONCE(nh->nh_flags, nh_flags); 1422 1422 } endfor_nexthops(rt); 1423 1423 1424 1424 WRITE_ONCE(rt->rt_nhn_alive, alive);
+6 -1
net/netfilter/nf_conntrack_netlink.c
··· 890 890 } 891 891 out: 892 892 local_bh_enable(); 893 - if (last) 893 + if (last) { 894 + /* nf ct hash resize happened, now clear the leftover. */ 895 + if ((struct nf_conn *)cb->args[1] == last) 896 + cb->args[1] = 0; 897 + 894 898 nf_ct_put(last); 899 + } 895 900 896 901 while (i) { 897 902 i--;
+6 -3
net/netfilter/nf_conntrack_proto_sctp.c
··· 512 512 u8 pf, unsigned int hooknum) 513 513 { 514 514 const struct sctphdr *sh; 515 - struct sctphdr _sctph; 516 515 const char *logmsg; 517 516 518 - sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); 519 - if (!sh) { 517 + if (skb->len < dataoff + sizeof(struct sctphdr)) { 520 518 logmsg = "nf_ct_sctp: short packet "; 521 519 goto out_invalid; 522 520 } 523 521 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 524 522 skb->ip_summed == CHECKSUM_NONE) { 523 + if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) { 524 + logmsg = "nf_ct_sctp: failed to read header "; 525 + goto out_invalid; 526 + } 527 + sh = (const struct sctphdr *)(skb->data + dataoff); 525 528 if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { 526 529 logmsg = "nf_ct_sctp: bad CRC "; 527 530 goto out_invalid;
+1 -1
net/netfilter/nf_nat_core.c
··· 566 566 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack() 567 567 * will delete entry from already-freed table. 568 568 */ 569 - ct->status &= ~IPS_NAT_DONE_MASK; 569 + clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status); 570 570 rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, 571 571 nf_nat_bysource_params); 572 572
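The nf_nat_core.c change replaces a plain "ct->status &= ~mask" with clear_bit(), so clearing the SRC_NAT_DONE flag is an atomic read-modify-write and cannot lose concurrent updates to other bits in the same word. A C11-atomics sketch of that distinction, with made-up bit names rather than the conntrack flags:

    #include <stdatomic.h>
    #include <stdio.h>

    #define SRC_NAT_DONE_BIT 0
    #define DST_NAT_DONE_BIT 1

    static atomic_ulong status;

    int main(void)
    {
        atomic_fetch_or(&status, 1UL << SRC_NAT_DONE_BIT);
        atomic_fetch_or(&status, 1UL << DST_NAT_DONE_BIT);

        /* Atomic clear of a single bit; a non-atomic "status &= ~mask"
         * would be a separate load, modify and store, racing with any
         * concurrent setter of the other bits. */
        atomic_fetch_and(&status, ~(1UL << SRC_NAT_DONE_BIT));

        printf("status = %#lx\n", atomic_load(&status));
        return 0;
    }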
+11 -11
net/netfilter/nft_set_rbtree.c
··· 116 116 else if (d > 0) 117 117 p = &parent->rb_right; 118 118 else { 119 - if (nft_set_elem_active(&rbe->ext, genmask)) { 120 - if (nft_rbtree_interval_end(rbe) && 121 - !nft_rbtree_interval_end(new)) 122 - p = &parent->rb_left; 123 - else if (!nft_rbtree_interval_end(rbe) && 124 - nft_rbtree_interval_end(new)) 125 - p = &parent->rb_right; 126 - else { 127 - *ext = &rbe->ext; 128 - return -EEXIST; 129 - } 119 + if (nft_rbtree_interval_end(rbe) && 120 + !nft_rbtree_interval_end(new)) { 121 + p = &parent->rb_left; 122 + } else if (!nft_rbtree_interval_end(rbe) && 123 + nft_rbtree_interval_end(new)) { 124 + p = &parent->rb_right; 125 + } else if (nft_set_elem_active(&rbe->ext, genmask)) { 126 + *ext = &rbe->ext; 127 + return -EEXIST; 128 + } else { 129 + p = &parent->rb_left; 130 130 } 131 131 } 132 132 }
+3 -1
net/netlink/af_netlink.c
··· 62 62 #include <asm/cacheflush.h> 63 63 #include <linux/hash.h> 64 64 #include <linux/genetlink.h> 65 + #include <linux/net_namespace.h> 65 66 66 67 #include <net/net_namespace.h> 67 68 #include <net/sock.h> ··· 1416 1415 goto out; 1417 1416 } 1418 1417 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net); 1419 - NETLINK_CB(p->skb2).nsid_is_set = true; 1418 + if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED) 1419 + NETLINK_CB(p->skb2).nsid_is_set = true; 1420 1420 val = netlink_broadcast_deliver(sk, p->skb2); 1421 1421 if (val < 0) { 1422 1422 netlink_overrun(sk);
+2 -2
net/openvswitch/vport-internal_dev.c
··· 94 94 struct vport *vport = ovs_internal_dev_get_vport(dev); 95 95 96 96 ovs_vport_free(vport); 97 - free_netdev(dev); 98 97 } 99 98 100 99 static void ··· 155 156 netdev->priv_flags &= ~IFF_TX_SKB_SHARING; 156 157 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH | 157 158 IFF_PHONY_HEADROOM | IFF_NO_QUEUE; 158 - netdev->destructor = internal_dev_destructor; 159 + netdev->needs_free_netdev = true; 160 + netdev->priv_destructor = internal_dev_destructor; 159 161 netdev->ethtool_ops = &internal_dev_ethtool_ops; 160 162 netdev->rtnl_link_ops = &internal_dev_link_ops; 161 163
+1 -1
net/phonet/pep-gprs.c
··· 236 236 dev->tx_queue_len = 10; 237 237 238 238 dev->netdev_ops = &gprs_netdev_ops; 239 - dev->destructor = free_netdev; 239 + dev->needs_free_netdev = true; 240 240 } 241 241 242 242 /*
+34 -30
net/rxrpc/key.c
··· 217 217 unsigned int *_toklen) 218 218 { 219 219 const __be32 *xdr = *_xdr; 220 - unsigned int toklen = *_toklen, n_parts, loop, tmp; 220 + unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen; 221 221 222 222 /* there must be at least one name, and at least #names+1 length 223 223 * words */ ··· 247 247 toklen -= 4; 248 248 if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) 249 249 return -EINVAL; 250 - if (tmp > toklen) 250 + paddedlen = (tmp + 3) & ~3; 251 + if (paddedlen > toklen) 251 252 return -EINVAL; 252 253 princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); 253 254 if (!princ->name_parts[loop]) 254 255 return -ENOMEM; 255 256 memcpy(princ->name_parts[loop], xdr, tmp); 256 257 princ->name_parts[loop][tmp] = 0; 257 - tmp = (tmp + 3) & ~3; 258 - toklen -= tmp; 259 - xdr += tmp >> 2; 258 + toklen -= paddedlen; 259 + xdr += paddedlen >> 2; 260 260 } 261 261 262 262 if (toklen < 4) ··· 265 265 toklen -= 4; 266 266 if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) 267 267 return -EINVAL; 268 - if (tmp > toklen) 268 + paddedlen = (tmp + 3) & ~3; 269 + if (paddedlen > toklen) 269 270 return -EINVAL; 270 271 princ->realm = kmalloc(tmp + 1, GFP_KERNEL); 271 272 if (!princ->realm) 272 273 return -ENOMEM; 273 274 memcpy(princ->realm, xdr, tmp); 274 275 princ->realm[tmp] = 0; 275 - tmp = (tmp + 3) & ~3; 276 - toklen -= tmp; 277 - xdr += tmp >> 2; 276 + toklen -= paddedlen; 277 + xdr += paddedlen >> 2; 278 278 279 279 _debug("%s/...@%s", princ->name_parts[0], princ->realm); 280 280 ··· 293 293 unsigned int *_toklen) 294 294 { 295 295 const __be32 *xdr = *_xdr; 296 - unsigned int toklen = *_toklen, len; 296 + unsigned int toklen = *_toklen, len, paddedlen; 297 297 298 298 /* there must be at least one tag and one length word */ 299 299 if (toklen <= 8) ··· 307 307 toklen -= 8; 308 308 if (len > max_data_size) 309 309 return -EINVAL; 310 + paddedlen = (len + 3) & ~3; 311 + if (paddedlen > toklen) 312 + return -EINVAL; 310 313 td->data_len = len; 311 314 312 315 if (len > 0) { 313 316 td->data = kmemdup(xdr, len, GFP_KERNEL); 314 317 if (!td->data) 315 318 return -ENOMEM; 316 - len = (len + 3) & ~3; 317 - toklen -= len; 318 - xdr += len >> 2; 319 + toklen -= paddedlen; 320 + xdr += paddedlen >> 2; 319 321 } 320 322 321 323 _debug("tag %x len %x", td->tag, td->data_len); ··· 389 387 const __be32 **_xdr, unsigned int *_toklen) 390 388 { 391 389 const __be32 *xdr = *_xdr; 392 - unsigned int toklen = *_toklen, len; 390 + unsigned int toklen = *_toklen, len, paddedlen; 393 391 394 392 /* there must be at least one length word */ 395 393 if (toklen <= 4) ··· 401 399 toklen -= 4; 402 400 if (len > AFSTOKEN_K5_TIX_MAX) 403 401 return -EINVAL; 402 + paddedlen = (len + 3) & ~3; 403 + if (paddedlen > toklen) 404 + return -EINVAL; 404 405 *_tktlen = len; 405 406 406 407 _debug("ticket len %u", len); ··· 412 407 *_ticket = kmemdup(xdr, len, GFP_KERNEL); 413 408 if (!*_ticket) 414 409 return -ENOMEM; 415 - len = (len + 3) & ~3; 416 - toklen -= len; 417 - xdr += len >> 2; 410 + toklen -= paddedlen; 411 + xdr += paddedlen >> 2; 418 412 } 419 413 420 414 *_xdr = xdr; ··· 556 552 { 557 553 const __be32 *xdr = prep->data, *token; 558 554 const char *cp; 559 - unsigned int len, tmp, loop, ntoken, toklen, sec_ix; 555 + unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix; 560 556 size_t datalen = prep->datalen; 561 557 int ret; 562 558 ··· 582 578 if (len < 1 || len > AFSTOKEN_CELL_MAX) 583 579 goto not_xdr; 584 580 datalen -= 4; 585 - tmp = (len + 3) & ~3; 586 - if (tmp > datalen) 581 + paddedlen = (len + 3) 
& ~3; 582 + if (paddedlen > datalen) 587 583 goto not_xdr; 588 584 589 585 cp = (const char *) xdr; 590 586 for (loop = 0; loop < len; loop++) 591 587 if (!isprint(cp[loop])) 592 588 goto not_xdr; 593 - if (len < tmp) 594 - for (; loop < tmp; loop++) 595 - if (cp[loop]) 596 - goto not_xdr; 589 + for (; loop < paddedlen; loop++) 590 + if (cp[loop]) 591 + goto not_xdr; 597 592 _debug("cellname: [%u/%u] '%*.*s'", 598 - len, tmp, len, len, (const char *) xdr); 599 - datalen -= tmp; 600 - xdr += tmp >> 2; 593 + len, paddedlen, len, len, (const char *) xdr); 594 + datalen -= paddedlen; 595 + xdr += paddedlen >> 2; 601 596 602 597 /* get the token count */ 603 598 if (datalen < 12) ··· 617 614 sec_ix = ntohl(*xdr); 618 615 datalen -= 4; 619 616 _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); 620 - if (toklen < 20 || toklen > datalen) 617 + paddedlen = (toklen + 3) & ~3; 618 + if (toklen < 20 || toklen > datalen || paddedlen > datalen) 621 619 goto not_xdr; 622 - datalen -= (toklen + 3) & ~3; 623 - xdr += (toklen + 3) >> 2; 620 + datalen -= paddedlen; 621 + xdr += paddedlen >> 2; 624 622 625 623 } while (--loop > 0); 626 624
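The rxrpc key-parsing hunks above all follow one shape: compute the 4-byte padded length up front and range-check that padded value against the remaining buffer before consuming any data, instead of validating the raw length and only padding afterwards. A self-contained sketch of that XDR-style validation; xdr_pull_opaque() and the host-endian shortcut are illustrative, not the kernel code:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Pull one XDR-style opaque: 4-byte length, then data padded to a
     * multiple of four.  The *padded* length is what gets bounds-checked,
     * so the padding can never run past the end of the buffer. */
    static int xdr_pull_opaque(const uint8_t **p, size_t *remain,
                               uint8_t *out, size_t out_max)
    {
        uint32_t len, paddedlen;

        if (*remain < 4)
            return -1;
        memcpy(&len, *p, 4);        /* host order, for brevity only */
        *p += 4;
        *remain -= 4;

        paddedlen = (len + 3) & ~3u;
        if (len > out_max || paddedlen < len || paddedlen > *remain)
            return -1;              /* overflow or overrun */

        memcpy(out, *p, len);
        *p += paddedlen;
        *remain -= paddedlen;
        return (int)len;
    }

    int main(void)
    {
        const uint8_t buf[] = { 5, 0, 0, 0, 'h', 'e', 'l', 'l', 'o', 0, 0, 0 };
        const uint8_t *p = buf;
        size_t remain = sizeof(buf);
        uint8_t out[16];

        printf("pulled %d bytes\n",
               xdr_pull_opaque(&p, &remain, out, sizeof(out)));
        return 0;
    }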
+3 -1
net/sched/act_pedit.c
··· 94 94 k++; 95 95 } 96 96 97 - if (n) 97 + if (n) { 98 + err = -EINVAL; 98 99 goto err_out; 100 + } 99 101 100 102 return keys_ex; 101 103
+3 -5
net/sched/act_police.c
··· 132 132 } 133 133 } 134 134 135 - spin_lock_bh(&police->tcf_lock); 136 135 if (est) { 137 136 err = gen_replace_estimator(&police->tcf_bstats, NULL, 138 137 &police->tcf_rate_est, 139 138 &police->tcf_lock, 140 139 NULL, est); 141 140 if (err) 142 - goto failure_unlock; 141 + goto failure; 143 142 } else if (tb[TCA_POLICE_AVRATE] && 144 143 (ret == ACT_P_CREATED || 145 144 !gen_estimator_active(&police->tcf_rate_est))) { 146 145 err = -EINVAL; 147 - goto failure_unlock; 146 + goto failure; 148 147 } 149 148 149 + spin_lock_bh(&police->tcf_lock); 150 150 /* No failure allowed after this point */ 151 151 police->tcfp_mtu = parm->mtu; 152 152 if (police->tcfp_mtu == 0) { ··· 192 192 193 193 return ret; 194 194 195 - failure_unlock: 196 - spin_unlock_bh(&police->tcf_lock); 197 195 failure: 198 196 qdisc_put_rtab(P_tab); 199 197 qdisc_put_rtab(R_tab);
+2 -1
net/sched/sch_api.c
··· 1019 1019 return sch; 1020 1020 } 1021 1021 /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */ 1022 - ops->destroy(sch); 1022 + if (ops->destroy) 1023 + ops->destroy(sch); 1023 1024 err_out3: 1024 1025 dev_put(dev); 1025 1026 kfree((char *) sch - sch->padded);
+1
net/sctp/endpointola.c
··· 275 275 if (sctp_sk(sk)->bind_hash) 276 276 sctp_put_port(sk); 277 277 278 + sctp_sk(sk)->ep = NULL; 278 279 sock_put(sk); 279 280 } 280 281
+3 -2
net/sctp/sctp_diag.c
··· 278 278 279 279 static int sctp_sock_dump(struct sock *sk, void *p) 280 280 { 281 - struct sctp_endpoint *ep = sctp_sk(sk)->ep; 282 281 struct sctp_comm_param *commp = p; 283 282 struct sk_buff *skb = commp->skb; 284 283 struct netlink_callback *cb = commp->cb; ··· 286 287 int err = 0; 287 288 288 289 lock_sock(sk); 289 - list_for_each_entry(assoc, &ep->asocs, asocs) { 290 + if (!sctp_sk(sk)->ep) 291 + goto release; 292 + list_for_each_entry(assoc, &sctp_sk(sk)->ep->asocs, asocs) { 290 293 if (cb->args[4] < cb->args[1]) 291 294 goto next; 292 295
+4 -5
net/sctp/socket.c
··· 4622 4622 4623 4623 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; 4624 4624 hash++, head++) { 4625 - read_lock(&head->lock); 4625 + read_lock_bh(&head->lock); 4626 4626 sctp_for_each_hentry(epb, &head->chain) { 4627 4627 err = cb(sctp_ep(epb), p); 4628 4628 if (err) 4629 4629 break; 4630 4630 } 4631 - read_unlock(&head->lock); 4631 + read_unlock_bh(&head->lock); 4632 4632 } 4633 4633 4634 4634 return err; ··· 4666 4666 if (err) 4667 4667 return err; 4668 4668 4669 - sctp_transport_get_idx(net, &hti, pos); 4670 - obj = sctp_transport_get_next(net, &hti); 4671 - for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { 4669 + obj = sctp_transport_get_idx(net, &hti, pos + 1); 4670 + for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) { 4672 4671 struct sctp_transport *transport = obj; 4673 4672 4674 4673 if (!sctp_transport_hold(transport))
+2 -4
net/sunrpc/xprtrdma/backchannel.c
··· 119 119 120 120 for (i = 0; i < (reqs << 1); i++) { 121 121 rqst = kzalloc(sizeof(*rqst), GFP_KERNEL); 122 - if (!rqst) { 123 - pr_err("RPC: %s: Failed to create bc rpc_rqst\n", 124 - __func__); 122 + if (!rqst) 125 123 goto out_free; 126 - } 124 + 127 125 dprintk("RPC: %s: new rqst %p\n", __func__, rqst); 128 126 129 127 rqst->rq_xprt = &r_xprt->rx_xprt;
+6 -1
net/sunrpc/xprtsock.c
··· 2432 2432 case -ENETUNREACH: 2433 2433 case -EADDRINUSE: 2434 2434 case -ENOBUFS: 2435 - /* retry with existing socket, after a delay */ 2435 + /* 2436 + * xs_tcp_force_close() wakes tasks with -EIO. 2437 + * We need to wake them first to ensure the 2438 + * correct error code. 2439 + */ 2440 + xprt_wake_pending_tasks(xprt, status); 2436 2441 xs_tcp_force_close(xprt); 2437 2442 goto out; 2438 2443 }
+1 -1
net/tipc/msg.c
··· 508 508 } 509 509 510 510 if (skb_cloned(_skb) && 511 - pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL)) 511 + pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) 512 512 goto exit; 513 513 514 514 /* Now reverse the concerned fields */
+6 -1
net/unix/af_unix.c
··· 999 999 struct path path = { }; 1000 1000 1001 1001 err = -EINVAL; 1002 - if (sunaddr->sun_family != AF_UNIX) 1002 + if (addr_len < offsetofend(struct sockaddr_un, sun_family) || 1003 + sunaddr->sun_family != AF_UNIX) 1003 1004 goto out; 1004 1005 1005 1006 if (addr_len == sizeof(short)) { ··· 1110 1109 struct sock *other; 1111 1110 unsigned int hash; 1112 1111 int err; 1112 + 1113 + err = -EINVAL; 1114 + if (alen < offsetofend(struct sockaddr, sa_family)) 1115 + goto out; 1113 1116 1114 1117 if (addr->sa_family != AF_UNSPEC) { 1115 1118 err = unix_mkname(sunaddr, alen, &hash);
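The af_unix.c hunk rejects addresses shorter than offsetofend(struct sockaddr_un, sun_family) before reading sun_family at all, so a truncated sockaddr can no longer cause an out-of-bounds read. A userspace sketch of the same length check; offsetofend() is redefined locally here because it is a kernel-internal helper:

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    /* Offset of the first byte past a member, kernel-style. */
    #define offsetofend(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))

    /* Validate the caller-supplied length before touching any field. */
    static int check_unix_addr(const struct sockaddr_un *sun, socklen_t len)
    {
        if (len < offsetofend(struct sockaddr_un, sun_family))
            return -1;
        if (sun->sun_family != AF_UNIX)
            return -1;
        return 0;
    }

    int main(void)
    {
        struct sockaddr_un sun = { .sun_family = AF_UNIX };

        printf("full:  %d\n", check_unix_addr(&sun, sizeof(sun)));
        printf("short: %d\n", check_unix_addr(&sun, 1));
        return 0;
    }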
+9 -13
net/wireless/wext-core.c
··· 914 914 * Main IOCTl dispatcher. 915 915 * Check the type of IOCTL and call the appropriate wrapper... 916 916 */ 917 - static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, 917 + static int wireless_process_ioctl(struct net *net, struct iwreq *iwr, 918 918 unsigned int cmd, 919 919 struct iw_request_info *info, 920 920 wext_ioctl_func standard, 921 921 wext_ioctl_func private) 922 922 { 923 - struct iwreq *iwr = (struct iwreq *) ifr; 924 923 struct net_device *dev; 925 924 iw_handler handler; 926 925 ··· 927 928 * The copy_to/from_user() of ifr is also dealt with in there */ 928 929 929 930 /* Make sure the device exist */ 930 - if ((dev = __dev_get_by_name(net, ifr->ifr_name)) == NULL) 931 + if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL) 931 932 return -ENODEV; 932 933 933 934 /* A bunch of special cases, then the generic case... ··· 956 957 else if (private) 957 958 return private(dev, iwr, cmd, info, handler); 958 959 } 959 - /* Old driver API : call driver ioctl handler */ 960 - if (dev->netdev_ops->ndo_do_ioctl) 961 - return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); 962 960 return -EOPNOTSUPP; 963 961 } 964 962 ··· 973 977 } 974 978 975 979 /* entry point from dev ioctl */ 976 - static int wext_ioctl_dispatch(struct net *net, struct ifreq *ifr, 980 + static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr, 977 981 unsigned int cmd, struct iw_request_info *info, 978 982 wext_ioctl_func standard, 979 983 wext_ioctl_func private) ··· 983 987 if (ret) 984 988 return ret; 985 989 986 - dev_load(net, ifr->ifr_name); 990 + dev_load(net, iwr->ifr_name); 987 991 rtnl_lock(); 988 - ret = wireless_process_ioctl(net, ifr, cmd, info, standard, private); 992 + ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private); 989 993 rtnl_unlock(); 990 994 991 995 return ret; ··· 1035 1039 } 1036 1040 1037 1041 1038 - int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd, 1042 + int wext_handle_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, 1039 1043 void __user *arg) 1040 1044 { 1041 1045 struct iw_request_info info = { .cmd = cmd, .flags = 0 }; 1042 1046 int ret; 1043 1047 1044 - ret = wext_ioctl_dispatch(net, ifr, cmd, &info, 1048 + ret = wext_ioctl_dispatch(net, iwr, cmd, &info, 1045 1049 ioctl_standard_call, 1046 1050 ioctl_private_call); 1047 1051 if (ret >= 0 && 1048 1052 IW_IS_GET(cmd) && 1049 - copy_to_user(arg, ifr, sizeof(struct iwreq))) 1053 + copy_to_user(arg, iwr, sizeof(struct iwreq))) 1050 1054 return -EFAULT; 1051 1055 1052 1056 return ret; ··· 1103 1107 info.cmd = cmd; 1104 1108 info.flags = IW_REQUEST_FLAG_COMPAT; 1105 1109 1106 - ret = wext_ioctl_dispatch(net, (struct ifreq *) &iwr, cmd, &info, 1110 + ret = wext_ioctl_dispatch(net, &iwr, cmd, &info, 1107 1111 compat_standard_call, 1108 1112 compat_private_call); 1109 1113
+1 -2
net/xfrm/Makefile
··· 4 4 5 5 obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ 6 6 xfrm_input.o xfrm_output.o \ 7 - xfrm_sysctl.o xfrm_replay.o 8 - obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o 7 + xfrm_sysctl.o xfrm_replay.o xfrm_device.o 9 8 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o 10 9 obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o 11 10 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
+2
net/xfrm/xfrm_device.c
··· 22 22 #include <net/xfrm.h> 23 23 #include <linux/notifier.h> 24 24 25 + #ifdef CONFIG_XFRM_OFFLOAD 25 26 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) 26 27 { 27 28 int err; ··· 138 137 return true; 139 138 } 140 139 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok); 140 + #endif 141 141 142 142 int xfrm_dev_register(struct net_device *dev) 143 143 {
-4
net/xfrm/xfrm_policy.c
··· 1006 1006 err = -ESRCH; 1007 1007 out: 1008 1008 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 1009 - 1010 - if (cnt) 1011 - xfrm_garbage_collect(net); 1012 - 1013 1009 return err; 1014 1010 } 1015 1011 EXPORT_SYMBOL(xfrm_policy_flush);
+1
net/xfrm/xfrm_user.c
··· 2027 2027 return 0; 2028 2028 return err; 2029 2029 } 2030 + xfrm_garbage_collect(net); 2030 2031 2031 2032 c.data.type = type; 2032 2033 c.event = nlh->nlmsg_type;
+9 -1
scripts/Makefile.headersinst
··· 14 14 include scripts/Kbuild.include 15 15 16 16 srcdir := $(srctree)/$(obj) 17 - subdirs := $(patsubst $(srcdir)/%/.,%,$(wildcard $(srcdir)/*/.)) 17 + 18 + # When make is run under a fakechroot environment, the function 19 + # $(wildcard $(srcdir)/*/.) doesn't only return directories, but also regular 20 + # files. So, we are using a combination of sort/dir/wildcard which works 21 + # with fakechroot. 22 + subdirs := $(patsubst $(srcdir)/%/,%,\ 23 + $(filter-out $(srcdir)/,\ 24 + $(sort $(dir $(wildcard $(srcdir)/*/))))) 25 + 18 26 # caller may set destination dir (when installing to asm/) 19 27 _dst := $(if $(dst),$(dst),$(obj)) 20 28
+5 -4
scripts/gdb/linux/dmesg.py
··· 23 23 super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) 24 24 25 25 def invoke(self, arg, from_tty): 26 - log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16) 27 - log_first_idx = int(gdb.parse_and_eval("log_first_idx")) 28 - log_next_idx = int(gdb.parse_and_eval("log_next_idx")) 29 - log_buf_len = int(gdb.parse_and_eval("log_buf_len")) 26 + log_buf_addr = int(str(gdb.parse_and_eval( 27 + "'printk.c'::log_buf")).split()[0], 16) 28 + log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx")) 29 + log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx")) 30 + log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len")) 30 31 31 32 inf = gdb.inferiors()[0] 32 33 start = log_buf_addr + log_first_idx
+1 -1
scripts/genksyms/genksyms.h
··· 75 75 int yylex(void); 76 76 int yyparse(void); 77 77 78 - void error_with_pos(const char *, ...); 78 + void error_with_pos(const char *, ...) __attribute__ ((format(printf, 1, 2))); 79 79 80 80 /*----------------------------------------------------------------------*/ 81 81 #define xmalloc(size) ({ void *__ptr = malloc(size); \
+1 -1
scripts/kconfig/Makefile
··· 196 196 197 197 # Check that we have the required ncurses stuff installed for lxdialog (menuconfig) 198 198 PHONY += $(obj)/dochecklxdialog 199 - $(addprefix $(obj)/,$(lxdialog)): $(obj)/dochecklxdialog 199 + $(addprefix $(obj)/, mconf.o $(lxdialog)): $(obj)/dochecklxdialog 200 200 $(obj)/dochecklxdialog: 201 201 $(Q)$(CONFIG_SHELL) $(check-lxdialog) -check $(HOSTCC) $(HOST_EXTRACFLAGS) $(HOSTLOADLIBES_mconf) 202 202
+6 -6
scripts/kconfig/nconf.c
··· 271 271 static int items_num; 272 272 static int global_exit; 273 273 /* the currently selected button */ 274 - const char *current_instructions = menu_instructions; 274 + static const char *current_instructions = menu_instructions; 275 275 276 276 static char *dialog_input_result; 277 277 static int dialog_input_result_len; ··· 305 305 }; 306 306 307 307 static const int function_keys_num = 9; 308 - struct function_keys function_keys[] = { 308 + static struct function_keys function_keys[] = { 309 309 { 310 310 .key_str = "F1", 311 311 .func = "Help", ··· 508 508 index = (index + items_num) % items_num; 509 509 while (true) { 510 510 char *str = k_menu_items[index].str; 511 - if (strcasestr(str, match_str) != 0) 511 + if (strcasestr(str, match_str) != NULL) 512 512 return index; 513 513 if (flag == FIND_NEXT_MATCH_UP || 514 514 flag == MATCH_TINKER_PATTERN_UP) ··· 1067 1067 1068 1068 static void conf(struct menu *menu) 1069 1069 { 1070 - struct menu *submenu = 0; 1070 + struct menu *submenu = NULL; 1071 1071 const char *prompt = menu_get_prompt(menu); 1072 1072 struct symbol *sym; 1073 1073 int res; ··· 1234 1234 static void conf_choice(struct menu *menu) 1235 1235 { 1236 1236 const char *prompt = _(menu_get_prompt(menu)); 1237 - struct menu *child = 0; 1237 + struct menu *child = NULL; 1238 1238 struct symbol *active; 1239 1239 int selected_index = 0; 1240 1240 int last_top_row = 0; ··· 1456 1456 } 1457 1457 } 1458 1458 1459 - void setup_windows(void) 1459 + static void setup_windows(void) 1460 1460 { 1461 1461 int lines, columns; 1462 1462
+2 -2
scripts/kconfig/nconf.gui.c
··· 129 129 mkattrn(FUNCTION_TEXT, A_REVERSE); 130 130 } 131 131 132 - void set_colors() 132 + void set_colors(void) 133 133 { 134 134 start_color(); 135 135 use_default_colors(); ··· 192 192 int lines = 0; 193 193 194 194 if (!text) 195 - return 0; 195 + return NULL; 196 196 197 197 for (i = 0; text[i] != '\0' && lines < line_no; i++) 198 198 if (text[i] == '\n')
+1
scripts/tags.sh
··· 106 106 case "$i" in 107 107 *.[cS]) 108 108 j=${i/\.[cS]/\.o} 109 + j="${j#$tree}" 109 110 if [ -e $j ]; then 110 111 echo $i 111 112 fi
+5 -1
security/keys/Kconfig
··· 20 20 21 21 If you are unsure as to whether this is required, answer N. 22 22 23 + config KEYS_COMPAT 24 + def_bool y 25 + depends on COMPAT && KEYS 26 + 23 27 config PERSISTENT_KEYRINGS 24 28 bool "Enable register of persistent per-UID keyrings" 25 29 depends on KEYS ··· 93 89 config KEY_DH_OPERATIONS 94 90 bool "Diffie-Hellman operations on retained keys" 95 91 depends on KEYS 96 - select MPILIB 97 92 select CRYPTO 98 93 select CRYPTO_HASH 94 + select CRYPTO_DH 99 95 help 100 96 This option provides support for calculating Diffie-Hellman 101 97 public keys and shared secrets using values stored as keys
+190 -122
security/keys/dh.c
··· 8 8 * 2 of the License, or (at your option) any later version. 9 9 */ 10 10 11 - #include <linux/mpi.h> 12 11 #include <linux/slab.h> 13 12 #include <linux/uaccess.h> 13 + #include <linux/scatterlist.h> 14 14 #include <linux/crypto.h> 15 15 #include <crypto/hash.h> 16 + #include <crypto/kpp.h> 17 + #include <crypto/dh.h> 16 18 #include <keys/user-type.h> 17 19 #include "internal.h" 18 20 19 - /* 20 - * Public key or shared secret generation function [RFC2631 sec 2.1.1] 21 - * 22 - * ya = g^xa mod p; 23 - * or 24 - * ZZ = yb^xa mod p; 25 - * 26 - * where xa is the local private key, ya is the local public key, g is 27 - * the generator, p is the prime, yb is the remote public key, and ZZ 28 - * is the shared secret. 29 - * 30 - * Both are the same calculation, so g or yb are the "base" and ya or 31 - * ZZ are the "result". 32 - */ 33 - static int do_dh(MPI result, MPI base, MPI xa, MPI p) 34 - { 35 - return mpi_powm(result, base, xa, p); 36 - } 37 - 38 - static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi) 21 + static ssize_t dh_data_from_key(key_serial_t keyid, void **data) 39 22 { 40 23 struct key *key; 41 24 key_ref_t key_ref; ··· 39 56 status = key_validate(key); 40 57 if (status == 0) { 41 58 const struct user_key_payload *payload; 59 + uint8_t *duplicate; 42 60 43 61 payload = user_key_payload_locked(key); 44 62 45 - if (maxlen == 0) { 46 - *mpi = NULL; 63 + duplicate = kmemdup(payload->data, payload->datalen, 64 + GFP_KERNEL); 65 + if (duplicate) { 66 + *data = duplicate; 47 67 ret = payload->datalen; 48 - } else if (payload->datalen <= maxlen) { 49 - *mpi = mpi_read_raw_data(payload->data, 50 - payload->datalen); 51 - if (*mpi) 52 - ret = payload->datalen; 53 68 } else { 54 - ret = -EINVAL; 69 + ret = -ENOMEM; 55 70 } 56 71 } 57 72 up_read(&key->sem); ··· 58 77 key_put(key); 59 78 error: 60 79 return ret; 80 + } 81 + 82 + static void dh_free_data(struct dh *dh) 83 + { 84 + kzfree(dh->key); 85 + kzfree(dh->p); 86 + kzfree(dh->g); 87 + } 88 + 89 + struct dh_completion { 90 + struct completion completion; 91 + int err; 92 + }; 93 + 94 + static void dh_crypto_done(struct crypto_async_request *req, int err) 95 + { 96 + struct dh_completion *compl = req->data; 97 + 98 + if (err == -EINPROGRESS) 99 + return; 100 + 101 + compl->err = err; 102 + complete(&compl->completion); 61 103 } 62 104 63 105 struct kdf_sdesc { ··· 93 89 struct crypto_shash *tfm; 94 90 struct kdf_sdesc *sdesc; 95 91 int size; 92 + int err; 96 93 97 94 /* allocate synchronous hash */ 98 95 tfm = crypto_alloc_shash(hashname, 0, 0); ··· 102 97 return PTR_ERR(tfm); 103 98 } 104 99 100 + err = -EINVAL; 101 + if (crypto_shash_digestsize(tfm) == 0) 102 + goto out_free_tfm; 103 + 104 + err = -ENOMEM; 105 105 size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm); 106 106 sdesc = kmalloc(size, GFP_KERNEL); 107 107 if (!sdesc) 108 - return -ENOMEM; 108 + goto out_free_tfm; 109 109 sdesc->shash.tfm = tfm; 110 110 sdesc->shash.flags = 0x0; 111 111 112 112 *sdesc_ret = sdesc; 113 113 114 114 return 0; 115 + 116 + out_free_tfm: 117 + crypto_free_shash(tfm); 118 + return err; 115 119 } 116 120 117 121 static void kdf_dealloc(struct kdf_sdesc *sdesc) ··· 134 120 kzfree(sdesc); 135 121 } 136 122 137 - /* convert 32 bit integer into its string representation */ 138 - static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf) 139 - { 140 - __be32 *a = (__be32 *)buf; 141 - 142 - *a = cpu_to_be32(val); 143 - } 144 - 145 123 /* 146 124 * Implementation of the KDF in counter mode according to SP800-108 section 
5.1 147 125 * as well as SP800-56A section 5.8.1 (Single-step KDF). ··· 144 138 * 5.8.1.2). 145 139 */ 146 140 static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen, 147 - u8 *dst, unsigned int dlen) 141 + u8 *dst, unsigned int dlen, unsigned int zlen) 148 142 { 149 143 struct shash_desc *desc = &sdesc->shash; 150 144 unsigned int h = crypto_shash_digestsize(desc->tfm); 151 145 int err = 0; 152 146 u8 *dst_orig = dst; 153 - u32 i = 1; 154 - u8 iteration[sizeof(u32)]; 147 + __be32 counter = cpu_to_be32(1); 155 148 156 149 while (dlen) { 157 150 err = crypto_shash_init(desc); 158 151 if (err) 159 152 goto err; 160 153 161 - crypto_kw_cpu_to_be32(i, iteration); 162 - err = crypto_shash_update(desc, iteration, sizeof(u32)); 154 + err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32)); 163 155 if (err) 164 156 goto err; 157 + 158 + if (zlen && h) { 159 + u8 tmpbuffer[h]; 160 + size_t chunk = min_t(size_t, zlen, h); 161 + memset(tmpbuffer, 0, chunk); 162 + 163 + do { 164 + err = crypto_shash_update(desc, tmpbuffer, 165 + chunk); 166 + if (err) 167 + goto err; 168 + 169 + zlen -= chunk; 170 + chunk = min_t(size_t, zlen, h); 171 + } while (zlen); 172 + } 165 173 166 174 if (src && slen) { 167 175 err = crypto_shash_update(desc, src, slen); ··· 199 179 200 180 dlen -= h; 201 181 dst += h; 202 - i++; 182 + counter = cpu_to_be32(be32_to_cpu(counter) + 1); 203 183 } 204 184 } 205 185 ··· 212 192 213 193 static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc, 214 194 char __user *buffer, size_t buflen, 215 - uint8_t *kbuf, size_t kbuflen) 195 + uint8_t *kbuf, size_t kbuflen, size_t lzero) 216 196 { 217 197 uint8_t *outbuf = NULL; 218 198 int ret; ··· 223 203 goto err; 224 204 } 225 205 226 - ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen); 206 + ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen, lzero); 227 207 if (ret) 228 208 goto err; 229 209 ··· 241 221 struct keyctl_kdf_params *kdfcopy) 242 222 { 243 223 long ret; 244 - MPI base, private, prime, result; 245 - unsigned nbytes; 224 + ssize_t dlen; 225 + int secretlen; 226 + int outlen; 246 227 struct keyctl_dh_params pcopy; 247 - uint8_t *kbuf; 248 - ssize_t keylen; 249 - size_t resultlen; 228 + struct dh dh_inputs; 229 + struct scatterlist outsg; 230 + struct dh_completion compl; 231 + struct crypto_kpp *tfm; 232 + struct kpp_request *req; 233 + uint8_t *secret; 234 + uint8_t *outbuf; 250 235 struct kdf_sdesc *sdesc = NULL; 251 236 252 237 if (!params || (!buffer && buflen)) { 253 238 ret = -EINVAL; 254 - goto out; 239 + goto out1; 255 240 } 256 241 if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) { 257 242 ret = -EFAULT; 258 - goto out; 243 + goto out1; 259 244 } 260 245 261 246 if (kdfcopy) { ··· 269 244 if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN || 270 245 kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) { 271 246 ret = -EMSGSIZE; 272 - goto out; 247 + goto out1; 273 248 } 274 249 275 250 /* get KDF name string */ 276 251 hashname = strndup_user(kdfcopy->hashname, CRYPTO_MAX_ALG_NAME); 277 252 if (IS_ERR(hashname)) { 278 253 ret = PTR_ERR(hashname); 279 - goto out; 254 + goto out1; 280 255 } 281 256 282 257 /* allocate KDF from the kernel crypto API */ 283 258 ret = kdf_alloc(&sdesc, hashname); 284 259 kfree(hashname); 285 260 if (ret) 286 - goto out; 261 + goto out1; 287 262 } 288 263 289 - /* 290 - * If the caller requests postprocessing with a KDF, allow an 291 - * arbitrary output buffer size since the KDF ensures proper truncation. 292 - */ 293 - keylen = mpi_from_key(pcopy.prime, kdfcopy ? 
SIZE_MAX : buflen, &prime); 294 - if (keylen < 0 || !prime) { 295 - /* buflen == 0 may be used to query the required buffer size, 296 - * which is the prime key length. 297 - */ 298 - ret = keylen; 299 - goto out; 264 + memset(&dh_inputs, 0, sizeof(dh_inputs)); 265 + 266 + dlen = dh_data_from_key(pcopy.prime, &dh_inputs.p); 267 + if (dlen < 0) { 268 + ret = dlen; 269 + goto out1; 300 270 } 271 + dh_inputs.p_size = dlen; 301 272 302 - /* The result is never longer than the prime */ 303 - resultlen = keylen; 304 - 305 - keylen = mpi_from_key(pcopy.base, SIZE_MAX, &base); 306 - if (keylen < 0 || !base) { 307 - ret = keylen; 308 - goto error1; 273 + dlen = dh_data_from_key(pcopy.base, &dh_inputs.g); 274 + if (dlen < 0) { 275 + ret = dlen; 276 + goto out2; 309 277 } 278 + dh_inputs.g_size = dlen; 310 279 311 - keylen = mpi_from_key(pcopy.private, SIZE_MAX, &private); 312 - if (keylen < 0 || !private) { 313 - ret = keylen; 314 - goto error2; 280 + dlen = dh_data_from_key(pcopy.private, &dh_inputs.key); 281 + if (dlen < 0) { 282 + ret = dlen; 283 + goto out2; 315 284 } 285 + dh_inputs.key_size = dlen; 316 286 317 - result = mpi_alloc(0); 318 - if (!result) { 287 + secretlen = crypto_dh_key_len(&dh_inputs); 288 + secret = kmalloc(secretlen, GFP_KERNEL); 289 + if (!secret) { 319 290 ret = -ENOMEM; 320 - goto error3; 291 + goto out2; 321 292 } 322 - 323 - /* allocate space for DH shared secret and SP800-56A otherinfo */ 324 - kbuf = kmalloc(kdfcopy ? (resultlen + kdfcopy->otherinfolen) : resultlen, 325 - GFP_KERNEL); 326 - if (!kbuf) { 327 - ret = -ENOMEM; 328 - goto error4; 329 - } 330 - 331 - /* 332 - * Concatenate SP800-56A otherinfo past DH shared secret -- the 333 - * input to the KDF is (DH shared secret || otherinfo) 334 - */ 335 - if (kdfcopy && kdfcopy->otherinfo && 336 - copy_from_user(kbuf + resultlen, kdfcopy->otherinfo, 337 - kdfcopy->otherinfolen) != 0) { 338 - ret = -EFAULT; 339 - goto error5; 340 - } 341 - 342 - ret = do_dh(result, base, private, prime); 293 + ret = crypto_dh_encode_key(secret, secretlen, &dh_inputs); 343 294 if (ret) 344 - goto error5; 295 + goto out3; 345 296 346 - ret = mpi_read_buffer(result, kbuf, resultlen, &nbytes, NULL); 347 - if (ret != 0) 348 - goto error5; 297 + tfm = crypto_alloc_kpp("dh", CRYPTO_ALG_TYPE_KPP, 0); 298 + if (IS_ERR(tfm)) { 299 + ret = PTR_ERR(tfm); 300 + goto out3; 301 + } 302 + 303 + ret = crypto_kpp_set_secret(tfm, secret, secretlen); 304 + if (ret) 305 + goto out4; 306 + 307 + outlen = crypto_kpp_maxsize(tfm); 308 + 309 + if (!kdfcopy) { 310 + /* 311 + * When not using a KDF, buflen 0 is used to read the 312 + * required buffer length 313 + */ 314 + if (buflen == 0) { 315 + ret = outlen; 316 + goto out4; 317 + } else if (outlen > buflen) { 318 + ret = -EOVERFLOW; 319 + goto out4; 320 + } 321 + } 322 + 323 + outbuf = kzalloc(kdfcopy ? 
(outlen + kdfcopy->otherinfolen) : outlen, 324 + GFP_KERNEL); 325 + if (!outbuf) { 326 + ret = -ENOMEM; 327 + goto out4; 328 + } 329 + 330 + sg_init_one(&outsg, outbuf, outlen); 331 + 332 + req = kpp_request_alloc(tfm, GFP_KERNEL); 333 + if (!req) { 334 + ret = -ENOMEM; 335 + goto out5; 336 + } 337 + 338 + kpp_request_set_input(req, NULL, 0); 339 + kpp_request_set_output(req, &outsg, outlen); 340 + init_completion(&compl.completion); 341 + kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | 342 + CRYPTO_TFM_REQ_MAY_SLEEP, 343 + dh_crypto_done, &compl); 344 + 345 + /* 346 + * For DH, generate_public_key and generate_shared_secret are 347 + * the same calculation 348 + */ 349 + ret = crypto_kpp_generate_public_key(req); 350 + if (ret == -EINPROGRESS) { 351 + wait_for_completion(&compl.completion); 352 + ret = compl.err; 353 + if (ret) 354 + goto out6; 355 + } 349 356 350 357 if (kdfcopy) { 351 - ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, kbuf, 352 - resultlen + kdfcopy->otherinfolen); 353 - } else { 354 - ret = nbytes; 355 - if (copy_to_user(buffer, kbuf, nbytes) != 0) 358 + /* 359 + * Concatenate SP800-56A otherinfo past DH shared secret -- the 360 + * input to the KDF is (DH shared secret || otherinfo) 361 + */ 362 + if (copy_from_user(outbuf + req->dst_len, kdfcopy->otherinfo, 363 + kdfcopy->otherinfolen) != 0) { 356 364 ret = -EFAULT; 365 + goto out6; 366 + } 367 + 368 + ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, outbuf, 369 + req->dst_len + kdfcopy->otherinfolen, 370 + outlen - req->dst_len); 371 + } else if (copy_to_user(buffer, outbuf, req->dst_len) == 0) { 372 + ret = req->dst_len; 373 + } else { 374 + ret = -EFAULT; 357 375 } 358 376 359 - error5: 360 - kzfree(kbuf); 361 - error4: 362 - mpi_free(result); 363 - error3: 364 - mpi_free(private); 365 - error2: 366 - mpi_free(base); 367 - error1: 368 - mpi_free(prime); 369 - out: 377 + out6: 378 + kpp_request_free(req); 379 + out5: 380 + kzfree(outbuf); 381 + out4: 382 + crypto_free_kpp(tfm); 383 + out3: 384 + kzfree(secret); 385 + out2: 386 + dh_free_data(&dh_inputs); 387 + out1: 370 388 kdf_dealloc(sdesc); 371 389 return ret; 372 390 }
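The rewritten keyctl path above derives its output with a counter-mode KDF (SP800-108 section 5.1 / SP800-56A section 5.8.1): each output block is the digest of a big-endian counter followed by the shared secret and the otherinfo blob (the kernel version also re-inserts any leading zero bytes the kpp output dropped). A rough userspace sketch of that construction; hash_block() is a stand-in digest and the fixed scratch buffer is an assumption of the sketch, not part of the patch.

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>   /* htonl() for the big-endian counter */

    #define H_LEN 32  /* assumed digest size of the placeholder hash */

    /* Placeholder one-shot digest -- stands in for the crypto_shash calls. */
    void hash_block(const uint8_t *msg, size_t len, uint8_t out[H_LEN]);

    /* dst is filled with H(be32(i) || z || info) for i = 1, 2, ... */
    static void kdf_ctr_sketch(const uint8_t *z, size_t zlen,
                               const uint8_t *info, size_t ilen,
                               uint8_t *dst, size_t dlen)
    {
        uint8_t msg[4 + 512];     /* sketch assumes zlen + ilen <= 512 */
        uint8_t block[H_LEN];
        uint32_t i = 1;

        while (dlen) {
            uint32_t be = htonl(i++);
            size_t n = dlen < H_LEN ? dlen : H_LEN;

            memcpy(msg, &be, sizeof(be));
            memcpy(msg + 4, z, zlen);
            memcpy(msg + 4 + zlen, info, ilen);
            hash_block(msg, 4 + zlen + ilen, block);

            memcpy(dst, block, n);   /* last block may be truncated */
            dst += n;
            dlen -= n;
        }
    }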
+75 -131
security/keys/encrypted-keys/encrypted.c
··· 30 30 #include <linux/scatterlist.h> 31 31 #include <linux/ctype.h> 32 32 #include <crypto/aes.h> 33 + #include <crypto/algapi.h> 33 34 #include <crypto/hash.h> 34 35 #include <crypto/sha.h> 35 36 #include <crypto/skcipher.h> ··· 55 54 #define MAX_DATA_SIZE 4096 56 55 #define MIN_DATA_SIZE 20 57 56 58 - struct sdesc { 59 - struct shash_desc shash; 60 - char ctx[]; 61 - }; 62 - 63 - static struct crypto_shash *hashalg; 64 - static struct crypto_shash *hmacalg; 57 + static struct crypto_shash *hash_tfm; 65 58 66 59 enum { 67 60 Opt_err = -1, Opt_new, Opt_load, Opt_update ··· 136 141 */ 137 142 static int valid_master_desc(const char *new_desc, const char *orig_desc) 138 143 { 139 - if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) { 140 - if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN) 141 - goto out; 142 - if (orig_desc) 143 - if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN)) 144 - goto out; 145 - } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) { 146 - if (strlen(new_desc) == KEY_USER_PREFIX_LEN) 147 - goto out; 148 - if (orig_desc) 149 - if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN)) 150 - goto out; 151 - } else 152 - goto out; 144 + int prefix_len; 145 + 146 + if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) 147 + prefix_len = KEY_TRUSTED_PREFIX_LEN; 148 + else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) 149 + prefix_len = KEY_USER_PREFIX_LEN; 150 + else 151 + return -EINVAL; 152 + 153 + if (!new_desc[prefix_len]) 154 + return -EINVAL; 155 + 156 + if (orig_desc && strncmp(new_desc, orig_desc, prefix_len)) 157 + return -EINVAL; 158 + 153 159 return 0; 154 - out: 155 - return -EINVAL; 156 160 } 157 161 158 162 /* ··· 315 321 return ukey; 316 322 } 317 323 318 - static struct sdesc *alloc_sdesc(struct crypto_shash *alg) 324 + static int calc_hash(struct crypto_shash *tfm, u8 *digest, 325 + const u8 *buf, unsigned int buflen) 319 326 { 320 - struct sdesc *sdesc; 321 - int size; 327 + SHASH_DESC_ON_STACK(desc, tfm); 328 + int err; 322 329 323 - size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); 324 - sdesc = kmalloc(size, GFP_KERNEL); 325 - if (!sdesc) 326 - return ERR_PTR(-ENOMEM); 327 - sdesc->shash.tfm = alg; 328 - sdesc->shash.flags = 0x0; 329 - return sdesc; 330 + desc->tfm = tfm; 331 + desc->flags = 0; 332 + 333 + err = crypto_shash_digest(desc, buf, buflen, digest); 334 + shash_desc_zero(desc); 335 + return err; 330 336 } 331 337 332 338 static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen, 333 339 const u8 *buf, unsigned int buflen) 334 340 { 335 - struct sdesc *sdesc; 336 - int ret; 341 + struct crypto_shash *tfm; 342 + int err; 337 343 338 - sdesc = alloc_sdesc(hmacalg); 339 - if (IS_ERR(sdesc)) { 340 - pr_info("encrypted_key: can't alloc %s\n", hmac_alg); 341 - return PTR_ERR(sdesc); 344 + tfm = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC); 345 + if (IS_ERR(tfm)) { 346 + pr_err("encrypted_key: can't alloc %s transform: %ld\n", 347 + hmac_alg, PTR_ERR(tfm)); 348 + return PTR_ERR(tfm); 342 349 } 343 350 344 - ret = crypto_shash_setkey(hmacalg, key, keylen); 345 - if (!ret) 346 - ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest); 347 - kfree(sdesc); 348 - return ret; 349 - } 350 - 351 - static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen) 352 - { 353 - struct sdesc *sdesc; 354 - int ret; 355 - 356 - sdesc = alloc_sdesc(hashalg); 357 - if (IS_ERR(sdesc)) { 358 - pr_info("encrypted_key: can't alloc %s\n", hash_alg); 359 - return 
PTR_ERR(sdesc); 360 - } 361 - 362 - ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest); 363 - kfree(sdesc); 364 - return ret; 351 + err = crypto_shash_setkey(tfm, key, keylen); 352 + if (!err) 353 + err = calc_hash(tfm, digest, buf, buflen); 354 + crypto_free_shash(tfm); 355 + return err; 365 356 } 366 357 367 358 enum derived_key_type { ENC_KEY, AUTH_KEY }; ··· 364 385 derived_buf_len = HASH_SIZE; 365 386 366 387 derived_buf = kzalloc(derived_buf_len, GFP_KERNEL); 367 - if (!derived_buf) { 368 - pr_err("encrypted_key: out of memory\n"); 388 + if (!derived_buf) 369 389 return -ENOMEM; 370 - } 390 + 371 391 if (key_type) 372 392 strcpy(derived_buf, "AUTH_KEY"); 373 393 else ··· 374 396 375 397 memcpy(derived_buf + strlen(derived_buf) + 1, master_key, 376 398 master_keylen); 377 - ret = calc_hash(derived_key, derived_buf, derived_buf_len); 378 - kfree(derived_buf); 399 + ret = calc_hash(hash_tfm, derived_key, derived_buf, derived_buf_len); 400 + kzfree(derived_buf); 379 401 return ret; 380 402 } 381 403 ··· 458 480 struct skcipher_request *req; 459 481 unsigned int encrypted_datalen; 460 482 u8 iv[AES_BLOCK_SIZE]; 461 - unsigned int padlen; 462 - char pad[16]; 463 483 int ret; 464 484 465 485 encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); 466 - padlen = encrypted_datalen - epayload->decrypted_datalen; 467 486 468 487 req = init_skcipher_req(derived_key, derived_keylen); 469 488 ret = PTR_ERR(req); ··· 468 493 goto out; 469 494 dump_decrypted_data(epayload); 470 495 471 - memset(pad, 0, sizeof pad); 472 496 sg_init_table(sg_in, 2); 473 497 sg_set_buf(&sg_in[0], epayload->decrypted_data, 474 498 epayload->decrypted_datalen); 475 - sg_set_buf(&sg_in[1], pad, padlen); 499 + sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0); 476 500 477 501 sg_init_table(sg_out, 1); 478 502 sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen); ··· 507 533 if (!ret) 508 534 dump_hmac(NULL, digest, HASH_SIZE); 509 535 out: 536 + memzero_explicit(derived_key, sizeof(derived_key)); 510 537 return ret; 511 538 } 512 539 ··· 536 561 ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len); 537 562 if (ret < 0) 538 563 goto out; 539 - ret = memcmp(digest, epayload->format + epayload->datablob_len, 540 - sizeof digest); 564 + ret = crypto_memneq(digest, epayload->format + epayload->datablob_len, 565 + sizeof(digest)); 541 566 if (ret) { 542 567 ret = -EINVAL; 543 568 dump_hmac("datablob", ··· 546 571 dump_hmac("calc", digest, HASH_SIZE); 547 572 } 548 573 out: 574 + memzero_explicit(derived_key, sizeof(derived_key)); 549 575 return ret; 550 576 } 551 577 ··· 560 584 struct skcipher_request *req; 561 585 unsigned int encrypted_datalen; 562 586 u8 iv[AES_BLOCK_SIZE]; 563 - char pad[16]; 587 + u8 *pad; 564 588 int ret; 589 + 590 + /* Throwaway buffer to hold the unused zero padding at the end */ 591 + pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL); 592 + if (!pad) 593 + return -ENOMEM; 565 594 566 595 encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); 567 596 req = init_skcipher_req(derived_key, derived_keylen); ··· 575 594 goto out; 576 595 dump_encrypted_data(epayload, encrypted_datalen); 577 596 578 - memset(pad, 0, sizeof pad); 579 597 sg_init_table(sg_in, 1); 580 598 sg_init_table(sg_out, 2); 581 599 sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen); 582 600 sg_set_buf(&sg_out[0], epayload->decrypted_data, 583 601 epayload->decrypted_datalen); 584 - sg_set_buf(&sg_out[1], pad, sizeof pad); 602 + sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE); 
585 603 586 604 memcpy(iv, epayload->iv, sizeof(iv)); 587 605 skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); ··· 592 612 goto out; 593 613 dump_decrypted_data(epayload); 594 614 out: 615 + kfree(pad); 595 616 return ret; 596 617 } 597 618 ··· 703 722 out: 704 723 up_read(&mkey->sem); 705 724 key_put(mkey); 725 + memzero_explicit(derived_key, sizeof(derived_key)); 706 726 return ret; 707 727 } 708 728 ··· 810 828 ret = encrypted_init(epayload, key->description, format, master_desc, 811 829 decrypted_datalen, hex_encoded_iv); 812 830 if (ret < 0) { 813 - kfree(epayload); 831 + kzfree(epayload); 814 832 goto out; 815 833 } 816 834 817 835 rcu_assign_keypointer(key, epayload); 818 836 out: 819 - kfree(datablob); 837 + kzfree(datablob); 820 838 return ret; 821 839 } 822 840 ··· 825 843 struct encrypted_key_payload *epayload; 826 844 827 845 epayload = container_of(rcu, struct encrypted_key_payload, rcu); 828 - memset(epayload->decrypted_data, 0, epayload->decrypted_datalen); 829 - kfree(epayload); 846 + kzfree(epayload); 830 847 } 831 848 832 849 /* ··· 883 902 rcu_assign_keypointer(key, new_epayload); 884 903 call_rcu(&epayload->rcu, encrypted_rcu_free); 885 904 out: 886 - kfree(buf); 905 + kzfree(buf); 887 906 return ret; 888 907 } 889 908 ··· 941 960 942 961 up_read(&mkey->sem); 943 962 key_put(mkey); 963 + memzero_explicit(derived_key, sizeof(derived_key)); 944 964 945 965 if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0) 946 966 ret = -EFAULT; 947 - kfree(ascii_buf); 967 + kzfree(ascii_buf); 948 968 949 969 return asciiblob_len; 950 970 out: 951 971 up_read(&mkey->sem); 952 972 key_put(mkey); 973 + memzero_explicit(derived_key, sizeof(derived_key)); 953 974 return ret; 954 975 } 955 976 956 977 /* 957 - * encrypted_destroy - before freeing the key, clear the decrypted data 958 - * 959 - * Before freeing the key, clear the memory containing the decrypted 960 - * key data. 
978 + * encrypted_destroy - clear and free the key's payload 961 979 */ 962 980 static void encrypted_destroy(struct key *key) 963 981 { 964 - struct encrypted_key_payload *epayload = key->payload.data[0]; 965 - 966 - if (!epayload) 967 - return; 968 - 969 - memzero_explicit(epayload->decrypted_data, epayload->decrypted_datalen); 970 - kfree(key->payload.data[0]); 982 + kzfree(key->payload.data[0]); 971 983 } 972 984 973 985 struct key_type key_type_encrypted = { ··· 973 999 }; 974 1000 EXPORT_SYMBOL_GPL(key_type_encrypted); 975 1001 976 - static void encrypted_shash_release(void) 977 - { 978 - if (hashalg) 979 - crypto_free_shash(hashalg); 980 - if (hmacalg) 981 - crypto_free_shash(hmacalg); 982 - } 983 - 984 - static int __init encrypted_shash_alloc(void) 985 - { 986 - int ret; 987 - 988 - hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC); 989 - if (IS_ERR(hmacalg)) { 990 - pr_info("encrypted_key: could not allocate crypto %s\n", 991 - hmac_alg); 992 - return PTR_ERR(hmacalg); 993 - } 994 - 995 - hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC); 996 - if (IS_ERR(hashalg)) { 997 - pr_info("encrypted_key: could not allocate crypto %s\n", 998 - hash_alg); 999 - ret = PTR_ERR(hashalg); 1000 - goto hashalg_fail; 1001 - } 1002 - 1003 - return 0; 1004 - 1005 - hashalg_fail: 1006 - crypto_free_shash(hmacalg); 1007 - return ret; 1008 - } 1009 - 1010 1002 static int __init init_encrypted(void) 1011 1003 { 1012 1004 int ret; 1013 1005 1014 - ret = encrypted_shash_alloc(); 1015 - if (ret < 0) 1016 - return ret; 1006 + hash_tfm = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC); 1007 + if (IS_ERR(hash_tfm)) { 1008 + pr_err("encrypted_key: can't allocate %s transform: %ld\n", 1009 + hash_alg, PTR_ERR(hash_tfm)); 1010 + return PTR_ERR(hash_tfm); 1011 + } 1012 + 1017 1013 ret = aes_get_sizes(); 1018 1014 if (ret < 0) 1019 1015 goto out; ··· 992 1048 goto out; 993 1049 return 0; 994 1050 out: 995 - encrypted_shash_release(); 1051 + crypto_free_shash(hash_tfm); 996 1052 return ret; 997 1053 998 1054 } 999 1055 1000 1056 static void __exit cleanup_encrypted(void) 1001 1057 { 1002 - encrypted_shash_release(); 1058 + crypto_free_shash(hash_tfm); 1003 1059 unregister_key_type(&key_type_encrypted); 1004 1060 } 1005 1061
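One detail of the hunk above is the switch from memcmp() to crypto_memneq() when checking the HMAC, so verification time no longer depends on how many leading bytes happen to match. A minimal constant-time comparison along the same lines, for illustration only (not the kernel implementation):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns nonzero if the buffers differ; the running time does not
     * depend on where the first mismatch is, unlike memcmp(). */
    static int memneq_sketch(const void *a, const void *b, size_t len)
    {
        const uint8_t *pa = a, *pb = b;
        uint8_t diff = 0;
        size_t i;

        for (i = 0; i < len; i++)
            diff |= pa[i] ^ pb[i];

        return diff;
    }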
+1 -3
security/keys/gc.c
··· 158 158 159 159 kfree(key->description); 160 160 161 - #ifdef KEY_DEBUGGING 162 - key->magic = KEY_DEBUG_MAGIC_X; 163 - #endif 161 + memzero_explicit(key, sizeof(*key)); 164 162 kmem_cache_free(key_jar, key); 165 163 } 166 164 }
+6 -10
security/keys/key.c
··· 660 660 goto error; 661 661 662 662 found: 663 - /* pretend it doesn't exist if it is awaiting deletion */ 664 - if (refcount_read(&key->usage) == 0) 665 - goto not_found; 666 - 667 - /* this races with key_put(), but that doesn't matter since key_put() 668 - * doesn't actually change the key 663 + /* A key is allowed to be looked up only if someone still owns a 664 + * reference to it - otherwise it's awaiting the gc. 669 665 */ 670 - __key_get(key); 666 + if (!refcount_inc_not_zero(&key->usage)) 667 + goto not_found; 671 668 672 669 error: 673 670 spin_unlock(&key_serial_lock); ··· 963 966 /* the key must be writable */ 964 967 ret = key_permission(key_ref, KEY_NEED_WRITE); 965 968 if (ret < 0) 966 - goto error; 969 + return ret; 967 970 968 971 /* attempt to update it if supported */ 969 - ret = -EOPNOTSUPP; 970 972 if (!key->type->update) 971 - goto error; 973 + return -EOPNOTSUPP; 972 974 973 975 memset(&prep, 0, sizeof(prep)); 974 976 prep.data = payload;
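key_lookup() above now takes its reference with refcount_inc_not_zero() instead of reading the count and bumping it afterwards, which closes the window against key_put() dropping the last reference in between. The usual shape of that primitive, sketched with C11 atomics (get_ref_if_live() is an illustrative name, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only if at least one other holder still exists. */
    static bool get_ref_if_live(atomic_uint *refs)
    {
        unsigned int old = atomic_load(refs);

        do {
            if (old == 0)
                return false;   /* already on its way to the collector */
        } while (!atomic_compare_exchange_weak(refs, &old, old + 1));

        return true;
    }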
+11 -5
security/keys/keyctl.c
··· 99 99 /* pull the payload in if one was supplied */ 100 100 payload = NULL; 101 101 102 - if (_payload) { 102 + if (plen) { 103 103 ret = -ENOMEM; 104 104 payload = kvmalloc(plen, GFP_KERNEL); 105 105 if (!payload) ··· 132 132 133 133 key_ref_put(keyring_ref); 134 134 error3: 135 - kvfree(payload); 135 + if (payload) { 136 + memzero_explicit(payload, plen); 137 + kvfree(payload); 138 + } 136 139 error2: 137 140 kfree(description); 138 141 error: ··· 327 324 328 325 /* pull the payload in if one was supplied */ 329 326 payload = NULL; 330 - if (_payload) { 327 + if (plen) { 331 328 ret = -ENOMEM; 332 329 payload = kmalloc(plen, GFP_KERNEL); 333 330 if (!payload) ··· 350 347 351 348 key_ref_put(key_ref); 352 349 error2: 353 - kfree(payload); 350 + kzfree(payload); 354 351 error: 355 352 return ret; 356 353 } ··· 1096 1093 keyctl_change_reqkey_auth(NULL); 1097 1094 1098 1095 error2: 1099 - kvfree(payload); 1096 + if (payload) { 1097 + memzero_explicit(payload, plen); 1098 + kvfree(payload); 1099 + } 1100 1100 error: 1101 1101 return ret; 1102 1102 }
+6 -6
security/keys/keyring.c
··· 706 706 * Non-keyrings avoid the leftmost branch of the root entirely (root 707 707 * slots 1-15). 708 708 */ 709 - ptr = ACCESS_ONCE(keyring->keys.root); 709 + ptr = READ_ONCE(keyring->keys.root); 710 710 if (!ptr) 711 711 goto not_this_keyring; 712 712 ··· 720 720 if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) 721 721 goto not_this_keyring; 722 722 723 - ptr = ACCESS_ONCE(shortcut->next_node); 723 + ptr = READ_ONCE(shortcut->next_node); 724 724 node = assoc_array_ptr_to_node(ptr); 725 725 goto begin_node; 726 726 } ··· 740 740 if (assoc_array_ptr_is_shortcut(ptr)) { 741 741 shortcut = assoc_array_ptr_to_shortcut(ptr); 742 742 smp_read_barrier_depends(); 743 - ptr = ACCESS_ONCE(shortcut->next_node); 743 + ptr = READ_ONCE(shortcut->next_node); 744 744 BUG_ON(!assoc_array_ptr_is_node(ptr)); 745 745 } 746 746 node = assoc_array_ptr_to_node(ptr); ··· 752 752 ascend_to_node: 753 753 /* Go through the slots in a node */ 754 754 for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { 755 - ptr = ACCESS_ONCE(node->slots[slot]); 755 + ptr = READ_ONCE(node->slots[slot]); 756 756 757 757 if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) 758 758 goto descend_to_node; ··· 790 790 /* We've dealt with all the slots in the current node, so now we need 791 791 * to ascend to the parent and continue processing there. 792 792 */ 793 - ptr = ACCESS_ONCE(node->back_pointer); 793 + ptr = READ_ONCE(node->back_pointer); 794 794 slot = node->parent_slot; 795 795 796 796 if (ptr && assoc_array_ptr_is_shortcut(ptr)) { 797 797 shortcut = assoc_array_ptr_to_shortcut(ptr); 798 798 smp_read_barrier_depends(); 799 - ptr = ACCESS_ONCE(shortcut->back_pointer); 799 + ptr = READ_ONCE(shortcut->back_pointer); 800 800 slot = shortcut->parent_slot; 801 801 } 802 802 if (!ptr)
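The lockless keyring walk above moves from ACCESS_ONCE() to READ_ONCE(), the preferred accessor for such readers. For scalar types it amounts to a single volatile load that the compiler may neither merge nor repeat; a sketch of the idea, assuming the GCC/Clang __typeof__ extension:

    /* Roughly what READ_ONCE() boils down to for scalar types. */
    #define READ_ONCE_SKETCH(x) (*(const volatile __typeof__(x) *)&(x))

    struct node { struct node *next; int val; };

    /* Reader that snapshots the head pointer exactly once. */
    static int first_val(struct node **headp)
    {
        struct node *n = READ_ONCE_SKETCH(*headp);

        return n ? n->val : -1;
    }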
+4 -3
security/keys/process_keys.c
··· 809 809 ret = PTR_ERR(keyring); 810 810 goto error2; 811 811 } else if (keyring == new->session_keyring) { 812 - key_put(keyring); 813 812 ret = 0; 814 - goto error2; 813 + goto error3; 815 814 } 816 815 817 816 /* we've got a keyring - now to install it */ 818 817 ret = install_session_keyring_to_cred(new, keyring); 819 818 if (ret < 0) 820 - goto error2; 819 + goto error3; 821 820 822 821 commit_creds(new); 823 822 mutex_unlock(&key_session_mutex); ··· 826 827 okay: 827 828 return ret; 828 829 830 + error3: 831 + key_put(keyring); 829 832 error2: 830 833 mutex_unlock(&key_session_mutex); 831 834 error:
+22 -28
security/keys/trusted.c
··· 70 70 } 71 71 72 72 ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); 73 - kfree(sdesc); 73 + kzfree(sdesc); 74 74 return ret; 75 75 } 76 76 ··· 114 114 if (!ret) 115 115 ret = crypto_shash_final(&sdesc->shash, digest); 116 116 out: 117 - kfree(sdesc); 117 + kzfree(sdesc); 118 118 return ret; 119 119 } 120 120 ··· 165 165 paramdigest, TPM_NONCE_SIZE, h1, 166 166 TPM_NONCE_SIZE, h2, 1, &c, 0, 0); 167 167 out: 168 - kfree(sdesc); 168 + kzfree(sdesc); 169 169 return ret; 170 170 } 171 171 ··· 246 246 if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) 247 247 ret = -EINVAL; 248 248 out: 249 - kfree(sdesc); 249 + kzfree(sdesc); 250 250 return ret; 251 251 } 252 252 ··· 347 347 if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) 348 348 ret = -EINVAL; 349 349 out: 350 - kfree(sdesc); 350 + kzfree(sdesc); 351 351 return ret; 352 352 } 353 353 ··· 564 564 *bloblen = storedsize; 565 565 } 566 566 out: 567 - kfree(td); 567 + kzfree(td); 568 568 return ret; 569 569 } 570 570 ··· 678 678 if (ret < 0) 679 679 pr_info("trusted_key: srkseal failed (%d)\n", ret); 680 680 681 - kfree(tb); 681 + kzfree(tb); 682 682 return ret; 683 683 } 684 684 ··· 703 703 /* pull migratable flag out of sealed key */ 704 704 p->migratable = p->key[--p->key_len]; 705 705 706 - kfree(tb); 706 + kzfree(tb); 707 707 return ret; 708 708 } 709 709 ··· 1037 1037 if (!ret && options->pcrlock) 1038 1038 ret = pcrlock(options->pcrlock); 1039 1039 out: 1040 - kfree(datablob); 1041 - kfree(options); 1040 + kzfree(datablob); 1041 + kzfree(options); 1042 1042 if (!ret) 1043 1043 rcu_assign_keypointer(key, payload); 1044 1044 else 1045 - kfree(payload); 1045 + kzfree(payload); 1046 1046 return ret; 1047 1047 } 1048 1048 ··· 1051 1051 struct trusted_key_payload *p; 1052 1052 1053 1053 p = container_of(rcu, struct trusted_key_payload, rcu); 1054 - memset(p->key, 0, p->key_len); 1055 - kfree(p); 1054 + kzfree(p); 1056 1055 } 1057 1056 1058 1057 /* ··· 1093 1094 ret = datablob_parse(datablob, new_p, new_o); 1094 1095 if (ret != Opt_update) { 1095 1096 ret = -EINVAL; 1096 - kfree(new_p); 1097 + kzfree(new_p); 1097 1098 goto out; 1098 1099 } 1099 1100 1100 1101 if (!new_o->keyhandle) { 1101 1102 ret = -EINVAL; 1102 - kfree(new_p); 1103 + kzfree(new_p); 1103 1104 goto out; 1104 1105 } 1105 1106 ··· 1113 1114 ret = key_seal(new_p, new_o); 1114 1115 if (ret < 0) { 1115 1116 pr_info("trusted_key: key_seal failed (%d)\n", ret); 1116 - kfree(new_p); 1117 + kzfree(new_p); 1117 1118 goto out; 1118 1119 } 1119 1120 if (new_o->pcrlock) { 1120 1121 ret = pcrlock(new_o->pcrlock); 1121 1122 if (ret < 0) { 1122 1123 pr_info("trusted_key: pcrlock failed (%d)\n", ret); 1123 - kfree(new_p); 1124 + kzfree(new_p); 1124 1125 goto out; 1125 1126 } 1126 1127 } 1127 1128 rcu_assign_keypointer(key, new_p); 1128 1129 call_rcu(&p->rcu, trusted_rcu_free); 1129 1130 out: 1130 - kfree(datablob); 1131 - kfree(new_o); 1131 + kzfree(datablob); 1132 + kzfree(new_o); 1132 1133 return ret; 1133 1134 } 1134 1135 ··· 1157 1158 for (i = 0; i < p->blob_len; i++) 1158 1159 bufp = hex_byte_pack(bufp, p->blob[i]); 1159 1160 if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { 1160 - kfree(ascii_buf); 1161 + kzfree(ascii_buf); 1161 1162 return -EFAULT; 1162 1163 } 1163 - kfree(ascii_buf); 1164 + kzfree(ascii_buf); 1164 1165 return 2 * p->blob_len; 1165 1166 } 1166 1167 1167 1168 /* 1168 - * trusted_destroy - before freeing the key, clear the decrypted data 1169 + * trusted_destroy - clear and free the key's payload 1169 1170 */ 1170 1171 static void 
trusted_destroy(struct key *key) 1171 1172 { 1172 - struct trusted_key_payload *p = key->payload.data[0]; 1173 - 1174 - if (!p) 1175 - return; 1176 - memset(p->key, 0, p->key_len); 1177 - kfree(key->payload.data[0]); 1173 + kzfree(key->payload.data[0]); 1178 1174 } 1179 1175 1180 1176 struct key_type key_type_trusted = {
+12 -4
security/keys/user_defined.c
··· 86 86 */ 87 87 void user_free_preparse(struct key_preparsed_payload *prep) 88 88 { 89 - kfree(prep->payload.data[0]); 89 + kzfree(prep->payload.data[0]); 90 90 } 91 91 EXPORT_SYMBOL_GPL(user_free_preparse); 92 + 93 + static void user_free_payload_rcu(struct rcu_head *head) 94 + { 95 + struct user_key_payload *payload; 96 + 97 + payload = container_of(head, struct user_key_payload, rcu); 98 + kzfree(payload); 99 + } 92 100 93 101 /* 94 102 * update a user defined key ··· 120 112 prep->payload.data[0] = NULL; 121 113 122 114 if (zap) 123 - kfree_rcu(zap, rcu); 115 + call_rcu(&zap->rcu, user_free_payload_rcu); 124 116 return ret; 125 117 } 126 118 EXPORT_SYMBOL_GPL(user_update); ··· 138 130 139 131 if (upayload) { 140 132 rcu_assign_keypointer(key, NULL); 141 - kfree_rcu(upayload, rcu); 133 + call_rcu(&upayload->rcu, user_free_payload_rcu); 142 134 } 143 135 } 144 136 ··· 151 143 { 152 144 struct user_key_payload *upayload = key->payload.data[0]; 153 145 154 - kfree(upayload); 146 + kzfree(upayload); 155 147 } 156 148 157 149 EXPORT_SYMBOL_GPL(user_destroy);
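The user-defined key type above, like the trusted and encrypted types earlier in the series, now frees payloads with kzfree() via an explicit RCU callback, so key material is wiped before the memory returns to the allocator; kfree_rcu() cannot do that zeroing step. A userspace sketch of the zero-then-free idea, assuming a libc that provides explicit_bzero() (glibc >= 2.25 / BSD); the kernel's kzfree() finds the size via ksize(), whereas the sketch takes it from the caller.

    #include <stdlib.h>
    #include <string.h>

    /* Wipe key material before handing the memory back; a plain memset()
     * right before free() may be optimized away, hence explicit_bzero(). */
    static void zero_free(void *p, size_t len)
    {
        if (!p)
            return;
        explicit_bzero(p, len);
        free(p);
    }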
+2 -3
security/selinux/hooks.c
··· 1106 1106 1107 1107 opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), 1108 1108 GFP_KERNEL); 1109 - if (!opts->mnt_opts_flags) { 1110 - kfree(opts->mnt_opts); 1109 + if (!opts->mnt_opts_flags) 1111 1110 goto out_err; 1112 - } 1113 1111 1114 1112 if (fscontext) { 1115 1113 opts->mnt_opts[num_mnt_opts] = fscontext; ··· 1130 1132 return 0; 1131 1133 1132 1134 out_err: 1135 + security_free_mnt_opts(opts); 1133 1136 kfree(context); 1134 1137 kfree(defcontext); 1135 1138 kfree(fscontext);
+2 -2
sound/core/pcm_lib.c
··· 2492 2492 struct snd_pcm_substream *substream; 2493 2493 const struct snd_pcm_chmap_elem *map; 2494 2494 2495 - if (snd_BUG_ON(!info->chmap)) 2495 + if (!info->chmap) 2496 2496 return -EINVAL; 2497 2497 substream = snd_pcm_chmap_substream(info, idx); 2498 2498 if (!substream) ··· 2524 2524 unsigned int __user *dst; 2525 2525 int c, count = 0; 2526 2526 2527 - if (snd_BUG_ON(!info->chmap)) 2527 + if (!info->chmap) 2528 2528 return -EINVAL; 2529 2529 if (size < 8) 2530 2530 return -ENOMEM;
+5 -2
sound/core/timer.c
··· 1618 1618 if (err < 0) 1619 1619 goto __err; 1620 1620 1621 + tu->qhead = tu->qtail = tu->qused = 0; 1621 1622 kfree(tu->queue); 1622 1623 tu->queue = NULL; 1623 1624 kfree(tu->tqueue); ··· 1960 1959 1961 1960 tu = file->private_data; 1962 1961 unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read); 1962 + mutex_lock(&tu->ioctl_lock); 1963 1963 spin_lock_irq(&tu->qlock); 1964 1964 while ((long)count - result >= unit) { 1965 1965 while (!tu->qused) { ··· 1976 1974 add_wait_queue(&tu->qchange_sleep, &wait); 1977 1975 1978 1976 spin_unlock_irq(&tu->qlock); 1977 + mutex_unlock(&tu->ioctl_lock); 1979 1978 schedule(); 1979 + mutex_lock(&tu->ioctl_lock); 1980 1980 spin_lock_irq(&tu->qlock); 1981 1981 1982 1982 remove_wait_queue(&tu->qchange_sleep, &wait); ··· 1998 1994 tu->qused--; 1999 1995 spin_unlock_irq(&tu->qlock); 2000 1996 2001 - mutex_lock(&tu->ioctl_lock); 2002 1997 if (tu->tread) { 2003 1998 if (copy_to_user(buffer, &tu->tqueue[qhead], 2004 1999 sizeof(struct snd_timer_tread))) ··· 2007 2004 sizeof(struct snd_timer_read))) 2008 2005 err = -EFAULT; 2009 2006 } 2010 - mutex_unlock(&tu->ioctl_lock); 2011 2007 2012 2008 spin_lock_irq(&tu->qlock); 2013 2009 if (err < 0) ··· 2016 2014 } 2017 2015 _error: 2018 2016 spin_unlock_irq(&tu->qlock); 2017 + mutex_unlock(&tu->ioctl_lock); 2019 2018 return result > 0 ? result : err; 2020 2019 } 2021 2020
+6 -2
sound/firewire/amdtp-stream.c
··· 682 682 cycle = increment_cycle_count(cycle, 1); 683 683 if (s->handle_packet(s, 0, cycle, i) < 0) { 684 684 s->packet_index = -1; 685 - amdtp_stream_pcm_abort(s); 685 + if (in_interrupt()) 686 + amdtp_stream_pcm_abort(s); 687 + WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); 686 688 return; 687 689 } 688 690 } ··· 736 734 /* Queueing error or detecting invalid payload. */ 737 735 if (i < packets) { 738 736 s->packet_index = -1; 739 - amdtp_stream_pcm_abort(s); 737 + if (in_interrupt()) 738 + amdtp_stream_pcm_abort(s); 739 + WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); 740 740 return; 741 741 } 742 742
+1 -1
sound/firewire/amdtp-stream.h
··· 135 135 /* For a PCM substream processing. */ 136 136 struct snd_pcm_substream *pcm; 137 137 struct tasklet_struct period_tasklet; 138 - unsigned int pcm_buffer_pointer; 138 + snd_pcm_uframes_t pcm_buffer_pointer; 139 139 unsigned int pcm_period_pointer; 140 140 141 141 /* To wait for first packet. */
+2
sound/pci/hda/hda_codec.h
··· 295 295 296 296 #define list_for_each_codec(c, bus) \ 297 297 list_for_each_entry(c, &(bus)->core.codec_list, core.list) 298 + #define list_for_each_codec_safe(c, n, bus) \ 299 + list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list) 298 300 299 301 /* snd_hda_codec_read/write optional flags */ 300 302 #define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0)
+6 -2
sound/pci/hda/hda_controller.c
··· 1337 1337 /* configure each codec instance */ 1338 1338 int azx_codec_configure(struct azx *chip) 1339 1339 { 1340 - struct hda_codec *codec; 1341 - list_for_each_codec(codec, &chip->bus) { 1340 + struct hda_codec *codec, *next; 1341 + 1342 + /* use _safe version here since snd_hda_codec_configure() deregisters 1343 + * the device upon error and deletes itself from the bus list. 1344 + */ 1345 + list_for_each_codec_safe(codec, next, &chip->bus) { 1342 1346 snd_hda_codec_configure(codec); 1343 1347 } 1344 1348 return 0;
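azx_codec_configure() above switches to the _safe list iterator because snd_hda_codec_configure() can unlink and free the codec it is visiting; the plain iterator would then chase a pointer out of a deleted node. The same pattern on a bare singly linked list, for illustration:

    #include <stdlib.h>

    struct item {
        struct item *next;
        int obsolete;
    };

    /* Walk a list while possibly deleting the current entry: remember
     * ->next before the node can be freed, which is what the kernel's
     * _safe iterators do with their extra cursor variable. */
    static void prune(struct item **headp)
    {
        struct item *it = *headp, *next;

        while (it) {
            next = it->next;
            if (it->obsolete) {
                *headp = next;
                free(it);
            } else {
                headp = &it->next;
            }
            it = next;
        }
    }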
+1
sound/pci/hda/hda_generic.c
··· 3174 3174 spec->input_paths[i][nums]); 3175 3175 spec->input_paths[i][nums] = 3176 3176 spec->input_paths[i][n]; 3177 + spec->input_paths[i][n] = 0; 3177 3178 } 3178 3179 } 3179 3180 nums++;
+8 -3
sound/pci/hda/hda_intel.c
··· 370 370 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) 371 371 #define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0) 372 372 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) 373 + #define IS_BXT_T(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x1a98) 373 374 #define IS_GLK(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x3198) 374 - #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ 375 - IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci) || \ 376 - IS_GLK(pci) 375 + #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348) 376 + #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci) || \ 377 + IS_BXT_T(pci) || IS_KBL(pci) || IS_KBL_LP(pci) || \ 378 + IS_KBL_H(pci) || IS_GLK(pci) || IS_CFL(pci)) 377 379 378 380 static char *driver_short_names[] = { 379 381 [AZX_DRIVER_ICH] = "HDA Intel", ··· 2380 2378 /* Kabylake-H */ 2381 2379 { PCI_DEVICE(0x8086, 0xa2f0), 2382 2380 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, 2381 + /* Coffelake */ 2382 + { PCI_DEVICE(0x8086, 0xa348), 2383 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE}, 2383 2384 /* Broxton-P(Apollolake) */ 2384 2385 { PCI_DEVICE(0x8086, 0x5a98), 2385 2386 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+8 -7
sound/pci/hda/patch_realtek.c
··· 2324 2324 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), 2325 2325 2326 2326 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), 2327 - SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 2328 - SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), 2329 2327 SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), 2330 2328 SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), 2329 + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 2331 2330 SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), 2331 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), 2332 2332 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), 2333 2333 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), 2334 2334 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), ··· 5854 5854 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 5855 5855 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5856 5856 SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 5857 + SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5857 5858 SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5859 + SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5860 + SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), 5861 + SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), 5858 5862 SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), 5859 5863 SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), 5860 5864 SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), ··· 5866 5862 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), 5867 5863 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 5868 5864 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 5865 + SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), 5869 5866 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), 5870 - SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5871 5867 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5872 - SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5873 - SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), 5874 - SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), 5875 - SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), 5868 + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5876 5869 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), 5877 5870 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), 5878 5871 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+9
sound/soc/atmel/atmel-classd.c
··· 301 301 return 0; 302 302 } 303 303 304 + static int atmel_classd_codec_resume(struct snd_soc_codec *codec) 305 + { 306 + struct snd_soc_card *card = snd_soc_codec_get_drvdata(codec); 307 + struct atmel_classd *dd = snd_soc_card_get_drvdata(card); 308 + 309 + return regcache_sync(dd->regmap); 310 + } 311 + 304 312 static struct regmap *atmel_classd_codec_get_remap(struct device *dev) 305 313 { 306 314 return dev_get_regmap(dev, NULL); ··· 316 308 317 309 static struct snd_soc_codec_driver soc_codec_dev_classd = { 318 310 .probe = atmel_classd_codec_probe, 311 + .resume = atmel_classd_codec_resume, 319 312 .get_regmap = atmel_classd_codec_get_remap, 320 313 .component_driver = { 321 314 .controls = atmel_classd_snd_controls,
+1 -1
sound/soc/codecs/da7213.c
··· 772 772 ++i; 773 773 msleep(50); 774 774 } 775 - } while ((i < DA7213_SRM_CHECK_RETRIES) & (!srm_lock)); 775 + } while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock)); 776 776 777 777 if (!srm_lock) 778 778 dev_warn(codec->dev, "SRM failed to lock\n");
+7
sound/soc/codecs/rt286.c
··· 1108 1108 DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform") 1109 1109 } 1110 1110 }, 1111 + { 1112 + .ident = "Thinkpad Helix 2nd", 1113 + .matches = { 1114 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 1115 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd") 1116 + } 1117 + }, 1111 1118 1112 1119 { } 1113 1120 };
+1 -1
sound/soc/generic/simple-card.c
··· 202 202 if (ret < 0) 203 203 return ret; 204 204 205 - ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX); 205 + ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX); 206 206 if (ret < 0) 207 207 return ret; 208 208
+5
sound/soc/intel/skylake/skl-sst-ipc.c
··· 413 413 u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK; 414 414 u64 *ipc_header = (u64 *)(&header); 415 415 struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc); 416 + unsigned long flags; 416 417 418 + spin_lock_irqsave(&ipc->dsp->spinlock, flags); 417 419 msg = skl_ipc_reply_get_msg(ipc, *ipc_header); 420 + spin_unlock_irqrestore(&ipc->dsp->spinlock, flags); 418 421 if (msg == NULL) { 419 422 dev_dbg(ipc->dev, "ipc: rx list is empty\n"); 420 423 return; ··· 459 456 } 460 457 } 461 458 459 + spin_lock_irqsave(&ipc->dsp->spinlock, flags); 462 460 list_del(&msg->list); 463 461 sst_ipc_tx_msg_reply_complete(ipc, msg); 462 + spin_unlock_irqrestore(&ipc->dsp->spinlock, flags); 464 463 } 465 464 466 465 irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
+1 -1
sound/soc/intel/skylake/skl-topology.c
··· 2502 2502 2503 2503 if (ret < 0) 2504 2504 return ret; 2505 - tkn_count += ret; 2505 + tkn_count = ret; 2506 2506 2507 2507 tuple_size += tkn_count * 2508 2508 sizeof(struct snd_soc_tplg_vendor_string_elem);
+88 -74
sound/soc/intel/skylake/skl.c
··· 410 410 struct skl *skl = ebus_to_skl(ebus); 411 411 struct hdac_bus *bus = ebus_to_hbus(ebus); 412 412 413 - skl->init_failed = 1; /* to be sure */ 413 + skl->init_done = 0; /* to be sure */ 414 414 415 415 snd_hdac_ext_stop_streams(ebus); 416 416 ··· 428 428 429 429 snd_hdac_ext_bus_exit(ebus); 430 430 431 + cancel_work_sync(&skl->probe_work); 431 432 if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 432 433 snd_hdac_i915_exit(&ebus->bus); 434 + 433 435 return 0; 434 436 } 435 437 ··· 568 566 .get_response = snd_hdac_bus_get_response, 569 567 }; 570 568 569 + static int skl_i915_init(struct hdac_bus *bus) 570 + { 571 + int err; 572 + 573 + /* 574 + * The HDMI codec is in GPU so we need to ensure that it is powered 575 + * up and ready for probe 576 + */ 577 + err = snd_hdac_i915_init(bus); 578 + if (err < 0) 579 + return err; 580 + 581 + err = snd_hdac_display_power(bus, true); 582 + if (err < 0) 583 + dev_err(bus->dev, "Cannot turn on display power on i915\n"); 584 + 585 + return err; 586 + } 587 + 588 + static void skl_probe_work(struct work_struct *work) 589 + { 590 + struct skl *skl = container_of(work, struct skl, probe_work); 591 + struct hdac_ext_bus *ebus = &skl->ebus; 592 + struct hdac_bus *bus = ebus_to_hbus(ebus); 593 + struct hdac_ext_link *hlink = NULL; 594 + int err; 595 + 596 + if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 597 + err = skl_i915_init(bus); 598 + if (err < 0) 599 + return; 600 + } 601 + 602 + err = skl_init_chip(bus, true); 603 + if (err < 0) { 604 + dev_err(bus->dev, "Init chip failed with err: %d\n", err); 605 + goto out_err; 606 + } 607 + 608 + /* codec detection */ 609 + if (!bus->codec_mask) 610 + dev_info(bus->dev, "no hda codecs found!\n"); 611 + 612 + /* create codec instances */ 613 + err = skl_codec_create(ebus); 614 + if (err < 0) 615 + goto out_err; 616 + 617 + if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 618 + err = snd_hdac_display_power(bus, false); 619 + if (err < 0) { 620 + dev_err(bus->dev, "Cannot turn off display power on i915\n"); 621 + return; 622 + } 623 + } 624 + 625 + /* register platform dai and controls */ 626 + err = skl_platform_register(bus->dev); 627 + if (err < 0) 628 + return; 629 + /* 630 + * we are done probing so decrement link counts 631 + */ 632 + list_for_each_entry(hlink, &ebus->hlink_list, list) 633 + snd_hdac_ext_bus_link_put(ebus, hlink); 634 + 635 + /* configure PM */ 636 + pm_runtime_put_noidle(bus->dev); 637 + pm_runtime_allow(bus->dev); 638 + skl->init_done = 1; 639 + 640 + return; 641 + 642 + out_err: 643 + if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 644 + err = snd_hdac_display_power(bus, false); 645 + } 646 + 571 647 /* 572 648 * constructor 573 649 */ ··· 673 593 snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops); 674 594 ebus->bus.use_posbuf = 1; 675 595 skl->pci = pci; 596 + INIT_WORK(&skl->probe_work, skl_probe_work); 676 597 677 598 ebus->bus.bdl_pos_adj = 0; 678 599 679 600 *rskl = skl; 680 601 681 602 return 0; 682 - } 683 - 684 - static int skl_i915_init(struct hdac_bus *bus) 685 - { 686 - int err; 687 - 688 - /* 689 - * The HDMI codec is in GPU so we need to ensure that it is powered 690 - * up and ready for probe 691 - */ 692 - err = snd_hdac_i915_init(bus); 693 - if (err < 0) 694 - return err; 695 - 696 - err = snd_hdac_display_power(bus, true); 697 - if (err < 0) { 698 - dev_err(bus->dev, "Cannot turn on display power on i915\n"); 699 - return err; 700 - } 701 - 702 - return err; 703 603 } 704 604 705 605 static int skl_first_init(struct hdac_ext_bus *ebus) ··· 744 684 /* initialize chip */ 745 
685 skl_init_pci(skl); 746 686 747 - if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 748 - err = skl_i915_init(bus); 749 - if (err < 0) 750 - return err; 751 - } 752 - 753 - skl_init_chip(bus, true); 754 - 755 - /* codec detection */ 756 - if (!bus->codec_mask) { 757 - dev_info(bus->dev, "no hda codecs found!\n"); 758 - } 759 - 760 - return 0; 687 + return skl_init_chip(bus, true); 761 688 } 762 689 763 690 static int skl_probe(struct pci_dev *pci, ··· 753 706 struct skl *skl; 754 707 struct hdac_ext_bus *ebus = NULL; 755 708 struct hdac_bus *bus = NULL; 756 - struct hdac_ext_link *hlink = NULL; 757 709 int err; 758 710 759 711 /* we use ext core ops, so provide NULL for ops here */ ··· 775 729 776 730 if (skl->nhlt == NULL) { 777 731 err = -ENODEV; 778 - goto out_display_power_off; 732 + goto out_free; 779 733 } 780 734 781 735 err = skl_nhlt_create_sysfs(skl); ··· 806 760 if (bus->mlcap) 807 761 snd_hdac_ext_bus_get_ml_capabilities(ebus); 808 762 763 + snd_hdac_bus_stop_chip(bus); 764 + 809 765 /* create device for soc dmic */ 810 766 err = skl_dmic_device_register(skl); 811 767 if (err < 0) 812 768 goto out_dsp_free; 813 769 814 - /* register platform dai and controls */ 815 - err = skl_platform_register(bus->dev); 816 - if (err < 0) 817 - goto out_dmic_free; 818 - 819 - /* create codec instances */ 820 - err = skl_codec_create(ebus); 821 - if (err < 0) 822 - goto out_unregister; 823 - 824 - if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) { 825 - err = snd_hdac_display_power(bus, false); 826 - if (err < 0) { 827 - dev_err(bus->dev, "Cannot turn off display power on i915\n"); 828 - return err; 829 - } 830 - } 831 - 832 - /* 833 - * we are done probling so decrement link counts 834 - */ 835 - list_for_each_entry(hlink, &ebus->hlink_list, list) 836 - snd_hdac_ext_bus_link_put(ebus, hlink); 837 - 838 - /* configure PM */ 839 - pm_runtime_put_noidle(bus->dev); 840 - pm_runtime_allow(bus->dev); 770 + schedule_work(&skl->probe_work); 841 771 842 772 return 0; 843 773 844 - out_unregister: 845 - skl_platform_unregister(bus->dev); 846 - out_dmic_free: 847 - skl_dmic_device_unregister(skl); 848 774 out_dsp_free: 849 775 skl_free_dsp(skl); 850 776 out_mach_free: 851 777 skl_machine_device_unregister(skl); 852 778 out_nhlt_free: 853 779 skl_nhlt_free(skl->nhlt); 854 - out_display_power_off: 855 - if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) 856 - snd_hdac_display_power(bus, false); 857 780 out_free: 858 - skl->init_failed = 1; 859 781 skl_free(ebus); 860 782 861 783 return err; ··· 842 828 843 829 skl = ebus_to_skl(ebus); 844 830 845 - if (skl->init_failed) 831 + if (!skl->init_done) 846 832 return; 847 833 848 834 snd_hdac_ext_stop_streams(ebus);
+3 -1
sound/soc/intel/skylake/skl.h
··· 46 46 struct hdac_ext_bus ebus; 47 47 struct pci_dev *pci; 48 48 49 - unsigned int init_failed:1; /* delayed init failed */ 49 + unsigned int init_done:1; /* delayed init status */ 50 50 struct platform_device *dmic_dev; 51 51 struct platform_device *i2s_dev; 52 52 struct snd_soc_platform *platform; ··· 64 64 const struct firmware *tplg; 65 65 66 66 int supend_active; 67 + 68 + struct work_struct probe_work; 67 69 }; 68 70 69 71 #define skl_to_ebus(s) (&(s)->ebus)
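The skl.c and skl.h hunks above defer the slow half of the Skylake audio probe (i915 power-up, chip init, codec and platform registration) into a probe_work item that is scheduled at the end of skl_probe() and synchronised with cancel_work_sync() on the teardown path; the old init_failed flag is inverted into init_done so that shutdown only touches a device the worker actually finished bringing up. A rough user-space analogue of that defer-then-synchronise pattern, using a pthread in place of the kernel workqueue (names here are illustrative, not the driver's API; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct device_ctx {
        pthread_t probe_thread;
        int init_done;          /* set by the deferred worker on success */
};

static void *probe_work(void *arg)
{
        struct device_ctx *ctx = arg;

        sleep(1);               /* stands in for codec detection and registration */
        ctx->init_done = 1;
        return NULL;
}

static int dev_probe(struct device_ctx *ctx)
{
        ctx->init_done = 0;
        /* the fast part of probe happens here; the slow part is deferred */
        return pthread_create(&ctx->probe_thread, NULL, probe_work, ctx);
}

static void dev_remove(struct device_ctx *ctx)
{
        /* analogue of cancel_work_sync(): wait for the deferred init to settle */
        pthread_join(ctx->probe_thread, NULL);
        if (!ctx->init_done)
                return;         /* deferred init never completed, nothing to undo */
        printf("tearing down a fully initialised device\n");
}

int main(void)
{
        struct device_ctx ctx;

        if (dev_probe(&ctx) == 0)
                dev_remove(&ctx);
        return 0;
}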
+4 -2
sound/soc/sh/rcar/adg.c
··· 507 507 rbga = rbgx; 508 508 adg->rbga_rate_for_441khz = rate / div; 509 509 ckr |= brg_table[i] << 20; 510 - if (req_441kHz_rate) 510 + if (req_441kHz_rate && 511 + !(adg_mode_flags(adg) & AUDIO_OUT_48)) 511 512 parent_clk_name = __clk_get_name(clk); 512 513 } 513 514 } ··· 523 522 rbgb = rbgx; 524 523 adg->rbgb_rate_for_48khz = rate / div; 525 524 ckr |= brg_table[i] << 16; 526 - if (req_48kHz_rate) 525 + if (req_48kHz_rate && 526 + (adg_mode_flags(adg) & AUDIO_OUT_48)) 527 527 parent_clk_name = __clk_get_name(clk); 528 528 } 529 529 }
+1
sound/soc/sh/rcar/cmd.c
··· 89 89 dev_dbg(dev, "ctu/mix path = 0x%08x", data); 90 90 91 91 rsnd_mod_write(mod, CMD_ROUTE_SLCT, data); 92 + rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1); 92 93 rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io)); 93 94 94 95 rsnd_adg_set_cmd_timsel_gen2(mod, io);
+51
sound/soc/sh/rcar/core.c
··· 343 343 return 0x76543210; 344 344 } 345 345 346 + u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod) 347 + { 348 + enum rsnd_mod_type playback_mods[] = { 349 + RSND_MOD_SRC, 350 + RSND_MOD_CMD, 351 + RSND_MOD_SSIU, 352 + }; 353 + enum rsnd_mod_type capture_mods[] = { 354 + RSND_MOD_CMD, 355 + RSND_MOD_SRC, 356 + RSND_MOD_SSIU, 357 + }; 358 + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 359 + struct rsnd_mod *tmod = NULL; 360 + enum rsnd_mod_type *mods = 361 + rsnd_io_is_play(io) ? 362 + playback_mods : capture_mods; 363 + int i; 364 + 365 + /* 366 + * This is needed for 24bit data 367 + * We need to shift 8bit 368 + * 369 + * Linux 24bit data is located as 0x00****** 370 + * HW 24bit data is located as 0x******00 371 + * 372 + */ 373 + switch (runtime->sample_bits) { 374 + case 16: 375 + return 0; 376 + case 32: 377 + break; 378 + } 379 + 380 + for (i = 0; i < ARRAY_SIZE(playback_mods); i++) { 381 + tmod = rsnd_io_to_mod(io, mods[i]); 382 + if (tmod) 383 + break; 384 + } 385 + 386 + if (tmod != mod) 387 + return 0; 388 + 389 + if (rsnd_io_is_play(io)) 390 + return (0 << 20) | /* shift to Left */ 391 + (8 << 16); /* 8bit */ 392 + else 393 + return (1 << 20) | /* shift to Right */ 394 + (8 << 16); /* 8bit */ 395 + } 396 + 346 397 /* 347 398 * rsnd_dai functions 348 399 */
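rsnd_get_busif_shift() above exists because Linux keeps 24-bit samples right-justified in a 32-bit word (0x00XXXXXX) while the R-Car hardware expects them left-justified (0xXXXXXX00); the helper picks the one module on the path that should apply an 8-bit shift and encodes the direction (bit 20) and width (bits 16..19) in BUSIF_MODE format, with callers OR-ing in the low bit the registers were already written with. A standalone sketch of that layout conversion (illustrative values only):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint32_t cpu_sample = 0x00123456;          /* Linux 24-bit layout: 0x00XXXXXX   */
        uint32_t hw_sample  = cpu_sample << 8;     /* playback: shift left to 0xXXXXXX00 */
        uint32_t back       = hw_sample >> 8;      /* capture: shift right back          */

        /* BUSIF_MODE-style encoding used above: direction in bit 20,
         * shift width (8 bits) in bits 16..19; callers OR in bit 0. */
        uint32_t playback_mode = (0u << 20) | (8u << 16) | 1u;
        uint32_t capture_mode  = (1u << 20) | (8u << 16) | 1u;

        printf("cpu %08" PRIx32 " -> hw %08" PRIx32 " -> cpu %08" PRIx32 "\n",
               cpu_sample, hw_sample, back);
        printf("playback BUSIF_MODE %#" PRIx32 ", capture BUSIF_MODE %#" PRIx32 "\n",
               playback_mode, capture_mode);
        return 0;
}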
+1
sound/soc/sh/rcar/gen.c
··· 236 236 RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc, 0x20), 237 237 RSND_GEN_M_REG(SRC_CTRL, 0x10, 0x20), 238 238 RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18, 0x20), 239 + RSND_GEN_M_REG(CMD_BUSIF_MODE, 0x184, 0x20), 239 240 RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188, 0x20), 240 241 RSND_GEN_M_REG(CMD_ROUTE_SLCT, 0x18c, 0x20), 241 242 RSND_GEN_M_REG(CMD_CTRL, 0x190, 0x20),
+2
sound/soc/sh/rcar/rsnd.h
··· 73 73 RSND_REG_SCU_SYS_INT_EN0, 74 74 RSND_REG_SCU_SYS_INT_EN1, 75 75 RSND_REG_CMD_CTRL, 76 + RSND_REG_CMD_BUSIF_MODE, 76 77 RSND_REG_CMD_BUSIF_DALIGN, 77 78 RSND_REG_CMD_ROUTE_SLCT, 78 79 RSND_REG_CMDOUT_TIMSEL, ··· 205 204 u32 mask, u32 data); 206 205 u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io); 207 206 u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io); 207 + u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod); 208 208 209 209 /* 210 210 * R-Car DMA
+10 -2
sound/soc/sh/rcar/src.c
··· 190 190 struct rsnd_priv *priv = rsnd_mod_to_priv(mod); 191 191 struct device *dev = rsnd_priv_to_dev(priv); 192 192 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 193 + int is_play = rsnd_io_is_play(io); 193 194 int use_src = 0; 194 195 u32 fin, fout; 195 196 u32 ifscr, fsrate, adinr; 196 197 u32 cr, route; 197 198 u32 bsdsr, bsisr; 199 + u32 i_busif, o_busif, tmp; 198 200 uint ratio; 199 201 200 202 if (!runtime) ··· 272 270 break; 273 271 } 274 272 273 + /* BUSIF_MODE */ 274 + tmp = rsnd_get_busif_shift(io, mod); 275 + i_busif = ( is_play ? tmp : 0) | 1; 276 + o_busif = (!is_play ? tmp : 0) | 1; 277 + 275 278 rsnd_mod_write(mod, SRC_ROUTE_MODE0, route); 276 279 277 280 rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */ ··· 288 281 rsnd_mod_write(mod, SRC_BSISR, bsisr); 289 282 rsnd_mod_write(mod, SRC_SRCIR, 0); /* cancel initialize */ 290 283 291 - rsnd_mod_write(mod, SRC_I_BUSIF_MODE, 1); 292 - rsnd_mod_write(mod, SRC_O_BUSIF_MODE, 1); 284 + rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif); 285 + rsnd_mod_write(mod, SRC_O_BUSIF_MODE, o_busif); 286 + 293 287 rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io)); 294 288 295 289 rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
+15 -3
sound/soc/sh/rcar/ssi.c
··· 302 302 * always use 32bit system word. 303 303 * see also rsnd_ssi_master_clk_enable() 304 304 */ 305 - cr_own = FORCE | SWL_32 | PDTA; 305 + cr_own = FORCE | SWL_32; 306 306 307 307 if (rdai->bit_clk_inv) 308 308 cr_own |= SCKP; ··· 550 550 struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); 551 551 u32 *buf = (u32 *)(runtime->dma_area + 552 552 rsnd_dai_pointer_offset(io, 0)); 553 + int shift = 0; 554 + 555 + switch (runtime->sample_bits) { 556 + case 32: 557 + shift = 8; 558 + break; 559 + } 553 560 554 561 /* 555 562 * 8/16/32 data can be assesse to TDR/RDR register ··· 564 557 * see rsnd_ssi_init() 565 558 */ 566 559 if (rsnd_io_is_play(io)) 567 - rsnd_mod_write(mod, SSITDR, *buf); 560 + rsnd_mod_write(mod, SSITDR, (*buf) << shift); 568 561 else 569 - *buf = rsnd_mod_read(mod, SSIRDR); 562 + *buf = (rsnd_mod_read(mod, SSIRDR) >> shift); 570 563 571 564 elapsed = rsnd_dai_pointer_update(io, sizeof(*buf)); 572 565 } ··· 716 709 struct rsnd_priv *priv) 717 710 { 718 711 struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); 712 + struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io); 713 + 714 + /* Do nothing for SSI parent mod */ 715 + if (ssi_parent_mod == mod) 716 + return 0; 719 717 720 718 /* PIO will request IRQ again */ 721 719 free_irq(ssi->irq, mod);
+2 -1
sound/soc/sh/rcar/ssiu.c
··· 144 144 (rsnd_io_is_play(io) ? 145 145 rsnd_runtime_channel_after_ctu(io) : 146 146 rsnd_runtime_channel_original(io))); 147 - rsnd_mod_write(mod, SSI_BUSIF_MODE, 1); 147 + rsnd_mod_write(mod, SSI_BUSIF_MODE, 148 + rsnd_get_busif_shift(io, mod) | 1); 148 149 rsnd_mod_write(mod, SSI_BUSIF_DALIGN, 149 150 rsnd_get_dalign(mod, io)); 150 151 }
+3 -2
sound/soc/soc-core.c
··· 2286 2286 list_for_each_entry(rtd, &card->rtd_list, list) 2287 2287 flush_delayed_work(&rtd->delayed_work); 2288 2288 2289 + /* free the ALSA card at first; this syncs with pending operations */ 2290 + snd_card_free(card->snd_card); 2291 + 2289 2292 /* remove and free each DAI */ 2290 2293 soc_remove_dai_links(card); 2291 2294 soc_remove_pcm_runtimes(card); ··· 2303 2300 if (card->remove) 2304 2301 card->remove(card); 2305 2302 2306 - snd_card_free(card->snd_card); 2307 2303 return 0; 2308 - 2309 2304 } 2310 2305 2311 2306 /* removes a socdev */
+12 -7
sound/usb/mixer_us16x08.c
··· 698 698 struct snd_usb_audio *chip = elem->head.mixer->chip; 699 699 struct snd_us16x08_meter_store *store = elem->private_data; 700 700 u8 meter_urb[64]; 701 - char tmp[sizeof(mix_init_msg2)] = {0}; 702 701 703 702 switch (kcontrol->private_value) { 704 - case 0: 705 - snd_us16x08_send_urb(chip, (char *)mix_init_msg1, 706 - sizeof(mix_init_msg1)); 703 + case 0: { 704 + char tmp[sizeof(mix_init_msg1)]; 705 + 706 + memcpy(tmp, mix_init_msg1, sizeof(mix_init_msg1)); 707 + snd_us16x08_send_urb(chip, tmp, 4); 707 708 snd_us16x08_recv_urb(chip, meter_urb, 708 709 sizeof(meter_urb)); 709 710 kcontrol->private_value++; 710 711 break; 712 + } 711 713 case 1: 712 714 snd_us16x08_recv_urb(chip, meter_urb, 713 715 sizeof(meter_urb)); ··· 720 718 sizeof(meter_urb)); 721 719 kcontrol->private_value++; 722 720 break; 723 - case 3: 721 + case 3: { 722 + char tmp[sizeof(mix_init_msg2)]; 723 + 724 724 memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2)); 725 725 tmp[2] = snd_get_meter_comp_index(store); 726 - snd_us16x08_send_urb(chip, tmp, sizeof(mix_init_msg2)); 726 + snd_us16x08_send_urb(chip, tmp, 10); 727 727 snd_us16x08_recv_urb(chip, meter_urb, 728 728 sizeof(meter_urb)); 729 729 kcontrol->private_value = 0; 730 730 break; 731 + } 731 732 } 732 733 733 734 for (set = 0; set < 6; set++) ··· 1140 1135 .control_id = SND_US16X08_ID_EQLOWMIDWIDTH, 1141 1136 .type = USB_MIXER_U8, 1142 1137 .num_channels = 16, 1143 - .name = "EQ MidQLow Q", 1138 + .name = "EQ MidLow Q", 1144 1139 }, 1145 1140 { /* EQ mid high gain */ 1146 1141 .kcontrol_new = &snd_us16x08_eq_gain_ctl,
+2 -1
tools/objtool/builtin-check.c
··· 192 192 "complete_and_exit", 193 193 "kvm_spurious_fault", 194 194 "__reiserfs_panic", 195 - "lbug_with_loc" 195 + "lbug_with_loc", 196 + "fortify_panic", 196 197 }; 197 198 198 199 if (func->bind == STB_WEAK)
+6 -2
tools/perf/Documentation/perf-probe.txt
··· 240 240 or 241 241 ./perf probe --add='schedule:12 cpu' 242 242 243 - this will add one or more probes which has the name start with "schedule". 243 + Add one or more probes which has the name start with "schedule". 244 244 245 - Add probes on lines in schedule() function which calls update_rq_clock(). 245 + ./perf probe schedule* 246 + or 247 + ./perf probe --add='schedule*' 248 + 249 + Add probes on lines in schedule() function which calls update_rq_clock(). 246 250 247 251 ./perf probe 'schedule;update_rq_clock*' 248 252 or
+1 -1
tools/perf/Documentation/perf-script-perl.txt
··· 39 39 When perf script is invoked using a trace script, a user-defined 40 40 'handler function' is called for each event in the trace. If there's 41 41 no handler function defined for a given event type, the event is 42 - ignored (or passed to a 'trace_handled' function, see below) and the 42 + ignored (or passed to a 'trace_unhandled' function, see below) and the 43 43 next event is processed. 44 44 45 45 Most of the event's field values are passed as arguments to the
+9 -14
tools/perf/Documentation/perf-script-python.txt
··· 149 149 print "id=%d, args=%s\n" % \ 150 150 (id, args), 151 151 152 - def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs, 153 - common_pid, common_comm): 154 - print_header(event_name, common_cpu, common_secs, common_nsecs, 155 - common_pid, common_comm) 152 + def trace_unhandled(event_name, context, event_fields_dict): 153 + print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]) 156 154 157 155 def print_header(event_name, cpu, secs, nsecs, pid, comm): 158 156 print "%-20s %5u %05u.%09u %8u %-20s " % \ ··· 319 321 process can be generalized to any tracepoint or set of tracepoints 320 322 you're interested in - basically find the tracepoint(s) you're 321 323 interested in by looking at the list of available events shown by 322 - 'perf list' and/or look in /sys/kernel/debug/tracing events for 324 + 'perf list' and/or look in /sys/kernel/debug/tracing/events/ for 323 325 detailed event and field info, record the corresponding trace data 324 326 using 'perf record', passing it the list of interesting events, 325 327 generate a skeleton script using 'perf script -g python' and modify the ··· 332 334 scripts listed by the 'perf script -l' command e.g.: 333 335 334 336 ---- 335 - root@tropicana:~# perf script -l 337 + # perf script -l 336 338 List of available trace scripts: 337 339 wakeup-latency system-wide min/max/avg wakeup latency 338 340 rw-by-file <comm> r/w activity for a program, by file ··· 381 383 382 384 ---- 383 385 # ls -al kernel-source/tools/perf/scripts/python 384 - 385 - root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python 386 386 total 32 387 387 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 . 388 388 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 .. ··· 395 399 should show a new entry for your script: 396 400 397 401 ---- 398 - root@tropicana:~# perf script -l 402 + # perf script -l 399 403 List of available trace scripts: 400 404 wakeup-latency system-wide min/max/avg wakeup latency 401 405 rw-by-file <comm> r/w activity for a program, by file ··· 433 437 When perf script is invoked using a trace script, a user-defined 434 438 'handler function' is called for each event in the trace. If there's 435 439 no handler function defined for a given event type, the event is 436 - ignored (or passed to a 'trace_handled' function, see below) and the 440 + ignored (or passed to a 'trace_unhandled' function, see below) and the 437 441 next event is processed. 438 442 439 443 Most of the event's field values are passed as arguments to the ··· 528 532 gives scripts a chance to do setup tasks: 529 533 530 534 ---- 531 - def trace_begin: 535 + def trace_begin(): 532 536 pass 533 537 ---- 534 538 ··· 537 541 as display results: 538 542 539 543 ---- 540 - def trace_end: 544 + def trace_end(): 541 545 pass 542 546 ---- 543 547 ··· 546 550 of common arguments are passed into it: 547 551 548 552 ---- 549 - def trace_unhandled(event_name, context, common_cpu, common_secs, 550 - common_nsecs, common_pid, common_comm): 553 + def trace_unhandled(event_name, context, event_fields_dict): 551 554 pass 552 555 ---- 553 556
+19 -19
tools/perf/Makefile.config
··· 19 19 20 20 include $(srctree)/tools/scripts/Makefile.arch 21 21 22 - $(call detected_var,ARCH) 22 + $(call detected_var,SRCARCH) 23 23 24 24 NO_PERF_REGS := 1 25 25 26 26 # Additional ARCH settings for ppc 27 - ifeq ($(ARCH),powerpc) 27 + ifeq ($(SRCARCH),powerpc) 28 28 NO_PERF_REGS := 0 29 29 LIBUNWIND_LIBS := -lunwind -lunwind-ppc64 30 30 endif 31 31 32 32 # Additional ARCH settings for x86 33 - ifeq ($(ARCH),x86) 33 + ifeq ($(SRCARCH),x86) 34 34 $(call detected,CONFIG_X86) 35 35 ifeq (${IS_64_BIT}, 1) 36 36 CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -DHAVE_SYSCALL_TABLE -I$(OUTPUT)arch/x86/include/generated ··· 43 43 NO_PERF_REGS := 0 44 44 endif 45 45 46 - ifeq ($(ARCH),arm) 46 + ifeq ($(SRCARCH),arm) 47 47 NO_PERF_REGS := 0 48 48 LIBUNWIND_LIBS = -lunwind -lunwind-arm 49 49 endif 50 50 51 - ifeq ($(ARCH),arm64) 51 + ifeq ($(SRCARCH),arm64) 52 52 NO_PERF_REGS := 0 53 53 LIBUNWIND_LIBS = -lunwind -lunwind-aarch64 54 54 endif ··· 61 61 # Disable it on all other architectures in case libdw unwind 62 62 # support is detected in system. Add supported architectures 63 63 # to the check. 64 - ifneq ($(ARCH),$(filter $(ARCH),x86 arm)) 64 + ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm)) 65 65 NO_LIBDW_DWARF_UNWIND := 1 66 66 endif 67 67 ··· 115 115 FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS) 116 116 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf 117 117 118 - FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi 118 + FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi -I$(srctree)/tools/include/uapi 119 119 # include ARCH specific config 120 - -include $(src-perf)/arch/$(ARCH)/Makefile 120 + -include $(src-perf)/arch/$(SRCARCH)/Makefile 121 121 122 122 ifdef PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET 123 123 CFLAGS += -DHAVE_ARCH_REGS_QUERY_REGISTER_OFFSET ··· 228 228 endif 229 229 230 230 INC_FLAGS += -I$(src-perf)/util/include 231 - INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include 231 + INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include 232 232 INC_FLAGS += -I$(srctree)/tools/include/uapi 233 233 INC_FLAGS += -I$(srctree)/tools/include/ 234 - INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi 235 - INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/ 236 - INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/ 234 + INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/uapi 235 + INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/include/ 236 + INC_FLAGS += -I$(srctree)/tools/arch/$(SRCARCH)/ 237 237 238 238 # $(obj-perf) for generated common-cmds.h 239 239 # $(obj-perf)/util for generated bison/flex headers ··· 355 355 356 356 ifndef NO_DWARF 357 357 ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) 358 - msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); 358 + msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled); 359 359 NO_DWARF := 1 360 360 else 361 361 CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS) ··· 380 380 CFLAGS += -DHAVE_BPF_PROLOGUE 381 381 $(call detected,CONFIG_BPF_PROLOGUE) 382 382 else 383 - msg := $(warning BPF prologue is not supported by architecture $(ARCH), missing regs_query_register_offset()); 383 + msg := $(warning BPF prologue is not supported by architecture $(SRCARCH), missing regs_query_register_offset()); 384 384 endif 385 385 else 386 386 msg := $(warning 
DWARF support is off, BPF prologue is disabled); ··· 406 406 endif 407 407 endif 408 408 409 - ifeq ($(ARCH),powerpc) 409 + ifeq ($(SRCARCH),powerpc) 410 410 ifndef NO_DWARF 411 411 CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX 412 412 endif ··· 487 487 endif 488 488 489 489 ifndef NO_LOCAL_LIBUNWIND 490 - ifeq ($(ARCH),$(filter $(ARCH),arm arm64)) 490 + ifeq ($(SRCARCH),$(filter $(SRCARCH),arm arm64)) 491 491 $(call feature_check,libunwind-debug-frame) 492 492 ifneq ($(feature-libunwind-debug-frame), 1) 493 493 msg := $(warning No debug_frame support found in libunwind); ··· 740 740 NO_PERF_READ_VDSO32 := 1 741 741 endif 742 742 endif 743 - ifneq ($(ARCH), x86) 743 + ifneq ($(SRCARCH), x86) 744 744 NO_PERF_READ_VDSOX32 := 1 745 745 endif 746 746 ifndef NO_PERF_READ_VDSOX32 ··· 769 769 endif 770 770 771 771 ifndef NO_AUXTRACE 772 - ifeq ($(ARCH),x86) 772 + ifeq ($(SRCARCH),x86) 773 773 ifeq ($(feature-get_cpuid), 0) 774 774 msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); 775 775 NO_AUXTRACE := 1 ··· 872 872 ETC_PERFCONFIG = etc/perfconfig 873 873 endif 874 874 ifndef lib 875 - ifeq ($(ARCH)$(IS_64_BIT), x861) 875 + ifeq ($(SRCARCH)$(IS_64_BIT), x861) 876 876 lib = lib64 877 877 else 878 878 lib = lib
+1 -1
tools/perf/Makefile.perf
··· 226 226 227 227 ifeq ($(config),0) 228 228 include $(srctree)/tools/scripts/Makefile.arch 229 - -include arch/$(ARCH)/Makefile 229 + -include arch/$(SRCARCH)/Makefile 230 230 endif 231 231 232 232 # The FEATURE_DUMP_EXPORT holds location of the actual
+1 -1
tools/perf/arch/Build
··· 1 1 libperf-y += common.o 2 - libperf-y += $(ARCH)/ 2 + libperf-y += $(SRCARCH)/
+1
tools/perf/arch/common.c
··· 26 26 27 27 const char *const powerpc_triplets[] = { 28 28 "powerpc-unknown-linux-gnu-", 29 + "powerpc-linux-gnu-", 29 30 "powerpc64-unknown-linux-gnu-", 30 31 "powerpc64-linux-gnu-", 31 32 "powerpc64le-linux-gnu-",
+4 -1
tools/perf/builtin-stat.c
··· 1578 1578 static void print_footer(void) 1579 1579 { 1580 1580 FILE *output = stat_config.output; 1581 + int n; 1581 1582 1582 1583 if (!null_run) 1583 1584 fprintf(output, "\n"); ··· 1591 1590 } 1592 1591 fprintf(output, "\n\n"); 1593 1592 1594 - if (print_free_counters_hint) 1593 + if (print_free_counters_hint && 1594 + sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 && 1595 + n > 0) 1595 1596 fprintf(output, 1596 1597 "Some events weren't counted. Try disabling the NMI watchdog:\n" 1597 1598 " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
+4
tools/perf/builtin-trace.c
··· 681 681 { .name = "mlockall", .errmsg = true, 682 682 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, 683 683 { .name = "mmap", .hexret = true, 684 + /* The standard mmap maps to old_mmap on s390x */ 685 + #if defined(__s390x__) 686 + .alias = "old_mmap", 687 + #endif 684 688 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ 685 689 [2] = SCA_MMAP_PROT, /* prot */ 686 690 [3] = SCA_MMAP_FLAGS, /* flags */ }, },
+2 -2
tools/perf/pmu-events/Build
··· 2 2 3 3 jevents-y += json.o jsmn.o jevents.o 4 4 pmu-events-y += pmu-events.o 5 - JDIR = pmu-events/arch/$(ARCH) 5 + JDIR = pmu-events/arch/$(SRCARCH) 6 6 JSON = $(shell [ -d $(JDIR) ] && \ 7 7 find $(JDIR) -name '*.json' -o -name 'mapfile.csv') 8 8 # ··· 10 10 # directory and create tables in pmu-events.c. 11 11 # 12 12 $(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS) 13 - $(Q)$(call echo-cmd,gen)$(JEVENTS) $(ARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V) 13 + $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
+1 -1
tools/perf/tests/Build
··· 75 75 $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@ 76 76 $(Q)echo ';' >> $@ 77 77 78 - ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64 powerpc)) 78 + ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc)) 79 79 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o 80 80 endif 81 81
+14
tools/perf/tests/bp_signal.c
··· 288 288 return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ? 289 289 TEST_OK : TEST_FAIL; 290 290 } 291 + 292 + bool test__bp_signal_is_supported(void) 293 + { 294 + /* 295 + * The powerpc so far does not have support to even create 296 + * instruction breakpoint using the perf event interface. 297 + * Once it's there we can release this. 298 + */ 299 + #ifdef __powerpc__ 300 + return false; 301 + #else 302 + return true; 303 + #endif 304 + }
+7
tools/perf/tests/builtin-test.c
··· 97 97 { 98 98 .desc = "Breakpoint overflow signal handler", 99 99 .func = test__bp_signal, 100 + .is_supported = test__bp_signal_is_supported, 100 101 }, 101 102 { 102 103 .desc = "Breakpoint overflow sampling", 103 104 .func = test__bp_signal_overflow, 105 + .is_supported = test__bp_signal_is_supported, 104 106 }, 105 107 { 106 108 .desc = "Number of exit events of a simple workload", ··· 402 400 403 401 if (!perf_test__matches(t, curr, argc, argv)) 404 402 continue; 403 + 404 + if (t->is_supported && !t->is_supported()) { 405 + pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc); 406 + continue; 407 + } 405 408 406 409 pr_info("%2d: %-*s:", i, width, t->desc); 407 410
+19 -1
tools/perf/tests/code-reading.c
··· 229 229 unsigned char buf2[BUFSZ]; 230 230 size_t ret_len; 231 231 u64 objdump_addr; 232 + const char *objdump_name; 233 + char decomp_name[KMOD_DECOMP_LEN]; 232 234 int ret; 233 235 234 236 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); ··· 291 289 state->done[state->done_cnt++] = al.map->start; 292 290 } 293 291 292 + objdump_name = al.map->dso->long_name; 293 + if (dso__needs_decompress(al.map->dso)) { 294 + if (dso__decompress_kmodule_path(al.map->dso, objdump_name, 295 + decomp_name, 296 + sizeof(decomp_name)) < 0) { 297 + pr_debug("decompression failed\n"); 298 + return -1; 299 + } 300 + 301 + objdump_name = decomp_name; 302 + } 303 + 294 304 /* Read the object code using objdump */ 295 305 objdump_addr = map__rip_2objdump(al.map, al.addr); 296 - ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len); 306 + ret = read_via_objdump(objdump_name, objdump_addr, buf2, len); 307 + 308 + if (dso__needs_decompress(al.map->dso)) 309 + unlink(objdump_name); 310 + 297 311 if (ret > 0) { 298 312 /* 299 313 * The kernel maps are inaccurate - assume objdump is right in
+1 -1
tools/perf/tests/task-exit.c
··· 83 83 84 84 evsel = perf_evlist__first(evlist); 85 85 evsel->attr.task = 1; 86 - evsel->attr.sample_freq = 0; 86 + evsel->attr.sample_freq = 1; 87 87 evsel->attr.inherit = 0; 88 88 evsel->attr.watermark = 0; 89 89 evsel->attr.wakeup_events = 1;
+3
tools/perf/tests/tests.h
··· 34 34 int (*get_nr)(void); 35 35 const char *(*get_desc)(int subtest); 36 36 } subtest; 37 + bool (*is_supported)(void); 37 38 }; 38 39 39 40 /* Tests */ ··· 99 98 const char *test__clang_subtest_get_desc(int subtest); 100 99 int test__clang_subtest_get_nr(void); 101 100 int test__unit_number__scnprint(int subtest); 101 + 102 + bool test__bp_signal_is_supported(void); 102 103 103 104 #if defined(__arm__) || defined(__aarch64__) 104 105 #ifdef HAVE_DWARF_UNWIND_SUPPORT
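The bp_signal.c, builtin-test.c and tests.h hunks above add an optional is_supported() hook to perf's test table so that tests which cannot work on the current architecture (here, instruction breakpoints on powerpc) are reported as Disabled instead of failing. A minimal standalone version of the same table-plus-capability-probe pattern (names invented for the sketch):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct test {
        const char *desc;
        int (*func)(void);
        bool (*is_supported)(void);     /* optional, may be NULL */
};

static int always_pass(void) { return 0; }

static bool bp_signal_supported(void)
{
#ifdef __powerpc__
        return false;   /* no instruction breakpoints via perf events yet */
#else
        return true;
#endif
}

static const struct test tests[] = {
        { "Simple test",            always_pass, NULL },
        { "Breakpoint signal test", always_pass, bp_signal_supported },
};

int main(void)
{
        for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                if (tests[i].is_supported && !tests[i].is_supported()) {
                        printf("%zu: %s: Disabled\n", i, tests[i].desc);
                        continue;
                }
                printf("%zu: %s: %s\n", i, tests[i].desc,
                       tests[i].func() == 0 ? "Ok" : "FAILED");
        }
        return 0;
}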
+43 -29
tools/perf/util/annotate.c
··· 239 239 const char *s = strchr(ops->raw, '+'); 240 240 const char *c = strchr(ops->raw, ','); 241 241 242 - if (c++ != NULL) 242 + /* 243 + * skip over possible up to 2 operands to get to address, e.g.: 244 + * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0> 245 + */ 246 + if (c++ != NULL) { 243 247 ops->target.addr = strtoull(c, NULL, 16); 244 - else 248 + if (!ops->target.addr) { 249 + c = strchr(c, ','); 250 + if (c++ != NULL) 251 + ops->target.addr = strtoull(c, NULL, 16); 252 + } 253 + } else { 245 254 ops->target.addr = strtoull(ops->raw, NULL, 16); 255 + } 246 256 247 257 if (s++ != NULL) { 248 258 ops->target.offset = strtoull(s, NULL, 16); ··· 267 257 static int jump__scnprintf(struct ins *ins, char *bf, size_t size, 268 258 struct ins_operands *ops) 269 259 { 260 + const char *c = strchr(ops->raw, ','); 261 + 270 262 if (!ops->target.addr || ops->target.offset < 0) 271 263 return ins__raw_scnprintf(ins, bf, size, ops); 272 264 273 - return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset); 265 + if (c != NULL) { 266 + const char *c2 = strchr(c + 1, ','); 267 + 268 + /* check for 3-op insn */ 269 + if (c2 != NULL) 270 + c = c2; 271 + c++; 272 + 273 + /* mirror arch objdump's space-after-comma style */ 274 + if (*c == ' ') 275 + c++; 276 + } 277 + 278 + return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64, 279 + ins->name, c ? c - ops->raw : 0, ops->raw, 280 + ops->target.offset); 274 281 } 275 282 276 283 static struct ins_ops jump_ops = { ··· 1321 1294 char linkname[PATH_MAX]; 1322 1295 char *build_id_filename; 1323 1296 char *build_id_path = NULL; 1297 + char *pos; 1324 1298 1325 1299 if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && 1326 1300 !dso__is_kcore(dso)) ··· 1341 1313 if (!build_id_path) 1342 1314 return -1; 1343 1315 1344 - dirname(build_id_path); 1316 + /* 1317 + * old style build-id cache has name of XX/XXXXXXX.. while 1318 + * new style has XX/XXXXXXX../{elf,kallsyms,vdso}. 1319 + * extract the build-id part of dirname in the new style only. 1320 + */ 1321 + pos = strrchr(build_id_path, '/'); 1322 + if (pos && strlen(pos) < SBUILD_ID_SIZE - 2) 1323 + dirname(build_id_path); 1345 1324 1346 1325 if (dso__is_kcore(dso) || 1347 1326 readlink(build_id_path, linkname, sizeof(linkname)) < 0 || ··· 1431 1396 sizeof(symfs_filename)); 1432 1397 } 1433 1398 } else if (dso__needs_decompress(dso)) { 1434 - char tmp[PATH_MAX]; 1435 - struct kmod_path m; 1436 - int fd; 1437 - bool ret; 1399 + char tmp[KMOD_DECOMP_LEN]; 1438 1400 1439 - if (kmod_path__parse_ext(&m, symfs_filename)) 1440 - goto out; 1441 - 1442 - snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX"); 1443 - 1444 - fd = mkstemp(tmp); 1445 - if (fd < 0) { 1446 - free(m.ext); 1447 - goto out; 1448 - } 1449 - 1450 - ret = decompress_to_file(m.ext, symfs_filename, fd); 1451 - 1452 - if (ret) 1453 - pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename); 1454 - 1455 - free(m.ext); 1456 - close(fd); 1457 - 1458 - if (!ret) 1401 + if (dso__decompress_kmodule_path(dso, symfs_filename, 1402 + tmp, sizeof(tmp)) < 0) 1459 1403 goto out; 1460 1404 1461 1405 strcpy(symfs_filename, tmp); ··· 1443 1429 snprintf(command, sizeof(command), 1444 1430 "%s %s%s --start-address=0x%016" PRIx64 1445 1431 " --stop-address=0x%016" PRIx64 1446 - " -l -d %s %s -C %s 2>/dev/null|grep -v %s:|expand", 1432 + " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand", 1447 1433 objdump_path ? objdump_path : "objdump", 1448 1434 disassembler_style ? "-M " : "", 1449 1435 disassembler_style ? 
disassembler_style : "",
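The jump__parse()/jump__scnprintf() changes above handle AArch64-style branches such as "tbnz w0, #26, ffff0000083cd190", where the target address follows one or two other operands instead of coming first. A simplified standalone sketch of that skip-operands-then-parse-hex approach (not perf's full parser):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long long parse_target(const char *raw)
{
        const char *c = strchr(raw, ',');
        unsigned long long addr;

        if (!c)
                return strtoull(raw, NULL, 16); /* address is the only operand */

        addr = strtoull(++c, NULL, 16);
        if (!addr) {
                /* first operand after the comma was not the address, try the next one */
                c = strchr(c, ',');
                if (c)
                        addr = strtoull(c + 1, NULL, 16);
        }
        return addr;
}

int main(void)
{
        printf("%llx\n", parse_target("ffff0000083cd190"));            /* b   target             */
        printf("%llx\n", parse_target("x0, ffff0000083cd190"));        /* cbnz reg, target       */
        printf("%llx\n", parse_target("w0, #26, ffff0000083cd190"));   /* tbnz reg, #bit, target */
        return 0;
}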
-45
tools/perf/util/build-id.c
··· 278 278 return bf; 279 279 } 280 280 281 - bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size) 282 - { 283 - char *id_name = NULL, *ch; 284 - struct stat sb; 285 - char sbuild_id[SBUILD_ID_SIZE]; 286 - 287 - if (!dso->has_build_id) 288 - goto err; 289 - 290 - build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); 291 - id_name = build_id_cache__linkname(sbuild_id, NULL, 0); 292 - if (!id_name) 293 - goto err; 294 - if (access(id_name, F_OK)) 295 - goto err; 296 - if (lstat(id_name, &sb) == -1) 297 - goto err; 298 - if ((size_t)sb.st_size > size - 1) 299 - goto err; 300 - if (readlink(id_name, bf, size - 1) < 0) 301 - goto err; 302 - 303 - bf[sb.st_size] = '\0'; 304 - 305 - /* 306 - * link should be: 307 - * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92 308 - */ 309 - ch = strrchr(bf, '/'); 310 - if (!ch) 311 - goto err; 312 - if (ch - 3 < bf) 313 - goto err; 314 - 315 - free(id_name); 316 - return strncmp(".ko", ch - 3, 3) == 0; 317 - err: 318 - pr_err("Invalid build id: %s\n", id_name ? : 319 - dso->long_name ? : 320 - dso->short_name ? : 321 - "[unknown]"); 322 - free(id_name); 323 - return false; 324 - } 325 - 326 281 #define dsos__for_each_with_build_id(pos, head) \ 327 282 list_for_each_entry(pos, head, node) \ 328 283 if (!pos->has_build_id) \
-1
tools/perf/util/build-id.h
··· 17 17 size_t size); 18 18 19 19 char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size); 20 - bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size); 21 20 22 21 int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event, 23 22 struct perf_sample *sample, struct perf_evsel *evsel,
+94 -6
tools/perf/util/dso.c
··· 248 248 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 249 249 } 250 250 251 + static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf) 252 + { 253 + int fd = -1; 254 + struct kmod_path m; 255 + 256 + if (!dso__needs_decompress(dso)) 257 + return -1; 258 + 259 + if (kmod_path__parse_ext(&m, dso->long_name)) 260 + return -1; 261 + 262 + if (!m.comp) 263 + goto out; 264 + 265 + fd = mkstemp(tmpbuf); 266 + if (fd < 0) { 267 + dso->load_errno = errno; 268 + goto out; 269 + } 270 + 271 + if (!decompress_to_file(m.ext, name, fd)) { 272 + dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; 273 + close(fd); 274 + fd = -1; 275 + } 276 + 277 + out: 278 + free(m.ext); 279 + return fd; 280 + } 281 + 282 + int dso__decompress_kmodule_fd(struct dso *dso, const char *name) 283 + { 284 + char tmpbuf[] = KMOD_DECOMP_NAME; 285 + int fd; 286 + 287 + fd = decompress_kmodule(dso, name, tmpbuf); 288 + unlink(tmpbuf); 289 + return fd; 290 + } 291 + 292 + int dso__decompress_kmodule_path(struct dso *dso, const char *name, 293 + char *pathname, size_t len) 294 + { 295 + char tmpbuf[] = KMOD_DECOMP_NAME; 296 + int fd; 297 + 298 + fd = decompress_kmodule(dso, name, tmpbuf); 299 + if (fd < 0) { 300 + unlink(tmpbuf); 301 + return -1; 302 + } 303 + 304 + strncpy(pathname, tmpbuf, len); 305 + close(fd); 306 + return 0; 307 + } 308 + 251 309 /* 252 310 * Parses kernel module specified in @path and updates 253 311 * @m argument like: ··· 393 335 return 0; 394 336 } 395 337 338 + void dso__set_module_info(struct dso *dso, struct kmod_path *m, 339 + struct machine *machine) 340 + { 341 + if (machine__is_host(machine)) 342 + dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; 343 + else 344 + dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; 345 + 346 + /* _KMODULE_COMP should be next to _KMODULE */ 347 + if (m->kmod && m->comp) 348 + dso->symtab_type++; 349 + 350 + dso__set_short_name(dso, strdup(m->name), true); 351 + } 352 + 396 353 /* 397 354 * Global list of open DSOs and the counter. 398 355 */ ··· 454 381 455 382 static int __open_dso(struct dso *dso, struct machine *machine) 456 383 { 457 - int fd; 384 + int fd = -EINVAL; 458 385 char *root_dir = (char *)""; 459 386 char *name = malloc(PATH_MAX); 460 387 ··· 465 392 root_dir = machine->root_dir; 466 393 467 394 if (dso__read_binary_type_filename(dso, dso->binary_type, 468 - root_dir, name, PATH_MAX)) { 469 - free(name); 470 - return -EINVAL; 471 - } 395 + root_dir, name, PATH_MAX)) 396 + goto out; 472 397 473 398 if (!is_regular_file(name)) 474 - return -EINVAL; 399 + goto out; 400 + 401 + if (dso__needs_decompress(dso)) { 402 + char newpath[KMOD_DECOMP_LEN]; 403 + size_t len = sizeof(newpath); 404 + 405 + if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) { 406 + fd = -dso->load_errno; 407 + goto out; 408 + } 409 + 410 + strcpy(name, newpath); 411 + } 475 412 476 413 fd = do_open(name); 414 + 415 + if (dso__needs_decompress(dso)) 416 + unlink(name); 417 + 418 + out: 477 419 free(name); 478 420 return fd; 479 421 }
+9
tools/perf/util/dso.h
··· 244 244 bool is_kernel_module(const char *pathname, int cpumode); 245 245 bool decompress_to_file(const char *ext, const char *filename, int output_fd); 246 246 bool dso__needs_decompress(struct dso *dso); 247 + int dso__decompress_kmodule_fd(struct dso *dso, const char *name); 248 + int dso__decompress_kmodule_path(struct dso *dso, const char *name, 249 + char *pathname, size_t len); 250 + 251 + #define KMOD_DECOMP_NAME "/tmp/perf-kmod-XXXXXX" 252 + #define KMOD_DECOMP_LEN sizeof(KMOD_DECOMP_NAME) 247 253 248 254 struct kmod_path { 249 255 char *name; ··· 264 258 #define kmod_path__parse(__m, __p) __kmod_path__parse(__m, __p, false, false) 265 259 #define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false) 266 260 #define kmod_path__parse_ext(__m, __p) __kmod_path__parse(__m, __p, false, true) 261 + 262 + void dso__set_module_info(struct dso *dso, struct kmod_path *m, 263 + struct machine *machine); 267 264 268 265 /* 269 266 * The dso__data_* external interface provides following functions:
+12
tools/perf/util/evsel.c
··· 273 273 struct perf_evsel *evsel; 274 274 275 275 event_attr_init(&attr); 276 + /* 277 + * Unnamed union member, not supported as struct member named 278 + * initializer in older compilers such as gcc 4.4.7 279 + * 280 + * Just for probing the precise_ip: 281 + */ 282 + attr.sample_period = 1; 276 283 277 284 perf_event_attr__set_max_precise_ip(&attr); 285 + /* 286 + * Now let the usual logic to set up the perf_event_attr defaults 287 + * to kick in when we return and before perf_evsel__open() is called. 288 + */ 289 + attr.sample_period = 0; 278 290 279 291 evsel = perf_evsel__new(&attr); 280 292 if (evsel == NULL)
+11 -3
tools/perf/util/header.c
··· 841 841 842 842 /* 843 843 * default get_cpuid(): nothing gets recorded 844 - * actual implementation must be in arch/$(ARCH)/util/header.c 844 + * actual implementation must be in arch/$(SRCARCH)/util/header.c 845 845 */ 846 846 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) 847 847 { ··· 1469 1469 1470 1470 dso__set_build_id(dso, &bev->build_id); 1471 1471 1472 - if (!is_kernel_module(filename, cpumode)) 1473 - dso->kernel = dso_type; 1472 + if (dso_type != DSO_TYPE_USER) { 1473 + struct kmod_path m = { .name = NULL, }; 1474 + 1475 + if (!kmod_path__parse_name(&m, filename) && m.kmod) 1476 + dso__set_module_info(dso, &m, machine); 1477 + else 1478 + dso->kernel = dso_type; 1479 + 1480 + free(m.name); 1481 + } 1474 1482 1475 1483 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1476 1484 sbuild_id);
+7 -14
tools/perf/util/machine.c
··· 572 572 if (dso == NULL) 573 573 goto out_unlock; 574 574 575 - if (machine__is_host(machine)) 576 - dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; 577 - else 578 - dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; 579 - 580 - /* _KMODULE_COMP should be next to _KMODULE */ 581 - if (m->kmod && m->comp) 582 - dso->symtab_type++; 583 - 584 - dso__set_short_name(dso, strdup(m->name), true); 575 + dso__set_module_info(dso, m, machine); 585 576 dso__set_long_name(dso, strdup(filename), true); 586 577 } 587 578 ··· 1209 1218 */ 1210 1219 map_groups__fixup_end(&machine->kmaps); 1211 1220 1212 - if (machine__get_running_kernel_start(machine, &name, &addr)) { 1213 - } else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) { 1214 - machine__destroy_kernel_maps(machine); 1215 - return -1; 1221 + if (!machine__get_running_kernel_start(machine, &name, &addr)) { 1222 + if (name && 1223 + maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) { 1224 + machine__destroy_kernel_maps(machine); 1225 + return -1; 1226 + } 1216 1227 } 1217 1228 1218 1229 return 0;
+1 -1
tools/perf/util/probe-event.c
··· 619 619 struct map *map, unsigned long offs) 620 620 { 621 621 struct symbol *sym; 622 - u64 addr = tp->address + tp->offset - offs; 622 + u64 addr = tp->address - offs; 623 623 624 624 sym = map__find_symbol(map, addr); 625 625 if (!sym)
+1 -1
tools/perf/util/scripting-engines/trace-event-python.c
··· 1219 1219 fprintf(ofp, "# be retrieved using Python functions of the form " 1220 1220 "common_*(context).\n"); 1221 1221 1222 - fprintf(ofp, "# See the perf-trace-python Documentation for the list " 1222 + fprintf(ofp, "# See the perf-script-python Documentation for the list " 1223 1223 "of available functions.\n\n"); 1224 1224 1225 1225 fprintf(ofp, "import os\n");
+3 -38
tools/perf/util/symbol-elf.c
··· 637 637 return 0; 638 638 } 639 639 640 - static int decompress_kmodule(struct dso *dso, const char *name, 641 - enum dso_binary_type type) 642 - { 643 - int fd = -1; 644 - char tmpbuf[] = "/tmp/perf-kmod-XXXXXX"; 645 - struct kmod_path m; 646 - 647 - if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP && 648 - type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP && 649 - type != DSO_BINARY_TYPE__BUILD_ID_CACHE) 650 - return -1; 651 - 652 - if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE) 653 - name = dso->long_name; 654 - 655 - if (kmod_path__parse_ext(&m, name) || !m.comp) 656 - return -1; 657 - 658 - fd = mkstemp(tmpbuf); 659 - if (fd < 0) { 660 - dso->load_errno = errno; 661 - goto out; 662 - } 663 - 664 - if (!decompress_to_file(m.ext, name, fd)) { 665 - dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; 666 - close(fd); 667 - fd = -1; 668 - } 669 - 670 - unlink(tmpbuf); 671 - 672 - out: 673 - free(m.ext); 674 - return fd; 675 - } 676 - 677 640 bool symsrc__possibly_runtime(struct symsrc *ss) 678 641 { 679 642 return ss->dynsym || ss->opdsec; ··· 668 705 int fd; 669 706 670 707 if (dso__needs_decompress(dso)) { 671 - fd = decompress_kmodule(dso, name, type); 708 + fd = dso__decompress_kmodule_fd(dso, name); 672 709 if (fd < 0) 673 710 return -1; 711 + 712 + type = dso->symtab_type; 674 713 } else { 675 714 fd = open(name, O_RDONLY); 676 715 if (fd < 0) {
-4
tools/perf/util/symbol.c
··· 1562 1562 if (!runtime_ss && syms_ss) 1563 1563 runtime_ss = syms_ss; 1564 1564 1565 - if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE) 1566 - if (dso__build_id_is_kmod(dso, name, PATH_MAX)) 1567 - kmod = true; 1568 - 1569 1565 if (syms_ss) 1570 1566 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod); 1571 1567 else
+17 -1
tools/perf/util/unwind-libdw.c
··· 39 39 return 0; 40 40 41 41 mod = dwfl_addrmodule(ui->dwfl, ip); 42 + if (mod) { 43 + Dwarf_Addr s; 44 + 45 + dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL); 46 + if (s != al->map->start) 47 + mod = 0; 48 + } 49 + 42 50 if (!mod) 43 51 mod = dwfl_report_elf(ui->dwfl, dso->short_name, 44 52 dso->long_name, -1, al->map->start, ··· 178 170 Dwarf_Addr pc; 179 171 bool isactivation; 180 172 173 + if (!dwfl_frame_pc(state, &pc, NULL)) { 174 + pr_err("%s", dwfl_errmsg(-1)); 175 + return DWARF_CB_ABORT; 176 + } 177 + 178 + // report the module before we query for isactivation 179 + report_module(pc, ui); 180 + 181 181 if (!dwfl_frame_pc(state, &pc, &isactivation)) { 182 182 pr_err("%s", dwfl_errmsg(-1)); 183 183 return DWARF_CB_ABORT; ··· 240 224 241 225 err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui); 242 226 243 - if (err && !ui->max_stack) 227 + if (err && ui->max_stack != max_stack) 244 228 err = 0; 245 229 246 230 /*
+30 -11
tools/testing/selftests/bpf/bpf_endian.h
··· 1 1 #ifndef __BPF_ENDIAN__ 2 2 #define __BPF_ENDIAN__ 3 3 4 - #include <asm/byteorder.h> 4 + #include <linux/swab.h> 5 5 6 - #if __BYTE_ORDER == __LITTLE_ENDIAN 7 - # define __bpf_ntohs(x) __builtin_bswap16(x) 8 - # define __bpf_htons(x) __builtin_bswap16(x) 9 - #elif __BYTE_ORDER == __BIG_ENDIAN 10 - # define __bpf_ntohs(x) (x) 11 - # define __bpf_htons(x) (x) 6 + /* LLVM's BPF target selects the endianness of the CPU 7 + * it compiles on, or the user specifies (bpfel/bpfeb), 8 + * respectively. The used __BYTE_ORDER__ is defined by 9 + * the compiler, we cannot rely on __BYTE_ORDER from 10 + * libc headers, since it doesn't reflect the actual 11 + * requested byte order. 12 + * 13 + * Note, LLVM's BPF target has different __builtin_bswapX() 14 + * semantics. It does map to BPF_ALU | BPF_END | BPF_TO_BE 15 + * in bpfel and bpfeb case, which means below, that we map 16 + * to cpu_to_be16(). We could use it unconditionally in BPF 17 + * case, but better not rely on it, so that this header here 18 + * can be used from application and BPF program side, which 19 + * use different targets. 20 + */ 21 + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ 22 + # define __bpf_ntohs(x) __builtin_bswap16(x) 23 + # define __bpf_htons(x) __builtin_bswap16(x) 24 + # define __bpf_constant_ntohs(x) ___constant_swab16(x) 25 + # define __bpf_constant_htons(x) ___constant_swab16(x) 26 + #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 27 + # define __bpf_ntohs(x) (x) 28 + # define __bpf_htons(x) (x) 29 + # define __bpf_constant_ntohs(x) (x) 30 + # define __bpf_constant_htons(x) (x) 12 31 #else 13 - # error "Fix your __BYTE_ORDER?!" 32 + # error "Fix your compiler's __BYTE_ORDER__?!" 14 33 #endif 15 34 16 35 #define bpf_htons(x) \ 17 36 (__builtin_constant_p(x) ? \ 18 - __constant_htons(x) : __bpf_htons(x)) 37 + __bpf_constant_htons(x) : __bpf_htons(x)) 19 38 #define bpf_ntohs(x) \ 20 39 (__builtin_constant_p(x) ? \ 21 - __constant_ntohs(x) : __bpf_ntohs(x)) 40 + __bpf_constant_ntohs(x) : __bpf_ntohs(x)) 22 41 23 - #endif 42 + #endif /* __BPF_ENDIAN__ */
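The bpf_endian.h rewrite above switches the endianness test to the compiler-provided __BYTE_ORDER__ (libc's __BYTE_ORDER need not match the bpfel/bpfeb target being compiled for) and, per its comment, avoids relying on LLVM's BPF __builtin_bswapX() semantics for constants by giving them a plain constant-expression swap. A small host-side illustration of the same __builtin_constant_p dispatch (a sketch, not the header itself; builds with GCC or Clang):

#include <stdint.h>
#include <stdio.h>

#define const_swab16(x) \
        ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define my_htons(x) \
        (__builtin_constant_p(x) ? const_swab16(x) : __builtin_bswap16(x))
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define my_htons(x) (x)
#else
# error "Fix your compiler's __BYTE_ORDER__?!"
#endif

int main(void)
{
        uint16_t port = 8080;                           /* runtime value */

        printf("constant: 0x%04x\n", my_htons(0x1234)); /* 0x3412 on little-endian, folded at build time */
        printf("runtime:  0x%04x\n", my_htons(port));
        return 0;
}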
+66
tools/testing/selftests/bpf/test_verifier.c
··· 3749 3749 .errstr = "invalid bpf_context access", 3750 3750 }, 3751 3751 { 3752 + "leak pointer into ctx 1", 3753 + .insns = { 3754 + BPF_MOV64_IMM(BPF_REG_0, 0), 3755 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 3756 + offsetof(struct __sk_buff, cb[0])), 3757 + BPF_LD_MAP_FD(BPF_REG_2, 0), 3758 + BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2, 3759 + offsetof(struct __sk_buff, cb[0])), 3760 + BPF_EXIT_INSN(), 3761 + }, 3762 + .fixup_map1 = { 2 }, 3763 + .errstr_unpriv = "R2 leaks addr into mem", 3764 + .result_unpriv = REJECT, 3765 + .result = ACCEPT, 3766 + }, 3767 + { 3768 + "leak pointer into ctx 2", 3769 + .insns = { 3770 + BPF_MOV64_IMM(BPF_REG_0, 0), 3771 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 3772 + offsetof(struct __sk_buff, cb[0])), 3773 + BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10, 3774 + offsetof(struct __sk_buff, cb[0])), 3775 + BPF_EXIT_INSN(), 3776 + }, 3777 + .errstr_unpriv = "R10 leaks addr into mem", 3778 + .result_unpriv = REJECT, 3779 + .result = ACCEPT, 3780 + }, 3781 + { 3782 + "leak pointer into ctx 3", 3783 + .insns = { 3784 + BPF_MOV64_IMM(BPF_REG_0, 0), 3785 + BPF_LD_MAP_FD(BPF_REG_2, 0), 3786 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 3787 + offsetof(struct __sk_buff, cb[0])), 3788 + BPF_EXIT_INSN(), 3789 + }, 3790 + .fixup_map1 = { 1 }, 3791 + .errstr_unpriv = "R2 leaks addr into ctx", 3792 + .result_unpriv = REJECT, 3793 + .result = ACCEPT, 3794 + }, 3795 + { 3796 + "leak pointer into map val", 3797 + .insns = { 3798 + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), 3799 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 3800 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 3801 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 3802 + BPF_LD_MAP_FD(BPF_REG_1, 0), 3803 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 3804 + BPF_FUNC_map_lookup_elem), 3805 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 3806 + BPF_MOV64_IMM(BPF_REG_3, 0), 3807 + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), 3808 + BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0), 3809 + BPF_MOV64_IMM(BPF_REG_0, 0), 3810 + BPF_EXIT_INSN(), 3811 + }, 3812 + .fixup_map1 = { 4 }, 3813 + .errstr_unpriv = "R6 leaks addr into mem", 3814 + .result_unpriv = REJECT, 3815 + .result = ACCEPT, 3816 + }, 3817 + { 3752 3818 "helper access to map: full range", 3753 3819 .insns = { 3754 3820 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+1 -1
tools/testing/selftests/ntb/ntb_test.sh
··· 305 305 echo "Running remote perf test $WITH DMA" 306 306 write_file "" $REMOTE_PERF/run 307 307 echo -n " " 308 - read_file $LOCAL_PERF/run 308 + read_file $REMOTE_PERF/run 309 309 echo " Passed" 310 310 311 311 _modprobe -r ntb_perf
+1
usr/Kconfig
··· 220 220 endchoice 221 221 222 222 config INITRAMFS_COMPRESSION 223 + depends on INITRAMFS_SOURCE!="" 223 224 string 224 225 default "" if INITRAMFS_COMPRESSION_NONE 225 226 default ".gz" if INITRAMFS_COMPRESSION_GZIP
+1 -1
virt/kvm/arm/hyp/vgic-v3-sr.c
··· 22 22 #include <asm/kvm_hyp.h> 23 23 24 24 #define vtr_to_max_lr_idx(v) ((v) & 0xf) 25 - #define vtr_to_nr_pre_bits(v) (((u32)(v) >> 26) + 1) 25 + #define vtr_to_nr_pre_bits(v) ((((u32)(v) >> 26) & 7) + 1) 26 26 27 27 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr) 28 28 {
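The vgic-v3-sr.c one-liner above masks the PREbits field of ICH_VTR_EL2 down to its 3-bit width before adding one; without the "& 7", bits above bit 28 (such as PRIbits) would leak into the computed count. A quick standalone check of the old versus new macro (illustrative):

#include <inttypes.h>
#include <stdio.h>

#define OLD_PRE_BITS(v) (((uint32_t)(v) >> 26) + 1)
#define NEW_PRE_BITS(v) ((((uint32_t)(v) >> 26) & 7) + 1)

int main(void)
{
        /* PRIbits (bits 29..31) set alongside PREbits (bits 26..28) = 5 */
        uint32_t vtr = (7u << 29) | (5u << 26);

        printf("old: %" PRIu32 " (PRIbits leak into the count)\n", OLD_PRE_BITS(vtr));
        printf("new: %" PRIu32 "\n", NEW_PRE_BITS(vtr));
        return 0;
}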
+3
virt/kvm/arm/mmu.c
··· 879 879 pmd_t *pmd; 880 880 881 881 pud = stage2_get_pud(kvm, cache, addr); 882 + if (!pud) 883 + return NULL; 884 + 882 885 if (stage2_pud_none(*pud)) { 883 886 if (!cache) 884 887 return NULL;
+14 -2
virt/kvm/arm/vgic/vgic-mmio-v2.c
··· 226 226 227 227 switch (addr & 0xff) { 228 228 case GIC_CPU_CTRL: 229 - val = vmcr.ctlr; 229 + val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT; 230 + val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT; 231 + val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT; 232 + val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT; 233 + val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT; 234 + val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT; 235 + 230 236 break; 231 237 case GIC_CPU_PRIMASK: 232 238 /* ··· 273 267 274 268 switch (addr & 0xff) { 275 269 case GIC_CPU_CTRL: 276 - vmcr.ctlr = val; 270 + vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0); 271 + vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1); 272 + vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl); 273 + vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn); 274 + vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR); 275 + vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS); 276 + 277 277 break; 278 278 case GIC_CPU_PRIMASK: 279 279 /*
+25 -3
virt/kvm/arm/vgic/vgic-v2.c
··· 177 177 struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; 178 178 u32 vmcr; 179 179 180 - vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK; 180 + vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) & 181 + GICH_VMCR_ENABLE_GRP0_MASK; 182 + vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) & 183 + GICH_VMCR_ENABLE_GRP1_MASK; 184 + vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) & 185 + GICH_VMCR_ACK_CTL_MASK; 186 + vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) & 187 + GICH_VMCR_FIQ_EN_MASK; 188 + vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) & 189 + GICH_VMCR_CBPR_MASK; 190 + vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) & 191 + GICH_VMCR_EOI_MODE_MASK; 181 192 vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & 182 193 GICH_VMCR_ALIAS_BINPOINT_MASK; 183 194 vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & ··· 206 195 207 196 vmcr = cpu_if->vgic_vmcr; 208 197 209 - vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> 210 - GICH_VMCR_CTRL_SHIFT; 198 + vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >> 199 + GICH_VMCR_ENABLE_GRP0_SHIFT; 200 + vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >> 201 + GICH_VMCR_ENABLE_GRP1_SHIFT; 202 + vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >> 203 + GICH_VMCR_ACK_CTL_SHIFT; 204 + vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >> 205 + GICH_VMCR_FIQ_EN_SHIFT; 206 + vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >> 207 + GICH_VMCR_CBPR_SHIFT; 208 + vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >> 209 + GICH_VMCR_EOI_MODE_SHIFT; 210 + 211 211 vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> 212 212 GICH_VMCR_ALIAS_BINPOINT_SHIFT; 213 213 vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
+33 -14
virt/kvm/arm/vgic/vgic-v3.c
··· 159 159 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) 160 160 { 161 161 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 162 + u32 model = vcpu->kvm->arch.vgic.vgic_model; 162 163 u32 vmcr; 163 164 164 - /* 165 - * Ignore the FIQen bit, because GIC emulation always implies 166 - * SRE=1 which means the vFIQEn bit is also RES1. 167 - */ 168 - vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) << 169 - ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK; 170 - vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK; 165 + if (model == KVM_DEV_TYPE_ARM_VGIC_V2) { 166 + vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) & 167 + ICH_VMCR_ACK_CTL_MASK; 168 + vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) & 169 + ICH_VMCR_FIQ_EN_MASK; 170 + } else { 171 + /* 172 + * When emulating GICv3 on GICv3 with SRE=1 on the 173 + * VFIQEn bit is RES1 and the VAckCtl bit is RES0. 174 + */ 175 + vmcr = ICH_VMCR_FIQ_EN_MASK; 176 + } 177 + 178 + vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK; 179 + vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK; 171 180 vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK; 172 181 vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK; 173 182 vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK; ··· 189 180 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp) 190 181 { 191 182 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; 183 + u32 model = vcpu->kvm->arch.vgic.vgic_model; 192 184 u32 vmcr; 193 185 194 186 vmcr = cpu_if->vgic_vmcr; 195 187 196 - /* 197 - * Ignore the FIQen bit, because GIC emulation always implies 198 - * SRE=1 which means the vFIQEn bit is also RES1. 199 - */ 200 - vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) << 201 - ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK; 202 - vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT; 188 + if (model == KVM_DEV_TYPE_ARM_VGIC_V2) { 189 + vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >> 190 + ICH_VMCR_ACK_CTL_SHIFT; 191 + vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >> 192 + ICH_VMCR_FIQ_EN_SHIFT; 193 + } else { 194 + /* 195 + * When emulating GICv3 on GICv3 with SRE=1 on the 196 + * VFIQEn bit is RES1 and the VAckCtl bit is RES0. 197 + */ 198 + vmcrp->fiqen = 1; 199 + vmcrp->ackctl = 0; 200 + } 201 + 202 + vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT; 203 + vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT; 203 204 vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT; 204 205 vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT; 205 206 vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
+8 -4
virt/kvm/arm/vgic/vgic.h
··· 111 111 * registers regardless of the hardware backed GIC used. 112 112 */ 113 113 struct vgic_vmcr { 114 - u32 ctlr; 114 + u32 grpen0; 115 + u32 grpen1; 116 + 117 + u32 ackctl; 118 + u32 fiqen; 119 + u32 cbpr; 120 + u32 eoim; 121 + 115 122 u32 abpr; 116 123 u32 bpr; 117 124 u32 pmr; /* Priority mask field in the GICC_PMR and 118 125 * ICC_PMR_EL1 priority field format */ 119 - /* Below member variable are valid only for GICv3 */ 120 - u32 grpen0; 121 - u32 grpen1; 122 126 }; 123 127 124 128 struct vgic_reg_attr {
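The vgic hunks above (vgic-mmio-v2.c, vgic-v2.c, vgic-v3.c, vgic.h) stop caching the guest's CPU-interface control state as a single opaque ctlr word and instead keep each field (grpen0/1, ackctl, fiqen, cbpr, eoim) in software form, converting to and from each hardware register layout with SHIFT/MASK pairs. A generic sketch of that pack/unpack idiom (the field layout here is made up for the example):

#include <inttypes.h>
#include <stdio.h>

#define FIELD_A_SHIFT   0
#define FIELD_A_MASK    (0x1u << FIELD_A_SHIFT)
#define FIELD_B_SHIFT   4
#define FIELD_B_MASK    (0xfu << FIELD_B_SHIFT)

struct sw_state {
        uint32_t a;     /* e.g. a single enable bit   */
        uint32_t b;     /* e.g. a 4-bit numeric field */
};

static uint32_t pack(const struct sw_state *s)
{
        uint32_t reg;

        reg  = (s->a << FIELD_A_SHIFT) & FIELD_A_MASK;
        reg |= (s->b << FIELD_B_SHIFT) & FIELD_B_MASK;
        return reg;
}

static void unpack(uint32_t reg, struct sw_state *s)
{
        s->a = (reg & FIELD_A_MASK) >> FIELD_A_SHIFT;
        s->b = (reg & FIELD_B_MASK) >> FIELD_B_SHIFT;
}

int main(void)
{
        struct sw_state in = { .a = 1, .b = 9 }, out;
        uint32_t reg = pack(&in);

        unpack(reg, &out);
        printf("reg=%#" PRIx32 " a=%" PRIu32 " b=%" PRIu32 "\n", reg, out.a, out.b);
        return 0;
}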