Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

tools/testing/selftests/net/forwarding/Makefile
f62c5acc800e ("selftests/net/forwarding: add missing tests to Makefile")
50fe062c806e ("selftests: forwarding: new test, verify host mdb entries")
https://lore.kernel.org/all/20220502111539.0b7e4621@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+3012 -1567
+2 -2
Documentation/arm64/memory-tagging-extension.rst
··· 228 228 ----------------- 229 229 230 230 The allocation tags for user memory mapped with ``PROT_MTE`` are dumped 231 - in the core file as additional ``PT_ARM_MEMTAG_MTE`` segments. The 231 + in the core file as additional ``PT_AARCH64_MEMTAG_MTE`` segments. The 232 232 program header for such segment is defined as: 233 233 234 - :``p_type``: ``PT_ARM_MEMTAG_MTE`` 234 + :``p_type``: ``PT_AARCH64_MEMTAG_MTE`` 235 235 :``p_flags``: 0 236 236 :``p_offset``: segment file offset 237 237 :``p_vaddr``: segment virtual address, same as the corresponding
+11 -2
Documentation/devicetree/bindings/clock/microchip,mpfs.yaml
··· 22 22 const: microchip,mpfs-clkcfg 23 23 24 24 reg: 25 - maxItems: 1 25 + items: 26 + - description: | 27 + clock config registers: 28 + These registers contain enable, reset & divider tables for the, cpu, 29 + axi, ahb and rtc/mtimer reference clocks as well as enable and reset 30 + for the peripheral clocks. 31 + - description: | 32 + mss pll dri registers: 33 + Block of registers responsible for dynamic reconfiguration of the mss 34 + pll 26 35 27 36 clocks: 28 37 maxItems: 1 ··· 60 51 #size-cells = <2>; 61 52 clkcfg: clock-controller@20002000 { 62 53 compatible = "microchip,mpfs-clkcfg"; 63 - reg = <0x0 0x20002000 0x0 0x1000>; 54 + reg = <0x0 0x20002000 0x0 0x1000>, <0x0 0x3E001000 0x0 0x1000>; 64 55 clocks = <&ref>; 65 56 #clock-cells = <1>; 66 57 };
+3 -4
Documentation/devicetree/bindings/hwmon/ti,tmp421.yaml
··· 58 58 description: | 59 59 The value (two's complement) to be programmed in the channel specific N correction register. 60 60 For remote channels only. 61 - $ref: /schemas/types.yaml#/definitions/uint32 62 - items: 63 - minimum: 0 64 - maximum: 255 61 + $ref: /schemas/types.yaml#/definitions/int32 62 + minimum: -128 63 + maximum: 127 65 64 66 65 required: 67 66 - reg
+1 -1
Documentation/devicetree/bindings/mfd/atmel-flexcom.txt
··· 54 54 clock-names = "spi_clk"; 55 55 atmel,fifo-size = <32>; 56 56 57 - mtd_dataflash@0 { 57 + flash@0 { 58 58 compatible = "atmel,at25f512b"; 59 59 reg = <0>; 60 60 spi-max-frequency = <20000000>;
+12 -3
Documentation/devicetree/bindings/rtc/microchip,mfps-rtc.yaml
··· 31 31 to that of the RTC's count register. 32 32 33 33 clocks: 34 - maxItems: 1 34 + items: 35 + - description: | 36 + AHB clock 37 + - description: | 38 + Reference clock: divided by the prescaler to create a time-based 39 + strobe (typically 1 Hz) for the calendar counter. By default, the rtc 40 + on the PolarFire SoC shares it's reference with MTIMER so this will 41 + be a 1 MHz clock. 35 42 36 43 clock-names: 37 44 items: 38 45 - const: rtc 46 + - const: rtcref 39 47 40 48 required: 41 49 - compatible ··· 56 48 57 49 examples: 58 50 - | 51 + #include "dt-bindings/clock/microchip,mpfs-clock.h" 59 52 rtc@20124000 { 60 53 compatible = "microchip,mpfs-rtc"; 61 54 reg = <0x20124000 0x1000>; 62 - clocks = <&clkcfg 21>; 63 - clock-names = "rtc"; 55 + clocks = <&clkcfg CLK_RTC>, <&clkcfg CLK_RTCREF>; 56 + clock-names = "rtc", "rtcref"; 64 57 interrupts = <80>, <81>; 65 58 }; 66 59 ...
+1
Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
··· 62 62 - interrupts 63 63 - phys 64 64 - phy-names 65 + - reg 65 66 66 67 allOf: 67 68 - if:
+26 -16
Documentation/security/siphash.rst
··· 121 121 instead of SipHash's 128-bit key. However, this may appeal to some 122 122 high-performance `jhash` users. 123 123 124 - Danger! 124 + HalfSipHash support is provided through the "hsiphash" family of functions. 125 125 126 - Do not ever use HalfSipHash except for as a hashtable key function, and only 127 - then when you can be absolutely certain that the outputs will never be 128 - transmitted out of the kernel. This is only remotely useful over `jhash` as a 129 - means of mitigating hashtable flooding denial of service attacks. 126 + .. warning:: 127 + Do not ever use the hsiphash functions except for as a hashtable key 128 + function, and only then when you can be absolutely certain that the outputs 129 + will never be transmitted out of the kernel. This is only remotely useful 130 + over `jhash` as a means of mitigating hashtable flooding denial of service 131 + attacks. 130 132 131 - Generating a HalfSipHash key 132 - ============================ 133 + On 64-bit kernels, the hsiphash functions actually implement SipHash-1-3, a 134 + reduced-round variant of SipHash, instead of HalfSipHash-1-3. This is because in 135 + 64-bit code, SipHash-1-3 is no slower than HalfSipHash-1-3, and can be faster. 136 + Note, this does *not* mean that in 64-bit kernels the hsiphash functions are the 137 + same as the siphash ones, or that they are secure; the hsiphash functions still 138 + use a less secure reduced-round algorithm and truncate their outputs to 32 139 + bits. 
140 + 141 + Generating a hsiphash key 142 + ========================= 133 143 134 144 Keys should always be generated from a cryptographically secure source of 135 - random numbers, either using get_random_bytes or get_random_once: 145 + random numbers, either using get_random_bytes or get_random_once:: 136 146 137 - hsiphash_key_t key; 138 - get_random_bytes(&key, sizeof(key)); 147 + hsiphash_key_t key; 148 + get_random_bytes(&key, sizeof(key)); 139 149 140 150 If you're not deriving your key from here, you're doing it wrong. 141 151 142 - Using the HalfSipHash functions 143 - =============================== 152 + Using the hsiphash functions 153 + ============================ 144 154 145 155 There are two variants of the function, one that takes a list of integers, and 146 156 one that takes a buffer:: ··· 193 183 Performance 194 184 =========== 195 185 196 - HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements, 197 - this will not be a problem, as the hashtable lookup isn't the bottleneck. And 198 - in general, this is probably a good sacrifice to make for the security and DoS 199 - resistance of HalfSipHash. 186 + hsiphash() is roughly 3 times slower than jhash(). For many replacements, this 187 + will not be a problem, as the hashtable lookup isn't the bottleneck. And in 188 + general, this is probably a good sacrifice to make for the security and DoS 189 + resistance of hsiphash().
+17 -7
Documentation/virt/kvm/api.rst
··· 5986 5986 #define KVM_SYSTEM_EVENT_RESET 2 5987 5987 #define KVM_SYSTEM_EVENT_CRASH 3 5988 5988 __u32 type; 5989 - __u64 flags; 5989 + __u32 ndata; 5990 + __u64 data[16]; 5990 5991 } system_event; 5991 5992 5992 5993 If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered 5993 5994 a system-level event using some architecture specific mechanism (hypercall 5994 5995 or some special instruction). In case of ARM64, this is triggered using 5995 - HVC instruction based PSCI call from the vcpu. The 'type' field describes 5996 - the system-level event type. The 'flags' field describes architecture 5997 - specific flags for the system-level event. 5996 + HVC instruction based PSCI call from the vcpu. 5998 5997 5998 + The 'type' field describes the system-level event type. 5999 5999 Valid values for 'type' are: 6000 6000 6001 6001 - KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the ··· 6010 6010 to ignore the request, or to gather VM memory core dump and/or 6011 6011 reset/shutdown of the VM. 6012 6012 6013 - Valid flags are: 6013 + If KVM_CAP_SYSTEM_EVENT_DATA is present, the 'data' field can contain 6014 + architecture specific information for the system-level event. Only 6015 + the first `ndata` items (possibly zero) of the data array are valid. 6014 6016 6015 - - KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (arm64 only) -- the guest issued 6016 - a SYSTEM_RESET2 call according to v1.1 of the PSCI specification. 6017 + - for arm64, data[0] is set to KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 if 6018 + the guest issued a SYSTEM_RESET2 call according to v1.1 of the PSCI 6019 + specification. 6020 + 6021 + - for RISC-V, data[0] is set to the value of the second argument of the 6022 + ``sbi_system_reset`` call. 6023 + 6024 + Previous versions of Linux defined a `flags` member in this struct. The 6025 + field is now aliased to `data[0]`. Userspace can assume that it is only 6026 + written if ndata is greater than 0. 6017 6027 6018 6028 :: 6019 6029
+16
MAINTAINERS
··· 2644 2644 S: Maintained 2645 2645 C: irc://irc.libera.chat/linux-exynos 2646 2646 Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/ 2647 + B: mailto:linux-samsung-soc@vger.kernel.org 2647 2648 T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git 2648 2649 F: Documentation/arm/samsung/ 2649 2650 F: Documentation/devicetree/bindings/arm/samsung/ ··· 11982 11981 M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> 11983 11982 L: linux-pm@vger.kernel.org 11984 11983 S: Supported 11984 + B: mailto:linux-samsung-soc@vger.kernel.org 11985 11985 F: Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml 11986 11986 F: Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml 11987 11987 F: drivers/power/supply/max14577_charger.c ··· 11994 11992 M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> 11995 11993 L: linux-kernel@vger.kernel.org 11996 11994 S: Supported 11995 + B: mailto:linux-samsung-soc@vger.kernel.org 11997 11996 F: Documentation/devicetree/bindings/*/maxim,max14577.yaml 11998 11997 F: Documentation/devicetree/bindings/*/maxim,max77686.yaml 11999 11998 F: Documentation/devicetree/bindings/*/maxim,max77693.yaml ··· 12688 12685 M: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org> 12689 12686 L: linux-kernel@vger.kernel.org 12690 12687 S: Maintained 12688 + B: mailto:krzysztof.kozlowski@linaro.org 12691 12689 T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git 12692 12690 F: Documentation/devicetree/bindings/memory-controllers/ 12693 12691 F: drivers/memory/ ··· 15630 15626 S: Maintained 15631 15627 C: irc://irc.libera.chat/linux-exynos 15632 15628 Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/ 15629 + B: mailto:linux-samsung-soc@vger.kernel.org 15633 15630 T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git 15634 15631 F: Documentation/devicetree/bindings/pinctrl/samsung,pinctrl*yaml 15635 15632 F: drivers/pinctrl/samsung/ ··· 17353 17348 
M: Sylwester Nawrocki <s.nawrocki@samsung.com> 17354 17349 L: alsa-devel@alsa-project.org (moderated for non-subscribers) 17355 17350 S: Supported 17351 + B: mailto:linux-samsung-soc@vger.kernel.org 17356 17352 F: Documentation/devicetree/bindings/sound/samsung* 17357 17353 F: sound/soc/samsung/ 17358 17354 ··· 17398 17392 L: linux-kernel@vger.kernel.org 17399 17393 L: linux-samsung-soc@vger.kernel.org 17400 17394 S: Supported 17395 + B: mailto:linux-samsung-soc@vger.kernel.org 17401 17396 F: Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml 17402 17397 F: Documentation/devicetree/bindings/mfd/samsung,s2m*.yaml 17403 17398 F: Documentation/devicetree/bindings/mfd/samsung,s5m*.yaml ··· 21463 21456 F: arch/x86/include/asm/uv/ 21464 21457 F: arch/x86/kernel/apic/x2apic_uv_x.c 21465 21458 F: arch/x86/platform/uv/ 21459 + 21460 + X86 STACK UNWINDING 21461 + M: Josh Poimboeuf <jpoimboe@redhat.com> 21462 + M: Peter Zijlstra <peterz@infradead.org> 21463 + S: Supported 21464 + F: arch/x86/include/asm/unwind*.h 21465 + F: arch/x86/kernel/dumpstack.c 21466 + F: arch/x86/kernel/stacktrace.c 21467 + F: arch/x86/kernel/unwind_*.c 21466 21468 21467 21469 X86 VDSO 21468 21470 M: Andy Lutomirski <luto@kernel.org>
+1 -1
Makefile
··· 2 2 VERSION = 5 3 3 PATCHLEVEL = 18 4 4 SUBLEVEL = 0 5 - EXTRAVERSION = -rc4 5 + EXTRAVERSION = -rc5 6 6 NAME = Superb Owl 7 7 8 8 # *DOCUMENTATION*
+2
arch/arm/boot/dts/am33xx-l4.dtsi
··· 263 263 compatible = "ti,am3359-tscadc"; 264 264 reg = <0x0 0x1000>; 265 265 interrupts = <16>; 266 + clocks = <&adc_tsc_fck>; 267 + clock-names = "fck"; 266 268 status = "disabled"; 267 269 dmas = <&edma 53 0>, <&edma 57 0>; 268 270 dma-names = "fifo0", "fifo1";
+40 -5
arch/arm/boot/dts/am3517-evm.dts
··· 161 161 162 162 /* HS USB Host PHY on PORT 1 */ 163 163 hsusb1_phy: hsusb1_phy { 164 + pinctrl-names = "default"; 165 + pinctrl-0 = <&hsusb1_rst_pins>; 164 166 compatible = "usb-nop-xceiv"; 165 167 reset-gpios = <&gpio2 25 GPIO_ACTIVE_LOW>; /* gpio_57 */ 166 168 #phy-cells = <0>; ··· 170 168 }; 171 169 172 170 &davinci_emac { 173 - status = "okay"; 171 + pinctrl-names = "default"; 172 + pinctrl-0 = <&ethernet_pins>; 173 + status = "okay"; 174 174 }; 175 175 176 176 &davinci_mdio { ··· 197 193 }; 198 194 199 195 &i2c2 { 196 + pinctrl-names = "default"; 197 + pinctrl-0 = <&i2c2_pins>; 200 198 clock-frequency = <400000>; 201 199 /* User DIP swithes [1:8] / User LEDS [1:2] */ 202 200 tca6416: gpio@21 { ··· 211 205 }; 212 206 213 207 &i2c3 { 208 + pinctrl-names = "default"; 209 + pinctrl-0 = <&i2c3_pins>; 214 210 clock-frequency = <400000>; 215 211 }; 216 212 ··· 231 223 }; 232 224 233 225 &usbhshost { 226 + pinctrl-names = "default"; 227 + pinctrl-0 = <&hsusb1_pins>; 234 228 port1-mode = "ehci-phy"; 235 229 }; 236 230 ··· 241 231 }; 242 232 243 233 &omap3_pmx_core { 244 - pinctrl-names = "default"; 245 - pinctrl-0 = <&hsusb1_rst_pins>; 234 + 235 + ethernet_pins: pinmux_ethernet_pins { 236 + pinctrl-single,pins = < 237 + OMAP3_CORE1_IOPAD(0x21fe, PIN_INPUT | MUX_MODE0) /* rmii_mdio_data */ 238 + OMAP3_CORE1_IOPAD(0x2200, MUX_MODE0) /* rmii_mdio_clk */ 239 + OMAP3_CORE1_IOPAD(0x2202, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd0 */ 240 + OMAP3_CORE1_IOPAD(0x2204, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_rxd1 */ 241 + OMAP3_CORE1_IOPAD(0x2206, PIN_INPUT_PULLDOWN | MUX_MODE0) /* rmii_crs_dv */ 242 + OMAP3_CORE1_IOPAD(0x2208, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_rxer */ 243 + OMAP3_CORE1_IOPAD(0x220a, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd0 */ 244 + OMAP3_CORE1_IOPAD(0x220c, PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* rmii_txd1 */ 245 + OMAP3_CORE1_IOPAD(0x220e, PIN_OUTPUT_PULLDOWN |MUX_MODE0) /* rmii_txen */ 246 + OMAP3_CORE1_IOPAD(0x2210, PIN_INPUT_PULLDOWN | 
MUX_MODE0) /* rmii_50mhz_clk */ 247 + >; 248 + }; 249 + 250 + i2c2_pins: pinmux_i2c2_pins { 251 + pinctrl-single,pins = < 252 + OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_scl */ 253 + OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c2_sda */ 254 + >; 255 + }; 256 + 257 + i2c3_pins: pinmux_i2c3_pins { 258 + pinctrl-single,pins = < 259 + OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_scl */ 260 + OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda */ 261 + >; 262 + }; 246 263 247 264 leds_pins: pinmux_leds_pins { 248 265 pinctrl-single,pins = < ··· 337 300 }; 338 301 339 302 &omap3_pmx_core2 { 340 - pinctrl-names = "default"; 341 - pinctrl-0 = <&hsusb1_pins>; 342 303 343 304 hsusb1_pins: pinmux_hsusb1_pins { 344 305 pinctrl-single,pins = <
+9
arch/arm/boot/dts/am3517-som.dtsi
··· 69 69 }; 70 70 71 71 &i2c1 { 72 + pinctrl-names = "default"; 73 + pinctrl-0 = <&i2c1_pins>; 72 74 clock-frequency = <400000>; 73 75 74 76 s35390a: s35390a@30 { ··· 180 178 }; 181 179 182 180 &omap3_pmx_core { 181 + 182 + i2c1_pins: pinmux_i2c1_pins { 183 + pinctrl-single,pins = < 184 + OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_scl */ 185 + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c1_sda */ 186 + >; 187 + }; 183 188 184 189 wl12xx_buffer_pins: pinmux_wl12xx_buffer_pins { 185 190 pinctrl-single,pins = <
+1 -1
arch/arm/boot/dts/at91-dvk_su60_somc.dtsi
··· 44 44 status = "okay"; 45 45 46 46 /* spi0.0: 4M Flash Macronix MX25R4035FM1IL0 */ 47 - spi-flash@0 { 47 + flash@0 { 48 48 compatible = "mxicy,mx25u4035", "jedec,spi-nor"; 49 49 spi-max-frequency = <33000000>; 50 50 reg = <0>;
+1 -1
arch/arm/boot/dts/at91-kizbox3-hs.dts
··· 225 225 pinctrl_pio_io_reset: gpio_io_reset { 226 226 pinmux = <PIN_PB30__GPIO>; 227 227 bias-disable; 228 - drive-open-drain; 228 + drive-open-drain = <1>; 229 229 output-low; 230 230 }; 231 231 pinctrl_pio_input: gpio_input {
+1 -1
arch/arm/boot/dts/at91-kizbox3_common.dtsi
··· 211 211 pinmux = <PIN_PD12__FLEXCOM4_IO0>, //DATA 212 212 <PIN_PD13__FLEXCOM4_IO1>; //CLK 213 213 bias-disable; 214 - drive-open-drain; 214 + drive-open-drain = <1>; 215 215 }; 216 216 217 217 pinctrl_pwm0 {
+1 -1
arch/arm/boot/dts/at91-q5xr5.dts
··· 125 125 cs-gpios = <&pioA 3 GPIO_ACTIVE_HIGH>, <&pioC 11 GPIO_ACTIVE_LOW>, <0>, <0>; 126 126 status = "okay"; 127 127 128 - m25p80@0 { 128 + flash@0 { 129 129 compatible = "jedec,spi-nor"; 130 130 spi-max-frequency = <20000000>; 131 131 reg = <0>;
+1 -1
arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
··· 214 214 pinctrl-0 = <&pinctrl_qspi1_default>; 215 215 status = "disabled"; 216 216 217 - qspi1_flash: spi_flash@0 { 217 + qspi1_flash: flash@0 { 218 218 #address-cells = <1>; 219 219 #size-cells = <1>; 220 220 compatible = "jedec,spi-nor";
+1 -1
arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
··· 191 191 &qspi1 { 192 192 status = "okay"; 193 193 194 - qspi1_flash: spi_flash@0 { 194 + qspi1_flash: flash@0 { 195 195 status = "okay"; 196 196 }; 197 197 };
+1 -1
arch/arm/boot/dts/at91-sama5d2_xplained.dts
··· 137 137 pinctrl-0 = <&pinctrl_spi0_default>; 138 138 status = "okay"; 139 139 140 - m25p80@0 { 140 + flash@0 { 141 141 compatible = "atmel,at25df321a"; 142 142 reg = <0>; 143 143 spi-max-frequency = <50000000>;
+4 -4
arch/arm/boot/dts/at91-sama5d3_xplained.dts
··· 57 57 }; 58 58 59 59 spi0: spi@f0004000 { 60 - pinctrl-names = "default"; 61 - pinctrl-0 = <&pinctrl_spi0_cs>; 60 + pinctrl-names = "default", "cs"; 61 + pinctrl-1 = <&pinctrl_spi0_cs>; 62 62 cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>; 63 63 status = "okay"; 64 64 }; ··· 171 171 }; 172 172 173 173 spi1: spi@f8008000 { 174 - pinctrl-names = "default"; 175 - pinctrl-0 = <&pinctrl_spi1_cs>; 174 + pinctrl-names = "default", "cs"; 175 + pinctrl-1 = <&pinctrl_spi1_cs>; 176 176 cs-gpios = <&pioC 25 0>; 177 177 status = "okay"; 178 178 };
+1 -1
arch/arm/boot/dts/at91-sama5d4_ma5d4.dtsi
··· 49 49 cs-gpios = <&pioC 3 0>, <0>, <0>, <0>; 50 50 status = "okay"; 51 51 52 - m25p80@0 { 52 + flash@0 { 53 53 compatible = "atmel,at25df321a"; 54 54 spi-max-frequency = <50000000>; 55 55 reg = <0>;
+3 -3
arch/arm/boot/dts/at91-sama5d4_xplained.dts
··· 81 81 }; 82 82 83 83 spi1: spi@fc018000 { 84 - pinctrl-names = "default"; 85 - pinctrl-0 = <&pinctrl_spi0_cs>; 84 + pinctrl-names = "default", "cs"; 85 + pinctrl-1 = <&pinctrl_spi1_cs>; 86 86 cs-gpios = <&pioB 21 0>; 87 87 status = "okay"; 88 88 }; ··· 140 140 atmel,pins = 141 141 <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; 142 142 }; 143 - pinctrl_spi0_cs: spi0_cs_default { 143 + pinctrl_spi1_cs: spi1_cs_default { 144 144 atmel,pins = 145 145 <AT91_PIOB 21 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; 146 146 };
+1 -1
arch/arm/boot/dts/at91-sama5d4ek.dts
··· 65 65 spi0: spi@f8010000 { 66 66 cs-gpios = <&pioC 3 0>, <0>, <0>, <0>; 67 67 status = "okay"; 68 - m25p80@0 { 68 + flash@0 { 69 69 compatible = "atmel,at25df321a"; 70 70 spi-max-frequency = <50000000>; 71 71 reg = <0>;
+2 -2
arch/arm/boot/dts/at91-sama7g5ek.dts
··· 495 495 pinctrl_flx3_default: flx3_default { 496 496 pinmux = <PIN_PD16__FLEXCOM3_IO0>, 497 497 <PIN_PD17__FLEXCOM3_IO1>; 498 - bias-disable; 498 + bias-pull-up; 499 499 }; 500 500 501 501 pinctrl_flx4_default: flx4_default { ··· 655 655 <PIN_PB21__QSPI0_INT>; 656 656 bias-disable; 657 657 slew-rate = <0>; 658 - atmel,drive-strength = <ATMEL_PIO_DRVSTR_HI>; 658 + atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>; 659 659 }; 660 660 661 661 pinctrl_sdmmc0_default: sdmmc0_default {
+1 -1
arch/arm/boot/dts/at91-vinco.dts
··· 59 59 spi0: spi@f8010000 { 60 60 cs-gpios = <&pioC 3 0>, <0>, <0>, <0>; 61 61 status = "okay"; 62 - m25p80@0 { 62 + flash@0 { 63 63 compatible = "n25q32b", "jedec,spi-nor"; 64 64 spi-max-frequency = <50000000>; 65 65 reg = <0>;
+2 -2
arch/arm/boot/dts/at91rm9200ek.dts
··· 73 73 spi0: spi@fffe0000 { 74 74 status = "okay"; 75 75 cs-gpios = <&pioA 3 0>, <0>, <0>, <0>; 76 - mtd_dataflash@0 { 76 + flash@0 { 77 77 compatible = "atmel,at45", "atmel,dataflash"; 78 78 spi-max-frequency = <15000000>; 79 79 reg = <0>; ··· 94 94 status = "okay"; 95 95 }; 96 96 97 - nor_flash@10000000 { 97 + flash@10000000 { 98 98 compatible = "cfi-flash"; 99 99 reg = <0x10000000 0x800000>; 100 100 linux,mtd-name = "physmap-flash.0";
+1 -1
arch/arm/boot/dts/at91sam9260ek.dts
··· 92 92 93 93 spi0: spi@fffc8000 { 94 94 cs-gpios = <0>, <&pioC 11 0>, <0>, <0>; 95 - mtd_dataflash@1 { 95 + flash@1 { 96 96 compatible = "atmel,at45", "atmel,dataflash"; 97 97 spi-max-frequency = <50000000>; 98 98 reg = <1>;
+1 -1
arch/arm/boot/dts/at91sam9261ek.dts
··· 145 145 cs-gpios = <&pioA 3 0>, <0>, <&pioA 28 0>, <0>; 146 146 status = "okay"; 147 147 148 - mtd_dataflash@0 { 148 + flash@0 { 149 149 compatible = "atmel,at45", "atmel,dataflash"; 150 150 reg = <0>; 151 151 spi-max-frequency = <15000000>;
+1 -1
arch/arm/boot/dts/at91sam9263ek.dts
··· 95 95 spi0: spi@fffa4000 { 96 96 status = "okay"; 97 97 cs-gpios = <&pioA 5 0>, <0>, <0>, <0>; 98 - mtd_dataflash@0 { 98 + flash@0 { 99 99 compatible = "atmel,at45", "atmel,dataflash"; 100 100 spi-max-frequency = <50000000>; 101 101 reg = <0>;
+44 -1
arch/arm/boot/dts/at91sam9g20ek_common.dtsi
··· 110 110 111 111 spi0: spi@fffc8000 { 112 112 cs-gpios = <0>, <&pioC 11 0>, <0>, <0>; 113 - mtd_dataflash@1 { 113 + flash@1 { 114 114 compatible = "atmel,at45", "atmel,dataflash"; 115 115 spi-max-frequency = <50000000>; 116 116 reg = <1>; ··· 214 214 24c512@50 { 215 215 compatible = "atmel,24c512"; 216 216 reg = <0x50>; 217 + vcc-supply = <&reg_3v3>; 217 218 }; 218 219 219 220 wm8731: wm8731@1b { 220 221 compatible = "wm8731"; 221 222 reg = <0x1b>; 223 + 224 + /* PCK0 at 12MHz */ 225 + clocks = <&pmc PMC_TYPE_SYSTEM 8>; 226 + clock-names = "mclk"; 227 + assigned-clocks = <&pmc PMC_TYPE_SYSTEM 8>; 228 + assigned-clock-rates = <12000000>; 229 + 230 + HPVDD-supply = <&vcc_dac>; 231 + AVDD-supply = <&vcc_dac>; 232 + DCVDD-supply = <&reg_3v3>; 233 + DBVDD-supply = <&reg_3v3>; 222 234 }; 223 235 }; 224 236 ··· 265 253 266 254 atmel,ssc-controller = <&ssc0>; 267 255 atmel,audio-codec = <&wm8731>; 256 + }; 257 + 258 + reg_5v: fixedregulator0 { 259 + compatible = "regulator-fixed"; 260 + regulator-name = "5V"; 261 + regulator-min-microvolt = <5000000>; 262 + regulator-max-microvolt = <5000000>; 263 + }; 264 + 265 + reg_3v3: fixedregulator1 { 266 + compatible = "regulator-fixed"; 267 + regulator-name = "3V3"; 268 + vin-supply = <&reg_5v>; 269 + regulator-min-microvolt = <3300000>; 270 + regulator-max-microvolt = <3300000>; 271 + }; 272 + 273 + reg_1v: fixedregulator2 { 274 + compatible = "regulator-fixed"; 275 + regulator-name = "1V"; 276 + vin-supply = <&reg_5v>; 277 + regulator-min-microvolt = <1000000>; 278 + regulator-max-microvolt = <1000000>; 279 + }; 280 + 281 + vcc_dac: fixedregulator3 { 282 + compatible = "regulator-fixed"; 283 + regulator-name = "VCC_DAC"; 284 + vin-supply = <&reg_3v3>; 285 + regulator-min-microvolt = <3300000>; 286 + regulator-max-microvolt = <3300000>; 268 287 }; 269 288 };
+1 -1
arch/arm/boot/dts/at91sam9m10g45ek.dts
··· 167 167 spi0: spi@fffa4000{ 168 168 status = "okay"; 169 169 cs-gpios = <&pioB 3 0>, <0>, <0>, <0>; 170 - mtd_dataflash@0 { 170 + flash@0 { 171 171 compatible = "atmel,at45", "atmel,dataflash"; 172 172 spi-max-frequency = <13000000>; 173 173 reg = <0>;
+1 -1
arch/arm/boot/dts/at91sam9n12ek.dts
··· 119 119 spi0: spi@f0000000 { 120 120 status = "okay"; 121 121 cs-gpios = <&pioA 14 0>, <0>, <0>, <0>; 122 - m25p80@0 { 122 + flash@0 { 123 123 compatible = "atmel,at25df321a"; 124 124 spi-max-frequency = <50000000>; 125 125 reg = <0>;
+1 -1
arch/arm/boot/dts/at91sam9rlek.dts
··· 180 180 spi0: spi@fffcc000 { 181 181 status = "okay"; 182 182 cs-gpios = <&pioA 28 0>, <0>, <0>, <0>; 183 - mtd_dataflash@0 { 183 + flash@0 { 184 184 compatible = "atmel,at45", "atmel,dataflash"; 185 185 spi-max-frequency = <15000000>; 186 186 reg = <0>;
+1 -1
arch/arm/boot/dts/at91sam9x5ek.dtsi
··· 125 125 cs-gpios = <&pioA 14 0>, <0>, <0>, <0>; 126 126 status = "disabled"; /* conflicts with mmc1 */ 127 127 128 - m25p80@0 { 128 + flash@0 { 129 129 compatible = "atmel,at25df321a"; 130 130 spi-max-frequency = <50000000>; 131 131 reg = <0>;
+2 -2
arch/arm/boot/dts/dra7-l4.dtsi
··· 4188 4188 reg = <0x1d0010 0x4>; 4189 4189 reg-names = "sysc"; 4190 4190 ti,sysc-midle = <SYSC_IDLE_FORCE>, 4191 - <SYSC_IDLE_NO>, 4192 - <SYSC_IDLE_SMART>; 4191 + <SYSC_IDLE_NO>; 4193 4192 ti,sysc-sidle = <SYSC_IDLE_FORCE>, 4194 4193 <SYSC_IDLE_NO>, 4195 4194 <SYSC_IDLE_SMART>; 4195 + power-domains = <&prm_vpe>; 4196 4196 clocks = <&vpe_clkctrl DRA7_VPE_VPE_CLKCTRL 0>; 4197 4197 clock-names = "fck"; 4198 4198 #address-cells = <1>;
+8 -2
arch/arm/boot/dts/imx6qdl-apalis.dtsi
··· 286 286 codec: sgtl5000@a { 287 287 compatible = "fsl,sgtl5000"; 288 288 reg = <0x0a>; 289 + pinctrl-names = "default"; 290 + pinctrl-0 = <&pinctrl_sgtl5000>; 289 291 clocks = <&clks IMX6QDL_CLK_CKO>; 290 292 VDDA-supply = <&reg_module_3v3_audio>; 291 293 VDDIO-supply = <&reg_module_3v3>; ··· 519 517 MX6QDL_PAD_DISP0_DAT21__AUD4_TXD 0x130b0 520 518 MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0 521 519 MX6QDL_PAD_DISP0_DAT23__AUD4_RXD 0x130b0 522 - /* SGTL5000 sys_mclk */ 523 - MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0 524 520 >; 525 521 }; 526 522 ··· 808 808 fsl,pins = < 809 809 /* SD1 CD */ 810 810 MX6QDL_PAD_NANDF_CS1__GPIO6_IO14 0x000b0 811 + >; 812 + }; 813 + 814 + pinctrl_sgtl5000: sgtl5000grp { 815 + fsl,pins = < 816 + MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0 811 817 >; 812 818 }; 813 819
+1 -1
arch/arm/boot/dts/imx6ull-colibri.dtsi
··· 37 37 38 38 reg_sd1_vmmc: regulator-sd1-vmmc { 39 39 compatible = "regulator-gpio"; 40 - gpio = <&gpio5 9 GPIO_ACTIVE_HIGH>; 40 + gpios = <&gpio5 9 GPIO_ACTIVE_HIGH>; 41 41 pinctrl-names = "default"; 42 42 pinctrl-0 = <&pinctrl_snvs_reg_sd>; 43 43 regulator-always-on;
+15
arch/arm/boot/dts/logicpd-som-lv-35xx-devkit.dts
··· 11 11 model = "LogicPD Zoom OMAP35xx SOM-LV Development Kit"; 12 12 compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3430", "ti,omap3"; 13 13 }; 14 + 15 + &omap3_pmx_core2 { 16 + pinctrl-names = "default"; 17 + pinctrl-0 = <&hsusb2_2_pins>; 18 + hsusb2_2_pins: pinmux_hsusb2_2_pins { 19 + pinctrl-single,pins = < 20 + OMAP3430_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */ 21 + OMAP3430_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */ 22 + OMAP3430_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */ 23 + OMAP3430_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */ 24 + OMAP3430_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */ 25 + OMAP3430_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */ 26 + >; 27 + }; 28 + };
+15
arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
··· 11 11 model = "LogicPD Zoom DM3730 SOM-LV Development Kit"; 12 12 compatible = "logicpd,dm3730-som-lv-devkit", "ti,omap3630", "ti,omap3"; 13 13 }; 14 + 15 + &omap3_pmx_core2 { 16 + pinctrl-names = "default"; 17 + pinctrl-0 = <&hsusb2_2_pins>; 18 + hsusb2_2_pins: pinmux_hsusb2_2_pins { 19 + pinctrl-single,pins = < 20 + OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */ 21 + OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */ 22 + OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */ 23 + OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */ 24 + OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */ 25 + OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */ 26 + >; 27 + }; 28 + };
-15
arch/arm/boot/dts/logicpd-som-lv.dtsi
··· 265 265 }; 266 266 }; 267 267 268 - &omap3_pmx_core2 { 269 - pinctrl-names = "default"; 270 - pinctrl-0 = <&hsusb2_2_pins>; 271 - hsusb2_2_pins: pinmux_hsusb2_2_pins { 272 - pinctrl-single,pins = < 273 - OMAP3630_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3) /* etk_d10.hsusb2_clk */ 274 - OMAP3630_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3) /* etk_d11.hsusb2_stp */ 275 - OMAP3630_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d12.hsusb2_dir */ 276 - OMAP3630_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d13.hsusb2_nxt */ 277 - OMAP3630_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d14.hsusb2_data0 */ 278 - OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3) /* etk_d15.hsusb2_data1 */ 279 - >; 280 - }; 281 - }; 282 - 283 268 &uart2 { 284 269 interrupts-extended = <&intc 73 &omap3_pmx_core OMAP3_UART2_RX>; 285 270 pinctrl-names = "default";
+2
arch/arm/boot/dts/omap3-gta04.dtsi
··· 31 31 aliases { 32 32 display0 = &lcd; 33 33 display1 = &tv0; 34 + /delete-property/ mmc2; 35 + /delete-property/ mmc3; 34 36 }; 35 37 36 38 ldo_3v3: fixedregulator {
+1 -1
arch/arm/boot/dts/sama5d3xmb.dtsi
··· 26 26 spi0: spi@f0004000 { 27 27 dmas = <0>, <0>; /* Do not use DMA for spi0 */ 28 28 29 - m25p80@0 { 29 + flash@0 { 30 30 compatible = "atmel,at25df321a"; 31 31 spi-max-frequency = <50000000>; 32 32 reg = <0>;
+1 -1
arch/arm/boot/dts/sama5d3xmb_cmp.dtsi
··· 25 25 spi0: spi@f0004000 { 26 26 dmas = <0>, <0>; /* Do not use DMA for spi0 */ 27 27 28 - m25p80@0 { 28 + flash@0 { 29 29 compatible = "atmel,at25df321a"; 30 30 spi-max-frequency = <50000000>; 31 31 reg = <0>;
+9 -9
arch/arm/boot/dts/sama7g5.dtsi
··· 601 601 #size-cells = <0>; 602 602 clocks = <&pmc PMC_TYPE_PERIPHERAL 39>; 603 603 atmel,fifo-size = <32>; 604 - dmas = <&dma0 AT91_XDMAC_DT_PERID(7)>, 605 - <&dma0 AT91_XDMAC_DT_PERID(8)>; 606 - dma-names = "rx", "tx"; 604 + dmas = <&dma0 AT91_XDMAC_DT_PERID(8)>, 605 + <&dma0 AT91_XDMAC_DT_PERID(7)>; 606 + dma-names = "tx", "rx"; 607 607 status = "disabled"; 608 608 }; 609 609 }; ··· 786 786 #size-cells = <0>; 787 787 clocks = <&pmc PMC_TYPE_PERIPHERAL 46>; 788 788 atmel,fifo-size = <32>; 789 - dmas = <&dma0 AT91_XDMAC_DT_PERID(21)>, 790 - <&dma0 AT91_XDMAC_DT_PERID(22)>; 791 - dma-names = "rx", "tx"; 789 + dmas = <&dma0 AT91_XDMAC_DT_PERID(22)>, 790 + <&dma0 AT91_XDMAC_DT_PERID(21)>; 791 + dma-names = "tx", "rx"; 792 792 status = "disabled"; 793 793 }; 794 794 }; ··· 810 810 #size-cells = <0>; 811 811 clocks = <&pmc PMC_TYPE_PERIPHERAL 47>; 812 812 atmel,fifo-size = <32>; 813 - dmas = <&dma0 AT91_XDMAC_DT_PERID(23)>, 814 - <&dma0 AT91_XDMAC_DT_PERID(24)>; 815 - dma-names = "rx", "tx"; 813 + dmas = <&dma0 AT91_XDMAC_DT_PERID(24)>, 814 + <&dma0 AT91_XDMAC_DT_PERID(23)>; 815 + dma-names = "tx", "rx"; 816 816 status = "disabled"; 817 817 }; 818 818 };
+1 -1
arch/arm/boot/dts/usb_a9263.dts
··· 60 60 spi0: spi@fffa4000 { 61 61 cs-gpios = <&pioB 15 GPIO_ACTIVE_HIGH>; 62 62 status = "okay"; 63 - mtd_dataflash@0 { 63 + flash@0 { 64 64 compatible = "atmel,at45", "atmel,dataflash"; 65 65 reg = <0>; 66 66 spi-max-frequency = <15000000>;
+1
arch/arm/configs/multi_v7_defconfig
··· 673 673 CONFIG_VIDEO_RENESAS_FDP1=m 674 674 CONFIG_VIDEO_RENESAS_JPU=m 675 675 CONFIG_VIDEO_RENESAS_VSP1=m 676 + CONFIG_VIDEO_TEGRA_VDE=m 676 677 CONFIG_V4L_TEST_DRIVERS=y 677 678 CONFIG_VIDEO_VIVID=m 678 679 CONFIG_VIDEO_ADV7180=m
+2 -1
arch/arm/configs/tegra_defconfig
··· 286 286 CONFIG_NVEC_POWER=y 287 287 CONFIG_NVEC_PAZ00=y 288 288 CONFIG_STAGING_MEDIA=y 289 - CONFIG_TEGRA_VDE=y 289 + CONFIG_V4L_MEM2MEM_DRIVERS=y 290 + CONFIG_VIDEO_TEGRA_VDE=y 290 291 CONFIG_CHROME_PLATFORMS=y 291 292 CONFIG_CROS_EC=y 292 293 CONFIG_CROS_EC_I2C=m
+2
arch/arm/mach-omap2/omap4-common.c
··· 314 314 315 315 np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic"); 316 316 gic_dist_base_addr = of_iomap(np, 0); 317 + of_node_put(np); 317 318 WARN_ON(!gic_dist_base_addr); 318 319 319 320 np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-twd-timer"); 320 321 twd_base = of_iomap(np, 0); 322 + of_node_put(np); 321 323 WARN_ON(!twd_base); 322 324 323 325 skip_errata_init:
-40
arch/arm64/boot/dts/amlogic/meson-g12b-a311d.dtsi
··· 11 11 compatible = "operating-points-v2"; 12 12 opp-shared; 13 13 14 - opp-100000000 { 15 - opp-hz = /bits/ 64 <100000000>; 16 - opp-microvolt = <731000>; 17 - }; 18 - 19 - opp-250000000 { 20 - opp-hz = /bits/ 64 <250000000>; 21 - opp-microvolt = <731000>; 22 - }; 23 - 24 - opp-500000000 { 25 - opp-hz = /bits/ 64 <500000000>; 26 - opp-microvolt = <731000>; 27 - }; 28 - 29 - opp-667000000 { 30 - opp-hz = /bits/ 64 <667000000>; 31 - opp-microvolt = <731000>; 32 - }; 33 - 34 14 opp-1000000000 { 35 15 opp-hz = /bits/ 64 <1000000000>; 36 16 opp-microvolt = <761000>; ··· 50 70 cpub_opp_table_1: opp-table-1 { 51 71 compatible = "operating-points-v2"; 52 72 opp-shared; 53 - 54 - opp-100000000 { 55 - opp-hz = /bits/ 64 <100000000>; 56 - opp-microvolt = <731000>; 57 - }; 58 - 59 - opp-250000000 { 60 - opp-hz = /bits/ 64 <250000000>; 61 - opp-microvolt = <731000>; 62 - }; 63 - 64 - opp-500000000 { 65 - opp-hz = /bits/ 64 <500000000>; 66 - opp-microvolt = <731000>; 67 - }; 68 - 69 - opp-667000000 { 70 - opp-hz = /bits/ 64 <667000000>; 71 - opp-microvolt = <731000>; 72 - }; 73 73 74 74 opp-1000000000 { 75 75 opp-hz = /bits/ 64 <1000000000>;
-40
arch/arm64/boot/dts/amlogic/meson-g12b-s922x.dtsi
··· 11 11 compatible = "operating-points-v2"; 12 12 opp-shared; 13 13 14 - opp-100000000 { 15 - opp-hz = /bits/ 64 <100000000>; 16 - opp-microvolt = <731000>; 17 - }; 18 - 19 - opp-250000000 { 20 - opp-hz = /bits/ 64 <250000000>; 21 - opp-microvolt = <731000>; 22 - }; 23 - 24 - opp-500000000 { 25 - opp-hz = /bits/ 64 <500000000>; 26 - opp-microvolt = <731000>; 27 - }; 28 - 29 - opp-667000000 { 30 - opp-hz = /bits/ 64 <667000000>; 31 - opp-microvolt = <731000>; 32 - }; 33 - 34 14 opp-1000000000 { 35 15 opp-hz = /bits/ 64 <1000000000>; 36 16 opp-microvolt = <731000>; ··· 55 75 cpub_opp_table_1: opp-table-1 { 56 76 compatible = "operating-points-v2"; 57 77 opp-shared; 58 - 59 - opp-100000000 { 60 - opp-hz = /bits/ 64 <100000000>; 61 - opp-microvolt = <751000>; 62 - }; 63 - 64 - opp-250000000 { 65 - opp-hz = /bits/ 64 <250000000>; 66 - opp-microvolt = <751000>; 67 - }; 68 - 69 - opp-500000000 { 70 - opp-hz = /bits/ 64 <500000000>; 71 - opp-microvolt = <751000>; 72 - }; 73 - 74 - opp-667000000 { 75 - opp-hz = /bits/ 64 <667000000>; 76 - opp-microvolt = <751000>; 77 - }; 78 78 79 79 opp-1000000000 { 80 80 opp-hz = /bits/ 64 <1000000000>;
+4 -4
arch/arm64/boot/dts/amlogic/meson-s4.dtsi
··· 13 13 14 14 cpu0: cpu@0 { 15 15 device_type = "cpu"; 16 - compatible = "arm,cortex-a35","arm,armv8"; 16 + compatible = "arm,cortex-a35"; 17 17 reg = <0x0 0x0>; 18 18 enable-method = "psci"; 19 19 }; 20 20 21 21 cpu1: cpu@1 { 22 22 device_type = "cpu"; 23 - compatible = "arm,cortex-a35","arm,armv8"; 23 + compatible = "arm,cortex-a35"; 24 24 reg = <0x0 0x1>; 25 25 enable-method = "psci"; 26 26 }; 27 27 28 28 cpu2: cpu@2 { 29 29 device_type = "cpu"; 30 - compatible = "arm,cortex-a35","arm,armv8"; 30 + compatible = "arm,cortex-a35"; 31 31 reg = <0x0 0x2>; 32 32 enable-method = "psci"; 33 33 }; 34 34 35 35 cpu3: cpu@3 { 36 36 device_type = "cpu"; 37 - compatible = "arm,cortex-a35","arm,armv8"; 37 + compatible = "arm,cortex-a35"; 38 38 reg = <0x0 0x3>; 39 39 enable-method = "psci"; 40 40 };
+1
arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
··· 437 437 "", 438 438 "eMMC_RST#", /* BOOT_12 */ 439 439 "eMMC_DS", /* BOOT_13 */ 440 + "", "", 440 441 /* GPIOC */ 441 442 "SD_D0_B", /* GPIOC_0 */ 442 443 "SD_D1_B", /* GPIOC_1 */
-20
arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
··· 95 95 compatible = "operating-points-v2"; 96 96 opp-shared; 97 97 98 - opp-100000000 { 99 - opp-hz = /bits/ 64 <100000000>; 100 - opp-microvolt = <730000>; 101 - }; 102 - 103 - opp-250000000 { 104 - opp-hz = /bits/ 64 <250000000>; 105 - opp-microvolt = <730000>; 106 - }; 107 - 108 - opp-500000000 { 109 - opp-hz = /bits/ 64 <500000000>; 110 - opp-microvolt = <730000>; 111 - }; 112 - 113 - opp-667000000 { 114 - opp-hz = /bits/ 64 <666666666>; 115 - opp-microvolt = <750000>; 116 - }; 117 - 118 98 opp-1000000000 { 119 99 opp-hz = /bits/ 64 <1000000000>; 120 100 opp-microvolt = <770000>;
+3 -1
arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
··· 146 146 147 147 &usbotg1 { 148 148 dr_mode = "otg"; 149 + over-current-active-low; 149 150 vbus-supply = <&reg_usb_otg1_vbus>; 150 151 status = "okay"; 151 152 }; 152 153 153 154 &usbotg2 { 154 155 dr_mode = "host"; 156 + disable-over-current; 155 157 status = "okay"; 156 158 }; 157 159 ··· 217 215 fsl,pins = < 218 216 MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 219 217 MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6 220 - MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 218 + MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6 221 219 MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6 222 220 >; 223 221 };
+3 -1
arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
··· 211 211 212 212 &usbotg1 { 213 213 dr_mode = "otg"; 214 + over-current-active-low; 214 215 vbus-supply = <&reg_usb_otg1_vbus>; 215 216 status = "okay"; 216 217 }; 217 218 218 219 &usbotg2 { 219 220 dr_mode = "host"; 221 + disable-over-current; 220 222 vbus-supply = <&reg_usb_otg2_vbus>; 221 223 status = "okay"; 222 224 }; ··· 311 309 fsl,pins = < 312 310 MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 313 311 MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6 314 - MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 312 + MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6 315 313 MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6 316 314 >; 317 315 };
+3 -1
arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
··· 238 238 239 239 &usbotg1 { 240 240 dr_mode = "otg"; 241 + over-current-active-low; 241 242 vbus-supply = <&reg_usb_otg1_vbus>; 242 243 status = "okay"; 243 244 }; 244 245 245 246 &usbotg2 { 246 247 dr_mode = "host"; 248 + disable-over-current; 247 249 vbus-supply = <&reg_usb_otg2_vbus>; 248 250 status = "okay"; 249 251 }; ··· 360 358 fsl,pins = < 361 359 MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 362 360 MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0xd6 363 - MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 361 + MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0xd6 364 362 MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0xd6 365 363 >; 366 364 };
+4
arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
··· 59 59 interrupts = <3 IRQ_TYPE_LEVEL_LOW>; 60 60 rohm,reset-snvs-powered; 61 61 62 + #clock-cells = <0>; 63 + clocks = <&osc_32k 0>; 64 + clock-output-names = "clk-32k-out"; 65 + 62 66 regulators { 63 67 buck1_reg: BUCK1 { 64 68 regulator-name = "buck1";
+5 -5
arch/arm64/boot/dts/freescale/imx8mn.dtsi
··· 293 293 ranges; 294 294 295 295 sai2: sai@30020000 { 296 - compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; 296 + compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; 297 297 reg = <0x30020000 0x10000>; 298 298 interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>; 299 299 clocks = <&clk IMX8MN_CLK_SAI2_IPG>, ··· 307 307 }; 308 308 309 309 sai3: sai@30030000 { 310 - compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; 310 + compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; 311 311 reg = <0x30030000 0x10000>; 312 312 interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>; 313 313 clocks = <&clk IMX8MN_CLK_SAI3_IPG>, ··· 321 321 }; 322 322 323 323 sai5: sai@30050000 { 324 - compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; 324 + compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; 325 325 reg = <0x30050000 0x10000>; 326 326 interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>; 327 327 clocks = <&clk IMX8MN_CLK_SAI5_IPG>, ··· 337 337 }; 338 338 339 339 sai6: sai@30060000 { 340 - compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; 340 + compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; 341 341 reg = <0x30060000 0x10000>; 342 342 interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>; 343 343 clocks = <&clk IMX8MN_CLK_SAI6_IPG>, ··· 394 394 }; 395 395 396 396 sai7: sai@300b0000 { 397 - compatible = "fsl,imx8mm-sai", "fsl,imx8mq-sai"; 397 + compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; 398 398 reg = <0x300b0000 0x10000>; 399 399 interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; 400 400 clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
+1 -1
arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
··· 253 253 #address-cells = <1>; 254 254 #size-cells = <1>; 255 255 spi-max-frequency = <84000000>; 256 - spi-tx-bus-width = <4>; 256 + spi-tx-bus-width = <1>; 257 257 spi-rx-bus-width = <4>; 258 258 }; 259 259 };
+1 -1
arch/arm64/boot/dts/freescale/imx8qm.dtsi
··· 196 196 }; 197 197 198 198 clk: clock-controller { 199 - compatible = "fsl,imx8qxp-clk", "fsl,scu-clk"; 199 + compatible = "fsl,imx8qm-clk", "fsl,scu-clk"; 200 200 #clock-cells = <2>; 201 201 }; 202 202
+4 -4
arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
··· 262 262 gpio4 { 263 263 pins = "gpio4"; 264 264 function = "32k-out1"; 265 - drive-push-pull; 265 + drive-push-pull = <1>; 266 266 }; 267 267 268 268 gpio5 { 269 269 pins = "gpio5"; 270 270 function = "gpio"; 271 - drive-push-pull; 271 + drive-push-pull = <0>; 272 272 }; 273 273 274 274 gpio6 { 275 275 pins = "gpio6"; 276 276 function = "gpio"; 277 - drive-push-pull; 277 + drive-push-pull = <1>; 278 278 }; 279 279 280 280 gpio7 { 281 281 pins = "gpio7"; 282 282 function = "gpio"; 283 - drive-push-pull; 283 + drive-push-pull = <0>; 284 284 }; 285 285 }; 286 286
+4 -4
arch/arm64/boot/dts/nvidia/tegra186-p3509-0000+p3636-0001.dts
··· 462 462 gpio4 { 463 463 pins = "gpio4"; 464 464 function = "32k-out1"; 465 - drive-push-pull; 465 + drive-push-pull = <1>; 466 466 }; 467 467 468 468 gpio5 { 469 469 pins = "gpio5"; 470 470 function = "gpio"; 471 - drive-push-pull; 471 + drive-push-pull = <0>; 472 472 }; 473 473 474 474 gpio6 { 475 475 pins = "gpio6"; 476 476 function = "gpio"; 477 - drive-push-pull; 477 + drive-push-pull = <1>; 478 478 }; 479 479 480 480 gpio7 { 481 481 pins = "gpio7"; 482 482 function = "gpio"; 483 - drive-push-pull; 483 + drive-push-pull = <1>; 484 484 }; 485 485 }; 486 486
+3 -3
arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
··· 174 174 gpio4 { 175 175 pins = "gpio4"; 176 176 function = "32k-out1"; 177 - drive-push-pull; 177 + drive-push-pull = <1>; 178 178 }; 179 179 180 180 gpio6 { 181 181 pins = "gpio6"; 182 182 function = "gpio"; 183 - drive-push-pull; 183 + drive-push-pull = <1>; 184 184 }; 185 185 186 186 gpio7 { 187 187 pins = "gpio7"; 188 188 function = "gpio"; 189 - drive-push-pull; 189 + drive-push-pull = <0>; 190 190 }; 191 191 }; 192 192
+3 -3
arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
··· 148 148 gpio4 { 149 149 pins = "gpio4"; 150 150 function = "32k-out1"; 151 - drive-push-pull; 151 + drive-push-pull = <1>; 152 152 }; 153 153 154 154 gpio6 { 155 155 pins = "gpio6"; 156 156 function = "gpio"; 157 - drive-push-pull; 157 + drive-push-pull = <1>; 158 158 }; 159 159 160 160 gpio7 { 161 161 pins = "gpio7"; 162 162 function = "gpio"; 163 - drive-push-pull; 163 + drive-push-pull = <0>; 164 164 }; 165 165 }; 166 166
+3 -3
arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
··· 59 59 gpio1 { 60 60 pins = "gpio1"; 61 61 function = "fps-out"; 62 - drive-push-pull; 62 + drive-push-pull = <1>; 63 63 maxim,active-fps-source = <MAX77620_FPS_SRC_0>; 64 64 maxim,active-fps-power-up-slot = <7>; 65 65 maxim,active-fps-power-down-slot = <0>; ··· 68 68 gpio2_3 { 69 69 pins = "gpio2", "gpio3"; 70 70 function = "fps-out"; 71 - drive-open-drain; 71 + drive-open-drain = <1>; 72 72 maxim,active-fps-source = <MAX77620_FPS_SRC_0>; 73 73 }; 74 74 ··· 80 80 gpio5_6_7 { 81 81 pins = "gpio5", "gpio6", "gpio7"; 82 82 function = "gpio"; 83 - drive-push-pull; 83 + drive-push-pull = <1>; 84 84 }; 85 85 }; 86 86
+4 -4
arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
··· 1351 1351 gpio1 { 1352 1352 pins = "gpio1"; 1353 1353 function = "fps-out"; 1354 - drive-push-pull; 1354 + drive-push-pull = <1>; 1355 1355 maxim,active-fps-source = <MAX77620_FPS_SRC_0>; 1356 1356 maxim,active-fps-power-up-slot = <7>; 1357 1357 maxim,active-fps-power-down-slot = <0>; ··· 1360 1360 gpio2 { 1361 1361 pins = "gpio2"; 1362 1362 function = "fps-out"; 1363 - drive-open-drain; 1363 + drive-open-drain = <1>; 1364 1364 maxim,active-fps-source = <MAX77620_FPS_SRC_0>; 1365 1365 }; 1366 1366 1367 1367 gpio3 { 1368 1368 pins = "gpio3"; 1369 1369 function = "fps-out"; 1370 - drive-open-drain; 1370 + drive-open-drain = <1>; 1371 1371 maxim,active-fps-source = <MAX77620_FPS_SRC_0>; 1372 1372 }; 1373 1373 ··· 1379 1379 gpio5_6_7 { 1380 1380 pins = "gpio5", "gpio6", "gpio7"; 1381 1381 function = "gpio"; 1382 - drive-push-pull; 1382 + drive-push-pull = <1>; 1383 1383 }; 1384 1384 }; 1385 1385
+4 -4
arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
··· 195 195 gpio1 { 196 196 pins = "gpio1"; 197 197 function = "fps-out"; 198 - drive-push-pull; 198 + drive-push-pull = <1>; 199 199 maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>; 200 200 maxim,active-fps-power-up-slot = <0>; 201 201 maxim,active-fps-power-down-slot = <7>; ··· 204 204 gpio2 { 205 205 pins = "gpio2"; 206 206 function = "fps-out"; 207 - drive-open-drain; 207 + drive-open-drain = <1>; 208 208 maxim,active-fps-source = <MAX77620_FPS_SRC_0>; 209 209 maxim,active-fps-power-up-slot = <0>; 210 210 maxim,active-fps-power-down-slot = <7>; ··· 213 213 gpio3 { 214 214 pins = "gpio3"; 215 215 function = "fps-out"; 216 - drive-open-drain; 216 + drive-open-drain = <1>; 217 217 maxim,active-fps-source = <MAX77620_FPS_SRC_0>; 218 218 maxim,active-fps-power-up-slot = <4>; 219 219 maxim,active-fps-power-down-slot = <3>; ··· 227 227 gpio5_6_7 { 228 228 pins = "gpio5", "gpio6", "gpio7"; 229 229 function = "gpio"; 230 - drive-push-pull; 230 + drive-push-pull = <1>; 231 231 }; 232 232 }; 233 233
+2 -2
arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
··· 1386 1386 gpio3 { 1387 1387 pins = "gpio3"; 1388 1388 function = "fps-out"; 1389 - drive-open-drain; 1389 + drive-open-drain = <1>; 1390 1390 maxim,active-fps-source = <MAX77620_FPS_SRC_0>; 1391 1391 maxim,active-fps-power-up-slot = <4>; 1392 1392 maxim,active-fps-power-down-slot = <2>; ··· 1395 1395 gpio5_6 { 1396 1396 pins = "gpio5", "gpio6"; 1397 1397 function = "gpio"; 1398 - drive-push-pull; 1398 + drive-push-pull = <1>; 1399 1399 }; 1400 1400 1401 1401 gpio4 {
+1
arch/arm64/include/asm/kvm_emulate.h
··· 40 40 void kvm_inject_vabt(struct kvm_vcpu *vcpu); 41 41 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); 42 42 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); 43 + void kvm_inject_size_fault(struct kvm_vcpu *vcpu); 43 44 44 45 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu); 45 46
+1 -1
arch/arm64/kernel/elfcore.c
··· 95 95 for_each_mte_vma(current, vma) { 96 96 struct elf_phdr phdr; 97 97 98 - phdr.p_type = PT_ARM_MEMTAG_MTE; 98 + phdr.p_type = PT_AARCH64_MEMTAG_MTE; 99 99 phdr.p_offset = offset; 100 100 phdr.p_vaddr = vma->vm_start; 101 101 phdr.p_paddr = 0;
+8 -8
arch/arm64/kvm/hyp/nvhe/host.S
··· 198 198 invalid_host_el2_vect // FIQ EL2h 199 199 invalid_host_el2_vect // Error EL2h 200 200 201 - host_el1_sync_vect // Synchronous 64-bit EL1 202 - invalid_host_el1_vect // IRQ 64-bit EL1 203 - invalid_host_el1_vect // FIQ 64-bit EL1 204 - invalid_host_el1_vect // Error 64-bit EL1 201 + host_el1_sync_vect // Synchronous 64-bit EL1/EL0 202 + invalid_host_el1_vect // IRQ 64-bit EL1/EL0 203 + invalid_host_el1_vect // FIQ 64-bit EL1/EL0 204 + invalid_host_el1_vect // Error 64-bit EL1/EL0 205 205 206 - invalid_host_el1_vect // Synchronous 32-bit EL1 207 - invalid_host_el1_vect // IRQ 32-bit EL1 208 - invalid_host_el1_vect // FIQ 32-bit EL1 209 - invalid_host_el1_vect // Error 32-bit EL1 206 + host_el1_sync_vect // Synchronous 32-bit EL1/EL0 207 + invalid_host_el1_vect // IRQ 32-bit EL1/EL0 208 + invalid_host_el1_vect // FIQ 32-bit EL1/EL0 209 + invalid_host_el1_vect // Error 32-bit EL1/EL0 210 210 SYM_CODE_END(__kvm_hyp_host_vector) 211 211 212 212 /*
+28
arch/arm64/kvm/inject_fault.c
··· 145 145 inject_abt64(vcpu, true, addr); 146 146 } 147 147 148 + void kvm_inject_size_fault(struct kvm_vcpu *vcpu) 149 + { 150 + unsigned long addr, esr; 151 + 152 + addr = kvm_vcpu_get_fault_ipa(vcpu); 153 + addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0); 154 + 155 + if (kvm_vcpu_trap_is_iabt(vcpu)) 156 + kvm_inject_pabt(vcpu, addr); 157 + else 158 + kvm_inject_dabt(vcpu, addr); 159 + 160 + /* 161 + * If AArch64 or LPAE, set FSC to 0 to indicate an Address 162 + * Size Fault at level 0, as if exceeding PARange. 163 + * 164 + * Non-LPAE guests will only get the external abort, as there 165 + * is no way to to describe the ASF. 166 + */ 167 + if (vcpu_el1_is_32bit(vcpu) && 168 + !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE)) 169 + return; 170 + 171 + esr = vcpu_read_sys_reg(vcpu, ESR_EL1); 172 + esr &= ~GENMASK_ULL(5, 0); 173 + vcpu_write_sys_reg(vcpu, esr, ESR_EL1); 174 + } 175 + 148 176 /** 149 177 * kvm_inject_undefined - inject an undefined instruction into the guest 150 178 * @vcpu: The vCPU in which to inject the exception
+19
arch/arm64/kvm/mmu.c
··· 1337 1337 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); 1338 1338 is_iabt = kvm_vcpu_trap_is_iabt(vcpu); 1339 1339 1340 + if (fault_status == FSC_FAULT) { 1341 + /* Beyond sanitised PARange (which is the IPA limit) */ 1342 + if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) { 1343 + kvm_inject_size_fault(vcpu); 1344 + return 1; 1345 + } 1346 + 1347 + /* Falls between the IPA range and the PARange? */ 1348 + if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) { 1349 + fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0); 1350 + 1351 + if (is_iabt) 1352 + kvm_inject_pabt(vcpu, fault_ipa); 1353 + else 1354 + kvm_inject_dabt(vcpu, fault_ipa); 1355 + return 1; 1356 + } 1357 + } 1358 + 1340 1359 /* Synchronous External Abort? */ 1341 1360 if (kvm_vcpu_abt_issea(vcpu)) { 1342 1361 /*
+22 -1
arch/arm64/kvm/pmu-emul.c
··· 177 177 struct kvm_pmu *pmu = &vcpu->arch.pmu; 178 178 struct kvm_pmc *pmc = &pmu->pmc[select_idx]; 179 179 180 + if (!kvm_vcpu_has_pmu(vcpu)) 181 + return 0; 182 + 180 183 counter = kvm_pmu_get_pair_counter_value(vcpu, pmc); 181 184 182 185 if (kvm_pmu_pmc_is_chained(pmc) && ··· 200 197 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) 201 198 { 202 199 u64 reg; 200 + 201 + if (!kvm_vcpu_has_pmu(vcpu)) 202 + return; 203 203 204 204 reg = (select_idx == ARMV8_PMU_CYCLE_IDX) 205 205 ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx; ··· 328 322 struct kvm_pmu *pmu = &vcpu->arch.pmu; 329 323 struct kvm_pmc *pmc; 330 324 325 + if (!kvm_vcpu_has_pmu(vcpu)) 326 + return; 327 + 331 328 if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val) 332 329 return; 333 330 ··· 366 357 struct kvm_pmu *pmu = &vcpu->arch.pmu; 367 358 struct kvm_pmc *pmc; 368 359 369 - if (!val) 360 + if (!kvm_vcpu_has_pmu(vcpu) || !val) 370 361 return; 371 362 372 363 for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { ··· 536 527 struct kvm_pmu *pmu = &vcpu->arch.pmu; 537 528 int i; 538 529 530 + if (!kvm_vcpu_has_pmu(vcpu)) 531 + return; 532 + 539 533 if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) 540 534 return; 541 535 ··· 587 575 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) 588 576 { 589 577 int i; 578 + 579 + if (!kvm_vcpu_has_pmu(vcpu)) 580 + return; 590 581 591 582 if (val & ARMV8_PMU_PMCR_E) { 592 583 kvm_pmu_enable_counter_mask(vcpu, ··· 754 739 { 755 740 u64 reg, mask; 756 741 742 + if (!kvm_vcpu_has_pmu(vcpu)) 743 + return; 744 + 757 745 mask = ARMV8_PMU_EVTYPE_MASK; 758 746 mask &= ~ARMV8_PMU_EVTYPE_EVENT; 759 747 mask |= kvm_pmu_event_mask(vcpu->kvm); ··· 844 826 unsigned long *bmap = vcpu->kvm->arch.pmu_filter; 845 827 u64 val, mask = 0; 846 828 int base, i, nr_events; 829 + 830 + if (!kvm_vcpu_has_pmu(vcpu)) 831 + return 0; 847 832 848 833 if (!pmceid1) { 849 834 val = read_sysreg(pmceid0_el0);
+2 -1
arch/arm64/kvm/psci.c
··· 181 181 182 182 memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); 183 183 vcpu->run->system_event.type = type; 184 - vcpu->run->system_event.flags = flags; 184 + vcpu->run->system_event.ndata = 1; 185 + vcpu->run->system_event.data[0] = flags; 185 186 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 186 187 } 187 188
+14 -2
arch/riscv/boot/dts/microchip/microchip-mpfs-fabric.dtsi
··· 7 7 reg = <0x0 0x41000000 0x0 0xF0>; 8 8 microchip,sync-update-mask = /bits/ 32 <0>; 9 9 #pwm-cells = <2>; 10 - clocks = <&clkcfg CLK_FIC3>; 10 + clocks = <&fabric_clk3>; 11 11 status = "disabled"; 12 12 }; 13 13 ··· 16 16 reg = <0x0 0x44000000 0x0 0x1000>; 17 17 #address-cells = <1>; 18 18 #size-cells = <0>; 19 - clocks = <&clkcfg CLK_FIC3>; 19 + clocks = <&fabric_clk3>; 20 20 interrupt-parent = <&plic>; 21 21 interrupts = <122>; 22 22 clock-frequency = <100000>; 23 23 status = "disabled"; 24 + }; 25 + 26 + fabric_clk3: fabric-clk3 { 27 + compatible = "fixed-clock"; 28 + #clock-cells = <0>; 29 + clock-frequency = <62500000>; 30 + }; 31 + 32 + fabric_clk1: fabric-clk1 { 33 + compatible = "fixed-clock"; 34 + #clock-cells = <0>; 35 + clock-frequency = <125000000>; 24 36 }; 25 37 };
+1 -1
arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
··· 45 45 }; 46 46 47 47 &refclk { 48 - clock-frequency = <600000000>; 48 + clock-frequency = <125000000>; 49 49 }; 50 50 51 51 &mmuart1 {
+5 -5
arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
··· 141 141 }; 142 142 }; 143 143 144 - refclk: msspllclk { 144 + refclk: mssrefclk { 145 145 compatible = "fixed-clock"; 146 146 #clock-cells = <0>; 147 147 }; ··· 190 190 191 191 clkcfg: clkcfg@20002000 { 192 192 compatible = "microchip,mpfs-clkcfg"; 193 - reg = <0x0 0x20002000 0x0 0x1000>; 193 + reg = <0x0 0x20002000 0x0 0x1000>, <0x0 0x3E001000 0x0 0x1000>; 194 194 clocks = <&refclk>; 195 195 #clock-cells = <1>; 196 196 }; ··· 393 393 reg = <0x0 0x20124000 0x0 0x1000>; 394 394 interrupt-parent = <&plic>; 395 395 interrupts = <80>, <81>; 396 - clocks = <&clkcfg CLK_RTC>; 397 - clock-names = "rtc"; 396 + clocks = <&clkcfg CLK_RTC>, <&clkcfg CLK_RTCREF>; 397 + clock-names = "rtc", "rtcref"; 398 398 status = "disabled"; 399 399 }; 400 400 ··· 424 424 <0 0 0 3 &pcie_intc 2>, 425 425 <0 0 0 4 &pcie_intc 3>; 426 426 interrupt-map-mask = <0 0 0 7>; 427 - clocks = <&clkcfg CLK_FIC0>, <&clkcfg CLK_FIC1>, <&clkcfg CLK_FIC3>; 427 + clocks = <&fabric_clk1>, <&fabric_clk1>, <&fabric_clk3>; 428 428 clock-names = "fic0", "fic1", "fic3"; 429 429 ranges = <0x3000000 0x0 0x8000000 0x20 0x8000000 0x0 0x80000000>; 430 430 msi-parent = <&pcie>;
+1
arch/riscv/configs/defconfig
··· 101 101 CONFIG_VIRTIO_INPUT=y 102 102 CONFIG_VIRTIO_MMIO=y 103 103 CONFIG_RPMSG_CHAR=y 104 + CONFIG_RPMSG_CTRL=y 104 105 CONFIG_RPMSG_VIRTIO=y 105 106 CONFIG_EXT4_FS=y 106 107 CONFIG_EXT4_FS_POSIX_ACL=y
+1
arch/riscv/configs/rv32_defconfig
··· 93 93 CONFIG_VIRTIO_INPUT=y 94 94 CONFIG_VIRTIO_MMIO=y 95 95 CONFIG_RPMSG_CHAR=y 96 + CONFIG_RPMSG_CTRL=y 96 97 CONFIG_RPMSG_VIRTIO=y 97 98 CONFIG_EXT4_FS=y 98 99 CONFIG_EXT4_FS_POSIX_ACL=y
+1 -1
arch/riscv/kernel/patch.c
··· 104 104 struct patch_insn *patch = data; 105 105 int ret = 0; 106 106 107 - if (atomic_inc_return(&patch->cpu_count) == 1) { 107 + if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) { 108 108 ret = 109 109 patch_text_nosync(patch->addr, &patch->insn, 110 110 GET_INSN_LENGTH(patch->insn));
+3 -2
arch/riscv/kvm/vcpu_sbi.c
··· 83 83 84 84 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu, 85 85 struct kvm_run *run, 86 - u32 type, u64 flags) 86 + u32 type, u64 reason) 87 87 { 88 88 unsigned long i; 89 89 struct kvm_vcpu *tmp; ··· 94 94 95 95 memset(&run->system_event, 0, sizeof(run->system_event)); 96 96 run->system_event.type = type; 97 - run->system_event.flags = flags; 97 + run->system_event.ndata = 1; 98 + run->system_event.data[0] = reason; 98 99 run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 99 100 } 100 101
+1 -1
arch/x86/Kconfig
··· 1866 1866 code with them to make this happen. 1867 1867 1868 1868 In addition to building the kernel with IBT, seal all functions that 1869 - are not indirect call targets, avoiding them ever becomming one. 1869 + are not indirect call targets, avoiding them ever becoming one. 1870 1870 1871 1871 This requires LTO like objtool runs and will slow down the build. It 1872 1872 does significantly reduce the number of ENDBR instructions in the
+3
arch/x86/entry/entry_64.S
··· 337 337 338 338 call \cfunc 339 339 340 + /* For some configurations \cfunc ends up being a noreturn. */ 341 + REACHABLE 342 + 340 343 jmp error_return 341 344 .endm 342 345
+3
arch/x86/include/asm/intel-family.h
··· 26 26 * _G - parts with extra graphics on 27 27 * _X - regular server parts 28 28 * _D - micro server parts 29 + * _N,_P - other mobile parts 29 30 * 30 31 * Historical OPTDIFFs: 31 32 * ··· 108 107 109 108 #define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */ 110 109 #define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */ 110 + #define INTEL_FAM6_ALDERLAKE_N 0xBE 111 111 112 112 #define INTEL_FAM6_RAPTORLAKE 0xB7 113 + #define INTEL_FAM6_RAPTORLAKE_P 0xBA 113 114 114 115 /* "Small Core" Processors (Atom) */ 115 116
+2
arch/x86/include/asm/microcode.h
··· 131 131 extern void load_ucode_ap(void); 132 132 void reload_early_microcode(void); 133 133 extern bool initrd_gone; 134 + void microcode_bsp_resume(void); 134 135 #else 135 136 static inline void __init load_ucode_bsp(void) { } 136 137 static inline void load_ucode_ap(void) { } 137 138 static inline void reload_early_microcode(void) { } 139 + static inline void microcode_bsp_resume(void) { } 138 140 #endif 139 141 140 142 #endif /* _ASM_X86_MICROCODE_H */
-4
arch/x86/include/asm/pgtable_types.h
··· 559 559 extern pte_t *lookup_address(unsigned long address, unsigned int *level); 560 560 extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, 561 561 unsigned int *level); 562 - 563 - struct mm_struct; 564 - extern pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address, 565 - unsigned int *level); 566 562 extern pmd_t *lookup_pmd_address(unsigned long address); 567 563 extern phys_addr_t slow_virt_to_phys(void *__address); 568 564 extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
+1
arch/x86/include/asm/static_call.h
··· 26 26 ".align 4 \n" \ 27 27 ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \ 28 28 STATIC_CALL_TRAMP_STR(name) ": \n" \ 29 + ANNOTATE_NOENDBR \ 29 30 insns " \n" \ 30 31 ".byte 0x53, 0x43, 0x54 \n" \ 31 32 ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
+3 -3
arch/x86/kernel/cpu/microcode/core.c
··· 758 758 }; 759 759 760 760 /** 761 - * mc_bp_resume - Update boot CPU microcode during resume. 761 + * microcode_bsp_resume - Update boot CPU microcode during resume. 762 762 */ 763 - static void mc_bp_resume(void) 763 + void microcode_bsp_resume(void) 764 764 { 765 765 int cpu = smp_processor_id(); 766 766 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; ··· 772 772 } 773 773 774 774 static struct syscore_ops mc_syscore_ops = { 775 - .resume = mc_bp_resume, 775 + .resume = microcode_bsp_resume, 776 776 }; 777 777 778 778 static int mc_cpu_starting(unsigned int cpu)
+4 -4
arch/x86/kernel/unwind_orc.c
··· 339 339 struct stack_info *info = &state->stack_info; 340 340 void *addr = (void *)_addr; 341 341 342 - if (!on_stack(info, addr, len) && 343 - (get_stack_info(addr, state->task, info, &state->stack_mask))) 344 - return false; 342 + if (on_stack(info, addr, len)) 343 + return true; 345 344 346 - return true; 345 + return !get_stack_info(addr, state->task, info, &state->stack_mask) && 346 + on_stack(info, addr, len); 347 347 } 348 348 349 349 static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
+14 -5
arch/x86/kvm/cpuid.c
··· 1085 1085 case 0x80000000: 1086 1086 entry->eax = min(entry->eax, 0x80000021); 1087 1087 /* 1088 - * Serializing LFENCE is reported in a multitude of ways, 1089 - * and NullSegClearsBase is not reported in CPUID on Zen2; 1090 - * help userspace by providing the CPUID leaf ourselves. 1088 + * Serializing LFENCE is reported in a multitude of ways, and 1089 + * NullSegClearsBase is not reported in CPUID on Zen2; help 1090 + * userspace by providing the CPUID leaf ourselves. 1091 + * 1092 + * However, only do it if the host has CPUID leaf 0x8000001d. 1093 + * QEMU thinks that it can query the host blindly for that 1094 + * CPUID leaf if KVM reports that it supports 0x8000001d or 1095 + * above. The processor merrily returns values from the 1096 + * highest Intel leaf which QEMU tries to use as the guest's 1097 + * 0x8000001d. Even worse, this can result in an infinite 1098 + * loop if said highest leaf has no subleaves indexed by ECX. 1091 1099 */ 1092 - if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) 1093 - || !static_cpu_has_bug(X86_BUG_NULL_SEG)) 1100 + if (entry->eax >= 0x8000001d && 1101 + (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) 1102 + || !static_cpu_has_bug(X86_BUG_NULL_SEG))) 1094 1103 entry->eax = max(entry->eax, 0x80000021); 1095 1104 break; 1096 1105 case 0x80000001:
+24
arch/x86/kvm/mmu.h
··· 65 65 return ((2ULL << (e - s)) - 1) << s; 66 66 } 67 67 68 + /* 69 + * The number of non-reserved physical address bits irrespective of features 70 + * that repurpose legal bits, e.g. MKTME. 71 + */ 72 + extern u8 __read_mostly shadow_phys_bits; 73 + 74 + static inline gfn_t kvm_mmu_max_gfn(void) 75 + { 76 + /* 77 + * Note that this uses the host MAXPHYADDR, not the guest's. 78 + * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR; 79 + * assuming KVM is running on bare metal, guest accesses beyond 80 + * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit 81 + * (either EPT Violation/Misconfig or #NPF), and so KVM will never 82 + * install a SPTE for such addresses. If KVM is running as a VM 83 + * itself, on the other hand, it might see a MAXPHYADDR that is less 84 + * than hardware's real MAXPHYADDR. Using the host MAXPHYADDR 85 + * disallows such SPTEs entirely and simplifies the TDP MMU. 86 + */ 87 + int max_gpa_bits = likely(tdp_enabled) ? shadow_phys_bits : 52; 88 + 89 + return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1; 90 + } 91 + 68 92 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask); 69 93 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only); 70 94
+50 -7
arch/x86/kvm/mmu/mmu.c
··· 2804 2804 const struct kvm_memory_slot *slot) 2805 2805 { 2806 2806 unsigned long hva; 2807 - pte_t *pte; 2808 - int level; 2807 + unsigned long flags; 2808 + int level = PG_LEVEL_4K; 2809 + pgd_t pgd; 2810 + p4d_t p4d; 2811 + pud_t pud; 2812 + pmd_t pmd; 2809 2813 2810 2814 if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn)) 2811 2815 return PG_LEVEL_4K; ··· 2824 2820 */ 2825 2821 hva = __gfn_to_hva_memslot(slot, gfn); 2826 2822 2827 - pte = lookup_address_in_mm(kvm->mm, hva, &level); 2828 - if (unlikely(!pte)) 2829 - return PG_LEVEL_4K; 2823 + /* 2824 + * Lookup the mapping level in the current mm. The information 2825 + * may become stale soon, but it is safe to use as long as 2826 + * 1) mmu_notifier_retry was checked after taking mmu_lock, and 2827 + * 2) mmu_lock is taken now. 2828 + * 2829 + * We still need to disable IRQs to prevent concurrent tear down 2830 + * of page tables. 2831 + */ 2832 + local_irq_save(flags); 2830 2833 2834 + pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); 2835 + if (pgd_none(pgd)) 2836 + goto out; 2837 + 2838 + p4d = READ_ONCE(*p4d_offset(&pgd, hva)); 2839 + if (p4d_none(p4d) || !p4d_present(p4d)) 2840 + goto out; 2841 + 2842 + pud = READ_ONCE(*pud_offset(&p4d, hva)); 2843 + if (pud_none(pud) || !pud_present(pud)) 2844 + goto out; 2845 + 2846 + if (pud_large(pud)) { 2847 + level = PG_LEVEL_1G; 2848 + goto out; 2849 + } 2850 + 2851 + pmd = READ_ONCE(*pmd_offset(&pud, hva)); 2852 + if (pmd_none(pmd) || !pmd_present(pmd)) 2853 + goto out; 2854 + 2855 + if (pmd_large(pmd)) 2856 + level = PG_LEVEL_2M; 2857 + 2858 + out: 2859 + local_irq_restore(flags); 2831 2860 return level; 2832 2861 } 2833 2862 ··· 3029 2992 /* 3030 2993 * If MMIO caching is disabled, emulate immediately without 3031 2994 * touching the shadow page tables as attempting to install an 3032 2995 * MMIO SPTE will just be an expensive nop. Do not cache MMIO
2996 + * whose gfn is greater than host.MAXPHYADDR, any guest that 2997 + * generates such gfns is running nested and is being tricked 2998 + * by L0 userspace (you can observe gfn > L1.MAXPHYADDR if 2999 + * and only if L1's MAXPHYADDR is inaccurate with respect to 3000 + * the hardware's). 3033 3001 */ 3034 - if (unlikely(!shadow_mmio_value)) { 3002 + if (unlikely(!shadow_mmio_value) || 3003 + unlikely(fault->gfn > kvm_mmu_max_gfn())) { 3035 3004 *ret_val = RET_PF_EMULATE; 3036 3005 return true; 3037 3006 }
-6
arch/x86/kvm/mmu/spte.h
··· 201 201 */ 202 202 extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; 203 203 204 - /* 205 - * The number of non-reserved physical address bits irrespective of features 206 - * that repurpose legal bits, e.g. MKTME. 207 - */ 208 - extern u8 __read_mostly shadow_phys_bits; 209 - 210 204 static inline bool is_mmio_spte(u64 spte) 211 205 { 212 206 return (spte & shadow_mmio_mask) == shadow_mmio_value &&
+8 -7
arch/x86/kvm/mmu/tdp_mmu.c
··· 815 815 return iter->yielded; 816 816 } 817 817 818 - static inline gfn_t tdp_mmu_max_gfn_host(void) 818 + static inline gfn_t tdp_mmu_max_gfn_exclusive(void) 819 819 { 820 820 /* 821 - * Bound TDP MMU walks at host.MAXPHYADDR, guest accesses beyond that 822 - * will hit a #PF(RSVD) and never hit an EPT Violation/Misconfig / #NPF, 823 - * and so KVM will never install a SPTE for such addresses. 821 + * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with 822 + * a gpa range that would exceed the max gfn, and KVM does not create 823 + * MMIO SPTEs for "impossible" gfns, instead sending such accesses down 824 + * the slow emulation path every time. 824 825 */ 825 - return 1ULL << (shadow_phys_bits - PAGE_SHIFT); 826 + return kvm_mmu_max_gfn() + 1; 826 827 } 827 828 828 829 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, ··· 831 830 { 832 831 struct tdp_iter iter; 833 832 834 - gfn_t end = tdp_mmu_max_gfn_host(); 833 + gfn_t end = tdp_mmu_max_gfn_exclusive(); 835 834 gfn_t start = 0; 836 835 837 836 for_each_tdp_pte_min_level(iter, root, zap_level, start, end) { ··· 924 923 { 925 924 struct tdp_iter iter; 926 925 927 - end = min(end, tdp_mmu_max_gfn_host()); 926 + end = min(end, tdp_mmu_max_gfn_exclusive()); 928 927 929 928 lockdep_assert_held_write(&kvm->mmu_lock); 930 929
+7 -1
arch/x86/kvm/x86.c
··· 10020 10020 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { 10021 10021 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10022 10022 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; 10023 + vcpu->run->system_event.ndata = 0; 10023 10024 r = 0; 10024 10025 goto out; 10025 10026 } 10026 10027 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { 10027 10028 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; 10028 10029 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; 10030 + vcpu->run->system_event.ndata = 0; 10029 10031 r = 0; 10030 10032 goto out; 10031 10033 } ··· 12011 12009 struct kvm_memory_slot *new, 12012 12010 enum kvm_mr_change change) 12013 12011 { 12014 - if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) 12012 + if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) { 12013 + if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn()) 12014 + return -EINVAL; 12015 + 12015 12016 return kvm_alloc_memslot_metadata(kvm, new); 12017 + } 12016 12018 12017 12019 if (change == KVM_MR_FLAGS_ONLY) 12018 12020 memcpy(&new->arch, &old->arch, sizeof(old->arch));
+52 -35
arch/x86/lib/copy_user_64.S
··· 53 53 SYM_FUNC_START(copy_user_generic_unrolled) 54 54 ASM_STAC 55 55 cmpl $8,%edx 56 - jb 20f /* less then 8 bytes, go to byte copy loop */ 56 + jb .Lcopy_user_short_string_bytes 57 57 ALIGN_DESTINATION 58 58 movl %edx,%ecx 59 59 andl $63,%edx 60 60 shrl $6,%ecx 61 - jz .L_copy_short_string 61 + jz copy_user_short_string 62 62 1: movq (%rsi),%r8 63 63 2: movq 1*8(%rsi),%r9 64 64 3: movq 2*8(%rsi),%r10 ··· 79 79 leaq 64(%rdi),%rdi 80 80 decl %ecx 81 81 jnz 1b 82 - .L_copy_short_string: 83 - movl %edx,%ecx 84 - andl $7,%edx 85 - shrl $3,%ecx 86 - jz 20f 87 - 18: movq (%rsi),%r8 88 - 19: movq %r8,(%rdi) 89 - leaq 8(%rsi),%rsi 90 - leaq 8(%rdi),%rdi 91 - decl %ecx 92 - jnz 18b 93 - 20: andl %edx,%edx 94 - jz 23f 95 - movl %edx,%ecx 96 - 21: movb (%rsi),%al 97 - 22: movb %al,(%rdi) 98 - incq %rsi 99 - incq %rdi 100 - decl %ecx 101 - jnz 21b 102 - 23: xor %eax,%eax 103 - ASM_CLAC 104 - RET 82 + jmp copy_user_short_string 105 83 106 84 30: shll $6,%ecx 107 85 addl %ecx,%edx 108 - jmp 60f 109 - 40: leal (%rdx,%rcx,8),%edx 110 - jmp 60f 111 - 50: movl %ecx,%edx 112 - 60: jmp .Lcopy_user_handle_tail /* ecx is zerorest also */ 86 + jmp .Lcopy_user_handle_tail 113 87 114 88 _ASM_EXTABLE_CPY(1b, 30b) 115 89 _ASM_EXTABLE_CPY(2b, 30b) ··· 101 127 _ASM_EXTABLE_CPY(14b, 30b) 102 128 _ASM_EXTABLE_CPY(15b, 30b) 103 129 _ASM_EXTABLE_CPY(16b, 30b) 104 - _ASM_EXTABLE_CPY(18b, 40b) 105 - _ASM_EXTABLE_CPY(19b, 40b) 106 - _ASM_EXTABLE_CPY(21b, 50b) 107 - _ASM_EXTABLE_CPY(22b, 50b) 108 130 SYM_FUNC_END(copy_user_generic_unrolled) 109 131 EXPORT_SYMBOL(copy_user_generic_unrolled) 110 132 ··· 161 191 SYM_FUNC_START(copy_user_enhanced_fast_string) 162 192 ASM_STAC 163 193 /* CPUs without FSRM should avoid rep movsb for short copies */ 164 - ALTERNATIVE "cmpl $64, %edx; jb .L_copy_short_string", "", X86_FEATURE_FSRM 194 + ALTERNATIVE "cmpl $64, %edx; jb copy_user_short_string", "", X86_FEATURE_FSRM 165 195 movl %edx,%ecx 166 196 1: rep movsb 167 197 xorl %eax,%eax ··· 212 242 jmp 
.Lcopy_user_handle_tail 213 243 214 244 SYM_CODE_END(.Lcopy_user_handle_tail) 245 + 246 + /* 247 + * Finish memcpy of less than 64 bytes. #AC should already be set. 248 + * 249 + * Input: 250 + * rdi destination 251 + * rsi source 252 + * rdx count (< 64) 253 + * 254 + * Output: 255 + * eax uncopied bytes or 0 if successful. 256 + */ 257 + SYM_CODE_START_LOCAL(copy_user_short_string) 258 + movl %edx,%ecx 259 + andl $7,%edx 260 + shrl $3,%ecx 261 + jz .Lcopy_user_short_string_bytes 262 + 18: movq (%rsi),%r8 263 + 19: movq %r8,(%rdi) 264 + leaq 8(%rsi),%rsi 265 + leaq 8(%rdi),%rdi 266 + decl %ecx 267 + jnz 18b 268 + .Lcopy_user_short_string_bytes: 269 + andl %edx,%edx 270 + jz 23f 271 + movl %edx,%ecx 272 + 21: movb (%rsi),%al 273 + 22: movb %al,(%rdi) 274 + incq %rsi 275 + incq %rdi 276 + decl %ecx 277 + jnz 21b 278 + 23: xor %eax,%eax 279 + ASM_CLAC 280 + RET 281 + 282 + 40: leal (%rdx,%rcx,8),%edx 283 + jmp 60f 284 + 50: movl %ecx,%edx /* ecx is zerorest also */ 285 + 60: jmp .Lcopy_user_handle_tail 286 + 287 + _ASM_EXTABLE_CPY(18b, 40b) 288 + _ASM_EXTABLE_CPY(19b, 40b) 289 + _ASM_EXTABLE_CPY(21b, 50b) 290 + _ASM_EXTABLE_CPY(22b, 50b) 291 + SYM_CODE_END(copy_user_short_string) 215 292 216 293 /* 217 294 * copy_user_nocache - Uncached memory copy with exception handling
+4
arch/x86/lib/putuser.S
··· 48 48 cmp %_ASM_BX,%_ASM_CX 49 49 jae .Lbad_put_user 50 50 SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL) 51 + ENDBR 51 52 ASM_STAC 52 53 1: movb %al,(%_ASM_CX) 53 54 xor %ecx,%ecx ··· 63 62 cmp %_ASM_BX,%_ASM_CX 64 63 jae .Lbad_put_user 65 64 SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL) 65 + ENDBR 66 66 ASM_STAC 67 67 2: movw %ax,(%_ASM_CX) 68 68 xor %ecx,%ecx ··· 78 76 cmp %_ASM_BX,%_ASM_CX 79 77 jae .Lbad_put_user 80 78 SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL) 79 + ENDBR 81 80 ASM_STAC 82 81 3: movl %eax,(%_ASM_CX) 83 82 xor %ecx,%ecx ··· 93 90 cmp %_ASM_BX,%_ASM_CX 94 91 jae .Lbad_put_user 95 92 SYM_INNER_LABEL(__put_user_nocheck_8, SYM_L_GLOBAL) 93 + ENDBR 96 94 ASM_STAC 97 95 4: mov %_ASM_AX,(%_ASM_CX) 98 96 #ifdef CONFIG_X86_32
+1 -1
arch/x86/lib/retpoline.S
··· 31 31 .align RETPOLINE_THUNK_SIZE 32 32 SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL) 33 33 UNWIND_HINT_EMPTY 34 + ANNOTATE_NOENDBR 34 35 35 36 ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \ 36 37 __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \ ··· 56 55 57 56 .align RETPOLINE_THUNK_SIZE 58 57 SYM_CODE_START(__x86_indirect_thunk_array) 59 - ANNOTATE_NOENDBR // apply_retpolines 60 58 61 59 #define GEN(reg) THUNK reg 62 60 #include <asm/GEN-for-each-reg.h>
-11
arch/x86/mm/pat/set_memory.c
··· 638 638 } 639 639 EXPORT_SYMBOL_GPL(lookup_address); 640 640 641 - /* 642 - * Lookup the page table entry for a virtual address in a given mm. Return a 643 - * pointer to the entry and the level of the mapping. 644 - */ 645 - pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address, 646 - unsigned int *level) 647 - { 648 - return lookup_address_in_pgd(pgd_offset(mm, address), address, level); 649 - } 650 - EXPORT_SYMBOL_GPL(lookup_address_in_mm); 651 - 652 641 static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, 653 642 unsigned int *level) 654 643 {
+5 -1
arch/x86/pci/xen.c
··· 467 467 else 468 468 xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs; 469 469 xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs; 470 - pci_msi_ignore_mask = 1; 471 470 } else if (xen_hvm_domain()) { 472 471 xen_msi_ops.setup_msi_irqs = xen_hvm_setup_msi_irqs; 473 472 xen_msi_ops.teardown_msi_irqs = xen_teardown_msi_irqs; ··· 480 481 * in allocating the native domain and never use it. 481 482 */ 482 483 x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain; 484 + /* 485 + * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely 486 + * controlled by the hypervisor. 487 + */ 488 + pci_msi_ignore_mask = 1; 483 489 } 484 490 485 491 #else /* CONFIG_PCI_MSI */
+1
arch/x86/platform/pvh/head.S
··· 50 50 #define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8) 51 51 52 52 SYM_CODE_START_LOCAL(pvh_start_xen) 53 + UNWIND_HINT_EMPTY 53 54 cld 54 55 55 56 lgdt (_pa(gdt))
+9 -1
arch/x86/power/cpu.c
··· 25 25 #include <asm/cpu.h> 26 26 #include <asm/mmu_context.h> 27 27 #include <asm/cpu_device_id.h> 28 + #include <asm/microcode.h> 28 29 29 30 #ifdef CONFIG_X86_32 30 31 __visible unsigned long saved_context_ebx; ··· 263 262 x86_platform.restore_sched_clock_state(); 264 263 mtrr_bp_restore(); 265 264 perf_restore_debug_store(); 266 - msr_restore_context(ctxt); 267 265 268 266 c = &cpu_data(smp_processor_id()); 269 267 if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL)) 270 268 init_ia32_feat_ctl(c); 269 + 270 + microcode_bsp_resume(); 271 + 272 + /* 273 + * This needs to happen after the microcode has been updated upon resume 274 + * because some of the MSRs are "emulated" in microcode. 275 + */ 276 + msr_restore_context(ctxt); 271 277 } 272 278 273 279 /* Needed by apm.c */
+1
arch/x86/xen/xen-head.S
··· 45 45 __INIT 46 46 SYM_CODE_START(startup_xen) 47 47 UNWIND_HINT_EMPTY 48 + ANNOTATE_NOENDBR 48 49 cld 49 50 50 51 /* Clear .bss */
+9 -3
block/bfq-iosched.c
··· 569 569 struct bfq_entity *entity = &bfqq->entity; 570 570 struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH]; 571 571 struct bfq_entity **entities = inline_entities; 572 - int depth, level; 572 + int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH; 573 573 int class_idx = bfqq->ioprio_class - 1; 574 574 struct bfq_sched_data *sched_data; 575 575 unsigned long wsum; ··· 578 578 if (!entity->on_st_or_in_serv) 579 579 return false; 580 580 581 + retry: 582 + spin_lock_irq(&bfqd->lock); 581 583 /* +1 for bfqq entity, root cgroup not included */ 582 584 depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1; 583 - if (depth > BFQ_LIMIT_INLINE_DEPTH) { 585 + if (depth > alloc_depth) { 586 + spin_unlock_irq(&bfqd->lock); 587 + if (entities != inline_entities) 588 + kfree(entities); 584 589 entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO); 585 590 if (!entities) 586 591 return false; 592 + alloc_depth = depth; 593 + goto retry; 587 594 } 588 595 589 - spin_lock_irq(&bfqd->lock); 590 596 sched_data = entity->sched_data; 591 597 /* Gather our ancestors as we need to traverse them in reverse order */ 592 598 level = 0;
+11 -1
block/blk-iocost.c
··· 2322 2322 iocg->hweight_donating = hwa; 2323 2323 iocg->hweight_after_donation = new_hwi; 2324 2324 list_add(&iocg->surplus_list, &surpluses); 2325 - } else { 2325 + } else if (!iocg->abs_vdebt) { 2326 + /* 2327 + * @iocg doesn't have enough to donate. Reset 2328 + * its inuse to active. 2329 + * 2330 + * Don't reset debtors as their inuse's are 2331 + * owned by debt handling. This shouldn't affect 2332 + * donation calculuation in any meaningful way 2333 + * as @iocg doesn't have a meaningful amount of 2334 + * share anyway. 2335 + */ 2326 2336 TRACE_IOCG_PATH(inuse_shortage, iocg, &now, 2327 2337 iocg->inuse, iocg->active, 2328 2338 iocg->hweight_inuse, new_hwi);
+1 -8
block/blk-mq.c
··· 1131 1131 trace_block_rq_issue(rq); 1132 1132 1133 1133 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { 1134 - u64 start_time; 1135 - #ifdef CONFIG_BLK_CGROUP 1136 - if (rq->bio) 1137 - start_time = bio_issue_time(&rq->bio->bi_issue); 1138 - else 1139 - #endif 1140 - start_time = ktime_get_ns(); 1141 - rq->io_start_time_ns = start_time; 1134 + rq->io_start_time_ns = ktime_get_ns(); 1142 1135 rq->stats_sectors = blk_rq_sectors(rq); 1143 1136 rq->rq_flags |= RQF_STATS; 1144 1137 rq_qos_issue(q, rq);
+9 -1
drivers/android/binder.c
··· 2295 2295 { 2296 2296 int ret = 0; 2297 2297 struct binder_sg_copy *sgc, *tmpsgc; 2298 + struct binder_ptr_fixup *tmppf; 2298 2299 struct binder_ptr_fixup *pf = 2299 2300 list_first_entry_or_null(pf_head, struct binder_ptr_fixup, 2300 2301 node); ··· 2350 2349 list_del(&sgc->node); 2351 2350 kfree(sgc); 2352 2351 } 2353 - BUG_ON(!list_empty(pf_head)); 2352 + list_for_each_entry_safe(pf, tmppf, pf_head, node) { 2353 + BUG_ON(pf->skip_size == 0); 2354 + list_del(&pf->node); 2355 + kfree(pf); 2356 + } 2354 2357 BUG_ON(!list_empty(sgc_head)); 2355 2358 2356 2359 return ret > 0 ? -EINVAL : ret; ··· 2490 2485 const void __user *sender_ufda_base; 2491 2486 struct binder_proc *proc = thread->proc; 2492 2487 int ret; 2488 + 2489 + if (fda->num_fds == 0) 2490 + return 0; 2493 2491 2494 2492 fd_buf_size = sizeof(u32) * fda->num_fds; 2495 2493 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+10 -1
drivers/base/arch_topology.c
··· 667 667 core_mask = &cpu_topology[cpu].llc_sibling; 668 668 } 669 669 670 + /* 671 + * For systems with no shared cpu-side LLC but with clusters defined, 672 + * extend core_mask to cluster_siblings. The sched domain builder will 673 + * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled. 674 + */ 675 + if (IS_ENABLED(CONFIG_SCHED_CLUSTER) && 676 + cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling)) 677 + core_mask = &cpu_topology[cpu].cluster_sibling; 678 + 670 679 return core_mask; 671 680 } 672 681 ··· 693 684 for_each_online_cpu(cpu) { 694 685 cpu_topo = &cpu_topology[cpu]; 695 686 696 - if (cpuid_topo->llc_id == cpu_topo->llc_id) { 687 + if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) { 697 688 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); 698 689 cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); 699 690 }
+10
drivers/base/topology.c
··· 152 152 NULL 153 153 }; 154 154 155 + static umode_t topology_is_visible(struct kobject *kobj, 156 + struct attribute *attr, int unused) 157 + { 158 + if (attr == &dev_attr_ppin.attr && !topology_ppin(kobj_to_dev(kobj)->id)) 159 + return 0; 160 + 161 + return attr->mode; 162 + } 163 + 155 164 static const struct attribute_group topology_attr_group = { 156 165 .attrs = default_attrs, 157 166 .bin_attrs = bin_attrs, 167 + .is_visible = topology_is_visible, 158 168 .name = "topology" 159 169 }; 160 170
+5 -1
drivers/bus/fsl-mc/fsl-mc-msi.c
··· 224 224 if (error) 225 225 return error; 226 226 227 + msi_lock_descs(dev); 227 228 if (msi_first_desc(dev, MSI_DESC_ALL)) 228 - return -EINVAL; 229 + error = -EINVAL; 230 + msi_unlock_descs(dev); 231 + if (error) 232 + return error; 229 233 230 234 /* 231 235 * NOTE: Calling this function will trigger the invocation of the
+2 -3
drivers/bus/imx-weim.c
··· 352 352 353 353 pdev = of_find_device_by_node(rd->dn); 354 354 if (!pdev) { 355 - dev_err(&pdev->dev, 356 - "Could not find platform device for '%pOF'\n", 355 + pr_err("Could not find platform device for '%pOF'\n", 357 356 rd->dn); 358 357 359 358 ret = notifier_from_errno(-EINVAL); ··· 369 370 return ret; 370 371 } 371 372 372 - struct notifier_block weim_of_notifier = { 373 + static struct notifier_block weim_of_notifier = { 373 374 .notifier_call = of_weim_notify, 374 375 }; 375 376 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+2
drivers/bus/mhi/host/pci_generic.c
··· 1060 1060 * the intermediate restore kernel reinitializes MHI device with new 1061 1061 * context. 1062 1062 */ 1063 + flush_work(&mhi_pdev->recovery_work); 1063 1064 if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) { 1064 1065 mhi_power_down(mhi_cntrl, true); 1065 1066 mhi_unprepare_after_power_down(mhi_cntrl); ··· 1086 1085 .resume = mhi_pci_resume, 1087 1086 .freeze = mhi_pci_freeze, 1088 1087 .thaw = mhi_pci_restore, 1088 + .poweroff = mhi_pci_freeze, 1089 1089 .restore = mhi_pci_restore, 1090 1090 #endif 1091 1091 };
+2
drivers/bus/sunxi-rsb.c
··· 227 227 228 228 dev_dbg(&rdev->dev, "device %s registered\n", dev_name(&rdev->dev)); 229 229 230 + return rdev; 231 + 230 232 err_device_add: 231 233 put_device(&rdev->dev); 232 234
+15 -1
drivers/bus/ti-sysc.c
··· 3232 3232 */ 3233 3233 static int sysc_check_active_timer(struct sysc *ddata) 3234 3234 { 3235 + int error; 3236 + 3235 3237 if (ddata->cap->type != TI_SYSC_OMAP2_TIMER && 3236 3238 ddata->cap->type != TI_SYSC_OMAP4_TIMER) 3237 3239 return 0; 3238 3240 3241 + /* 3242 + * Quirk for omap3 beagleboard revision A to B4 to use gpt12. 3243 + * Revision C and later are fixed with commit 23885389dbbb ("ARM: 3244 + * dts: Fix timer regression for beagleboard revision c"). This all 3245 + * can be dropped if we stop supporting old beagleboard revisions 3246 + * A to B4 at some point. 3247 + */ 3248 + if (sysc_soc->soc == SOC_3430) 3249 + error = -ENXIO; 3250 + else 3251 + error = -EBUSY; 3252 + 3239 3253 if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) && 3240 3254 (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)) 3241 - return -ENXIO; 3255 + return error; 3242 3256 3243 3257 return 0; 3244 3258 }
+6 -1
drivers/char/ipmi/ipmi_msghandler.c
··· 3677 3677 void ipmi_unregister_smi(struct ipmi_smi *intf) 3678 3678 { 3679 3679 struct ipmi_smi_watcher *w; 3680 - int intf_num = intf->intf_num, index; 3680 + int intf_num, index; 3681 3681 3682 + if (!intf) 3683 + return; 3684 + intf_num = intf->intf_num; 3682 3685 mutex_lock(&ipmi_interfaces_mutex); 3683 3686 intf->intf_num = -1; 3684 3687 intf->in_shutdown = true; ··· 4521 4518 } else 4522 4519 /* The message was sent, start the timer. */ 4523 4520 intf_start_seq_timer(intf, msg->msgid); 4521 + requeue = 0; 4522 + goto out; 4524 4523 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) 4525 4524 || (msg->rsp[1] != msg->data[1])) { 4526 4525 /*
+1 -4
drivers/char/ipmi/ipmi_si_intf.c
··· 2220 2220 return; 2221 2221 2222 2222 list_del(&smi_info->link); 2223 - 2224 - if (smi_info->intf) 2225 - ipmi_unregister_smi(smi_info->intf); 2226 - 2223 + ipmi_unregister_smi(smi_info->intf); 2227 2224 kfree(smi_info); 2228 2225 } 2229 2226
+8 -1
drivers/char/random.c
··· 318 318 * the resultant ChaCha state to the user, along with the second 319 319 * half of the block containing 32 bytes of random data that may 320 320 * be used; random_data_len may not be greater than 32. 321 + * 322 + * The returned ChaCha state contains within it a copy of the old 323 + * key value, at index 4, so the state should always be zeroed out 324 + * immediately after using in order to maintain forward secrecy. 325 + * If the state cannot be erased in a timely manner, then it is 326 + * safer to set the random_data parameter to &chacha_state[4] so 327 + * that this function overwrites it before returning. 321 328 */ 322 329 static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], 323 330 u32 chacha_state[CHACHA_STATE_WORDS], ··· 340 333 chacha20_block(chacha_state, first_block); 341 334 342 335 memcpy(key, first_block, CHACHA_KEY_SIZE); 343 - memmove(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); 336 + memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); 344 337 memzero_explicit(first_block, sizeof(first_block)); 345 338 } 346 339
+164 -31
drivers/clk/microchip/clk-mpfs.c
··· 11 11 #include <dt-bindings/clock/microchip,mpfs-clock.h> 12 12 13 13 /* address offset of control registers */ 14 + #define REG_MSSPLL_REF_CR 0x08u 15 + #define REG_MSSPLL_POSTDIV_CR 0x10u 16 + #define REG_MSSPLL_SSCG_2_CR 0x2Cu 14 17 #define REG_CLOCK_CONFIG_CR 0x08u 18 + #define REG_RTC_CLOCK_CR 0x0Cu 15 19 #define REG_SUBBLK_CLOCK_CR 0x84u 16 20 #define REG_SUBBLK_RESET_CR 0x88u 17 21 22 + #define MSSPLL_FBDIV_SHIFT 0x00u 23 + #define MSSPLL_FBDIV_WIDTH 0x0Cu 24 + #define MSSPLL_REFDIV_SHIFT 0x08u 25 + #define MSSPLL_REFDIV_WIDTH 0x06u 26 + #define MSSPLL_POSTDIV_SHIFT 0x08u 27 + #define MSSPLL_POSTDIV_WIDTH 0x07u 28 + #define MSSPLL_FIXED_DIV 4u 29 + 18 30 struct mpfs_clock_data { 19 31 void __iomem *base; 32 + void __iomem *msspll_base; 20 33 struct clk_hw_onecell_data hw_data; 21 34 }; 35 + 36 + struct mpfs_msspll_hw_clock { 37 + void __iomem *base; 38 + unsigned int id; 39 + u32 reg_offset; 40 + u32 shift; 41 + u32 width; 42 + u32 flags; 43 + struct clk_hw hw; 44 + struct clk_init_data init; 45 + }; 46 + 47 + #define to_mpfs_msspll_clk(_hw) container_of(_hw, struct mpfs_msspll_hw_clock, hw) 22 48 23 49 struct mpfs_cfg_clock { 24 50 const struct clk_div_table *table; 25 51 unsigned int id; 52 + u32 reg_offset; 26 53 u8 shift; 27 54 u8 width; 55 + u8 flags; 28 56 }; 29 57 30 58 struct mpfs_cfg_hw_clock { ··· 83 55 */ 84 56 static DEFINE_SPINLOCK(mpfs_clk_lock); 85 57 86 - static const struct clk_parent_data mpfs_cfg_parent[] = { 58 + static const struct clk_parent_data mpfs_ext_ref[] = { 87 59 { .index = 0 }, 88 60 }; 89 61 ··· 97 69 { 0, 0 } 98 70 }; 99 71 72 + /* 73 + * The only two supported reference clock frequencies for the PolarFire SoC are 74 + * 100 and 125 MHz, as the rtc reference is required to be 1 MHz. 75 + * It therefore only needs to have divider table entries corresponding to 76 + * divide by 100 and 125. 
77 + */ 78 + static const struct clk_div_table mpfs_div_rtcref_table[] = { 79 + { 100, 100 }, { 125, 125 }, 80 + { 0, 0 } 81 + }; 82 + 83 + static unsigned long mpfs_clk_msspll_recalc_rate(struct clk_hw *hw, unsigned long prate) 84 + { 85 + struct mpfs_msspll_hw_clock *msspll_hw = to_mpfs_msspll_clk(hw); 86 + void __iomem *mult_addr = msspll_hw->base + msspll_hw->reg_offset; 87 + void __iomem *ref_div_addr = msspll_hw->base + REG_MSSPLL_REF_CR; 88 + void __iomem *postdiv_addr = msspll_hw->base + REG_MSSPLL_POSTDIV_CR; 89 + u32 mult, ref_div, postdiv; 90 + 91 + mult = readl_relaxed(mult_addr) >> MSSPLL_FBDIV_SHIFT; 92 + mult &= clk_div_mask(MSSPLL_FBDIV_WIDTH); 93 + ref_div = readl_relaxed(ref_div_addr) >> MSSPLL_REFDIV_SHIFT; 94 + ref_div &= clk_div_mask(MSSPLL_REFDIV_WIDTH); 95 + postdiv = readl_relaxed(postdiv_addr) >> MSSPLL_POSTDIV_SHIFT; 96 + postdiv &= clk_div_mask(MSSPLL_POSTDIV_WIDTH); 97 + 98 + return prate * mult / (ref_div * MSSPLL_FIXED_DIV * postdiv); 99 + } 100 + 101 + static const struct clk_ops mpfs_clk_msspll_ops = { 102 + .recalc_rate = mpfs_clk_msspll_recalc_rate, 103 + }; 104 + 105 + #define CLK_PLL(_id, _name, _parent, _shift, _width, _flags, _offset) { \ 106 + .id = _id, \ 107 + .shift = _shift, \ 108 + .width = _width, \ 109 + .reg_offset = _offset, \ 110 + .flags = _flags, \ 111 + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_msspll_ops, 0), \ 112 + } 113 + 114 + static struct mpfs_msspll_hw_clock mpfs_msspll_clks[] = { 115 + CLK_PLL(CLK_MSSPLL, "clk_msspll", mpfs_ext_ref, MSSPLL_FBDIV_SHIFT, 116 + MSSPLL_FBDIV_WIDTH, 0, REG_MSSPLL_SSCG_2_CR), 117 + }; 118 + 119 + static int mpfs_clk_register_msspll(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hw, 120 + void __iomem *base) 121 + { 122 + msspll_hw->base = base; 123 + 124 + return devm_clk_hw_register(dev, &msspll_hw->hw); 125 + } 126 + 127 + static int mpfs_clk_register_mssplls(struct device *dev, struct mpfs_msspll_hw_clock *msspll_hws, 128 + unsigned int num_clks, 
struct mpfs_clock_data *data) 129 + { 130 + void __iomem *base = data->msspll_base; 131 + unsigned int i; 132 + int ret; 133 + 134 + for (i = 0; i < num_clks; i++) { 135 + struct mpfs_msspll_hw_clock *msspll_hw = &msspll_hws[i]; 136 + 137 + ret = mpfs_clk_register_msspll(dev, msspll_hw, base); 138 + if (ret) 139 + return dev_err_probe(dev, ret, "failed to register msspll id: %d\n", 140 + CLK_MSSPLL); 141 + 142 + data->hw_data.hws[msspll_hw->id] = &msspll_hw->hw; 143 + } 144 + 145 + return 0; 146 + } 147 + 148 + /* 149 + * "CFG" clocks 150 + */ 151 + 100 152 static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate) 101 153 { 102 154 struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw); ··· 184 76 void __iomem *base_addr = cfg_hw->sys_base; 185 77 u32 val; 186 78 187 - val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR) >> cfg->shift; 79 + val = readl_relaxed(base_addr + cfg->reg_offset) >> cfg->shift; 188 80 val &= clk_div_mask(cfg->width); 189 81 190 - return prate / (1u << val); 82 + return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width); 191 83 } 192 84 193 85 static long mpfs_cfg_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) ··· 213 105 return divider_setting; 214 106 215 107 spin_lock_irqsave(&mpfs_clk_lock, flags); 216 - 217 - val = readl_relaxed(base_addr + REG_CLOCK_CONFIG_CR); 108 + val = readl_relaxed(base_addr + cfg->reg_offset); 218 109 val &= ~(clk_div_mask(cfg->width) << cfg_hw->cfg.shift); 219 110 val |= divider_setting << cfg->shift; 220 - writel_relaxed(val, base_addr + REG_CLOCK_CONFIG_CR); 111 + writel_relaxed(val, base_addr + cfg->reg_offset); 221 112 222 113 spin_unlock_irqrestore(&mpfs_clk_lock, flags); 223 114 ··· 229 122 .set_rate = mpfs_cfg_clk_set_rate, 230 123 }; 231 124 232 - #define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags) { \ 233 - .cfg.id = _id, \ 234 - .cfg.shift = _shift, \ 235 - .cfg.width = _width, \ 236 - .cfg.table = _table, \ 
237 - .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, _parent, &mpfs_clk_cfg_ops, \ 238 - _flags), \ 125 + #define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \ 126 + .cfg.id = _id, \ 127 + .cfg.shift = _shift, \ 128 + .cfg.width = _width, \ 129 + .cfg.table = _table, \ 130 + .cfg.reg_offset = _offset, \ 131 + .cfg.flags = _flags, \ 132 + .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \ 239 133 } 240 134 241 135 static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = { 242 - CLK_CFG(CLK_CPU, "clk_cpu", mpfs_cfg_parent, 0, 2, mpfs_div_cpu_axi_table, 0), 243 - CLK_CFG(CLK_AXI, "clk_axi", mpfs_cfg_parent, 2, 2, mpfs_div_cpu_axi_table, 0), 244 - CLK_CFG(CLK_AHB, "clk_ahb", mpfs_cfg_parent, 4, 2, mpfs_div_ahb_table, 0), 136 + CLK_CFG(CLK_CPU, "clk_cpu", "clk_msspll", 0, 2, mpfs_div_cpu_axi_table, 0, 137 + REG_CLOCK_CONFIG_CR), 138 + CLK_CFG(CLK_AXI, "clk_axi", "clk_msspll", 2, 2, mpfs_div_cpu_axi_table, 0, 139 + REG_CLOCK_CONFIG_CR), 140 + CLK_CFG(CLK_AHB, "clk_ahb", "clk_msspll", 4, 2, mpfs_div_ahb_table, 0, 141 + REG_CLOCK_CONFIG_CR), 142 + { 143 + .cfg.id = CLK_RTCREF, 144 + .cfg.shift = 0, 145 + .cfg.width = 12, 146 + .cfg.table = mpfs_div_rtcref_table, 147 + .cfg.reg_offset = REG_RTC_CLOCK_CR, 148 + .cfg.flags = CLK_DIVIDER_ONE_BASED, 149 + .hw.init = 150 + CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &mpfs_clk_cfg_ops, 0), 151 + } 245 152 }; 246 153 247 154 static int mpfs_clk_register_cfg(struct device *dev, struct mpfs_cfg_hw_clock *cfg_hw, ··· 281 160 return dev_err_probe(dev, ret, "failed to register clock id: %d\n", 282 161 cfg_hw->cfg.id); 283 162 284 - id = cfg_hws[i].cfg.id; 163 + id = cfg_hw->cfg.id; 285 164 data->hw_data.hws[id] = &cfg_hw->hw; 286 165 } 287 166 288 167 return 0; 289 168 } 169 + 170 + /* 171 + * peripheral clocks - devices connected to axi or ahb buses. 
172 + */ 290 173 291 174 static int mpfs_periph_clk_enable(struct clk_hw *hw) 292 175 { ··· 324 199 unsigned long flags; 325 200 326 201 spin_lock_irqsave(&mpfs_clk_lock, flags); 327 - 328 - reg = readl_relaxed(base_addr + REG_SUBBLK_RESET_CR); 329 - val = reg | (1u << periph->shift); 330 - writel_relaxed(val, base_addr + REG_SUBBLK_RESET_CR); 331 202 332 203 reg = readl_relaxed(base_addr + REG_SUBBLK_CLOCK_CR); 333 204 val = reg & ~(1u << periph->shift); ··· 370 249 * trap handler 371 250 * - CLK_MMUART0: reserved by the hss 372 251 * - CLK_DDRC: provides clock to the ddr subsystem 373 - * - CLK_FICx: these provide clocks for sections of the fpga fabric, disabling them would 374 - * cause the fabric to go into reset 252 + * - CLK_FICx: these provide the processor side clocks to the "FIC" (Fabric InterConnect) 253 + * clock domain crossers which provide the interface to the FPGA fabric. Disabling them 254 + * causes the FPGA fabric to go into reset. 255 + * - CLK_ATHENA: The athena clock is FIC4, which is reserved for the Athena TeraFire. 
375 256 */ 376 257 377 258 static struct mpfs_periph_hw_clock mpfs_periph_clks[] = { ··· 381 258 CLK_PERIPH(CLK_MAC0, "clk_periph_mac0", PARENT_CLK(AHB), 1, 0), 382 259 CLK_PERIPH(CLK_MAC1, "clk_periph_mac1", PARENT_CLK(AHB), 2, 0), 383 260 CLK_PERIPH(CLK_MMC, "clk_periph_mmc", PARENT_CLK(AHB), 3, 0), 384 - CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(AHB), 4, 0), 261 + CLK_PERIPH(CLK_TIMER, "clk_periph_timer", PARENT_CLK(RTCREF), 4, 0), 385 262 CLK_PERIPH(CLK_MMUART0, "clk_periph_mmuart0", PARENT_CLK(AHB), 5, CLK_IS_CRITICAL), 386 263 CLK_PERIPH(CLK_MMUART1, "clk_periph_mmuart1", PARENT_CLK(AHB), 6, 0), 387 264 CLK_PERIPH(CLK_MMUART2, "clk_periph_mmuart2", PARENT_CLK(AHB), 7, 0), ··· 400 277 CLK_PERIPH(CLK_GPIO1, "clk_periph_gpio1", PARENT_CLK(AHB), 21, 0), 401 278 CLK_PERIPH(CLK_GPIO2, "clk_periph_gpio2", PARENT_CLK(AHB), 22, 0), 402 279 CLK_PERIPH(CLK_DDRC, "clk_periph_ddrc", PARENT_CLK(AHB), 23, CLK_IS_CRITICAL), 403 - CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AHB), 24, CLK_IS_CRITICAL), 404 - CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AHB), 25, CLK_IS_CRITICAL), 405 - CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AHB), 26, CLK_IS_CRITICAL), 406 - CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AHB), 27, CLK_IS_CRITICAL), 407 - CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AHB), 28, 0), 280 + CLK_PERIPH(CLK_FIC0, "clk_periph_fic0", PARENT_CLK(AXI), 24, CLK_IS_CRITICAL), 281 + CLK_PERIPH(CLK_FIC1, "clk_periph_fic1", PARENT_CLK(AXI), 25, CLK_IS_CRITICAL), 282 + CLK_PERIPH(CLK_FIC2, "clk_periph_fic2", PARENT_CLK(AXI), 26, CLK_IS_CRITICAL), 283 + CLK_PERIPH(CLK_FIC3, "clk_periph_fic3", PARENT_CLK(AXI), 27, CLK_IS_CRITICAL), 284 + CLK_PERIPH(CLK_ATHENA, "clk_periph_athena", PARENT_CLK(AXI), 28, CLK_IS_CRITICAL), 408 285 CLK_PERIPH(CLK_CFM, "clk_periph_cfm", PARENT_CLK(AHB), 29, 0), 409 286 }; 410 287 ··· 445 322 unsigned int num_clks; 446 323 int ret; 447 324 448 - /* CLK_RESERVED is not part of cfg_clks nor periph_clks, so 
add 1 */ 449 - num_clks = ARRAY_SIZE(mpfs_cfg_clks) + ARRAY_SIZE(mpfs_periph_clks) + 1; 325 + /* CLK_RESERVED is not part of clock arrays, so add 1 */ 326 + num_clks = ARRAY_SIZE(mpfs_msspll_clks) + ARRAY_SIZE(mpfs_cfg_clks) 327 + + ARRAY_SIZE(mpfs_periph_clks) + 1; 450 328 451 329 clk_data = devm_kzalloc(dev, struct_size(clk_data, hw_data.hws, num_clks), GFP_KERNEL); 452 330 if (!clk_data) ··· 457 333 if (IS_ERR(clk_data->base)) 458 334 return PTR_ERR(clk_data->base); 459 335 336 + clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 1); 337 + if (IS_ERR(clk_data->msspll_base)) 338 + return PTR_ERR(clk_data->msspll_base); 339 + 460 340 clk_data->hw_data.num = num_clks; 341 + 342 + ret = mpfs_clk_register_mssplls(dev, mpfs_msspll_clks, ARRAY_SIZE(mpfs_msspll_clks), 343 + clk_data); 344 + if (ret) 345 + return ret; 461 346 462 347 ret = mpfs_clk_register_cfgs(dev, mpfs_cfg_clks, ARRAY_SIZE(mpfs_cfg_clks), clk_data); 463 348 if (ret)
+1 -1
drivers/clk/qcom/clk-rcg2.c
··· 818 818 static int clk_gfx3d_determine_rate(struct clk_hw *hw, 819 819 struct clk_rate_request *req) 820 820 { 821 - struct clk_rate_request parent_req = { }; 821 + struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX }; 822 822 struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw); 823 823 struct clk_hw *xo, *p0, *p1, *p2; 824 824 unsigned long p0_rate;
+2
drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
··· 241 241 .ops = &ccu_mux_ops, 242 242 .parent_hws = rtc_32k_parents, 243 243 .num_parents = ARRAY_SIZE(rtc_32k_parents), /* updated during probe */ 244 + .flags = CLK_IS_CRITICAL, 244 245 }; 245 246 246 247 static struct ccu_mux rtc_32k_clk = { ··· 347 346 .compatible = "allwinner,sun50i-r329-rtc", 348 347 .data = &sun50i_r329_rtc_ccu_data, 349 348 }, 349 + {}, 350 350 }; 351 351 352 352 int sun6i_rtc_ccu_probe(struct device *dev, void __iomem *reg)
+2
drivers/clk/sunxi/clk-sun9i-mmc.c
··· 109 109 spin_lock_init(&data->lock); 110 110 111 111 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 112 + if (!r) 113 + return -EINVAL; 112 114 /* one clock/reset pair per word */ 113 115 count = DIV_ROUND_UP((resource_size(r)), SUN9I_MMC_WIDTH); 114 116 data->membase = devm_ioremap_resource(&pdev->dev, r);
+70 -35
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2395 2395 return amdgpu_device_resume(drm_dev, true); 2396 2396 } 2397 2397 2398 + static int amdgpu_runtime_idle_check_display(struct device *dev) 2399 + { 2400 + struct pci_dev *pdev = to_pci_dev(dev); 2401 + struct drm_device *drm_dev = pci_get_drvdata(pdev); 2402 + struct amdgpu_device *adev = drm_to_adev(drm_dev); 2403 + 2404 + if (adev->mode_info.num_crtc) { 2405 + struct drm_connector *list_connector; 2406 + struct drm_connector_list_iter iter; 2407 + int ret = 0; 2408 + 2409 + /* XXX: Return busy if any displays are connected to avoid 2410 + * possible display wakeups after runtime resume due to 2411 + * hotplug events in case any displays were connected while 2412 + * the GPU was in suspend. Remove this once that is fixed. 2413 + */ 2414 + mutex_lock(&drm_dev->mode_config.mutex); 2415 + drm_connector_list_iter_begin(drm_dev, &iter); 2416 + drm_for_each_connector_iter(list_connector, &iter) { 2417 + if (list_connector->status == connector_status_connected) { 2418 + ret = -EBUSY; 2419 + break; 2420 + } 2421 + } 2422 + drm_connector_list_iter_end(&iter); 2423 + mutex_unlock(&drm_dev->mode_config.mutex); 2424 + 2425 + if (ret) 2426 + return ret; 2427 + 2428 + if (amdgpu_device_has_dc_support(adev)) { 2429 + struct drm_crtc *crtc; 2430 + 2431 + drm_for_each_crtc(crtc, drm_dev) { 2432 + drm_modeset_lock(&crtc->mutex, NULL); 2433 + if (crtc->state->active) 2434 + ret = -EBUSY; 2435 + drm_modeset_unlock(&crtc->mutex); 2436 + if (ret < 0) 2437 + break; 2438 + } 2439 + } else { 2440 + mutex_lock(&drm_dev->mode_config.mutex); 2441 + drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL); 2442 + 2443 + drm_connector_list_iter_begin(drm_dev, &iter); 2444 + drm_for_each_connector_iter(list_connector, &iter) { 2445 + if (list_connector->dpms == DRM_MODE_DPMS_ON) { 2446 + ret = -EBUSY; 2447 + break; 2448 + } 2449 + } 2450 + 2451 + drm_connector_list_iter_end(&iter); 2452 + 2453 + drm_modeset_unlock(&drm_dev->mode_config.connection_mutex); 2454 + 
mutex_unlock(&drm_dev->mode_config.mutex); 2455 + } 2456 + if (ret) 2457 + return ret; 2458 + } 2459 + 2460 + return 0; 2461 + } 2462 + 2398 2463 static int amdgpu_pmops_runtime_suspend(struct device *dev) 2399 2464 { 2400 2465 struct pci_dev *pdev = to_pci_dev(dev); ··· 2471 2406 pm_runtime_forbid(dev); 2472 2407 return -EBUSY; 2473 2408 } 2409 + 2410 + ret = amdgpu_runtime_idle_check_display(dev); 2411 + if (ret) 2412 + return ret; 2474 2413 2475 2414 /* wait for all rings to drain before suspending */ 2476 2415 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { ··· 2585 2516 return -EBUSY; 2586 2517 } 2587 2518 2588 - if (amdgpu_device_has_dc_support(adev)) { 2589 - struct drm_crtc *crtc; 2590 - 2591 - drm_for_each_crtc(crtc, drm_dev) { 2592 - drm_modeset_lock(&crtc->mutex, NULL); 2593 - if (crtc->state->active) 2594 - ret = -EBUSY; 2595 - drm_modeset_unlock(&crtc->mutex); 2596 - if (ret < 0) 2597 - break; 2598 - } 2599 - 2600 - } else { 2601 - struct drm_connector *list_connector; 2602 - struct drm_connector_list_iter iter; 2603 - 2604 - mutex_lock(&drm_dev->mode_config.mutex); 2605 - drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL); 2606 - 2607 - drm_connector_list_iter_begin(drm_dev, &iter); 2608 - drm_for_each_connector_iter(list_connector, &iter) { 2609 - if (list_connector->dpms == DRM_MODE_DPMS_ON) { 2610 - ret = -EBUSY; 2611 - break; 2612 - } 2613 - } 2614 - 2615 - drm_connector_list_iter_end(&iter); 2616 - 2617 - drm_modeset_unlock(&drm_dev->mode_config.connection_mutex); 2618 - mutex_unlock(&drm_dev->mode_config.mutex); 2619 - } 2620 - 2621 - if (ret == -EBUSY) 2622 - DRM_DEBUG_DRIVER("failing to power off - crtc active\n"); 2519 + ret = amdgpu_runtime_idle_check_display(dev); 2623 2520 2624 2521 pm_runtime_mark_last_busy(dev); 2625 2522 pm_runtime_autosuspend(dev);
+10
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 1151 1151 int r; 1152 1152 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1153 1153 1154 + /* 1155 + * The issue mmhub can't disconnect from DF with MMHUB clock gating being disabled 1156 + * is a new problem observed at DF 3.0.3, however with the same suspend sequence not 1157 + * seen any issue on the DF 3.0.2 series platform. 1158 + */ 1159 + if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) { 1160 + dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n"); 1161 + return 0; 1162 + } 1163 + 1154 1164 r = adev->mmhub.funcs->set_clockgating(adev, state); 1155 1165 if (r) 1156 1166 return r;
+37 -46
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 130 130 } 131 131 132 132 static void increment_queue_count(struct device_queue_manager *dqm, 133 - enum kfd_queue_type type) 133 + struct qcm_process_device *qpd, 134 + struct queue *q) 134 135 { 135 136 dqm->active_queue_count++; 136 - if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) 137 + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || 138 + q->properties.type == KFD_QUEUE_TYPE_DIQ) 137 139 dqm->active_cp_queue_count++; 140 + 141 + if (q->properties.is_gws) { 142 + dqm->gws_queue_count++; 143 + qpd->mapped_gws_queue = true; 144 + } 138 145 } 139 146 140 147 static void decrement_queue_count(struct device_queue_manager *dqm, 141 - enum kfd_queue_type type) 148 + struct qcm_process_device *qpd, 149 + struct queue *q) 142 150 { 143 151 dqm->active_queue_count--; 144 - if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ) 152 + if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || 153 + q->properties.type == KFD_QUEUE_TYPE_DIQ) 145 154 dqm->active_cp_queue_count--; 155 + 156 + if (q->properties.is_gws) { 157 + dqm->gws_queue_count--; 158 + qpd->mapped_gws_queue = false; 159 + } 146 160 } 147 161 148 162 /* ··· 426 412 list_add(&q->list, &qpd->queues_list); 427 413 qpd->queue_count++; 428 414 if (q->properties.is_active) 429 - increment_queue_count(dqm, q->properties.type); 415 + increment_queue_count(dqm, qpd, q); 430 416 431 417 /* 432 418 * Unconditionally increment this counter, regardless of the queue's ··· 615 601 deallocate_vmid(dqm, qpd, q); 616 602 } 617 603 qpd->queue_count--; 618 - if (q->properties.is_active) { 619 - decrement_queue_count(dqm, q->properties.type); 620 - if (q->properties.is_gws) { 621 - dqm->gws_queue_count--; 622 - qpd->mapped_gws_queue = false; 623 - } 624 - } 604 + if (q->properties.is_active) 605 + decrement_queue_count(dqm, qpd, q); 625 606 626 607 return retval; 627 608 } ··· 709 700 * dqm->active_queue_count to determine whether a new runlist must be 710 701 * uploaded. 
711 702 */ 712 - if (q->properties.is_active && !prev_active) 713 - increment_queue_count(dqm, q->properties.type); 714 - else if (!q->properties.is_active && prev_active) 715 - decrement_queue_count(dqm, q->properties.type); 716 - 717 - if (q->gws && !q->properties.is_gws) { 703 + if (q->properties.is_active && !prev_active) { 704 + increment_queue_count(dqm, &pdd->qpd, q); 705 + } else if (!q->properties.is_active && prev_active) { 706 + decrement_queue_count(dqm, &pdd->qpd, q); 707 + } else if (q->gws && !q->properties.is_gws) { 718 708 if (q->properties.is_active) { 719 709 dqm->gws_queue_count++; 720 710 pdd->qpd.mapped_gws_queue = true; ··· 775 767 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( 776 768 q->properties.type)]; 777 769 q->properties.is_active = false; 778 - decrement_queue_count(dqm, q->properties.type); 779 - if (q->properties.is_gws) { 780 - dqm->gws_queue_count--; 781 - qpd->mapped_gws_queue = false; 782 - } 770 + decrement_queue_count(dqm, qpd, q); 783 771 784 772 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n")) 785 773 continue; ··· 821 817 continue; 822 818 823 819 q->properties.is_active = false; 824 - decrement_queue_count(dqm, q->properties.type); 820 + decrement_queue_count(dqm, qpd, q); 825 821 } 826 822 pdd->last_evict_timestamp = get_jiffies_64(); 827 823 retval = execute_queues_cpsch(dqm, ··· 892 888 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( 893 889 q->properties.type)]; 894 890 q->properties.is_active = true; 895 - increment_queue_count(dqm, q->properties.type); 896 - if (q->properties.is_gws) { 897 - dqm->gws_queue_count++; 898 - qpd->mapped_gws_queue = true; 899 - } 891 + increment_queue_count(dqm, qpd, q); 900 892 901 893 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n")) 902 894 continue; ··· 950 950 continue; 951 951 952 952 q->properties.is_active = true; 953 - increment_queue_count(dqm, q->properties.type); 953 + increment_queue_count(dqm, &pdd->qpd, q); 954 954 } 955 955 retval = 
execute_queues_cpsch(dqm, 956 956 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); ··· 1378 1378 dqm->total_queue_count); 1379 1379 1380 1380 list_add(&kq->list, &qpd->priv_queue_list); 1381 - increment_queue_count(dqm, kq->queue->properties.type); 1381 + increment_queue_count(dqm, qpd, kq->queue); 1382 1382 qpd->is_debug = true; 1383 1383 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); 1384 1384 dqm_unlock(dqm); ··· 1392 1392 { 1393 1393 dqm_lock(dqm); 1394 1394 list_del(&kq->list); 1395 - decrement_queue_count(dqm, kq->queue->properties.type); 1395 + decrement_queue_count(dqm, qpd, kq->queue); 1396 1396 qpd->is_debug = false; 1397 1397 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); 1398 1398 /* ··· 1467 1467 qpd->queue_count++; 1468 1468 1469 1469 if (q->properties.is_active) { 1470 - increment_queue_count(dqm, q->properties.type); 1470 + increment_queue_count(dqm, qpd, q); 1471 1471 1472 1472 execute_queues_cpsch(dqm, 1473 1473 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); ··· 1683 1683 list_del(&q->list); 1684 1684 qpd->queue_count--; 1685 1685 if (q->properties.is_active) { 1686 - decrement_queue_count(dqm, q->properties.type); 1686 + decrement_queue_count(dqm, qpd, q); 1687 1687 retval = execute_queues_cpsch(dqm, 1688 1688 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); 1689 1689 if (retval == -ETIME) 1690 1690 qpd->reset_wavefronts = true; 1691 - if (q->properties.is_gws) { 1692 - dqm->gws_queue_count--; 1693 - qpd->mapped_gws_queue = false; 1694 - } 1695 1691 } 1696 1692 1697 1693 /* ··· 1928 1932 /* Clean all kernel queues */ 1929 1933 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { 1930 1934 list_del(&kq->list); 1931 - decrement_queue_count(dqm, kq->queue->properties.type); 1935 + decrement_queue_count(dqm, qpd, kq->queue); 1932 1936 qpd->is_debug = false; 1933 1937 dqm->total_queue_count--; 1934 1938 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; ··· 1941 1945 else if (q->properties.type == 
KFD_QUEUE_TYPE_SDMA_XGMI) 1942 1946 deallocate_sdma_queue(dqm, q); 1943 1947 1944 - if (q->properties.is_active) { 1945 - decrement_queue_count(dqm, q->properties.type); 1946 - if (q->properties.is_gws) { 1947 - dqm->gws_queue_count--; 1948 - qpd->mapped_gws_queue = false; 1949 - } 1950 - } 1948 + if (q->properties.is_active) 1949 + decrement_queue_count(dqm, qpd, q); 1951 1950 1952 1951 dqm->total_queue_count--; 1953 1952 }
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 1103 1103 uint32_t priority; 1104 1104 uint32_t q_percent; 1105 1105 uint32_t doorbell_id; 1106 - uint32_t is_gws; 1106 + uint32_t gws; 1107 1107 uint32_t sdma_id; 1108 1108 uint32_t eop_ring_buffer_size; 1109 1109 uint32_t ctx_save_restore_area_size;
+7 -3
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
··· 636 636 q_data->ctx_save_restore_area_size = 637 637 q->properties.ctx_save_restore_area_size; 638 638 639 + q_data->gws = !!q->gws; 640 + 639 641 ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack); 640 642 if (ret) { 641 643 pr_err("Failed checkpoint queue_mqd (%d)\n", ret); ··· 745 743 struct kfd_criu_queue_priv_data *q_data) 746 744 { 747 745 qp->is_interop = false; 748 - qp->is_gws = q_data->is_gws; 749 746 qp->queue_percent = q_data->q_percent; 750 747 qp->priority = q_data->priority; 751 748 qp->queue_address = q_data->q_address; ··· 827 826 NULL); 828 827 if (ret) { 829 828 pr_err("Failed to create new queue err:%d\n", ret); 830 - ret = -EINVAL; 829 + goto exit; 831 830 } 831 + 832 + if (q_data->gws) 833 + ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws); 832 834 833 835 exit: 834 836 if (ret) 835 - pr_err("Failed to create queue (%d)\n", ret); 837 + pr_err("Failed to restore queue (%d)\n", ret); 836 838 else 837 839 pr_debug("Queue id %d was restored successfully\n", queue_id); 838 840
+1
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
··· 997 997 return &clk_src->base; 998 998 } 999 999 1000 + kfree(clk_src); 1000 1001 BREAK_TO_DEBUGGER(); 1001 1002 return NULL; 1002 1003 }
+39
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
··· 427 427 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) 428 428 { 429 429 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 430 + int i; 430 431 431 432 if (!adev->pm.dpm_enabled) 432 433 return; 433 434 434 435 if (!pp_funcs->pm_compute_clocks) 435 436 return; 437 + 438 + if (adev->mode_info.num_crtc) 439 + amdgpu_display_bandwidth_update(adev); 440 + 441 + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 442 + struct amdgpu_ring *ring = adev->rings[i]; 443 + if (ring && ring->sched.ready) 444 + amdgpu_fence_wait_empty(ring); 445 + } 436 446 437 447 mutex_lock(&adev->pm.mutex); 438 448 pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); ··· 453 443 { 454 444 int ret = 0; 455 445 446 + if (adev->family == AMDGPU_FAMILY_SI) { 447 + mutex_lock(&adev->pm.mutex); 448 + if (enable) { 449 + adev->pm.dpm.uvd_active = true; 450 + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 451 + } else { 452 + adev->pm.dpm.uvd_active = false; 453 + } 454 + mutex_unlock(&adev->pm.mutex); 455 + 456 + amdgpu_dpm_compute_clocks(adev); 457 + return; 458 + } 459 + 456 460 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 457 461 if (ret) 458 462 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", ··· 476 452 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 477 453 { 478 454 int ret = 0; 455 + 456 + if (adev->family == AMDGPU_FAMILY_SI) { 457 + mutex_lock(&adev->pm.mutex); 458 + if (enable) { 459 + adev->pm.dpm.vce_active = true; 460 + /* XXX select vce level based on ring/task */ 461 + adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 462 + } else { 463 + adev->pm.dpm.vce_active = false; 464 + } 465 + mutex_unlock(&adev->pm.mutex); 466 + 467 + amdgpu_dpm_compute_clocks(adev); 468 + return; 469 + } 479 470 480 471 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 481 472 if (ret)
-10
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
··· 1028 1028 void amdgpu_legacy_dpm_compute_clocks(void *handle) 1029 1029 { 1030 1030 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1031 - int i = 0; 1032 - 1033 - if (adev->mode_info.num_crtc) 1034 - amdgpu_display_bandwidth_update(adev); 1035 - 1036 - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1037 - struct amdgpu_ring *ring = adev->rings[i]; 1038 - if (ring && ring->sched.ready) 1039 - amdgpu_fence_wait_empty(ring); 1040 - } 1041 1031 1042 1032 amdgpu_dpm_get_active_displays(adev); 1043 1033
-35
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 3892 3892 } 3893 3893 #endif 3894 3894 3895 - static int si_set_powergating_by_smu(void *handle, 3896 - uint32_t block_type, 3897 - bool gate) 3898 - { 3899 - struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3900 - 3901 - switch (block_type) { 3902 - case AMD_IP_BLOCK_TYPE_UVD: 3903 - if (!gate) { 3904 - adev->pm.dpm.uvd_active = true; 3905 - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 3906 - } else { 3907 - adev->pm.dpm.uvd_active = false; 3908 - } 3909 - 3910 - amdgpu_legacy_dpm_compute_clocks(handle); 3911 - break; 3912 - case AMD_IP_BLOCK_TYPE_VCE: 3913 - if (!gate) { 3914 - adev->pm.dpm.vce_active = true; 3915 - /* XXX select vce level based on ring/task */ 3916 - adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 3917 - } else { 3918 - adev->pm.dpm.vce_active = false; 3919 - } 3920 - 3921 - amdgpu_legacy_dpm_compute_clocks(handle); 3922 - break; 3923 - default: 3924 - break; 3925 - } 3926 - return 0; 3927 - } 3928 - 3929 3895 static int si_set_sw_state(struct amdgpu_device *adev) 3930 3896 { 3931 3897 return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ? ··· 8091 8125 .print_power_state = &si_dpm_print_power_state, 8092 8126 .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, 8093 8127 .force_performance_level = &si_dpm_force_performance_level, 8094 - .set_powergating_by_smu = &si_set_powergating_by_smu, 8095 8128 .vblank_too_short = &si_dpm_vblank_too_short, 8096 8129 .set_fan_control_mode = &si_dpm_set_fan_control_mode, 8097 8130 .get_fan_control_mode = &si_dpm_get_fan_control_mode,
-10
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
··· 1487 1487 { 1488 1488 struct pp_hwmgr *hwmgr = handle; 1489 1489 struct amdgpu_device *adev = hwmgr->adev; 1490 - int i = 0; 1491 - 1492 - if (adev->mode_info.num_crtc) 1493 - amdgpu_display_bandwidth_update(adev); 1494 - 1495 - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1496 - struct amdgpu_ring *ring = adev->rings[i]; 1497 - if (ring && ring->sched.ready) 1498 - amdgpu_fence_wait_empty(ring); 1499 - } 1500 1490 1501 1491 if (!amdgpu_device_has_dc_support(adev)) { 1502 1492 amdgpu_dpm_get_active_displays(adev);
+26 -8
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
··· 97 97 98 98 #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359 99 99 100 + enum intel_dp_aux_backlight_modparam { 101 + INTEL_DP_AUX_BACKLIGHT_AUTO = -1, 102 + INTEL_DP_AUX_BACKLIGHT_OFF = 0, 103 + INTEL_DP_AUX_BACKLIGHT_ON = 1, 104 + INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2, 105 + INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, 106 + }; 107 + 100 108 /* Intel EDP backlight callbacks */ 101 109 static bool 102 110 intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) ··· 131 123 } else { 132 124 drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n", 133 125 tcon_cap[0]); 126 + return false; 127 + } 128 + 129 + /* 130 + * If we don't have HDR static metadata there is no way to 131 + * runtime detect used range for nits based control. For now 132 + * do not use Intel proprietary eDP backlight control if we 133 + * don't have this data in panel EDID. In case we find panel 134 + * which supports only nits based control, but doesn't provide 135 + * HDR static metadata we need to start maintaining table of 136 + * ranges for such panels. 137 + */ 138 + if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL && 139 + !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & 140 + BIT(HDMI_STATIC_METADATA_TYPE1))) { 141 + drm_info(&i915->drm, 142 + "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. 
needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", 143 + INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL); 134 144 return false; 135 145 } 136 146 ··· 437 411 .disable = intel_dp_aux_vesa_disable_backlight, 438 412 .set = intel_dp_aux_vesa_set_backlight, 439 413 .get = intel_dp_aux_vesa_get_backlight, 440 - }; 441 - 442 - enum intel_dp_aux_backlight_modparam { 443 - INTEL_DP_AUX_BACKLIGHT_AUTO = -1, 444 - INTEL_DP_AUX_BACKLIGHT_OFF = 0, 445 - INTEL_DP_AUX_BACKLIGHT_ON = 1, 446 - INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2, 447 - INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, 448 414 }; 449 415 450 416 int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
+1 -1
drivers/gpu/drm/i915/display/intel_fbc.c
··· 1037 1037 struct intel_plane_state *plane_state = 1038 1038 intel_atomic_get_new_plane_state(state, plane); 1039 1039 const struct drm_framebuffer *fb = plane_state->hw.fb; 1040 - struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc); 1040 + struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); 1041 1041 const struct intel_crtc_state *crtc_state; 1042 1042 struct intel_fbc *fbc = plane->fbc; 1043 1043
+3 -3
drivers/gpu/drm/i915/i915_reg.h
··· 4345 4345 #define _DSPAADDR 0x70184 4346 4346 #define _DSPASTRIDE 0x70188 4347 4347 #define _DSPAPOS 0x7018C /* reserved */ 4348 - #define DISP_POS_Y_MASK REG_GENMASK(31, 0) 4348 + #define DISP_POS_Y_MASK REG_GENMASK(31, 16) 4349 4349 #define DISP_POS_Y(y) REG_FIELD_PREP(DISP_POS_Y_MASK, (y)) 4350 4350 #define DISP_POS_X_MASK REG_GENMASK(15, 0) 4351 4351 #define DISP_POS_X(x) REG_FIELD_PREP(DISP_POS_X_MASK, (x)) 4352 4352 #define _DSPASIZE 0x70190 4353 - #define DISP_HEIGHT_MASK REG_GENMASK(31, 0) 4353 + #define DISP_HEIGHT_MASK REG_GENMASK(31, 16) 4354 4354 #define DISP_HEIGHT(h) REG_FIELD_PREP(DISP_HEIGHT_MASK, (h)) 4355 4355 #define DISP_WIDTH_MASK REG_GENMASK(15, 0) 4356 4356 #define DISP_WIDTH(w) REG_FIELD_PREP(DISP_WIDTH_MASK, (w)) ··· 5152 5152 #define _SEL_FETCH_PLANE_BASE_6_A 0x70940 5153 5153 #define _SEL_FETCH_PLANE_BASE_7_A 0x70960 5154 5154 #define _SEL_FETCH_PLANE_BASE_CUR_A 0x70880 5155 - #define _SEL_FETCH_PLANE_BASE_1_B 0x70990 5155 + #define _SEL_FETCH_PLANE_BASE_1_B 0x71890 5156 5156 5157 5157 #define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \ 5158 5158 _SEL_FETCH_PLANE_BASE_1_A, \
-3
drivers/gpu/drm/sun4i/sun4i_frontend.c
··· 222 222 223 223 /* Set the physical address of the buffer in memory */ 224 224 paddr = drm_fb_cma_get_gem_addr(fb, state, 0); 225 - paddr -= PHYS_OFFSET; 226 225 DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr); 227 226 regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr); 228 227 229 228 if (fb->format->num_planes > 1) { 230 229 paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 2 : 1); 231 - paddr -= PHYS_OFFSET; 232 230 DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr); 233 231 regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG, 234 232 paddr); ··· 234 236 235 237 if (fb->format->num_planes > 2) { 236 238 paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 1 : 2); 237 - paddr -= PHYS_OFFSET; 238 239 DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr); 239 240 regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG, 240 241 paddr);
+2 -2
drivers/hwmon/adt7470.c
··· 19 19 #include <linux/log2.h> 20 20 #include <linux/kthread.h> 21 21 #include <linux/regmap.h> 22 + #include <linux/sched.h> 22 23 #include <linux/slab.h> 23 24 #include <linux/util_macros.h> 24 25 ··· 295 294 adt7470_read_temperatures(data); 296 295 mutex_unlock(&data->lock); 297 296 298 - set_current_state(TASK_INTERRUPTIBLE); 299 297 if (kthread_should_stop()) 300 298 break; 301 299 302 - schedule_timeout(msecs_to_jiffies(data->auto_update_interval)); 300 + schedule_timeout_interruptible(msecs_to_jiffies(data->auto_update_interval)); 303 301 } 304 302 305 303 return 0;
+1 -1
drivers/hwmon/asus_wmi_sensors.c
··· 71 71 DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X399-A"), 72 72 DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO"), 73 73 DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI EXTREME"), 74 - DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO"), 74 + DMI_EXACT_MATCH_ASUS_BOARD_NAME("CROSSHAIR VI HERO"), 75 75 DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VI HERO (WI-FI AC)"), 76 76 DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO"), 77 77 DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VII HERO (WI-FI)"),
+3 -2
drivers/hwmon/f71882fg.c
··· 1578 1578 temp *= 125; 1579 1579 if (sign) 1580 1580 temp -= 128000; 1581 - } else 1582 - temp = data->temp[nr] * 1000; 1581 + } else { 1582 + temp = ((s8)data->temp[nr]) * 1000; 1583 + } 1583 1584 1584 1585 return sprintf(buf, "%d\n", temp); 1585 1586 }
+16
drivers/hwmon/pmbus/delta-ahe50dc-fan.c
··· 14 14 15 15 #define AHE50DC_PMBUS_READ_TEMP4 0xd0 16 16 17 + static int ahe50dc_fan_write_byte(struct i2c_client *client, int page, u8 value) 18 + { 19 + /* 20 + * The CLEAR_FAULTS operation seems to sometimes (unpredictably, perhaps 21 + * 5% of the time or so) trigger a problematic phenomenon in which the 22 + * fan speeds surge momentarily and at least some (perhaps all?) of the 23 + * system's power outputs experience a glitch. 24 + * 25 + * However, according to Delta it should be OK to simply not send any 26 + * CLEAR_FAULTS commands (the device doesn't seem to be capable of 27 + * reporting any faults anyway), so just blackhole them unconditionally. 28 + */ 29 + return value == PMBUS_CLEAR_FAULTS ? -EOPNOTSUPP : -ENODATA; 30 + } 31 + 17 32 static int ahe50dc_fan_read_word_data(struct i2c_client *client, int page, int phase, int reg) 18 33 { 19 34 /* temp1 in (virtual) page 1 is remapped to mfr-specific temp4 */ ··· 83 68 PMBUS_HAVE_VIN | PMBUS_HAVE_FAN12 | PMBUS_HAVE_FAN34 | 84 69 PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_STATUS_FAN34 | PMBUS_PAGE_VIRTUAL, 85 70 .func[1] = PMBUS_HAVE_TEMP | PMBUS_PAGE_VIRTUAL, 71 + .write_byte = ahe50dc_fan_write_byte, 86 72 .read_word_data = ahe50dc_fan_read_word_data, 87 73 }; 88 74
+3
drivers/hwmon/pmbus/pmbus_core.c
··· 2326 2326 data->has_status_word = true; 2327 2327 } 2328 2328 2329 + /* Make sure PEC is disabled, will be enabled later if needed */ 2330 + client->flags &= ~I2C_CLIENT_PEC; 2331 + 2329 2332 /* Enable PEC if the controller and bus supports it */ 2330 2333 if (!(data->flags & PMBUS_NO_CAPABILITY)) { 2331 2334 ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
+1 -1
drivers/hwmon/pmbus/xdpe12284.c
··· 124 124 return 0; 125 125 } 126 126 127 - static const struct regulator_desc xdpe122_reg_desc[] = { 127 + static const struct regulator_desc __maybe_unused xdpe122_reg_desc[] = { 128 128 PMBUS_REGULATOR("vout", 0), 129 129 PMBUS_REGULATOR("vout", 1), 130 130 };
+6 -6
drivers/iio/adc/ad7280a.c
··· 745 745 case IIO_EV_DIR_RISING: 746 746 addr = AD7280A_CELL_OVERVOLTAGE_REG; 747 747 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr, 748 - 1, val); 748 + 1, value); 749 749 if (ret) 750 750 break; 751 751 st->cell_threshhigh = value; ··· 753 753 case IIO_EV_DIR_FALLING: 754 754 addr = AD7280A_CELL_UNDERVOLTAGE_REG; 755 755 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr, 756 - 1, val); 756 + 1, value); 757 757 if (ret) 758 758 break; 759 759 st->cell_threshlow = value; ··· 770 770 case IIO_EV_DIR_RISING: 771 771 addr = AD7280A_AUX_ADC_OVERVOLTAGE_REG; 772 772 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr, 773 - 1, val); 773 + 1, value); 774 774 if (ret) 775 775 break; 776 - st->aux_threshhigh = val; 776 + st->aux_threshhigh = value; 777 777 break; 778 778 case IIO_EV_DIR_FALLING: 779 779 addr = AD7280A_AUX_ADC_UNDERVOLTAGE_REG; 780 780 ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, addr, 781 - 1, val); 781 + 1, value); 782 782 if (ret) 783 783 break; 784 - st->aux_threshlow = val; 784 + st->aux_threshlow = value; 785 785 break; 786 786 default: 787 787 ret = -EINVAL;
+4 -1
drivers/iio/chemical/scd4x.c
··· 471 471 ret = scd4x_write_and_fetch(state, CMD_FRC, arg, &val, sizeof(val)); 472 472 mutex_unlock(&state->lock); 473 473 474 + if (ret) 475 + return ret; 476 + 474 477 if (val == 0xff) { 475 478 dev_err(dev, "forced calibration has failed"); 476 479 return -EINVAL; 477 480 } 478 481 479 - return ret ?: len; 482 + return len; 480 483 } 481 484 482 485 static IIO_DEVICE_ATTR_RW(calibration_auto_enable, 0);
+3 -3
drivers/iio/dac/ad3552r.c
··· 656 656 { 657 657 struct reg_addr_pool addr; 658 658 int ret; 659 - u16 val; 659 + int val; 660 660 661 661 dac->gpio_reset = devm_gpiod_get_optional(&dac->spi->dev, "reset", 662 662 GPIOD_OUT_LOW); ··· 809 809 810 810 gain_child = fwnode_get_named_child_node(child, 811 811 "custom-output-range-config"); 812 - if (IS_ERR(gain_child)) { 812 + if (!gain_child) { 813 813 dev_err(dev, 814 814 "mandatory custom-output-range-config property missing\n"); 815 - return PTR_ERR(gain_child); 815 + return -EINVAL; 816 816 } 817 817 818 818 dac->ch_data[ch].range_override = 1;
+1 -1
drivers/iio/dac/ad5446.c
··· 178 178 179 179 switch (m) { 180 180 case IIO_CHAN_INFO_RAW: 181 - *val = st->cached_val; 181 + *val = st->cached_val >> chan->scan_type.shift; 182 182 return IIO_VAL_INT; 183 183 case IIO_CHAN_INFO_SCALE: 184 184 *val = st->vref_mv;
+1 -1
drivers/iio/dac/ad5592r-base.c
··· 522 522 if (!ret) 523 523 st->channel_modes[reg] = tmp; 524 524 525 - fwnode_property_read_u32(child, "adi,off-state", &tmp); 525 + ret = fwnode_property_read_u32(child, "adi,off-state", &tmp); 526 526 if (!ret) 527 527 st->channel_offstate[reg] = tmp; 528 528 }
+1 -1
drivers/iio/dac/ltc2688.c
··· 298 298 if (ret) 299 299 return ret; 300 300 301 - *val = 16; 301 + *val2 = 16; 302 302 return IIO_VAL_FRACTIONAL_LOG2; 303 303 case IIO_CHAN_INFO_CALIBBIAS: 304 304 ret = regmap_read(st->regmap,
+18 -10
drivers/iio/dac/ti-dac5571.c
··· 19 19 #include <linux/i2c.h> 20 20 #include <linux/module.h> 21 21 #include <linux/mod_devicetable.h> 22 + #include <linux/property.h> 22 23 #include <linux/regulator/consumer.h> 23 24 24 25 enum chip_id { ··· 312 311 const struct dac5571_spec *spec; 313 312 struct dac5571_data *data; 314 313 struct iio_dev *indio_dev; 314 + enum chip_id chip_id; 315 315 int ret, i; 316 316 317 317 indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); ··· 328 326 indio_dev->modes = INDIO_DIRECT_MODE; 329 327 indio_dev->channels = dac5571_channels; 330 328 331 - spec = &dac5571_spec[id->driver_data]; 329 + if (dev_fwnode(dev)) 330 + chip_id = (uintptr_t)device_get_match_data(dev); 331 + else 332 + chip_id = id->driver_data; 333 + 334 + spec = &dac5571_spec[chip_id]; 335 + 332 336 indio_dev->num_channels = spec->num_channels; 333 337 data->spec = spec; 334 338 ··· 393 385 } 394 386 395 387 static const struct of_device_id dac5571_of_id[] = { 396 - {.compatible = "ti,dac5571"}, 397 - {.compatible = "ti,dac6571"}, 398 - {.compatible = "ti,dac7571"}, 399 - {.compatible = "ti,dac5574"}, 400 - {.compatible = "ti,dac6574"}, 401 - {.compatible = "ti,dac7574"}, 402 - {.compatible = "ti,dac5573"}, 403 - {.compatible = "ti,dac6573"}, 404 - {.compatible = "ti,dac7573"}, 388 + {.compatible = "ti,dac5571", .data = (void *)single_8bit}, 389 + {.compatible = "ti,dac6571", .data = (void *)single_10bit}, 390 + {.compatible = "ti,dac7571", .data = (void *)single_12bit}, 391 + {.compatible = "ti,dac5574", .data = (void *)quad_8bit}, 392 + {.compatible = "ti,dac6574", .data = (void *)quad_10bit}, 393 + {.compatible = "ti,dac7574", .data = (void *)quad_12bit}, 394 + {.compatible = "ti,dac5573", .data = (void *)quad_8bit}, 395 + {.compatible = "ti,dac6573", .data = (void *)quad_10bit}, 396 + {.compatible = "ti,dac7573", .data = (void *)quad_12bit}, 405 397 {} 406 398 }; 407 399 MODULE_DEVICE_TABLE(of, dac5571_of_id);
+1
drivers/iio/filter/Kconfig
··· 8 8 config ADMV8818 9 9 tristate "Analog Devices ADMV8818 High-Pass and Low-Pass Filter" 10 10 depends on SPI && COMMON_CLK && 64BIT 11 + select REGMAP_SPI 11 12 help 12 13 Say yes here to build support for Analog Devices ADMV8818 13 14 2 GHz to 18 GHz, Digitally Tunable, High-Pass and Low-Pass Filter.
+14 -6
drivers/iio/imu/bmi160/bmi160_core.c
··· 730 730 731 731 ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET); 732 732 if (ret) 733 - return ret; 733 + goto disable_regulator; 734 734 735 735 usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1); 736 736 ··· 741 741 if (use_spi) { 742 742 ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val); 743 743 if (ret) 744 - return ret; 744 + goto disable_regulator; 745 745 } 746 746 747 747 ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val); 748 748 if (ret) { 749 749 dev_err(dev, "Error reading chip id\n"); 750 - return ret; 750 + goto disable_regulator; 751 751 } 752 752 if (val != BMI160_CHIP_ID_VAL) { 753 753 dev_err(dev, "Wrong chip id, got %x expected %x\n", 754 754 val, BMI160_CHIP_ID_VAL); 755 - return -ENODEV; 755 + ret = -ENODEV; 756 + goto disable_regulator; 756 757 } 757 758 758 759 ret = bmi160_set_mode(data, BMI160_ACCEL, true); 759 760 if (ret) 760 - return ret; 761 + goto disable_regulator; 761 762 762 763 ret = bmi160_set_mode(data, BMI160_GYRO, true); 763 764 if (ret) 764 - return ret; 765 + goto disable_accel; 765 766 766 767 return 0; 768 + 769 + disable_accel: 770 + bmi160_set_mode(data, BMI160_ACCEL, false); 771 + 772 + disable_regulator: 773 + regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies); 774 + return ret; 767 775 } 768 776 769 777 static int bmi160_data_rdy_trigger_set_state(struct iio_trigger *trig,
+9 -6
drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
··· 18 18 unsigned int mask, val; 19 19 int ret; 20 20 21 - /* setup interface registers */ 22 - ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6, 23 - INV_ICM42600_INTF_CONFIG6_MASK, 24 - INV_ICM42600_INTF_CONFIG6_I3C_EN); 25 - if (ret) 26 - return ret; 21 + /* 22 + * setup interface registers 23 + * This register write to REG_INTF_CONFIG6 enables a spike filter that 24 + * is impacting the line and can prevent the I2C ACK to be seen by the 25 + * controller. So we don't test the return value. 26 + */ 27 + regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG6, 28 + INV_ICM42600_INTF_CONFIG6_MASK, 29 + INV_ICM42600_INTF_CONFIG6_I3C_EN); 27 30 28 31 ret = regmap_update_bits(st->map, INV_ICM42600_REG_INTF_CONFIG4, 29 32 INV_ICM42600_INTF_CONFIG4_I3C_BUS_ONLY, 0);
+1
drivers/iio/magnetometer/ak8975.c
··· 416 416 if (ret) { 417 417 dev_warn(&data->client->dev, 418 418 "Failed to enable specified Vid supply\n"); 419 + regulator_disable(data->vdd); 419 420 return ret; 420 421 } 421 422
+25 -7
drivers/iio/proximity/sx9324.c
··· 70 70 #define SX9324_REG_AFE_PH2 0x2a 71 71 #define SX9324_REG_AFE_PH3 0x2b 72 72 #define SX9324_REG_AFE_CTRL8 0x2c 73 - #define SX9324_REG_AFE_CTRL8_RESFILTN_4KOHM 0x02 73 + #define SX9324_REG_AFE_CTRL8_RESERVED 0x10 74 + #define SX9324_REG_AFE_CTRL8_RESFILTIN_4KOHM 0x02 74 75 #define SX9324_REG_AFE_CTRL9 0x2d 75 76 #define SX9324_REG_AFE_CTRL9_AGAIN_1 0x08 76 77 77 78 #define SX9324_REG_PROX_CTRL0 0x30 78 79 #define SX9324_REG_PROX_CTRL0_GAIN_MASK GENMASK(5, 3) 79 - #define SX9324_REG_PROX_CTRL0_GAIN_1 0x80 80 + #define SX9324_REG_PROX_CTRL0_GAIN_SHIFT 3 81 + #define SX9324_REG_PROX_CTRL0_GAIN_RSVD 0x0 82 + #define SX9324_REG_PROX_CTRL0_GAIN_1 0x1 83 + #define SX9324_REG_PROX_CTRL0_GAIN_8 0x4 80 84 #define SX9324_REG_PROX_CTRL0_RAWFILT_MASK GENMASK(2, 0) 81 85 #define SX9324_REG_PROX_CTRL0_RAWFILT_1P50 0x01 82 86 #define SX9324_REG_PROX_CTRL1 0x31 ··· 383 379 if (ret) 384 380 return ret; 385 381 386 - *val = 1 << FIELD_GET(SX9324_REG_PROX_CTRL0_GAIN_MASK, regval); 382 + regval = FIELD_GET(SX9324_REG_PROX_CTRL0_GAIN_MASK, regval); 383 + if (regval) 384 + regval--; 385 + else if (regval == SX9324_REG_PROX_CTRL0_GAIN_RSVD || 386 + regval > SX9324_REG_PROX_CTRL0_GAIN_8) 387 + return -EINVAL; 388 + 389 + *val = 1 << regval; 387 390 388 391 return IIO_VAL_INT; 389 392 } ··· 736 725 unsigned int gain, reg; 737 726 int ret; 738 727 739 - gain = ilog2(val); 740 728 reg = SX9324_REG_PROX_CTRL0 + chan->channel / 2; 729 + 730 + gain = ilog2(val) + 1; 731 + if (val <= 0 || gain > SX9324_REG_PROX_CTRL0_GAIN_8) 732 + return -EINVAL; 733 + 741 734 gain = FIELD_PREP(SX9324_REG_PROX_CTRL0_GAIN_MASK, gain); 742 735 743 736 mutex_lock(&data->mutex); ··· 796 781 { SX9324_REG_AFE_PH2, 0x1a }, 797 782 { SX9324_REG_AFE_PH3, 0x16 }, 798 783 799 - { SX9324_REG_AFE_CTRL8, SX9324_REG_AFE_CTRL8_RESFILTN_4KOHM }, 784 + { SX9324_REG_AFE_CTRL8, SX9324_REG_AFE_CTRL8_RESERVED | 785 + SX9324_REG_AFE_CTRL8_RESFILTIN_4KOHM }, 800 786 { SX9324_REG_AFE_CTRL9, SX9324_REG_AFE_CTRL9_AGAIN_1 }, 801 
787 802 - { SX9324_REG_PROX_CTRL0, SX9324_REG_PROX_CTRL0_GAIN_1 | 788 + { SX9324_REG_PROX_CTRL0, 789 + SX9324_REG_PROX_CTRL0_GAIN_1 << SX9324_REG_PROX_CTRL0_GAIN_SHIFT | 803 790 SX9324_REG_PROX_CTRL0_RAWFILT_1P50 }, 804 - { SX9324_REG_PROX_CTRL1, SX9324_REG_PROX_CTRL0_GAIN_1 | 791 + { SX9324_REG_PROX_CTRL1, 792 + SX9324_REG_PROX_CTRL0_GAIN_1 << SX9324_REG_PROX_CTRL0_GAIN_SHIFT | 805 793 SX9324_REG_PROX_CTRL0_RAWFILT_1P50 }, 806 794 { SX9324_REG_PROX_CTRL2, SX9324_REG_PROX_CTRL2_AVGNEG_THRESH_16K }, 807 795 { SX9324_REG_PROX_CTRL3, SX9324_REG_PROX_CTRL3_AVGDEB_2SAMPLES |
+1
drivers/iio/proximity/sx_common.c
··· 521 521 return dev_err_probe(dev, ret, "error reading WHOAMI\n"); 522 522 523 523 ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(dev)); 524 + indio_dev->dev.of_node = client->dev.of_node; 524 525 indio_dev->modes = INDIO_DIRECT_MODE; 525 526 526 527 indio_dev->channels = data->chip_info->iio_channels;
-21
drivers/interconnect/qcom/sc7180.c
··· 47 47 DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC); 48 48 DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC); 49 49 DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC); 50 - DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE); 51 50 DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1); 52 51 DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC); 53 52 DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC); ··· 128 129 DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC); 129 130 DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC); 130 131 DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4); 131 - DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8); 132 132 DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4); 133 133 DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC); 134 134 DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC); ··· 158 160 DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc); 159 161 DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf); 160 162 DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto); 161 - DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave); 162 163 DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, 
&qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc); 163 164 DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9); 164 165 DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu); ··· 369 372 .num_bcms = ARRAY_SIZE(gem_noc_bcms), 370 373 }; 371 374 372 - static struct qcom_icc_bcm *ipa_virt_bcms[] = { 373 - &bcm_ip0, 374 - }; 375 - 376 - static struct qcom_icc_node *ipa_virt_nodes[] = { 377 - [MASTER_IPA_CORE] = &ipa_core_master, 378 - [SLAVE_IPA_CORE] = &ipa_core_slave, 379 - }; 380 - 381 - static struct qcom_icc_desc sc7180_ipa_virt = { 382 - .nodes = ipa_virt_nodes, 383 - .num_nodes = ARRAY_SIZE(ipa_virt_nodes), 384 - .bcms = ipa_virt_bcms, 385 - .num_bcms = ARRAY_SIZE(ipa_virt_bcms), 386 - }; 387 - 388 375 static struct qcom_icc_bcm *mc_virt_bcms[] = { 389 376 &bcm_acv, 390 377 &bcm_mc0, ··· 500 519 .data = &sc7180_dc_noc}, 501 520 { .compatible = "qcom,sc7180-gem-noc", 502 521 .data = &sc7180_gem_noc}, 503 - { .compatible = "qcom,sc7180-ipa-virt", 504 - .data = &sc7180_ipa_virt}, 505 522 { .compatible = "qcom,sc7180-mc-virt", 506 523 .data = &sc7180_mc_virt}, 507 524 { .compatible = "qcom,sc7180-mmss-noc",
-21
drivers/interconnect/qcom/sdx55.c
··· 18 18 #include "icc-rpmh.h" 19 19 #include "sdx55.h" 20 20 21 - DEFINE_QNODE(ipa_core_master, SDX55_MASTER_IPA_CORE, 1, 8, SDX55_SLAVE_IPA_CORE); 22 21 DEFINE_QNODE(llcc_mc, SDX55_MASTER_LLCC, 4, 4, SDX55_SLAVE_EBI_CH0); 23 22 DEFINE_QNODE(acm_tcu, SDX55_MASTER_TCU_0, 1, 8, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC); 24 23 DEFINE_QNODE(qnm_snoc_gc, SDX55_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDX55_SLAVE_LLCC); ··· 39 40 DEFINE_QNODE(xm_qdss_etr, SDX55_MASTER_QDSS_ETR, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG); 40 41 DEFINE_QNODE(xm_sdc1, SDX55_MASTER_SDCC_1, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO); 41 42 DEFINE_QNODE(xm_usb3, SDX55_MASTER_USB3, 1, 8, SDX55_SLAVE_ANOC_SNOC); 42 - DEFINE_QNODE(ipa_core_slave, SDX55_SLAVE_IPA_CORE, 1, 8); 43 43 DEFINE_QNODE(ebi, SDX55_SLAVE_EBI_CH0, 1, 4); 44 44 DEFINE_QNODE(qns_llcc, SDX55_SLAVE_LLCC, 1, 16, SDX55_SLAVE_EBI_CH0); 45 45 DEFINE_QNODE(qns_memnoc_snoc, SDX55_SLAVE_MEM_NOC_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_SNOC); ··· 80 82 DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi); 81 83 DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc); 82 84 DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto); 83 - DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave); 84 85 DEFINE_QBCM(bcm_pn0, "PN0", false, &qhm_snoc_cfg); 85 86 DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr); 86 87 DEFINE_QBCM(bcm_sh4, "SH4", false, 
&qns_memnoc_snoc, &qns_sys_pcie); ··· 216 219 .num_bcms = ARRAY_SIZE(system_noc_bcms), 217 220 }; 218 221 219 - static struct qcom_icc_bcm *ipa_virt_bcms[] = { 220 - &bcm_ip0, 221 - }; 222 - 223 - static struct qcom_icc_node *ipa_virt_nodes[] = { 224 - [MASTER_IPA_CORE] = &ipa_core_master, 225 - [SLAVE_IPA_CORE] = &ipa_core_slave, 226 - }; 227 - 228 - static const struct qcom_icc_desc sdx55_ipa_virt = { 229 - .nodes = ipa_virt_nodes, 230 - .num_nodes = ARRAY_SIZE(ipa_virt_nodes), 231 - .bcms = ipa_virt_bcms, 232 - .num_bcms = ARRAY_SIZE(ipa_virt_bcms), 233 - }; 234 - 235 222 static const struct of_device_id qnoc_of_match[] = { 236 223 { .compatible = "qcom,sdx55-mc-virt", 237 224 .data = &sdx55_mc_virt}, ··· 223 242 .data = &sdx55_mem_noc}, 224 243 { .compatible = "qcom,sdx55-system-noc", 225 244 .data = &sdx55_system_noc}, 226 - { .compatible = "qcom,sdx55-ipa-virt", 227 - .data = &sdx55_ipa_virt}, 228 245 { } 229 246 }; 230 247 MODULE_DEVICE_TABLE(of, qnoc_of_match);
+5 -5
drivers/iommu/apple-dart.c
··· 773 773 .get_resv_regions = apple_dart_get_resv_regions, 774 774 .put_resv_regions = generic_iommu_put_resv_regions, 775 775 .pgsize_bitmap = -1UL, /* Restricted during dart probe */ 776 + .owner = THIS_MODULE, 776 777 .default_domain_ops = &(const struct iommu_domain_ops) { 777 778 .attach_dev = apple_dart_attach_dev, 778 779 .detach_dev = apple_dart_detach_dev, ··· 860 859 dart->dev = dev; 861 860 spin_lock_init(&dart->lock); 862 861 863 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 862 + dart->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 863 + if (IS_ERR(dart->regs)) 864 + return PTR_ERR(dart->regs); 865 + 864 866 if (resource_size(res) < 0x4000) { 865 867 dev_err(dev, "MMIO region too small (%pr)\n", res); 866 868 return -EINVAL; 867 869 } 868 - 869 - dart->regs = devm_ioremap_resource(dev, res); 870 - if (IS_ERR(dart->regs)) 871 - return PTR_ERR(dart->regs); 872 870 873 871 dart->irq = platform_get_irq(pdev, 0); 874 872 if (dart->irq < 0)
+8 -1
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
··· 183 183 { 184 184 struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); 185 185 struct arm_smmu_domain *smmu_domain = smmu_mn->domain; 186 - size_t size = end - start + 1; 186 + size_t size; 187 + 188 + /* 189 + * The mm_types defines vm_end as the first byte after the end address, 190 + * different from IOMMU subsystem using the last address of an address 191 + * range. So do a simple translation here by calculating size correctly. 192 + */ 193 + size = end - start; 187 194 188 195 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) 189 196 arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
+30
drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
··· 258 258 dev_name(dev), err); 259 259 } 260 260 261 + static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain, 262 + struct io_pgtable_cfg *pgtbl_cfg, 263 + struct device *dev) 264 + { 265 + struct arm_smmu_device *smmu = smmu_domain->smmu; 266 + const struct device_node *np = smmu->dev->of_node; 267 + 268 + /* 269 + * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache 270 + * entries to not be invalidated correctly. The problem is that the walk 271 + * cache index generated for IOVA is not same across translation and 272 + * invalidation requests. This is leading to page faults when PMD entry 273 + * is released during unmap and populated with new PTE table during 274 + * subsequent map request. Disabling large page mappings avoids the 275 + * release of PMD entry and avoid translations seeing stale PMD entry in 276 + * walk cache. 277 + * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and 278 + * Tegra234. 279 + */ 280 + if (of_device_is_compatible(np, "nvidia,tegra234-smmu") || 281 + of_device_is_compatible(np, "nvidia,tegra194-smmu")) { 282 + smmu->pgsize_bitmap = PAGE_SIZE; 283 + pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap; 284 + } 285 + 286 + return 0; 287 + } 288 + 261 289 static const struct arm_smmu_impl nvidia_smmu_impl = { 262 290 .read_reg = nvidia_smmu_read_reg, 263 291 .write_reg = nvidia_smmu_write_reg, ··· 296 268 .global_fault = nvidia_smmu_global_fault, 297 269 .context_fault = nvidia_smmu_context_fault, 298 270 .probe_finalize = nvidia_smmu_probe_finalize, 271 + .init_context = nvidia_smmu_init_context, 299 272 }; 300 273 301 274 static const struct arm_smmu_impl nvidia_smmu_single_impl = { 302 275 .probe_finalize = nvidia_smmu_probe_finalize, 276 + .init_context = nvidia_smmu_init_context, 303 277 }; 304 278 305 279 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
+24 -3
drivers/iommu/intel/iommu.c
··· 1588 1588 unsigned long pfn, unsigned int pages, 1589 1589 int ih, int map) 1590 1590 { 1591 - unsigned int mask = ilog2(__roundup_pow_of_two(pages)); 1591 + unsigned int aligned_pages = __roundup_pow_of_two(pages); 1592 + unsigned int mask = ilog2(aligned_pages); 1592 1593 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; 1593 1594 u16 did = domain->iommu_did[iommu->seq_id]; 1594 1595 ··· 1601 1600 if (domain_use_first_level(domain)) { 1602 1601 qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih); 1603 1602 } else { 1603 + unsigned long bitmask = aligned_pages - 1; 1604 + 1605 + /* 1606 + * PSI masks the low order bits of the base address. If the 1607 + * address isn't aligned to the mask, then compute a mask value 1608 + * needed to ensure the target range is flushed. 1609 + */ 1610 + if (unlikely(bitmask & pfn)) { 1611 + unsigned long end_pfn = pfn + pages - 1, shared_bits; 1612 + 1613 + /* 1614 + * Since end_pfn <= pfn + bitmask, the only way bits 1615 + * higher than bitmask can differ in pfn and end_pfn is 1616 + * by carrying. This means after masking out bitmask, 1617 + * high bits starting with the first set bit in 1618 + * shared_bits are all equal in both pfn and end_pfn. 1619 + */ 1620 + shared_bits = ~(pfn ^ end_pfn) & ~bitmask; 1621 + mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG; 1622 + } 1623 + 1604 1624 /* 1605 1625 * Fallback to domain selective flush if no PSI support or 1606 - * the size is too big. PSI requires page size to be 2 ^ x, 1607 - * and the base address is naturally aligned to the size. 1626 + * the size is too big. 1608 1627 */ 1609 1628 if (!cap_pgsel_inv(iommu->cap) || 1610 1629 mask > cap_max_amask_val(iommu->cap))
+4
drivers/iommu/intel/svm.c
··· 757 757 goto bad_req; 758 758 } 759 759 760 + /* Drop Stop Marker message. No need for a response. */ 761 + if (unlikely(req->lpig && !req->rd_req && !req->wr_req)) 762 + goto prq_advance; 763 + 760 764 if (!svm || svm->pasid != req->pasid) { 761 765 /* 762 766 * It can't go away, because the driver is not permitted
+8 -1
drivers/iommu/iommu.c
··· 506 506 list_for_each_entry(device, &group->devices, list) { 507 507 struct list_head dev_resv_regions; 508 508 509 + /* 510 + * Non-API groups still expose reserved_regions in sysfs, 511 + * so filter out calls that get here that way. 512 + */ 513 + if (!device->dev->iommu) 514 + break; 515 + 509 516 INIT_LIST_HEAD(&dev_resv_regions); 510 517 iommu_get_resv_regions(device->dev, &dev_resv_regions); 511 518 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); ··· 3026 3019 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) 3027 3020 return -EACCES; 3028 3021 3029 - if (WARN_ON(!group)) 3022 + if (WARN_ON(!group) || !group->default_domain) 3030 3023 return -EINVAL; 3031 3024 3032 3025 if (sysfs_streq(buf, "identity"))
+46 -14
drivers/memory/renesas-rpc-if.c
··· 164 164 165 165 166 166 /* 167 - * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed 168 - * with proper width. Requires SMENR_SPIDE to be correctly set before! 167 + * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with 168 + * proper width. Requires rpcif.xfer_size to be correctly set before! 169 169 */ 170 170 static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val) 171 171 { 172 172 struct rpcif *rpc = context; 173 173 174 - if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) { 175 - u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF); 176 - 177 - if (spide == 0x8) { 174 + switch (reg) { 175 + case RPCIF_SMRDR0: 176 + case RPCIF_SMWDR0: 177 + switch (rpc->xfer_size) { 178 + case 1: 178 179 *val = readb(rpc->base + reg); 179 180 return 0; 180 - } else if (spide == 0xC) { 181 + 182 + case 2: 181 183 *val = readw(rpc->base + reg); 182 184 return 0; 183 - } else if (spide != 0xF) { 185 + 186 + case 4: 187 + case 8: 188 + *val = readl(rpc->base + reg); 189 + return 0; 190 + 191 + default: 184 192 return -EILSEQ; 185 193 } 194 + 195 + case RPCIF_SMRDR1: 196 + case RPCIF_SMWDR1: 197 + if (rpc->xfer_size != 8) 198 + return -EILSEQ; 199 + break; 186 200 } 187 201 188 202 *val = readl(rpc->base + reg); ··· 207 193 { 208 194 struct rpcif *rpc = context; 209 195 210 - if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) { 211 - u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF); 212 - 213 - if (spide == 0x8) { 196 + switch (reg) { 197 + case RPCIF_SMWDR0: 198 + switch (rpc->xfer_size) { 199 + case 1: 214 200 writeb(val, rpc->base + reg); 215 201 return 0; 216 - } else if (spide == 0xC) { 202 + 203 + case 2: 217 204 writew(val, rpc->base + reg); 218 205 return 0; 219 - } else if (spide != 0xF) { 206 + 207 + case 4: 208 + case 8: 209 + writel(val, rpc->base + reg); 210 + return 0; 211 + 212 + default: 220 213 return -EILSEQ; 221 214 } 215 + 216 + case RPCIF_SMWDR1: 217 + if 
(rpc->xfer_size != 8) 218 + return -EILSEQ; 219 + break; 220 + 221 + case RPCIF_SMRDR0: 222 + case RPCIF_SMRDR1: 223 + return -EPERM; 222 224 } 223 225 224 226 writel(val, rpc->base + reg); ··· 499 469 500 470 smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)); 501 471 regmap_write(rpc->regmap, RPCIF_SMENR, smenr); 472 + rpc->xfer_size = nbytes; 502 473 503 474 memcpy(data, rpc->buffer + pos, nbytes); 504 475 if (nbytes == 8) { ··· 564 533 regmap_write(rpc->regmap, RPCIF_SMENR, smenr); 565 534 regmap_write(rpc->regmap, RPCIF_SMCR, 566 535 rpc->smcr | RPCIF_SMCR_SPIE); 536 + rpc->xfer_size = nbytes; 567 537 ret = wait_msg_xfer_end(rpc); 568 538 if (ret) 569 539 goto err_out;
+11 -8
drivers/misc/eeprom/at25.c
··· 31 31 */ 32 32 33 33 #define FM25_SN_LEN 8 /* serial number length */ 34 + #define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */ 35 + 34 36 struct at25_data { 35 37 struct spi_eeprom chip; 36 38 struct spi_device *spi; ··· 41 39 struct nvmem_config nvmem_config; 42 40 struct nvmem_device *nvmem; 43 41 u8 sernum[FM25_SN_LEN]; 42 + u8 command[EE_MAXADDRLEN + 1]; 44 43 }; 45 44 46 45 #define AT25_WREN 0x06 /* latch the write enable */ ··· 64 61 65 62 #define FM25_ID_LEN 9 /* ID length */ 66 63 67 - #define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */ 68 - 69 64 /* 70 65 * Specs often allow 5ms for a page write, sometimes 20ms; 71 66 * it's important to recover from write timeouts. ··· 79 78 { 80 79 struct at25_data *at25 = priv; 81 80 char *buf = val; 82 - u8 command[EE_MAXADDRLEN + 1]; 83 81 u8 *cp; 84 82 ssize_t status; 85 83 struct spi_transfer t[2]; ··· 92 92 if (unlikely(!count)) 93 93 return -EINVAL; 94 94 95 - cp = command; 95 + cp = at25->command; 96 96 97 97 instr = AT25_READ; 98 98 if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) 99 99 if (offset >= BIT(at25->addrlen * 8)) 100 100 instr |= AT25_INSTR_BIT3; 101 + 102 + mutex_lock(&at25->lock); 103 + 101 104 *cp++ = instr; 102 105 103 106 /* 8/16/24-bit address is written MSB first */ ··· 119 116 spi_message_init(&m); 120 117 memset(t, 0, sizeof(t)); 121 118 122 - t[0].tx_buf = command; 119 + t[0].tx_buf = at25->command; 123 120 t[0].len = at25->addrlen + 1; 124 121 spi_message_add_tail(&t[0], &m); 125 122 126 123 t[1].rx_buf = buf; 127 124 t[1].len = count; 128 125 spi_message_add_tail(&t[1], &m); 129 - 130 - mutex_lock(&at25->lock); 131 126 132 127 /* 133 128 * Read it all at once. 
··· 153 152 spi_message_init(&m); 154 153 memset(t, 0, sizeof(t)); 155 154 156 - t[0].tx_buf = &command; 155 + t[0].tx_buf = at25->command; 157 156 t[0].len = 1; 158 157 spi_message_add_tail(&t[0], &m); 159 158 ··· 162 161 spi_message_add_tail(&t[1], &m); 163 162 164 163 mutex_lock(&at25->lock); 164 + 165 + at25->command[0] = command; 165 166 166 167 status = spi_sync(at25->spi, &m); 167 168 dev_dbg(&at25->spi->dev, "read %d aux bytes --> %d\n", len, status);
+24 -22
drivers/net/can/grcan.c
··· 241 241 .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \ 242 242 } 243 243 244 - #define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100 244 + #define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100 245 245 #define GRLIB_VERSION_MASK 0xffff 246 246 247 247 /* GRCAN private data structure */ 248 248 struct grcan_priv { 249 249 struct can_priv can; /* must be the first member */ 250 250 struct net_device *dev; 251 + struct device *ofdev_dev; 251 252 struct napi_struct napi; 252 253 253 254 struct grcan_registers __iomem *regs; /* ioremap'ed registers */ ··· 922 921 struct grcan_priv *priv = netdev_priv(dev); 923 922 struct grcan_dma *dma = &priv->dma; 924 923 925 - dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf, 924 + dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf, 926 925 dma->base_handle); 927 926 memset(dma, 0, sizeof(*dma)); 928 927 } ··· 947 946 948 947 /* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */ 949 948 dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT; 950 - dma->base_buf = dma_alloc_coherent(&dev->dev, 949 + dma->base_buf = dma_alloc_coherent(priv->ofdev_dev, 951 950 dma->base_size, 952 951 &dma->base_handle, 953 952 GFP_KERNEL); ··· 1103 1102 1104 1103 priv->closing = true; 1105 1104 if (priv->need_txbug_workaround) { 1105 + spin_unlock_irqrestore(&priv->lock, flags); 1106 1106 del_timer_sync(&priv->hang_timer); 1107 1107 del_timer_sync(&priv->rr_timer); 1108 + spin_lock_irqsave(&priv->lock, flags); 1108 1109 } 1109 1110 netif_stop_queue(dev); 1110 1111 grcan_stop_hardware(dev); ··· 1125 1122 return 0; 1126 1123 } 1127 1124 1128 - static int grcan_transmit_catch_up(struct net_device *dev, int budget) 1125 + static void grcan_transmit_catch_up(struct net_device *dev) 1129 1126 { 1130 1127 struct grcan_priv *priv = netdev_priv(dev); 1131 1128 unsigned long flags; ··· 1133 1130 1134 1131 spin_lock_irqsave(&priv->lock, flags); 1135 1132 1136 - work_done = catch_up_echo_skb(dev, budget, true); 1133 + work_done = catch_up_echo_skb(dev, -1, 
true); 1137 1134 if (work_done) { 1138 1135 if (!priv->resetting && !priv->closing && 1139 1136 !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) ··· 1147 1144 } 1148 1145 1149 1146 spin_unlock_irqrestore(&priv->lock, flags); 1150 - 1151 - return work_done; 1152 1147 } 1153 1148 1154 1149 static int grcan_receive(struct net_device *dev, int budget) ··· 1228 1227 struct net_device *dev = priv->dev; 1229 1228 struct grcan_registers __iomem *regs = priv->regs; 1230 1229 unsigned long flags; 1231 - int tx_work_done, rx_work_done; 1232 - int rx_budget = budget / 2; 1233 - int tx_budget = budget - rx_budget; 1230 + int work_done; 1234 1231 1235 - /* Half of the budget for receiving messages */ 1236 - rx_work_done = grcan_receive(dev, rx_budget); 1232 + work_done = grcan_receive(dev, budget); 1237 1233 1238 - /* Half of the budget for transmitting messages as that can trigger echo 1239 - * frames being received 1240 - */ 1241 - tx_work_done = grcan_transmit_catch_up(dev, tx_budget); 1234 + grcan_transmit_catch_up(dev); 1242 1235 1243 - if (rx_work_done < rx_budget && tx_work_done < tx_budget) { 1236 + if (work_done < budget) { 1244 1237 napi_complete(napi); 1245 1238 1246 1239 /* Guarantee no interference with a running reset that otherwise ··· 1251 1256 spin_unlock_irqrestore(&priv->lock, flags); 1252 1257 } 1253 1258 1254 - return rx_work_done + tx_work_done; 1259 + return work_done; 1255 1260 } 1256 1261 1257 1262 /* Work tx bug by waiting while for the risky situation to clear. 
If that fails, ··· 1582 1587 memcpy(&priv->config, &grcan_module_config, 1583 1588 sizeof(struct grcan_device_config)); 1584 1589 priv->dev = dev; 1590 + priv->ofdev_dev = &ofdev->dev; 1585 1591 priv->regs = base; 1586 1592 priv->can.bittiming_const = &grcan_bittiming_const; 1587 1593 priv->can.do_set_bittiming = grcan_set_bittiming; ··· 1635 1639 static int grcan_probe(struct platform_device *ofdev) 1636 1640 { 1637 1641 struct device_node *np = ofdev->dev.of_node; 1642 + struct device_node *sysid_parent; 1638 1643 u32 sysid, ambafreq; 1639 1644 int irq, err; 1640 1645 void __iomem *base; ··· 1644 1647 /* Compare GRLIB version number with the first that does not 1645 1648 * have the tx bug (see start_xmit) 1646 1649 */ 1647 - err = of_property_read_u32(np, "systemid", &sysid); 1648 - if (!err && ((sysid & GRLIB_VERSION_MASK) 1649 - >= GRCAN_TXBUG_SAFE_GRLIB_VERSION)) 1650 - txbug = false; 1650 + sysid_parent = of_find_node_by_path("/ambapp0"); 1651 + if (sysid_parent) { 1652 + of_node_get(sysid_parent); 1653 + err = of_property_read_u32(sysid_parent, "systemid", &sysid); 1654 + if (!err && ((sysid & GRLIB_VERSION_MASK) >= 1655 + GRCAN_TXBUG_SAFE_GRLIB_VERSION)) 1656 + txbug = false; 1657 + of_node_put(sysid_parent); 1658 + } 1651 1659 1652 1660 err = of_property_read_u32(np, "freq", &ambafreq); 1653 1661 if (err) {
+7 -29
drivers/net/dsa/b53/b53_common.c
··· 1354 1354 config->legacy_pre_march2020 = false; 1355 1355 } 1356 1356 1357 - int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, 1358 - struct phylink_link_state *state) 1357 + static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds, 1358 + int port, 1359 + phy_interface_t interface) 1359 1360 { 1360 1361 struct b53_device *dev = ds->priv; 1361 - int ret = -EOPNOTSUPP; 1362 1362 1363 - if ((phy_interface_mode_is_8023z(state->interface) || 1364 - state->interface == PHY_INTERFACE_MODE_SGMII) && 1365 - dev->ops->serdes_link_state) 1366 - ret = dev->ops->serdes_link_state(dev, port, state); 1363 + if (!dev->ops->phylink_mac_select_pcs) 1364 + return NULL; 1367 1365 1368 - return ret; 1366 + return dev->ops->phylink_mac_select_pcs(dev, port, interface); 1369 1367 } 1370 - EXPORT_SYMBOL(b53_phylink_mac_link_state); 1371 1368 1372 1369 void b53_phylink_mac_config(struct dsa_switch *ds, int port, 1373 1370 unsigned int mode, 1374 1371 const struct phylink_link_state *state) 1375 1372 { 1376 - struct b53_device *dev = ds->priv; 1377 - 1378 - if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED) 1379 - return; 1380 - 1381 - if ((phy_interface_mode_is_8023z(state->interface) || 1382 - state->interface == PHY_INTERFACE_MODE_SGMII) && 1383 - dev->ops->serdes_config) 1384 - dev->ops->serdes_config(dev, port, mode, state); 1385 1373 } 1386 1374 EXPORT_SYMBOL(b53_phylink_mac_config); 1387 - 1388 - void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port) 1389 - { 1390 - struct b53_device *dev = ds->priv; 1391 - 1392 - if (dev->ops->serdes_an_restart) 1393 - dev->ops->serdes_an_restart(dev, port); 1394 - } 1395 - EXPORT_SYMBOL(b53_phylink_mac_an_restart); 1396 1375 1397 1376 void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, 1398 1377 unsigned int mode, ··· 2248 2269 .phy_write = b53_phy_write16, 2249 2270 .adjust_link = b53_adjust_link, 2250 2271 .phylink_get_caps = b53_phylink_get_caps, 2251 - .phylink_mac_link_state = 
b53_phylink_mac_link_state, 2272 + .phylink_mac_select_pcs = b53_phylink_mac_select_pcs, 2252 2273 .phylink_mac_config = b53_phylink_mac_config, 2253 - .phylink_mac_an_restart = b53_phylink_mac_an_restart, 2254 2274 .phylink_mac_link_down = b53_phylink_mac_link_down, 2255 2275 .phylink_mac_link_up = b53_phylink_mac_link_up, 2256 2276 .port_enable = b53_enable_port,
+13 -11
drivers/net/dsa/b53/b53_priv.h
··· 21 21 22 22 #include <linux/kernel.h> 23 23 #include <linux/mutex.h> 24 - #include <linux/phy.h> 24 + #include <linux/phylink.h> 25 25 #include <linux/etherdevice.h> 26 26 #include <net/dsa.h> 27 27 ··· 29 29 30 30 struct b53_device; 31 31 struct net_device; 32 - struct phylink_link_state; 33 32 34 33 struct b53_io_ops { 35 34 int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value); ··· 47 48 void (*irq_disable)(struct b53_device *dev, int port); 48 49 void (*phylink_get_caps)(struct b53_device *dev, int port, 49 50 struct phylink_config *config); 51 + struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev, 52 + int port, 53 + phy_interface_t interface); 50 54 u8 (*serdes_map_lane)(struct b53_device *dev, int port); 51 - int (*serdes_link_state)(struct b53_device *dev, int port, 52 - struct phylink_link_state *state); 53 - void (*serdes_config)(struct b53_device *dev, int port, 54 - unsigned int mode, 55 - const struct phylink_link_state *state); 56 - void (*serdes_an_restart)(struct b53_device *dev, int port); 57 55 void (*serdes_link_set)(struct b53_device *dev, int port, 58 56 unsigned int mode, phy_interface_t interface, 59 57 bool link_up); ··· 81 85 BCM7278_DEVICE_ID = 0x7278, 82 86 }; 83 87 88 + struct b53_pcs { 89 + struct phylink_pcs pcs; 90 + struct b53_device *dev; 91 + u8 lane; 92 + }; 93 + 84 94 #define B53_N_PORTS 9 85 95 #define B53_N_PORTS_25 6 96 + #define B53_N_PCS 2 86 97 87 98 struct b53_port { 88 99 u16 vlan_ctl_mask; ··· 146 143 bool vlan_enabled; 147 144 unsigned int num_ports; 148 145 struct b53_port *ports; 146 + 147 + struct b53_pcs pcs[B53_N_PCS]; 149 148 }; 150 149 151 150 #define b53_for_each_port(dev, i) \ ··· 341 336 struct netlink_ext_ack *extack); 342 337 int b53_setup_devlink_resources(struct dsa_switch *ds); 343 338 void b53_port_event(struct dsa_switch *ds, int port); 344 - int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, 345 - struct phylink_link_state *state); 346 339 void 
b53_phylink_mac_config(struct dsa_switch *ds, int port, 347 340 unsigned int mode, 348 341 const struct phylink_link_state *state); 349 - void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port); 350 342 void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, 351 343 unsigned int mode, 352 344 phy_interface_t interface);
+51 -23
drivers/net/dsa/b53/b53_serdes.c
··· 17 17 #include "b53_serdes.h" 18 18 #include "b53_regs.h" 19 19 20 + static inline struct b53_pcs *pcs_to_b53_pcs(struct phylink_pcs *pcs) 21 + { 22 + return container_of(pcs, struct b53_pcs, pcs); 23 + } 24 + 20 25 static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block, 21 26 u16 value) 22 27 { ··· 65 60 return b53_serdes_read_blk(dev, offset, block); 66 61 } 67 62 68 - void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode, 69 - const struct phylink_link_state *state) 63 + static int b53_serdes_config(struct phylink_pcs *pcs, unsigned int mode, 64 + phy_interface_t interface, 65 + const unsigned long *advertising, 66 + bool permit_pause_to_mac) 70 67 { 71 - u8 lane = b53_serdes_map_lane(dev, port); 68 + struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev; 69 + u8 lane = pcs_to_b53_pcs(pcs)->lane; 72 70 u16 reg; 73 - 74 - if (lane == B53_INVALID_LANE) 75 - return; 76 71 77 72 reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1), 78 73 SERDES_DIGITAL_BLK); 79 - if (state->interface == PHY_INTERFACE_MODE_1000BASEX) 74 + if (interface == PHY_INTERFACE_MODE_1000BASEX) 80 75 reg |= FIBER_MODE_1000X; 81 76 else 82 77 reg &= ~FIBER_MODE_1000X; 83 78 b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1), 84 79 SERDES_DIGITAL_BLK, reg); 80 + 81 + return 0; 85 82 } 86 - EXPORT_SYMBOL(b53_serdes_config); 87 83 88 - void b53_serdes_an_restart(struct b53_device *dev, int port) 84 + static void b53_serdes_an_restart(struct phylink_pcs *pcs) 89 85 { 90 - u8 lane = b53_serdes_map_lane(dev, port); 86 + struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev; 87 + u8 lane = pcs_to_b53_pcs(pcs)->lane; 91 88 u16 reg; 92 - 93 - if (lane == B53_INVALID_LANE) 94 - return; 95 89 96 90 reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR), 97 91 SERDES_MII_BLK); ··· 98 94 b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR), 99 95 SERDES_MII_BLK, reg); 100 96 } 101 - EXPORT_SYMBOL(b53_serdes_an_restart); 102 97 103 - int 
b53_serdes_link_state(struct b53_device *dev, int port, 104 - struct phylink_link_state *state) 98 + static void b53_serdes_get_state(struct phylink_pcs *pcs, 99 + struct phylink_link_state *state) 105 100 { 106 - u8 lane = b53_serdes_map_lane(dev, port); 101 + struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev; 102 + u8 lane = pcs_to_b53_pcs(pcs)->lane; 107 103 u16 dig, bmsr; 108 - 109 - if (lane == B53_INVALID_LANE) 110 - return 1; 111 104 112 105 dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS, 113 106 SERDES_DIGITAL_BLK); ··· 134 133 state->pause |= MLO_PAUSE_RX; 135 134 if (dig & PAUSE_RESOLUTION_TX_SIDE) 136 135 state->pause |= MLO_PAUSE_TX; 137 - 138 - return 0; 139 136 } 140 - EXPORT_SYMBOL(b53_serdes_link_state); 141 137 142 138 void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, 143 139 phy_interface_t interface, bool link_up) ··· 155 157 SERDES_MII_BLK, reg); 156 158 } 157 159 EXPORT_SYMBOL(b53_serdes_link_set); 160 + 161 + static const struct phylink_pcs_ops b53_pcs_ops = { 162 + .pcs_get_state = b53_serdes_get_state, 163 + .pcs_config = b53_serdes_config, 164 + .pcs_an_restart = b53_serdes_an_restart, 165 + }; 158 166 159 167 void b53_serdes_phylink_get_caps(struct b53_device *dev, int port, 160 168 struct phylink_config *config) ··· 191 187 } 192 188 EXPORT_SYMBOL(b53_serdes_phylink_get_caps); 193 189 190 + struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev, 191 + int port, 192 + phy_interface_t interface) 193 + { 194 + u8 lane = b53_serdes_map_lane(dev, port); 195 + 196 + if (lane == B53_INVALID_LANE || lane >= B53_N_PCS || 197 + !dev->pcs[lane].dev) 198 + return NULL; 199 + 200 + if (!phy_interface_mode_is_8023z(interface) && 201 + interface != PHY_INTERFACE_MODE_SGMII) 202 + return NULL; 203 + 204 + return &dev->pcs[lane].pcs; 205 + } 206 + EXPORT_SYMBOL(b53_serdes_phylink_mac_select_pcs); 207 + 194 208 int b53_serdes_init(struct b53_device *dev, int port) 195 209 { 196 210 u8 lane = 
b53_serdes_map_lane(dev, port); 211 + struct b53_pcs *pcs; 197 212 u16 id0, msb, lsb; 198 213 199 214 if (lane == B53_INVALID_LANE) ··· 234 211 (id0 >> SERDES_ID0_REV_LETTER_SHIFT) + 0x41, 235 212 (id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK, 236 213 (u32)msb << 16 | lsb); 214 + 215 + pcs = &dev->pcs[lane]; 216 + pcs->dev = dev; 217 + pcs->lane = lane; 218 + pcs->pcs.ops = &b53_pcs_ops; 237 219 238 220 return 0; 239 221 }
+3 -6
drivers/net/dsa/b53/b53_serdes.h
··· 107 107 return dev->ops->serdes_map_lane(dev, port); 108 108 } 109 109 110 - int b53_serdes_get_link(struct b53_device *dev, int port); 111 - int b53_serdes_link_state(struct b53_device *dev, int port, 112 - struct phylink_link_state *state); 113 - void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode, 114 - const struct phylink_link_state *state); 115 - void b53_serdes_an_restart(struct b53_device *dev, int port); 116 110 void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, 117 111 phy_interface_t interface, bool link_up); 112 + struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev, 113 + int port, 114 + phy_interface_t interface); 118 115 void b53_serdes_phylink_get_caps(struct b53_device *dev, int port, 119 116 struct phylink_config *config); 120 117 #if IS_ENABLED(CONFIG_B53_SERDES)
+1 -3
drivers/net/dsa/b53/b53_srab.c
··· 491 491 .irq_disable = b53_srab_irq_disable, 492 492 .phylink_get_caps = b53_srab_phylink_get_caps, 493 493 #if IS_ENABLED(CONFIG_B53_SERDES) 494 + .phylink_mac_select_pcs = b53_serdes_phylink_mac_select_pcs, 494 495 .serdes_map_lane = b53_srab_serdes_map_lane, 495 - .serdes_link_state = b53_serdes_link_state, 496 - .serdes_config = b53_serdes_config, 497 - .serdes_an_restart = b53_serdes_an_restart, 498 496 .serdes_link_set = b53_serdes_link_set, 499 497 #endif 500 498 };
+34 -4
drivers/net/dsa/microchip/ksz9477.c
··· 896 896 bool ingress, struct netlink_ext_ack *extack) 897 897 { 898 898 struct ksz_device *dev = ds->priv; 899 + u8 data; 900 + int p; 901 + 902 + /* Limit to one sniffer port 903 + * Check if any of the port is already set for sniffing 904 + * If yes, instruct the user to remove the previous entry & exit 905 + */ 906 + for (p = 0; p < dev->port_cnt; p++) { 907 + /* Skip the current sniffing port */ 908 + if (p == mirror->to_local_port) 909 + continue; 910 + 911 + ksz_pread8(dev, p, P_MIRROR_CTRL, &data); 912 + 913 + if (data & PORT_MIRROR_SNIFFER) { 914 + NL_SET_ERR_MSG_MOD(extack, 915 + "Sniffer port is already configured, delete existing rules & retry"); 916 + return -EBUSY; 917 + } 918 + } 899 919 900 920 if (ingress) 901 921 ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true); 902 922 else 903 923 ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true); 904 - 905 - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); 906 924 907 925 /* configure mirror port */ 908 926 ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, ··· 935 917 struct dsa_mall_mirror_tc_entry *mirror) 936 918 { 937 919 struct ksz_device *dev = ds->priv; 920 + bool in_use = false; 938 921 u8 data; 922 + int p; 939 923 940 924 if (mirror->ingress) 941 925 ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false); 942 926 else 943 927 ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false); 944 928 945 - ksz_pread8(dev, port, P_MIRROR_CTRL, &data); 946 929 947 - if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) 930 + /* Check if any of the port is still referring to sniffer port */ 931 + for (p = 0; p < dev->port_cnt; p++) { 932 + ksz_pread8(dev, p, P_MIRROR_CTRL, &data); 933 + 934 + if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) { 935 + in_use = true; 936 + break; 937 + } 938 + } 939 + 940 + /* delete sniffing if there are no other mirroring rules */ 941 + if (!in_use) 948 942 ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, 949 943 
PORT_MIRROR_SNIFFER, false); 950 944 }
+1
drivers/net/dsa/mt7530.c
··· 2234 2234 ret = of_get_phy_mode(mac_np, &interface); 2235 2235 if (ret && ret != -ENODEV) { 2236 2236 of_node_put(mac_np); 2237 + of_node_put(phy_node); 2237 2238 return ret; 2238 2239 } 2239 2240 id = of_mdio_parse_addr(ds->dev, phy_node);
+8 -5
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 2827 2827 u32 idx = le32_to_cpu(nqcmp->cq_handle_low); 2828 2828 struct bnxt_cp_ring_info *cpr2; 2829 2829 2830 + /* No more budget for RX work */ 2831 + if (budget && work_done >= budget && idx == BNXT_RX_HDL) 2832 + break; 2833 + 2830 2834 cpr2 = cpr->cp_ring_arr[idx]; 2831 2835 work_done += __bnxt_poll_work(bp, cpr2, 2832 2836 budget - work_done); ··· 11132 11128 11133 11129 if (bp->flags & BNXT_FLAG_CHIP_P5) 11134 11130 return bnxt_rfs_supported(bp); 11135 - if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) 11131 + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) 11136 11132 return false; 11137 11133 11138 11134 vnics = 1 + bp->rx_nr_rings; ··· 13386 13382 goto init_dflt_ring_err; 13387 13383 13388 13384 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; 13389 - if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { 13390 - bp->flags |= BNXT_FLAG_RFS; 13391 - bp->dev->features |= NETIF_F_NTUPLE; 13392 - } 13385 + 13386 + bnxt_set_dflt_rfs(bp); 13387 + 13393 13388 init_dflt_ring_err: 13394 13389 bnxt_ulp_irq_restart(bp, rc); 13395 13390 return rc;
+7 -8
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
··· 846 846 if (rc) 847 847 return rc; 848 848 849 - if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) { 850 - bnxt_ptp_timecounter_init(bp, false); 851 - rc = bnxt_ptp_init_rtc(bp, phc_cfg); 852 - if (rc) 853 - goto out; 854 - } 855 - 856 849 if (ptp->ptp_clock && bnxt_pps_config_ok(bp)) 857 850 return 0; 858 851 ··· 854 861 atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS); 855 862 spin_lock_init(&ptp->ptp_lock); 856 863 857 - if (!(bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) 864 + if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) { 865 + bnxt_ptp_timecounter_init(bp, false); 866 + rc = bnxt_ptp_init_rtc(bp, phc_cfg); 867 + if (rc) 868 + goto out; 869 + } else { 858 870 bnxt_ptp_timecounter_init(bp, true); 871 + } 859 872 860 873 ptp->ptp_info = bnxt_ptp_caps; 861 874 if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
+8 -8
drivers/net/ethernet/cavium/thunder/nic_main.c
··· 59 59 60 60 /* MSI-X */ 61 61 u8 num_vec; 62 - bool irq_allocated[NIC_PF_MSIX_VECTORS]; 62 + unsigned int irq_allocated[NIC_PF_MSIX_VECTORS]; 63 63 char irq_name[NIC_PF_MSIX_VECTORS][20]; 64 64 }; 65 65 ··· 1150 1150 u64 intr; 1151 1151 u8 vf; 1152 1152 1153 - if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0)) 1153 + if (irq == nic->irq_allocated[NIC_PF_INTR_ID_MBOX0]) 1154 1154 mbx = 0; 1155 1155 else 1156 1156 mbx = 1; ··· 1176 1176 1177 1177 for (irq = 0; irq < nic->num_vec; irq++) { 1178 1178 if (nic->irq_allocated[irq]) 1179 - free_irq(pci_irq_vector(nic->pdev, irq), nic); 1180 - nic->irq_allocated[irq] = false; 1179 + free_irq(nic->irq_allocated[irq], nic); 1180 + nic->irq_allocated[irq] = 0; 1181 1181 } 1182 1182 } 1183 1183 1184 1184 static int nic_register_interrupts(struct nicpf *nic) 1185 1185 { 1186 - int i, ret; 1186 + int i, ret, irq; 1187 1187 nic->num_vec = pci_msix_vec_count(nic->pdev); 1188 1188 1189 1189 /* Enable MSI-X */ ··· 1201 1201 sprintf(nic->irq_name[i], 1202 1202 "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0)); 1203 1203 1204 - ret = request_irq(pci_irq_vector(nic->pdev, i), 1205 - nic_mbx_intr_handler, 0, 1204 + irq = pci_irq_vector(nic->pdev, i); 1205 + ret = request_irq(irq, nic_mbx_intr_handler, 0, 1206 1206 nic->irq_name[i], nic); 1207 1207 if (ret) 1208 1208 goto fail; 1209 1209 1210 - nic->irq_allocated[i] = true; 1210 + nic->irq_allocated[i] = irq; 1211 1211 } 1212 1212 1213 1213 /* Enable mailbox interrupt */
+5 -2
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
··· 771 771 /* If we only have one page, still need to get shadown wqe when 772 772 * wqe rolling-over page 773 773 */ 774 - if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) { 774 + if (curr_pg != end_pg || end_prod_idx < *prod_idx) { 775 775 void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; 776 776 777 777 copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); ··· 841 841 842 842 *cons_idx = curr_cons_idx; 843 843 844 - if (curr_pg != end_pg) { 844 + /* If we only have one page, still need to get shadown wqe when 845 + * wqe rolling-over page 846 + */ 847 + if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) { 845 848 void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; 846 849 847 850 copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
+1
drivers/net/ethernet/mediatek/mtk_sgmii.c
··· 26 26 break; 27 27 28 28 ss->regmap[i] = syscon_node_to_regmap(np); 29 + of_node_put(np); 29 30 if (IS_ERR(ss->regmap[i])) 30 31 return PTR_ERR(ss->regmap[i]); 31 32 }
+25 -6
drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
··· 31 31 struct mlx5_rsc_dump { 32 32 u32 pdn; 33 33 u32 mkey; 34 + u32 number_of_menu_items; 34 35 u16 fw_segment_type[MLX5_SGMT_TYPE_NUM]; 35 36 }; 36 37 ··· 51 50 return -EINVAL; 52 51 } 53 52 54 - static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page) 53 + #define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \ 54 + MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \ 55 + MLX5_ST_SZ_BYTES(resource_dump_menu_segment)) 56 + 57 + static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page, 58 + int read_size, int start_idx) 55 59 { 56 60 void *data = page_address(page); 57 61 enum mlx5_sgmt_type sgmt_idx; 58 62 int num_of_items; 59 63 char *sgmt_name; 60 64 void *member; 65 + int size = 0; 61 66 void *menu; 62 67 int i; 63 68 64 - menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu); 65 - num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records); 69 + if (!start_idx) { 70 + menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu); 71 + rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu, 72 + num_of_records); 73 + size = MLX5_RSC_DUMP_MENU_HEADER_SIZE; 74 + data += size; 75 + } 76 + num_of_items = rsc_dump->number_of_menu_items; 66 77 67 - for (i = 0; i < num_of_items; i++) { 68 - member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]); 78 + for (i = 0; start_idx + i < num_of_items; i++) { 79 + size += MLX5_ST_SZ_BYTES(resource_dump_menu_record); 80 + if (size >= read_size) 81 + return start_idx + i; 82 + 83 + member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i; 69 84 sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name); 70 85 sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name); 71 86 if (sgmt_idx == -EINVAL) ··· 89 72 rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record, 90 73 member, segment_type); 91 74 } 75 + return 0; 92 76 } 93 77 94 78 static 
int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, ··· 186 168 struct mlx5_rsc_dump_cmd *cmd = NULL; 187 169 struct mlx5_rsc_key key = {}; 188 170 struct page *page; 171 + int start_idx = 0; 189 172 int size; 190 173 int err; 191 174 ··· 208 189 if (err < 0) 209 190 goto destroy_cmd; 210 191 211 - mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page); 192 + start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx); 212 193 213 194 } while (err > 0); 214 195
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
··· 309 309 if (err) 310 310 return err; 311 311 312 - err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz, 313 - xoff, &port_buffer, &update_buffer); 312 + err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff, 313 + port_buff_cell_sz, &port_buffer, &update_buffer); 314 314 if (err) 315 315 return err; 316 316 }
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
··· 145 145 146 146 flow_action_for_each(i, act, flow_action) { 147 147 tc_act = mlx5e_tc_act_get(act->id, ns_type); 148 - if (!tc_act || !tc_act->post_parse || 149 - !tc_act->can_offload(parse_state, act, i, attr)) 148 + if (!tc_act || !tc_act->post_parse) 150 149 continue; 151 150 152 151 err = tc_act->post_parse(parse_state, priv, attr);
+32 -2
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
··· 45 45 if (mlx5e_is_eswitch_flow(parse_state->flow)) 46 46 attr->esw_attr->split_count = attr->esw_attr->out_count; 47 47 48 - if (!clear_action) { 48 + if (clear_action) { 49 + parse_state->ct_clear = true; 50 + } else { 49 51 attr->flags |= MLX5_ATTR_FLAG_CT; 50 52 flow_flag_set(parse_state->flow, CT); 51 53 parse_state->ct = true; 52 54 } 53 - parse_state->ct_clear = clear_action; 55 + 56 + return 0; 57 + } 58 + 59 + static int 60 + tc_act_post_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, 61 + struct mlx5e_priv *priv, 62 + struct mlx5_flow_attr *attr) 63 + { 64 + struct mlx5e_tc_mod_hdr_acts *mod_acts = &attr->parse_attr->mod_hdr_acts; 65 + int err; 66 + 67 + /* If ct action exist, we can ignore previous ct_clear actions */ 68 + if (parse_state->ct) 69 + return 0; 70 + 71 + if (parse_state->ct_clear) { 72 + err = mlx5_tc_ct_set_ct_clear_regs(parse_state->ct_priv, mod_acts); 73 + if (err) { 74 + NL_SET_ERR_MSG_MOD(parse_state->extack, 75 + "Failed to set registers for ct clear"); 76 + return err; 77 + } 78 + attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 79 + 80 + /* Prevent handling of additional, redundant clear actions */ 81 + parse_state->ct_clear = false; 82 + } 54 83 55 84 return 0; 56 85 } ··· 99 70 .can_offload = tc_act_can_offload_ct, 100 71 .parse_action = tc_act_parse_ct, 101 72 .is_multi_table_act = tc_act_is_multi_table_act_ct, 73 + .post_parse = tc_act_post_parse_ct, 102 74 }; 103 75
+10 -14
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
··· 582 582 return 0; 583 583 } 584 584 585 + int mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv, 586 + struct mlx5e_tc_mod_hdr_acts *mod_acts) 587 + { 588 + return mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0); 589 + } 590 + 585 591 static int 586 592 mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act, 587 593 char *modact) ··· 1416 1410 const struct flow_action_entry *act, 1417 1411 struct netlink_ext_ack *extack) 1418 1412 { 1419 - bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR; 1420 - int err; 1421 - 1422 1413 if (!priv) { 1423 1414 NL_SET_ERR_MSG_MOD(extack, 1424 1415 "offload of ct action isn't available"); ··· 1426 1423 attr->ct_attr.ct_action = act->ct.action; 1427 1424 attr->ct_attr.nf_ft = act->ct.flow_table; 1428 1425 1429 - if (!clear_action) 1430 - goto out; 1431 - 1432 - err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0); 1433 - if (err) { 1434 - NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear"); 1435 - return err; 1436 - } 1437 - attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 1438 - 1439 - out: 1440 1426 return 0; 1441 1427 } 1442 1428 ··· 1741 1749 static void 1742 1750 mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) 1743 1751 { 1752 + struct mlx5e_priv *priv; 1753 + 1744 1754 if (!refcount_dec_and_test(&ft->refcount)) 1745 1755 return; 1746 1756 ··· 1752 1758 rhashtable_free_and_destroy(&ft->ct_entries_ht, 1753 1759 mlx5_tc_ct_flush_ft_entry, 1754 1760 ct_priv); 1761 + priv = netdev_priv(ct_priv->netdev); 1762 + flush_workqueue(priv->wq); 1755 1763 mlx5_tc_ct_free_pre_ct_tables(ft); 1756 1764 mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id); 1757 1765 kfree(ft);
+11
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
··· 129 129 mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv, 130 130 struct sk_buff *skb, u8 zone_restore_id); 131 131 132 + int 133 + mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv, 134 + struct mlx5e_tc_mod_hdr_acts *mod_acts); 135 + 132 136 #else /* CONFIG_MLX5_TC_CT */ 133 137 134 138 static inline struct mlx5_tc_ct_priv * ··· 172 168 mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec) 173 169 { 174 170 return 0; 171 + } 172 + 173 + static inline int 174 + mlx5_tc_ct_set_ct_clear_regs(struct mlx5_tc_ct_priv *priv, 175 + struct mlx5e_tc_mod_hdr_acts *mod_acts) 176 + { 177 + return -EOPNOTSUPP; 175 178 } 176 179 177 180 static inline int
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
··· 713 713 struct net_device *filter_dev) 714 714 { 715 715 struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr; 716 + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; 716 717 struct mlx5e_tc_int_port *int_port; 717 718 TC_TUN_ROUTE_ATTR_INIT(attr); 718 719 u16 vport_num; ··· 748 747 esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value, 749 748 misc_parameters.vxlan_vni); 750 749 esw_attr->rx_tun_attr->decap_vport = vport_num; 751 - } else if (netif_is_ovs_master(attr.route_dev)) { 750 + } else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) { 752 751 int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv), 753 752 attr.route_dev->ifindex, 754 753 MLX5E_TC_INT_PORT_INGRESS);
+10
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 1191 1191 return err; 1192 1192 WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state); 1193 1193 1194 + if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_PCP && priv->dcbx.dscp_app_cnt) { 1195 + /* 1196 + * Align the driver state with the register state. 1197 + * Temporary state change is required to enable the app list reset. 1198 + */ 1199 + priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_DSCP; 1200 + mlx5e_dcbnl_delete_app(priv); 1201 + priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP; 1202 + } 1203 + 1194 1204 mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params, 1195 1205 priv->dcbx_dp.trust_state); 1196 1206
+11
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 2459 2459 match.key->vlan_priority); 2460 2460 2461 2461 *match_level = MLX5_MATCH_L2; 2462 + 2463 + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) && 2464 + match.mask->vlan_eth_type && 2465 + MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, 2466 + ft_field_support.outer_second_vid, 2467 + fs_type)) { 2468 + MLX5_SET(fte_match_set_misc, misc_c, 2469 + outer_second_cvlan_tag, 1); 2470 + spec->match_criteria_enable |= 2471 + MLX5_MATCH_MISC_PARAMETERS; 2472 + } 2462 2473 } 2463 2474 } else if (*match_level != MLX5_MATCH_NONE) { 2464 2475 /* cvlan_tag enabled in match criteria and
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 139 139 if (mlx5_esw_indir_table_decap_vport(attr)) 140 140 vport = mlx5_esw_indir_table_decap_vport(attr); 141 141 142 - if (esw_attr->int_port) 142 + if (attr && !attr->chain && esw_attr->int_port) 143 143 metadata = 144 144 mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port); 145 145 else
+36 -26
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
··· 155 155 } 156 156 } 157 157 158 - static void mlx5_sync_reset_reload_work(struct work_struct *work) 159 - { 160 - struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, 161 - reset_reload_work); 162 - struct mlx5_core_dev *dev = fw_reset->dev; 163 - int err; 164 - 165 - mlx5_enter_error_state(dev, true); 166 - mlx5_unload_one(dev); 167 - err = mlx5_health_wait_pci_up(dev); 168 - if (err) 169 - mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); 170 - fw_reset->ret = err; 171 - mlx5_fw_reset_complete_reload(dev); 172 - } 173 - 174 158 static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) 175 159 { 176 160 struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; ··· 162 178 del_timer_sync(&fw_reset->timer); 163 179 } 164 180 165 - static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) 181 + static int mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) 166 182 { 167 183 struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; 168 184 185 + if (!test_and_clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) { 186 + mlx5_core_warn(dev, "Reset request was already cleared\n"); 187 + return -EALREADY; 188 + } 189 + 169 190 mlx5_stop_sync_reset_poll(dev); 170 - clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags); 171 191 if (poll_health) 172 192 mlx5_start_health_poll(dev); 193 + return 0; 194 + } 195 + 196 + static void mlx5_sync_reset_reload_work(struct work_struct *work) 197 + { 198 + struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset, 199 + reset_reload_work); 200 + struct mlx5_core_dev *dev = fw_reset->dev; 201 + int err; 202 + 203 + mlx5_sync_reset_clear_reset_requested(dev, false); 204 + mlx5_enter_error_state(dev, true); 205 + mlx5_unload_one(dev); 206 + err = mlx5_health_wait_pci_up(dev); 207 + if (err) 208 + mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not 
working\n"); 209 + fw_reset->ret = err; 210 + mlx5_fw_reset_complete_reload(dev); 173 211 } 174 212 175 213 #define MLX5_RESET_POLL_INTERVAL (HZ / 10) ··· 208 202 209 203 if (fatal_error) { 210 204 mlx5_core_warn(dev, "Got Device Reset\n"); 211 - mlx5_sync_reset_clear_reset_requested(dev, false); 212 205 queue_work(fw_reset->wq, &fw_reset->reset_reload_work); 213 206 return; 214 207 } ··· 234 229 return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false); 235 230 } 236 231 237 - static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) 232 + static int mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev) 238 233 { 239 234 struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; 240 235 236 + if (test_and_set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) { 237 + mlx5_core_warn(dev, "Reset request was already set\n"); 238 + return -EALREADY; 239 + } 241 240 mlx5_stop_health_poll(dev, true); 242 - set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags); 243 241 mlx5_start_sync_reset_poll(dev); 242 + return 0; 244 243 } 245 244 246 245 static void mlx5_fw_live_patch_event(struct work_struct *work) ··· 273 264 err ? "Failed" : "Sent"); 274 265 return; 275 266 } 276 - mlx5_sync_reset_set_reset_requested(dev); 267 + if (mlx5_sync_reset_set_reset_requested(dev)) 268 + return; 269 + 277 270 err = mlx5_fw_reset_set_reset_sync_ack(dev); 278 271 if (err) 279 272 mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err); ··· 373 362 struct mlx5_core_dev *dev = fw_reset->dev; 374 363 int err; 375 364 376 - mlx5_sync_reset_clear_reset_requested(dev, false); 365 + if (mlx5_sync_reset_clear_reset_requested(dev, false)) 366 + return; 377 367 378 368 mlx5_core_warn(dev, "Sync Reset now. 
Device is going to reset.\n"); 379 369 ··· 403 391 reset_abort_work); 404 392 struct mlx5_core_dev *dev = fw_reset->dev; 405 393 406 - if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) 394 + if (mlx5_sync_reset_clear_reset_requested(dev, true)) 407 395 return; 408 - 409 - mlx5_sync_reset_clear_reset_requested(dev, true); 410 396 mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n"); 411 397 } 412 398
+24 -14
drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
··· 100 100 flush_workqueue(mp->wq); 101 101 } 102 102 103 + static void mlx5_lag_fib_set(struct lag_mp *mp, struct fib_info *fi, u32 dst, int dst_len) 104 + { 105 + mp->fib.mfi = fi; 106 + mp->fib.priority = fi->fib_priority; 107 + mp->fib.dst = dst; 108 + mp->fib.dst_len = dst_len; 109 + } 110 + 103 111 struct mlx5_fib_event_work { 104 112 struct work_struct work; 105 113 struct mlx5_lag *ldev; ··· 118 110 }; 119 111 }; 120 112 121 - static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, 122 - unsigned long event, 123 - struct fib_info *fi) 113 + static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, unsigned long event, 114 + struct fib_entry_notifier_info *fen_info) 124 115 { 116 + struct fib_info *fi = fen_info->fi; 125 117 struct lag_mp *mp = &ldev->lag_mp; 126 118 struct fib_nh *fib_nh0, *fib_nh1; 127 119 unsigned int nhs; ··· 129 121 /* Handle delete event */ 130 122 if (event == FIB_EVENT_ENTRY_DEL) { 131 123 /* stop track */ 132 - if (mp->mfi == fi) 133 - mp->mfi = NULL; 124 + if (mp->fib.mfi == fi) 125 + mp->fib.mfi = NULL; 134 126 return; 135 127 } 136 128 137 129 /* Handle multipath entry with lower priority value */ 138 - if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority) 130 + if (mp->fib.mfi && mp->fib.mfi != fi && 131 + (mp->fib.dst != fen_info->dst || mp->fib.dst_len != fen_info->dst_len) && 132 + fi->fib_priority >= mp->fib.priority) 139 133 return; 140 134 141 135 /* Handle add/replace event */ ··· 153 143 154 144 i++; 155 145 mlx5_lag_set_port_affinity(ldev, i); 146 + mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len); 156 147 } 157 148 158 - mp->mfi = fi; 159 149 return; 160 150 } 161 151 ··· 175 165 } 176 166 177 167 /* First time we see multipath route */ 178 - if (!mp->mfi && !__mlx5_lag_is_active(ldev)) { 168 + if (!mp->fib.mfi && !__mlx5_lag_is_active(ldev)) { 179 169 struct lag_tracker tracker; 180 170 181 171 tracker = ldev->tracker; ··· 183 173 } 184 174 185 175 mlx5_lag_set_port_affinity(ldev, 
MLX5_LAG_NORMAL_AFFINITY); 186 - mp->mfi = fi; 176 + mlx5_lag_fib_set(mp, fi, fen_info->dst, fen_info->dst_len); 187 177 } 188 178 189 179 static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, ··· 194 184 struct lag_mp *mp = &ldev->lag_mp; 195 185 196 186 /* Check the nh event is related to the route */ 197 - if (!mp->mfi || mp->mfi != fi) 187 + if (!mp->fib.mfi || mp->fib.mfi != fi) 198 188 return; 199 189 200 190 /* nh added/removed */ ··· 224 214 case FIB_EVENT_ENTRY_REPLACE: 225 215 case FIB_EVENT_ENTRY_DEL: 226 216 mlx5_lag_fib_route_event(ldev, fib_work->event, 227 - fib_work->fen_info.fi); 217 + &fib_work->fen_info); 228 218 fib_info_put(fib_work->fen_info.fi); 229 219 break; 230 220 case FIB_EVENT_NH_ADD: ··· 323 313 /* Clear mfi, as it might become stale when a route delete event 324 314 * has been missed, see mlx5_lag_fib_route_event(). 325 315 */ 326 - ldev->lag_mp.mfi = NULL; 316 + ldev->lag_mp.fib.mfi = NULL; 327 317 } 328 318 329 319 int mlx5_lag_mp_init(struct mlx5_lag *ldev) ··· 334 324 /* always clear mfi, as it might become stale when a route delete event 335 325 * has been missed 336 326 */ 337 - mp->mfi = NULL; 327 + mp->fib.mfi = NULL; 338 328 339 329 if (mp->fib_nb.notifier_call) 340 330 return 0; ··· 364 354 unregister_fib_notifier(&init_net, &mp->fib_nb); 365 355 destroy_workqueue(mp->wq); 366 356 mp->fib_nb.notifier_call = NULL; 367 - mp->mfi = NULL; 357 + mp->fib.mfi = NULL; 368 358 }
+6 -1
drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
··· 15 15 16 16 struct lag_mp { 17 17 struct notifier_block fib_nb; 18 - struct fib_info *mfi; /* used in tracking fib events */ 18 + struct { 19 + const void *mfi; /* used in tracking fib events */ 20 + u32 priority; 21 + u32 dst; 22 + int dst_len; 23 + } fib; 19 24 struct workqueue_struct *wq; 20 25 }; 21 26
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
··· 505 505 struct ttc_params ttc_params = {}; 506 506 507 507 mlx5_lag_set_inner_ttc_params(ldev, &ttc_params); 508 - port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params); 508 + port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params); 509 509 if (IS_ERR(port_sel->inner.ttc)) 510 510 return PTR_ERR(port_sel->inner.ttc); 511 511
+2
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
··· 408 408 for (tt = 0; tt < MLX5_NUM_TT; tt++) { 409 409 struct mlx5_ttc_rule *rule = &rules[tt]; 410 410 411 + if (test_bit(tt, params->ignore_dests)) 412 + continue; 411 413 rule->rule = mlx5_generate_inner_ttc_rule(dev, ft, 412 414 &params->dests[tt], 413 415 ttc_rules[tt].etype,
+1 -1
drivers/net/ethernet/smsc/smsc911x.c
··· 2431 2431 if (irq == -EPROBE_DEFER) { 2432 2432 retval = -EPROBE_DEFER; 2433 2433 goto out_0; 2434 - } else if (irq <= 0) { 2434 + } else if (irq < 0) { 2435 2435 pr_warn("Could not allocate irq resource\n"); 2436 2436 retval = -ENODEV; 2437 2437 goto out_0;
+1
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
··· 454 454 plat->has_gmac4 = 1; 455 455 plat->force_sf_dma_mode = 0; 456 456 plat->tso_en = 1; 457 + plat->sph_disable = 1; 457 458 458 459 /* Multiplying factor to the clk_eee_i clock time 459 460 * period to make it closer to 100 ns. This value
+1
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
··· 907 907 908 908 ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn, 909 909 &gmac->mux_handle, priv, priv->mii); 910 + of_node_put(mdio_mux); 910 911 return ret; 911 912 } 912 913
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 7015 7015 dev_info(priv->device, "TSO feature enabled\n"); 7016 7016 } 7017 7017 7018 - if (priv->dma_cap.sphen) { 7018 + if (priv->dma_cap.sphen && !priv->plat->sph_disable) { 7019 7019 ndev->hw_features |= NETIF_F_GRO; 7020 7020 priv->sph_cap = true; 7021 7021 priv->sph = priv->sph_cap;
+4 -1
drivers/net/ethernet/ti/cpsw_new.c
··· 1240 1240 data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM, 1241 1241 sizeof(struct cpsw_slave_data), 1242 1242 GFP_KERNEL); 1243 - if (!data->slave_data) 1243 + if (!data->slave_data) { 1244 + of_node_put(tmp_node); 1244 1245 return -ENOMEM; 1246 + } 1245 1247 1246 1248 /* Populate all the child nodes here... 1247 1249 */ ··· 1337 1335 1338 1336 err_node_put: 1339 1337 of_node_put(port_np); 1338 + of_node_put(tmp_node); 1340 1339 return ret; 1341 1340 } 1342 1341
+12 -18
drivers/net/ethernet/xilinx/xilinx_emaclite.c
··· 803 803 static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) 804 804 { 805 805 struct mii_bus *bus; 806 - int rc; 807 806 struct resource res; 808 807 struct device_node *np = of_get_parent(lp->phy_node); 809 808 struct device_node *npp; 809 + int rc, ret; 810 810 811 811 /* Don't register the MDIO bus if the phy_node or its parent node 812 812 * can't be found. ··· 816 816 return -ENODEV; 817 817 } 818 818 npp = of_get_parent(np); 819 - 820 - of_address_to_resource(npp, 0, &res); 819 + ret = of_address_to_resource(npp, 0, &res); 820 + of_node_put(npp); 821 + if (ret) { 822 + dev_err(dev, "%s resource error!\n", 823 + dev->of_node->full_name); 824 + of_node_put(np); 825 + return ret; 826 + } 821 827 if (lp->ndev->mem_start != res.start) { 822 828 struct phy_device *phydev; 823 829 ··· 833 827 "MDIO of the phy is not registered yet\n"); 834 828 else 835 829 put_device(&phydev->mdio.dev); 830 + of_node_put(np); 836 831 return 0; 837 832 } 838 833 ··· 846 839 bus = mdiobus_alloc(); 847 840 if (!bus) { 848 841 dev_err(dev, "Failed to allocate mdiobus\n"); 842 + of_node_put(np); 849 843 return -ENOMEM; 850 844 } 851 845 ··· 859 851 bus->parent = dev; 860 852 861 853 rc = of_mdiobus_register(bus, np); 854 + of_node_put(np); 862 855 if (rc) { 863 856 dev_err(dev, "Failed to register mdio bus.\n"); 864 857 goto err_register; ··· 916 907 xemaclite_disable_interrupts(lp); 917 908 918 909 if (lp->phy_node) { 919 - u32 bmcr; 920 - 921 910 lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, 922 911 xemaclite_adjust_link, 0, 923 912 PHY_INTERFACE_MODE_MII); ··· 926 919 927 920 /* EmacLite doesn't support giga-bit speeds */ 928 921 phy_set_max_speed(lp->phy_dev, SPEED_100); 929 - 930 - /* Don't advertise 1000BASE-T Full/Half duplex speeds */ 931 - phy_write(lp->phy_dev, MII_CTRL1000, 0); 932 - 933 - /* Advertise only 10 and 100mbps full/half duplex speeds */ 934 - phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL | 935 - ADVERTISE_CSMA); 936 - 937 - /* 
Restart auto negotiation */ 938 - bmcr = phy_read(lp->phy_dev, MII_BMCR); 939 - bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 940 - phy_write(lp->phy_dev, MII_BMCR, bmcr); 941 - 942 922 phy_start(lp->phy_dev); 943 923 } 944 924
+1 -1
drivers/net/mdio/mdio-mux-bcm6368.c
··· 115 115 md->mii_bus = devm_mdiobus_alloc(&pdev->dev); 116 116 if (!md->mii_bus) { 117 117 dev_err(&pdev->dev, "mdiomux bus alloc failed\n"); 118 - return ENOMEM; 118 + return -ENOMEM; 119 119 } 120 120 121 121 bus = md->mii_bus;
+11 -1
drivers/net/phy/sfp.c
··· 250 250 struct sfp_eeprom_id id; 251 251 unsigned int module_power_mW; 252 252 unsigned int module_t_start_up; 253 + bool tx_fault_ignore; 253 254 254 255 #if IS_ENABLED(CONFIG_HWMON) 255 256 struct sfp_diag diag; ··· 1957 1956 else 1958 1957 sfp->module_t_start_up = T_START_UP; 1959 1958 1959 + if (!memcmp(id.base.vendor_name, "HUAWEI ", 16) && 1960 + !memcmp(id.base.vendor_pn, "MA5671A ", 16)) 1961 + sfp->tx_fault_ignore = true; 1962 + else 1963 + sfp->tx_fault_ignore = false; 1964 + 1960 1965 return 0; 1961 1966 } 1962 1967 ··· 2416 2409 mutex_lock(&sfp->st_mutex); 2417 2410 state = sfp_get_state(sfp); 2418 2411 changed = state ^ sfp->state; 2419 - changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; 2412 + if (sfp->tx_fault_ignore) 2413 + changed &= SFP_F_PRESENT | SFP_F_LOS; 2414 + else 2415 + changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; 2420 2416 2421 2417 for (i = 0; i < GPIO_MAX; i++) 2422 2418 if (changed & BIT(i))
+1 -1
drivers/nfc/nfcmrvl/main.c
··· 183 183 { 184 184 struct nci_dev *ndev = priv->ndev; 185 185 186 + nci_unregister_device(ndev); 186 187 if (priv->ndev->nfc_dev->fw_download_in_progress) 187 188 nfcmrvl_fw_dnld_abort(priv); 188 189 ··· 192 191 if (gpio_is_valid(priv->config.reset_n_io)) 193 192 gpio_free(priv->config.reset_n_io); 194 193 195 - nci_unregister_device(ndev); 196 194 nci_free_device(ndev); 197 195 kfree(priv); 198 196 }
+12 -8
drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
··· 414 414 415 415 ret = clk_prepare_enable(priv->clk_ref); 416 416 if (ret) 417 - goto err_disable_clk_ref; 417 + return ret; 418 418 419 419 priv->reset = devm_reset_control_array_get_exclusive(dev); 420 - if (IS_ERR(priv->reset)) 421 - return PTR_ERR(priv->reset); 420 + if (IS_ERR(priv->reset)) { 421 + ret = PTR_ERR(priv->reset); 422 + goto err_disable_clk_ref; 423 + } 422 424 423 425 priv->phy = devm_phy_create(dev, np, &phy_g12a_usb3_pcie_ops); 424 426 if (IS_ERR(priv->phy)) { 425 427 ret = PTR_ERR(priv->phy); 426 - if (ret != -EPROBE_DEFER) 427 - dev_err(dev, "failed to create PHY\n"); 428 - 429 - return ret; 428 + dev_err_probe(dev, ret, "failed to create PHY\n"); 429 + goto err_disable_clk_ref; 430 430 } 431 431 432 432 phy_set_drvdata(priv->phy, priv); ··· 434 434 435 435 phy_provider = devm_of_phy_provider_register(dev, 436 436 phy_g12a_usb3_pcie_xlate); 437 + if (IS_ERR(phy_provider)) { 438 + ret = PTR_ERR(phy_provider); 439 + goto err_disable_clk_ref; 440 + } 437 441 438 - return PTR_ERR_OR_ZERO(phy_provider); 442 + return 0; 439 443 440 444 err_disable_clk_ref: 441 445 clk_disable_unprepare(priv->clk_ref);
+2 -1
drivers/phy/motorola/phy-mapphone-mdm6600.c
··· 629 629 cleanup: 630 630 if (error < 0) 631 631 phy_mdm6600_device_power_off(ddata); 632 - 632 + pm_runtime_disable(ddata->dev); 633 + pm_runtime_dont_use_autosuspend(ddata->dev); 633 634 return error; 634 635 } 635 636
+15 -6
drivers/phy/samsung/phy-exynos5250-sata.c
··· 187 187 return -EINVAL; 188 188 189 189 sata_phy->client = of_find_i2c_device_by_node(node); 190 + of_node_put(node); 190 191 if (!sata_phy->client) 191 192 return -EPROBE_DEFER; 192 193 ··· 196 195 sata_phy->phyclk = devm_clk_get(dev, "sata_phyctrl"); 197 196 if (IS_ERR(sata_phy->phyclk)) { 198 197 dev_err(dev, "failed to get clk for PHY\n"); 199 - return PTR_ERR(sata_phy->phyclk); 198 + ret = PTR_ERR(sata_phy->phyclk); 199 + goto put_dev; 200 200 } 201 201 202 202 ret = clk_prepare_enable(sata_phy->phyclk); 203 203 if (ret < 0) { 204 204 dev_err(dev, "failed to enable source clk\n"); 205 - return ret; 205 + goto put_dev; 206 206 } 207 207 208 208 sata_phy->phy = devm_phy_create(dev, NULL, &exynos_sata_phy_ops); 209 209 if (IS_ERR(sata_phy->phy)) { 210 - clk_disable_unprepare(sata_phy->phyclk); 211 210 dev_err(dev, "failed to create PHY\n"); 212 - return PTR_ERR(sata_phy->phy); 211 + ret = PTR_ERR(sata_phy->phy); 212 + goto clk_disable; 213 213 } 214 214 215 215 phy_set_drvdata(sata_phy->phy, sata_phy); ··· 218 216 phy_provider = devm_of_phy_provider_register(dev, 219 217 of_phy_simple_xlate); 220 218 if (IS_ERR(phy_provider)) { 221 - clk_disable_unprepare(sata_phy->phyclk); 222 - return PTR_ERR(phy_provider); 219 + ret = PTR_ERR(phy_provider); 220 + goto clk_disable; 223 221 } 224 222 225 223 return 0; 224 + 225 + clk_disable: 226 + clk_disable_unprepare(sata_phy->phyclk); 227 + put_dev: 228 + put_device(&sata_phy->client->dev); 229 + 230 + return ret; 226 231 } 227 232 228 233 static const struct of_device_id exynos_sata_phy_of_match[] = {
+1 -1
drivers/phy/ti/phy-am654-serdes.c
··· 838 838 839 839 clk_err: 840 840 of_clk_del_provider(node); 841 - 841 + pm_runtime_disable(dev); 842 842 return ret; 843 843 } 844 844
+1 -1
drivers/phy/ti/phy-omap-usb2.c
··· 215 215 return 0; 216 216 217 217 err1: 218 - clk_disable(phy->wkupclk); 218 + clk_disable_unprepare(phy->wkupclk); 219 219 220 220 err0: 221 221 return ret;
+1
drivers/phy/ti/phy-ti-pipe3.c
··· 696 696 } 697 697 698 698 control_pdev = of_find_device_by_node(control_node); 699 + of_node_put(control_node); 699 700 if (!control_pdev) { 700 701 dev_err(dev, "Failed to get control device\n"); 701 702 return -EINVAL;
+9 -3
drivers/phy/ti/phy-tusb1210.c
··· 155 155 } 156 156 157 157 #ifdef CONFIG_POWER_SUPPLY 158 - const char * const tusb1210_chg_det_states[] = { 158 + static const char * const tusb1210_chg_det_states[] = { 159 159 "CHG_DET_CONNECTING", 160 160 "CHG_DET_START_DET", 161 161 "CHG_DET_READ_DET", ··· 537 537 tusb1210_probe_charger_detect(tusb); 538 538 539 539 tusb->phy = ulpi_phy_create(ulpi, &phy_ops); 540 - if (IS_ERR(tusb->phy)) 541 - return PTR_ERR(tusb->phy); 540 + if (IS_ERR(tusb->phy)) { 541 + ret = PTR_ERR(tusb->phy); 542 + goto err_remove_charger; 543 + } 542 544 543 545 phy_set_drvdata(tusb->phy, tusb); 544 546 ulpi_set_drvdata(ulpi, tusb); 545 547 return 0; 548 + 549 + err_remove_charger: 550 + tusb1210_remove_charger_detect(tusb); 551 + return ret; 546 552 } 547 553 548 554 static void tusb1210_remove(struct ulpi *ulpi)
+1 -1
drivers/soc/imx/imx8m-blk-ctrl.c
··· 50 50 u32 mipi_phy_rst_mask; 51 51 }; 52 52 53 - #define DOMAIN_MAX_CLKS 3 53 + #define DOMAIN_MAX_CLKS 4 54 54 55 55 struct imx8m_blk_ctrl_domain { 56 56 struct generic_pm_domain genpd;
+5 -5
drivers/target/target_core_pscsi.c
··· 588 588 } 589 589 590 590 static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status, 591 - unsigned char *req_sense) 591 + unsigned char *req_sense, int valid_data) 592 592 { 593 593 struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); 594 594 struct scsi_device *sd = pdv->pdv_sd; ··· 681 681 * back despite framework assumption that a 682 682 * check condition means there is no data 683 683 */ 684 - if (sd->type == TYPE_TAPE && 684 + if (sd->type == TYPE_TAPE && valid_data && 685 685 cmd->data_direction == DMA_FROM_DEVICE) { 686 686 /* 687 687 * is sense data valid, fixed format, ··· 1032 1032 struct se_cmd *cmd = req->end_io_data; 1033 1033 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); 1034 1034 enum sam_status scsi_status = scmd->result & 0xff; 1035 + int valid_data = cmd->data_length - scmd->resid_len; 1035 1036 u8 *cdb = cmd->priv; 1036 1037 1037 1038 if (scsi_status != SAM_STAT_GOOD) { ··· 1040 1039 " 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result); 1041 1040 } 1042 1041 1043 - pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer); 1042 + pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data); 1044 1043 1045 1044 switch (host_byte(scmd->result)) { 1046 1045 case DID_OK: 1047 - target_complete_cmd_with_length(cmd, scsi_status, 1048 - cmd->data_length - scmd->resid_len); 1046 + target_complete_cmd_with_length(cmd, scsi_status, valid_data); 1049 1047 break; 1050 1048 default: 1051 1049 pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"
+1
drivers/tee/optee/ffa_abi.c
··· 865 865 rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL); 866 866 optee_supp_uninit(&optee->supp); 867 867 mutex_destroy(&optee->call_queue.mutex); 868 + mutex_destroy(&optee->ffa.mutex); 868 869 err_unreg_supp_teedev: 869 870 tee_device_unregister(optee->supp_teedev); 870 871 err_unreg_teedev:
+281 -206
drivers/tty/n_gsm.c
··· 73 73 */ 74 74 #define MAX_MRU 1500 75 75 #define MAX_MTU 1500 76 + /* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */ 77 + #define PROT_OVERHEAD 7 76 78 #define GSM_NET_TX_TIMEOUT (HZ*10) 77 79 78 80 /* ··· 221 219 int encoding; 222 220 u8 control; 223 221 u8 fcs; 224 - u8 received_fcs; 225 222 u8 *txframe; /* TX framing buffer */ 226 223 227 224 /* Method for the receiver side */ ··· 232 231 int initiator; /* Did we initiate connection */ 233 232 bool dead; /* Has the mux been shut down */ 234 233 struct gsm_dlci *dlci[NUM_DLCI]; 234 + int old_c_iflag; /* termios c_iflag value before attach */ 235 235 bool constipated; /* Asked by remote to shut up */ 236 236 237 237 spinlock_t tx_lock; ··· 273 271 274 272 static struct tty_driver *gsm_tty_driver; 275 273 276 - /* Save dlci open address */ 277 - static int addr_open[256] = { 0 }; 278 - /* Save dlci open count */ 279 - static int addr_cnt; 280 274 /* 281 275 * This section of the driver logic implements the GSM encodings 282 276 * both the basic and the 'advanced'. Reliable transport is not ··· 367 369 #define GOOD_FCS 0xCF 368 370 369 371 static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len); 372 + static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk); 370 373 371 374 /** 372 375 * gsm_fcs_add - update FCS ··· 831 832 break; 832 833 case 2: /* Unstructed with modem bits. 
833 834 Always one byte as we never send inline break data */ 834 - *dp++ = gsm_encode_modem(dlci); 835 + *dp++ = (gsm_encode_modem(dlci) << 1) | EA; 835 836 break; 836 837 } 837 838 WARN_ON(kfifo_out_locked(&dlci->fifo, dp , len, &dlci->lock) != len); ··· 912 913 dev_kfree_skb_any(dlci->skb); 913 914 dlci->skb = NULL; 914 915 } 916 + return size; 917 + } 918 + 919 + /** 920 + * gsm_dlci_modem_output - try and push modem status out of a DLCI 921 + * @gsm: mux 922 + * @dlci: the DLCI to pull modem status from 923 + * @brk: break signal 924 + * 925 + * Push an empty frame in to the transmit queue to update the modem status 926 + * bits and to transmit an optional break. 927 + * 928 + * Caller must hold the tx_lock of the mux. 929 + */ 930 + 931 + static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci, 932 + u8 brk) 933 + { 934 + u8 *dp = NULL; 935 + struct gsm_msg *msg; 936 + int size = 0; 937 + 938 + /* for modem bits without break data */ 939 + switch (dlci->adaption) { 940 + case 1: /* Unstructured */ 941 + break; 942 + case 2: /* Unstructured with modem bits. */ 943 + size++; 944 + if (brk > 0) 945 + size++; 946 + break; 947 + default: 948 + pr_err("%s: unsupported adaption %d\n", __func__, 949 + dlci->adaption); 950 + return -EINVAL; 951 + } 952 + 953 + msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); 954 + if (!msg) { 955 + pr_err("%s: gsm_data_alloc error", __func__); 956 + return -ENOMEM; 957 + } 958 + dp = msg->data; 959 + switch (dlci->adaption) { 960 + case 1: /* Unstructured */ 961 + break; 962 + case 2: /* Unstructured with modem bits. 
*/ 963 + if (brk == 0) { 964 + *dp++ = (gsm_encode_modem(dlci) << 1) | EA; 965 + } else { 966 + *dp++ = gsm_encode_modem(dlci) << 1; 967 + *dp++ = (brk << 4) | 2 | EA; /* Length, Break, EA */ 968 + } 969 + break; 970 + default: 971 + /* Handled above */ 972 + break; 973 + } 974 + 975 + __gsm_data_queue(dlci, msg); 915 976 return size; 916 977 } 917 978 ··· 1152 1093 { 1153 1094 unsigned int addr = 0; 1154 1095 unsigned int modem = 0; 1155 - unsigned int brk = 0; 1156 1096 struct gsm_dlci *dlci; 1157 1097 int len = clen; 1158 1098 int slen; ··· 1181 1123 return; 1182 1124 } 1183 1125 len--; 1184 - if (len > 0) { 1185 - while (gsm_read_ea(&brk, *dp++) == 0) { 1186 - len--; 1187 - if (len == 0) 1188 - return; 1189 - } 1190 - modem <<= 7; 1191 - modem |= (brk & 0x7f); 1192 - } 1193 1126 tty = tty_port_tty_get(&dlci->port); 1194 - gsm_process_modem(tty, dlci, modem, slen); 1127 + gsm_process_modem(tty, dlci, modem, slen - len); 1195 1128 if (tty) { 1196 1129 tty_wakeup(tty); 1197 1130 tty_kref_put(tty); ··· 1242 1193 } 1243 1194 1244 1195 static void gsm_dlci_begin_close(struct gsm_dlci *dlci); 1245 - static void gsm_dlci_close(struct gsm_dlci *dlci); 1246 1196 1247 1197 /** 1248 1198 * gsm_control_message - DLCI 0 control processing ··· 1260 1212 { 1261 1213 u8 buf[1]; 1262 1214 unsigned long flags; 1263 - struct gsm_dlci *dlci; 1264 - int i; 1265 - int address; 1266 1215 1267 1216 switch (command) { 1268 1217 case CMD_CLD: { 1269 - if (addr_cnt > 0) { 1270 - for (i = 0; i < addr_cnt; i++) { 1271 - address = addr_open[i]; 1272 - dlci = gsm->dlci[address]; 1273 - gsm_dlci_close(dlci); 1274 - addr_open[i] = 0; 1275 - } 1276 - } 1218 + struct gsm_dlci *dlci = gsm->dlci[0]; 1277 1219 /* Modem wishes to close down */ 1278 - dlci = gsm->dlci[0]; 1279 1220 if (dlci) { 1280 1221 dlci->dead = true; 1281 1222 gsm->dead = true; 1282 - gsm_dlci_close(dlci); 1283 - addr_cnt = 0; 1284 - gsm_response(gsm, 0, UA|PF); 1223 + gsm_dlci_begin_close(dlci); 1285 1224 } 1286 1225 } 1287 1226 
break; ··· 1361 1326 1362 1327 static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl) 1363 1328 { 1364 - struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 1, gsm->ftype); 1329 + struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype); 1365 1330 if (msg == NULL) 1366 1331 return; 1367 - msg->data[0] = (ctrl->cmd << 1) | 2 | EA; /* command */ 1368 - memcpy(msg->data + 1, ctrl->data, ctrl->len); 1332 + msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */ 1333 + msg->data[1] = (ctrl->len << 1) | EA; 1334 + memcpy(msg->data + 2, ctrl->data, ctrl->len); 1369 1335 gsm_data_queue(gsm->dlci[0], msg); 1370 1336 } 1371 1337 ··· 1389 1353 spin_lock_irqsave(&gsm->control_lock, flags); 1390 1354 ctrl = gsm->pending_cmd; 1391 1355 if (ctrl) { 1392 - gsm->cretries--; 1393 1356 if (gsm->cretries == 0) { 1394 1357 gsm->pending_cmd = NULL; 1395 1358 ctrl->error = -ETIMEDOUT; ··· 1397 1362 wake_up(&gsm->event); 1398 1363 return; 1399 1364 } 1365 + gsm->cretries--; 1400 1366 gsm_control_transmit(gsm, ctrl); 1401 1367 mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); 1402 1368 } ··· 1438 1402 1439 1403 /* If DLCI0 is in ADM mode skip retries, it won't respond */ 1440 1404 if (gsm->dlci[0]->mode == DLCI_MODE_ADM) 1441 - gsm->cretries = 1; 1405 + gsm->cretries = 0; 1442 1406 else 1443 1407 gsm->cretries = gsm->n2; 1444 1408 ··· 1486 1450 1487 1451 static void gsm_dlci_close(struct gsm_dlci *dlci) 1488 1452 { 1453 + unsigned long flags; 1454 + 1489 1455 del_timer(&dlci->t1); 1490 1456 if (debug & 8) 1491 1457 pr_debug("DLCI %d goes closed.\n", dlci->addr); 1492 1458 dlci->state = DLCI_CLOSED; 1493 1459 if (dlci->addr != 0) { 1494 1460 tty_port_tty_hangup(&dlci->port, false); 1461 + spin_lock_irqsave(&dlci->lock, flags); 1495 1462 kfifo_reset(&dlci->fifo); 1463 + spin_unlock_irqrestore(&dlci->lock, flags); 1496 1464 /* Ensure that gsmtty_open() can return. 
*/ 1497 1465 tty_port_set_initialized(&dlci->port, 0); 1498 1466 wake_up_interruptible(&dlci->port.open_wait); 1499 1467 } else 1500 1468 dlci->gsm->dead = true; 1501 - /* Unregister gsmtty driver,report gsmtty dev remove uevent for user */ 1502 - tty_unregister_device(gsm_tty_driver, dlci->addr); 1503 1469 wake_up(&dlci->gsm->event); 1504 1470 /* A DLCI 0 close is a MUX termination so we need to kick that 1505 1471 back to userspace somehow */ ··· 1523 1485 dlci->state = DLCI_OPEN; 1524 1486 if (debug & 8) 1525 1487 pr_debug("DLCI %d goes open.\n", dlci->addr); 1526 - /* Register gsmtty driver,report gsmtty dev add uevent for user */ 1527 - tty_register_device(gsm_tty_driver, dlci->addr, NULL); 1488 + /* Send current modem state */ 1489 + if (dlci->addr) 1490 + gsm_modem_update(dlci, 0); 1528 1491 wake_up(&dlci->gsm->event); 1529 1492 } 1530 1493 ··· 1662 1623 tty = tty_port_tty_get(port); 1663 1624 if (tty) { 1664 1625 gsm_process_modem(tty, dlci, modem, slen); 1626 + tty_wakeup(tty); 1665 1627 tty_kref_put(tty); 1666 1628 } 1667 1629 fallthrough; ··· 1833 1793 struct gsm_dlci *dlci; 1834 1794 u8 cr; 1835 1795 int address; 1836 - int i, j, k, address_tmp; 1837 - /* We have to sneak a look at the packet body to do the FCS. 1838 - A somewhat layering violation in the spec */ 1839 1796 1840 - if ((gsm->control & ~PF) == UI) 1841 - gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len); 1842 - if (gsm->encoding == 0) { 1843 - /* WARNING: gsm->received_fcs is used for 1844 - gsm->encoding = 0 only. 
1845 - In this case it contain the last piece of data 1846 - required to generate final CRC */ 1847 - gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs); 1848 - } 1849 1797 if (gsm->fcs != GOOD_FCS) { 1850 1798 gsm->bad_fcs++; 1851 1799 if (debug & 4) ··· 1864 1836 else { 1865 1837 gsm_response(gsm, address, UA|PF); 1866 1838 gsm_dlci_open(dlci); 1867 - /* Save dlci open address */ 1868 - if (address) { 1869 - addr_open[addr_cnt] = address; 1870 - addr_cnt++; 1871 - } 1872 1839 } 1873 1840 break; 1874 1841 case DISC|PF: ··· 1874 1851 return; 1875 1852 } 1876 1853 /* Real close complete */ 1877 - if (!address) { 1878 - if (addr_cnt > 0) { 1879 - for (i = 0; i < addr_cnt; i++) { 1880 - address = addr_open[i]; 1881 - dlci = gsm->dlci[address]; 1882 - gsm_dlci_close(dlci); 1883 - addr_open[i] = 0; 1884 - } 1885 - } 1886 - dlci = gsm->dlci[0]; 1887 - gsm_dlci_close(dlci); 1888 - addr_cnt = 0; 1889 - gsm_response(gsm, 0, UA|PF); 1890 - } else { 1891 - gsm_response(gsm, address, UA|PF); 1892 - gsm_dlci_close(dlci); 1893 - /* clear dlci address */ 1894 - for (j = 0; j < addr_cnt; j++) { 1895 - address_tmp = addr_open[j]; 1896 - if (address_tmp == address) { 1897 - for (k = j; k < addr_cnt; k++) 1898 - addr_open[k] = addr_open[k+1]; 1899 - addr_cnt--; 1900 - break; 1901 - } 1902 - } 1903 - } 1854 + gsm_response(gsm, address, UA|PF); 1855 + gsm_dlci_close(dlci); 1904 1856 break; 1905 - case UA: 1906 1857 case UA|PF: 1907 1858 if (cr == 0 || dlci == NULL) 1908 1859 break; ··· 1990 1993 break; 1991 1994 case GSM_DATA: /* Data */ 1992 1995 gsm->buf[gsm->count++] = c; 1993 - if (gsm->count == gsm->len) 1996 + if (gsm->count == gsm->len) { 1997 + /* Calculate final FCS for UI frames over all data */ 1998 + if ((gsm->control & ~PF) != UIH) { 1999 + gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, 2000 + gsm->count); 2001 + } 1994 2002 gsm->state = GSM_FCS; 2003 + } 1995 2004 break; 1996 2005 case GSM_FCS: /* FCS follows the packet */ 1997 - gsm->received_fcs = c; 1998 - 
gsm_queue(gsm); 2006 + gsm->fcs = gsm_fcs_add(gsm->fcs, c); 1999 2007 gsm->state = GSM_SSOF; 2000 2008 break; 2001 2009 case GSM_SSOF: 2002 - if (c == GSM0_SOF) { 2003 - gsm->state = GSM_SEARCH; 2004 - break; 2005 - } 2010 + gsm->state = GSM_SEARCH; 2011 + if (c == GSM0_SOF) 2012 + gsm_queue(gsm); 2013 + else 2014 + gsm->bad_size++; 2006 2015 break; 2007 2016 default: 2008 2017 pr_debug("%s: unhandled state: %d\n", __func__, gsm->state); ··· 2026 2023 2027 2024 static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) 2028 2025 { 2026 + /* handle XON/XOFF */ 2027 + if ((c & ISO_IEC_646_MASK) == XON) { 2028 + gsm->constipated = true; 2029 + return; 2030 + } else if ((c & ISO_IEC_646_MASK) == XOFF) { 2031 + gsm->constipated = false; 2032 + /* Kick the link in case it is idling */ 2033 + gsm_data_kick(gsm, NULL); 2034 + return; 2035 + } 2029 2036 if (c == GSM1_SOF) { 2030 - /* EOF is only valid in frame if we have got to the data state 2031 - and received at least one byte (the FCS) */ 2032 - if (gsm->state == GSM_DATA && gsm->count) { 2033 - /* Extract the FCS */ 2037 + /* EOF is only valid in frame if we have got to the data state */ 2038 + if (gsm->state == GSM_DATA) { 2039 + if (gsm->count < 1) { 2040 + /* Missing FSC */ 2041 + gsm->malformed++; 2042 + gsm->state = GSM_START; 2043 + return; 2044 + } 2045 + /* Remove the FCS from data */ 2034 2046 gsm->count--; 2047 + if ((gsm->control & ~PF) != UIH) { 2048 + /* Calculate final FCS for UI frames over all 2049 + * data but FCS 2050 + */ 2051 + gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, 2052 + gsm->count); 2053 + } 2054 + /* Add the FCS itself to test against GOOD_FCS */ 2035 2055 gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]); 2036 2056 gsm->len = gsm->count; 2037 2057 gsm_queue(gsm); ··· 2063 2037 } 2064 2038 /* Any partial frame was a runt so go back to start */ 2065 2039 if (gsm->state != GSM_START) { 2066 - gsm->malformed++; 2040 + if (gsm->state != GSM_SEARCH) 2041 + gsm->malformed++; 2067 
2042 gsm->state = GSM_START; 2068 2043 } 2069 2044 /* A SOF in GSM_START means we are still reading idling or ··· 2133 2106 gsm->io_error++; 2134 2107 } 2135 2108 2136 - static int gsm_disconnect(struct gsm_mux *gsm) 2137 - { 2138 - struct gsm_dlci *dlci = gsm->dlci[0]; 2139 - struct gsm_control *gc; 2140 - 2141 - if (!dlci) 2142 - return 0; 2143 - 2144 - /* In theory disconnecting DLCI 0 is sufficient but for some 2145 - modems this is apparently not the case. */ 2146 - gc = gsm_control_send(gsm, CMD_CLD, NULL, 0); 2147 - if (gc) 2148 - gsm_control_wait(gsm, gc); 2149 - 2150 - del_timer_sync(&gsm->t2_timer); 2151 - /* Now we are sure T2 has stopped */ 2152 - 2153 - gsm_dlci_begin_close(dlci); 2154 - wait_event_interruptible(gsm->event, 2155 - dlci->state == DLCI_CLOSED); 2156 - 2157 - if (signal_pending(current)) 2158 - return -EINTR; 2159 - 2160 - return 0; 2161 - } 2162 - 2163 2109 /** 2164 2110 * gsm_cleanup_mux - generic GSM protocol cleanup 2165 2111 * @gsm: our mux 2112 + * @disc: disconnect link? 2166 2113 * 2167 2114 * Clean up the bits of the mux which are the same for all framing 2168 2115 * protocols. Remove the mux from the mux table, stop all the timers 2169 2116 * and then shut down each device hanging up the channels as we go. 
2170 2117 */ 2171 2118 2172 - static void gsm_cleanup_mux(struct gsm_mux *gsm) 2119 + static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) 2173 2120 { 2174 2121 int i; 2175 2122 struct gsm_dlci *dlci = gsm->dlci[0]; 2176 2123 struct gsm_msg *txq, *ntxq; 2177 2124 2178 2125 gsm->dead = true; 2179 - 2180 - spin_lock(&gsm_mux_lock); 2181 - for (i = 0; i < MAX_MUX; i++) { 2182 - if (gsm_mux[i] == gsm) { 2183 - gsm_mux[i] = NULL; 2184 - break; 2185 - } 2186 - } 2187 - spin_unlock(&gsm_mux_lock); 2188 - /* open failed before registering => nothing to do */ 2189 - if (i == MAX_MUX) 2190 - return; 2191 - 2192 - del_timer_sync(&gsm->t2_timer); 2193 - /* Now we are sure T2 has stopped */ 2194 - if (dlci) 2195 - dlci->dead = true; 2196 - 2197 - /* Free up any link layer users */ 2198 2126 mutex_lock(&gsm->mutex); 2199 - for (i = 0; i < NUM_DLCI; i++) 2127 + 2128 + if (dlci) { 2129 + if (disc && dlci->state != DLCI_CLOSED) { 2130 + gsm_dlci_begin_close(dlci); 2131 + wait_event(gsm->event, dlci->state == DLCI_CLOSED); 2132 + } 2133 + dlci->dead = true; 2134 + } 2135 + 2136 + /* Finish outstanding timers, making sure they are done */ 2137 + del_timer_sync(&gsm->t2_timer); 2138 + 2139 + /* Free up any link layer users and finally the control channel */ 2140 + for (i = NUM_DLCI - 1; i >= 0; i--) 2200 2141 if (gsm->dlci[i]) 2201 2142 gsm_dlci_release(gsm->dlci[i]); 2202 2143 mutex_unlock(&gsm->mutex); 2203 2144 /* Now wipe the queues */ 2145 + tty_ldisc_flush(gsm->tty); 2204 2146 list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list) 2205 2147 kfree(txq); 2206 2148 INIT_LIST_HEAD(&gsm->tx_list); ··· 2187 2191 static int gsm_activate_mux(struct gsm_mux *gsm) 2188 2192 { 2189 2193 struct gsm_dlci *dlci; 2190 - int i = 0; 2191 2194 2192 2195 timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); 2193 2196 init_waitqueue_head(&gsm->event); ··· 2197 2202 gsm->receive = gsm0_receive; 2198 2203 else 2199 2204 gsm->receive = gsm1_receive; 2200 - 2201 - spin_lock(&gsm_mux_lock); 
2202 - for (i = 0; i < MAX_MUX; i++) { 2203 - if (gsm_mux[i] == NULL) { 2204 - gsm->num = i; 2205 - gsm_mux[i] = gsm; 2206 - break; 2207 - } 2208 - } 2209 - spin_unlock(&gsm_mux_lock); 2210 - if (i == MAX_MUX) 2211 - return -EBUSY; 2212 2205 2213 2206 dlci = gsm_dlci_alloc(gsm, 0); 2214 2207 if (dlci == NULL) ··· 2213 2230 */ 2214 2231 static void gsm_free_mux(struct gsm_mux *gsm) 2215 2232 { 2233 + int i; 2234 + 2235 + for (i = 0; i < MAX_MUX; i++) { 2236 + if (gsm == gsm_mux[i]) { 2237 + gsm_mux[i] = NULL; 2238 + break; 2239 + } 2240 + } 2241 + mutex_destroy(&gsm->mutex); 2216 2242 kfree(gsm->txframe); 2217 2243 kfree(gsm->buf); 2218 2244 kfree(gsm); ··· 2241 2249 2242 2250 static inline void mux_get(struct gsm_mux *gsm) 2243 2251 { 2252 + unsigned long flags; 2253 + 2254 + spin_lock_irqsave(&gsm_mux_lock, flags); 2244 2255 kref_get(&gsm->ref); 2256 + spin_unlock_irqrestore(&gsm_mux_lock, flags); 2245 2257 } 2246 2258 2247 2259 static inline void mux_put(struct gsm_mux *gsm) 2248 2260 { 2261 + unsigned long flags; 2262 + 2263 + spin_lock_irqsave(&gsm_mux_lock, flags); 2249 2264 kref_put(&gsm->ref, gsm_free_muxr); 2265 + spin_unlock_irqrestore(&gsm_mux_lock, flags); 2250 2266 } 2251 2267 2252 2268 static inline unsigned int mux_num_to_base(struct gsm_mux *gsm) ··· 2275 2275 2276 2276 static struct gsm_mux *gsm_alloc_mux(void) 2277 2277 { 2278 + int i; 2278 2279 struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL); 2279 2280 if (gsm == NULL) 2280 2281 return NULL; ··· 2284 2283 kfree(gsm); 2285 2284 return NULL; 2286 2285 } 2287 - gsm->txframe = kmalloc(2 * MAX_MRU + 2, GFP_KERNEL); 2286 + gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL); 2288 2287 if (gsm->txframe == NULL) { 2289 2288 kfree(gsm->buf); 2290 2289 kfree(gsm); ··· 2304 2303 gsm->mru = 64; /* Default to encoding 1 so these should be 64 */ 2305 2304 gsm->mtu = 64; 2306 2305 gsm->dead = true; /* Avoid early tty opens */ 2306 + 2307 + /* Store the instance to the mux 
array or abort if no space is 2308 + * available. 2309 + */ 2310 + spin_lock(&gsm_mux_lock); 2311 + for (i = 0; i < MAX_MUX; i++) { 2312 + if (!gsm_mux[i]) { 2313 + gsm_mux[i] = gsm; 2314 + gsm->num = i; 2315 + break; 2316 + } 2317 + } 2318 + spin_unlock(&gsm_mux_lock); 2319 + if (i == MAX_MUX) { 2320 + mutex_destroy(&gsm->mutex); 2321 + kfree(gsm->txframe); 2322 + kfree(gsm->buf); 2323 + kfree(gsm); 2324 + return NULL; 2325 + } 2307 2326 2308 2327 return gsm; 2309 2328 } ··· 2360 2339 /* Check the MRU/MTU range looks sane */ 2361 2340 if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8) 2362 2341 return -EINVAL; 2363 - if (c->n2 < 3) 2342 + if (c->n2 > 255) 2364 2343 return -EINVAL; 2365 2344 if (c->encapsulation > 1) /* Basic, advanced, no I */ 2366 2345 return -EINVAL; ··· 2391 2370 2392 2371 /* 2393 2372 * Close down what is needed, restart and initiate the new 2394 - * configuration 2373 + * configuration. On the first time there is no DLCI[0] 2374 + * and closing or cleaning up is not necessary. 2395 2375 */ 2396 - 2397 - if (gsm->initiator && (need_close || need_restart)) { 2398 - int ret; 2399 - 2400 - ret = gsm_disconnect(gsm); 2401 - 2402 - if (ret) 2403 - return ret; 2404 - } 2405 - if (need_restart) 2406 - gsm_cleanup_mux(gsm); 2376 + if (need_close || need_restart) 2377 + gsm_cleanup_mux(gsm, true); 2407 2378 2408 2379 gsm->initiator = c->initiator; 2409 2380 gsm->mru = c->mru; ··· 2463 2450 int ret, i; 2464 2451 2465 2452 gsm->tty = tty_kref_get(tty); 2453 + /* Turn off tty XON/XOFF handling to handle it explicitly. 
*/ 2454 + gsm->old_c_iflag = tty->termios.c_iflag; 2455 + tty->termios.c_iflag &= (IXON | IXOFF); 2466 2456 ret = gsm_activate_mux(gsm); 2467 2457 if (ret != 0) 2468 2458 tty_kref_put(gsm->tty); 2469 2459 else { 2470 2460 /* Don't register device 0 - this is the control channel and not 2471 2461 a usable tty interface */ 2472 - if (gsm->initiator) { 2473 - base = mux_num_to_base(gsm); /* Base for this MUX */ 2474 - for (i = 1; i < NUM_DLCI; i++) { 2475 - struct device *dev; 2462 + base = mux_num_to_base(gsm); /* Base for this MUX */ 2463 + for (i = 1; i < NUM_DLCI; i++) { 2464 + struct device *dev; 2476 2465 2477 - dev = tty_register_device(gsm_tty_driver, 2466 + dev = tty_register_device(gsm_tty_driver, 2478 2467 base + i, NULL); 2479 - if (IS_ERR(dev)) { 2480 - for (i--; i >= 1; i--) 2481 - tty_unregister_device(gsm_tty_driver, 2482 - base + i); 2483 - return PTR_ERR(dev); 2484 - } 2468 + if (IS_ERR(dev)) { 2469 + for (i--; i >= 1; i--) 2470 + tty_unregister_device(gsm_tty_driver, 2471 + base + i); 2472 + return PTR_ERR(dev); 2485 2473 } 2486 2474 } 2487 2475 } ··· 2504 2490 int i; 2505 2491 2506 2492 WARN_ON(tty != gsm->tty); 2507 - if (gsm->initiator) { 2508 - for (i = 1; i < NUM_DLCI; i++) 2509 - tty_unregister_device(gsm_tty_driver, base + i); 2510 - } 2511 - gsm_cleanup_mux(gsm); 2493 + for (i = 1; i < NUM_DLCI; i++) 2494 + tty_unregister_device(gsm_tty_driver, base + i); 2495 + /* Restore tty XON/XOFF handling. */ 2496 + gsm->tty->termios.c_iflag = gsm->old_c_iflag; 2512 2497 tty_kref_put(gsm->tty); 2513 2498 gsm->tty = NULL; 2514 2499 } ··· 2572 2559 { 2573 2560 struct gsm_mux *gsm = tty->disc_data; 2574 2561 2562 + /* The ldisc locks and closes the port before calling our close. This 2563 + * means we have no way to do a proper disconnect. We will not bother 2564 + * to do one. 
2565 + */ 2566 + gsm_cleanup_mux(gsm, false); 2567 + 2575 2568 gsmld_detach_gsm(tty, gsm); 2576 2569 2577 2570 gsmld_flush_buffer(tty); ··· 2616 2597 2617 2598 ret = gsmld_attach_gsm(tty, gsm); 2618 2599 if (ret != 0) { 2619 - gsm_cleanup_mux(gsm); 2600 + gsm_cleanup_mux(gsm, false); 2620 2601 mux_put(gsm); 2621 2602 } 2622 2603 return ret; ··· 2973 2954 2974 2955 #define TX_SIZE 512 2975 2956 2976 - static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk) 2957 + /** 2958 + * gsm_modem_upd_via_data - send modem bits via convergence layer 2959 + * @dlci: channel 2960 + * @brk: break signal 2961 + * 2962 + * Send an empty frame to signal mobile state changes and to transmit the 2963 + * break signal for adaption 2. 2964 + */ 2965 + 2966 + static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk) 2977 2967 { 2978 - u8 modembits[5]; 2968 + struct gsm_mux *gsm = dlci->gsm; 2969 + unsigned long flags; 2970 + 2971 + if (dlci->state != DLCI_OPEN || dlci->adaption != 2) 2972 + return; 2973 + 2974 + spin_lock_irqsave(&gsm->tx_lock, flags); 2975 + gsm_dlci_modem_output(gsm, dlci, brk); 2976 + spin_unlock_irqrestore(&gsm->tx_lock, flags); 2977 + } 2978 + 2979 + /** 2980 + * gsm_modem_upd_via_msc - send modem bits via control frame 2981 + * @dlci: channel 2982 + * @brk: break signal 2983 + */ 2984 + 2985 + static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk) 2986 + { 2987 + u8 modembits[3]; 2979 2988 struct gsm_control *ctrl; 2980 2989 int len = 2; 2981 2990 2982 - if (brk) 2983 - len++; 2991 + if (dlci->gsm->encoding != 0) 2992 + return 0; 2984 2993 2985 - modembits[0] = len << 1 | EA; /* Data bytes */ 2986 - modembits[1] = dlci->addr << 2 | 3; /* DLCI, EA, 1 */ 2987 - modembits[2] = gsm_encode_modem(dlci) << 1 | EA; 2988 - if (brk) 2989 - modembits[3] = brk << 4 | 2 | EA; /* Valid, EA */ 2990 - ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len + 1); 2994 + modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */ 2995 + if (!brk) { 2996 
+ modembits[1] = (gsm_encode_modem(dlci) << 1) | EA; 2997 + } else { 2998 + modembits[1] = gsm_encode_modem(dlci) << 1; 2999 + modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */ 3000 + len++; 3001 + } 3002 + ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len); 2991 3003 if (ctrl == NULL) 2992 3004 return -ENOMEM; 2993 3005 return gsm_control_wait(dlci->gsm, ctrl); 3006 + } 3007 + 3008 + /** 3009 + * gsm_modem_update - send modem status line state 3010 + * @dlci: channel 3011 + * @brk: break signal 3012 + */ 3013 + 3014 + static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk) 3015 + { 3016 + if (dlci->adaption == 2) { 3017 + /* Send convergence layer type 2 empty data frame. */ 3018 + gsm_modem_upd_via_data(dlci, brk); 3019 + return 0; 3020 + } else if (dlci->gsm->encoding == 0) { 3021 + /* Send as MSC control message. */ 3022 + return gsm_modem_upd_via_msc(dlci, brk); 3023 + } 3024 + 3025 + /* Modem status lines are not supported. */ 3026 + return -EPROTONOSUPPORT; 2994 3027 } 2995 3028 2996 3029 static int gsm_carrier_raised(struct tty_port *port) ··· 3077 3006 modem_tx &= ~(TIOCM_DTR | TIOCM_RTS); 3078 3007 if (modem_tx != dlci->modem_tx) { 3079 3008 dlci->modem_tx = modem_tx; 3080 - gsmtty_modem_update(dlci, 0); 3009 + gsm_modem_update(dlci, 0); 3081 3010 } 3082 3011 } 3083 3012 ··· 3226 3155 static void gsmtty_flush_buffer(struct tty_struct *tty) 3227 3156 { 3228 3157 struct gsm_dlci *dlci = tty->driver_data; 3158 + unsigned long flags; 3159 + 3229 3160 if (dlci->state == DLCI_CLOSED) 3230 3161 return; 3231 3162 /* Caution needed: If we implement reliable transport classes 3232 3163 then the data being transmitted can't simply be junked once 3233 3164 it has first hit the stack. 
Until then we can just blow it 3234 3165 away */ 3166 + spin_lock_irqsave(&dlci->lock, flags); 3235 3167 kfifo_reset(&dlci->fifo); 3168 + spin_unlock_irqrestore(&dlci->lock, flags); 3236 3169 /* Need to unhook this DLCI from the transmit queue logic */ 3237 3170 } 3238 3171 ··· 3268 3193 3269 3194 if (modem_tx != dlci->modem_tx) { 3270 3195 dlci->modem_tx = modem_tx; 3271 - return gsmtty_modem_update(dlci, 0); 3196 + return gsm_modem_update(dlci, 0); 3272 3197 } 3273 3198 return 0; 3274 3199 } ··· 3329 3254 dlci->modem_tx &= ~TIOCM_RTS; 3330 3255 dlci->throttled = true; 3331 3256 /* Send an MSC with RTS cleared */ 3332 - gsmtty_modem_update(dlci, 0); 3257 + gsm_modem_update(dlci, 0); 3333 3258 } 3334 3259 3335 3260 static void gsmtty_unthrottle(struct tty_struct *tty) ··· 3341 3266 dlci->modem_tx |= TIOCM_RTS; 3342 3267 dlci->throttled = false; 3343 3268 /* Send an MSC with RTS set */ 3344 - gsmtty_modem_update(dlci, 0); 3269 + gsm_modem_update(dlci, 0); 3345 3270 } 3346 3271 3347 3272 static int gsmtty_break_ctl(struct tty_struct *tty, int state) ··· 3359 3284 if (encode > 0x0F) 3360 3285 encode = 0x0F; /* Best effort */ 3361 3286 } 3362 - return gsmtty_modem_update(dlci, encode); 3287 + return gsm_modem_update(dlci, encode); 3363 3288 } 3364 3289 3365 3290 static void gsmtty_cleanup(struct tty_struct *tty)
+4 -4
drivers/tty/serial/8250/8250_pci.c
··· 2667 2667 pbn_panacom2, 2668 2668 pbn_panacom4, 2669 2669 pbn_plx_romulus, 2670 - pbn_endrun_2_4000000, 2670 + pbn_endrun_2_3906250, 2671 2671 pbn_oxsemi, 2672 2672 pbn_oxsemi_1_3906250, 2673 2673 pbn_oxsemi_2_3906250, ··· 3195 3195 * signal now many ports are available 3196 3196 * 2 port 952 Uart support 3197 3197 */ 3198 - [pbn_endrun_2_4000000] = { 3198 + [pbn_endrun_2_3906250] = { 3199 3199 .flags = FL_BASE0, 3200 3200 .num_ports = 2, 3201 - .base_baud = 4000000, 3201 + .base_baud = 3906250, 3202 3202 .uart_offset = 0x200, 3203 3203 .first_offset = 0x1000, 3204 3204 }, ··· 4115 4115 */ 4116 4116 { PCI_VENDOR_ID_ENDRUN, PCI_DEVICE_ID_ENDRUN_1588, 4117 4117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4118 - pbn_endrun_2_4000000 }, 4118 + pbn_endrun_2_3906250 }, 4119 4119 /* 4120 4120 * Quatech cards. These actually have configurable clocks but for 4121 4121 * now we just use the default.
+3 -3
drivers/tty/serial/8250/8250_port.c
··· 1675 1675 struct uart_8250_port *up = up_to_u8250p(port); 1676 1676 struct uart_8250_em485 *em485 = up->em485; 1677 1677 1678 - serial8250_rpm_get_tx(up); 1679 - 1680 1678 if (!port->x_char && uart_circ_empty(&port->state->xmit)) 1681 1679 return; 1680 + 1681 + serial8250_rpm_get_tx(up); 1682 1682 1683 1683 if (em485 && 1684 1684 em485->active_timer == &em485->start_tx_timer) ··· 3329 3329 3330 3330 serial8250_set_divisor(port, baud, quot, frac); 3331 3331 serial_port_out(port, UART_LCR, up->lcr); 3332 - serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS); 3332 + serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); 3333 3333 } 3334 3334 3335 3335 /*
+7 -2
drivers/tty/serial/amba-pl011.c
··· 1255 1255 1256 1256 static void pl011_rs485_tx_stop(struct uart_amba_port *uap) 1257 1257 { 1258 + /* 1259 + * To be on the safe side only time out after twice as many iterations 1260 + * as fifo size. 1261 + */ 1262 + const int MAX_TX_DRAIN_ITERS = uap->port.fifosize * 2; 1258 1263 struct uart_port *port = &uap->port; 1259 1264 int i = 0; 1260 1265 u32 cr; 1261 1266 1262 1267 /* Wait until hardware tx queue is empty */ 1263 1268 while (!pl011_tx_empty(port)) { 1264 - if (i == port->fifosize) { 1269 + if (i > MAX_TX_DRAIN_ITERS) { 1265 1270 dev_warn(port->dev, 1266 1271 "timeout while draining hardware tx queue\n"); 1267 1272 break; ··· 2057 2052 * with the given baud rate. We use this as the poll interval when we 2058 2053 * wait for the tx queue to empty. 2059 2054 */ 2060 - uap->rs485_tx_drain_interval = (bits * 1000 * 1000) / baud; 2055 + uap->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud); 2061 2056 2062 2057 pl011_setup_status_masks(port, termios); 2063 2058
+1 -1
drivers/tty/serial/imx.c
··· 1448 1448 imx_uart_writel(sport, ucr1, UCR1); 1449 1449 1450 1450 ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR); 1451 - if (!sport->dma_is_enabled) 1451 + if (!dma_is_inited) 1452 1452 ucr4 |= UCR4_OREN; 1453 1453 if (sport->inverted_rx) 1454 1454 ucr4 |= UCR4_INVR;
+2 -4
drivers/tty/serial/sc16is7xx.c
··· 1238 1238 1239 1239 /* Disable all interrupts */ 1240 1240 sc16is7xx_port_write(port, SC16IS7XX_IER_REG, 0); 1241 - /* Disable TX/RX, clear auto RS485 and RTS invert */ 1241 + /* Disable TX/RX */ 1242 1242 sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, 1243 1243 SC16IS7XX_EFCR_RXDISABLE_BIT | 1244 - SC16IS7XX_EFCR_TXDISABLE_BIT | 1245 - SC16IS7XX_EFCR_AUTO_RS485_BIT | 1246 - SC16IS7XX_EFCR_RTS_INVERT_BIT, 1244 + SC16IS7XX_EFCR_TXDISABLE_BIT, 1247 1245 SC16IS7XX_EFCR_RXDISABLE_BIT | 1248 1246 SC16IS7XX_EFCR_TXDISABLE_BIT); 1249 1247
+5 -2
drivers/usb/cdns3/cdns3-gadget.c
··· 2684 2684 struct usb_request *request; 2685 2685 struct cdns3_request *priv_req; 2686 2686 struct cdns3_trb *trb = NULL; 2687 + struct cdns3_trb trb_tmp; 2687 2688 int ret; 2688 2689 int val; 2689 2690 ··· 2694 2693 if (request) { 2695 2694 priv_req = to_cdns3_request(request); 2696 2695 trb = priv_req->trb; 2697 - if (trb) 2696 + if (trb) { 2697 + trb_tmp = *trb; 2698 2698 trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); 2699 + } 2699 2700 } 2700 2701 2701 2702 writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); ··· 2712 2709 2713 2710 if (request) { 2714 2711 if (trb) 2715 - trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); 2712 + *trb = trb_tmp; 2716 2713 2717 2714 cdns3_rearm_transfer(priv_ep, 1); 2718 2715 }
+9 -5
drivers/usb/core/devio.c
··· 1209 1209 1210 1210 usb_unlock_device(dev); 1211 1211 i = usbfs_start_wait_urb(urb, tmo, &actlen); 1212 + 1213 + /* Linger a bit, prior to the next control message. */ 1214 + if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) 1215 + msleep(200); 1212 1216 usb_lock_device(dev); 1213 1217 snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen); 1214 1218 if (!i && actlen) { 1215 1219 if (copy_to_user(ctrl->data, tbuf, actlen)) { 1216 1220 ret = -EFAULT; 1217 - goto recv_fault; 1221 + goto done; 1218 1222 } 1219 1223 } 1220 1224 } else { ··· 1235 1231 1236 1232 usb_unlock_device(dev); 1237 1233 i = usbfs_start_wait_urb(urb, tmo, &actlen); 1234 + 1235 + /* Linger a bit, prior to the next control message. */ 1236 + if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) 1237 + msleep(200); 1238 1238 usb_lock_device(dev); 1239 1239 snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0); 1240 1240 } ··· 1250 1242 } 1251 1243 ret = (i < 0 ? i : actlen); 1252 1244 1253 - recv_fault: 1254 - /* Linger a bit, prior to the next control message. */ 1255 - if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) 1256 - msleep(200); 1257 1245 done: 1258 1246 kfree(dr); 1259 1247 usb_free_urb(urb);
+6
drivers/usb/core/quirks.c
··· 404 404 { USB_DEVICE(0x0b05, 0x17e0), .driver_info = 405 405 USB_QUIRK_IGNORE_REMOTE_WAKEUP }, 406 406 407 + /* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader)*/ 408 + { USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, 409 + 407 410 /* Realtek hub in Dell WD19 (Type-C) */ 408 411 { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM }, 409 412 ··· 509 506 510 507 /* DJI CineSSD */ 511 508 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, 509 + 510 + /* VCOM device */ 511 + { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, 512 512 513 513 /* INTEL VALUE SSD */ 514 514 { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+29 -5
drivers/usb/dwc3/core.c
··· 274 274 275 275 reg = dwc3_readl(dwc->regs, DWC3_DCTL); 276 276 reg |= DWC3_DCTL_CSFTRST; 277 - dwc3_writel(dwc->regs, DWC3_DCTL, reg); 277 + reg &= ~DWC3_DCTL_RUN_STOP; 278 + dwc3_gadget_dctl_write_safe(dwc, reg); 278 279 279 280 /* 280 281 * For DWC_usb31 controller 1.90a and later, the DCTL.CSFRST bit ··· 1378 1377 u8 lpm_nyet_threshold; 1379 1378 u8 tx_de_emphasis; 1380 1379 u8 hird_threshold; 1381 - u8 rx_thr_num_pkt_prd; 1382 - u8 rx_max_burst_prd; 1383 - u8 tx_thr_num_pkt_prd; 1384 - u8 tx_max_burst_prd; 1380 + u8 rx_thr_num_pkt_prd = 0; 1381 + u8 rx_max_burst_prd = 0; 1382 + u8 tx_thr_num_pkt_prd = 0; 1383 + u8 tx_max_burst_prd = 0; 1385 1384 u8 tx_fifo_resize_max_num; 1386 1385 const char *usb_psy_name; 1387 1386 int ret; ··· 1691 1690 /* 1692 1691 * Clocks are optional, but new DT platforms should support all 1693 1692 * clocks as required by the DT-binding. 1693 + * Some devices have different clock names in legacy device trees, 1694 + * check for them to retain backwards compatibility. 
1694 1695 */ 1695 1696 dwc->bus_clk = devm_clk_get_optional(dev, "bus_early"); 1696 1697 if (IS_ERR(dwc->bus_clk)) 1697 1698 return dev_err_probe(dev, PTR_ERR(dwc->bus_clk), 1698 1699 "could not get bus clock\n"); 1699 1700 1701 + if (dwc->bus_clk == NULL) { 1702 + dwc->bus_clk = devm_clk_get_optional(dev, "bus_clk"); 1703 + if (IS_ERR(dwc->bus_clk)) 1704 + return dev_err_probe(dev, PTR_ERR(dwc->bus_clk), 1705 + "could not get bus clock\n"); 1706 + } 1707 + 1700 1708 dwc->ref_clk = devm_clk_get_optional(dev, "ref"); 1701 1709 if (IS_ERR(dwc->ref_clk)) 1702 1710 return dev_err_probe(dev, PTR_ERR(dwc->ref_clk), 1703 1711 "could not get ref clock\n"); 1704 1712 1713 + if (dwc->ref_clk == NULL) { 1714 + dwc->ref_clk = devm_clk_get_optional(dev, "ref_clk"); 1715 + if (IS_ERR(dwc->ref_clk)) 1716 + return dev_err_probe(dev, PTR_ERR(dwc->ref_clk), 1717 + "could not get ref clock\n"); 1718 + } 1719 + 1705 1720 dwc->susp_clk = devm_clk_get_optional(dev, "suspend"); 1706 1721 if (IS_ERR(dwc->susp_clk)) 1707 1722 return dev_err_probe(dev, PTR_ERR(dwc->susp_clk), 1708 1723 "could not get suspend clock\n"); 1724 + 1725 + if (dwc->susp_clk == NULL) { 1726 + dwc->susp_clk = devm_clk_get_optional(dev, "suspend_clk"); 1727 + if (IS_ERR(dwc->susp_clk)) 1728 + return dev_err_probe(dev, PTR_ERR(dwc->susp_clk), 1729 + "could not get suspend clock\n"); 1730 + } 1709 1731 } 1710 1732 1711 1733 ret = reset_control_deassert(dwc->reset);
+5 -6
drivers/usb/dwc3/drd.c
··· 584 584 { 585 585 int ret, irq; 586 586 587 + if (ROLE_SWITCH && 588 + device_property_read_bool(dwc->dev, "usb-role-switch")) 589 + return dwc3_setup_role_switch(dwc); 590 + 587 591 dwc->edev = dwc3_get_extcon(dwc); 588 592 if (IS_ERR(dwc->edev)) 589 593 return PTR_ERR(dwc->edev); 590 594 591 - if (ROLE_SWITCH && 592 - device_property_read_bool(dwc->dev, "usb-role-switch")) { 593 - ret = dwc3_setup_role_switch(dwc); 594 - if (ret < 0) 595 - return ret; 596 - } else if (dwc->edev) { 595 + if (dwc->edev) { 597 596 dwc->edev_nb.notifier_call = dwc3_drd_notifier; 598 597 ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST, 599 598 &dwc->edev_nb);
+8
drivers/usb/dwc3/dwc3-pci.c
··· 45 45 #define PCI_DEVICE_ID_INTEL_ADLM 0x54ee 46 46 #define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 47 47 #define PCI_DEVICE_ID_INTEL_RPLS 0x7a61 48 + #define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1 49 + #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e 48 50 #define PCI_DEVICE_ID_INTEL_TGL 0x9a15 49 51 #define PCI_DEVICE_ID_AMD_MR 0x163a 50 52 ··· 456 454 (kernel_ulong_t) &dwc3_pci_intel_swnode, }, 457 455 458 456 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS), 457 + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, 458 + 459 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP), 460 + (kernel_ulong_t) &dwc3_pci_intel_swnode, }, 461 + 462 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL), 459 463 (kernel_ulong_t) &dwc3_pci_intel_swnode, }, 460 464 461 465 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
+30 -1
drivers/usb/dwc3/gadget.c
··· 3274 3274 const struct dwc3_event_depevt *event, 3275 3275 struct dwc3_request *req, int status) 3276 3276 { 3277 + int request_status; 3277 3278 int ret; 3278 3279 3279 3280 if (req->request.num_mapped_sgs) ··· 3295 3294 req->needs_extra_trb = false; 3296 3295 } 3297 3296 3298 - dwc3_gadget_giveback(dep, req, status); 3297 + /* 3298 + * The event status only reflects the status of the TRB with IOC set. 3299 + * For the requests that don't set interrupt on completion, the driver 3300 + * needs to check and return the status of the completed TRBs associated 3301 + * with the request. Use the status of the last TRB of the request. 3302 + */ 3303 + if (req->request.no_interrupt) { 3304 + struct dwc3_trb *trb; 3305 + 3306 + trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue); 3307 + switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) { 3308 + case DWC3_TRBSTS_MISSED_ISOC: 3309 + /* Isoc endpoint only */ 3310 + request_status = -EXDEV; 3311 + break; 3312 + case DWC3_TRB_STS_XFER_IN_PROG: 3313 + /* Applicable when End Transfer with ForceRM=0 */ 3314 + case DWC3_TRBSTS_SETUP_PENDING: 3315 + /* Control endpoint only */ 3316 + case DWC3_TRBSTS_OK: 3317 + default: 3318 + request_status = 0; 3319 + break; 3320 + } 3321 + } else { 3322 + request_status = status; 3323 + } 3324 + 3325 + dwc3_gadget_giveback(dep, req, request_status); 3299 3326 3300 3327 out: 3301 3328 return ret;
+2
drivers/usb/gadget/configfs.c
··· 1438 1438 usb_ep_autoconfig_reset(cdev->gadget); 1439 1439 spin_lock_irqsave(&gi->spinlock, flags); 1440 1440 cdev->gadget = NULL; 1441 + cdev->deactivations = 0; 1442 + gadget->deactivated = false; 1441 1443 set_gadget_data(gadget, NULL); 1442 1444 spin_unlock_irqrestore(&gi->spinlock, flags); 1443 1445 }
+2
drivers/usb/gadget/function/uvc_queue.c
··· 264 264 buf->state = UVC_BUF_STATE_ERROR; 265 265 vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR); 266 266 } 267 + queue->buf_used = 0; 268 + 267 269 /* This must be protected by the irqlock spinlock to avoid race 268 270 * conditions between uvc_queue_buffer and the disconnection event that 269 271 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
+23
drivers/usb/host/ehci-hcd.c
··· 1103 1103 1104 1104 #ifdef CONFIG_PM 1105 1105 1106 + /* Clear wakeup signal locked in zhaoxin platform when device plug in. */ 1107 + static void ehci_zx_wakeup_clear(struct ehci_hcd *ehci) 1108 + { 1109 + u32 __iomem *reg = &ehci->regs->port_status[4]; 1110 + u32 t1 = ehci_readl(ehci, reg); 1111 + 1112 + t1 &= (u32)~0xf0000; 1113 + t1 |= PORT_TEST_FORCE; 1114 + ehci_writel(ehci, t1, reg); 1115 + t1 = ehci_readl(ehci, reg); 1116 + msleep(1); 1117 + t1 &= (u32)~0xf0000; 1118 + ehci_writel(ehci, t1, reg); 1119 + ehci_readl(ehci, reg); 1120 + msleep(1); 1121 + t1 = ehci_readl(ehci, reg); 1122 + ehci_writel(ehci, t1 | PORT_CSC, reg); 1123 + ehci_readl(ehci, reg); 1124 + } 1125 + 1106 1126 /* suspend/resume, section 4.3 */ 1107 1127 1108 1128 /* These routines handle the generic parts of controller suspend/resume */ ··· 1173 1153 1174 1154 if (ehci->shutdown) 1175 1155 return 0; /* Controller is dead */ 1156 + 1157 + if (ehci->zx_wakeup_clear_needed) 1158 + ehci_zx_wakeup_clear(ehci); 1176 1159 1177 1160 /* 1178 1161 * If CF is still set and reset isn't forced
+4
drivers/usb/host/ehci-pci.c
··· 231 231 ehci->is_aspeed = 1; 232 232 } 233 233 break; 234 + case PCI_VENDOR_ID_ZHAOXIN: 235 + if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x90) 236 + ehci->zx_wakeup_clear_needed = 1; 237 + break; 234 238 } 235 239 236 240 /* optional debug port, normally in the first BAR */
+1
drivers/usb/host/ehci.h
··· 220 220 unsigned imx28_write_fix:1; /* For Freescale i.MX28 */ 221 221 unsigned spurious_oc:1; 222 222 unsigned is_aspeed:1; 223 + unsigned zx_wakeup_clear_needed:1; 223 224 224 225 /* required for usb32 quirk */ 225 226 #define OHCI_CTRL_HCFS (3 << 6)
+1 -1
drivers/usb/host/xhci-hub.c
··· 1434 1434 } 1435 1435 spin_unlock_irqrestore(&xhci->lock, flags); 1436 1436 if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex], 1437 - msecs_to_jiffies(100))) 1437 + msecs_to_jiffies(500))) 1438 1438 xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n", 1439 1439 hcd->self.busnum, wIndex + 1); 1440 1440 spin_lock_irqsave(&xhci->lock, flags);
+3 -1
drivers/usb/host/xhci-pci.c
··· 59 59 #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 60 60 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 61 61 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e 62 + #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed 62 63 63 64 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 64 65 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 ··· 267 266 pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI || 268 267 pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI || 269 268 pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI || 270 - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI)) 269 + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI || 270 + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI)) 271 271 xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; 272 272 273 273 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+1
drivers/usb/host/xhci-ring.c
··· 3141 3141 if (event_loop++ < TRBS_PER_SEGMENT / 2) 3142 3142 continue; 3143 3143 xhci_update_erst_dequeue(xhci, event_ring_deq); 3144 + event_ring_deq = xhci->event_ring->dequeue; 3144 3145 3145 3146 /* ring is half-full, force isoc trbs to interrupt more often */ 3146 3147 if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
+2 -2
drivers/usb/host/xhci-tegra.c
··· 1034 1034 int rc; 1035 1035 1036 1036 if (tegra->use_genpd) { 1037 - rc = pm_runtime_get_sync(tegra->genpd_dev_ss); 1037 + rc = pm_runtime_resume_and_get(tegra->genpd_dev_ss); 1038 1038 if (rc < 0) { 1039 1039 dev_err(dev, "failed to enable XUSB SS partition\n"); 1040 1040 return rc; 1041 1041 } 1042 1042 1043 - rc = pm_runtime_get_sync(tegra->genpd_dev_host); 1043 + rc = pm_runtime_resume_and_get(tegra->genpd_dev_host); 1044 1044 if (rc < 0) { 1045 1045 dev_err(dev, "failed to enable XUSB Host partition\n"); 1046 1046 pm_runtime_put_sync(tegra->genpd_dev_ss);
+11
drivers/usb/host/xhci.c
··· 781 781 if (xhci->quirks & XHCI_SPURIOUS_REBOOT) 782 782 usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); 783 783 784 + /* Don't poll the roothubs after shutdown. */ 785 + xhci_dbg(xhci, "%s: stopping usb%d port polling.\n", 786 + __func__, hcd->self.busnum); 787 + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 788 + del_timer_sync(&hcd->rh_timer); 789 + 790 + if (xhci->shared_hcd) { 791 + clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 792 + del_timer_sync(&xhci->shared_hcd->rh_timer); 793 + } 794 + 784 795 spin_lock_irq(&xhci->lock); 785 796 xhci_halt(xhci); 786 797 /* Workaround for spurious wakeups at shutdown with HSW */
+5 -5
drivers/usb/misc/qcom_eud.c
··· 186 186 187 187 chip->dev = &pdev->dev; 188 188 189 - ret = devm_add_action_or_reset(chip->dev, eud_role_switch_release, chip); 190 - if (ret) 191 - return dev_err_probe(chip->dev, ret, 192 - "failed to add role switch release action\n"); 193 - 194 189 chip->role_sw = usb_role_switch_get(&pdev->dev); 195 190 if (IS_ERR(chip->role_sw)) 196 191 return dev_err_probe(chip->dev, PTR_ERR(chip->role_sw), 197 192 "failed to get role switch\n"); 193 + 194 + ret = devm_add_action_or_reset(chip->dev, eud_role_switch_release, chip); 195 + if (ret) 196 + return dev_err_probe(chip->dev, ret, 197 + "failed to add role switch release action\n"); 198 198 199 199 chip->base = devm_platform_ioremap_resource(pdev, 0); 200 200 if (IS_ERR(chip->base))
+1 -2
drivers/usb/misc/uss720.c
··· 71 71 72 72 dev_dbg(&priv->usbdev->dev, "destroying priv datastructure\n"); 73 73 usb_put_dev(priv->usbdev); 74 + priv->usbdev = NULL; 74 75 kfree(priv); 75 76 } 76 77 ··· 737 736 parport_announce_port(pp); 738 737 739 738 usb_set_intfdata(intf, pp); 740 - usb_put_dev(usbdev); 741 739 return 0; 742 740 743 741 probe_abort: ··· 754 754 usb_set_intfdata(intf, NULL); 755 755 if (pp) { 756 756 priv = pp->private_data; 757 - priv->usbdev = NULL; 758 757 priv->pp = NULL; 759 758 dev_dbg(&intf->dev, "parport_remove_port\n"); 760 759 parport_remove_port(pp);
+2 -4
drivers/usb/mtu3/mtu3_dr.c
··· 21 21 22 22 static void toggle_opstate(struct ssusb_mtk *ssusb) 23 23 { 24 - if (!ssusb->otg_switch.is_u3_drd) { 25 - mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION); 26 - mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN); 27 - } 24 + mtu3_setbits(ssusb->mac_base, U3D_DEVICE_CONTROL, DC_SESSION); 25 + mtu3_setbits(ssusb->mac_base, U3D_POWER_MANAGEMENT, SOFT_CONN); 28 26 } 29 27 30 28 /* only port0 supports dual-role mode */
+7
drivers/usb/phy/phy-generic.c
··· 268 268 return -EPROBE_DEFER; 269 269 } 270 270 271 + nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus"); 272 + if (PTR_ERR(nop->vbus_draw) == -ENODEV) 273 + nop->vbus_draw = NULL; 274 + if (IS_ERR(nop->vbus_draw)) 275 + return dev_err_probe(dev, PTR_ERR(nop->vbus_draw), 276 + "could not get vbus regulator\n"); 277 + 271 278 nop->dev = dev; 272 279 nop->phy.dev = nop->dev; 273 280 nop->phy.label = "nop-xceiv";
+2
drivers/usb/serial/cp210x.c
··· 194 194 { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ 195 195 { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ 196 196 { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ 197 + { USB_DEVICE(0x17A8, 0x0101) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Int Ant) */ 198 + { USB_DEVICE(0x17A8, 0x0102) }, /* Kamstrup 868 MHz wM-Bus C-Mode Meter Reader (Ext Ant) */ 197 199 { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ 198 200 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 199 201 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+12
drivers/usb/serial/option.c
··· 432 432 #define CINTERION_PRODUCT_CLS8 0x00b0 433 433 #define CINTERION_PRODUCT_MV31_MBIM 0x00b3 434 434 #define CINTERION_PRODUCT_MV31_RMNET 0x00b7 435 + #define CINTERION_PRODUCT_MV32_WA 0x00f1 436 + #define CINTERION_PRODUCT_MV32_WB 0x00f2 435 437 436 438 /* Olivetti products */ 437 439 #define OLIVETTI_VENDOR_ID 0x0b3c ··· 1219 1217 .driver_info = NCTRL(0) | RSVD(1) }, 1220 1218 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */ 1221 1219 .driver_info = NCTRL(2) | RSVD(3) }, 1220 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1057, 0xff), /* Telit FN980 */ 1221 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, 1222 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1058, 0xff), /* Telit FN980 (PCIe) */ 1223 + .driver_info = NCTRL(0) | RSVD(1) }, 1222 1224 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */ 1223 1225 .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, 1224 1226 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */ ··· 1239 1233 .driver_info = NCTRL(2) | RSVD(3) }, 1240 1234 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */ 1241 1235 .driver_info = NCTRL(0) | RSVD(1) }, 1236 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */ 1237 + .driver_info = RSVD(0) }, 1242 1238 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), 1243 1239 .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, 1244 1240 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), ··· 1977 1969 .driver_info = RSVD(3)}, 1978 1970 { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), 1979 1971 .driver_info = RSVD(0)}, 1972 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff), 1973 + .driver_info = RSVD(3)}, 1974 + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff), 1975 + .driver_info = RSVD(3)}, 1980 1976 { 
USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), 1981 1977 .driver_info = RSVD(4) }, 1982 1978 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
+2 -3
drivers/usb/serial/whiteheat.c
··· 584 584 switch (command) { 585 585 case WHITEHEAT_GET_DTR_RTS: 586 586 info = usb_get_serial_port_data(port); 587 - memcpy(&info->mcr, command_info->result_buffer, 588 - sizeof(struct whiteheat_dr_info)); 589 - break; 587 + info->mcr = command_info->result_buffer[0]; 588 + break; 590 589 } 591 590 } 592 591 exit:
+1
drivers/usb/typec/Kconfig
··· 56 56 tristate "Richtek RT1719 Sink Only Type-C controller driver" 57 57 depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH 58 58 depends on I2C 59 + depends on POWER_SUPPLY 59 60 select REGMAP_I2C 60 61 help 61 62 Say Y or M here if your system has Richtek RT1719 sink only
+17 -7
drivers/usb/typec/ucsi/ucsi.c
··· 949 949 role == TYPEC_HOST)) 950 950 goto out_unlock; 951 951 952 + reinit_completion(&con->complete); 953 + 952 954 command = UCSI_SET_UOR | UCSI_CONNECTOR_NUMBER(con->num); 953 955 command |= UCSI_SET_UOR_ROLE(role); 954 956 command |= UCSI_SET_UOR_ACCEPT_ROLE_SWAPS; ··· 958 956 if (ret < 0) 959 957 goto out_unlock; 960 958 959 + mutex_unlock(&con->lock); 960 + 961 961 if (!wait_for_completion_timeout(&con->complete, 962 - msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) 963 - ret = -ETIMEDOUT; 962 + msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) 963 + return -ETIMEDOUT; 964 + 965 + return 0; 964 966 965 967 out_unlock: 966 968 mutex_unlock(&con->lock); 967 969 968 - return ret < 0 ? ret : 0; 970 + return ret; 969 971 } 970 972 971 973 static int ucsi_pr_swap(struct typec_port *port, enum typec_role role) ··· 991 985 if (cur_role == role) 992 986 goto out_unlock; 993 987 988 + reinit_completion(&con->complete); 989 + 994 990 command = UCSI_SET_PDR | UCSI_CONNECTOR_NUMBER(con->num); 995 991 command |= UCSI_SET_PDR_ROLE(role); 996 992 command |= UCSI_SET_PDR_ACCEPT_ROLE_SWAPS; ··· 1000 992 if (ret < 0) 1001 993 goto out_unlock; 1002 994 995 + mutex_unlock(&con->lock); 996 + 1003 997 if (!wait_for_completion_timeout(&con->complete, 1004 - msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) { 1005 - ret = -ETIMEDOUT; 1006 - goto out_unlock; 1007 - } 998 + msecs_to_jiffies(UCSI_SWAP_TIMEOUT_MS))) 999 + return -ETIMEDOUT; 1000 + 1001 + mutex_lock(&con->lock); 1008 1002 1009 1003 /* Something has gone wrong while swapping the role */ 1010 1004 if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) !=
+11
fs/btrfs/btrfs_inode.h
··· 384 384 return ret; 385 385 } 386 386 387 + /* 388 + * Check if the inode has flags compatible with compression 389 + */ 390 + static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode) 391 + { 392 + if (inode->flags & BTRFS_INODE_NODATACOW || 393 + inode->flags & BTRFS_INODE_NODATASUM) 394 + return false; 395 + return true; 396 + } 397 + 387 398 struct btrfs_dio_private { 388 399 struct inode *inode; 389 400
+2 -13
fs/btrfs/inode.c
··· 481 481 } 482 482 483 483 /* 484 - * Check if the inode has flags compatible with compression 485 - */ 486 - static inline bool inode_can_compress(struct btrfs_inode *inode) 487 - { 488 - if (inode->flags & BTRFS_INODE_NODATACOW || 489 - inode->flags & BTRFS_INODE_NODATASUM) 490 - return false; 491 - return true; 492 - } 493 - 494 - /* 495 484 * Check if the inode needs to be submitted to compression, based on mount 496 485 * options, defragmentation, properties or heuristics. 497 486 */ ··· 489 500 { 490 501 struct btrfs_fs_info *fs_info = inode->root->fs_info; 491 502 492 - if (!inode_can_compress(inode)) { 503 + if (!btrfs_inode_can_compress(inode)) { 493 504 WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG), 494 505 KERN_ERR "BTRFS: unexpected compression for ino %llu\n", 495 506 btrfs_ino(inode)); ··· 2008 2019 ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root)); 2009 2020 ret = run_delalloc_nocow(inode, locked_page, start, end, 2010 2021 page_started, nr_written); 2011 - } else if (!inode_can_compress(inode) || 2022 + } else if (!btrfs_inode_can_compress(inode) || 2012 2023 !inode_need_compress(inode, start, end)) { 2013 2024 if (zoned) 2014 2025 ret = run_delalloc_zoned(inode, locked_page, start, end,
+54 -5
fs/btrfs/props.c
··· 17 17 struct prop_handler { 18 18 struct hlist_node node; 19 19 const char *xattr_name; 20 - int (*validate)(const char *value, size_t len); 20 + int (*validate)(const struct btrfs_inode *inode, const char *value, 21 + size_t len); 21 22 int (*apply)(struct inode *inode, const char *value, size_t len); 22 23 const char *(*extract)(struct inode *inode); 24 + bool (*ignore)(const struct btrfs_inode *inode); 23 25 int inheritable; 24 26 }; 25 27 ··· 57 55 return NULL; 58 56 } 59 57 60 - int btrfs_validate_prop(const char *name, const char *value, size_t value_len) 58 + int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name, 59 + const char *value, size_t value_len) 61 60 { 62 61 const struct prop_handler *handler; 63 62 ··· 72 69 if (value_len == 0) 73 70 return 0; 74 71 75 - return handler->validate(value, value_len); 72 + return handler->validate(inode, value, value_len); 73 + } 74 + 75 + /* 76 + * Check if a property should be ignored (not set) for an inode. 77 + * 78 + * @inode: The target inode. 79 + * @name: The property's name. 80 + * 81 + * The caller must be sure the given property name is valid, for example by 82 + * having previously called btrfs_validate_prop(). 
83 + * 84 + * Returns: true if the property should be ignored for the given inode 85 + * false if the property must not be ignored for the given inode 86 + */ 87 + bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name) 88 + { 89 + const struct prop_handler *handler; 90 + 91 + handler = find_prop_handler(name, NULL); 92 + ASSERT(handler != NULL); 93 + 94 + return handler->ignore(inode); 76 95 } 77 96 78 97 int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode, ··· 277 252 return ret; 278 253 } 279 254 280 - static int prop_compression_validate(const char *value, size_t len) 255 + static int prop_compression_validate(const struct btrfs_inode *inode, 256 + const char *value, size_t len) 281 257 { 258 + if (!btrfs_inode_can_compress(inode)) 259 + return -EINVAL; 260 + 282 261 if (!value) 283 262 return 0; 284 263 ··· 339 310 return 0; 340 311 } 341 312 313 + static bool prop_compression_ignore(const struct btrfs_inode *inode) 314 + { 315 + /* 316 + * Compression only has effect for regular files, and for directories 317 + * we set it just to propagate it to new files created inside them. 318 + * Everything else (symlinks, devices, sockets, fifos) is pointless as 319 + * it will do nothing, so don't waste metadata space on a compression 320 + * xattr for anything that is neither a file nor a directory. 
321 + */ 322 + if (!S_ISREG(inode->vfs_inode.i_mode) && 323 + !S_ISDIR(inode->vfs_inode.i_mode)) 324 + return true; 325 + 326 + return false; 327 + } 328 + 342 329 static const char *prop_compression_extract(struct inode *inode) 343 330 { 344 331 switch (BTRFS_I(inode)->prop_compress) { ··· 375 330 .validate = prop_compression_validate, 376 331 .apply = prop_compression_apply, 377 332 .extract = prop_compression_extract, 333 + .ignore = prop_compression_ignore, 378 334 .inheritable = 1 379 335 }, 380 336 }; ··· 402 356 if (!h->inheritable) 403 357 continue; 404 358 359 + if (h->ignore(BTRFS_I(inode))) 360 + continue; 361 + 405 362 value = h->extract(parent); 406 363 if (!value) 407 364 continue; ··· 413 364 * This is not strictly necessary as the property should be 414 365 * valid, but in case it isn't, don't propagate it further. 415 366 */ 416 - ret = h->validate(value, strlen(value)); 367 + ret = h->validate(BTRFS_I(inode), value, strlen(value)); 417 368 if (ret) 418 369 continue; 419 370
+3 -1
fs/btrfs/props.h
··· 13 13 int btrfs_set_prop(struct btrfs_trans_handle *trans, struct inode *inode, 14 14 const char *name, const char *value, size_t value_len, 15 15 int flags); 16 - int btrfs_validate_prop(const char *name, const char *value, size_t value_len); 16 + int btrfs_validate_prop(const struct btrfs_inode *inode, const char *name, 17 + const char *value, size_t value_len); 18 + bool btrfs_ignore_prop(const struct btrfs_inode *inode, const char *name); 17 19 18 20 int btrfs_load_inode_props(struct inode *inode, struct btrfs_path *path); 19 21
+13 -1
fs/btrfs/tree-log.c
··· 5805 5805 } 5806 5806 5807 5807 /* 5808 + * For symlinks, we must always log their content, which is stored in an 5809 + * inline extent, otherwise we could end up with an empty symlink after 5810 + * log replay, which is invalid on linux (symlink(2) returns -ENOENT if 5811 + * one attempts to create an empty symlink). 5812 + * We don't need to worry about flushing delalloc, because when we create 5813 + * the inline extent when the symlink is created (we never have delalloc 5814 + * for symlinks). 5815 + */ 5816 + if (S_ISLNK(inode->vfs_inode.i_mode)) 5817 + inode_only = LOG_INODE_ALL; 5818 + 5819 + /* 5808 5820 * Before logging the inode item, cache the value returned by 5809 5821 * inode_logged(), because after that we have the need to figure out if 5810 5822 * the inode was previously logged in this transaction. ··· 6194 6182 } 6195 6183 6196 6184 ctx->log_new_dentries = false; 6197 - if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) 6185 + if (type == BTRFS_FT_DIR) 6198 6186 log_mode = LOG_INODE_ALL; 6199 6187 ret = btrfs_log_inode(trans, BTRFS_I(di_inode), 6200 6188 log_mode, ctx);
+8 -3
fs/btrfs/xattr.c
··· 262 262 inode_inc_iversion(inode); 263 263 inode->i_ctime = current_time(inode); 264 264 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 265 - BUG_ON(ret); 265 + if (ret) 266 + btrfs_abort_transaction(trans, ret); 266 267 out: 267 268 if (start_trans) 268 269 btrfs_end_transaction(trans); ··· 404 403 struct btrfs_root *root = BTRFS_I(inode)->root; 405 404 406 405 name = xattr_full_name(handler, name); 407 - ret = btrfs_validate_prop(name, value, size); 406 + ret = btrfs_validate_prop(BTRFS_I(inode), name, value, size); 408 407 if (ret) 409 408 return ret; 409 + 410 + if (btrfs_ignore_prop(BTRFS_I(inode), name)) 411 + return 0; 410 412 411 413 trans = btrfs_start_transaction(root, 2); 412 414 if (IS_ERR(trans)) ··· 420 416 inode_inc_iversion(inode); 421 417 inode->i_ctime = current_time(inode); 422 418 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 423 - BUG_ON(ret); 419 + if (ret) 420 + btrfs_abort_transaction(trans, ret); 424 421 } 425 422 426 423 btrfs_end_transaction(trans);
+7
fs/ceph/caps.c
··· 2274 2274 list_for_each_entry(req, &ci->i_unsafe_dirops, 2275 2275 r_unsafe_dir_item) { 2276 2276 s = req->r_session; 2277 + if (!s) 2278 + continue; 2277 2279 if (unlikely(s->s_mds >= max_sessions)) { 2278 2280 spin_unlock(&ci->i_unsafe_lock); 2279 2281 for (i = 0; i < max_sessions; i++) { ··· 2296 2294 list_for_each_entry(req, &ci->i_unsafe_iops, 2297 2295 r_unsafe_target_item) { 2298 2296 s = req->r_session; 2297 + if (!s) 2298 + continue; 2299 2299 if (unlikely(s->s_mds >= max_sessions)) { 2300 2300 spin_unlock(&ci->i_unsafe_lock); 2301 2301 for (i = 0; i < max_sessions; i++) { ··· 3874 3870 dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n", 3875 3871 inode, ci, mds, mseq, target); 3876 3872 retry: 3873 + down_read(&mdsc->snap_rwsem); 3877 3874 spin_lock(&ci->i_ceph_lock); 3878 3875 cap = __get_cap_for_mds(ci, mds); 3879 3876 if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id)) ··· 3938 3933 } 3939 3934 3940 3935 spin_unlock(&ci->i_ceph_lock); 3936 + up_read(&mdsc->snap_rwsem); 3941 3937 mutex_unlock(&session->s_mutex); 3942 3938 3943 3939 /* open target session */ ··· 3964 3958 3965 3959 out_unlock: 3966 3960 spin_unlock(&ci->i_ceph_lock); 3961 + up_read(&mdsc->snap_rwsem); 3967 3962 mutex_unlock(&session->s_mutex); 3968 3963 if (tsession) { 3969 3964 mutex_unlock(&tsession->s_mutex);
-6
fs/ceph/mds_client.c
··· 4434 4434 4435 4435 bool check_session_state(struct ceph_mds_session *s) 4436 4436 { 4437 - struct ceph_fs_client *fsc = s->s_mdsc->fsc; 4438 - 4439 4437 switch (s->s_state) { 4440 4438 case CEPH_MDS_SESSION_OPEN: 4441 4439 if (s->s_ttl && time_after(jiffies, s->s_ttl)) { ··· 4442 4444 } 4443 4445 break; 4444 4446 case CEPH_MDS_SESSION_CLOSING: 4445 - /* Should never reach this when not force unmounting */ 4446 - WARN_ON_ONCE(s->s_ttl && 4447 - READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN); 4448 - fallthrough; 4449 4447 case CEPH_MDS_SESSION_NEW: 4450 4448 case CEPH_MDS_SESSION_RESTARTING: 4451 4449 case CEPH_MDS_SESSION_CLOSED:
+6 -1
fs/io_uring.c
··· 3783 3783 if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) 3784 3784 return -EOPNOTSUPP; 3785 3785 3786 + kiocb->private = NULL; 3786 3787 kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE; 3787 3788 kiocb->ki_complete = io_complete_rw_iopoll; 3788 3789 req->iopoll_completed = 0; ··· 5208 5207 5209 5208 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) 5210 5209 return -EINVAL; 5210 + if (unlikely(sqe->addr2 || sqe->file_index)) 5211 + return -EINVAL; 5211 5212 5212 5213 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); 5213 5214 sr->len = READ_ONCE(sqe->len); ··· 5420 5417 struct io_sr_msg *sr = &req->sr_msg; 5421 5418 5422 5419 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) 5420 + return -EINVAL; 5421 + if (unlikely(sqe->addr2 || sqe->file_index)) 5423 5422 return -EINVAL; 5424 5423 5425 5424 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); ··· 10593 10588 ret = -EFAULT; 10594 10589 break; 10595 10590 } 10596 - if (reg.resv || reg.offset >= IO_RINGFD_REG_MAX) { 10591 + if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) { 10597 10592 ret = -EINVAL; 10598 10593 break; 10599 10594 }
+6 -1
fs/kernfs/dir.c
··· 1406 1406 */ 1407 1407 void kernfs_remove(struct kernfs_node *kn) 1408 1408 { 1409 - struct kernfs_root *root = kernfs_root(kn); 1409 + struct kernfs_root *root; 1410 + 1411 + if (!kn) 1412 + return; 1413 + 1414 + root = kernfs_root(kn); 1410 1415 1411 1416 down_write(&root->kernfs_rwsem); 1412 1417 __kernfs_remove(kn);
+6 -5
include/asm-generic/bug.h
··· 21 21 #include <linux/panic.h> 22 22 #include <linux/printk.h> 23 23 24 + struct warn_args; 25 + struct pt_regs; 26 + 27 + void __warn(const char *file, int line, void *caller, unsigned taint, 28 + struct pt_regs *regs, struct warn_args *args); 29 + 24 30 #ifdef CONFIG_BUG 25 31 26 32 #ifdef CONFIG_GENERIC_BUG ··· 116 110 #endif 117 111 118 112 /* used internally by panic.c */ 119 - struct warn_args; 120 - struct pt_regs; 121 - 122 - void __warn(const char *file, int line, void *caller, unsigned taint, 123 - struct pt_regs *regs, struct warn_args *args); 124 113 125 114 #ifndef WARN_ON 126 115 #define WARN_ON(condition) ({ \
+4 -1
include/dt-bindings/clock/microchip,mpfs-clock.h
··· 1 1 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ 2 2 /* 3 3 * Daire McNamara,<daire.mcnamara@microchip.com> 4 - * Copyright (C) 2020 Microchip Technology Inc. All rights reserved. 4 + * Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved. 5 5 */ 6 6 7 7 #ifndef _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ ··· 41 41 #define CLK_FIC3 30 42 42 #define CLK_ATHENA 31 43 43 #define CLK_CFM 32 44 + 45 + #define CLK_RTCREF 33 46 + #define CLK_MSSPLL 34 44 47 45 48 #endif /* _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ */
+1 -1
include/linux/cpu.h
··· 167 167 static inline void suspend_enable_secondary_cpus(void) { } 168 168 #endif /* !CONFIG_PM_SLEEP_SMP */ 169 169 170 - void cpu_startup_entry(enum cpuhp_state state); 170 + void __noreturn cpu_startup_entry(enum cpuhp_state state); 171 171 172 172 void cpu_idle_poll_ctrl(bool enable); 173 173
+1
include/linux/stmmac.h
··· 270 270 int msi_rx_base_vec; 271 271 int msi_tx_base_vec; 272 272 bool use_phy_wol; 273 + bool sph_disable; 273 274 }; 274 275 #endif
+1 -1
include/linux/usb/pd_bdo.h
··· 15 15 #define BDO_MODE_CARRIER2 (5 << 28) 16 16 #define BDO_MODE_CARRIER3 (6 << 28) 17 17 #define BDO_MODE_EYE (7 << 28) 18 - #define BDO_MODE_TESTDATA (8 << 28) 18 + #define BDO_MODE_TESTDATA (8U << 28) 19 19 20 20 #define BDO_MODE_MASK(mode) ((mode) & 0xf0000000) 21 21
+1
include/memory/renesas-rpc-if.h
··· 72 72 enum rpcif_type type; 73 73 enum rpcif_data_dir dir; 74 74 u8 bus_size; 75 + u8 xfer_size; 75 76 void *buffer; 76 77 u32 xferlen; 77 78 u32 smcr;
+1 -1
include/net/inet_hashtables.h
··· 425 425 } 426 426 427 427 int __inet_hash_connect(struct inet_timewait_death_row *death_row, 428 - struct sock *sk, u32 port_offset, 428 + struct sock *sk, u64 port_offset, 429 429 int (*check_established)(struct inet_timewait_death_row *, 430 430 struct sock *, __u16, 431 431 struct inet_timewait_sock **));
+2 -2
include/net/secure_seq.h
··· 4 4 5 5 #include <linux/types.h> 6 6 7 - u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); 8 - u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, 7 + u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); 8 + u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, 9 9 __be16 dport); 10 10 u32 secure_tcp_seq(__be32 saddr, __be32 daddr, 11 11 __be16 sport, __be16 dport);
+1 -1
include/uapi/linux/elf.h
··· 42 42 43 43 44 44 /* ARM MTE memory tag segment type */ 45 - #define PT_ARM_MEMTAG_MTE (PT_LOPROC + 0x1) 45 + #define PT_AARCH64_MEMTAG_MTE (PT_LOPROC + 0x2) 46 46 47 47 /* 48 48 * Extended Numbering
+9 -1
include/uapi/linux/kvm.h
··· 445 445 #define KVM_SYSTEM_EVENT_RESET 2 446 446 #define KVM_SYSTEM_EVENT_CRASH 3 447 447 __u32 type; 448 - __u64 flags; 448 + __u32 ndata; 449 + union { 450 + #ifndef __KERNEL__ 451 + __u64 flags; 452 + #endif 453 + __u64 data[16]; 454 + }; 449 455 } system_event; 450 456 /* KVM_EXIT_S390_STSI */ 451 457 struct { ··· 1150 1144 #define KVM_CAP_S390_MEM_OP_EXTENSION 211 1151 1145 #define KVM_CAP_PMU_CAPABILITY 212 1152 1146 #define KVM_CAP_DISABLE_QUIRKS2 213 1147 + /* #define KVM_CAP_VM_TSC_CONTROL 214 */ 1148 + #define KVM_CAP_SYSTEM_EVENT_DATA 215 1153 1149 1154 1150 #ifdef KVM_CAP_IRQ_ROUTING 1155 1151
+1 -1
lib/strncpy_from_user.c
··· 25 25 * hit it), 'max' is the address space maximum (and we return 26 26 * -EFAULT if we hit it). 27 27 */ 28 - static inline long do_strncpy_from_user(char *dst, const char __user *src, 28 + static __always_inline long do_strncpy_from_user(char *dst, const char __user *src, 29 29 unsigned long count, unsigned long max) 30 30 { 31 31 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+1 -1
lib/strnlen_user.c
··· 20 20 * if it fits in a aligned 'long'. The caller needs to check 21 21 * the return value against "> max". 22 22 */ 23 - static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) 23 + static __always_inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max) 24 24 { 25 25 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; 26 26 unsigned long align, res = 0;
+5 -20
net/can/isotp.c
··· 1187 1187 1188 1188 lock_sock(sk); 1189 1189 1190 + if (so->bound) { 1191 + err = -EINVAL; 1192 + goto out; 1193 + } 1194 + 1190 1195 /* do not register frame reception for functional addressing */ 1191 1196 if (so->opt.flags & CAN_ISOTP_SF_BROADCAST) 1192 1197 do_rx_reg = 0; ··· 1201 1196 err = -EADDRNOTAVAIL; 1202 1197 goto out; 1203 1198 } 1204 - 1205 - if (so->bound && addr->can_ifindex == so->ifindex && 1206 - rx_id == so->rxid && tx_id == so->txid) 1207 - goto out; 1208 1199 1209 1200 dev = dev_get_by_index(net, addr->can_ifindex); 1210 1201 if (!dev) { ··· 1235 1234 } 1236 1235 1237 1236 dev_put(dev); 1238 - 1239 - if (so->bound && do_rx_reg) { 1240 - /* unregister old filter */ 1241 - if (so->ifindex) { 1242 - dev = dev_get_by_index(net, so->ifindex); 1243 - if (dev) { 1244 - can_rx_unregister(net, dev, so->rxid, 1245 - SINGLE_MASK(so->rxid), 1246 - isotp_rcv, sk); 1247 - can_rx_unregister(net, dev, so->txid, 1248 - SINGLE_MASK(so->txid), 1249 - isotp_rcv_echo, sk); 1250 - dev_put(dev); 1251 - } 1252 - } 1253 - } 1254 1237 1255 1238 /* switch to new settings */ 1256 1239 so->ifindex = ifindex;
+5 -1
net/ceph/osd_client.c
··· 2385 2385 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { 2386 2386 err = -ENOSPC; 2387 2387 } else { 2388 - pr_warn_ratelimited("FULL or reached pool quota\n"); 2388 + if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) 2389 + pr_warn_ratelimited("cluster is full (osdmap FULL)\n"); 2390 + else 2391 + pr_warn_ratelimited("pool %lld is full or reached quota\n", 2392 + req->r_t.base_oloc.pool); 2389 2393 req->r_t.paused = true; 2390 2394 maybe_request_map(osdc); 2391 2395 }
+11 -5
net/core/secure_seq.c
··· 22 22 static siphash_aligned_key_t net_secret; 23 23 static siphash_aligned_key_t ts_secret; 24 24 25 + #define EPHEMERAL_PORT_SHUFFLE_PERIOD (10 * HZ) 26 + 25 27 static __always_inline void net_secret_init(void) 26 28 { 27 29 net_get_random_once(&net_secret, sizeof(net_secret)); ··· 96 94 } 97 95 EXPORT_SYMBOL(secure_tcpv6_seq); 98 96 99 - u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, 97 + u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, 100 98 __be16 dport) 101 99 { 102 100 const struct { 103 101 struct in6_addr saddr; 104 102 struct in6_addr daddr; 103 + unsigned int timeseed; 105 104 __be16 dport; 106 105 } __aligned(SIPHASH_ALIGNMENT) combined = { 107 106 .saddr = *(struct in6_addr *)saddr, 108 107 .daddr = *(struct in6_addr *)daddr, 109 - .dport = dport 108 + .timeseed = jiffies / EPHEMERAL_PORT_SHUFFLE_PERIOD, 109 + .dport = dport, 110 110 }; 111 111 net_secret_init(); 112 112 return siphash(&combined, offsetofend(typeof(combined), dport), ··· 146 142 } 147 143 EXPORT_SYMBOL_GPL(secure_tcp_seq); 148 144 149 - u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) 145 + u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) 150 146 { 151 147 net_secret_init(); 152 - return siphash_3u32((__force u32)saddr, (__force u32)daddr, 153 - (__force u16)dport, &net_secret); 148 + return siphash_4u32((__force u32)saddr, (__force u32)daddr, 149 + (__force u16)dport, 150 + jiffies / EPHEMERAL_PORT_SHUFFLE_PERIOD, 151 + &net_secret); 154 152 } 155 153 EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral); 156 154 #endif
+6 -3
net/ipv4/igmp.c
··· 2403 2403 /* decrease mem now to avoid the memleak warning */ 2404 2404 atomic_sub(struct_size(psl, sl_addr, psl->sl_max), 2405 2405 &sk->sk_omem_alloc); 2406 - kfree_rcu(psl, rcu); 2407 2406 } 2408 2407 rcu_assign_pointer(pmc->sflist, newpsl); 2408 + if (psl) 2409 + kfree_rcu(psl, rcu); 2409 2410 psl = newpsl; 2410 2411 } 2411 2412 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ ··· 2508 2507 /* decrease mem now to avoid the memleak warning */ 2509 2508 atomic_sub(struct_size(psl, sl_addr, psl->sl_max), 2510 2509 &sk->sk_omem_alloc); 2511 - kfree_rcu(psl, rcu); 2512 - } else 2510 + } else { 2513 2511 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode, 2514 2512 0, NULL, 0); 2513 + } 2515 2514 rcu_assign_pointer(pmc->sflist, newpsl); 2515 + if (psl) 2516 + kfree_rcu(psl, rcu); 2516 2517 pmc->sfmode = msf->imsf_fmode; 2517 2518 err = 0; 2518 2519 done:
+27 -15
net/ipv4/inet_hashtables.c
··· 504 504 return -EADDRNOTAVAIL; 505 505 } 506 506 507 - static u32 inet_sk_port_offset(const struct sock *sk) 507 + static u64 inet_sk_port_offset(const struct sock *sk) 508 508 { 509 509 const struct inet_sock *inet = inet_sk(sk); 510 510 ··· 726 726 * Note that we use 32bit integers (vs RFC 'short integers') 727 727 * because 2^16 is not a multiple of num_ephemeral and this 728 728 * property might be used by clever attacker. 729 - * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, 730 - * we use 256 instead to really give more isolation and 731 - * privacy, this only consumes 1 KB of kernel memory. 729 + * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though 730 + * attacks were since demonstrated, thus we use 65536 instead to really 731 + * give more isolation and privacy, at the expense of 256kB of kernel 732 + * memory. 732 733 */ 733 - #define INET_TABLE_PERTURB_SHIFT 8 734 - static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT]; 734 + #define INET_TABLE_PERTURB_SHIFT 16 735 + #define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT) 736 + static u32 *table_perturb; 735 737 736 738 int __inet_hash_connect(struct inet_timewait_death_row *death_row, 737 - struct sock *sk, u32 port_offset, 739 + struct sock *sk, u64 port_offset, 738 740 int (*check_established)(struct inet_timewait_death_row *, 739 741 struct sock *, __u16, struct inet_timewait_sock **)) 740 742 { ··· 776 774 if (likely(remaining > 1)) 777 775 remaining &= ~1U; 778 776 779 - net_get_random_once(table_perturb, sizeof(table_perturb)); 780 - index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT); 777 + net_get_random_once(table_perturb, 778 + INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb)); 779 + index = port_offset & (INET_TABLE_PERTURB_SIZE - 1); 781 780 782 - offset = (READ_ONCE(table_perturb[index]) + port_offset) % remaining; 781 + offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32); 782 + offset %= remaining; 783 + 783 784 /* In first 
pass we try ports of @low parity. 784 785 * inet_csk_get_port() does the opposite choice. 785 786 */ ··· 836 831 return -EADDRNOTAVAIL; 837 832 838 833 ok: 839 - /* If our first attempt found a candidate, skip next candidate 840 - * in 1/16 of cases to add some noise. 834 + /* Here we want to add a little bit of randomness to the next source 835 + * port that will be chosen. We use a max() with a random here so that 836 + * on low contention the randomness is maximal and on high contention 837 + * it may be inexistent. 841 838 */ 842 - if (!i && !(prandom_u32() % 16)) 843 - i = 2; 839 + i = max_t(int, i, (prandom_u32() & 7) * 2); 844 840 WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); 845 841 846 842 /* Head lock still held and bh's disabled */ ··· 865 859 int inet_hash_connect(struct inet_timewait_death_row *death_row, 866 860 struct sock *sk) 867 861 { 868 - u32 port_offset = 0; 862 + u64 port_offset = 0; 869 863 870 864 if (!inet_sk(sk)->inet_num) 871 865 port_offset = inet_sk_port_offset(sk); ··· 915 909 low_limit, 916 910 high_limit); 917 911 init_hashinfo_lhash2(h); 912 + 913 + /* this one is used for source ports of outgoing connections */ 914 + table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE, 915 + sizeof(*table_perturb), GFP_KERNEL); 916 + if (!table_perturb) 917 + panic("TCP: failed to alloc table_perturb"); 918 918 } 919 919 920 920 int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
+2 -2
net/ipv6/inet6_hashtables.c
··· 308 308 return -EADDRNOTAVAIL; 309 309 } 310 310 311 - static u32 inet6_sk_port_offset(const struct sock *sk) 311 + static u64 inet6_sk_port_offset(const struct sock *sk) 312 312 { 313 313 const struct inet_sock *inet = inet_sk(sk); 314 314 ··· 320 320 int inet6_hash_connect(struct inet_timewait_death_row *death_row, 321 321 struct sock *sk) 322 322 { 323 - u32 port_offset = 0; 323 + u64 port_offset = 0; 324 324 325 325 if (!inet_sk(sk)->inet_num) 326 326 port_offset = inet6_sk_port_offset(sk);
+4 -4
net/ipv6/mcast.c
··· 460 460 newpsl->sl_addr[i] = psl->sl_addr[i]; 461 461 atomic_sub(struct_size(psl, sl_addr, psl->sl_max), 462 462 &sk->sk_omem_alloc); 463 - kfree_rcu(psl, rcu); 464 463 } 464 + rcu_assign_pointer(pmc->sflist, newpsl); 465 + kfree_rcu(psl, rcu); 465 466 psl = newpsl; 466 - rcu_assign_pointer(pmc->sflist, psl); 467 467 } 468 468 rv = 1; /* > 0 for insert logic below if sl_count is 0 */ 469 469 for (i = 0; i < psl->sl_count; i++) { ··· 565 565 psl->sl_count, psl->sl_addr, 0); 566 566 atomic_sub(struct_size(psl, sl_addr, psl->sl_max), 567 567 &sk->sk_omem_alloc); 568 - kfree_rcu(psl, rcu); 569 568 } else { 570 569 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); 571 570 } 572 - mutex_unlock(&idev->mc_lock); 573 571 rcu_assign_pointer(pmc->sflist, newpsl); 572 + mutex_unlock(&idev->mc_lock); 573 + kfree_rcu(psl, rcu); 574 574 pmc->sfmode = gsf->gf_fmode; 575 575 err = 0; 576 576 done:
+14 -15
net/nfc/core.c
··· 38 38 39 39 device_lock(&dev->dev); 40 40 41 - if (!device_is_registered(&dev->dev)) { 41 + if (dev->shutting_down) { 42 42 rc = -ENODEV; 43 43 goto error; 44 44 } ··· 94 94 95 95 device_lock(&dev->dev); 96 96 97 - if (!device_is_registered(&dev->dev)) { 97 + if (dev->shutting_down) { 98 98 rc = -ENODEV; 99 99 goto error; 100 100 } ··· 142 142 143 143 device_lock(&dev->dev); 144 144 145 - if (!device_is_registered(&dev->dev)) { 145 + if (dev->shutting_down) { 146 146 rc = -ENODEV; 147 147 goto error; 148 148 } ··· 207 207 208 208 device_lock(&dev->dev); 209 209 210 - if (!device_is_registered(&dev->dev)) { 210 + if (dev->shutting_down) { 211 211 rc = -ENODEV; 212 212 goto error; 213 213 } ··· 246 246 247 247 device_lock(&dev->dev); 248 248 249 - if (!device_is_registered(&dev->dev)) { 249 + if (dev->shutting_down) { 250 250 rc = -ENODEV; 251 251 goto error; 252 252 } ··· 291 291 292 292 device_lock(&dev->dev); 293 293 294 - if (!device_is_registered(&dev->dev)) { 294 + if (dev->shutting_down) { 295 295 rc = -ENODEV; 296 296 goto error; 297 297 } ··· 335 335 336 336 device_lock(&dev->dev); 337 337 338 - if (!device_is_registered(&dev->dev)) { 338 + if (dev->shutting_down) { 339 339 rc = -ENODEV; 340 340 goto error; 341 341 } ··· 401 401 402 402 device_lock(&dev->dev); 403 403 404 - if (!device_is_registered(&dev->dev)) { 404 + if (dev->shutting_down) { 405 405 rc = -ENODEV; 406 406 goto error; 407 407 } ··· 448 448 449 449 device_lock(&dev->dev); 450 450 451 - if (!device_is_registered(&dev->dev)) { 451 + if (dev->shutting_down) { 452 452 rc = -ENODEV; 453 453 goto error; 454 454 } ··· 495 495 496 496 device_lock(&dev->dev); 497 497 498 - if (!device_is_registered(&dev->dev)) { 498 + if (dev->shutting_down) { 499 499 rc = -ENODEV; 500 500 kfree_skb(skb); 501 501 goto error; ··· 552 552 553 553 device_lock(&dev->dev); 554 554 555 - if (!device_is_registered(&dev->dev)) { 555 + if (dev->shutting_down) { 556 556 rc = -ENODEV; 557 557 goto error; 558 558 } ··· 601 
601 602 602 device_lock(&dev->dev); 603 603 604 - if (!device_is_registered(&dev->dev)) { 604 + if (dev->shutting_down) { 605 605 rc = -ENODEV; 606 606 goto error; 607 607 } ··· 1134 1134 dev->rfkill = NULL; 1135 1135 } 1136 1136 } 1137 + dev->shutting_down = false; 1137 1138 device_unlock(&dev->dev); 1138 1139 1139 1140 rc = nfc_genl_device_added(dev); ··· 1168 1167 rfkill_destroy(dev->rfkill); 1169 1168 dev->rfkill = NULL; 1170 1169 } 1170 + dev->shutting_down = true; 1171 1171 device_unlock(&dev->dev); 1172 1172 1173 1173 if (dev->ops->check_presence) { 1174 - device_lock(&dev->dev); 1175 - dev->shutting_down = true; 1176 - device_unlock(&dev->dev); 1177 1174 del_timer_sync(&dev->check_pres_timer); 1178 1175 cancel_work_sync(&dev->check_pres_work); 1179 1176 }
+2 -2
net/nfc/netlink.c
··· 1244 1244 struct sk_buff *msg; 1245 1245 void *hdr; 1246 1246 1247 - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1247 + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 1248 1248 if (!msg) 1249 1249 return -ENOMEM; 1250 1250 ··· 1260 1260 1261 1261 genlmsg_end(msg, hdr); 1262 1262 1263 - genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_KERNEL); 1263 + genlmsg_multicast(&nfc_genl_family, msg, 0, 0, GFP_ATOMIC); 1264 1264 1265 1265 return 0; 1266 1266
+8
net/rds/tcp.c
··· 495 495 496 496 tcp_sock_set_nodelay(sock->sk); 497 497 lock_sock(sk); 498 + /* TCP timer functions might access net namespace even after 499 + * a process which created this net namespace terminated. 500 + */ 501 + if (!sk->sk_net_refcnt) { 502 + sk->sk_net_refcnt = 1; 503 + get_net_track(net, &sk->ns_tracker, GFP_KERNEL); 504 + sock_inuse_add(net, 1); 505 + } 498 506 if (rtn->sndbuf_size > 0) { 499 507 sk->sk_sndbuf = rtn->sndbuf_size; 500 508 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+3
net/rxrpc/local_object.c
··· 117 117 local, srx->transport_type, srx->transport.family); 118 118 119 119 udp_conf.family = srx->transport.family; 120 + udp_conf.use_udp_checksums = true; 120 121 if (udp_conf.family == AF_INET) { 121 122 udp_conf.local_ip = srx->transport.sin.sin_addr; 122 123 udp_conf.local_udp_port = srx->transport.sin.sin_port; ··· 125 124 } else { 126 125 udp_conf.local_ip6 = srx->transport.sin6.sin6_addr; 127 126 udp_conf.local_udp_port = srx->transport.sin6.sin6_port; 127 + udp_conf.use_udp6_tx_checksums = true; 128 + udp_conf.use_udp6_rx_checksums = true; 128 129 #endif 129 130 } 130 131 ret = udp_sock_create(net, &udp_conf, &local->socket);
+1 -1
scripts/Makefile.build
··· 231 231 $(if $(part-of-module), --module) \ 232 232 $(if $(CONFIG_X86_KERNEL_IBT), --lto --ibt) \ 233 233 $(if $(CONFIG_FRAME_POINTER),, --no-fp) \ 234 - $(if $(CONFIG_GCOV_KERNEL)$(CONFIG_LTO_CLANG), --no-unreachable)\ 234 + $(if $(CONFIG_GCOV_KERNEL), --no-unreachable) \ 235 235 $(if $(CONFIG_RETPOLINE), --retpoline) \ 236 236 $(if $(CONFIG_X86_SMAP), --uaccess) \ 237 237 $(if $(CONFIG_FTRACE_MCOUNT_USE_OBJTOOL), --mcount) \
+49 -45
tools/objtool/check.c
··· 184 184 "do_group_exit", 185 185 "stop_this_cpu", 186 186 "__invalid_creds", 187 + "cpu_startup_entry", 187 188 }; 188 189 189 190 if (!func) ··· 560 559 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 561 560 insn = find_last_insn(file, reloc->sym->sec); 562 561 if (!insn) { 563 - WARN("can't find unreachable insn at %s+0x%x", 562 + WARN("can't find unreachable insn at %s+0x%lx", 564 563 reloc->sym->sec->name, reloc->addend); 565 564 return -1; 566 565 } 567 566 } else { 568 - WARN("can't find unreachable insn at %s+0x%x", 567 + WARN("can't find unreachable insn at %s+0x%lx", 569 568 reloc->sym->sec->name, reloc->addend); 570 569 return -1; 571 570 } ··· 595 594 else if (reloc->addend == reloc->sym->sec->sh.sh_size) { 596 595 insn = find_last_insn(file, reloc->sym->sec); 597 596 if (!insn) { 598 - WARN("can't find reachable insn at %s+0x%x", 597 + WARN("can't find reachable insn at %s+0x%lx", 599 598 reloc->sym->sec->name, reloc->addend); 600 599 return -1; 601 600 } 602 601 } else { 603 - WARN("can't find reachable insn at %s+0x%x", 602 + WARN("can't find reachable insn at %s+0x%lx", 604 603 reloc->sym->sec->name, reloc->addend); 605 604 return -1; 606 605 } ··· 1272 1271 */ 1273 1272 static int add_jump_destinations(struct objtool_file *file) 1274 1273 { 1275 - struct instruction *insn; 1274 + struct instruction *insn, *jump_dest; 1276 1275 struct reloc *reloc; 1277 1276 struct section *dest_sec; 1278 1277 unsigned long dest_off; 1279 1278 1280 1279 for_each_insn(file, insn) { 1280 + if (insn->jump_dest) { 1281 + /* 1282 + * handle_group_alt() may have previously set 1283 + * 'jump_dest' for some alternatives. 
1284 + */ 1285 + continue; 1286 + } 1281 1287 if (!is_static_jump(insn)) 1282 1288 continue; 1283 1289 ··· 1299 1291 add_retpoline_call(file, insn); 1300 1292 continue; 1301 1293 } else if (insn->func) { 1302 - /* internal or external sibling call (with reloc) */ 1294 + /* 1295 + * External sibling call or internal sibling call with 1296 + * STT_FUNC reloc. 1297 + */ 1303 1298 add_call_dest(file, insn, reloc->sym, true); 1304 1299 continue; 1305 1300 } else if (reloc->sym->sec->idx) { ··· 1314 1303 continue; 1315 1304 } 1316 1305 1317 - insn->jump_dest = find_insn(file, dest_sec, dest_off); 1318 - if (!insn->jump_dest) { 1319 - 1320 - /* 1321 - * This is a special case where an alt instruction 1322 - * jumps past the end of the section. These are 1323 - * handled later in handle_group_alt(). 1324 - */ 1325 - if (!strcmp(insn->sec->name, ".altinstr_replacement")) 1326 - continue; 1327 - 1306 + jump_dest = find_insn(file, dest_sec, dest_off); 1307 + if (!jump_dest) { 1328 1308 WARN_FUNC("can't find jump dest instruction at %s+0x%lx", 1329 1309 insn->sec, insn->offset, dest_sec->name, 1330 1310 dest_off); ··· 1325 1323 /* 1326 1324 * Cross-function jump. 1327 1325 */ 1328 - if (insn->func && insn->jump_dest->func && 1329 - insn->func != insn->jump_dest->func) { 1326 + if (insn->func && jump_dest->func && 1327 + insn->func != jump_dest->func) { 1330 1328 1331 1329 /* 1332 1330 * For GCC 8+, create parent/child links for any cold ··· 1344 1342 * subfunction is through a jump table. 
1345 1343 */ 1346 1344 if (!strstr(insn->func->name, ".cold") && 1347 - strstr(insn->jump_dest->func->name, ".cold")) { 1348 - insn->func->cfunc = insn->jump_dest->func; 1349 - insn->jump_dest->func->pfunc = insn->func; 1345 + strstr(jump_dest->func->name, ".cold")) { 1346 + insn->func->cfunc = jump_dest->func; 1347 + jump_dest->func->pfunc = insn->func; 1350 1348 1351 - } else if (!same_function(insn, insn->jump_dest) && 1352 - is_first_func_insn(file, insn->jump_dest)) { 1353 - /* internal sibling call (without reloc) */ 1354 - add_call_dest(file, insn, insn->jump_dest->func, true); 1349 + } else if (!same_function(insn, jump_dest) && 1350 + is_first_func_insn(file, jump_dest)) { 1351 + /* 1352 + * Internal sibling call without reloc or with 1353 + * STT_SECTION reloc. 1354 + */ 1355 + add_call_dest(file, insn, jump_dest->func, true); 1356 + continue; 1355 1357 } 1356 1358 } 1359 + 1360 + insn->jump_dest = jump_dest; 1357 1361 } 1358 1362 1359 1363 return 0; ··· 1548 1540 continue; 1549 1541 1550 1542 dest_off = arch_jump_destination(insn); 1551 - if (dest_off == special_alt->new_off + special_alt->new_len) 1543 + if (dest_off == special_alt->new_off + special_alt->new_len) { 1552 1544 insn->jump_dest = next_insn_same_sec(file, last_orig_insn); 1553 - 1554 - if (!insn->jump_dest) { 1555 - WARN_FUNC("can't find alternative jump destination", 1556 - insn->sec, insn->offset); 1557 - return -1; 1545 + if (!insn->jump_dest) { 1546 + WARN_FUNC("can't find alternative jump destination", 1547 + insn->sec, insn->offset); 1548 + return -1; 1549 + } 1558 1550 } 1559 1551 } 1560 1552 ··· 2253 2245 return ret; 2254 2246 2255 2247 /* 2256 - * Must be before add_special_section_alts() as that depends on 2257 - * jump_dest being set. 2248 + * Must be before add_jump_destinations(), which depends on 'func' 2249 + * being set for alternatives, to enable proper sibling call detection. 
2258 2250 */ 2259 - ret = add_jump_destinations(file); 2251 + ret = add_special_section_alts(file); 2260 2252 if (ret) 2261 2253 return ret; 2262 2254 2263 - ret = add_special_section_alts(file); 2255 + ret = add_jump_destinations(file); 2264 2256 if (ret) 2265 2257 return ret; 2266 2258 ··· 3218 3210 static void warn_noendbr(const char *msg, struct section *sec, unsigned long offset, 3219 3211 struct instruction *dest) 3220 3212 { 3221 - WARN_FUNC("%srelocation to !ENDBR: %s+0x%lx", sec, offset, msg, 3222 - dest->func ? dest->func->name : dest->sec->name, 3223 - dest->func ? dest->offset - dest->func->offset : dest->offset); 3213 + WARN_FUNC("%srelocation to !ENDBR: %s", sec, offset, msg, 3214 + offstr(dest->sec, dest->offset)); 3224 3215 } 3225 3216 3226 3217 static void validate_ibt_dest(struct objtool_file *file, struct instruction *insn, ··· 3310 3303 while (1) { 3311 3304 next_insn = next_insn_to_validate(file, insn); 3312 3305 3313 - if (file->c_file && func && insn->func && func != insn->func->pfunc) { 3306 + if (func && insn->func && func != insn->func->pfunc) { 3314 3307 WARN("%s() falls through to next function %s()", 3315 3308 func->name, insn->func->name); 3316 3309 return 1; ··· 3823 3816 struct instruction *dest; 3824 3817 3825 3818 dest = validate_ibt_reloc(file, reloc); 3826 - if (is_data && dest && !dest->noendbr) { 3827 - warn_noendbr("data ", reloc->sym->sec, 3828 - reloc->sym->offset + reloc->addend, 3829 - dest); 3830 - } 3819 + if (is_data && dest && !dest->noendbr) 3820 + warn_noendbr("data ", sec, reloc->offset, dest); 3831 3821 } 3832 3822 } 3833 3823
+166 -23
tools/objtool/elf.c
··· 546 546 int reltype); 547 547 548 548 int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset, 549 - unsigned int type, struct symbol *sym, int addend) 549 + unsigned int type, struct symbol *sym, long addend) 550 550 { 551 551 struct reloc *reloc; 552 552 ··· 575 575 return 0; 576 576 } 577 577 578 + /* 579 + * Ensure that any reloc section containing references to @sym is marked 580 + * changed such that it will get re-generated in elf_rebuild_reloc_sections() 581 + * with the new symbol index. 582 + */ 583 + static void elf_dirty_reloc_sym(struct elf *elf, struct symbol *sym) 584 + { 585 + struct section *sec; 586 + 587 + list_for_each_entry(sec, &elf->sections, list) { 588 + struct reloc *reloc; 589 + 590 + if (sec->changed) 591 + continue; 592 + 593 + list_for_each_entry(reloc, &sec->reloc_list, list) { 594 + if (reloc->sym == sym) { 595 + sec->changed = true; 596 + break; 597 + } 598 + } 599 + } 600 + } 601 + 602 + /* 603 + * Move the first global symbol, as per sh_info, into a new, higher symbol 604 + * index. This fees up the shndx for a new local symbol. 
605 + */ 606 + static int elf_move_global_symbol(struct elf *elf, struct section *symtab, 607 + struct section *symtab_shndx) 608 + { 609 + Elf_Data *data, *shndx_data = NULL; 610 + Elf32_Word first_non_local; 611 + struct symbol *sym; 612 + Elf_Scn *s; 613 + 614 + first_non_local = symtab->sh.sh_info; 615 + 616 + sym = find_symbol_by_index(elf, first_non_local); 617 + if (!sym) { 618 + WARN("no non-local symbols !?"); 619 + return first_non_local; 620 + } 621 + 622 + s = elf_getscn(elf->elf, symtab->idx); 623 + if (!s) { 624 + WARN_ELF("elf_getscn"); 625 + return -1; 626 + } 627 + 628 + data = elf_newdata(s); 629 + if (!data) { 630 + WARN_ELF("elf_newdata"); 631 + return -1; 632 + } 633 + 634 + data->d_buf = &sym->sym; 635 + data->d_size = sizeof(sym->sym); 636 + data->d_align = 1; 637 + data->d_type = ELF_T_SYM; 638 + 639 + sym->idx = symtab->sh.sh_size / sizeof(sym->sym); 640 + elf_dirty_reloc_sym(elf, sym); 641 + 642 + symtab->sh.sh_info += 1; 643 + symtab->sh.sh_size += data->d_size; 644 + symtab->changed = true; 645 + 646 + if (symtab_shndx) { 647 + s = elf_getscn(elf->elf, symtab_shndx->idx); 648 + if (!s) { 649 + WARN_ELF("elf_getscn"); 650 + return -1; 651 + } 652 + 653 + shndx_data = elf_newdata(s); 654 + if (!shndx_data) { 655 + WARN_ELF("elf_newshndx_data"); 656 + return -1; 657 + } 658 + 659 + shndx_data->d_buf = &sym->sec->idx; 660 + shndx_data->d_size = sizeof(Elf32_Word); 661 + shndx_data->d_align = 4; 662 + shndx_data->d_type = ELF_T_WORD; 663 + 664 + symtab_shndx->sh.sh_size += 4; 665 + symtab_shndx->changed = true; 666 + } 667 + 668 + return first_non_local; 669 + } 670 + 671 + static struct symbol * 672 + elf_create_section_symbol(struct elf *elf, struct section *sec) 673 + { 674 + struct section *symtab, *symtab_shndx; 675 + Elf_Data *shndx_data = NULL; 676 + struct symbol *sym; 677 + Elf32_Word shndx; 678 + 679 + symtab = find_section_by_name(elf, ".symtab"); 680 + if (symtab) { 681 + symtab_shndx = find_section_by_name(elf, ".symtab_shndx"); 
682 + if (symtab_shndx) 683 + shndx_data = symtab_shndx->data; 684 + } else { 685 + WARN("no .symtab"); 686 + return NULL; 687 + } 688 + 689 + sym = malloc(sizeof(*sym)); 690 + if (!sym) { 691 + perror("malloc"); 692 + return NULL; 693 + } 694 + memset(sym, 0, sizeof(*sym)); 695 + 696 + sym->idx = elf_move_global_symbol(elf, symtab, symtab_shndx); 697 + if (sym->idx < 0) { 698 + WARN("elf_move_global_symbol"); 699 + return NULL; 700 + } 701 + 702 + sym->name = sec->name; 703 + sym->sec = sec; 704 + 705 + // st_name 0 706 + sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION); 707 + // st_other 0 708 + // st_value 0 709 + // st_size 0 710 + shndx = sec->idx; 711 + if (shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) { 712 + sym->sym.st_shndx = shndx; 713 + if (!shndx_data) 714 + shndx = 0; 715 + } else { 716 + sym->sym.st_shndx = SHN_XINDEX; 717 + if (!shndx_data) { 718 + WARN("no .symtab_shndx"); 719 + return NULL; 720 + } 721 + } 722 + 723 + if (!gelf_update_symshndx(symtab->data, shndx_data, sym->idx, &sym->sym, shndx)) { 724 + WARN_ELF("gelf_update_symshndx"); 725 + return NULL; 726 + } 727 + 728 + elf_add_symbol(elf, sym); 729 + 730 + return sym; 731 + } 732 + 578 733 int elf_add_reloc_to_insn(struct elf *elf, struct section *sec, 579 734 unsigned long offset, unsigned int type, 580 735 struct section *insn_sec, unsigned long insn_off) 581 736 { 582 - struct symbol *sym; 583 - int addend; 737 + struct symbol *sym = insn_sec->sym; 738 + int addend = insn_off; 584 739 585 - if (insn_sec->sym) { 586 - sym = insn_sec->sym; 587 - addend = insn_off; 588 - 589 - } else { 740 + if (!sym) { 590 741 /* 591 - * The Clang assembler strips section symbols, so we have to 592 - * reference the function symbol instead: 742 + * Due to how weak functions work, we must use section based 743 + * relocations. Symbol based relocations would result in the 744 + * weak and non-weak function annotations being overlaid on the 745 + * non-weak function after linking. 
593 746 */ 594 - sym = find_symbol_containing(insn_sec, insn_off); 595 - if (!sym) { 596 - /* 597 - * Hack alert. This happens when we need to reference 598 - * the NOP pad insn immediately after the function. 599 - */ 600 - sym = find_symbol_containing(insn_sec, insn_off - 1); 601 - } 602 - 603 - if (!sym) { 604 - WARN("can't find symbol containing %s+0x%lx", insn_sec->name, insn_off); 747 + sym = elf_create_section_symbol(elf, insn_sec); 748 + if (!sym) 605 749 return -1; 606 - } 607 750 608 - addend = insn_off - sym->offset; 751 + insn_sec->sym = sym; 609 752 } 610 753 611 754 return elf_add_reloc(elf, sec, offset, type, sym, addend);
+2 -2
tools/objtool/include/objtool/elf.h
··· 73 73 struct symbol *sym; 74 74 unsigned long offset; 75 75 unsigned int type; 76 - int addend; 76 + long addend; 77 77 int idx; 78 78 bool jump_table_start; 79 79 }; ··· 135 135 struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr); 136 136 137 137 int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset, 138 - unsigned int type, struct symbol *sym, int addend); 138 + unsigned int type, struct symbol *sym, long addend); 139 139 int elf_add_reloc_to_insn(struct elf *elf, struct section *sec, 140 140 unsigned long offset, unsigned int type, 141 141 struct section *insn_sec, unsigned long insn_off);
+1 -1
tools/objtool/include/objtool/objtool.h
··· 27 27 struct list_head static_call_list; 28 28 struct list_head mcount_loc_list; 29 29 struct list_head endbr_list; 30 - bool ignore_unreachables, c_file, hints, rodata; 30 + bool ignore_unreachables, hints, rodata; 31 31 32 32 unsigned int nr_endbr; 33 33 unsigned int nr_endbr_int;
-1
tools/objtool/objtool.c
··· 129 129 INIT_LIST_HEAD(&file.static_call_list); 130 130 INIT_LIST_HEAD(&file.mcount_loc_list); 131 131 INIT_LIST_HEAD(&file.endbr_list); 132 - file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment"); 133 132 file.ignore_unreachables = no_unreachable; 134 133 file.hints = false; 135 134
+10
tools/perf/arch/arm64/util/arm-spe.c
··· 148 148 bool privileged = perf_event_paranoid_check(-1); 149 149 struct evsel *tracking_evsel; 150 150 int err; 151 + u64 bit; 151 152 152 153 sper->evlist = evlist; 153 154 ··· 245 244 * on the opening of the event or the SPE data produced. 246 245 */ 247 246 evsel__set_sample_bit(arm_spe_evsel, DATA_SRC); 247 + 248 + /* 249 + * The PHYS_ADDR flag does not affect the driver behaviour, it is used to 250 + * inform that the resulting output's SPE samples contain physical addresses 251 + * where applicable. 252 + */ 253 + bit = perf_pmu__format_bits(&arm_spe_pmu->format, "pa_enable"); 254 + if (arm_spe_evsel->core.attr.config & bit) 255 + evsel__set_sample_bit(arm_spe_evsel, PHYS_ADDR); 248 256 249 257 /* Add dummy event to keep tracking */ 250 258 err = parse_events(evlist, "dummy:u", NULL);
-21
tools/perf/arch/arm64/util/machine.c
··· 8 8 #include "callchain.h" 9 9 #include "record.h" 10 10 11 - /* On arm64, kernel text segment starts at high memory address, 12 - * for example 0xffff 0000 8xxx xxxx. Modules start at a low memory 13 - * address, like 0xffff 0000 00ax xxxx. When only small amount of 14 - * memory is used by modules, gap between end of module's text segment 15 - * and start of kernel text segment may reach 2G. 16 - * Therefore do not fill this gap and do not assign it to the kernel dso map. 17 - */ 18 - 19 - #define SYMBOL_LIMIT (1 << 12) /* 4K */ 20 - 21 - void arch__symbols__fixup_end(struct symbol *p, struct symbol *c) 22 - { 23 - if ((strchr(p->name, '[') && strchr(c->name, '[') == NULL) || 24 - (strchr(p->name, '[') == NULL && strchr(c->name, '['))) 25 - /* Limit range of last symbol in module and kernel */ 26 - p->end += SYMBOL_LIMIT; 27 - else 28 - p->end = c->start; 29 - pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end); 30 - } 31 - 32 11 void arch__add_leaf_frame_record_opts(struct record_opts *opts) 33 12 { 34 13 opts->sample_user_regs |= sample_reg_masks[PERF_REG_ARM64_LR].mask;
-1
tools/perf/arch/powerpc/util/Build
··· 1 1 perf-y += header.o 2 - perf-y += machine.o 3 2 perf-y += kvm-stat.o 4 3 perf-y += perf_regs.o 5 4 perf-y += mem-events.o
-25
tools/perf/arch/powerpc/util/machine.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - 3 - #include <inttypes.h> 4 - #include <stdio.h> 5 - #include <string.h> 6 - #include <internal/lib.h> // page_size 7 - #include "debug.h" 8 - #include "symbol.h" 9 - 10 - /* On powerpc kernel text segment start at memory addresses, 0xc000000000000000 11 - * whereas the modules are located at very high memory addresses, 12 - * for example 0xc00800000xxxxxxx. The gap between end of kernel text segment 13 - * and beginning of first module's text segment is very high. 14 - * Therefore do not fill this gap and do not assign it to the kernel dso map. 15 - */ 16 - 17 - void arch__symbols__fixup_end(struct symbol *p, struct symbol *c) 18 - { 19 - if (strchr(p->name, '[') == NULL && strchr(c->name, '[')) 20 - /* Limit the range of last kernel symbol */ 21 - p->end += page_size; 22 - else 23 - p->end = c->start; 24 - pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end); 25 - }
-16
tools/perf/arch/s390/util/machine.c
··· 35 35 36 36 return 0; 37 37 } 38 - 39 - /* On s390 kernel text segment start is located at very low memory addresses, 40 - * for example 0x10000. Modules are located at very high memory addresses, 41 - * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment 42 - * and beginning of first module's text segment is very big. 43 - * Therefore do not fill this gap and do not assign it to the kernel dso map. 44 - */ 45 - void arch__symbols__fixup_end(struct symbol *p, struct symbol *c) 46 - { 47 - if (strchr(p->name, '[') == NULL && strchr(c->name, '[')) 48 - /* Last kernel symbol mapped to end of page */ 49 - p->end = roundup(p->end, page_size); 50 - else 51 - p->end = c->start; 52 - pr_debug4("%s sym:%s end:%#" PRIx64 "\n", __func__, p->name, p->end); 53 - }
+1
tools/perf/tests/attr/README
··· 60 60 perf record -R kill (test-record-raw) 61 61 perf record -c 2 -e arm_spe_0// -- kill (test-record-spe-period) 62 62 perf record -e arm_spe_0/period=3/ -- kill (test-record-spe-period-term) 63 + perf record -e arm_spe_0/pa_enable=1/ -- kill (test-record-spe-physical-address) 63 64 perf stat -e cycles kill (test-stat-basic) 64 65 perf stat kill (test-stat-default) 65 66 perf stat -d kill (test-stat-detailed-1)
+12
tools/perf/tests/attr/test-record-spe-physical-address
··· 1 + [config] 2 + command = record 3 + args = --no-bpf-event -e arm_spe_0/pa_enable=1/ -- kill >/dev/null 2>&1 4 + ret = 1 5 + arch = aarch64 6 + 7 + [event-10:base-record-spe] 8 + # 622727 is the decimal of IP|TID|TIME|CPU|IDENTIFIER|DATA_SRC|PHYS_ADDR 9 + sample_type=622727 10 + 11 + # dummy event 12 + [event-1:base-record-spe]
+3 -2
tools/perf/util/arm-spe.c
··· 1033 1033 memset(&attr, 0, sizeof(struct perf_event_attr)); 1034 1034 attr.size = sizeof(struct perf_event_attr); 1035 1035 attr.type = PERF_TYPE_HARDWARE; 1036 - attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK; 1036 + attr.sample_type = evsel->core.attr.sample_type & 1037 + (PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR); 1037 1038 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | 1038 1039 PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC | 1039 - PERF_SAMPLE_WEIGHT; 1040 + PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR; 1040 1041 if (spe->timeless_decoding) 1041 1042 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; 1042 1043 else
+1 -1
tools/perf/util/session.c
··· 2576 2576 if (perf_data__is_pipe(session->data)) 2577 2577 return __perf_session__process_pipe_events(session); 2578 2578 2579 - if (perf_data__is_dir(session->data)) 2579 + if (perf_data__is_dir(session->data) && session->data->dir.nr) 2580 2580 return __perf_session__process_dir_events(session); 2581 2581 2582 2582 return __perf_session__process_events(session);
+1 -1
tools/perf/util/symbol-elf.c
··· 1290 1290 * For misannotated, zeroed, ASM function sizes. 1291 1291 */ 1292 1292 if (nr > 0) { 1293 - symbols__fixup_end(&dso->symbols); 1293 + symbols__fixup_end(&dso->symbols, false); 1294 1294 symbols__fixup_duplicate(&dso->symbols); 1295 1295 if (kmap) { 1296 1296 /*
+27 -10
tools/perf/util/symbol.c
··· 101 101 return tail - str; 102 102 } 103 103 104 - void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c) 105 - { 106 - p->end = c->start; 107 - } 108 - 109 104 const char * __weak arch__normalize_symbol_name(const char *name) 110 105 { 111 106 return name; ··· 212 217 } 213 218 } 214 219 215 - void symbols__fixup_end(struct rb_root_cached *symbols) 220 + /* Update zero-sized symbols using the address of the next symbol */ 221 + void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms) 216 222 { 217 223 struct rb_node *nd, *prevnd = rb_first_cached(symbols); 218 224 struct symbol *curr, *prev; ··· 227 231 prev = curr; 228 232 curr = rb_entry(nd, struct symbol, rb_node); 229 233 230 - if (prev->end == prev->start || prev->end != curr->start) 231 - arch__symbols__fixup_end(prev, curr); 234 + /* 235 + * On some architecture kernel text segment start is located at 236 + * some low memory address, while modules are located at high 237 + * memory addresses (or vice versa). The gap between end of 238 + * kernel text segment and beginning of first module's text 239 + * segment is very big. Therefore do not fill this gap and do 240 + * not assign it to the kernel dso map (kallsyms). 
241 + * 242 + * In kallsyms, it determines module symbols using '[' character 243 + * like in: 244 + * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi] 245 + */ 246 + if (prev->end == prev->start) { 247 + /* Last kernel/module symbol mapped to end of page */ 248 + if (is_kallsyms && (!strchr(prev->name, '[') != 249 + !strchr(curr->name, '['))) 250 + prev->end = roundup(prev->end + 4096, 4096); 251 + else 252 + prev->end = curr->start; 253 + 254 + pr_debug4("%s sym:%s end:%#" PRIx64 "\n", 255 + __func__, prev->name, prev->end); 256 + } 232 257 } 233 258 234 259 /* Last entry */ ··· 1484 1467 if (kallsyms__delta(kmap, filename, &delta)) 1485 1468 return -1; 1486 1469 1487 - symbols__fixup_end(&dso->symbols); 1470 + symbols__fixup_end(&dso->symbols, true); 1488 1471 symbols__fixup_duplicate(&dso->symbols); 1489 1472 1490 1473 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) ··· 1676 1659 #undef bfd_asymbol_section 1677 1660 #endif 1678 1661 1679 - symbols__fixup_end(&dso->symbols); 1662 + symbols__fixup_end(&dso->symbols, false); 1680 1663 symbols__fixup_duplicate(&dso->symbols); 1681 1664 dso->adjust_symbols = 1; 1682 1665
+1 -2
tools/perf/util/symbol.h
··· 203 203 bool kernel); 204 204 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym); 205 205 void symbols__fixup_duplicate(struct rb_root_cached *symbols); 206 - void symbols__fixup_end(struct rb_root_cached *symbols); 206 + void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms); 207 207 void maps__fixup_end(struct maps *maps); 208 208 209 209 typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); ··· 241 241 #define SYMBOL_A 0 242 242 #define SYMBOL_B 1 243 243 244 - void arch__symbols__fixup_end(struct symbol *p, struct symbol *c); 245 244 int arch__compare_symbol_names(const char *namea, const char *nameb); 246 245 int arch__compare_symbol_names_n(const char *namea, const char *nameb, 247 246 unsigned int n);
+1 -1
tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
··· 190 190 191 191 tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \ 192 192 protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \ 193 - action police rate 50mbit burst 64k \ 193 + action police rate 50mbit burst 64k conform-exceed drop/pipe \ 194 194 action goto chain $(IS2 1 0) 195 195 } 196 196
+2 -1
tools/testing/selftests/net/Makefile
··· 30 30 TEST_PROGS += gro.sh 31 31 TEST_PROGS += gre_gso.sh 32 32 TEST_PROGS += cmsg_so_mark.sh 33 - TEST_PROGS += cmsg_time.sh 33 + TEST_PROGS += cmsg_time.sh cmsg_ipv6.sh 34 34 TEST_PROGS += srv6_end_dt46_l3vpn_test.sh 35 35 TEST_PROGS += srv6_end_dt4_l3vpn_test.sh 36 36 TEST_PROGS += srv6_end_dt6_l3vpn_test.sh ··· 55 55 TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls 56 56 TEST_GEN_FILES += toeplitz 57 57 TEST_GEN_FILES += cmsg_sender 58 + TEST_PROGS += test_vxlan_vnifiltering.sh 58 59 59 60 TEST_FILES := settings 60 61
+33
tools/testing/selftests/net/forwarding/Makefile
··· 3 3 TEST_PROGS = bridge_igmp.sh \ 4 4 bridge_locked_port.sh \ 5 5 bridge_mdb.sh \ 6 + bridge_mld.sh \ 6 7 bridge_port_isolation.sh \ 7 8 bridge_sticky_fdb.sh \ 8 9 bridge_vlan_aware.sh \ 10 + bridge_vlan_mcast.sh \ 9 11 bridge_vlan_unaware.sh \ 12 + custom_multipath_hash.sh \ 13 + dual_vxlan_bridge.sh \ 14 + ethtool_extended_state.sh \ 10 15 ethtool.sh \ 16 + gre_custom_multipath_hash.sh \ 11 17 gre_inner_v4_multipath.sh \ 12 18 gre_inner_v6_multipath.sh \ 19 + gre_multipath_nh_res.sh \ 20 + gre_multipath_nh.sh \ 13 21 gre_multipath.sh \ 22 + hw_stats_l3.sh \ 14 23 ip6_forward_instats_vrf.sh \ 24 + ip6gre_custom_multipath_hash.sh \ 25 + ip6gre_flat_key.sh \ 26 + ip6gre_flat_keys.sh \ 27 + ip6gre_flat.sh \ 28 + ip6gre_hier_key.sh \ 29 + ip6gre_hier_keys.sh \ 30 + ip6gre_hier.sh \ 15 31 ip6gre_inner_v4_multipath.sh \ 16 32 ip6gre_inner_v6_multipath.sh \ 17 33 ipip_flat_gre_key.sh \ ··· 51 35 mirror_gre_vlan_bridge_1q.sh \ 52 36 mirror_gre_vlan.sh \ 53 37 mirror_vlan.sh \ 38 + pedit_dsfield.sh \ 39 + pedit_ip.sh \ 40 + pedit_l4port.sh \ 41 + q_in_vni_ipv6.sh \ 42 + q_in_vni.sh \ 54 43 router_bridge.sh \ 55 44 router_bridge_vlan.sh \ 56 45 router_broadcast.sh \ 46 + router_mpath_nh_res.sh \ 57 47 router_mpath_nh.sh \ 58 48 router_multicast.sh \ 59 49 router_multipath.sh \ 50 + router_nh.sh \ 60 51 router.sh \ 61 52 router_vid_1.sh \ 62 53 sch_ets.sh \ 54 + sch_red.sh \ 63 55 sch_tbf_ets.sh \ 64 56 sch_tbf_prio.sh \ 65 57 sch_tbf_root.sh \ 58 + skbedit_priority.sh \ 66 59 tc_actions.sh \ 67 60 tc_chains.sh \ 68 61 tc_flower_router.sh \ 69 62 tc_flower.sh \ 70 63 tc_mpls_l2vpn.sh \ 64 + tc_police.sh \ 71 65 tc_shblocks.sh \ 72 66 tc_vlan_modify.sh \ 67 + vxlan_asymmetric_ipv6.sh \ 73 68 vxlan_asymmetric.sh \ 69 + vxlan_bridge_1d_ipv6.sh \ 70 + vxlan_bridge_1d_port_8472_ipv6.sh \ 74 71 vxlan_bridge_1d_port_8472.sh \ 75 72 vxlan_bridge_1d.sh \ 73 + vxlan_bridge_1q_ipv6.sh \ 74 + vxlan_bridge_1q_port_8472_ipv6.sh 76 75 vxlan_bridge_1q_port_8472.sh \ 77 76 
vxlan_bridge_1q.sh \ 77 + vxlan_symmetric_ipv6.sh \ 78 78 vxlan_symmetric.sh 79 79 80 80 TEST_PROGS_EXTENDED := devlink_lib.sh \ 81 81 ethtool_lib.sh \ 82 82 fib_offload_lib.sh \ 83 83 forwarding.config.sample \ 84 + ip6gre_lib.sh \ 84 85 ipip_lib.sh \ 85 86 lib.sh \ 86 87 mirror_gre_lib.sh \
+3
tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh
··· 61 61 62 62 vrf_prepare 63 63 mirror_gre_topo_create 64 + # Avoid changing br1's PVID while it is operational as a L3 interface. 65 + ip link set dev br1 down 64 66 65 67 ip link set dev $swp3 master br1 66 68 bridge vlan add dev br1 vid 555 pvid untagged self 69 + ip link set dev br1 up 67 70 ip address add dev br1 192.0.2.129/28 68 71 ip address add dev br1 2001:db8:2::1/64 69 72
+2 -2
tools/testing/selftests/net/so_txtime.c
··· 421 421 "Options:\n" 422 422 " -4 only IPv4\n" 423 423 " -6 only IPv6\n" 424 - " -c <clock> monotonic (default) or tai\n" 424 + " -c <clock> monotonic or tai (default)\n" 425 425 " -D <addr> destination IP address (server)\n" 426 426 " -S <addr> source IP address (client)\n" 427 427 " -r run rx mode\n" ··· 475 475 cfg_rx = true; 476 476 break; 477 477 case 't': 478 - cfg_start_time_ns = strtol(optarg, NULL, 0); 478 + cfg_start_time_ns = strtoll(optarg, NULL, 0); 479 479 break; 480 480 case 'm': 481 481 cfg_mark = strtol(optarg, NULL, 0);
+5 -5
tools/testing/selftests/seccomp/seccomp_bpf.c
··· 955 955 ASSERT_EQ(0, ret); 956 956 957 957 EXPECT_EQ(parent, syscall(__NR_getppid)); 958 - EXPECT_EQ(-1, read(0, NULL, 0)); 958 + EXPECT_EQ(-1, read(-1, NULL, 0)); 959 959 EXPECT_EQ(E2BIG, errno); 960 960 } 961 961 ··· 974 974 975 975 EXPECT_EQ(parent, syscall(__NR_getppid)); 976 976 /* "errno" of 0 is ok. */ 977 - EXPECT_EQ(0, read(0, NULL, 0)); 977 + EXPECT_EQ(0, read(-1, NULL, 0)); 978 978 } 979 979 980 980 /* ··· 995 995 ASSERT_EQ(0, ret); 996 996 997 997 EXPECT_EQ(parent, syscall(__NR_getppid)); 998 - EXPECT_EQ(-1, read(0, NULL, 0)); 998 + EXPECT_EQ(-1, read(-1, NULL, 0)); 999 999 EXPECT_EQ(4095, errno); 1000 1000 } 1001 1001 ··· 1026 1026 ASSERT_EQ(0, ret); 1027 1027 1028 1028 EXPECT_EQ(parent, syscall(__NR_getppid)); 1029 - EXPECT_EQ(-1, read(0, NULL, 0)); 1029 + EXPECT_EQ(-1, read(-1, NULL, 0)); 1030 1030 EXPECT_EQ(12, errno); 1031 1031 } 1032 1032 ··· 2623 2623 ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); 2624 2624 if (!ret) 2625 2625 return (void *)SIBLING_EXIT_NEWPRIVS; 2626 - read(0, NULL, 0); 2626 + read(-1, NULL, 0); 2627 2627 return (void *)SIBLING_EXIT_UNKILLED; 2628 2628 } 2629 2629
+23 -11
tools/testing/selftests/wireguard/netns.sh
··· 22 22 # interfaces in $ns1 and $ns2. See https://www.wireguard.com/netns/ for further 23 23 # details on how this is accomplished. 24 24 set -e 25 + shopt -s extglob 25 26 26 27 exec 3>&1 27 28 export LANG=C 28 29 export WG_HIDE_KEYS=never 30 + NPROC=( /sys/devices/system/cpu/cpu+([0-9]) ); NPROC=${#NPROC[@]} 29 31 netns0="wg-test-$$-0" 30 32 netns1="wg-test-$$-1" 31 33 netns2="wg-test-$$-2" ··· 145 143 n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 146 144 147 145 # TCP over IPv4, in parallel 148 - for max in 4 5 50; do 149 - local pids=( ) 150 - for ((i=0; i < max; ++i)) do 151 - n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 & 152 - pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i )) 153 - done 154 - for ((i=0; i < max; ++i)) do 155 - n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 & 156 - done 157 - wait "${pids[@]}" 146 + local pids=( ) i 147 + for ((i=0; i < NPROC; ++i)) do 148 + n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 & 149 + pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i )) 158 150 done 151 + for ((i=0; i < NPROC; ++i)) do 152 + n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 & 153 + done 154 + wait "${pids[@]}" 159 155 } 160 156 161 157 [[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}" ··· 280 280 ! n0 ping -W 1 -c 10 -f 192.168.241.2 || false 281 281 sleep 1 282 282 read _ _ tx_bytes_after < <(n0 wg show wg1 transfer) 283 - (( tx_bytes_after - tx_bytes_before < 70000 )) 283 + if ! (( tx_bytes_after - tx_bytes_before < 70000 )); then 284 + errstart=$'\x1b[37m\x1b[41m\x1b[1m' 285 + errend=$'\x1b[0m' 286 + echo "${errstart} ${errend}" 287 + echo "${errstart} E R R O R ${errend}" 288 + echo "${errstart} ${errend}" 289 + echo "${errstart} This architecture does not do the right thing ${errend}" 290 + echo "${errstart} with cross-namespace routing loops. 
This test ${errend}" 291 + echo "${errstart} has thus technically failed but, as this issue ${errend}" 292 + echo "${errstart} is as yet unsolved, these tests will continue ${errend}" 293 + echo "${errstart} onward. :( ${errend}" 294 + echo "${errstart} ${errend}" 295 + fi 284 296 285 297 ip0 link del wg1 286 298 ip1 link del wg0
+1
tools/testing/selftests/wireguard/qemu/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 build/ 3 3 distfiles/ 4 + ccache/
+137 -70
tools/testing/selftests/wireguard/qemu/Makefile
··· 4 4 5 5 PWD := $(shell pwd) 6 6 7 - CHOST := $(shell gcc -dumpmachine) 8 - HOST_ARCH := $(firstword $(subst -, ,$(CHOST))) 9 - ifneq (,$(ARCH)) 10 - CBUILD := $(subst -gcc,,$(lastword $(subst /, ,$(firstword $(wildcard $(foreach bindir,$(subst :, ,$(PATH)),$(bindir)/$(ARCH)-*-gcc)))))) 11 - ifeq (,$(CBUILD)) 12 - $(error The toolchain for $(ARCH) is not installed) 13 - endif 14 - else 15 - CBUILD := $(CHOST) 16 - ARCH := $(firstword $(subst -, ,$(CBUILD))) 17 - endif 18 - 19 7 # Set these from the environment to override 20 8 KERNEL_PATH ?= $(PWD)/../../../../.. 21 9 BUILD_PATH ?= $(PWD)/build/$(ARCH) 22 10 DISTFILES_PATH ?= $(PWD)/distfiles 23 11 NR_CPUS ?= 4 12 + ARCH ?= 13 + CBUILD := $(shell gcc -dumpmachine) 14 + HOST_ARCH := $(firstword $(subst -, ,$(CBUILD))) 15 + ifeq ($(ARCH),) 16 + ARCH := $(HOST_ARCH) 17 + endif 24 18 25 19 MIRROR := https://download.wireguard.com/qemu-test/distfiles/ 20 + 21 + KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) 22 + rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) 23 + WIREGUARD_SOURCES := $(call rwildcard,$(KERNEL_PATH)/drivers/net/wireguard/,*) 26 24 27 25 default: qemu 28 26 ··· 34 36 endef 35 37 36 38 define file_download = 37 - $(DISTFILES_PATH)/$(1): 39 + $(DISTFILES_PATH)/$(1): | $(4) 38 40 mkdir -p $(DISTFILES_PATH) 39 - flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp; [ -f $$@.tmp ] || exit 1; if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi' 41 + flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp; [ -f $$@.tmp ] || exit 1; if ([ -n "$(4)" ] && sed -n "s#^\([a-f0-9]\{64\}\) \($(1)\)\$$$$#\1 $(DISTFILES_PATH)/\2.tmp#p" "$(4)" || echo "$(3) $$@.tmp") | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi' 40 42 endef 41 43 42 - $(eval 
$(call tar_download,MUSL,musl,1.2.0,.tar.gz,https://musl.libc.org/releases/,c6de7b191139142d3f9a7b5b702c9cae1b5ee6e7f57e582da9328629408fd4e8)) 43 - $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) 44 - $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) 45 - $(eval $(call tar_download,IPROUTE2,iproute2,5.6.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,1b5b0e25ce6e23da7526ea1da044e814ad85ba761b10dd29c2b027c056b04692)) 46 - $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) 47 - $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) 44 + $(eval $(call tar_download,IPERF,iperf,3.11,.tar.gz,https://downloads.es.net/pub/iperf/,de8cb409fad61a0574f4cb07eb19ce1159707403ac2dc01b5d175e91240b7e5f)) 45 + $(eval $(call tar_download,BASH,bash,5.1.16,.tar.gz,https://ftp.gnu.org/gnu/bash/,5bac17218d3911834520dad13cd1f85ab944e1c09ae1aba55906be1f8192f558)) 46 + $(eval $(call tar_download,IPROUTE2,iproute2,5.17.0,.tar.gz,https://www.kernel.org/pub/linux/utils/net/iproute2/,bda331d5c4606138892f23a565d78fca18919b4d508a0b7ca8391c2da2db68b9)) 47 + $(eval $(call tar_download,IPTABLES,iptables,1.8.7,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,c109c96bb04998cd44156622d36f8e04b140701ec60531a10668cfdff5e8d8f0)) 48 + $(eval $(call tar_download,NMAP,nmap,7.92,.tgz,https://nmap.org/dist/,064183ea642dc4c12b1ab3b5358ce1cef7d2e7e11ffa2849f16d339f5b717117)) 48 49 $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) 49 - $(eval 
$(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20200206,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,f5207248c6a3c3e3bfc9ab30b91c1897b00802ed861e1f9faaed873366078c64)) 50 + $(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20210914,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,97ff31489217bb265b7ae850d3d0f335ab07d2652ba1feec88b734bc96bd05ac)) 50 51 51 - KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) 52 - rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) 53 - WIREGUARD_SOURCES := $(call rwildcard,$(KERNEL_PATH)/drivers/net/wireguard/,*) 54 - 55 - export CFLAGS ?= -O3 -pipe 56 - export LDFLAGS ?= 57 - export CPPFLAGS := -I$(BUILD_PATH)/include 58 - 52 + export CFLAGS := -O3 -pipe 59 53 ifeq ($(HOST_ARCH),$(ARCH)) 60 - CROSS_COMPILE_FLAG := --host=$(CHOST) 61 54 CFLAGS += -march=native 62 - STRIP := strip 63 - else 64 - $(info Cross compilation: building for $(CBUILD) using $(CHOST)) 65 - CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST) 66 - export CROSS_COMPILE=$(CBUILD)- 67 - STRIP := $(CBUILD)-strip 68 55 endif 56 + export LDFLAGS := 57 + export CPPFLAGS := 58 + 59 + QEMU_VPORT_RESULT := 69 60 ifeq ($(ARCH),aarch64) 61 + CHOST := aarch64-linux-musl 70 62 QEMU_ARCH := aarch64 71 63 KERNEL_ARCH := arm64 72 64 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image 65 + QEMU_VPORT_RESULT := virtio-serial-device 73 66 ifeq ($(HOST_ARCH),$(ARCH)) 74 67 QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm 75 68 else ··· 68 79 CFLAGS += -march=armv8-a -mtune=cortex-a53 69 80 endif 70 81 else ifeq ($(ARCH),aarch64_be) 82 + CHOST := aarch64_be-linux-musl 71 83 QEMU_ARCH := aarch64 72 84 KERNEL_ARCH := arm64 73 85 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image 86 + QEMU_VPORT_RESULT := virtio-serial-device 74 87 ifeq ($(HOST_ARCH),$(ARCH)) 75 88 QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm 
76 89 else ··· 80 89 CFLAGS += -march=armv8-a -mtune=cortex-a53 81 90 endif 82 91 else ifeq ($(ARCH),arm) 92 + CHOST := arm-linux-musleabi 83 93 QEMU_ARCH := arm 84 94 KERNEL_ARCH := arm 85 95 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage 96 + QEMU_VPORT_RESULT := virtio-serial-device 86 97 ifeq ($(HOST_ARCH),$(ARCH)) 87 98 QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm 88 99 else ··· 92 99 CFLAGS += -march=armv7-a -mtune=cortex-a15 -mabi=aapcs-linux 93 100 endif 94 101 else ifeq ($(ARCH),armeb) 102 + CHOST := armeb-linux-musleabi 95 103 QEMU_ARCH := arm 96 104 KERNEL_ARCH := arm 97 105 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage 106 + QEMU_VPORT_RESULT := virtio-serial-device 98 107 ifeq ($(HOST_ARCH),$(ARCH)) 99 108 QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm 100 109 else ··· 105 110 LDFLAGS += -Wl,--be8 106 111 endif 107 112 else ifeq ($(ARCH),x86_64) 113 + CHOST := x86_64-linux-musl 108 114 QEMU_ARCH := x86_64 109 115 KERNEL_ARCH := x86_64 110 116 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage ··· 116 120 CFLAGS += -march=skylake-avx512 117 121 endif 118 122 else ifeq ($(ARCH),i686) 123 + CHOST := i686-linux-musl 119 124 QEMU_ARCH := i386 120 125 KERNEL_ARCH := x86 121 126 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage ··· 127 130 CFLAGS += -march=prescott 128 131 endif 129 132 else ifeq ($(ARCH),mips64) 133 + CHOST := mips64-linux-musl 130 134 QEMU_ARCH := mips64 131 135 KERNEL_ARCH := mips 132 136 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ··· 139 141 CFLAGS += -march=mips64r2 -EB 140 142 endif 141 143 else ifeq ($(ARCH),mips64el) 144 + CHOST := mips64el-linux-musl 142 145 QEMU_ARCH := mips64el 143 146 KERNEL_ARCH := mips 144 147 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ··· 151 152 CFLAGS += -march=mips64r2 -EL 152 153 endif 153 154 else ifeq ($(ARCH),mips) 155 + CHOST := mips-linux-musl 154 156 QEMU_ARCH := mips 155 157 KERNEL_ARCH := mips 156 158 
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ··· 163 163 CFLAGS += -march=mips32r2 -EB 164 164 endif 165 165 else ifeq ($(ARCH),mipsel) 166 + CHOST := mipsel-linux-musl 166 167 QEMU_ARCH := mipsel 167 168 KERNEL_ARCH := mips 168 169 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ··· 174 173 QEMU_MACHINE := -cpu 24Kf -machine malta -smp 1 175 174 CFLAGS += -march=mips32r2 -EL 176 175 endif 177 - else ifeq ($(ARCH),powerpc64le) 176 + else ifeq ($(ARCH),powerpc64) 177 + CHOST := powerpc64-linux-musl 178 178 QEMU_ARCH := ppc64 179 179 KERNEL_ARCH := powerpc 180 180 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ··· 184 182 else 185 183 QEMU_MACHINE := -machine pseries 186 184 endif 187 - CFLAGS += -mcpu=powerpc64le -mlong-double-64 185 + else ifeq ($(ARCH),powerpc64le) 186 + CHOST := powerpc64le-linux-musl 187 + QEMU_ARCH := ppc64 188 + KERNEL_ARCH := powerpc 189 + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux 190 + ifeq ($(HOST_ARCH),$(ARCH)) 191 + QEMU_MACHINE := -cpu host,accel=kvm -machine pseries 192 + else 193 + QEMU_MACHINE := -machine pseries 194 + endif 188 195 else ifeq ($(ARCH),powerpc) 196 + CHOST := powerpc-linux-musl 189 197 QEMU_ARCH := ppc 190 198 KERNEL_ARCH := powerpc 191 199 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/powerpc/boot/uImage ··· 204 192 else 205 193 QEMU_MACHINE := -machine ppce500 206 194 endif 207 - CFLAGS += -mcpu=powerpc -mlong-double-64 -msecure-plt 208 195 else ifeq ($(ARCH),m68k) 196 + CHOST := m68k-linux-musl 209 197 QEMU_ARCH := m68k 210 198 KERNEL_ARCH := m68k 211 199 KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux 212 200 KERNEL_CMDLINE := $(shell sed -n 's/CONFIG_CMDLINE=\(.*\)/\1/p' arch/m68k.config) 213 201 ifeq ($(HOST_ARCH),$(ARCH)) 214 - QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -smp 1 -append $(KERNEL_CMDLINE) 202 + QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -append $(KERNEL_CMDLINE) 215 203 else 216 204 QEMU_MACHINE := -machine q800 -smp 1 -append $(KERNEL_CMDLINE) 217 205 endif 206 + else ifeq 
($(ARCH),riscv64) 207 + CHOST := riscv64-linux-musl 208 + QEMU_ARCH := riscv64 209 + KERNEL_ARCH := riscv 210 + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/riscv/boot/Image 211 + QEMU_VPORT_RESULT := virtio-serial-device 212 + ifeq ($(HOST_ARCH),$(ARCH)) 213 + QEMU_MACHINE := -cpu host,accel=kvm -machine virt 218 214 else 219 - $(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64le, powerpc, m68k) 215 + QEMU_MACHINE := -cpu rv64 -machine virt 216 + endif 217 + else ifeq ($(ARCH),riscv32) 218 + CHOST := riscv32-linux-musl 219 + QEMU_ARCH := riscv32 220 + KERNEL_ARCH := riscv 221 + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/riscv/boot/Image 222 + QEMU_VPORT_RESULT := virtio-serial-device 223 + ifeq ($(HOST_ARCH),$(ARCH)) 224 + QEMU_MACHINE := -cpu host,accel=kvm -machine virt 225 + else 226 + QEMU_MACHINE := -cpu rv32 -machine virt 227 + endif 228 + else ifeq ($(ARCH),s390x) 229 + CHOST := s390x-linux-musl 230 + QEMU_ARCH := s390x 231 + KERNEL_ARCH := s390 232 + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/s390/boot/bzImage 233 + KERNEL_CMDLINE := $(shell sed -n 's/CONFIG_CMDLINE=\(.*\)/\1/p' arch/s390x.config) 234 + QEMU_VPORT_RESULT := virtio-serial-ccw 235 + ifeq ($(HOST_ARCH),$(ARCH)) 236 + QEMU_MACHINE := -cpu host,accel=kvm -machine s390-ccw-virtio -append $(KERNEL_CMDLINE) 237 + else 238 + QEMU_MACHINE := -machine s390-ccw-virtio -append $(KERNEL_CMDLINE) 239 + endif 240 + else 241 + $(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64, powerpc64le, powerpc, m68k, riscv64, riscv32, s390x) 220 242 endif 221 243 222 - REAL_CC := $(CBUILD)-gcc 223 - MUSL_CC := $(BUILD_PATH)/musl-gcc 224 - export CC := $(MUSL_CC) 225 - USERSPACE_DEPS := $(MUSL_CC) $(BUILD_PATH)/include/.installed $(BUILD_PATH)/include/linux/.installed 244 + TOOLCHAIN_FILENAME := $(CHOST)-cross.tgz 245 + TOOLCHAIN_TAR := $(DISTFILES_PATH)/$(TOOLCHAIN_FILENAME) 246 + TOOLCHAIN_PATH := 
$(BUILD_PATH)/$(CHOST)-cross 247 + TOOLCHAIN_DIR := https://download.wireguard.com/qemu-test/toolchains/20211123/ 248 + $(eval $(call file_download,toolchain-sha256sums-20211123,$(TOOLCHAIN_DIR)SHA256SUMS#,83da033fd8c798df476c21d9612da2dfb896ec62fbed4ceec5eefc0e56b3f0c8)) 249 + $(eval $(call file_download,$(TOOLCHAIN_FILENAME),$(TOOLCHAIN_DIR),,$(DISTFILES_PATH)/toolchain-sha256sums-20211123)) 226 250 251 + STRIP := $(CHOST)-strip 252 + CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST) 253 + $(info Building for $(CHOST) using $(CBUILD)) 254 + export CROSS_COMPILE := $(CHOST)- 255 + export PATH := $(TOOLCHAIN_PATH)/bin:$(PATH) 256 + export CC := $(CHOST)-gcc 257 + CCACHE_PATH := $(shell which ccache 2>/dev/null) 258 + ifneq ($(CCACHE_PATH),) 259 + export KBUILD_BUILD_TIMESTAMP := Fri Jun 5 15:58:00 CEST 2015 260 + export PATH := $(TOOLCHAIN_PATH)/bin/ccache:$(PATH) 261 + export CCACHE_SLOPPINESS := file_macro,time_macros 262 + export CCACHE_DIR ?= $(PWD)/ccache 263 + endif 264 + 265 + USERSPACE_DEPS := $(TOOLCHAIN_PATH)/.installed $(TOOLCHAIN_PATH)/$(CHOST)/include/linux/.installed 266 + 267 + comma := , 227 268 build: $(KERNEL_BZIMAGE) 228 269 qemu: $(KERNEL_BZIMAGE) 229 270 rm -f $(BUILD_PATH)/result ··· 287 222 $(QEMU_MACHINE) \ 288 223 -m $$(grep -q CONFIG_DEBUG_KMEMLEAK=y $(KERNEL_BUILD_PATH)/.config && echo 1G || echo 256M) \ 289 224 -serial stdio \ 290 - -serial file:$(BUILD_PATH)/result \ 225 + -chardev file,path=$(BUILD_PATH)/result,id=result \ 226 + $(if $(QEMU_VPORT_RESULT),-device $(QEMU_VPORT_RESULT) -device virtserialport$(comma)chardev=result,-serial chardev:result) \ 291 227 -no-reboot \ 292 228 -monitor none \ 293 229 -kernel $< 294 230 grep -Fq success $(BUILD_PATH)/result 295 231 296 - $(BUILD_PATH)/init-cpio-spec.txt: 232 + $(BUILD_PATH)/init-cpio-spec.txt: $(TOOLCHAIN_PATH)/.installed $(BUILD_PATH)/init 297 233 mkdir -p $(BUILD_PATH) 298 234 echo "file /init $(BUILD_PATH)/init 755 0 0" > $@ 299 235 echo "file /init.sh $(PWD)/../netns.sh 
755 0 0" >> $@ ··· 312 246 echo "slink /bin/iptables xtables-legacy-multi 777 0 0" >> $@ 313 247 echo "slink /bin/ping6 ping 777 0 0" >> $@ 314 248 echo "dir /lib 755 0 0" >> $@ 315 - echo "file /lib/libc.so $(MUSL_PATH)/lib/libc.so 755 0 0" >> $@ 316 - echo "slink /lib/ld-linux.so.1 libc.so 777 0 0" >> $@ 249 + echo "file /lib/libc.so $(TOOLCHAIN_PATH)/$(CHOST)/lib/libc.so 755 0 0" >> $@ 250 + echo "slink $$($(CHOST)-readelf -p .interp '$(BUILD_PATH)/init'| grep -o '/lib/.*') libc.so 777 0 0" >> $@ 317 251 318 - $(KERNEL_BUILD_PATH)/.config: kernel.config arch/$(ARCH).config 252 + $(KERNEL_BUILD_PATH)/.config: $(TOOLCHAIN_PATH)/.installed kernel.config arch/$(ARCH).config 319 253 mkdir -p $(KERNEL_BUILD_PATH) 320 254 cp kernel.config $(KERNEL_BUILD_PATH)/minimal.config 321 255 printf 'CONFIG_NR_CPUS=$(NR_CPUS)\nCONFIG_INITRAMFS_SOURCE="$(BUILD_PATH)/init-cpio-spec.txt"\n' >> $(KERNEL_BUILD_PATH)/minimal.config ··· 324 258 cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config $(KERNEL_BUILD_PATH)/minimal.config 325 259 $(if $(findstring yes,$(DEBUG_KERNEL)),cp debug.config $(KERNEL_BUILD_PATH) && cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config debug.config,) 326 260 327 - $(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-legacy-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) 261 + $(KERNEL_BZIMAGE): $(TOOLCHAIN_PATH)/.installed $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-legacy-multi 
$(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) 328 262 $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) 329 263 330 - $(BUILD_PATH)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config 331 - $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install 264 + $(TOOLCHAIN_PATH)/$(CHOST)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config $(TOOLCHAIN_PATH)/.installed 265 + rm -rf $(TOOLCHAIN_PATH)/$(CHOST)/include/linux 266 + $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(TOOLCHAIN_PATH)/$(CHOST) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install 332 267 touch $@ 333 268 334 - $(MUSL_PATH)/lib/libc.so: $(MUSL_TAR) 269 + $(TOOLCHAIN_PATH)/.installed: $(TOOLCHAIN_TAR) 335 270 mkdir -p $(BUILD_PATH) 336 271 flock -s $<.lock tar -C $(BUILD_PATH) -xf $< 337 - cd $(MUSL_PATH) && CC=$(REAL_CC) ./configure --prefix=/ --disable-static --build=$(CBUILD) 338 - $(MAKE) -C $(MUSL_PATH) 339 - $(STRIP) -s $@ 340 - 341 - $(BUILD_PATH)/include/.installed: $(MUSL_PATH)/lib/libc.so 342 - $(MAKE) -C $(MUSL_PATH) DESTDIR=$(BUILD_PATH) install-headers 272 + $(STRIP) -s $(TOOLCHAIN_PATH)/$(CHOST)/lib/libc.so 273 + ifneq ($(CCACHE_PATH),) 274 + mkdir -p $(TOOLCHAIN_PATH)/bin/ccache 275 + ln -s $(CCACHE_PATH) $(TOOLCHAIN_PATH)/bin/ccache/$(CC) 276 + endif 343 277 touch $@ 344 - 345 - $(MUSL_CC): $(MUSL_PATH)/lib/libc.so 346 - sh $(MUSL_PATH)/tools/musl-gcc.specs.sh $(BUILD_PATH)/include $(MUSL_PATH)/lib /lib/ld-linux.so.1 > $(BUILD_PATH)/musl-gcc.specs 347 - printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" "$$@"\n' > $(BUILD_PATH)/musl-gcc 348 - chmod +x $(BUILD_PATH)/musl-gcc 349 278 350 279 $(IPERF_PATH)/.installed: $(IPERF_TAR) 351 280 mkdir -p $(BUILD_PATH) ··· 350 289 touch $@ 351 290 352 291 $(IPERF_PATH)/src/iperf3: | 
$(IPERF_PATH)/.installed $(USERSPACE_DEPS) 292 + cd $(IPERF_PATH) && autoreconf -fi 353 293 cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --with-openssl=no 354 294 $(MAKE) -C $(IPERF_PATH) 355 295 $(STRIP) -s $@ ··· 366 304 367 305 $(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) 368 306 mkdir -p $(BUILD_PATH) 369 - $(MUSL_CC) -o $@ $(CFLAGS) $(LDFLAGS) -std=gnu11 $< 307 + $(CC) -o $@ $(CFLAGS) $(LDFLAGS) -std=gnu11 $< 370 308 $(STRIP) -s $@ 371 309 372 310 $(IPUTILS_PATH)/.installed: $(IPUTILS_TAR) ··· 385 323 touch $@ 386 324 387 325 $(BASH_PATH)/bash: | $(BASH_PATH)/.installed $(USERSPACE_DEPS) 388 - cd $(BASH_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --without-bash-malloc --disable-debugger --disable-help-builtin --disable-history --disable-multibyte --disable-progcomp --disable-readline --disable-mem-scramble 326 + cd $(BASH_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --without-bash-malloc --disable-debugger --disable-help-builtin --disable-history --disable-progcomp --disable-readline --disable-mem-scramble 389 327 $(MAKE) -C $(BASH_PATH) 390 328 $(STRIP) -s $@ 391 329 392 330 $(IPROUTE2_PATH)/.installed: $(IPROUTE2_TAR) 393 331 mkdir -p $(BUILD_PATH) 394 332 flock -s $<.lock tar -C $(BUILD_PATH) -xf $< 395 - printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=n\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS\n' > $(IPROUTE2_PATH)/config.mk 396 - printf 'lib: snapshot\n\t$$(MAKE) -C lib\nip/ip: lib\n\t$$(MAKE) -C ip ip\nmisc/ss: lib\n\t$$(MAKE) -C misc ss\n' >> $(IPROUTE2_PATH)/Makefile 333 + printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=n\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS -DHAVE_HANDLE_AT\n' > 
$(IPROUTE2_PATH)/config.mk 334 + printf 'libutil.a.done:\n\tflock -x $$@.lock $$(MAKE) -C lib\n\ttouch $$@\nip/ip: libutil.a.done\n\t$$(MAKE) -C ip ip\nmisc/ss: libutil.a.done\n\t$$(MAKE) -C misc ss\n' >> $(IPROUTE2_PATH)/Makefile 397 335 touch $@ 398 336 399 337 $(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(USERSPACE_DEPS) ··· 432 370 distclean: clean 433 371 rm -rf $(DISTFILES_PATH) 434 372 373 + cacheclean: clean 374 + ifneq ($(CCACHE_DIR),) 375 + rm -rf $(CCACHE_DIR) 376 + endif 377 + 435 378 menuconfig: $(KERNEL_BUILD_PATH)/.config 436 379 $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) menuconfig 437 380 438 - .PHONY: qemu build clean distclean menuconfig 381 + .PHONY: qemu build clean distclean cacheclean menuconfig 439 382 .DELETE_ON_ERROR:
+4 -1
tools/testing/selftests/wireguard/qemu/arch/aarch64.config
··· 1 1 CONFIG_SERIAL_AMBA_PL011=y 2 2 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 3 + CONFIG_VIRTIO_MENU=y 4 + CONFIG_VIRTIO_MMIO=y 5 + CONFIG_VIRTIO_CONSOLE=y 3 6 CONFIG_CMDLINE_BOOL=y 4 - CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" 7 + CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1" 5 8 CONFIG_FRAME_WARN=1280
+4 -1
tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config
··· 1 1 CONFIG_CPU_BIG_ENDIAN=y 2 2 CONFIG_SERIAL_AMBA_PL011=y 3 3 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 4 + CONFIG_VIRTIO_MENU=y 5 + CONFIG_VIRTIO_MMIO=y 6 + CONFIG_VIRTIO_CONSOLE=y 4 7 CONFIG_CMDLINE_BOOL=y 5 - CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" 8 + CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1" 6 9 CONFIG_FRAME_WARN=1280
+4 -1
tools/testing/selftests/wireguard/qemu/arch/arm.config
··· 4 4 CONFIG_THUMB2_KERNEL=n 5 5 CONFIG_SERIAL_AMBA_PL011=y 6 6 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 7 + CONFIG_VIRTIO_MENU=y 8 + CONFIG_VIRTIO_MMIO=y 9 + CONFIG_VIRTIO_CONSOLE=y 7 10 CONFIG_CMDLINE_BOOL=y 8 - CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" 11 + CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1" 9 12 CONFIG_FRAME_WARN=1024
+4 -1
tools/testing/selftests/wireguard/qemu/arch/armeb.config
··· 4 4 CONFIG_THUMB2_KERNEL=n 5 5 CONFIG_SERIAL_AMBA_PL011=y 6 6 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 7 + CONFIG_VIRTIO_MENU=y 8 + CONFIG_VIRTIO_MMIO=y 9 + CONFIG_VIRTIO_CONSOLE=y 7 10 CONFIG_CMDLINE_BOOL=y 8 - CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" 11 + CONFIG_CMDLINE="console=ttyAMA0 wg.success=vport0p1 panic_on_warn=1" 9 12 CONFIG_CPU_BIG_ENDIAN=y 10 13 CONFIG_FRAME_WARN=1024
+1 -1
tools/testing/selftests/wireguard/qemu/arch/i686.config
··· 2 2 CONFIG_SERIAL_8250=y 3 3 CONFIG_SERIAL_8250_CONSOLE=y 4 4 CONFIG_CMDLINE_BOOL=y 5 - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" 5 + CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1" 6 6 CONFIG_FRAME_WARN=1024
+1 -1
tools/testing/selftests/wireguard/qemu/arch/m68k.config
··· 5 5 CONFIG_SERIAL_PMACZILOG=y 6 6 CONFIG_SERIAL_PMACZILOG_TTYS=y 7 7 CONFIG_SERIAL_PMACZILOG_CONSOLE=y 8 - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" 8 + CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1" 9 9 CONFIG_FRAME_WARN=1024
+1 -1
tools/testing/selftests/wireguard/qemu/arch/mips.config
··· 7 7 CONFIG_SERIAL_8250=y 8 8 CONFIG_SERIAL_8250_CONSOLE=y 9 9 CONFIG_CMDLINE_BOOL=y 10 - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" 10 + CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1" 11 11 CONFIG_FRAME_WARN=1024
+1 -1
tools/testing/selftests/wireguard/qemu/arch/mips64.config
··· 10 10 CONFIG_SERIAL_8250=y 11 11 CONFIG_SERIAL_8250_CONSOLE=y 12 12 CONFIG_CMDLINE_BOOL=y 13 - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" 13 + CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1" 14 14 CONFIG_FRAME_WARN=1280
+1 -1
tools/testing/selftests/wireguard/qemu/arch/mips64el.config
··· 11 11 CONFIG_SERIAL_8250=y 12 12 CONFIG_SERIAL_8250_CONSOLE=y 13 13 CONFIG_CMDLINE_BOOL=y 14 - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" 14 + CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1" 15 15 CONFIG_FRAME_WARN=1280
+1 -1
tools/testing/selftests/wireguard/qemu/arch/mipsel.config
··· 8 8 CONFIG_SERIAL_8250=y 9 9 CONFIG_SERIAL_8250_CONSOLE=y 10 10 CONFIG_CMDLINE_BOOL=y 11 - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" 11 + CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1" 12 12 CONFIG_FRAME_WARN=1024
+1 -1
tools/testing/selftests/wireguard/qemu/arch/powerpc.config
··· 6 6 CONFIG_SERIAL_8250_CONSOLE=y 7 7 CONFIG_MATH_EMULATION=y 8 8 CONFIG_CMDLINE_BOOL=y 9 - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" 9 + CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1" 10 10 CONFIG_FRAME_WARN=1024
+13
tools/testing/selftests/wireguard/qemu/arch/powerpc64.config
··· 1 + CONFIG_PPC64=y 2 + CONFIG_PPC_PSERIES=y 3 + CONFIG_ALTIVEC=y 4 + CONFIG_VSX=y 5 + CONFIG_PPC_OF_BOOT_TRAMPOLINE=y 6 + CONFIG_PPC_RADIX_MMU=y 7 + CONFIG_HVC_CONSOLE=y 8 + CONFIG_CPU_BIG_ENDIAN=y 9 + CONFIG_CMDLINE_BOOL=y 10 + CONFIG_CMDLINE="console=hvc0 wg.success=hvc1 panic_on_warn=1" 11 + CONFIG_SECTION_MISMATCH_WARN_ONLY=y 12 + CONFIG_FRAME_WARN=1280 13 + CONFIG_THREAD_SHIFT=14
+1 -1
tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config
··· 7 7 CONFIG_HVC_CONSOLE=y 8 8 CONFIG_CPU_LITTLE_ENDIAN=y 9 9 CONFIG_CMDLINE_BOOL=y 10 - CONFIG_CMDLINE="console=hvc0 wg.success=hvc1" 10 + CONFIG_CMDLINE="console=hvc0 wg.success=hvc1 panic_on_warn=1" 11 11 CONFIG_SECTION_MISMATCH_WARN_ONLY=y 12 12 CONFIG_FRAME_WARN=1280 13 13 CONFIG_THREAD_SHIFT=14
+12
tools/testing/selftests/wireguard/qemu/arch/riscv32.config
··· 1 + CONFIG_ARCH_RV32I=y 2 + CONFIG_MMU=y 3 + CONFIG_FPU=y 4 + CONFIG_SOC_VIRT=y 5 + CONFIG_SERIAL_8250=y 6 + CONFIG_SERIAL_8250_CONSOLE=y 7 + CONFIG_SERIAL_OF_PLATFORM=y 8 + CONFIG_VIRTIO_MENU=y 9 + CONFIG_VIRTIO_MMIO=y 10 + CONFIG_VIRTIO_CONSOLE=y 11 + CONFIG_CMDLINE="console=ttyS0 wg.success=vport0p1 panic_on_warn=1" 12 + CONFIG_CMDLINE_FORCE=y
+12
tools/testing/selftests/wireguard/qemu/arch/riscv64.config
··· 1 + CONFIG_ARCH_RV64I=y 2 + CONFIG_MMU=y 3 + CONFIG_FPU=y 4 + CONFIG_SOC_VIRT=y 5 + CONFIG_SERIAL_8250=y 6 + CONFIG_SERIAL_8250_CONSOLE=y 7 + CONFIG_SERIAL_OF_PLATFORM=y 8 + CONFIG_VIRTIO_MENU=y 9 + CONFIG_VIRTIO_MMIO=y 10 + CONFIG_VIRTIO_CONSOLE=y 11 + CONFIG_CMDLINE="console=ttyS0 wg.success=vport0p1 panic_on_warn=1" 12 + CONFIG_CMDLINE_FORCE=y
+6
tools/testing/selftests/wireguard/qemu/arch/s390x.config
··· 1 + CONFIG_SCLP_VT220_TTY=y 2 + CONFIG_SCLP_VT220_CONSOLE=y 3 + CONFIG_VIRTIO_MENU=y 4 + CONFIG_VIRTIO_CONSOLE=y 5 + CONFIG_S390_GUEST=y 6 + CONFIG_CMDLINE="console=ttysclp0 wg.success=vport0p1 panic_on_warn=1"
+1 -1
tools/testing/selftests/wireguard/qemu/arch/x86_64.config
··· 2 2 CONFIG_SERIAL_8250=y 3 3 CONFIG_SERIAL_8250_CONSOLE=y 4 4 CONFIG_CMDLINE_BOOL=y 5 - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" 5 + CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1 panic_on_warn=1" 6 6 CONFIG_FRAME_WARN=1280
-6
tools/testing/selftests/wireguard/qemu/init.c
··· 110 110 panic("write(exception-trace)"); 111 111 close(fd); 112 112 } 113 - fd = open("/proc/sys/kernel/panic_on_warn", O_WRONLY); 114 - if (fd >= 0) { 115 - if (write(fd, "1\n", 2) != 2) 116 - panic("write(panic_on_warn)"); 117 - close(fd); 118 - } 119 113 } 120 114 121 115 static void kmod_selftests(void)
+1
virt/kvm/kvm_main.c
··· 4354 4354 return 0; 4355 4355 #endif 4356 4356 case KVM_CAP_BINARY_STATS_FD: 4357 + case KVM_CAP_SYSTEM_EVENT_DATA: 4357 4358 return 1; 4358 4359 default: 4359 4360 break;